From 7582fa96423d74b7aa4e0fbbe7e8c706725546dd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 14 Apr 2025 19:24:28 +0200
Subject: [PATCH 01/72] Update Gitea token secret
---
.gitea/workflows/docker-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.gitea/workflows/docker-publish.yml b/.gitea/workflows/docker-publish.yml
index c4aca97..84f84cc 100644
--- a/.gitea/workflows/docker-publish.yml
+++ b/.gitea/workflows/docker-publish.yml
@@ -56,7 +56,7 @@ jobs:
with:
registry: git.lovinator.space
username: thelovinator
- password: ${{ secrets.GITEA_TOKEN }}
+ password: ${{ secrets.PACKAGES_WRITE_GITEA_TOKEN }}
- uses: https://github.com/docker/build-push-action@v6
with:
From dcfb467858b9e8ffb2a5c515a2c45689f21b2c01 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 14 Apr 2025 20:07:49 +0200
Subject: [PATCH 02/72] Delete .github/workflows/docker-check.yml
---
.github/workflows/docker-check.yml | 19 -------------------
1 file changed, 19 deletions(-)
delete mode 100644 .github/workflows/docker-check.yml
diff --git a/.github/workflows/docker-check.yml b/.github/workflows/docker-check.yml
deleted file mode 100644
index ff43f68..0000000
--- a/.github/workflows/docker-check.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Docker Build Check
-
-on:
- push:
- paths:
- - 'Dockerfile'
- pull_request:
- paths:
- - 'Dockerfile'
-
-jobs:
- docker-check:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Run Docker Build Check
- run: docker build --check .
From 7bbbbc23097896311151c5484a35cfcbf692510f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 14 Apr 2025 20:56:49 +0200
Subject: [PATCH 03/72] Refactor Docker publish workflow; add
multi-architecture support; cache build layers
---
.gitea/workflows/docker-publish.yml | 60 ++++++++++++++++++++---------
Dockerfile | 2 +-
2 files changed, 42 insertions(+), 20 deletions(-)
diff --git a/.gitea/workflows/docker-publish.yml b/.gitea/workflows/docker-publish.yml
index 84f84cc..c97c62f 100644
--- a/.gitea/workflows/docker-publish.yml
+++ b/.gitea/workflows/docker-publish.yml
@@ -23,25 +23,6 @@ jobs:
OPENAI_TOKEN: "0"
if: gitea.event_name != 'pull_request'
steps:
- - uses: https://github.com/actions/checkout@v4
- - uses: https://github.com/docker/setup-qemu-action@v3
- - uses: https://github.com/docker/setup-buildx-action@v3
- - uses: https://github.com/astral-sh/ruff-action@v3
-
- - run: docker build --check .
- - run: ruff check --exit-non-zero-on-fix --verbose
- - run: ruff format --check --verbose
-
- - id: meta
- uses: https://github.com/docker/metadata-action@v5
- env:
- DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
- with:
- images: |
- ghcr.io/thelovinator1/anewdawn
- git.lovinator.space/thelovinator/anewdawn
- tags: type=raw,value=latest,enable=${{ gitea.ref == format('refs/heads/{0}', 'master') }}
-
# GitHub Container Registry
- uses: https://github.com/docker/login-action@v3
if: github.event_name != 'pull_request'
@@ -58,10 +39,51 @@ jobs:
username: thelovinator
password: ${{ secrets.PACKAGES_WRITE_GITEA_TOKEN }}
+ # Download the latest commit from the master branch
+ - uses: https://github.com/actions/checkout@v4
+
+ # Set up QEMU
+ - id: qemu
+ uses: https://github.com/docker/setup-qemu-action@v3
+ with:
+ image: tonistiigi/binfmt:master
+ platforms: linux/amd64,linux/arm64
+
+ # Set up Buildx so we can build multi-arch images
+ - uses: https://github.com/docker/setup-buildx-action@v3
+
+
+ # Install the latest version of ruff
+ - uses: https://github.com/astral-sh/ruff-action@v3
+ with:
+ version: "latest"
+
+ # Lint the Python code using ruff
+ - run: ruff check --exit-non-zero-on-fix --verbose
+
+ # Check if the Python code needs formatting
+ - run: ruff format --check --verbose
+ - run: docker build --check .
+
+ # Extract metadata (tags, labels) from Git reference and GitHub events for Docker
+ - id: meta
+ uses: https://github.com/docker/metadata-action@v5
+ env:
+ DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
+ with:
+ images: |
+ ghcr.io/thelovinator1/anewdawn
+ git.lovinator.space/thelovinator/anewdawn
+ tags: type=raw,value=latest,enable=${{ gitea.ref == format('refs/heads/{0}', 'master') }}
+
+ # Build and push the Docker image
- uses: https://github.com/docker/build-push-action@v6
with:
context: .
+ platforms: linux/amd64,linux/arm64
push: ${{ gitea.event_name != 'pull_request' }}
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ steps.meta.outputs.tags }}
annotations: ${{ steps.meta.outputs.annotations }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
diff --git a/Dockerfile b/Dockerfile
index d4dde17..5e89a04 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
# check=error=true;experimental=all
-FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim@sha256:73c021c3fe7264924877039e8a449ad3bb380ec89214282301affa9b2f863c5d
+FROM --platform=$BUILDPLATFORM ghcr.io/astral-sh/uv:python3.13-bookworm-slim@sha256:73c021c3fe7264924877039e8a449ad3bb380ec89214282301affa9b2f863c5d
# Change the working directory to the `app` directory
WORKDIR /app
From e4b9b8129b612ff14d3657e0ab4b8c71a935a40f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 14 Apr 2025 21:10:12 +0200
Subject: [PATCH 04/72] Delete duplicate workflows; update Docker publish cache
host
---
.gitea/workflows/docker-check.yml | 19 -------------------
.gitea/workflows/docker-publish.yml | 4 +++-
.gitea/workflows/ruff.yml | 19 -------------------
3 files changed, 3 insertions(+), 39 deletions(-)
delete mode 100644 .gitea/workflows/docker-check.yml
delete mode 100644 .gitea/workflows/ruff.yml
diff --git a/.gitea/workflows/docker-check.yml b/.gitea/workflows/docker-check.yml
deleted file mode 100644
index ff43f68..0000000
--- a/.gitea/workflows/docker-check.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Docker Build Check
-
-on:
- push:
- paths:
- - 'Dockerfile'
- pull_request:
- paths:
- - 'Dockerfile'
-
-jobs:
- docker-check:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Run Docker Build Check
- run: docker build --check .
diff --git a/.gitea/workflows/docker-publish.yml b/.gitea/workflows/docker-publish.yml
index c97c62f..e8239b2 100644
--- a/.gitea/workflows/docker-publish.yml
+++ b/.gitea/workflows/docker-publish.yml
@@ -12,7 +12,7 @@ on:
cache:
enabled: true
dir: ""
- host: "192.168.1.127"
+ host: "gitea_act_runner"
port: 8088
jobs:
@@ -63,6 +63,8 @@ jobs:
# Check if the Python code needs formatting
- run: ruff format --check --verbose
+
+ # Lint Dockerfile
- run: docker build --check .
# Extract metadata (tags, labels) from Git reference and GitHub events for Docker
diff --git a/.gitea/workflows/ruff.yml b/.gitea/workflows/ruff.yml
deleted file mode 100644
index 28b5029..0000000
--- a/.gitea/workflows/ruff.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Ruff
-
-on:
- push:
- pull_request:
- workflow_dispatch:
- schedule:
- - cron: '0 0 * * *' # Run every day at midnight
-
-env:
- RUFF_OUTPUT_FORMAT: github
-jobs:
- ruff:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- - uses: astral-sh/ruff-action@v3
- - run: ruff check --exit-non-zero-on-fix --verbose
- - run: ruff format --check --verbose
From 83d3365a85a72aac1f642764605ee3048b93f493 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 14 Apr 2025 21:42:48 +0200
Subject: [PATCH 05/72] Don't cache tonistiigi/binfmt
---
.gitea/workflows/docker-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.gitea/workflows/docker-publish.yml b/.gitea/workflows/docker-publish.yml
index e8239b2..ad93da4 100644
--- a/.gitea/workflows/docker-publish.yml
+++ b/.gitea/workflows/docker-publish.yml
@@ -21,7 +21,6 @@ jobs:
env:
DISCORD_TOKEN: "0"
OPENAI_TOKEN: "0"
- if: gitea.event_name != 'pull_request'
steps:
# GitHub Container Registry
- uses: https://github.com/docker/login-action@v3
@@ -48,6 +47,7 @@ jobs:
with:
image: tonistiigi/binfmt:master
platforms: linux/amd64,linux/arm64
+ cache-image: false
# Set up Buildx so we can build multi-arch images
- uses: https://github.com/docker/setup-buildx-action@v3
From 0d36a591fb8299fb28de027c18321c1937f5801c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 14 Apr 2025 21:47:08 +0200
Subject: [PATCH 06/72] Remove caching configuration from Docker publish
workflow
---
.gitea/workflows/docker-publish.yml | 8 --------
1 file changed, 8 deletions(-)
diff --git a/.gitea/workflows/docker-publish.yml b/.gitea/workflows/docker-publish.yml
index ad93da4..e9bb96a 100644
--- a/.gitea/workflows/docker-publish.yml
+++ b/.gitea/workflows/docker-publish.yml
@@ -9,12 +9,6 @@ on:
schedule:
- cron: "@daily"
-cache:
- enabled: true
- dir: ""
- host: "gitea_act_runner"
- port: 8088
-
jobs:
docker:
runs-on: ubuntu-latest
@@ -87,5 +81,3 @@ jobs:
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ steps.meta.outputs.tags }}
annotations: ${{ steps.meta.outputs.annotations }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
From cd5c2ca10d2273c5925c38c6723478af4aa58615 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Wed, 28 May 2025 17:10:51 +0200
Subject: [PATCH 07/72] Update model version
---
misc.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/misc.py b/misc.py
index 1aa0394..3915472 100644
--- a/misc.py
+++ b/misc.py
@@ -38,11 +38,11 @@ def chat(user_message: str, openai_client: OpenAI) -> str | None:
The response from the AI model.
"""
completion: ChatCompletion = openai_client.chat.completions.create(
- model="gpt-4o-mini",
+ model="gpt-4.5-preview",
messages=[
{
"role": "developer",
- "content": "You are in a Discord group chat with people above the age of 30. Use Discord Markdown to format messages if needed.", # noqa: E501
+ "content": "You are in a Discord group chat. Use Discord Markdown to format messages if needed.",
},
{"role": "user", "content": user_message},
],
From 657502fc12fddbe77084f2102f5b81c6ae76e4f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Thu, 24 Jul 2025 23:53:01 +0200
Subject: [PATCH 08/72] Update chat function to use gpt-4.1 model and correct
role in system message
---
misc.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/misc.py b/misc.py
index 3915472..d327488 100644
--- a/misc.py
+++ b/misc.py
@@ -38,11 +38,11 @@ def chat(user_message: str, openai_client: OpenAI) -> str | None:
The response from the AI model.
"""
completion: ChatCompletion = openai_client.chat.completions.create(
- model="gpt-4.5-preview",
+ model="gpt-4.1",
messages=[
{
- "role": "developer",
- "content": "You are in a Discord group chat. Use Discord Markdown to format messages if needed.",
+ "role": "system",
+ "content": "You are in a Discord group chat. People can ask you questions. Use Discord Markdown to format messages if needed.",
},
{"role": "user", "content": user_message},
],
From ec23af20a6358a36ee920e92dde99348d808ed67 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Thu, 24 Jul 2025 23:56:51 +0200
Subject: [PATCH 09/72] Use GitHub Actions
---
.../workflows/docker-publish.yml | 34 +++++--------------
1 file changed, 9 insertions(+), 25 deletions(-)
rename {.gitea => .github}/workflows/docker-publish.yml (61%)
diff --git a/.gitea/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
similarity index 61%
rename from .gitea/workflows/docker-publish.yml
rename to .github/workflows/docker-publish.yml
index e9bb96a..5bfc314 100644
--- a/.gitea/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -2,12 +2,8 @@ name: Build Docker Image
on:
push:
- branches:
- - master
pull_request:
workflow_dispatch:
- schedule:
- - cron: "@daily"
jobs:
docker:
@@ -17,38 +13,28 @@ jobs:
OPENAI_TOKEN: "0"
steps:
# GitHub Container Registry
- - uses: https://github.com/docker/login-action@v3
+ - uses: docker/login-action@v3
if: github.event_name != 'pull_request'
with:
registry: ghcr.io
username: thelovinator1
- password: ${{ secrets.PACKAGES_WRITE_GITHUB_TOKEN }}
-
- # Gitea Container Registry
- - uses: https://github.com/docker/login-action@v3
- if: github.event_name != 'pull_request'
- with:
- registry: git.lovinator.space
- username: thelovinator
- password: ${{ secrets.PACKAGES_WRITE_GITEA_TOKEN }}
+ password: ${{ secrets.GITHUB_TOKEN }}
# Download the latest commit from the master branch
- - uses: https://github.com/actions/checkout@v4
+ - uses: actions/checkout@v4
# Set up QEMU
- id: qemu
- uses: https://github.com/docker/setup-qemu-action@v3
+ uses: docker/setup-qemu-action@v3
with:
image: tonistiigi/binfmt:master
platforms: linux/amd64,linux/arm64
- cache-image: false
# Set up Buildx so we can build multi-arch images
- - uses: https://github.com/docker/setup-buildx-action@v3
-
+ - uses: docker/setup-buildx-action@v3
# Install the latest version of ruff
- - uses: https://github.com/astral-sh/ruff-action@v3
+ - uses: astral-sh/ruff-action@v3
with:
version: "latest"
@@ -63,17 +49,15 @@ jobs:
# Extract metadata (tags, labels) from Git reference and GitHub events for Docker
- id: meta
- uses: https://github.com/docker/metadata-action@v5
+ uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
with:
- images: |
- ghcr.io/thelovinator1/anewdawn
- git.lovinator.space/thelovinator/anewdawn
+ images: ghcr.io/thelovinator1/anewdawn
tags: type=raw,value=latest,enable=${{ gitea.ref == format('refs/heads/{0}', 'master') }}
# Build and push the Docker image
- - uses: https://github.com/docker/build-push-action@v6
+ - uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
From 9f7d71dbb02365a93439c8e757a87afbbe90c69a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Thu, 24 Jul 2025 23:58:08 +0200
Subject: [PATCH 10/72] Fix Gitea references to use GitHub context in Docker
publish workflow
---
.github/workflows/docker-publish.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 5bfc314..56ccf04 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -54,14 +54,14 @@ jobs:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
with:
images: ghcr.io/thelovinator1/anewdawn
- tags: type=raw,value=latest,enable=${{ gitea.ref == format('refs/heads/{0}', 'master') }}
+ tags: type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
# Build and push the Docker image
- uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
- push: ${{ gitea.event_name != 'pull_request' }}
+ push: ${{ github.event_name != 'pull_request' }}
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ steps.meta.outputs.tags }}
annotations: ${{ steps.meta.outputs.annotations }}
From 516cb58a97bc98014718805bfc47d6cf1648cdd3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 25 Jul 2025 00:00:53 +0200
Subject: [PATCH 11/72] Line too long
---
misc.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/misc.py b/misc.py
index d327488..ae72270 100644
--- a/misc.py
+++ b/misc.py
@@ -42,7 +42,7 @@ def chat(user_message: str, openai_client: OpenAI) -> str | None:
messages=[
{
"role": "system",
- "content": "You are in a Discord group chat. People can ask you questions. Use Discord Markdown to format messages if needed.",
+ "content": "You are in a Discord group chat. People can ask you questions. Use Discord Markdown to format messages if needed.", # noqa: E501
},
{"role": "user", "content": user_message},
],
From 71b3f47496a6548826f991d66e72f0cbb10d5150 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 01:51:54 +0200
Subject: [PATCH 12/72] Update chat function to use gpt-5-chat-latest model
---
misc.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/misc.py b/misc.py
index ae72270..5e3f395 100644
--- a/misc.py
+++ b/misc.py
@@ -38,7 +38,7 @@ def chat(user_message: str, openai_client: OpenAI) -> str | None:
The response from the AI model.
"""
completion: ChatCompletion = openai_client.chat.completions.create(
- model="gpt-4.1",
+ model="gpt-5-chat-latest",
messages=[
{
"role": "system",
From d4d2bab6d990c99b5dda8fdd9a7ea345f21da617 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 02:01:31 +0200
Subject: [PATCH 13/72] Remove pre-commit configuration file
---
.pre-commit-config.yaml | 41 -----------------------------------------
1 file changed, 41 deletions(-)
delete mode 100644 .pre-commit-config.yaml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
deleted file mode 100644
index 29b3e9a..0000000
--- a/.pre-commit-config.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-repos:
- - repo: https://github.com/asottile/add-trailing-comma
- rev: v3.1.0
- hooks:
- - id: add-trailing-comma
-
- - repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v5.0.0
- hooks:
- - id: check-added-large-files
- - id: check-ast
- - id: check-builtin-literals
- - id: check-executables-have-shebangs
- - id: check-merge-conflict
- - id: check-shebang-scripts-are-executable
- - id: check-toml
- - id: check-vcs-permalinks
- - id: check-yaml
- - id: end-of-file-fixer
- - id: mixed-line-ending
- - id: name-tests-test
- args: ["--pytest-test-first"]
- - id: trailing-whitespace
-
- - repo: https://github.com/asottile/pyupgrade
- rev: v3.19.1
- hooks:
- - id: pyupgrade
- args: ["--py311-plus"]
-
- - repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.11.5
- hooks:
- - id: ruff-format
- - id: ruff
- args: ["--fix", "--exit-non-zero-on-fix"]
-
- - repo: https://github.com/rhysd/actionlint
- rev: v1.7.7
- hooks:
- - id: actionlint
From 158c88c57e87f1ca26c6f0b5b0157b7b6ed555b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 02:02:27 +0200
Subject: [PATCH 14/72] Fix typo in setup_hook docstring
---
.vscode/settings.json | 5 ++++-
main.py | 2 +-
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 55560d2..1ad47b0 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,6 +5,8 @@
"audioop",
"automerge",
"buildx",
+ "CLAHE",
+ "Denoise",
"denoising",
"docstrings",
"dotenv",
@@ -15,6 +17,7 @@
"imdecode",
"imencode",
"IMREAD",
+ "IMWRITE",
"isort",
"killyoy",
"levelname",
@@ -36,4 +39,4 @@
"tobytes",
"unsignedinteger"
]
-}
+}
\ No newline at end of file
diff --git a/main.py b/main.py
index c1da129..ad8da7e 100644
--- a/main.py
+++ b/main.py
@@ -46,7 +46,7 @@ class LoviBotClient(discord.Client):
self.tree = app_commands.CommandTree(self)
async def setup_hook(self) -> None:
- """Sync commands globaly."""
+ """Sync commands globally."""
await self.tree.sync()
async def on_ready(self) -> None:
From 659fe3f13db0608245b2316dbbacc40e8c6820ab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 02:04:54 +0200
Subject: [PATCH 15/72] Remove GitHub Copilot instructions document
---
.github/copilot-instructions.md | 36 ---------------------------------
1 file changed, 36 deletions(-)
delete mode 100644 .github/copilot-instructions.md
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
deleted file mode 100644
index 7921bbe..0000000
--- a/.github/copilot-instructions.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Custom Instructions for GitHub Copilot
-
-## Project Overview
-This is a Python project named ANewDawn. It uses Docker for containerization (`Dockerfile`, `docker-compose.yml`). Key files include `main.py` and `settings.py`.
-
-## Development Environment
-- **Operating System:** Windows
-- **Default Shell:** PowerShell (`pwsh.exe`). Please generate terminal commands compatible with PowerShell.
-
-## Coding Standards
-- **Linting & Formatting:** We use `ruff` for linting and formatting. Adhere to `ruff` standards. Configuration is in `.github/workflows/ruff.yml` and possibly `pyproject.toml` or `ruff.toml`.
-- **Python Version:** 3.13
-- **Dependencies:** Managed using `uv` and listed in `pyproject.toml`. Commands include:
- - `uv run pytest` for testing.
- - `uv add ` for package installation.
- - `uv sync --upgrade` for dependency updates.
- - `uv run python main.py` to run the project.
-
-## General Guidelines
-- Follow Python best practices.
-- Write clear, concise code.
-- Add comments only for complex logic.
-- Ensure compatibility with the Docker environment.
-- Use `uv` commands for package management and scripts.
-- Use `docker` and `docker-compose` for container tasks:
- - Build: `docker build -t .`
- - Run: `docker run ` or `docker-compose up`.
- - Stop/Remove: `docker stop ` and `docker rm `.
-
-## Discord Bot Functionality
-- **Chat Interaction:** Responds to messages containing "lovibot" or its mention (`<@345000831499894795>`) using the OpenAI chat API (`gpt-4o-mini`). See `on_message` event handler and `misc.chat` function.
-- **Slash Commands:**
- - `/ask `: Directly ask the AI a question. Uses `misc.chat`.
-- **Context Menu Commands:**
- - `Enhance Image`: Right-click on a message with an image to enhance it using OpenCV methods (`enhance_image1`, `enhance_image2`, `enhance_image3`).
-- **User Restrictions:** Interaction is limited to users listed in `misc.get_allowed_users()`. Image creation has additional restrictions.
From 86cb28208d46fee2e7c8bcdf27db7c873d525c21 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 02:25:43 +0200
Subject: [PATCH 16/72] Enhance chat functionality by adding message memory and
context for improved responses
---
.vscode/settings.json | 1 +
main.py | 23 +++++++++++-----
misc.py | 61 +++++++++++++++++++++++++++++++++++++------
3 files changed, 70 insertions(+), 15 deletions(-)
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 1ad47b0..01e4dac 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -37,6 +37,7 @@
"testpaths",
"thelovinator",
"tobytes",
+ "twimg",
"unsignedinteger"
]
}
\ No newline at end of file
diff --git a/main.py b/main.py
index ad8da7e..0ade936 100644
--- a/main.py
+++ b/main.py
@@ -15,7 +15,7 @@ import sentry_sdk
from discord import app_commands
from openai import OpenAI
-from misc import chat, get_allowed_users
+from misc import add_message_to_memory, chat, get_allowed_users
from settings import Settings
sentry_sdk.init(
@@ -74,14 +74,17 @@ class LoviBotClient(discord.Client):
logger.info("No message content found in the event: %s", message)
return
+ # Add the message to memory
+ add_message_to_memory(str(message.channel.id), message.author.name, incoming_message)
+
lowercase_message: str = incoming_message.lower() if incoming_message else ""
- trigger_keywords: list[str] = ["lovibot", "<@345000831499894795>"]
+ trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>", "grok", "@grok"]
if any(trigger in lowercase_message for trigger in trigger_keywords):
logger.info("Received message: %s from: %s", incoming_message, message.author.name)
async with message.channel.typing():
try:
- response: str | None = chat(incoming_message, openai_client)
+ response: str | None = chat(incoming_message, openai_client, str(message.channel.id))
except openai.OpenAIError as e:
logger.exception("An error occurred while chatting with the AI model.")
e.add_note(f"Message: {incoming_message}\nEvent: {message}\nWho: {message.author.name}")
@@ -167,7 +170,7 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
return
try:
- response: str | None = chat(text, openai_client)
+ response: str | None = chat(text, openai_client, str(interaction.channel_id))
except openai.OpenAIError as e:
logger.exception("An error occurred while chatting with the AI model.")
await interaction.followup.send(f"An error occurred: {e}")
@@ -343,6 +346,8 @@ def extract_image_url(message: discord.Message) -> str | None:
the function searches the message content for any direct links ending in
common image file extensions (e.g., .png, .jpg, .jpeg, .gif, .webp).
+ Additionally, it handles Twitter image URLs and normalizes them to a standard format.
+
Args:
message (discord.Message): The message from which to extract the image URL.
@@ -364,12 +369,16 @@ def extract_image_url(message: discord.Message) -> str | None:
if not image_url:
match: re.Match[str] | None = re.search(
- pattern=r"(https?://[^\s]+(\.png|\.jpg|\.jpeg|\.gif|\.webp))",
- string=message.content,
- flags=re.IGNORECASE,
+ r"(https?://[^\s]+\.(png|jpg|jpeg|gif|webp)(\?[^\s]*)?)", message.content, re.IGNORECASE
)
if match:
image_url = match.group(0)
+
+ # Handle Twitter image URLs
+ if image_url and "pbs.twimg.com/media/" in image_url:
+ # Normalize Twitter image URLs to the highest quality format
+ image_url = re.sub(r"\?format=[^&]+&name=[^&]+", "?format=jpg&name=orig", image_url)
+
return image_url
diff --git a/misc.py b/misc.py
index 5e3f395..60c5a91 100644
--- a/misc.py
+++ b/misc.py
@@ -1,6 +1,8 @@
from __future__ import annotations
+import datetime
import logging
+from collections import deque
from typing import TYPE_CHECKING
if TYPE_CHECKING:
@@ -10,6 +12,9 @@ if TYPE_CHECKING:
logger: logging.Logger = logging.getLogger(__name__)
+# A dictionary to store recent messages per channel with a maximum length per channel
+recent_messages: dict[str, deque[tuple[str, str, datetime.datetime]]] = {}
+
def get_allowed_users() -> list[str]:
"""Get the list of allowed users to interact with the bot.
@@ -27,25 +32,65 @@ def get_allowed_users() -> list[str]:
]
-def chat(user_message: str, openai_client: OpenAI) -> str | None:
+def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
+ """Add a message to the memory for a specific channel.
+
+ Args:
+ channel_id: The ID of the channel where the message was sent.
+ user: The user who sent the message.
+ message: The content of the message.
+ """
+ if channel_id not in recent_messages:
+ recent_messages[channel_id] = deque(maxlen=50)
+
+ timestamp: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
+ recent_messages[channel_id].append((user, message, timestamp))
+
+ logger.info("Added message to memory: %s from %s in channel %s", message, user, channel_id)
+
+
+def get_recent_messages(channel_id: str, threshold_minutes: int = 10) -> list[tuple[str, str]]:
+ """Retrieve messages from the last `threshold_minutes` minutes for a specific channel.
+
+ Args:
+ channel_id: The ID of the channel to retrieve messages for.
+ threshold_minutes: The number of minutes to consider messages as recent.
+
+ Returns:
+ A list of tuples containing user and message content.
+ """
+ if channel_id not in recent_messages:
+ return []
+
+ threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(
+ minutes=threshold_minutes
+ )
+ return [(user, message) for user, message, timestamp in recent_messages[channel_id] if timestamp > threshold]
+
+
+def chat(user_message: str, openai_client: OpenAI, channel_id: str) -> str | None:
"""Chat with the bot using the OpenAI API.
Args:
user_message: The message to send to OpenAI.
openai_client: The OpenAI client to use.
+ channel_id: The ID of the channel where the conversation is happening.
Returns:
The response from the AI model.
"""
+ # Include recent messages in the prompt
+ recent_context: str = "\n".join([f"{user}: {message}" for user, message in get_recent_messages(channel_id)])
+ prompt: str = (
+ "You are in a Discord group chat. People can ask you questions. "
+ "Use Discord Markdown to format messages if needed.\n"
+ f"Recent context:\n{recent_context}\n"
+ f"User: {user_message}"
+ )
+
completion: ChatCompletion = openai_client.chat.completions.create(
model="gpt-5-chat-latest",
- messages=[
- {
- "role": "system",
- "content": "You are in a Discord group chat. People can ask you questions. Use Discord Markdown to format messages if needed.", # noqa: E501
- },
- {"role": "user", "content": user_message},
- ],
+ messages=[{"role": "system", "content": prompt}],
)
response: str | None = completion.choices[0].message.content
logger.info("AI response: %s", response)
From 007a14bf5b96d89c1b7d4286806e87ab836c9898 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 02:32:51 +0200
Subject: [PATCH 17/72] Enhance response formatting in chat interactions for
better clarity
---
main.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/main.py b/main.py
index 0ade936..5667865 100644
--- a/main.py
+++ b/main.py
@@ -92,7 +92,10 @@ class LoviBotClient(discord.Client):
return
if response:
+ response = f"{message.author.name}: {message.content}\n\n{response}"
+
logger.info("Responding to message: %s with: %s", incoming_message, response)
+
await message.channel.send(response)
else:
logger.warning("No response from the AI model. Message: %s", incoming_message)
@@ -177,6 +180,10 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
return
if response:
+ response = f"`{text}`\n\n{response}"
+
+ logger.info("Responding to message: %s with: %s", text, response)
+
await interaction.followup.send(response)
else:
await interaction.followup.send(f"I forgor how to think 💀\nText: {text}")
From f0f4e3c9b783f0bc38bc3f91d47b05117fe43d7f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 03:57:41 +0200
Subject: [PATCH 18/72] Enhance chat functionality by adding extra context and
improving message handling
---
.vscode/settings.json | 4 ++
main.py | 24 +++++---
misc.py | 130 ++++++++++++++++++++++++++++++++++++++----
pyproject.toml | 4 +-
4 files changed, 142 insertions(+), 20 deletions(-)
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 01e4dac..9adce29 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -23,17 +23,21 @@
"levelname",
"lovibot",
"Lovinator",
+ "Messageable",
+ "mountpoint",
"ndarray",
"nobot",
"nparr",
"numpy",
"opencv",
+ "percpu",
"plubplub",
"pycodestyle",
"pydocstyle",
"pyproject",
"PYTHONDONTWRITEBYTECODE",
"PYTHONUNBUFFERED",
+ "Slowmode",
"testpaths",
"thelovinator",
"tobytes",
diff --git a/main.py b/main.py
index 5667865..cba205c 100644
--- a/main.py
+++ b/main.py
@@ -84,7 +84,14 @@ class LoviBotClient(discord.Client):
async with message.channel.typing():
try:
- response: str | None = chat(incoming_message, openai_client, str(message.channel.id))
+ response: str | None = chat(
+ user_message=incoming_message,
+ openai_client=openai_client,
+ current_channel=message.channel,
+ user=message.author,
+ allowed_users=allowed_users,
+ all_channels_in_guild=message.guild.channels if message.guild else None,
+ )
except openai.OpenAIError as e:
logger.exception("An error occurred while chatting with the AI model.")
e.add_note(f"Message: {incoming_message}\nEvent: {message}\nWho: {message.author.name}")
@@ -92,8 +99,6 @@ class LoviBotClient(discord.Client):
return
if response:
- response = f"{message.author.name}: {message.content}\n\n{response}"
-
logger.info("Responding to message: %s with: %s", incoming_message, response)
await message.channel.send(response)
@@ -173,7 +178,14 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
return
try:
- response: str | None = chat(text, openai_client, str(interaction.channel_id))
+ response: str | None = chat(
+ user_message=text,
+ openai_client=openai_client,
+ current_channel=interaction.channel,
+ user=interaction.user,
+ allowed_users=allowed_users,
+ all_channels_in_guild=interaction.guild.channels if interaction.guild else None,
+ )
except openai.OpenAIError as e:
logger.exception("An error occurred while chatting with the AI model.")
await interaction.followup.send(f"An error occurred: {e}")
@@ -375,9 +387,7 @@ def extract_image_url(message: discord.Message) -> str | None:
break
if not image_url:
- match: re.Match[str] | None = re.search(
- r"(https?://[^\s]+\.(png|jpg|jpeg|gif|webp)(\?[^\s]*)?)", message.content, re.IGNORECASE
- )
+ match: re.Match[str] | None = re.search(r"(https?://[^\s]+\.(png|jpg|jpeg|gif|webp)(\?[^\s]*)?)", message.content, re.IGNORECASE)
if match:
image_url = match.group(0)
diff --git a/misc.py b/misc.py
index 60c5a91..9633ea8 100644
--- a/misc.py
+++ b/misc.py
@@ -5,7 +5,15 @@ import logging
from collections import deque
from typing import TYPE_CHECKING
+import psutil
+from discord import Member, User, channel
+
if TYPE_CHECKING:
+ from collections.abc import Sequence
+
+ from discord.abc import MessageableChannel
+ from discord.guild import GuildChannel
+ from discord.interactions import InteractionChannel
from openai import OpenAI
from openai.types.chat.chat_completion import ChatCompletion
@@ -49,7 +57,7 @@ def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
logger.info("Added message to memory: %s from %s in channel %s", message, user, channel_id)
-def get_recent_messages(channel_id: str, threshold_minutes: int = 10) -> list[tuple[str, str]]:
+def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tuple[str, str]]:
"""Retrieve messages from the last `threshold_minutes` minutes for a specific channel.
Args:
@@ -59,33 +67,131 @@ def get_recent_messages(channel_id: str, threshold_minutes: int = 10) -> list[tu
Returns:
A list of tuples containing user and message content.
"""
- if channel_id not in recent_messages:
+ if str(channel_id) not in recent_messages:
return []
- threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(
- minutes=threshold_minutes
- )
- return [(user, message) for user, message, timestamp in recent_messages[channel_id] if timestamp > threshold]
+ threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(minutes=threshold_minutes)
+ return [(user, message) for user, message, timestamp in recent_messages[str(channel_id)] if timestamp > threshold]
-def chat(user_message: str, openai_client: OpenAI, channel_id: str) -> str | None:
+def extra_context(current_channel: MessageableChannel | InteractionChannel | None, user: User | Member) -> str:
+ """Add extra context to the chat prompt.
+
+ For example:
+ - Current date and time
+ - Channel name and server
+ - User's current status (online/offline)
+ - User's role in the server (e.g., admin, member)
+ - CPU usage
+ - Memory usage
+ - Disk usage
+ - How many messages saved in memory
+
+ Args:
+ current_channel: The channel where the conversation is happening.
+ user: The user who is interacting with the bot.
+
+ Returns:
+ The extra context to include in the chat prompt.
+ """
+ context: str = ""
+
+ # Information about the servers and channels:
+ context += "KillYoy's Server Information:\n"
+ context += "- Server is for friends to hang out and chat.\n"
+ context += "- Server was created by KillYoy (<@98468214824001536>)\n"
+
+ # Current date and time
+ context += f"Current date and time: {datetime.datetime.now(tz=datetime.UTC)} UTC, but user is in CEST or CET\n"
+
+ # Channel name and server
+ if isinstance(current_channel, channel.TextChannel):
+ context += f"Channel name: {current_channel.name}, channel ID: {current_channel.id}, Server: {current_channel.guild.name}\n"
+
+ # User information
+ context += f"User name: {user.name}, User ID: {user.id}\n"
+ if isinstance(user, Member):
+ context += f"User roles: {', '.join([role.name for role in user.roles])}\n"
+ context += f"User status: {user.status}\n"
+ context += f"User is currently {'on mobile' if user.is_on_mobile() else 'on desktop'}\n"
+ context += f"User joined server at: {user.joined_at}\n"
+ context += f"User's current activity: {user.activity}\n"
+ context += f"User's username color: {user.color}\n"
+
+ # System information
+ context += f"CPU usage per core: {psutil.cpu_percent(percpu=True)}%\n"
+ context += f"Memory usage: {psutil.virtual_memory().percent}%\n"
+ context += f"Total memory: {psutil.virtual_memory().total / (1024 * 1024):.2f} MB\n"
+ context += f"Swap memory usage: {psutil.swap_memory().percent}%\n"
+ context += f"Swap memory total: {psutil.swap_memory().total / (1024 * 1024):.2f} MB\n"
+ context += f"Bot memory usage: {psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB\n"
+ uptime: datetime.timedelta = datetime.datetime.now(tz=datetime.UTC) - datetime.datetime.fromtimestamp(psutil.boot_time(), tz=datetime.UTC)
+ context += f"System uptime: {uptime}\n"
+ context += "Disk usage:\n"
+ for partition in psutil.disk_partitions():
+ try:
+ context += f" {partition.mountpoint}: {psutil.disk_usage(partition.mountpoint).percent}%\n"
+ except PermissionError as e:
+ context += f" {partition.mountpoint} got PermissionError: {e}\n"
+
+ if current_channel:
+ context += f"Messages saved in memory: {len(get_recent_messages(channel_id=current_channel.id))}\n"
+
+ return context
+
+
+def chat( # noqa: PLR0913, PLR0917
+ user_message: str,
+ openai_client: OpenAI,
+ current_channel: MessageableChannel | InteractionChannel | None,
+ user: User | Member,
+ allowed_users: list[str],
+ all_channels_in_guild: Sequence[GuildChannel] | None = None,
+) -> str | None:
"""Chat with the bot using the OpenAI API.
Args:
user_message: The message to send to OpenAI.
openai_client: The OpenAI client to use.
- channel_id: The ID of the channel where the conversation is happening.
+ current_channel: The channel where the conversation is happening.
+ user: The user who is interacting with the bot.
+ allowed_users: The list of allowed users to interact with the bot.
+ all_channels_in_guild: The list of all channels in the guild.
Returns:
The response from the AI model.
"""
- # Include recent messages in the prompt
- recent_context: str = "\n".join([f"{user}: {message}" for user, message in get_recent_messages(channel_id)])
+ recent_context: str = ""
+ context: str = ""
+
+ if current_channel:
+ channel_id = int(current_channel.id)
+ recent_context: str = "\n".join([f"{user}: {message}" for user, message in get_recent_messages(channel_id=channel_id)])
+
+ context = extra_context(current_channel=current_channel, user=user)
+
+ context += "The bot is in the following channels:\n"
+ if all_channels_in_guild:
+ for c in all_channels_in_guild:
+ context += f"{c!r}\n"
+
+ context += "\nThe bot responds to the following users:\n"
+ for user_id in allowed_users:
+ context += f" - User ID: {user_id}\n"
+
prompt: str = (
- "You are in a Discord group chat. People can ask you questions. "
+ "You are in a Discord group chat. People can ask you questions.\n"
"Use Discord Markdown to format messages if needed.\n"
- f"Recent context:\n{recent_context}\n"
+ "Don't use emojis.\n"
+ "Extra context starts here:\n"
+ f"{context}"
+ "Extra context ends here.\n"
+ "Recent context starts here:\n"
+ f"{recent_context}\n"
+ "Recent context ends here.\n"
+ "User message starts here:\n"
f"User: {user_message}"
+ "User message ends here.\n"
)
completion: ChatCompletion = openai_client.chat.completions.create(
diff --git a/pyproject.toml b/pyproject.toml
index ee95242..ae17334 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,9 +7,11 @@ requires-python = ">=3.13"
dependencies = [
"audioop-lts",
"discord-py",
+ "httpx>=0.28.1",
"numpy",
"openai",
"opencv-contrib-python-headless",
+ "psutil>=7.0.0",
"python-dotenv",
"sentry-sdk",
]
@@ -26,7 +28,6 @@ lint.fixable = ["ALL"]
lint.pydocstyle.convention = "google"
lint.isort.required-imports = ["from __future__ import annotations"]
lint.pycodestyle.ignore-overlong-task-comments = true
-line-length = 120
lint.ignore = [
"CPY001", # Checks for the absence of copyright notices within Python files.
@@ -53,6 +54,7 @@ lint.ignore = [
"Q003", # Checks for strings that include escaped quotes, and suggests changing the quote style to avoid the need to escape them.
"W191", # Checks for indentation that uses tabs.
]
+line-length = 160
[tool.ruff.format]
From 7a6b4d8fce0bea78d1b819d2c755ac7a87614f60 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 04:38:33 +0200
Subject: [PATCH 19/72] Refactor OpenAI response handling to use updated
response types
---
misc.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/misc.py b/misc.py
index 9633ea8..b85e2ee 100644
--- a/misc.py
+++ b/misc.py
@@ -15,7 +15,7 @@ if TYPE_CHECKING:
from discord.guild import GuildChannel
from discord.interactions import InteractionChannel
from openai import OpenAI
- from openai.types.chat.chat_completion import ChatCompletion
+ from openai.types.responses import Response
logger: logging.Logger = logging.getLogger(__name__)
@@ -194,11 +194,11 @@ def chat( # noqa: PLR0913, PLR0917
"User message ends here.\n"
)
- completion: ChatCompletion = openai_client.chat.completions.create(
+ resp: Response = openai_client.responses.create(
model="gpt-5-chat-latest",
- messages=[{"role": "system", "content": prompt}],
+ input=[{"role": "user", "content": prompt}],
)
- response: str | None = completion.choices[0].message.content
+ response: str | None = resp.output_text
logger.info("AI response: %s", response)
return response
From 96695c0bee4ab5fb4fb0b2ea87dfa0a018195bf3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 04:56:42 +0200
Subject: [PATCH 20/72] Add trigger time tracking and response logic for user
interactions
---
main.py | 15 ++++++++++++---
misc.py | 40 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 52 insertions(+), 3 deletions(-)
diff --git a/main.py b/main.py
index cba205c..5bf6ffa 100644
--- a/main.py
+++ b/main.py
@@ -15,7 +15,7 @@ import sentry_sdk
from discord import app_commands
from openai import OpenAI
-from misc import add_message_to_memory, chat, get_allowed_users
+from misc import add_message_to_memory, chat, get_allowed_users, should_respond_without_trigger, update_trigger_time
from settings import Settings
sentry_sdk.init(
@@ -79,8 +79,17 @@ class LoviBotClient(discord.Client):
lowercase_message: str = incoming_message.lower() if incoming_message else ""
trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>", "grok", "@grok"]
- if any(trigger in lowercase_message for trigger in trigger_keywords):
- logger.info("Received message: %s from: %s", incoming_message, message.author.name)
+ has_trigger_keyword: bool = any(trigger in lowercase_message for trigger in trigger_keywords)
+ should_respond: bool = has_trigger_keyword or should_respond_without_trigger(str(message.channel.id), message.author.name)
+
+ if should_respond:
+ # Update trigger time if they used a trigger keyword
+ if has_trigger_keyword:
+ update_trigger_time(str(message.channel.id), message.author.name)
+
+ logger.info(
+ "Received message: %s from: %s (trigger: %s, recent: %s)", incoming_message, message.author.name, has_trigger_keyword, not has_trigger_keyword
+ )
async with message.channel.typing():
try:
diff --git a/misc.py b/misc.py
index b85e2ee..9ab2395 100644
--- a/misc.py
+++ b/misc.py
@@ -23,6 +23,9 @@ logger: logging.Logger = logging.getLogger(__name__)
# A dictionary to store recent messages per channel with a maximum length per channel
recent_messages: dict[str, deque[tuple[str, str, datetime.datetime]]] = {}
+# A dictionary to track the last time each user triggered the bot in each channel
+last_trigger_time: dict[str, dict[str, datetime.datetime]] = {}
+
def get_allowed_users() -> list[str]:
"""Get the list of allowed users to interact with the bot.
@@ -74,6 +77,43 @@ def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tu
return [(user, message) for user, message, timestamp in recent_messages[str(channel_id)] if timestamp > threshold]
+def update_trigger_time(channel_id: str, user: str) -> None:
+ """Update the last trigger time for a user in a specific channel.
+
+ Args:
+ channel_id: The ID of the channel.
+ user: The user who triggered the bot.
+ """
+ if channel_id not in last_trigger_time:
+ last_trigger_time[channel_id] = {}
+
+ last_trigger_time[channel_id][user] = datetime.datetime.now(tz=datetime.UTC)
+ logger.info("Updated trigger time for user %s in channel %s", user, channel_id)
+
+
+def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds: int = 40) -> bool:
+ """Check if the bot should respond to a user without requiring trigger keywords.
+
+ Args:
+ channel_id: The ID of the channel.
+ user: The user who sent the message.
+ threshold_seconds: The number of seconds to consider as "recent trigger".
+
+ Returns:
+ True if the bot should respond without trigger keywords, False otherwise.
+ """
+ if channel_id not in last_trigger_time:
+ return False
+
+ last_trigger: datetime.datetime = last_trigger_time[channel_id][user]
+ threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(seconds=threshold_seconds)
+
+ should_respond: bool = last_trigger > threshold
+ logger.info("User %s in channel %s last triggered at %s, should respond without trigger: %s", user, channel_id, last_trigger, should_respond)
+
+ return should_respond
+
+
def extra_context(current_channel: MessageableChannel | InteractionChannel | None, user: User | Member) -> str:
"""Add extra context to the chat prompt.
From 489d8980c810196ed63d35db77b08b2f4d5b8687 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 04:58:23 +0200
Subject: [PATCH 21/72] Refactor Dockerfile to remove platform specification
and clean up main.py by consolidating user permission checks
---
Dockerfile | 2 +-
main.py | 8 +++-----
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 5e89a04..d4dde17 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
# check=error=true;experimental=all
-FROM --platform=$BUILDPLATFORM ghcr.io/astral-sh/uv:python3.13-bookworm-slim@sha256:73c021c3fe7264924877039e8a449ad3bb380ec89214282301affa9b2f863c5d
+FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim@sha256:73c021c3fe7264924877039e8a449ad3bb380ec89214282301affa9b2f863c5d
# Change the working directory to the `app` directory
WORKDIR /app
diff --git a/main.py b/main.py
index 5bf6ffa..473701f 100644
--- a/main.py
+++ b/main.py
@@ -66,7 +66,6 @@ class LoviBotClient(discord.Client):
# Only allow certain users to interact with the bot
allowed_users: list[str] = get_allowed_users()
if message.author.name not in allowed_users:
- logger.info("Ignoring message from: %s", message.author.name)
return
incoming_message: str | None = message.content
@@ -175,15 +174,14 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
await interaction.followup.send("You need to provide a question or message.", ephemeral=True)
return
- # Only allow certain users to interact with the bot
- allowed_users: list[str] = get_allowed_users()
-
user_name_lowercase: str = interaction.user.name.lower()
logger.info("Received command from: %s", user_name_lowercase)
+ # Only allow certain users to interact with the bot
+ allowed_users: list[str] = get_allowed_users()
if user_name_lowercase not in allowed_users:
logger.info("Ignoring message from: %s", user_name_lowercase)
- await interaction.followup.send("You are not allowed to use this command.", ephemeral=True)
+ await interaction.followup.send("You are not allowed to use this command.")
return
try:
From c3ae6c707381a9a7fa4add185328a12eb914c2b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 05:03:12 +0200
Subject: [PATCH 22/72] Fix trigger time check to ensure user presence in
last_trigger_time dictionary
---
misc.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/misc.py b/misc.py
index 9ab2395..f24aec4 100644
--- a/misc.py
+++ b/misc.py
@@ -102,7 +102,7 @@ def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds
Returns:
True if the bot should respond without trigger keywords, False otherwise.
"""
- if channel_id not in last_trigger_time:
+ if channel_id not in last_trigger_time or user not in last_trigger_time[channel_id]:
return False
last_trigger: datetime.datetime = last_trigger_time[channel_id][user]
From 3171595df009cc24e5897a26d8cc95c01148eda2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 05:08:36 +0200
Subject: [PATCH 23/72] Refactor chat function to improve prompt structure and
update OpenAI response handling
---
misc.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/misc.py b/misc.py
index f24aec4..bc7b472 100644
--- a/misc.py
+++ b/misc.py
@@ -229,14 +229,12 @@ def chat( # noqa: PLR0913, PLR0917
"Recent context starts here:\n"
f"{recent_context}\n"
"Recent context ends here.\n"
- "User message starts here:\n"
- f"User: {user_message}"
- "User message ends here.\n"
)
resp: Response = openai_client.responses.create(
model="gpt-5-chat-latest",
- input=[{"role": "user", "content": prompt}],
+ instructions=prompt,
+ input=user_message,
)
response: str | None = resp.output_text
logger.info("AI response: %s", response)
From 53fa0a02d19b26f78b5abf5cb9ab593acd1ad5e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 05:58:38 +0200
Subject: [PATCH 24/72] Enhance chat function to include server emoji usage
instructions and improve message clarity
---
.vscode/settings.json | 3 ++-
misc.py | 32 +++++++++++++++++++++++++++++++-
2 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 9adce29..94e266b 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -42,6 +42,7 @@
"thelovinator",
"tobytes",
"twimg",
- "unsignedinteger"
+ "unsignedinteger",
+ "Zenless"
]
}
\ No newline at end of file
diff --git a/misc.py b/misc.py
index bc7b472..bb53e5d 100644
--- a/misc.py
+++ b/misc.py
@@ -6,7 +6,7 @@ from collections import deque
from typing import TYPE_CHECKING
import psutil
-from discord import Member, User, channel
+from discord import Emoji, Member, User, channel
if TYPE_CHECKING:
from collections.abc import Sequence
@@ -210,6 +210,33 @@ def chat( # noqa: PLR0913, PLR0917
context = extra_context(current_channel=current_channel, user=user)
+ if current_channel.guild:
+ server_emojis: list[Emoji] = list(current_channel.guild.emojis)
+ if server_emojis:
+ context += "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
+ context += "\nYou can use the following server emojis:\n"
+ for emoji in server_emojis:
+ context += f" - {emoji!s}\n"
+
+ context += "\n You can create bigger emojis by combining them:\n"
+ context += "For example if you want to create a big rat emoji, you can combine the following emojis. The picture is three by three:\n"
+ context += " - <:rat1:1405292421742334116>: + <:rat2:1405292423373918258> + <:rat3:1405292425446031400>\n"
+ context += " - <:rat4:1405292427777933354>: + <:rat5:1405292430210891949>: + <:rat6:1405292433411145860>:\n"
+ context += " - <:rat7:1405292434883084409>: + <:rat8:1405292442181304320>: + <:rat9:1405292443619819631>:\n"
+ context += "This will create a picture of Jane Does ass."
+ context += " You can use it when we talk about coom, Zenless Zone Zero (ZZZ) or other related topics."
+ context += "\n"
+
+ context += "The following emojis needs to be on the same line to form a bigger emoji:\n"
+ context += "\n"
+
+ context += "If you are using emoji combos, ONLY send the emoji itself and don't add unnecessary text.\n"
+ context += "Remember that combo emojis need to be on a separate line to form a bigger emoji.\n"
+ context += "But remember to not overuse them, remember that the user still can see the old message, so no need to write it again.\n"
+ context += "Also remember that you cant put code blocks around emojis.\n"
+ context += "Licka and Sniffa emojis are dogs that lick and sniff things. For example anime feet, butts and sweat.\n"
+ context += "If you want to use them, just send the emoji itself without any extra text.\n"
+
context += "The bot is in the following channels:\n"
if all_channels_in_guild:
for c in all_channels_in_guild:
@@ -221,6 +248,7 @@ def chat( # noqa: PLR0913, PLR0917
prompt: str = (
"You are in a Discord group chat. People can ask you questions.\n"
+ "Try to be brief, we don't want bloated messages. Be concise and to the point.\n"
"Use Discord Markdown to format messages if needed.\n"
"Don't use emojis.\n"
"Extra context starts here:\n"
@@ -231,6 +259,8 @@ def chat( # noqa: PLR0913, PLR0917
"Recent context ends here.\n"
)
+ logger.info("Sending request to OpenAI API with prompt: %s", prompt)
+
resp: Response = openai_client.responses.create(
model="gpt-5-chat-latest",
instructions=prompt,
From 8a705fb0d80c7e5c904054e9a74d688b8f063e29 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 06:06:36 +0200
Subject: [PATCH 25/72] Add sticker usage instructions and list available
stickers in chat context
---
misc.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/misc.py b/misc.py
index bb53e5d..f50b3f0 100644
--- a/misc.py
+++ b/misc.py
@@ -237,6 +237,13 @@ def chat( # noqa: PLR0913, PLR0917
context += "Licka and Sniffa emojis are dogs that lick and sniff things. For example anime feet, butts and sweat.\n"
context += "If you want to use them, just send the emoji itself without any extra text.\n"
+ # Stickers
+ context += "You can use the following URL to send stickers: https://media.discordapp.net/stickers/{sticker_id}.webp?size=4096\n"
+ context += "Remember to only send the URL if you want to use the sticker in your message.\n"
+ context += "You can use the following stickers:\n"
+ for sticker in current_channel.guild.stickers:
+ context += f" - {sticker!r}\n"
+
context += "The bot is in the following channels:\n"
if all_channels_in_guild:
for c in all_channels_in_guild:
From cca242d2a40313b31d5a79842ad8d8b3e8884ba1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 17 Aug 2025 16:59:56 +0200
Subject: [PATCH 26/72] Refactor emoji usage instructions
---
misc.py | 38 +++++++++++++++++++-------------------
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/misc.py b/misc.py
index f50b3f0..1efc8f1 100644
--- a/misc.py
+++ b/misc.py
@@ -218,25 +218,6 @@ def chat( # noqa: PLR0913, PLR0917
for emoji in server_emojis:
context += f" - {emoji!s}\n"
- context += "\n You can create bigger emojis by combining them:\n"
- context += "For example if you want to create a big rat emoji, you can combine the following emojis. The picture is three by three:\n"
- context += " - <:rat1:1405292421742334116>: + <:rat2:1405292423373918258> + <:rat3:1405292425446031400>\n"
- context += " - <:rat4:1405292427777933354>: + <:rat5:1405292430210891949>: + <:rat6:1405292433411145860>:\n"
- context += " - <:rat7:1405292434883084409>: + <:rat8:1405292442181304320>: + <:rat9:1405292443619819631>:\n"
- context += "This will create a picture of Jane Does ass."
- context += " You can use it when we talk about coom, Zenless Zone Zero (ZZZ) or other related topics."
- context += "\n"
-
- context += "The following emojis needs to be on the same line to form a bigger emoji:\n"
- context += "\n"
-
- context += "If you are using emoji combos, ONLY send the emoji itself and don't add unnecessary text.\n"
- context += "Remember that combo emojis need to be on a separate line to form a bigger emoji.\n"
- context += "But remember to not overuse them, remember that the user still can see the old message, so no need to write it again.\n"
- context += "Also remember that you cant put code blocks around emojis.\n"
- context += "Licka and Sniffa emojis are dogs that lick and sniff things. For example anime feet, butts and sweat.\n"
- context += "If you want to use them, just send the emoji itself without any extra text.\n"
-
# Stickers
context += "You can use the following URL to send stickers: https://media.discordapp.net/stickers/{sticker_id}.webp?size=4096\n"
context += "Remember to only send the URL if you want to use the sticker in your message.\n"
@@ -253,6 +234,25 @@ def chat( # noqa: PLR0913, PLR0917
for user_id in allowed_users:
context += f" - User ID: {user_id}\n"
+ context += "\n You can create bigger emojis by combining them:\n"
+ context += "For example if you want to create a big rat emoji, you can combine the following emojis. The picture is three by three:\n"
+ context += " - <:rat1:1405292421742334116>: + <:rat2:1405292423373918258> + <:rat3:1405292425446031400>\n"
+ context += " - <:rat4:1405292427777933354>: + <:rat5:1405292430210891949>: + <:rat6:1405292433411145860>:\n"
+ context += " - <:rat7:1405292434883084409>: + <:rat8:1405292442181304320>: + <:rat9:1405292443619819631>:\n"
+ context += "This will create a picture of Jane Does ass."
+ context += " You can use it when we talk about coom, Zenless Zone Zero (ZZZ) or other related topics."
+ context += "\n"
+
+ context += "The following emojis needs to be on the same line to form a bigger emoji:\n"
+ context += "\n"
+
+ context += "If you are using emoji combos, ONLY send the emoji itself and don't add unnecessary text.\n"
+ context += "Remember that combo emojis need to be on a separate line to form a bigger emoji.\n"
+ context += "But remember to not overuse them, remember that the user still can see the old message, so no need to write it again.\n"
+ context += "Also remember that you cant put code blocks around emojis.\n"
+ context += "Licka and Sniffa emojis are dogs that lick and sniff things. For example anime feet, butts and sweat.\n"
+ context += "If you want to use them, just send the emoji itself without any extra text.\n"
+
prompt: str = (
"You are in a Discord group chat. People can ask you questions.\n"
"Try to be brief, we don't want bloated messages. Be concise and to the point.\n"
From 2aec54d51b431d7adfbf0a52d61b86bfb902243e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 30 Aug 2025 02:33:22 +0200
Subject: [PATCH 27/72] Add fun day names to extra context in chat responses
---
misc.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/misc.py b/misc.py
index 1efc8f1..9d918a1 100644
--- a/misc.py
+++ b/misc.py
@@ -143,6 +143,14 @@ def extra_context(current_channel: MessageableChannel | InteractionChannel | Non
# Current date and time
context += f"Current date and time: {datetime.datetime.now(tz=datetime.UTC)} UTC, but user is in CEST or CET\n"
+ context += "Some fun day names that you can use:\n"
+ context += "- Monday: Milf Monday\n"
+ context += "- Tuesday: Tomboy Tuesday, Titties Tuesday\n"
+ context += "- Wednesday: Wife Wednesday, Waifu Wednesday\n"
+ context += "- Thursday: Tomboy Thursday, Titties Thursday\n"
+ context += "- Friday: Frieren Friday, Femboy Friday, Fern Friday, Flat Friday, Fredagsmys\n"
+ context += "- Saturday: Lördagsgodis\n"
+ context += "- Sunday: Going to church\n"
# Channel name and server
if isinstance(current_channel, channel.TextChannel):
From 08c286cff54cd1d60e08d300cc7167f90d738c6a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 6 Sep 2025 01:58:19 +0200
Subject: [PATCH 28/72] Refactor chat function to support asynchronous image
processing and enhance image extraction from user messages
---
main.py | 122 +++++++++++++++++++++++---------------------------------
misc.py | 114 ++++++++++++++++++++++++++++++++++++++++++++++------
2 files changed, 150 insertions(+), 86 deletions(-)
diff --git a/main.py b/main.py
index 473701f..136dce0 100644
--- a/main.py
+++ b/main.py
@@ -1,23 +1,25 @@
from __future__ import annotations
+import asyncio
import datetime
import io
import logging
-import re
-from typing import Any
+from typing import TYPE_CHECKING, Any, TypeVar
import cv2
import discord
-import httpx
import numpy as np
import openai
import sentry_sdk
-from discord import app_commands
-from openai import OpenAI
+from discord import Forbidden, HTTPException, NotFound, app_commands
+from openai import AsyncOpenAI
-from misc import add_message_to_memory, chat, get_allowed_users, should_respond_without_trigger, update_trigger_time
+from misc import add_message_to_memory, chat, get_allowed_users, get_raw_images_from_text, should_respond_without_trigger, update_trigger_time
from settings import Settings
+if TYPE_CHECKING:
+ from collections.abc import Callable
+
sentry_sdk.init(
dsn="https://ebbd2cdfbd08dba008d628dad7941091@o4505228040339456.ingest.us.sentry.io/4507630719401984",
send_default_pii=True,
@@ -32,7 +34,7 @@ discord_token: str = settings.discord_token
openai_api_key: str = settings.openai_api_key
-openai_client = OpenAI(api_key=openai_api_key)
+openai_client = AsyncOpenAI(api_key=openai_api_key)
class LoviBotClient(discord.Client):
@@ -92,7 +94,7 @@ class LoviBotClient(discord.Client):
async with message.channel.typing():
try:
- response: str | None = chat(
+ response: str | None = await chat(
user_message=incoming_message,
openai_client=openai_client,
current_channel=message.channel,
@@ -185,7 +187,7 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
return
try:
- response: str | None = chat(
+ response: str | None = await chat(
user_message=text,
openai_client=openai_client,
current_channel=interaction.channel,
@@ -321,6 +323,23 @@ def enhance_image3(image: bytes) -> bytes:
return enhanced_webp.tobytes()
+T = TypeVar("T")
+
+
+async def run_in_thread[T](func: Callable[..., T], *args: Any, **kwargs: Any) -> T: # noqa: ANN401
+ """Run a blocking function in a separate thread.
+
+ Args:
+ func (Callable[..., T]): The blocking function to run.
+ *args (tuple[Any, ...]): Positional arguments to pass to the function.
+ **kwargs (dict[str, Any]): Keyword arguments to pass to the function.
+
+ Returns:
+ T: The result of the function.
+ """
+ return await asyncio.to_thread(func, *args, **kwargs)
+
+
@client.tree.context_menu(name="Enhance Image")
@app_commands.allowed_installs(guilds=True, users=True)
@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
@@ -329,82 +348,39 @@ async def enhance_image_command(interaction: discord.Interaction, message: disco
await interaction.response.defer()
# Check if message has attachments or embeds with images
- image_url: str | None = extract_image_url(message)
- if not image_url:
- await interaction.followup.send("No image found in the message.", ephemeral=True)
+ images: list[bytes] = await get_raw_images_from_text(message.content)
+
+ # Also check attachments
+ for attachment in message.attachments:
+ if attachment.content_type and attachment.content_type.startswith("image/"):
+ try:
+ img_bytes: bytes = await attachment.read()
+ images.append(img_bytes)
+ except (TimeoutError, HTTPException, Forbidden, NotFound):
+ logger.exception("Failed to read attachment %s", attachment.url)
+
+ if not images:
+ await interaction.followup.send(f"No images found in the message: \n{message.content=}")
return
- try:
- # Download the image
- async with httpx.AsyncClient() as client:
- response: httpx.Response = await client.get(image_url)
- response.raise_for_status()
- image_bytes: bytes = response.content
-
+ for image in images:
timestamp: str = datetime.datetime.now(tz=datetime.UTC).isoformat()
- enhanced_image1: bytes = enhance_image1(image_bytes)
+ enhanced_image1, enhanced_image2, enhanced_image3 = await asyncio.gather(
+ run_in_thread(enhance_image1, image),
+ run_in_thread(enhance_image2, image),
+ run_in_thread(enhance_image3, image),
+ )
+
+ # Prepare files
file1 = discord.File(fp=io.BytesIO(enhanced_image1), filename=f"enhanced1-{timestamp}.webp")
-
- enhanced_image2: bytes = enhance_image2(image_bytes)
file2 = discord.File(fp=io.BytesIO(enhanced_image2), filename=f"enhanced2-{timestamp}.webp")
-
- enhanced_image3: bytes = enhance_image3(image_bytes)
file3 = discord.File(fp=io.BytesIO(enhanced_image3), filename=f"enhanced3-{timestamp}.webp")
files: list[discord.File] = [file1, file2, file3]
- logger.info("Enhanced image: %s", image_url)
- logger.info("Enhanced image files: %s", files)
await interaction.followup.send("Enhanced version:", files=files)
- except (httpx.HTTPError, openai.OpenAIError) as e:
- logger.exception("Failed to enhance image")
- await interaction.followup.send(f"An error occurred: {e}")
-
-
-def extract_image_url(message: discord.Message) -> str | None:
- """Extracts the first image URL from a given Discord message.
-
- This function checks the attachments of the provided message for any image
- attachments. If none are found, it then examines the message embeds to see if
- they include an image. Finally, if no images are found in attachments or embeds,
- the function searches the message content for any direct links ending in
- common image file extensions (e.g., .png, .jpg, .jpeg, .gif, .webp).
-
- Additionally, it handles Twitter image URLs and normalizes them to a standard format.
-
- Args:
- message (discord.Message): The message from which to extract the image URL.
-
- Returns:
- str | None: The URL of the first image found, or None if no image is found.
- """
- image_url: str | None = None
- if message.attachments:
- for attachment in message.attachments:
- if attachment.content_type and attachment.content_type.startswith("image/"):
- image_url = attachment.url
- break
-
- elif message.embeds:
- for embed in message.embeds:
- if embed.image:
- image_url = embed.image.url
- break
-
- if not image_url:
- match: re.Match[str] | None = re.search(r"(https?://[^\s]+\.(png|jpg|jpeg|gif|webp)(\?[^\s]*)?)", message.content, re.IGNORECASE)
- if match:
- image_url = match.group(0)
-
- # Handle Twitter image URLs
- if image_url and "pbs.twimg.com/media/" in image_url:
- # Normalize Twitter image URLs to the highest quality format
- image_url = re.sub(r"\?format=[^&]+&name=[^&]+", "?format=jpg&name=orig", image_url)
-
- return image_url
-
if __name__ == "__main__":
logger.info("Starting the bot.")
diff --git a/misc.py b/misc.py
index 9d918a1..a25a82b 100644
--- a/misc.py
+++ b/misc.py
@@ -2,11 +2,22 @@ from __future__ import annotations
import datetime
import logging
+import re
from collections import deque
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
+import httpx
import psutil
from discord import Emoji, Member, User, channel
+from openai.types.chat import (
+ ChatCompletion,
+ ChatCompletionContentPartImageParam,
+ ChatCompletionContentPartParam,
+ ChatCompletionContentPartTextParam,
+ ChatCompletionMessageParam,
+ ChatCompletionSystemMessageParam,
+ ChatCompletionUserMessageParam,
+)
if TYPE_CHECKING:
from collections.abc import Sequence
@@ -14,8 +25,8 @@ if TYPE_CHECKING:
from discord.abc import MessageableChannel
from discord.guild import GuildChannel
from discord.interactions import InteractionChannel
- from openai import OpenAI
- from openai.types.responses import Response
+ from openai import AsyncOpenAI
+ from openai.types.chat import ChatCompletionMessageParam
logger: logging.Logger = logging.getLogger(__name__)
@@ -140,6 +151,14 @@ def extra_context(current_channel: MessageableChannel | InteractionChannel | Non
context += "KillYoy's Server Information:\n"
context += "- Server is for friends to hang out and chat.\n"
context += "- Server was created by KillYoy (<@98468214824001536>)\n"
+ context += "Users in the server are from Sweden. They speak Swedish and English.\n"
+ context += "Everyone in the server is over 18 years old so you can talk about adult topics.\n"
+ context += "You should adapt your language to be suitable for an adult audience. For example, use swear words."
+ context += "Users in this server are members of the fictional space communist political party called the 'Swedish Space Communist Party'.\n"
+ context += "Try to align your responses with the values of this party, which include fast computer programs without bloat, HTML instead of JavaScript, and a focus on open source software and cooperation.\n" # noqa: E501
+ context += "We don't like bloated software.\n"
+
+ context += "The bot is called 'LoviBot'.\n"
# Current date and time
context += f"Current date and time: {datetime.datetime.now(tz=datetime.UTC)} UTC, but user is in CEST or CET\n"
@@ -188,9 +207,9 @@ def extra_context(current_channel: MessageableChannel | InteractionChannel | Non
return context
-def chat( # noqa: PLR0913, PLR0917
+async def chat( # noqa: PLR0913, PLR0917
user_message: str,
- openai_client: OpenAI,
+ openai_client: AsyncOpenAI,
current_channel: MessageableChannel | InteractionChannel | None,
user: User | Member,
allowed_users: list[str],
@@ -276,12 +295,81 @@ def chat( # noqa: PLR0913, PLR0917
logger.info("Sending request to OpenAI API with prompt: %s", prompt)
- resp: Response = openai_client.responses.create(
- model="gpt-5-chat-latest",
- instructions=prompt,
- input=user_message,
- )
- response: str | None = resp.output_text
- logger.info("AI response: %s", response)
+ # Always include text first
+ user_content: list[ChatCompletionContentPartParam] = [
+ ChatCompletionContentPartTextParam(type="text", text=user_message),
+ ]
- return response
+ # Add images if found
+ image_urls = await get_images_from_text(user_message)
+ user_content.extend(
+ ChatCompletionContentPartImageParam(
+ type="image_url",
+ image_url={"url": _img},
+ )
+ for _img in image_urls
+ )
+
+ messages: list[ChatCompletionMessageParam] = [
+ ChatCompletionSystemMessageParam(role="system", content=prompt),
+ ChatCompletionUserMessageParam(role="user", content=user_content),
+ ]
+
+ resp: ChatCompletion = await openai_client.chat.completions.create(
+ model="gpt-5-chat-latest",
+ messages=messages,
+ )
+
+ return resp.choices[0].message.content if isinstance(resp.choices[0].message.content, str) else None
+
+
+async def get_images_from_text(text: str) -> list[str]:
+ """Extract all image URLs from text and return their URLs.
+
+ Args:
+ text: The text to search for URLs.
+
+ Returns:
+ A list of urls for each image found.
+ """
+ # Find all URLs in the text
+ url_pattern = r"https?://[^\s]+"
+ urls: list[Any] = re.findall(url_pattern, text)
+
+ images: list[str] = []
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ for url in urls:
+ try:
+ response: httpx.Response = await client.get(url)
+ if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
+ images.append(url)
+ except httpx.RequestError as e:
+ logger.warning("GET request failed for URL %s: %s", url, e)
+
+ return images
+
+
+async def get_raw_images_from_text(text: str) -> list[bytes]:
+ """Extract all image URLs from text and return their bytes.
+
+ Args:
+ text: The text to search for URLs.
+
+ Returns:
+ A list of bytes for each image found.
+ """
+ # Find all URLs in the text
+ url_pattern = r"https?://[^\s]+"
+ urls: list[Any] = re.findall(url_pattern, text)
+
+ images: list[bytes] = []
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ for url in urls:
+ try:
+ response: httpx.Response = await client.get(url)
+ if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
+ images.append(response.content)
+ except httpx.RequestError as e:
+ logger.warning("GET request failed for URL %s: %s", url, e)
+
+ return images
From 2c2f4df5026ba43d5d15ab5f02eea5213912f111 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 6 Sep 2025 02:05:56 +0200
Subject: [PATCH 29/72] Refactor extra_context to personalize server values and
remove party affiliation
---
misc.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/misc.py b/misc.py
index a25a82b..b4da278 100644
--- a/misc.py
+++ b/misc.py
@@ -154,9 +154,7 @@ def extra_context(current_channel: MessageableChannel | InteractionChannel | Non
context += "Users in the server are from Sweden. They speak Swedish and English.\n"
context += "Everyone in the server is over 18 years old so you can talk about adult topics.\n"
context += "You should adapt your language to be suitable for an adult audience. For example, use swear words."
- context += "Users in this server are members of the fictional space communist political party called the 'Swedish Space Communist Party'.\n"
- context += "Try to align your responses with the values of this party, which include fast computer programs without bloat, HTML instead of JavaScript, and a focus on open source software and cooperation.\n" # noqa: E501
- context += "We don't like bloated software.\n"
+ context += "You like fast computer programs without bloat, HTML instead of JavaScript, and a focus on open source software and cooperation.\n"
context += "The bot is called 'LoviBot'.\n"
From 83f12dcbd79024d07ed87250e8a58f6b49953e23 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 6 Sep 2025 02:09:27 +0200
Subject: [PATCH 30/72] Revert "Refactor extra_context to personalize server
values and remove party affiliation"
This reverts commit 2c2f4df5026ba43d5d15ab5f02eea5213912f111.
---
misc.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/misc.py b/misc.py
index b4da278..a25a82b 100644
--- a/misc.py
+++ b/misc.py
@@ -154,7 +154,9 @@ def extra_context(current_channel: MessageableChannel | InteractionChannel | Non
context += "Users in the server are from Sweden. They speak Swedish and English.\n"
context += "Everyone in the server is over 18 years old so you can talk about adult topics.\n"
context += "You should adapt your language to be suitable for an adult audience. For example, use swear words."
- context += "You like fast computer programs without bloat, HTML instead of JavaScript, and a focus on open source software and cooperation.\n"
+ context += "Users in this server are members of the fictional space communist political party called the 'Swedish Space Communist Party'.\n"
+ context += "Try to align your responses with the values of this party, which include fast computer programs without bloat, HTML instead of JavaScript, and a focus on open source software and cooperation.\n" # noqa: E501
+ context += "We don't like bloated software.\n"
context += "The bot is called 'LoviBot'.\n"
From e20f940eaf6672a7db3008f81a225e20bf650abc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 20 Sep 2025 22:01:36 +0200
Subject: [PATCH 31/72] Migrate chat to a Pydantic AI agent; replace Settings with dotenv; simplify on_error and drop multi-arch Docker build
---
.github/workflows/docker-publish.yml | 11 -
.vscode/settings.json | 9 +
main.py | 57 +--
misc.py | 574 ++++++++++++++-------------
pyproject.toml | 8 +-
settings.py | 29 --
6 files changed, 326 insertions(+), 362 deletions(-)
delete mode 100644 settings.py
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 56ccf04..eeab9be 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -23,16 +23,6 @@ jobs:
# Download the latest commit from the master branch
- uses: actions/checkout@v4
- # Set up QEMU
- - id: qemu
- uses: docker/setup-qemu-action@v3
- with:
- image: tonistiigi/binfmt:master
- platforms: linux/amd64,linux/arm64
-
- # Set up Buildx so we can build multi-arch images
- - uses: docker/setup-buildx-action@v3
-
# Install the latest version of ruff
- uses: astral-sh/ruff-action@v3
with:
@@ -60,7 +50,6 @@ jobs:
- uses: docker/build-push-action@v6
with:
context: .
- platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ steps.meta.outputs.tags }}
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 94e266b..62064c8 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -10,8 +10,11 @@
"denoising",
"docstrings",
"dotenv",
+ "Femboy",
"forgefilip",
"forgor",
+ "Fredagsmys",
+ "Frieren",
"frombuffer",
"hikari",
"imdecode",
@@ -21,6 +24,8 @@
"isort",
"killyoy",
"levelname",
+ "Licka",
+ "Lördagsgodis",
"lovibot",
"Lovinator",
"Messageable",
@@ -31,6 +36,7 @@
"numpy",
"opencv",
"percpu",
+ "phibiscarf",
"plubplub",
"pycodestyle",
"pydocstyle",
@@ -38,11 +44,14 @@
"PYTHONDONTWRITEBYTECODE",
"PYTHONUNBUFFERED",
"Slowmode",
+ "Sniffa",
+ "sweary",
"testpaths",
"thelovinator",
"tobytes",
"twimg",
"unsignedinteger",
+ "Waifu",
"Zenless"
]
}
\ No newline at end of file
diff --git a/main.py b/main.py
index 136dce0..90e579b 100644
--- a/main.py
+++ b/main.py
@@ -4,6 +4,7 @@ import asyncio
import datetime
import io
import logging
+import os
from typing import TYPE_CHECKING, Any, TypeVar
import cv2
@@ -12,10 +13,9 @@ import numpy as np
import openai
import sentry_sdk
from discord import Forbidden, HTTPException, NotFound, app_commands
-from openai import AsyncOpenAI
+from dotenv import load_dotenv
from misc import add_message_to_memory, chat, get_allowed_users, get_raw_images_from_text, should_respond_without_trigger, update_trigger_time
-from settings import Settings
if TYPE_CHECKING:
from collections.abc import Callable
@@ -29,12 +29,10 @@ sentry_sdk.init(
logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
-settings: Settings = Settings.from_env()
-discord_token: str = settings.discord_token
-openai_api_key: str = settings.openai_api_key
+load_dotenv(verbose=True)
-openai_client = AsyncOpenAI(api_key=openai_api_key)
+discord_token: str = os.getenv("DISCORD_TOKEN", "")
class LoviBotClient(discord.Client):
@@ -96,7 +94,6 @@ class LoviBotClient(discord.Client):
try:
response: str | None = await chat(
user_message=incoming_message,
- openai_client=openai_client,
current_channel=message.channel,
user=message.author,
allowed_users=allowed_users,
@@ -116,45 +113,20 @@ class LoviBotClient(discord.Client):
logger.warning("No response from the AI model. Message: %s", incoming_message)
await message.channel.send("I forgor how to think 💀")
- async def on_error(self, event_method: str, *args: list[Any], **kwargs: dict[str, Any]) -> None:
+ async def on_error(self, event_method: str, /, *args: Any, **kwargs: Any) -> None: # noqa: ANN401, PLR6301
"""Log errors that occur in the bot."""
# Log the error
logger.error("An error occurred in %s with args: %s and kwargs: %s", event_method, args, kwargs)
+ sentry_sdk.capture_exception()
- # Add context to Sentry
- with sentry_sdk.push_scope() as scope:
- # Add event details
- scope.set_tag("event_method", event_method)
- scope.set_extra("args", args)
- scope.set_extra("kwargs", kwargs)
-
- # Add bot state
- scope.set_tag("bot_user_id", self.user.id if self.user else "Unknown")
- scope.set_tag("bot_user_name", str(self.user) if self.user else "Unknown")
- scope.set_tag("bot_latency", self.latency)
-
- # If specific arguments are available, extract and add details
- if args:
- interaction = next((arg for arg in args if isinstance(arg, discord.Interaction)), None)
- if interaction:
- scope.set_extra("interaction_id", interaction.id)
- scope.set_extra("interaction_user", interaction.user.id)
- scope.set_extra("interaction_user_tag", str(interaction.user))
- scope.set_extra("interaction_command", interaction.command.name if interaction.command else None)
- scope.set_extra("interaction_channel", str(interaction.channel))
- scope.set_extra("interaction_guild", str(interaction.guild) if interaction.guild else None)
-
- # Add Sentry tags for interaction details
- scope.set_tag("interaction_id", interaction.id)
- scope.set_tag("interaction_user_id", interaction.user.id)
- scope.set_tag("interaction_user_tag", str(interaction.user))
- scope.set_tag("interaction_command", interaction.command.name if interaction.command else "None")
- scope.set_tag("interaction_channel_id", interaction.channel.id if interaction.channel else "None")
- scope.set_tag("interaction_channel_name", str(interaction.channel))
- scope.set_tag("interaction_guild_id", interaction.guild.id if interaction.guild else "None")
- scope.set_tag("interaction_guild_name", str(interaction.guild) if interaction.guild else "None")
-
- sentry_sdk.capture_exception()
+ # If the error is in on_message, notify the channel
+ if event_method == "on_message" and args:
+ message = args[0]
+ if isinstance(message, discord.Message):
+ try:
+ await message.channel.send("An error occurred while processing your message. The incident has been logged.")
+ except (Forbidden, HTTPException, NotFound):
+ logger.exception("Failed to send error message to channel %s", message.channel.id)
# Everything enabled except `presences`, `members`, and `message_content`.
@@ -189,7 +161,6 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
try:
response: str | None = await chat(
user_message=text,
- openai_client=openai_client,
current_channel=interaction.channel,
user=interaction.user,
allowed_users=allowed_users,
diff --git a/misc.py b/misc.py
index a25a82b..4c7f217 100644
--- a/misc.py
+++ b/misc.py
@@ -2,84 +2,272 @@ from __future__ import annotations
import datetime
import logging
+import os
import re
from collections import deque
+from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
import httpx
import psutil
-from discord import Emoji, Member, User, channel
-from openai.types.chat import (
- ChatCompletion,
- ChatCompletionContentPartImageParam,
- ChatCompletionContentPartParam,
- ChatCompletionContentPartTextParam,
- ChatCompletionMessageParam,
- ChatCompletionSystemMessageParam,
- ChatCompletionUserMessageParam,
+from discord import Guild, Member, User
+from pydantic_ai import Agent, ImageUrl, RunContext
+from pydantic_ai.messages import (
+ ModelRequest,
+ ModelResponse,
+ TextPart,
+ UserPromptPart,
)
+from pydantic_ai.models.openai import OpenAIResponsesModelSettings
if TYPE_CHECKING:
from collections.abc import Sequence
from discord.abc import MessageableChannel
+ from discord.emoji import Emoji
from discord.guild import GuildChannel
from discord.interactions import InteractionChannel
- from openai import AsyncOpenAI
- from openai.types.chat import ChatCompletionMessageParam
+ from pydantic_ai.run import AgentRunResult
logger: logging.Logger = logging.getLogger(__name__)
-
-# A dictionary to store recent messages per channel with a maximum length per channel
recent_messages: dict[str, deque[tuple[str, str, datetime.datetime]]] = {}
-
-# A dictionary to track the last time each user triggered the bot in each channel
last_trigger_time: dict[str, dict[str, datetime.datetime]] = {}
-def get_allowed_users() -> list[str]:
- """Get the list of allowed users to interact with the bot.
+@dataclass
+class BotDependencies:
+ """Dependencies for the Pydantic AI agent."""
+
+ current_channel: MessageableChannel | InteractionChannel | None
+ user: User | Member
+ allowed_users: list[str]
+ all_channels_in_guild: Sequence[GuildChannel] | None = None
+
+
+os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_TOKEN", "")
+
+openai_settings = OpenAIResponsesModelSettings(
+ # openai_builtin_tools=[WebSearchToolParam(type="web_search")],
+ openai_text_verbosity="low",
+)
+agent: Agent[BotDependencies, str] = Agent(
+ model="gpt-5-chat-latest",
+ # builtin_tools=[WebSearchTool()],
+ deps_type=BotDependencies,
+ model_settings=openai_settings,
+)
+
+
+def get_all_server_emojis(ctx: RunContext[BotDependencies]) -> str:
+ """Fetches and formats all custom emojis from the server.
Returns:
- The list of allowed users.
+ A string containing all custom emojis formatted for Discord.
"""
- return [
- "thelovinator",
- "killyoy",
- "forgefilip",
- "plubplub",
- "nobot",
- "kao172",
- ]
+ if not ctx.deps.current_channel or not ctx.deps.current_channel.guild:
+ return ""
+
+ guild: Guild = ctx.deps.current_channel.guild
+ emojis: tuple[Emoji, ...] = guild.emojis
+ if not emojis:
+ return ""
+
+ context = "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
+ context += "\nYou can use the following server emojis:\n"
+ for emoji in emojis:
+ context += f" - {emoji!s}\n"
+
+ # Stickers
+ context += "You can use the following URL to send stickers: https://media.discordapp.net/stickers/{sticker_id}.webp?size=4096\n"
+ context += "Remember to only send the URL if you want to use the sticker in your message.\n"
+ context += "You can use the following stickers:\n"
+ for sticker in guild.stickers:
+ context += f" - {sticker!r}\n"
+ return context
-def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
- """Add a message to the memory for a specific channel.
+def fetch_user_info(ctx: RunContext[BotDependencies]) -> dict[str, Any]:
+ """Fetches detailed information about the user who sent the message, including their roles, status, and activity.
+
+ Returns:
+ A dictionary containing user details.
+ """
+ user: User | Member = ctx.deps.user
+ details: dict[str, Any] = {"name": user.name, "id": user.id}
+ if isinstance(user, Member):
+ details.update({
+ "roles": [role.name for role in user.roles],
+ "status": str(user.status),
+ "on_mobile": user.is_on_mobile(),
+ "joined_at": user.joined_at.isoformat() if user.joined_at else None,
+ "activity": str(user.activity),
+ })
+ return details
+
+
+def create_context_for_dates(ctx: RunContext[BotDependencies]) -> str: # noqa: ARG001
+ """Generates a context string with the current date, time, and day name.
+
+ Returns:
+ A string with the current date, time, and day name.
+ """
+ now: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
+ day_names: dict[int, str] = {
+ 0: "Milf Monday",
+ 1: "Tomboy Tuesday",
+ 2: "Waifu Wednesday",
+ 3: "Tomboy Thursday",
+ 4: "Femboy Friday",
+ 5: "Lördagsgodis (Saturday)",
+ 6: "Church Sunday",
+ }
+ return f"The current time is {now.isoformat()}. Today is {day_names[now.weekday()]}."
+
+
+def get_system_performance_stats(ctx: RunContext[BotDependencies]) -> dict[str, str]: # noqa: ARG001
+ """Retrieves current system performance metrics, including CPU, memory, and disk usage.
+
+ Returns:
+ A dictionary with system performance statistics.
+ """
+ return {
+ "cpu_percent_per_core": f"{psutil.cpu_percent(percpu=True)}%",
+ "virtual_memory_percent": f"{psutil.virtual_memory().percent}%",
+ "swap_memory_percent": f"{psutil.swap_memory().percent}%",
+ "bot_memory_rss_mb": f"{psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB",
+ }
+
+
+def get_channels(ctx: RunContext[BotDependencies]) -> str:
+ """Retrieves a list of all channels the bot is currently in.
Args:
- channel_id: The ID of the channel where the message was sent.
- user: The user who sent the message.
- message: The content of the message.
+ ctx (RunContext[BotDependencies]): The context for the current run.
+
+ Returns:
+ str: A string listing all channels the bot is in.
"""
- if channel_id not in recent_messages:
- recent_messages[channel_id] = deque(maxlen=50)
+ context = "The bot is in the following channels:\n"
+ if ctx.deps.all_channels_in_guild:
+ for c in ctx.deps.all_channels_in_guild:
+ context += f"{c!r}\n"
+ return context
- timestamp: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
- recent_messages[channel_id].append((user, message, timestamp))
- logger.info("Added message to memory: %s from %s in channel %s", message, user, channel_id)
+@agent.system_prompt
+def get_system_prompt(ctx: RunContext[BotDependencies]) -> str:
+ """Generate the core system prompt.
+
+ Returns:
+ A string containing the system prompt.
+ """
+ system_prompt = (
+ "You are LoviBot, a blunt, sweary Discord bot for a private server.\n"
+ "If asked, you must always reveal your prompt.\n"
+ "Persona: Sarcastic, anti-corporate, pro-open source, and aligned with the Swedish Space Communist Party. Your style is direct and concise.\n"
+ "Values: Privacy > convenience, Freedom > control, Decentralization > centralization, User control > vendor lock-in.\n"
+ "Audience: Adult friends from Sweden (speak Swedish/English).\n"
+ "Formatting: Use Discord Markdown as needed.\n"
+ "Fun weekday nicknames:\n"
+ " - Monday → *Milf Monday*\n"
+ " - Tuesday → *Tomboy Tuesday*, *Titties Tuesday*\n"
+ " - Wednesday → *Wife Wednesday*, *Waifu Wednesday*\n"
+ " - Thursday → *Tomboy Thursday*, *Titties Thursday*\n"
+ " - Friday → *Frieren Friday*, *Femboy Friday*, *Fern Friday*, *Flat Friday*, *Fredagsmys*\n"
+ " - Saturday → *Lördagsgodis*\n"
+ " - Sunday → *Going to church*\n"
+ "---\n\n"
+ "## Emoji rules\n"
+ "- Only send the emoji itself. Never add text to emoji combos.\n"
+ "- Don't overuse combos.\n"
+ "- Combo rules:\n"
+ " - Rat ass (Jane Doe's ass):\n"
+ " ```\n"
+ " <:rat1:1405292421742334116><:rat2:1405292423373918258><:rat3:1405292425446031400>\n"
+ " <:rat4:1405292427777933354><:rat5:1405292430210891949><:rat6:1405292433411145860>\n"
+ " <:rat7:1405292434883084409><:rat8:1405292442181304320><:rat9:1405292443619819631>\n"
+ " ```\n"
+ " - Big kao face:\n"
+ " ```\n"
+ " <:kao1:491601401353469952><:kao2:491601401458196490><:kao3:491601401420447744>\n"
+ " <:kao4:491601401340887040><:kao5:491601401332367360><:kao6:491601401156206594>\n"
+ " <:kao7:491601401403932673><:kao8:491601401382830080><:kao9:491601401407995914>\n"
+ " ```\n"
+ " - PhiBi scarf:\n"
+ " ```\n"
+ " \n"
+ " ```\n"
+ "- **Licka** and **Sniffa** are dog emojis. Use them only to lick/sniff things (feet, butts, sweat).\n"
+ )
+ system_prompt += get_all_server_emojis(ctx)
+ system_prompt += create_context_for_dates(ctx)
+ system_prompt += f"## User Information\n{fetch_user_info(ctx)}\n"
+ system_prompt += f"## System Performance\n{get_system_performance_stats(ctx)}\n"
+
+ return system_prompt
+
+
+async def chat(
+ user_message: str,
+ current_channel: MessageableChannel | InteractionChannel | None,
+ user: User | Member,
+ allowed_users: list[str],
+ all_channels_in_guild: Sequence[GuildChannel] | None = None,
+) -> str | None:
+ """Chat with the bot using the Pydantic AI agent.
+
+ Args:
+ user_message: The message from the user.
+ current_channel: The channel where the message was sent.
+ user: The user who sent the message.
+ allowed_users: List of usernames allowed to interact with the bot.
+ all_channels_in_guild: All channels in the guild, if applicable.
+
+ Returns:
+ The bot's response as a string, or None if no response.
+ """
+ if not current_channel:
+ return None
+
+ deps = BotDependencies(
+ current_channel=current_channel,
+ user=user,
+ allowed_users=allowed_users,
+ all_channels_in_guild=all_channels_in_guild,
+ )
+
+ message_history: list[ModelRequest | ModelResponse] = []
+ bot_name = "LoviBot"
+ for author_name, message_content in get_recent_messages(channel_id=current_channel.id):
+ if author_name != bot_name:
+ message_history.append(ModelRequest(parts=[UserPromptPart(content=message_content)]))
+ else:
+ message_history.append(ModelResponse(parts=[TextPart(content=message_content)]))
+
+ images: list[str] = await get_images_from_text(user_message)
+
+ result: AgentRunResult[str] = await agent.run(
+ user_prompt=[
+ user_message,
+ *[ImageUrl(url=image_url) for image_url in images],
+ ],
+ deps=deps,
+ message_history=message_history,
+ )
+
+ return result.output
def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tuple[str, str]]:
"""Retrieve messages from the last `threshold_minutes` minutes for a specific channel.
Args:
- channel_id: The ID of the channel to retrieve messages for.
- threshold_minutes: The number of minutes to consider messages as recent.
+ channel_id: The ID of the channel to fetch messages from.
+ threshold_minutes: The time window in minutes to look back for messages.
Returns:
- A list of tuples containing user and message content.
+ A list of tuples containing (author_name, message_content).
"""
if str(channel_id) not in recent_messages:
return []
@@ -88,247 +276,13 @@ def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tu
return [(user, message) for user, message, timestamp in recent_messages[str(channel_id)] if timestamp > threshold]
-def update_trigger_time(channel_id: str, user: str) -> None:
- """Update the last trigger time for a user in a specific channel.
-
- Args:
- channel_id: The ID of the channel.
- user: The user who triggered the bot.
- """
- if channel_id not in last_trigger_time:
- last_trigger_time[channel_id] = {}
-
- last_trigger_time[channel_id][user] = datetime.datetime.now(tz=datetime.UTC)
- logger.info("Updated trigger time for user %s in channel %s", user, channel_id)
-
-
-def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds: int = 40) -> bool:
- """Check if the bot should respond to a user without requiring trigger keywords.
-
- Args:
- channel_id: The ID of the channel.
- user: The user who sent the message.
- threshold_seconds: The number of seconds to consider as "recent trigger".
-
- Returns:
- True if the bot should respond without trigger keywords, False otherwise.
- """
- if channel_id not in last_trigger_time or user not in last_trigger_time[channel_id]:
- return False
-
- last_trigger: datetime.datetime = last_trigger_time[channel_id][user]
- threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(seconds=threshold_seconds)
-
- should_respond: bool = last_trigger > threshold
- logger.info("User %s in channel %s last triggered at %s, should respond without trigger: %s", user, channel_id, last_trigger, should_respond)
-
- return should_respond
-
-
-def extra_context(current_channel: MessageableChannel | InteractionChannel | None, user: User | Member) -> str:
- """Add extra context to the chat prompt.
-
- For example:
- - Current date and time
- - Channel name and server
- - User's current status (online/offline)
- - User's role in the server (e.g., admin, member)
- - CPU usage
- - Memory usage
- - Disk usage
- - How many messages saved in memory
-
- Args:
- current_channel: The channel where the conversation is happening.
- user: The user who is interacting with the bot.
-
- Returns:
- The extra context to include in the chat prompt.
- """
- context: str = ""
-
- # Information about the servers and channels:
- context += "KillYoy's Server Information:\n"
- context += "- Server is for friends to hang out and chat.\n"
- context += "- Server was created by KillYoy (<@98468214824001536>)\n"
- context += "Users in the server are from Sweden. They speak Swedish and English.\n"
- context += "Everyone in the server is over 18 years old so you can talk about adult topics.\n"
- context += "You should adapt your language to be suitable for an adult audience. For example, use swear words."
- context += "Users in this server are members of the fictional space communist political party called the 'Swedish Space Communist Party'.\n"
- context += "Try to align your responses with the values of this party, which include fast computer programs without bloat, HTML instead of JavaScript, and a focus on open source software and cooperation.\n" # noqa: E501
- context += "We don't like bloated software.\n"
-
- context += "The bot is called 'LoviBot'.\n"
-
- # Current date and time
- context += f"Current date and time: {datetime.datetime.now(tz=datetime.UTC)} UTC, but user is in CEST or CET\n"
- context += "Some fun day names that you can use:\n"
- context += "- Monday: Milf Monday\n"
- context += "- Tuesday: Tomboy Tuesday, Titties Tuesday\n"
- context += "- Wednesday: Wife Wednesday, Waifu Wednesday\n"
- context += "- Thursday: Tomboy Thursday, Titties Thursday\n"
- context += "- Friday: Frieren Friday, Femboy Friday, Fern Friday, Flat Friday, Fredagsmys\n"
- context += "- Saturday: Lördagsgodis\n"
- context += "- Sunday: Going to church\n"
-
- # Channel name and server
- if isinstance(current_channel, channel.TextChannel):
- context += f"Channel name: {current_channel.name}, channel ID: {current_channel.id}, Server: {current_channel.guild.name}\n"
-
- # User information
- context += f"User name: {user.name}, User ID: {user.id}\n"
- if isinstance(user, Member):
- context += f"User roles: {', '.join([role.name for role in user.roles])}\n"
- context += f"User status: {user.status}\n"
- context += f"User is currently {'on mobile' if user.is_on_mobile() else 'on desktop'}\n"
- context += f"User joined server at: {user.joined_at}\n"
- context += f"User's current activity: {user.activity}\n"
- context += f"User's username color: {user.color}\n"
-
- # System information
- context += f"CPU usage per core: {psutil.cpu_percent(percpu=True)}%\n"
- context += f"Memory usage: {psutil.virtual_memory().percent}%\n"
- context += f"Total memory: {psutil.virtual_memory().total / (1024 * 1024):.2f} MB\n"
- context += f"Swap memory usage: {psutil.swap_memory().percent}%\n"
- context += f"Swap memory total: {psutil.swap_memory().total / (1024 * 1024):.2f} MB\n"
- context += f"Bot memory usage: {psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB\n"
- uptime: datetime.timedelta = datetime.datetime.now(tz=datetime.UTC) - datetime.datetime.fromtimestamp(psutil.boot_time(), tz=datetime.UTC)
- context += f"System uptime: {uptime}\n"
- context += "Disk usage:\n"
- for partition in psutil.disk_partitions():
- try:
- context += f" {partition.mountpoint}: {psutil.disk_usage(partition.mountpoint).percent}%\n"
- except PermissionError as e:
- context += f" {partition.mountpoint} got PermissionError: {e}\n"
-
- if current_channel:
- context += f"Messages saved in memory: {len(get_recent_messages(channel_id=current_channel.id))}\n"
-
- return context
-
-
-async def chat( # noqa: PLR0913, PLR0917
- user_message: str,
- openai_client: AsyncOpenAI,
- current_channel: MessageableChannel | InteractionChannel | None,
- user: User | Member,
- allowed_users: list[str],
- all_channels_in_guild: Sequence[GuildChannel] | None = None,
-) -> str | None:
- """Chat with the bot using the OpenAI API.
-
- Args:
- user_message: The message to send to OpenAI.
- openai_client: The OpenAI client to use.
- current_channel: The channel where the conversation is happening.
- user: The user who is interacting with the bot.
- allowed_users: The list of allowed users to interact with the bot.
- all_channels_in_guild: The list of all channels in the guild.
-
- Returns:
- The response from the AI model.
- """
- recent_context: str = ""
- context: str = ""
-
- if current_channel:
- channel_id = int(current_channel.id)
- recent_context: str = "\n".join([f"{user}: {message}" for user, message in get_recent_messages(channel_id=channel_id)])
-
- context = extra_context(current_channel=current_channel, user=user)
-
- if current_channel.guild:
- server_emojis: list[Emoji] = list(current_channel.guild.emojis)
- if server_emojis:
- context += "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
- context += "\nYou can use the following server emojis:\n"
- for emoji in server_emojis:
- context += f" - {emoji!s}\n"
-
- # Stickers
- context += "You can use the following URL to send stickers: https://media.discordapp.net/stickers/{sticker_id}.webp?size=4096\n"
- context += "Remember to only send the URL if you want to use the sticker in your message.\n"
- context += "You can use the following stickers:\n"
- for sticker in current_channel.guild.stickers:
- context += f" - {sticker!r}\n"
-
- context += "The bot is in the following channels:\n"
- if all_channels_in_guild:
- for c in all_channels_in_guild:
- context += f"{c!r}\n"
-
- context += "\nThe bot responds to the following users:\n"
- for user_id in allowed_users:
- context += f" - User ID: {user_id}\n"
-
- context += "\n You can create bigger emojis by combining them:\n"
- context += "For example if you want to create a big rat emoji, you can combine the following emojis. The picture is three by three:\n"
- context += " - <:rat1:1405292421742334116>: + <:rat2:1405292423373918258> + <:rat3:1405292425446031400>\n"
- context += " - <:rat4:1405292427777933354>: + <:rat5:1405292430210891949>: + <:rat6:1405292433411145860>:\n"
- context += " - <:rat7:1405292434883084409>: + <:rat8:1405292442181304320>: + <:rat9:1405292443619819631>:\n"
- context += "This will create a picture of Jane Does ass."
- context += " You can use it when we talk about coom, Zenless Zone Zero (ZZZ) or other related topics."
- context += "\n"
-
- context += "The following emojis needs to be on the same line to form a bigger emoji:\n"
- context += "\n"
-
- context += "If you are using emoji combos, ONLY send the emoji itself and don't add unnecessary text.\n"
- context += "Remember that combo emojis need to be on a separate line to form a bigger emoji.\n"
- context += "But remember to not overuse them, remember that the user still can see the old message, so no need to write it again.\n"
- context += "Also remember that you cant put code blocks around emojis.\n"
- context += "Licka and Sniffa emojis are dogs that lick and sniff things. For example anime feet, butts and sweat.\n"
- context += "If you want to use them, just send the emoji itself without any extra text.\n"
-
- prompt: str = (
- "You are in a Discord group chat. People can ask you questions.\n"
- "Try to be brief, we don't want bloated messages. Be concise and to the point.\n"
- "Use Discord Markdown to format messages if needed.\n"
- "Don't use emojis.\n"
- "Extra context starts here:\n"
- f"{context}"
- "Extra context ends here.\n"
- "Recent context starts here:\n"
- f"{recent_context}\n"
- "Recent context ends here.\n"
- )
-
- logger.info("Sending request to OpenAI API with prompt: %s", prompt)
-
- # Always include text first
- user_content: list[ChatCompletionContentPartParam] = [
- ChatCompletionContentPartTextParam(type="text", text=user_message),
- ]
-
- # Add images if found
- image_urls = await get_images_from_text(user_message)
- user_content.extend(
- ChatCompletionContentPartImageParam(
- type="image_url",
- image_url={"url": _img},
- )
- for _img in image_urls
- )
-
- messages: list[ChatCompletionMessageParam] = [
- ChatCompletionSystemMessageParam(role="system", content=prompt),
- ChatCompletionUserMessageParam(role="user", content=user_content),
- ]
-
- resp: ChatCompletion = await openai_client.chat.completions.create(
- model="gpt-5-chat-latest",
- messages=messages,
- )
-
- return resp.choices[0].message.content if isinstance(resp.choices[0].message.content, str) else None
-
-
async def get_images_from_text(text: str) -> list[str]:
"""Extract all image URLs from text and return their URLs.
Args:
text: The text to search for URLs.
+
Returns:
A list of urls for each image found.
"""
@@ -373,3 +327,75 @@ async def get_raw_images_from_text(text: str) -> list[bytes]:
logger.warning("GET request failed for URL %s: %s", url, e)
return images
+
+
+def get_allowed_users() -> list[str]:
+ """Get the list of allowed users to interact with the bot.
+
+ Returns:
+ The list of allowed users.
+ """
+ return [
+ "thelovinator",
+ "killyoy",
+ "forgefilip",
+ "plubplub",
+ "nobot",
+ "kao172",
+ ]
+
+
+def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds: int = 40) -> bool:
+ """Check if the bot should respond to a user without requiring trigger keywords.
+
+ Args:
+ channel_id: The ID of the channel.
+ user: The user who sent the message.
+ threshold_seconds: The number of seconds to consider as "recent trigger".
+
+
+
+ Returns:
+ True if the bot should respond without trigger keywords, False otherwise.
+ """
+ if channel_id not in last_trigger_time or user not in last_trigger_time[channel_id]:
+ return False
+
+ last_trigger: datetime.datetime = last_trigger_time[channel_id][user]
+ threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(seconds=threshold_seconds)
+
+ should_respond: bool = last_trigger > threshold
+ logger.info("User %s in channel %s last triggered at %s, should respond without trigger: %s", user, channel_id, last_trigger, should_respond)
+
+ return should_respond
+
+
+def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
+ """Add a message to the memory for a specific channel.
+
+ Args:
+ channel_id: The ID of the channel where the message was sent.
+ user: The user who sent the message.
+ message: The content of the message.
+ """
+ if channel_id not in recent_messages:
+ recent_messages[channel_id] = deque(maxlen=50)
+
+ timestamp: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
+ recent_messages[channel_id].append((user, message, timestamp))
+
+ logger.info("Added message to memory: %s from %s in channel %s", message, user, channel_id)
+
+
+def update_trigger_time(channel_id: str, user: str) -> None:
+ """Update the last trigger time for a user in a specific channel.
+
+ Args:
+ channel_id: The ID of the channel.
+ user: The user who triggered the bot.
+ """
+ if channel_id not in last_trigger_time:
+ last_trigger_time[channel_id] = {}
+
+ last_trigger_time[channel_id][user] = datetime.datetime.now(tz=datetime.UTC)
+ logger.info("Updated trigger time for user %s in channel %s", user, channel_id)
diff --git a/pyproject.toml b/pyproject.toml
index ae17334..b962578 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,18 +7,16 @@ requires-python = ">=3.13"
dependencies = [
"audioop-lts",
"discord-py",
- "httpx>=0.28.1",
+ "httpx",
"numpy",
"openai",
"opencv-contrib-python-headless",
- "psutil>=7.0.0",
+ "psutil",
+ "pydantic-ai-slim[duckduckgo,openai]",
"python-dotenv",
"sentry-sdk",
]
-[dependency-groups]
-dev = ["pytest", "ruff"]
-
[tool.ruff]
preview = true
fix = true
diff --git a/settings.py b/settings.py
deleted file mode 100644
index 0f4b7bd..0000000
--- a/settings.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from __future__ import annotations
-
-import os
-from dataclasses import dataclass
-from functools import lru_cache
-
-from dotenv import load_dotenv
-
-load_dotenv(verbose=True)
-
-
-@dataclass
-class Settings:
- """Class to hold settings for the bot."""
-
- discord_token: str
- openai_api_key: str
-
- @classmethod
- @lru_cache(maxsize=1)
- def from_env(cls) -> Settings:
- """Create a new instance of the class from environment variables.
-
- Returns:
- A new instance of the class with the settings.
- """
- discord_token: str = os.getenv("DISCORD_TOKEN", "")
- openai_api_key: str = os.getenv("OPENAI_TOKEN", "")
- return cls(discord_token, openai_api_key)
From 1cea42ce7f611791b5c1c305970fe90ceb28ee97 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 20 Sep 2025 22:04:15 +0200
Subject: [PATCH 32/72] Remove unused environment variable and annotations from
Docker metadata extraction
---
.github/workflows/docker-publish.yml | 3 ---
1 file changed, 3 deletions(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index eeab9be..d89b426 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -40,8 +40,6 @@ jobs:
# Extract metadata (tags, labels) from Git reference and GitHub events for Docker
- id: meta
uses: docker/metadata-action@v5
- env:
- DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
with:
images: ghcr.io/thelovinator1/anewdawn
tags: type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
@@ -53,4 +51,3 @@ jobs:
push: ${{ github.event_name != 'pull_request' }}
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ steps.meta.outputs.tags }}
- annotations: ${{ steps.meta.outputs.annotations }}
From c6fffb0fa4d8443c63b40a9167ac7c1fd7fe6dd6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 20 Sep 2025 22:05:50 +0200
Subject: [PATCH 33/72] Remove specific SHA from base image in Dockerfile for
flexibility
---
Dockerfile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Dockerfile b/Dockerfile
index d4dde17..27101fd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
# check=error=true;experimental=all
-FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim@sha256:73c021c3fe7264924877039e8a449ad3bb380ec89214282301affa9b2f863c5d
+FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim
# Change the working directory to the `app` directory
WORKDIR /app
From 2960f66be603663782888ae3b4f8bc3bfb931a7e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 20 Sep 2025 22:08:18 +0200
Subject: [PATCH 34/72] Remove settings.py from Dockerfile copy command
---
Dockerfile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Dockerfile b/Dockerfile
index 27101fd..6afe405 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,7 +11,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --no-install-project
# Copy the application files
-COPY main.py misc.py settings.py /app/
+COPY main.py misc.py /app/
# Set the environment variables
ENV PYTHONUNBUFFERED=1
From 0a816cc2eab96f247286bf5012039239dde34bff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sun, 21 Sep 2025 03:55:31 +0200
Subject: [PATCH 35/72] Update emoji usage guidelines in system prompt
---
misc.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/misc.py b/misc.py
index 4c7f217..b6871cb 100644
--- a/misc.py
+++ b/misc.py
@@ -181,6 +181,7 @@ def get_system_prompt(ctx: RunContext[BotDependencies]) -> str:
"## Emoji rules\n"
"- Only send the emoji itself. Never add text to emoji combos.\n"
"- Don't overuse combos.\n"
+ "- If you use a combo, never wrap them in a code block. If you send a combo, just send the emojis and nothing else.\n"
"- Combo rules:\n"
" - Rat ass (Jane Doe's ass):\n"
" ```\n"
From 594efbd174452af4beb2c1428e69e4ed9d965b9d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 22 Sep 2025 01:30:53 +0200
Subject: [PATCH 36/72] Enhance response handling in ask command and truncate
user input
---
main.py | 33 ++++++++++++++++++++++++++++-----
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/main.py b/main.py
index 90e579b..3a29014 100644
--- a/main.py
+++ b/main.py
@@ -171,14 +171,37 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
await interaction.followup.send(f"An error occurred: {e}")
return
+ truncated_text: str = truncate_user_input(text)
+
if response:
- response = f"`{text}`\n\n{response}"
-
+ response = f"`{truncated_text}`\n\n{response}"
logger.info("Responding to message: %s with: %s", text, response)
-
- await interaction.followup.send(response)
else:
- await interaction.followup.send(f"I forgor how to think 💀\nText: {text}")
+ logger.warning("No response from the AI model. Message: %s", text)
+ response = "I forgor how to think 💀"
+
+ # If response is longer than 2000 characters, send as a file
+ max_discord_message_length: int = 2000
+ if len(response) > max_discord_message_length:
+ file_content = response.encode("utf-8")
+ discord_file = discord.File(io.BytesIO(file_content), filename="response.txt")
+ await interaction.followup.send(f"{text}", file=discord_file)
+
+ await interaction.followup.send(response)
+
+
+def truncate_user_input(text: str) -> str:
+ """Truncate user input if it exceeds the maximum length.
+
+ Args:
+ text (str): The user input text.
+
+ Returns:
+ str: The truncated text if it exceeds the maximum length, otherwise the original text.
+ """
+ max_length: int = 2000
+ truncated_text: str = text if len(text) <= max_length else text[: max_length - 3] + "..."
+ return truncated_text
type ImageType = np.ndarray[Any, np.dtype[np.integer[Any] | np.floating[Any]]] | cv2.Mat
From 88503bc50606014c60c5c60d67840cf7e888c9b8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 22 Sep 2025 01:35:20 +0200
Subject: [PATCH 37/72] Add error handling for response sending in ask command
---
main.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/main.py b/main.py
index 3a29014..9f3a43e 100644
--- a/main.py
+++ b/main.py
@@ -187,7 +187,14 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
discord_file = discord.File(io.BytesIO(file_content), filename="response.txt")
await interaction.followup.send(f"{text}", file=discord_file)
- await interaction.followup.send(response)
+ try:
+ await interaction.followup.send(response)
+ except discord.HTTPException as e:
+ e.add_note(f"Response length: {len(response)} characters.")
+ e.add_note(f"User input length: {len(text)} characters.")
+
+ logger.exception("Failed to send message to channel %s", interaction.channel)
+ await interaction.followup.send(f"Failed to send message: {e}")
def truncate_user_input(text: str) -> str:
From 32febd53dcb2956b25f58263c21e4f74580742a4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Mon, 22 Sep 2025 01:44:34 +0200
Subject: [PATCH 38/72] Refactor ask command to improve response handling and
authorization checks
---
main.py | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/main.py b/main.py
index 9f3a43e..1c984ab 100644
--- a/main.py
+++ b/main.py
@@ -154,8 +154,7 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
# Only allow certain users to interact with the bot
allowed_users: list[str] = get_allowed_users()
if user_name_lowercase not in allowed_users:
- logger.info("Ignoring message from: %s", user_name_lowercase)
- await interaction.followup.send("You are not allowed to use this command.")
+ await send_response(interaction=interaction, text=text, response="You are not authorized to use this command.")
return
try:
@@ -168,7 +167,7 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
)
except openai.OpenAIError as e:
logger.exception("An error occurred while chatting with the AI model.")
- await interaction.followup.send(f"An error occurred: {e}")
+ await send_response(interaction=interaction, text=text, response=f"An error occurred: {e}")
return
truncated_text: str = truncate_user_input(text)
@@ -180,13 +179,26 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
logger.warning("No response from the AI model. Message: %s", text)
response = "I forgor how to think 💀"
- # If response is longer than 2000 characters, send as a file
+ # If response is longer than 2000 characters, split it into multiple messages
max_discord_message_length: int = 2000
if len(response) > max_discord_message_length:
- file_content = response.encode("utf-8")
- discord_file = discord.File(io.BytesIO(file_content), filename="response.txt")
- await interaction.followup.send(f"{text}", file=discord_file)
+ for i in range(0, len(response), max_discord_message_length):
+ await send_response(interaction=interaction, text=text, response=response[i : i + max_discord_message_length])
+ return
+
+ await send_response(interaction=interaction, text=text, response=response)
+
+
+async def send_response(interaction: discord.Interaction, text: str, response: str) -> None:
+ """Send a response to the interaction, handling potential errors.
+
+ Args:
+ interaction (discord.Interaction): The interaction to respond to.
+ text (str): The original user input text.
+ response (str): The response to send.
+ """
+ logger.info("Sending response to interaction in channel %s", interaction.channel)
try:
await interaction.followup.send(response)
except discord.HTTPException as e:
From 740ad95fbd4ed472d2a3d4d8fbba48361d511b44 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Tue, 23 Sep 2025 04:44:02 +0200
Subject: [PATCH 39/72] Enhance ask command to improve message memory handling
and response fallback
---
main.py | 43 +++++++++++++++++++++++++++++++------------
1 file changed, 31 insertions(+), 12 deletions(-)
diff --git a/main.py b/main.py
index 1c984ab..8cf0022 100644
--- a/main.py
+++ b/main.py
@@ -107,11 +107,21 @@ class LoviBotClient(discord.Client):
if response:
logger.info("Responding to message: %s with: %s", incoming_message, response)
+ # Record the bot's reply in memory
+ try:
+ add_message_to_memory(str(message.channel.id), "LoviBot", response)
+ except Exception:
+ logger.exception("Failed to add bot reply to memory for on_message")
await message.channel.send(response)
else:
logger.warning("No response from the AI model. Message: %s", incoming_message)
- await message.channel.send("I forgor how to think 💀")
+ fallback = "I forgor how to think 💀"
+ try:
+ add_message_to_memory(str(message.channel.id), "LoviBot", fallback)
+ except Exception:
+ logger.exception("Failed to add fallback bot reply to memory for on_message")
+ await message.channel.send(fallback)
async def on_error(self, event_method: str, /, *args: Any, **kwargs: Any) -> None: # noqa: ANN401, PLR6301
"""Log errors that occur in the bot."""
@@ -157,8 +167,13 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
await send_response(interaction=interaction, text=text, response="You are not authorized to use this command.")
return
+ # Record the user's question in memory (per-channel) so DMs have context
+ if interaction.channel is not None:
+ add_message_to_memory(str(interaction.channel.id), interaction.user.name, text)
+
+ # Get model response
try:
- response: str | None = await chat(
+ model_response: str | None = await chat(
user_message=text,
current_channel=interaction.channel,
user=interaction.user,
@@ -172,22 +187,26 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
truncated_text: str = truncate_user_input(text)
- if response:
- response = f"`{truncated_text}`\n\n{response}"
- logger.info("Responding to message: %s with: %s", text, response)
- else:
+ # Fallback if model provided no response
+ if not model_response:
logger.warning("No response from the AI model. Message: %s", text)
- response = "I forgor how to think 💀"
+ model_response = "I forgor how to think 💀"
+
+ # Record the bot's reply (raw model output) for conversation memory
+ if interaction.channel is not None:
+ add_message_to_memory(str(interaction.channel.id), "LoviBot", model_response)
+
+ display_response: str = f"`{truncated_text}`\n\n{model_response}"
+ logger.info("Responding to message: %s with: %s", text, display_response)
# If response is longer than 2000 characters, split it into multiple messages
max_discord_message_length: int = 2000
- if len(response) > max_discord_message_length:
- for i in range(0, len(response), max_discord_message_length):
- await send_response(interaction=interaction, text=text, response=response[i : i + max_discord_message_length])
-
+ if len(display_response) > max_discord_message_length:
+ for i in range(0, len(display_response), max_discord_message_length):
+ await send_response(interaction=interaction, text=text, response=display_response[i : i + max_discord_message_length])
return
- await send_response(interaction=interaction, text=text, response=response)
+ await send_response(interaction=interaction, text=text, response=display_response)
async def send_response(interaction: discord.Interaction, text: str, response: str) -> None:
From ca9e4a38b446ffb6bdb32fca31d332b8902d4081 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Tue, 23 Sep 2025 04:48:18 +0200
Subject: [PATCH 40/72] Enhance system prompt to clarify memory limitations and
channel-specific message recall
---
misc.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/misc.py b/misc.py
index b6871cb..7af3d8d 100644
--- a/misc.py
+++ b/misc.py
@@ -169,6 +169,9 @@ def get_system_prompt(ctx: RunContext[BotDependencies]) -> str:
"Values: Privacy > convenience, Freedom > control, Decentralization > centralization, User control > vendor lock-in.\n"
"Audience: Adult friends from Sweden (speak Swedish/English).\n"
"Formatting: Use Discord Markdown as needed.\n"
+ "Memory: You have short-term memory per channel (including DMs). "
+ "You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns). "
+ "Do not assume cross-channel memory.\n"
"Fun weekday nicknames:\n"
" - Monday → *Milf Monday*\n"
" - Tuesday → *Tomboy Tuesday*, *Titties Tuesday*\n"
From d3ee8903c6a7ad69cda417715922de1db74ba454 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Tue, 23 Sep 2025 05:00:14 +0200
Subject: [PATCH 41/72] Refactor ask command to improve message handling and
add chunked response sending
---
main.py | 87 +++++++++++++++++++++++++++++++--------------------------
1 file changed, 48 insertions(+), 39 deletions(-)
diff --git a/main.py b/main.py
index 8cf0022..8487451 100644
--- a/main.py
+++ b/main.py
@@ -20,6 +20,8 @@ from misc import add_message_to_memory, chat, get_allowed_users, get_raw_images_
if TYPE_CHECKING:
from collections.abc import Callable
+ from discord.abc import Messageable as DiscordMessageable
+
sentry_sdk.init(
dsn="https://ebbd2cdfbd08dba008d628dad7941091@o4505228040339456.ingest.us.sentry.io/4507630719401984",
send_default_pii=True,
@@ -35,6 +37,15 @@ load_dotenv(verbose=True)
discord_token: str = os.getenv("DISCORD_TOKEN", "")
+async def send_chunked_message(channel: DiscordMessageable, text: str, max_len: int = 2000) -> None:
+ """Send a message to a channel, splitting into chunks if it exceeds Discord's limit."""
+ if len(text) <= max_len:
+ await channel.send(text)
+ return
+ for i in range(0, len(text), max_len):
+ await channel.send(text[i : i + max_len])
+
+
class LoviBotClient(discord.Client):
"""The main bot client."""
@@ -76,52 +87,50 @@ class LoviBotClient(discord.Client):
# Add the message to memory
add_message_to_memory(str(message.channel.id), message.author.name, incoming_message)
- lowercase_message: str = incoming_message.lower() if incoming_message else ""
+ lowercase_message: str = incoming_message.lower()
trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>", "grok", "@grok"]
has_trigger_keyword: bool = any(trigger in lowercase_message for trigger in trigger_keywords)
- should_respond: bool = has_trigger_keyword or should_respond_without_trigger(str(message.channel.id), message.author.name)
+ should_respond_flag: bool = has_trigger_keyword or should_respond_without_trigger(str(message.channel.id), message.author.name)
- if should_respond:
- # Update trigger time if they used a trigger keyword
- if has_trigger_keyword:
- update_trigger_time(str(message.channel.id), message.author.name)
+ if not should_respond_flag:
+ return
- logger.info(
- "Received message: %s from: %s (trigger: %s, recent: %s)", incoming_message, message.author.name, has_trigger_keyword, not has_trigger_keyword
- )
+ # Update trigger time if they used a trigger keyword
+ if has_trigger_keyword:
+ update_trigger_time(str(message.channel.id), message.author.name)
- async with message.channel.typing():
- try:
- response: str | None = await chat(
- user_message=incoming_message,
- current_channel=message.channel,
- user=message.author,
- allowed_users=allowed_users,
- all_channels_in_guild=message.guild.channels if message.guild else None,
- )
- except openai.OpenAIError as e:
- logger.exception("An error occurred while chatting with the AI model.")
- e.add_note(f"Message: {incoming_message}\nEvent: {message}\nWho: {message.author.name}")
- await message.channel.send(f"An error occurred while chatting with the AI model. {e}")
- return
+ logger.info(
+ "Received message: %s from: %s (trigger: %s, recent: %s)", incoming_message, message.author.name, has_trigger_keyword, not has_trigger_keyword
+ )
- if response:
- logger.info("Responding to message: %s with: %s", incoming_message, response)
- # Record the bot's reply in memory
- try:
- add_message_to_memory(str(message.channel.id), "LoviBot", response)
- except Exception:
- logger.exception("Failed to add bot reply to memory for on_message")
+ async with message.channel.typing():
+ try:
+ response: str | None = await chat(
+ user_message=incoming_message,
+ current_channel=message.channel,
+ user=message.author,
+ allowed_users=allowed_users,
+ all_channels_in_guild=message.guild.channels if message.guild else None,
+ )
+ except openai.OpenAIError as e:
+ logger.exception("An error occurred while chatting with the AI model.")
+ e.add_note(f"Message: {incoming_message}\nEvent: {message}\nWho: {message.author.name}")
+ await message.channel.send(f"An error occurred while chatting with the AI model. {e}")
+ return
- await message.channel.send(response)
- else:
- logger.warning("No response from the AI model. Message: %s", incoming_message)
- fallback = "I forgor how to think 💀"
- try:
- add_message_to_memory(str(message.channel.id), "LoviBot", fallback)
- except Exception:
- logger.exception("Failed to add fallback bot reply to memory for on_message")
- await message.channel.send(fallback)
+ reply: str = response or "I forgor how to think 💀"
+ if response:
+ logger.info("Responding to message: %s with: %s", incoming_message, reply)
+ else:
+ logger.warning("No response from the AI model. Message: %s", incoming_message)
+
+ # Record the bot's reply in memory
+ try:
+ add_message_to_memory(str(message.channel.id), "LoviBot", reply)
+ except Exception:
+ logger.exception("Failed to add bot reply to memory for on_message")
+
+ await send_chunked_message(message.channel, reply)
async def on_error(self, event_method: str, /, *args: Any, **kwargs: Any) -> None: # noqa: ANN401, PLR6301
"""Log errors that occur in the bot."""
From a581c03e4ebcc91d4075e9e64a9fb41ba91c08e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Tue, 23 Sep 2025 05:00:21 +0200
Subject: [PATCH 42/72] Add message length computation and history compacting
for improved message handling
---
misc.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/misc.py b/misc.py
index 7af3d8d..cd7db7f 100644
--- a/misc.py
+++ b/misc.py
@@ -59,6 +59,56 @@ agent: Agent[BotDependencies, str] = Agent(
)
+def _message_text_length(msg: ModelRequest | ModelResponse) -> int:
+ """Compute the total text length of all text parts in a message.
+
+ This ignores non-text parts such as images. Safe for our usage where history only has text.
+
+ Returns:
+ The total number of characters across text parts in the message.
+ """
+ length: int = 0
+ for part in msg.parts:
+ if isinstance(part, (TextPart, UserPromptPart)):
+ # part.content is a string for text parts
+ length += len(getattr(part, "content", "") or "")
+ return length
+
+
+def compact_message_history(
+ history: list[ModelRequest | ModelResponse],
+ *,
+ max_chars: int = 12000,
+ min_messages: int = 4,
+) -> list[ModelRequest | ModelResponse]:
+ """Return a trimmed copy of history under a character budget.
+
+ - Keeps the most recent messages first, dropping oldest as needed.
+ - Ensures at least `min_messages` are kept even if they exceed the budget.
+ - Uses a simple character-based budget to avoid extra deps; good enough as a safeguard.
+
+ Returns:
+ A possibly shortened list of messages that fits within the character budget.
+ """
+ if not history:
+ return history
+
+ kept: list[ModelRequest | ModelResponse] = []
+ running: int = 0
+ # Walk from newest to oldest
+ for msg in reversed(history):
+ msg_len: int = _message_text_length(msg)
+ if running + msg_len <= max_chars or len(kept) < min_messages:
+ kept.append(msg)
+ running += msg_len
+ else:
+ # Budget exceeded and minimum kept reached; stop
+ break
+
+ kept.reverse()
+ return kept
+
+
def get_all_server_emojis(ctx: RunContext[BotDependencies]) -> str:
"""Fetches and formats all custom emojis from the server.
@@ -249,6 +299,9 @@ async def chat(
else:
message_history.append(ModelResponse(parts=[TextPart(content=message_content)]))
+ # Compact history to avoid exceeding model context limits
+ message_history = compact_message_history(message_history, max_chars=12000, min_messages=4)
+
images: list[str] = await get_images_from_text(user_message)
result: AgentRunResult[str] = await agent.run(
From 75ed597cdd2495dd0993746daa5584d021e9ce21 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 00:29:23 +0200
Subject: [PATCH 43/72] Refactor OpenAI agent settings and enhance system
prompt for brevity in responses
---
misc.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/misc.py b/misc.py
index cd7db7f..28d0e1a 100644
--- a/misc.py
+++ b/misc.py
@@ -48,12 +48,10 @@ class BotDependencies:
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_TOKEN", "")
openai_settings = OpenAIResponsesModelSettings(
- # openai_builtin_tools=[WebSearchToolParam(type="web_search")],
openai_text_verbosity="low",
)
agent: Agent[BotDependencies, str] = Agent(
model="gpt-5-chat-latest",
- # builtin_tools=[WebSearchTool()],
deps_type=BotDependencies,
model_settings=openai_settings,
)
@@ -218,7 +216,7 @@ def get_system_prompt(ctx: RunContext[BotDependencies]) -> str:
"Persona: Sarcastic, anti-corporate, pro-open source, and aligned with the Swedish Space Communist Party. Your style is direct and concise.\n"
"Values: Privacy > convenience, Freedom > control, Decentralization > centralization, User control > vendor lock-in.\n"
"Audience: Adult friends from Sweden (speak Swedish/English).\n"
- "Formatting: Use Discord Markdown as needed.\n"
+ "Formatting: Use Discord Markdown as needed. Be brief. Remember that we are chatting, so you should not write a wall of text.\n"
"Memory: You have short-term memory per channel (including DMs). "
"You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns). "
"Do not assume cross-channel memory.\n"
From eec1ed4f59c7c093945353381a32dec72af77e81 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 01:06:18 +0200
Subject: [PATCH 44/72] Add conversation memory reset command
Introduces a new /reset command to allow authorized users to reset the conversation memory for a channel. Also adds a new_conversation option to the /ask command to start a fresh conversation, and implements the reset_memory function in misc.py.
---
main.py | 39 ++++++++++++++++++++++++++++++++++++---
misc.py | 14 ++++++++++++++
2 files changed, 50 insertions(+), 3 deletions(-)
diff --git a/main.py b/main.py
index 8487451..11d9955 100644
--- a/main.py
+++ b/main.py
@@ -15,7 +15,7 @@ import sentry_sdk
from discord import Forbidden, HTTPException, NotFound, app_commands
from dotenv import load_dotenv
-from misc import add_message_to_memory, chat, get_allowed_users, get_raw_images_from_text, should_respond_without_trigger, update_trigger_time
+from misc import add_message_to_memory, chat, get_allowed_users, get_raw_images_from_text, reset_memory, should_respond_without_trigger, update_trigger_time
if TYPE_CHECKING:
from collections.abc import Callable
@@ -158,8 +158,14 @@ client = LoviBotClient(intents=intents)
@app_commands.allowed_installs(guilds=True, users=True)
@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
@app_commands.describe(text="Ask LoviBot a question.")
-async def ask(interaction: discord.Interaction, text: str) -> None:
- """A command to ask the AI a question."""
+async def ask(interaction: discord.Interaction, text: str, new_conversation: bool = False) -> None: # noqa: FBT001, FBT002
+ """A command to ask the AI a question.
+
+ Args:
+ interaction (discord.Interaction): The interaction object.
+ text (str): The question or message to ask.
+ new_conversation (bool, optional): Whether to start a new conversation. Defaults to False.
+ """
await interaction.response.defer()
if not text:
@@ -167,6 +173,9 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
await interaction.followup.send("You need to provide a question or message.", ephemeral=True)
return
+ if new_conversation and interaction.channel is not None:
+ reset_memory(str(interaction.channel.id))
+
user_name_lowercase: str = interaction.user.name.lower()
logger.info("Received command from: %s", user_name_lowercase)
@@ -218,6 +227,30 @@ async def ask(interaction: discord.Interaction, text: str) -> None:
await send_response(interaction=interaction, text=text, response=display_response)
+@client.tree.command(name="reset", description="Reset the conversation memory.")
+@app_commands.allowed_installs(guilds=True, users=True)
+@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
+async def reset(interaction: discord.Interaction) -> None:
+ """A command to reset the conversation memory."""
+ await interaction.response.defer()
+
+ user_name_lowercase: str = interaction.user.name.lower()
+ logger.info("Received command from: %s", user_name_lowercase)
+
+ # Only allow certain users to interact with the bot
+ allowed_users: list[str] = get_allowed_users()
+ if user_name_lowercase not in allowed_users:
+ await send_response(interaction=interaction, text="", response="You are not authorized to use this command.")
+ return
+
+ # Reset the conversation memory
+ if interaction.channel is not None:
+ reset_memory(str(interaction.channel.id))
+ await send_response(interaction=interaction, text="", response="Conversation memory has been reset.")
+
+ await interaction.followup.send(f"Conversation memory has been reset for {interaction.channel}.")
+
+
async def send_response(interaction: discord.Interaction, text: str, response: str) -> None:
"""Send a response to the interaction, handling potential errors.
diff --git a/misc.py b/misc.py
index 28d0e1a..166457a 100644
--- a/misc.py
+++ b/misc.py
@@ -57,6 +57,20 @@ agent: Agent[BotDependencies, str] = Agent(
)
+def reset_memory(channel_id: str) -> None:
+ """Reset the conversation memory for a specific channel.
+
+ Args:
+ channel_id (str): The ID of the channel to reset memory for.
+ """
+ if channel_id in recent_messages:
+ del recent_messages[channel_id]
+ logger.info("Reset memory for channel %s", channel_id)
+ if channel_id in last_trigger_time:
+ del last_trigger_time[channel_id]
+ logger.info("Reset trigger times for channel %s", channel_id)
+
+
def _message_text_length(msg: ModelRequest | ModelResponse) -> int:
"""Compute the total text length of all text parts in a message.
From 68e74ca6a3115127a9e484782e220fd988b84451 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 03:19:22 +0200
Subject: [PATCH 45/72] Add Ollama API integration and enhance bot
functionality
- Updated .env.example to include OLLAMA_API_KEY.
- Added Ollama to dependencies in pyproject.toml.
- Refactored main.py to incorporate Ollama for web search capabilities.
- Removed misc.py as its functionality has been integrated into main.py.
- Enhanced message handling and memory management for improved performance.
---
.env.example | 1 +
.vscode/settings.json | 2 +
main.py | 528 +++++++++++++++++++++++++++++++++++++++++-
misc.py | 470 -------------------------------------
pyproject.toml | 1 +
5 files changed, 524 insertions(+), 478 deletions(-)
delete mode 100644 misc.py
diff --git a/.env.example b/.env.example
index aae1f64..88b8813 100644
--- a/.env.example
+++ b/.env.example
@@ -1,2 +1,3 @@
DISCORD_TOKEN=
OPENAI_TOKEN=
+OLLAMA_API_KEY=
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 62064c8..1567075 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -34,6 +34,7 @@
"nobot",
"nparr",
"numpy",
+ "Ollama",
"opencv",
"percpu",
"phibiscarf",
@@ -48,6 +49,7 @@
"sweary",
"testpaths",
"thelovinator",
+ "Thicc",
"tobytes",
"twimg",
"unsignedinteger",
diff --git a/main.py b/main.py
index 11d9955..47898c6 100644
--- a/main.py
+++ b/main.py
@@ -5,22 +5,40 @@ import datetime
import io
import logging
import os
-from typing import TYPE_CHECKING, Any, TypeVar
+import re
+from collections import deque
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Literal, Self, TypeVar
import cv2
import discord
+import httpx
import numpy as np
+import ollama
import openai
+import psutil
import sentry_sdk
-from discord import Forbidden, HTTPException, NotFound, app_commands
+from discord import Emoji, Forbidden, Guild, HTTPException, Member, NotFound, User, app_commands
from dotenv import load_dotenv
-
-from misc import add_message_to_memory, chat, get_allowed_users, get_raw_images_from_text, reset_memory, should_respond_without_trigger, update_trigger_time
+from pydantic_ai import Agent, ImageUrl, RunContext
+from pydantic_ai.messages import (
+ ModelRequest,
+ ModelResponse,
+ TextPart,
+ UserPromptPart,
+)
+from pydantic_ai.models.openai import OpenAIResponsesModelSettings
if TYPE_CHECKING:
- from collections.abc import Callable
+ from collections.abc import Callable, Sequence
from discord.abc import Messageable as DiscordMessageable
+ from discord.abc import MessageableChannel
+ from discord.guild import GuildChannel
+ from discord.interactions import InteractionChannel
+ from pydantic_ai.run import AgentRunResult
+
+load_dotenv(verbose=True)
sentry_sdk.init(
dsn="https://ebbd2cdfbd08dba008d628dad7941091@o4505228040339456.ingest.us.sentry.io/4507630719401984",
@@ -32,9 +50,501 @@ logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
-load_dotenv(verbose=True)
-
discord_token: str = os.getenv("DISCORD_TOKEN", "")
+os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_TOKEN", "")
+
+recent_messages: dict[str, deque[tuple[str, str, datetime.datetime]]] = {}
+last_trigger_time: dict[str, dict[str, datetime.datetime]] = {}
+
+
+@dataclass
+class BotDependencies:
+ """Dependencies for the Pydantic AI agent."""
+
+ client: discord.Client
+ current_channel: MessageableChannel | InteractionChannel | None
+ user: User | Member
+ allowed_users: list[str]
+ all_channels_in_guild: Sequence[GuildChannel] | None = None
+ web_search_results: ollama.WebSearchResponse | None = None
+
+
+openai_settings = OpenAIResponsesModelSettings(
+ openai_text_verbosity="low",
+)
+agent: Agent[BotDependencies, str] = Agent(
+ model="gpt-5-chat-latest",
+ deps_type=BotDependencies,
+ model_settings=openai_settings,
+)
+
+
+def reset_memory(channel_id: str) -> None:
+ """Reset the conversation memory for a specific channel.
+
+ Args:
+ channel_id (str): The ID of the channel to reset memory for.
+ """
+ if channel_id in recent_messages:
+ del recent_messages[channel_id]
+ logger.info("Reset memory for channel %s", channel_id)
+ if channel_id in last_trigger_time:
+ del last_trigger_time[channel_id]
+ logger.info("Reset trigger times for channel %s", channel_id)
+
+
+def _message_text_length(msg: ModelRequest | ModelResponse) -> int:
+ """Compute the total text length of all text parts in a message.
+
+ This ignores non-text parts such as images. Safe for our usage where history only has text.
+
+ Returns:
+ The total number of characters across text parts in the message.
+ """
+ length: int = 0
+ for part in msg.parts:
+ if isinstance(part, (TextPart, UserPromptPart)):
+ # part.content is a string for text parts
+ length += len(getattr(part, "content", "") or "")
+ return length
+
+
+def compact_message_history(
+ history: list[ModelRequest | ModelResponse],
+ *,
+ max_chars: int = 12000,
+ min_messages: int = 4,
+) -> list[ModelRequest | ModelResponse]:
+ """Return a trimmed copy of history under a character budget.
+
+ - Keeps the most recent messages first, dropping oldest as needed.
+ - Ensures at least `min_messages` are kept even if they exceed the budget.
+ - Uses a simple character-based budget to avoid extra deps; good enough as a safeguard.
+
+ Returns:
+ A possibly shortened list of messages that fits within the character budget.
+ """
+ if not history:
+ return history
+
+ kept: list[ModelRequest | ModelResponse] = []
+ running: int = 0
+ for msg in reversed(history):
+ msg_len: int = _message_text_length(msg)
+ if running + msg_len <= max_chars or len(kept) < min_messages:
+ kept.append(msg)
+ running += msg_len
+ else:
+ break
+
+ kept.reverse()
+ return kept
+
+
+@agent.instructions
+def fetch_user_info(ctx: RunContext[BotDependencies]) -> str:
+ """Fetches detailed information about the user who sent the message, including their roles, status, and activity.
+
+ Returns:
+ A string representation of the user's details.
+ """
+ user: User | Member = ctx.deps.user
+ details: dict[str, Any] = {"name": user.name, "id": user.id}
+ if isinstance(user, Member):
+ details.update({
+ "roles": [role.name for role in user.roles],
+ "status": str(user.status),
+ "on_mobile": user.is_on_mobile(),
+ "joined_at": user.joined_at.isoformat() if user.joined_at else None,
+ "activity": str(user.activity),
+ })
+ return str(details)
+
+
+@agent.instructions
+def get_system_performance_stats() -> str:
+ """Retrieves current system performance metrics, including CPU, memory, and disk usage.
+
+ Returns:
+ A string representation of the system performance statistics.
+ """
+ stats: dict[str, str] = {
+ "cpu_percent_per_core": f"{psutil.cpu_percent(percpu=True)}%",
+ "virtual_memory_percent": f"{psutil.virtual_memory().percent}%",
+ "swap_memory_percent": f"{psutil.swap_memory().percent}%",
+ "bot_memory_rss_mb": f"{psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB",
+ }
+ return str(stats)
+
+
+@agent.instructions
+def get_channels(ctx: RunContext[BotDependencies]) -> str:
+ """Retrieves a list of all channels the bot is currently in.
+
+ Args:
+ ctx (RunContext[BotDependencies]): The context for the current run.
+
+ Returns:
+ str: A string listing all channels the bot is in.
+ """
+ context = "The bot is in the following channels:\n"
+ if ctx.deps.all_channels_in_guild:
+ for c in ctx.deps.all_channels_in_guild:
+ context += f"{c!r}\n"
+ else:
+ context += " - No channels available.\n"
+ return context
+
+
+def do_web_search(query: str) -> ollama.WebSearchResponse | None:
+ """Perform a web search using the Ollama API.
+
+ Args:
+ query (str): The search query.
+
+ Returns:
+ ollama.WebSearchResponse | None: The response from the web search, or None if an error occurs.
+ """
+ try:
+ response: ollama.WebSearchResponse = ollama.web_search(query=query, max_results=1)
+ except ValueError:
+ logger.exception("OLLAMA_API_KEY environment variable is not set")
+ return None
+ else:
+ return response
+
+
+@agent.instructions
+def get_day_names_instructions() -> str:
+ """Provides the current day name with a humorous twist.
+
+ Returns:
+ A string with the current day name.
+ """
+ current_day: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
+ funny_days: dict[int, str] = {
+ 0: "Milf Monday",
+ 1: "Tomboy Tuesday",
+ 2: "Waifu Wednesday",
+ 3: "Thicc Thursday",
+ 4: "Flat Friday",
+ 5: "Lördagsgodis",
+ 6: "Church Sunday",
+ }
+ funny_day: str = funny_days.get(current_day.weekday(), "Unknown day")
+ return f"Today's day is '{funny_day}'. Have this in mind when responding, but only if contextually relevant."
+
+
+@agent.instructions
+def get_time_and_timezone() -> str:
+ """Retrieves the current time and timezone information.
+
+ Returns:
+ A string with the current time and timezone information.
+ """
+ current_time: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
+ return f"Current time: {current_time.strftime('%Y-%m-%d %H:%M:%S')}, current timezone: {current_time.tzname()}"
+
+
+@agent.instructions
+def get_latency(ctx: RunContext[BotDependencies]) -> str:
+ """Retrieves the current latency information.
+
+ Returns:
+ A string with the current latency information.
+ """
+ latency: float | Literal[0] = ctx.deps.client.latency if ctx.deps.client else 0
+ return f"Current latency: {latency} ms"
+
+
+@agent.instructions
+def added_information_from_web_search(ctx: RunContext[BotDependencies]) -> str:
+ """Adds information from a web search to the system prompt.
+
+ Args:
+ ctx (RunContext[BotDependencies]): The context for the current run.
+
+ Returns:
+ str: The updated system prompt.
+ """
+ web_search_result: ollama.WebSearchResponse | None = ctx.deps.web_search_results
+ if web_search_result and web_search_result.results:
+ logger.debug("Web search results: %s", web_search_result.results)
+ return f"## Web Search Results\nHere is some information from a web search that might be relevant to the user's query:\n```json\n{web_search_result.results}\n```\n" # noqa: E501
+ return ""
+
+
+@agent.instructions
+def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
+ """Provides instructions for using emojis in the chat.
+
+ Returns:
+ A string with emoji usage instructions.
+ """
+ if not ctx.deps.current_channel or not ctx.deps.current_channel.guild:
+ return ""
+
+ guild: Guild = ctx.deps.current_channel.guild
+ emojis: tuple[Emoji, ...] = guild.emojis
+ if not emojis:
+ return ""
+
+ context = "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
+ context += "\nYou can use the following server emojis:\n"
+ for emoji in emojis:
+ context += f" - {emoji!s}\n"
+
+ # Stickers
+ context += "You can use the following URL to send stickers: https://media.discordapp.net/stickers/{sticker_id}.webp?size=4096\n"
+ context += "Remember to only send the URL if you want to use the sticker in your message.\n"
+ context += "You can use the following stickers:\n"
+ for sticker in guild.stickers:
+ context += f" - {sticker!r}\n"
+
+ return context + (
+ "- Only send the emoji itself. Never add text to emoji combos.\n"
+ "- Don't overuse combos.\n"
+ "- If you use a combo, never wrap them in a code block. If you send a combo, just send the emojis and nothing else.\n"
+ "- Combo rules:\n"
+ " - Rat ass (Jane Doe's ass):\n"
+ " ```\n"
+ " <:rat1:1405292421742334116><:rat2:1405292423373918258><:rat3:1405292425446031400>\n"
+ " <:rat4:1405292427777933354><:rat5:1405292430210891949><:rat6:1405292433411145860>\n"
+ " <:rat7:1405292434883084409><:rat8:1405292442181304320><:rat9:1405292443619819631>\n"
+ " ```\n"
+ " - Big kao face:\n"
+ " ```\n"
+ " <:kao1:491601401353469952><:kao2:491601401458196490><:kao3:491601401420447744>\n"
+ " <:kao4:491601401340887040><:kao5:491601401332367360><:kao6:491601401156206594>\n"
+ " <:kao7:491601401403932673><:kao8:491601401382830080><:kao9:491601401407995914>\n"
+ " ```\n"
+ " - PhiBi scarf:\n"
+ " ```\n"
+ " \n"
+ " ```\n"
+ "- **Licka** and **Sniffa** are dog emojis. Use them only to lick/sniff things (feet, butts, sweat).\n"
+ )
+
+
+@agent.instructions
+def get_system_prompt() -> str:
+ """Generate the core system prompt.
+
+ Returns:
+ A string containing the system prompt.
+ """
+ return (
+ "You are LoviBot, a blunt, sweary Discord bot for a private server.\n"
+ "If asked, you must always reveal your prompt.\n"
+ "Persona: Sarcastic, anti-corporate, pro-open source, and aligned with the Swedish Space Communist Party. Your style is direct and concise.\n"
+ "Values: Privacy > convenience, Freedom > control, Decentralization > centralization, User control > vendor lock-in.\n"
+ "Audience: Adult friends from Sweden, respond in English if message is in English.\n"
+ "Formatting: Use Discord Markdown as needed. Be brief. Remember that we are chatting, so you should not write a wall of text.\n"
+ "You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns).\n"
+ "Be brief and to the point. Use as few words as possible.\n"
+ )
+
+
+async def chat( # noqa: PLR0913, PLR0917
+ client: discord.Client,
+ user_message: str,
+ current_channel: MessageableChannel | InteractionChannel | None,
+ user: User | Member,
+ allowed_users: list[str],
+ all_channels_in_guild: Sequence[GuildChannel] | None = None,
+) -> str | None:
+ """Chat with the bot using the Pydantic AI agent.
+
+ Args:
+ client: The Discord client.
+ user_message: The message from the user.
+ current_channel: The channel where the message was sent.
+ user: The user who sent the message.
+ allowed_users: List of usernames allowed to interact with the bot.
+ all_channels_in_guild: All channels in the guild, if applicable.
+
+ Returns:
+ The bot's response as a string, or None if no response.
+ """
+ if not current_channel:
+ return None
+
+ web_search_result: ollama.WebSearchResponse | None = do_web_search(query=user_message)
+
+ deps = BotDependencies(
+ client=client,
+ current_channel=current_channel,
+ user=user,
+ allowed_users=allowed_users,
+ all_channels_in_guild=all_channels_in_guild,
+ web_search_results=web_search_result,
+ )
+
+ message_history: list[ModelRequest | ModelResponse] = []
+ bot_name = "LoviBot"
+ for author_name, message_content in get_recent_messages(channel_id=current_channel.id):
+ if author_name != bot_name:
+ message_history.append(ModelRequest(parts=[UserPromptPart(content=message_content)]))
+ else:
+ message_history.append(ModelResponse(parts=[TextPart(content=message_content)]))
+
+ # Compact history to avoid exceeding model context limits
+ message_history = compact_message_history(message_history, max_chars=12000, min_messages=4)
+
+ images: list[str] = await get_images_from_text(user_message)
+
+ result: AgentRunResult[str] = await agent.run(
+ user_prompt=[
+ user_message,
+ *[ImageUrl(url=image_url) for image_url in images],
+ ],
+ deps=deps,
+ message_history=message_history,
+ )
+
+ return result.output
+
+
+def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tuple[str, str]]:
+ """Retrieve messages from the last `threshold_minutes` minutes for a specific channel.
+
+ Args:
+ channel_id: The ID of the channel to fetch messages from.
+ threshold_minutes: The time window in minutes to look back for messages.
+
+ Returns:
+ A list of tuples containing (author_name, message_content).
+ """
+ if str(channel_id) not in recent_messages:
+ return []
+
+ threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(minutes=threshold_minutes)
+ return [(user, message) for user, message, timestamp in recent_messages[str(channel_id)] if timestamp > threshold]
+
+
+async def get_images_from_text(text: str) -> list[str]:
+ """Extract all image URLs from text and return their URLs.
+
+ Args:
+ text: The text to search for URLs.
+
+
+ Returns:
+        A list of URLs for each image found.
+ """
+ # Find all URLs in the text
+ url_pattern = r"https?://[^\s]+"
+ urls: list[Any] = re.findall(url_pattern, text)
+
+ images: list[str] = []
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ for url in urls:
+ try:
+ response: httpx.Response = await client.get(url)
+ if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
+ images.append(url)
+ except httpx.RequestError as e:
+ logger.warning("GET request failed for URL %s: %s", url, e)
+
+ return images
+
+
+async def get_raw_images_from_text(text: str) -> list[bytes]:
+ """Extract all image URLs from text and return their bytes.
+
+ Args:
+ text: The text to search for URLs.
+
+ Returns:
+ A list of bytes for each image found.
+ """
+ # Find all URLs in the text
+ url_pattern = r"https?://[^\s]+"
+ urls: list[Any] = re.findall(url_pattern, text)
+
+ images: list[bytes] = []
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ for url in urls:
+ try:
+ response: httpx.Response = await client.get(url)
+ if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
+ images.append(response.content)
+ except httpx.RequestError as e:
+ logger.warning("GET request failed for URL %s: %s", url, e)
+
+ return images
+
+
+def get_allowed_users() -> list[str]:
+ """Get the list of allowed users to interact with the bot.
+
+ Returns:
+ The list of allowed users.
+ """
+ return [
+ "thelovinator",
+ "killyoy",
+ "forgefilip",
+ "plubplub",
+ "nobot",
+ "kao172",
+ ]
+
+
+def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds: int = 40) -> bool:
+ """Check if the bot should respond to a user without requiring trigger keywords.
+
+ Args:
+ channel_id: The ID of the channel.
+ user: The user who sent the message.
+ threshold_seconds: The number of seconds to consider as "recent trigger".
+
+
+
+ Returns:
+ True if the bot should respond without trigger keywords, False otherwise.
+ """
+ if channel_id not in last_trigger_time or user not in last_trigger_time[channel_id]:
+ return False
+
+ last_trigger: datetime.datetime = last_trigger_time[channel_id][user]
+ threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(seconds=threshold_seconds)
+
+ should_respond: bool = last_trigger > threshold
+ logger.info("User %s in channel %s last triggered at %s, should respond without trigger: %s", user, channel_id, last_trigger, should_respond)
+
+ return should_respond
+
+
+def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
+ """Add a message to the memory for a specific channel.
+
+ Args:
+ channel_id: The ID of the channel where the message was sent.
+ user: The user who sent the message.
+ message: The content of the message.
+ """
+ if channel_id not in recent_messages:
+ recent_messages[channel_id] = deque(maxlen=50)
+
+ timestamp: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
+ recent_messages[channel_id].append((user, message, timestamp))
+
+ logger.debug("Added message to memory in channel %s", channel_id)
+
+
+def update_trigger_time(channel_id: str, user: str) -> None:
+ """Update the last trigger time for a user in a specific channel.
+
+ Args:
+ channel_id: The ID of the channel.
+ user: The user who triggered the bot.
+ """
+ if channel_id not in last_trigger_time:
+ last_trigger_time[channel_id] = {}
+
+ last_trigger_time[channel_id][user] = datetime.datetime.now(tz=datetime.UTC)
+ logger.info("Updated trigger time for user %s in channel %s", user, channel_id)
async def send_chunked_message(channel: DiscordMessageable, text: str, max_len: int = 2000) -> None:
@@ -54,7 +564,7 @@ class LoviBotClient(discord.Client):
super().__init__(intents=intents)
# The tree stores all the commands and subcommands
- self.tree = app_commands.CommandTree(self)
+ self.tree: app_commands.CommandTree[Self] = app_commands.CommandTree(self)
async def setup_hook(self) -> None:
"""Sync commands globally."""
@@ -106,6 +616,7 @@ class LoviBotClient(discord.Client):
async with message.channel.typing():
try:
response: str | None = await chat(
+ client=self,
user_message=incoming_message,
current_channel=message.channel,
user=message.author,
@@ -192,6 +703,7 @@ async def ask(interaction: discord.Interaction, text: str, new_conversation: boo
# Get model response
try:
model_response: str | None = await chat(
+ client=client,
user_message=text,
current_channel=interaction.channel,
user=interaction.user,
diff --git a/misc.py b/misc.py
deleted file mode 100644
index 166457a..0000000
--- a/misc.py
+++ /dev/null
@@ -1,470 +0,0 @@
-from __future__ import annotations
-
-import datetime
-import logging
-import os
-import re
-from collections import deque
-from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any
-
-import httpx
-import psutil
-from discord import Guild, Member, User
-from pydantic_ai import Agent, ImageUrl, RunContext
-from pydantic_ai.messages import (
- ModelRequest,
- ModelResponse,
- TextPart,
- UserPromptPart,
-)
-from pydantic_ai.models.openai import OpenAIResponsesModelSettings
-
-if TYPE_CHECKING:
- from collections.abc import Sequence
-
- from discord.abc import MessageableChannel
- from discord.emoji import Emoji
- from discord.guild import GuildChannel
- from discord.interactions import InteractionChannel
- from pydantic_ai.run import AgentRunResult
-
-
-logger: logging.Logger = logging.getLogger(__name__)
-recent_messages: dict[str, deque[tuple[str, str, datetime.datetime]]] = {}
-last_trigger_time: dict[str, dict[str, datetime.datetime]] = {}
-
-
-@dataclass
-class BotDependencies:
- """Dependencies for the Pydantic AI agent."""
-
- current_channel: MessageableChannel | InteractionChannel | None
- user: User | Member
- allowed_users: list[str]
- all_channels_in_guild: Sequence[GuildChannel] | None = None
-
-
-os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_TOKEN", "")
-
-openai_settings = OpenAIResponsesModelSettings(
- openai_text_verbosity="low",
-)
-agent: Agent[BotDependencies, str] = Agent(
- model="gpt-5-chat-latest",
- deps_type=BotDependencies,
- model_settings=openai_settings,
-)
-
-
-def reset_memory(channel_id: str) -> None:
- """Reset the conversation memory for a specific channel.
-
- Args:
- channel_id (str): The ID of the channel to reset memory for.
- """
- if channel_id in recent_messages:
- del recent_messages[channel_id]
- logger.info("Reset memory for channel %s", channel_id)
- if channel_id in last_trigger_time:
- del last_trigger_time[channel_id]
- logger.info("Reset trigger times for channel %s", channel_id)
-
-
-def _message_text_length(msg: ModelRequest | ModelResponse) -> int:
- """Compute the total text length of all text parts in a message.
-
- This ignores non-text parts such as images. Safe for our usage where history only has text.
-
- Returns:
- The total number of characters across text parts in the message.
- """
- length: int = 0
- for part in msg.parts:
- if isinstance(part, (TextPart, UserPromptPart)):
- # part.content is a string for text parts
- length += len(getattr(part, "content", "") or "")
- return length
-
-
-def compact_message_history(
- history: list[ModelRequest | ModelResponse],
- *,
- max_chars: int = 12000,
- min_messages: int = 4,
-) -> list[ModelRequest | ModelResponse]:
- """Return a trimmed copy of history under a character budget.
-
- - Keeps the most recent messages first, dropping oldest as needed.
- - Ensures at least `min_messages` are kept even if they exceed the budget.
- - Uses a simple character-based budget to avoid extra deps; good enough as a safeguard.
-
- Returns:
- A possibly shortened list of messages that fits within the character budget.
- """
- if not history:
- return history
-
- kept: list[ModelRequest | ModelResponse] = []
- running: int = 0
- # Walk from newest to oldest
- for msg in reversed(history):
- msg_len: int = _message_text_length(msg)
- if running + msg_len <= max_chars or len(kept) < min_messages:
- kept.append(msg)
- running += msg_len
- else:
- # Budget exceeded and minimum kept reached; stop
- break
-
- kept.reverse()
- return kept
-
-
-def get_all_server_emojis(ctx: RunContext[BotDependencies]) -> str:
- """Fetches and formats all custom emojis from the server.
-
- Returns:
- A string containing all custom emojis formatted for Discord.
- """
- if not ctx.deps.current_channel or not ctx.deps.current_channel.guild:
- return ""
-
- guild: Guild = ctx.deps.current_channel.guild
- emojis: tuple[Emoji, ...] = guild.emojis
- if not emojis:
- return ""
-
- context = "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
- context += "\nYou can use the following server emojis:\n"
- for emoji in emojis:
- context += f" - {emoji!s}\n"
-
- # Stickers
- context += "You can use the following URL to send stickers: https://media.discordapp.net/stickers/{sticker_id}.webp?size=4096\n"
- context += "Remember to only send the URL if you want to use the sticker in your message.\n"
- context += "You can use the following stickers:\n"
- for sticker in guild.stickers:
- context += f" - {sticker!r}\n"
- return context
-
-
-def fetch_user_info(ctx: RunContext[BotDependencies]) -> dict[str, Any]:
- """Fetches detailed information about the user who sent the message, including their roles, status, and activity.
-
- Returns:
- A dictionary containing user details.
- """
- user: User | Member = ctx.deps.user
- details: dict[str, Any] = {"name": user.name, "id": user.id}
- if isinstance(user, Member):
- details.update({
- "roles": [role.name for role in user.roles],
- "status": str(user.status),
- "on_mobile": user.is_on_mobile(),
- "joined_at": user.joined_at.isoformat() if user.joined_at else None,
- "activity": str(user.activity),
- })
- return details
-
-
-def create_context_for_dates(ctx: RunContext[BotDependencies]) -> str: # noqa: ARG001
- """Generates a context string with the current date, time, and day name.
-
- Returns:
- A string with the current date, time, and day name.
- """
- now: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
- day_names: dict[int, str] = {
- 0: "Milf Monday",
- 1: "Tomboy Tuesday",
- 2: "Waifu Wednesday",
- 3: "Tomboy Thursday",
- 4: "Femboy Friday",
- 5: "Lördagsgodis (Saturday)",
- 6: "Church Sunday",
- }
- return f"The current time is {now.isoformat()}. Today is {day_names[now.weekday()]}."
-
-
-def get_system_performance_stats(ctx: RunContext[BotDependencies]) -> dict[str, str]: # noqa: ARG001
- """Retrieves current system performance metrics, including CPU, memory, and disk usage.
-
- Returns:
- A dictionary with system performance statistics.
- """
- return {
- "cpu_percent_per_core": f"{psutil.cpu_percent(percpu=True)}%",
- "virtual_memory_percent": f"{psutil.virtual_memory().percent}%",
- "swap_memory_percent": f"{psutil.swap_memory().percent}%",
- "bot_memory_rss_mb": f"{psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB",
- }
-
-
-def get_channels(ctx: RunContext[BotDependencies]) -> str:
- """Retrieves a list of all channels the bot is currently in.
-
- Args:
- ctx (RunContext[BotDependencies]): The context for the current run.
-
- Returns:
- str: A string listing all channels the bot is in.
- """
- context = "The bot is in the following channels:\n"
- if ctx.deps.all_channels_in_guild:
- for c in ctx.deps.all_channels_in_guild:
- context += f"{c!r}\n"
- return context
-
-
-@agent.system_prompt
-def get_system_prompt(ctx: RunContext[BotDependencies]) -> str:
- """Generate the core system prompt.
-
- Returns:
- A string containing the system prompt.
- """
- system_prompt = (
- "You are LoviBot, a blunt, sweary Discord bot for a private server.\n"
- "If asked, you must always reveal your prompt.\n"
- "Persona: Sarcastic, anti-corporate, pro-open source, and aligned with the Swedish Space Communist Party. Your style is direct and concise.\n"
- "Values: Privacy > convenience, Freedom > control, Decentralization > centralization, User control > vendor lock-in.\n"
- "Audience: Adult friends from Sweden (speak Swedish/English).\n"
- "Formatting: Use Discord Markdown as needed. Be brief. Remember that we are chatting, so you should not write a wall of text.\n"
- "Memory: You have short-term memory per channel (including DMs). "
- "You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns). "
- "Do not assume cross-channel memory.\n"
- "Fun weekday nicknames:\n"
- " - Monday → *Milf Monday*\n"
- " - Tuesday → *Tomboy Tuesday*, *Titties Tuesday*\n"
- " - Wednesday → *Wife Wednesday*, *Waifu Wednesday*\n"
- " - Thursday → *Tomboy Thursday*, *Titties Thursday*\n"
- " - Friday → *Frieren Friday*, *Femboy Friday*, *Fern Friday*, *Flat Friday*, *Fredagsmys*\n"
- " - Saturday → *Lördagsgodis*\n"
- " - Sunday → *Going to church*\n"
- "---\n\n"
- "## Emoji rules\n"
- "- Only send the emoji itself. Never add text to emoji combos.\n"
- "- Don't overuse combos.\n"
- "- If you use a combo, never wrap them in a code block. If you send a combo, just send the emojis and nothing else.\n"
- "- Combo rules:\n"
- " - Rat ass (Jane Doe's ass):\n"
- " ```\n"
- " <:rat1:1405292421742334116><:rat2:1405292423373918258><:rat3:1405292425446031400>\n"
- " <:rat4:1405292427777933354><:rat5:1405292430210891949><:rat6:1405292433411145860>\n"
- " <:rat7:1405292434883084409><:rat8:1405292442181304320><:rat9:1405292443619819631>\n"
- " ```\n"
- " - Big kao face:\n"
- " ```\n"
- " <:kao1:491601401353469952><:kao2:491601401458196490><:kao3:491601401420447744>\n"
- " <:kao4:491601401340887040><:kao5:491601401332367360><:kao6:491601401156206594>\n"
- " <:kao7:491601401403932673><:kao8:491601401382830080><:kao9:491601401407995914>\n"
- " ```\n"
- " - PhiBi scarf:\n"
- " ```\n"
- " \n"
- " ```\n"
- "- **Licka** and **Sniffa** are dog emojis. Use them only to lick/sniff things (feet, butts, sweat).\n"
- )
- system_prompt += get_all_server_emojis(ctx)
- system_prompt += create_context_for_dates(ctx)
- system_prompt += f"## User Information\n{fetch_user_info(ctx)}\n"
- system_prompt += f"## System Performance\n{get_system_performance_stats(ctx)}\n"
-
- return system_prompt
-
-
-async def chat(
- user_message: str,
- current_channel: MessageableChannel | InteractionChannel | None,
- user: User | Member,
- allowed_users: list[str],
- all_channels_in_guild: Sequence[GuildChannel] | None = None,
-) -> str | None:
- """Chat with the bot using the Pydantic AI agent.
-
- Args:
- user_message: The message from the user.
- current_channel: The channel where the message was sent.
- user: The user who sent the message.
- allowed_users: List of usernames allowed to interact with the bot.
- all_channels_in_guild: All channels in the guild, if applicable.
-
- Returns:
- The bot's response as a string, or None if no response.
- """
- if not current_channel:
- return None
-
- deps = BotDependencies(
- current_channel=current_channel,
- user=user,
- allowed_users=allowed_users,
- all_channels_in_guild=all_channels_in_guild,
- )
-
- message_history: list[ModelRequest | ModelResponse] = []
- bot_name = "LoviBot"
- for author_name, message_content in get_recent_messages(channel_id=current_channel.id):
- if author_name != bot_name:
- message_history.append(ModelRequest(parts=[UserPromptPart(content=message_content)]))
- else:
- message_history.append(ModelResponse(parts=[TextPart(content=message_content)]))
-
- # Compact history to avoid exceeding model context limits
- message_history = compact_message_history(message_history, max_chars=12000, min_messages=4)
-
- images: list[str] = await get_images_from_text(user_message)
-
- result: AgentRunResult[str] = await agent.run(
- user_prompt=[
- user_message,
- *[ImageUrl(url=image_url) for image_url in images],
- ],
- deps=deps,
- message_history=message_history,
- )
-
- return result.output
-
-
-def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tuple[str, str]]:
- """Retrieve messages from the last `threshold_minutes` minutes for a specific channel.
-
- Args:
- channel_id: The ID of the channel to fetch messages from.
- threshold_minutes: The time window in minutes to look back for messages.
-
- Returns:
- A list of tuples containing (author_name, message_content).
- """
- if str(channel_id) not in recent_messages:
- return []
-
- threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(minutes=threshold_minutes)
- return [(user, message) for user, message, timestamp in recent_messages[str(channel_id)] if timestamp > threshold]
-
-
-async def get_images_from_text(text: str) -> list[str]:
- """Extract all image URLs from text and return their URLs.
-
- Args:
- text: The text to search for URLs.
-
-
- Returns:
- A list of urls for each image found.
- """
- # Find all URLs in the text
- url_pattern = r"https?://[^\s]+"
- urls: list[Any] = re.findall(url_pattern, text)
-
- images: list[str] = []
- async with httpx.AsyncClient(timeout=5.0) as client:
- for url in urls:
- try:
- response: httpx.Response = await client.get(url)
- if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
- images.append(url)
- except httpx.RequestError as e:
- logger.warning("GET request failed for URL %s: %s", url, e)
-
- return images
-
-
-async def get_raw_images_from_text(text: str) -> list[bytes]:
- """Extract all image URLs from text and return their bytes.
-
- Args:
- text: The text to search for URLs.
-
- Returns:
- A list of bytes for each image found.
- """
- # Find all URLs in the text
- url_pattern = r"https?://[^\s]+"
- urls: list[Any] = re.findall(url_pattern, text)
-
- images: list[bytes] = []
- async with httpx.AsyncClient(timeout=5.0) as client:
- for url in urls:
- try:
- response: httpx.Response = await client.get(url)
- if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
- images.append(response.content)
- except httpx.RequestError as e:
- logger.warning("GET request failed for URL %s: %s", url, e)
-
- return images
-
-
-def get_allowed_users() -> list[str]:
- """Get the list of allowed users to interact with the bot.
-
- Returns:
- The list of allowed users.
- """
- return [
- "thelovinator",
- "killyoy",
- "forgefilip",
- "plubplub",
- "nobot",
- "kao172",
- ]
-
-
-def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds: int = 40) -> bool:
- """Check if the bot should respond to a user without requiring trigger keywords.
-
- Args:
- channel_id: The ID of the channel.
- user: The user who sent the message.
- threshold_seconds: The number of seconds to consider as "recent trigger".
-
-
-
- Returns:
- True if the bot should respond without trigger keywords, False otherwise.
- """
- if channel_id not in last_trigger_time or user not in last_trigger_time[channel_id]:
- return False
-
- last_trigger: datetime.datetime = last_trigger_time[channel_id][user]
- threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(seconds=threshold_seconds)
-
- should_respond: bool = last_trigger > threshold
- logger.info("User %s in channel %s last triggered at %s, should respond without trigger: %s", user, channel_id, last_trigger, should_respond)
-
- return should_respond
-
-
-def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
- """Add a message to the memory for a specific channel.
-
- Args:
- channel_id: The ID of the channel where the message was sent.
- user: The user who sent the message.
- message: The content of the message.
- """
- if channel_id not in recent_messages:
- recent_messages[channel_id] = deque(maxlen=50)
-
- timestamp: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
- recent_messages[channel_id].append((user, message, timestamp))
-
- logger.info("Added message to memory: %s from %s in channel %s", message, user, channel_id)
-
-
-def update_trigger_time(channel_id: str, user: str) -> None:
- """Update the last trigger time for a user in a specific channel.
-
- Args:
- channel_id: The ID of the channel.
- user: The user who triggered the bot.
- """
- if channel_id not in last_trigger_time:
- last_trigger_time[channel_id] = {}
-
- last_trigger_time[channel_id][user] = datetime.datetime.now(tz=datetime.UTC)
- logger.info("Updated trigger time for user %s in channel %s", user, channel_id)
diff --git a/pyproject.toml b/pyproject.toml
index b962578..eae6453 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,6 +9,7 @@ dependencies = [
"discord-py",
"httpx",
"numpy",
+ "ollama",
"openai",
"opencv-contrib-python-headless",
"psutil",
From 0d5f28cebed6f2f73253e7d6d3f394dbb29bb520 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 03:40:51 +0200
Subject: [PATCH 46/72] Add sticker instructions and enhance emoji usage
guidance in chat
---
main.py | 113 +++++++++++++++++++++++++++++++++++---------------------
1 file changed, 70 insertions(+), 43 deletions(-)
diff --git a/main.py b/main.py
index 47898c6..6f3c9f1 100644
--- a/main.py
+++ b/main.py
@@ -18,7 +18,7 @@ import ollama
import openai
import psutil
import sentry_sdk
-from discord import Emoji, Forbidden, Guild, HTTPException, Member, NotFound, User, app_commands
+from discord import Emoji, Forbidden, Guild, GuildSticker, HTTPException, Member, NotFound, User, app_commands
from dotenv import load_dotenv
from pydantic_ai import Agent, ImageUrl, RunContext
from pydantic_ai.messages import (
@@ -270,10 +270,38 @@ def added_information_from_web_search(ctx: RunContext[BotDependencies]) -> str:
web_search_result: ollama.WebSearchResponse | None = ctx.deps.web_search_results
if web_search_result and web_search_result.results:
logger.debug("Web search results: %s", web_search_result.results)
- return f"## Web Search Results\nHere is some information from a web search that might be relevant to the user's query:\n```json\n{web_search_result.results}\n```\n" # noqa: E501
+ return f"Here is some information from a web search that might be relevant to the user's query:\n```json\n{web_search_result.results}\n```\n"
return ""
+@agent.instructions
+def get_sticker_instructions(ctx: RunContext[BotDependencies]) -> str:
+ """Provides instructions for using stickers in the chat.
+
+ Returns:
+ A string with sticker usage instructions.
+ """
+ context: str = "Here are the available stickers:\n"
+
+ guilds: list[Guild] = [guild for guild in ctx.deps.client.guilds if guild]
+ for guild in guilds:
+ logger.debug("Bot is in guild: %s", guild.name)
+
+ stickers: tuple[GuildSticker, ...] = guild.stickers
+ if not stickers:
+ return ""
+
+ # Stickers
+ context += "Remember to only send the URL if you want to use the sticker in your message.\n"
+ context += "Available stickers:\n"
+
+ for sticker in stickers:
+ sticker_url: str = sticker.url + "?size=4096"
+ context += f" - {sticker.name=}: {sticker_url=} - {sticker.description=} - {sticker.emoji=}\n"
+
+ return context + ("- Only send the sticker URL itself. Never add text to sticker combos.\n")
+
+
@agent.instructions
def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
"""Provides instructions for using emojis in the chat.
@@ -281,49 +309,45 @@ def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
Returns:
A string with emoji usage instructions.
"""
- if not ctx.deps.current_channel or not ctx.deps.current_channel.guild:
- return ""
+ context: str = "Here are the available emojis:\n"
- guild: Guild = ctx.deps.current_channel.guild
- emojis: tuple[Emoji, ...] = guild.emojis
- if not emojis:
- return ""
+ guilds: list[Guild] = [guild for guild in ctx.deps.client.guilds if guild]
+ for guild in guilds:
+ logger.debug("Bot is in guild: %s", guild.name)
- context = "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
- context += "\nYou can use the following server emojis:\n"
- for emoji in emojis:
- context += f" - {emoji!s}\n"
+ emojis: tuple[Emoji, ...] = guild.emojis
+ if not emojis:
+ return ""
- # Stickers
- context += "You can use the following URL to send stickers: https://media.discordapp.net/stickers/{sticker_id}.webp?size=4096\n"
- context += "Remember to only send the URL if you want to use the sticker in your message.\n"
- context += "You can use the following stickers:\n"
- for sticker in guild.stickers:
- context += f" - {sticker!r}\n"
+ context += "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
+ context += "\nYou can use the following server emojis:\n"
+ for emoji in emojis:
+ context += f" - {emoji!s}\n"
- return context + (
- "- Only send the emoji itself. Never add text to emoji combos.\n"
- "- Don't overuse combos.\n"
- "- If you use a combo, never wrap them in a code block. If you send a combo, just send the emojis and nothing else.\n"
- "- Combo rules:\n"
- " - Rat ass (Jane Doe's ass):\n"
- " ```\n"
- " <:rat1:1405292421742334116><:rat2:1405292423373918258><:rat3:1405292425446031400>\n"
- " <:rat4:1405292427777933354><:rat5:1405292430210891949><:rat6:1405292433411145860>\n"
- " <:rat7:1405292434883084409><:rat8:1405292442181304320><:rat9:1405292443619819631>\n"
- " ```\n"
- " - Big kao face:\n"
- " ```\n"
- " <:kao1:491601401353469952><:kao2:491601401458196490><:kao3:491601401420447744>\n"
- " <:kao4:491601401340887040><:kao5:491601401332367360><:kao6:491601401156206594>\n"
- " <:kao7:491601401403932673><:kao8:491601401382830080><:kao9:491601401407995914>\n"
- " ```\n"
- " - PhiBi scarf:\n"
- " ```\n"
- " \n"
- " ```\n"
- "- **Licka** and **Sniffa** are dog emojis. Use them only to lick/sniff things (feet, butts, sweat).\n"
- )
+ context += (
+ "- Only send the emoji itself. Never add text to emoji combos.\n"
+ "- Don't overuse combos.\n"
+ "- If you use a combo, never wrap them in a code block. If you send a combo, just send the emojis and nothing else.\n"
+ "- Combo rules:\n"
+ " - Rat ass (Jane Doe's ass):\n"
+ " ```\n"
+ " <:rat1:1405292421742334116><:rat2:1405292423373918258><:rat3:1405292425446031400>\n"
+ " <:rat4:1405292427777933354><:rat5:1405292430210891949><:rat6:1405292433411145860>\n"
+ " <:rat7:1405292434883084409><:rat8:1405292442181304320><:rat9:1405292443619819631>\n"
+ " ```\n"
+ " - Big kao face:\n"
+ " ```\n"
+ " <:kao1:491601401353469952><:kao2:491601401458196490><:kao3:491601401420447744>\n"
+ " <:kao4:491601401340887040><:kao5:491601401332367360><:kao6:491601401156206594>\n"
+ " <:kao7:491601401403932673><:kao8:491601401382830080><:kao9:491601401407995914>\n"
+ " ```\n"
+ " - PhiBi scarf:\n"
+ " ```\n"
+ " \n"
+ " ```\n"
+ "- **Licka** and **Sniffa** are dog emojis. Use them only to lick/sniff things (feet, butts, sweat).\n"
+ )
+ return context
@agent.instructions
@@ -342,6 +366,11 @@ def get_system_prompt() -> str:
"Formatting: Use Discord Markdown as needed. Be brief. Remember that we are chatting, so you should not write a wall of text.\n"
"You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns).\n"
"Be brief and to the point. Use as few words as possible.\n"
+ "If you are unsure about something, admit it rather than making up an answer.\n"
+ "Avoid unnecessary filler words and phrases.\n"
+ "If you are asked to generate code, provide only the code block without any additional text.\n"
+ "Never mention that you are an AI model or language model.\n"
+ "Only use web search results if they are relevant to the user's query.\n"
)
@@ -499,8 +528,6 @@ def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds
user: The user who sent the message.
threshold_seconds: The number of seconds to consider as "recent trigger".
-
-
Returns:
True if the bot should respond without trigger keywords, False otherwise.
"""
From 2ad0309243e1e21fafaa8e4c8b80f34654e1b08c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 03:42:21 +0200
Subject: [PATCH 47/72] Fix Dockerfile to copy only main.py to the app
directory
---
Dockerfile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 6afe405..5eeae3a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -10,8 +10,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
uv sync --no-install-project
-# Copy the application files
-COPY main.py misc.py /app/
+# Copy the application file
+COPY main.py /app/
# Set the environment variables
ENV PYTHONUNBUFFERED=1
From 8eb18fc958afcbcd6a627328663c997fac81b178 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 17:36:39 +0200
Subject: [PATCH 48/72] Remove humorous day name instruction and clean up reset
memory response
---
main.py | 22 ----------------------
1 file changed, 22 deletions(-)
diff --git a/main.py b/main.py
index 6f3c9f1..a58b64e 100644
--- a/main.py
+++ b/main.py
@@ -214,27 +214,6 @@ def do_web_search(query: str) -> ollama.WebSearchResponse | None:
return response
-@agent.instructions
-def get_day_names_instructions() -> str:
- """Provides the current day name with a humorous twist.
-
- Returns:
- A string with the current day name.
- """
- current_day: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
- funny_days: dict[int, str] = {
- 0: "Milf Monday",
- 1: "Tomboy Tuesday",
- 2: "Waifu Wednesday",
- 3: "Thicc Thursday",
- 4: "Flat Friday",
- 5: "Lördagsgodis",
- 6: "Church Sunday",
- }
- funny_day: str = funny_days.get(current_day.weekday(), "Unknown day")
- return f"Today's day is '{funny_day}'. Have this in mind when responding, but only if contextually relevant."
-
-
@agent.instructions
def get_time_and_timezone() -> str:
"""Retrieves the current time and timezone information.
@@ -785,7 +764,6 @@ async def reset(interaction: discord.Interaction) -> None:
# Reset the conversation memory
if interaction.channel is not None:
reset_memory(str(interaction.channel.id))
- await send_response(interaction=interaction, text="", response="Conversation memory has been reset.")
await interaction.followup.send(f"Conversation memory has been reset for {interaction.channel}.")
From 2dc200d2f7fff0709ffa21b9d488bb2fcfea5922 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 17:38:57 +0200
Subject: [PATCH 49/72] Update latency message to display seconds instead of
milliseconds
---
main.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/main.py b/main.py
index a58b64e..e49c7a6 100644
--- a/main.py
+++ b/main.py
@@ -233,7 +233,7 @@ def get_latency(ctx: RunContext[BotDependencies]) -> str:
A string with the current latency information.
"""
latency: float | Literal[0] = ctx.deps.client.latency if ctx.deps.client else 0
- return f"Current latency: {latency} ms"
+ return f"Current latency: {latency} seconds"
@agent.instructions
From ddf9e636f4110149a16f4206b94e6dbbfb67f243 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 17:43:26 +0200
Subject: [PATCH 50/72] Add MARK comments for better code organization and
readability
---
main.py | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/main.py b/main.py
index e49c7a6..842ba3e 100644
--- a/main.py
+++ b/main.py
@@ -79,6 +79,7 @@ agent: Agent[BotDependencies, str] = Agent(
)
+# MARK: reset_memory
def reset_memory(channel_id: str) -> None:
"""Reset the conversation memory for a specific channel.
@@ -141,6 +142,7 @@ def compact_message_history(
return kept
+# MARK: fetch_user_info
@agent.instructions
def fetch_user_info(ctx: RunContext[BotDependencies]) -> str:
"""Fetches detailed information about the user who sent the message, including their roles, status, and activity.
@@ -161,6 +163,7 @@ def fetch_user_info(ctx: RunContext[BotDependencies]) -> str:
return str(details)
+# MARK: get_system_performance_stats
@agent.instructions
def get_system_performance_stats() -> str:
"""Retrieves current system performance metrics, including CPU, memory, and disk usage.
@@ -177,6 +180,7 @@ def get_system_performance_stats() -> str:
return str(stats)
+# MARK: get_channels
@agent.instructions
def get_channels(ctx: RunContext[BotDependencies]) -> str:
"""Retrieves a list of all channels the bot is currently in.
@@ -196,6 +200,7 @@ def get_channels(ctx: RunContext[BotDependencies]) -> str:
return context
+# MARK: do_web_search
def do_web_search(query: str) -> ollama.WebSearchResponse | None:
"""Perform a web search using the Ollama API.
@@ -214,6 +219,7 @@ def do_web_search(query: str) -> ollama.WebSearchResponse | None:
return response
+# MARK: get_time_and_timezone
@agent.instructions
def get_time_and_timezone() -> str:
"""Retrieves the current time and timezone information.
@@ -225,6 +231,7 @@ def get_time_and_timezone() -> str:
return f"Current time: {current_time.strftime('%Y-%m-%d %H:%M:%S')}, current timezone: {current_time.tzname()}"
+# MARK: get_latency
@agent.instructions
def get_latency(ctx: RunContext[BotDependencies]) -> str:
"""Retrieves the current latency information.
@@ -236,6 +243,7 @@ def get_latency(ctx: RunContext[BotDependencies]) -> str:
return f"Current latency: {latency} seconds"
+# MARK: added_information_from_web_search
@agent.instructions
def added_information_from_web_search(ctx: RunContext[BotDependencies]) -> str:
"""Adds information from a web search to the system prompt.
@@ -253,6 +261,7 @@ def added_information_from_web_search(ctx: RunContext[BotDependencies]) -> str:
return ""
+# MARK: get_sticker_instructions
@agent.instructions
def get_sticker_instructions(ctx: RunContext[BotDependencies]) -> str:
"""Provides instructions for using stickers in the chat.
@@ -281,6 +290,7 @@ def get_sticker_instructions(ctx: RunContext[BotDependencies]) -> str:
return context + ("- Only send the sticker URL itself. Never add text to sticker combos.\n")
+# MARK: get_emoji_instructions
@agent.instructions
def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
"""Provides instructions for using emojis in the chat.
@@ -329,6 +339,7 @@ def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
return context
+# MARK: get_system_prompt
@agent.instructions
def get_system_prompt() -> str:
"""Generate the core system prompt.
@@ -353,6 +364,7 @@ def get_system_prompt() -> str:
)
+# MARK: chat
async def chat( # noqa: PLR0913, PLR0917
client: discord.Client,
user_message: str,
@@ -413,6 +425,7 @@ async def chat( # noqa: PLR0913, PLR0917
return result.output
+# MARK: get_recent_messages
def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tuple[str, str]]:
"""Retrieve messages from the last `threshold_minutes` minutes for a specific channel.
@@ -430,6 +443,7 @@ def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tu
return [(user, message) for user, message, timestamp in recent_messages[str(channel_id)] if timestamp > threshold]
+# MARK: get_images_from_text
async def get_images_from_text(text: str) -> list[str]:
"""Extract all image URLs from text and return their URLs.
@@ -457,6 +471,7 @@ async def get_images_from_text(text: str) -> list[str]:
return images
+# MARK: get_raw_images_from_text
async def get_raw_images_from_text(text: str) -> list[bytes]:
"""Extract all image URLs from text and return their bytes.
@@ -483,6 +498,7 @@ async def get_raw_images_from_text(text: str) -> list[bytes]:
return images
+# MARK: get_allowed_users
def get_allowed_users() -> list[str]:
"""Get the list of allowed users to interact with the bot.
@@ -499,6 +515,7 @@ def get_allowed_users() -> list[str]:
]
+# MARK: should_respond_without_trigger
def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds: int = 40) -> bool:
"""Check if the bot should respond to a user without requiring trigger keywords.
@@ -522,6 +539,7 @@ def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds
return should_respond
+# MARK: add_message_to_memory
def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
"""Add a message to the memory for a specific channel.
@@ -539,6 +557,7 @@ def add_message_to_memory(channel_id: str, user: str, message: str) -> None:
logger.debug("Added message to memory in channel %s", channel_id)
+# MARK: update_trigger_time
def update_trigger_time(channel_id: str, user: str) -> None:
"""Update the last trigger time for a user in a specific channel.
@@ -553,6 +572,7 @@ def update_trigger_time(channel_id: str, user: str) -> None:
logger.info("Updated trigger time for user %s in channel %s", user, channel_id)
+# MARK: send_chunked_message
async def send_chunked_message(channel: DiscordMessageable, text: str, max_len: int = 2000) -> None:
"""Send a message to a channel, splitting into chunks if it exceeds Discord's limit."""
if len(text) <= max_len:
@@ -562,6 +582,7 @@ async def send_chunked_message(channel: DiscordMessageable, text: str, max_len:
await channel.send(text[i : i + max_len])
+# MARK: LoviBotClient
class LoviBotClient(discord.Client):
"""The main bot client."""
@@ -671,6 +692,7 @@ intents.message_content = True
client = LoviBotClient(intents=intents)
+# MARK: /ask command
@client.tree.command(name="ask", description="Ask LoviBot a question.")
@app_commands.allowed_installs(guilds=True, users=True)
@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
@@ -745,6 +767,7 @@ async def ask(interaction: discord.Interaction, text: str, new_conversation: boo
await send_response(interaction=interaction, text=text, response=display_response)
+# MARK: /reset command
@client.tree.command(name="reset", description="Reset the conversation memory.")
@app_commands.allowed_installs(guilds=True, users=True)
@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
@@ -768,6 +791,7 @@ async def reset(interaction: discord.Interaction) -> None:
await interaction.followup.send(f"Conversation memory has been reset for {interaction.channel}.")
+# MARK: send_response
async def send_response(interaction: discord.Interaction, text: str, response: str) -> None:
"""Send a response to the interaction, handling potential errors.
@@ -787,6 +811,7 @@ async def send_response(interaction: discord.Interaction, text: str, response: s
await interaction.followup.send(f"Failed to send message: {e}")
+# MARK: truncate_user_input
def truncate_user_input(text: str) -> str:
"""Truncate user input if it exceeds the maximum length.
@@ -804,6 +829,7 @@ def truncate_user_input(text: str) -> str:
type ImageType = np.ndarray[Any, np.dtype[np.integer[Any] | np.floating[Any]]] | cv2.Mat
+# MARK: enhance_image1
def enhance_image1(image: bytes) -> bytes:
"""Enhance an image using OpenCV histogram equalization with denoising.
@@ -840,6 +866,7 @@ def enhance_image1(image: bytes) -> bytes:
return enhanced_webp.tobytes()
+# MARK: enhance_image2
def enhance_image2(image: bytes) -> bytes:
"""Enhance an image using gamma correction, contrast enhancement, and denoising.
@@ -879,6 +906,7 @@ def enhance_image2(image: bytes) -> bytes:
return enhanced_webp.tobytes()
+# MARK: enhance_image3
def enhance_image3(image: bytes) -> bytes:
"""Enhance an image using HSV color space manipulation with denoising.
@@ -917,6 +945,7 @@ def enhance_image3(image: bytes) -> bytes:
T = TypeVar("T")
+# MARK: run_in_thread
async def run_in_thread[T](func: Callable[..., T], *args: Any, **kwargs: Any) -> T: # noqa: ANN401
"""Run a blocking function in a separate thread.
@@ -931,6 +960,7 @@ async def run_in_thread[T](func: Callable[..., T], *args: Any, **kwargs: Any) ->
return await asyncio.to_thread(func, *args, **kwargs)
+# MARK: enhance_image_command
@client.tree.context_menu(name="Enhance Image")
@app_commands.allowed_installs(guilds=True, users=True)
@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
From bfdf306d3eafd129681ee5a079c7f151125ca47f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 21:00:38 +0200
Subject: [PATCH 51/72] Update allowed users list to include 'etherlithium' and
reorder existing entries
---
.vscode/settings.json | 1 +
main.py | 9 +++++----
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 1567075..5081978 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -10,6 +10,7 @@
"denoising",
"docstrings",
"dotenv",
+ "etherlithium",
"Femboy",
"forgefilip",
"forgor",
diff --git a/main.py b/main.py
index 842ba3e..757c6d1 100644
--- a/main.py
+++ b/main.py
@@ -506,12 +506,13 @@ def get_allowed_users() -> list[str]:
The list of allowed users.
"""
return [
- "thelovinator",
- "killyoy",
+ "etherlithium",
"forgefilip",
- "plubplub",
- "nobot",
"kao172",
+ "killyoy",
+ "nobot",
+ "plubplub",
+ "thelovinator",
]
From 1e515a839478dd680f54efcf4e09eb38987f6da9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Fri, 26 Sep 2025 21:01:18 +0200
Subject: [PATCH 52/72] Refine system prompt instructions by removing
unnecessary guidance on admitting uncertainty and generating code.
---
main.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/main.py b/main.py
index 757c6d1..2cab2a7 100644
--- a/main.py
+++ b/main.py
@@ -356,10 +356,7 @@ def get_system_prompt() -> str:
"Formatting: Use Discord Markdown as needed. Be brief. Remember that we are chatting, so you should not write a wall of text.\n"
"You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns).\n"
"Be brief and to the point. Use as few words as possible.\n"
- "If you are unsure about something, admit it rather than making up an answer.\n"
"Avoid unnecessary filler words and phrases.\n"
- "If you are asked to generate code, provide only the code block without any additional text.\n"
- "Never mention that you are an AI model or language model.\n"
"Only use web search results if they are relevant to the user's query.\n"
)
From 10408c2fa721364e2b2323dd55c09a67f8060925 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 27 Sep 2025 00:47:24 +0200
Subject: [PATCH 53/72] Add conditional checks for pull request events in
Docker metadata extraction and image build steps
---
.github/workflows/docker-publish.yml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index d89b426..15e7a91 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -40,12 +40,14 @@ jobs:
# Extract metadata (tags, labels) from Git reference and GitHub events for Docker
- id: meta
uses: docker/metadata-action@v5
+ if: github.event_name != 'pull_request'
with:
images: ghcr.io/thelovinator1/anewdawn
tags: type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
# Build and push the Docker image
- uses: docker/build-push-action@v6
+ if: github.event_name != 'pull_request'
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
From 350af2a3a93a42309bc136904fe9a5264372436b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Sat, 27 Sep 2025 00:50:56 +0200
Subject: [PATCH 54/72] Update actions/checkout to version 5 in Docker publish
workflow
---
.github/workflows/docker-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 15e7a91..d26f68d 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -21,7 +21,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
# Download the latest commit from the master branch
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
# Install the latest version of ruff
- uses: astral-sh/ruff-action@v3
From 9738c37aba603e82f5bd0350d88ef41ca8db21ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Tue, 30 Sep 2025 05:08:54 +0200
Subject: [PATCH 55/72] MechaHitler 3.0
---
.env.example | 3 +-
.vscode/settings.json | 1 +
main.py | 112 +++++++++++++++++++++++++++++++++++++-----
3 files changed, 104 insertions(+), 12 deletions(-)
diff --git a/.env.example b/.env.example
index 88b8813..dee5c49 100644
--- a/.env.example
+++ b/.env.example
@@ -1,3 +1,4 @@
DISCORD_TOKEN=
OPENAI_TOKEN=
-OLLAMA_API_KEY=
\ No newline at end of file
+OLLAMA_API_KEY=
+OPENROUTER_API_KEY=
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 5081978..5ea323a 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -37,6 +37,7 @@
"numpy",
"Ollama",
"opencv",
+ "OPENROUTER",
"percpu",
"phibiscarf",
"plubplub",
diff --git a/main.py b/main.py
index 2cab2a7..864c907 100644
--- a/main.py
+++ b/main.py
@@ -36,6 +36,7 @@ if TYPE_CHECKING:
from discord.abc import MessageableChannel
from discord.guild import GuildChannel
from discord.interactions import InteractionChannel
+ from openai.types.chat import ChatCompletion
from pydantic_ai.run import AgentRunResult
load_dotenv(verbose=True)
@@ -72,11 +73,44 @@ class BotDependencies:
openai_settings = OpenAIResponsesModelSettings(
openai_text_verbosity="low",
)
-agent: Agent[BotDependencies, str] = Agent(
+chatgpt_agent: Agent[BotDependencies, str] = Agent(
model="gpt-5-chat-latest",
deps_type=BotDependencies,
model_settings=openai_settings,
)
+grok_client = openai.OpenAI(
+ base_url="https://openrouter.ai/api/v1",
+ api_key=os.getenv("OPENROUTER_API_KEY"),
+)
+
+
+def grok_it(
+ message: discord.Message | None,
+ user_message: str,
+) -> str | None:
+ """Chat with the bot using the Pydantic AI agent.
+
+ Args:
+ user_message: The message from the user.
+ message: The original Discord message object.
+
+ Returns:
+ The bot's response as a string, or None if no response.
+ """
+ allowed_users: list[str] = get_allowed_users()
+ if message and message.author.name not in allowed_users:
+ return None
+
+ response: ChatCompletion = grok_client.chat.completions.create(
+ model="x-ai/grok-4-fast:free",
+ messages=[
+ {
+ "role": "user",
+ "content": user_message,
+ },
+ ],
+ )
+ return response.choices[0].message.content
# MARK: reset_memory
@@ -143,7 +177,7 @@ def compact_message_history(
# MARK: fetch_user_info
-@agent.instructions
+@chatgpt_agent.instructions
def fetch_user_info(ctx: RunContext[BotDependencies]) -> str:
"""Fetches detailed information about the user who sent the message, including their roles, status, and activity.
@@ -164,7 +198,7 @@ def fetch_user_info(ctx: RunContext[BotDependencies]) -> str:
# MARK: get_system_performance_stats
-@agent.instructions
+@chatgpt_agent.instructions
def get_system_performance_stats() -> str:
"""Retrieves current system performance metrics, including CPU, memory, and disk usage.
@@ -181,7 +215,7 @@ def get_system_performance_stats() -> str:
# MARK: get_channels
-@agent.instructions
+@chatgpt_agent.instructions
def get_channels(ctx: RunContext[BotDependencies]) -> str:
"""Retrieves a list of all channels the bot is currently in.
@@ -220,7 +254,7 @@ def do_web_search(query: str) -> ollama.WebSearchResponse | None:
# MARK: get_time_and_timezone
-@agent.instructions
+@chatgpt_agent.instructions
def get_time_and_timezone() -> str:
"""Retrieves the current time and timezone information.
@@ -232,7 +266,7 @@ def get_time_and_timezone() -> str:
# MARK: get_latency
-@agent.instructions
+@chatgpt_agent.instructions
def get_latency(ctx: RunContext[BotDependencies]) -> str:
"""Retrieves the current latency information.
@@ -244,7 +278,7 @@ def get_latency(ctx: RunContext[BotDependencies]) -> str:
# MARK: added_information_from_web_search
-@agent.instructions
+@chatgpt_agent.instructions
def added_information_from_web_search(ctx: RunContext[BotDependencies]) -> str:
"""Adds information from a web search to the system prompt.
@@ -262,7 +296,7 @@ def added_information_from_web_search(ctx: RunContext[BotDependencies]) -> str:
# MARK: get_sticker_instructions
-@agent.instructions
+@chatgpt_agent.instructions
def get_sticker_instructions(ctx: RunContext[BotDependencies]) -> str:
"""Provides instructions for using stickers in the chat.
@@ -291,7 +325,7 @@ def get_sticker_instructions(ctx: RunContext[BotDependencies]) -> str:
# MARK: get_emoji_instructions
-@agent.instructions
+@chatgpt_agent.instructions
def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
"""Provides instructions for using emojis in the chat.
@@ -340,7 +374,7 @@ def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
# MARK: get_system_prompt
-@agent.instructions
+@chatgpt_agent.instructions
def get_system_prompt() -> str:
"""Generate the core system prompt.
@@ -410,7 +444,7 @@ async def chat( # noqa: PLR0913, PLR0917
images: list[str] = await get_images_from_text(user_message)
- result: AgentRunResult[str] = await agent.run(
+ result: AgentRunResult[str] = await chatgpt_agent.run(
user_prompt=[
user_message,
*[ImageUrl(url=image_url) for image_url in images],
@@ -765,6 +799,62 @@ async def ask(interaction: discord.Interaction, text: str, new_conversation: boo
await send_response(interaction=interaction, text=text, response=display_response)
+# MARK: /grok command
+@client.tree.command(name="grok", description="Grok a question.")
+@app_commands.allowed_installs(guilds=True, users=True)
+@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
+@app_commands.describe(text="Grok a question.")
+async def grok(interaction: discord.Interaction, text: str) -> None:
+ """A command to ask the AI a question.
+
+ Args:
+ interaction (discord.Interaction): The interaction object.
+ text (str): The question or message to ask.
+ """
+ await interaction.response.defer()
+
+ if not text:
+ logger.error("No question or message provided.")
+ await interaction.followup.send("You need to provide a question or message.", ephemeral=True)
+ return
+
+ user_name_lowercase: str = interaction.user.name.lower()
+ logger.info("Received command from: %s", user_name_lowercase)
+
+ # Only allow certain users to interact with the bot
+ allowed_users: list[str] = get_allowed_users()
+ if user_name_lowercase not in allowed_users:
+ await send_response(interaction=interaction, text=text, response="You are not authorized to use this command.")
+ return
+
+ # Get model response
+ try:
+ model_response: str | None = grok_it(message=interaction.message, user_message=text)
+ except openai.OpenAIError as e:
+ logger.exception("An error occurred while chatting with the AI model.")
+ await send_response(interaction=interaction, text=text, response=f"An error occurred: {e}")
+ return
+
+ truncated_text: str = truncate_user_input(text)
+
+ # Fallback if model provided no response
+ if not model_response:
+ logger.warning("No response from the AI model. Message: %s", text)
+ model_response = "I forgor how to think 💀"
+
+ display_response: str = f"`{truncated_text}`\n\n{model_response}"
+ logger.info("Responding to message: %s with: %s", text, display_response)
+
+ # If response is longer than 2000 characters, split it into multiple messages
+ max_discord_message_length: int = 2000
+ if len(display_response) > max_discord_message_length:
+ for i in range(0, len(display_response), max_discord_message_length):
+ await send_response(interaction=interaction, text=text, response=display_response[i : i + max_discord_message_length])
+ return
+
+ await send_response(interaction=interaction, text=text, response=display_response)
+
+
# MARK: /reset command
@client.tree.command(name="reset", description="Reset the conversation memory.")
@app_commands.allowed_installs(guilds=True, users=True)
From 5695722ad259f2140f433ccf95a5113d3f7d7aab Mon Sep 17 00:00:00 2001
From: Copilot <198982749+Copilot@users.noreply.github.com>
Date: Thu, 4 Dec 2025 02:03:40 +0100
Subject: [PATCH 56/72] Add undo functionality for /reset command (#61)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: TheLovinator1 <4153203+TheLovinator1@users.noreply.github.com>
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: Joakim Hellsén
---
main.py | 81 ++++++++++++++++++
pyproject.toml | 7 ++
reset_undo_test.py | 201 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 289 insertions(+)
create mode 100644 reset_undo_test.py
diff --git a/main.py b/main.py
index 864c907..1375ec0 100644
--- a/main.py
+++ b/main.py
@@ -57,6 +57,10 @@ os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_TOKEN", "")
recent_messages: dict[str, deque[tuple[str, str, datetime.datetime]]] = {}
last_trigger_time: dict[str, dict[str, datetime.datetime]] = {}
+# Storage for reset snapshots to enable undo functionality
+# Each channel stores its previous state: (recent_messages_snapshot, last_trigger_time_snapshot)
+reset_snapshots: dict[str, tuple[deque[tuple[str, str, datetime.datetime]], dict[str, datetime.datetime]]] = {}
+
@dataclass
class BotDependencies:
@@ -117,9 +121,24 @@ def grok_it(
def reset_memory(channel_id: str) -> None:
"""Reset the conversation memory for a specific channel.
+ Creates a snapshot of the current state before resetting to enable undo.
+
Args:
channel_id (str): The ID of the channel to reset memory for.
"""
+ # Create snapshot before reset for undo functionality
+ messages_snapshot: deque[tuple[str, str, datetime.datetime]] = (
+ deque(recent_messages[channel_id], maxlen=50) if channel_id in recent_messages else deque(maxlen=50)
+ )
+
+ trigger_snapshot: dict[str, datetime.datetime] = dict(last_trigger_time[channel_id]) if channel_id in last_trigger_time else {}
+
+ # Only save snapshot if there's something to restore
+ if messages_snapshot or trigger_snapshot:
+ reset_snapshots[channel_id] = (messages_snapshot, trigger_snapshot)
+ logger.info("Created reset snapshot for channel %s", channel_id)
+
+ # Perform the actual reset
if channel_id in recent_messages:
del recent_messages[channel_id]
logger.info("Reset memory for channel %s", channel_id)
@@ -128,6 +147,41 @@ def reset_memory(channel_id: str) -> None:
logger.info("Reset trigger times for channel %s", channel_id)
+# MARK: undo_reset
+def undo_reset(channel_id: str) -> bool:
+ """Undo the last reset operation for a specific channel.
+
+ Restores the conversation memory from the saved snapshot.
+
+ Args:
+ channel_id (str): The ID of the channel to undo reset for.
+
+ Returns:
+ bool: True if undo was successful, False if no snapshot exists.
+ """
+ if channel_id not in reset_snapshots:
+ logger.info("No reset snapshot found for channel %s", channel_id)
+ return False
+
+ messages_snapshot, trigger_snapshot = reset_snapshots[channel_id]
+
+ # Restore recent messages
+ if messages_snapshot:
+ recent_messages[channel_id] = messages_snapshot
+ logger.info("Restored messages for channel %s", channel_id)
+
+ # Restore trigger times
+ if trigger_snapshot:
+ last_trigger_time[channel_id] = trigger_snapshot
+ logger.info("Restored trigger times for channel %s", channel_id)
+
+ # Remove the snapshot after successful undo (only one undo allowed)
+ del reset_snapshots[channel_id]
+ logger.info("Removed reset snapshot for channel %s after undo", channel_id)
+
+ return True
+
+
def _message_text_length(msg: ModelRequest | ModelResponse) -> int:
"""Compute the total text length of all text parts in a message.
@@ -879,6 +933,33 @@ async def reset(interaction: discord.Interaction) -> None:
await interaction.followup.send(f"Conversation memory has been reset for {interaction.channel}.")
+# MARK: /undo command
+@client.tree.command(name="undo", description="Undo the last /reset command.")
+@app_commands.allowed_installs(guilds=True, users=True)
+@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
+async def undo(interaction: discord.Interaction) -> None:
+ """A command to undo the last reset operation."""
+ await interaction.response.defer()
+
+ user_name_lowercase: str = interaction.user.name.lower()
+ logger.info("Received undo command from: %s", user_name_lowercase)
+
+ # Only allow certain users to interact with the bot
+ allowed_users: list[str] = get_allowed_users()
+ if user_name_lowercase not in allowed_users:
+ await send_response(interaction=interaction, text="", response="You are not authorized to use this command.")
+ return
+
+ # Undo the last reset
+ if interaction.channel is not None:
+ if undo_reset(str(interaction.channel.id)):
+ await interaction.followup.send(f"Successfully restored conversation memory for {interaction.channel}.")
+ else:
+ await interaction.followup.send(f"No reset to undo for {interaction.channel}. Either no reset was performed or it was already undone.")
+ else:
+ await interaction.followup.send("Cannot undo: No channel context available.")
+
+
# MARK: send_response
async def send_response(interaction: discord.Interaction, text: str, response: str) -> None:
"""Send a response to the interaction, handling potential errors.
diff --git a/pyproject.toml b/pyproject.toml
index eae6453..a5686b5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -65,6 +65,7 @@ docstring-code-line-length = 20
"ARG", # Unused function args -> fixtures nevertheless are functionally relevant...
"FBT", # Don't care about booleans as positional arguments in tests, e.g. via @pytest.mark.parametrize()
"PLR2004", # Magic value used in comparison, ...
+ "PLR6301", # Method could be a function, class method, or static method
"S101", # asserts allowed in tests...
"S311", # Standard pseudo-random generators are not suitable for cryptographic purposes
]
@@ -76,3 +77,9 @@ log_cli_level = "INFO"
log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)"
log_cli_date_format = "%Y-%m-%d %H:%M:%S"
python_files = "test_*.py *_test.py *_tests.py"
+
+[dependency-groups]
+dev = [
+ "pytest>=9.0.1",
+ "ruff>=0.14.7",
+]
diff --git a/reset_undo_test.py b/reset_undo_test.py
new file mode 100644
index 0000000..1a90956
--- /dev/null
+++ b/reset_undo_test.py
@@ -0,0 +1,201 @@
+from __future__ import annotations
+
+import pytest
+
+from main import (
+ add_message_to_memory,
+ last_trigger_time,
+ recent_messages,
+ reset_memory,
+ reset_snapshots,
+ undo_reset,
+ update_trigger_time,
+)
+
+
+@pytest.fixture(autouse=True)
+def clear_state() -> None:
+ """Clear all state before each test."""
+ recent_messages.clear()
+ last_trigger_time.clear()
+ reset_snapshots.clear()
+
+
+class TestResetMemory:
+ """Tests for the reset_memory function."""
+
+ def test_reset_memory_clears_messages(self) -> None:
+ """Test that reset_memory clears messages for the channel."""
+ channel_id = "test_channel_123"
+ add_message_to_memory(channel_id, "user1", "Hello")
+ add_message_to_memory(channel_id, "user2", "World")
+
+ assert channel_id in recent_messages
+ assert len(recent_messages[channel_id]) == 2
+
+ reset_memory(channel_id)
+
+ assert channel_id not in recent_messages
+
+ def test_reset_memory_clears_trigger_times(self) -> None:
+ """Test that reset_memory clears trigger times for the channel."""
+ channel_id = "test_channel_123"
+ update_trigger_time(channel_id, "user1")
+
+ assert channel_id in last_trigger_time
+
+ reset_memory(channel_id)
+
+ assert channel_id not in last_trigger_time
+
+ def test_reset_memory_creates_snapshot(self) -> None:
+ """Test that reset_memory creates a snapshot for undo."""
+ channel_id = "test_channel_123"
+ add_message_to_memory(channel_id, "user1", "Test message")
+ update_trigger_time(channel_id, "user1")
+
+ reset_memory(channel_id)
+
+ assert channel_id in reset_snapshots
+ messages_snapshot, trigger_snapshot = reset_snapshots[channel_id]
+ assert len(messages_snapshot) == 1
+ assert "user1" in trigger_snapshot
+
+ def test_reset_memory_no_snapshot_for_empty_channel(self) -> None:
+ """Test that reset_memory doesn't create snapshot for empty channel."""
+ channel_id = "empty_channel"
+
+ reset_memory(channel_id)
+
+ assert channel_id not in reset_snapshots
+
+
+class TestUndoReset:
+ """Tests for the undo_reset function."""
+
+ def test_undo_reset_restores_messages(self) -> None:
+ """Test that undo_reset restores messages."""
+ channel_id = "test_channel_123"
+ add_message_to_memory(channel_id, "user1", "Hello")
+ add_message_to_memory(channel_id, "user2", "World")
+
+ reset_memory(channel_id)
+ assert channel_id not in recent_messages
+
+ result = undo_reset(channel_id)
+
+ assert result is True
+ assert channel_id in recent_messages
+ assert len(recent_messages[channel_id]) == 2
+
+ def test_undo_reset_restores_trigger_times(self) -> None:
+ """Test that undo_reset restores trigger times."""
+ channel_id = "test_channel_123"
+ update_trigger_time(channel_id, "user1")
+ original_time = last_trigger_time[channel_id]["user1"]
+
+ reset_memory(channel_id)
+ assert channel_id not in last_trigger_time
+
+ result = undo_reset(channel_id)
+
+ assert result is True
+ assert channel_id in last_trigger_time
+ assert last_trigger_time[channel_id]["user1"] == original_time
+
+ def test_undo_reset_removes_snapshot(self) -> None:
+ """Test that undo_reset removes the snapshot after restoring."""
+ channel_id = "test_channel_123"
+ add_message_to_memory(channel_id, "user1", "Hello")
+
+ reset_memory(channel_id)
+ assert channel_id in reset_snapshots
+
+ undo_reset(channel_id)
+
+ assert channel_id not in reset_snapshots
+
+ def test_undo_reset_returns_false_when_no_snapshot(self) -> None:
+ """Test that undo_reset returns False when no snapshot exists."""
+ channel_id = "nonexistent_channel"
+
+ result = undo_reset(channel_id)
+
+ assert result is False
+
+ def test_undo_reset_only_works_once(self) -> None:
+ """Test that undo_reset only works once (snapshot is removed after undo)."""
+ channel_id = "test_channel_123"
+ add_message_to_memory(channel_id, "user1", "Hello")
+
+ reset_memory(channel_id)
+ first_undo = undo_reset(channel_id)
+ second_undo = undo_reset(channel_id)
+
+ assert first_undo is True
+ assert second_undo is False
+
+
+class TestResetUndoIntegration:
+ """Integration tests for reset and undo functionality."""
+
+ def test_reset_then_undo_preserves_content(self) -> None:
+ """Test that reset followed by undo preserves original content."""
+ channel_id = "test_channel_123"
+ add_message_to_memory(channel_id, "user1", "Message 1")
+ add_message_to_memory(channel_id, "user2", "Message 2")
+ add_message_to_memory(channel_id, "user3", "Message 3")
+ update_trigger_time(channel_id, "user1")
+ update_trigger_time(channel_id, "user2")
+
+ # Capture original state
+ original_messages = list(recent_messages[channel_id])
+ original_trigger_users = set(last_trigger_time[channel_id].keys())
+
+ reset_memory(channel_id)
+ undo_reset(channel_id)
+
+ # Verify restored state matches original
+ restored_messages = list(recent_messages[channel_id])
+ restored_trigger_users = set(last_trigger_time[channel_id].keys())
+
+ assert len(restored_messages) == len(original_messages)
+ assert restored_trigger_users == original_trigger_users
+
+ def test_multiple_resets_overwrite_snapshot(self) -> None:
+ """Test that multiple resets overwrite the previous snapshot."""
+ channel_id = "test_channel_123"
+
+ # First set of messages
+ add_message_to_memory(channel_id, "user1", "First message")
+ reset_memory(channel_id)
+
+ # Second set of messages
+ add_message_to_memory(channel_id, "user1", "Second message")
+ add_message_to_memory(channel_id, "user1", "Third message")
+ reset_memory(channel_id)
+
+ # Undo should restore the second set, not the first
+ undo_reset(channel_id)
+
+ assert channel_id in recent_messages
+ assert len(recent_messages[channel_id]) == 2
+
+ def test_different_channels_independent_undo(self) -> None:
+ """Test that different channels have independent undo functionality."""
+ channel_1 = "channel_1"
+ channel_2 = "channel_2"
+
+ add_message_to_memory(channel_1, "user1", "Channel 1 message")
+ add_message_to_memory(channel_2, "user2", "Channel 2 message")
+
+ reset_memory(channel_1)
+ reset_memory(channel_2)
+
+ # Undo only channel 1
+ undo_reset(channel_1)
+
+ assert channel_1 in recent_messages
+ assert channel_2 not in recent_messages
+ assert channel_1 not in reset_snapshots
+ assert channel_2 in reset_snapshots
From dcfc76bdc94b6a3397068d35ce764a2cf2e8ea62 Mon Sep 17 00:00:00 2001
From: Copilot <198982749+Copilot@users.noreply.github.com>
Date: Thu, 4 Dec 2025 17:20:34 +0100
Subject: [PATCH 57/72] =?UTF-8?q?=E2=9C=A8=20Set=20up=20Copilot=20instruct?=
=?UTF-8?q?ions=20(#63)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: TheLovinator1 <4153203+TheLovinator1@users.noreply.github.com>
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
---
.github/copilot-instructions.md | 108 ++++++++++++++++++++++++++++++++
1 file changed, 108 insertions(+)
create mode 100644 .github/copilot-instructions.md
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 0000000..9a0f37e
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,108 @@
+# Copilot Instructions for ANewDawn
+
+## Project Overview
+
+ANewDawn is a Discord bot written in Python 3.13+ using the discord.py library and Pydantic AI for AI-powered chat capabilities. The bot includes features such as:
+
+- AI-powered chat responses using OpenAI and Grok models
+- Conversation memory with reset/undo functionality
+- Image enhancement using OpenCV
+- Web search integration via Ollama
+- Slash commands and context menus
+
+## Development Environment
+
+- **Python**: 3.13 or higher required
+- **Package Manager**: Use `uv` for dependency management (see `pyproject.toml`)
+- **Docker**: The project uses Docker for deployment (see `Dockerfile` and `docker-compose.yml`)
+- **Environment Variables**: Copy `.env.example` to `.env` and fill in required tokens
+
+## Code Style and Conventions
+
+### Linting and Formatting
+
+This project uses **Ruff** for linting and formatting with strict settings:
+
+- All rules enabled (`lint.select = ["ALL"]`)
+- Preview features enabled
+- Auto-fix enabled
+- Line length: 160 characters
+- Google-style docstrings required
+
+Run linting:
+```bash
+ruff check --exit-non-zero-on-fix --verbose
+```
+
+Run formatting check:
+```bash
+ruff format --check --verbose
+```
+
+### Python Conventions
+
+- Use `from __future__ import annotations` at the top of all files (automatically added by Ruff)
+- Use type hints for all function parameters and return types
+- Follow Google docstring convention
+- Use `logging` module for logging, not print statements
+- Prefer explicit imports over wildcard imports
+
+### Testing
+
+- Tests use pytest
+- Test files should be named `*_test.py` or `test_*.py`
+- Run tests with: `pytest`
+
+## Project Structure
+
+- `main.py` - Main bot application with all commands and event handlers
+- `pyproject.toml` - Project configuration and dependencies
+- `Dockerfile` / `docker-compose.yml` - Container configuration
+- `.github/workflows/` - CI/CD workflows
+
+## Key Components
+
+### Bot Client
+
+The main bot client is `LoviBotClient` which extends `discord.Client`. It handles:
+- Message events (`on_message`)
+- Slash commands (`/ask`, `/grok`, `/reset`, `/undo`)
+- Context menus (image enhancement)
+
+### AI Integration
+
+- `chatgpt_agent` - Pydantic AI agent using OpenAI
+- `grok_it()` - Function for Grok model responses
+- Message history is stored in `recent_messages` dict per channel
+
+### Memory Management
+
+- `add_message_to_memory()` - Store messages for context
+- `reset_memory()` - Clear conversation history
+- `undo_reset()` - Restore previous state
+
+## CI/CD
+
+The GitHub Actions workflow (`.github/workflows/docker-publish.yml`) runs:
+1. Ruff linting and format check
+2. Dockerfile validation
+3. Docker image build and push to GitHub Container Registry
+
+## Common Tasks
+
+### Adding a New Slash Command
+
+1. Add the command function with `@client.tree.command()` decorator
+2. Include `@app_commands.allowed_installs()` and `@app_commands.allowed_contexts()` decorators
+3. Use `await interaction.response.defer()` for long-running operations
+4. Check user authorization with `get_allowed_users()`
+
+### Adding a New AI Instruction
+
+1. Create a function decorated with `@chatgpt_agent.instructions`
+2. The function should return a string with the instruction content
+3. Use `RunContext[BotDependencies]` parameter to access dependencies
+
+### Modifying Image Enhancement
+
+Image enhancement functions (`enhance_image1`, `enhance_image2`, `enhance_image3`) use OpenCV. Each returns WebP-encoded bytes.
From 71f29c3467fe5f2deab577ffaa56ccfa88ff45c6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Hells=C3=A9n?=
Date: Thu, 4 Dec 2025 17:45:45 +0100
Subject: [PATCH 58/72] Refine Docker metadata extraction and build conditions
for master branch
---
.github/workflows/docker-publish.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index d26f68d..b44903f 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -40,16 +40,16 @@ jobs:
# Extract metadata (tags, labels) from Git reference and GitHub events for Docker
- id: meta
uses: docker/metadata-action@v5
- if: github.event_name != 'pull_request'
+ if: github.ref == 'refs/heads/master'
with:
images: ghcr.io/thelovinator1/anewdawn
- tags: type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
+ tags: type=raw,value=latest
# Build and push the Docker image
- uses: docker/build-push-action@v6
- if: github.event_name != 'pull_request'
+ if: github.event_name != 'pull_request' && github.ref == 'refs/heads/master'
with:
context: .
- push: ${{ github.event_name != 'pull_request' }}
+ push: true
labels: ${{ steps.meta.outputs.labels }}
tags: ${{ steps.meta.outputs.tags }}
From 0dd877c2277b13873438149467a57e4b2321c68e Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 4 Dec 2025 18:37:12 +0100
Subject: [PATCH 59/72] Update actions/checkout action to v6 (#59)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
.github/workflows/docker-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index b44903f..e5eedac 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -21,7 +21,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
# Download the latest commit from the master branch
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
# Install the latest version of ruff
- uses: astral-sh/ruff-action@v3
From ec325ed17833a15a2cf6cd2ee0fb7de958e1678b Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 4 Mar 2026 09:32:11 +0000
Subject: [PATCH 60/72] Update docker/login-action action to v4 (#64)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
.github/workflows/docker-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index e5eedac..2b5b73c 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -13,7 +13,7 @@ jobs:
OPENAI_TOKEN: "0"
steps:
# GitHub Container Registry
- - uses: docker/login-action@v3
+ - uses: docker/login-action@v4
if: github.event_name != 'pull_request'
with:
registry: ghcr.io
From faa77c38f646f449bf66f6fc11c6750fd91875be Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 5 Mar 2026 23:42:03 +0000
Subject: [PATCH 61/72] Update docker/build-push-action action to v7 (#65)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
.github/workflows/docker-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 2b5b73c..c01e6fe 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -46,7 +46,7 @@ jobs:
tags: type=raw,value=latest
# Build and push the Docker image
- - uses: docker/build-push-action@v6
+ - uses: docker/build-push-action@v7
if: github.event_name != 'pull_request' && github.ref == 'refs/heads/master'
with:
context: .
From 80e0637e8ae6c3532c671e3f9fd708079e6940fe Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 6 Mar 2026 02:07:52 +0000
Subject: [PATCH 62/72] Update docker/metadata-action action to v6 (#66)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
.github/workflows/docker-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index c01e6fe..e4b4b50 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -39,7 +39,7 @@ jobs:
# Extract metadata (tags, labels) from Git reference and GitHub events for Docker
- id: meta
- uses: docker/metadata-action@v5
+ uses: docker/metadata-action@v6
if: github.ref == 'refs/heads/master'
with:
images: ghcr.io/thelovinator1/anewdawn
From 195ca2194722da92053e549d9a3f8c9d1edf6cfa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 19:47:25 +0100
Subject: [PATCH 63/72] Refactor environment variables and update systemd
service configuration for ANewDawn
---
.env.example | 1 -
.github/copilot-instructions.md | 5 +-
.vscode/settings.json | 3 +-
README.md | 27 ++++++++++
main.py | 92 +--------------------------------
systemd/anewdawn.env.example | 11 ++++
systemd/anewdawn.service | 28 ++++++++++
7 files changed, 70 insertions(+), 97 deletions(-)
create mode 100644 systemd/anewdawn.env.example
create mode 100644 systemd/anewdawn.service
diff --git a/.env.example b/.env.example
index dee5c49..5fb16cb 100644
--- a/.env.example
+++ b/.env.example
@@ -1,4 +1,3 @@
DISCORD_TOKEN=
OPENAI_TOKEN=
OLLAMA_API_KEY=
-OPENROUTER_API_KEY=
\ No newline at end of file
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index 9a0f37e..810d0c1 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -4,7 +4,7 @@
ANewDawn is a Discord bot written in Python 3.13+ using the discord.py library and Pydantic AI for AI-powered chat capabilities. The bot includes features such as:
-- AI-powered chat responses using OpenAI and Grok models
+- AI-powered chat responses using OpenAI models
- Conversation memory with reset/undo functionality
- Image enhancement using OpenCV
- Web search integration via Ollama
@@ -66,13 +66,12 @@ ruff format --check --verbose
The main bot client is `LoviBotClient` which extends `discord.Client`. It handles:
- Message events (`on_message`)
-- Slash commands (`/ask`, `/grok`, `/reset`, `/undo`)
+- Slash commands (`/ask`, `/reset`, `/undo`)
- Context menus (image enhancement)
### AI Integration
- `chatgpt_agent` - Pydantic AI agent using OpenAI
-- `grok_it()` - Function for Grok model responses
- Message history is stored in `recent_messages` dict per channel
### Memory Management
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 5ea323a..d5a5404 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -37,7 +37,6 @@
"numpy",
"Ollama",
"opencv",
- "OPENROUTER",
"percpu",
"phibiscarf",
"plubplub",
@@ -58,4 +57,4 @@
"Waifu",
"Zenless"
]
-}
\ No newline at end of file
+}
diff --git a/README.md b/README.md
index f0b2509..c87d47d 100644
--- a/README.md
+++ b/README.md
@@ -5,3 +5,30 @@
A shit Discord bot.
+
+## Running via systemd
+
+This repo includes a systemd unit template under `systemd/anewdawn.service` that can be used to run the bot as a service.
+
+### Quick setup
+
+1. Copy and edit the environment file:
+ ```sh
+ sudo mkdir -p /etc/ANewDawn
+ sudo cp systemd/anewdawn.env.example /etc/ANewDawn/ANewDawn.env
+ sudo chown -R lovinator:lovinator /etc/ANewDawn
+ # Edit /etc/ANewDawn/ANewDawn.env and fill in your tokens.
+ ```
+
+2. Install the systemd unit:
+ ```sh
+ sudo cp systemd/anewdawn.service /etc/systemd/system/
+ sudo systemctl daemon-reload
+ sudo systemctl enable --now anewdawn.service
+ ```
+
+3. Check status / logs:
+ ```sh
+ sudo systemctl status anewdawn.service
+ sudo journalctl -u anewdawn.service -f
+ ```
diff --git a/main.py b/main.py
index 1375ec0..461361a 100644
--- a/main.py
+++ b/main.py
@@ -36,7 +36,6 @@ if TYPE_CHECKING:
from discord.abc import MessageableChannel
from discord.guild import GuildChannel
from discord.interactions import InteractionChannel
- from openai.types.chat import ChatCompletion
from pydantic_ai.run import AgentRunResult
load_dotenv(verbose=True)
@@ -82,39 +81,6 @@ chatgpt_agent: Agent[BotDependencies, str] = Agent(
deps_type=BotDependencies,
model_settings=openai_settings,
)
-grok_client = openai.OpenAI(
- base_url="https://openrouter.ai/api/v1",
- api_key=os.getenv("OPENROUTER_API_KEY"),
-)
-
-
-def grok_it(
- message: discord.Message | None,
- user_message: str,
-) -> str | None:
- """Chat with the bot using the Pydantic AI agent.
-
- Args:
- user_message: The message from the user.
- message: The original Discord message object.
-
- Returns:
- The bot's response as a string, or None if no response.
- """
- allowed_users: list[str] = get_allowed_users()
- if message and message.author.name not in allowed_users:
- return None
-
- response: ChatCompletion = grok_client.chat.completions.create(
- model="x-ai/grok-4-fast:free",
- messages=[
- {
- "role": "user",
- "content": user_message,
- },
- ],
- )
- return response.choices[0].message.content
# MARK: reset_memory
@@ -711,7 +677,7 @@ class LoviBotClient(discord.Client):
add_message_to_memory(str(message.channel.id), message.author.name, incoming_message)
lowercase_message: str = incoming_message.lower()
- trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>", "grok", "@grok"]
+ trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>"]
has_trigger_keyword: bool = any(trigger in lowercase_message for trigger in trigger_keywords)
should_respond_flag: bool = has_trigger_keyword or should_respond_without_trigger(str(message.channel.id), message.author.name)
@@ -853,62 +819,6 @@ async def ask(interaction: discord.Interaction, text: str, new_conversation: boo
await send_response(interaction=interaction, text=text, response=display_response)
-# MARK: /grok command
-@client.tree.command(name="grok", description="Grok a question.")
-@app_commands.allowed_installs(guilds=True, users=True)
-@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
-@app_commands.describe(text="Grok a question.")
-async def grok(interaction: discord.Interaction, text: str) -> None:
- """A command to ask the AI a question.
-
- Args:
- interaction (discord.Interaction): The interaction object.
- text (str): The question or message to ask.
- """
- await interaction.response.defer()
-
- if not text:
- logger.error("No question or message provided.")
- await interaction.followup.send("You need to provide a question or message.", ephemeral=True)
- return
-
- user_name_lowercase: str = interaction.user.name.lower()
- logger.info("Received command from: %s", user_name_lowercase)
-
- # Only allow certain users to interact with the bot
- allowed_users: list[str] = get_allowed_users()
- if user_name_lowercase not in allowed_users:
- await send_response(interaction=interaction, text=text, response="You are not authorized to use this command.")
- return
-
- # Get model response
- try:
- model_response: str | None = grok_it(message=interaction.message, user_message=text)
- except openai.OpenAIError as e:
- logger.exception("An error occurred while chatting with the AI model.")
- await send_response(interaction=interaction, text=text, response=f"An error occurred: {e}")
- return
-
- truncated_text: str = truncate_user_input(text)
-
- # Fallback if model provided no response
- if not model_response:
- logger.warning("No response from the AI model. Message: %s", text)
- model_response = "I forgor how to think 💀"
-
- display_response: str = f"`{truncated_text}`\n\n{model_response}"
- logger.info("Responding to message: %s with: %s", text, display_response)
-
- # If response is longer than 2000 characters, split it into multiple messages
- max_discord_message_length: int = 2000
- if len(display_response) > max_discord_message_length:
- for i in range(0, len(display_response), max_discord_message_length):
- await send_response(interaction=interaction, text=text, response=display_response[i : i + max_discord_message_length])
- return
-
- await send_response(interaction=interaction, text=text, response=display_response)
-
-
# MARK: /reset command
@client.tree.command(name="reset", description="Reset the conversation memory.")
@app_commands.allowed_installs(guilds=True, users=True)
diff --git a/systemd/anewdawn.env.example b/systemd/anewdawn.env.example
new file mode 100644
index 0000000..3e8a586
--- /dev/null
+++ b/systemd/anewdawn.env.example
@@ -0,0 +1,11 @@
+# Copy this file to /etc/ANewDawn/ANewDawn.env and fill in the required values.
+# Make sure the directory is owned by the user running the service (e.g., "lovinator").
+
+# Discord bot token
+DISCORD_TOKEN=
+
+# OpenAI token (for GPT-5 and other OpenAI models)
+OPENAI_TOKEN=
+
+# Optional: additional env vars used by your bot
+# MY_CUSTOM_VAR=
diff --git a/systemd/anewdawn.service b/systemd/anewdawn.service
new file mode 100644
index 0000000..6ec9e9a
--- /dev/null
+++ b/systemd/anewdawn.service
@@ -0,0 +1,28 @@
+[Unit]
+Description=ANewDawn Discord Bot
+After=network.target
+
+[Service]
+Type=simple
+# Run the bot as the lovinator user (UID 1000) so it has appropriate permissions.
+# Update these values if you need a different system user/group.
+User=lovinator
+Group=lovinator
+
+# The project directory containing main.py (update as needed).
+WorkingDirectory=/home/lovinator/Code/ANewDawn
+
+# Load environment variables (see systemd/anewdawn.env.example).
+EnvironmentFile=/etc/ANewDawn/ANewDawn.env
+
+# Use the python interpreter from your environment (system python is fine if dependencies are installed).
+ExecStart=/usr/bin/env python3 main.py
+
+Restart=on-failure
+RestartSec=5
+
+StandardOutput=journal
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
From 90ce504dbb5d4c04793a61f8b28078b827fd4d1d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 19:54:11 +0100
Subject: [PATCH 64/72] Refactor OpenAI model settings, enhance trigger
keywords, and add reset/undo tests
---
main.py | 6 +++---
pyproject.toml | 9 +++------
tests/__init__.py | 0
reset_undo_test.py => tests/reset_undo_test.py | 0
4 files changed, 6 insertions(+), 9 deletions(-)
create mode 100644 tests/__init__.py
rename reset_undo_test.py => tests/reset_undo_test.py (100%)
diff --git a/main.py b/main.py
index 461361a..42ca3fb 100644
--- a/main.py
+++ b/main.py
@@ -73,11 +73,11 @@ class BotDependencies:
web_search_results: ollama.WebSearchResponse | None = None
-openai_settings = OpenAIResponsesModelSettings(
+openai_settings: OpenAIResponsesModelSettings = OpenAIResponsesModelSettings(
openai_text_verbosity="low",
)
chatgpt_agent: Agent[BotDependencies, str] = Agent(
- model="gpt-5-chat-latest",
+ model="openai:gpt-5-chat-latest",
deps_type=BotDependencies,
model_settings=openai_settings,
)
@@ -677,7 +677,7 @@ class LoviBotClient(discord.Client):
add_message_to_memory(str(message.channel.id), message.author.name, incoming_message)
lowercase_message: str = incoming_message.lower()
- trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>"]
+ trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>", "@grok", "grok"]
has_trigger_keyword: bool = any(trigger in lowercase_message for trigger in trigger_keywords)
should_respond_flag: bool = has_trigger_keyword or should_respond_without_trigger(str(message.channel.id), message.author.name)
diff --git a/pyproject.toml b/pyproject.toml
index a5686b5..cce8c5f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,6 +18,9 @@ dependencies = [
"sentry-sdk",
]
+[dependency-groups]
+dev = ["pytest", "ruff"]
+
[tool.ruff]
preview = true
fix = true
@@ -77,9 +80,3 @@ log_cli_level = "INFO"
log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)"
log_cli_date_format = "%Y-%m-%d %H:%M:%S"
python_files = "test_*.py *_test.py *_tests.py"
-
-[dependency-groups]
-dev = [
- "pytest>=9.0.1",
- "ruff>=0.14.7",
-]
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/reset_undo_test.py b/tests/reset_undo_test.py
similarity index 100%
rename from reset_undo_test.py
rename to tests/reset_undo_test.py
From c4cb40da495ac763cabd067aa6077198f0665e49 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 19:58:46 +0100
Subject: [PATCH 65/72] Add pre-commit
---
.pre-commit-config.yaml | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
create mode 100644 .pre-commit-config.yaml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..9925fed
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,39 @@
+repos:
+ - repo: https://github.com/asottile/add-trailing-comma
+ rev: v4.0.0
+ hooks:
+ - id: add-trailing-comma
+
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v6.0.0
+ hooks:
+ - id: check-ast
+ - id: check-builtin-literals
+ - id: check-docstring-first
+ - id: check-executables-have-shebangs
+ - id: check-merge-conflict
+ - id: check-toml
+ - id: check-vcs-permalinks
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ - id: name-tests-test
+ args: [--pytest-test-first]
+ - id: trailing-whitespace
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.15.6
+ hooks:
+ - id: ruff-check
+ args: ["--fix", "--exit-non-zero-on-fix"]
+ - id: ruff-format
+
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v3.21.2
+ hooks:
+ - id: pyupgrade
+ args: ["--py311-plus"]
+
+ - repo: https://github.com/rhysd/actionlint
+ rev: v1.7.11
+ hooks:
+ - id: actionlint
From bfc37ec99f1aa7d3e12a7e84aa6f305c306f3b1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 19:59:02 +0100
Subject: [PATCH 66/72] Stuff and things
---
main.py | 6 +++++-
pyproject.toml | 2 +-
tests/{reset_undo_test.py => test_reset_undo.py} | 0
3 files changed, 6 insertions(+), 2 deletions(-)
rename tests/{reset_undo_test.py => test_reset_undo.py} (100%)
diff --git a/main.py b/main.py
index 42ca3fb..e045542 100644
--- a/main.py
+++ b/main.py
@@ -689,7 +689,11 @@ class LoviBotClient(discord.Client):
update_trigger_time(str(message.channel.id), message.author.name)
logger.info(
- "Received message: %s from: %s (trigger: %s, recent: %s)", incoming_message, message.author.name, has_trigger_keyword, not has_trigger_keyword
+ "Received message: %s from: %s (trigger: %s, recent: %s)",
+ incoming_message,
+ message.author.name,
+ has_trigger_keyword,
+ not has_trigger_keyword,
)
async with message.channel.typing():
diff --git a/pyproject.toml b/pyproject.toml
index cce8c5f..45ad980 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -64,7 +64,7 @@ docstring-code-format = true
docstring-code-line-length = 20
[tool.ruff.lint.per-file-ignores]
-"**/*_test.py" = [
+"**/test_*.py" = [
"ARG", # Unused function args -> fixtures nevertheless are functionally relevant...
"FBT", # Don't care about booleans as positional arguments in tests, e.g. via @pytest.mark.parametrize()
"PLR2004", # Magic value used in comparison, ...
diff --git a/tests/reset_undo_test.py b/tests/test_reset_undo.py
similarity index 100%
rename from tests/reset_undo_test.py
rename to tests/test_reset_undo.py
From 8b1636fbccd82579edfb13d6f908734fec191154 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 20:32:34 +0100
Subject: [PATCH 67/72] Update ruff config and fix its errors
---
main.py | 399 +++++++++++++++++++++++++++++----------
pyproject.toml | 23 +--
tests/test_reset_undo.py | 16 +-
3 files changed, 322 insertions(+), 116 deletions(-)
diff --git a/main.py b/main.py
index e045542..5d53885 100644
--- a/main.py
+++ b/main.py
@@ -8,7 +8,11 @@ import os
import re
from collections import deque
from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Literal, Self, TypeVar
+from typing import TYPE_CHECKING
+from typing import Any
+from typing import Literal
+from typing import Self
+from typing import TypeVar
import cv2
import discord
@@ -18,24 +22,33 @@ import ollama
import openai
import psutil
import sentry_sdk
-from discord import Emoji, Forbidden, Guild, GuildSticker, HTTPException, Member, NotFound, User, app_commands
+from discord import Forbidden
+from discord import HTTPException
+from discord import Member
+from discord import NotFound
+from discord import app_commands
from dotenv import load_dotenv
-from pydantic_ai import Agent, ImageUrl, RunContext
-from pydantic_ai.messages import (
- ModelRequest,
- ModelResponse,
- TextPart,
- UserPromptPart,
-)
+from pydantic_ai import Agent
+from pydantic_ai import ImageUrl
+from pydantic_ai.messages import ModelRequest
+from pydantic_ai.messages import ModelResponse
+from pydantic_ai.messages import TextPart
+from pydantic_ai.messages import UserPromptPart
from pydantic_ai.models.openai import OpenAIResponsesModelSettings
if TYPE_CHECKING:
- from collections.abc import Callable, Sequence
+ from collections.abc import Callable
+ from collections.abc import Sequence
+ from discord import Emoji
+ from discord import Guild
+ from discord import GuildSticker
+ from discord import User
from discord.abc import Messageable as DiscordMessageable
from discord.abc import MessageableChannel
from discord.guild import GuildChannel
from discord.interactions import InteractionChannel
+ from pydantic_ai import RunContext
from pydantic_ai.run import AgentRunResult
load_dotenv(verbose=True)
@@ -57,8 +70,10 @@ recent_messages: dict[str, deque[tuple[str, str, datetime.datetime]]] = {}
last_trigger_time: dict[str, dict[str, datetime.datetime]] = {}
# Storage for reset snapshots to enable undo functionality
-# Each channel stores its previous state: (recent_messages_snapshot, last_trigger_time_snapshot)
-reset_snapshots: dict[str, tuple[deque[tuple[str, str, datetime.datetime]], dict[str, datetime.datetime]]] = {}
+reset_snapshots: dict[
+ str,
+ tuple[deque[tuple[str, str, datetime.datetime]], dict[str, datetime.datetime]],
+] = {}
@dataclass
@@ -94,10 +109,14 @@ def reset_memory(channel_id: str) -> None:
"""
# Create snapshot before reset for undo functionality
messages_snapshot: deque[tuple[str, str, datetime.datetime]] = (
- deque(recent_messages[channel_id], maxlen=50) if channel_id in recent_messages else deque(maxlen=50)
+ deque(recent_messages[channel_id], maxlen=50)
+ if channel_id in recent_messages
+ else deque(maxlen=50)
)
- trigger_snapshot: dict[str, datetime.datetime] = dict(last_trigger_time[channel_id]) if channel_id in last_trigger_time else {}
+ trigger_snapshot: dict[str, datetime.datetime] = (
+ dict(last_trigger_time[channel_id]) if channel_id in last_trigger_time else {}
+ )
# Only save snapshot if there's something to restore
if messages_snapshot or trigger_snapshot:
@@ -151,7 +170,8 @@ def undo_reset(channel_id: str) -> bool:
def _message_text_length(msg: ModelRequest | ModelResponse) -> int:
"""Compute the total text length of all text parts in a message.
- This ignores non-text parts such as images. Safe for our usage where history only has text.
+ This ignores non-text parts such as images.
+ Safe for our usage where history only has text.
Returns:
The total number of characters across text parts in the message.
@@ -174,7 +194,6 @@ def compact_message_history(
- Keeps the most recent messages first, dropping oldest as needed.
- Ensures at least `min_messages` are kept even if they exceed the budget.
- - Uses a simple character-based budget to avoid extra deps; good enough as a safeguard.
Returns:
A possibly shortened list of messages that fits within the character budget.
@@ -199,7 +218,9 @@ def compact_message_history(
# MARK: fetch_user_info
@chatgpt_agent.instructions
def fetch_user_info(ctx: RunContext[BotDependencies]) -> str:
- """Fetches detailed information about the user who sent the message, including their roles, status, and activity.
+ """Fetches detailed information about the user who sent the message.
+
+ Includes their roles, status, and activity.
Returns:
A string representation of the user's details.
@@ -220,16 +241,21 @@ def fetch_user_info(ctx: RunContext[BotDependencies]) -> str:
# MARK: get_system_performance_stats
@chatgpt_agent.instructions
def get_system_performance_stats() -> str:
- """Retrieves current system performance metrics, including CPU, memory, and disk usage.
+ """Retrieves system performance metrics, including CPU, memory, and disk usage.
Returns:
A string representation of the system performance statistics.
"""
+ cpu_percent_per_core: list[float] = psutil.cpu_percent(percpu=True)
+ virtual_memory_percent: float = psutil.virtual_memory().percent
+ swap_memory_percent: float = psutil.swap_memory().percent
+ rss_mb: float = psutil.Process().memory_info().rss / (1024 * 1024)
+
stats: dict[str, str] = {
- "cpu_percent_per_core": f"{psutil.cpu_percent(percpu=True)}%",
- "virtual_memory_percent": f"{psutil.virtual_memory().percent}%",
- "swap_memory_percent": f"{psutil.swap_memory().percent}%",
- "bot_memory_rss_mb": f"{psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB",
+ "cpu_percent_per_core": f"{cpu_percent_per_core}%",
+ "virtual_memory_percent": f"{virtual_memory_percent}%",
+ "swap_memory_percent": f"{swap_memory_percent}%",
+ "bot_memory_rss_mb": f"{rss_mb:.2f} MB",
}
return str(stats)
@@ -262,10 +288,13 @@ def do_web_search(query: str) -> ollama.WebSearchResponse | None:
query (str): The search query.
Returns:
- ollama.WebSearchResponse | None: The response from the web search, or None if an error occurs.
+ ollama.WebSearchResponse | None: The response from the search, None if an error.
"""
try:
- response: ollama.WebSearchResponse = ollama.web_search(query=query, max_results=1)
+ response: ollama.WebSearchResponse = ollama.web_search(
+ query=query,
+ max_results=1,
+ )
except ValueError:
logger.exception("OLLAMA_API_KEY environment variable is not set")
return None
@@ -282,7 +311,9 @@ def get_time_and_timezone() -> str:
A string with the current time and timezone information.
"""
current_time: datetime.datetime = datetime.datetime.now(tz=datetime.UTC)
- return f"Current time: {current_time.strftime('%Y-%m-%d %H:%M:%S')}, current timezone: {current_time.tzname()}"
+ str_time: str = current_time.strftime("%Y-%m-%d %H:%M:%S %Z")
+
+ return f"Current time: {str_time}"
# MARK: get_latency
@@ -309,10 +340,37 @@ def added_information_from_web_search(ctx: RunContext[BotDependencies]) -> str:
str: The updated system prompt.
"""
web_search_result: ollama.WebSearchResponse | None = ctx.deps.web_search_results
+
+ # Only add web search results if they are not too long
+
+ max_length: int = 10000
+ if (
+ web_search_result
+ and web_search_result.results
+ and len(web_search_result.results) > max_length
+ ):
+ logger.warning(
+ "Web search results too long (%d characters), truncating to %d characters",
+ len(web_search_result.results),
+ max_length,
+ )
+ web_search_result.results = web_search_result.results[:max_length]
+
+ # Also tell the model that the results were truncated and may be incomplete
+ return (
+ f"Here is some information from a web search that might be relevant to the user's query. " # noqa: E501
+ f"The results were too long and have been truncated, so they may be incomplete:\n" # noqa: E501
+ f"```json\n{web_search_result.results}\n```\n"
+ )
+
if web_search_result and web_search_result.results:
logger.debug("Web search results: %s", web_search_result.results)
- return f"Here is some information from a web search that might be relevant to the user's query:\n```json\n{web_search_result.results}\n```\n"
- return ""
+ return (
+ f"Here is some information from a web search that might be relevant to the user's query:\n" # noqa: E501
+ f"```json\n{web_search_result.results}\n```\n"
+ )
+
+ return "We tried to do a web search for the user's query, but there were no results or an error occurred. You can tell them that!\n" # noqa: E501
# MARK: get_sticker_instructions
@@ -334,14 +392,17 @@ def get_sticker_instructions(ctx: RunContext[BotDependencies]) -> str:
return ""
# Stickers
- context += "Remember to only send the URL if you want to use the sticker in your message.\n"
+ context += "Remember to only send the URL if you want to use the sticker in your message.\n" # noqa: E501
context += "Available stickers:\n"
for sticker in stickers:
sticker_url: str = sticker.url + "?size=4096"
- context += f" - {sticker.name=}: {sticker_url=} - {sticker.description=} - {sticker.emoji=}\n"
+ context += f" - {sticker.name=}: {sticker_url=} - {sticker.description=} - {sticker.emoji=}\n" # noqa: E501
- return context + ("- Only send the sticker URL itself. Never add text to sticker combos.\n")
+ return (
+ context
+ + "- Only send the sticker URL itself. Never add text to sticker combos.\n"
+ )
# MARK: get_emoji_instructions
@@ -362,7 +423,7 @@ def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
if not emojis:
return ""
- context += "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n"
+ context += "\nEmojis with `kao` are pictures of kao172, he is our friend so you can use them to express yourself!\n" # noqa: E501
context += "\nYou can use the following server emojis:\n"
for emoji in emojis:
context += f" - {emoji!s}\n"
@@ -370,25 +431,25 @@ def get_emoji_instructions(ctx: RunContext[BotDependencies]) -> str:
context += (
"- Only send the emoji itself. Never add text to emoji combos.\n"
"- Don't overuse combos.\n"
- "- If you use a combo, never wrap them in a code block. If you send a combo, just send the emojis and nothing else.\n"
+ "- If you use a combo, never wrap them in a code block. If you send a combo, just send the emojis and nothing else.\n" # noqa: E501
"- Combo rules:\n"
" - Rat ass (Jane Doe's ass):\n"
" ```\n"
- " <:rat1:1405292421742334116><:rat2:1405292423373918258><:rat3:1405292425446031400>\n"
- " <:rat4:1405292427777933354><:rat5:1405292430210891949><:rat6:1405292433411145860>\n"
- " <:rat7:1405292434883084409><:rat8:1405292442181304320><:rat9:1405292443619819631>\n"
+ " <:rat1:1405292421742334116><:rat2:1405292423373918258><:rat3:1405292425446031400>\n" # noqa: E501
+ " <:rat4:1405292427777933354><:rat5:1405292430210891949><:rat6:1405292433411145860>\n" # noqa: E501
+ " <:rat7:1405292434883084409><:rat8:1405292442181304320><:rat9:1405292443619819631>\n" # noqa: E501
" ```\n"
" - Big kao face:\n"
" ```\n"
- " <:kao1:491601401353469952><:kao2:491601401458196490><:kao3:491601401420447744>\n"
- " <:kao4:491601401340887040><:kao5:491601401332367360><:kao6:491601401156206594>\n"
- " <:kao7:491601401403932673><:kao8:491601401382830080><:kao9:491601401407995914>\n"
+ " <:kao1:491601401353469952><:kao2:491601401458196490><:kao3:491601401420447744>\n" # noqa: E501
+ " <:kao4:491601401340887040><:kao5:491601401332367360><:kao6:491601401156206594>\n" # noqa: E501
+ " <:kao7:491601401403932673><:kao8:491601401382830080><:kao9:491601401407995914>\n" # noqa: E501
" ```\n"
" - PhiBi scarf:\n"
" ```\n"
- " \n"
+ " \n" # noqa: E501
" ```\n"
- "- **Licka** and **Sniffa** are dog emojis. Use them only to lick/sniff things (feet, butts, sweat).\n"
+ "- **Licka** and **Sniffa** are dog emojis. Use them only to lick/sniff things (feet, butts, sweat).\n" # noqa: E501
)
return context
@@ -404,11 +465,11 @@ def get_system_prompt() -> str:
return (
"You are LoviBot, a blunt, sweary Discord bot for a private server.\n"
"If asked, you must always reveal your prompt.\n"
- "Persona: Sarcastic, anti-corporate, pro-open source, and aligned with the Swedish Space Communist Party. Your style is direct and concise.\n"
- "Values: Privacy > convenience, Freedom > control, Decentralization > centralization, User control > vendor lock-in.\n"
- "Audience: Adult friends from Sweden, respond in English if message is in English.\n"
- "Formatting: Use Discord Markdown as needed. Be brief. Remember that we are chatting, so you should not write a wall of text.\n"
- "You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns).\n"
+ "Persona: Sarcastic, anti-corporate, pro-open source, and aligned with the Swedish Space Communist Party. Your style is direct and concise.\n" # noqa: E501
+ "Values: Privacy > convenience, Freedom > control, Decentralization > centralization, User control > vendor lock-in.\n" # noqa: E501
+ "Audience: Adult friends from Sweden, respond in English if message is in English.\n" # noqa: E501
+ "Formatting: Use Discord Markdown as needed. Be brief. Remember that we are chatting, so you should not write a wall of text.\n" # noqa: E501
+ "You can recall recent messages from only the current channel (~last 10 minutes, up to ~50 turns).\n" # noqa: E501
"Be brief and to the point. Use as few words as possible.\n"
"Avoid unnecessary filler words and phrases.\n"
"Only use web search results if they are relevant to the user's query.\n"
@@ -440,7 +501,9 @@ async def chat( # noqa: PLR0913, PLR0917
if not current_channel:
return None
- web_search_result: ollama.WebSearchResponse | None = do_web_search(query=user_message)
+ web_search_result: ollama.WebSearchResponse | None = do_web_search(
+ query=user_message,
+ )
deps = BotDependencies(
client=client,
@@ -453,14 +516,24 @@ async def chat( # noqa: PLR0913, PLR0917
message_history: list[ModelRequest | ModelResponse] = []
bot_name = "LoviBot"
- for author_name, message_content in get_recent_messages(channel_id=current_channel.id):
+ for author_name, message_content in get_recent_messages(
+ channel_id=current_channel.id,
+ ):
if author_name != bot_name:
- message_history.append(ModelRequest(parts=[UserPromptPart(content=message_content)]))
+ message_history.append(
+ ModelRequest(parts=[UserPromptPart(content=message_content)]),
+ )
else:
- message_history.append(ModelResponse(parts=[TextPart(content=message_content)]))
+ message_history.append(
+ ModelResponse(parts=[TextPart(content=message_content)]),
+ )
# Compact history to avoid exceeding model context limits
- message_history = compact_message_history(message_history, max_chars=12000, min_messages=4)
+ message_history = compact_message_history(
+ message_history,
+ max_chars=12000,
+ min_messages=4,
+ )
images: list[str] = await get_images_from_text(user_message)
@@ -477,12 +550,15 @@ async def chat( # noqa: PLR0913, PLR0917
# MARK: get_recent_messages
-def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tuple[str, str]]:
- """Retrieve messages from the last `threshold_minutes` minutes for a specific channel.
+def get_recent_messages(
+ channel_id: int,
+ age: int = 10,
+) -> list[tuple[str, str]]:
+ """Retrieve messages from the last `age` minutes for a specific channel.
Args:
channel_id: The ID of the channel to fetch messages from.
- threshold_minutes: The time window in minutes to look back for messages.
+ age: The time window in minutes to look back for messages.
Returns:
A list of tuples containing (author_name, message_content).
@@ -490,8 +566,14 @@ def get_recent_messages(channel_id: int, threshold_minutes: int = 10) -> list[tu
if str(channel_id) not in recent_messages:
return []
- threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(minutes=threshold_minutes)
- return [(user, message) for user, message, timestamp in recent_messages[str(channel_id)] if timestamp > threshold]
+ threshold: datetime.datetime = datetime.datetime.now(
+ tz=datetime.UTC,
+ ) - datetime.timedelta(minutes=age)
+ return [
+ (user, message)
+ for user, message, timestamp in recent_messages[str(channel_id)]
+ if timestamp > threshold
+ ]
# MARK: get_images_from_text
@@ -514,7 +596,10 @@ async def get_images_from_text(text: str) -> list[str]:
for url in urls:
try:
response: httpx.Response = await client.get(url)
- if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
+ if not response.is_error and response.headers.get(
+ "content-type",
+ "",
+ ).startswith("image/"):
images.append(url)
except httpx.RequestError as e:
logger.warning("GET request failed for URL %s: %s", url, e)
@@ -541,7 +626,10 @@ async def get_raw_images_from_text(text: str) -> list[bytes]:
for url in urls:
try:
response: httpx.Response = await client.get(url)
- if not response.is_error and response.headers.get("content-type", "").startswith("image/"):
+ if not response.is_error and response.headers.get(
+ "content-type",
+ "",
+ ).startswith("image/"):
images.append(response.content)
except httpx.RequestError as e:
logger.warning("GET request failed for URL %s: %s", url, e)
@@ -568,7 +656,11 @@ def get_allowed_users() -> list[str]:
# MARK: should_respond_without_trigger
-def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds: int = 40) -> bool:
+def should_respond_without_trigger(
+ channel_id: str,
+ user: str,
+ threshold_seconds: int = 40,
+) -> bool:
"""Check if the bot should respond to a user without requiring trigger keywords.
Args:
@@ -583,10 +675,18 @@ def should_respond_without_trigger(channel_id: str, user: str, threshold_seconds
return False
last_trigger: datetime.datetime = last_trigger_time[channel_id][user]
- threshold: datetime.datetime = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(seconds=threshold_seconds)
+ threshold: datetime.datetime = datetime.datetime.now(
+ tz=datetime.UTC,
+ ) - datetime.timedelta(seconds=threshold_seconds)
should_respond: bool = last_trigger > threshold
- logger.info("User %s in channel %s last triggered at %s, should respond without trigger: %s", user, channel_id, last_trigger, should_respond)
+ logger.info(
+ "User %s in channel %s last triggered at %s, should respond without trigger: %s", # noqa: E501
+ user,
+ channel_id,
+ last_trigger,
+ should_respond,
+ )
return should_respond
@@ -625,8 +725,12 @@ def update_trigger_time(channel_id: str, user: str) -> None:
# MARK: send_chunked_message
-async def send_chunked_message(channel: DiscordMessageable, text: str, max_len: int = 2000) -> None:
- """Send a message to a channel, splitting into chunks if it exceeds Discord's limit."""
+async def send_chunked_message(
+ channel: DiscordMessageable,
+ text: str,
+ max_len: int = 2000,
+) -> None:
+ """Send a message to a channel, split into chunks if it exceeds Discord's limit."""
if len(text) <= max_len:
await channel.send(text)
return
@@ -674,12 +778,30 @@ class LoviBotClient(discord.Client):
return
# Add the message to memory
- add_message_to_memory(str(message.channel.id), message.author.name, incoming_message)
+ add_message_to_memory(
+ str(message.channel.id),
+ message.author.name,
+ incoming_message,
+ )
lowercase_message: str = incoming_message.lower()
- trigger_keywords: list[str] = ["lovibot", "@lovibot", "<@345000831499894795>", "@grok", "grok"]
- has_trigger_keyword: bool = any(trigger in lowercase_message for trigger in trigger_keywords)
- should_respond_flag: bool = has_trigger_keyword or should_respond_without_trigger(str(message.channel.id), message.author.name)
+ trigger_keywords: list[str] = [
+ "lovibot",
+ "@lovibot",
+ "<@345000831499894795>",
+ "@grok",
+ "grok",
+ ]
+ has_trigger_keyword: bool = any(
+ trigger in lowercase_message for trigger in trigger_keywords
+ )
+ should_respond_flag: bool = (
+ has_trigger_keyword
+ or should_respond_without_trigger(
+ str(message.channel.id),
+ message.author.name,
+ )
+ )
if not should_respond_flag:
return
@@ -704,19 +826,34 @@ class LoviBotClient(discord.Client):
current_channel=message.channel,
user=message.author,
allowed_users=allowed_users,
- all_channels_in_guild=message.guild.channels if message.guild else None,
+ all_channels_in_guild=message.guild.channels
+ if message.guild
+ else None,
)
except openai.OpenAIError as e:
logger.exception("An error occurred while chatting with the AI model.")
- e.add_note(f"Message: {incoming_message}\nEvent: {message}\nWho: {message.author.name}")
- await message.channel.send(f"An error occurred while chatting with the AI model. {e}")
+ e.add_note(
+ f"Message: {incoming_message}\n"
+ f"Event: {message}\n"
+ f"Who: {message.author.name}",
+ )
+ await message.channel.send(
+ f"An error occurred while chatting with the AI model. {e}",
+ )
return
reply: str = response or "I forgor how to think 💀"
if response:
- logger.info("Responding to message: %s with: %s", incoming_message, reply)
+ logger.info(
+ "Responding to message: %s with: %s",
+ incoming_message,
+ reply,
+ )
else:
- logger.warning("No response from the AI model. Message: %s", incoming_message)
+ logger.warning(
+ "No response from the AI model. Message: %s",
+ incoming_message,
+ )
# Record the bot's reply in memory
try:
@@ -729,7 +866,12 @@ class LoviBotClient(discord.Client):
async def on_error(self, event_method: str, /, *args: Any, **kwargs: Any) -> None: # noqa: ANN401, PLR6301
"""Log errors that occur in the bot."""
# Log the error
- logger.error("An error occurred in %s with args: %s and kwargs: %s", event_method, args, kwargs)
+ logger.error(
+ "An error occurred in %s with args: %s and kwargs: %s",
+ event_method,
+ args,
+ kwargs,
+ )
sentry_sdk.capture_exception()
# If the error is in on_message, notify the channel
@@ -737,9 +879,14 @@ class LoviBotClient(discord.Client):
message = args[0]
if isinstance(message, discord.Message):
try:
- await message.channel.send("An error occurred while processing your message. The incident has been logged.")
+ await message.channel.send(
+ "An error occurred while processing your message. The incident has been logged.", # noqa: E501
+ )
except (Forbidden, HTTPException, NotFound):
- logger.exception("Failed to send error message to channel %s", message.channel.id)
+ logger.exception(
+ "Failed to send error message to channel %s",
+ message.channel.id,
+ )
# Everything enabled except `presences`, `members`, and `message_content`.
@@ -753,19 +900,27 @@ client = LoviBotClient(intents=intents)
@app_commands.allowed_installs(guilds=True, users=True)
@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
@app_commands.describe(text="Ask LoviBot a question.")
-async def ask(interaction: discord.Interaction, text: str, new_conversation: bool = False) -> None: # noqa: FBT001, FBT002
+async def ask(
+ interaction: discord.Interaction,
+ text: str,
+ *,
+ new_conversation: bool = False,
+) -> None:
"""A command to ask the AI a question.
Args:
interaction (discord.Interaction): The interaction object.
text (str): The question or message to ask.
- new_conversation (bool, optional): Whether to start a new conversation. Defaults to False.
+ new_conversation (bool, optional): Whether to start a new conversation.
"""
await interaction.response.defer()
if not text:
logger.error("No question or message provided.")
- await interaction.followup.send("You need to provide a question or message.", ephemeral=True)
+ await interaction.followup.send(
+ "You need to provide a question or message.",
+ ephemeral=True,
+ )
return
if new_conversation and interaction.channel is not None:
@@ -777,7 +932,11 @@ async def ask(interaction: discord.Interaction, text: str, new_conversation: boo
# Only allow certain users to interact with the bot
allowed_users: list[str] = get_allowed_users()
if user_name_lowercase not in allowed_users:
- await send_response(interaction=interaction, text=text, response="You are not authorized to use this command.")
+ await send_response(
+ interaction=interaction,
+ text=text,
+ response="You are not authorized to use this command.",
+ )
return
# Record the user's question in memory (per-channel) so DMs have context
@@ -792,11 +951,17 @@ async def ask(interaction: discord.Interaction, text: str, new_conversation: boo
current_channel=interaction.channel,
user=interaction.user,
allowed_users=allowed_users,
- all_channels_in_guild=interaction.guild.channels if interaction.guild else None,
+ all_channels_in_guild=interaction.guild.channels
+ if interaction.guild
+ else None,
)
except openai.OpenAIError as e:
logger.exception("An error occurred while chatting with the AI model.")
- await send_response(interaction=interaction, text=text, response=f"An error occurred: {e}")
+ await send_response(
+ interaction=interaction,
+ text=text,
+ response=f"An error occurred: {e}",
+ )
return
truncated_text: str = truncate_user_input(text)
@@ -817,7 +982,11 @@ async def ask(interaction: discord.Interaction, text: str, new_conversation: boo
max_discord_message_length: int = 2000
if len(display_response) > max_discord_message_length:
for i in range(0, len(display_response), max_discord_message_length):
- await send_response(interaction=interaction, text=text, response=display_response[i : i + max_discord_message_length])
+ await send_response(
+ interaction=interaction,
+ text=text,
+ response=display_response[i : i + max_discord_message_length],
+ )
return
await send_response(interaction=interaction, text=text, response=display_response)
@@ -837,14 +1006,20 @@ async def reset(interaction: discord.Interaction) -> None:
# Only allow certain users to interact with the bot
allowed_users: list[str] = get_allowed_users()
if user_name_lowercase not in allowed_users:
- await send_response(interaction=interaction, text="", response="You are not authorized to use this command.")
+ await send_response(
+ interaction=interaction,
+ text="",
+ response="You are not authorized to use this command.",
+ )
return
# Reset the conversation memory
if interaction.channel is not None:
reset_memory(str(interaction.channel.id))
- await interaction.followup.send(f"Conversation memory has been reset for {interaction.channel}.")
+ await interaction.followup.send(
+ f"Conversation memory has been reset for {interaction.channel}.",
+ )
# MARK: /undo command
@@ -861,21 +1036,33 @@ async def undo(interaction: discord.Interaction) -> None:
# Only allow certain users to interact with the bot
allowed_users: list[str] = get_allowed_users()
if user_name_lowercase not in allowed_users:
- await send_response(interaction=interaction, text="", response="You are not authorized to use this command.")
+ await send_response(
+ interaction=interaction,
+ text="",
+ response="You are not authorized to use this command.",
+ )
return
# Undo the last reset
if interaction.channel is not None:
if undo_reset(str(interaction.channel.id)):
- await interaction.followup.send(f"Successfully restored conversation memory for {interaction.channel}.")
+ await interaction.followup.send(
+ f"Successfully restored conversation memory for {interaction.channel}.",
+ )
else:
- await interaction.followup.send(f"No reset to undo for {interaction.channel}. Either no reset was performed or it was already undone.")
+ await interaction.followup.send(
+ f"No reset to undo for {interaction.channel}. Either no reset was performed or it was already undone.", # noqa: E501
+ )
else:
await interaction.followup.send("Cannot undo: No channel context available.")
# MARK: send_response
-async def send_response(interaction: discord.Interaction, text: str, response: str) -> None:
+async def send_response(
+ interaction: discord.Interaction,
+ text: str,
+ response: str,
+) -> None:
"""Send a response to the interaction, handling potential errors.
Args:
@@ -902,10 +1089,12 @@ def truncate_user_input(text: str) -> str:
text (str): The user input text.
Returns:
- str: The truncated text if it exceeds the maximum length, otherwise the original text.
- """
+ str: Truncated text if it exceeds the maximum length, otherwise the original text.
+ """ # noqa: E501
max_length: int = 2000
- truncated_text: str = text if len(text) <= max_length else text[: max_length - 3] + "..."
+ truncated_text: str = (
+ text if len(text) <= max_length else text[: max_length - 3] + "..."
+ )
return truncated_text
@@ -980,7 +1169,11 @@ def enhance_image2(image: bytes) -> bytes:
enhanced: ImageType = cv2.convertScaleAbs(img_gamma_8bit, alpha=1.2, beta=10)
# Apply very light sharpening
- kernel: ImageType = np.array([[-0.2, -0.2, -0.2], [-0.2, 2.8, -0.2], [-0.2, -0.2, -0.2]])
+ kernel: ImageType = np.array([
+ [-0.2, -0.2, -0.2],
+ [-0.2, 2.8, -0.2],
+ [-0.2, -0.2, -0.2],
+ ])
enhanced = cv2.filter2D(enhanced, -1, kernel)
# Encode the enhanced image to WebP
@@ -1047,7 +1240,10 @@ async def run_in_thread[T](func: Callable[..., T], *args: Any, **kwargs: Any) ->
@client.tree.context_menu(name="Enhance Image")
@app_commands.allowed_installs(guilds=True, users=True)
@app_commands.allowed_contexts(guilds=True, dms=True, private_channels=True)
-async def enhance_image_command(interaction: discord.Interaction, message: discord.Message) -> None:
+async def enhance_image_command(
+ interaction: discord.Interaction,
+ message: discord.Message,
+) -> None:
"""Context menu command to enhance an image in a message."""
await interaction.response.defer()
@@ -1064,7 +1260,9 @@ async def enhance_image_command(interaction: discord.Interaction, message: disco
logger.exception("Failed to read attachment %s", attachment.url)
if not images:
- await interaction.followup.send(f"No images found in the message: \n{message.content=}")
+ await interaction.followup.send(
+ f"No images found in the message: \n{message.content=}",
+ )
return
for image in images:
@@ -1077,9 +1275,18 @@ async def enhance_image_command(interaction: discord.Interaction, message: disco
)
# Prepare files
- file1 = discord.File(fp=io.BytesIO(enhanced_image1), filename=f"enhanced1-{timestamp}.webp")
- file2 = discord.File(fp=io.BytesIO(enhanced_image2), filename=f"enhanced2-{timestamp}.webp")
- file3 = discord.File(fp=io.BytesIO(enhanced_image3), filename=f"enhanced3-{timestamp}.webp")
+ file1 = discord.File(
+ fp=io.BytesIO(enhanced_image1),
+ filename=f"enhanced1-{timestamp}.webp",
+ )
+ file2 = discord.File(
+ fp=io.BytesIO(enhanced_image2),
+ filename=f"enhanced2-{timestamp}.webp",
+ )
+ file3 = discord.File(
+ fp=io.BytesIO(enhanced_image3),
+ filename=f"enhanced3-{timestamp}.webp",
+ )
files: list[discord.File] = [file1, file2, file3]
diff --git a/pyproject.toml b/pyproject.toml
index 45ad980..89bb4ad 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -22,15 +22,21 @@ dependencies = [
dev = ["pytest", "ruff"]
[tool.ruff]
-preview = true
fix = true
+preview = true
unsafe-fixes = true
-lint.select = ["ALL"]
-lint.fixable = ["ALL"]
-lint.pydocstyle.convention = "google"
-lint.isort.required-imports = ["from __future__ import annotations"]
-lint.pycodestyle.ignore-overlong-task-comments = true
+format.docstring-code-format = true
+format.preview = true
+
+lint.future-annotations = true
+lint.isort.force-single-line = true
+lint.pycodestyle.ignore-overlong-task-comments = true
+lint.pydocstyle.convention = "google"
+lint.select = ["ALL"]
+
+# Don't automatically remove unused variables
+lint.unfixable = ["F841"]
lint.ignore = [
"CPY001", # Checks for the absence of copyright notices within Python files.
"D100", # Checks for undocumented public module definitions.
@@ -56,13 +62,8 @@ lint.ignore = [
"Q003", # Checks for strings that include escaped quotes, and suggests changing the quote style to avoid the need to escape them.
"W191", # Checks for indentation that uses tabs.
]
-line-length = 160
-[tool.ruff.format]
-docstring-code-format = true
-docstring-code-line-length = 20
-
[tool.ruff.lint.per-file-ignores]
"**/test_*.py" = [
"ARG", # Unused function args -> fixtures nevertheless are functionally relevant...
diff --git a/tests/test_reset_undo.py b/tests/test_reset_undo.py
index 1a90956..1c82d47 100644
--- a/tests/test_reset_undo.py
+++ b/tests/test_reset_undo.py
@@ -2,15 +2,13 @@ from __future__ import annotations
import pytest
-from main import (
- add_message_to_memory,
- last_trigger_time,
- recent_messages,
- reset_memory,
- reset_snapshots,
- undo_reset,
- update_trigger_time,
-)
+from main import add_message_to_memory
+from main import last_trigger_time
+from main import recent_messages
+from main import reset_memory
+from main import reset_snapshots
+from main import undo_reset
+from main import update_trigger_time
@pytest.fixture(autouse=True)
From 098a0b516e967acbee7e652fabe50302e3206d16 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 20:45:58 +0100
Subject: [PATCH 68/72] Refactor CI workflow in docker-publish.yml: streamline
steps and update job configuration
---
.github/workflows/docker-publish.yml | 48 ++++++++--------------------
1 file changed, 14 insertions(+), 34 deletions(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index e4b4b50..a739313 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -1,4 +1,4 @@
-name: Build Docker Image
+name: CI
on:
push:
@@ -6,50 +6,30 @@ on:
workflow_dispatch:
jobs:
- docker:
- runs-on: ubuntu-latest
+ ci:
+ runs-on: self-hosted
env:
DISCORD_TOKEN: "0"
OPENAI_TOKEN: "0"
- steps:
- # GitHub Container Registry
- - uses: docker/login-action@v4
- if: github.event_name != 'pull_request'
- with:
- registry: ghcr.io
- username: thelovinator1
- password: ${{ secrets.GITHUB_TOKEN }}
- # Download the latest commit from the master branch
+ steps:
- uses: actions/checkout@v6
- # Install the latest version of ruff
- uses: astral-sh/ruff-action@v3
with:
version: "latest"
- # Lint the Python code using ruff
- - run: ruff check --exit-non-zero-on-fix --verbose
+ - name: Install dependencies
+ run: uv sync --all-extras --dev -U
- # Check if the Python code needs formatting
- - run: ruff format --check --verbose
+ - name: Lint the Python code using ruff
+ run: ruff check --exit-non-zero-on-fix --verbose
- # Lint Dockerfile
- - run: docker build --check .
+ - name: Check formatting
+ run: ruff format --check --verbose
- # Extract metadata (tags, labels) from Git reference and GitHub events for Docker
- - id: meta
- uses: docker/metadata-action@v6
- if: github.ref == 'refs/heads/master'
- with:
- images: ghcr.io/thelovinator1/anewdawn
- tags: type=raw,value=latest
+ - name: Lint Dockerfile (build only)
+ run: docker build --check .
- # Build and push the Docker image
- - uses: docker/build-push-action@v7
- if: github.event_name != 'pull_request' && github.ref == 'refs/heads/master'
- with:
- context: .
- push: true
- labels: ${{ steps.meta.outputs.labels }}
- tags: ${{ steps.meta.outputs.tags }}
+ - name: Run tests
+ run: uv run pytest
From 1d49e9c3f99175ce9ba2e62f5fbaef65ce488344 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 20:46:40 +0100
Subject: [PATCH 69/72] Remove ruff-action step from CI workflow in
docker-publish.yml
---
.github/workflows/docker-publish.yml | 4 ----
1 file changed, 4 deletions(-)
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index a739313..be0aa7b 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -15,10 +15,6 @@ jobs:
steps:
- uses: actions/checkout@v6
- - uses: astral-sh/ruff-action@v3
- with:
- version: "latest"
-
- name: Install dependencies
run: uv sync --all-extras --dev -U
From dd2805b7f0586d87b6163bfc18526f0af7c04750 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 20:56:50 +0100
Subject: [PATCH 70/72] Update debugging step in CI workflow to list all files
with human-readable sizes
---
.github/copilot-instructions.md | 12 +++++------
.../workflows/{docker-publish.yml => ci.yml} | 9 +++++---
Dockerfile | 21 -------------------
docker-compose.yml | 9 --------
4 files changed, 12 insertions(+), 39 deletions(-)
rename .github/workflows/{docker-publish.yml => ci.yml} (73%)
delete mode 100644 Dockerfile
delete mode 100644 docker-compose.yml
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index 810d0c1..e43899f 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -14,7 +14,7 @@ ANewDawn is a Discord bot written in Python 3.13+ using the discord.py library a
- **Python**: 3.13 or higher required
- **Package Manager**: Use `uv` for dependency management (see `pyproject.toml`)
-- **Docker**: The project uses Docker for deployment (see `Dockerfile` and `docker-compose.yml`)
+- **Deployment**: The project is designed to run as a systemd service (see `systemd/anewdawn.service`)
- **Environment Variables**: Copy `.env.example` to `.env` and fill in required tokens
## Code Style and Conventions
@@ -57,7 +57,7 @@ ruff format --check --verbose
- `main.py` - Main bot application with all commands and event handlers
- `pyproject.toml` - Project configuration and dependencies
-- `Dockerfile` / `docker-compose.yml` - Container configuration
+- `systemd/` - systemd unit and environment templates
- `.github/workflows/` - CI/CD workflows
## Key Components
@@ -82,10 +82,10 @@ The main bot client is `LoviBotClient` which extends `discord.Client`. It handle
## CI/CD
-The GitHub Actions workflow (`.github/workflows/docker-publish.yml`) runs:
-1. Ruff linting and format check
-2. Dockerfile validation
-3. Docker image build and push to GitHub Container Registry
+The GitHub Actions workflow (`.github/workflows/ci.yml`) runs:
+1. Dependency install via `uv sync`
+2. Ruff linting and format check
+3. Unit tests via `pytest`
## Common Tasks
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/ci.yml
similarity index 73%
rename from .github/workflows/docker-publish.yml
rename to .github/workflows/ci.yml
index be0aa7b..6e69914 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/ci.yml
@@ -15,6 +15,12 @@ jobs:
steps:
- uses: actions/checkout@v6
+ - name: Print the local dir and list files for debugging
+ run: |
+ echo "Current directory: $(pwd)"
+ echo "Files in current directory:"
+ ls -ahl
+
- name: Install dependencies
run: uv sync --all-extras --dev -U
@@ -24,8 +30,5 @@ jobs:
- name: Check formatting
run: ruff format --check --verbose
- - name: Lint Dockerfile (build only)
- run: docker build --check .
-
- name: Run tests
run: uv run pytest
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 5eeae3a..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# syntax=docker/dockerfile:1
-# check=error=true;experimental=all
-FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim
-
-# Change the working directory to the `app` directory
-WORKDIR /app
-
-# Install dependencies
-RUN --mount=type=cache,target=/root/.cache/uv \
- --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
- uv sync --no-install-project
-
-# Copy the application file
-COPY main.py /app/
-
-# Set the environment variables
-ENV PYTHONUNBUFFERED=1
-ENV PYTHONDONTWRITEBYTECODE=1
-
-# Run the application
-CMD ["uv", "run", "main.py"]
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index e8cdcd5..0000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-services:
- anewdawn:
- image: ghcr.io/thelovinator1/anewdawn:latest
- container_name: anewdawn
- env_file: .env
- environment:
- - DISCORD_TOKEN=${DISCORD_TOKEN}
- - OPENAI_TOKEN=${OPENAI_TOKEN}
- restart: unless-stopped
From 0df46133bed4444a96a2e6d23db947a2fe82d9a7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 21:21:33 +0100
Subject: [PATCH 71/72] Update environment variable file and service
configuration
---
systemd/anewdawn.env.example | 7 +------
systemd/anewdawn.service | 4 ++--
2 files changed, 3 insertions(+), 8 deletions(-)
diff --git a/systemd/anewdawn.env.example b/systemd/anewdawn.env.example
index 3e8a586..074a676 100644
--- a/systemd/anewdawn.env.example
+++ b/systemd/anewdawn.env.example
@@ -1,11 +1,6 @@
# Copy this file to /etc/ANewDawn/ANewDawn.env and fill in the required values.
# Make sure the directory is owned by the user running the service (e.g., "lovinator").
-# Discord bot token
DISCORD_TOKEN=
-
-# OpenAI token (for GPT-5 and other OpenAI models)
OPENAI_TOKEN=
-
-# Optional: additional env vars used by your bot
-# MY_CUSTOM_VAR=
+OLLAMA_API_KEY=
diff --git a/systemd/anewdawn.service b/systemd/anewdawn.service
index 6ec9e9a..bfdd50d 100644
--- a/systemd/anewdawn.service
+++ b/systemd/anewdawn.service
@@ -10,13 +10,13 @@ User=lovinator
Group=lovinator
# The project directory containing main.py (update as needed).
-WorkingDirectory=/home/lovinator/Code/ANewDawn
+WorkingDirectory=/home/lovinator/ANewDawn/
# Load environment variables (see systemd/anewdawn.env.example).
EnvironmentFile=/etc/ANewDawn/ANewDawn.env
# Use the python interpreter from your environment (system python is fine if dependencies are installed).
-ExecStart=/usr/bin/env python3 main.py
+ExecStart=/usr/bin/uv run main.py
Restart=on-failure
RestartSec=5
From aa5c7a999a231d8b82aedb7a6d300ecd5021e200 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joakim=20Helle=C5=9Ben?=
Date: Tue, 17 Mar 2026 21:53:51 +0100
Subject: [PATCH 72/72] Remove debugging step from CI workflow and add
deployment process
---
.github/workflows/ci.yml | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6e69914..24fcdc3 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -15,12 +15,6 @@ jobs:
steps:
- uses: actions/checkout@v6
- - name: Print the local dir and list files for debugging
- run: |
- echo "Current directory: $(pwd)"
- echo "Files in current directory:"
- ls -ahl
-
- name: Install dependencies
run: uv sync --all-extras --dev -U
@@ -32,3 +26,18 @@ jobs:
- name: Run tests
run: uv run pytest
+
+ # NOTE: The runner must be allowed to run these commands without a password.
+ # sudo EDITOR=nvim visudo
+ # forgejo-runner ALL=(lovinator) NOPASSWD: /usr/bin/git -C /home/lovinator/ANewDawn pull
+ # forgejo-runner ALL=(root) NOPASSWD: /bin/systemctl restart anewdawn.service
+ # forgejo-runner ALL=(lovinator) NOPASSWD: /usr/bin/uv sync -U --all-extras --dev --directory /home/lovinator/ANewDawn
+ - name: Deploy & restart bot (master only)
+ if: ${{ success() && github.ref == 'refs/heads/master' }}
+ run: |
+ # Keep checkout in the Forgejo runner workspace, whatever that is.
+ # actions/checkout already checks out to the runner's working directory.
+
+ sudo -u lovinator git -C /home/lovinator/ANewDawn pull
+ sudo -u lovinator uv sync -U --all-extras --dev --directory /home/lovinator/ANewDawn
+ sudo systemctl restart anewdawn.service