Compare commits

...

No commits in common. "react-django" and "Phoenix" have entirely different histories.

127 changed files with 0 additions and 11933 deletions

View file

@@ -1,16 +0,0 @@
---
name: Feature request
about: Suggest an improvement or new feature for the web UI
title: ''
labels: 'enhancement'
assignees: ''
---
**Description**
A clear and concise description of what you want to be implemented.
**Additional Context**
If applicable, please provide any extra information, external links, or screenshots that could be useful.

.gitignore vendored (344 deletions)
View file

@@ -1,344 +0,0 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
.cache
# vitepress build output
**/.vitepress/dist
# vitepress cache directory
**/.vitepress/cache
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
# ValKey (Redis)
dump.rdb
# Folders
tmp/
backend/media/
media/

View file

@@ -1,19 +0,0 @@
# Gallery Archivist
**Note:** This is an early prototype and is not intended for use in production.
This is a complete rebuild of the [Gallery Archivist](https://git.aroy-art.com/Aroy/Gallery-Archivist) project.
It features a new frontend built with React and Vite and a
complete restructure of the Django backend so that it only
serves the API and database.
## Trademarks
### External Sites
The logos of external sites used in Gallery-Archivist are
trademarks of their respective owners. The use of these
trademarks does not indicate endorsement of the trademark
holder by the repository, its owners or contributors.
Gallery-Archivist is not endorsed by or affiliated
with any of the trademark holders.

View file

@@ -1,14 +0,0 @@
# Text/doc files
*.md
*.txt
*.log
# Git
.git/
.gitignore
# Folders
media/
# Allow the Python dependency list
!requirements.txt

View file

@@ -1,44 +0,0 @@
# ./backend/Dockerfile
# Use an official Python runtime as a parent image
FROM python:3.12-slim
# --- Add arguments for user/group IDs ---
ARG UID=1000
ARG GID=1000
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
# Set work directory
WORKDIR /app
# --- Create a non-root user and group ---
RUN groupadd -g $GID -o archivist && \
useradd -u $UID -g $GID -o -m -s /bin/bash archivist
# -o allows reusing UID/GID if needed, -m creates home dir, -s sets shell
# Install Python dependencies
# Copy only requirements first to leverage Docker cache
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
# Copy the entrypoint script first
COPY --chown=archivist:archivist ./entrypoint.sh /app/entrypoint.sh
# Ensure it's executable inside the container too
RUN chmod +x /app/entrypoint.sh
# Copy the rest of the backend source code
COPY --chown=archivist:archivist . .
# --- Switch to the non-root archivist user ---
USER archivist
# Set the entrypoint script
ENTRYPOINT ["/app/entrypoint.sh"]
# Set the default command that the entrypoint will execute if none is provided by compose
# This is useful if you run the image directly without compose sometimes
CMD ["python", "manage.py", "runserver", "0.0.0.0:8080"]

View file

@@ -1,3 +0,0 @@
from django.contrib import admin
# Register your models here.

View file

@@ -1,6 +0,0 @@
from django.apps import AppConfig
class ApiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'api'

View file

@@ -1,6 +0,0 @@
from django.apps import AppConfig
class ApiConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "api.authentication"

View file

@@ -1,46 +0,0 @@
# api/authentication/middleware.py
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework_simplejwt.exceptions import InvalidToken, TokenError
class JWTParamMiddleware:
"""
Middleware that allows JWT authentication via query parameters.
This middleware extracts a JWT token from a query parameter named 'token'
and authenticates the user if the token is valid.
"""
def __init__(self, get_response):
self.get_response = get_response
self.jwt_auth = JWTAuthentication()
def __call__(self, request):
self._authenticate_token_param(request)
response = self.get_response(request)
return response
def _authenticate_token_param(self, request):
# Don't authenticate if already authenticated via headers
if hasattr(request, "user") and request.user.is_authenticated:
return
# Get token from the query parameter
token = request.GET.get("token")
if not token:
return
# Validate the token
try:
validated_token = self.jwt_auth.get_validated_token(token)
user = self.jwt_auth.get_user(validated_token)
# Set the authenticated user on the request
request.user = user
# Also set auth in DRF format for API views
request._auth = validated_token
except (InvalidToken, TokenError):
# Don't raise exceptions, just leave as anonymous
pass
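As a sketch of what this middleware enables (host, URL prefix, and hash are assumptions for illustration), a client could embed the access token directly in a media URL instead of sending an Authorization header:

    curl "https://archive.example.com/api/files/<file_hash>/?token=<access_jwt>"

This is mainly useful for <img> and <video> tags, which cannot attach custom headers.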

View file

@@ -1,5 +0,0 @@
from rest_framework import serializers
class LogoutSerializer(serializers.Serializer):
refresh = serializers.CharField()

View file

@@ -1,14 +0,0 @@
from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from api.authentication.views import LogoutView
urlpatterns = [
path("logout/", LogoutView.as_view(), name="logout"),
path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
]
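Assuming these routes are mounted under /api/auth/ (the site-wide prefix is an assumption; the root API URL configuration only shows the auth/ segment), obtaining and refreshing a token pair might look like:

    curl -X POST https://archive.example.com/api/auth/token/ -d "username=alice&password=secret"
    curl -X POST https://archive.example.com/api/auth/token/refresh/ -d "refresh=<refresh_jwt>"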

View file

@@ -1,21 +0,0 @@
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework_simplejwt.tokens import RefreshToken
from api.authentication.serializers import LogoutSerializer
# Logout View
class LogoutView(APIView):
permission_classes = [IsAuthenticated]
serializer_class = LogoutSerializer
def post(self, request):
try:
refresh_token = request.data["refresh"]
token = RefreshToken(refresh_token)
token.blacklist()
return Response({"message": "Logout successful"})
except Exception as e:
return Response({"error": str(e)}, status=400)

View file

@@ -1,6 +0,0 @@
from django.apps import AppConfig
class ApiConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "api.creators"

View file

@@ -1,128 +0,0 @@
from typing import Dict, List, Optional
from rest_framework import serializers
from django.utils.timezone import localtime
from apps.archive.models import CreatorDescription, CreatorModel
class CreatorListSerializer(serializers.ModelSerializer):
source_site = serializers.SerializerMethodField()
date = serializers.SerializerMethodField()
description = serializers.SerializerMethodField()
avatar_url = serializers.SerializerMethodField()
banner_url = serializers.SerializerMethodField()
class Meta:
model = CreatorModel
fields = [
"creator_id",
"source_site",
"slug",
"name",
"mature",
"date",
"description",
"avatar_url",
"banner_url",
]
def get_source_site(self, obj) -> str:
return obj.source_site.slug
def get_date(self, obj) -> Dict[str, str]:
return {
"created": localtime(obj.date_created).isoformat(),
"imported": localtime(obj.date_imported).isoformat(),
"last_import": localtime(obj.date_last_import).isoformat(),
}
def get_description(self, obj) -> Optional[str]:
# Get the most recent CreatorDescription entry
creator_desc = (
CreatorDescription.objects.filter(creator=obj)
.order_by("-date_imported")
.first()
)
if creator_desc:
return creator_desc.description.content
return None # Return None if no descriptions exist
def get_avatar_url(self, obj) -> Optional[str]:
if obj.avatar:
return obj.avatar.file.url
else:
return None
def get_banner_url(self, obj) -> Optional[str]:
if obj.banner:
return obj.banner.file.url
else:
return None
class CreatorDetailsSerializer(serializers.ModelSerializer):
description = serializers.SerializerMethodField()
date = serializers.SerializerMethodField()
avatar_url = serializers.SerializerMethodField()
banner_url = serializers.SerializerMethodField()
categories = serializers.SerializerMethodField()
class Meta:
model = CreatorModel
fields = [
"creator_id",
"slug",
"name",
"mature",
"avatar_url",
"banner_url",
"description",
"date",
"categories",
]
def get_description(self, obj) -> List[Dict[str, str]]:
data = []
# Fetch descriptions ordered by date_imported from the through model
creator_descriptions = CreatorDescription.objects.filter(creator=obj).order_by(
"-date_imported"
)
for creator_desc in creator_descriptions:
data.append(
{
"id": creator_desc.description.id,
"content": creator_desc.description.content,
"date_imported": creator_desc.date_imported,
}
)
return data
def get_avatar_url(self, obj) -> Optional[str]:
if obj.avatar:
return obj.avatar.file.url
else:
return None
def get_banner_url(self, obj) -> Optional[str]:
if obj.banner:
return obj.banner.file.url
else:
return None
def get_date(self, obj) -> Dict[str, str]:
return {
"created": localtime(obj.date_created).isoformat(),
"imported": localtime(obj.date_imported).isoformat(),
"last_import": localtime(obj.date_last_import).isoformat(),
}
def get_categories(self, obj) -> List[Dict[str, str]]:
data = []
for cat in obj.categories.all():
data.append({"name": cat.name, "slug": cat.slug})
return data

View file

@@ -1,12 +0,0 @@
from django.urls import path
from .views import CreatorListView, CreatorDetailsView
urlpatterns = [
path("", CreatorListView.as_view(), name="creator_list"),
path(
"<str:creator_id>/",
CreatorDetailsView.as_view(),
name="creator_details",
),
]

View file

@@ -1,38 +0,0 @@
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.exceptions import NotFound
from rest_framework.permissions import IsAuthenticated
from apps.archive.models import CreatorModel
from .serializers import (
CreatorListSerializer,
CreatorDetailsSerializer,
)
class CreatorListView(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = CreatorListSerializer
def get_queryset(self):
user = self.request.user.userprofile
if user.show_mature:
return CreatorModel.objects.all()
else:
return CreatorModel.objects.filter(mature=False)
class CreatorDetailsView(RetrieveAPIView):
permission_classes = [IsAuthenticated]
serializer_class = CreatorDetailsSerializer
lookup_field = "creator_id"
def get_queryset(self):
user = self.request.user.userprofile
if user.show_mature:
return CreatorModel.objects.all()
else:
return CreatorModel.objects.filter(mature=False)

View file

@@ -1,55 +0,0 @@
from rest_framework import serializers
from apps.files.models import PostFileModel
class PostFileSerializer(serializers.ModelSerializer):
"""Serializer for PostFileModel."""
filename = serializers.SerializerMethodField()
thumbnails = serializers.SerializerMethodField()
download_url = serializers.SerializerMethodField()
class Meta:
model = PostFileModel
fields = [
"hash_blake3",
"file_type",
"file",
"thumbnail",
"filename",
"thumbnails",
"download_url",
]
def get_filename(self, obj):
try:
return obj.name.first().filename
except (AttributeError, IndexError):
return "Unknown"
def get_thumbnails(self, obj):
base_url = f"/api/files/{obj.hash_blake3}/"
thumbnails = {}
for size_key in ["sx", "sm", "md", "lg", "xl"]:
thumbnails[size_key] = f"{base_url}?t={size_key}"
return thumbnails
def get_download_url(self, obj):
return f"/api/files/{obj.hash_blake3}/?d=0"
class FileResponseSerializer(serializers.Serializer):
"""
Dummy serializer for file response schema documentation.
This is only used for OpenAPI schema generation and will never be used to serialize data.
"""
file = serializers.FileField(help_text="The file content")
class ErrorResponseSerializer(serializers.Serializer):
"""
Serializer for error responses.
"""
error = serializers.CharField(help_text="Error message")

View file

@@ -1,9 +0,0 @@
from django.urls import path
from .views import FileServeView, FileDetailView
urlpatterns = [
# Serve the actual file
path("<str:file_hash>/", FileServeView.as_view(), name="serve_file"),
# Get file metadata
path("<str:file_hash>/info/", FileDetailView.as_view(), name="file_info"),
]

View file

@@ -1,168 +0,0 @@
import os
from django.conf import settings
from django.http import FileResponse
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from drf_spectacular.utils import extend_schema, OpenApiParameter, OpenApiResponse
from sorl.thumbnail import get_thumbnail
from apps.files.models import PostFileModel
from .serializers import (
PostFileSerializer,
FileResponseSerializer,
ErrorResponseSerializer,
)
THUMBNAIL_SIZES = {
"sx": (64, ".thumb_64.jpg"),
"sm": (256, ".thumb_256.jpg"),
"md": (748, ".thumb_748.jpg"),
"lg": (1024, ".thumb_1024.jpg"),
"xl": (2048, ".thumb_2048.jpg"),
}
class FileServeView(GenericAPIView):
"""
API view to serve content files for download or inline viewing.
Authentication can be provided via:
1. Authorization header (JWT token)
2. 'token' query parameter (JWT token)
"""
# Set permissions as needed
permission_classes = [IsAuthenticated]
serializer_class = FileResponseSerializer
queryset = PostFileModel.objects.all()
def get_thumbnail_file(self, source_path, size_key):
"""Generates and retrieves the thumbnail file."""
size, suffix = THUMBNAIL_SIZES.get(size_key, (None, ""))
if size:
thumbnail_file = get_thumbnail(source_path, str(size), upscale=False)
return os.path.abspath(
os.path.join(settings.MEDIA_ROOT, thumbnail_file.name)
), suffix
return None, ""
@extend_schema(
parameters=[
OpenApiParameter(
name="d",
description="Download flag (0 = download, otherwise inline)",
required=False,
type=str,
),
OpenApiParameter(
name="t",
description="Thumbnail size (sx, sm, md, lg, xl)",
required=False,
type=str,
),
OpenApiParameter(
name="token",
description="JWT token for authentication (alternative to Authorization header)",
required=False,
type=str,
),
],
responses={
200: OpenApiResponse(description="File returned successfully"),
401: ErrorResponseSerializer,
404: ErrorResponseSerializer,
500: ErrorResponseSerializer,
},
)
def get(self, request, file_hash):
"""Handle GET requests for file serving."""
download = request.query_params.get("d") == "0"
thumbnail_key = request.query_params.get("t")
try:
obj_file = PostFileModel.objects.filter(hash_blake3=file_hash).first()
if not obj_file:
return Response(
{"error": "File not found"}, status=status.HTTP_404_NOT_FOUND
)
file_name = obj_file.name.first().filename
file_type = obj_file.file_type
source_file = obj_file.file
            # For non-image files (video, PDF, etc.), use the pre-generated thumbnail image as the thumbnail source instead of the original file
if thumbnail_key and file_type != "image":
source_file = obj_file.thumbnail
# Retrieve the requested thumbnail file if applicable
if thumbnail_key in THUMBNAIL_SIZES:
thumbnail_path, suffix = self.get_thumbnail_file(
source_file.path, thumbnail_key
)
if thumbnail_path:
file_name += suffix
file = open(thumbnail_path, "rb")
else:
file = source_file.file
else:
file = source_file.file
response = FileResponse(file)
disposition_type = "attachment" if download else "inline"
response["Content-Disposition"] = (
f'{disposition_type}; filename="{file_name}"'
)
return response
except Exception as e:
return Response(
{"error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
class FileDetailView(GenericAPIView):
"""
API view to get file metadata without serving the actual file.
Authentication can be provided via:
1. Authorization header (JWT token)
2. 'token' query parameter (JWT token)
"""
permission_classes = [IsAuthenticated]
serializer_class = PostFileSerializer
queryset = PostFileModel.objects.all()
@extend_schema(
parameters=[
OpenApiParameter(
name="token",
description="JWT token for authentication (alternative to Authorization header)",
required=False,
type=str,
)
],
responses={
200: PostFileSerializer,
401: ErrorResponseSerializer,
404: ErrorResponseSerializer,
500: ErrorResponseSerializer,
},
)
def get(self, request, file_hash):
"""Return file metadata."""
try:
obj_file = PostFileModel.objects.filter(hash_blake3=file_hash).first()
if not obj_file:
return Response(
{"error": "File not found"}, status=status.HTTP_404_NOT_FOUND
)
serializer = self.get_serializer(obj_file)
return Response(serializer.data)
except Exception as e:
return Response(
{"error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
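For illustration, the query parameters documented above combine with a file hash like this (host and prefix assumed):

    curl "https://archive.example.com/api/files/<file_hash>/?t=md&token=<access_jwt>"    # medium thumbnail, served inline
    curl -O "https://archive.example.com/api/files/<file_hash>/?d=0&token=<access_jwt>"  # original file as a download
    curl "https://archive.example.com/api/files/<file_hash>/info/?token=<access_jwt>"    # JSON metadata only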

View file

@@ -1,3 +0,0 @@
from django.db import models
# Create your models here.

View file

@@ -1,6 +0,0 @@
from django.apps import AppConfig
class ApiConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "api.posts"

View file

@@ -1,168 +0,0 @@
from typing import Dict, List, Optional
from rest_framework import serializers
from django.urls import reverse
from django.utils.timezone import localtime
from apps.archive.models import PostModel
class PostPreviewSerializer(serializers.ModelSerializer):
title = serializers.SerializerMethodField()
description = serializers.SerializerMethodField()
date = serializers.SerializerMethodField()
media = serializers.SerializerMethodField()
media_count = serializers.SerializerMethodField()
source_site = serializers.SerializerMethodField()
category = serializers.SerializerMethodField()
creator = serializers.SerializerMethodField()
tags = serializers.SerializerMethodField()
class Meta:
model = PostModel
fields = [
"post_id",
"mature",
"title",
"description",
"source_site",
"category",
"creator",
"date",
"media_count",
"media",
"tags",
]
def get_title(self, obj: PostModel) -> Optional[str]:
return obj.title.first().content if obj.title.exists() else None
def get_description(self, obj: PostModel) -> Optional[str]:
return obj.description.first().content if obj.description.exists() else None
def get_source_site(self, obj: PostModel) -> Dict[str, str]:
return {
"slug": obj.source_site.slug,
"name": obj.source_site.name,
}
def get_category(self, obj: PostModel) -> List[Dict[str, str]]:
return [{"slug": i.slug, "name": i.name} for i in obj.category.all()]
def get_creator(self, obj: PostModel) -> Dict[str, Optional[str]]:
return {
"slug": obj.creator.slug,
"name": obj.creator.name,
"avatar": obj.creator.avatar.hash_blake3 if obj.creator.avatar else None,
"blur_hash": obj.creator.avatar.blur_hash if obj.creator.avatar else None,
}
def get_date(self, obj: PostModel) -> Dict[str, str]:
return {
"created": localtime(obj.date_created).isoformat(),
"imported": localtime(obj.date_imported).isoformat(),
"last_import": localtime(obj.date_last_import).isoformat(),
}
def get_media(self, obj: PostModel) -> List[Dict[str, str]]:
data = []
for i in obj.files.all():
data.append(
{
"type": i.file_type,
"hash": i.hash_blake3,
"blur_hash": i.blur_hash,
}
)
return data
def get_media_count(self, obj: PostModel) -> int:
return obj.files.count()
def get_tags(self, obj: PostModel) -> List[str]:
return [tag.slug for tag in obj.tags.all()]
class PostSerializer(serializers.ModelSerializer):
source_site = serializers.SerializerMethodField()
title = serializers.SerializerMethodField()
description = serializers.SerializerMethodField()
creator = serializers.SerializerMethodField()
tags = serializers.SerializerMethodField()
date = serializers.SerializerMethodField()
media = serializers.SerializerMethodField()
class Meta:
model = PostModel
fields = [
"post_id",
"title",
"description",
"source_site",
"creator",
"date",
"mature",
"media",
"tags",
]
def get_source_site(self, obj) -> Dict[str, str]:
return {
"slug": obj.source_site.slug,
"name": obj.source_site.name,
}
def get_title(self, obj) -> Dict[str, Optional[str]]:
title = obj.title.first().content if obj.title.exists() else None
if not title:
count = 0
else:
count = obj.title.count()
return {
"count": count,
"content": title,
}
def get_description(self, obj) -> Dict[str, str]:
        desc = obj.description.first().content if obj.description.exists() else ""
if desc == "":
count = 0
else:
count = obj.description.count()
return {
"count": count,
"content": desc,
}
def get_creator(self, obj) -> Dict[str, Optional[str]]:
return {
"slug": obj.creator.slug,
"name": obj.creator.name,
"avatar": obj.creator.avatar.hash_blake3 if obj.creator.avatar else None,
}
def get_tags(self, obj) -> List[str]:
return [tag.slug for tag in obj.tags.all()]
def get_date(self, obj) -> Dict[str, str]:
return {
"created": localtime(obj.date_created).isoformat(),
"imported": localtime(obj.date_imported).isoformat(),
"last_import": localtime(obj.date_last_import).isoformat(),
}
def get_media(self, obj) -> List[Dict[str, str]]:
data = []
for i in obj.files.all():
data.append(
{
"type": i.file_type,
"mimetype": i.mimetype,
"size": i.file.size,
"hash": i.hash_blake3,
"blur_hash": i.blur_hash,
}
)
return data

View file

@@ -1,30 +0,0 @@
from django.urls import path
from api.posts.views import (
PostDetailView,
PostDetailSiteCreatorView,
PostListView,
PostListSourceView,
PostListSourceCreatorView,
PostListSourceCategoryView,
)
urlpatterns = [
path("<str:post_id>/", PostDetailView.as_view(), name="post_detail"),
path(
"<str:source_site>/<str:creator_slug_or_id>/<str:post_id>/",
PostDetailSiteCreatorView.as_view(),
name="post_detail_with_site_creator",
),
path("", PostListView.as_view(), name="post_list"),
path("<str:source_site>/", PostListSourceView.as_view(), name="post_list_source"),
path(
"<str:source_site>/<str:creator_slug_or_id>/",
PostListSourceCreatorView.as_view(),
name="post_list_source_creator",
),
path(
"<str:source_site>/<str:creator_slug_or_id>/<str:category>",
PostListSourceCategoryView.as_view(),
name="post_list_source_creator_category",
),
]

View file

@@ -1,104 +0,0 @@
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from drf_spectacular.utils import extend_schema
from apps.archive.models import PostModel
from .serializers import PostPreviewSerializer, PostSerializer
from rest_framework.pagination import PageNumberPagination
class PostListPagination(PageNumberPagination):
page_size = 10 # number of items per page
page_size_query_param = "page_size" # allows clients to specify page size
max_page_size = 100 # maximum page size allowed
def get_paginated_response(self, data):
return Response(
{
"count": self.page.paginator.count,
"totalPages": self.page.paginator.num_pages, # total number of pages
"next": self.get_next_link(),
"previous": self.get_previous_link(),
"results": data,
}
)
class PostListView(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = (
PostPreviewSerializer # Each post will be serialized using this serializer
)
pagination_class = PostListPagination
def get_queryset(self):
user = self.request.user.userprofile
queryset = PostModel.objects.all()
# Apply mature filtering
if not user.show_mature:
queryset = queryset.filter(mature=False)
# Extract optional parameters
source_site = self.kwargs.get("source_site")
creator_slug_or_id = self.kwargs.get("creator_slug_or_id")
category = self.kwargs.get("category")
# Filter by source_site if provided
if source_site:
queryset = queryset.filter(source_site__slug=source_site)
# Filter by creator (either by slug or id)
if creator_slug_or_id:
queryset = queryset.filter(
creator__slug=creator_slug_or_id
) | queryset.filter(creator__creator_id=creator_slug_or_id)
if category:
queryset = queryset.filter(category__slug=category)
return queryset.order_by("-date_created")
@extend_schema(operation_id="posts_list_all")
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PostListSourceView(PostListView):
@extend_schema(operation_id="posts_list_by_source")
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PostListSourceCreatorView(PostListView):
@extend_schema(operation_id="posts_list_by_source_and_creator")
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PostListSourceCategoryView(PostListView):
@extend_schema(operation_id="posts_list_by_source_creator_and_category")
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PostDetailView(RetrieveAPIView):
permission_classes = [IsAuthenticated]
serializer_class = PostSerializer
lookup_field = (
"post_id" # This tells DRF to use the "post_id" URL kwarg for lookups.
)
queryset = PostModel.objects.all()
@extend_schema(operation_id="posts_retrieve_by_id")
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PostDetailSiteCreatorView(PostDetailView):
@extend_schema(operation_id="posts_retrieve_by_site_creator_id")
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
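A sketch of how the list endpoints and pagination above might be queried (host and prefix assumed):

    curl -H "Authorization: Bearer <access_jwt>" "https://archive.example.com/api/posts/?page=2&page_size=25"
    curl -H "Authorization: Bearer <access_jwt>" "https://archive.example.com/api/posts/furaffinity/<creator_slug>/"

page_size is capped at 100 by max_page_size, and the response wraps results with count, totalPages, next, and previous.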

View file

@@ -1,6 +0,0 @@
from django.apps import AppConfig
class ApiConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "api.schema"

View file

@@ -1,22 +0,0 @@
from django.urls import path
from drf_spectacular.views import (
SpectacularAPIView,
SpectacularRedocView,
SpectacularSwaggerView,
)
urlpatterns = [
path("", SpectacularAPIView.as_view(), name="schema"),
# Optional UI:
path(
"swagger-ui/",
SpectacularSwaggerView.as_view(url_name="schema"),
name="swagger-ui",
),
path(
"redoc/",
SpectacularRedocView.as_view(url_name="schema"),
name="redoc",
),
]

View file

@@ -1,11 +0,0 @@
from django.urls import path, include
urlpatterns = [
path("schema/", include("api.schema.urls")),
path("auth/", include("api.authentication.urls")),
path("user/", include("api.user.urls")),
path("posts/", include("api.posts.urls")),
path("creators/", include("api.creators.urls")),
path("files/", include("api.files.urls")),
]

View file

@@ -1,10 +0,0 @@
from django.contrib import admin
from .models import UserProfile
class UserProfileAdmin(admin.ModelAdmin):
list_display = ("user", "show_mature")
admin.site.register(UserProfile, UserProfileAdmin)

View file

@@ -1,6 +0,0 @@
from django.apps import AppConfig
class UserConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "api.user"

View file

@@ -1,17 +0,0 @@
# api/user/management/commands/backfill_user_profiles.py
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from api.user.models import UserProfile # assuming you have a UserProfile model
class Command(BaseCommand):
help = "Backfill user profiles for existing users"
def handle(self, *args, **options):
users = User.objects.all()
for user in users:
if not UserProfile.objects.filter(user=user).exists():
# create a new user profile
UserProfile.objects.create(user=user)
self.stdout.write(f"Created user profile for {user.username}")
self.stdout.write("Backfill complete")

View file

@@ -1,15 +0,0 @@
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)
show_mature = models.BooleanField(default=False)
class Meta:
verbose_name = "User Profile"
verbose_name_plural = "User Profiles"
def __str__(self):
return self.user.username

View file

@@ -1,42 +0,0 @@
# api/user/serializers.py
from rest_framework import serializers
from .models import UserProfile
from django.contrib.auth.models import User
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ["show_mature"]
def update(self, instance, validated_data):
# Use setattr to assign values to the instance fields
for attr, value in validated_data.items():
setattr(instance, attr, value)
instance.save() # Save the changes to the database
return instance
class UserSerializer(serializers.ModelSerializer):
profile = UserProfileSerializer(source="userprofile", read_only=False)
class Meta:
model = User
fields = ["username", "email", "first_name", "last_name", "profile"]
def update(self, instance, validated_data):
# Extract profile data if it exists
profile_data = validated_data.pop("userprofile", None)
# Update the UserProfile instance
if profile_data:
userprofile_instance = instance.userprofile # The related UserProfile instance
profile_serializer = self.fields["profile"] # Get the nested serializer
profile_serializer.update(userprofile_instance, profile_data) # Update the UserProfile
# Update the User fields
for attr, value in validated_data.items():
setattr(instance, attr, value)
instance.save() # Save the User instance
return instance

View file

@@ -1,3 +0,0 @@
from django.test import TestCase
# Create your tests here.

View file

@@ -1,7 +0,0 @@
from django.urls import path
from .views import ProfileView
urlpatterns = [
path("profile/", ProfileView.as_view(), name="profile"),
]

View file

@@ -1,13 +0,0 @@
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.permissions import IsAuthenticated
from .serializers import UserSerializer
class ProfileView(RetrieveUpdateAPIView):
"""Retrieve and update the authenticated user's profile."""
permission_classes = [IsAuthenticated]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
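A sketch of updating the nested profile through this endpoint (host and prefix assumed):

    curl -X PATCH https://archive.example.com/api/user/profile/ \
         -H "Authorization: Bearer <access_jwt>" \
         -H "Content-Type: application/json" \
         -d '{"profile": {"show_mature": true}}'

The custom update() in UserSerializer pops the nested userprofile data and applies it to the related UserProfile instance.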

View file

@@ -1,3 +0,0 @@
from django.contrib import admin
# Register your models here.

View file

@@ -1,6 +0,0 @@
from django.apps import AppConfig
class AppsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'apps'

View file

@@ -1,91 +0,0 @@
from django.contrib import admin
from django.utils.dateformat import format
from django.utils.timezone import localtime
from .models import SourceSiteModel, CreatorModel, DescriptionModel, TagModel, PostModel
class SourceSiteAdmin(admin.ModelAdmin):
list_display = ["slug", "name", "date_last_import", "date_created"]
admin.site.register(SourceSiteModel, SourceSiteAdmin)
class DescriptionAdmin(admin.ModelAdmin):
list_display = ["hash", "description_at", "date_modified", "date_created"]
def description_at(self, obj):
if len(str(obj.content)) >= 80:
return obj.content[:77] + "..."
else:
return obj.content
description_at.short_description = "Description"
admin.site.register(DescriptionModel, DescriptionAdmin)
class TagAdmin(admin.ModelAdmin):
list_display = ["slug", "name", "date_modified", "date_created"]
admin.site.register(TagModel, TagAdmin)
class CreatorAdmin(admin.ModelAdmin):
list_display = [
"slug",
"name",
"source_site",
"date_created",
"date_last_import",
]
admin.site.register(CreatorModel, CreatorAdmin)
class PostAdmin(admin.ModelAdmin):
list_display = [
"post_id",
"source_site",
"creator",
"title_at",
"description_at",
"mature",
"date_created_fromated",
"date_imported_formated",
"date_last_import_formated",
]
@admin.display(description="Title")
def title_at(self, obj):
if obj.title.first() is not None:
if len(str(obj.title.first().content)) >= 80:
return obj.title.first().content[:77] + "..."
else:
return obj.title.first().content
@admin.display(description="Description")
    def description_at(self, obj):
        description = obj.description.first()
        if description is None:
            return None
        if len(str(description.content)) >= 80:
            return description.content[:77] + "..."
        return description.content
@admin.display(description="Date Created")
    def date_created_formatted(self, obj):
return format(localtime(obj.date_created), "Y-m-d H:i:s")
@admin.display(description="Date Imported")
    def date_imported_formatted(self, obj):
return format(localtime(obj.date_imported), "Y-m-d H:i:s")
@admin.display(description="Last Import Date")
    def date_last_import_formatted(self, obj):
return format(localtime(obj.date_last_import), "Y-m-d H:i:s")
admin.site.register(PostModel, PostAdmin)

View file

@@ -1,24 +0,0 @@
from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.dispatch import receiver
class ArchiveConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "apps.archive"
@receiver(post_migrate)
def insert_default_data(sender, **kwargs):
from apps.archive.models import SourceSiteModel
    source_sites = {
        "furaffinity": "FurAffinity",
        "deviantart": "DeviantArt",
        "twitter": "Twitter/X",
    }
    if sender.name == ArchiveConfig.name:
        for key, value in source_sites.items():
SourceSiteModel.objects.get_or_create(slug=key, name=value)

View file

@@ -1,758 +0,0 @@
from datetime import datetime
import os
import json
import logging
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
from django.utils import timezone
from tqdm.auto import tqdm
from PIL import Image as PillowImage
from django.core.management.base import BaseCommand
from apps.files.models import (
FileNameModel,
PostFileModel,
)
from apps.archive.models import (
SourceSiteModel,
CategoryModel,
CreatorModel,
PostModel,
TitleModel,
CreatorTitle,
PostTitle,
DescriptionModel,
CreatorDescription,
PostDescription,
TagModel,
)
from utils.hash import compute_string_hash_blake3, compute_file_hash_blake3
from utils.files import get_mime_type, categorize_mime_type
from apps.files.tasks import (
generate_blur_hash_PostFile,
generate_md5_hash_PostFile,
generate_video_thumbnail,
generate_pdf_thumbnail,
)
class BaseImporter(ABC):
"""Base abstract class for all site importers."""
def __init__(self, command_instance):
self.command = command_instance
self.logger = logging.getLogger(f"importer.{self.__class__.__name__}")
@abstractmethod
def import_data(
self, data: Dict[str, Any], file_path_json: str, delete: bool
) -> None:
"""Import data from JSON file into the database."""
pass
def log_info(self, message: str) -> None:
"""Log an informational message."""
tqdm.write(message)
self.logger.info(message)
def log_success(self, message: str) -> None:
"""Log a success message."""
styled_message = self.command.style.SUCCESS(message)
tqdm.write(styled_message)
self.logger.info(message)
def log_error(self, message: str) -> None:
"""Log an error message."""
styled_message = self.command.style.ERROR(message)
tqdm.write(styled_message)
self.logger.error(message)
def log_warning(self, message: str) -> None:
"""Log a warning message."""
styled_message = self.command.style.WARNING(message)
tqdm.write(styled_message)
self.logger.warning(message)
def get_or_create_source_site(self, category: str) -> SourceSiteModel:
"""Get or create a source site model instance."""
source_site_instance, _ = SourceSiteModel.objects.get_or_create(slug=category)
source_site_instance.save()
return source_site_instance
def import_file(
self, file_path: str, delete: bool = False
) -> Optional[PostFileModel]:
"""
        Import a file if it doesn't already exist in the database and return the instance.
Args:
file_path: The path to the file to import.
delete: Whether to delete the imported file after processing.
Returns:
The file instance or None if file doesn't exist.
"""
if not os.path.exists(file_path):
self.log_warning(f"File not found: {file_path}")
return None
try:
file_hash = compute_file_hash_blake3(file_path, logger=self.command)
file_name = os.path.basename(file_path)
_, file_ext = os.path.splitext(file_name)
hash_file_name = file_hash + file_ext
# Get or create file name
file_name_instance, _ = FileNameModel.objects.get_or_create(
filename=file_name
)
# Get or create file
file_instance, created = PostFileModel.objects.get_or_create(
hash_blake3=file_hash
)
if created or not file_instance.file:
with open(file_path, "rb") as file:
file_instance.file.save(hash_file_name, file)
# Add file metadata
file_instance.name.add(file_name_instance)
file_instance.extension = file_ext
file_instance.size = os.path.getsize(file_path)
file_mime = get_mime_type(file_path)
file_type = categorize_mime_type(file_mime)
file_instance.file_type = file_type
file_instance.mimetype = file_mime
file_instance.save()
# Process image-specific properties
if file_instance.mimetype.startswith("image/"):
# Add Image blur hash if not existing
if not file_instance.blur_hash:
generate_blur_hash_PostFile.delay(file_instance.id)
# Get image resolution
try:
im = PillowImage.open(file_instance.file)
                    file_instance.width, file_instance.height = im.size
file_instance.save()
except Exception as e:
self.log_error(f"Error getting image dimensions: {str(e)}")
# Process video thumbnails
if file_instance.file_type in ["video", "gif"]:
if not file_instance.thumbnail:
generate_video_thumbnail.delay(file_instance.id)
# Process PDF thumbnails
if file_instance.file_type in ["pdf"]:
if not file_instance.thumbnail:
generate_pdf_thumbnail.delay(file_instance.id)
# Generate MD5 hash if not exists
if not file_instance.hash_md5:
generate_md5_hash_PostFile.delay(file_instance.id)
if created:
self.log_success(f"Imported: {file_path} file, new instance created")
else:
self.log_success(f"Imported: {file_path} file, instance updated")
# Delete the imported file if the --delete flag is used
if delete and os.path.exists(file_path):
os.remove(file_path)
self.log_success(f"Deleted: {file_path}")
return file_instance
except Exception as e:
self.log_error(f"Error importing file {file_path}: {str(e)}")
return None
def add_title(
self,
title_text: str,
date_str: str,
date_format: str,
owner_instance,
owner_type: str,
file_date,
) -> None:
"""
Add title to a post or creator.
Args:
title_text: The title text to add
owner_instance: The post or creator instance
owner_type: Either 'post' or 'creator'
"""
try:
title_hash = compute_string_hash_blake3(title_text, logger=self.command)
title_instance, created = TitleModel.objects.get_or_create(hash=title_hash)
if created:
title_instance.content = title_text
title_instance.date_created = timezone.make_aware(
datetime.strptime(date_str, date_format)
)
title_instance.save()
if owner_type == "creator":
relation, created = CreatorTitle.objects.get_or_create(
creator=owner_instance, title=title_instance
)
else: # post
relation, created = PostTitle.objects.get_or_create(
post=owner_instance, title=title_instance
)
relation.date_imported = timezone.make_aware(
datetime.fromtimestamp(file_date)
)
relation.save()
if owner_type == "post":
owner_instance.title.add(title_instance)
except Exception as e:
self.log_error(f"Error adding description: {str(e)}")
def add_description(
self,
description_text: str,
date_str: str,
date_format: str,
owner_instance,
owner_type: str,
file_date,
) -> None:
"""
Add description to a post or creator.
Args:
description_text: The description text to add
date_str: Date string of when the description was created
date_format: Format of the date string
owner_instance: The post or creator instance
owner_type: Either 'post' or 'creator'
file_date: Timestamp of the file for imported date
"""
try:
description_hash = compute_string_hash_blake3(
description_text, logger=self.command
)
description_instance, created = DescriptionModel.objects.get_or_create(
hash=description_hash
)
if created:
description_instance.content = description_text
description_instance.date_created = timezone.make_aware(
datetime.strptime(date_str, date_format)
)
description_instance.save()
if owner_type == "creator":
relation, created = CreatorDescription.objects.get_or_create(
creator=owner_instance, description=description_instance
)
else: # post
relation, created = PostDescription.objects.get_or_create(
post=owner_instance, description=description_instance
)
relation.date_imported = timezone.make_aware(
datetime.fromtimestamp(file_date)
)
relation.save()
if owner_type == "post":
owner_instance.description.add(description_instance)
except Exception as e:
self.log_error(f"Error adding description: {str(e)}")
def add_tags(self, tags_list, post_instance):
"""Add tags to a post."""
for tag in tags_list:
try:
tag_instance, created = TagModel.objects.get_or_create(slug=tag)
if created or not tag_instance.name:
tag_instance.name = tag
tag_instance.save()
post_instance.tags.add(tag_instance)
except Exception as e:
self.log_error(f"Error adding tag '{tag}': {str(e)}")
def ensure_boolean_field(self, value, default=False):
"""Convert potentially null/None values to boolean."""
if value is None:
return default
return bool(value)
class TwitterImporter(BaseImporter):
"""Importer for Twitter data."""
def import_data(
self, data: Dict[str, Any], file_path_json: str, delete: bool
) -> None:
"""Import Twitter data from JSON into the database."""
try:
category = data.get("category", "twitter")
source_site_instance = self.get_or_create_source_site(category)
# Process creator if present
creator_instance = None
if "author" in data:
creator_instance = self._process_creator(
data, source_site_instance, file_path_json
)
# Get subcategory if available
category_instance = None
if "subcategory" in data:
category_instance = self._process_category(data)
# Process the post
self._process_post(
data,
source_site_instance,
creator_instance,
category_instance,
file_path_json,
delete,
)
except Exception as e:
self.log_error(f"Error importing Twitter data: {str(e)}")
def _process_creator(self, data, source_site_instance, file_path_json):
"""Process creator data for Twitter."""
creator_instance, _ = CreatorModel.objects.get_or_create(
slug=data["author"]["name"], source_site=source_site_instance
)
creator_instance.creator_id = data["author"]["id"]
creator_instance.name = data["author"]["nick"]
# Add creator description if available
if "description" in data["author"]:
self.add_description(
description_text=data["author"]["description"],
date_str=data["author"]["date"],
date_format="%Y-%m-%d %H:%M:%S",
owner_instance=creator_instance,
owner_type="creator",
file_date=os.path.getmtime(file_path_json),
)
creator_instance.date_created = timezone.make_aware(
datetime.strptime(data["author"]["date"], "%Y-%m-%d %H:%M:%S")
)
creator_instance.save()
return creator_instance
def _process_category(self, data):
"""Process category data."""
category_instance, created = CategoryModel.objects.get_or_create(
slug=data["subcategory"]
)
if created:
category_instance.name = data["subcategory"].capitalize()
category_instance.save()
return category_instance
def _process_post(
self,
data,
source_site_instance,
creator_instance,
category_instance,
file_path_json,
delete,
):
"""Process post data for Twitter."""
post_instance, _ = PostModel.objects.get_or_create(
post_id=data["tweet_id"],
source_site=source_site_instance,
defaults={
# Set a default for mature to avoid null constraint error
"mature": False
},
)
if category_instance:
if creator_instance:
creator_instance.refresh_from_db()
creator_instance.categories.add(category_instance)
creator_instance.save()
post_instance.category.add(category_instance)
# Link creator
if creator_instance:
post_instance.creator = creator_instance
post_instance.save()
post_instance.date_created = timezone.make_aware(
datetime.strptime(data["date"], "%Y-%m-%d %H:%M:%S")
)
# Set mature flag if available
if "sensitive" in data:
post_instance.mature = self.ensure_boolean_field(data.get("sensitive"))
# Add post description if available
if "content" in data:
self.add_description(
description_text=data["content"],
date_str=data["date"],
date_format="%Y-%m-%d %H:%M:%S",
owner_instance=post_instance,
owner_type="post",
file_date=os.path.getmtime(file_path_json),
)
# Add hashtags if available
if "hashtags" in data:
self.add_tags(data["hashtags"], post_instance)
# Import the file
file_path = file_path_json.removesuffix(".json")
file_instance = self.import_file(file_path, delete)
if file_instance:
post_instance.files.add(file_instance)
# Handle profile images
if category_instance:
if category_instance.slug == "avatar" and creator_instance:
creator_instance.refresh_from_db()
creator_instance.avatar = file_instance
creator_instance.save()
if category_instance.slug == "background" and creator_instance:
creator_instance.refresh_from_db()
creator_instance.banner = file_instance
creator_instance.save()
post_instance.save()
class FurAffinityImporter(BaseImporter):
"""Importer for FurAffinity data."""
def import_data(
self, data: Dict[str, Any], file_path_json: str, delete: bool
) -> None:
"""Import FurAffinity data from JSON into the database."""
try:
category = data.get("category", "furaffinity")
source_site_instance = self.get_or_create_source_site(category)
# Process creator
creator_instance = self._process_creator(data, source_site_instance)
# Process category
category_instance = self._process_category(data)
# Process post
self._process_post(
data,
source_site_instance,
creator_instance,
category_instance,
file_path_json,
delete,
)
except Exception as e:
self.log_error(f"Error importing FurAffinity data: {str(e)}")
def _process_creator(self, data, source_site_instance):
"""Process creator data for FurAffinity."""
# Use artist if available, otherwise fall back to user field
artist = data.get("artist", "")
artist_url = data.get("artist_url", artist.lower())
if not artist_url and "user" in data:
artist_url = data.get("user", "")
creator_instance, _ = CreatorModel.objects.get_or_create(
slug=artist_url, source_site=source_site_instance
)
if artist:
creator_instance.name = artist
else:
creator_instance.name = artist_url
creator_instance.creator_id = artist_url
# We don't have creator creation date in FurAffinity data
# Using post date as an approximation
if "date" in data:
creator_instance.date_created = timezone.make_aware(
datetime.strptime(data["date"], "%Y-%m-%d %H:%M:%S")
)
creator_instance.save()
return creator_instance
def _process_category(self, data):
"""Process category data for FurAffinity."""
subcategory = data.get("subcategory", "gallery")
category_instance, created = CategoryModel.objects.get_or_create(
slug=subcategory
)
if created:
category_instance.name = subcategory.capitalize()
# Process FA-specific categories
if "fa_category" in data:
fa_category = data["fa_category"]
fa_category_instance, _ = CategoryModel.objects.get_or_create(
slug=fa_category.lower().replace(" ", "_")
)
fa_category_instance.name = fa_category
fa_category_instance.save()
category_instance.save()
return category_instance
def _process_post(
self,
data,
source_site_instance,
creator_instance,
category_instance,
file_path_json,
delete,
):
"""Process post data for FurAffinity."""
post_id = str(data.get("id", ""))
post_instance, _ = PostModel.objects.get_or_create(
post_id=post_id, source_site=source_site_instance
)
# Add category
if category_instance:
post_instance.category.add(category_instance)
# Add category to creator
if creator_instance:
creator_instance.refresh_from_db()
creator_instance.categories.add(category_instance)
creator_instance.save()
# Link creator
if creator_instance:
post_instance.creator = creator_instance
post_instance.save()
# Set creation date
if "date" in data:
post_instance.date_created = timezone.make_aware(
datetime.strptime(data["date"], "%Y-%m-%d %H:%M:%S")
)
# Set mature content flag based on rating
rating = data.get("rating", "").lower()
post_instance.mature = rating in ["mature", "adult"]
# Add title
title_text = data.get("title", "")
if title_text:
self.add_title(
title_text=title_text,
date_str=data["date"],
date_format="%Y-%m-%d %H:%M:%S",
owner_instance=post_instance,
owner_type="post",
file_date=os.path.getmtime(file_path_json),
)
# Add description
description_text = data.get("description", "")
if description_text:
self.add_description(
description_text=description_text,
date_str=data["date"],
date_format="%Y-%m-%d %H:%M:%S",
owner_instance=post_instance,
owner_type="post",
file_date=os.path.getmtime(file_path_json),
)
# Add tags
if "tags" in data:
self.add_tags(data["tags"], post_instance)
# Add species as a special tag if present
if "species" in data and data["species"] not in [
"Unspecified / Any",
"Any",
]:
species_tags = [s.strip() for s in data["species"].split("/")]
self.add_tags(species_tags, post_instance)
# Add gender as a special tag if present
if "gender" in data and data["gender"] not in ["Unspecified / Any", "Any"]:
gender_tags = [g.strip() for g in data["gender"].split("/")]
self.add_tags(gender_tags, post_instance)
# Add metadata as JSON field if your model supports it
metadata = {}
for field in ["views", "favorites", "comments", "theme", "fa_category"]:
if field in data:
metadata[field] = data[field]
# If your PostModel has a metadata JSONField, uncomment this
# post_instance.metadata = metadata
# Import the file
file_path = file_path_json.removesuffix(".json")
# Check if the file exists, otherwise try to construct from filename and extension
if not os.path.exists(file_path) and "filename" in data and "extension" in data:
alt_file_path = f"{os.path.dirname(file_path_json)}/{data['filename']}.{data['extension']}"
file_instance = self.import_file(alt_file_path, delete)
else:
file_instance = self.import_file(file_path, delete)
if file_instance:
post_instance.files.add(file_instance)
# Add known image dimensions if available
if not file_instance.width and "width" in data:
file_instance.width = data.get("width")
if not file_instance.height and "height" in data:
file_instance.height = data.get("height")
if "width" in data or "height" in data:
file_instance.save()
post_instance.save()
class Command(BaseCommand):
help = (
"Import data from JSON files in a folder or a single JSON file to the archive"
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.importers = {
"twitter": TwitterImporter(self),
"furaffinity": FurAffinityImporter(self),
}
# Set up logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
filename="import.log",
)
self.logger = logging.getLogger("import_command")
def add_arguments(self, parser):
parser.add_argument(
"path",
type=str,
help="Path to the folder containing JSON files or a single JSON file",
)
parser.add_argument(
"--delete", action="store_true", help="Delete imported files"
)
parser.add_argument(
"--site",
type=str,
choices=list(self.importers.keys()),
help="Only import files for the specified site",
)
def handle(self, *args, **kwargs):
path = kwargs["path"]
delete = kwargs["delete"]
site_filter = kwargs.get("site")
if os.path.isfile(path):
self.process_json_file(path, delete, site_filter)
elif os.path.isdir(path):
self.process_json_folder(path, delete, site_filter)
else:
self.stdout.write(
self.style.ERROR(f"The path '{path}' is not a valid file or folder.")
)
return
def process_json_file(self, file_path, delete, site_filter=None):
tqdm.write(f"Importing data from: {file_path}")
try:
with open(file_path, "r") as f:
data = json.load(f)
category = data.get("category", "")
# Skip if site filter is set and doesn't match
if site_filter and category != site_filter:
tqdm.write(
f"Skipping {file_path}, category {category} doesn't match filter {site_filter}"
)
return
# Check if we have an importer for this category
if category in self.importers:
self.importers[category].import_data(data, file_path, delete)
tqdm.write(
self.style.SUCCESS(f"Data imported successfully for {category}.")
)
else:
tqdm.write(
self.style.WARNING(f"No importer found for category: {category}")
)
except json.JSONDecodeError:
tqdm.write(self.style.ERROR(f"Invalid JSON file: {file_path}"))
except Exception as e:
tqdm.write(self.style.ERROR(f"Error processing {file_path}: {str(e)}"))
def process_json_folder(self, folder_path, delete, site_filter=None):
if not os.path.exists(folder_path):
tqdm.write(self.style.ERROR(f"The folder '{folder_path}' does not exist."))
return
# Count total files
tqdm.write("Counting total files...")
total_files = sum(len(files) for _, _, files in os.walk(folder_path))
with tqdm(
total=total_files, desc="Processing JSON files", dynamic_ncols=True
) as progress_bar:
for root, dirs, files in os.walk(folder_path):
for file_name in files:
progress_bar.update(1) # Increment progress for each file
if file_name.endswith(".json"):
file_path = os.path.join(root, file_name)
self.process_json_file(file_path, delete, site_filter)
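The management command's file name (and therefore the command name) is not shown in this diff; assuming it were saved as import_data.py, usage might look like:

    python manage.py import_data /path/to/exported/json --site furaffinity
    python manage.py import_data /path/to/exported/json --delete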

View file

@@ -1,186 +0,0 @@
from django.db import models
from apps.files.models import PostFileModel
class SourceSiteModel(models.Model):
slug = models.CharField(max_length=64)
name = models.CharField(max_length=64)
date_last_import = models.DateTimeField(auto_now=True)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = "Source Site"
verbose_name_plural = "Source Sites"
def __str__(self):
if self.name:
return str(self.name)
else:
return str(self.slug)
class CategoryModel(models.Model):
slug = models.CharField(max_length=64)
name = models.CharField(max_length=64)
date_modified = models.DateTimeField(auto_now=True)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = "Category"
verbose_name_plural = "Categories"
def __str__(self):
if len(str(self.name)) > 0:
return str(self.name)
else:
return str(self.slug)
class TitleModel(models.Model):
hash = models.CharField(max_length=128)
content = models.TextField()
date_modified = models.DateTimeField(auto_now=True, editable=True)
date_created = models.DateTimeField(auto_now_add=True, editable=True)
class Meta:
verbose_name = "Title"
verbose_name_plural = "Titles"
def __str__(self):
return str(self.hash)
class PostTitle(models.Model):
post = models.ForeignKey("PostModel", on_delete=models.CASCADE)
title = models.ForeignKey(TitleModel, on_delete=models.CASCADE)
# order = models.IntegerField()
date_imported = models.DateTimeField(auto_now_add=True)
class CreatorTitle(models.Model):
creator = models.ForeignKey("CreatorModel", on_delete=models.CASCADE)
title = models.ForeignKey(TitleModel, on_delete=models.CASCADE)
# order = models.IntegerField()
date_imported = models.DateTimeField(auto_now_add=True)
class DescriptionModel(models.Model):
hash = models.CharField(max_length=128)
content = models.TextField()
date_modified = models.DateTimeField(auto_now=True, editable=True)
date_created = models.DateTimeField(auto_now_add=True, editable=True)
class Meta:
verbose_name = "Description"
verbose_name_plural = "Descriptions"
def __str__(self):
return str(self.hash)
class PostDescription(models.Model):
post = models.ForeignKey("PostModel", on_delete=models.CASCADE)
description = models.ForeignKey(DescriptionModel, on_delete=models.CASCADE)
date_imported = models.DateTimeField(auto_now_add=True)
class CreatorDescription(models.Model):
creator = models.ForeignKey("CreatorModel", on_delete=models.CASCADE)
description = models.ForeignKey(DescriptionModel, on_delete=models.CASCADE)
date_imported = models.DateTimeField(auto_now_add=True)
class CreatorModel(models.Model):
creator_id = models.CharField(max_length=128, null=True)
source_site = models.ForeignKey(
to=SourceSiteModel, on_delete=models.CASCADE, related_name="creators"
)
slug = models.CharField(max_length=64)
name = models.CharField(max_length=64)
description = models.ManyToManyField(
to=DescriptionModel, related_name="creators", through=CreatorDescription
)
avatar = models.ForeignKey(
to=PostFileModel,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="creators_avatar",
)
banner = models.ForeignKey(
to=PostFileModel,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="creators_banner",
)
categories = models.ManyToManyField(to=CategoryModel, related_name="creators")
date_created = models.DateTimeField(auto_now_add=True, editable=True)
date_last_import = models.DateTimeField(auto_now=True, editable=True)
date_imported = models.DateTimeField(auto_now_add=True, editable=True)
mature = models.BooleanField(default=False)
class Meta:
verbose_name = "Creator"
verbose_name_plural = "Creators"
def __str__(self):
return str(self.name)
class TagModel(models.Model):
slug = models.CharField(max_length=64)
name = models.CharField(max_length=64)
date_modified = models.DateTimeField(auto_now=True)
date_created = models.DateTimeField(auto_now_add=True)
mature = models.BooleanField(default=False)
class Meta:
verbose_name = "Tag"
verbose_name_plural = "Tags"
def __str__(self):
if len(str(self.name)) > 0:
return str(self.name)
else:
return str(self.slug)
class PostModel(models.Model):
post_id = models.CharField(max_length=128, db_index=True)
title = models.ManyToManyField(
to=TitleModel, related_name="posts", through=PostTitle
)
description = models.ManyToManyField(
to=DescriptionModel, related_name="posts", through=PostDescription
)
creator = models.ForeignKey(
to=CreatorModel,
on_delete=models.CASCADE,
null=True,
blank=True,
related_name="posts",
)
source_site = models.ForeignKey(
to=SourceSiteModel, on_delete=models.CASCADE, related_name="posts"
)
category = models.ManyToManyField(to=CategoryModel, related_name="posts")
tags = models.ManyToManyField(to=TagModel, related_name="posts")
mature = models.BooleanField(default=False)
files = models.ManyToManyField(to=PostFileModel, related_name="posts")
date_created = models.DateTimeField(auto_now_add=True, editable=True)
date_imported = models.DateTimeField(auto_now_add=True, editable=True)
date_last_import = models.DateTimeField(auto_now=True, editable=True)
class Meta:
verbose_name = "Post"
verbose_name_plural = "Posts"
    def __str__(self):
        # title is a ManyToManyField, so show the first linked title (fall back to the post id)
        first_title = self.title.first()
        return str(first_title) if first_title else str(self.post_id)
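For orientation, a hedged sketch of how these models are wired together during an import; the module path (apps.archive.models) is inferred from the installed app list, and the slugs, ids and title text are illustrative only:

from apps.archive.models import (
    SourceSiteModel,
    CreatorModel,
    PostModel,
    TitleModel,
    PostTitle,
)

site, _ = SourceSiteModel.objects.get_or_create(slug="twitter", defaults={"name": "Twitter"})
creator, _ = CreatorModel.objects.get_or_create(
    creator_id="123", source_site=site, defaults={"slug": "artist", "name": "Artist"}
)
post, _ = PostModel.objects.get_or_create(post_id="9876", source_site=site, creator=creator)

# Titles are deduplicated by hash and linked through the PostTitle join table
title, _ = TitleModel.objects.get_or_create(hash="abc123", defaults={"content": "My post title"})
PostTitle.objects.get_or_create(post=post, title=title)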

View file

@ -1,3 +0,0 @@
from django.test import TestCase
# Create your tests here.

View file

@ -1,3 +0,0 @@
from django.shortcuts import render
# Create your views here.

View file

@ -1,2 +0,0 @@
from django.contrib import admin

View file

@ -1,6 +0,0 @@
from django.apps import AppConfig
class FilesConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "apps.files"

View file

@ -1,22 +0,0 @@
from django.core.management.base import BaseCommand
from apps.files.tasks import generate_video_thumbnail
from apps.files.models import PostFileModel
from tqdm import tqdm
class Command(BaseCommand):
help = "Backfill video and gif thumbs"
def handle(self, *args, **options):
# Get all PostFileModel instances that need thumbs
files = PostFileModel.objects.filter(file_type__in=["video", "gif"])
# Create a progress bar
pbar = tqdm(total=files.count(), desc="Generating thumbs")
# Loop through each file and generate thumbs
for file in files:
generate_video_thumbnail.delay(file.id)
pbar.update(1)
pbar.close()

View file

@ -1,67 +0,0 @@
from django.db import models
from django.urls import reverse
def get_upload_to(instance, filename, folder):
return f"{folder}/{instance.hash_blake3[:2]}/{instance.hash_blake3[2:4]}/{filename}"
def get_upload_to_posts(instance, filename):
return get_upload_to(instance, filename, "posts")
def get_upload_to_thumbnails(instance, filename):
return get_upload_to(instance, filename, "thumbnails")
class FileNameModel(models.Model):
filename = models.CharField(max_length=512)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = "File Name"
verbose_name_plural = "File Names"
def __str__(self):
return str(self.filename)
class PostFileModel(models.Model):
name = models.ManyToManyField(to=FileNameModel, related_name="post_files")
file = models.FileField(upload_to=get_upload_to_posts, blank=True)
# Hash for file identification (blake3 is used for deduplication)
hash_blake3 = models.CharField(max_length=128)
hash_md5 = models.CharField(max_length=32)
hash_sha1 = models.CharField(max_length=40)
hash_sha256 = models.CharField(max_length=64)
hash_sha512 = models.CharField(max_length=128)
# Image Blur Hash for preview presentation
blur_hash = models.CharField(max_length=32)
# If file has width and height save it.
height = models.IntegerField(null=True)
width = models.IntegerField(null=True)
mimetype = models.CharField(max_length=256, blank=True)
file_type = models.CharField(max_length=16, blank=True)
extension = models.CharField(max_length=64, blank=True)
manual_added = models.BooleanField(default=False)
# Thumbnails for video file and others
thumbnail = models.FileField(upload_to=get_upload_to_thumbnails, blank=True)
thumbnail_hash_blake3 = models.CharField(max_length=128, blank=True)
thumbnail_blur_hash = models.CharField(max_length=64, blank=True)
class Meta:
verbose_name = "Post File"
verbose_name_plural = "Post Files"
def __str__(self):
return str(self.name.first())
def get_absolute_url(self):
return reverse("serve_file", args=[self.hash_blake3])
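The upload helpers shard stored files into two-level directories taken from the BLAKE3 hash; a quick illustration with a made-up hash value:

f = PostFileModel(hash_blake3="a1b2c3d4...")   # hash shortened for illustration
get_upload_to_posts(f, "image.png")            # -> "posts/a1/b2/image.png"
get_upload_to_thumbnails(f, "thumb.png")       # -> "thumbnails/a1/b2/thumb.png"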

View file

@ -1,286 +0,0 @@
import os
import io
import subprocess
from pathlib import Path
from typing import Optional, Tuple
from django.db import transaction
from celery import shared_task
from celery.exceptions import Retry
from PIL import Image as PillowImage
import blurhash
from .models import PostFileModel
from utils.hash import compute_file_hash_blake3, compute_md5_hash, compute_blur_hash
class ThumbnailGenerationError(Exception):
"""Custom exception for thumbnail generation errors."""
pass
def _setup_output_path(file_hash: str, prefix: str = "thumbnail") -> Tuple[str, str]:
"""
Set up the output directory and generate a unique filename.
Args:
file_hash (str): Hash to use in the filename
prefix (str): Prefix for the filename
Returns:
Tuple[str, str]: Output directory path and full file path
"""
output_dir = "/tmp/thumbgen/"
os.makedirs(output_dir, exist_ok=True)
filename = f"{prefix}_{file_hash}.png"
filepath = os.path.join(output_dir, filename)
return output_dir, filepath
def _update_file_model(
file_model: PostFileModel, thumbnail_path: str, thumbnail_filename: str
) -> None:
"""
Update the PostFileModel with the new thumbnail and related hashes.
Args:
file_model (PostFileModel): The model to update
thumbnail_path (str): Path to the generated thumbnail
thumbnail_filename (str): Filename for the saved thumbnail
"""
# Compute the hash for the generated thumbnail
thumbnail_hash_blake3 = compute_file_hash_blake3(thumbnail_path)
# Update the PostFileModel's thumbnail field with the new file
with open(thumbnail_path, "rb") as file:
file_model.thumbnail.save(thumbnail_filename, file)
# Set the thumbnail hash
file_model.thumbnail_hash_blake3 = thumbnail_hash_blake3
# Generate and set the blur hash for the thumbnail
file_model.thumbnail_blur_hash = compute_blur_hash(thumbnail_path)
# Save the model
file_model.save()
def _handle_task_error(e: Exception, file_id: int, process_name: str):
"""
Handle errors in thumbnail generation tasks.
Args:
e (Exception): The exception that occurred
file_id (int): ID of the file being processed
process_name (str): Name of the process for error reporting
Raises:
Retry: To trigger Celery retry mechanism
"""
error_message = f"Error in {process_name} for file {file_id}: {str(e)}"
print(error_message)
raise Retry(exc=e)
@shared_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=5)
def generate_blur_hash_PostFile(file_id: int) -> str:
"""
Generate and save a blur hash for an image stored in PostFileModel.
Args:
file_id (int): ID of the PostFileModel instance
Returns:
str: Success message
"""
try:
with transaction.atomic():
img = PostFileModel.objects.select_for_update().get(id=file_id)
image_data = io.BytesIO(img.file.read())
pil_img = PillowImage.open(image_data)
blurhash_string = blurhash.encode(pil_img, 4, 3)
img.refresh_from_db()
img.blur_hash = blurhash_string
img.save()
return f"Successfully generated blur hash for file {file_id}"
except Exception as e:
_handle_task_error(e, file_id, "blur hash generation")
@shared_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=5)
def generate_md5_hash_PostFile(file_id: int) -> str:
"""
Generate and save an MD5 hash for a file stored in PostFileModel.
Args:
file_id (int): ID of the PostFileModel instance
Returns:
str: Success message
"""
try:
with transaction.atomic():
pstfile = PostFileModel.objects.select_for_update().get(id=file_id)
# Compute the MD5 hash
md5_hash = compute_md5_hash(pstfile.file.path)
# Save the computed hash
pstfile.refresh_from_db()
pstfile.hash_md5 = md5_hash
pstfile.save()
return f"Successfully generated MD5 hash for file {file_id}"
except Exception as e:
_handle_task_error(e, file_id, "MD5 hash generation")
@shared_task(name="generate_video_thumbnail")
def generate_video_thumbnail(
file_id: int,
size: int = 0,
timestamp: Optional[float] = None,
movie_strip: bool = False,
) -> str:
"""
Generate video thumbnails using ffmpegthumbnailer and update the PostFileModel instance.
Args:
file_id (int): ID of the PostFileModel instance
size (int): Desired thumbnail width or height (defaults to video size)
timestamp (float): Timestamp in seconds where the thumbnail should be extracted
movie_strip (bool): Create a movie strip overlay
Returns:
str: Success message or error message
"""
try:
with transaction.atomic():
# Retrieve the PostFileModel instance with a lock
pstfile = PostFileModel.objects.select_for_update().get(id=file_id)
if not pstfile.file:
return "Error: Video file not found for the given file_id."
video_path = pstfile.file.path
# Setup output path
_, thumbnail_file_path = _setup_output_path(
pstfile.hash_blake3, "video_thumbnail"
)
thumbnail_filename = Path(thumbnail_file_path).name
# Build command
cmd = [
"ffmpegthumbnailer",
"-i",
video_path,
"-o",
thumbnail_file_path,
"-s",
str(size),
"-m",
]
if movie_strip:
cmd.extend(["-f"])
# Generate thumbnail at specified timestamps
if timestamp is not None:
cmd.extend(["-t", f"{timestamp}"])
# Execute command
subprocess.run(cmd, check=True)
# Update model with new thumbnail
_update_file_model(pstfile, thumbnail_file_path, thumbnail_filename)
# Clean up temporary file
os.remove(thumbnail_file_path)
return f"Video thumbnail generated successfully for file {file_id}"
except subprocess.CalledProcessError as e:
_handle_task_error(e, file_id, "video thumbnail generation")
except Exception as e:
_handle_task_error(e, file_id, "video thumbnail generation")
@shared_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=5)
def generate_pdf_thumbnail(
file_id: int, page: int = 1, size: Optional[Tuple[int, int]] = None, dpi: int = 200
) -> str:
"""
Generate PDF thumbnails using pdf2image and update the PostFileModel instance.
Args:
file_id (int): ID of the PostFileModel instance
page (int): Page number to use for thumbnail (defaults to first page)
size (Tuple[int, int], optional): Desired thumbnail (width, height) or None to maintain original size
dpi (int): DPI for rendering the PDF (higher values result in larger images)
Returns:
str: Success message or error message
"""
try:
from pdf2image import convert_from_path
with transaction.atomic():
# Retrieve the PostFileModel instance with a lock
pstfile = PostFileModel.objects.select_for_update().get(id=file_id)
if not pstfile.file:
return "Error: PDF file not found for the given file_id."
pdf_path = pstfile.file.path
# Setup output path
_, thumbnail_file_path = _setup_output_path(
pstfile.hash_blake3, "pdf_thumbnail"
)
thumbnail_filename = Path(thumbnail_file_path).name
# Convert PDF to image using pdf2image
# first_page and last_page are 1-indexed
images = convert_from_path(
pdf_path, dpi=dpi, first_page=page, last_page=page
)
# Get the first page (should be the only one based on our parameters)
if not images:
raise ValueError(f"Could not extract page {page} from PDF")
image = images[0]
# Resize if size is specified
if size:
image = image.resize(size, PillowImage.LANCZOS)
# Save the image
image.save(thumbnail_file_path, "PNG")
# Update model with new thumbnail
_update_file_model(pstfile, thumbnail_file_path, thumbnail_filename)
# Clean up temporary file
os.remove(thumbnail_file_path)
return f"PDF thumbnail generated successfully for file {file_id}"
except ImportError:
error_message = (
"pdf2image library is not installed. Install it with: pip install pdf2image"
)
print(error_message)
raise
except Exception as e:
_handle_task_error(e, file_id, "PDF thumbnail generation")
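A hedged sketch of how a caller might fan these tasks out once a file has been imported; the routing helper itself is not part of this codebase, and the file_type values mirror the categories produced by categorize_mime_type:

def queue_post_file_tasks(post_file: PostFileModel) -> None:
    """Dispatch the Celery tasks that match the imported file's type (illustrative helper)."""
    generate_md5_hash_PostFile.delay(post_file.id)
    if post_file.file_type in ("video", "gif"):
        generate_video_thumbnail.delay(post_file.id)
    elif post_file.file_type == "pdf":
        generate_pdf_thumbnail.delay(post_file.id)
    elif post_file.file_type == "image":
        generate_blur_hash_PostFile.delay(post_file.id)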

View file

@ -1,3 +0,0 @@
from django.test import TestCase
# Create your tests here.

View file

@ -1,3 +0,0 @@
from django.db import models
# Create your models here.

View file

@ -1,3 +0,0 @@
from django.test import TestCase
# Create your tests here.

View file

@ -1,3 +0,0 @@
from django.shortcuts import render
# Create your views here.

View file

@ -1,4 +0,0 @@
# Always make sure django celery is imported when django starts.
from .celery import app as celery_app
__all__ = ("celery_app",)

View file

@ -1,16 +0,0 @@
"""
ASGI config for gallery project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/5.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
application = get_asgi_application()

View file

@ -1,11 +0,0 @@
import os
from celery import Celery
# from celery.schedules import crontab
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
app = Celery("Gallery Archivist")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
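With the CELERY namespace, config_from_object maps upper-case Django settings onto Celery's lower-case option names; for example (illustrative values):

# settings.py                                     # resulting Celery option
# CELERY_BROKER_URL = "redis://localhost:6379/0"  # -> broker_url
# CELERY_RESULT_BACKEND = "django-db"             # -> result_backend
# CELERY_TASK_DEFAULT_QUEUE = "default"           # -> task_default_queue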

View file

@ -1,218 +0,0 @@
"""
Django settings for gallery project.
Generated by 'django-admin startproject' using Django 5.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/5.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/5.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/5.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-%ym(_f4tmylz_@$5ty#w4k#m2^2nkp!2h8at@wx@rmc&mf8&q7"
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is read from the environment; only an explicit truthy string enables it.
DEBUG = os.environ.get("DEBUG", "False").lower() in ("1", "true", "yes")
ALLOWED_HOSTS = [
    "localhost",
    "127.0.0.1",
] + [host.strip() for host in os.environ.get("ALLOWED_HOSTS", "").split(",") if host.strip()]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# Library apps
"rest_framework",
"rest_framework_simplejwt",
"drf_spectacular",
"drf_spectacular_sidecar",
"corsheaders",
"django_celery_results",
"django_celery_beat",
"sorl.thumbnail",
# API apps
"api",
"api.schema",
"api.authentication",
"api.user",
"api.posts",
"api.creators",
"api.categories",
# Apps for Backend logic
"apps",
"apps.archive",
"apps.files",
# App for handling deletion of model files
"django_cleanup.apps.CleanupConfig",
]
MIDDLEWARE = [
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"api.authentication.middleware.JWTParamMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "core.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "core.wsgi.application"
# Database
# https://docs.djangoproject.com/en/5.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": os.environ.get("DB_NAME", "archivist"),
"USER": os.environ.get("DB_USER", "archivist"),
"PASSWORD": os.environ.get("DB_PASSWORD", "password"),
# or the service name in your docker-compose.
"HOST": os.environ.get("DB_HOST", "localhost"),
"PORT": os.environ.get("DB_PORT", "5432"),
}
# "default": {
# "ENGINE": "django.db.backends.sqlite3",
# "NAME": BASE_DIR / "db.sqlite3",
# }
}
# Password validation
# https://docs.djangoproject.com/en/5.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework_simplejwt.authentication.JWTAuthentication",
),
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}
SPECTACULAR_SETTINGS = {
"TITLE": "Gallery-Archivist API",
"DESCRIPTION": "A tool for archiving online galleries",
"VERSION": "1.0.0",
"SERVE_INCLUDE_SCHEMA": False,
"SWAGGER_UI_DIST": "SIDECAR", # shorthand to use the sidecar instead
"SWAGGER_UI_FAVICON_HREF": "SIDECAR",
"REDOC_DIST": "SIDECAR",
}
# Internationalization
# https://docs.djangoproject.com/en/5.1/topics/i18n/
LANGUAGE_CODE = "en-gb"
# if os.environ.get("TZ"):
# TIME_ZONE = os.environ.get("TZ")
# else:
# TIME_ZONE = "UTC"
TIME_ZONE = "Europe/Berlin"
USE_I18N = True
USE_TZ = True
# DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# DATETIME_INPUT_FORMATS = DATETIME_FORMAT
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/5.1/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/5.1/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
MEDIA_URL = "/media/"
MEDIA_ROOT = BASE_DIR / "media"
# if debug mode is on allow all origins
# if DEBUG:
# CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOWED_ORIGINS = [
"http://localhost:5173",
"http://northpaw.aroy.hs.tun",
]
CELERY_BROKER_URL = os.environ.get("CELERY_BROKER", "redis://localhost:6379/0")
CELERY_RESULT_BACKEND = "django-db"
CELERY_CACHE_BACKEND = "django-cache"
# Django only exposes upper-case settings, so the namespaced Celery options must be
# upper-case for app.config_from_object("django.conf:settings", namespace="CELERY")
# to pick them up.
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = TIME_ZONE
CELERY_TASK_TRACK_STARTED = True
CELERY_RESULT_EXTENDED = True
CELERY_BROKER_CONNECTION_RETRY_ON_STARTUP = True
CELERY_WORKER_CANCEL_LONG_RUNNING_TASKS_ON_CONNECTION_LOSS = False
CELERY_TASK_DEFAULT_QUEUE = "default"

View file

@ -1,24 +0,0 @@
"""
URL configuration for gallery project.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/5.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("api/", include("api.urls")),
]

View file

@ -1,16 +0,0 @@
"""
WSGI config for gallery project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/5.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
application = get_wsgi_application()

View file

@ -1,29 +0,0 @@
services:
db:
image: postgres:16
container_name: postgres_dev
restart: unless-stopped
environment:
POSTGRES_USER: archivist
POSTGRES_PASSWORD: password
POSTGRES_DB: archivist
TZ: Europe/Berlin
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
pgadmin:
image: dpage/pgadmin4
container_name: pgadmin_dev
restart: unless-stopped #always
environment:
PGADMIN_DEFAULT_EMAIL: aroy-art@pm.me
PGADMIN_DEFAULT_PASSWORD: admin
ports:
- "5050:80"
depends_on:
- db
volumes:
postgres_data:

View file

@ -1,23 +0,0 @@
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()

View file

@ -1,66 +0,0 @@
import mimetypes
def get_mime_type(file_path: str) -> str | None:
    """
    Get the MIME type of a file based on the file path.
    Parameters:
        file_path (str): The path to the file from which to determine the MIME type.
    Returns:
        str | None: The MIME type of the file, or None if it cannot be guessed.
    """
    mime_type, _encoding = mimetypes.guess_type(file_path)
    return mime_type
def categorize_mime_type(mime_type: str) -> str:
image_types = {
"image/jpeg",
"image/png",
"image/bmp",
"image/tiff",
"image/webp",
"image/svg+xml",
"image/heif",
"image/heic",
}
gif_types = {"image/gif"}
video_types = {
"video/mp4",
"video/mpeg",
"video/quicktime",
"video/x-msvideo",
"video/x-matroska",
"video/webm",
"video/ogg",
}
flash_types = {"application/x-shockwave-flash", "application/vnd.adobe.flash.movie"}
archive_types = {
"application/zip",
"application/x-rar-compressed",
"application/x-tar",
"application/gzip",
"application/x-7z-compressed",
"application/x-bzip2",
}
pdf_types = {"application/pdf"}
audio_types = {"audio/mpeg", "audio/wav", "audio/ogg", "audio/flac", "audio/aac"}
if mime_type in image_types:
return "image"
elif mime_type in gif_types:
return "gif"
elif mime_type in video_types:
return "video"
elif mime_type in flash_types:
return "flash"
elif mime_type in archive_types:
return "archive"
elif mime_type in pdf_types:
return "pdf"
elif mime_type in audio_types:
return "audio"
else:
return "other"

View file

@ -1,104 +0,0 @@
import hashlib
from blake3 import blake3
from blurhash import encode
from tqdm.auto import tqdm
from PIL import Image
def compute_blake3_hash(data, is_file=False, logger=None):
"""
Compute BLAKE3 hash of a file or string.
Args:
data (str): File path (if is_file=True) or raw string.
is_file (bool): Whether the input is a file path. Defaults to False.
logger: Optional logger for error messages (e.g., Django `self` or `tqdm`).
Returns:
str: BLAKE3 hash or None if an error occurs.
"""
try:
hasher = blake3()
if is_file:
with open(data, "rb") as f:
while chunk := f.read(65536):
hasher.update(chunk)
else:
hasher.update(data.encode())
return hasher.hexdigest()
except Exception as e:
error_message = f"Error computing hash: {e}"
if logger:
if hasattr(logger, "style") and hasattr(logger, "stdout"): # Django command
logger.stdout.write(logger.style.WARNING(error_message))
else: # Default to tqdm
tqdm.write(error_message)
return None
# Convenience wrappers for readability
def compute_file_hash_blake3(file_path, logger=None):
return compute_blake3_hash(file_path, is_file=True, logger=logger)
def compute_string_hash_blake3(string, logger=None):
return compute_blake3_hash(string, is_file=False, logger=logger)
def compute_md5_hash(file_path):
"""
Compute the MD5 hash of a file.
Args:
file_path (str): Path to the file.
Returns:
str: MD5 hash of the file.
"""
try:
hash_md5 = hashlib.md5()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
except Exception as e:
tqdm.write(f"Error computing MD5 hash: {e}")
return None
def compute_blur_hash(image_path, components_x=4, components_y=4, logger=None):
"""
Compute the BlurHash of an image.
Args:
image_path (str): Path to the image file.
components_x (int): Number of horizontal components for BlurHash.
components_y (int): Number of vertical components for BlurHash.
logger: Optional logger for error messages.
Returns:
str: BlurHash string or None if an error occurs.
"""
try:
with Image.open(image_path) as img:
img = img.convert("RGB") # Ensure it's in RGB mode
blur_hash = encode(img, components_x, components_y)
return blur_hash
except Exception as e:
error_message = f"Error computing BlurHash: {e}"
if logger:
if hasattr(logger, "style") and hasattr(logger, "stdout"): # Django command
logger.stdout.write(logger.style.WARNING(error_message))
else: # Default to tqdm
tqdm.write(error_message)
return None
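A short usage sketch of these helpers (the file paths are illustrative):

file_hash = compute_file_hash_blake3("media/posts/a1/b2/image.png")
text_hash = compute_string_hash_blake3("My post title")
md5_hash = compute_md5_hash("media/posts/a1/b2/image.png")
blur = compute_blur_hash("media/posts/a1/b2/image.png", components_x=4, components_y=4)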

View file

@ -1,21 +0,0 @@
{
"$schema": "https://ui.shadcn.com/schema.json",
"style": "new-york",
"rsc": false,
"tsx": true,
"tailwind": {
"config": "tailwind.config.js",
"css": "src/index.css",
"baseColor": "zinc",
"cssVariables": true,
"prefix": ""
},
"aliases": {
"components": "@/components",
"utils": "@/lib/utils",
"ui": "@/components/ui",
"lib": "@/lib",
"hooks": "@/hooks"
},
"iconLibrary": "lucide"
}

View file

@ -1,28 +0,0 @@
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
export default tseslint.config(
{ ignores: ['dist'] },
{
extends: [js.configs.recommended, ...tseslint.configs.recommended],
files: ['**/*.{ts,tsx}'],
languageOptions: {
ecmaVersion: 2020,
globals: globals.browser,
},
plugins: {
'react-hooks': reactHooks,
'react-refresh': reactRefresh,
},
rules: {
...reactHooks.configs.recommended.rules,
'react-refresh/only-export-components': [
'warn',
{ allowConstantExport: true },
],
},
},
)

View file

@ -1,13 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vite + React + TS</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

File diff suppressed because it is too large.

View file

@ -1,48 +0,0 @@
{
"name": "frontend",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "tsc -b && vite build",
"lint": "eslint .",
"preview": "vite preview"
},
"dependencies": {
"@radix-ui/react-avatar": "^1.1.2",
"@radix-ui/react-dropdown-menu": "^2.1.4",
"@radix-ui/react-label": "^2.1.1",
"@radix-ui/react-navigation-menu": "^1.2.5",
"@radix-ui/react-separator": "^1.1.1",
"@radix-ui/react-slot": "^1.1.2",
"@radix-ui/react-switch": "^1.1.3",
"axios": "^1.7.9",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"lucide-react": "^0.469.0",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-helmet-async": "^2.0.5",
"react-router-dom": "^7.1.1",
"tailwind-merge": "^2.6.0",
"tailwindcss-animate": "^1.0.7"
},
"devDependencies": {
"@eslint/js": "^9.17.0",
"@types/node": "^22.10.5",
"@types/react": "^18.3.18",
"@types/react-dom": "^18.3.5",
"@vitejs/plugin-react": "^4.3.4",
"autoprefixer": "^10.4.20",
"eslint": "^9.17.0",
"eslint-plugin-react-hooks": "^5.0.0",
"eslint-plugin-react-refresh": "^0.4.16",
"globals": "^15.14.0",
"postcss": "^8.4.49",
"tailwindcss": "^3.4.17",
"typescript": "~5.6.2",
"typescript-eslint": "^8.18.2",
"vite": "^6.0.5"
}
}

View file

@ -1,6 +0,0 @@
export default {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
}

Binary file not shown. (Before: 893 KiB)

Binary file not shown. (Before: 16 KiB)

Binary file not shown. (Before: 3.7 KiB)

Binary file not shown. (Before: 1.2 MiB)

View file

@ -1,29 +0,0 @@
import { BrowserRouter as Router, Routes, Route } from 'react-router-dom';
import { HelmetProvider } from "react-helmet-async";
import { ThemeProvider } from "@/components/ThemeProvider";
import HomePage from "@/pages/HomePage";
import LoginPage from "@/pages/LoginPage";
function App() {
return (
<HelmetProvider>
<ThemeProvider defaultTheme="system" storageKey="ga-ui-theme">
<Router>
<Routes>
<Route path="/">
<Route index element={<HomePage />} />
<Route path="user/">
<Route path="login" element={<LoginPage />} />
</Route>
</Route>
</Routes>
</Router>
</ThemeProvider>
</HelmetProvider>
);
};
export default App;

View file

@ -1,31 +0,0 @@
import { PostCard } from "@/components/partials/PostCard"
interface Post {
post_id: string
title: string
description: string
creator: {
[key: string]: string
}
date: {
[key: string]: string
}
media: Array<{
[key: string]: string
}>
}
interface GalleryGridProps {
items: Post[]
}
export function GalleryGrid({ items }: GalleryGridProps) {
return (
<div className="flex flex-wrap flex-cols-6 justify-center gap-3 max-w-[120rem]">
{items.map((item) => (
<PostCard key={item.post_id} {...item} />
))}
</div>
)
}

View file

@ -1,39 +0,0 @@
import { Moon, Sun } from "lucide-react"
import { Button } from "@/components/ui/button"
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu"
import { useTheme } from "@/components/ThemeProvider"
export function LightModeToggle() {
const { setTheme } = useTheme()
return (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="link" size="icon">
<Sun className="h-[1.2rem] w-[1.2rem] rotate-0 scale-100 transition-all dark:-rotate-90 dark:scale-0" />
<Moon className="absolute h-[1.2rem] w-[1.2rem] rotate-90 scale-0 transition-all dark:rotate-0 dark:scale-100" />
<span className="sr-only">Toggle theme</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
<DropdownMenuItem onClick={() => setTheme("light")}>
Light
</DropdownMenuItem>
<DropdownMenuItem onClick={() => setTheme("dark")}>
Dark
</DropdownMenuItem>
<DropdownMenuItem onClick={() => setTheme("system")}>
System
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
)
}
export default LightModeToggle

View file

@ -1,94 +0,0 @@
import { useState, useEffect } from "react";
import { Link } from "react-router-dom";
import LightModeToggle from "@/components/LightModeToggle";
import { Separator } from "@/components/ui/separator";
import { logout } from "@/services/auth"; // Import the logout function
import ProfileDropdown from "@/components/ProfileDropdown";
import UserDropdown from "@/components/UserDropdown";
const Navbar = () => {
const [isOpen, setIsOpen] = useState(false);
const [isLoggedIn, setIsLoggedIn] = useState(false);
// Check login status on component mount
useEffect(() => {
const token = localStorage.getItem("access_token");
setIsLoggedIn(!!token); // Convert token presence to boolean
}, []);
return (
<nav className="bg-violet-600 p-4 shadow-md">
<div className="container mx-auto flex items-center justify-between">
{/* Logo (Always Centered on Mobile) */}
<Link to="/" className="text-white text-2xl font-bold mx-auto md:mx-0">
{__SITE_NAME__}
</Link>
{/* Desktop Navigation */}
<div className="hidden md:flex items-center space-x-6">
<Link to="/" className="text-white hover:text-gray-300">Home</Link>
<Link to="/browse/" className="text-white hover:text-gray-300">Browse</Link>
<Link to="/gallery" className="text-white hover:text-gray-300">Gallery</Link>
<LightModeToggle />
{isLoggedIn ? (
<UserDropdown />
) : (
<Link to="/user/login" className="text-white hover:text-gray-300">Login</Link>
)}
</div>
{/* Mobile Menu Button */}
<button
onClick={() => setIsOpen(!isOpen)}
className="text-white text-2xl font-bold md:hidden"
>
{isOpen ? '✖' : '☰'}
</button>
</div>
{/* Mobile Side Panel */}
{isOpen && (
<div className="fixed top-0 right-0 w-2/3 h-full bg-violet-700 z-40 shadow-lg p-4">
<div className="flex justify-end">
<button
onClick={() => setIsOpen(false)}
className="text-white text-2xl font-bold"
              >
                ✖
              </button>
</div>
<ul className="space-y-4 mt-4">
<li>
<Link to="/" className="text-white hover:text-gray-300 block" onClick={() => setIsOpen(false)}>Home</Link>
</li>
<li>
<Link to="/browse/" className="text-white hover:text-gray-300 block" onClick={() => setIsOpen(false)}>Browse</Link>
</li>
<li>
<Link to="/gallery" className="text-white hover:text-gray-300 block" onClick={() => setIsOpen(false)}>Gallery</Link>
</li>
<li>
<LightModeToggle />
</li>
<li>
{isLoggedIn ? (
<UserDropdown />
) : (
<Link to="/user/login" className="text-white hover:text-gray-300 block" onClick={() => setIsOpen(false)}>
Login
</Link>
)}
</li>
<li>
<Link to="/protected" className="text-white hover:text-gray-300 block" onClick={() => setIsOpen(false)}>Protected</Link>
</li>
</ul>
</div>
)}
</nav>
);
};
export default Navbar;

View file

@ -1,13 +0,0 @@
const Spinner = ({ size = 'md' }) => {
const sizes = {
sm: 'h-8 w-8 border-t-2 border-t-pink-600 border-b-2 border-b-sky-400',
md: 'h-16 w-16 border-t-4 border-t-pink-600 border-b-4 border-b-sky-400',
lg: 'h-24 w-24 border-t-4 border-t-pink-600 border-b-4 border-b-sky-400',
};
return (
<div className={`animate-spin rounded-full ${sizes[size]}`}></div>
);
};
export default Spinner;

View file

@ -1,74 +0,0 @@
import { createContext, useContext, useEffect, useState } from "react"
type Theme = "dark" | "light" | "system"
type ThemeProviderProps = {
children: React.ReactNode
defaultTheme?: Theme
storageKey?: string
}
type ThemeProviderState = {
theme: Theme
setTheme: (theme: Theme) => void
}
const initialState: ThemeProviderState = {
theme: "system",
setTheme: () => null,
}
const ThemeProviderContext = createContext<ThemeProviderState>(initialState)
export function ThemeProvider({
children,
defaultTheme = "system",
storageKey = "vite-ui-theme",
...props
}: ThemeProviderProps) {
const [theme, setTheme] = useState<Theme>(
() => (localStorage.getItem(storageKey) as Theme) || defaultTheme
)
useEffect(() => {
const root = window.document.documentElement
root.classList.remove("light", "dark")
if (theme === "system") {
const systemTheme = window.matchMedia("(prefers-color-scheme: dark)")
.matches
? "dark"
: "light"
root.classList.add(systemTheme)
return
}
root.classList.add(theme)
}, [theme])
const value = {
theme,
setTheme: (theme: Theme) => {
localStorage.setItem(storageKey, theme)
setTheme(theme)
},
}
return (
<ThemeProviderContext.Provider {...props} value={value}>
{children}
</ThemeProviderContext.Provider>
)
}
export const useTheme = () => {
const context = useContext(ThemeProviderContext)
if (context === undefined)
throw new Error("useTheme must be used within a ThemeProvider")
return context
}

View file

@ -1,117 +0,0 @@
import { useState, useEffect } from "react";
import { Link } from "react-router-dom";
import { Switch } from "@/components/ui/switch";
import { Button } from "@/components/ui/button";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { Skeleton } from "@/components/ui/skeleton";
import apiInstance from "@/services/api";
import { logout } from "@/services/auth";
interface UserProfile {
username: string;
email: string;
profile: {
show_mature: boolean;
};
}
const UserDropdown = () => {
const [user, setUser] = useState<UserProfile | null>(null);
const [loading, setLoading] = useState(true);
const [nsfwEnabled, setNsfwEnabled] = useState(false);
useEffect(() => {
const fetchUser = async () => {
try {
const response = await apiInstance.get<UserProfile>("user/profile/");
setUser(response.data);
setNsfwEnabled(response.data.profile.show_mature);
} catch (error) {
console.error("Failed to fetch user:", error);
} finally {
setLoading(false);
}
};
fetchUser();
}, []);
const handleNsfwToggle = async () => {
if (!user) return;
const newSetting = !nsfwEnabled;
setNsfwEnabled(newSetting);
try {
await apiInstance.patch("user/profile/", {
profile: { show_mature: newSetting },
});
} catch (error) {
console.error("Failed to update NSFW setting:", error);
setNsfwEnabled(!newSetting);
}
};
const handleLogout = () => {
logout();
};
return (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Link className="relative flex items-center space-x-2">
{loading ? (
<Skeleton className="h-8 w-8 rounded-full bg-gray-500 hover:bg-gray-600" />
) : (
<div className="h-8 w-8 bg-gray-500 hover:bg-gray-600 rounded-full flex items-center justify-center shadow-lg">
{user?.username.charAt(0).toUpperCase()}
</div>
)}
</Link>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" className="p-4 shadow-lg rounded-lg border">
{loading ? (
<div className="flex flex-col space-y-3">
<Skeleton className="h-4 w-32" />
<Skeleton className="h-4 w-48" />
<Skeleton className="h-10 w-full" />
</div>
) : (
<div className="space-y-2">
{/* User Info */}
<div className="text-sm">
<p className="font-semibold">{user?.username}</p>
<p className="text-gray-500">{user?.email}</p>
</div>
<div className="border-t my-2"></div>
{/* NSFW Toggle (Prevent Dropdown from Closing) */}
<DropdownMenuItem asChild>
<div
className="flex items-center justify-between w-full cursor-pointer"
onClick={(e) => e.stopPropagation()} // Prevents dropdown from closing
>
<span className="text-sm">Show NSFW Content</span>
<Switch checked={nsfwEnabled} onCheckedChange={handleNsfwToggle} />
</div>
</DropdownMenuItem>
{/* Logout Button */}
<Button variant="destructive" className="w-full" onClick={handleLogout}>
Logout
</Button>
</div>
)}
</DropdownMenuContent>
</DropdownMenu>
);
};
export default UserDropdown;

Some files were not shown because too many files have changed in this diff.