Backend:
- db_async.py: new combat_stats + combat_stats_sessions tables
- main.py: combat_stats message handler with DB upsert (lifetime + session snapshots), in-memory live_combat_stats dict, broadcast to browser clients
- REST: GET /combat-stats and GET /combat-stats/{character_name}

Frontend:
- index.html: new "Combat Stats" sidebar link
- script.js: full Combat Stats window with two panels:
  - Top: monster list (name, kills, dmg recv, dmg given) with clickable rows and an "All" aggregate, matching CombatTrackerGUI.cs
  - Bottom: damage breakdown grid matching the CombatTrackerGUIInfo.cs layout — element × attack type matrix (Mel/Msl + Magic columns), Attacks (hit%), Evades (%), Resists (%), A.Surges (%), C.Surges (%), normal Avg/Max, Crits (%), Crit Avg/Max, Total Damage
  - Session / Lifetime toggle button
- style.css: combat-stats-toggle styles

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
"""
|
||
main.py - FastAPI-based telemetry server for Dereth Tracker.
|
||
|
||
This service ingests real-time position and event data from plugin clients via WebSockets,
|
||
stores telemetry and statistics in a TimescaleDB backend, and exposes HTTP and WebSocket
|
||
endpoints for browser clients to retrieve live and historical data, trails, and per-character stats.
|
||
"""
|
||
|
||
from collections import defaultdict
from datetime import datetime, timedelta, timezone
import html as _html
import json
import logging
import os
import uuid
import sys
import time
from typing import Any, Dict, List, Optional
from pathlib import Path
import asyncio
import socket
import struct

from fastapi import (
    FastAPI,
    Header,
    HTTPException,
    Query,
    WebSocket,
    WebSocketDisconnect,
    Request,
)
from fastapi.responses import (
    JSONResponse,
    Response,
    StreamingResponse,
    HTMLResponse,
    RedirectResponse,
)
from fastapi.routing import APIRoute
from fastapi.staticfiles import StaticFiles
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from starlette.middleware.base import BaseHTTPMiddleware
import httpx
import bcrypt as _bcrypt
from itsdangerous import URLSafeTimedSerializer, BadSignature, SignatureExpired

# Async database support
from sqlalchemy.dialects.postgresql import insert as pg_insert
from db_async import (
    database,
    telemetry_events,
    char_stats,
    rare_stats,
    rare_stats_sessions,
    spawn_events,
    rare_events,
    character_inventories,
    character_stats,
    portals,
    server_health_checks,
    server_status,
    combat_stats,
    combat_stats_sessions,
    users,
    init_db_async,
    cleanup_old_portals,
    seed_users,
)

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout),
    ],
)
logger = logging.getLogger(__name__)

# Get log level from environment (DEBUG, INFO, WARNING, ERROR)
log_level = os.getenv("LOG_LEVEL", "INFO").upper()
logger.setLevel(getattr(logging, log_level, logging.INFO))

# Inventory service configuration
INVENTORY_SERVICE_URL = os.getenv(
    "INVENTORY_SERVICE_URL", "http://inventory-service:8000"
)

# In-memory caches for REST endpoints
_cached_live: dict = {"players": []}
_cached_trails: dict = {"trails": []}
_cached_total_rares: dict = {"all_time": 0, "today": 0, "last_updated": None}
_cached_total_kills: dict = {"total": 0, "last_updated": None}
_cache_task: asyncio.Task | None = None
_rares_cache_task: asyncio.Task | None = None
_cleanup_task: asyncio.Task | None = None
_broadcast_tasks: set[asyncio.Task] = set()

# Player tracking for debug purposes
_player_history: list = []  # List of player sets from last 10 refreshes
_player_events: list = []  # List of player enter/exit events
_max_history_size = 10  # Keep last 10 player sets
_max_events_size = 100  # Keep last 100 events

# Telemetry timing tracking for debug purposes
_player_telemetry_times: dict = {}  # character_name -> list of timestamps
_max_telemetry_history = 20  # Keep last 20 telemetry timestamps per player

# Simple WebSocket connection counters (Phase 1)
_plugin_connections = 0
_browser_connections = 0

# Simple database query performance counters (Phase 2)
_total_queries = 0
_total_query_time = 0.0

# Simple recent activity tracking (Phase 3)
_recent_telemetry_messages = []
_max_recent_messages = 50

# Server health monitoring
_server_health_task = None
_server_status_cache = {
    "status": "unknown",
    "latency_ms": None,
    "player_count": None,
    "last_check": None,
    "uptime_seconds": 0,
    "last_restart": None,
}

# Quest status cache - stores last received quest data per player
# Structure: {character_name: {quest_name: countdown_value}}
_quest_status_cache: Dict[str, Dict[str, str]] = {}

# AC Hash32 checksum algorithm (based on ThwargLauncher)
def calculate_hash32(data: bytes) -> int:
    """Calculate AC Hash32 checksum as used in ThwargLauncher."""
    length = len(data)
    checksum = (length << 16) & 0xFFFFFFFF

    # Process 4-byte chunks
    for i in range(0, length - 3, 4):
        chunk = struct.unpack("<I", data[i : i + 4])[0]
        checksum = (checksum + chunk) & 0xFFFFFFFF

    # Handle remaining bytes
    remaining_start = (length // 4) * 4
    shift = 24
    for i in range(remaining_start, length):
        byte_val = data[i] << shift
        checksum = (checksum + byte_val) & 0xFFFFFFFF
        shift -= 8

    return checksum

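# Worked example (illustrative doctest; values follow directly from the
# algorithm above: the checksum seeds with length << 16, then adds each
# little-endian 4-byte chunk):
#
#     >>> calculate_hash32(b"")
#     0
#     >>> hex(calculate_hash32(b"\x01\x00\x00\x00"))  # (4 << 16) + 1
#     '0x40001'
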
# Create AC EchoRequest packet for server health check (based on ThwargLauncher)
def create_echo_request_packet():
    """Create an AC EchoRequest packet for server health checking."""
    # AC packet header: sequence(4) + flags(4) + checksum(4) + id(2) + time(2) + size(2) + table(2) = 20 bytes + padding
    packet = bytearray(32)  # 32 bytes total (0x20)

    # Sequence (4 bytes) - can be 0
    struct.pack_into("<I", packet, 0, 0)

    # Flags (4 bytes) - EchoRequest = 0x02000000
    struct.pack_into("<I", packet, 4, 0x02000000)

    # Temporary checksum (4 bytes) - required for proper checksum calculation
    struct.pack_into("<I", packet, 8, 0x0BADD70D)

    # ID (2 bytes) - can be 0
    struct.pack_into("<H", packet, 12, 0)

    # Time (2 bytes) - can be 0
    struct.pack_into("<H", packet, 14, 0)

    # Size (2 bytes) - header size = 32 (0x20)
    struct.pack_into("<H", packet, 16, 32)

    # Table (2 bytes) - can be 0
    struct.pack_into("<H", packet, 18, 0)

    # Calculate proper AC Hash32 checksum
    # First, set checksum field to 0
    struct.pack_into("<I", packet, 8, 0)

    # Calculate checksum using Hash32 algorithm
    checksum = calculate_hash32(bytes(packet))
    struct.pack_into("<I", packet, 8, checksum)

    return bytes(packet)


AC_ECHO_PACKET = create_echo_request_packet()

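# Sanity check (illustrative doctest; derived from the layout written above):
#
#     >>> len(AC_ECHO_PACKET)
#     32
#     >>> hex(struct.unpack_from("<I", AC_ECHO_PACKET, 4)[0])  # flags field
#     '0x2000000'
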
# AC login packet for server health check (same as ThwargLauncher MakeLoginPacket).
# Byte list collapsed into rows; the ASCII region embeds the client version
# string '1802' and the account string 'acservertracker:jj9h26hcsggc'.
AC_LOGIN_PACKET = bytes(
    [
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
        0x93, 0x00, 0xD0, 0x05, 0x00, 0x00, 0x00, 0x00,
        0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x31, 0x38,
        0x30, 0x32, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
        0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x3E, 0xB8, 0xA8, 0x58, 0x1C, 0x00, 0x61, 0x63,
        0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x74, 0x72,
        0x61, 0x63, 0x6B, 0x65, 0x72, 0x3A, 0x6A, 0x6A,
        0x39, 0x68, 0x32, 0x36, 0x68, 0x63, 0x73, 0x67,
        0x67, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
    ]
)

async def check_server_health(
    address: str, port: int, timeout: float = 3.0
) -> tuple[bool, Optional[float], Optional[int]]:
    """Check AC server health via UDP packet with retry logic.

    Retries 6 times with 5-second delays before declaring the server down.
    Returns: (is_up, latency_ms, player_count); player_count is always None
    here, since player counts come from the TreeStats.net API instead.
    """
    max_retries = 6
    retry_delay = 5.0

    for attempt in range(max_retries):
        logger.debug(
            f"🔍 Health check attempt {attempt + 1}/{max_retries} for {address}:{port}"
        )
        start_time = time.time()
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setblocking(False)

        try:
            # Send login packet (same as ThwargLauncher)
            await asyncio.get_event_loop().sock_sendto(
                sock, AC_LOGIN_PACKET, (address, port)
            )

            # Wait for response with timeout
            try:
                data, addr = await asyncio.wait_for(
                    asyncio.get_event_loop().sock_recvfrom(sock, 1024), timeout=timeout
                )

                latency_ms = (time.time() - start_time) * 1000
                logger.debug(
                    f"📥 Received response from {addr}: {len(data)} bytes, latency: {latency_ms:.1f}ms"
                )

                # Check if valid response (support both TimeSynch 0x800000 and ConnectRequest 0x40000)
                if len(data) >= 24:
                    flags = struct.unpack("<I", data[4:8])[0]

                    # Accept both TimeSynch (0x800000) and ConnectRequest (0x40000) as valid responses
                    if (flags & 0x800000) or (flags & 0x40000):
                        # UDP health check is for server status and latency only
                        # Player count comes from TreeStats.net API (like ThwargLauncher)
                        logger.debug(
                            f"✅ Valid server response: latency: {latency_ms:.1f}ms"
                        )
                        return True, latency_ms, None

                # Any response indicates server is up, even if not the expected format
                logger.info(
                    f"✅ Server response (non-standard format): latency: {latency_ms:.1f}ms"
                )
                return True, latency_ms, None

            except asyncio.TimeoutError:
                logger.debug(
                    f"⏰ TIMEOUT: No response from {address}:{port} after {timeout}s"
                )
                if attempt < max_retries - 1:
                    logger.debug(f"Retrying in {retry_delay} seconds...")
                    await asyncio.sleep(retry_delay)
                    continue

        except Exception as e:
            logger.error(f"Server health check error on attempt {attempt + 1}: {e}")
            if attempt < max_retries - 1:
                await asyncio.sleep(retry_delay)
                continue
        finally:
            sock.close()

    # Only declare down after all retries fail
    logger.warning(
        f"❌ Server {address}:{port} is DOWN after {max_retries} attempts over {max_retries * retry_delay} seconds"
    )
    return False, None, None

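# Example usage (illustrative; assumes a reachable server and no event loop
# already running, e.g. from a standalone script):
#
#     >>> up, latency_ms, _ = asyncio.run(
#     ...     check_server_health("play.coldeve.ac", 9000)
#     ... )  # doctest: +SKIP
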
async def get_player_count_from_treestats(server_name: str) -> int:
    """Get player count from TreeStats.net API (same as ThwargLauncher)."""
    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(
                "http://treestats.net/player_counts-latest.json", timeout=10
            )
            if response.status_code == 200:
                data = response.json()
                for server_data in data:
                    if server_data.get("server") == server_name:
                        return server_data.get("count", 0)
        return 0
    except Exception as e:
        logger.debug(f"Failed to get player count from TreeStats.net: {e}")
        return 0

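# Response shape this parser expects (inferred from the loop above, not from
# TreeStats documentation; the values here are made up):
#
#     [
#         {"server": "Coldeve", "count": 123},
#         {"server": "SomeOtherServer", "count": 45},
#     ]
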
async def monitor_server_health():
    """Background task to monitor server health every 30 seconds and cleanup old portals every minute."""
    server_name = "Coldeve"
    server_address = "play.coldeve.ac"
    server_port = 9000
    check_interval = 30  # seconds
    player_count_interval = (
        300  # 5 minutes (like ThwargLauncher's 10 minutes, but more frequent)
    )
    portal_cleanup_interval = 60  # 1 minute
    last_player_count_check = 0
    last_portal_cleanup = 0
    current_player_count = None

    # Initialize server status in database
    try:
        existing = await database.fetch_one(
            "SELECT * FROM server_status WHERE server_name = :name",
            {"name": server_name},
        )
        if not existing:
            await database.execute(
                server_status.insert().values(
                    server_name=server_name,
                    current_status="unknown",
                    total_uptime_seconds=0,
                )
            )
    except Exception as e:
        logger.error(f"Failed to initialize server status: {e}")

    while True:
        try:
            logger.debug(
                f"🏥 Running scheduled health check for {server_name} ({server_address}:{server_port})"
            )
            # Check server health via UDP (for status and latency)
            is_up, latency_ms, _ = await check_server_health(
                server_address, server_port
            )
            status = "up" if is_up else "down"
            now = datetime.now(timezone.utc)

            # Get player count from TreeStats.net API (like ThwargLauncher)
            current_time = time.time()
            if (
                current_time - last_player_count_check >= player_count_interval
                or current_player_count is None
            ):
                new_player_count = await get_player_count_from_treestats(server_name)
                if new_player_count > 0:  # Only update if we got a valid count
                    current_player_count = new_player_count
                    last_player_count_check = current_time
                    logger.info(
                        f"🏥 Updated player count from TreeStats.net: {current_player_count}"
                    )

            logger.debug(
                f"🏥 Health check result: {status}, latency: {latency_ms}, players: {current_player_count}"
            )

            # Record health check
            await database.execute(
                server_health_checks.insert().values(
                    server_name=server_name,
                    server_address=f"{server_address}:{server_port}",
                    timestamp=now,
                    status=status,
                    latency_ms=latency_ms,
                    player_count=current_player_count,
                )
            )

            # Get previous status
            prev_status = await database.fetch_one(
                "SELECT * FROM server_status WHERE server_name = :name",
                {"name": server_name},
            )

            # Calculate uptime and detect restarts
            last_restart = prev_status["last_restart"] if prev_status else None

            if (
                prev_status
                and prev_status["current_status"] == "down"
                and status == "up"
            ):
                # Server came back up - this is a restart
                last_restart = now
                logger.info(f"Server {server_name} came back online")
                # Broadcast to all browser clients
                await _broadcast_to_browser_clients(
                    {
                        "type": "server_status",
                        "server": server_name,
                        "status": "up",
                        "message": "Server is back online",
                    }
                )

            # Calculate uptime from last restart time (not accumulated)
            if last_restart and status == "up":
                uptime_seconds = int((now - last_restart).total_seconds())
            else:
                uptime_seconds = 0

            # Update server status (always include current_player_count if we have it)
            await database.execute(
                """
                INSERT INTO server_status (server_name, current_status, last_seen_up, last_restart,
                                           total_uptime_seconds, last_check, last_latency_ms, last_player_count)
                VALUES (:name, :status, :last_seen, :restart, :uptime, :check, :latency, :players)
                ON CONFLICT (server_name) DO UPDATE SET
                    current_status = :status,
                    last_seen_up = CASE WHEN :status = 'up' THEN :last_seen ELSE server_status.last_seen_up END,
                    last_restart = :restart,
                    total_uptime_seconds = :uptime,
                    last_check = :check,
                    last_latency_ms = :latency,
                    last_player_count = CASE WHEN :players IS NOT NULL THEN :players ELSE server_status.last_player_count END
                """,
                {
                    "name": server_name,
                    "status": status,
                    "last_seen": now if status == "up" else None,
                    "restart": last_restart,
                    "uptime": uptime_seconds,
                    "check": now,
                    "latency": latency_ms,
                    "players": current_player_count,
                },
            )

            # Update cache
            global _server_status_cache
            _server_status_cache = {
                "status": status,
                "latency_ms": latency_ms,
                "player_count": current_player_count,
                "last_check": now.isoformat(),
                "uptime_seconds": uptime_seconds,
                "last_restart": last_restart.isoformat() if last_restart else None,
            }

            logger.debug(
                f"Server health check: {status}, latency={latency_ms}ms, players={current_player_count}"
            )

            # Portal cleanup (run every minute)
            current_time = time.time()
            if current_time - last_portal_cleanup >= portal_cleanup_interval:
                try:
                    deleted_count = await cleanup_old_portals()
                    logger.info(
                        f"Portal cleanup: removed {deleted_count} portals older than 1 hour"
                    )
                    last_portal_cleanup = current_time
                except Exception as cleanup_error:
                    logger.error(
                        f"Portal cleanup error: {cleanup_error}", exc_info=True
                    )

        except Exception as e:
            logger.error(f"Server health monitoring error: {e}", exc_info=True)

        await asyncio.sleep(check_interval)

async def cleanup_connections_loop():
    """Background task to clean up stale WebSocket connections every 5 minutes."""
    cleanup_interval = 300  # 5 minutes

    logger.info("🧹 Starting WebSocket connection cleanup task")

    while True:
        try:
            await asyncio.sleep(cleanup_interval)
            logger.debug("🧹 Running periodic WebSocket connection cleanup")
            await cleanup_stale_connections()
        except Exception as e:
            logger.error(f"WebSocket cleanup task error: {e}", exc_info=True)

def _track_player_changes(new_players: list) -> None:
    """Track player changes for debugging flapping issues."""
    from datetime import datetime, timezone

    # Get current player names
    current_players = {p["character_name"] for p in new_players}
    timestamp = datetime.now(timezone.utc)

    # Track telemetry timing for each player
    for player_data in new_players:
        player_name = player_data["character_name"]
        player_timestamp = player_data.get("timestamp")

        # Convert timestamp if it's a string
        if isinstance(player_timestamp, str):
            try:
                player_timestamp = datetime.fromisoformat(
                    player_timestamp.replace("Z", "+00:00")
                )
            except ValueError:
                player_timestamp = timestamp
        elif player_timestamp is None:
            player_timestamp = timestamp

        # Initialize player telemetry tracking if needed
        if player_name not in _player_telemetry_times:
            _player_telemetry_times[player_name] = []

        # Add this telemetry timestamp
        _player_telemetry_times[player_name].append(player_timestamp)

        # Trim to max history
        if len(_player_telemetry_times[player_name]) > _max_telemetry_history:
            _player_telemetry_times[player_name].pop(0)

    # Get previous player names if we have history
    previous_players = set()
    if _player_history:
        previous_players = {p["character_name"] for p in _player_history[-1]["players"]}

    # Find players who entered and exited
    entered_players = current_players - previous_players
    exited_players = previous_players - current_players

    # Log events with telemetry timing analysis
    for player in entered_players:
        # Check if this is due to timing gap
        timing_gap = None
        if (
            player in _player_telemetry_times
            and len(_player_telemetry_times[player]) >= 2
        ):
            last_two = _player_telemetry_times[player][-2:]
            timing_gap = (last_two[1] - last_two[0]).total_seconds()

        event = {
            "timestamp": timestamp,
            "type": "enter",
            "character_name": player,
            "total_players": len(current_players),
            "timing_gap": timing_gap,
        }
        _player_events.append(event)
        gap_info = (
            f" (gap: {timing_gap:.1f}s)" if timing_gap and timing_gap > 25 else ""
        )
        logger.debug(
            f"Player entered: {player} (total: {len(current_players)}){gap_info}"
        )

    for player in exited_players:
        # Calculate time since last telemetry
        last_telemetry_age = None
        if player in _player_telemetry_times and _player_telemetry_times[player]:
            last_telemetry = _player_telemetry_times[player][-1]
            last_telemetry_age = (timestamp - last_telemetry).total_seconds()

        event = {
            "timestamp": timestamp,
            "type": "exit",
            "character_name": player,
            "total_players": len(current_players),
            "last_telemetry_age": last_telemetry_age,
        }
        _player_events.append(event)
        age_info = (
            f" (last telemetry: {last_telemetry_age:.1f}s ago)"
            if last_telemetry_age
            else ""
        )
        logger.debug(
            f"Player exited: {player} (total: {len(current_players)}){age_info}"
        )

    # Add current state to history
    history_entry = {
        "timestamp": timestamp,
        "players": new_players,
        "player_count": len(new_players),
        "player_names": list(current_players),
    }
    _player_history.append(history_entry)

    # Trim history to max size
    if len(_player_history) > _max_history_size:
        _player_history.pop(0)

    # Trim events to max size
    if len(_player_events) > _max_events_size:
        _player_events.pop(0)

def _analyze_flapping_patterns() -> dict:
    """Analyze player events to identify flapping patterns."""
    from collections import Counter, defaultdict

    if not _player_events:
        return {
            "flapping_players": [],
            "frequent_events": [],
            "analysis": "No events to analyze",
        }

    # Count events per player
    player_event_counts = Counter()
    player_flap_counts = defaultdict(int)

    # Track recent activity per player (last 10 events)
    recent_player_activity = defaultdict(list)

    for event in _player_events[-50:]:  # Analyze last 50 events
        player = event["character_name"]
        event_type = event["type"]
        player_event_counts[player] += 1
        recent_player_activity[player].append(event_type)

    # Identify flapping players (players with many enter/exit cycles)
    flapping_players = []
    for player, activity in recent_player_activity.items():
        if len(activity) >= 4:  # At least 4 events
            # Count alternating enter/exit patterns
            flap_score = 0
            for i in range(1, len(activity)):
                if activity[i] != activity[i - 1]:  # Different from previous
                    flap_score += 1

            if flap_score >= 3:  # At least 3 transitions
                flapping_players.append(
                    {
                        "character_name": player,
                        "events": len(activity),
                        "flap_score": flap_score,
                        "recent_activity": activity[-10:],  # Last 10 events
                    }
                )

    # Sort by flap score
    flapping_players.sort(key=lambda x: x["flap_score"], reverse=True)

    # Most active players
    frequent_events = [
        {"character_name": player, "event_count": count}
        for player, count in player_event_counts.most_common(10)
    ]

    # Recent activity summary
    recent_enters = sum(1 for e in _player_events[-20:] if e["type"] == "enter")
    recent_exits = sum(1 for e in _player_events[-20:] if e["type"] == "exit")

    return {
        "flapping_players": flapping_players,
        "frequent_events": frequent_events,
        "recent_activity": {
            "enters": recent_enters,
            "exits": recent_exits,
            "net_change": recent_enters - recent_exits,
        },
        "analysis": f"Found {len(flapping_players)} potentially flapping players",
    }

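# Worked example of the flap score (illustrative): each change of event type
# relative to the previous event adds 1, so a strict enter/exit alternation
# maximizes the score and trips the >= 3 threshold above.
#
#     >>> activity = ["enter", "exit", "enter", "exit"]
#     >>> sum(1 for i in range(1, len(activity)) if activity[i] != activity[i - 1])
#     3
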
def _analyze_telemetry_timing() -> dict:
    """Analyze telemetry timing patterns for all players."""
    from datetime import datetime, timezone

    timing_analysis = {}
    problem_players = []

    for player_name, timestamps in _player_telemetry_times.items():
        if len(timestamps) < 2:
            continue

        # Calculate intervals between telemetry messages
        intervals = []
        for i in range(1, len(timestamps)):
            interval = (timestamps[i] - timestamps[i - 1]).total_seconds()
            intervals.append(interval)

        if not intervals:
            continue

        # Calculate timing statistics
        avg_interval = sum(intervals) / len(intervals)
        min_interval = min(intervals)
        max_interval = max(intervals)

        # Count problematic intervals (>30s)
        long_gaps = [i for i in intervals if i > 30]
        recent_long_gaps = [i for i in intervals[-5:] if i > 30]  # Last 5 intervals

        # Determine if this player has timing issues
        has_timing_issues = len(long_gaps) > 0 or max_interval > 35

        timing_stats = {
            "character_name": player_name,
            "total_messages": len(timestamps),
            "avg_interval": round(avg_interval, 1),
            "min_interval": round(min_interval, 1),
            "max_interval": round(max_interval, 1),
            "long_gaps_count": len(long_gaps),
            "recent_long_gaps": len(recent_long_gaps),
            "last_message_age": (
                datetime.now(timezone.utc) - timestamps[-1]
            ).total_seconds()
            if timestamps
            else 0,
            "has_timing_issues": has_timing_issues,
            "recent_intervals": [
                round(i, 1) for i in intervals[-5:]
            ],  # Last 5 intervals
        }

        timing_analysis[player_name] = timing_stats

        if has_timing_issues:
            problem_players.append(timing_stats)

    # Sort problem players by severity (max interval)
    problem_players.sort(key=lambda x: x["max_interval"], reverse=True)

    return {
        "all_players": timing_analysis,
        "problem_players": problem_players,
        "summary": {
            "total_tracked_players": len(timing_analysis),
            "players_with_issues": len(problem_players),
            "avg_intervals": [
                stats["avg_interval"] for stats in timing_analysis.values()
            ],
        },
    }

async def _refresh_cache_loop() -> None:
    """Background task: refresh `/live` and `/trails` caches every 5 seconds."""
    consecutive_failures = 0
    max_consecutive_failures = 5

    while True:
        try:
            # Recompute live players (last 30s)
            cutoff = datetime.now(timezone.utc) - ACTIVE_WINDOW
            sql_live = """
                SELECT sub.*,
                       COALESCE(rs.total_rares, 0) AS total_rares,
                       COALESCE(rss.session_rares, 0) AS session_rares,
                       COALESCE(cs.total_kills, 0) AS total_kills
                FROM (
                    SELECT DISTINCT ON (character_name) *
                    FROM telemetry_events
                    WHERE timestamp > :cutoff
                    ORDER BY character_name, timestamp DESC
                ) sub
                LEFT JOIN rare_stats rs
                    ON sub.character_name = rs.character_name
                LEFT JOIN rare_stats_sessions rss
                    ON sub.character_name = rss.character_name
                    AND sub.session_id = rss.session_id
                LEFT JOIN char_stats cs
                    ON sub.character_name = cs.character_name
            """

            # Use a single connection for both queries to reduce connection churn
            async with database.connection() as conn:
                rows = await conn.fetch_all(sql_live, {"cutoff": cutoff})
                new_players = [dict(r) for r in rows]

                # Track player changes for debugging
                _track_player_changes(new_players)

                _cached_live["players"] = new_players

                # Recompute trails (last 600s)
                cutoff2 = datetime.utcnow().replace(tzinfo=timezone.utc) - timedelta(
                    seconds=600
                )
                sql_trail = """
                    SELECT timestamp, character_name, ew, ns, z
                    FROM telemetry_events
                    WHERE timestamp >= :cutoff
                    ORDER BY character_name, timestamp
                """
                rows2 = await conn.fetch_all(sql_trail, {"cutoff": cutoff2})
                _cached_trails["trails"] = [
                    {
                        "timestamp": r["timestamp"],
                        "character_name": r["character_name"],
                        "ew": r["ew"],
                        "ns": r["ns"],
                        "z": r["z"],
                    }
                    for r in rows2
                ]

            # Reset failure counter on success
            consecutive_failures = 0
            logger.debug(
                f"Cache refreshed: {len(_cached_live['players'])} players, {len(_cached_trails['trails'])} trail points"
            )

        except Exception as e:
            consecutive_failures += 1
            logger.error(
                f"Cache refresh failed ({consecutive_failures}/{max_consecutive_failures}): {e}",
                exc_info=True,
            )

            # If too many consecutive failures, wait longer and try to reconnect
            if consecutive_failures >= max_consecutive_failures:
                logger.warning(
                    "Too many consecutive cache refresh failures. Attempting database reconnection..."
                )
                try:
                    await database.disconnect()
                    await asyncio.sleep(2)
                    await database.connect()
                    logger.info("Database reconnected successfully")
                    consecutive_failures = 0
                except Exception as reconnect_error:
                    logger.error(f"Database reconnection failed: {reconnect_error}")
                    await asyncio.sleep(10)  # Wait longer before retrying
                    continue

        await asyncio.sleep(5)

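# Note on the live-players query above: `SELECT DISTINCT ON (character_name) ...
# ORDER BY character_name, timestamp DESC` is PostgreSQL-specific and keeps only
# the newest telemetry row per character. Illustrative result for one character
# with two rows inside the window (values made up):
#
#     rows: ("Alice", 12:00:05), ("Alice", 12:00:00)  ->  kept: ("Alice", 12:00:05)
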
async def _refresh_total_rares_cache() -> None:
    """Background task: refresh total rares cache every 5 minutes."""
    consecutive_failures = 0
    max_consecutive_failures = 3

    while True:
        try:
            async with database.connection() as conn:
                # Get all-time total rares (sum of all characters) - gracefully handle missing table
                try:
                    all_time_query = (
                        "SELECT COALESCE(SUM(total_rares), 0) as total FROM rare_stats"
                    )
                    all_time_result = await conn.fetch_one(all_time_query)
                    all_time_total = all_time_result["total"] if all_time_result else 0
                except Exception as e:
                    logger.debug(f"rare_stats table not available: {e}")
                    all_time_total = 0

                # Get today's rares from rare_events table - gracefully handle missing table
                try:
                    today_query = """
                        SELECT COUNT(*) as today_count
                        FROM rare_events
                        WHERE timestamp >= CURRENT_DATE
                    """
                    today_result = await conn.fetch_one(today_query)
                    today_total = today_result["today_count"] if today_result else 0
                except Exception as e:
                    logger.debug(f"rare_events table not available or empty: {e}")
                    today_total = 0

                # Get total kills from char_stats table (all-time, all characters)
                try:
                    kills_query = (
                        "SELECT COALESCE(SUM(total_kills), 0) as total FROM char_stats"
                    )
                    kills_result = await conn.fetch_one(kills_query)
                    total_kills = kills_result["total"] if kills_result else 0
                except Exception as e:
                    logger.debug(f"char_stats table not available: {e}")
                    total_kills = 0

            # Update caches
            _cached_total_rares["all_time"] = all_time_total
            _cached_total_rares["today"] = today_total
            _cached_total_rares["last_updated"] = datetime.now(timezone.utc)

            _cached_total_kills["total"] = total_kills
            _cached_total_kills["last_updated"] = datetime.now(timezone.utc)

            consecutive_failures = 0
            logger.debug(
                f"Stats cache updated: Rares all-time: {all_time_total}, today: {today_total}, Kills: {total_kills}"
            )

        except Exception as e:
            consecutive_failures += 1
            logger.error(
                f"Total rares cache refresh failed ({consecutive_failures}/{max_consecutive_failures}): {e}",
                exc_info=True,
            )

            if consecutive_failures >= max_consecutive_failures:
                logger.warning(
                    "Too many consecutive total rares cache failures, waiting longer..."
                )
                await asyncio.sleep(60)  # Wait longer on repeated failures
                continue

        # Sleep for 5 minutes (300 seconds)
        await asyncio.sleep(300)

# ------------------------------------------------------------------
app = FastAPI()
# In-memory store mapping character_name to the most recent telemetry snapshot
live_snapshots: Dict[str, dict] = {}
live_vitals: Dict[str, dict] = {}
live_character_stats: Dict[str, dict] = {}
live_equipment_cantrip_states: Dict[str, dict] = {}
live_nearby_objects: Dict[str, dict] = {}
dungeon_map_cache: Dict[str, dict] = {}  # landblock hex string -> dungeon map data

# Shared secret used to authenticate plugin WebSocket connections (override for production)
SHARED_SECRET = "your_shared_secret"
# Secret key for signing session cookies (override via SECRET_KEY env var)
SECRET_KEY = os.getenv("SECRET_KEY", "change-me-in-production-please")
SESSION_MAX_AGE = 30 * 24 * 3600  # 30 days in seconds
_serializer = URLSafeTimedSerializer(SECRET_KEY)

# LOG_FILE = "telemetry_log.jsonl"
# ------------------------------------------------------------------
ACTIVE_WINDOW = timedelta(
    seconds=30
)  # Time window defining "online" players (last 30 seconds)

# ─── Session helpers ─────────────────────────────────────────────
def create_session_cookie(username: str, is_admin: bool) -> str:
    """Create a signed session token."""
    return _serializer.dumps({"u": username, "a": is_admin})


def verify_session_cookie(token: str) -> dict | None:
    """Verify and decode a session token. Returns None if invalid/expired."""
    try:
        data = _serializer.loads(token, max_age=SESSION_MAX_AGE)
        return {"username": data["u"], "is_admin": data["a"]}
    except (BadSignature, SignatureExpired, KeyError):
        return None

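# Round-trip example (illustrative doctest; the username is a placeholder and
# the token value depends on SECRET_KEY):
#
#     >>> token = create_session_cookie("alice", False)
#     >>> verify_session_cookie(token)
#     {'username': 'alice', 'is_admin': False}
#     >>> verify_session_cookie(token + "tampered") is None
#     True
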
# Paths that don't require authentication
_PUBLIC_PATHS = {"/login", "/logout"}
_PUBLIC_PREFIXES = ("/ws/position",)  # Plugin WS uses X-Plugin-Secret


class AuthMiddleware(BaseHTTPMiddleware):
    """Redirect unauthenticated requests to /login."""

    async def dispatch(self, request: Request, call_next):
        path = request.url.path

        # Always allow public paths
        if path in _PUBLIC_PATHS or path.startswith(_PUBLIC_PREFIXES):
            return await call_next(request)

        # Allow login page static assets
        if (
            path == "/login.html"
            or path == "/login-style.css"
            or path.startswith("/icons/")
        ):
            return await call_next(request)

        # WebSocket upgrades bypass middleware (auth checked in handler)
        if path.startswith("/ws/live"):
            return await call_next(request)

        # Check session cookie
        token = request.cookies.get("session")
        if token:
            user = verify_session_cookie(token)
            if user:
                request.state.user = user
                return await call_next(request)

        # Not authenticated — redirect browser, reject API
        if "text/html" in request.headers.get("accept", ""):
            return RedirectResponse("/login", status_code=302)
        return JSONResponse({"detail": "Not authenticated"}, status_code=401)


app.add_middleware(AuthMiddleware)

"""
|
||
Data models for plugin events:
|
||
- TelemetrySnapshot: periodic telemetry data from a player client
|
||
- SpawnEvent: information about a mob spawn event
|
||
- RareEvent: details of a rare mob event
|
||
"""
|
||
|
||
|
||
class TelemetrySnapshot(BaseModel):
|
||
character_name: str
|
||
char_tag: Optional[str] = None
|
||
session_id: str
|
||
timestamp: datetime
|
||
|
||
ew: float # +E / –W
|
||
ns: float # +N / –S
|
||
z: float
|
||
|
||
kills: int
|
||
kills_per_hour: Optional[float] = None
|
||
onlinetime: Optional[str] = None
|
||
deaths: int
|
||
total_deaths: Optional[int] = None
|
||
# Removed from telemetry payload; always enforced to 0 and tracked via rare events
|
||
rares_found: Optional[int] = 0
|
||
prismatic_taper_count: int
|
||
vt_state: str
|
||
# Optional telemetry metrics
|
||
mem_mb: Optional[float] = None
|
||
cpu_pct: Optional[float] = None
|
||
mem_handles: Optional[int] = None
|
||
latency_ms: Optional[float] = None
|
||
|
||
|
||
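# Minimal payload that validates against TelemetrySnapshot (illustrative
# values; only the field names come from the model above):
#
#     >>> TelemetrySnapshot(
#     ...     character_name="Alice",
#     ...     session_id="abc123",
#     ...     timestamp="2025-01-01T00:00:00Z",
#     ...     ew=12.3, ns=-45.6, z=0.0,
#     ...     kills=10, deaths=1,
#     ...     prismatic_taper_count=200,
#     ...     vt_state="Combat",
#     ... ).kills
#     10
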
class SpawnEvent(BaseModel):
    """
    Model for a spawn event emitted by plugin clients when a mob appears.
    Records character context, mob type, timestamp, and spawn location.
    """

    character_name: str
    mob: str
    timestamp: datetime
    ew: float
    ns: float
    z: float = 0.0


class RareEvent(BaseModel):
    """
    Model for a rare mob event when a player encounters or discovers a rare entity.
    Includes character, event name, timestamp, and location coordinates.
    """

    character_name: str
    name: str
    timestamp: datetime
    ew: float
    ns: float
    z: float = 0.0


class FullInventoryMessage(BaseModel):
    """
    Model for the full_inventory WebSocket message type.
    Contains complete character inventory snapshot with raw item data.
    """

    character_name: str
    timestamp: datetime
    item_count: int
    items: List[Dict[str, Any]]


class VitalsMessage(BaseModel):
    """
    Model for the vitals WebSocket message type.
    Contains character health, stamina, mana, and vitae information.
    """

    character_name: str
    timestamp: datetime
    health_current: int
    health_max: int
    health_percentage: float
    stamina_current: int
    stamina_max: int
    stamina_percentage: float
    mana_current: int
    mana_max: int
    mana_percentage: float
    vitae: int


class CharacterStatsMessage(BaseModel):
    """
    Model for the character_stats WebSocket message type.
    Contains character attributes, skills, allegiance, and progression data.
    Sent by plugin on login and every 10 minutes.
    """

    character_name: str
    timestamp: datetime
    level: Optional[int] = None
    total_xp: Optional[int] = None
    unassigned_xp: Optional[int] = None
    luminance_earned: Optional[int] = None
    luminance_total: Optional[int] = None
    deaths: Optional[int] = None
    race: Optional[str] = None
    gender: Optional[str] = None
    birth: Optional[str] = None
    current_title: Optional[int] = None
    skill_credits: Optional[int] = None
    burden: Optional[int] = None
    burden_units: Optional[int] = None
    encumbrance_capacity: Optional[int] = None
    attributes: Optional[dict] = None
    vitals: Optional[dict] = None
    skills: Optional[dict] = None
    allegiance: Optional[dict] = None
    active_item_enchantments: Optional[list] = None
    properties: Optional[dict] = (
        None  # Dict[int, int] — DWORD properties (augs, ratings, etc.)
    )
    titles: Optional[list] = None  # List[str] — character title names

@app.on_event("startup")
|
||
async def on_startup():
|
||
"""Event handler triggered when application starts up.
|
||
|
||
Attempts to connect to the database with retry logic to accommodate
|
||
potential startup delays (e.g., waiting for Postgres to be ready).
|
||
"""
|
||
max_attempts = 5
|
||
for attempt in range(1, max_attempts + 1):
|
||
try:
|
||
await database.connect()
|
||
await init_db_async()
|
||
logger.info(f"Database connected successfully on attempt {attempt}")
|
||
# Log connection pool configuration
|
||
try:
|
||
logger.info(f"Database connection established with pool configuration")
|
||
except Exception as pool_error:
|
||
logger.debug(f"Could not access pool details: {pool_error}")
|
||
break
|
||
except Exception as e:
|
||
logger.warning(
|
||
f"Database connection failed (attempt {attempt}/{max_attempts}): {e}"
|
||
)
|
||
if attempt < max_attempts:
|
||
await asyncio.sleep(5)
|
||
else:
|
||
raise RuntimeError(
|
||
f"Could not connect to database after {max_attempts} attempts"
|
||
)
|
||
# Start background cache refresh (live & trails)
|
||
global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
|
||
_cache_task = asyncio.create_task(_refresh_cache_loop())
|
||
_rares_cache_task = asyncio.create_task(_refresh_total_rares_cache())
|
||
_server_health_task = asyncio.create_task(monitor_server_health())
|
||
_cleanup_task = asyncio.create_task(cleanup_connections_loop())
|
||
logger.info(
|
||
"Background cache refresh, server monitoring, and connection cleanup tasks started"
|
||
)
|
||
# Seed default users on first run
|
||
await seed_users()
|
||
|
||
|
||
@app.on_event("shutdown")
|
||
async def on_shutdown():
|
||
"""Event handler triggered when application is shutting down.
|
||
|
||
Ensures the database connection is closed cleanly.
|
||
"""
|
||
# Stop cache refresh tasks
|
||
global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
|
||
if _cache_task:
|
||
logger.info("Stopping background cache refresh task")
|
||
_cache_task.cancel()
|
||
try:
|
||
await _cache_task
|
||
except asyncio.CancelledError:
|
||
pass
|
||
|
||
if _rares_cache_task:
|
||
logger.info("Stopping total rares cache refresh task")
|
||
_rares_cache_task.cancel()
|
||
try:
|
||
await _rares_cache_task
|
||
except asyncio.CancelledError:
|
||
pass
|
||
|
||
if _server_health_task:
|
||
logger.info("Stopping server health monitoring task")
|
||
_server_health_task.cancel()
|
||
try:
|
||
await _server_health_task
|
||
except asyncio.CancelledError:
|
||
pass
|
||
|
||
if _cleanup_task:
|
||
logger.info("Stopping WebSocket connection cleanup task")
|
||
_cleanup_task.cancel()
|
||
try:
|
||
await _cleanup_task
|
||
except asyncio.CancelledError:
|
||
pass
|
||
# Cancel any in-flight broadcast tasks
|
||
if _broadcast_tasks:
|
||
logger.info(f"Cancelling {len(_broadcast_tasks)} in-flight broadcast tasks")
|
||
for task in _broadcast_tasks:
|
||
task.cancel()
|
||
await asyncio.gather(*_broadcast_tasks, return_exceptions=True)
|
||
_broadcast_tasks.clear()
|
||
|
||
logger.info("Disconnecting from database")
|
||
await database.disconnect()
|
||
|
||
|
||
# ─── Authentication endpoints ────────────────────────────────────


@app.get("/login")
async def login_page():
    """Serve the login page."""
    login_html = Path("static/login.html")
    if login_html.exists():
        return HTMLResponse(login_html.read_text())
    return HTMLResponse("<h1>Login page not found</h1>", status_code=500)


# --------------- login security helpers ---------------
_login_attempts: Dict[str, float] = defaultdict(float)  # IP -> last attempt timestamp
_LOGIN_COOLDOWN = 5  # seconds between attempts per IP
_DUMMY_HASH = _bcrypt.hashpw(b"dummy_constant_time_pad", _bcrypt.gensalt()).decode()


@app.post("/login")
async def login(request: Request):
    """Authenticate user and set session cookie."""
    # Rate limit: 1 attempt per 5 seconds per IP
    client_ip = request.client.host if request.client else "unknown"
    now = time.monotonic()
    last = _login_attempts.get(client_ip, 0)
    if now - last < _LOGIN_COOLDOWN:
        raise HTTPException(
            status_code=429,
            detail="Too many login attempts. Try again in a few seconds.",
        )
    _login_attempts[client_ip] = now

    try:
        body = await request.json()
    except Exception:
        raise HTTPException(status_code=400, detail="Invalid request body")
    username = body.get("username", "").strip().lower()
    password = body.get("password", "")
    if not username or not password:
        raise HTTPException(status_code=400, detail="Username and password required")

    row = await database.fetch_one(
        "SELECT id, username, password_hash, is_admin FROM users WHERE LOWER(username) = :username",
        {"username": username},
    )
    # Constant-time: always run bcrypt even if user doesn't exist
    if row:
        pw_ok = _bcrypt.checkpw(password.encode(), row["password_hash"].encode())
    else:
        _bcrypt.checkpw(b"dummy", _DUMMY_HASH.encode())
        pw_ok = False
    if not pw_ok:
        raise HTTPException(status_code=401, detail="Invalid username or password")

    token = create_session_cookie(row["username"], row["is_admin"])
    response = JSONResponse(
        {"ok": True, "username": row["username"], "is_admin": row["is_admin"]}
    )
    response.set_cookie(
        "session",
        token,
        max_age=SESSION_MAX_AGE,
        httponly=True,
        samesite="lax",
        secure=True,
    )
    return response


@app.get("/logout")
async def logout():
    """Clear session cookie and redirect to login."""
    response = RedirectResponse("/login", status_code=302)
    response.delete_cookie("session")
    return response


@app.get("/me")
async def me(request: Request):
    """Return current user info from session."""
    user = getattr(request.state, "user", None)
    if not user:
        raise HTTPException(status_code=401, detail="Not authenticated")
    return {"username": user["username"], "is_admin": user["is_admin"]}

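# Example login exchange (illustrative; the credentials and port here are
# placeholders, not real seeded accounts):
#
#     >>> import httpx
#     >>> r = httpx.post(
#     ...     "http://localhost:8000/login",
#     ...     json={"username": "admin", "password": "secret"},
#     ... )  # doctest: +SKIP
#     >>> r.json()  # doctest: +SKIP
#     {'ok': True, 'username': 'admin', 'is_admin': True}
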
# ─── Admin user management ───────────────────────────────────────


def _require_admin(request: Request):
    """Raise 403 if current user is not admin."""
    user = getattr(request.state, "user", None)
    if not user or not user.get("is_admin"):
        raise HTTPException(status_code=403, detail="Admin access required")


@app.get("/admin/users")
async def admin_page(request: Request):
    """Serve the admin user management page."""
    _require_admin(request)
    admin_html = Path("static/admin.html")
    if admin_html.exists():
        return HTMLResponse(admin_html.read_text())
    return HTMLResponse("<h1>Admin page not found</h1>", status_code=500)


@app.get("/api-admin/users")
async def list_users(request: Request):
    """List all users (admin only)."""
    _require_admin(request)
    rows = await database.fetch_all(
        "SELECT id, username, is_admin, created_at FROM users ORDER BY id"
    )
    return {"users": [dict(r) for r in rows]}


@app.post("/api-admin/users")
async def create_user(request: Request):
    """Create a new user (admin only)."""
    _require_admin(request)
    body = await request.json()
    username = body.get("username", "").strip()
    password = body.get("password", "")
    is_admin = bool(body.get("is_admin", False))
    if not username or not password:
        raise HTTPException(status_code=400, detail="Username and password required")
    if len(password) < 4:
        raise HTTPException(
            status_code=400, detail="Password must be at least 4 characters"
        )

    existing = await database.fetch_one(
        "SELECT id FROM users WHERE LOWER(username) = :username",
        {"username": username.lower()},
    )
    if existing:
        raise HTTPException(status_code=409, detail="Username already exists")

    pw_hash = _bcrypt.hashpw(password.encode(), _bcrypt.gensalt()).decode()
    await database.execute(
        "INSERT INTO users (username, password_hash, is_admin) VALUES (:username, :password_hash, :is_admin)",
        {"username": username, "password_hash": pw_hash, "is_admin": is_admin},
    )
    return {"ok": True, "username": username}


@app.delete("/api-admin/users/{user_id}")
async def delete_user(user_id: int, request: Request):
    """Delete a user (admin only). Cannot delete yourself."""
    _require_admin(request)
    current_user = request.state.user["username"]
    row = await database.fetch_one(
        "SELECT username FROM users WHERE id = :id", {"id": user_id}
    )
    if not row:
        raise HTTPException(status_code=404, detail="User not found")
    if row["username"].lower() == current_user.lower():
        raise HTTPException(status_code=400, detail="Cannot delete yourself")
    await database.execute("DELETE FROM users WHERE id = :id", {"id": user_id})
    return {"ok": True}


@app.patch("/api-admin/users/{user_id}")
async def update_user(user_id: int, request: Request):
    """Update user (admin only). Supports password reset and admin toggle."""
    _require_admin(request)
    body = await request.json()
    row = await database.fetch_one(
        "SELECT id, username FROM users WHERE id = :id", {"id": user_id}
    )
    if not row:
        raise HTTPException(status_code=404, detail="User not found")

    if "password" in body:
        password = body["password"]
        if len(password) < 4:
            raise HTTPException(
                status_code=400, detail="Password must be at least 4 characters"
            )
        pw_hash = _bcrypt.hashpw(password.encode(), _bcrypt.gensalt()).decode()
        await database.execute(
            "UPDATE users SET password_hash = :pw WHERE id = :id",
            {"pw": pw_hash, "id": user_id},
        )
    if "is_admin" in body:
        await database.execute(
            "UPDATE users SET is_admin = :admin WHERE id = :id",
            {"admin": bool(body["is_admin"]), "id": user_id},
        )
    return {"ok": True}

# ------------------------ GET -----------------------------------
@app.get("/debug")
def debug():
    return {"status": "OK"}


@app.get("/debug/player-flapping")
async def get_player_flapping_debug():
    """Return player tracking data for debugging flapping issues."""
    try:
        # Analyze flapping patterns
        flapping_analysis = _analyze_flapping_patterns()

        # Analyze telemetry timing
        timing_analysis = _analyze_telemetry_timing()

        # Get recent events (last 50)
        recent_events = (
            _player_events[-50:] if len(_player_events) > 50 else _player_events
        )

        # Convert timestamps to ISO format for JSON serialization
        formatted_events = []
        for event in recent_events:
            formatted_event = event.copy()
            formatted_event["timestamp"] = event["timestamp"].isoformat()
            formatted_events.append(formatted_event)

        # Format history
        formatted_history = []
        for entry in _player_history:
            formatted_entry = {
                "timestamp": entry["timestamp"].isoformat(),
                "player_count": entry["player_count"],
                "player_names": entry["player_names"],
            }
            formatted_history.append(formatted_entry)

        # Format timing data for JSON serialization
        formatted_timing = {}
        for player_name, timing_data in timing_analysis["all_players"].items():
            formatted_timing[player_name] = timing_data.copy()
            # Round last_message_age for readability
            formatted_timing[player_name]["last_message_age"] = round(
                timing_data["last_message_age"], 1
            )

        return {
            "current_players": len(_cached_live.get("players", [])),
            "history": formatted_history,
            "recent_events": formatted_events,
            "flapping_analysis": flapping_analysis,
            "timing_analysis": {
                "all_players": formatted_timing,
                "problem_players": timing_analysis["problem_players"],
                "summary": timing_analysis["summary"],
            },
            "tracking_stats": {
                "history_entries": len(_player_history),
                "total_events": len(_player_events),
                "tracked_players": len(_player_telemetry_times),
                "max_history_size": _max_history_size,
                "max_events_size": _max_events_size,
            },
        }
    except Exception as e:
        logger.error(f"Failed to get player flapping debug data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/debug/websocket-health")
async def get_websocket_health():
    """Return simple WebSocket connection counts."""
    try:
        return {
            "plugin_connections": _plugin_connections,
            "browser_connections": _browser_connections,
            "total_connections": _plugin_connections + _browser_connections,
        }
    except Exception as e:
        logger.error(f"Failed to get WebSocket health data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/debug/database-performance")
async def get_database_performance():
    """Return simple database query performance statistics."""
    try:
        avg_query_time = (
            (_total_query_time / _total_queries) if _total_queries > 0 else 0.0
        )
        return {
            "total_queries": _total_queries,
            "total_query_time": round(_total_query_time, 3),
            "average_query_time": round(avg_query_time, 3),
        }
    except Exception as e:
        logger.error(f"Failed to get database performance data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/debug/recent-activity")
async def get_recent_activity():
    """Return recent telemetry activity feed."""
    try:
        return {
            "recent_messages": _recent_telemetry_messages.copy(),
            "total_messages": len(_recent_telemetry_messages),
            "max_messages": _max_recent_messages,
        }
    except Exception as e:
        logger.error(f"Failed to get recent activity data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")

# ─── Version endpoint ────────────────────────────────────────────
@app.get("/api-version")
async def get_version():
    """Return the application version (CalVer + git hash, set at build time)."""
    return {"version": os.environ.get("APP_VERSION", "dev")}


# ─── Issues board endpoints ──────────────────────────────────────
ISSUES_FILE = Path("static/openissues.json")


def _load_issues():
    if ISSUES_FILE.exists():
        try:
            return json.loads(ISSUES_FILE.read_text())
        except (json.JSONDecodeError, IOError):
            pass
    return []


def _save_issues(issues):
    ISSUES_FILE.write_text(json.dumps(issues, indent=2))


@app.get("/issues")
async def get_issues():
    """Return all open issues."""
    return {"issues": _load_issues()}


@app.post("/issues")
async def add_issue(request: Request, issue: dict):
    """Add a new issue. Author from session."""
    user = getattr(request.state, "user", {})
    issues = _load_issues()
    new_issue = {
        "id": uuid.uuid4().hex[:8],
        "title": _html.escape(issue.get("title", "").strip()),
        "description": _html.escape(issue.get("description", "").strip()),
        "category": _html.escape(issue.get("category", "other")),
        "author": user.get("username", "Anonymous"),
        "created": datetime.utcnow().isoformat(),
        "resolved": False,
        "comments": [],
    }
    if not new_issue["title"]:
        raise HTTPException(status_code=400, detail="Title is required")
    issues.insert(0, new_issue)
    _save_issues(issues)
    return new_issue


@app.patch("/issues/{issue_id}")
async def update_issue(issue_id: str, update: dict):
    """Update an issue (toggle resolved, edit title/description/category)."""
    issues = _load_issues()
    found = None
    for i in issues:
        if i.get("id") == issue_id:
            if "resolved" in update:
                i["resolved"] = bool(update["resolved"])
            if "title" in update:
                title = _html.escape(update["title"].strip())
                if not title:
                    raise HTTPException(status_code=400, detail="Title cannot be empty")
                i["title"] = title
            if "description" in update:
                i["description"] = _html.escape(update["description"].strip())
            if "category" in update:
                i["category"] = _html.escape(update["category"])
            found = i
            break
    if not found:
        raise HTTPException(status_code=404, detail="Issue not found")
    _save_issues(issues)
    return found


@app.post("/issues/{issue_id}/comments")
async def add_comment(issue_id: str, request: Request, comment: dict):
    """Add a comment to an issue. Author from session."""
    user = getattr(request.state, "user", {})
    issues = _load_issues()
    found = None
    for i in issues:
        if i.get("id") == issue_id:
            found = i
            break
    if not found:
        raise HTTPException(status_code=404, detail="Issue not found")
    text = _html.escape(comment.get("text", "").strip())
    if not text:
        raise HTTPException(status_code=400, detail="Comment text is required")
    new_comment = {
        "id": uuid.uuid4().hex[:8],
        "author": user.get("username", "Anonymous"),
        "text": text,
        "created": datetime.utcnow().isoformat(),
    }
    if "comments" not in found:
        found["comments"] = []
    found["comments"].append(new_comment)
    _save_issues(issues)
    return new_comment


@app.delete("/issues/{issue_id}")
async def delete_issue(issue_id: str):
    """Permanently delete an issue."""
    issues = _load_issues()
    issues = [i for i in issues if i.get("id") != issue_id]
    _save_issues(issues)
    return {"status": "ok"}

@app.get("/vital-sharing/peers")
|
||
async def get_vital_sharing_peers():
|
||
"""Return the current vital-sharing peer list for the NetworkUI window."""
|
||
peers = []
|
||
for char, entry in _vital_sharing_peer_state.items():
|
||
peers.append(
|
||
{
|
||
**entry,
|
||
"subscribed": char in _vital_sharing_subscribers,
|
||
"plugin_connected": char in plugin_conns,
|
||
}
|
||
)
|
||
peers.sort(key=lambda p: p.get("character_name") or "")
|
||
return {
|
||
"peers": peers,
|
||
"subscriber_count": len(_vital_sharing_subscribers),
|
||
}
|
||
|
||
|
||
@app.get("/combat-stats/{character_name}")
async def get_combat_stats(character_name: str):
    """Get lifetime combat stats for a character."""
    # Prefer live in-memory data (more up-to-date), fall back to DB
    live = live_combat_stats.get(character_name)
    if live:
        return {
            "character_name": character_name,
            "session": live.get("session"),
            "lifetime": live.get("lifetime"),
        }
    row = await database.fetch_one(
        "SELECT stats_data FROM combat_stats WHERE character_name = :name",
        {"name": character_name},
    )
    if not row:
        return {"character_name": character_name, "session": None, "lifetime": None}
    return {
        "character_name": character_name,
        "session": None,
        "lifetime": row["stats_data"],
    }
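# Illustrative response shapes for GET /combat-stats/{character_name} (the
# character name and stat blobs below are placeholders; "session"/"lifetime"
# hold whatever breakdown the plugin last sent):
#   online:  {"character_name": "Zorbo", "session": {...}, "lifetime": {...}}
#   offline: {"character_name": "Zorbo", "session": None, "lifetime": {...}}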


@app.get("/combat-stats")
async def get_all_combat_stats():
    """Get combat stats for all characters, preferring live in-memory data."""
    results = []
    seen = set()
    # Live data first (most current)
    for char, data in live_combat_stats.items():
        seen.add(char)
        results.append({
            "character_name": char,
            "session": data.get("session"),
            "lifetime": data.get("lifetime"),
        })
    # Fill in from DB for characters not currently live
    rows = await database.fetch_all("SELECT character_name, stats_data FROM combat_stats")
    for r in rows:
        if r["character_name"] not in seen:
            results.append({
                "character_name": r["character_name"],
                "session": None,
                "lifetime": r["stats_data"],
            })
    results.sort(key=lambda x: x["character_name"])
    return {"stats": results}
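# Illustrative aggregate response from GET /combat-stats (placeholder values):
#   {"stats": [
#       {"character_name": "Aluvian", "session": {...}, "lifetime": {...}},  # live
#       {"character_name": "Zorbo", "session": None, "lifetime": {...}},     # DB only
#   ]}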


@app.get("/server-health")
async def get_server_health():
    """Return current server health status."""
    try:
        # Get latest status from database if the cache is stale
        if not _server_status_cache.get("last_check") or (
            datetime.now(timezone.utc)
            - datetime.fromisoformat(
                _server_status_cache["last_check"].replace("Z", "+00:00")
            )
            > timedelta(minutes=2)
        ):
            row = await database.fetch_one(
                "SELECT * FROM server_status WHERE server_name = :name",
                {"name": "Coldeve"},
            )

            if row:
                _server_status_cache.update(
                    {
                        "status": row["current_status"],
                        "latency_ms": row["last_latency_ms"],
                        "player_count": row["last_player_count"],
                        "last_check": row["last_check"].isoformat()
                        if row["last_check"]
                        else None,
                        "uptime_seconds": row["total_uptime_seconds"],
                        "last_restart": row["last_restart"].isoformat()
                        if row["last_restart"]
                        else None,
                    }
                )

        # Format uptime; `or 0` also guards against a NULL total_uptime_seconds
        uptime_seconds = _server_status_cache.get("uptime_seconds") or 0
        days = uptime_seconds // 86400
        hours = (uptime_seconds % 86400) // 3600
        minutes = (uptime_seconds % 3600) // 60

        uptime_str = (
            f"{days}d {hours}h {minutes}m" if days > 0 else f"{hours}h {minutes}m"
        )
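        # Worked example: uptime_seconds = 90061 gives days = 1, hours = 1,
        # minutes = 1, so uptime_str is "1d 1h 1m"; 3720 gives "1h 2m".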

        return {
            "server_name": "Coldeve",
            "status": _server_status_cache.get("status", "unknown"),
            "latency_ms": _server_status_cache.get("latency_ms"),
            "player_count": _server_status_cache.get("player_count"),
            "uptime": uptime_str,
            "uptime_seconds": uptime_seconds,
            "last_restart": _server_status_cache.get("last_restart"),
            "last_check": _server_status_cache.get("last_check"),
        }

    except Exception as e:
        logger.error(f"Failed to get server health data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/quest-status")
async def get_quest_status():
    """Return current cached quest status for all players."""
    try:
        # Return the quest cache with structured data
        return {
            "quest_data": _quest_status_cache,
            "tracked_quests": [
                "Stipend Collection Timer",
                "Blank Augmentation Gem Pickup Timer",
                "Insatiable Eater Jaw",
            ],
            "player_count": len(_quest_status_cache),
        }
    except Exception as e:
        logger.error(f"Failed to get quest status data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/portals")
async def get_portals():
    """Return all active portals; expired rows are purged by the cleanup job."""
    try:
        # No need for a cutoff check - the cleanup job handles expiration
        query = """
            SELECT portal_name, ns, ew, z, discovered_at, discovered_by
            FROM portals
            ORDER BY discovered_at DESC
        """

        rows = await database.fetch_all(query)

        # Named portal_list so it does not shadow the imported `portals` table
        portal_list = []
        for row in rows:
            portal = {
                "portal_name": row["portal_name"],
                "coordinates": {"ns": row["ns"], "ew": row["ew"], "z": row["z"]},
                "discovered_at": row["discovered_at"].isoformat(),
                "discovered_by": row["discovered_by"],
            }
            portal_list.append(portal)

        return {"portals": portal_list, "portal_count": len(portal_list)}

    except Exception as e:
        logger.error(f"Failed to get portals data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/live", response_model=dict)
@app.get("/live/", response_model=dict)
async def get_live_players():
    """Return cached live telemetry per character."""
    try:
        return JSONResponse(content=jsonable_encoder(_cached_live))
    except Exception as e:
        logger.error(f"Failed to get live players: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


# --- GET Trails ---------------------------------
@app.get("/trails")
@app.get("/trails/")
async def get_trails(
    seconds: int = Query(600, ge=0, description="Lookback window in seconds"),
):
    """Return cached trails (updated every 5 seconds)."""
    try:
        return JSONResponse(content=jsonable_encoder(_cached_trails))
    except Exception as e:
        logger.error(f"Failed to get trails: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/total-rares")
@app.get("/total-rares/")
async def get_total_rares():
    """Return cached total rares statistics (updated every 5 minutes)."""
    try:
        return JSONResponse(content=jsonable_encoder(_cached_total_rares))
    except Exception as e:
        logger.error(f"Failed to get total rares: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/total-kills")
@app.get("/total-kills/")
async def get_total_kills():
    """Return cached total kills statistics (updated every 5 minutes)."""
    try:
        return JSONResponse(content=jsonable_encoder(_cached_total_kills))
    except Exception as e:
        logger.error(f"Failed to get total kills: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


# --- GET Spawn Heat Map Endpoint ---------------------------------
@app.get("/spawns/heatmap")
async def get_spawn_heatmap_data(
    hours: int = Query(
        24, ge=1, le=168, description="Lookback window in hours (1-168)"
    ),
    limit: int = Query(
        10000, ge=100, le=50000, description="Maximum number of spawn points to return"
    ),
):
    """
    Aggregate spawn locations for heat-map visualization.

    Returns spawn event coordinates grouped by location with intensity counts
    for the specified time window.

    Response format:
    {
        "spawn_points": [{"ew": float, "ns": float, "intensity": int}, ...],
        "total_points": int,
        "timestamp": "UTC-ISO"
    }
    """
    try:
        cutoff = datetime.now(timezone.utc) - timedelta(hours=hours)

        # Aggregate spawn events by coordinates within time window
        query = """
            SELECT ew, ns, COUNT(*) AS spawn_count
            FROM spawn_events
            WHERE timestamp >= :cutoff
            GROUP BY ew, ns
            ORDER BY spawn_count DESC
            LIMIT :limit
        """

        rows = await database.fetch_all(query, {"cutoff": cutoff, "limit": limit})

        spawn_points = [
            {
                "ew": float(row["ew"]),
                "ns": float(row["ns"]),
                "intensity": int(row["spawn_count"]),
            }
            for row in rows
        ]

        result = {
            "spawn_points": spawn_points,
            "total_points": len(spawn_points),
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "hours_window": hours,
        }

        logger.debug(
            f"Heat map data: {len(spawn_points)} unique spawn locations from last {hours} hours"
        )
        return JSONResponse(content=jsonable_encoder(result))

    except Exception as e:
        logger.error(f"Heat map query failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Spawn heat map query failed")


# --- GET Inventory Endpoints ---------------------------------
@app.get("/inventory/{character_name}")
async def get_character_inventory(character_name: str):
    """Get the complete inventory for a specific character - inventory service only."""
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(
                f"{INVENTORY_SERVICE_URL}/inventory/{character_name}"
            )

            if response.status_code == 200:
                return JSONResponse(content=response.json())
            elif response.status_code == 404:
                raise HTTPException(
                    status_code=404,
                    detail=f"No inventory found for character '{character_name}'",
                )
            else:
                logger.error(
                    f"Inventory service returned {response.status_code} for {character_name}"
                )
                raise HTTPException(status_code=502, detail="Inventory service error")

    except httpx.RequestError as e:
        logger.error(f"Could not reach inventory service: {e}")
        raise HTTPException(status_code=503, detail="Inventory service unavailable")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            f"Failed to get inventory for {character_name}: {e}", exc_info=True
        )
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/inventory/{character_name}/search")
async def search_character_inventory(
    character_name: str,
    name: str = Query(None, description="Search by item name (partial match)"),
    object_class: int = Query(None, description="Filter by ObjectClass"),
    min_value: int = Query(None, description="Minimum item value"),
    max_value: int = Query(None, description="Maximum item value"),
    min_burden: int = Query(None, description="Minimum burden"),
    max_burden: int = Query(None, description="Maximum burden"),
):
    """Search and filter inventory items for a character with various criteria."""
    try:
        conditions = ["character_name = :character_name"]
        params = {"character_name": character_name}

        if name:
            conditions.append("name ILIKE :name")
            params["name"] = f"%{name}%"

        if object_class is not None:
            conditions.append("object_class = :object_class")
            params["object_class"] = object_class

        if min_value is not None:
            conditions.append("value >= :min_value")
            params["min_value"] = min_value

        if max_value is not None:
            conditions.append("value <= :max_value")
            params["max_value"] = max_value

        if min_burden is not None:
            conditions.append("burden >= :min_burden")
            params["min_burden"] = min_burden

        if max_burden is not None:
            conditions.append("burden <= :max_burden")
            params["max_burden"] = max_burden

        query = f"""
            SELECT name, icon, object_class, value, burden, has_id_data, item_data, timestamp
            FROM character_inventories
            WHERE {" AND ".join(conditions)}
            ORDER BY value DESC, name
        """

        rows = await database.fetch_all(query, params)

        items = []
        for row in rows:
            item = dict(row)
            items.append(item)

        return JSONResponse(
            content=jsonable_encoder(
                {
                    "character_name": character_name,
                    "item_count": len(items),
                    "search_criteria": {
                        "name": name,
                        "object_class": object_class,
                        "min_value": min_value,
                        "max_value": max_value,
                        "min_burden": min_burden,
                        "max_burden": max_burden,
                    },
                    "items": items,
                }
            )
        )
    except Exception as e:
        logger.error(
            f"Failed to search inventory for {character_name}: {e}", exc_info=True
        )
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/inventories")
async def list_characters_with_inventories():
    """List all characters that have stored inventories with item counts."""
    try:
        query = """
            SELECT character_name, COUNT(*) AS item_count, MAX(timestamp) AS last_updated
            FROM character_inventories
            GROUP BY character_name
            ORDER BY last_updated DESC
        """
        rows = await database.fetch_all(query)

        characters = []
        for row in rows:
            characters.append(
                {
                    "character_name": row["character_name"],
                    "item_count": row["item_count"],
                    "last_updated": row["last_updated"],
                }
            )

        return JSONResponse(
            content=jsonable_encoder(
                {"characters": characters, "total_characters": len(characters)}
            )
        )
    except Exception as e:
        logger.error(f"Failed to list inventory characters: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


# --- Inventory Service Character List Proxy ---------------------
@app.get("/inventory-characters")
async def get_inventory_characters():
    """Get character list from inventory service - proxy to avoid routing conflicts."""
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(f"{INVENTORY_SERVICE_URL}/characters/list")

            if response.status_code == 200:
                return JSONResponse(content=response.json())
            else:
                logger.error(
                    f"Inventory service returned {response.status_code}: {response.text}"
                )
                raise HTTPException(
                    status_code=response.status_code,
                    detail="Failed to get characters from inventory service",
                )

    except httpx.RequestError as e:
        logger.error(f"Could not reach inventory service: {e}")
        raise HTTPException(status_code=503, detail="Inventory service unavailable")
    except HTTPException:
        # Re-raise instead of letting the generic handler below mask it as a
        # 500, matching the other inventory proxy endpoints
        raise
    except Exception as e:
        logger.error(
            f"Failed to proxy inventory characters request: {e}", exc_info=True
        )
        raise HTTPException(
            status_code=500, detail="Failed to get inventory characters"
        )


# --- Inventory Search Service Proxy Endpoints -------------------
@app.get("/search/items")
async def search_items_proxy(
    text: str = Query(
        None, description="Search item names, descriptions, or properties"
    ),
    character: str = Query(None, description="Limit search to specific character"),
    include_all_characters: bool = Query(
        False, description="Search across all characters"
    ),
    equipment_status: str = Query(None, description="equipped, unequipped, or all"),
    equipment_slot: int = Query(None, description="Equipment slot mask"),
    # Item category filtering
    armor_only: bool = Query(False, description="Show only armor items"),
    jewelry_only: bool = Query(False, description="Show only jewelry items"),
    weapon_only: bool = Query(False, description="Show only weapon items"),
    # Spell filtering
    has_spell: str = Query(None, description="Must have this specific spell (by name)"),
    spell_contains: str = Query(None, description="Spell name contains this text"),
    legendary_cantrips: str = Query(
        None, description="Comma-separated list of legendary cantrip names"
    ),
    # Combat properties
    min_damage: int = Query(None, description="Minimum damage"),
    max_damage: int = Query(None, description="Maximum damage"),
    min_armor: int = Query(None, description="Minimum armor level"),
    max_armor: int = Query(None, description="Maximum armor level"),
    min_attack_bonus: float = Query(None, description="Minimum attack bonus"),
    min_crit_damage_rating: int = Query(
        None, description="Minimum critical damage rating"
    ),
    min_damage_rating: int = Query(None, description="Minimum damage rating"),
    min_heal_boost_rating: int = Query(None, description="Minimum heal boost rating"),
    max_level: int = Query(None, description="Maximum wield level requirement"),
    min_level: int = Query(None, description="Minimum wield level requirement"),
    material: str = Query(None, description="Material type (partial match)"),
    min_workmanship: float = Query(None, description="Minimum workmanship"),
    has_imbue: bool = Query(None, description="Has imbue effects"),
    item_set: str = Query(None, description="Item set name (partial match)"),
    min_tinks: int = Query(None, description="Minimum tinker count"),
    bonded: bool = Query(None, description="Bonded status"),
    attuned: bool = Query(None, description="Attuned status"),
    unique: bool = Query(None, description="Unique item status"),
    is_rare: bool = Query(None, description="Rare item status"),
    min_condition: int = Query(None, description="Minimum condition percentage"),
    min_value: int = Query(None, description="Minimum item value"),
    max_value: int = Query(None, description="Maximum item value"),
    max_burden: int = Query(None, description="Maximum burden"),
    sort_by: str = Query(
        "name", description="Sort field: name, value, damage, armor, workmanship"
    ),
    sort_dir: str = Query("asc", description="Sort direction: asc or desc"),
    page: int = Query(1, ge=1, description="Page number"),
    limit: int = Query(50, ge=1, le=200, description="Items per page"),
):
    """Proxy to inventory service comprehensive item search."""
    try:
        # Build query parameters
        params = {}
        if text:
            params["text"] = text
        if character:
            params["character"] = character
        if include_all_characters:
            params["include_all_characters"] = include_all_characters
        if equipment_status:
            params["equipment_status"] = equipment_status
        if equipment_slot is not None:
            params["equipment_slot"] = equipment_slot
        # Category filtering
        if armor_only:
            params["armor_only"] = armor_only
        if jewelry_only:
            params["jewelry_only"] = jewelry_only
        if weapon_only:
            params["weapon_only"] = weapon_only
        # Spell filtering
        if has_spell:
            params["has_spell"] = has_spell
        if spell_contains:
            params["spell_contains"] = spell_contains
        if legendary_cantrips:
            params["legendary_cantrips"] = legendary_cantrips
        # Combat properties
        if min_damage is not None:
            params["min_damage"] = min_damage
        if max_damage is not None:
            params["max_damage"] = max_damage
        if min_armor is not None:
            params["min_armor"] = min_armor
        if max_armor is not None:
            params["max_armor"] = max_armor
        if min_attack_bonus is not None:
            params["min_attack_bonus"] = min_attack_bonus
        if min_crit_damage_rating is not None:
            params["min_crit_damage_rating"] = min_crit_damage_rating
        if min_damage_rating is not None:
            params["min_damage_rating"] = min_damage_rating
        if min_heal_boost_rating is not None:
            params["min_heal_boost_rating"] = min_heal_boost_rating
        if max_level is not None:
            params["max_level"] = max_level
        if min_level is not None:
            params["min_level"] = min_level
        if material:
            params["material"] = material
        if min_workmanship is not None:
            params["min_workmanship"] = min_workmanship
        if has_imbue is not None:
            params["has_imbue"] = has_imbue
        if item_set:
            params["item_set"] = item_set
        if min_tinks is not None:
            params["min_tinks"] = min_tinks
        if bonded is not None:
            params["bonded"] = bonded
        if attuned is not None:
            params["attuned"] = attuned
        if unique is not None:
            params["unique"] = unique
        if is_rare is not None:
            params["is_rare"] = is_rare
        if min_condition is not None:
            params["min_condition"] = min_condition
        if min_value is not None:
            params["min_value"] = min_value
        if max_value is not None:
            params["max_value"] = max_value
        if max_burden is not None:
            params["max_burden"] = max_burden
        params["sort_by"] = sort_by
        params["sort_dir"] = sort_dir
        params["page"] = page
        params["limit"] = limit

        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.get(
                f"{INVENTORY_SERVICE_URL}/search/items", params=params
            )

            if response.status_code == 200:
                return JSONResponse(content=response.json())
            else:
                logger.error(
                    f"Inventory search service returned {response.status_code}"
                )
                raise HTTPException(
                    status_code=response.status_code,
                    detail="Inventory search service error",
                )

    except httpx.RequestError as e:
        logger.error(f"Could not reach inventory service: {e}")
        raise HTTPException(status_code=503, detail="Inventory service unavailable")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to search items: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/search/equipped/{character_name}")
async def search_equipped_items_proxy(
    character_name: str,
    slot: int = Query(None, description="Specific equipment slot mask"),
):
    """Proxy to inventory service equipped items search."""
    try:
        params = {}
        if slot is not None:
            params["slot"] = slot

        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(
                f"{INVENTORY_SERVICE_URL}/search/equipped/{character_name}",
                params=params,
            )

            if response.status_code == 200:
                return JSONResponse(content=response.json())
            elif response.status_code == 404:
                raise HTTPException(
                    status_code=404,
                    detail=f"No equipped items found for character '{character_name}'",
                )
            else:
                logger.error(
                    f"Inventory service returned {response.status_code} for equipped items search"
                )
                raise HTTPException(
                    status_code=response.status_code, detail="Inventory service error"
                )

    except httpx.RequestError as e:
        logger.error(f"Could not reach inventory service: {e}")
        raise HTTPException(status_code=503, detail="Inventory service unavailable")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            f"Failed to search equipped items for {character_name}: {e}", exc_info=True
        )
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/search/upgrades/{character_name}/{slot}")
async def find_equipment_upgrades_proxy(
    character_name: str,
    slot: int,
    upgrade_type: str = Query(
        "damage", description="What to optimize for: damage, armor, workmanship, value"
    ),
):
    """Proxy to inventory service equipment upgrades search."""
    try:
        params = {"upgrade_type": upgrade_type}

        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(
                f"{INVENTORY_SERVICE_URL}/search/upgrades/{character_name}/{slot}",
                params=params,
            )

            if response.status_code == 200:
                return JSONResponse(content=response.json())
            elif response.status_code == 404:
                raise HTTPException(
                    status_code=404,
                    detail=f"No upgrade options found for character '{character_name}' slot {slot}",
                )
            else:
                logger.error(
                    f"Inventory service returned {response.status_code} for upgrades search"
                )
                raise HTTPException(
                    status_code=response.status_code, detail="Inventory service error"
                )

    except httpx.RequestError as e:
        logger.error(f"Could not reach inventory service: {e}")
        raise HTTPException(status_code=503, detail="Inventory service unavailable")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            f"Failed to find equipment upgrades for {character_name} slot {slot}: {e}",
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/sets/list")
async def list_equipment_sets_proxy():
    """Proxy to inventory service equipment sets list."""
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(f"{INVENTORY_SERVICE_URL}/sets/list")

            if response.status_code == 200:
                return JSONResponse(content=response.json())
            else:
                logger.error(
                    f"Inventory service returned {response.status_code} for sets list"
                )
                raise HTTPException(
                    status_code=response.status_code, detail="Inventory service error"
                )

    except httpx.RequestError as e:
        logger.error(f"Could not reach inventory service: {e}")
        raise HTTPException(status_code=503, detail="Inventory service unavailable")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to list equipment sets: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


# -------------------- WebSocket endpoints -----------------------
## WebSocket connection tracking
# Set of browser WebSocket clients subscribed to live updates
browser_conns: set[WebSocket] = set()
# Mapping of plugin clients by character_name to their WebSocket for command forwarding
plugin_conns: Dict[str, WebSocket] = {}

# --- Vital sharing (cross-machine VTankFellowHeals replacement) ----------
# Characters that have opted-in to vital/position/item/cast sharing. Backend
# forwards share_* messages only between subscribers and excludes the sender.
_vital_sharing_subscribers: set[str] = set()
# Latest snapshot per character so new peers can query /vital-sharing/peers
# and so the browser NetworkUI can populate without waiting for the next tick.
_vital_sharing_peer_state: Dict[str, dict] = {}

# --- Combat stats (Mag-Tools style per-character combat tracking) ----------
# Latest combat_stats payload per character for real-time display.
live_combat_stats: Dict[str, dict] = {}
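# Illustrative combat_stats frame, matching what the handler in
# ws_receive_snapshots expects (field names real, values made up):
#   {
#       "type": "combat_stats",
#       "character_name": "Zorbo",
#       "session_id": "...",
#       "session": {...},    # per-session breakdown blob
#       "lifetime": {...},   # lifetime aggregate, upserted into combat_stats
#   }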


async def _broadcast_share_to_plugin_clients(data: dict, origin: str) -> None:
    """Forward a share_* message to all opted-in plugin clients except origin.

    Transient send failures are logged but do NOT evict the subscriber —
    eviction only happens on actual WebSocket disconnect (handled in the
    plugin receive loop's finally block). Evicting aggressively here caused
    a bug where a single slow send would silently drop a subscriber and
    force the user to retoggle vital sharing to get peer updates again.
    """
    if not _vital_sharing_subscribers:
        return
    for char_name, ws in list(plugin_conns.items()):
        if char_name == origin:
            continue
        if char_name not in _vital_sharing_subscribers:
            continue
        try:
            await asyncio.wait_for(ws.send_json(data), timeout=1.0)
        except Exception as e:
            logger.debug(f"Failed forwarding share_* to {char_name}: {e}")


def _update_vital_sharing_peer_state(msg_type: str, data: dict) -> None:
    """Keep the last-known vitals/position/item/tags snapshot per character."""
    char = data.get("character_name")
    if not char:
        return
    entry = _vital_sharing_peer_state.setdefault(
        char,
        {
            "character_name": char,
            "tags": [],
            "vitals": None,
            "position": None,
            "items": None,
            "connected": True,
            "last_update": None,
        },
    )
    entry["last_update"] = data.get("timestamp")
    if "tags" in data and isinstance(data.get("tags"), list):
        entry["tags"] = data["tags"]
    if msg_type == "share_vital_update":
        entry["vitals"] = {
            "current_health": data.get("current_health"),
            "max_health": data.get("max_health"),
            "current_stamina": data.get("current_stamina"),
            "max_stamina": data.get("max_stamina"),
            "current_mana": data.get("current_mana"),
            "max_mana": data.get("max_mana"),
        }
    elif msg_type == "share_position_update":
        entry["position"] = {
            "ew": data.get("ew"),
            "ns": data.get("ns"),
            "z": data.get("z"),
            "heading": data.get("heading"),
        }
    elif msg_type == "share_item_update":
        entry["items"] = data.get("items")
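# Illustrative peer-state entry built by the function above after one
# share_vital_update and one share_position_update (values made up):
#   {
#       "character_name": "Zorbo", "tags": ["healer"],
#       "vitals": {"current_health": 250, "max_health": 300, ...},
#       "position": {"ew": 12.3, "ns": -45.6, "z": 0.0, "heading": 90.0},
#       "items": None, "connected": True, "last_update": "2025-01-01T00:00:00Z",
#   }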


async def _send_to_browser(ws: WebSocket, data: dict) -> WebSocket | None:
    """Send data to a single browser client. Returns the ws if it failed, None if ok."""
    try:
        await asyncio.wait_for(ws.send_json(data), timeout=1.0)
    except (WebSocketDisconnect, RuntimeError, ConnectionAbortedError) as e:
        logger.debug(f"Detected disconnected browser client: {e}")
        return ws
    except asyncio.TimeoutError:
        logger.warning(
            "Timed out broadcasting to browser client; removing stale connection"
        )
        return ws
    except Exception as e:
        logger.warning(f"Unexpected error broadcasting to browser client: {e}")
        return ws
    return None


async def _do_broadcast(data: dict):
    """Send data to all browser clients concurrently. Runs as a background task."""
    clients = list(browser_conns)
    if not clients:
        return
    results = await asyncio.gather(*(_send_to_browser(ws, data) for ws in clients))
    for ws in results:
        if ws is not None:
            browser_conns.discard(ws)


async def _broadcast_to_browser_clients(snapshot: dict):
    """Broadcast a telemetry or chat message to all connected browser clients.

    Fires off a background task so the plugin receive loop is never blocked
    by slow browser connections.
    """
    data = jsonable_encoder(snapshot)
    task = asyncio.create_task(_do_broadcast(data))
    _broadcast_tasks.add(task)
    task.add_done_callback(_broadcast_tasks.discard)


async def _forward_to_inventory_service(inventory_msg: FullInventoryMessage):
    """Forward inventory data to the inventory microservice for processing."""
    try:
        # Prepare data for inventory service
        inventory_data = {
            "character_name": inventory_msg.character_name,
            "timestamp": inventory_msg.timestamp.isoformat(),
            "items": inventory_msg.items,
        }

        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.post(
                f"{INVENTORY_SERVICE_URL}/process-inventory", json=inventory_data
            )

            if response.status_code == 200:
                result = response.json()
                logger.info(
                    f"Inventory service processed {result['processed']} items for {inventory_msg.character_name}"
                )
            else:
                logger.error(
                    f"Inventory service error {response.status_code}: {response.text}"
                )

    except Exception as e:
        logger.error(f"Failed to forward inventory to service: {e}")
        # Don't raise - this shouldn't block the main storage


async def _store_inventory(inventory_msg: FullInventoryMessage):
    """Forward inventory data to inventory microservice for processing and storage."""
    try:
        # Forward to inventory microservice for enhanced processing and storage
        await _forward_to_inventory_service(inventory_msg)

        # Optional: Create JSON file for debugging (can be removed in production)
        inventory_dir = Path("./inventory")
        inventory_dir.mkdir(exist_ok=True)

        file_path = inventory_dir / f"{inventory_msg.character_name}_inventory.json"
        inventory_data = {
            "character_name": inventory_msg.character_name,
            "timestamp": inventory_msg.timestamp.isoformat(),
            "item_count": inventory_msg.item_count,
            "items": inventory_msg.items,
        }

        with open(file_path, "w") as f:
            json.dump(inventory_data, f, indent=2)

    except Exception as e:
        logger.error(
            f"Failed to forward inventory for {inventory_msg.character_name}: {e}",
            exc_info=True,
        )
        raise


@app.websocket("/ws/position")
async def ws_receive_snapshots(
    websocket: WebSocket,
    secret: str | None = Query(None),
    x_plugin_secret: str | None = Header(None),
):
    """WebSocket endpoint for plugin clients to send telemetry and events.

    Validates a shared secret for authentication, then listens for messages of
    various types (register, spawn, telemetry, rare, chat) and handles each:
    - register: record plugin WebSocket for command forwarding
    - spawn: persist spawn event
    - telemetry: store snapshot, update stats, broadcast to browsers
    - rare: update total and session rare counts, persist event
    - chat: broadcast chat messages to browsers
    """
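    # Illustrative frames this endpoint accepts (field names taken from the
    # handlers below; values are made up):
    #   {"type": "register", "character_name": "Zorbo"}
    #   {"type": "telemetry", "character_name": "Zorbo", "session_id": "...", "kills": 42, ...}
    #   {"type": "rare", "character_name": "Zorbo", "name": "..."}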
    global _plugin_connections

    # Authenticate plugin connection using shared secret
    key = secret or x_plugin_secret
    if key != SHARED_SECRET:
        # Reject without completing the WebSocket handshake
        logger.warning(
            f"Plugin WebSocket authentication failed from {websocket.client}"
        )
        await websocket.close(code=1008)
        return
    # Accept the WebSocket connection
    await websocket.accept()
    logger.info(f"🔌 PLUGIN_CONNECTED: {websocket.client}")

    # Track plugin connection
    _plugin_connections += 1

    try:
        while True:
            # Read next text frame
            try:
                raw = await websocket.receive_text()
                # Debug: log all incoming plugin WebSocket messages
                logger.debug(f"Plugin WebSocket RX from {websocket.client}: {raw}")
            except WebSocketDisconnect:
                logger.info(f"🔌 PLUGIN_DISCONNECTED: {websocket.client}")
                break
            # Parse JSON payload
            try:
                data = json.loads(raw)
            except json.JSONDecodeError as e:
                logger.warning(f"Invalid JSON from plugin {websocket.client}: {e}")
                continue
            msg_type = data.get("type")
            # --- Registration: associate character_name with this plugin socket ---
            if msg_type == "register":
                name = data.get("character_name") or data.get("player_name")
                if isinstance(name, str):
                    plugin_conns[name] = websocket
                    live_equipment_cantrip_states.pop(name, None)
                    logger.info(f"📋 PLUGIN_REGISTERED: {name} from {websocket.client}")
                continue
            # --- Spawn event: persist to spawn_events table ---
            if msg_type == "spawn":
                payload = data.copy()
                payload.pop("type", None)
                try:
                    spawn = SpawnEvent.parse_obj(payload)
                    await database.execute(spawn_events.insert().values(**spawn.dict()))
                    logger.debug(
                        f"Recorded spawn event: {spawn.mob} by {spawn.character_name}"
                    )
                except Exception as e:
                    logger.error(f"Failed to process spawn event: {e}")
                continue
            # --- Telemetry message: persist snapshot and update kill stats ---
            if msg_type == "telemetry":
                # Parse telemetry snapshot and update in-memory state
                payload = data.copy()
                payload.pop("type", None)
                character_name = payload.get("character_name", "unknown")

                # Track message receipt and start timing
                telemetry_start_time = time.time()
                logger.info(
                    f"📨 TELEMETRY_RECEIVED: {character_name} from {websocket.client}"
                )

                try:
                    snap = TelemetrySnapshot.parse_obj(payload)
                    live_snapshots[snap.character_name] = snap.dict()
                    # Prepare data and compute kill delta
                    db_data = snap.dict()
                    db_data["rares_found"] = 0
                    key = (snap.session_id, snap.character_name)

                    # Get last recorded kill count for this session
                    if key in ws_receive_snapshots._last_kills:
                        last = ws_receive_snapshots._last_kills[key]
                    else:
                        # Cache miss - check database for last kill count for this session
                        row = await database.fetch_one(
                            "SELECT kills FROM telemetry_events WHERE character_name = :char AND session_id = :session ORDER BY timestamp DESC LIMIT 1",
                            {"char": snap.character_name, "session": snap.session_id},
                        )
                        last = row["kills"] if row else 0
                        logger.debug(
                            f"Cache miss for {snap.character_name} session {snap.session_id[:8]}: loaded last_kills={last} from database"
                        )

                    delta = snap.kills - last
                    # Persist snapshot and any kill delta in a single transaction
                    db_start_time = time.time()

                    # Log connection pool status before database operation
                    try:
                        pool_status = (
                            f"pool_size:{database._pool._queue.qsize()}"
                            if hasattr(database, "_pool")
                            and hasattr(database._pool, "_queue")
                            else "pool_status:unknown"
                        )
                    except Exception:  # narrowed from a bare except
                        pool_status = "pool_status:error"

                    logger.info(
                        f"💾 TELEMETRY_DB_WRITE_ATTEMPT: {snap.character_name} session:{snap.session_id[:8]} kills:{snap.kills} delta:{delta} {pool_status}"
                    )

                    try:
                        async with database.transaction():
                            await database.execute(
                                telemetry_events.insert().values(**db_data)
                            )
                            if delta > 0:
                                stmt = (
                                    pg_insert(char_stats)
                                    .values(
                                        character_name=snap.character_name,
                                        total_kills=delta,
                                    )
                                    .on_conflict_do_update(
                                        index_elements=["character_name"],
                                        set_={
                                            "total_kills": char_stats.c.total_kills
                                            + delta
                                        },
                                    )
                                )
                                await database.execute(stmt)
                                logger.debug(
                                    f"Updated kills for {snap.character_name}: +{delta} (total from {last} to {snap.kills})"
                                )

                        # Success: log timing and update cache
                        db_duration = (time.time() - db_start_time) * 1000
                        ws_receive_snapshots._last_kills[key] = snap.kills

                        # Track database performance (Phase 2)
                        global _total_queries, _total_query_time
                        _total_queries += 1
                        _total_query_time += (
                            db_duration / 1000.0
                        )  # Convert ms to seconds

                        # Track recent activity (Phase 3)
                        global _recent_telemetry_messages, _max_recent_messages
                        activity_entry = {
                            "timestamp": datetime.now(timezone.utc).isoformat(),
                            "character_name": snap.character_name,
                            "kills": snap.kills,
                            "kill_delta": delta,
                            "query_time": round(db_duration, 1),
                        }
                        _recent_telemetry_messages.append(activity_entry)
                        if len(_recent_telemetry_messages) > _max_recent_messages:
                            _recent_telemetry_messages.pop(0)

                        # Log final pool status after successful operation
                        try:
                            final_pool_status = (
                                f"pool_size:{database._pool._queue.qsize()}"
                                if hasattr(database, "_pool")
                                and hasattr(database._pool, "_queue")
                                else "pool_status:unknown"
                            )
                        except Exception:
                            final_pool_status = "pool_status:error"

                        logger.info(
                            f"✅ TELEMETRY_DB_WRITE_SUCCESS: {snap.character_name} took {db_duration:.1f}ms {final_pool_status}"
                        )

                    except Exception as db_error:
                        db_duration = (time.time() - db_start_time) * 1000

                        # Log pool status during failure
                        try:
                            error_pool_status = (
                                f"pool_size:{database._pool._queue.qsize()}"
                                if hasattr(database, "_pool")
                                and hasattr(database._pool, "_queue")
                                else "pool_status:unknown"
                            )
                        except Exception:
                            error_pool_status = "pool_status:error"

                        logger.error(
                            f"❌ TELEMETRY_DB_WRITE_FAILED: {snap.character_name} session:{snap.session_id[:8]} took {db_duration:.1f}ms {error_pool_status} error:{db_error}",
                            exc_info=True,
                        )
                        continue
                    # Broadcast updated snapshot to all browser clients
                    await _broadcast_to_browser_clients(snap.dict())

                    # Log successful processing completion with timing
                    total_duration = (time.time() - telemetry_start_time) * 1000
                    logger.info(
                        f"⏱️ TELEMETRY_PROCESSING_COMPLETE: {snap.character_name} took {total_duration:.1f}ms total"
                    )

                except Exception as e:
                    total_duration = (time.time() - telemetry_start_time) * 1000
                    logger.error(
                        f"❌ TELEMETRY_PROCESSING_FAILED: {character_name} took {total_duration:.1f}ms error:{e}",
                        exc_info=True,
                    )
                continue
            # --- Rare event: update total and session counters and persist ---
            if msg_type == "rare":
                name = data.get("character_name")
                if isinstance(name, str) and name.strip():
                    try:
                        # Total rare count per character
                        stmt_tot = (
                            pg_insert(rare_stats)
                            .values(character_name=name, total_rares=1)
                            .on_conflict_do_update(
                                index_elements=["character_name"],
                                set_={"total_rares": rare_stats.c.total_rares + 1},
                            )
                        )
                        await database.execute(stmt_tot)
                        # Session-specific rare count (use live cache or fallback to latest telemetry)
                        session_id = live_snapshots.get(name, {}).get("session_id")
                        if not session_id:
                            row = await database.fetch_one(
                                "SELECT session_id FROM telemetry_events"
                                " WHERE character_name = :name"
                                " ORDER BY timestamp DESC LIMIT 1",
                                {"name": name},
                            )
                            if row:
                                session_id = row["session_id"]
                        if session_id:
                            stmt_sess = (
                                pg_insert(rare_stats_sessions)
                                .values(
                                    character_name=name,
                                    session_id=session_id,
                                    session_rares=1,
                                )
                                .on_conflict_do_update(
                                    index_elements=["character_name", "session_id"],
                                    set_={
                                        "session_rares": rare_stats_sessions.c.session_rares
                                        + 1
                                    },
                                )
                            )
                            await database.execute(stmt_sess)
                        # Persist individual rare event for future analysis
                        payload = data.copy()
                        payload.pop("type", None)
                        try:
                            rare_ev = RareEvent.parse_obj(payload)
                            await database.execute(
                                rare_events.insert().values(**rare_ev.dict())
                            )
                            logger.info(
                                f"Recorded rare event: {rare_ev.name} by {name}"
                            )
                            # Broadcast rare event to browser clients for epic notifications
                            await _broadcast_to_browser_clients(data)
                        except Exception as e:
                            logger.error(f"Failed to persist rare event: {e}")
                    except Exception as e:
                        logger.error(
                            f"Failed to process rare event for {name}: {e}",
                            exc_info=True,
                        )
                continue
            # --- Chat message: forward chat payload to browser clients ---
            if msg_type == "chat":
                await _broadcast_to_browser_clients(data)
                logger.debug(
                    f"Broadcasted chat message from {data.get('character_name', 'unknown')}"
                )
                continue
            # --- Combat stats: store + broadcast Mag-Tools style combat data ---
            if msg_type == "combat_stats":
                char = data.get("character_name")
                if char:
                    live_combat_stats[char] = data
                    # Upsert lifetime stats into DB
                    try:
                        lifetime = data.get("lifetime")
                        if lifetime:
                            await database.execute(
                                combat_stats.delete().where(
                                    combat_stats.c.character_name == char
                                )
                            )
                            await database.execute(
                                combat_stats.insert().values(
                                    character_name=char,
                                    timestamp=datetime.now(timezone.utc),
                                    stats_data=lifetime,
                                )
                            )
                        # Store session snapshot (latest per session)
                        session_data = data.get("session")
                        session_id = data.get("session_id")
                        if session_data and session_id:
                            # Delete old snapshot for this session, then insert fresh
                            await database.execute(
                                combat_stats_sessions.delete().where(
                                    (combat_stats_sessions.c.character_name == char)
                                    & (combat_stats_sessions.c.session_id == session_id)
                                )
                            )
                            await database.execute(
                                combat_stats_sessions.insert().values(
                                    character_name=char,
                                    session_id=session_id,
                                    timestamp=datetime.now(timezone.utc),
                                    stats_data=session_data,
                                )
                            )
                    except Exception as e:
                        logger.error(f"Failed to store combat stats for {char}: {e}")
                    # Broadcast to browser clients for live display
                    await _broadcast_to_browser_clients(data)
                continue

if msg_type == "full_inventory":
|
||
payload = data.copy()
|
||
payload.pop("type", None)
|
||
try:
|
||
inventory_msg = FullInventoryMessage.parse_obj(payload)
|
||
await _store_inventory(inventory_msg)
|
||
logger.info(
|
||
f"Stored inventory for {inventory_msg.character_name}: {inventory_msg.item_count} items"
|
||
)
|
||
except Exception as e:
|
||
logger.error(
|
||
f"Failed to process inventory for {data.get('character_name', 'unknown')}: {e}",
|
||
exc_info=True,
|
||
)
|
||
continue
|
||
            # --- Inventory delta: single item add/remove/update ---
            if msg_type == "inventory_delta":
                try:
                    action = data.get("action")
                    char_name = data.get("character_name", "unknown")

                    if action == "remove":
                        item_id = data.get("item_id")
                        if item_id is not None:
                            async with httpx.AsyncClient(timeout=10.0) as client:
                                resp = await client.delete(
                                    f"{INVENTORY_SERVICE_URL}/inventory/{char_name}/item/{item_id}"
                                )
                                if resp.status_code >= 400:
                                    logger.warning(
                                        f"Inventory service returned {resp.status_code} for delta remove item_id={item_id}"
                                    )
                    elif action in ("add", "update"):
                        item = data.get("item")
                        if item:
                            async with httpx.AsyncClient(timeout=10.0) as client:
                                resp = await client.post(
                                    f"{INVENTORY_SERVICE_URL}/inventory/{char_name}/item",
                                    json=item,
                                )
                                if resp.status_code < 400:
                                    # Use enriched item from inventory-service response for broadcast
                                    resp_json = resp.json()
                                    enriched_item = resp_json.get("item")
                                    if enriched_item:
                                        data = {
                                            "type": "inventory_delta",
                                            "action": action,
                                            "character_name": char_name,
                                            "item": enriched_item,
                                        }
                                else:
                                    logger.warning(
                                        f"Inventory service returned {resp.status_code} for delta {action}"
                                    )

                    # Broadcast delta to all browser clients
                    await _broadcast_to_browser_clients(data)
                    logger.debug(f"Inventory delta ({action}) for {char_name}")
                except Exception as e:
                    logger.error(
                        f"Failed to process inventory delta: {e}", exc_info=True
                    )
                continue
            # --- Vitals message: store character health/stamina/mana and broadcast ---
            if msg_type == "vitals":
                payload = data.copy()
                payload.pop("type", None)
                try:
                    vitals_msg = VitalsMessage.parse_obj(payload)
                    live_vitals[vitals_msg.character_name] = vitals_msg.dict()
                    await _broadcast_to_browser_clients(data)
                    logger.debug(
                        f"Updated vitals for {vitals_msg.character_name}: {vitals_msg.health_percentage}% HP, {vitals_msg.stamina_percentage}% Stam, {vitals_msg.mana_percentage}% Mana"
                    )
                except Exception as e:
                    logger.error(
                        f"Failed to process vitals for {data.get('character_name', 'unknown')}: {e}",
                        exc_info=True,
                    )
                continue
            # --- Character stats message: store character attributes/skills/progression and broadcast ---
            if msg_type == "character_stats":
                payload = data.copy()
                payload.pop("type", None)
                try:
                    stats_msg = CharacterStatsMessage.parse_obj(payload)
                    stats_dict = stats_msg.dict()

                    # Cache in memory
                    live_character_stats[stats_msg.character_name] = stats_dict

                    # Build stats_data JSONB (everything except extracted columns)
                    stats_data = {}
                    for key in (
                        "attributes",
                        "vitals",
                        "skills",
                        "allegiance",
                        "active_item_enchantments",
                        "race",
                        "gender",
                        "birth",
                        "current_title",
                        "skill_credits",
                        "burden",
                        "burden_units",
                        "encumbrance_capacity",
                        "properties",
                        "titles",
                    ):
                        if stats_dict.get(key) is not None:
                            stats_data[key] = stats_dict[key]

                    # Upsert to database
                    await database.execute(
                        """
                        INSERT INTO character_stats
                            (character_name, timestamp, level, total_xp, unassigned_xp,
                             luminance_earned, luminance_total, deaths, stats_data)
                        VALUES
                            (:character_name, :timestamp, :level, :total_xp, :unassigned_xp,
                             :luminance_earned, :luminance_total, :deaths, :stats_data)
                        ON CONFLICT (character_name) DO UPDATE SET
                            timestamp = EXCLUDED.timestamp,
                            level = EXCLUDED.level,
                            total_xp = EXCLUDED.total_xp,
                            unassigned_xp = EXCLUDED.unassigned_xp,
                            luminance_earned = EXCLUDED.luminance_earned,
                            luminance_total = EXCLUDED.luminance_total,
                            deaths = EXCLUDED.deaths,
                            stats_data = EXCLUDED.stats_data
                        """,
                        {
                            "character_name": stats_msg.character_name,
                            "timestamp": stats_msg.timestamp,
                            "level": stats_msg.level,
                            "total_xp": stats_msg.total_xp,
                            "unassigned_xp": stats_msg.unassigned_xp,
                            "luminance_earned": stats_msg.luminance_earned,
                            "luminance_total": stats_msg.luminance_total,
                            "deaths": stats_msg.deaths,
                            "stats_data": json.dumps(stats_data),
                        },
                    )

                    # Broadcast to browser clients
                    await _broadcast_to_browser_clients(data)
                    logger.info(
                        f"Updated character stats for {stats_msg.character_name}: Level {stats_msg.level}"
                    )
                except Exception as e:
                    logger.error(
                        f"Failed to process character_stats for {data.get('character_name', 'unknown')}: {e}",
                        exc_info=True,
                    )
                continue
            # --- Equipment cantrip state: live-only overlay for mana panel ---
            if msg_type == "equipment_cantrip_state":
                try:
                    character_name = data.get("character_name")
                    if character_name:
                        live_equipment_cantrip_states[character_name] = data
                        await _broadcast_to_browser_clients(data)
                        logger.debug(
                            f"Updated equipment cantrip state for {character_name}"
                        )
                except Exception as e:
                    logger.error(
                        f"Failed to process equipment_cantrip_state for {data.get('character_name', 'unknown')}: {e}",
                        exc_info=True,
                    )
                continue
            # --- Quest message: update cache and broadcast (no database storage) ---
            if msg_type == "quest":
                character_name = data.get("character_name")
                quest_name = data.get("quest_name")
                countdown = data.get("countdown")

                if character_name and quest_name and countdown is not None:
                    # Only track specific quest types
                    allowed_quests = {
                        "Stipend Collection Timer",
                        "Blank Augmentation Gem Pickup Timer",
                        "Insatiable Eater Jaw",
                    }

                    if quest_name in allowed_quests:
                        # Update quest cache
                        if character_name not in _quest_status_cache:
                            _quest_status_cache[character_name] = {}
                        _quest_status_cache[character_name][quest_name] = countdown

                        # Broadcast to browser clients for real-time updates
                        await _broadcast_to_browser_clients(data)
                        logger.debug(
                            f"Updated quest status for {character_name}: {quest_name} = {countdown}"
                        )
                    else:
                        logger.debug(f"Ignoring non-tracked quest: {quest_name}")
                else:
                    logger.warning(
                        f"Invalid quest message format from {websocket.client}: missing required fields"
                    )
                continue
            # --- Portal message: store in database and broadcast ---
            if msg_type == "portal":
                character_name = data.get("character_name")
                portal_name = data.get("portal_name")
                ns = data.get("ns")
                ew = data.get("ew")
                z = data.get("z")
                timestamp_str = data.get("timestamp")

                # Check numeric fields against None rather than truthiness so a
                # legitimate 0.0 coordinate is not rejected as "missing"
                if (
                    character_name
                    and portal_name
                    and timestamp_str
                    and None not in (ns, ew, z)
                ):
                    try:
                        # Parse timestamp
                        timestamp = datetime.fromisoformat(
                            timestamp_str.replace("Z", "+00:00")
                        )

                        # Convert coordinates to floats for database storage
                        ns = float(ns)
                        ew = float(ew)
                        z = float(z)

                        # Round coordinates for display (0.1 tolerance to match DB constraint)
                        ns_rounded = round(ns, 1)
                        ew_rounded = round(ew, 1)

                        # Use PostgreSQL UPSERT to handle race conditions atomically
                        # This completely eliminates duplicate key errors
                        result = await database.fetch_one(
                            """
                            INSERT INTO portals (portal_name, ns, ew, z, discovered_at, discovered_by)
                            VALUES (:portal_name, :ns, :ew, :z, :timestamp, :character_name)
                            ON CONFLICT (ROUND(ns::numeric, 1), ROUND(ew::numeric, 1))
                            DO UPDATE SET
                                discovered_at = EXCLUDED.discovered_at,
                                discovered_by = EXCLUDED.discovered_by,
                                portal_name = EXCLUDED.portal_name
                            RETURNING (xmax = 0) AS was_inserted
                            """,
                            {
                                "portal_name": portal_name,
                                "ns": ns,
                                "ew": ew,
                                "z": z,
                                "timestamp": timestamp,
                                "character_name": character_name,
                            },
                        )

                        # Log whether this was a new discovery or an update
                        # xmax = 0 means it was an INSERT (new portal)
                        # xmax != 0 means it was an UPDATE (existing portal)
                        if result and result["was_inserted"]:
                            logger.info(
                                f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}"
                            )
                        else:
                            logger.debug(
                                f"Portal timestamp updated: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}"
                            )

                        # Broadcast to browser clients for map updates
                        await _broadcast_to_browser_clients(data)

                    except Exception as e:
                        logger.error(
                            f"Failed to process portal discovery for {character_name}: {e}",
                            exc_info=True,
                        )
                else:
                    logger.warning(
                        f"Invalid portal message format from {websocket.client}: missing required fields"
                    )
                continue

            if msg_type == "nearby_objects":
                character_name = data.get("character_name")
                if character_name:
                    live_nearby_objects[character_name] = data
                    await _broadcast_to_browser_clients(data)
                continue

            if msg_type == "dungeon_map":
                landblock = data.get("landblock")
                if landblock:
                    dungeon_map_cache[landblock] = data
                    logger.info(
                        f"Cached dungeon map for {landblock} ({len(data.get('z_levels', []))} z-levels)"
                    )
                    await _broadcast_to_browser_clients(data)
                continue

            # ── Vital sharing (cross-machine VTankFellowHeals replacement) ──
            if msg_type == "share_subscribe":
                char = data.get("character_name")
                if char:
                    _vital_sharing_subscribers.add(char)
                    tags = data.get("tags") or []
                    entry = _vital_sharing_peer_state.setdefault(
                        char,
                        {
                            "character_name": char,
                            "tags": [],
                            "vitals": None,
                            "position": None,
                            "items": None,
                            "connected": True,
                            "last_update": None,
                        },
                    )
                    if isinstance(tags, list):
                        entry["tags"] = tags
                    entry["connected"] = True
                    logger.info(
                        f"🤝 VITAL_SHARING_SUBSCRIBED: {char} (tags={tags})"
                    )
                continue

            if msg_type == "share_unsubscribe":
                char = data.get("character_name")
                if char:
                    _vital_sharing_subscribers.discard(char)
                    _vital_sharing_peer_state.pop(char, None)
                    logger.info(f"🤝 VITAL_SHARING_UNSUBSCRIBED: {char}")
                    # Tell browser clients to drop this peer from their UI
                    await _broadcast_to_browser_clients({
                        "type": "share_peer_removed",
                        "character_name": char,
                    })
                continue

            if msg_type and msg_type.startswith("share_"):
                origin = data.get("character_name") or ""
                _update_vital_sharing_peer_state(msg_type, data)
                # Fan out to other opted-in plugin clients
                await _broadcast_share_to_plugin_clients(data, origin)
                # Fan out to browser clients for NetworkUI display
                await _broadcast_to_browser_clients(data)
                continue

            # Unknown message types are ignored
            if msg_type:
                logger.warning(
                    f"Unknown message type '{msg_type}' from {websocket.client}"
                )
    finally:
        # Track plugin disconnection
        _plugin_connections = max(0, _plugin_connections - 1)
        # Collect the characters registered on this socket once, up front. The
        # old code recomputed this list after the entries had already been
        # popped, so the kill-cache cleanup and the final log always saw an
        # empty list.
        disconnected_names = [
            name for name, ws in plugin_conns.items() if ws is websocket
        ]
        for name in disconnected_names:
            # Use pop() instead of del to avoid KeyError if already removed
            plugin_conns.pop(name, None)
            live_equipment_cantrip_states.pop(name, None)
            live_nearby_objects.pop(name, None)
            was_sharing = (
                name in _vital_sharing_subscribers
                or name in _vital_sharing_peer_state
            )
            _vital_sharing_subscribers.discard(name)
            _vital_sharing_peer_state.pop(name, None)
            if was_sharing:
                # Tell browser clients to drop this peer from their UI
                await _broadcast_to_browser_clients({
                    "type": "share_peer_removed",
                    "character_name": name,
                })

        # Also clean up kill-tracking cache entries for the disconnected characters
        stale_keys = []
        for (session_id, char_name), _ in ws_receive_snapshots._last_kills.items():
            if char_name in disconnected_names:
                stale_keys.append((session_id, char_name))
        for key in stale_keys:
            ws_receive_snapshots._last_kills.pop(key, None)

        if disconnected_names:
            logger.info(
                f"Cleaned up plugin connections for characters: {disconnected_names} from {websocket.client}"
            )
            if stale_keys:
                logger.debug(
                    f"Cleaned up {len(stale_keys)} kill tracking cache entries"
                )
        else:
            logger.debug(f"No plugin registrations to clean up for {websocket.client}")


# In-memory cache of last seen kill counts per (session_id, character_name)
# Used to compute deltas for updating persistent kill statistics efficiently
ws_receive_snapshots._last_kills = {}


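# Hedged sketch of the delta computation this cache enables; the helper below is
# illustrative only (the real update happens inline in ws_receive_snapshots):
def _kill_delta_sketch(session_id, char_name: str, new_total: int) -> int:
    """Return kills gained since the previous snapshot for this (session, char)."""
    key = (session_id, char_name)
    prev = ws_receive_snapshots._last_kills.get(key, new_total)  # first sighting -> delta 0
    ws_receive_snapshots._last_kills[key] = new_total
    return max(0, new_total - prev)

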
async def cleanup_stale_connections():
    """Periodic cleanup of stale WebSocket connections.

    This function can be called periodically to clean up connections
    that may have become stale but weren't properly cleaned up.
    """
    # Clean up plugin connections that no longer have valid WebSockets
    stale_plugins = []
    for char_name, ws in list(plugin_conns.items()):
        try:
            # Test if the WebSocket is still alive by checking its state
            if ws.client_state.name != "CONNECTED":
                stale_plugins.append(char_name)
        except Exception:
            # If we can't check the state, consider it stale
            stale_plugins.append(char_name)

    for char_name in stale_plugins:
        plugin_conns.pop(char_name, None)
        logger.info(f"Cleaned up stale plugin connection: {char_name}")

    # Clean up browser connections
    stale_browsers = []
    for ws in list(browser_conns):
        try:
            if ws.client_state.name != "CONNECTED":
                stale_browsers.append(ws)
        except Exception:
            stale_browsers.append(ws)

    for ws in stale_browsers:
        browser_conns.discard(ws)

    if stale_browsers:
        logger.info(f"Cleaned up {len(stale_browsers)} stale browser connections")

    logger.debug(
        f"Connection health check: {len(plugin_conns)} plugins, {len(browser_conns)} browsers"
    )


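# Nothing in this file schedules cleanup_stale_connections; a minimal sketch of
# running it periodically (the helper name and the 300s interval are assumptions,
# not part of the source; it could be started from a startup event handler):
async def _connection_cleanup_loop(interval_seconds: float = 300.0) -> None:
    """Hypothetical background loop invoking cleanup_stale_connections forever."""
    while True:
        await asyncio.sleep(interval_seconds)
        await cleanup_stale_connections()

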
@app.websocket("/ws/live")
async def ws_live_updates(websocket: WebSocket):
    """WebSocket endpoint for browser clients to receive live updates and send commands.

    Manages a set of connected browser clients; listens for incoming command messages
    and forwards them to the appropriate plugin client WebSocket.
    """
    # Require valid session cookie for browser WebSocket
    token = websocket.cookies.get("session")
    if not token or not verify_session_cookie(token):
        await websocket.close(code=4401, reason="Not authenticated")
        return

    global _browser_connections
    # Add new browser client to the set
    await websocket.accept()
    browser_conns.add(websocket)
    logger.info(f"Browser WebSocket connected: {websocket.client}")

    # Track browser connection
    _browser_connections += 1

    try:
        while True:
            # Receive command messages from browser
            try:
                data = await websocket.receive_json()
                # Debug: log all incoming browser WebSocket messages
                logger.debug(f"Browser WebSocket RX from {websocket.client}: {data}")
            except WebSocketDisconnect:
                logger.info(f"Browser WebSocket disconnected: {websocket.client}")
                break
            # Handle dungeon map requests from browser
            if data.get("type") == "request_dungeon_map":
                landblock = data.get("landblock")
                cached = dungeon_map_cache.get(landblock)
                if cached:
                    await websocket.send_json(cached)
                    logger.debug(f"Sent cached dungeon map {landblock} to browser")
                continue

            # Determine command envelope format (new or legacy)
            if "player_name" in data and "command" in data:
                # New format: { player_name, command }
                target_name = data["player_name"]
                payload = data
            elif (
                data.get("type") == "command"
                and "character_name" in data
                and "text" in data
            ):
                # Legacy format: { type: 'command', character_name, text }
                target_name = data.get("character_name")
                payload = {"player_name": target_name, "command": data.get("text")}
            else:
                # Not a recognized command envelope
                continue
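            # Example envelopes accepted above (values are illustrative); note that
            # a legacy envelope is normalized to the new shape before forwarding:
            #   new:    {"player_name": "Zorbo", "command": "/say hello"}
            #   legacy: {"type": "command", "character_name": "Zorbo", "text": "/say hello"}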
            # Forward command envelope to the appropriate plugin WebSocket
            target_ws = plugin_conns.get(target_name)
            if target_ws:
                try:
                    await target_ws.send_json(payload)
                    logger.debug(
                        f"Forwarded command to plugin for {target_name}: {payload}"
                    )
                except (WebSocketDisconnect, RuntimeError, ConnectionAbortedError) as e:
                    logger.warning(f"Failed to forward command to {target_name}: {e}")
                    # Remove stale connection
                    plugin_conns.pop(target_name, None)
                except Exception as e:
                    logger.error(
                        f"Unexpected error forwarding command to {target_name}: {e}"
                    )
                    # Remove potentially corrupted connection
                    plugin_conns.pop(target_name, None)
            else:
                logger.warning(
                    f"No plugin connection found for target character: {target_name}"
                )
    except WebSocketDisconnect:
        pass
    finally:
        # Track browser disconnection
        _browser_connections = max(0, _browser_connections - 1)

        browser_conns.discard(websocket)
        logger.debug(
            f"Removed browser WebSocket from connection pool: {websocket.client}"
        )


## -------------------- static frontend ---------------------------
## (static mount moved to end of file, below API routes)

# list routes for convenience
logger.info("🔍 Registered HTTP API routes:")
for route in app.routes:
    if isinstance(route, APIRoute):
        # Log the path and allowed methods for each API route
        logger.info(f"{route.path} -> {route.methods}")


# Add stats endpoint for per-character metrics
@app.get("/stats/{character_name}")
async def get_stats(character_name: str):
    """
    HTTP GET endpoint to retrieve per-character metrics:
      - latest_snapshot: most recent telemetry entry for the character
      - total_kills: accumulated kills from char_stats
      - total_rares: accumulated rares from rare_stats
    Returns 404 if the character has no recorded telemetry.
    """
    try:
        # Single optimized query with LEFT JOINs to get all data in one round trip
        sql = """
            WITH latest AS (
                SELECT * FROM telemetry_events
                WHERE character_name = :cn
                ORDER BY timestamp DESC LIMIT 1
            )
            SELECT
                l.*,
                COALESCE(cs.total_kills, 0) as total_kills,
                COALESCE(rs.total_rares, 0) as total_rares
            FROM latest l
            LEFT JOIN char_stats cs ON l.character_name = cs.character_name
            LEFT JOIN rare_stats rs ON l.character_name = rs.character_name
        """
        row = await database.fetch_one(sql, {"cn": character_name})
        if not row:
            logger.warning(f"No telemetry data found for character: {character_name}")
            raise HTTPException(status_code=404, detail="Character not found")

        # Extract latest snapshot data (exclude the added total_kills/total_rares)
        snap_dict = {
            k: v
            for k, v in dict(row).items()
            if k not in ("total_kills", "total_rares")
        }

        result = {
            "character_name": character_name,
            "latest_snapshot": snap_dict,
            "total_kills": row["total_kills"],
            "total_rares": row["total_rares"],
        }
        logger.debug(
            f"Retrieved stats for character: {character_name} (optimized query)"
        )
        return JSONResponse(content=jsonable_encoder(result))
    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            f"Failed to get stats for character {character_name}: {e}", exc_info=True
        )
        raise HTTPException(status_code=500, detail="Internal server error")


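# Hedged usage sketch for the endpoint above (the character name and counts are
# illustrative):
#   GET /stats/Zorbo
#   -> {"character_name": "Zorbo", "latest_snapshot": {...},
#       "total_kills": 123, "total_rares": 4}

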
# --- Character Stats API -------------------------------------------


@app.post("/character-stats/test")
async def test_character_stats_default():
    """Inject mock character_stats data for frontend development."""
    return await test_character_stats("TestCharacter")


@app.post("/character-stats/test/{name}")
|
||
async def test_character_stats(name: str):
|
||
"""Inject mock character_stats data for a specific character name.
|
||
Processes through the same pipeline as real plugin data."""
|
||
mock_data = {
|
||
"type": "character_stats",
|
||
"timestamp": datetime.utcnow().isoformat() + "Z",
|
||
"character_name": name,
|
||
"level": 275,
|
||
"race": "Aluvian",
|
||
"gender": "Male",
|
||
"birth": "2018-03-15 14:22:33",
|
||
"total_xp": 191226310247,
|
||
"unassigned_xp": 4500000,
|
||
"skill_credits": 2,
|
||
"luminance_earned": 500000,
|
||
"luminance_total": 1500000,
|
||
"deaths": 3175,
|
||
"current_title": 42,
|
||
"attributes": {
|
||
"strength": {"base": 290, "creation": 100},
|
||
"endurance": {"base": 200, "creation": 100},
|
||
"coordination": {"base": 240, "creation": 100},
|
||
"quickness": {"base": 220, "creation": 10},
|
||
"focus": {"base": 250, "creation": 100},
|
||
"self": {"base": 200, "creation": 100},
|
||
},
|
||
"vitals": {
|
||
"health": {"base": 341},
|
||
"stamina": {"base": 400},
|
||
"mana": {"base": 300},
|
||
},
|
||
"skills": {
|
||
"war_magic": {"base": 533, "training": "Specialized"},
|
||
"life_magic": {"base": 440, "training": "Specialized"},
|
||
"creature_enchantment": {"base": 430, "training": "Specialized"},
|
||
"item_enchantment": {"base": 420, "training": "Specialized"},
|
||
"void_magic": {"base": 510, "training": "Specialized"},
|
||
"melee_defense": {"base": 488, "training": "Specialized"},
|
||
"missile_defense": {"base": 470, "training": "Specialized"},
|
||
"magic_defense": {"base": 460, "training": "Specialized"},
|
||
"two_handed_combat": {"base": 420, "training": "Specialized"},
|
||
"heavy_weapons": {"base": 410, "training": "Specialized"},
|
||
"finesse_weapons": {"base": 400, "training": "Trained"},
|
||
"light_weapons": {"base": 390, "training": "Trained"},
|
||
"missile_weapons": {"base": 380, "training": "Trained"},
|
||
"shield": {"base": 350, "training": "Trained"},
|
||
"dual_wield": {"base": 340, "training": "Trained"},
|
||
"arcane_lore": {"base": 330, "training": "Trained"},
|
||
"mana_conversion": {"base": 320, "training": "Trained"},
|
||
"healing": {"base": 300, "training": "Trained"},
|
||
"lockpick": {"base": 280, "training": "Trained"},
|
||
"assess_creature": {"base": 10, "training": "Untrained"},
|
||
"assess_person": {"base": 10, "training": "Untrained"},
|
||
"deception": {"base": 10, "training": "Untrained"},
|
||
"leadership": {"base": 10, "training": "Untrained"},
|
||
"loyalty": {"base": 10, "training": "Untrained"},
|
||
"jump": {"base": 10, "training": "Untrained"},
|
||
"run": {"base": 10, "training": "Untrained"},
|
||
"salvaging": {"base": 10, "training": "Untrained"},
|
||
"cooking": {"base": 10, "training": "Untrained"},
|
||
"fletching": {"base": 10, "training": "Untrained"},
|
||
"alchemy": {"base": 10, "training": "Untrained"},
|
||
"sneak_attack": {"base": 10, "training": "Untrained"},
|
||
"dirty_fighting": {"base": 10, "training": "Untrained"},
|
||
"recklessness": {"base": 10, "training": "Untrained"},
|
||
"summoning": {"base": 10, "training": "Untrained"},
|
||
},
|
||
"allegiance": {
|
||
"name": "Knights of Dereth",
|
||
"monarch": {"name": "HighKing", "race": 1, "rank": 0, "gender": 0},
|
||
"patron": {"name": "SirLancelot", "race": 1, "rank": 5, "gender": 0},
|
||
"rank": 8,
|
||
"followers": 12,
|
||
},
|
||
}
|
||
|
||
    # Process through the same pipeline as real data
    payload = mock_data.copy()
    payload.pop("type", None)
    try:
        stats_msg = CharacterStatsMessage.parse_obj(payload)
        stats_dict = stats_msg.dict()
        live_character_stats[stats_msg.character_name] = stats_dict

        stats_data = {}
        for key in (
            "attributes",
            "vitals",
            "skills",
            "allegiance",
            "active_item_enchantments",
            "race",
            "gender",
            "birth",
            "current_title",
            "skill_credits",
            "burden",
            "burden_units",
            "encumbrance_capacity",
        ):
            if stats_dict.get(key) is not None:
                stats_data[key] = stats_dict[key]

        await database.execute(
            """
            INSERT INTO character_stats
                (character_name, timestamp, level, total_xp, unassigned_xp,
                 luminance_earned, luminance_total, deaths, stats_data)
            VALUES
                (:character_name, :timestamp, :level, :total_xp, :unassigned_xp,
                 :luminance_earned, :luminance_total, :deaths, :stats_data)
            ON CONFLICT (character_name) DO UPDATE SET
                timestamp = EXCLUDED.timestamp,
                level = EXCLUDED.level,
                total_xp = EXCLUDED.total_xp,
                unassigned_xp = EXCLUDED.unassigned_xp,
                luminance_earned = EXCLUDED.luminance_earned,
                luminance_total = EXCLUDED.luminance_total,
                deaths = EXCLUDED.deaths,
                stats_data = EXCLUDED.stats_data
            """,
            {
                "character_name": stats_msg.character_name,
                "timestamp": stats_msg.timestamp,
                "level": stats_msg.level,
                "total_xp": stats_msg.total_xp,
                "unassigned_xp": stats_msg.unassigned_xp,
                "luminance_earned": stats_msg.luminance_earned,
                "luminance_total": stats_msg.luminance_total,
                "deaths": stats_msg.deaths,
                "stats_data": json.dumps(stats_data),
            },
        )

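        # Note: the ON CONFLICT clause above keeps exactly one row per character,
        # overwriting the previous snapshot rather than appending history.
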
        await _broadcast_to_browser_clients(mock_data)
        return {"status": "ok", "character_name": stats_msg.character_name}
    except Exception as e:
        logger.error(f"Test endpoint failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/character-stats/{name}")
|
||
async def get_character_stats(name: str):
|
||
"""Return latest character stats. Checks in-memory cache first, falls back to DB."""
|
||
try:
|
||
# Try in-memory cache first
|
||
if name in live_character_stats:
|
||
return JSONResponse(content=jsonable_encoder(live_character_stats[name]))
|
||
|
||
# Fall back to database
|
||
row = await database.fetch_one(
|
||
"SELECT * FROM character_stats WHERE character_name = :name", {"name": name}
|
||
)
|
||
if row:
|
||
result = dict(row._mapping)
|
||
# Parse stats_data back from JSONB
|
||
if isinstance(result.get("stats_data"), str):
|
||
result["stats_data"] = json.loads(result["stats_data"])
|
||
# Merge stats_data fields into top level for frontend compatibility
|
||
stats_data = result.pop("stats_data", {})
|
||
result.update(stats_data)
|
||
return JSONResponse(content=jsonable_encoder(result))
|
||
|
||
return JSONResponse(
|
||
content={"error": "No stats available for this character"}, status_code=404
|
||
)
|
||
except Exception as e:
|
||
logger.error(f"Failed to get character stats for {name}: {e}", exc_info=True)
|
||
raise HTTPException(status_code=500, detail="Internal server error")
|
||
|
||
|
||
@app.get("/equipment-cantrip-state/{name}")
|
||
async def get_equipment_cantrip_state(name: str):
|
||
"""Return latest live equipment cantrip state overlay for a character."""
|
||
try:
|
||
data = live_equipment_cantrip_states.get(name)
|
||
if data:
|
||
return JSONResponse(content=jsonable_encoder(data))
|
||
|
||
return JSONResponse(
|
||
content={
|
||
"type": "equipment_cantrip_state",
|
||
"character_name": name,
|
||
"items": [],
|
||
}
|
||
)
|
||
except Exception as e:
|
||
logger.error(
|
||
f"Failed to get equipment cantrip state for {name}: {e}", exc_info=True
|
||
)
|
||
raise HTTPException(status_code=500, detail="Internal server error")
|
||
|
||
|
||
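# Note: the endpoint above returns an empty overlay ({"items": []}) instead of a
# 404 when no live state is cached, so the frontend can render an empty panel
# without needing a separate error path.

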
# -------------------- static frontend ---------------------------
# Custom icon handler; icons are served directly from the static/icons directory
from fastapi.responses import FileResponse


@app.get("/icons/{icon_filename}")
|
||
async def serve_icon(icon_filename: str):
|
||
"""Serve icons from static/icons directory"""
|
||
|
||
# Serve from static/icons directory
|
||
icon_path = Path("static/icons") / icon_filename
|
||
if icon_path.exists():
|
||
return FileResponse(icon_path, media_type="image/png")
|
||
|
||
# Icon not found
|
||
raise HTTPException(status_code=404, detail="Icon not found")
|
||
|
||
|
||
# -------------------- Inventory Service Proxy ---------------------------
|
||
|
||
|
||
@app.get("/inv/test")
|
||
async def test_inventory_route():
|
||
"""Test route to verify inventory proxy is working"""
|
||
return {"message": "Inventory proxy route is working"}
|
||
|
||
|
||
@app.post("/inv/suitbuilder/search")
|
||
async def proxy_suitbuilder_search(request: Request):
|
||
"""Stream suitbuilder search results - SSE requires streaming proxy."""
|
||
inventory_service_url = os.getenv(
|
||
"INVENTORY_SERVICE_URL", "http://inventory-service:8000"
|
||
)
|
||
logger.info(f"Streaming proxy to suitbuilder search")
|
||
|
||
# Read body BEFORE creating generator (request context needed)
|
||
body = await request.body()
|
||
|
||
async def stream_response():
|
||
try:
|
||
# Use streaming request with long timeout for searches
|
||
async with httpx.AsyncClient(
|
||
timeout=httpx.Timeout(300.0, connect=10.0)
|
||
) as client:
|
||
async with client.stream(
|
||
method="POST",
|
||
url=f"{inventory_service_url}/suitbuilder/search",
|
||
content=body,
|
||
headers={"Content-Type": "application/json"},
|
||
) as response:
|
||
async for chunk in response.aiter_bytes():
|
||
yield chunk
|
||
except httpx.ReadTimeout:
|
||
yield b'event: error\ndata: {"message": "Search timeout"}\n\n'
|
||
except Exception as e:
|
||
logger.error(f"Streaming proxy error: {e}")
|
||
yield f'event: error\ndata: {{"message": "Proxy error: {str(e)}"}}\n\n'.encode()
|
||
|
||
return StreamingResponse(
|
||
stream_response(),
|
||
media_type="text/event-stream",
|
||
headers={
|
||
"Cache-Control": "no-cache",
|
||
"Connection": "keep-alive",
|
||
"X-Accel-Buffering": "no", # Disable nginx buffering
|
||
},
|
||
)
|
||
|
||
|
||
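# Hedged sketch of consuming the SSE endpoint above from a Python client; the
# host, port, and empty JSON payload are illustrative assumptions:
async def _consume_suitbuilder_sse_sketch() -> None:
    """Hypothetical client that prints each SSE data line from the proxied search."""
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "POST", "http://localhost:8000/inv/suitbuilder/search", json={}
        ) as resp:
            async for line in resp.aiter_lines():
                if line.startswith("data:"):
                    print(line[len("data:"):].strip())

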
@app.api_route("/inv/{path:path}", methods=["GET", "POST"])
async def proxy_inventory_service(path: str, request: Request):
    """Proxy all inventory service requests"""
    try:
        inventory_service_url = os.getenv(
            "INVENTORY_SERVICE_URL", "http://inventory-service:8000"
        )
        logger.info(f"Proxying to inventory service: {inventory_service_url}/{path}")

        # Forward the request to inventory service (60s timeout for large queries)
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.request(
                method=request.method,
                url=f"{inventory_service_url}/{path}",
                params=request.query_params,
                headers=dict(request.headers),
                content=await request.body(),
            )
        return Response(
            content=response.content,
            status_code=response.status_code,
            headers=dict(response.headers),
        )
    except Exception as e:
        logger.error(f"Failed to proxy inventory request: {e}")
        raise HTTPException(status_code=500, detail="Inventory service unavailable")


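# Design note on the generic proxy above: request and response headers are
# relayed verbatim, which is simple but also forwards the original Host and
# hop-by-hop headers; a stricter proxy would filter those before relaying.

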
# Icons are now served from static/icons directory
# Serve SPA files (catch-all for frontend routes)
# Mount the single-page application frontend (static assets) at root path
#
# Force browsers to always revalidate static assets so bind-mounted file
# changes are picked up without a hard refresh. The ETag/Last-Modified
# headers already make revalidation efficient (304 responses).
class NoCacheStaticFiles(StaticFiles):
    async def get_response(self, path, scope):
        response = await super().get_response(path, scope)
        # Force revalidation for HTML/JS/CSS/JSON so code changes show up
        # immediately after git pull. Other assets (images, fonts) can cache.
        # Check content-type header since root path "" resolves to index.html
        # via html=True and we need to catch it too.
        ct = response.headers.get("content-type", "").lower()
        if any(
            t in ct for t in ("text/html", "javascript", "text/css", "application/json")
        ):
            response.headers["Cache-Control"] = "no-cache, must-revalidate"
        return response


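# Note: this catch-all mount must remain the last route registered; Starlette
# matches routes in registration order, so mounting "/" earlier would shadow
# the API endpoints defined above.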
app.mount("/", NoCacheStaticFiles(directory="static", html=True), name="static")
|