From b204ba8e7592b81c0abc30fad0029e52fdcc0293 Mon Sep 17 00:00:00 2001 From: Erik Date: Fri, 13 Mar 2026 08:25:37 +0100 Subject: [PATCH] fix: improve mana tracker state matching --- main.py | 1668 ++++++++++++++++++++++++++++++---------------- static/script.js | 135 +++- static/style.css | 12 +- 3 files changed, 1212 insertions(+), 603 deletions(-) diff --git a/main.py b/main.py index 448e1691..1d8e8229 100644 --- a/main.py +++ b/main.py @@ -5,6 +5,7 @@ This service ingests real-time position and event data from plugin clients via W stores telemetry and statistics in a TimescaleDB backend, and exposes HTTP and WebSocket endpoints for browser clients to retrieve live and historical data, trails, and per-character stats. """ + from datetime import datetime, timedelta, timezone import json import logging @@ -17,7 +18,15 @@ import asyncio import socket import struct -from fastapi import FastAPI, Header, HTTPException, Query, WebSocket, WebSocketDisconnect, Request +from fastapi import ( + FastAPI, + Header, + HTTPException, + Query, + WebSocket, + WebSocketDisconnect, + Request, +) from fastapi.responses import JSONResponse, Response, StreamingResponse from fastapi.routing import APIRoute from fastapi.staticfiles import StaticFiles @@ -42,26 +51,28 @@ from db_async import ( server_health_checks, server_status, init_db_async, - cleanup_old_portals + cleanup_old_portals, ) import asyncio # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", handlers=[ logging.StreamHandler(sys.stdout), - ] + ], ) logger = logging.getLogger(__name__) # Get log level from environment (DEBUG, INFO, WARNING, ERROR) -log_level = os.getenv('LOG_LEVEL', 'INFO').upper() +log_level = os.getenv("LOG_LEVEL", "INFO").upper() logger.setLevel(getattr(logging, log_level, logging.INFO)) # Inventory service configuration -INVENTORY_SERVICE_URL = os.getenv('INVENTORY_SERVICE_URL', 'http://inventory-service:8000') +INVENTORY_SERVICE_URL = os.getenv( + "INVENTORY_SERVICE_URL", "http://inventory-service:8000" +) # In-memory caches for REST endpoints _cached_live: dict = {"players": []} _cached_trails: dict = {"trails": []} @@ -73,13 +84,13 @@ _cleanup_task: asyncio.Task | None = None # Player tracking for debug purposes _player_history: list = [] # List of player sets from last 10 refreshes -_player_events: list = [] # List of player enter/exit events -_max_history_size = 10 # Keep last 10 player sets -_max_events_size = 100 # Keep last 100 events +_player_events: list = [] # List of player enter/exit events +_max_history_size = 10 # Keep last 10 player sets +_max_events_size = 100 # Keep last 100 events # Telemetry timing tracking for debug purposes _player_telemetry_times: dict = {} # character_name -> list of timestamps -_max_telemetry_history = 20 # Keep last 20 telemetry timestamps per player +_max_telemetry_history = 20 # Keep last 20 telemetry timestamps per player # Simple WebSocket connection counters (Phase 1) _plugin_connections = 0 @@ -101,24 +112,25 @@ _server_status_cache = { "player_count": None, "last_check": None, "uptime_seconds": 0, - "last_restart": None + "last_restart": None, } # Quest status cache - stores last received quest data per player # Structure: {character_name: {quest_name: countdown_value}} _quest_status_cache: Dict[str, Dict[str, str]] = {} + # AC Hash32 checksum algorithm (based on ThwargLauncher) def calculate_hash32(data: bytes) -> int: """Calculate AC Hash32 
checksum as used in ThwargLauncher."""
     length = len(data)
     checksum = (length << 16) & 0xFFFFFFFF
-    
+
     # Process 4-byte chunks
     for i in range(0, length - 3, 4):
-        chunk = struct.unpack('<I', data[i:i+4])[0]
+        chunk = struct.unpack("<I", data[i : i + 4])[0]
         checksum = (checksum + chunk) & 0xFFFFFFFF
-    
+
     # Process remaining bytes, highest shift position first
     shift = 24
     for i in range(length - (length % 4), length):
         byte_val = data[i] << shift
         checksum = (checksum + byte_val) & 0xFFFFFFFF
         shift -= 8
-    
+
     return checksum
 
+
 # Create AC EchoRequest packet for server health check (based on ThwargLauncher)
 def create_echo_request_packet():
     """Create an AC EchoRequest packet for server health checking."""
     # AC packet header: sequence(4) + flags(4) + checksum(4) + id(2) + time(2) + size(2) + table(2) = 20 bytes + padding
     packet = bytearray(32)  # 32 bytes total (0x20)
-    
+
     # Sequence (4 bytes) - can be 0
-    struct.pack_into('<I', packet, 0, 0)
+    struct.pack_into("<I", packet, 0, 0)
     # Flags (4 bytes) - EchoRequest
-    struct.pack_into('<I', packet, 4, 0x02000000)
+    struct.pack_into("<I", packet, 4, 0x02000000)
     # Checksum (4 bytes) - Hash32 over the header with the checksum field still zeroed;
     # id/time/size/table stay zeroed
     checksum = calculate_hash32(bytes(packet))
-    struct.pack_into('<I', packet, 8, checksum)
+    struct.pack_into("<I", packet, 8, checksum)
     return bytes(packet)
 
 
 # Login/echo probe sent on every health check (same as ThwargLauncher)
 AC_LOGIN_PACKET = create_echo_request_packet()
 
 
-async def check_server_health(address: str, port: int, timeout: float = 3.0) -> tuple[bool, float, int]:
+
+async def check_server_health(
+    address: str, port: int, timeout: float = 3.0
+) -> tuple[bool, float, int]:
     """Check AC server health via UDP packet with retry logic.
-    
+
     Retries 6 times with 5-second delays before declaring server down.
     Returns: (is_up, latency_ms, player_count)
     """
     max_retries = 6
     retry_delay = 5.0
-    
+
     for attempt in range(max_retries):
-        logger.debug(f"🔍 Health check attempt {attempt + 1}/{max_retries} for {address}:{port}")
+        logger.debug(
+            f"🔍 Health check attempt {attempt + 1}/{max_retries} for {address}:{port}"
+        )
         start_time = time.time()
         sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         sock.setblocking(False)
-        
+
         try:
             # Send login packet (same as ThwargLauncher)
-            await asyncio.get_event_loop().sock_sendto(sock, AC_LOGIN_PACKET, (address, port))
-            
+            await asyncio.get_event_loop().sock_sendto(
+                sock, AC_LOGIN_PACKET, (address, port)
+            )
+
             # Wait for response with timeout
             try:
                 data, addr = await asyncio.wait_for(
-                    asyncio.get_event_loop().sock_recvfrom(sock, 1024),
-                    timeout=timeout
+                    asyncio.get_event_loop().sock_recvfrom(sock, 1024), timeout=timeout
                 )
-                
+
                 latency_ms = (time.time() - start_time) * 1000
-                logger.debug(f"📥 Received response from {addr}: {len(data)} bytes, latency: {latency_ms:.1f}ms")
-                
+                logger.debug(
+                    f"📥 Received response from {addr}: {len(data)} bytes, latency: {latency_ms:.1f}ms"
+                )
+
                 # Check if valid response (support both TimeSynch 0x800000 and ConnectRequest 0x40000)
                 if len(data) >= 24:
-                    flags = struct.unpack('<I', data[4:8])[0]
+                    flags = struct.unpack("<I", data[4:8])[0]
                     if flags & 0x800000 or flags & 0x40000:
                         return True, latency_ms, 0
             except asyncio.TimeoutError:
-                logger.debug(f"No response from {address}:{port} on attempt {attempt + 1}")
+                logger.debug(
+                    f"No response from {address}:{port} on attempt {attempt + 1}"
+                )
             if attempt < max_retries - 1:
                 await asyncio.sleep(retry_delay)
             continue
         finally:
             sock.close()
-    
+
     # Only declare down after all retries fail
-    logger.warning(f"❌ Server {address}:{port} is DOWN after {max_retries} attempts over {max_retries * retry_delay} seconds")
+    logger.warning(
+        f"❌ Server {address}:{port} is DOWN after {max_retries} attempts over {max_retries * retry_delay} seconds"
+    )
     return False, None, None
 
+
 async def get_player_count_from_treestats(server_name: str) -> int:
     """Get player count from TreeStats.net API (same as ThwargLauncher)."""
     try:
         async with httpx.AsyncClient() as client:
-            response = await client.get("http://treestats.net/player_counts-latest.json", timeout=10)
+            response = await client.get(
+                "http://treestats.net/player_counts-latest.json", timeout=10
+            )
             if response.status_code == 200:
                 data = response.json()
                 for server_data in data:
@@ -257,54 +369,68 @@ async def get_player_count_from_treestats(server_name: str) -> int:
         logger.debug(f"Failed to get player count from TreeStats.net: {e}")
     return 0
 
+
 async def monitor_server_health():
     """Background task to monitor server health every 30 seconds and clean up old portals every minute."""
     server_name = "Coldeve"
     server_address = "play.coldeve.ac"
     server_port = 9000
     check_interval = 30  # seconds
-    player_count_interval = 300  # 5 minutes (like ThwargLauncher's 10 minutes, but more frequent)
+    player_count_interval = (
+        300  # 5 minutes (like ThwargLauncher's 10 minutes, but more frequent)
+    )
     portal_cleanup_interval = 60  # 1 minute
     last_player_count_check = 0
     last_portal_cleanup = 0
     current_player_count = None
-    
+
     # Initialize server status in database
     try:
         existing = await database.fetch_one(
             "SELECT * FROM server_status WHERE server_name = :name",
-            {"name": server_name}
+            {"name": server_name},
        )
         if not existing:
             await database.execute(
                 server_status.insert().values(
                     server_name=server_name,
                     current_status="unknown",
-                    total_uptime_seconds=0
+                    total_uptime_seconds=0,
                 )
             )
     except Exception as e:
         logger.error(f"Failed to initialize server status: {e}")
-    
+
     while True:
         try:
-            logger.debug(f"🏥 Running scheduled health check for {server_name} ({server_address}:{server_port})")
+            logger.debug(
+                f"🏥 Running scheduled health check for {server_name} ({server_address}:{server_port})"
+            )
             # Check server health via UDP (for status and latency)
-            is_up, latency_ms, _ = await check_server_health(server_address, server_port)
+            is_up, latency_ms, _ = await check_server_health(
+                server_address, server_port
+            )
             status = "up" if is_up else "down"
             now = datetime.now(timezone.utc)
-            
+
             # Get player count from TreeStats.net API (like ThwargLauncher)
             current_time = time.time()
-            if current_time - last_player_count_check >= player_count_interval or current_player_count is None:
+            if (
+                current_time - last_player_count_check >= player_count_interval
+                or current_player_count is None
+            ):
                 new_player_count = await get_player_count_from_treestats(server_name)
                 if new_player_count > 0:  # Only update if we got a valid count
                     current_player_count = new_player_count
                     last_player_count_check = current_time
-                    logger.info(f"🏥 Updated player count from TreeStats.net: {current_player_count}")
-            
-            logger.debug(f"🏥 Health check result: {status}, latency: {latency_ms}, players: {current_player_count}")
-            
+                    logger.info(
+                        f"🏥 Updated player count from TreeStats.net: {current_player_count}"
+                    )
+
+            logger.debug(
+                f"🏥 Health check result: {status}, latency: {latency_ms}, players: {current_player_count}"
+            )
+
             # Record health check
             await database.execute(
                 server_health_checks.insert().values(
@@ -313,37 +439,43 @@ async def monitor_server_health():
                     timestamp=now,
                     status=status,
                     latency_ms=latency_ms,
-                    player_count=current_player_count
+                    player_count=current_player_count,
                 )
             )
-            
+
             # Get previous status
             prev_status = await database.fetch_one(
                 "SELECT * FROM server_status WHERE server_name = :name",
-                {"name": server_name}
+                {"name": server_name},
            )
-            
+
             # Calculate uptime and detect restarts
             last_restart = prev_status["last_restart"] if prev_status else None
-            
-            if prev_status and prev_status["current_status"] == "down" and status == "up":
+
+            if (
+                prev_status
+                and prev_status["current_status"] == "down"
+                and status == "up"
+            ):
                 # Server came back up - this is a restart
                 last_restart = now
                 logger.info(f"Server {server_name} came back online")
                 # Broadcast to all browser clients
-                await _broadcast_to_browser_clients({
-                    "type": "server_status",
-                    "server": server_name,
-                    "status": "up",
-                    "message": "Server is back online"
-                })
-            
+                await _broadcast_to_browser_clients(
+                    {
+                        "type": "server_status",
+                        "server": server_name,
+                        "status": "up",
+                        "message": "Server is back online",
+                    }
+                )
+
             # Calculate uptime from last restart time (not accumulated)
             if last_restart and status == "up":
                 uptime_seconds = int((now - last_restart).total_seconds())
             else:
                 uptime_seconds = 0
-            
             # Update server status
(always include current_player_count if we have it) await database.execute( """ @@ -367,10 +499,10 @@ async def monitor_server_health(): "uptime": uptime_seconds, "check": now, "latency": latency_ms, - "players": current_player_count - } + "players": current_player_count, + }, ) - + # Update cache global _server_status_cache _server_status_cache = { @@ -379,32 +511,39 @@ async def monitor_server_health(): "player_count": current_player_count, "last_check": now.isoformat(), "uptime_seconds": uptime_seconds, - "last_restart": last_restart.isoformat() if last_restart else None + "last_restart": last_restart.isoformat() if last_restart else None, } - - logger.debug(f"Server health check: {status}, latency={latency_ms}ms, players={current_player_count}") - + + logger.debug( + f"Server health check: {status}, latency={latency_ms}ms, players={current_player_count}" + ) + # Portal cleanup (run every minute) current_time = time.time() if current_time - last_portal_cleanup >= portal_cleanup_interval: try: deleted_count = await cleanup_old_portals() - logger.info(f"Portal cleanup: removed {deleted_count} portals older than 1 hour") + logger.info( + f"Portal cleanup: removed {deleted_count} portals older than 1 hour" + ) last_portal_cleanup = current_time except Exception as cleanup_error: - logger.error(f"Portal cleanup error: {cleanup_error}", exc_info=True) - + logger.error( + f"Portal cleanup error: {cleanup_error}", exc_info=True + ) + except Exception as e: logger.error(f"Server health monitoring error: {e}", exc_info=True) - + await asyncio.sleep(check_interval) + async def cleanup_connections_loop(): """Background task to clean up stale WebSocket connections every 5 minutes.""" cleanup_interval = 300 # 5 minutes - + logger.info("🧹 Starting WebSocket connection cleanup task") - + while True: try: await asyncio.sleep(cleanup_interval) @@ -413,122 +552,143 @@ async def cleanup_connections_loop(): except Exception as e: logger.error(f"WebSocket cleanup task error: {e}", exc_info=True) + def _track_player_changes(new_players: list) -> None: """Track player changes for debugging flapping issues.""" from datetime import datetime, timezone - + # Get current player names current_players = {p["character_name"] for p in new_players} timestamp = datetime.now(timezone.utc) - + # Track telemetry timing for each player for player_data in new_players: player_name = player_data["character_name"] player_timestamp = player_data.get("timestamp") - + # Convert timestamp if it's a string if isinstance(player_timestamp, str): try: - player_timestamp = datetime.fromisoformat(player_timestamp.replace('Z', '+00:00')) + player_timestamp = datetime.fromisoformat( + player_timestamp.replace("Z", "+00:00") + ) except: player_timestamp = timestamp elif player_timestamp is None: player_timestamp = timestamp - + # Initialize player telemetry tracking if needed if player_name not in _player_telemetry_times: _player_telemetry_times[player_name] = [] - + # Add this telemetry timestamp _player_telemetry_times[player_name].append(player_timestamp) - + # Trim to max history if len(_player_telemetry_times[player_name]) > _max_telemetry_history: _player_telemetry_times[player_name].pop(0) - + # Get previous player names if we have history previous_players = set() if _player_history: previous_players = {p["character_name"] for p in _player_history[-1]["players"]} - + # Find players who entered and exited entered_players = current_players - previous_players exited_players = previous_players - current_players - + # Log events with 
telemetry timing analysis for player in entered_players: # Check if this is due to timing gap timing_gap = None - if player in _player_telemetry_times and len(_player_telemetry_times[player]) >= 2: + if ( + player in _player_telemetry_times + and len(_player_telemetry_times[player]) >= 2 + ): last_two = _player_telemetry_times[player][-2:] timing_gap = (last_two[1] - last_two[0]).total_seconds() - + event = { "timestamp": timestamp, "type": "enter", "character_name": player, "total_players": len(current_players), - "timing_gap": timing_gap + "timing_gap": timing_gap, } _player_events.append(event) - gap_info = f" (gap: {timing_gap:.1f}s)" if timing_gap and timing_gap > 25 else "" - logger.debug(f"Player entered: {player} (total: {len(current_players)}){gap_info}") - + gap_info = ( + f" (gap: {timing_gap:.1f}s)" if timing_gap and timing_gap > 25 else "" + ) + logger.debug( + f"Player entered: {player} (total: {len(current_players)}){gap_info}" + ) + for player in exited_players: # Calculate time since last telemetry last_telemetry_age = None if player in _player_telemetry_times and _player_telemetry_times[player]: last_telemetry = _player_telemetry_times[player][-1] last_telemetry_age = (timestamp - last_telemetry).total_seconds() - + event = { "timestamp": timestamp, - "type": "exit", + "type": "exit", "character_name": player, "total_players": len(current_players), - "last_telemetry_age": last_telemetry_age + "last_telemetry_age": last_telemetry_age, } _player_events.append(event) - age_info = f" (last telemetry: {last_telemetry_age:.1f}s ago)" if last_telemetry_age else "" - logger.debug(f"Player exited: {player} (total: {len(current_players)}){age_info}") - + age_info = ( + f" (last telemetry: {last_telemetry_age:.1f}s ago)" + if last_telemetry_age + else "" + ) + logger.debug( + f"Player exited: {player} (total: {len(current_players)}){age_info}" + ) + # Add current state to history history_entry = { "timestamp": timestamp, "players": new_players, "player_count": len(new_players), - "player_names": list(current_players) + "player_names": list(current_players), } _player_history.append(history_entry) - + # Trim history to max size if len(_player_history) > _max_history_size: _player_history.pop(0) - - # Trim events to max size + + # Trim events to max size if len(_player_events) > _max_events_size: _player_events.pop(0) + def _analyze_flapping_patterns() -> dict: """Analyze player events to identify flapping patterns.""" from collections import Counter, defaultdict - + if not _player_events: - return {"flapping_players": [], "frequent_events": [], "analysis": "No events to analyze"} - + return { + "flapping_players": [], + "frequent_events": [], + "analysis": "No events to analyze", + } + # Count events per player player_event_counts = Counter() player_flap_counts = defaultdict(int) - + # Track recent activity per player (last 10 events) recent_player_activity = defaultdict(list) - + for event in _player_events[-50:]: # Analyze last 50 events player = event["character_name"] event_type = event["type"] player_event_counts[player] += 1 recent_player_activity[player].append(event_type) - + # Identify flapping players (players with many enter/exit cycles) flapping_players = [] for player, activity in recent_player_activity.items(): @@ -536,73 +696,76 @@ def _analyze_flapping_patterns() -> dict: # Count alternating enter/exit patterns flap_score = 0 for i in range(1, len(activity)): - if activity[i] != activity[i-1]: # Different from previous + if activity[i] != activity[i - 1]: # Different 
from previous flap_score += 1 - + if flap_score >= 3: # At least 3 transitions - flapping_players.append({ - "character_name": player, - "events": len(activity), - "flap_score": flap_score, - "recent_activity": activity[-10:] # Last 10 events - }) - + flapping_players.append( + { + "character_name": player, + "events": len(activity), + "flap_score": flap_score, + "recent_activity": activity[-10:], # Last 10 events + } + ) + # Sort by flap score flapping_players.sort(key=lambda x: x["flap_score"], reverse=True) - + # Most active players frequent_events = [ - {"character_name": player, "event_count": count} + {"character_name": player, "event_count": count} for player, count in player_event_counts.most_common(10) ] - + # Recent activity summary recent_enters = sum(1 for e in _player_events[-20:] if e["type"] == "enter") recent_exits = sum(1 for e in _player_events[-20:] if e["type"] == "exit") - + return { "flapping_players": flapping_players, "frequent_events": frequent_events, "recent_activity": { "enters": recent_enters, "exits": recent_exits, - "net_change": recent_enters - recent_exits + "net_change": recent_enters - recent_exits, }, - "analysis": f"Found {len(flapping_players)} potentially flapping players" + "analysis": f"Found {len(flapping_players)} potentially flapping players", } + def _analyze_telemetry_timing() -> dict: """Analyze telemetry timing patterns for all players.""" from datetime import datetime, timezone - + timing_analysis = {} problem_players = [] - + for player_name, timestamps in _player_telemetry_times.items(): if len(timestamps) < 2: continue - + # Calculate intervals between telemetry messages intervals = [] for i in range(1, len(timestamps)): - interval = (timestamps[i] - timestamps[i-1]).total_seconds() + interval = (timestamps[i] - timestamps[i - 1]).total_seconds() intervals.append(interval) - + if not intervals: continue - + # Calculate timing statistics avg_interval = sum(intervals) / len(intervals) min_interval = min(intervals) max_interval = max(intervals) - + # Count problematic intervals (>30s) long_gaps = [i for i in intervals if i > 30] recent_long_gaps = [i for i in intervals[-5:] if i > 30] # Last 5 intervals - + # Determine if this player has timing issues has_timing_issues = len(long_gaps) > 0 or max_interval > 35 - + timing_stats = { "character_name": player_name, "total_messages": len(timestamps), @@ -611,34 +774,43 @@ def _analyze_telemetry_timing() -> dict: "max_interval": round(max_interval, 1), "long_gaps_count": len(long_gaps), "recent_long_gaps": len(recent_long_gaps), - "last_message_age": (datetime.now(timezone.utc) - timestamps[-1]).total_seconds() if timestamps else 0, + "last_message_age": ( + datetime.now(timezone.utc) - timestamps[-1] + ).total_seconds() + if timestamps + else 0, "has_timing_issues": has_timing_issues, - "recent_intervals": [round(i, 1) for i in intervals[-5:]] # Last 5 intervals + "recent_intervals": [ + round(i, 1) for i in intervals[-5:] + ], # Last 5 intervals } - + timing_analysis[player_name] = timing_stats - + if has_timing_issues: problem_players.append(timing_stats) - + # Sort problem players by severity (max interval) problem_players.sort(key=lambda x: x["max_interval"], reverse=True) - + return { "all_players": timing_analysis, "problem_players": problem_players, "summary": { "total_tracked_players": len(timing_analysis), "players_with_issues": len(problem_players), - "avg_intervals": [stats["avg_interval"] for stats in timing_analysis.values()], - } + "avg_intervals": [ + stats["avg_interval"] for 
stats in timing_analysis.values() + ], + }, } + async def _refresh_cache_loop() -> None: """Background task: refresh `/live` and `/trails` caches every 5 seconds.""" consecutive_failures = 0 max_consecutive_failures = 5 - + while True: try: # Recompute live players (last 30s) @@ -662,19 +834,21 @@ async def _refresh_cache_loop() -> None: LEFT JOIN char_stats cs ON sub.character_name = cs.character_name """ - + # Use a single connection for both queries to reduce connection churn async with database.connection() as conn: rows = await conn.fetch_all(sql_live, {"cutoff": cutoff}) new_players = [dict(r) for r in rows] - + # Track player changes for debugging _track_player_changes(new_players) - + _cached_live["players"] = new_players - + # Recompute trails (last 600s) - cutoff2 = datetime.utcnow().replace(tzinfo=timezone.utc) - timedelta(seconds=600) + cutoff2 = datetime.utcnow().replace(tzinfo=timezone.utc) - timedelta( + seconds=600 + ) sql_trail = """ SELECT timestamp, character_name, ew, ns, z FROM telemetry_events @@ -683,22 +857,34 @@ async def _refresh_cache_loop() -> None: """ rows2 = await conn.fetch_all(sql_trail, {"cutoff": cutoff2}) _cached_trails["trails"] = [ - {"timestamp": r["timestamp"], "character_name": r["character_name"], - "ew": r["ew"], "ns": r["ns"], "z": r["z"]} + { + "timestamp": r["timestamp"], + "character_name": r["character_name"], + "ew": r["ew"], + "ns": r["ns"], + "z": r["z"], + } for r in rows2 ] - + # Reset failure counter on success consecutive_failures = 0 - logger.debug(f"Cache refreshed: {len(_cached_live['players'])} players, {len(_cached_trails['trails'])} trail points") - + logger.debug( + f"Cache refreshed: {len(_cached_live['players'])} players, {len(_cached_trails['trails'])} trail points" + ) + except Exception as e: consecutive_failures += 1 - logger.error(f"Cache refresh failed ({consecutive_failures}/{max_consecutive_failures}): {e}", exc_info=True) - + logger.error( + f"Cache refresh failed ({consecutive_failures}/{max_consecutive_failures}): {e}", + exc_info=True, + ) + # If too many consecutive failures, wait longer and try to reconnect if consecutive_failures >= max_consecutive_failures: - logger.warning(f"Too many consecutive cache refresh failures. Attempting database reconnection...") + logger.warning( + f"Too many consecutive cache refresh failures. Attempting database reconnection..." 
+                )
                 try:
                     await database.disconnect()
                     await asyncio.sleep(2)
@@ -709,26 +895,29 @@ async def _refresh_cache_loop() -> None:
                     logger.error(f"Database reconnection failed: {reconnect_error}")
                     await asyncio.sleep(10)  # Wait longer before retrying
                     continue
-        
+
         await asyncio.sleep(5)
 
+
 async def _refresh_total_rares_cache() -> None:
     """Background task: refresh total rares cache every 5 minutes."""
     consecutive_failures = 0
     max_consecutive_failures = 3
-    
+
     while True:
         try:
             async with database.connection() as conn:
                 # Get all-time total rares (sum of all characters) - gracefully handle missing table
                 try:
-                    all_time_query = "SELECT COALESCE(SUM(total_rares), 0) as total FROM rare_stats"
+                    all_time_query = (
+                        "SELECT COALESCE(SUM(total_rares), 0) as total FROM rare_stats"
+                    )
                     all_time_result = await conn.fetch_one(all_time_query)
                     all_time_total = all_time_result["total"] if all_time_result else 0
                 except Exception as e:
                     logger.debug(f"rare_stats table not available: {e}")
                     all_time_total = 0
-                
+
                 # Get today's rares from rare_events table - gracefully handle missing table
                 try:
                     today_query = """
@@ -744,7 +933,9 @@ async def _refresh_total_rares_cache() -> None:
 
                 # Get total kills from char_stats table (all-time, all characters)
                 try:
-                    kills_query = "SELECT COALESCE(SUM(total_kills), 0) as total FROM char_stats"
+                    kills_query = (
+                        "SELECT COALESCE(SUM(total_kills), 0) as total FROM char_stats"
+                    )
                     kills_result = await conn.fetch_one(kills_query)
                     total_kills = kills_result["total"] if kills_result else 0
                 except Exception as e:
@@ -760,20 +951,28 @@ async def _refresh_total_rares_cache() -> None:
                 _cached_total_kills["last_updated"] = datetime.now(timezone.utc)
 
             consecutive_failures = 0
-            logger.debug(f"Stats cache updated: Rares all-time: {all_time_total}, today: {today_total}, Kills: {total_kills}")
-            
+            logger.debug(
+                f"Stats cache updated: Rares all-time: {all_time_total}, today: {today_total}, Kills: {total_kills}"
+            )
+
         except Exception as e:
             consecutive_failures += 1
-            logger.error(f"Total rares cache refresh failed ({consecutive_failures}/{max_consecutive_failures}): {e}", exc_info=True)
-            
+            logger.error(
+                f"Total rares cache refresh failed ({consecutive_failures}/{max_consecutive_failures}): {e}",
+                exc_info=True,
+            )
+
             if consecutive_failures >= max_consecutive_failures:
-                logger.warning("Too many consecutive total rares cache failures, waiting longer...")
+                logger.warning(
+                    "Too many consecutive total rares cache failures, waiting longer..."
+                )
                 await asyncio.sleep(60)  # Wait longer on repeated failures
                 continue
-        
+
         # Sleep for 5 minutes (300 seconds)
         await asyncio.sleep(300)
 
+
 # ------------------------------------------------------------------
 app = FastAPI()
 # In-memory store mapping character_name to the most recent telemetry snapshot
@@ -785,7 +984,9 @@
 live_character_stats: Dict[str, dict] = {}
 SHARED_SECRET = "your_shared_secret"
 # LOG_FILE = "telemetry_log.jsonl"
 # ------------------------------------------------------------------
-ACTIVE_WINDOW = timedelta(seconds=30)  # Time window defining "online" players (last 30 seconds)
+ACTIVE_WINDOW = timedelta(
+    seconds=30
+)  # Time window defining "online" players (last 30 seconds)
 
 """
 Data models for plugin events:
@@ -826,6 +1027,7 @@ class SpawnEvent(BaseModel):
     """
     Model for a spawn event emitted by plugin clients when a mob appears.
    Records character context, mob type, timestamp, and spawn location.
""" + character_name: str mob: str timestamp: datetime @@ -833,11 +1035,13 @@ class SpawnEvent(BaseModel): ns: float z: float = 0.0 + class RareEvent(BaseModel): """ Model for a rare mob event when a player encounters or discovers a rare entity. Includes character, event name, timestamp, and location coordinates. """ + character_name: str name: str timestamp: datetime @@ -851,6 +1055,7 @@ class FullInventoryMessage(BaseModel): Model for the full_inventory WebSocket message type. Contains complete character inventory snapshot with raw item data. """ + character_name: str timestamp: datetime item_count: int @@ -862,6 +1067,7 @@ class VitalsMessage(BaseModel): Model for the vitals WebSocket message type. Contains character health, stamina, mana, and vitae information. """ + character_name: str timestamp: datetime health_current: int @@ -882,6 +1088,7 @@ class CharacterStatsMessage(BaseModel): Contains character attributes, skills, allegiance, and progression data. Sent by plugin on login and every 10 minutes. """ + character_name: str timestamp: datetime level: Optional[int] = None @@ -899,8 +1106,11 @@ class CharacterStatsMessage(BaseModel): vitals: Optional[dict] = None skills: Optional[dict] = None allegiance: Optional[dict] = None - properties: Optional[dict] = None # Dict[int, int] β€” DWORD properties (augs, ratings, etc.) - titles: Optional[list] = None # List[str] β€” character title names + active_item_enchantments: Optional[list] = None + properties: Optional[dict] = ( + None # Dict[int, int] β€” DWORD properties (augs, ratings, etc.) + ) + titles: Optional[list] = None # List[str] β€” character title names @app.on_event("startup") @@ -923,18 +1133,26 @@ async def on_startup(): logger.debug(f"Could not access pool details: {pool_error}") break except Exception as e: - logger.warning(f"Database connection failed (attempt {attempt}/{max_attempts}): {e}") + logger.warning( + f"Database connection failed (attempt {attempt}/{max_attempts}): {e}" + ) if attempt < max_attempts: await asyncio.sleep(5) else: - raise RuntimeError(f"Could not connect to database after {max_attempts} attempts") + raise RuntimeError( + f"Could not connect to database after {max_attempts} attempts" + ) # Start background cache refresh (live & trails) global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task _cache_task = asyncio.create_task(_refresh_cache_loop()) _rares_cache_task = asyncio.create_task(_refresh_total_rares_cache()) _server_health_task = asyncio.create_task(monitor_server_health()) _cleanup_task = asyncio.create_task(cleanup_connections_loop()) - logger.info("Background cache refresh, server monitoring, and connection cleanup tasks started") + logger.info( + "Background cache refresh, server monitoring, and connection cleanup tasks started" + ) + + @app.on_event("shutdown") async def on_shutdown(): """Event handler triggered when application is shutting down. 
@@ -950,7 +1168,7 @@ async def on_shutdown(): await _cache_task except asyncio.CancelledError: pass - + if _rares_cache_task: logger.info("Stopping total rares cache refresh task") _rares_cache_task.cancel() @@ -958,7 +1176,7 @@ async def on_shutdown(): await _rares_cache_task except asyncio.CancelledError: pass - + if _server_health_task: logger.info("Stopping server health monitoring task") _server_health_task.cancel() @@ -966,7 +1184,7 @@ async def on_shutdown(): await _server_health_task except asyncio.CancelledError: pass - + if _cleanup_task: logger.info("Stopping WebSocket connection cleanup task") _cleanup_task.cancel() @@ -978,49 +1196,53 @@ async def on_shutdown(): await database.disconnect() - # ------------------------ GET ----------------------------------- @app.get("/debug") def debug(): return {"status": "OK"} + @app.get("/debug/player-flapping") async def get_player_flapping_debug(): """Return player tracking data for debugging flapping issues.""" try: # Analyze flapping patterns flapping_analysis = _analyze_flapping_patterns() - + # Analyze telemetry timing timing_analysis = _analyze_telemetry_timing() - + # Get recent events (last 50) - recent_events = _player_events[-50:] if len(_player_events) > 50 else _player_events - + recent_events = ( + _player_events[-50:] if len(_player_events) > 50 else _player_events + ) + # Convert timestamps to ISO format for JSON serialization formatted_events = [] for event in recent_events: formatted_event = event.copy() formatted_event["timestamp"] = event["timestamp"].isoformat() formatted_events.append(formatted_event) - + # Format history formatted_history = [] for entry in _player_history: formatted_entry = { "timestamp": entry["timestamp"].isoformat(), "player_count": entry["player_count"], - "player_names": entry["player_names"] + "player_names": entry["player_names"], } formatted_history.append(formatted_entry) - + # Format timing data for JSON serialization formatted_timing = {} for player_name, timing_data in timing_analysis["all_players"].items(): formatted_timing[player_name] = timing_data.copy() # Round last_message_age for readability - formatted_timing[player_name]["last_message_age"] = round(timing_data["last_message_age"], 1) - + formatted_timing[player_name]["last_message_age"] = round( + timing_data["last_message_age"], 1 + ) + return { "current_players": len(_cached_live.get("players", [])), "history": formatted_history, @@ -1029,20 +1251,21 @@ async def get_player_flapping_debug(): "timing_analysis": { "all_players": formatted_timing, "problem_players": timing_analysis["problem_players"], - "summary": timing_analysis["summary"] + "summary": timing_analysis["summary"], }, "tracking_stats": { "history_entries": len(_player_history), "total_events": len(_player_events), "tracked_players": len(_player_telemetry_times), "max_history_size": _max_history_size, - "max_events_size": _max_events_size - } + "max_events_size": _max_events_size, + }, } except Exception as e: logger.error(f"Failed to get player flapping debug data: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") + @app.get("/debug/websocket-health") async def get_websocket_health(): """Return simple WebSocket connection counts.""" @@ -1050,26 +1273,30 @@ async def get_websocket_health(): return { "plugin_connections": _plugin_connections, "browser_connections": _browser_connections, - "total_connections": _plugin_connections + _browser_connections + "total_connections": _plugin_connections + _browser_connections, } except 
Exception as e: logger.error(f"Failed to get WebSocket health data: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") + @app.get("/debug/database-performance") async def get_database_performance(): """Return simple database query performance statistics.""" try: - avg_query_time = (_total_query_time / _total_queries) if _total_queries > 0 else 0.0 + avg_query_time = ( + (_total_query_time / _total_queries) if _total_queries > 0 else 0.0 + ) return { "total_queries": _total_queries, "total_query_time": round(_total_query_time, 3), - "average_query_time": round(avg_query_time, 3) + "average_query_time": round(avg_query_time, 3), } except Exception as e: logger.error(f"Failed to get database performance data: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") + @app.get("/debug/recent-activity") async def get_recent_activity(): """Return recent telemetry activity feed.""" @@ -1077,43 +1304,56 @@ async def get_recent_activity(): return { "recent_messages": _recent_telemetry_messages.copy(), "total_messages": len(_recent_telemetry_messages), - "max_messages": _max_recent_messages + "max_messages": _max_recent_messages, } except Exception as e: logger.error(f"Failed to get recent activity data: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") + @app.get("/server-health") async def get_server_health(): """Return current server health status.""" try: # Get latest status from database if cache is stale - if not _server_status_cache.get("last_check") or \ - (datetime.now(timezone.utc) - datetime.fromisoformat(_server_status_cache["last_check"].replace('Z', '+00:00')) > timedelta(minutes=2)): - + if not _server_status_cache.get("last_check") or ( + datetime.now(timezone.utc) + - datetime.fromisoformat( + _server_status_cache["last_check"].replace("Z", "+00:00") + ) + > timedelta(minutes=2) + ): row = await database.fetch_one( "SELECT * FROM server_status WHERE server_name = :name", - {"name": "Coldeve"} + {"name": "Coldeve"}, ) - + if row: - _server_status_cache.update({ - "status": row["current_status"], - "latency_ms": row["last_latency_ms"], - "player_count": row["last_player_count"], - "last_check": row["last_check"].isoformat() if row["last_check"] else None, - "uptime_seconds": row["total_uptime_seconds"], - "last_restart": row["last_restart"].isoformat() if row["last_restart"] else None - }) - + _server_status_cache.update( + { + "status": row["current_status"], + "latency_ms": row["last_latency_ms"], + "player_count": row["last_player_count"], + "last_check": row["last_check"].isoformat() + if row["last_check"] + else None, + "uptime_seconds": row["total_uptime_seconds"], + "last_restart": row["last_restart"].isoformat() + if row["last_restart"] + else None, + } + ) + # Format uptime uptime_seconds = _server_status_cache.get("uptime_seconds", 0) days = uptime_seconds // 86400 hours = (uptime_seconds % 86400) // 3600 minutes = (uptime_seconds % 3600) // 60 - - uptime_str = f"{days}d {hours}h {minutes}m" if days > 0 else f"{hours}h {minutes}m" - + + uptime_str = ( + f"{days}d {hours}h {minutes}m" if days > 0 else f"{hours}h {minutes}m" + ) + return { "server_name": "Coldeve", "status": _server_status_cache.get("status", "unknown"), @@ -1122,13 +1362,14 @@ async def get_server_health(): "uptime": uptime_str, "uptime_seconds": uptime_seconds, "last_restart": _server_status_cache.get("last_restart"), - "last_check": _server_status_cache.get("last_check") + "last_check": 
_server_status_cache.get("last_check"), } - + except Exception as e: logger.error(f"Failed to get server health data: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") + @app.get("/quest-status") async def get_quest_status(): """Return current cached quest status for all players.""" @@ -1138,15 +1379,16 @@ async def get_quest_status(): "quest_data": _quest_status_cache, "tracked_quests": [ "Stipend Collection Timer", - "Blank Augmentation Gem Pickup Timer", - "Insatiable Eater Jaw" + "Blank Augmentation Gem Pickup Timer", + "Insatiable Eater Jaw", ], - "player_count": len(_quest_status_cache) + "player_count": len(_quest_status_cache), } except Exception as e: logger.error(f"Failed to get quest status data: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") + @app.get("/portals") async def get_portals(): """Return all active portals (less than 1 hour old).""" @@ -1157,32 +1399,26 @@ async def get_portals(): FROM portals ORDER BY discovered_at DESC """ - + rows = await database.fetch_all(query) - + portals = [] for row in rows: portal = { "portal_name": row["portal_name"], - "coordinates": { - "ns": row["ns"], - "ew": row["ew"], - "z": row["z"] - }, + "coordinates": {"ns": row["ns"], "ew": row["ew"], "z": row["z"]}, "discovered_at": row["discovered_at"].isoformat(), - "discovered_by": row["discovered_by"] + "discovered_by": row["discovered_by"], } portals.append(portal) - - return { - "portals": portals, - "portal_count": len(portals) - } - + + return {"portals": portals, "portal_count": len(portals)} + except Exception as e: logger.error(f"Failed to get portals data: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") + @app.get("/live", response_model=dict) @app.get("/live/", response_model=dict) async def get_live_players(): @@ -1194,8 +1430,6 @@ async def get_live_players(): raise HTTPException(status_code=500, detail="Internal server error") - - # --- GET Trails --------------------------------- @app.get("/trails") @app.get("/trails/") @@ -1235,15 +1469,19 @@ async def get_total_kills(): # --- GET Spawn Heat Map Endpoint --------------------------------- @app.get("/spawns/heatmap") async def get_spawn_heatmap_data( - hours: int = Query(24, ge=1, le=168, description="Lookback window in hours (1-168)"), - limit: int = Query(10000, ge=100, le=50000, description="Maximum number of spawn points to return") + hours: int = Query( + 24, ge=1, le=168, description="Lookback window in hours (1-168)" + ), + limit: int = Query( + 10000, ge=100, le=50000, description="Maximum number of spawn points to return" + ), ): """ Aggregate spawn locations for heat-map visualization. - + Returns spawn event coordinates grouped by location with intensity counts for the specified time window. 
- + Response format: { "spawn_points": [{"ew": float, "ns": float, "intensity": int}, ...], @@ -1253,7 +1491,7 @@ async def get_spawn_heatmap_data( """ try: cutoff = datetime.now(timezone.utc) - timedelta(hours=hours) - + # Aggregate spawn events by coordinates within time window query = """ SELECT ew, ns, COUNT(*) AS spawn_count @@ -1263,28 +1501,30 @@ async def get_spawn_heatmap_data( ORDER BY spawn_count DESC LIMIT :limit """ - + rows = await database.fetch_all(query, {"cutoff": cutoff, "limit": limit}) - + spawn_points = [ { "ew": float(row["ew"]), "ns": float(row["ns"]), - "intensity": int(row["spawn_count"]) + "intensity": int(row["spawn_count"]), } for row in rows ] - + result = { "spawn_points": spawn_points, "total_points": len(spawn_points), "timestamp": datetime.now(timezone.utc).isoformat(), - "hours_window": hours + "hours_window": hours, } - - logger.debug(f"Heat map data: {len(spawn_points)} unique spawn locations from last {hours} hours") + + logger.debug( + f"Heat map data: {len(spawn_points)} unique spawn locations from last {hours} hours" + ) return JSONResponse(content=jsonable_encoder(result)) - + except Exception as e: logger.error(f"Heat map query failed: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Spawn heat map query failed") @@ -1299,22 +1539,29 @@ async def get_character_inventory(character_name: str): response = await client.get( f"{INVENTORY_SERVICE_URL}/inventory/{character_name}" ) - + if response.status_code == 200: return JSONResponse(content=response.json()) elif response.status_code == 404: - raise HTTPException(status_code=404, detail=f"No inventory found for character '{character_name}'") + raise HTTPException( + status_code=404, + detail=f"No inventory found for character '{character_name}'", + ) else: - logger.error(f"Inventory service returned {response.status_code} for {character_name}") + logger.error( + f"Inventory service returned {response.status_code} for {character_name}" + ) raise HTTPException(status_code=502, detail="Inventory service error") - + except httpx.RequestError as e: logger.error(f"Could not reach inventory service: {e}") raise HTTPException(status_code=503, detail="Inventory service unavailable") except HTTPException: raise except Exception as e: - logger.error(f"Failed to get inventory for {character_name}: {e}", exc_info=True) + logger.error( + f"Failed to get inventory for {character_name}: {e}", exc_info=True + ) raise HTTPException(status_code=500, detail="Internal server error") @@ -1326,66 +1573,72 @@ async def search_character_inventory( min_value: int = Query(None, description="Minimum item value"), max_value: int = Query(None, description="Maximum item value"), min_burden: int = Query(None, description="Minimum burden"), - max_burden: int = Query(None, description="Maximum burden") + max_burden: int = Query(None, description="Maximum burden"), ): """Search and filter inventory items for a character with various criteria.""" try: conditions = ["character_name = :character_name"] params = {"character_name": character_name} - + if name: conditions.append("name ILIKE :name") params["name"] = f"%{name}%" - + if object_class is not None: conditions.append("object_class = :object_class") params["object_class"] = object_class - + if min_value is not None: conditions.append("value >= :min_value") params["min_value"] = min_value - + if max_value is not None: conditions.append("value <= :max_value") params["max_value"] = max_value - + if min_burden is not None: conditions.append("burden >= :min_burden") 
params["min_burden"] = min_burden - + if max_burden is not None: conditions.append("burden <= :max_burden") params["max_burden"] = max_burden - + query = f""" SELECT name, icon, object_class, value, burden, has_id_data, item_data, timestamp FROM character_inventories - WHERE {' AND '.join(conditions)} + WHERE {" AND ".join(conditions)} ORDER BY value DESC, name """ - + rows = await database.fetch_all(query, params) - + items = [] for row in rows: item = dict(row) items.append(item) - - return JSONResponse(content=jsonable_encoder({ - "character_name": character_name, - "item_count": len(items), - "search_criteria": { - "name": name, - "object_class": object_class, - "min_value": min_value, - "max_value": max_value, - "min_burden": min_burden, - "max_burden": max_burden - }, - "items": items - })) + + return JSONResponse( + content=jsonable_encoder( + { + "character_name": character_name, + "item_count": len(items), + "search_criteria": { + "name": name, + "object_class": object_class, + "min_value": min_value, + "max_value": max_value, + "min_burden": min_burden, + "max_burden": max_burden, + }, + "items": items, + } + ) + ) except Exception as e: - logger.error(f"Failed to search inventory for {character_name}: {e}", exc_info=True) + logger.error( + f"Failed to search inventory for {character_name}: {e}", exc_info=True + ) raise HTTPException(status_code=500, detail="Internal server error") @@ -1400,19 +1653,22 @@ async def list_characters_with_inventories(): ORDER BY last_updated DESC """ rows = await database.fetch_all(query) - + characters = [] for row in rows: - characters.append({ - "character_name": row["character_name"], - "item_count": row["item_count"], - "last_updated": row["last_updated"] - }) - - return JSONResponse(content=jsonable_encoder({ - "characters": characters, - "total_characters": len(characters) - })) + characters.append( + { + "character_name": row["character_name"], + "item_count": row["item_count"], + "last_updated": row["last_updated"], + } + ) + + return JSONResponse( + content=jsonable_encoder( + {"characters": characters, "total_characters": len(characters)} + ) + ) except Exception as e: logger.error(f"Failed to list inventory characters: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") @@ -1425,24 +1681,37 @@ async def get_inventory_characters(): try: async with httpx.AsyncClient(timeout=10.0) as client: response = await client.get(f"{INVENTORY_SERVICE_URL}/characters/list") - + if response.status_code == 200: return JSONResponse(content=response.json()) else: - logger.error(f"Inventory service returned {response.status_code}: {response.text}") - raise HTTPException(status_code=response.status_code, detail="Failed to get characters from inventory service") - + logger.error( + f"Inventory service returned {response.status_code}: {response.text}" + ) + raise HTTPException( + status_code=response.status_code, + detail="Failed to get characters from inventory service", + ) + except Exception as e: - logger.error(f"Failed to proxy inventory characters request: {e}", exc_info=True) - raise HTTPException(status_code=500, detail="Failed to get inventory characters") + logger.error( + f"Failed to proxy inventory characters request: {e}", exc_info=True + ) + raise HTTPException( + status_code=500, detail="Failed to get inventory characters" + ) # --- Inventory Search Service Proxy Endpoints ------------------- @app.get("/search/items") async def search_items_proxy( - text: str = Query(None, description="Search item names, 
descriptions, or properties"), + text: str = Query( + None, description="Search item names, descriptions, or properties" + ), character: str = Query(None, description="Limit search to specific character"), - include_all_characters: bool = Query(False, description="Search across all characters"), + include_all_characters: bool = Query( + False, description="Search across all characters" + ), equipment_status: str = Query(None, description="equipped, unequipped, or all"), equipment_slot: int = Query(None, description="Equipment slot mask"), # Item category filtering @@ -1452,14 +1721,18 @@ async def search_items_proxy( # Spell filtering has_spell: str = Query(None, description="Must have this specific spell (by name)"), spell_contains: str = Query(None, description="Spell name contains this text"), - legendary_cantrips: str = Query(None, description="Comma-separated list of legendary cantrip names"), + legendary_cantrips: str = Query( + None, description="Comma-separated list of legendary cantrip names" + ), # Combat properties min_damage: int = Query(None, description="Minimum damage"), max_damage: int = Query(None, description="Maximum damage"), min_armor: int = Query(None, description="Minimum armor level"), max_armor: int = Query(None, description="Maximum armor level"), min_attack_bonus: float = Query(None, description="Minimum attack bonus"), - min_crit_damage_rating: int = Query(None, description="Minimum critical damage rating"), + min_crit_damage_rating: int = Query( + None, description="Minimum critical damage rating" + ), min_damage_rating: int = Query(None, description="Minimum damage rating"), min_heal_boost_rating: int = Query(None, description="Minimum heal boost rating"), max_level: int = Query(None, description="Maximum wield level requirement"), @@ -1477,69 +1750,109 @@ async def search_items_proxy( min_value: int = Query(None, description="Minimum item value"), max_value: int = Query(None, description="Maximum item value"), max_burden: int = Query(None, description="Maximum burden"), - sort_by: str = Query("name", description="Sort field: name, value, damage, armor, workmanship"), + sort_by: str = Query( + "name", description="Sort field: name, value, damage, armor, workmanship" + ), sort_dir: str = Query("asc", description="Sort direction: asc or desc"), page: int = Query(1, ge=1, description="Page number"), - limit: int = Query(50, ge=1, le=200, description="Items per page") + limit: int = Query(50, ge=1, le=200, description="Items per page"), ): """Proxy to inventory service comprehensive item search.""" try: # Build query parameters params = {} - if text: params["text"] = text - if character: params["character"] = character - if include_all_characters: params["include_all_characters"] = include_all_characters - if equipment_status: params["equipment_status"] = equipment_status - if equipment_slot is not None: params["equipment_slot"] = equipment_slot + if text: + params["text"] = text + if character: + params["character"] = character + if include_all_characters: + params["include_all_characters"] = include_all_characters + if equipment_status: + params["equipment_status"] = equipment_status + if equipment_slot is not None: + params["equipment_slot"] = equipment_slot # Category filtering - if armor_only: params["armor_only"] = armor_only - if jewelry_only: params["jewelry_only"] = jewelry_only - if weapon_only: params["weapon_only"] = weapon_only + if armor_only: + params["armor_only"] = armor_only + if jewelry_only: + params["jewelry_only"] = jewelry_only + if 
weapon_only: + params["weapon_only"] = weapon_only # Spell filtering - if has_spell: params["has_spell"] = has_spell - if spell_contains: params["spell_contains"] = spell_contains - if legendary_cantrips: params["legendary_cantrips"] = legendary_cantrips + if has_spell: + params["has_spell"] = has_spell + if spell_contains: + params["spell_contains"] = spell_contains + if legendary_cantrips: + params["legendary_cantrips"] = legendary_cantrips # Combat properties - if min_damage is not None: params["min_damage"] = min_damage - if max_damage is not None: params["max_damage"] = max_damage - if min_armor is not None: params["min_armor"] = min_armor - if max_armor is not None: params["max_armor"] = max_armor - if min_attack_bonus is not None: params["min_attack_bonus"] = min_attack_bonus - if min_crit_damage_rating is not None: params["min_crit_damage_rating"] = min_crit_damage_rating - if min_damage_rating is not None: params["min_damage_rating"] = min_damage_rating - if min_heal_boost_rating is not None: params["min_heal_boost_rating"] = min_heal_boost_rating - if max_level is not None: params["max_level"] = max_level - if min_level is not None: params["min_level"] = min_level - if material: params["material"] = material - if min_workmanship is not None: params["min_workmanship"] = min_workmanship - if has_imbue is not None: params["has_imbue"] = has_imbue - if item_set: params["item_set"] = item_set - if min_tinks is not None: params["min_tinks"] = min_tinks - if bonded is not None: params["bonded"] = bonded - if attuned is not None: params["attuned"] = attuned - if unique is not None: params["unique"] = unique - if is_rare is not None: params["is_rare"] = is_rare - if min_condition is not None: params["min_condition"] = min_condition - if min_value is not None: params["min_value"] = min_value - if max_value is not None: params["max_value"] = max_value - if max_burden is not None: params["max_burden"] = max_burden + if min_damage is not None: + params["min_damage"] = min_damage + if max_damage is not None: + params["max_damage"] = max_damage + if min_armor is not None: + params["min_armor"] = min_armor + if max_armor is not None: + params["max_armor"] = max_armor + if min_attack_bonus is not None: + params["min_attack_bonus"] = min_attack_bonus + if min_crit_damage_rating is not None: + params["min_crit_damage_rating"] = min_crit_damage_rating + if min_damage_rating is not None: + params["min_damage_rating"] = min_damage_rating + if min_heal_boost_rating is not None: + params["min_heal_boost_rating"] = min_heal_boost_rating + if max_level is not None: + params["max_level"] = max_level + if min_level is not None: + params["min_level"] = min_level + if material: + params["material"] = material + if min_workmanship is not None: + params["min_workmanship"] = min_workmanship + if has_imbue is not None: + params["has_imbue"] = has_imbue + if item_set: + params["item_set"] = item_set + if min_tinks is not None: + params["min_tinks"] = min_tinks + if bonded is not None: + params["bonded"] = bonded + if attuned is not None: + params["attuned"] = attuned + if unique is not None: + params["unique"] = unique + if is_rare is not None: + params["is_rare"] = is_rare + if min_condition is not None: + params["min_condition"] = min_condition + if min_value is not None: + params["min_value"] = min_value + if max_value is not None: + params["max_value"] = max_value + if max_burden is not None: + params["max_burden"] = max_burden params["sort_by"] = sort_by params["sort_dir"] = sort_dir params["page"] = page 
params["limit"] = limit - + async with httpx.AsyncClient(timeout=30.0) as client: response = await client.get( - f"{INVENTORY_SERVICE_URL}/search/items", - params=params + f"{INVENTORY_SERVICE_URL}/search/items", params=params ) - + if response.status_code == 200: return JSONResponse(content=response.json()) else: - logger.error(f"Inventory search service returned {response.status_code}") - raise HTTPException(status_code=response.status_code, detail="Inventory search service error") - + logger.error( + f"Inventory search service returned {response.status_code}" + ) + raise HTTPException( + status_code=response.status_code, + detail="Inventory search service error", + ) + except httpx.RequestError as e: logger.error(f"Could not reach inventory service: {e}") raise HTTPException(status_code=503, detail="Inventory service unavailable") @@ -1553,35 +1866,44 @@ async def search_items_proxy( @app.get("/search/equipped/{character_name}") async def search_equipped_items_proxy( character_name: str, - slot: int = Query(None, description="Specific equipment slot mask") + slot: int = Query(None, description="Specific equipment slot mask"), ): """Proxy to inventory service equipped items search.""" try: params = {} if slot is not None: params["slot"] = slot - + async with httpx.AsyncClient(timeout=10.0) as client: response = await client.get( f"{INVENTORY_SERVICE_URL}/search/equipped/{character_name}", - params=params + params=params, ) - + if response.status_code == 200: return JSONResponse(content=response.json()) elif response.status_code == 404: - raise HTTPException(status_code=404, detail=f"No equipped items found for character '{character_name}'") + raise HTTPException( + status_code=404, + detail=f"No equipped items found for character '{character_name}'", + ) else: - logger.error(f"Inventory service returned {response.status_code} for equipped items search") - raise HTTPException(status_code=response.status_code, detail="Inventory service error") - + logger.error( + f"Inventory service returned {response.status_code} for equipped items search" + ) + raise HTTPException( + status_code=response.status_code, detail="Inventory service error" + ) + except httpx.RequestError as e: logger.error(f"Could not reach inventory service: {e}") raise HTTPException(status_code=503, detail="Inventory service unavailable") except HTTPException: raise except Exception as e: - logger.error(f"Failed to search equipped items for {character_name}: {e}", exc_info=True) + logger.error( + f"Failed to search equipped items for {character_name}: {e}", exc_info=True + ) raise HTTPException(status_code=500, detail="Internal server error") @@ -1589,33 +1911,45 @@ async def search_equipped_items_proxy( async def find_equipment_upgrades_proxy( character_name: str, slot: int, - upgrade_type: str = Query("damage", description="What to optimize for: damage, armor, workmanship, value") + upgrade_type: str = Query( + "damage", description="What to optimize for: damage, armor, workmanship, value" + ), ): """Proxy to inventory service equipment upgrades search.""" try: params = {"upgrade_type": upgrade_type} - + async with httpx.AsyncClient(timeout=10.0) as client: response = await client.get( f"{INVENTORY_SERVICE_URL}/search/upgrades/{character_name}/{slot}", - params=params + params=params, ) - + if response.status_code == 200: return JSONResponse(content=response.json()) elif response.status_code == 404: - raise HTTPException(status_code=404, detail=f"No upgrade options found for character '{character_name}' slot {slot}") + 
raise HTTPException( + status_code=404, + detail=f"No upgrade options found for character '{character_name}' slot {slot}", + ) else: - logger.error(f"Inventory service returned {response.status_code} for upgrades search") - raise HTTPException(status_code=response.status_code, detail="Inventory service error") - + logger.error( + f"Inventory service returned {response.status_code} for upgrades search" + ) + raise HTTPException( + status_code=response.status_code, detail="Inventory service error" + ) + except httpx.RequestError as e: logger.error(f"Could not reach inventory service: {e}") raise HTTPException(status_code=503, detail="Inventory service unavailable") except HTTPException: raise except Exception as e: - logger.error(f"Failed to find equipment upgrades for {character_name} slot {slot}: {e}", exc_info=True) + logger.error( + f"Failed to find equipment upgrades for {character_name} slot {slot}: {e}", + exc_info=True, + ) raise HTTPException(status_code=500, detail="Internal server error") @@ -1625,13 +1959,17 @@ async def list_equipment_sets_proxy(): try: async with httpx.AsyncClient(timeout=10.0) as client: response = await client.get(f"{INVENTORY_SERVICE_URL}/sets/list") - + if response.status_code == 200: return JSONResponse(content=response.json()) else: - logger.error(f"Inventory service returned {response.status_code} for sets list") - raise HTTPException(status_code=response.status_code, detail="Inventory service error") - + logger.error( + f"Inventory service returned {response.status_code} for sets list" + ) + raise HTTPException( + status_code=response.status_code, detail="Inventory service error" + ) + except httpx.RequestError as e: logger.error(f"Could not reach inventory service: {e}") raise HTTPException(status_code=503, detail="Inventory service unavailable") @@ -1649,6 +1987,7 @@ browser_conns: set[WebSocket] = set() # Mapping of plugin clients by character_name to their WebSocket for command forwarding plugin_conns: Dict[str, WebSocket] = {} + async def _broadcast_to_browser_clients(snapshot: dict): """Broadcast a telemetry or chat message to all connected browser clients. 
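Editor's note (annotation, not part of the patch): every proxy endpoint above repeats the same error ladder — a 200 from the upstream passes through, any other upstream status is surfaced as-is, httpx.RequestError maps to 503, HTTPException is re-raised untouched, and anything unexpected becomes a 500. A minimal sketch of that ladder as a shared helper (the name proxy_get is hypothetical; a sketch under those assumptions, not part of main.py):

    import httpx
    from fastapi import HTTPException
    from fastapi.responses import JSONResponse

    async def proxy_get(url: str, params: dict | None = None, timeout: float = 10.0) -> JSONResponse:
        """Forward a GET to an upstream service, mapping failures onto HTTP errors."""
        try:
            async with httpx.AsyncClient(timeout=timeout) as client:
                response = await client.get(url, params=params)
        except httpx.RequestError:
            # Network-level failure: the upstream never answered.
            raise HTTPException(status_code=503, detail="Upstream service unavailable")
        if response.status_code == 200:
            return JSONResponse(content=response.json())
        # Surface the upstream status instead of masking it as a 500.
        raise HTTPException(status_code=response.status_code, detail="Upstream service error")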
@@ -1659,7 +1998,7 @@ async def _broadcast_to_browser_clients(snapshot: dict): data = jsonable_encoder(snapshot) # Use list() to avoid "set changed size during iteration" errors disconnected_clients = [] - + for ws in list(browser_conns): try: await ws.send_json(data) @@ -1671,7 +2010,7 @@ async def _broadcast_to_browser_clients(snapshot: dict): # Handle any other unexpected errors disconnected_clients.append(ws) logger.warning(f"Unexpected error broadcasting to browser client: {e}") - + # Clean up disconnected clients for ws in disconnected_clients: browser_conns.discard(ws) @@ -1684,48 +2023,55 @@ async def _forward_to_inventory_service(inventory_msg: FullInventoryMessage): inventory_data = { "character_name": inventory_msg.character_name, "timestamp": inventory_msg.timestamp.isoformat(), - "items": inventory_msg.items + "items": inventory_msg.items, } - + async with httpx.AsyncClient(timeout=30.0) as client: response = await client.post( - f"{INVENTORY_SERVICE_URL}/process-inventory", - json=inventory_data + f"{INVENTORY_SERVICE_URL}/process-inventory", json=inventory_data ) - + if response.status_code == 200: result = response.json() - logger.info(f"Inventory service processed {result['processed']} items for {inventory_msg.character_name}") + logger.info( + f"Inventory service processed {result['processed']} items for {inventory_msg.character_name}" + ) else: - logger.error(f"Inventory service error {response.status_code}: {response.text}") - + logger.error( + f"Inventory service error {response.status_code}: {response.text}" + ) + except Exception as e: logger.error(f"Failed to forward inventory to service: {e}") # Don't raise - this shouldn't block the main storage + async def _store_inventory(inventory_msg: FullInventoryMessage): """Forward inventory data to inventory microservice for processing and storage.""" try: # Forward to inventory microservice for enhanced processing and storage await _forward_to_inventory_service(inventory_msg) - + # Optional: Create JSON file for debugging (can be removed in production) inventory_dir = Path("./inventory") inventory_dir.mkdir(exist_ok=True) - + file_path = inventory_dir / f"{inventory_msg.character_name}_inventory.json" inventory_data = { "character_name": inventory_msg.character_name, "timestamp": inventory_msg.timestamp.isoformat(), "item_count": inventory_msg.item_count, - "items": inventory_msg.items + "items": inventory_msg.items, } - - with open(file_path, 'w') as f: + + with open(file_path, "w") as f: json.dump(inventory_data, f, indent=2) - + except Exception as e: - logger.error(f"Failed to forward inventory for {inventory_msg.character_name}: {e}", exc_info=True) + logger.error( + f"Failed to forward inventory for {inventory_msg.character_name}: {e}", + exc_info=True, + ) raise @@ -1733,7 +2079,7 @@ async def _store_inventory(inventory_msg: FullInventoryMessage): async def ws_receive_snapshots( websocket: WebSocket, secret: str | None = Query(None), - x_plugin_secret: str | None = Header(None) + x_plugin_secret: str | None = Header(None), ): """WebSocket endpoint for plugin clients to send telemetry and events. 
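Editor's note (annotation, not part of the patch): in the telemetry hunks that follow, the plugin reports a cumulative kill count per session, so the handler remembers the last count seen per (session_id, character_name) — in-memory first, database on a cache miss — and persists only the positive delta. That keeps plugin reconnects and server restarts from double-counting. The counter itself is a PostgreSQL upsert; a minimal sketch of the pattern, using the char_stats table and database connection from this module (the helper name record_kill_delta is hypothetical):

    from sqlalchemy.dialects.postgresql import insert as pg_insert

    async def record_kill_delta(character_name: str, delta: int) -> None:
        # Insert a counter row for a new character, or atomically add the
        # delta to the existing total on conflict.
        stmt = (
            pg_insert(char_stats)
            .values(character_name=character_name, total_kills=delta)
            .on_conflict_do_update(
                index_elements=["character_name"],
                set_={"total_kills": char_stats.c.total_kills + delta},
            )
        )
        await database.execute(stmt)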
@@ -1746,27 +2092,29 @@ async def ws_receive_snapshots( - chat: broadcast chat messages to browsers """ global _plugin_connections - + # Authenticate plugin connection using shared secret key = secret or x_plugin_secret if key != SHARED_SECRET: # Reject without completing the WebSocket handshake - logger.warning(f"Plugin WebSocket authentication failed from {websocket.client}") + logger.warning( + f"Plugin WebSocket authentication failed from {websocket.client}" + ) await websocket.close(code=1008) return # Accept the WebSocket connection await websocket.accept() logger.info(f"🔌 PLUGIN_CONNECTED: {websocket.client}") - + # Track plugin connection _plugin_connections += 1 - + try: while True: # Read next text frame try: raw = await websocket.receive_text() - # Debug: log all incoming plugin WebSocket messages + # Debug: log all incoming plugin WebSocket messages logger.debug(f"Plugin WebSocket RX from {websocket.client}: {raw}") except WebSocketDisconnect: logger.info(f"🔌 PLUGIN_DISCONNECTED: {websocket.client}") @@ -1791,10 +2139,10 @@ async def ws_receive_snapshots( payload.pop("type", None) try: spawn = SpawnEvent.parse_obj(payload) - await database.execute( - spawn_events.insert().values(**spawn.dict()) + await database.execute(spawn_events.insert().values(**spawn.dict())) + logger.debug( + f"Recorded spawn event: {spawn.mob} by {spawn.character_name}" ) - logger.debug(f"Recorded spawn event: {spawn.mob} by {spawn.character_name}") except Exception as e: logger.error(f"Failed to process spawn event: {e}") continue @@ -1804,20 +2152,22 @@ async def ws_receive_snapshots( # Parse telemetry snapshot and update in-memory state payload = data.copy() payload.pop("type", None) - character_name = payload.get('character_name', 'unknown') - + character_name = payload.get("character_name", "unknown") + # Track message receipt and start timing telemetry_start_time = time.time() - logger.info(f"📨 TELEMETRY_RECEIVED: {character_name} from {websocket.client}") - + logger.info( + f"📨 TELEMETRY_RECEIVED: {character_name} from {websocket.client}" + ) + try: snap = TelemetrySnapshot.parse_obj(payload) live_snapshots[snap.character_name] = snap.dict() # Prepare data and compute kill delta db_data = snap.dict() - db_data['rares_found'] = 0 + db_data["rares_found"] = 0 key = (snap.session_id, snap.character_name) - + # Get last recorded kill count for this session if key in ws_receive_snapshots._last_kills: last = ws_receive_snapshots._last_kills[key] @@ -1825,48 +2175,68 @@ async def ws_receive_snapshots( # Cache miss - check database for last kill count for this session row = await database.fetch_one( "SELECT kills FROM telemetry_events WHERE character_name = :char AND session_id = :session ORDER BY timestamp DESC LIMIT 1", - {"char": snap.character_name, "session": snap.session_id} + {"char": snap.character_name, "session": snap.session_id}, ) last = row["kills"] if row else 0 - logger.debug(f"Cache miss for {snap.character_name} session {snap.session_id[:8]}: loaded last_kills={last} from database") - + logger.debug( + f"Cache miss for {snap.character_name} session {snap.session_id[:8]}: loaded last_kills={last} from database" + ) + delta = snap.kills - last # Persist snapshot and any kill delta in a single transaction db_start_time = time.time() - + # Log connection pool status before database operation try: - pool_status = f"pool_size:{database._pool._queue.qsize()}" if hasattr(database, '_pool') and hasattr(database._pool, '_queue') else "pool_status:unknown" + pool_status = (
f"pool_size:{database._pool._queue.qsize()}" + if hasattr(database, "_pool") + and hasattr(database._pool, "_queue") + else "pool_status:unknown" + ) except: pool_status = "pool_status:error" - - logger.info(f"πŸ’Ύ TELEMETRY_DB_WRITE_ATTEMPT: {snap.character_name} session:{snap.session_id[:8]} kills:{snap.kills} delta:{delta} {pool_status}") - + + logger.info( + f"πŸ’Ύ TELEMETRY_DB_WRITE_ATTEMPT: {snap.character_name} session:{snap.session_id[:8]} kills:{snap.kills} delta:{delta} {pool_status}" + ) + try: async with database.transaction(): await database.execute( telemetry_events.insert().values(**db_data) ) if delta > 0: - stmt = pg_insert(char_stats).values( - character_name=snap.character_name, - total_kills=delta - ).on_conflict_do_update( - index_elements=["character_name"], - set_={"total_kills": char_stats.c.total_kills + delta}, + stmt = ( + pg_insert(char_stats) + .values( + character_name=snap.character_name, + total_kills=delta, + ) + .on_conflict_do_update( + index_elements=["character_name"], + set_={ + "total_kills": char_stats.c.total_kills + + delta + }, + ) ) await database.execute(stmt) - logger.debug(f"Updated kills for {snap.character_name}: +{delta} (total from {last} to {snap.kills})") - + logger.debug( + f"Updated kills for {snap.character_name}: +{delta} (total from {last} to {snap.kills})" + ) + # Success: log timing and update cache db_duration = (time.time() - db_start_time) * 1000 ws_receive_snapshots._last_kills[key] = snap.kills - + # Track database performance (Phase 2) global _total_queries, _total_query_time _total_queries += 1 - _total_query_time += db_duration / 1000.0 # Convert ms to seconds - + _total_query_time += ( + db_duration / 1000.0 + ) # Convert ms to seconds + # Track recent activity (Phase 3) global _recent_telemetry_messages, _max_recent_messages activity_entry = { @@ -1874,43 +2244,61 @@ async def ws_receive_snapshots( "character_name": snap.character_name, "kills": snap.kills, "kill_delta": delta, - "query_time": round(db_duration, 1) + "query_time": round(db_duration, 1), } _recent_telemetry_messages.append(activity_entry) if len(_recent_telemetry_messages) > _max_recent_messages: _recent_telemetry_messages.pop(0) - - + # Log final pool status after successful operation try: - final_pool_status = f"pool_size:{database._pool._queue.qsize()}" if hasattr(database, '_pool') and hasattr(database._pool, '_queue') else "pool_status:unknown" + final_pool_status = ( + f"pool_size:{database._pool._queue.qsize()}" + if hasattr(database, "_pool") + and hasattr(database._pool, "_queue") + else "pool_status:unknown" + ) except: final_pool_status = "pool_status:error" - - logger.info(f"βœ… TELEMETRY_DB_WRITE_SUCCESS: {snap.character_name} took {db_duration:.1f}ms {final_pool_status}") - + + logger.info( + f"βœ… TELEMETRY_DB_WRITE_SUCCESS: {snap.character_name} took {db_duration:.1f}ms {final_pool_status}" + ) + except Exception as db_error: db_duration = (time.time() - db_start_time) * 1000 - - + # Log pool status during failure try: - error_pool_status = f"pool_size:{database._pool._queue.qsize()}" if hasattr(database, '_pool') and hasattr(database._pool, '_queue') else "pool_status:unknown" + error_pool_status = ( + f"pool_size:{database._pool._queue.qsize()}" + if hasattr(database, "_pool") + and hasattr(database._pool, "_queue") + else "pool_status:unknown" + ) except: error_pool_status = "pool_status:error" - - logger.error(f"❌ TELEMETRY_DB_WRITE_FAILED: {snap.character_name} session:{snap.session_id[:8]} took {db_duration:.1f}ms {error_pool_status} 
error:{db_error}", exc_info=True) + + logger.error( + f"❌ TELEMETRY_DB_WRITE_FAILED: {snap.character_name} session:{snap.session_id[:8]} took {db_duration:.1f}ms {error_pool_status} error:{db_error}", + exc_info=True, + ) continue # Broadcast updated snapshot to all browser clients await _broadcast_to_browser_clients(snap.dict()) - + # Log successful processing completion with timing total_duration = (time.time() - telemetry_start_time) * 1000 - logger.info(f"⏱️ TELEMETRY_PROCESSING_COMPLETE: {snap.character_name} took {total_duration:.1f}ms total") - + logger.info( + f"⏱️ TELEMETRY_PROCESSING_COMPLETE: {snap.character_name} took {total_duration:.1f}ms total" + ) + except Exception as e: total_duration = (time.time() - telemetry_start_time) * 1000 - logger.error(f"❌ TELEMETRY_PROCESSING_FAILED: {character_name} took {total_duration:.1f}ms error:{e}", exc_info=True) + logger.error( + f"❌ TELEMETRY_PROCESSING_FAILED: {character_name} took {total_duration:.1f}ms error:{e}", + exc_info=True, + ) continue # --- Rare event: update total and session counters and persist --- if msg_type == "rare": @@ -1918,12 +2306,13 @@ if isinstance(name, str) and name.strip(): try: # Total rare count per character - stmt_tot = pg_insert(rare_stats).values( - character_name=name, - total_rares=1 - ).on_conflict_do_update( - index_elements=["character_name"], - set_={"total_rares": rare_stats.c.total_rares + 1}, + stmt_tot = ( + pg_insert(rare_stats) + .values(character_name=name, total_rares=1) + .on_conflict_do_update( + index_elements=["character_name"], + set_={"total_rares": rare_stats.c.total_rares + 1}, + ) ) await database.execute(stmt_tot) # Session-specific rare count (use live cache or fallback to latest telemetry) @@ -1933,18 +2322,25 @@ async def ws_receive_snapshots( "SELECT session_id FROM telemetry_events" " WHERE character_name = :name" " ORDER BY timestamp DESC LIMIT 1", - {"name": name} + {"name": name}, ) if row: session_id = row["session_id"] if session_id: - stmt_sess = pg_insert(rare_stats_sessions).values( - character_name=name, - session_id=session_id, - session_rares=1 - ).on_conflict_do_update( - index_elements=["character_name", "session_id"], - set_={"session_rares": rare_stats_sessions.c.session_rares + 1}, + stmt_sess = ( + pg_insert(rare_stats_sessions) + .values( + character_name=name, + session_id=session_id, + session_rares=1, + ) + .on_conflict_do_update( + index_elements=["character_name", "session_id"], + set_={ + "session_rares": rare_stats_sessions.c.session_rares + + 1 + }, + ) ) await database.execute(stmt_sess) # Persist individual rare event for future analysis @@ -1955,18 +2351,25 @@ async def ws_receive_snapshots( await database.execute( rare_events.insert().values(**rare_ev.dict()) ) - logger.info(f"Recorded rare event: {rare_ev.name} by {name}") + logger.info( + f"Recorded rare event: {rare_ev.name} by {name}" + ) # Broadcast rare event to browser clients for epic notifications await _broadcast_to_browser_clients(data) except Exception as e: logger.error(f"Failed to persist rare event: {e}") except Exception as e: - logger.error(f"Failed to process rare event for {name}: {e}", exc_info=True) + logger.error( + f"Failed to process rare event for {name}: {e}", + exc_info=True, + ) continue # --- Chat message: forward chat payload to browser clients --- if msg_type == "chat": await _broadcast_to_browser_clients(data) - logger.debug(f"Broadcasted chat message from {data.get('character_name', 'unknown')}") + logger.debug( + f"Broadcasted 
chat message from {data.get('character_name', 'unknown')}" + ) continue # --- Full inventory message: store complete inventory snapshot --- if msg_type == "full_inventory": @@ -1975,9 +2378,14 @@ async def ws_receive_snapshots( try: inventory_msg = FullInventoryMessage.parse_obj(payload) await _store_inventory(inventory_msg) - logger.info(f"Stored inventory for {inventory_msg.character_name}: {inventory_msg.item_count} items") + logger.info( + f"Stored inventory for {inventory_msg.character_name}: {inventory_msg.item_count} items" + ) except Exception as e: - logger.error(f"Failed to process inventory for {data.get('character_name', 'unknown')}: {e}", exc_info=True) + logger.error( + f"Failed to process inventory for {data.get('character_name', 'unknown')}: {e}", + exc_info=True, + ) continue # --- Inventory delta: single item add/remove/update --- if msg_type == "inventory_delta": @@ -1993,14 +2401,16 @@ async def ws_receive_snapshots( f"{INVENTORY_SERVICE_URL}/inventory/{char_name}/item/{item_id}" ) if resp.status_code >= 400: - logger.warning(f"Inventory service returned {resp.status_code} for delta remove item_id={item_id}") + logger.warning( + f"Inventory service returned {resp.status_code} for delta remove item_id={item_id}" + ) elif action in ("add", "update"): item = data.get("item") if item: async with httpx.AsyncClient(timeout=10.0) as client: resp = await client.post( f"{INVENTORY_SERVICE_URL}/inventory/{char_name}/item", - json=item + json=item, ) if resp.status_code < 400: # Use enriched item from inventory-service response for broadcast @@ -2011,16 +2421,20 @@ async def ws_receive_snapshots( "type": "inventory_delta", "action": action, "character_name": char_name, - "item": enriched_item + "item": enriched_item, } else: - logger.warning(f"Inventory service returned {resp.status_code} for delta {action}") + logger.warning( + f"Inventory service returned {resp.status_code} for delta {action}" + ) # Broadcast delta to all browser clients await _broadcast_to_browser_clients(data) logger.debug(f"Inventory delta ({action}) for {char_name}") except Exception as e: - logger.error(f"Failed to process inventory delta: {e}", exc_info=True) + logger.error( + f"Failed to process inventory delta: {e}", exc_info=True + ) continue # --- Vitals message: store character health/stamina/mana and broadcast --- if msg_type == "vitals": @@ -2030,9 +2444,14 @@ async def ws_receive_snapshots( vitals_msg = VitalsMessage.parse_obj(payload) live_vitals[vitals_msg.character_name] = vitals_msg.dict() await _broadcast_to_browser_clients(data) - logger.debug(f"Updated vitals for {vitals_msg.character_name}: {vitals_msg.health_percentage}% HP, {vitals_msg.stamina_percentage}% Stam, {vitals_msg.mana_percentage}% Mana") + logger.debug( + f"Updated vitals for {vitals_msg.character_name}: {vitals_msg.health_percentage}% HP, {vitals_msg.stamina_percentage}% Stam, {vitals_msg.mana_percentage}% Mana" + ) except Exception as e: - logger.error(f"Failed to process vitals for {data.get('character_name', 'unknown')}: {e}", exc_info=True) + logger.error( + f"Failed to process vitals for {data.get('character_name', 'unknown')}: {e}", + exc_info=True, + ) continue # --- Character stats message: store character attributes/skills/progression and broadcast --- if msg_type == "character_stats": @@ -2047,9 +2466,20 @@ async def ws_receive_snapshots( # Build stats_data JSONB (everything except extracted columns) stats_data = {} - for key in ("attributes", "vitals", "skills", "allegiance", - "race", "gender", "birth", 
"current_title", "skill_credits", - "properties", "titles"): + for key in ( + "attributes", + "vitals", + "skills", + "allegiance", + "active_item_enchantments", + "race", + "gender", + "birth", + "current_title", + "skill_credits", + "properties", + "titles", + ): if stats_dict.get(key) is not None: stats_data[key] = stats_dict[key] @@ -2073,70 +2503,82 @@ async def ws_receive_snapshots( stats_data = EXCLUDED.stats_data """, { - "character_name": stats_msg.character_name, - "timestamp": stats_msg.timestamp, - "level": stats_msg.level, - "total_xp": stats_msg.total_xp, - "unassigned_xp": stats_msg.unassigned_xp, - "luminance_earned": stats_msg.luminance_earned, - "luminance_total": stats_msg.luminance_total, - "deaths": stats_msg.deaths, - "stats_data": json.dumps(stats_data), - }) + "character_name": stats_msg.character_name, + "timestamp": stats_msg.timestamp, + "level": stats_msg.level, + "total_xp": stats_msg.total_xp, + "unassigned_xp": stats_msg.unassigned_xp, + "luminance_earned": stats_msg.luminance_earned, + "luminance_total": stats_msg.luminance_total, + "deaths": stats_msg.deaths, + "stats_data": json.dumps(stats_data), + }, + ) # Broadcast to browser clients await _broadcast_to_browser_clients(data) - logger.info(f"Updated character stats for {stats_msg.character_name}: Level {stats_msg.level}") + logger.info( + f"Updated character stats for {stats_msg.character_name}: Level {stats_msg.level}" + ) except Exception as e: - logger.error(f"Failed to process character_stats for {data.get('character_name', 'unknown')}: {e}", exc_info=True) + logger.error( + f"Failed to process character_stats for {data.get('character_name', 'unknown')}: {e}", + exc_info=True, + ) continue # --- Quest message: update cache and broadcast (no database storage) --- if msg_type == "quest": character_name = data.get("character_name") quest_name = data.get("quest_name") countdown = data.get("countdown") - + if character_name and quest_name and countdown is not None: # Only track specific quest types allowed_quests = { "Stipend Collection Timer", - "Blank Augmentation Gem Pickup Timer", - "Insatiable Eater Jaw" + "Blank Augmentation Gem Pickup Timer", + "Insatiable Eater Jaw", } - + if quest_name in allowed_quests: # Update quest cache if character_name not in _quest_status_cache: _quest_status_cache[character_name] = {} _quest_status_cache[character_name][quest_name] = countdown - + # Broadcast to browser clients for real-time updates await _broadcast_to_browser_clients(data) - logger.debug(f"Updated quest status for {character_name}: {quest_name} = {countdown}") + logger.debug( + f"Updated quest status for {character_name}: {quest_name} = {countdown}" + ) else: logger.debug(f"Ignoring non-tracked quest: {quest_name}") else: - logger.warning(f"Invalid quest message format from {websocket.client}: missing required fields") + logger.warning( + f"Invalid quest message format from {websocket.client}: missing required fields" + ) continue # --- Portal message: store in database and broadcast --- if msg_type == "portal": character_name = data.get("character_name") portal_name = data.get("portal_name") ns = data.get("ns") - ew = data.get("ew") + ew = data.get("ew") z = data.get("z") timestamp_str = data.get("timestamp") - + if all([character_name, portal_name, ns, ew, z, timestamp_str]): try: # Parse timestamp - timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00')) - + timestamp = datetime.fromisoformat( + timestamp_str.replace("Z", "+00:00") + ) + # Convert coordinates to floats for database 
storage ns = float(ns) ew = float(ew) z = float(z) - + # Round coordinates for display (0.1 tolerance to match DB constraint) ns_rounded = round(ns, 1) ew_rounded = round(ew, 1) @@ -2160,39 +2602,50 @@ async def ws_receive_snapshots( "ew": ew, "z": z, "timestamp": timestamp, - "character_name": character_name - } + "character_name": character_name, + }, ) # Log whether this was a new discovery or an update # xmax = 0 means it was an INSERT (new portal) # xmax != 0 means it was an UPDATE (existing portal) if result and result["was_inserted"]: - logger.info(f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}") + logger.info( + f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}" + ) else: - logger.debug(f"Portal timestamp updated: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}") - + logger.debug( + f"Portal timestamp updated: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}" + ) + # Broadcast to browser clients for map updates await _broadcast_to_browser_clients(data) - + except Exception as e: - logger.error(f"Failed to process portal discovery for {character_name}: {e}", exc_info=True) + logger.error( + f"Failed to process portal discovery for {character_name}: {e}", + exc_info=True, + ) else: - logger.warning(f"Invalid portal message format from {websocket.client}: missing required fields") + logger.warning( + f"Invalid portal message format from {websocket.client}: missing required fields" + ) continue # Unknown message types are ignored if msg_type: - logger.warning(f"Unknown message type '{msg_type}' from {websocket.client}") + logger.warning( + f"Unknown message type '{msg_type}' from {websocket.client}" + ) finally: # Track plugin disconnection _plugin_connections = max(0, _plugin_connections - 1) - + # Clean up any plugin registrations for this socket to_remove = [n for n, ws in plugin_conns.items() if ws is websocket] for n in to_remove: # Use pop() instead of del to avoid KeyError if already removed plugin_conns.pop(n, None) - + # Also clean up any entries in the kill tracking cache for this session # Remove entries that might be associated with disconnected clients stale_keys = [] @@ -2201,21 +2654,27 @@ async def ws_receive_snapshots( stale_keys.append((session_id, char_name)) for key in stale_keys: ws_receive_snapshots._last_kills.pop(key, None) - + if to_remove: - logger.info(f"Cleaned up plugin connections for characters: {to_remove} from {websocket.client}") + logger.info( + f"Cleaned up plugin connections for characters: {to_remove} from {websocket.client}" + ) if stale_keys: - logger.debug(f"Cleaned up {len(stale_keys)} kill tracking cache entries") + logger.debug( + f"Cleaned up {len(stale_keys)} kill tracking cache entries" + ) else: logger.debug(f"No plugin registrations to clean up for {websocket.client}") + # In-memory cache of last seen kill counts per (session_id, character_name) # Used to compute deltas for updating persistent kill statistics efficiently ws_receive_snapshots._last_kills = {} + async def cleanup_stale_connections(): """Periodic cleanup of stale WebSocket connections. - + This function can be called periodically to clean up connections that may have become stale but weren't properly cleaned up. 
""" @@ -2224,32 +2683,35 @@ async def cleanup_stale_connections(): for char_name, ws in list(plugin_conns.items()): try: # Test if the WebSocket is still alive by checking its state - if ws.client_state.name != 'CONNECTED': + if ws.client_state.name != "CONNECTED": stale_plugins.append(char_name) except Exception: # If we can't check the state, consider it stale stale_plugins.append(char_name) - + for char_name in stale_plugins: plugin_conns.pop(char_name, None) logger.info(f"Cleaned up stale plugin connection: {char_name}") - + # Clean up browser connections stale_browsers = [] for ws in list(browser_conns): try: - if ws.client_state.name != 'CONNECTED': + if ws.client_state.name != "CONNECTED": stale_browsers.append(ws) except Exception: stale_browsers.append(ws) - + for ws in stale_browsers: browser_conns.discard(ws) - + if stale_browsers: logger.info(f"Cleaned up {len(stale_browsers)} stale browser connections") - - logger.debug(f"Connection health check: {len(plugin_conns)} plugins, {len(browser_conns)} browsers") + + logger.debug( + f"Connection health check: {len(plugin_conns)} plugins, {len(browser_conns)} browsers" + ) + @app.websocket("/ws/live") async def ws_live_updates(websocket: WebSocket): @@ -2263,10 +2725,10 @@ async def ws_live_updates(websocket: WebSocket): await websocket.accept() browser_conns.add(websocket) logger.info(f"Browser WebSocket connected: {websocket.client}") - + # Track browser connection _browser_connections += 1 - + try: while True: # Receive command messages from browser @@ -2282,7 +2744,11 @@ async def ws_live_updates(websocket: WebSocket): # New format: { player_name, command } target_name = data["player_name"] payload = data - elif data.get("type") == "command" and "character_name" in data and "text" in data: + elif ( + data.get("type") == "command" + and "character_name" in data + and "text" in data + ): # Legacy format: { type: 'command', character_name, text } target_name = data.get("character_name") payload = {"player_name": target_name, "command": data.get("text")} @@ -2294,25 +2760,33 @@ async def ws_live_updates(websocket: WebSocket): if target_ws: try: await target_ws.send_json(payload) - logger.debug(f"Forwarded command to plugin for {target_name}: {payload}") + logger.debug( + f"Forwarded command to plugin for {target_name}: {payload}" + ) except (WebSocketDisconnect, RuntimeError, ConnectionAbortedError) as e: logger.warning(f"Failed to forward command to {target_name}: {e}") # Remove stale connection plugin_conns.pop(target_name, None) except Exception as e: - logger.error(f"Unexpected error forwarding command to {target_name}: {e}") + logger.error( + f"Unexpected error forwarding command to {target_name}: {e}" + ) # Remove potentially corrupted connection plugin_conns.pop(target_name, None) else: - logger.warning(f"No plugin connection found for target character: {target_name}") + logger.warning( + f"No plugin connection found for target character: {target_name}" + ) except WebSocketDisconnect: pass finally: # Track browser disconnection _browser_connections = max(0, _browser_connections - 1) - + browser_conns.discard(websocket) - logger.debug(f"Removed browser WebSocket from connection pool: {websocket.client}") + logger.debug( + f"Removed browser WebSocket from connection pool: {websocket.client}" + ) ## -------------------- static frontend --------------------------- @@ -2324,7 +2798,9 @@ for route in app.routes: if isinstance(route, APIRoute): # Log the path and allowed methods for each API route logger.info(f"{route.path} -> 
{route.methods}") - # Add stats endpoint for per-character metrics + + +# Add stats endpoint for per-character metrics @app.get("/stats/{character_name}") async def get_stats(character_name: str): """ @@ -2354,32 +2830,42 @@ async def get_stats(character_name: str): if not row: logger.warning(f"No telemetry data found for character: {character_name}") raise HTTPException(status_code=404, detail="Character not found") - + # Extract latest snapshot data (exclude the added total_kills/total_rares) - snap_dict = {k: v for k, v in dict(row).items() - if k not in ("total_kills", "total_rares")} - + snap_dict = { + k: v + for k, v in dict(row).items() + if k not in ("total_kills", "total_rares") + } + result = { "character_name": character_name, "latest_snapshot": snap_dict, "total_kills": row["total_kills"], "total_rares": row["total_rares"], } - logger.debug(f"Retrieved stats for character: {character_name} (optimized query)") + logger.debug( + f"Retrieved stats for character: {character_name} (optimized query)" + ) return JSONResponse(content=jsonable_encoder(result)) except HTTPException: raise except Exception as e: - logger.error(f"Failed to get stats for character {character_name}: {e}", exc_info=True) + logger.error( + f"Failed to get stats for character {character_name}: {e}", exc_info=True + ) raise HTTPException(status_code=500, detail="Internal server error") + # --- Character Stats API ------------------------------------------- + @app.post("/character-stats/test") async def test_character_stats_default(): """Inject mock character_stats data for frontend development.""" return await test_character_stats("TestCharacter") + @app.post("/character-stats/test/{name}") async def test_character_stats(name: str): """Inject mock character_stats data for a specific character name. 
@@ -2405,12 +2891,12 @@ async def test_character_stats(name: str): "coordination": {"base": 240, "creation": 100}, "quickness": {"base": 220, "creation": 10}, "focus": {"base": 250, "creation": 100}, - "self": {"base": 200, "creation": 100} + "self": {"base": 200, "creation": 100}, }, "vitals": { "health": {"base": 341}, "stamina": {"base": 400}, - "mana": {"base": 300} + "mana": {"base": 300}, }, "skills": { "war_magic": {"base": 533, "training": "Specialized"}, @@ -2446,15 +2932,15 @@ async def test_character_stats(name: str): "sneak_attack": {"base": 10, "training": "Untrained"}, "dirty_fighting": {"base": 10, "training": "Untrained"}, "recklessness": {"base": 10, "training": "Untrained"}, - "summoning": {"base": 10, "training": "Untrained"} + "summoning": {"base": 10, "training": "Untrained"}, }, "allegiance": { "name": "Knights of Dereth", "monarch": {"name": "HighKing", "race": 1, "rank": 0, "gender": 0}, "patron": {"name": "SirLancelot", "race": 1, "rank": 5, "gender": 0}, "rank": 8, - "followers": 12 - } + "followers": 12, + }, } # Process through the same pipeline as real data @@ -2466,8 +2952,18 @@ async def test_character_stats(name: str): live_character_stats[stats_msg.character_name] = stats_dict stats_data = {} - for key in ("attributes", "vitals", "skills", "allegiance", - "race", "gender", "birth", "current_title", "skill_credits"): + for key in ( + "attributes", + "vitals", + "skills", + "allegiance", + "active_item_enchantments", + "race", + "gender", + "birth", + "current_title", + "skill_credits", + ): if stats_dict.get(key) is not None: stats_data[key] = stats_dict[key] @@ -2490,16 +2986,17 @@ async def test_character_stats(name: str): stats_data = EXCLUDED.stats_data """, { - "character_name": stats_msg.character_name, - "timestamp": stats_msg.timestamp, - "level": stats_msg.level, - "total_xp": stats_msg.total_xp, - "unassigned_xp": stats_msg.unassigned_xp, - "luminance_earned": stats_msg.luminance_earned, - "luminance_total": stats_msg.luminance_total, - "deaths": stats_msg.deaths, - "stats_data": json.dumps(stats_data), - }) + "character_name": stats_msg.character_name, + "timestamp": stats_msg.timestamp, + "level": stats_msg.level, + "total_xp": stats_msg.total_xp, + "unassigned_xp": stats_msg.unassigned_xp, + "luminance_earned": stats_msg.luminance_earned, + "luminance_total": stats_msg.luminance_total, + "deaths": stats_msg.deaths, + "stats_data": json.dumps(stats_data), + }, + ) await _broadcast_to_browser_clients(mock_data) return {"status": "ok", "character_name": stats_msg.character_name} @@ -2518,8 +3015,7 @@ async def get_character_stats(name: str): # Fall back to database row = await database.fetch_one( - "SELECT * FROM character_stats WHERE character_name = :name", - {"name": name} + "SELECT * FROM character_stats WHERE character_name = :name", {"name": name} ) if row: result = dict(row._mapping) @@ -2531,7 +3027,9 @@ async def get_character_stats(name: str): result.update(stats_data) return JSONResponse(content=jsonable_encoder(result)) - return JSONResponse(content={"error": "No stats available for this character"}, status_code=404) + return JSONResponse( + content={"error": "No stats available for this character"}, status_code=404 + ) except Exception as e: logger.error(f"Failed to get character stats for {name}: {e}", exc_info=True) raise HTTPException(status_code=500, detail="Internal server error") @@ -2541,29 +3039,35 @@ async def get_character_stats(name: str): # Custom icon handler that prioritizes clean icons over originals from 
fastapi.responses import FileResponse + @app.get("/icons/{icon_filename}") async def serve_icon(icon_filename: str): """Serve icons from static/icons directory""" - + # Serve from static/icons directory icon_path = Path("static/icons") / icon_filename if icon_path.exists(): return FileResponse(icon_path, media_type="image/png") - + # Icon not found raise HTTPException(status_code=404, detail="Icon not found") + # -------------------- Inventory Service Proxy --------------------------- + @app.get("/inv/test") async def test_inventory_route(): """Test route to verify inventory proxy is working""" return {"message": "Inventory proxy route is working"} + @app.post("/inv/suitbuilder/search") async def proxy_suitbuilder_search(request: Request): """Stream suitbuilder search results - SSE requires streaming proxy.""" - inventory_service_url = os.getenv('INVENTORY_SERVICE_URL', 'http://inventory-service:8000') + inventory_service_url = os.getenv( + "INVENTORY_SERVICE_URL", "http://inventory-service:8000" + ) logger.info(f"Streaming proxy to suitbuilder search") # Read body BEFORE creating generator (request context needed) @@ -2572,20 +3076,22 @@ async def proxy_suitbuilder_search(request: Request): async def stream_response(): try: # Use streaming request with long timeout for searches - async with httpx.AsyncClient(timeout=httpx.Timeout(300.0, connect=10.0)) as client: + async with httpx.AsyncClient( + timeout=httpx.Timeout(300.0, connect=10.0) + ) as client: async with client.stream( method="POST", url=f"{inventory_service_url}/suitbuilder/search", content=body, - headers={"Content-Type": "application/json"} + headers={"Content-Type": "application/json"}, ) as response: async for chunk in response.aiter_bytes(): yield chunk except httpx.ReadTimeout: - yield b"event: error\ndata: {\"message\": \"Search timeout\"}\n\n" + yield b'event: error\ndata: {"message": "Search timeout"}\n\n' except Exception as e: logger.error(f"Streaming proxy error: {e}") - yield f"event: error\ndata: {{\"message\": \"Proxy error: {str(e)}\"}}\n\n".encode() + yield f'event: error\ndata: {{"message": "Proxy error: {str(e)}"}}\n\n'.encode() return StreamingResponse( stream_response(), @@ -2593,15 +3099,18 @@ async def proxy_suitbuilder_search(request: Request): headers={ "Cache-Control": "no-cache", "Connection": "keep-alive", - "X-Accel-Buffering": "no" # Disable nginx buffering - } + "X-Accel-Buffering": "no", # Disable nginx buffering + }, ) + @app.api_route("/inv/{path:path}", methods=["GET", "POST"]) async def proxy_inventory_service(path: str, request: Request): """Proxy all inventory service requests""" try: - inventory_service_url = os.getenv('INVENTORY_SERVICE_URL', 'http://inventory-service:8000') + inventory_service_url = os.getenv( + "INVENTORY_SERVICE_URL", "http://inventory-service:8000" + ) logger.info(f"Proxying to inventory service: {inventory_service_url}/{path}") # Forward the request to inventory service (60s timeout for large queries) @@ -2611,17 +3120,18 @@ async def proxy_inventory_service(path: str, request: Request): url=f"{inventory_service_url}/{path}", params=request.query_params, headers=dict(request.headers), - content=await request.body() + content=await request.body(), ) return Response( content=response.content, status_code=response.status_code, - headers=dict(response.headers) + headers=dict(response.headers), ) except Exception as e: logger.error(f"Failed to proxy inventory request: {e}") raise HTTPException(status_code=500, detail="Inventory service unavailable") + # Icons are now 
served from static/icons directory # Serve SPA files (catch-all for frontend routes) # Mount the single-page application frontend (static assets) at root path diff --git a/static/script.js b/static/script.js index 45102acd..48a8efd5 100644 --- a/static/script.js +++ b/static/script.js @@ -1494,9 +1494,25 @@ function getManaTrackedItems(state) { const snapshotMs = Date.now(); return state.items .filter(item => (item.current_wielded_location || 0) > 0) - .filter(item => item.is_mana_tracked || item.current_mana !== undefined || item.max_mana !== undefined || item.spellcraft !== undefined) + .filter(item => { + const spellInfo = item.spells; + const hasSpellData = + (Array.isArray(spellInfo?.spells) && spellInfo.spells.length > 0) || + (Array.isArray(spellInfo?.active_spells) && spellInfo.active_spells.length > 0) || + Number(spellInfo?.spell_count || 0) > 0 || + Number(spellInfo?.active_spell_count || 0) > 0; + + return ( + item.is_mana_tracked || + item.current_mana !== undefined || + item.max_mana !== undefined || + item.spellcraft !== undefined || + hasSpellData + ); + }) .map(item => { const result = { ...item }; + result.mana_state = deriveManaStateFromCharacterStats(result, state.characterName); if (result.mana_time_remaining_seconds !== undefined && result.mana_time_remaining_seconds !== null) { const snapshotUtc = result.mana_snapshot_utc ? Date.parse(result.mana_snapshot_utc) : NaN; if (!Number.isNaN(snapshotUtc)) { @@ -1521,6 +1537,87 @@ function getManaTrackedItems(state) { }); } +function isActionableManaSpell(spell) { + if (!spell) return false; + const spellName = (spell.name || '').toLowerCase(); + if (!spellName || spellName.startsWith('unknown_spell_')) return false; + if (spellName.startsWith('cantrip portal send') || spellName.startsWith('cantrip portal recall')) return false; + if (spellName.startsWith('incantation of ') || spellName.startsWith('aura of incantation ')) return true; + if ( + spellName.startsWith('feeble ') || + spellName.startsWith('minor ') || + spellName.startsWith('lesser ') || + spellName.startsWith('moderate ') || + spellName.startsWith('inner ') || + spellName.startsWith('major ') || + spellName.startsWith('epic ') || + spellName.startsWith('legendary ') || + spellName.startsWith('prodigal ') + ) return true; + + const duration = spell.duration; + return duration !== undefined && duration !== null && Number(duration) <= 0; +} + +function doesSpellMatch(activeSpell, spell) { + if (!activeSpell || !spell) return false; + if (activeSpell.id === spell.id) return true; + if (activeSpell.family == null || spell.family == null) return false; + if (Number(activeSpell.family) !== Number(spell.family)) return false; + if (activeSpell.difficulty == null || spell.difficulty == null) return true; + return Number(activeSpell.difficulty) >= Number(spell.difficulty); +} + +function isHandEquippedItem(item) { + const mask = Number(item?.current_wielded_location || 0); + return ( + mask === 1048576 || + mask === 2097152 || + mask === 4194304 || + mask === 16777216 || + mask === 33554432 + ); +} + +function deriveManaStateFromCharacterStats(item, characterName) { + const stats = characterStats[characterName]; + const activeEnchantments = stats?.active_item_enchantments; + const itemActiveSpells = Array.isArray(item.spells?.active_spells) ? 
item.spells.active_spells : []; + + if ((!Array.isArray(activeEnchantments) || activeEnchantments.length === 0) && itemActiveSpells.length === 0) { + return item.mana_state; + } + + if (item.current_mana === undefined || item.current_mana === null) return item.mana_state; + if (item.current_mana <= 0) return 'not_active'; + + const translatedSpells = Array.isArray(item.spells?.spells) ? item.spells.spells : []; + const actionableSpells = translatedSpells.filter(isActionableManaSpell); + if (actionableSpells.length === 0) return item.mana_state; + + const allMatchedOnItem = actionableSpells.every(spell => itemActiveSpells.some(activeSpell => doesSpellMatch(activeSpell, spell))); + if (allMatchedOnItem) { + return 'active'; + } + + if (isHandEquippedItem(item)) { + return 'not_active'; + } + + if (!Array.isArray(activeEnchantments) || activeEnchantments.length === 0) { + return allMatchedOnItem ? 'active' : item.mana_state; + } + + const allMatched = actionableSpells.every(spell => { + if (!spell) return false; + return activeEnchantments.some(activeSpell => doesSpellMatch(activeSpell, spell)); + }); + + if (allMatched) return 'active'; + if (item.mana_state === 'active') return 'not_active'; + return item.mana_state || 'not_active'; +} + function formatManaRemaining(totalSeconds) { if (totalSeconds === null || totalSeconds === undefined) return '--'; const safeSeconds = Math.max(0, Math.floor(totalSeconds)); @@ -1533,7 +1630,6 @@ function renderInventoryManaPanel(state) { if (!state || !state.manaListBody || !state.manaSummary) return; const items = getManaTrackedItems(state); - adjustInventoryLayoutForMana(state, items.length); state.manaListBody.innerHTML = ''; if (items.length === 0) { @@ -1589,23 +1685,7 @@ function renderInventoryManaPanel(state) { row.appendChild(timeEl); state.manaListBody.appendChild(row); }); -} -function adjustInventoryLayoutForMana(state, itemCount) { - if (!state || !state.windowEl || !state.topSection || !state.manaPanel) return; - - const baseWindowHeight = 520; - const baseTopHeight = 264; - const basePanelHeight = 260; - const visibleRowsAtBase = 9; - const rowHeight = 22; - - const extraRows = Math.max(0, itemCount - visibleRowsAtBase); - const extraHeight = extraRows * rowHeight; - - state.topSection.style.height = `${baseTopHeight + extraHeight}px`; - state.manaPanel.style.height = `${basePanelHeight + extraHeight}px`; - state.windowEl.style.height = `${baseWindowHeight + extraHeight}px`; } function showInventoryWindow(name) { @@ -1630,7 +1710,7 @@ function showInventoryWindow(name) { content.appendChild(loading); win.style.width = '572px'; - win.style.height = '520px'; + win.style.height = '720px'; const invContent = document.createElement('div'); invContent.className = 'inventory-content'; @@ -1784,6 +1864,20 @@ function showInventoryWindow(name) { loading.textContent = `Failed to load inventory: ${err.message}`; }); + if (!characterStats[name]) { + fetch(`${API_BASE}/character-stats/${encodeURIComponent(name)}`) + .then(r => r.ok ? 
r.json() : null) + .then(data => { + if (data && !data.error) { + characterStats[name] = data; + if (win._inventoryState) { + renderInventoryState(win._inventoryState); + } + } + }) + .catch(() => {}); + } + debugLog('Inventory window created for:', name); } @@ -3000,6 +3094,9 @@ function initWebSocket() { } else if (msg.type === 'character_stats') { characterStats[msg.character_name] = msg; updateCharacterWindow(msg.character_name, msg); + if (inventoryWindows[msg.character_name] && inventoryWindows[msg.character_name]._inventoryState) { + renderInventoryState(inventoryWindows[msg.character_name]._inventoryState); + } } else if (msg.type === 'inventory_delta') { updateInventoryLive(msg); } else if (msg.type === 'server_status') { diff --git a/static/style.css b/static/style.css index 663dc680..ffb87c1b 100644 --- a/static/style.css +++ b/static/style.css @@ -736,7 +736,7 @@ body.noselect, body.noselect * { top: 100px; left: 400px; width: 572px; - height: 520px; + height: 720px; background: rgba(20, 20, 20, 0.92); backdrop-filter: blur(2px); border: 2px solid var(--ac-gold); @@ -761,7 +761,7 @@ body.noselect, body.noselect * { .inv-top-section { display: flex; justify-content: flex-start; - min-height: 264px; + min-height: 464px; gap: 14px; } @@ -959,7 +959,8 @@ body.noselect, body.noselect * { border: 1px solid var(--ac-border-light); padding: 3px; min-height: 0; - min-height: 260px; + min-height: 460px; + height: 460px; flex-shrink: 0; overflow: hidden; } @@ -2433,7 +2434,7 @@ table.ts-allegiance td:first-child { background: #0e0c08 !important; resize: none !important; width: 572px !important; - min-height: 520px !important; + min-height: 720px !important; } .inv-top-section { @@ -2449,7 +2450,8 @@ table.ts-allegiance td:first-child { .inv-mana-panel { width: 162px !important; min-width: 162px !important; - min-height: 260px !important; + min-height: 460px !important; + height: 460px !important; background: #111014 !important; border: 1px solid #5a4a24 !important; overflow: hidden !important;
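Editor's note (annotation, not part of the patch): the core matching rule of this fix lives in doesSpellMatch above — an active enchantment covers an item spell when it is the same spell id, or a spell of the same family with equal or higher difficulty, with an unknown difficulty treated as covering. A Python restatement of that rule with a couple of illustrative checks (a reference sketch only; the ids and family numbers below are made up):

    def spell_matches(active: dict, spell: dict) -> bool:
        """True if an active enchantment covers an item spell: same spell id,
        or same spell family at equal or higher difficulty."""
        if active.get("id") == spell.get("id"):
            return True
        if active.get("family") is None or spell.get("family") is None:
            return False
        if int(active["family"]) != int(spell["family"]):
            return False
        if active.get("difficulty") is None or spell.get("difficulty") is None:
            return True  # same family, unknown difficulty: treat as covering
        return int(active["difficulty"]) >= int(spell["difficulty"])

    # A stronger same-family enchantment covers a weaker item spell...
    assert spell_matches({"id": 1, "family": 15, "difficulty": 300},
                         {"id": 2, "family": 15, "difficulty": 250})
    # ...but a weaker one does not.
    assert not spell_matches({"id": 3, "family": 15, "difficulty": 200},
                             {"id": 2, "family": 15, "difficulty": 250})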