fixed portals

erik 2025-06-24 19:13:31 +00:00
parent dffd295091
commit 7ff94b59a8
4 changed files with 475 additions and 134 deletions

main.py (109 lines changed)

@@ -37,7 +37,7 @@ from db_async import (
     spawn_events,
     rare_events,
     character_inventories,
-    portal_discoveries,
+    portals,
     server_health_checks,
     server_status,
     init_db_async,
@@ -67,6 +67,7 @@ _cached_trails: dict = {"trails": []}
 _cached_total_rares: dict = {"all_time": 0, "today": 0, "last_updated": None}
 _cache_task: asyncio.Task | None = None
 _rares_cache_task: asyncio.Task | None = None
+_cleanup_task: asyncio.Task | None = None
 
 # Player tracking for debug purposes
 _player_history: list = []  # List of player sets from last 10 refreshes
@@ -255,13 +256,13 @@ async def get_player_count_from_treestats(server_name: str) -> int:
     return 0
 
 async def monitor_server_health():
-    """Background task to monitor server health every 30 seconds and cleanup old portals hourly."""
+    """Background task to monitor server health every 30 seconds and cleanup old portals every minute."""
     server_name = "Coldeve"
     server_address = "play.coldeve.ac"
     server_port = 9000
     check_interval = 30  # seconds
     player_count_interval = 300  # 5 minutes (like ThwargLauncher's 10 minutes, but more frequent)
-    portal_cleanup_interval = 3600  # 1 hour
+    portal_cleanup_interval = 60  # 1 minute
     last_player_count_check = 0
     last_portal_cleanup = 0
     current_player_count = None
@@ -381,12 +382,12 @@ async def monitor_server_health():
             logger.debug(f"Server health check: {status}, latency={latency_ms}ms, players={current_player_count}")
 
-            # Portal cleanup (run every hour)
+            # Portal cleanup (run every minute)
             current_time = time.time()
             if current_time - last_portal_cleanup >= portal_cleanup_interval:
                 try:
                     deleted_count = await cleanup_old_portals()
-                    logger.info(f"Portal cleanup: removed {deleted_count} old portal discoveries")
+                    logger.info(f"Portal cleanup: removed {deleted_count} portals older than 1 hour")
                     last_portal_cleanup = current_time
                 except Exception as cleanup_error:
                     logger.error(f"Portal cleanup error: {cleanup_error}", exc_info=True)
@@ -396,6 +397,20 @@ async def monitor_server_health():
 
         await asyncio.sleep(check_interval)
+
+
+async def cleanup_connections_loop():
+    """Background task to clean up stale WebSocket connections every 5 minutes."""
+    cleanup_interval = 300  # 5 minutes
+    logger.info("🧹 Starting WebSocket connection cleanup task")
+
+    while True:
+        try:
+            await asyncio.sleep(cleanup_interval)
+            logger.debug("🧹 Running periodic WebSocket connection cleanup")
+            await cleanup_stale_connections()
+        except Exception as e:
+            logger.error(f"WebSocket cleanup task error: {e}", exc_info=True)
 
 def _track_player_changes(new_players: list) -> None:
     """Track player changes for debugging flapping issues."""
     from datetime import datetime, timezone
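
The new loop delegates to cleanup_stale_connections(), which is also outside this diff. A sketch under assumed names (_browser_clients as a module-level set of Starlette WebSockets is an assumption):

# Hypothetical sketch; the real cleanup_stale_connections() lives elsewhere in main.py.
from starlette.websockets import WebSocket, WebSocketState

async def cleanup_stale_connections() -> None:
    """Drop browser WebSocket connections whose sockets are no longer connected."""
    stale = [ws for ws in _browser_clients if ws.client_state != WebSocketState.CONNECTED]
    for ws in stale:
        _browser_clients.discard(ws)  # assumed module-level set[WebSocket]
    if stale:
        logger.info(f"🧹 Removed {len(stale)} stale WebSocket connection(s)")

One detail worth noting: asyncio.CancelledError derives from BaseException on Python 3.8+, so the loop's except Exception does not swallow cancellation, and the shutdown handler below can still stop the task.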
@@ -872,11 +887,12 @@ async def on_startup():
     else:
         raise RuntimeError(f"Could not connect to database after {max_attempts} attempts")
     # Start background cache refresh (live & trails)
-    global _cache_task, _rares_cache_task, _server_health_task
+    global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
     _cache_task = asyncio.create_task(_refresh_cache_loop())
     _rares_cache_task = asyncio.create_task(_refresh_total_rares_cache())
     _server_health_task = asyncio.create_task(monitor_server_health())
-    logger.info("Background cache refresh and server monitoring tasks started")
+    _cleanup_task = asyncio.create_task(cleanup_connections_loop())
+    logger.info("Background cache refresh, server monitoring, and connection cleanup tasks started")
 
 @app.on_event("shutdown")
 async def on_shutdown():
     """Event handler triggered when application is shutting down.
@@ -884,7 +900,7 @@ async def on_shutdown():
     Ensures the database connection is closed cleanly.
     """
     # Stop cache refresh tasks
-    global _cache_task, _rares_cache_task, _server_health_task
+    global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
     if _cache_task:
         logger.info("Stopping background cache refresh task")
         _cache_task.cancel()
@@ -908,6 +924,14 @@ async def on_shutdown():
             await _server_health_task
         except asyncio.CancelledError:
             pass
+
+    if _cleanup_task:
+        logger.info("Stopping WebSocket connection cleanup task")
+        _cleanup_task.cancel()
+        try:
+            await _cleanup_task
+        except asyncio.CancelledError:
+            pass
 
     logger.info("Disconnecting from database")
     await database.disconnect()
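
The cancel/await/suppress-CancelledError pattern now appears four times in on_shutdown(). A possible consolidation, not part of this commit:

# Possible refactor (not in this commit): one helper for all background tasks.
import asyncio

async def _stop_task(task: asyncio.Task | None, name: str) -> None:
    """Cancel a background task and wait for it to wind down."""
    if task is None:
        return
    logger.info(f"Stopping {name}")
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass  # expected when the task is cancelled

Each block above would then collapse to a call like: await _stop_task(_cleanup_task, "WebSocket connection cleanup task").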
@@ -1083,37 +1107,34 @@ async def get_quest_status():
 
 @app.get("/portals")
 async def get_portals():
-    """Return unique portal discoveries from the last 24 hours."""
+    """Return all active portals (less than 1 hour old)."""
     try:
-        # Query unique portals from last 24 hours, keeping the most recent discovery of each
-        cutoff_time = datetime.now(timezone.utc) - timedelta(hours=24)
+        # No need for cutoff check - cleanup job handles expiration
         query = """
-            SELECT DISTINCT ON (portal_name)
-                character_name, portal_name, timestamp, ns, ew, z
-            FROM portal_discoveries
-            WHERE timestamp >= :cutoff_time
-            ORDER BY portal_name, timestamp DESC
+            SELECT portal_name, ns, ew, z, discovered_at, discovered_by
+            FROM portals
+            ORDER BY discovered_at DESC
         """
-        rows = await database.fetch_all(query, {"cutoff_time": cutoff_time})
+        rows = await database.fetch_all(query)
 
         portals = []
         for row in rows:
             portal = {
-                "character_name": row["character_name"],
-                "portal_name": row["portal_name"],
-                "timestamp": row["timestamp"].isoformat(),
-                "ns": row["ns"],
-                "ew": row["ew"],
-                "z": row["z"]
+                "portal_name": row["portal_name"],
+                "coordinates": {
+                    "ns": row["ns"],
+                    "ew": row["ew"],
+                    "z": row["z"]
+                },
+                "discovered_at": row["discovered_at"].isoformat(),
+                "discovered_by": row["discovered_by"]
             }
             portals.append(portal)
 
         return {
             "portals": portals,
-            "portal_count": len(portals),
-            "cutoff_time": cutoff_time.isoformat()
+            "portal_count": len(portals)
        }
 
     except Exception as e:
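
The response no longer carries the cutoff_time field, a small breaking change for any client that read it. An illustrative payload for the new shape (all values invented):

# Illustrative GET /portals response after this commit; every value is made up.
example_response = {
    "portals": [
        {
            "portal_name": "Town Network Portal",
            "coordinates": {"ns": 12.34, "ew": -56.78, "z": 0.005},
            "discovered_at": "2025-06-24T19:13:31+00:00",
            "discovered_by": "Erik",
        }
    ],
    "portal_count": 1,
}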
@@ -1964,37 +1985,39 @@ async def ws_receive_snapshots(
             ew = float(ew)
             z = float(z)
 
-            # Check if this portal was recently discovered (within last hour) to avoid duplicates
-            recent_check = await database.fetch_one(
+            # Round coordinates for comparison (0.01 tolerance)
+            ns_rounded = round(ns, 2)
+            ew_rounded = round(ew, 2)
+            # Check if portal exists at these coordinates
+            existing_portal = await database.fetch_one(
                 """
-                SELECT id FROM portal_discoveries
-                WHERE character_name = :character_name
-                AND portal_name = :portal_name
-                AND timestamp > :cutoff_time
+                SELECT id FROM portals
+                WHERE ROUND(ns::numeric, 2) = :ns_rounded
+                AND ROUND(ew::numeric, 2) = :ew_rounded
+                LIMIT 1
                 """,
                 {
-                    "character_name": character_name,
-                    "portal_name": portal_name,
-                    "cutoff_time": timestamp - timedelta(hours=1)
+                    "ns_rounded": ns_rounded,
+                    "ew_rounded": ew_rounded
                 }
             )
 
-            if not recent_check:
-                # Store portal discovery in database
+            if not existing_portal:
+                # Store new portal in database
                 await database.execute(
-                    portal_discoveries.insert().values(
-                        character_name=character_name,
+                    portals.insert().values(
                         portal_name=portal_name,
-                        timestamp=timestamp,
                         ns=ns,
                         ew=ew,
-                        z=z
+                        z=z,
+                        discovered_at=timestamp,
+                        discovered_by=character_name
                     )
                 )
-                logger.info(f"Recorded portal discovery: {portal_name} by {character_name} at {ns}, {ew}")
+                logger.info(f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}")
             else:
-                logger.debug(f"Skipping duplicate portal discovery: {portal_name} by {character_name} (already discovered recently)")
+                logger.debug(f"Portal already exists at {ns_rounded}, {ew_rounded}")
 
             # Broadcast to browser clients for map updates
             await _broadcast_to_browser_clients(data)
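
The commit replaces the portal_discoveries table with a new portals table defined in db_async.py, which this page does not show. Inferring only from the columns the diff reads and writes, its definition presumably resembles:

# Hypothetical reconstruction of the `portals` table in db_async.py, inferred from
# the SELECT and insert().values() calls above; types and constraints are guesses.
import sqlalchemy

metadata = sqlalchemy.MetaData()

portals = sqlalchemy.Table(
    "portals",
    metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("portal_name", sqlalchemy.String, nullable=False),
    sqlalchemy.Column("ns", sqlalchemy.Float, nullable=False),
    sqlalchemy.Column("ew", sqlalchemy.Float, nullable=False),
    sqlalchemy.Column("z", sqlalchemy.Float, nullable=False),
    sqlalchemy.Column("discovered_at", sqlalchemy.DateTime(timezone=True), nullable=False),
    sqlalchemy.Column("discovered_by", sqlalchemy.String, nullable=False),
)

One caveat with the 0.01-degree rounding dedup above: two reports of the same portal that straddle a rounding boundary (for example ns 12.344 and 12.346) round to different buckets and would be stored twice.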