fixed portals

parent dffd295091
commit 7ff94b59a8

4 changed files with 475 additions and 134 deletions
db_async.py (51 lines changed)
@@ -127,18 +127,18 @@ character_inventories = Table(
     UniqueConstraint("character_name", "item_id", name="uq_char_item"),
 )
 
-# Portal discoveries table for 24-hour live tracking
-portal_discoveries = Table(
-    # Records player portal discoveries with 24-hour retention
-    "portal_discoveries",
+# Portals table with coordinate-based uniqueness and 1-hour retention
+portals = Table(
+    # Stores unique portals by coordinates with 1-hour retention
+    "portals",
     metadata,
     Column("id", Integer, primary_key=True),
-    Column("character_name", String, nullable=False, index=True),
     Column("portal_name", String, nullable=False),
-    Column("timestamp", DateTime(timezone=True), nullable=False, index=True),
-    Column("ns", Float, nullable=False),  # North/South coordinate as float
-    Column("ew", Float, nullable=False),  # East/West coordinate as float
-    Column("z", Float, nullable=False),  # Elevation as float
+    Column("ns", Float, nullable=False),
+    Column("ew", Float, nullable=False),
+    Column("z", Float, nullable=False),
+    Column("discovered_at", DateTime(timezone=True), nullable=False, index=True),
+    Column("discovered_by", String, nullable=False),
 )
 
 # Server health monitoring tables
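
The reworked schema drops the per-discovery columns (character_name, timestamp) in favor of portal-identity columns (discovered_by, discovered_at), and discovered_at keeps an index because both the cleanup job and the /portals endpoint scan on it. A minimal query sketch against the new table, assuming the portals table object and the databases-style database handle defined in db_async.py (the one-hour filter here is illustrative; expiration is normally enforced by the cleanup job):

    from datetime import datetime, timedelta, timezone

    from sqlalchemy import select

    async def fetch_active_portals():
        # Range scan on the indexed discovered_at column stays cheap
        cutoff = datetime.now(timezone.utc) - timedelta(hours=1)
        query = (
            select(portals.c.portal_name, portals.c.ns, portals.c.ew, portals.c.z)
            .where(portals.c.discovered_at >= cutoff)
            .order_by(portals.c.discovered_at.desc())
        )
        return await database.fetch_all(query)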
@@ -230,18 +230,39 @@ async def init_db_async():
     except Exception as e:
         print(f"Warning: failed to set retention/compression policies: {e}")
 
-async def cleanup_old_portals():
-    """Clean up portal discoveries older than 24 hours."""
+    # Create unique constraint on rounded portal coordinates
     try:
-        cutoff_time = datetime.now(timezone.utc) - timedelta(hours=24)
+        with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
+            # Drop old portal_discoveries table if it exists
+            conn.execute(text("DROP TABLE IF EXISTS portal_discoveries CASCADE"))
 
-        # Delete old portal discoveries
+            # Create unique constraint on rounded coordinates for the new portals table
+            conn.execute(text(
+                """CREATE UNIQUE INDEX IF NOT EXISTS unique_portal_coords
+                ON portals (ROUND(ns::numeric, 2), ROUND(ew::numeric, 2))"""
+            ))
+
+            # Create index on coordinates for efficient lookups
+            conn.execute(text(
+                "CREATE INDEX IF NOT EXISTS idx_portals_coords ON portals (ns, ew)"
+            ))
+
+        print("Portal table indexes and constraints created successfully")
+    except Exception as e:
+        print(f"Warning: failed to create portal table constraints: {e}")
+
+
+async def cleanup_old_portals():
+    """Clean up portals older than 1 hour."""
+    try:
+        cutoff_time = datetime.now(timezone.utc) - timedelta(hours=1)
+
+        # Delete old portals
         result = await database.execute(
-            "DELETE FROM portal_discoveries WHERE timestamp < :cutoff_time",
+            "DELETE FROM portals WHERE discovered_at < :cutoff_time",
            {"cutoff_time": cutoff_time}
         )
 
-        print(f"Cleaned up {result} portal discoveries older than 24 hours")
+        print(f"Cleaned up {result} portals older than 1 hour")
         return result
 
     except Exception as e:
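
The uniqueness key is the coordinate pair rounded to two decimals, so two reports landing within about 0.01 map units of each other on both axes collapse into one row. A self-contained sketch of that keying (portal_key is a hypothetical helper, not part of this commit); one caveat worth knowing: Python's round() rounds half to even while PostgreSQL's ROUND(numeric, 2) rounds half away from zero, so values sitting exactly on a .005 boundary can disagree between an app-side check and the index expression:

    def portal_key(ns: float, ew: float) -> tuple[float, float]:
        # Mirrors ROUND(ns::numeric, 2), ROUND(ew::numeric, 2) in unique_portal_coords
        return (round(ns, 2), round(ew, 2))

    assert portal_key(12.304, 33.999) == portal_key(12.299, 34.001)  # treated as one portal
    assert portal_key(12.304, 33.999) != portal_key(12.32, 34.0)     # distinct portals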
@@ -44,8 +44,8 @@ MONITOR_CHARACTER = os.getenv('MONITOR_CHARACTER', 'Dunking Rares')
 
 # Comprehensive rare classification patterns
 
-# Common Rares - Exact match pattern (70 items)
-COMMON_RARES_PATTERN = re.compile(r"^(Alchemist's Crystal|Scholar's Crystal|Smithy's Crystal|Hunter's Crystal|Observer's Crystal|Thorsten's Crystal|Elysa's Crystal|Chef's Crystal|Enchanter's Crystal|Oswald's Crystal|Deceiver's Crystal|Fletcher's Crystal|Physician's Crystal|Artificer's Crystal|Tinker's Crystal|Vaulter's Crystal|Monarch's Crystal|Life Giver's Crystal|Thief's Crystal|Adherent's Crystal|Resister's Crystal|Imbuer's Crystal|Converter's Crystal|Evader's Crystal|Dodger's Crystal|Zefir's Crystal|Ben Ten's Crystal|Corruptor's Crystal|Artist's Crystal|T'ing's Crystal|Warrior's Crystal|Brawler's Crystal|Hieromancer's Crystal|Rogue's Crystal|Berzerker's Crystal|Lugian's Pearl|Ursuin's Pearl|Wayfarer's Pearl|Sprinter's Pearl|Magus's Pearl|Lich's Pearl|Warrior's Jewel|Melee's Jewel|Mage's Jewel|Duelist's Jewel|Archer's Jewel|Tusker's Jewel|Olthoi's Jewel|Inferno's Jewel|Gelid's Jewel|Astyrrian's Jewel|Executor's Jewel|Pearl of Blood Drinking|Pearl of Heart Seeking|Pearl of Defending|Pearl of Swift Killing|Pearl of Spirit Drinking|Pearl of Hermetic Linking|Pearl of Blade Baning|Pearl of Pierce Baning|Pearl of Bludgeon Baning|Pearl of Acid Baning|Pearl of Flame Baning|Pearl of Frost Baning|Pearl of Lightning Baning|Pearl of Impenetrability|Refreshing Elixir|Invigorating Elixir|Miraculous Elixir|Medicated Health Kit|Medicated Stamina Kit|Medicated Mana Kit|Casino Exquisite Keyring)$")
+# Common Rares - Exact match pattern (71 items)
+COMMON_RARES_PATTERN = re.compile(r"^(Alchemist's Crystal|Scholar's Crystal|Smithy's Crystal|Hunter's Crystal|Observer's Crystal|Thorsten's Crystal|Elysa's Crystal|Chef's Crystal|Enchanter's Crystal|Oswald's Crystal|Deceiver's Crystal|Fletcher's Crystal|Physician's Crystal|Artificer's Crystal|Tinker's Crystal|Vaulter's Crystal|Monarch's Crystal|Life Giver's Crystal|Thief's Crystal|Adherent's Crystal|Resister's Crystal|Imbuer's Crystal|Converter's Crystal|Evader's Crystal|Dodger's Crystal|Zefir's Crystal|Ben Ten's Crystal|Corruptor's Crystal|Artist's Crystal|T'ing's Crystal|Warrior's Crystal|Brawler's Crystal|Hieromancer's Crystal|Rogue's Crystal|Berzerker's Crystal|Knight's Crystal|Lugian's Pearl|Ursuin's Pearl|Wayfarer's Pearl|Sprinter's Pearl|Magus's Pearl|Lich's Pearl|Warrior's Jewel|Melee's Jewel|Mage's Jewel|Duelist's Jewel|Archer's Jewel|Tusker's Jewel|Olthoi's Jewel|Inferno's Jewel|Gelid's Jewel|Astyrrian's Jewel|Executor's Jewel|Pearl of Blood Drinking|Pearl of Heart Seeking|Pearl of Defending|Pearl of Swift Killing|Pearl of Spirit Drinking|Pearl of Hermetic Linking|Pearl of Blade Baning|Pearl of Pierce Baning|Pearl of Bludgeon Baning|Pearl of Acid Baning|Pearl of Flame Baning|Pearl of Frost Baning|Pearl of Lightning Baning|Pearl of Impenetrability|Refreshing Elixir|Invigorating Elixir|Miraculous Elixir|Medicated Health Kit|Medicated Stamina Kit|Medicated Mana Kit|Casino Exquisite Keyring)$")
 
 # Combined pattern for detecting any rare in chat messages (simplified for common detection)
 RARE_IN_CHAT_PATTERN = re.compile(r"(Crystal|Pearl|Jewel|Elixir|Kit|Hieroglyph|Pictograph|Ideograph|Rune|Infinite|Eternal|Perennial|Foolproof|Limitless|Shimmering|Gelidite|Leikotha|Frore|Staff of|Count Renari|Wand of)")
@@ -67,6 +67,7 @@ class DiscordRareMonitor:
 
         # WebSocket connection tracking
         self.websocket_task: Optional[asyncio.Task] = None
+        self.health_monitor_task: Optional[asyncio.Task] = None
         self.running = False
 
         # Setup Discord event handlers
@@ -112,12 +113,27 @@ class DiscordRareMonitor:
             self.websocket_task = asyncio.create_task(self.monitor_websocket())
             logger.info("🔄 Started WebSocket monitoring task")
 
+            # Start health monitoring task
+            self.health_monitor_task = asyncio.create_task(self.monitor_websocket_health())
+            logger.info("💓 Started WebSocket health monitoring task")
+
         @self.client.event
         async def on_disconnect():
-            logger.warning("⚠️ Discord client disconnected")
-            self.running = False
-            if self.websocket_task:
-                self.websocket_task.cancel()
+            logger.warning("⚠️ Discord client disconnected (gateway connection lost)")
+            # Don't stop WebSocket monitoring on temporary Discord disconnects
+            # Discord will attempt to resume automatically
+
+        @self.client.event
+        async def on_resumed():
+            logger.info("🔄 Discord session resumed - checking WebSocket health")
+            # Check if WebSocket task is still running after Discord resume
+            if not self.websocket_task or self.websocket_task.done():
+                logger.warning("🔧 WebSocket task was lost during Discord disconnect - restarting")
+                await self.post_status_to_aclog("🔄 Discord resumed: WebSocket was lost, restarting connection")
+                self.websocket_task = asyncio.create_task(self.monitor_websocket())
+            else:
+                logger.info("✅ WebSocket task still healthy after Discord resume")
+                await self.post_status_to_aclog("✅ Discord resumed: WebSocket connection still healthy")
 
         @self.client.event
         async def on_message(message):
@@ -170,120 +186,195 @@ class DiscordRareMonitor:
                 logger.info("📋 Calling handle_icons_summary (no args)")
                 await self.handle_icons_summary(message)
 
+            # Handle !test_great command to test great rare display
+            elif message.content.startswith('!test_great'):
+                logger.info(f"🧪 !test_great command received from {message.author}")
+                await self.handle_test_great_command(message)
+
+    async def monitor_websocket_health(self):
+        """Periodically monitor WebSocket task health and restart if needed."""
+        health_check_interval = 60  # Check every 1 minute
+
+        while self.running:
+            try:
+                await asyncio.sleep(health_check_interval)
+
+                if not self.running:
+                    break
+
+                # Check if WebSocket task is still alive
+                if not self.websocket_task or self.websocket_task.done():
+                    logger.warning("🚨 WebSocket task health check failed - task is not running")
+
+                    # Check if task completed with an exception
+                    if self.websocket_task and self.websocket_task.done():
+                        try:
+                            exception = self.websocket_task.exception()
+                            if exception:
+                                logger.error(f"🚨 WebSocket task failed with exception: {exception}")
+                        except Exception as e:
+                            logger.warning(f"Could not get task exception: {e}")
+
+                    # Restart the WebSocket monitoring task
+                    logger.info("🔧 Restarting WebSocket monitoring task")
+                    await self.post_status_to_aclog("🚨 Health check detected WebSocket failure - restarting connection")
+                    self.websocket_task = asyncio.create_task(self.monitor_websocket())
+                else:
+                    logger.debug("💓 WebSocket task health check passed")
+
+            except asyncio.CancelledError:
+                logger.info("💓 WebSocket health monitoring task cancelled")
+                break
+            except Exception as e:
+                logger.error(f"💓 WebSocket health monitoring error: {e}", exc_info=True)
+                # Continue monitoring despite errors
+
     async def monitor_websocket(self):
-        """Monitor Dereth Tracker WebSocket for rare events with robust reconnection."""
-        retry_delay = 5  # seconds
-        max_retry_delay = 300  # 5 minutes
+        """Monitor Dereth Tracker WebSocket with immediate failure detection and reconnection."""
+        retry_delay = 1  # Start with immediate retry
+        max_retry_delay = 60  # Max 1 minute delay
         consecutive_failures = 0
-        max_consecutive_failures = 10
+        max_consecutive_failures = 20  # Allow more attempts before giving up
 
         while self.running:
             websocket = None
-            last_message_time = time.time()
-            health_check_interval = 60  # Check health every 60 seconds
-            message_timeout = 180  # Consider connection dead if no messages for 3 minutes
+            connection_failed = False
+            connection_start_time = None
 
             try:
-                # Connect to live endpoint (no authentication needed for browsers)
+                # Connect to live endpoint with built-in ping/pong for failure detection
                 logger.info(f"🔗 Connecting to WebSocket: {WEBSOCKET_URL}")
 
-                # Add connection timeout and ping interval for better connection health
                 websocket = await websockets.connect(
                     WEBSOCKET_URL,
-                    ping_interval=30,  # Send ping every 30 seconds
-                    ping_timeout=10,   # Wait 10 seconds for pong
-                    close_timeout=10   # Wait 10 seconds for close
+                    ping_interval=20,  # Built-in ping every 20 seconds
+                    ping_timeout=10,   # Fail if no pong response in 10 seconds
+                    close_timeout=5    # Quick close timeout
                 )
 
-                logger.info("✅ WebSocket connected successfully")
-                retry_delay = 5  # Reset retry delay on successful connection
+                connection_start_time = time.time()
+                logger.info(f"✅ WebSocket connected successfully at {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(connection_start_time))}")
+                retry_delay = 1  # Reset retry delay on successful connection
                 consecutive_failures = 0  # Reset failure counter
-                last_message_time = time.time()  # Reset message timer
 
-                # Send a test message to Discord to indicate connection restored
+                # Log connection details
+                logger.info(f"📊 Connection details: Local={websocket.local_address}, Remote={websocket.remote_address}")
+
+                # Send connection established message
                 await self.post_status_to_aclog("🔗 WebSocket connection established")
 
-                # Create tasks for message processing and health checking
-                async def process_messages():
-                    nonlocal last_message_time
+                # Simple message processing with comprehensive error handling
+                try:
+                    message_count = 0
+                    last_health_log = time.time()
+                    health_log_interval = 600  # Log health every 10 minutes
+
                     async for message in websocket:
                         if not self.running:
                             break
 
-                        last_message_time = time.time()
+                        message_count += 1
                         logger.debug(f"📨 Raw WebSocket message: {message[:100]}...")
                         await self.process_websocket_message(message)
 
-                async def health_check():
-                    """Periodically check connection health and force reconnect if needed."""
-                    while self.running and websocket and not websocket.closed:
-                        await asyncio.sleep(health_check_interval)
-
+                        # Periodic health logging
                         current_time = time.time()
-                        time_since_last_message = current_time - last_message_time
+                        if current_time - last_health_log >= health_log_interval:
+                            connection_duration = current_time - connection_start_time
+                            hours = connection_duration / 3600
+                            logger.info(f"💓 Connection health: {hours:.2f}h uptime, {message_count} messages received")
+                            last_health_log = current_time
 
-                        if time_since_last_message > message_timeout:
-                            logger.warning(f"⚠️ No messages received for {time_since_last_message:.0f} seconds, forcing reconnect")
-                            await self.post_status_to_aclog(f"⚠️ WebSocket appears dead, reconnecting...")
-                            if websocket and not websocket.closed:
-                                await websocket.close()
-                            break
-                        else:
-                            logger.debug(f"💓 WebSocket health check passed (last message {time_since_last_message:.0f}s ago)")
+                    # If async for ends, connection was closed
+                    connection_duration = time.time() - connection_start_time if connection_start_time else 0
+                    hours = connection_duration / 3600
 
-                # Run both tasks concurrently
-                message_task = asyncio.create_task(process_messages())
-                health_task = asyncio.create_task(health_check())
-
-                # Wait for either task to complete (health check failure or message loop exit)
-                done, pending = await asyncio.wait(
-                    [message_task, health_task],
-                    return_when=asyncio.FIRST_COMPLETED
-                )
-
-                # Cancel any remaining tasks
-                for task in pending:
-                    task.cancel()
+                    # Try to get close code and reason
+                    close_code = None
+                    close_reason = None
                     try:
-                        await task
-                    except asyncio.CancelledError:
+                        close_code = websocket.close_code
+                        close_reason = websocket.close_reason
+                    except:
                         pass
+
+                    logger.warning(f"🔌 WebSocket connection ended after {hours:.2f} hours (async for loop exited)")
+                    logger.warning(f"📊 Close details: code={close_code}, reason='{close_reason}'")
+                    connection_failed = True
+
+                    # Post disconnect notification to Discord
+                    await self.post_status_to_aclog(f"🔌 WebSocket disconnected after {hours:.2f}h (code={close_code}), reconnecting...")
+
+                except asyncio.CancelledError:
+                    logger.info("📡 WebSocket task cancelled")
+                    raise
+                except Exception as e:
+                    # Any exception during message processing indicates connection failure
+                    logger.warning(f"⚠️ WebSocket message processing failed: {type(e).__name__}: {e}")
+                    connection_failed = True
+                    raise  # Re-raise to trigger reconnection
 
             except websockets.exceptions.ConnectionClosed as e:
-                logger.warning(f"⚠️ WebSocket connection closed: {e}")
+                connection_duration = time.time() - connection_start_time if connection_start_time else 0
+                hours = connection_duration / 3600
+                logger.warning(f"⚠️ WebSocket connection closed after {hours:.2f} hours: {e}")
+                logger.warning(f"📊 ConnectionClosed details: code={e.code}, reason='{e.reason}'")
                 consecutive_failures += 1
-                await self.post_status_to_aclog(f"⚠️ WebSocket disconnected (attempt {consecutive_failures})")
+                await self.post_status_to_aclog(f"⚠️ WebSocket disconnected after {hours:.2f}h, reconnecting...")
 
             except websockets.exceptions.InvalidStatusCode as e:
-                logger.error(f"❌ WebSocket invalid status code: {e}")
+                logger.error(f"❌ WebSocket server error: {e}")
                 consecutive_failures += 1
-                await self.post_status_to_aclog(f"❌ WebSocket connection failed: {e}")
+                await self.post_status_to_aclog(f"❌ Server error {e.status_code}, retrying...")
+
+            except (OSError, ConnectionRefusedError) as e:
+                logger.warning(f"⚠️ Network connection failed: {e}")
+                consecutive_failures += 1
+                await self.post_status_to_aclog(f"⚠️ Network error, retrying...")
 
             except Exception as e:
-                logger.error(f"❌ WebSocket error: {e}")
+                connection_duration = time.time() - connection_start_time if connection_start_time else 0
+                hours = connection_duration / 3600
+                logger.error(f"❌ WebSocket error after {hours:.2f} hours: {type(e).__name__}: {e}")
+                logger.error(f"📊 Exception details: {repr(e)}")
                 consecutive_failures += 1
-                await self.post_status_to_aclog(f"❌ WebSocket error: {e}")
+                await self.post_status_to_aclog(f"❌ Connection error after {hours:.2f}h: {type(e).__name__}")
 
             finally:
                 # Ensure websocket is properly closed
-                if websocket and not websocket.closed:
+                if websocket:
                     try:
                         await websocket.close()
-                    except:
+                    except Exception:
                         pass
 
+                # Log final connection stats
+                if connection_start_time:
+                    connection_duration = time.time() - connection_start_time
+                    hours = connection_duration / 3600
+                    logger.info(f"📊 Connection summary: Duration={hours:.2f}h, Total failures={consecutive_failures}")
+
+            # If we got here, the connection failed somehow
+            if connection_failed and consecutive_failures == 0:
+                consecutive_failures = 1  # Count silent failures
+                logger.warning("⚠️ Connection failed without exception - forcing reconnection")
+                await self.post_status_to_aclog("⚠️ WebSocket connection lost, reconnecting...")
+
             # Check if we should keep retrying
             if consecutive_failures >= max_consecutive_failures:
                 logger.error(f"❌ Too many consecutive failures ({consecutive_failures}). Stopping reconnection attempts.")
                 await self.post_status_to_aclog(f"❌ WebSocket reconnection failed after {consecutive_failures} attempts. Bot may need restart.")
                 break
 
-            if self.running:
-                logger.info(f"🔄 Retrying WebSocket connection in {retry_delay} seconds... (failure #{consecutive_failures})")
-                await asyncio.sleep(retry_delay)
-                # Exponential backoff with max delay
-                retry_delay = min(retry_delay * 2, max_retry_delay)
+            # Immediate retry for first few failures, then exponential backoff
+            if self.running and consecutive_failures > 0:
+                if consecutive_failures <= 3:
+                    logger.info(f"🔄 Immediate reconnection attempt #{consecutive_failures}")
+                    await asyncio.sleep(1)  # Brief pause to avoid tight loop
+                else:
+                    logger.info(f"🔄 Retrying WebSocket connection in {retry_delay} seconds... (failure #{consecutive_failures})")
+                    await asyncio.sleep(retry_delay)
+                    retry_delay = min(retry_delay * 2, max_retry_delay)  # Exponential backoff
 
     async def process_websocket_message(self, raw_message: str):
         """Process incoming WebSocket message."""
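
With retry_delay reset to 1 on every successful connection and the consecutive_failures <= 3 branch above, the reconnect schedule works out to four one-second retries followed by a doubling delay capped at max_retry_delay. A standalone sketch of the resulting sleep sequence (pure arithmetic mirroring the constants in this hunk):

    def reconnect_delays(failures: int, max_delay: int = 60) -> list[int]:
        # Sleep before each reconnect attempt, mirroring monitor_websocket
        delays, retry_delay = [], 1
        for failure in range(1, failures + 1):
            if failure <= 3:
                delays.append(1)  # immediate retries for the first three failures
            else:
                delays.append(retry_delay)
                retry_delay = min(retry_delay * 2, max_delay)  # exponential backoff
        return delays

    print(reconnect_delays(8))  # [1, 1, 1, 1, 2, 4, 8, 16]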
@@ -344,11 +435,88 @@ class DiscordRareMonitor:
         if character_name != MONITOR_CHARACTER:
             return
 
+        # Check for special vortex warning message FIRST - simplified matching
+        # Debug: log what we're checking
+        logger.debug(f"🔍 Checking for vortex in: {repr(chat_text)}")
+
+        if "m in whirlwind of vortexes" in chat_text:
+            logger.warning(f"⚠️🌪️ VORTEX WARNING from {character_name}: {chat_text}")
+
+            # Get timestamp for the warning
+            from datetime import datetime
+            timestamp_str = data.get('timestamp', '')
+            try:
+                if timestamp_str:
+                    if timestamp_str.endswith('Z'):
+                        timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
+                    else:
+                        timestamp = datetime.fromisoformat(timestamp_str)
+                else:
+                    timestamp = datetime.now()
+            except ValueError:
+                timestamp = datetime.now()
+
+            time_str = timestamp.strftime("%H:%M:%S")
+
+            # Extract the actual character name from allegiance chat
+            # Format: [Allegiance] Character Name says, "message"
+            actual_character = "Unknown Character"
+            if "[Allegiance]" in chat_text and " says," in chat_text:
+                try:
+                    # Extract between "] " and " says,"
+                    start = chat_text.find("] ") + 2
+                    end = chat_text.find(" says,")
+                    if start > 1 and end > start:
+                        actual_character = chat_text[start:end]
+                except:
+                    actual_character = "Unknown Character"
+
+            try:
+                channel = self.client.get_channel(ACLOG_CHANNEL_ID)
+                if channel:
+                    # Create clean vortex warning embed
+                    embed = discord.Embed(
+                        title="🌪️ **VORTEX WARNING** 🌪️",
+                        description=f"**{actual_character}** is caught in vortexes!",
+                        color=0xFF4500  # Orange-red
+                    )
+
+                    # Add time and character info
+                    embed.add_field(
+                        name="🕐 **Time**",
+                        value=f"⏰ {time_str}",
+                        inline=True
+                    )
+
+                    embed.add_field(
+                        name="🎯 **Character**",
+                        value=f"**{actual_character}**",
+                        inline=True
+                    )
+
+                    # Add footer
+                    embed.set_footer(text="Someone needs help with vortexes!")
+
+                    # Send clean warning
+                    await channel.send(embed=embed)
+
+                    logger.info(f"🌪️ Posted vortex warning for {actual_character} to #{channel.name}")
+                else:
+                    logger.error(f"❌ Could not find channel {ACLOG_CHANNEL_ID}")
+            except Exception as e:
+                logger.error(f"💥 VORTEX WARNING POSTING FAILED: {e}", exc_info=True)
+
+            return
+        elif "whirlwind of vortex" in chat_text.lower():
+            logger.debug(f"🌪️ Found 'whirlwind of vortex' but not exact match in: {repr(chat_text)}")
+        elif "jeebus" in chat_text.lower():
+            logger.debug(f"👀 Found 'jeebus' but not vortex pattern in: {repr(chat_text)}")
+
         # Skip if this message contains any rare names (common or great)
         if RARE_IN_CHAT_PATTERN.search(chat_text) or self.is_rare_message(chat_text):
             logger.debug(f"🎯 Skipping rare message from {character_name}: {chat_text}")
             return
 
+        # Regular chat logging
         logger.info(f"💬 Chat from {character_name}: {chat_text}")
 
         # Post to AC Log channel
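
The find()-based slicing above handles the happy path but will grab the wrong span if the text contains an earlier "] ". An equivalent regex (illustrative alternative only, not part of this commit) makes the expected message shape explicit:

    import re

    # Matches: [Allegiance] Character Name says, "message"
    ALLEGIANCE_SAYS = re.compile(r'\[Allegiance\]\s+(.+?)\s+says,')

    def speaker_from_allegiance_chat(chat_text: str) -> str:
        match = ALLEGIANCE_SAYS.search(chat_text)
        return match.group(1) if match else "Unknown Character"

    assert speaker_from_allegiance_chat(
        '[Allegiance] Dunking Rares says, "vortexes everywhere"'
    ) == "Dunking Rares"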
@@ -542,6 +710,69 @@ class DiscordRareMonitor:
         except Exception as e:
             logger.error(f"❌ Error posting rare debug to Discord: {e}")
 
+    async def post_vortex_warning_to_discord(self, data: dict):
+        """Post vortex warning as a special alert message to Discord."""
+        logger.info(f"🔧 Starting vortex warning posting process...")
+        try:
+            character_name = data.get('character_name', 'Unknown Character')
+            chat_text = data.get('text', '')
+            timestamp_str = data.get('timestamp', '')
+
+            # Parse timestamp
+            try:
+                if timestamp_str:
+                    if timestamp_str.endswith('Z'):
+                        timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
+                    else:
+                        timestamp = datetime.fromisoformat(timestamp_str)
+                else:
+                    timestamp = datetime.now()
+            except ValueError:
+                timestamp = datetime.now()
+
+            logger.info(f"🔧 Creating vortex warning embed for {character_name}")
+
+            # Create warning embed
+            embed = discord.Embed(
+                title="🌪️ VORTEX WARNING! 🌪️",
+                description=f"**{character_name}** reported a vortex situation!",
+                color=discord.Color.red(),
+                timestamp=timestamp
+            )
+
+            # Add the chat message as a field - but truncate if too long
+            chat_display = chat_text[:1800] + "..." if len(chat_text) > 1800 else chat_text
+            embed.add_field(
+                name="Message",
+                value=f"```{chat_display}```",
+                inline=False
+            )
+
+            # Add warning footer
+            embed.set_footer(text="⚠️ Immediate attention required!")
+
+            logger.info(f"🔧 Getting AC Log channel {ACLOG_CHANNEL_ID}")
+            # Get AC Log channel
+            channel = self.client.get_channel(ACLOG_CHANNEL_ID)
+            logger.info(f"🔧 Channel result: {channel}")
+
+            if channel:
+                # Send the warning embed first
+                await channel.send(embed=embed)
+                logger.info(f"🌪️ Posted VORTEX WARNING embed to #{channel.name}")
+
+                # Try to send alert ping (might fail due to permissions)
+                try:
+                    await channel.send("🚨 VORTEX ALERT! 🚨")
+                    logger.info(f"🚨 Posted VORTEX ALERT ping to #{channel.name}")
+                except Exception as ping_error:
+                    logger.warning(f"⚠️ Could not send @here ping: {ping_error}")
+            else:
+                logger.error(f"❌ Could not find AC Log channel for vortex warning: {ACLOG_CHANNEL_ID}")
+
+        except Exception as e:
+            logger.error(f"❌ Error posting vortex warning to Discord: {e}")
+
     async def post_status_to_aclog(self, status_message: str):
         """Post status update to AC Log channel."""
         try:
@@ -817,6 +1048,63 @@ class DiscordRareMonitor:
         """Handle !icons grid to show icon grid compositions."""
         await message.channel.send("🚧 **Grid View Coming Soon!**\n\nThis feature will show multiple icons arranged in grids. For now, use:\n• `!icons` - Summary with examples\n• `!icons all` - Browse all icons\n• `!icons [name]` - Search specific items")
 
+    async def handle_test_great_command(self, message):
+        """Test great rare display with a random great rare icon."""
+        try:
+            import random
+
+            # Get icons directory
+            icons_dir = os.path.join(os.path.dirname(__file__), "icons")
+
+            if not os.path.exists(icons_dir):
+                await message.channel.send("❌ Icons directory not found!")
+                return
+
+            # Get all icon files
+            icon_files = [f for f in os.listdir(icons_dir) if f.endswith('_Icon.png')]
+
+            # Filter to get great rares only (exclude common rares)
+            great_rare_icons = []
+            for filename in icon_files:
+                # Convert filename to rare name
+                rare_name = filename.replace('_Icon.png', '').replace('_', ' ').replace("’", "'")
+                # Fix common naming issues
+                if "s " in rare_name:
+                    rare_name = rare_name.replace("s ", "'s ")
+
+                # Check if it's NOT a common rare
+                if not COMMON_RARES_PATTERN.match(rare_name):
+                    great_rare_icons.append((filename, rare_name))
+
+            if not great_rare_icons:
+                await message.channel.send("❌ No great rare icons found!")
+                return
+
+            # Pick a random great rare
+            filename, rare_name = random.choice(great_rare_icons)
+
+            # Create mock rare discovery data
+            mock_data = {
+                'name': rare_name,
+                'character_name': 'Test Character',
+                'timestamp': datetime.now().isoformat(),
+                'ew': 12.3,
+                'ns': 34.5,
+                'z': 56.7
+            }
+
+            # Send confirmation message first
+            await message.channel.send(f"🧪 **Testing great rare display with:** {rare_name}\n📤 Posting to great rares channel...")
+
+            # Post to great rares channel using existing function
+            await self.post_rare_to_discord(mock_data, "great")
+
+            logger.info(f"🧪 Test great rare posted: {rare_name} (from {filename})")
+
+        except Exception as e:
+            logger.error(f"❌ Error in test_great command: {e}", exc_info=True)
+            await message.channel.send(f"❌ Error testing great rare: {e}")
+
     async def start(self):
         """Start the Discord bot."""
         if not DISCORD_TOKEN:
@@ -835,6 +1123,7 @@ class DiscordRareMonitor:
         logger.info("🛑 Stopping Discord Rare Monitor Bot...")
         self.running = False
 
+        # Cancel WebSocket monitoring task
         if self.websocket_task:
            self.websocket_task.cancel()
            try:
@@ -842,6 +1131,14 @@ class DiscordRareMonitor:
        except asyncio.CancelledError:
            pass
 
+        # Cancel health monitoring task
+        if self.health_monitor_task:
+            self.health_monitor_task.cancel()
+            try:
+                await self.health_monitor_task
+            except asyncio.CancelledError:
+                pass
+
         if not self.client.is_closed():
             await self.client.close()
main.py (101 lines changed)
@@ -37,7 +37,7 @@ from db_async import (
     spawn_events,
     rare_events,
     character_inventories,
-    portal_discoveries,
+    portals,
     server_health_checks,
     server_status,
     init_db_async,
@@ -67,6 +67,7 @@ _cached_trails: dict = {"trails": []}
 _cached_total_rares: dict = {"all_time": 0, "today": 0, "last_updated": None}
 _cache_task: asyncio.Task | None = None
 _rares_cache_task: asyncio.Task | None = None
+_cleanup_task: asyncio.Task | None = None
 
 # Player tracking for debug purposes
 _player_history: list = []  # List of player sets from last 10 refreshes
@@ -255,13 +256,13 @@ async def get_player_count_from_treestats(server_name: str) -> int:
         return 0
 
 async def monitor_server_health():
-    """Background task to monitor server health every 30 seconds and cleanup old portals hourly."""
+    """Background task to monitor server health every 30 seconds and cleanup old portals every minute."""
     server_name = "Coldeve"
     server_address = "play.coldeve.ac"
     server_port = 9000
     check_interval = 30  # seconds
     player_count_interval = 300  # 5 minutes (like ThwargLauncher's 10 minutes, but more frequent)
-    portal_cleanup_interval = 3600  # 1 hour
+    portal_cleanup_interval = 60  # 1 minute
     last_player_count_check = 0
     last_portal_cleanup = 0
     current_player_count = None
@@ -381,12 +382,12 @@ async def monitor_server_health():
 
             logger.debug(f"Server health check: {status}, latency={latency_ms}ms, players={current_player_count}")
 
-            # Portal cleanup (run every hour)
+            # Portal cleanup (run every minute)
             current_time = time.time()
             if current_time - last_portal_cleanup >= portal_cleanup_interval:
                 try:
                     deleted_count = await cleanup_old_portals()
-                    logger.info(f"Portal cleanup: removed {deleted_count} old portal discoveries")
+                    logger.info(f"Portal cleanup: removed {deleted_count} portals older than 1 hour")
                     last_portal_cleanup = current_time
                 except Exception as cleanup_error:
                     logger.error(f"Portal cleanup error: {cleanup_error}", exc_info=True)
@@ -396,6 +397,20 @@ async def monitor_server_health():
 
         await asyncio.sleep(check_interval)
 
+async def cleanup_connections_loop():
+    """Background task to clean up stale WebSocket connections every 5 minutes."""
+    cleanup_interval = 300  # 5 minutes
+
+    logger.info("🧹 Starting WebSocket connection cleanup task")
+
+    while True:
+        try:
+            await asyncio.sleep(cleanup_interval)
+            logger.debug("🧹 Running periodic WebSocket connection cleanup")
+            await cleanup_stale_connections()
+        except Exception as e:
+            logger.error(f"WebSocket cleanup task error: {e}", exc_info=True)
+
 def _track_player_changes(new_players: list) -> None:
     """Track player changes for debugging flapping issues."""
     from datetime import datetime, timezone
@@ -872,11 +887,12 @@ async def on_startup():
     else:
         raise RuntimeError(f"Could not connect to database after {max_attempts} attempts")
     # Start background cache refresh (live & trails)
-    global _cache_task, _rares_cache_task, _server_health_task
+    global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
     _cache_task = asyncio.create_task(_refresh_cache_loop())
     _rares_cache_task = asyncio.create_task(_refresh_total_rares_cache())
     _server_health_task = asyncio.create_task(monitor_server_health())
-    logger.info("Background cache refresh and server monitoring tasks started")
+    _cleanup_task = asyncio.create_task(cleanup_connections_loop())
+    logger.info("Background cache refresh, server monitoring, and connection cleanup tasks started")
 
 @app.on_event("shutdown")
 async def on_shutdown():
     """Event handler triggered when application is shutting down.
@@ -884,7 +900,7 @@ async def on_shutdown():
     Ensures the database connection is closed cleanly.
     """
     # Stop cache refresh tasks
-    global _cache_task, _rares_cache_task, _server_health_task
+    global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
     if _cache_task:
         logger.info("Stopping background cache refresh task")
         _cache_task.cancel()
@@ -908,6 +924,14 @@ async def on_shutdown():
             await _server_health_task
         except asyncio.CancelledError:
             pass
+
+    if _cleanup_task:
+        logger.info("Stopping WebSocket connection cleanup task")
+        _cleanup_task.cancel()
+        try:
+            await _cleanup_task
+        except asyncio.CancelledError:
+            pass
     logger.info("Disconnecting from database")
     await database.disconnect()
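
The cancel-then-await-CancelledError sequence now appears four times across this commit (websocket_task and health_monitor_task in the bot, _server_health_task and _cleanup_task here). A small helper would collapse the repetition; a sketch using only the standard library (shutdown_task is a hypothetical name, not part of this commit):

    import asyncio
    from typing import Optional

    async def shutdown_task(task: Optional[asyncio.Task]) -> None:
        # Cancel a background task and swallow the resulting CancelledError
        if task is None:
            return
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass  # expected when the task is cancelled

    # Usage during shutdown: await shutdown_task(_cleanup_task)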
@@ -1083,37 +1107,34 @@ async def get_quest_status():
 
 @app.get("/portals")
 async def get_portals():
-    """Return unique portal discoveries from the last 24 hours."""
+    """Return all active portals (less than 1 hour old)."""
     try:
-        # Query unique portals from last 24 hours, keeping the most recent discovery of each
-        cutoff_time = datetime.now(timezone.utc) - timedelta(hours=24)
-
+        # No need for cutoff check - cleanup job handles expiration
         query = """
-            SELECT DISTINCT ON (portal_name)
-                character_name, portal_name, timestamp, ns, ew, z
-            FROM portal_discoveries
-            WHERE timestamp >= :cutoff_time
-            ORDER BY portal_name, timestamp DESC
+            SELECT portal_name, ns, ew, z, discovered_at, discovered_by
+            FROM portals
+            ORDER BY discovered_at DESC
         """
 
-        rows = await database.fetch_all(query, {"cutoff_time": cutoff_time})
+        rows = await database.fetch_all(query)
 
         portals = []
         for row in rows:
             portal = {
-                "character_name": row["character_name"],
                 "portal_name": row["portal_name"],
-                "timestamp": row["timestamp"].isoformat(),
-                "ns": row["ns"],
-                "ew": row["ew"],
-                "z": row["z"]
+                "coordinates": {
+                    "ns": row["ns"],
+                    "ew": row["ew"],
+                    "z": row["z"]
+                },
+                "discovered_at": row["discovered_at"].isoformat(),
+                "discovered_by": row["discovered_by"]
             }
             portals.append(portal)
 
         return {
             "portals": portals,
-            "portal_count": len(portals),
-            "cutoff_time": cutoff_time.isoformat()
+            "portal_count": len(portals)
         }
 
     except Exception as e:
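
For reference, the reshaped payload a client now receives from GET /portals looks like this (field names from the handler above; the values are illustrative only):

    {
        "portals": [
            {
                "portal_name": "Example Portal",
                "coordinates": {"ns": 34.5, "ew": 12.3, "z": 56.7},
                "discovered_at": "2024-01-01T12:00:00+00:00",
                "discovered_by": "Dunking Rares"
            }
        ],
        "portal_count": 1
    }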
@@ -1964,37 +1985,39 @@ async def ws_receive_snapshots(
                     ew = float(ew)
                     z = float(z)
 
-                    # Check if this portal was recently discovered (within last hour) to avoid duplicates
-                    recent_check = await database.fetch_one(
+                    # Round coordinates for comparison (0.01 tolerance)
+                    ns_rounded = round(ns, 2)
+                    ew_rounded = round(ew, 2)
+
+                    # Check if portal exists at these coordinates
+                    existing_portal = await database.fetch_one(
                         """
-                        SELECT id FROM portal_discoveries
-                        WHERE character_name = :character_name
-                        AND portal_name = :portal_name
-                        AND timestamp > :cutoff_time
+                        SELECT id FROM portals
+                        WHERE ROUND(ns::numeric, 2) = :ns_rounded
+                        AND ROUND(ew::numeric, 2) = :ew_rounded
                         LIMIT 1
                         """,
                         {
-                            "character_name": character_name,
-                            "portal_name": portal_name,
-                            "cutoff_time": timestamp - timedelta(hours=1)
+                            "ns_rounded": ns_rounded,
+                            "ew_rounded": ew_rounded
                         }
                     )
 
-                    if not recent_check:
-                        # Store portal discovery in database
+                    if not existing_portal:
+                        # Store new portal in database
                         await database.execute(
-                            portal_discoveries.insert().values(
-                                character_name=character_name,
+                            portals.insert().values(
                                 portal_name=portal_name,
-                                timestamp=timestamp,
                                 ns=ns,
                                 ew=ew,
-                                z=z
+                                z=z,
+                                discovered_at=timestamp,
+                                discovered_by=character_name
                             )
                         )
-                        logger.info(f"Recorded portal discovery: {portal_name} by {character_name} at {ns}, {ew}")
+                        logger.info(f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}")
                     else:
-                        logger.debug(f"Skipping duplicate portal discovery: {portal_name} by {character_name} (already discovered recently)")
+                        logger.debug(f"Portal already exists at {ns_rounded}, {ew_rounded}")
 
                     # Broadcast to browser clients for map updates
                     await _broadcast_to_browser_clients(data)
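
One caveat on the block above: the SELECT-then-INSERT is not atomic, so two snapshots reporting the same portal at the same instant can both pass the existence check, and the loser of the race then trips the unique_portal_coords index. Treating that violation as the "already exists" path keeps the handler race-safe; a sketch, assuming asyncpg is the underlying driver (its UniqueViolationError maps to PostgreSQL's unique_violation) and the portals/database objects from this diff:

    import asyncpg

    async def insert_portal_if_new(values: dict) -> bool:
        # Rely on unique_portal_coords to reject coordinate duplicates atomically
        try:
            await database.execute(portals.insert().values(**values))
            return True
        except asyncpg.exceptions.UniqueViolationError:
            return False  # another snapshot won the race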
@@ -480,8 +480,8 @@ async function fetchPortalData() {
     }
 
     const data = await response.json();
-    portalData = data.portals; // [{character_name, portal_name, ns, ew, z}]
-    console.log(`Loaded ${portalData.length} portal discoveries from last 24 hours`);
+    portalData = data.portals; // [{portal_name, coordinates: {ns, ew, z}, discovered_by, discovered_at}]
+    console.log(`Loaded ${portalData.length} portals from last hour`);
     renderPortals();
   } catch (err) {
     console.error('Failed to fetch portal data:', err);
@@ -526,9 +526,9 @@ function renderPortals() {
   clearPortals();
 
   for (const portal of portalData) {
-    // Coordinates are already floats from the API
-    const ns = portal.ns;
-    const ew = portal.ew;
+    // Extract coordinates from new API format
+    const ns = portal.coordinates.ns;
+    const ew = portal.coordinates.ew;
 
     // Convert to pixel coordinates
     const { x, y } = worldToPx(ew, ns);
@@ -538,7 +538,7 @@ function renderPortals() {
     icon.className = 'portal-icon';
     icon.style.left = `${x}px`;
     icon.style.top = `${y}px`;
-    icon.title = `${portal.portal_name} (discovered by ${portal.character_name})`;
+    icon.title = `${portal.portal_name} (discovered by ${portal.discovered_by})`;
 
     portalContainer.appendChild(icon);
   }