fixed portals

erik 2025-06-24 19:13:31 +00:00
parent dffd295091
commit 7ff94b59a8
4 changed files with 475 additions and 134 deletions

View file

@@ -127,18 +127,18 @@ character_inventories = Table(
UniqueConstraint("character_name", "item_id", name="uq_char_item"),
)
# Portal discoveries table for 24-hour live tracking
portal_discoveries = Table(
# Records player portal discoveries with 24-hour retention
"portal_discoveries",
# Portals table with coordinate-based uniqueness and 1-hour retention
portals = Table(
# Stores unique portals by coordinates with 1-hour retention
"portals",
metadata,
Column("id", Integer, primary_key=True),
Column("character_name", String, nullable=False, index=True),
Column("portal_name", String, nullable=False),
Column("timestamp", DateTime(timezone=True), nullable=False, index=True),
Column("ns", Float, nullable=False), # North/South coordinate as float
Column("ew", Float, nullable=False), # East/West coordinate as float
Column("z", Float, nullable=False), # Elevation as float
Column("ns", Float, nullable=False),
Column("ew", Float, nullable=False),
Column("z", Float, nullable=False),
Column("discovered_at", DateTime(timezone=True), nullable=False, index=True),
Column("discovered_by", String, nullable=False),
)
# Server health monitoring tables
@@ -230,18 +230,39 @@ async def init_db_async():
except Exception as e:
print(f"Warning: failed to set retention/compression policies: {e}")
async def cleanup_old_portals():
"""Clean up portal discoveries older than 24 hours."""
# Create unique constraint on rounded portal coordinates
try:
cutoff_time = datetime.now(timezone.utc) - timedelta(hours=24)
with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
# Drop old portal_discoveries table if it exists
conn.execute(text("DROP TABLE IF EXISTS portal_discoveries CASCADE"))
# Delete old portal discoveries
# Create unique constraint on rounded coordinates for the new portals table
conn.execute(text(
"""CREATE UNIQUE INDEX IF NOT EXISTS unique_portal_coords
ON portals (ROUND(ns::numeric, 2), ROUND(ew::numeric, 2))"""
))
# Create index on coordinates for efficient lookups
conn.execute(text(
"CREATE INDEX IF NOT EXISTS idx_portals_coords ON portals (ns, ew)"
))
print("Portal table indexes and constraints created successfully")
except Exception as e:
print(f"Warning: failed to create portal table constraints: {e}")
async def cleanup_old_portals():
"""Clean up portals older than 1 hour."""
try:
cutoff_time = datetime.now(timezone.utc) - timedelta(hours=1)
# Delete old portals
result = await database.execute(
"DELETE FROM portal_discoveries WHERE timestamp < :cutoff_time",
"DELETE FROM portals WHERE discovered_at < :cutoff_time",
{"cutoff_time": cutoff_time}
)
print(f"Cleaned up {result} portal discoveries older than 24 hours")
print(f"Cleaned up {result} portals older than 1 hour")
return result
except Exception as e:
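
A note on the uniqueness rule above: unique_portal_coords indexes ns/ew rounded to two decimals, so any two discoveries whose coordinates round to the same hundredth count as the same portal. A minimal sketch of that rule in Python (illustrative only; Postgres ROUND on numeric rounds halves away from zero while Python's round() rounds halves to even, so exact .005 ties can differ between the two):

def same_portal(ns1: float, ew1: float, ns2: float, ew2: float) -> bool:
    # Mirrors ROUND(ns::numeric, 2) / ROUND(ew::numeric, 2) from the index above.
    return (round(ns1, 2), round(ew1, 2)) == (round(ns2, 2), round(ew2, 2))

# 12.341 and 12.338 both round to 12.34, so these count as one portal:
assert same_portal(12.341, 34.502, 12.338, 34.498)
assert not same_portal(12.341, 34.502, 12.361, 34.502)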

View file

@@ -44,8 +44,8 @@ MONITOR_CHARACTER = os.getenv('MONITOR_CHARACTER', 'Dunking Rares')
# Comprehensive rare classification patterns
# Common Rares - Exact match pattern (70 items)
COMMON_RARES_PATTERN = re.compile(r"^(Alchemist's Crystal|Scholar's Crystal|Smithy's Crystal|Hunter's Crystal|Observer's Crystal|Thorsten's Crystal|Elysa's Crystal|Chef's Crystal|Enchanter's Crystal|Oswald's Crystal|Deceiver's Crystal|Fletcher's Crystal|Physician's Crystal|Artificer's Crystal|Tinker's Crystal|Vaulter's Crystal|Monarch's Crystal|Life Giver's Crystal|Thief's Crystal|Adherent's Crystal|Resister's Crystal|Imbuer's Crystal|Converter's Crystal|Evader's Crystal|Dodger's Crystal|Zefir's Crystal|Ben Ten's Crystal|Corruptor's Crystal|Artist's Crystal|T'ing's Crystal|Warrior's Crystal|Brawler's Crystal|Hieromancer's Crystal|Rogue's Crystal|Berzerker's Crystal|Lugian's Pearl|Ursuin's Pearl|Wayfarer's Pearl|Sprinter's Pearl|Magus's Pearl|Lich's Pearl|Warrior's Jewel|Melee's Jewel|Mage's Jewel|Duelist's Jewel|Archer's Jewel|Tusker's Jewel|Olthoi's Jewel|Inferno's Jewel|Gelid's Jewel|Astyrrian's Jewel|Executor's Jewel|Pearl of Blood Drinking|Pearl of Heart Seeking|Pearl of Defending|Pearl of Swift Killing|Pearl of Spirit Drinking|Pearl of Hermetic Linking|Pearl of Blade Baning|Pearl of Pierce Baning|Pearl of Bludgeon Baning|Pearl of Acid Baning|Pearl of Flame Baning|Pearl of Frost Baning|Pearl of Lightning Baning|Pearl of Impenetrability|Refreshing Elixir|Invigorating Elixir|Miraculous Elixir|Medicated Health Kit|Medicated Stamina Kit|Medicated Mana Kit|Casino Exquisite Keyring)$")
# Common Rares - Exact match pattern (71 items)
COMMON_RARES_PATTERN = re.compile(r"^(Alchemist's Crystal|Scholar's Crystal|Smithy's Crystal|Hunter's Crystal|Observer's Crystal|Thorsten's Crystal|Elysa's Crystal|Chef's Crystal|Enchanter's Crystal|Oswald's Crystal|Deceiver's Crystal|Fletcher's Crystal|Physician's Crystal|Artificer's Crystal|Tinker's Crystal|Vaulter's Crystal|Monarch's Crystal|Life Giver's Crystal|Thief's Crystal|Adherent's Crystal|Resister's Crystal|Imbuer's Crystal|Converter's Crystal|Evader's Crystal|Dodger's Crystal|Zefir's Crystal|Ben Ten's Crystal|Corruptor's Crystal|Artist's Crystal|T'ing's Crystal|Warrior's Crystal|Brawler's Crystal|Hieromancer's Crystal|Rogue's Crystal|Berzerker's Crystal|Knight's Crystal|Lugian's Pearl|Ursuin's Pearl|Wayfarer's Pearl|Sprinter's Pearl|Magus's Pearl|Lich's Pearl|Warrior's Jewel|Melee's Jewel|Mage's Jewel|Duelist's Jewel|Archer's Jewel|Tusker's Jewel|Olthoi's Jewel|Inferno's Jewel|Gelid's Jewel|Astyrrian's Jewel|Executor's Jewel|Pearl of Blood Drinking|Pearl of Heart Seeking|Pearl of Defending|Pearl of Swift Killing|Pearl of Spirit Drinking|Pearl of Hermetic Linking|Pearl of Blade Baning|Pearl of Pierce Baning|Pearl of Bludgeon Baning|Pearl of Acid Baning|Pearl of Flame Baning|Pearl of Frost Baning|Pearl of Lightning Baning|Pearl of Impenetrability|Refreshing Elixir|Invigorating Elixir|Miraculous Elixir|Medicated Health Kit|Medicated Stamina Kit|Medicated Mana Kit|Casino Exquisite Keyring)$")
# Combined pattern for detecting any rare in chat messages (simplified for common detection)
RARE_IN_CHAT_PATTERN = re.compile(r"(Crystal|Pearl|Jewel|Elixir|Kit|Hieroglyph|Pictograph|Ideograph|Rune|Infinite|Eternal|Perennial|Foolproof|Limitless|Shimmering|Gelidite|Leikotha|Frore|Staff of|Count Renari|Wand of)")
@@ -67,6 +67,7 @@ class DiscordRareMonitor:
# WebSocket connection tracking
self.websocket_task: Optional[asyncio.Task] = None
self.health_monitor_task: Optional[asyncio.Task] = None
self.running = False
# Setup Discord event handlers
@@ -112,12 +113,27 @@ class DiscordRareMonitor:
self.websocket_task = asyncio.create_task(self.monitor_websocket())
logger.info("🔄 Started WebSocket monitoring task")
# Start health monitoring task
self.health_monitor_task = asyncio.create_task(self.monitor_websocket_health())
logger.info("💓 Started WebSocket health monitoring task")
@self.client.event
async def on_disconnect():
logger.warning("⚠️ Discord client disconnected")
self.running = False
if self.websocket_task:
self.websocket_task.cancel()
logger.warning("⚠️ Discord client disconnected (gateway connection lost)")
# Don't stop WebSocket monitoring on temporary Discord disconnects
# Discord will attempt to resume automatically
@self.client.event
async def on_resumed():
logger.info("🔄 Discord session resumed - checking WebSocket health")
# Check if WebSocket task is still running after Discord resume
if not self.websocket_task or self.websocket_task.done():
logger.warning("🔧 WebSocket task was lost during Discord disconnect - restarting")
await self.post_status_to_aclog("🔄 Discord resumed: WebSocket was lost, restarting connection")
self.websocket_task = asyncio.create_task(self.monitor_websocket())
else:
logger.info("✅ WebSocket task still healthy after Discord resume")
await self.post_status_to_aclog("✅ Discord resumed: WebSocket connection still healthy")
@self.client.event
async def on_message(message):
@@ -170,120 +186,195 @@ class DiscordRareMonitor:
logger.info("📋 Calling handle_icons_summary (no args)")
await self.handle_icons_summary(message)
# Handle !test_great command to test great rare display
elif message.content.startswith('!test_great'):
logger.info(f"🧪 !test_great command received from {message.author}")
await self.handle_test_great_command(message)
async def monitor_websocket_health(self):
"""Periodically monitor WebSocket task health and restart if needed."""
health_check_interval = 60 # Check every 1 minute
while self.running:
try:
await asyncio.sleep(health_check_interval)
if not self.running:
break
# Check if WebSocket task is still alive
if not self.websocket_task or self.websocket_task.done():
logger.warning("🚨 WebSocket task health check failed - task is not running")
# Check if task completed with an exception
if self.websocket_task and self.websocket_task.done():
try:
exception = self.websocket_task.exception()
if exception:
logger.error(f"🚨 WebSocket task failed with exception: {exception}")
except Exception as e:
logger.warning(f"Could not get task exception: {e}")
# Restart the WebSocket monitoring task
logger.info("🔧 Restarting WebSocket monitoring task")
await self.post_status_to_aclog("🚨 Health check detected WebSocket failure - restarting connection")
self.websocket_task = asyncio.create_task(self.monitor_websocket())
else:
logger.debug("💓 WebSocket task health check passed")
except asyncio.CancelledError:
logger.info("💓 WebSocket health monitoring task cancelled")
break
except Exception as e:
logger.error(f"💓 WebSocket health monitoring error: {e}", exc_info=True)
# Continue monitoring despite errors
async def monitor_websocket(self):
"""Monitor Dereth Tracker WebSocket for rare events with robust reconnection."""
retry_delay = 5 # seconds
max_retry_delay = 300 # 5 minutes
"""Monitor Dereth Tracker WebSocket with immediate failure detection and reconnection."""
retry_delay = 1 # Start with immediate retry
max_retry_delay = 60 # Max 1 minute delay
consecutive_failures = 0
max_consecutive_failures = 10
max_consecutive_failures = 20 # Allow more attempts before giving up
while self.running:
websocket = None
last_message_time = time.time()
health_check_interval = 60 # Check health every 60 seconds
message_timeout = 180 # Consider connection dead if no messages for 3 minutes
connection_failed = False
connection_start_time = None
try:
# Connect to live endpoint (no authentication needed for browsers)
# Connect to live endpoint with built-in ping/pong for failure detection
logger.info(f"🔗 Connecting to WebSocket: {WEBSOCKET_URL}")
# Add connection timeout and ping interval for better connection health
websocket = await websockets.connect(
WEBSOCKET_URL,
ping_interval=30, # Send ping every 30 seconds
ping_timeout=10, # Wait 10 seconds for pong
close_timeout=10 # Wait 10 seconds for close
ping_interval=20, # Built-in ping every 20 seconds
ping_timeout=10, # Fail if no pong response in 10 seconds
close_timeout=5 # Quick close timeout
)
logger.info("✅ WebSocket connected successfully")
retry_delay = 5 # Reset retry delay on successful connection
connection_start_time = time.time()
logger.info(f"✅ WebSocket connected successfully at {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(connection_start_time))}")
retry_delay = 1 # Reset retry delay on successful connection
consecutive_failures = 0 # Reset failure counter
last_message_time = time.time() # Reset message timer
# Send a test message to Discord to indicate connection restored
# Log connection details
logger.info(f"📊 Connection details: Local={websocket.local_address}, Remote={websocket.remote_address}")
# Send connection established message
await self.post_status_to_aclog("🔗 WebSocket connection established")
# Create tasks for message processing and health checking
async def process_messages():
nonlocal last_message_time
# Simple message processing with comprehensive error handling
try:
message_count = 0
last_health_log = time.time()
health_log_interval = 600 # Log health every 10 minutes
async for message in websocket:
if not self.running:
break
last_message_time = time.time()
message_count += 1
logger.debug(f"📨 Raw WebSocket message: {message[:100]}...")
await self.process_websocket_message(message)
async def health_check():
"""Periodically check connection health and force reconnect if needed."""
while self.running and websocket and not websocket.closed:
await asyncio.sleep(health_check_interval)
# Periodic health logging
current_time = time.time()
time_since_last_message = current_time - last_message_time
if current_time - last_health_log >= health_log_interval:
connection_duration = current_time - connection_start_time
hours = connection_duration / 3600
logger.info(f"💓 Connection health: {hours:.2f}h uptime, {message_count} messages received")
last_health_log = current_time
if time_since_last_message > message_timeout:
logger.warning(f"⚠️ No messages received for {time_since_last_message:.0f} seconds, forcing reconnect")
await self.post_status_to_aclog(f"⚠️ WebSocket appears dead, reconnecting...")
if websocket and not websocket.closed:
await websocket.close()
break
else:
logger.debug(f"💓 WebSocket health check passed (last message {time_since_last_message:.0f}s ago)")
# If async for ends, connection was closed
connection_duration = time.time() - connection_start_time if connection_start_time else 0
hours = connection_duration / 3600
# Run both tasks concurrently
message_task = asyncio.create_task(process_messages())
health_task = asyncio.create_task(health_check())
# Wait for either task to complete (health check failure or message loop exit)
done, pending = await asyncio.wait(
[message_task, health_task],
return_when=asyncio.FIRST_COMPLETED
)
# Cancel any remaining tasks
for task in pending:
task.cancel()
# Try to get close code and reason
close_code = None
close_reason = None
try:
await task
except asyncio.CancelledError:
close_code = websocket.close_code
close_reason = websocket.close_reason
except:
pass
logger.warning(f"🔌 WebSocket connection ended after {hours:.2f} hours (async for loop exited)")
logger.warning(f"📊 Close details: code={close_code}, reason='{close_reason}'")
connection_failed = True
# Post disconnect notification to Discord
await self.post_status_to_aclog(f"🔌 WebSocket disconnected after {hours:.2f}h (code={close_code}), reconnecting...")
except asyncio.CancelledError:
logger.info("📡 WebSocket task cancelled")
raise
except Exception as e:
# Any exception during message processing indicates connection failure
logger.warning(f"⚠️ WebSocket message processing failed: {type(e).__name__}: {e}")
connection_failed = True
raise # Re-raise to trigger reconnection
except websockets.exceptions.ConnectionClosed as e:
logger.warning(f"⚠️ WebSocket connection closed: {e}")
connection_duration = time.time() - connection_start_time if connection_start_time else 0
hours = connection_duration / 3600
logger.warning(f"⚠️ WebSocket connection closed after {hours:.2f} hours: {e}")
logger.warning(f"📊 ConnectionClosed details: code={e.code}, reason='{e.reason}'")
consecutive_failures += 1
await self.post_status_to_aclog(f"⚠️ WebSocket disconnected (attempt {consecutive_failures})")
await self.post_status_to_aclog(f"⚠️ WebSocket disconnected after {hours:.2f}h, reconnecting...")
except websockets.exceptions.InvalidStatusCode as e:
logger.error(f"❌ WebSocket invalid status code: {e}")
logger.error(f"❌ WebSocket server error: {e}")
consecutive_failures += 1
await self.post_status_to_aclog(f"❌ WebSocket connection failed: {e}")
await self.post_status_to_aclog(f"❌ Server error {e.status_code}, retrying...")
except (OSError, ConnectionRefusedError) as e:
logger.warning(f"⚠️ Network connection failed: {e}")
consecutive_failures += 1
await self.post_status_to_aclog(f"⚠️ Network error, retrying...")
except Exception as e:
logger.error(f"❌ WebSocket error: {e}")
connection_duration = time.time() - connection_start_time if connection_start_time else 0
hours = connection_duration / 3600
logger.error(f"❌ WebSocket error after {hours:.2f} hours: {type(e).__name__}: {e}")
logger.error(f"📊 Exception details: {repr(e)}")
consecutive_failures += 1
await self.post_status_to_aclog(f"❌ WebSocket error: {e}")
await self.post_status_to_aclog(f"Connection error after {hours:.2f}h: {type(e).__name__}")
finally:
# Ensure websocket is properly closed
if websocket and not websocket.closed:
if websocket:
try:
await websocket.close()
except:
except Exception:
pass
# Log final connection stats
if connection_start_time:
connection_duration = time.time() - connection_start_time
hours = connection_duration / 3600
logger.info(f"📊 Connection summary: Duration={hours:.2f}h, Total failures={consecutive_failures}")
# If we got here, the connection failed somehow
if connection_failed and consecutive_failures == 0:
consecutive_failures = 1 # Count silent failures
logger.warning("⚠️ Connection failed without exception - forcing reconnection")
await self.post_status_to_aclog("⚠️ WebSocket connection lost, reconnecting...")
# Check if we should keep retrying
if consecutive_failures >= max_consecutive_failures:
logger.error(f"❌ Too many consecutive failures ({consecutive_failures}). Stopping reconnection attempts.")
await self.post_status_to_aclog(f"❌ WebSocket reconnection failed after {consecutive_failures} attempts. Bot may need restart.")
break
if self.running:
# Immediate retry for first few failures, then exponential backoff
if self.running and consecutive_failures > 0:
if consecutive_failures <= 3:
logger.info(f"🔄 Immediate reconnection attempt #{consecutive_failures}")
await asyncio.sleep(1) # Brief pause to avoid tight loop
else:
logger.info(f"🔄 Retrying WebSocket connection in {retry_delay} seconds... (failure #{consecutive_failures})")
await asyncio.sleep(retry_delay)
# Exponential backoff with max delay
retry_delay = min(retry_delay * 2, max_retry_delay)
retry_delay = min(retry_delay * 2, max_retry_delay) # Exponential backoff
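
For reference, the retry schedule this loop produces: three 1-second retries, then exponential backoff starting at 1 second and doubling up to the 60-second cap, giving up after 20 consecutive failures. A small sketch that reproduces the sleep sequence (constants taken from the code above; the reset to 1 on a successful connection is omitted):

def reconnect_delays(max_retry_delay: int = 60, max_failures: int = 20) -> list[int]:
    # Failures 1-3 sleep 1s; later failures sleep retry_delay, then double it.
    delays, retry_delay = [], 1
    for failure in range(1, max_failures + 1):
        if failure <= 3:
            delays.append(1)
        else:
            delays.append(retry_delay)
            retry_delay = min(retry_delay * 2, max_retry_delay)
    return delays

# reconnect_delays()[:9] -> [1, 1, 1, 1, 2, 4, 8, 16, 32]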
async def process_websocket_message(self, raw_message: str):
"""Process incoming WebSocket message."""
@@ -344,11 +435,88 @@ class DiscordRareMonitor:
if character_name != MONITOR_CHARACTER:
return
# Check for special vortex warning message FIRST - simplified matching
# Debug: log what we're checking
logger.debug(f"🔍 Checking for vortex in: {repr(chat_text)}")
if "m in whirlwind of vortexes" in chat_text:
logger.warning(f"⚠️🌪️ VORTEX WARNING from {character_name}: {chat_text}")
# Get timestamp for the warning
from datetime import datetime
timestamp_str = data.get('timestamp', '')
try:
if timestamp_str:
if timestamp_str.endswith('Z'):
timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
else:
timestamp = datetime.fromisoformat(timestamp_str)
else:
timestamp = datetime.now()
except ValueError:
timestamp = datetime.now()
time_str = timestamp.strftime("%H:%M:%S")
# Extract the actual character name from allegiance chat
# Format: [Allegiance] Character Name says, "message"
actual_character = "Unknown Character"
if "[Allegiance]" in chat_text and " says," in chat_text:
try:
# Extract between "] " and " says,"
start = chat_text.find("] ") + 2
end = chat_text.find(" says,")
if start > 1 and end > start:
actual_character = chat_text[start:end]
except:
actual_character = "Unknown Character"
try:
channel = self.client.get_channel(ACLOG_CHANNEL_ID)
if channel:
# Create clean vortex warning embed
embed = discord.Embed(
title="🌪️ **VORTEX WARNING** 🌪️",
description=f"**{actual_character}** is caught in vortexes!",
color=0xFF4500 # Orange-red
)
# Add time and character info
embed.add_field(
name="🕐 **Time**",
value=f"{time_str}",
inline=True
)
embed.add_field(
name="🎯 **Character**",
value=f"**{actual_character}**",
inline=True
)
# Add footer
embed.set_footer(text="Someone needs help with vortexes!")
# Send clean warning
await channel.send(embed=embed)
logger.info(f"🌪️ Posted vortex warning for {actual_character} to #{channel.name}")
else:
logger.error(f"❌ Could not find channel {ACLOG_CHANNEL_ID}")
except Exception as e:
logger.error(f"💥 VORTEX WARNING POSTING FAILED: {e}", exc_info=True)
return
elif "whirlwind of vortex" in chat_text.lower():
logger.debug(f"🌪️ Found 'whirlwind of vortex' but not exact match in: {repr(chat_text)}")
elif "jeebus" in chat_text.lower():
logger.debug(f"👀 Found 'jeebus' but not vortex pattern in: {repr(chat_text)}")
# Skip if this message contains any rare names (common or great)
if RARE_IN_CHAT_PATTERN.search(chat_text) or self.is_rare_message(chat_text):
logger.debug(f"🎯 Skipping rare message from {character_name}: {chat_text}")
return
# Regular chat logging
logger.info(f"💬 Chat from {character_name}: {chat_text}")
# Post to AC Log channel
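
The find()-based speaker extraction above could also be a single regex. A hedged sketch, assuming the allegiance chat format noted in the comment ([Allegiance] Character Name says, "message"); the pattern and helper name are illustrative, not part of this commit:

import re

ALLEGIANCE_SAYS = re.compile(r'\[Allegiance\]\s+(.+?)\s+says,\s+"(.*)"')

def parse_allegiance_chat(chat_text: str) -> tuple[str, str]:
    # Returns (speaker, message body), with the same fallback as the code above.
    m = ALLEGIANCE_SAYS.search(chat_text)
    if not m:
        return "Unknown Character", chat_text
    return m.group(1), m.group(2)

# parse_allegiance_chat('[Allegiance] Jeebus says, "help, vortexes"')
# -> ('Jeebus', 'help, vortexes')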
@@ -542,6 +710,69 @@ class DiscordRareMonitor:
except Exception as e:
logger.error(f"❌ Error posting rare debug to Discord: {e}")
async def post_vortex_warning_to_discord(self, data: dict):
"""Post vortex warning as a special alert message to Discord."""
logger.info(f"🔧 Starting vortex warning posting process...")
try:
character_name = data.get('character_name', 'Unknown Character')
chat_text = data.get('text', '')
timestamp_str = data.get('timestamp', '')
# Parse timestamp
try:
if timestamp_str:
if timestamp_str.endswith('Z'):
timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
else:
timestamp = datetime.fromisoformat(timestamp_str)
else:
timestamp = datetime.now()
except ValueError:
timestamp = datetime.now()
logger.info(f"🔧 Creating vortex warning embed for {character_name}")
# Create warning embed
embed = discord.Embed(
title="🌪️ VORTEX WARNING! 🌪️",
description=f"**{character_name}** reported a vortex situation!",
color=discord.Color.red(),
timestamp=timestamp
)
# Add the chat message as a field - but truncate if too long
chat_display = chat_text[:1800] + "..." if len(chat_text) > 1800 else chat_text
embed.add_field(
name="Message",
value=f"```{chat_display}```",
inline=False
)
# Add warning footer
embed.set_footer(text="⚠️ Immediate attention required!")
logger.info(f"🔧 Getting AC Log channel {ACLOG_CHANNEL_ID}")
# Get AC Log channel
channel = self.client.get_channel(ACLOG_CHANNEL_ID)
logger.info(f"🔧 Channel result: {channel}")
if channel:
# Send the warning embed first
await channel.send(embed=embed)
logger.info(f"🌪️ Posted VORTEX WARNING embed to #{channel.name}")
# Try to send alert ping (might fail due to permissions)
try:
await channel.send("🚨 VORTEX ALERT! 🚨")
logger.info(f"🚨 Posted VORTEX ALERT ping to #{channel.name}")
except Exception as ping_error:
logger.warning(f"⚠️ Could not send @here ping: {ping_error}")
else:
logger.error(f"❌ Could not find AC Log channel for vortex warning: {ACLOG_CHANNEL_ID}")
except Exception as e:
logger.error(f"❌ Error posting vortex warning to Discord: {e}")
async def post_status_to_aclog(self, status_message: str):
"""Post status update to AC Log channel."""
try:
@@ -817,6 +1048,63 @@ class DiscordRareMonitor:
"""Handle !icons grid to show icon grid compositions."""
await message.channel.send("🚧 **Grid View Coming Soon!**\n\nThis feature will show multiple icons arranged in grids. For now, use:\n• `!icons` - Summary with examples\n• `!icons all` - Browse all icons\n• `!icons [name]` - Search specific items")
async def handle_test_great_command(self, message):
"""Test great rare display with a random great rare icon."""
try:
import random
# Get icons directory
icons_dir = os.path.join(os.path.dirname(__file__), "icons")
if not os.path.exists(icons_dir):
await message.channel.send("❌ Icons directory not found!")
return
# Get all icon files
icon_files = [f for f in os.listdir(icons_dir) if f.endswith('_Icon.png')]
# Filter to get great rares only (exclude common rares)
great_rare_icons = []
for filename in icon_files:
# Convert filename to rare name
rare_name = filename.replace('_Icon.png', '').replace('_', ' ').replace("'", "'")
# Fix common naming issues
if "s " in rare_name:
rare_name = rare_name.replace("s ", "'s ")
# Check if it's NOT a common rare
if not COMMON_RARES_PATTERN.match(rare_name):
great_rare_icons.append((filename, rare_name))
if not great_rare_icons:
await message.channel.send("❌ No great rare icons found!")
return
# Pick a random great rare
filename, rare_name = random.choice(great_rare_icons)
# Create mock rare discovery data
mock_data = {
'name': rare_name,
'character_name': 'Test Character',
'timestamp': datetime.now().isoformat(),
'ew': 12.3,
'ns': 34.5,
'z': 56.7
}
# Send confirmation message first
await message.channel.send(f"🧪 **Testing great rare display with:** {rare_name}\n📤 Posting to great rares channel...")
# Post to great rares channel using existing function
await self.post_rare_to_discord(mock_data, "great")
logger.info(f"🧪 Test great rare posted: {rare_name} (from {filename})")
except Exception as e:
logger.error(f"❌ Error in test_great command: {e}", exc_info=True)
await message.channel.send(f"❌ Error testing great rare: {e}")
async def start(self):
"""Start the Discord bot."""
if not DISCORD_TOKEN:
@@ -835,6 +1123,7 @@ class DiscordRareMonitor:
logger.info("🛑 Stopping Discord Rare Monitor Bot...")
self.running = False
# Cancel WebSocket monitoring task
if self.websocket_task:
self.websocket_task.cancel()
try:
@@ -842,6 +1131,14 @@ class DiscordRareMonitor:
except asyncio.CancelledError:
pass
# Cancel health monitoring task
if self.health_monitor_task:
self.health_monitor_task.cancel()
try:
await self.health_monitor_task
except asyncio.CancelledError:
pass
if not self.client.is_closed():
await self.client.close()
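
The cancel/await/suppress-CancelledError pattern now appears twice in stop(), once for the WebSocket task and once for the health monitor task. A small helper could fold both blocks into one call; a sketch (the helper is hypothetical, not part of the commit):

import asyncio
import contextlib

async def cancel_and_wait(task: asyncio.Task | None) -> None:
    # Cancel a background task and wait for it to unwind, swallowing
    # only the expected CancelledError.
    if task is None or task.done():
        return
    task.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await task

# In stop(): await cancel_and_wait(self.websocket_task)
#            await cancel_and_wait(self.health_monitor_task)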

main.py (101 changes)
View file

@@ -37,7 +37,7 @@ from db_async import (
spawn_events,
rare_events,
character_inventories,
portal_discoveries,
portals,
server_health_checks,
server_status,
init_db_async,
@@ -67,6 +67,7 @@ _cached_trails: dict = {"trails": []}
_cached_total_rares: dict = {"all_time": 0, "today": 0, "last_updated": None}
_cache_task: asyncio.Task | None = None
_rares_cache_task: asyncio.Task | None = None
_cleanup_task: asyncio.Task | None = None
# Player tracking for debug purposes
_player_history: list = [] # List of player sets from last 10 refreshes
@@ -255,13 +256,13 @@ async def get_player_count_from_treestats(server_name: str) -> int:
return 0
async def monitor_server_health():
"""Background task to monitor server health every 30 seconds and cleanup old portals hourly."""
"""Background task to monitor server health every 30 seconds and cleanup old portals every minute."""
server_name = "Coldeve"
server_address = "play.coldeve.ac"
server_port = 9000
check_interval = 30 # seconds
player_count_interval = 300 # 5 minutes (like ThwargLauncher's 10 minutes, but more frequent)
portal_cleanup_interval = 3600 # 1 hour
portal_cleanup_interval = 60 # 1 minute
last_player_count_check = 0
last_portal_cleanup = 0
current_player_count = None
@@ -381,12 +382,12 @@ async def monitor_server_health():
logger.debug(f"Server health check: {status}, latency={latency_ms}ms, players={current_player_count}")
# Portal cleanup (run every hour)
# Portal cleanup (run every minute)
current_time = time.time()
if current_time - last_portal_cleanup >= portal_cleanup_interval:
try:
deleted_count = await cleanup_old_portals()
logger.info(f"Portal cleanup: removed {deleted_count} old portal discoveries")
logger.info(f"Portal cleanup: removed {deleted_count} portals older than 1 hour")
last_portal_cleanup = current_time
except Exception as cleanup_error:
logger.error(f"Portal cleanup error: {cleanup_error}", exc_info=True)
@@ -396,6 +397,20 @@ async def monitor_server_health():
await asyncio.sleep(check_interval)
async def cleanup_connections_loop():
"""Background task to clean up stale WebSocket connections every 5 minutes."""
cleanup_interval = 300 # 5 minutes
logger.info("🧹 Starting WebSocket connection cleanup task")
while True:
try:
await asyncio.sleep(cleanup_interval)
logger.debug("🧹 Running periodic WebSocket connection cleanup")
await cleanup_stale_connections()
except Exception as e:
logger.error(f"WebSocket cleanup task error: {e}", exc_info=True)
def _track_player_changes(new_players: list) -> None:
"""Track player changes for debugging flapping issues."""
from datetime import datetime, timezone
@@ -872,11 +887,12 @@ async def on_startup():
else:
raise RuntimeError(f"Could not connect to database after {max_attempts} attempts")
# Start background cache refresh (live & trails)
global _cache_task, _rares_cache_task, _server_health_task
global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
_cache_task = asyncio.create_task(_refresh_cache_loop())
_rares_cache_task = asyncio.create_task(_refresh_total_rares_cache())
_server_health_task = asyncio.create_task(monitor_server_health())
logger.info("Background cache refresh and server monitoring tasks started")
_cleanup_task = asyncio.create_task(cleanup_connections_loop())
logger.info("Background cache refresh, server monitoring, and connection cleanup tasks started")
@app.on_event("shutdown")
async def on_shutdown():
"""Event handler triggered when application is shutting down.
@@ -884,7 +900,7 @@ async def on_shutdown():
Ensures the database connection is closed cleanly.
"""
# Stop cache refresh tasks
global _cache_task, _rares_cache_task, _server_health_task
global _cache_task, _rares_cache_task, _server_health_task, _cleanup_task
if _cache_task:
logger.info("Stopping background cache refresh task")
_cache_task.cancel()
@@ -908,6 +924,14 @@ async def on_shutdown():
await _server_health_task
except asyncio.CancelledError:
pass
if _cleanup_task:
logger.info("Stopping WebSocket connection cleanup task")
_cleanup_task.cancel()
try:
await _cleanup_task
except asyncio.CancelledError:
pass
logger.info("Disconnecting from database")
await database.disconnect()
@@ -1083,37 +1107,34 @@ async def get_quest_status():
@app.get("/portals")
async def get_portals():
"""Return unique portal discoveries from the last 24 hours."""
"""Return all active portals (less than 1 hour old)."""
try:
# Query unique portals from last 24 hours, keeping the most recent discovery of each
cutoff_time = datetime.now(timezone.utc) - timedelta(hours=24)
# No need for cutoff check - cleanup job handles expiration
query = """
SELECT DISTINCT ON (portal_name)
character_name, portal_name, timestamp, ns, ew, z
FROM portal_discoveries
WHERE timestamp >= :cutoff_time
ORDER BY portal_name, timestamp DESC
SELECT portal_name, ns, ew, z, discovered_at, discovered_by
FROM portals
ORDER BY discovered_at DESC
"""
rows = await database.fetch_all(query, {"cutoff_time": cutoff_time})
rows = await database.fetch_all(query)
portals = []
for row in rows:
portal = {
"character_name": row["character_name"],
"portal_name": row["portal_name"],
"timestamp": row["timestamp"].isoformat(),
"coordinates": {
"ns": row["ns"],
"ew": row["ew"],
"z": row["z"]
},
"discovered_at": row["discovered_at"].isoformat(),
"discovered_by": row["discovered_by"]
}
portals.append(portal)
return {
"portals": portals,
"portal_count": len(portals),
"cutoff_time": cutoff_time.isoformat()
"portal_count": len(portals)
}
except Exception as e:
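
For reference, the shape the rewritten endpoint returns (and that map.js consumes below). Field names come from the code above; the values here are made up:

example_response = {
    "portals": [
        {
            "portal_name": "Gateway",  # hypothetical portal
            "coordinates": {"ns": 12.34, "ew": -45.67, "z": 0.0},
            "discovered_at": "2025-06-24T19:13:31+00:00",
            "discovered_by": "Dunking Rares",
        }
    ],
    "portal_count": 1,
}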
@@ -1964,37 +1985,39 @@ async def ws_receive_snapshots(
ew = float(ew)
z = float(z)
# Check if this portal was recently discovered (within last hour) to avoid duplicates
recent_check = await database.fetch_one(
# Round coordinates for comparison (0.01 tolerance)
ns_rounded = round(ns, 2)
ew_rounded = round(ew, 2)
# Check if portal exists at these coordinates
existing_portal = await database.fetch_one(
"""
SELECT id FROM portal_discoveries
WHERE character_name = :character_name
AND portal_name = :portal_name
AND timestamp > :cutoff_time
SELECT id FROM portals
WHERE ROUND(ns::numeric, 2) = :ns_rounded
AND ROUND(ew::numeric, 2) = :ew_rounded
LIMIT 1
""",
{
"character_name": character_name,
"portal_name": portal_name,
"cutoff_time": timestamp - timedelta(hours=1)
"ns_rounded": ns_rounded,
"ew_rounded": ew_rounded
}
)
if not recent_check:
# Store portal discovery in database
if not existing_portal:
# Store new portal in database
await database.execute(
portal_discoveries.insert().values(
character_name=character_name,
portals.insert().values(
portal_name=portal_name,
timestamp=timestamp,
ns=ns,
ew=ew,
z=z
z=z,
discovered_at=timestamp,
discovered_by=character_name
)
)
logger.info(f"Recorded portal discovery: {portal_name} by {character_name} at {ns}, {ew}")
logger.info(f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}")
else:
logger.debug(f"Skipping duplicate portal discovery: {portal_name} by {character_name} (already discovered recently)")
logger.debug(f"Portal already exists at {ns_rounded}, {ew_rounded}")
# Broadcast to browser clients for map updates
await _broadcast_to_browser_clients(data)
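
One caveat with the SELECT-then-INSERT above: two snapshots reporting the same portal at nearly the same moment can both pass the existence check, and the second insert then fails on unique_portal_coords. Since that is an expression index on the rounded coordinates, Postgres could resolve the collision atomically instead; a sketch using the commit's table and column names (untested against this codebase):

INSERT_PORTAL_SQL = """
    INSERT INTO portals (portal_name, ns, ew, z, discovered_at, discovered_by)
    VALUES (:portal_name, :ns, :ew, :z, :discovered_at, :discovered_by)
    ON CONFLICT (ROUND(ns::numeric, 2), ROUND(ew::numeric, 2)) DO NOTHING
"""

# await database.execute(INSERT_PORTAL_SQL, {
#     "portal_name": portal_name, "ns": ns, "ew": ew, "z": z,
#     "discovered_at": timestamp, "discovered_by": character_name,
# })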

View file

@@ -480,8 +480,8 @@ async function fetchPortalData() {
}
const data = await response.json();
portalData = data.portals; // [{character_name, portal_name, ns, ew, z}]
console.log(`Loaded ${portalData.length} portal discoveries from last 24 hours`);
portalData = data.portals; // [{portal_name, coordinates: {ns, ew, z}, discovered_by, discovered_at}]
console.log(`Loaded ${portalData.length} portals from last hour`);
renderPortals();
} catch (err) {
console.error('Failed to fetch portal data:', err);
@@ -526,9 +526,9 @@ function renderPortals() {
clearPortals();
for (const portal of portalData) {
// Coordinates are already floats from the API
const ns = portal.ns;
const ew = portal.ew;
// Extract coordinates from new API format
const ns = portal.coordinates.ns;
const ew = portal.coordinates.ew;
// Convert to pixel coordinates
const { x, y } = worldToPx(ew, ns);
@@ -538,7 +538,7 @@
icon.className = 'portal-icon';
icon.style.left = `${x}px`;
icon.style.top = `${y}px`;
icon.title = `${portal.portal_name} (discovered by ${portal.character_name})`;
icon.title = `${portal.portal_name} (discovered by ${portal.discovered_by})`;
portalContainer.appendChild(icon);
}