From e6db0f094c958169e3d1589fed4690c8020038b4 Mon Sep 17 00:00:00 2001
From: erik
Date: Tue, 23 Sep 2025 20:13:35 +0000
Subject: [PATCH] Fix portal insert error

Replace the check-then-insert portal logic with a single atomic
INSERT ... ON CONFLICT upsert so concurrent snapshots can no longer
race into duplicate-key errors.

NOTE(review): ON CONFLICT on the ROUND(...) expressions requires a
matching unique expression index on
(ROUND(ns::numeric, 1), ROUND(ew::numeric, 1)) — confirm it exists
before applying.

---
 main.py | 88 +++++++++++++++++----------------------------------------
 1 file changed, 26 insertions(+), 62 deletions(-)

diff --git a/main.py b/main.py
index 23570edd..0db32326 100644
--- a/main.py
+++ b/main.py
@@ -1985,76 +1985,40 @@ async def ws_receive_snapshots(
                     ew = float(ew)
                     z = float(z)
 
-                    # Round coordinates for comparison (0.1 tolerance to match DB constraint)
+                    # Round coordinates for display (0.1 tolerance to match DB constraint)
                     ns_rounded = round(ns, 1)
                     ew_rounded = round(ew, 1)
-
-                    # Check if portal exists at these coordinates
-                    existing_portal = await database.fetch_one(
+
+                    # Use PostgreSQL UPSERT to handle race conditions atomically
+                    # This completely eliminates duplicate key errors
+                    result = await database.fetch_one(
                         """
-                        SELECT id FROM portals
-                        WHERE ROUND(ns::numeric, 1) = :ns_rounded
-                        AND ROUND(ew::numeric, 1) = :ew_rounded
-                        LIMIT 1
+                        INSERT INTO portals (portal_name, ns, ew, z, discovered_at, discovered_by)
+                        VALUES (:portal_name, :ns, :ew, :z, :timestamp, :character_name)
+                        ON CONFLICT (ROUND(ns::numeric, 1), ROUND(ew::numeric, 1))
+                        DO UPDATE SET
+                            discovered_at = EXCLUDED.discovered_at,
+                            discovered_by = EXCLUDED.discovered_by,
+                            portal_name = EXCLUDED.portal_name
+                        RETURNING (xmax = 0) AS was_inserted
                         """,
                         {
-                            "ns_rounded": ns_rounded,
-                            "ew_rounded": ew_rounded
+                            "portal_name": portal_name,
+                            "ns": ns,
+                            "ew": ew,
+                            "z": z,
+                            "timestamp": timestamp,
+                            "character_name": character_name
                         }
                     )
-
-                    if not existing_portal:
-                        # Store new portal in database with ON CONFLICT handling
-                        # This prevents race conditions and duplicate key errors
-                        try:
-                            await database.execute(
-                                portals.insert().values(
-                                    portal_name=portal_name,
-                                    ns=ns,
-                                    ew=ew,
-                                    z=z,
-                                    discovered_at=timestamp,
-                                    discovered_by=character_name
-                                )
-                            )
-                            logger.info(f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}")
-                        except Exception as insert_error:
-                            # If insert fails due to duplicate, update the existing portal
-                            if "duplicate key" in str(insert_error).lower():
-                                await database.execute(
-                                    """
-                                    UPDATE portals
-                                    SET discovered_at = :timestamp, discovered_by = :character_name
-                                    WHERE ROUND(ns::numeric, 1) = :ns_rounded
-                                    AND ROUND(ew::numeric, 1) = :ew_rounded
-                                    """,
-                                    {
-                                        "timestamp": timestamp,
-                                        "character_name": character_name,
-                                        "ns_rounded": ns_rounded,
-                                        "ew_rounded": ew_rounded
-                                    }
-                                )
-                                logger.debug(f"Portal already exists (race condition), updated: {portal_name} at {ns_rounded}, {ew_rounded}")
-                            else:
-                                raise
+
+                    # Log whether this was a new discovery or an update
+                    # xmax = 0 means it was an INSERT (new portal)
+                    # xmax != 0 means it was an UPDATE (existing portal)
+                    if result and result["was_inserted"]:
+                        logger.info(f"New portal discovered: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}")
                     else:
-                        # Update timestamp for existing portal to keep it alive
-                        await database.execute(
-                            """
-                            UPDATE portals
-                            SET discovered_at = :timestamp, discovered_by = :character_name
-                            WHERE ROUND(ns::numeric, 1) = :ns_rounded
-                            AND ROUND(ew::numeric, 1) = :ew_rounded
-                            """,
-                            {
-                                "timestamp": timestamp,
-                                "character_name": character_name,
-                                "ns_rounded": ns_rounded,
-                                "ew_rounded": ew_rounded
-                            }
-                        )
-                        logger.info(f"Portal timestamp updated: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}")
+                        logger.debug(f"Portal timestamp updated: {portal_name} at {ns_rounded}, {ew_rounded} by {character_name}")
 
                     # Broadcast to browser clients for map updates
                     await _broadcast_to_browser_clients(data)