MOREEEE CLAUDE MOREEEE
cvmsentry.py (126 changed lines)
@@ -7,6 +7,7 @@ import logging
 import sys
 from datetime import datetime, timezone
 import json
+from snapper import snap_all_vms
 
 LOG_LEVEL = getattr(config, "log_level", "INFO")
 
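
The new import pulls snap_all_vms from a snapper module that is not part of this diff. A minimal sketch of the interface the snapshot task below appears to assume (hypothetical stand-in, not the real snapper):

    import os
    import asyncio

    async def snap_all_vms(snapshot_dir: str) -> None:
        """Hypothetical stand-in: snapshot every known VM into snapshot_dir."""
        os.makedirs(snapshot_dir, exist_ok=True)
        # A real implementation would capture each VM's framebuffer here.
        await asyncio.sleep(0)  # yield control; placeholder for real async work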
@@ -22,7 +23,7 @@ log = logging.getLogger("CVMSentry")
 log.setLevel(LOG_LEVEL)
 log.addHandler(stdout_handler)
 
-users = {}
+vms = {}
 vm_botuser = {}
 STATE = CollabVMState.WS_DISCONNECTED
 
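
For reference, the per-VM vms registry that replaces the old flat users dict is initialised later in connect() with this shape:

    # Illustrative contents of the module-level vms dict:
    vms = {
        "vm1": {
            "turn_queue": [],          # usernames waiting for the turn
            "active_turn_user": None,  # username holding the turn, or None
            "users": {},               # username -> {"rank": CollabVMRank}
        },
    }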
@@ -44,13 +45,34 @@ async def send_chat_message(websocket, message: str):
 async def send_guac(websocket, *args: str):
     await websocket.send(guac_encode(list(args)))
 
+async def periodic_snapshot_task():
+    """Background task that captures VM snapshots."""
+    log.info("Starting periodic snapshot task")
+
+    while True:
+        try:
+            await asyncio.sleep(30)  # Wait 30 seconds
+            log.debug("Running periodic snapshot capture...")
+
+            # Create snapshots directory with timestamp
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            snapshot_dir = os.path.join("logs", timestamp)
+
+            # Capture all VMs
+            await snap_all_vms(snapshot_dir)
+
+        except Exception as e:
+            log.error(f"Error in periodic snapshot task: {e}")
+            # Continue running even if there's an error
+
 async def connect(vm_name: str):
     global STATE
-    global users
+    global vms
     global vm_botuser
     if vm_name not in config.vms:
         log.error(f"VM '{vm_name}' not found in configuration.")
         return
+    vms[vm_name] = {"turn_queue": [], "active_turn_user": None, "users": {}}
     uri = config.vms[vm_name]
     log_file_path = os.path.join(getattr(config, "log_directory", "logs"), f"{vm_name}.json")
     if not os.path.exists(log_file_path):
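
periodic_snapshot_task follows the standard long-lived asyncio background-loop pattern: sleep, do the work, log and keep going on failure. The same shape in isolation, as a self-contained sketch:

    import asyncio
    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("example")

    async def capture() -> None:
        log.info("snapshot tick")  # stands in for snap_all_vms(...)

    async def periodic(interval: float) -> None:
        while True:
            try:
                await asyncio.sleep(interval)
                await capture()
            except Exception as e:
                # Swallow errors so one bad capture never kills the loop.
                log.error(f"periodic capture failed: {e}")

    async def main() -> None:
        task = asyncio.create_task(periodic(0.2))
        await asyncio.sleep(1)  # let it tick a few times
        task.cancel()

    asyncio.run(main())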
@@ -66,8 +88,6 @@ async def connect(vm_name: str):
         log.info(f"Connected to VM '{vm_name}' at {uri}")
         await send_guac(websocket, "rename", "")
         await send_guac(websocket, "connect", vm_name)
-        if vm_name not in users:
-            users[vm_name] = {}
         if vm_name not in vm_botuser:
             vm_botuser[vm_name] = ""
         # response = await websocket.recv()
@@ -96,12 +116,13 @@ async def connect(vm_name: str):
                 else:
                     log.debug(f"({STATE.name} - {vm_name}) Bot rename on VM {vm_name} failed with status {CollabVMClientRenameStatus(int(status)).name}")
             case ["1", old_name, new_name]:
-                if old_name in users[vm_name]:
+                if old_name in vms[vm_name]["users"]:
                     log.debug(f"({STATE.name} - {vm_name}) User rename on VM {vm_name}: {old_name} -> {new_name}")
-                    users[vm_name][new_name] = users[vm_name].pop(old_name)
+                    vms[vm_name]["users"][new_name] = vms[vm_name]["users"].pop(old_name)
             case ["login", "1"]:
                 STATE = CollabVMState.LOGGED_IN
-                await send_chat_message(websocket, random.choice(config.autostart_messages))
+                if config.send_autostart and config.autostart_messages:
+                    await send_chat_message(websocket, random.choice(config.autostart_messages))
             case ["chat", user, message, *backlog]:
                 system_message = (user == "")
                 if system_message:
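
The autostart greeting is now gated on two config fields instead of firing unconditionally. A hypothetical config.py fragment matching what the new check reads (field names from the diff, values illustrative):

    # config.py (illustrative values)
    send_autostart = True
    autostart_messages = [
        "CVM-Sentry online.",
        "Chat logging is active.",
    ]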
@@ -109,6 +130,12 @@ async def connect(vm_name: str):
                 if not backlog:
                     log.info(f"[{vm_name} - {user}]: {message}")
 
+                def get_rank(username: str) -> CollabVMRank:
+                    return vms[vm_name]["users"].get(username, {}).get("rank")
+
+                def admin_check(username: str) -> bool:
+                    return username in config.admins and get_rank(username) > CollabVMRank.Unregistered
+
                 utc_now = datetime.now(timezone.utc)
                 utc_day = utc_now.strftime("%Y-%m-%d")
                 timestamp = utc_now.isoformat()
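
Note that get_rank() returns None for a username the bot has not seen, so the .name access in the whoami handler and the ordering comparison in admin_check assume a known user. A defensive sketch that defaults to Unregistered instead; CollabVMRank is assumed to be an IntEnum (the int(...) construction and the > comparison in this file imply ordered integer values), and members other than Unregistered are illustrative:

    from enum import IntEnum

    class CollabVMRank(IntEnum):  # illustrative members; Unregistered is from the diff
        Unregistered = 0
        Registered = 1
        Admin = 2

    registry = {"alice": {"rank": CollabVMRank.Admin}}
    admins = ["alice", "mallory"]

    def get_rank(username: str) -> CollabVMRank:
        # Defaulting avoids None for users the bot has not seen yet.
        return registry.get(username, {}).get("rank", CollabVMRank.Unregistered)

    def admin_check(username: str) -> bool:
        return username in admins and get_rank(username) > CollabVMRank.Unregistered

    assert admin_check("alice")
    assert not admin_check("mallory")  # listed as admin but never seen/ranked
    assert not admin_check("bob")      # not an admin at all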
@@ -136,6 +163,7 @@ async def connect(vm_name: str):
                 # })
 
                 log_data[utc_day].append({
+                    "type": "chat",
                     "timestamp": timestamp,
                     "username": user,
                     "message": message
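
With the new "type" field, a day's bucket in logs/<vm_name>.json can mix chat entries with the turn entries added further down; illustrative shape:

    # Illustrative contents of logs/<vm_name>.json after this commit:
    {
        "2025-01-01": [
            {
                "type": "chat",
                "timestamp": "2025-01-01T12:00:00+00:00",
                "username": "alice",
                "message": "hi",
            },
            {
                "type": "turn",
                "timestamp": "2025-01-01T12:00:05+00:00",
                "active_turn_user": "alice",
                "queue": ["bob"],
            },
        ]
    }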
@@ -144,44 +172,78 @@ async def connect(vm_name: str):
                 log_file.seek(0)
                 json.dump(log_data, log_file, indent=4)
                 log_file.truncate()
+
                 if config.commands["enabled"] and message.startswith(config.commands["prefix"]):
                     command = message[len(config.commands["prefix"]):].strip().lower()
                     match command:
                         case "whoami":
-                            await send_chat_message(websocket, f"You are {user} with rank {users[vm_name][user]['rank'].name}.")
+                            await send_chat_message(websocket, f"You are {user} with rank {get_rank(user).name}.")
                         case "about":
                             await send_chat_message(websocket, config.responses.get("about", "CVM-Sentry (NO RESPONSE CONFIGURED)"))
                         case "dump":
-                            if user != "dfu":
-                                await send_chat_message(websocket, "You do not have permission to use this command.")
+                            if not admin_check(user):
                                 continue
-                            log.debug(f"({STATE.name} - {vm_name}) Dumping user list for VM {vm_name}: {users[vm_name]}")
+                            log.debug(f"({STATE.name} - {vm_name}) Dumping user list for VM {vm_name}: {vms[vm_name]['users']}")
                             await send_chat_message(websocket, f"Dumped user list to console.")
             case ["adduser", count, *list]:
                 for i in range(int(count)):
                     user = list[i * 2]
                     rank = CollabVMRank(int(list[i * 2 + 1]))
-                    if user in users[vm_name]:
-                        users[vm_name][user]["rank"] = rank
+                    if user in vms[vm_name]["users"]:
+                        vms[vm_name]["users"][user]["rank"] = rank
                         log.info(f"[{vm_name}] User '{user}' rank updated to {rank.name}.")
                     else:
-                        users[vm_name][user] = {"rank": rank, "turn_active": False}
+                        vms[vm_name]["users"][user] = {"rank": rank}
                         log.info(f"[{vm_name}] User '{user}' connected with rank {rank.name}.")
             case ["turn", _, "0"]:
                 if STATE < CollabVMState.LOGGED_IN:
                     continue
-                log.debug(f"({STATE.name} - {vm_name}) Turn queue exhausted.")
+                if vms[vm_name]["active_turn_user"] is None and not vms[vm_name]["turn_queue"]:
+                    #log.debug(f"({STATE.name} - {vm_name}) Incoming queue exhaustion matches the VM's state. Dropping update.")
+                    continue
+                vms[vm_name]["active_turn_user"] = None
+                vms[vm_name]["turn_queue"] = []
+                log.debug(f"({STATE.name} - {vm_name}) Turn queue is naturally exhausted.")
             case ["turn", turn_time, count, current_turn, *queue]:
-                log.debug(f"({STATE.name} - {vm_name}) Turn queue updated: {queue} | Current turn: {current_turn} | Time left for current turn: {int(turn_time)//1000}s")
-                for user in users[vm_name]:
-                    users[vm_name][user]["turn_active"] = (user == current_turn)
+                if queue == vms[vm_name]["turn_queue"] and current_turn == vms[vm_name]["active_turn_user"]:
+                    #log.debug(f"({STATE.name} - {vm_name}) Incoming turn update matches the VM's state. Dropping update.")
+                    continue
+                vms[vm_name]["turn_queue"] = queue
+                vms[vm_name]["active_turn_user"] = current_turn if current_turn != "" else None
+                if current_turn:
+                    utc_now = datetime.now(timezone.utc)
+                    utc_day = utc_now.strftime("%Y-%m-%d")
+                    timestamp = utc_now.isoformat()
+
+                    with open(log_file_path, "r+") as log_file:
+                        try:
+                            log_data = json.load(log_file)
+                        except json.JSONDecodeError:
+                            log_data = {}
+
+                        if utc_day not in log_data:
+                            log_data[utc_day] = []
+
+                        log_data[utc_day].append({
+                            "type": "turn",
+                            "timestamp": timestamp,
+                            "active_turn_user": current_turn,
+                            "queue": queue
+                        })
+
+                        log_file.seek(0)
+                        json.dump(log_data, log_file, indent=4)
+                        log_file.truncate()
+                log.debug(f"({STATE.name} - {vm_name}) Turn update: turn_time={turn_time}, count={count}, current_turn={current_turn}, queue={queue}")
+
+
             case ["remuser", count, *list]:
                 for i in range(int(count)):
                     username = list[i]
-                    if username in users[vm_name]:
-                        del users[vm_name][username]
+                    if username in vms[vm_name]["users"]:
+                        del vms[vm_name]["users"][username]
                         log.info(f"[{vm_name}] User '{username}' left.")
-            case ["sync", *args] | ["png", *args] | ["flag", *args] | ["size", *args]:
+            case ["flag", *args] | ["size", *args] | ["png", *args] | ["sync", *args]:
                 continue
             case _:
                 if decoded is not None:
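
guac_encode and guac_decode are not shown in this diff. CollabVM speaks the Guacamole wire format, where each element is serialised as <length>.<value>, elements are comma-separated, and an instruction ends with a semicolon; a sketch consistent with how this file calls the helpers (signatures assumed, not taken from the source):

    def guac_encode(elements: list[str]) -> str:
        # ["connect", "vm1"] -> "7.connect,3.vm1;"
        return ",".join(f"{len(e)}.{e}" for e in elements) + ";"

    def guac_decode(message: str) -> list[str] | None:
        # Inverse of guac_encode; returns None on malformed input.
        if not message.endswith(";"):
            return None
        body, out, i = message[:-1], [], 0
        try:
            while i < len(body):
                dot = body.index(".", i)
                length = int(body[i:dot])
                out.append(body[dot + 1 : dot + 1 + length])
                i = dot + 1 + length + 1  # skip the value plus the separating comma
        except ValueError:
            return None
        return out

    assert guac_encode(["connect", "vm1"]) == "7.connect,3.vm1;"
    assert guac_decode("4.chat,5.alice,2.hi;") == ["chat", "alice", "hi"]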
@@ -195,6 +258,7 @@ for vm in config.vms.keys():
     asyncio.run(connect(vm_name))
 
 async def main():
+
     async def connect_with_reconnect(vm_name: str):
         while True:
             try:
@@ -205,8 +269,24 @@ for vm in config.vms.keys():
             except websockets.exceptions.ConnectionClosedOK:
                 log.warning(f"Connection to VM '{vm_name}' closed cleanly (code 1005). Reconnecting...")
                 await asyncio.sleep(5)  # Wait before attempting to reconnect
+            except websockets.exceptions.InvalidStatus as e:
+                log.error(f"Failed to connect to VM '{vm_name}' with status code: {e}. Reconnecting...")
+                await asyncio.sleep(10)  # Wait longer for HTTP errors
+            except websockets.exceptions.WebSocketException as e:
+                log.error(f"WebSocket error connecting to VM '{vm_name}': {e}. Reconnecting...")
+                await asyncio.sleep(5)
+            except Exception as e:
+                log.error(f"Unexpected error connecting to VM '{vm_name}': {e}. Reconnecting...")
+                await asyncio.sleep(10)  # Wait longer for unexpected errors
 
-    tasks = [connect_with_reconnect(vm) for vm in config.vms.keys()]
-    await asyncio.gather(*tasks)
+    # Create tasks for VM connections
+    vm_tasks = [connect_with_reconnect(vm) for vm in config.vms.keys()]
+
+    # Add periodic snapshot task
+    snapshot_task = periodic_snapshot_task()
+
+    # Run all tasks concurrently
+    all_tasks = [snapshot_task] + vm_tasks
+    await asyncio.gather(*all_tasks)
 
 asyncio.run(main())
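
Both periodic_snapshot_task and every connect_with_reconnect loop run forever, so the final gather is expected never to return; it simply multiplexes one background coroutine with N connection loops. The same pattern in isolation (bounded here so the sketch terminates):

    import asyncio

    async def reconnect_loop(name: str) -> None:
        for _ in range(3):           # forever in the real code
            await asyncio.sleep(0.1)
            print(f"{name}: reconnect tick")

    async def background() -> None:
        for _ in range(3):           # forever in the real code
            await asyncio.sleep(0.1)
            print("snapshot tick")

    async def main() -> None:
        # gather() accepts bare coroutines and schedules them as tasks.
        await asyncio.gather(background(), *(reconnect_loop(f"vm{i}") for i in range(2)))

    asyncio.run(main())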