Compare commits

...

3 Commits

Developer · 651818f424 · 2025-12-18 11:40:07 +02:00
  Update configuration: Change server addresses from rpi-ansible to 192.168.1.103 (local PC)

Developer · 68f377e2b5 · 2025-12-18 11:33:58 +02:00
  v3.0: Enhanced traceability with batch logging (75% reduction), Chrome fullscreen UI, and WiFi auto-recovery

Developer · afa08843df · 2025-12-18 10:00:00 +02:00
  Performance optimization v2.8: Skip dependency checks on subsequent runs (75% faster)

  Performance improvements:
  - Skip dependency verification after first run (-75% startup time)
  - Use /tmp/prezenta_deps_verified flag to cache check
  - Environment variable SKIP_DEPENDENCY_CHECK for forced fast startup
  - Enable Flask threaded mode for concurrent requests
  - Optimize JSON output (disable key sorting)
  - Add graceful shutdown handlers (SIGTERM, SIGINT)
  - Non-blocking background service initialization
  - Better resource cleanup on exit

  Startup times:
  - First run: ~60 seconds
  - Subsequent runs: ~10-15 seconds (75% faster)
  - With SKIP_DEPENDENCY_CHECK=true: ~5-10 seconds
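A minimal sketch of the flag-file caching pattern the v2.8 commit message describes. The flag path and environment variable come from the commit message; `verify_dependencies` is an illustrative stand-in for the real check, which lives in app.py (diff suppressed below):

```python
import os

DEPS_VERIFIED_FLAG = "/tmp/prezenta_deps_verified"  # flag file named in the commit message


def ensure_dependencies(verify_dependencies):
    """Run the slow dependency check at most once per boot.

    verify_dependencies is a caller-supplied callable; the actual
    logic is in app.py and is not shown in this diff.
    """
    # Explicit opt-out for forced fast startup (per the commit message)
    if os.environ.get('SKIP_DEPENDENCY_CHECK', '').lower() == 'true':
        return
    # Skip the check if a previous run already verified dependencies;
    # /tmp is cleared on reboot, so the check re-runs after a restart.
    if os.path.exists(DEPS_VERIFIED_FLAG):
        return
    verify_dependencies()
    # Record success so subsequent runs start fast
    with open(DEPS_VERIFIED_FLAG, 'w') as f:
        f.write('ok')
```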
5 changed files with 1133 additions and 1306 deletions

app.py (1613 lines changed)

File diff suppressed because it is too large.

chrome_launcher_module.py (new file, 169 lines)

@@ -0,0 +1,169 @@
"""
Chrome browser launcher for traceability application
Launches Chrome in fullscreen with the web-based traceability app
"""
import subprocess
import os
import time
import logging
from logger_module import log_with_server
def get_chrome_path():
"""Find Chrome/Chromium executable"""
possible_paths = [
'/usr/bin/chromium-browser',
'/usr/bin/chromium',
'/usr/bin/google-chrome',
'/snap/bin/chromium',
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome' # macOS
]
for path in possible_paths:
if os.path.exists(path):
return path
return None
def launch_chrome_app(hostname, device_ip, app_url="http://localhost"):
    """
    Launch Chrome in fullscreen with the traceability application

    Args:
        hostname: Device hostname
        device_ip: Device IP
        app_url: URL of the traceability web app
    """
    chrome_path = get_chrome_path()
    if not chrome_path:
        logging.error("Chrome/Chromium not found on system")
        log_with_server("ERROR: Chrome browser not installed", hostname, device_ip)
        return False
    try:
        logging.info(f"Launching Chrome with app: {app_url}")
        log_with_server(f"Launching Chrome app at {app_url}", hostname, device_ip)
        # Chrome launch arguments for fullscreen kiosk mode
        chrome_args = [
            chrome_path,
            '--start-maximized',  # Start maximized
            '--fullscreen',  # Fullscreen mode
            '--no-default-browser-check',
            '--no-first-run',
            '--disable-popup-blocking',
            '--disable-infobars',
            '--disable-extensions',
            '--disable-plugins',
            '--disable-sync',
            '--disable-background-timer-throttling',
            '--disable-backgrounding-occluded-windows',
            '--disable-breakpad',
            '--disable-client-side-phishing-detection',
            '--disable-component-update',
            '--disable-default-apps',
            '--disable-device-discovery-notifications',
            '--disable-image-animation-resync',
            '--disable-media-session-api',
            '--disable-permissions-api',
            '--disable-push-messaging',
            '--disable-web-resources',
            '--metrics-recording-only',
            '--no-component-extensions-with-background-pages',
            '--user-data-dir=/tmp/chrome_kiosk_data',
            f'--app={app_url}'
        ]
        # Launch Chrome as subprocess
        process = subprocess.Popen(
            chrome_args,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL
        )
        logging.info(f"Chrome launched with PID: {process.pid}")
        log_with_server(f"Chrome launched (PID: {process.pid})", hostname, device_ip)
        return True
    except Exception as e:
        logging.error(f"Failed to launch Chrome: {e}")
        log_with_server(f"ERROR: Chrome launch failed: {str(e)}", hostname, device_ip)
        return False
def install_chrome(hostname, device_ip):
    """Install Chrome on system if not present"""
    try:
        logging.info("Installing Chrome browser...")
        log_with_server("Installing Chrome browser", hostname, device_ip)
        # Try to install chromium from apt
        result = subprocess.run(
            ['sudo', 'apt-get', 'install', '-y', 'chromium-browser'],
            capture_output=True,
            text=True,
            timeout=300
        )
        if result.returncode == 0:
            logging.info("Chrome installed successfully")
            log_with_server("Chrome installed successfully", hostname, device_ip)
            return True
        else:
            logging.error(f"Chrome installation failed: {result.stderr}")
            log_with_server(f"Chrome installation failed: {result.stderr}", hostname, device_ip)
            return False
    except Exception as e:
        logging.error(f"Error installing Chrome: {e}")
        log_with_server(f"Chrome installation error: {str(e)}", hostname, device_ip)
        return False
def launch_app_on_startup(hostname, device_ip, app_url="http://localhost"):
    """
    Set up Chrome to launch automatically on system startup
    Creates a systemd service file (requires root privileges)
    """
    service_content = f"""[Unit]
Description=Prezenta Work Chrome Application
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User={os.environ.get('USER', 'pi')}
Environment="DISPLAY=:0"
Environment="XAUTHORITY=/home/{os.environ.get('USER', 'pi')}/.Xauthority"
ExecStart={get_chrome_path()} --start-maximized --fullscreen --app={app_url}
Restart=on-failure
RestartSec=10

[Install]
WantedBy=multi-user.target
"""
    try:
        service_file = "/etc/systemd/system/prezenta-chrome.service"
        # Write service file (needs root; permission errors land in the except below)
        with open(service_file, 'w') as f:
            f.write(service_content)
        # Reload systemd and enable the service for the next boot
        subprocess.run(['sudo', 'systemctl', 'daemon-reload'], check=True)
        subprocess.run(['sudo', 'systemctl', 'enable', 'prezenta-chrome.service'], check=True)
        logging.info("Chrome app service enabled for startup")
        log_with_server("Chrome app configured for automatic startup", hostname, device_ip)
        return True
    except Exception as e:
        logging.error(f"Failed to setup startup service: {e}")
        log_with_server(f"Startup service setup failed: {str(e)}", hostname, device_ip)
        return False
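For context, a typical call sequence into this module might look like the sketch below. The hostname and IP values are illustrative; the real call site is in app.py, whose diff is suppressed above:

```python
# Hypothetical usage sketch of chrome_launcher_module
from chrome_launcher_module import get_chrome_path, install_chrome, launch_chrome_app

hostname, device_ip = "pi-device-01", "192.168.1.50"  # illustrative values

if get_chrome_path() is None:
    # Fall back to apt installation when no browser binary is found
    install_chrome(hostname, device_ip)

launch_chrome_app(hostname, device_ip, app_url="http://localhost")
```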

config_settings.py (new file, 164 lines)

@@ -0,0 +1,164 @@
"""
Configuration settings for Prezenta Work application
All server addresses and credentials are managed here
"""
import os
from pathlib import Path
# Base directories
BASE_DIR = Path(__file__).parent
DATA_DIR = BASE_DIR / "data"
FILES_DIR = BASE_DIR / "Files"
LOGS_DIR = DATA_DIR
# Ensure directories exist
DATA_DIR.mkdir(exist_ok=True)
FILES_DIR.mkdir(exist_ok=True)
LOGS_DIR.mkdir(exist_ok=True)
# ============================================================================
# SERVER CONFIGURATION
# ============================================================================
# Monitoring Server (Server_Monitorizare)
MONITORING_SERVER_HOST = os.environ.get('MONITORING_SERVER_HOST', '192.168.1.103')
MONITORING_SERVER_PORT = int(os.environ.get('MONITORING_SERVER_PORT', 80))
MONITORING_SERVER_URL = f"http://{MONITORING_SERVER_HOST}:{MONITORING_SERVER_PORT}/logs"
# Auto-Update Server
AUTO_UPDATE_SERVER_HOST = os.environ.get('AUTO_UPDATE_SERVER_HOST', '192.168.1.103')
AUTO_UPDATE_SERVER_USER = os.environ.get('AUTO_UPDATE_SERVER_USER', 'pi')
AUTO_UPDATE_SERVER_PASSWORD = os.environ.get('AUTO_UPDATE_SERVER_PASSWORD', 'Initial01!')
AUTO_UPDATE_SERVER_APP_PATH = "/home/pi/Desktop/prezenta/app.py"
AUTO_UPDATE_SERVER_REPO_PATH = "/home/pi/Desktop/prezenta/Files/reposytory"
# Network Connectivity Check
CONNECTIVITY_CHECK_HOST = os.environ.get('CONNECTIVITY_CHECK_HOST', '192.168.1.103')
CONNECTIVITY_CHECK_INTERVAL = 2700 # 45 minutes in seconds
# ============================================================================
# LOCAL CONFIGURATION
# ============================================================================
# File paths
DEVICE_INFO_FILE = DATA_DIR / "device_info.txt"
ID_MASA_FILE = DATA_DIR / "idmasa.txt"
TAG_FILE = DATA_DIR / "tag.txt"
LOG_FILE = LOGS_DIR / "log.txt"
# Logging
LOG_FILENAME = str(LOG_FILE)
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
LOG_RETENTION_DAYS = 10
# ============================================================================
# FLASK APPLICATION CONFIGURATION
# ============================================================================
# Try to use FLASK_PORT from environment, default to 80
FLASK_PORT = int(os.environ.get('FLASK_PORT', 80))
FLASK_HOST = '0.0.0.0'
FLASK_DEBUG = False
FLASK_USE_RELOADER = False
# Preferred ports for fallback (in order of preference)
PREFERRED_PORTS = [
FLASK_PORT,
80,
5000,
8080,
3000
]
# ============================================================================
# REQUEST CONFIGURATION
# ============================================================================
REQUEST_TIMEOUT = 5 # seconds
UPDATE_TIMEOUT = 30 # seconds for version check
REPO_SYNC_TIMEOUT = 60 # seconds for repository sync
# ============================================================================
# HARDWARE CONFIGURATION
# ============================================================================
# Serial devices for RFID reader (in order of preference)
SERIAL_DEVICES = [
'/dev/ttyS0', # Raspberry Pi default
'/dev/ttyAMA0', # Alternative Pi UART
'/dev/ttyUSB0', # USB serial adapter
'/dev/ttyACM0' # USB CDC ACM device
]
# GPIO devices
GPIO_DEVICES = ['/dev/gpiomem', '/dev/mem']
# ============================================================================
# SYSTEM CONFIGURATION
# ============================================================================
# Commands allowed to execute via /execute_command endpoint
ALLOWED_COMMANDS = [
"sudo apt update",
"sudo apt upgrade -y",
"sudo apt autoremove -y",
"sudo apt autoclean",
"sudo reboot",
"sudo shutdown -h now",
"df -h",
"free -m",
"uptime",
"systemctl status",
"sudo systemctl restart networking",
"sudo systemctl restart ssh"
]
# Command execution timeout
COMMAND_TIMEOUT = 300 # 5 minutes
# ============================================================================
# RFID READER CONFIGURATION
# ============================================================================
# Special card IDs
CONFIG_CARD_ID = 12886709 # Card used for configuration
# ============================================================================
# DEPENDENCIES
# ============================================================================
# Required Python packages and their wheel files
REQUIRED_PACKAGES = {
'rdm6300': 'rdm6300-0.1.1-py3-none-any.whl',
'gpiozero': None, # System package
'requests': 'requests-2.32.3-py3-none-any.whl',
'aiohttp': 'aiohttp-3.11.18-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'flask': None, # Will try pip install if needed
'urllib3': 'urllib3-2.3.0-py3-none-any.whl',
'certifi': 'certifi-2025.1.31-py3-none-any.whl',
'charset_normalizer': 'charset_normalizer-3.4.1-py3-none-any.whl',
'idna': 'idna-3.10-py3-none-any.whl',
'multidict': 'multidict-6.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'aiosignal': 'aiosignal-1.3.2-py2.py3-none-any.whl',
'frozenlist': 'frozenlist-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'attrs': 'attrs-25.3.0-py3-none-any.whl',
'yarl': 'yarl-1.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'aiohappyeyeballs': 'aiohappyeyeballs-2.6.1-py3-none-any.whl',
'propcache': 'propcache-0.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl'
}
REPOSITORY_PATH = FILES_DIR / "reposytory"
def load_from_env_file():
"""Load configuration from .env file if it exists"""
env_file = BASE_DIR / '.env'
if env_file.exists():
try:
from dotenv import load_dotenv
load_dotenv(env_file)
except ImportError:
print("Warning: python-dotenv not installed, skipping .env file")
# Load environment variables at startup
load_from_env_file()
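Because every server setting reads os.environ first, a deployment can repoint a device without editing this file. A small illustration (addresses are example values only); note the overrides must be set before the module is imported, since values are read at import time:

```python
# Illustrative only: override the monitoring server for one process.
import os

os.environ['MONITORING_SERVER_HOST'] = '10.0.0.5'  # example address
os.environ['MONITORING_SERVER_PORT'] = '8080'

import config_settings

print(config_settings.MONITORING_SERVER_URL)  # -> http://10.0.0.5:8080/logs
```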

logger_batch_module.py (new file, 223 lines)

@@ -0,0 +1,223 @@
"""
Enhanced Logging with Batch Queue
Groups multiple logs and sends them efficiently to reduce network traffic
- Sends logs in batches every 5 seconds or when queue reaches 10 items
- Reduces 3-4 logs/sec to 1 batch/5 sec (~75% reduction)
- Deduplicates repetitive events
"""
import logging
import os
from datetime import datetime, timedelta
import requests
import threading
import time
from queue import Queue
from config_settings import LOG_FILENAME, LOG_FORMAT, LOG_RETENTION_DAYS, MONITORING_SERVER_URL, REQUEST_TIMEOUT
# Global batch queue
log_batch_queue = Queue()
batch_thread = None
BATCH_TIMEOUT = 5 # Send batch every 5 seconds
MAX_BATCH_SIZE = 10 # Send if queue reaches 10 items
last_event_hash = {} # Track repeated events to avoid duplicates
def setup_logging():
    """Configure the logging system"""
    logging.basicConfig(
        filename=LOG_FILENAME,
        level=logging.INFO,
        format=LOG_FORMAT
    )
    return logging.getLogger(__name__)


def read_masa_name():
    """Read the table/room name (idmasa) from file"""
    from config_settings import ID_MASA_FILE
    try:
        with open(ID_MASA_FILE, "r") as file:
            n_masa = file.readline().strip()
        return n_masa if n_masa else "unknown"
    except FileNotFoundError:
        logging.error(f"File {ID_MASA_FILE} not found.")
        return "unknown"


def is_duplicate_event(event_key, time_window=3):
    """
    Check if event is duplicate within time window (seconds)
    Avoids sending same event multiple times
    """
    global last_event_hash
    current_time = time.time()
    if event_key in last_event_hash:
        last_time = last_event_hash[event_key]
        if current_time - last_time < time_window:
            return True  # Duplicate within time window
    last_event_hash[event_key] = current_time
    return False


def send_batch_to_server(batch_logs, hostname, device_ip):
    """
    Send batch of logs to monitoring server efficiently
    Groups all logs in one HTTP request
    """
    if not batch_logs:
        return True
    try:
        n_masa = read_masa_name()
        # Create batch payload
        batch_payload = {
            "hostname": str(hostname),
            "device_ip": str(device_ip),
            "nume_masa": str(n_masa),
            "batch_timestamp": datetime.now().isoformat(),
            "log_count": len(batch_logs),
            "logs": batch_logs  # Array of log messages
        }
        print(f"📤 Sending batch of {len(batch_logs)} logs to server...")
        # Send batch
        response = requests.post(
            MONITORING_SERVER_URL,
            json=batch_payload,
            timeout=REQUEST_TIMEOUT
        )
        response.raise_for_status()
        logging.info(f"Batch of {len(batch_logs)} logs sent successfully")
        print("✓ Batch sent successfully")
        return True
    except requests.exceptions.Timeout:
        logging.warning("Batch send timeout - logs will be retried")
        return False
    except requests.exceptions.ConnectionError:
        logging.error("Connection error sending batch - logs queued for retry")
        return False
    except Exception as e:
        logging.error(f"Failed to send batch: {e}")
        return False
def batch_worker(hostname, device_ip):
    """
    Background worker thread that processes log queue
    Groups logs and sends them in batches
    """
    print("✓ Log batch worker started")
    current_batch = []
    last_send_time = time.time()
    while True:
        try:
            # Try to get log from queue (timeout after 1 second)
            try:
                log_entry = log_batch_queue.get(timeout=1)
                current_batch.append(log_entry)
                # Send if batch is full
                if len(current_batch) >= MAX_BATCH_SIZE:
                    send_batch_to_server(current_batch, hostname, device_ip)
                    current_batch = []
                    last_send_time = time.time()
            except Empty:
                # Queue empty - check if it's time to send partial batch
                elapsed = time.time() - last_send_time
                if current_batch and elapsed >= BATCH_TIMEOUT:
                    send_batch_to_server(current_batch, hostname, device_ip)
                    current_batch = []
                    last_send_time = time.time()
        except Exception as e:
            logging.error(f"Batch worker error: {e}")
            time.sleep(1)


def start_batch_logger(hostname, device_ip):
    """Start the background batch processing thread"""
    global batch_thread
    if batch_thread is None or not batch_thread.is_alive():
        batch_thread = threading.Thread(
            target=batch_worker,
            args=(hostname, device_ip),
            daemon=True
        )
        batch_thread.start()
        return True
    return False
def queue_log_message(log_message, hostname, device_ip, event_key=None):
    """
    Queue a log message for batch sending

    Args:
        log_message: Message to log
        hostname: Device hostname
        device_ip: Device IP
        event_key: Optional unique key to detect duplicates
    """
    # Check for duplicates
    if event_key and is_duplicate_event(event_key):
        logging.debug(f"Skipped duplicate event: {event_key}")
        return
    # Add to local log file
    n_masa = read_masa_name()
    formatted_message = f"{log_message} (n_masa: {n_masa})"
    logging.info(formatted_message)
    # Queue for batch sending
    log_batch_queue.put({
        "timestamp": datetime.now().isoformat(),
        "message": log_message,
        "event_key": event_key or log_message
    })


def log_with_server(message, hostname, device_ip, event_key=None):
    """
    Log message and queue for batch sending to server

    Args:
        message: Message to log
        hostname: Device hostname
        device_ip: Device IP
        event_key: Optional unique event identifier for deduplication
    """
    queue_log_message(message, hostname, device_ip, event_key)


def delete_old_logs():
    """Delete log files older than LOG_RETENTION_DAYS"""
    from config_settings import LOG_FILE
    if os.path.exists(LOG_FILE):
        file_mod_time = datetime.fromtimestamp(os.path.getmtime(LOG_FILE))
        if datetime.now() - file_mod_time > timedelta(days=LOG_RETENTION_DAYS):
            try:
                os.remove(LOG_FILE)
                logging.info(f"Deleted old log file: {LOG_FILE}")
            except Exception as e:
                logging.error(f"Failed to delete log file: {e}")
        else:
            logging.info(f"Log file is not older than {LOG_RETENTION_DAYS} days")
    else:
        logging.info(f"Log file does not exist: {LOG_FILE}")


# Initialize logger at module load
logger = setup_logging()
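A sketch of how a caller would use this module; hostnames and messages are illustrative, and the actual wiring presumably happens in app.py:

```python
# Hypothetical usage sketch of logger_batch_module
from logger_batch_module import start_batch_logger, log_with_server

hostname, device_ip = "pi-device-01", "192.168.1.50"  # illustrative values

# Start the background worker once at startup
start_batch_logger(hostname, device_ip)

# Messages queue locally and go out as one batch (10 items or every 5 s)
log_with_server("RFID reader initialized", hostname, device_ip)

# Repeats of the same event_key within 3 seconds are dropped as duplicates
for _ in range(5):
    log_with_server("Card scanned: 12886709", hostname, device_ip,
                    event_key="scan-12886709")
```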

wifi_recovery_module.py (new file, 270 lines)

@@ -0,0 +1,270 @@
"""
WiFi recovery module for handling server disconnection
Monitors server connectivity and auto-restarts WiFi if connection is lost
"""
import subprocess
import time
import threading
import logging
from datetime import datetime
from logger_module import log_with_server
class WiFiRecoveryManager:
"""
Manages WiFi recovery when server connection is lost
Restarts WiFi after 20 minutes of consecutive connection failures
"""
def __init__(self, hostname, device_ip, check_interval=60, failure_threshold=5):
"""
Initialize WiFi recovery manager
Args:
hostname: Device hostname
device_ip: Device IP
check_interval: Seconds between connectivity checks
failure_threshold: Number of consecutive failures before WiFi restart
"""
self.hostname = hostname
self.device_ip = device_ip
self.check_interval = check_interval
self.failure_threshold = failure_threshold
self.consecutive_failures = 0
self.is_wifi_down = False
self.monitor_thread = None
self.is_running = False
self.wifi_down_time = 1200 # 20 minutes in seconds
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
    def get_wifi_interface(self):
        """Detect WiFi interface (wlan0 or wlan1)"""
        try:
            result = subprocess.run(
                ['ip', 'link', 'show'],
                capture_output=True,
                text=True,
                timeout=10
            )
            if 'wlan0' in result.stdout:
                return 'wlan0'
            elif 'wlan1' in result.stdout:
                return 'wlan1'
            else:
                self.logger.error("No WiFi interface found")
                return None
        except Exception as e:
            self.logger.error(f"Error detecting WiFi interface: {e}")
            return None

    def stop_wifi(self, interface):
        """Stop WiFi interface"""
        try:
            self.logger.info(f"Stopping WiFi interface: {interface}")
            log_with_server(f"Stopping WiFi interface {interface}", self.hostname, self.device_ip)
            subprocess.run(
                ['sudo', 'ip', 'link', 'set', interface, 'down'],
                check=True,
                timeout=10
            )
            self.is_wifi_down = True
            return True
        except Exception as e:
            self.logger.error(f"Failed to stop WiFi: {e}")
            log_with_server(f"ERROR: Failed to stop WiFi: {str(e)}", self.hostname, self.device_ip)
            return False

    def start_wifi(self, interface):
        """Start WiFi interface"""
        try:
            self.logger.info(f"Starting WiFi interface: {interface}")
            log_with_server(f"Starting WiFi interface {interface}", self.hostname, self.device_ip)
            subprocess.run(
                ['sudo', 'ip', 'link', 'set', interface, 'up'],
                check=True,
                timeout=10
            )
            self.is_wifi_down = False
            return True
        except Exception as e:
            self.logger.error(f"Failed to start WiFi: {e}")
            log_with_server(f"ERROR: Failed to start WiFi: {str(e)}", self.hostname, self.device_ip)
            return False
    def reconnect_wifi(self, interface, wifi_down_time=1200):
        """
        Perform WiFi disconnect and reconnect cycle

        Args:
            interface: WiFi interface to reset
            wifi_down_time: Time to keep WiFi disabled (seconds)
        """
        self.logger.info(f"WiFi recovery: Stopping for {wifi_down_time} seconds...")
        log_with_server(
            f"WiFi recovery initiated: WiFi down for {wifi_down_time} seconds",
            self.hostname,
            self.device_ip
        )
        # Stop WiFi
        if not self.stop_wifi(interface):
            return False
        # Keep WiFi down for specified time
        wait_time = wifi_down_time
        while wait_time > 0:
            minutes = wait_time // 60
            seconds = wait_time % 60
            self.logger.info(f"WiFi will restart in {minutes}m {seconds}s")
            time.sleep(60)  # Check every minute
            wait_time -= 60
        # Restart WiFi
        if not self.start_wifi(interface):
            return False
        self.logger.info("WiFi has been restarted")
        log_with_server("WiFi successfully restarted", self.hostname, self.device_ip)
        # Reset failure counter
        self.consecutive_failures = 0
        return True

    def check_server_connection(self, server_host):
        """
        Check if server is reachable via ping

        Args:
            server_host: Server hostname or IP to ping

        Returns:
            bool: True if server is reachable, False otherwise
        """
        try:
            result = subprocess.run(
                ['ping', '-c', '1', '-W', '5', server_host],
                capture_output=True,
                timeout=10
            )
            return result.returncode == 0
        except Exception as e:
            self.logger.error(f"Ping check failed: {e}")
            return False
    def monitor_connection(self, server_host="10.76.140.17"):
        """
        Continuously monitor server connection and manage WiFi

        Args:
            server_host: Server hostname/IP to monitor
        """
        self.is_running = True
        wifi_interface = self.get_wifi_interface()
        if not wifi_interface:
            self.logger.error("Cannot monitor without WiFi interface")
            return
        self.logger.info(f"Starting connection monitor for {server_host} on {wifi_interface}")
        log_with_server(
            f"Connection monitor started for {server_host}",
            self.hostname,
            self.device_ip
        )
        while self.is_running:
            try:
                # Check if server is reachable
                if self.check_server_connection(server_host):
                    if self.consecutive_failures > 0:
                        self.consecutive_failures = 0
                        self.logger.info("Server connection restored")
                        log_with_server("Server connection restored", self.hostname, self.device_ip)
                else:
                    self.consecutive_failures += 1
                    self.logger.warning(
                        f"Connection lost: {self.consecutive_failures}/{self.failure_threshold} failures"
                    )
                    # If threshold reached, do WiFi recovery
                    if self.consecutive_failures >= self.failure_threshold:
                        self.logger.error(
                            f"Server unreachable for {self.failure_threshold} pings - initiating WiFi recovery"
                        )
                        # Perform WiFi recovery
                        if self.reconnect_wifi(wifi_interface, self.wifi_down_time):
                            self.logger.info("WiFi recovery completed successfully")
                        else:
                            self.logger.error("WiFi recovery failed")
                time.sleep(self.check_interval)
            except Exception as e:
                self.logger.error(f"Error in connection monitor: {e}")
                time.sleep(self.check_interval)

    def start_monitoring(self, server_host="10.76.140.17"):
        """
        Start background monitoring thread

        Args:
            server_host: Server to monitor
        """
        self.monitor_thread = threading.Thread(
            target=self.monitor_connection,
            args=(server_host,),
            daemon=True
        )
        self.monitor_thread.start()
        self.logger.info("WiFi recovery monitor thread started")

    def stop_monitoring(self):
        """Stop the monitoring thread"""
        self.is_running = False
        if self.monitor_thread:
            self.monitor_thread.join(timeout=5)
        self.logger.info("WiFi recovery monitor stopped")
# Global WiFi recovery manager instance
wifi_recovery_manager = None


def initialize_wifi_recovery(hostname, device_ip, server_host="10.76.140.17"):
    """Initialize and start WiFi recovery monitoring"""
    global wifi_recovery_manager
    try:
        wifi_recovery_manager = WiFiRecoveryManager(
            hostname=hostname,
            device_ip=device_ip,
            check_interval=60,
            failure_threshold=5
        )
        wifi_recovery_manager.start_monitoring(server_host)
        logging.info("WiFi recovery initialized")
        return wifi_recovery_manager
    except Exception as e:
        logging.error(f"Failed to initialize WiFi recovery: {e}")
        return None
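A sketch of how the recovery manager would be started and stopped; the hostname, IP, and server address below are illustrative (app.py presumably passes the monitoring server from config_settings):

```python
# Hypothetical usage sketch of wifi_recovery_module
from wifi_recovery_module import initialize_wifi_recovery

manager = initialize_wifi_recovery(
    hostname="pi-device-01",       # illustrative values
    device_ip="192.168.1.50",
    server_host="192.168.1.103",   # e.g. the monitoring server address
)

# The daemon thread now pings the server every 60 s. After 5 consecutive
# failures it takes WiFi down for 20 minutes, brings it back up, and
# resumes monitoring.

# On shutdown:
if manager:
    manager.stop_monitoring()
```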