Fix the scan quantity counting error and the backup scheduling/restore problems
@@ -19,5 +19,10 @@ def create_app():

    # Add 'now' function to Jinja2 globals
    app.jinja_env.globals['now'] = datetime.now

    # Initialize automatic backup scheduler
    from app.backup_scheduler import init_backup_scheduler
    init_backup_scheduler(app)
    print("✅ Automatic backup scheduler initialized")

    return app
py_app/app/backup_scheduler.py (new file, 296 lines)
@@ -0,0 +1,296 @@
"""
|
||||
Automated Backup Scheduler
|
||||
Quality Recticel Application
|
||||
|
||||
This module manages automatic backup execution based on the configured schedule.
|
||||
Uses APScheduler to run backups at specified times.
|
||||
"""
|
||||
|
||||
from apscheduler.schedulers.background import BackgroundScheduler
|
||||
from apscheduler.triggers.cron import CronTrigger
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
# Configure logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BackupScheduler:
|
||||
"""Manages automatic backup scheduling"""
|
||||
|
||||
def __init__(self, app=None):
|
||||
"""
|
||||
Initialize the backup scheduler
|
||||
|
||||
Args:
|
||||
app: Flask application instance
|
||||
"""
|
||||
self.scheduler = None
|
||||
self.app = app
|
||||
self.job_prefix = 'scheduled_backup'
|
||||
|
||||
if app is not None:
|
||||
self.init_app(app)
|
||||
|
||||
def init_app(self, app):
|
||||
"""
|
||||
Initialize scheduler with Flask app context
|
||||
|
||||
Args:
|
||||
app: Flask application instance
|
||||
"""
|
||||
self.app = app
|
||||
|
||||
# Create scheduler
|
||||
self.scheduler = BackgroundScheduler(
|
||||
daemon=True,
|
||||
timezone='Europe/Bucharest' # Adjust to your timezone
|
||||
)
|
||||
|
||||
# Load and apply schedule from configuration
|
||||
self.update_schedule()
|
||||
|
||||
# Start scheduler
|
||||
self.scheduler.start()
|
||||
logger.info("Backup scheduler started")
|
||||
|
||||
# Register shutdown handler
|
||||
import atexit
|
||||
atexit.register(lambda: self.scheduler.shutdown())
|
||||
|
||||
    def execute_scheduled_backup(self, schedule_id, backup_type):
        """
        Execute a backup based on the schedule configuration
        This method runs in the scheduler thread

        Args:
            schedule_id: Identifier for the schedule
            backup_type: Type of backup ('full' or 'data-only')
        """
        try:
            from app.database_backup import DatabaseBackupManager

            with self.app.app_context():
                backup_manager = DatabaseBackupManager()

                logger.info(f"Starting scheduled {backup_type} backup (schedule: {schedule_id})...")

                # Execute appropriate backup
                if backup_type == 'data-only':
                    result = backup_manager.create_data_only_backup(backup_name='scheduled')
                else:
                    result = backup_manager.create_backup(backup_name='scheduled')

                if result['success']:
                    logger.info(f"✅ Scheduled backup completed: {result['filename']} ({result['size']})")

                    # Clean up old backups based on retention policy
                    schedule = backup_manager.get_backup_schedule()
                    schedules = schedule.get('schedules', []) if isinstance(schedule, dict) and 'schedules' in schedule else []

                    # Find the schedule that triggered this backup
                    current_schedule = next((s for s in schedules if s.get('id') == schedule_id), None)
                    if current_schedule:
                        retention_days = current_schedule.get('retention_days', 30)
                        cleanup_result = backup_manager.cleanup_old_backups(retention_days)

                        if cleanup_result['success'] and cleanup_result['deleted_count'] > 0:
                            logger.info(f"🗑️ Cleaned up {cleanup_result['deleted_count']} old backup(s)")
                else:
                    logger.error(f"❌ Scheduled backup failed: {result['message']}")

        except Exception as e:
            logger.error(f"❌ Error during scheduled backup: {e}", exc_info=True)

    def update_schedule(self):
        """
        Reload schedule from configuration and update scheduler jobs
        Supports multiple schedules
        """
        try:
            from app.database_backup import DatabaseBackupManager

            with self.app.app_context():
                backup_manager = DatabaseBackupManager()
                schedule_config = backup_manager.get_backup_schedule()

                # Remove all existing backup jobs
                for job in self.scheduler.get_jobs():
                    if job.id.startswith(self.job_prefix):
                        self.scheduler.remove_job(job.id)

                # Handle new multi-schedule format
                if isinstance(schedule_config, dict) and 'schedules' in schedule_config:
                    schedules = schedule_config['schedules']

                    for schedule in schedules:
                        if not schedule.get('enabled', False):
                            continue

                        schedule_id = schedule.get('id', 'default')
                        time_str = schedule.get('time', '02:00')
                        frequency = schedule.get('frequency', 'daily')
                        backup_type = schedule.get('backup_type', 'full')

                        # Parse time
                        hour, minute = map(int, time_str.split(':'))

                        # Create appropriate trigger
                        if frequency == 'daily':
                            trigger = CronTrigger(hour=hour, minute=minute)
                        elif frequency == 'weekly':
                            trigger = CronTrigger(day_of_week='sun', hour=hour, minute=minute)
                        elif frequency == 'monthly':
                            trigger = CronTrigger(day=1, hour=hour, minute=minute)
                        else:
                            logger.error(f"Unknown frequency: {frequency}")
                            continue

                        # Add job with unique ID
                        job_id = f"{self.job_prefix}_{schedule_id}"
                        self.scheduler.add_job(
                            func=self.execute_scheduled_backup,
                            trigger=trigger,
                            args=[schedule_id, backup_type],
                            id=job_id,
                            name=f'Scheduled {backup_type} backup ({schedule_id})',
                            replace_existing=True
                        )

                        logger.info(f"✅ Schedule '{schedule_id}': {backup_type} backup {frequency} at {time_str}")

                # Handle legacy single-schedule format (backward compatibility)
                elif isinstance(schedule_config, dict) and schedule_config.get('enabled', False):
                    time_str = schedule_config.get('time', '02:00')
                    frequency = schedule_config.get('frequency', 'daily')
                    backup_type = schedule_config.get('backup_type', 'full')

                    hour, minute = map(int, time_str.split(':'))

                    if frequency == 'daily':
                        trigger = CronTrigger(hour=hour, minute=minute)
                    elif frequency == 'weekly':
                        trigger = CronTrigger(day_of_week='sun', hour=hour, minute=minute)
                    elif frequency == 'monthly':
                        trigger = CronTrigger(day=1, hour=hour, minute=minute)
                    else:
                        logger.error(f"Unknown frequency: {frequency}")
                        return

                    job_id = f"{self.job_prefix}_default"
                    self.scheduler.add_job(
                        func=self.execute_scheduled_backup,
                        trigger=trigger,
                        args=['default', backup_type],
                        id=job_id,
                        name=f'Scheduled {backup_type} backup',
                        replace_existing=True
                    )

                    logger.info(f"✅ Backup schedule configured: {backup_type} backup {frequency} at {time_str}")

        except Exception as e:
            logger.error(f"Error updating backup schedule: {e}", exc_info=True)

    def get_next_run_time(self, schedule_id='default'):
        """
        Get the next scheduled run time for a specific schedule

        Args:
            schedule_id: Identifier for the schedule

        Returns:
            datetime or None: Next run time if job exists
        """
        if not self.scheduler:
            return None

        job_id = f"{self.job_prefix}_{schedule_id}"
        job = self.scheduler.get_job(job_id)
        if job:
            return job.next_run_time
        return None

    def get_schedule_info(self):
        """
        Get information about all schedules

        Returns:
            dict: Schedule information including next run times for all jobs
        """
        try:
            from app.database_backup import DatabaseBackupManager

            with self.app.app_context():
                backup_manager = DatabaseBackupManager()
                schedule_config = backup_manager.get_backup_schedule()

                # Get all backup jobs
                jobs_info = []
                for job in self.scheduler.get_jobs():
                    if job.id.startswith(self.job_prefix):
                        jobs_info.append({
                            'id': job.id.replace(f"{self.job_prefix}_", ""),
                            'name': job.name,
                            'next_run_time': job.next_run_time.strftime('%Y-%m-%d %H:%M:%S') if job.next_run_time else None
                        })

                return {
                    'schedule': schedule_config,
                    'jobs': jobs_info,
                    'scheduler_running': self.scheduler.running if self.scheduler else False,
                    'total_jobs': len(jobs_info)
                }
        except Exception as e:
            logger.error(f"Error getting schedule info: {e}")
            return None

    def trigger_backup_now(self):
        """
        Manually trigger a backup immediately (outside of schedule)

        Returns:
            dict: Result of backup operation
        """
        try:
            logger.info("Manual backup trigger requested")
            # execute_scheduled_backup() requires a schedule id and backup type;
            # pass placeholder values for a manually triggered full backup
            self.execute_scheduled_backup('manual', 'full')
            return {
                'success': True,
                'message': 'Backup triggered successfully'
            }
        except Exception as e:
            logger.error(f"Error triggering manual backup: {e}")
            return {
                'success': False,
                'message': f'Failed to trigger backup: {str(e)}'
            }


# Global scheduler instance (initialized in __init__.py)
backup_scheduler = None


def init_backup_scheduler(app):
    """
    Initialize the global backup scheduler instance

    Args:
        app: Flask application instance

    Returns:
        BackupScheduler: Initialized scheduler instance
    """
    global backup_scheduler
    backup_scheduler = BackupScheduler(app)
    return backup_scheduler


def get_backup_scheduler():
    """
    Get the global backup scheduler instance

    Returns:
        BackupScheduler or None: Scheduler instance if initialized
    """
    return backup_scheduler
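
For reference, a minimal sketch of how this scheduler module can be exercised once create_app() has called init_backup_scheduler() as shown above; the snippet is illustrative and not part of the commit, and the `from app import create_app` import assumes the usual application-factory layout.

    # Illustrative use of the scheduler API from a Flask shell or small script
    from app import create_app
    from app.backup_scheduler import get_backup_scheduler

    app = create_app()
    scheduler = get_backup_scheduler()
    if scheduler:
        print(scheduler.get_schedule_info())            # config, jobs, next run times
        print(scheduler.get_next_run_time('default'))   # next run of the default schedule
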
@@ -281,6 +281,95 @@ class DatabaseBackupManager:
        except Exception as e:
            print(f"Error removing backup metadata: {e}")

    def create_data_only_backup(self, backup_name=None):
        """
        Create a data-only backup (no schema, triggers, or structure)
        Only exports INSERT statements for existing tables

        Args:
            backup_name (str, optional): Custom name for the backup file

        Returns:
            dict: Result with success status, message, and backup file path
        """
        try:
            if not self.config:
                return {
                    'success': False,
                    'message': 'Database configuration not loaded'
                }

            # Generate backup filename with data_only prefix
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            if backup_name:
                filename = f"data_only_{backup_name}_{timestamp}.sql"
            else:
                filename = f"data_only_{self.config['database']}_{timestamp}.sql"

            backup_file = os.path.join(self.backup_path, filename)

            # Build mysqldump command for data only
            # --no-create-info: Skip CREATE TABLE statements
            # --skip-triggers: Skip trigger definitions
            # --no-create-db: Skip CREATE DATABASE statement
            # --complete-insert: Include column names in INSERT (more reliable)
            # --extended-insert: Use multi-row INSERT for efficiency
            cmd = [
                'mysqldump',
                f"--host={self.config['host']}",
                f"--port={self.config['port']}",
                f"--user={self.config['user']}",
                f"--password={self.config['password']}",
                '--no-create-info',      # Skip table structure
                '--skip-triggers',       # Skip triggers
                '--no-create-db',        # Skip database creation
                '--complete-insert',     # Include column names
                '--extended-insert',     # Multi-row INSERTs
                '--single-transaction',
                '--skip-lock-tables',
                self.config['database']
            ]

            # Execute mysqldump and save to file
            with open(backup_file, 'w') as f:
                result = subprocess.run(
                    cmd,
                    stdout=f,
                    stderr=subprocess.PIPE,
                    text=True
                )

            if result.returncode == 0:
                # Get file size
                file_size = os.path.getsize(backup_file)
                file_size_mb = file_size / (1024 * 1024)

                # Save backup metadata
                self._save_backup_metadata(filename, file_size)

                return {
                    'success': True,
                    'message': 'Data-only backup created successfully',
                    'filename': filename,
                    'file_path': backup_file,
                    'size': f"{file_size_mb:.2f} MB",
                    'timestamp': timestamp
                }
            else:
                error_msg = result.stderr
                print(f"Data backup error: {error_msg}")
                return {
                    'success': False,
                    'message': f'Data backup failed: {error_msg}'
                }

        except Exception as e:
            print(f"Exception during data backup: {e}")
            return {
                'success': False,
                'message': f'Data backup failed: {str(e)}'
            }

    def restore_backup(self, filename):
        """
        Restore database from a backup file
@@ -345,6 +434,127 @@ class DatabaseBackupManager:
                'message': f'Restore failed: {str(e)}'
            }

    def restore_data_only(self, filename):
        """
        Restore data from a data-only backup file
        Assumes database schema already exists
        Truncates tables before inserting data to avoid duplicates

        Args:
            filename (str): Name of the data-only backup file to restore

        Returns:
            dict: Result with success status and message
        """
        try:
            # Security: ensure filename doesn't contain path traversal
            if '..' in filename or '/' in filename:
                return {
                    'success': False,
                    'message': 'Invalid filename'
                }

            file_path = os.path.join(self.backup_path, filename)

            if not os.path.exists(file_path):
                return {
                    'success': False,
                    'message': 'Backup file not found'
                }

            # First, disable foreign key checks and truncate all tables
            # This ensures clean data import without constraint violations
            try:
                conn = mariadb.connect(
                    host=self.config['host'],
                    port=int(self.config['port']),
                    user=self.config['user'],
                    password=self.config['password'],
                    database=self.config['database']
                )
                cursor = conn.cursor()

                # Disable foreign key checks
                cursor.execute("SET FOREIGN_KEY_CHECKS = 0;")

                # Get list of all tables in the database
                cursor.execute("SHOW TABLES;")
                tables = cursor.fetchall()

                # Truncate each table (except system tables)
                for (table_name,) in tables:
                    # Skip metadata and system tables
                    if table_name not in ['backups_metadata', 'backup_schedule']:
                        try:
                            cursor.execute(f"TRUNCATE TABLE `{table_name}`;")
                            print(f"Truncated table: {table_name}")
                        except Exception as e:
                            print(f"Warning: Could not truncate {table_name}: {e}")

                conn.commit()
                cursor.close()
                conn.close()

            except Exception as e:
                print(f"Warning during table truncation: {e}")
                # Continue anyway - the restore might still work

            # Build mysql restore command for data
            cmd = [
                'mysql',
                f"--host={self.config['host']}",
                f"--port={self.config['port']}",
                f"--user={self.config['user']}",
                f"--password={self.config['password']}",
                self.config['database']
            ]

            # Execute mysql restore
            with open(file_path, 'r') as f:
                result = subprocess.run(
                    cmd,
                    stdin=f,
                    stderr=subprocess.PIPE,
                    text=True
                )

            # Re-enable foreign key checks
            try:
                conn = mariadb.connect(
                    host=self.config['host'],
                    port=int(self.config['port']),
                    user=self.config['user'],
                    password=self.config['password'],
                    database=self.config['database']
                )
                cursor = conn.cursor()
                cursor.execute("SET FOREIGN_KEY_CHECKS = 1;")
                conn.commit()
                cursor.close()
                conn.close()
            except Exception as e:
                print(f"Warning: Could not re-enable foreign key checks: {e}")

            if result.returncode == 0:
                return {
                    'success': True,
                    'message': f'Data restored successfully from {filename}'
                }
            else:
                error_msg = result.stderr
                print(f"Data restore error: {error_msg}")
                return {
                    'success': False,
                    'message': f'Data restore failed: {error_msg}'
                }

        except Exception as e:
            print(f"Exception during data restore: {e}")
            return {
                'success': False,
                'message': f'Data restore failed: {str(e)}'
            }

    def get_backup_schedule(self):
        """Get current backup schedule configuration"""
        try:
@@ -352,13 +562,18 @@ class DatabaseBackupManager:

            if os.path.exists(schedule_file):
                with open(schedule_file, 'r') as f:
                    return json.load(f)
                    schedule = json.load(f)
                    # Ensure backup_type exists (for backward compatibility)
                    if 'backup_type' not in schedule:
                        schedule['backup_type'] = 'full'
                    return schedule

            # Default schedule
            return {
                'enabled': False,
                'time': '02:00',          # 2 AM
                'frequency': 'daily',     # daily, weekly, monthly
                'backup_type': 'full',    # full or data-only
                'retention_days': 30      # Keep backups for 30 days
            }

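
For context, the multi-schedule configuration that the scheduler's update_schedule() and the API endpoints further down operate on has roughly this shape; the field names are taken from the migration code in this commit, while the concrete values are only examples.

    # Example multi-schedule configuration (shape inferred from this commit; values illustrative)
    example_schedule_config = {
        'schedules': [
            {
                'id': 'default',
                'name': 'Default Schedule',
                'enabled': True,
                'time': '02:00',           # HH:MM, parsed via time_str.split(':')
                'frequency': 'daily',      # daily | weekly | monthly
                'backup_type': 'full',     # full | data-only
                'retention_days': 30
            },
            {
                'id': 'weekly-data',
                'name': 'Weekly data-only backup',
                'enabled': True,
                'time': '03:30',
                'frequency': 'weekly',
                'backup_type': 'data-only',
                'retention_days': 90
            }
        ]
    }
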
@@ -607,3 +822,112 @@ class DatabaseBackupManager:
                'success': False,
                'message': f'Cleanup failed: {str(e)}'
            }

    def upload_backup(self, uploaded_file):
        """
        Upload and validate an external backup file

        Args:
            uploaded_file: Werkzeug FileStorage object from request.files

        Returns:
            dict: Result with success status, filename, and validation details
        """
        try:
            from werkzeug.utils import secure_filename
            from pathlib import Path

            # Validate file extension
            if not uploaded_file.filename.lower().endswith('.sql'):
                return {
                    'success': False,
                    'message': 'Invalid file format. Only .sql files are allowed.'
                }

            # Ensure backup_path is a Path object
            backup_path = Path(self.backup_path)
            backup_path.mkdir(parents=True, exist_ok=True)

            # Generate secure filename with timestamp to avoid conflicts
            original_filename = secure_filename(uploaded_file.filename)
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

            # If filename already starts with "backup_", keep it; otherwise add prefix
            if original_filename.startswith('backup_'):
                new_filename = f"{original_filename.rsplit('.', 1)[0]}_{timestamp}.sql"
            else:
                new_filename = f"backup_uploaded_{timestamp}_{original_filename}"

            # Save file to backup directory
            file_path = backup_path / new_filename
            uploaded_file.save(str(file_path))

            # Get file size
            file_size = file_path.stat().st_size
            size_mb = round(file_size / (1024 * 1024), 2)

            # Validate the uploaded file for integrity and compatibility
            validation_result = self.validate_backup_file(new_filename)

            if not validation_result['success']:
                # Validation failed - remove the uploaded file
                file_path.unlink()  # Delete the invalid file
                return {
                    'success': False,
                    'message': f'Validation failed: {validation_result["message"]}',
                    'validation_details': validation_result.get('details', {}),
                    'warnings': validation_result.get('warnings', [])
                }

            # Build response with validation details
            response = {
                'success': True,
                'message': 'Backup file uploaded and validated successfully',
                'filename': new_filename,
                'size': f'{size_mb} MB',
                'path': str(file_path),
                'validation': {
                    'status': 'passed',
                    'message': validation_result['message'],
                    'details': validation_result.get('details', {}),
                    'warnings': validation_result.get('warnings', [])
                }
            }

            # Add warning flag if there are warnings
            if validation_result.get('warnings'):
                response['message'] = f'Backup uploaded with warnings: {"; ".join(validation_result["warnings"])}'

            # Save metadata
            self._save_backup_metadata(new_filename, file_size)

            return response

        except Exception as e:
            print(f"Error uploading backup: {e}")
            return {
                'success': False,
                'message': f'Upload failed: {str(e)}'
            }

    def get_backup_file_path(self, filename):
        """
        Get the full path to a backup file (with security validation)

        Args:
            filename (str): Name of the backup file

        Returns:
            str or None: Full file path if valid, None if security check fails
        """
        # Security: ensure filename doesn't contain path traversal
        if '..' in filename or '/' in filename:
            return None

        file_path = os.path.join(self.backup_path, filename)

        if os.path.exists(file_path):
            return file_path

        return None

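
A data-only backup produced by create_data_only_backup() is meant to be loaded back through restore_data_only() into an existing schema. A minimal round-trip sketch, assuming the manager's database configuration is already loaded (illustrative only, not part of the commit):

    # Illustrative round trip: data-only dump, then restore into an existing schema
    from app.database_backup import DatabaseBackupManager

    manager = DatabaseBackupManager()
    dump = manager.create_data_only_backup(backup_name='before_migration')
    if dump['success']:
        print('wrote', dump['filename'], dump['size'])
        # ... later, once the schema exists again ...
        print(manager.restore_data_only(dump['filename']))
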
@@ -394,56 +394,76 @@ def create_database_triggers():
    conn = mariadb.connect(**DB_CONFIG)
    cursor = conn.cursor()

    # Drop existing triggers if they exist
    # Drop existing triggers if they exist (old and new names)
    trigger_drops = [
        "DROP TRIGGER IF EXISTS increment_approved_quantity;",
        "DROP TRIGGER IF EXISTS increment_rejected_quantity;",
        "DROP TRIGGER IF EXISTS increment_approved_quantity_fg;",
        "DROP TRIGGER IF EXISTS increment_rejected_quantity_fg;"
        "DROP TRIGGER IF EXISTS increment_rejected_quantity_fg;",
        "DROP TRIGGER IF EXISTS set_quantities_scan1;",
        "DROP TRIGGER IF EXISTS set_quantities_fg;"
    ]

    for drop_query in trigger_drops:
        cursor.execute(drop_query)

    # Create trigger for scan1_orders approved quantity
    scan1_approved_trigger = """
    CREATE TRIGGER increment_approved_quantity
    AFTER INSERT ON scan1_orders
    # Create trigger for scan1_orders - BEFORE INSERT to set quantities
    scan1_trigger = """
    CREATE TRIGGER set_quantities_scan1
    BEFORE INSERT ON scan1_orders
    FOR EACH ROW
    BEGIN
        IF NEW.quality_code = 000 THEN
            UPDATE scan1_orders
            SET approved_quantity = approved_quantity + 1
            WHERE CP_base_code = NEW.CP_base_code;
        -- Count existing approved for this CP_base_code
        SET @approved = (SELECT COUNT(*) FROM scan1_orders
                         WHERE CP_base_code = LEFT(NEW.CP_full_code, 10)
                         AND quality_code = 0);

        -- Count existing rejected for this CP_base_code
        SET @rejected = (SELECT COUNT(*) FROM scan1_orders
                         WHERE CP_base_code = LEFT(NEW.CP_full_code, 10)
                         AND quality_code != 0);

        -- Add 1 to appropriate counter for this new row
        IF NEW.quality_code = 0 THEN
            SET NEW.approved_quantity = @approved + 1;
            SET NEW.rejected_quantity = @rejected;
        ELSE
            UPDATE scan1_orders
            SET rejected_quantity = rejected_quantity + 1
            WHERE CP_base_code = NEW.CP_base_code;
            SET NEW.approved_quantity = @approved;
            SET NEW.rejected_quantity = @rejected + 1;
        END IF;
    END;
    """
    cursor.execute(scan1_approved_trigger)
    print_success("Trigger 'increment_approved_quantity' created for scan1_orders")
    cursor.execute(scan1_trigger)
    print_success("Trigger 'set_quantities_scan1' created for scan1_orders")

    # Create trigger for scanfg_orders approved quantity
    scanfg_approved_trigger = """
    CREATE TRIGGER increment_approved_quantity_fg
    AFTER INSERT ON scanfg_orders
    # Create trigger for scanfg_orders - BEFORE INSERT to set quantities
    scanfg_trigger = """
    CREATE TRIGGER set_quantities_fg
    BEFORE INSERT ON scanfg_orders
    FOR EACH ROW
    BEGIN
        IF NEW.quality_code = 000 THEN
            UPDATE scanfg_orders
            SET approved_quantity = approved_quantity + 1
            WHERE CP_base_code = NEW.CP_base_code;
        -- Count existing approved for this CP_base_code
        SET @approved = (SELECT COUNT(*) FROM scanfg_orders
                         WHERE CP_base_code = LEFT(NEW.CP_full_code, 10)
                         AND quality_code = 0);

        -- Count existing rejected for this CP_base_code
        SET @rejected = (SELECT COUNT(*) FROM scanfg_orders
                         WHERE CP_base_code = LEFT(NEW.CP_full_code, 10)
                         AND quality_code != 0);

        -- Add 1 to appropriate counter for this new row
        IF NEW.quality_code = 0 THEN
            SET NEW.approved_quantity = @approved + 1;
            SET NEW.rejected_quantity = @rejected;
        ELSE
            UPDATE scanfg_orders
            SET rejected_quantity = rejected_quantity + 1
            WHERE CP_base_code = NEW.CP_base_code;
            SET NEW.approved_quantity = @approved;
            SET NEW.rejected_quantity = @rejected + 1;
        END IF;
    END;
    """
    cursor.execute(scanfg_approved_trigger)
    print_success("Trigger 'increment_approved_quantity_fg' created for scanfg_orders")
    cursor.execute(scanfg_trigger)
    print_success("Trigger 'set_quantities_fg' created for scanfg_orders")

    conn.commit()
    cursor.close()

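
In plain terms, the new BEFORE INSERT triggers count the rows already scanned for the same CP_base_code (the first 10 characters of CP_full_code) and store the updated running totals on the row being inserted. A rough Python sketch of that rule follows, using hypothetical data purely for illustration:

    # Rough Python equivalent of the set_quantities_* trigger logic (illustrative only)
    def quantities_for_new_scan(existing_rows, new_cp_full_code, new_quality_code):
        base = new_cp_full_code[:10]   # CP_base_code = LEFT(CP_full_code, 10)
        approved = sum(1 for r in existing_rows
                       if r['CP_base_code'] == base and r['quality_code'] == 0)
        rejected = sum(1 for r in existing_rows
                       if r['CP_base_code'] == base and r['quality_code'] != 0)
        if new_quality_code == 0:
            return approved + 1, rejected    # the new row counts as approved
        return approved, rejected + 1        # the new row counts as rejected
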
@@ -473,36 +473,25 @@ def scan():
        conn = get_db_connection()
        cursor = conn.cursor()

        # Always insert a new entry - each scan is a separate record
        # Insert new entry - the BEFORE INSERT trigger 'set_quantities_scan1' will automatically
        # calculate and set approved_quantity and rejected_quantity for this new record
        insert_query = """
            INSERT INTO scan1_orders (operator_code, CP_full_code, OC1_code, OC2_code, quality_code, date, time)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
        """
        cursor.execute(insert_query, (operator_code, cp_code, oc1_code, oc2_code, defect_code, date, time))
        conn.commit()

        # Get the CP_base_code (first 10 characters of CP_full_code)
        # Get the quantities from the newly inserted row for the flash message
        cp_base_code = cp_code[:10]

        # Count approved quantities (quality_code = 0) for this CP_base_code
        cursor.execute("""
            SELECT COUNT(*) FROM scan1_orders
            WHERE CP_base_code = %s AND quality_code = 0
        """, (cp_base_code,))
        approved_count = cursor.fetchone()[0]

        # Count rejected quantities (quality_code != 0) for this CP_base_code
        cursor.execute("""
            SELECT COUNT(*) FROM scan1_orders
            WHERE CP_base_code = %s AND quality_code != 0
        """, (cp_base_code,))
        rejected_count = cursor.fetchone()[0]

        # Update all records with the same CP_base_code with new quantities
        cursor.execute("""
            UPDATE scan1_orders
            SET approved_quantity = %s, rejected_quantity = %s
            WHERE CP_base_code = %s
        """, (approved_count, rejected_count, cp_base_code))
            SELECT approved_quantity, rejected_quantity
            FROM scan1_orders
            WHERE CP_full_code = %s
        """, (cp_code,))
        result = cursor.fetchone()
        approved_count = result[0] if result else 0
        rejected_count = result[1] if result else 0

        # Flash appropriate message
        if int(defect_code) == 0:
@@ -510,8 +499,6 @@ def scan():
        else:
            flash(f'❌ REJECTED scan recorded for {cp_code} (defect: {defect_code}). Total rejected: {rejected_count}')

        # Commit the transaction
        conn.commit()
        conn.close()

    except mariadb.Error as e:
@@ -566,35 +553,25 @@ def fg_scan():
        cursor = conn.cursor()

        # Always insert a new entry - each scan is a separate record
        # Note: The trigger 'increment_approved_quantity_fg' will automatically
        # update approved_quantity or rejected_quantity for all records with same CP_base_code
        insert_query = """
            INSERT INTO scanfg_orders (operator_code, CP_full_code, OC1_code, OC2_code, quality_code, date, time)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
        """
        cursor.execute(insert_query, (operator_code, cp_code, oc1_code, oc2_code, defect_code, date, time))
        conn.commit()

        # Get the CP_base_code (first 10 characters of CP_full_code)
        # Get the quantities from the newly inserted row for the flash message
        cp_base_code = cp_code[:10]

        # Count approved quantities (quality_code = 0) for this CP_base_code
        cursor.execute("""
            SELECT COUNT(*) FROM scanfg_orders
            WHERE CP_base_code = %s AND quality_code = 0
        """, (cp_base_code,))
        approved_count = cursor.fetchone()[0]

        # Count rejected quantities (quality_code != 0) for this CP_base_code
        cursor.execute("""
            SELECT COUNT(*) FROM scanfg_orders
            WHERE CP_base_code = %s AND quality_code != 0
        """, (cp_base_code,))
        rejected_count = cursor.fetchone()[0]

        # Update all records with the same CP_base_code with new quantities
        cursor.execute("""
            UPDATE scanfg_orders
            SET approved_quantity = %s, rejected_quantity = %s
            WHERE CP_base_code = %s
        """, (approved_count, rejected_count, cp_base_code))
            SELECT approved_quantity, rejected_quantity
            FROM scanfg_orders
            WHERE CP_full_code = %s
        """, (cp_code,))
        result = cursor.fetchone()
        approved_count = result[0] if result else 0
        rejected_count = result[1] if result else 0

        # Flash appropriate message
        if int(defect_code) == 0:
@@ -602,8 +579,6 @@ def fg_scan():
        else:
            flash(f'❌ REJECTED scan recorded for {cp_code} (defect: {defect_code}). Total rejected: {rejected_count}')

        # Commit the transaction
        conn.commit()
        conn.close()

    except mariadb.Error as e:
@@ -3628,25 +3603,16 @@ def api_backup_download(filename):
    try:
        from app.database_backup import DatabaseBackupManager
        from flask import send_file
        import os

        backup_manager = DatabaseBackupManager()
        backup_path = backup_manager.backup_path
        file_path = os.path.join(backup_path, filename)
        file_path = backup_manager.get_backup_file_path(filename)

        # Security: ensure filename doesn't contain path traversal
        if '..' in filename or '/' in filename:
            return jsonify({
                'success': False,
                'message': 'Invalid filename'
            }), 400

        if os.path.exists(file_path):
        if file_path:
            return send_file(file_path, as_attachment=True, download_name=filename)
        else:
            return jsonify({
                'success': False,
                'message': 'Backup file not found'
                'message': 'Backup file not found or invalid filename'
            }), 404
    except Exception as e:
        return jsonify({
@@ -3677,18 +3643,56 @@ def api_backup_schedule():
    """Get or save backup schedule configuration"""
    try:
        from app.database_backup import DatabaseBackupManager
        from app.backup_scheduler import get_backup_scheduler

        backup_manager = DatabaseBackupManager()

        if request.method == 'POST':
            schedule = request.json
            result = backup_manager.save_backup_schedule(schedule)

            if result['success']:
                # Reload the scheduler to apply new configuration
                scheduler = get_backup_scheduler()
                if scheduler:
                    scheduler.update_schedule()
                    info = scheduler.get_schedule_info()

                    if info and info.get('jobs'):
                        jobs = info['jobs']
                        result['message'] += f'. {len(jobs)} active schedule(s)'

            return jsonify(result)
        else:
            schedule = backup_manager.get_backup_schedule()

            # Auto-migrate legacy format to multi-schedule format
            if 'schedules' not in schedule:
                schedule = {
                    'schedules': [{
                        'id': 'default',
                        'name': 'Default Schedule',
                        'enabled': schedule.get('enabled', True),
                        'time': schedule.get('time', '02:00'),
                        'frequency': schedule.get('frequency', 'daily'),
                        'backup_type': schedule.get('backup_type', 'full'),
                        'retention_days': schedule.get('retention_days', 30)
                    }]
                }
                # Save migrated format
                backup_manager.save_backup_schedule(schedule)

            # Get scheduler info with all jobs
            scheduler = get_backup_scheduler()
            jobs = []
            if scheduler:
                info = scheduler.get_schedule_info()
                jobs = info.get('jobs', []) if info else []

            return jsonify({
                'success': True,
                'schedule': schedule
                'schedule': schedule,
                'jobs': jobs
            })
    except Exception as e:
        return jsonify({
@@ -3718,9 +3722,6 @@ def api_backup_upload():
    """Upload an external backup file (superadmin only)"""
    try:
        from app.database_backup import DatabaseBackupManager
        from werkzeug.utils import secure_filename
        import os
        from datetime import datetime

        # Check if file was uploaded
        if 'backup_file' not in request.files:
@@ -3738,79 +3739,302 @@ def api_backup_upload():
                'message': 'No file selected'
            }), 400

        # Validate file extension
        if not file.filename.lower().endswith('.sql'):
            return jsonify({
                'success': False,
                'message': 'Invalid file format. Only .sql files are allowed.'
            }), 400

        # Get backup manager and backup path
        # Use DatabaseBackupManager to handle upload
        backup_manager = DatabaseBackupManager()
        backup_path = backup_manager.backup_path
        result = backup_manager.upload_backup(file)

        # Ensure backup_path is a Path object
        from pathlib import Path
        if not isinstance(backup_path, Path):
            backup_path = Path(backup_path)

        # Create backup directory if it doesn't exist
        backup_path.mkdir(parents=True, exist_ok=True)

        # Generate secure filename with timestamp to avoid conflicts
        original_filename = secure_filename(file.filename)
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        # If filename already starts with "backup_", keep it; otherwise add prefix
        if original_filename.startswith('backup_'):
            new_filename = f"{original_filename.rsplit('.', 1)[0]}_{timestamp}.sql"
        else:
            new_filename = f"backup_uploaded_{timestamp}_{original_filename}"

        # Save file to backup directory
        file_path = backup_path / new_filename
        file.save(str(file_path))

        # Get file size
        file_size = file_path.stat().st_size
        size_mb = round(file_size / (1024 * 1024), 2)

        # Validate the uploaded file for integrity and compatibility
        validation_result = backup_manager.validate_backup_file(new_filename)

        if not validation_result['success']:
            # Validation failed - remove the uploaded file
            file_path.unlink()  # Delete the invalid file
            return jsonify({
                'success': False,
                'message': f'Validation failed: {validation_result["message"]}',
                'validation_details': validation_result.get('details', {}),
                'warnings': validation_result.get('warnings', [])
            }), 400

        # Build response with validation details
        response = {
            'success': True,
            'message': 'Backup file uploaded and validated successfully',
            'filename': new_filename,
            'size': f'{size_mb} MB',
            'path': str(file_path),
            'validation': {
                'status': 'passed',
                'message': validation_result['message'],
                'details': validation_result.get('details', {}),
                'warnings': validation_result.get('warnings', [])
            }
        }

        # Add warning flag if there are warnings
        if validation_result.get('warnings'):
            response['message'] = f'Backup uploaded with warnings: {"; ".join(validation_result["warnings"])}'

        return jsonify(response)
        # Return appropriate status code
        status_code = 200 if result['success'] else 400
        return jsonify(result), status_code

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Upload failed: {str(e)}'
        }), 500

@bp.route('/api/backup/create-data-only', methods=['POST'])
@admin_plus
def api_backup_create_data_only():
    """Create a data-only backup (no schema, triggers, or structure)"""
    try:
        from app.database_backup import DatabaseBackupManager

        backup_manager = DatabaseBackupManager()
        result = backup_manager.create_data_only_backup()

        return jsonify(result)
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Data backup failed: {str(e)}'
        }), 500

@bp.route('/api/backup/restore-data-only/<filename>', methods=['POST'])
@superadmin_only
def api_backup_restore_data_only(filename):
    """Restore data from a data-only backup file (superadmin only)
    Assumes database schema already exists
    """
    try:
        from app.database_backup import DatabaseBackupManager

        backup_manager = DatabaseBackupManager()
        result = backup_manager.restore_data_only(filename)

        return jsonify(result)
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Data restore failed: {str(e)}'
        }), 500

@bp.route('/api/backup/schedule-info', methods=['GET'])
@admin_plus
def api_backup_schedule_info():
    """Get detailed backup schedule information including next run time"""
    try:
        from app.backup_scheduler import get_backup_scheduler

        scheduler = get_backup_scheduler()
        if not scheduler:
            return jsonify({
                'success': False,
                'message': 'Backup scheduler not initialized'
            }), 500

        info = scheduler.get_schedule_info()

        return jsonify({
            'success': True,
            'info': info
        })
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to get schedule info: {str(e)}'
        }), 500

@bp.route('/api/backup/reload-schedule', methods=['POST'])
@admin_plus
def api_backup_reload_schedule():
    """Reload the backup schedule after configuration changes"""
    try:
        from app.backup_scheduler import get_backup_scheduler

        scheduler = get_backup_scheduler()
        if not scheduler:
            return jsonify({
                'success': False,
                'message': 'Backup scheduler not initialized'
            }), 500

        scheduler.update_schedule()

        info = scheduler.get_schedule_info()
        # get_schedule_info() reports per-job next run times under 'jobs';
        # use the first job's next_run_time for the summary message
        jobs = info.get('jobs', []) if info else []
        next_run = jobs[0]['next_run_time'] if jobs else None

        message = 'Backup schedule reloaded successfully'
        if next_run:
            message += f'. Next backup: {next_run}'

        return jsonify({
            'success': True,
            'message': message,
            'next_run_time': next_run
        })
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to reload schedule: {str(e)}'
        }), 500

@bp.route('/api/backup/schedule/toggle/<schedule_id>', methods=['POST'])
@admin_plus
def api_backup_schedule_toggle(schedule_id):
    """Toggle a specific schedule on/off"""
    try:
        from app.database_backup import DatabaseBackupManager
        from app.backup_scheduler import get_backup_scheduler

        backup_manager = DatabaseBackupManager()
        schedule_config = backup_manager.get_backup_schedule()

        # Handle new multi-schedule format
        if isinstance(schedule_config, dict) and 'schedules' in schedule_config:
            schedules = schedule_config['schedules']

            # Find and toggle the schedule
            schedule_found = False
            for schedule in schedules:
                if schedule.get('id') == schedule_id:
                    schedule['enabled'] = not schedule.get('enabled', False)
                    schedule_found = True
                    break

            if not schedule_found:
                return jsonify({
                    'success': False,
                    'message': f'Schedule {schedule_id} not found'
                }), 404

            # Save updated configuration
            result = backup_manager.save_backup_schedule(schedule_config)

            if result['success']:
                # Reload scheduler
                scheduler = get_backup_scheduler()
                if scheduler:
                    scheduler.update_schedule()

                enabled_count = sum(1 for s in schedules if s.get('enabled', False))
                result['message'] = f'Schedule {schedule_id} toggled successfully. {enabled_count} schedule(s) active.'

            return jsonify(result)

        # Handle legacy single schedule format
        else:
            schedule_config['enabled'] = not schedule_config.get('enabled', False)
            result = backup_manager.save_backup_schedule(schedule_config)

            if result['success']:
                scheduler = get_backup_scheduler()
                if scheduler:
                    scheduler.update_schedule()

            return jsonify(result)

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to toggle schedule: {str(e)}'
        }), 500

@bp.route('/api/backup/schedule/delete/<schedule_id>', methods=['DELETE'])
@admin_plus
def api_backup_schedule_delete(schedule_id):
    """Delete a specific schedule"""
    try:
        from app.database_backup import DatabaseBackupManager
        from app.backup_scheduler import get_backup_scheduler

        # Don't allow deleting the default schedule
        if schedule_id == 'default':
            return jsonify({
                'success': False,
                'message': 'Cannot delete the default schedule'
            }), 400

        backup_manager = DatabaseBackupManager()
        schedule_config = backup_manager.get_backup_schedule()

        if isinstance(schedule_config, dict) and 'schedules' in schedule_config:
            schedules = schedule_config['schedules']

            # Remove the schedule
            new_schedules = [s for s in schedules if s.get('id') != schedule_id]

            if len(new_schedules) == len(schedules):
                return jsonify({
                    'success': False,
                    'message': f'Schedule {schedule_id} not found'
                }), 404

            schedule_config['schedules'] = new_schedules
            result = backup_manager.save_backup_schedule(schedule_config)

            if result['success']:
                # Reload scheduler
                scheduler = get_backup_scheduler()
                if scheduler:
                    scheduler.update_schedule()

                result['message'] = f'Schedule {schedule_id} deleted successfully'

            return jsonify(result)
        else:
            return jsonify({
                'success': False,
                'message': 'Multi-schedule format not enabled'
            }), 400

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to delete schedule: {str(e)}'
        }), 500

@bp.route('/api/backup/schedule/add', methods=['POST'])
@admin_plus
def api_backup_schedule_add():
    """Add a new schedule"""
    try:
        from app.database_backup import DatabaseBackupManager
        from app.backup_scheduler import get_backup_scheduler
        import uuid

        new_schedule = request.json

        # Validate required fields
        required_fields = ['time', 'frequency', 'backup_type', 'retention_days']
        for field in required_fields:
            if field not in new_schedule:
                return jsonify({
                    'success': False,
                    'message': f'Missing required field: {field}'
                }), 400

        backup_manager = DatabaseBackupManager()
        schedule_config = backup_manager.get_backup_schedule()

        # Migrate to multi-schedule format if needed
        if 'schedules' not in schedule_config:
            # Convert legacy format to multi-schedule
            schedule_config = {
                'schedules': [{
                    'id': 'default',
                    'name': 'Default Schedule',
                    'enabled': schedule_config.get('enabled', True),
                    'time': schedule_config.get('time', '02:00'),
                    'frequency': schedule_config.get('frequency', 'daily'),
                    'backup_type': schedule_config.get('backup_type', 'full'),
                    'retention_days': schedule_config.get('retention_days', 30)
                }]
            }

        # Generate unique ID
        schedule_id = new_schedule.get('id') or str(uuid.uuid4())[:8]

        # Add new schedule
        new_schedule_entry = {
            'id': schedule_id,
            'name': new_schedule.get('name', f'Schedule {schedule_id}'),
            'enabled': new_schedule.get('enabled', True),
            'time': new_schedule['time'],
            'frequency': new_schedule['frequency'],
            'backup_type': new_schedule['backup_type'],
            'retention_days': int(new_schedule['retention_days'])
        }

        schedule_config['schedules'].append(new_schedule_entry)

        # Save configuration
        result = backup_manager.save_backup_schedule(schedule_config)

        if result['success']:
            # Reload scheduler
            scheduler = get_backup_scheduler()
            if scheduler:
                scheduler.update_schedule()

            result['message'] = f'Schedule {schedule_id} added successfully'
            result['schedule_id'] = schedule_id

        return jsonify(result)

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to add schedule: {str(e)}'
        }), 500

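
As a quick way to exercise the new endpoints, something along these lines could be run against a development instance; the base URL, port, and authentication handling are assumptions, while the JSON fields match api_backup_schedule_add above.

    # Illustrative client calls for the new backup-schedule endpoints (not part of the commit)
    import requests

    base = 'http://localhost:5000'   # assumed development URL
    s = requests.Session()           # assumes login/session handling is done elsewhere

    # Add a weekly data-only schedule
    r = s.post(f'{base}/api/backup/schedule/add', json={
        'name': 'Weekly data-only',
        'time': '03:30',
        'frequency': 'weekly',
        'backup_type': 'data-only',
        'retention_days': 90
    })
    print(r.json())

    # Inspect all schedules and their next run times
    print(s.get(f'{base}/api/backup/schedule-info').json())
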
File diff suppressed because it is too large