Fix the scan quantity-update error (approved/rejected quantities are now set by the database BEFORE INSERT trigger instead of being recounted and rewritten on every scan) and rework the backup endpoints (download path handling, upload validation, and multi-schedule support)

This commit is contained in:
Quality System Admin
2025-11-05 21:25:02 +02:00
parent 9020f2c1cf
commit c91b7d0a4d
15 changed files with 4873 additions and 429 deletions

View File

@@ -473,36 +473,25 @@ def scan():
conn = get_db_connection()
cursor = conn.cursor()
# Always insert a new entry - each scan is a separate record
# Insert new entry - the BEFORE INSERT trigger 'set_quantities_scan1' will automatically
# calculate and set approved_quantity and rejected_quantity for this new record
insert_query = """
INSERT INTO scan1_orders (operator_code, CP_full_code, OC1_code, OC2_code, quality_code, date, time)
VALUES (%s, %s, %s, %s, %s, %s, %s)
"""
cursor.execute(insert_query, (operator_code, cp_code, oc1_code, oc2_code, defect_code, date, time))
conn.commit()
# Get the CP_base_code (first 10 characters of CP_full_code)
# Get the quantities from the newly inserted row for the flash message
cp_base_code = cp_code[:10]
# Count approved quantities (quality_code = 0) for this CP_base_code
cursor.execute("""
SELECT COUNT(*) FROM scan1_orders
WHERE CP_base_code = %s AND quality_code = 0
""", (cp_base_code,))
approved_count = cursor.fetchone()[0]
# Count rejected quantities (quality_code != 0) for this CP_base_code
cursor.execute("""
SELECT COUNT(*) FROM scan1_orders
WHERE CP_base_code = %s AND quality_code != 0
""", (cp_base_code,))
rejected_count = cursor.fetchone()[0]
# Update all records with the same CP_base_code with new quantities
cursor.execute("""
UPDATE scan1_orders
SET approved_quantity = %s, rejected_quantity = %s
WHERE CP_base_code = %s
""", (approved_count, rejected_count, cp_base_code))
SELECT approved_quantity, rejected_quantity
FROM scan1_orders
WHERE CP_full_code = %s
""", (cp_code,))
result = cursor.fetchone()
approved_count = result[0] if result else 0
rejected_count = result[1] if result else 0
# Flash appropriate message
if int(defect_code) == 0:
@@ -510,8 +499,6 @@ def scan():
else:
flash(f'❌ REJECTED scan recorded for {cp_code} (defect: {defect_code}). Total rejected: {rejected_count}')
# Commit the transaction
conn.commit()
conn.close()
except mariadb.Error as e:
@@ -566,35 +553,25 @@ def fg_scan():
cursor = conn.cursor()
# Always insert a new entry - each scan is a separate record
# Note: The trigger 'increment_approved_quantity_fg' will automatically
# update approved_quantity or rejected_quantity for all records with same CP_base_code
insert_query = """
INSERT INTO scanfg_orders (operator_code, CP_full_code, OC1_code, OC2_code, quality_code, date, time)
VALUES (%s, %s, %s, %s, %s, %s, %s)
"""
cursor.execute(insert_query, (operator_code, cp_code, oc1_code, oc2_code, defect_code, date, time))
conn.commit()
# Get the CP_base_code (first 10 characters of CP_full_code)
# Get the quantities from the newly inserted row for the flash message
cp_base_code = cp_code[:10]
# Count approved quantities (quality_code = 0) for this CP_base_code
cursor.execute("""
SELECT COUNT(*) FROM scanfg_orders
WHERE CP_base_code = %s AND quality_code = 0
""", (cp_base_code,))
approved_count = cursor.fetchone()[0]
# Count rejected quantities (quality_code != 0) for this CP_base_code
cursor.execute("""
SELECT COUNT(*) FROM scanfg_orders
WHERE CP_base_code = %s AND quality_code != 0
""", (cp_base_code,))
rejected_count = cursor.fetchone()[0]
# Update all records with the same CP_base_code with new quantities
cursor.execute("""
UPDATE scanfg_orders
SET approved_quantity = %s, rejected_quantity = %s
WHERE CP_base_code = %s
""", (approved_count, rejected_count, cp_base_code))
SELECT approved_quantity, rejected_quantity
FROM scanfg_orders
WHERE CP_full_code = %s
""", (cp_code,))
result = cursor.fetchone()
approved_count = result[0] if result else 0
rejected_count = result[1] if result else 0
# Flash appropriate message
if int(defect_code) == 0:
@@ -602,8 +579,6 @@ def fg_scan():
else:
flash(f'❌ REJECTED scan recorded for {cp_code} (defect: {defect_code}). Total rejected: {rejected_count}')
# Commit the transaction
conn.commit()
conn.close()
except mariadb.Error as e:
@@ -3628,25 +3603,16 @@ def api_backup_download(filename):
try:
from app.database_backup import DatabaseBackupManager
from flask import send_file
import os
backup_manager = DatabaseBackupManager()
backup_path = backup_manager.backup_path
file_path = os.path.join(backup_path, filename)
file_path = backup_manager.get_backup_file_path(filename)
# Security: ensure filename doesn't contain path traversal
if '..' in filename or '/' in filename:
return jsonify({
'success': False,
'message': 'Invalid filename'
}), 400
if os.path.exists(file_path):
if file_path:
return send_file(file_path, as_attachment=True, download_name=filename)
else:
return jsonify({
'success': False,
'message': 'Backup file not found'
'message': 'Backup file not found or invalid filename'
}), 404
except Exception as e:
return jsonify({
@@ -3677,18 +3643,56 @@ def api_backup_schedule():
"""Get or save backup schedule configuration"""
try:
from app.database_backup import DatabaseBackupManager
from app.backup_scheduler import get_backup_scheduler
backup_manager = DatabaseBackupManager()
if request.method == 'POST':
schedule = request.json
result = backup_manager.save_backup_schedule(schedule)
if result['success']:
# Reload the scheduler to apply new configuration
scheduler = get_backup_scheduler()
if scheduler:
scheduler.update_schedule()
info = scheduler.get_schedule_info()
if info and info.get('jobs'):
jobs = info['jobs']
result['message'] += f'. {len(jobs)} active schedule(s)'
return jsonify(result)
else:
schedule = backup_manager.get_backup_schedule()
# Auto-migrate legacy format to multi-schedule format
if 'schedules' not in schedule:
schedule = {
'schedules': [{
'id': 'default',
'name': 'Default Schedule',
'enabled': schedule.get('enabled', True),
'time': schedule.get('time', '02:00'),
'frequency': schedule.get('frequency', 'daily'),
'backup_type': schedule.get('backup_type', 'full'),
'retention_days': schedule.get('retention_days', 30)
}]
}
# Save migrated format
backup_manager.save_backup_schedule(schedule)
# Get scheduler info with all jobs
scheduler = get_backup_scheduler()
jobs = []
if scheduler:
info = scheduler.get_schedule_info()
jobs = info.get('jobs', []) if info else []
return jsonify({
'success': True,
'schedule': schedule
'schedule': schedule,
'jobs': jobs
})
except Exception as e:
return jsonify({
@@ -3718,9 +3722,6 @@ def api_backup_upload():
"""Upload an external backup file (superadmin only)"""
try:
from app.database_backup import DatabaseBackupManager
from werkzeug.utils import secure_filename
import os
from datetime import datetime
# Check if file was uploaded
if 'backup_file' not in request.files:
@@ -3738,79 +3739,302 @@ def api_backup_upload():
'message': 'No file selected'
}), 400
# Validate file extension
if not file.filename.lower().endswith('.sql'):
return jsonify({
'success': False,
'message': 'Invalid file format. Only .sql files are allowed.'
}), 400
# Get backup manager and backup path
# Use DatabaseBackupManager to handle upload
backup_manager = DatabaseBackupManager()
backup_path = backup_manager.backup_path
result = backup_manager.upload_backup(file)
# Ensure backup_path is a Path object
from pathlib import Path
if not isinstance(backup_path, Path):
backup_path = Path(backup_path)
# Create backup directory if it doesn't exist
backup_path.mkdir(parents=True, exist_ok=True)
# Generate secure filename with timestamp to avoid conflicts
original_filename = secure_filename(file.filename)
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
# If filename already starts with "backup_", keep it; otherwise add prefix
if original_filename.startswith('backup_'):
new_filename = f"{original_filename.rsplit('.', 1)[0]}_{timestamp}.sql"
else:
new_filename = f"backup_uploaded_{timestamp}_{original_filename}"
# Save file to backup directory
file_path = backup_path / new_filename
file.save(str(file_path))
# Get file size
file_size = file_path.stat().st_size
size_mb = round(file_size / (1024 * 1024), 2)
# Validate the uploaded file for integrity and compatibility
validation_result = backup_manager.validate_backup_file(new_filename)
if not validation_result['success']:
# Validation failed - remove the uploaded file
file_path.unlink() # Delete the invalid file
return jsonify({
'success': False,
'message': f'Validation failed: {validation_result["message"]}',
'validation_details': validation_result.get('details', {}),
'warnings': validation_result.get('warnings', [])
}), 400
# Build response with validation details
response = {
'success': True,
'message': 'Backup file uploaded and validated successfully',
'filename': new_filename,
'size': f'{size_mb} MB',
'path': str(file_path),
'validation': {
'status': 'passed',
'message': validation_result['message'],
'details': validation_result.get('details', {}),
'warnings': validation_result.get('warnings', [])
}
}
# Add warning flag if there are warnings
if validation_result.get('warnings'):
response['message'] = f'Backup uploaded with warnings: {"; ".join(validation_result["warnings"])}'
return jsonify(response)
# Return appropriate status code
status_code = 200 if result['success'] else 400
return jsonify(result), status_code
except Exception as e:
return jsonify({
'success': False,
'message': f'Upload failed: {str(e)}'
}), 500
@bp.route('/api/backup/create-data-only', methods=['POST'])
@admin_plus
def api_backup_create_data_only():
    """Create a data-only backup (no schema, triggers, or structure).

    Delegates entirely to DatabaseBackupManager and returns its result as JSON.
    """
    try:
        from app.database_backup import DatabaseBackupManager
        manager = DatabaseBackupManager()
        return jsonify(manager.create_data_only_backup())
    except Exception as e:
        # Surface any backup-layer failure as a JSON 500 response.
        error_payload = {
            'success': False,
            'message': f'Data backup failed: {str(e)}'
        }
        return jsonify(error_payload), 500
@bp.route('/api/backup/restore-data-only/<filename>', methods=['POST'])
@superadmin_only
def api_backup_restore_data_only(filename):
    """Restore data from a data-only backup file (superadmin only).

    Assumes the database schema already exists; only table data is restored.

    Args:
        filename: Name of a backup file inside the backup directory
            (taken from the URL, so it is untrusted input).

    Returns:
        JSON result from the restore operation, a 400 for an invalid
        filename, or a 500 error payload on failure.
    """
    try:
        # Security: reject path-traversal attempts in the URL-supplied
        # filename, consistent with the check done for backup downloads.
        if '..' in filename or '/' in filename:
            return jsonify({
                'success': False,
                'message': 'Invalid filename'
            }), 400
        from app.database_backup import DatabaseBackupManager
        backup_manager = DatabaseBackupManager()
        result = backup_manager.restore_data_only(filename)
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Data restore failed: {str(e)}'
        }), 500
@bp.route('/api/backup/schedule-info', methods=['GET'])
@admin_plus
def api_backup_schedule_info():
    """Return detailed backup schedule information, including next run time."""
    try:
        from app.backup_scheduler import get_backup_scheduler
        scheduler = get_backup_scheduler()
        # The scheduler is created at app startup; a missing one is a server-side problem.
        if not scheduler:
            return jsonify({
                'success': False,
                'message': 'Backup scheduler not initialized'
            }), 500
        return jsonify({
            'success': True,
            'info': scheduler.get_schedule_info()
        })
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to get schedule info: {str(e)}'
        }), 500
@bp.route('/api/backup/reload-schedule', methods=['POST'])
@admin_plus
def api_backup_reload_schedule():
    """Reload the backup schedule after configuration changes.

    Returns:
        JSON with a success flag, a status message, and the next scheduled
        run time (None if no job is scheduled); 500 if the scheduler is
        unavailable or the reload fails.
    """
    try:
        from app.backup_scheduler import get_backup_scheduler
        scheduler = get_backup_scheduler()
        if not scheduler:
            return jsonify({
                'success': False,
                'message': 'Backup scheduler not initialized'
            }), 500
        scheduler.update_schedule()
        info = scheduler.get_schedule_info()
        # Use .get() so an info dict without 'next_run_time' does not raise
        # KeyError and turn a successful reload into a 500 response.
        next_run = info.get('next_run_time') if info else None
        message = 'Backup schedule reloaded successfully'
        if next_run:
            message += f'. Next backup: {next_run}'
        return jsonify({
            'success': True,
            'message': message,
            'next_run_time': next_run
        })
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to reload schedule: {str(e)}'
        }), 500
@bp.route('/api/backup/schedule/toggle/<schedule_id>', methods=['POST'])
@admin_plus
def api_backup_schedule_toggle(schedule_id):
    """Toggle a specific backup schedule on or off.

    Supports both the multi-schedule configuration format ('schedules' list)
    and the legacy single-schedule format.
    """
    try:
        from app.database_backup import DatabaseBackupManager
        from app.backup_scheduler import get_backup_scheduler
        manager = DatabaseBackupManager()
        config = manager.get_backup_schedule()
        if isinstance(config, dict) and 'schedules' in config:
            # Multi-schedule format: flip the matching entry's enabled flag.
            entries = config['schedules']
            target = next((s for s in entries if s.get('id') == schedule_id), None)
            if target is None:
                return jsonify({
                    'success': False,
                    'message': f'Schedule {schedule_id} not found'
                }), 404
            target['enabled'] = not target.get('enabled', False)
            result = manager.save_backup_schedule(config)
            if result['success']:
                # Apply the change immediately by reloading the scheduler.
                scheduler = get_backup_scheduler()
                if scheduler:
                    scheduler.update_schedule()
                enabled_count = sum(1 for s in entries if s.get('enabled', False))
                result['message'] = f'Schedule {schedule_id} toggled successfully. {enabled_count} schedule(s) active.'
            return jsonify(result)
        # Legacy single-schedule format: flip the top-level enabled flag.
        config['enabled'] = not config.get('enabled', False)
        result = manager.save_backup_schedule(config)
        if result['success']:
            scheduler = get_backup_scheduler()
            if scheduler:
                scheduler.update_schedule()
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to toggle schedule: {str(e)}'
        }), 500
@bp.route('/api/backup/schedule/delete/<schedule_id>', methods=['DELETE'])
@admin_plus
def api_backup_schedule_delete(schedule_id):
    """Delete a specific backup schedule.

    The 'default' schedule cannot be deleted, and the configuration must be
    in the multi-schedule format.
    """
    try:
        from app.database_backup import DatabaseBackupManager
        from app.backup_scheduler import get_backup_scheduler
        # Guard: the default schedule must always exist.
        if schedule_id == 'default':
            return jsonify({
                'success': False,
                'message': 'Cannot delete the default schedule'
            }), 400
        manager = DatabaseBackupManager()
        config = manager.get_backup_schedule()
        # Guard: only the multi-schedule format supports deletion.
        if not (isinstance(config, dict) and 'schedules' in config):
            return jsonify({
                'success': False,
                'message': 'Multi-schedule format not enabled'
            }), 400
        entries = config['schedules']
        remaining = [s for s in entries if s.get('id') != schedule_id]
        if len(remaining) == len(entries):
            return jsonify({
                'success': False,
                'message': f'Schedule {schedule_id} not found'
            }), 404
        config['schedules'] = remaining
        result = manager.save_backup_schedule(config)
        if result['success']:
            # Apply the change immediately by reloading the scheduler.
            scheduler = get_backup_scheduler()
            if scheduler:
                scheduler.update_schedule()
            result['message'] = f'Schedule {schedule_id} deleted successfully'
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to delete schedule: {str(e)}'
        }), 500
@bp.route('/api/backup/schedule/add', methods=['POST'])
@admin_plus
def api_backup_schedule_add():
    """Add a new backup schedule.

    Expects a JSON body with 'time', 'frequency', 'backup_type' and
    'retention_days'; 'id', 'name' and 'enabled' are optional. Migrates a
    legacy single-schedule configuration to the multi-schedule format
    before appending the new entry.

    Returns:
        JSON save result (with the new schedule_id on success), a 400 for
        an invalid request body, or a 500 error payload on failure.
    """
    try:
        from app.database_backup import DatabaseBackupManager
        from app.backup_scheduler import get_backup_scheduler
        import uuid
        new_schedule = request.json
        # Robustness: reject a missing or non-object JSON body up front
        # instead of crashing with a TypeError below (which would surface
        # to the client as a 500 instead of a 400).
        if not isinstance(new_schedule, dict):
            return jsonify({
                'success': False,
                'message': 'Invalid or missing JSON body'
            }), 400
        # Validate required fields
        required_fields = ['time', 'frequency', 'backup_type', 'retention_days']
        for field in required_fields:
            if field not in new_schedule:
                return jsonify({
                    'success': False,
                    'message': f'Missing required field: {field}'
                }), 400
        # Robustness: retention_days must be an integer; report it as a
        # client error rather than letting int() raise into the 500 handler.
        try:
            retention_days = int(new_schedule['retention_days'])
        except (TypeError, ValueError):
            return jsonify({
                'success': False,
                'message': 'retention_days must be an integer'
            }), 400
        backup_manager = DatabaseBackupManager()
        schedule_config = backup_manager.get_backup_schedule()
        # Migrate a legacy single-schedule configuration to the
        # multi-schedule format so the new entry can be appended.
        if 'schedules' not in schedule_config:
            schedule_config = {
                'schedules': [{
                    'id': 'default',
                    'name': 'Default Schedule',
                    'enabled': schedule_config.get('enabled', True),
                    'time': schedule_config.get('time', '02:00'),
                    'frequency': schedule_config.get('frequency', 'daily'),
                    'backup_type': schedule_config.get('backup_type', 'full'),
                    'retention_days': schedule_config.get('retention_days', 30)
                }]
            }
        # Generate a short unique ID unless the caller supplied one.
        schedule_id = new_schedule.get('id') or str(uuid.uuid4())[:8]
        schedule_config['schedules'].append({
            'id': schedule_id,
            'name': new_schedule.get('name', f'Schedule {schedule_id}'),
            'enabled': new_schedule.get('enabled', True),
            'time': new_schedule['time'],
            'frequency': new_schedule['frequency'],
            'backup_type': new_schedule['backup_type'],
            'retention_days': retention_days
        })
        # Save configuration
        result = backup_manager.save_backup_schedule(schedule_config)
        if result['success']:
            # Reload the scheduler so the new entry takes effect immediately.
            scheduler = get_backup_scheduler()
            if scheduler:
                scheduler.update_schedule()
            result['message'] = f'Schedule {schedule_id} added successfully'
            result['schedule_id'] = schedule_id
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Failed to add schedule: {str(e)}'
        }), 500