Compare commits


13 Commits

Author SHA1 Message Date
DigiServer Developer
9cb32da13c Resolve merge conflict in docker-compose.yml 2025-09-09 15:29:21 +03:00
DigiServer Developer
0f34a47fa9 updated to receive player message 2025-09-09 15:24:35 +03:00
DigiServer Developer
a5ef5749b1 upload feedback to server 2025-09-08 14:04:13 +03:00
3cc703a7d1 add saved files 2025-09-04 13:14:10 -04:00
DigiServer Developer
505c8e268c Final working version: playlist API, persistent data, and deployment fixes 2025-08-25 16:40:17 +03:00
DigiServer Developer
6cefce81ef Update: media file handling and playlist filename consistency using secure_filename 2025-08-25 13:13:12 +03:00
359e330758 media 2025-08-25 00:35:53 -04:00
9c124dbd7e fix: remove video conversion, use uploaded videos as-is for playlist (bugfix) 2025-08-25 00:33:40 -04:00
DigiServer Developer
7b24245ddb updated the upload functionality to handle large files and added a new image file 2025-08-21 16:27:16 +03:00
DigiServer Developer
58694ff3f4 Update all changes before rebase and push 2025-08-21 16:26:53 +03:00
7f5991f60d fix: use correct endpoint for group media delete in manage_group.html 2025-08-20 15:11:22 -04:00
DigiServer Developer
5e4950563c Fix admin authentication and update port mapping
- Fix environment variable mismatch in create_default_user.py
- Now correctly uses ADMIN_USER and ADMIN_PASSWORD from docker-compose
- Maintains backward compatibility with DEFAULT_USER and DEFAULT_PASSWORD
- Change port mapping from 8880 to 80 for easier access
- Resolves login issues with admin user credentials
2025-08-11 17:01:58 +03:00
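The credential lookup described above can be pictured with a short sketch (illustrative only; the real change appears in the create_default_user.py diff further down this page):

import os

# Prefer the docker-compose variable names, fall back to the legacy ones,
# then to the built-in development defaults.
username = os.getenv('ADMIN_USER', os.getenv('DEFAULT_USER', 'admin'))
password = os.getenv('ADMIN_PASSWORD', os.getenv('DEFAULT_PASSWORD', '1234'))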
091e985ff2 fix: Simplified Docker deployment and fixed upload path resolution
🐳 Docker Configuration Improvements:
- Simplified docker-compose.yml to use single app folder bind mount
- Removed complex data folder mapping that caused path confusion
- Updated environment variables to match entrypoint script expectations
- Streamlined deployment for better reliability

🔧 Upload System Fixes:
- Fixed path resolution issues in uploads.py for containerized deployment
- Simplified upload folder path handling to work correctly in containers
- Removed complex absolute path conversion logic that caused file placement issues
- Ensured all file operations use consistent /app/static/uploads path

📁 File Processing Improvements:
- Fixed PPTX to JPG conversion workflow path handling
- Corrected PDF processing to save files in correct container location
- Improved video conversion path resolution
- Enhanced error handling and logging for upload operations

🚀 Production Benefits:
- Eliminates 404 errors for uploaded media files
- Ensures files are saved in correct locations within container
- Simplifies development and debugging with direct app folder mounting
- Maintains data consistency across container restarts

 This resolves the upload workflow issues where PPTX files were not
being correctly processed and saved to the expected locations.
2025-08-05 19:16:08 -04:00
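The simplified path handling this commit describes could be sketched roughly as below (assuming the container's /app working directory and the static/uploads layout named in the message; the helper name is illustrative, and the actual code appears in the uploads.py diff later on this page):

import os

def resolve_upload_folder(app):
    # Inside the container the working directory is /app, so the relative
    # 'static/uploads' path already resolves to /app/static/uploads and no
    # abspath() conversion is needed.
    upload_folder = app.config.get('UPLOAD_FOLDER', 'static/uploads')
    os.makedirs(upload_folder, exist_ok=True)
    return upload_folder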
20 changed files with 2002 additions and 195 deletions

View File

@@ -1,3 +1,10 @@
# ...existing code...
# Player feedback API
from models.player_feedback import PlayerFeedback
# --- API route to receive player feedback ---
import os
import click
import psutil
@@ -69,8 +76,12 @@ db_path = os.path.join(instance_dir, 'dashboard.db')
app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{db_path}'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Set maximum content length to 1GB
app.config['MAX_CONTENT_LENGTH'] = 2048 * 2048 * 2048 # 2GB, adjust as needed
# Set maximum content length to 2GB
app.config['MAX_CONTENT_LENGTH'] = 2048 * 1024 * 1024 # 2GB, adjust as needed
# Set longer timeouts for file processing
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 300 # 5 minutes for static files
app.config['PERMANENT_SESSION_LIFETIME'] = 1800 # 30 minutes for sessions
# Ensure the instance folder exists
os.makedirs(app.instance_path, exist_ok=True)
@@ -95,6 +106,54 @@ login_manager.login_view = 'login'
migrate = Migrate(app, db)
@app.route('/api/player-feedback', methods=['POST'])
def api_player_feedback():
from datetime import datetime
import dateutil.parser
data = request.get_json()
required_fields = ['player_name', 'quickconnect_code', 'message', 'status', 'timestamp']
if not all(field in data for field in required_fields):
return jsonify({'error': 'Missing required fields'}), 400
# Convert timestamp string to datetime object
try:
if isinstance(data['timestamp'], str):
timestamp = dateutil.parser.parse(data['timestamp'])
else:
timestamp = data['timestamp']
except (ValueError, TypeError):
return jsonify({'error': 'Invalid timestamp format'}), 400
feedback = PlayerFeedback(
player_name=data['player_name'],
quickconnect_code=data['quickconnect_code'],
message=data['message'],
status=data['status'],
timestamp=timestamp,
playlist_version=data.get('playlist_version'),
error_details=data.get('error_details')
)
db.session.add(feedback)
db.session.commit()
return jsonify({'success': True, 'feedback_id': feedback.id}), 200
# Add error handlers for better user experience
@app.errorhandler(413)
def request_entity_too_large(error):
flash('File too large. Please upload files smaller than 2GB.', 'danger')
return redirect(url_for('dashboard'))
@app.errorhandler(408)
def request_timeout(error):
flash('Request timed out. Please try uploading smaller files or try again later.', 'danger')
return redirect(url_for('dashboard'))
@app.errorhandler(500)
def internal_server_error(error):
flash('An internal server error occurred. Please try again or contact support.', 'danger')
return redirect(url_for('dashboard'))
@login_manager.user_loader
def load_user(user_id):
return db.session.get(User, int(user_id))
@@ -213,8 +272,23 @@ def upload_content():
flash('Please select a target type and target ID.', 'danger')
return redirect(url_for('upload_content'))
# Process uploaded files and get results
results = process_uploaded_files(app, files, media_type, duration, target_type, target_id)
try:
# Process uploaded files and get results
results = process_uploaded_files(app, files, media_type, duration, target_type, target_id)
# Check for any failed uploads
failed_files = [r for r in results if not r.get('success', True)]
if failed_files:
for failed in failed_files:
flash(f"Error uploading {failed.get('filename', 'unknown file')}: {failed.get('message', 'Unknown error')}", 'warning')
else:
flash('All files uploaded and processed successfully!', 'success')
except Exception as e:
print(f"Error in upload_content: {e}")
import traceback
traceback.print_exc()
flash(f'Upload failed: {str(e)}', 'danger')
return redirect(return_url)
@@ -295,7 +369,13 @@ def create_user():
def player_page(player_id):
player = db.session.get(Player, player_id)
content = get_player_content(player_id)
return render_template('player_page.html', player=player, content=content)
# Get last 5 feedback entries for this player
player_feedback = PlayerFeedback.query.filter_by(
player_name=player.username
).order_by(PlayerFeedback.timestamp.desc()).limit(5).all()
return render_template('player_page.html', player=player, content=content, player_feedback=player_feedback)
@app.route('/player/<int:player_id>/upload', methods=['POST'])
@login_required
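For reference, a client call against the new /api/player-feedback route might look like the sketch below (hostname and field values are illustrative; the real helper is the send_player_feedback function added to the player utilities later in this compare):

import datetime
import requests

feedback = {
    'player_name': 'lobby-screen',
    'quickconnect_code': 'ABC123',
    'message': 'player lobby-screen, is active, Playing v4',
    'status': 'active',
    'timestamp': datetime.datetime.now().isoformat(),
    'playlist_version': 4,
    'error_details': None,
}
response = requests.post('http://digiserver.local/api/player-feedback',
                         json=feedback, timeout=10)
print(response.status_code, response.json())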

1
app/migrations/README Normal file
View File

@@ -0,0 +1 @@
Single-database configuration for Flask.

View File

@@ -0,0 +1,50 @@
# A generic, single database configuration.
[alembic]
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic,flask_migrate
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[logger_flask_migrate]
level = INFO
handlers =
qualname = flask_migrate
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

113
app/migrations/env.py Normal file
View File

@@ -0,0 +1,113 @@
import logging
from logging.config import fileConfig
from flask import current_app
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
def get_engine():
try:
# this works with Flask-SQLAlchemy<3 and Alchemical
return current_app.extensions['migrate'].db.get_engine()
except (TypeError, AttributeError):
# this works with Flask-SQLAlchemy>=3
return current_app.extensions['migrate'].db.engine
def get_engine_url():
try:
return get_engine().url.render_as_string(hide_password=False).replace(
'%', '%%')
except AttributeError:
return str(get_engine().url).replace('%', '%%')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
config.set_main_option('sqlalchemy.url', get_engine_url())
target_db = current_app.extensions['migrate'].db
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def get_metadata():
if hasattr(target_db, 'metadatas'):
return target_db.metadatas[None]
return target_db.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=get_metadata(), literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
conf_args = current_app.extensions['migrate'].configure_args
if conf_args.get("process_revision_directives") is None:
conf_args["process_revision_directives"] = process_revision_directives
connectable = get_engine()
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=get_metadata(),
**conf_args
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()

View File

@@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@@ -0,0 +1,38 @@
"""Add PlayerFeedback table
Revision ID: 217eab16e4e4
Revises:
Create Date: 2025-09-08 11:30:26.742813
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '217eab16e4e4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('player_feedback',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('player_name', sa.String(length=255), nullable=False),
sa.Column('quickconnect_code', sa.String(length=255), nullable=False),
sa.Column('message', sa.Text(), nullable=False),
sa.Column('status', sa.String(length=50), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('playlist_version', sa.Integer(), nullable=True),
sa.Column('error_details', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('player_feedback')
# ### end Alembic commands ###

View File

@@ -2,4 +2,5 @@ from .user import User
from .player import Player
from .group import Group, group_player
from .content import Content
from .server_log import ServerLog
from .server_log import ServerLog
from .player_feedback import PlayerFeedback

View File

@@ -2,8 +2,9 @@
import os
def create_default_user(db, User, bcrypt):
username = os.getenv('DEFAULT_USER', 'admin')
password = os.getenv('DEFAULT_PASSWORD', '1234')
# Use ADMIN_USER and ADMIN_PASSWORD to match docker-compose environment variables
username = os.getenv('ADMIN_USER', os.getenv('DEFAULT_USER', 'admin'))
password = os.getenv('ADMIN_PASSWORD', os.getenv('DEFAULT_PASSWORD', '1234'))
hashed_password = bcrypt.generate_password_hash(password).decode('utf-8')
existing_user = User.query.filter_by(username=username).first()
if not existing_user:

View File

@@ -0,0 +1,11 @@
from extensions import db
class PlayerFeedback(db.Model):
id = db.Column(db.Integer, primary_key=True)
player_name = db.Column(db.String(255), nullable=False)
quickconnect_code = db.Column(db.String(255), nullable=False)
message = db.Column(db.Text, nullable=False)
status = db.Column(db.String(50), nullable=False)
timestamp = db.Column(db.DateTime, nullable=False)
playlist_version = db.Column(db.Integer, nullable=True)
error_details = db.Column(db.Text, nullable=True)

View File

@@ -18,6 +18,9 @@ alembic==1.14.1
Mako==1.3.8
greenlet==3.1.1
# Date parsing
python-dateutil==2.9.0
# File Processing
pdf2image==1.17.0
PyPDF2==3.0.1

Binary file not shown.

Before

Width:  |  Height:  |  Size: 153 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 52 KiB

View File

@@ -110,35 +110,44 @@
<ul class="list-group sortable-list" id="groupMediaList">
{% for media in content %}
<li class="list-group-item d-flex align-items-center {{ 'dark-mode' if theme == 'dark' else '' }}"
<li class="list-group-item d-flex align-items-center {{ 'dark-mode' if theme == 'dark' else '' }}"
draggable="true"
data-id="{{ media.id }}"
data-position="{{ loop.index0 }}">
<!-- Checkbox for bulk selection -->
<div class="me-2">
<input class="form-check-input media-checkbox"
type="checkbox"
name="selected_content"
<input class="form-check-input media-checkbox"
type="checkbox"
name="selected_content"
value="{{ media.id }}">
</div>
<!-- Drag handle -->
<div class="drag-handle me-2" title="Drag to reorder">
<i class="bi bi-grip-vertical"></i>
&#9776;
</div>
<div class="flex-grow-1">
<!-- Media Thumbnail and Name -->
<div class="flex-grow-1 mb-2 mb-md-0 d-flex align-items-center">
<img src="{{ url_for('static', filename='uploads/' ~ media.file_name) }}"
alt="thumbnail"
style="width: 48px; height: 48px; object-fit: cover; margin-right: 10px; border-radius: 4px;"
onerror="this.style.display='none';">
<p class="mb-0"><strong>Media Name:</strong> {{ media.file_name }}</p>
</div>
<form action="{{ url_for('edit_group_media', group_id=group.id, content_id=media.id) }}" method="post" class="d-flex align-items-center">
<<<<<<< HEAD
=======
>>>>>>> 2255cc2 (Show media thumbnails in manage group page, matching player page style)
<form action="{{ url_for('edit_group_media_route', group_id=group.id, content_id=media.id) }}" method="post" class="d-flex align-items-center">
<div class="input-group me-2">
<span class="input-group-text">seconds</span>
<input type="number" class="form-control {{ 'dark-mode' if theme == 'dark' else '' }}" name="duration" value="{{ media.duration }}" required>
</div>
<button type="submit" class="btn btn-warning me-2">Edit</button>
</form>
<form action="{{ url_for('delete_group_media', group_id=group.id, content_id=media.id) }}" method="post" style="display:inline;">
<form action="{{ url_for('delete_group_media_route', group_id=group.id, content_id=media.id) }}" method="post" style="display:inline;">
<button type="submit" class="btn btn-danger" onclick="return confirm('Are you sure you want to delete this media?');">Delete</button>
</form>
</li>

View File

@@ -54,20 +54,77 @@
<div class="container py-5">
<h1 class="text-center mb-4">Player Schedule for {{ player.username }}</h1>
<!-- Player Info Section -->
<div class="card mb-4 {% if theme == 'dark' %}dark-mode{% endif %}">
<div class="card-header bg-info text-white">
<h2>Player Info</h2>
<div class="row">
<!-- Player Info Section -->
<div class="col-md-6">
<div class="card mb-4 {% if theme == 'dark' %}dark-mode{% endif %}">
<div class="card-header bg-info text-white">
<h2>Player Info</h2>
</div>
<div class="card-body">
<p><strong>Player Name:</strong> {{ player.username }}</p>
<p><strong>Hostname:</strong> {{ player.hostname }}</p>
{% if current_user.role == 'admin' %}
<a href="{{ url_for('edit_player', player_id=player.id, return_url=url_for('player_page', player_id=player.id)) }}" class="btn btn-warning">Update</a>
<form action="{{ url_for('delete_player', player_id=player.id) }}" method="post" style="display:inline;">
<button type="submit" class="btn btn-danger" onclick="return confirm('Are you sure you want to delete this player?');">Delete</button>
</form>
{% endif %}
</div>
</div>
</div>
<div class="card-body">
<p><strong>Player Name:</strong> {{ player.username }}</p>
<p><strong>Hostname:</strong> {{ player.hostname }}</p>
{% if current_user.role == 'admin' %}
<a href="{{ url_for('edit_player', player_id=player.id, return_url=url_for('player_page', player_id=player.id)) }}" class="btn btn-warning">Update</a>
<form action="{{ url_for('delete_player', player_id=player.id) }}" method="post" style="display:inline;">
<button type="submit" class="btn btn-danger" onclick="return confirm('Are you sure you want to delete this player?');">Delete</button>
</form>
{% endif %}
<!-- Player Status Section -->
<div class="col-md-6">
<div class="card mb-4 {% if theme == 'dark' %}dark-mode{% endif %}">
<div class="card-header bg-success text-white">
<h2>Player Status</h2>
</div>
<div class="card-body">
{% if player_feedback %}
<div class="mb-3">
<strong>Current Status:</strong>
<span class="badge bg-{{ 'success' if player_feedback[0].status in ['active', 'playing'] else 'danger' }}">
{{ player_feedback[0].status|title }}
</span>
</div>
<div class="mb-3">
<strong>Last Activity:</strong> {{ player_feedback[0].timestamp.strftime('%Y-%m-%d %H:%M:%S') }}
</div>
<div class="mb-3">
<strong>Latest Message:</strong> {{ player_feedback[0].message }}
</div>
<!-- Recent Activity Log -->
<details>
<summary class="fw-bold mb-2">Recent Activity (Last 5)</summary>
<div class="mt-2">
{% for feedback in player_feedback %}
<div class="border-bottom pb-2 mb-2">
<div class="d-flex justify-content-between">
<span class="badge bg-{{ 'success' if feedback.status in ['active', 'playing'] else 'danger' }}">
{{ feedback.status|title }}
</span>
<small class="text-muted">{{ feedback.timestamp.strftime('%m-%d %H:%M') }}</small>
</div>
<div class="mt-1">
<small>{{ feedback.message }}</small>
{% if feedback.playlist_version %}
<br><small class="text-muted">Playlist v{{ feedback.playlist_version }}</small>
{% endif %}
</div>
</div>
{% endfor %}
</div>
</details>
{% else %}
<div class="text-center text-muted">
<p>No status information available</p>
<small>Player hasn't sent any feedback yet</small>
</div>
{% endif %}
</div>
</div>
</div>
</div>

View File

@@ -223,61 +223,82 @@
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha3/dist/js/bootstrap.bundle.min.js"></script>
<script>
function showStatusModal() {
console.log("Processing popup triggered");
const statusModal = new bootstrap.Modal(document.getElementById('statusModal'));
statusModal.show();
// Update status message based on media type
const mediaType = document.getElementById('media_type').value;
const statusMessage = document.getElementById('status-message');
switch(mediaType) {
case 'image':
statusMessage.textContent = 'Uploading images...';
break;
case 'video':
statusMessage.textContent = 'Uploading and processing video. This may take a while...';
break;
case 'pdf':
statusMessage.textContent = 'Converting PDF to 4K images. This may take a while...';
break;
case 'ppt':
statusMessage.textContent = 'Converting PowerPoint to 4K images. This may take a while...';
break;
default:
statusMessage.textContent = 'Uploading and processing your files. Please wait...';
}
// Start system monitoring updates in modal
{% if system_info %}
startModalSystemMonitoring();
{% endif %}
// Simulate progress updates
const progressBar = document.getElementById('progress-bar');
let progress = 0;
const interval = setInterval(() => {
// For slow processes, increment more slowly
const increment = (mediaType === 'image') ? 20 : 5;
progress += increment;
if (progress >= 100) {
clearInterval(interval);
statusMessage.textContent = 'Files uploaded and processed successfully!';
// Stop system monitoring updates
{% if system_info %}
stopModalSystemMonitoring();
{% endif %}
// Enable the close button
document.querySelector('[data-bs-dismiss="modal"]').disabled = false;
} else {
progressBar.style.width = `${progress}%`;
progressBar.setAttribute('aria-valuenow', progress);
if (mediaType === 'video') {
statusMessage.textContent = 'Uploading video...';
// Stage 1: Uploading (0-40%)
let uploadInterval = setInterval(() => {
progress += 8;
if (progress >= 40) {
clearInterval(uploadInterval);
progress = 40;
progressBar.style.width = `${progress}%`;
progressBar.setAttribute('aria-valuenow', progress);
// Stage 2: Converting
statusMessage.textContent = 'Converting video to standard format (29.97 fps, 1080p)...';
let convertProgress = 40;
let convertInterval = setInterval(() => {
convertProgress += 3;
progressBar.style.width = `${convertProgress}%`;
progressBar.setAttribute('aria-valuenow', convertProgress);
if (convertProgress >= 100) {
clearInterval(convertInterval);
statusMessage.textContent = 'Video uploaded and converted successfully!';
// Stop system monitoring updates
{% if system_info %}
stopModalSystemMonitoring();
{% endif %}
document.querySelector('[data-bs-dismiss="modal"]').disabled = false;
}
}, 600);
} else {
progressBar.style.width = `${progress}%`;
progressBar.setAttribute('aria-valuenow', progress);
}
}, 400);
} else {
// Default for other media types
switch(mediaType) {
case 'image':
statusMessage.textContent = 'Uploading images...';
break;
case 'pdf':
statusMessage.textContent = 'Converting PDF to 4K images. This may take a while...';
break;
case 'ppt':
statusMessage.textContent = 'Converting PowerPoint to images (PPTX → PDF → Images). This may take 2-5 minutes...';
break;
default:
statusMessage.textContent = 'Uploading and processing your files. Please wait...';
}
}, 500);
// Simulate progress updates
let interval = setInterval(() => {
const increment = (mediaType === 'image') ? 20 : 5;
progress += increment;
if (progress >= 100) {
clearInterval(interval);
statusMessage.textContent = 'Files uploaded and processed successfully!';
{% if system_info %}
stopModalSystemMonitoring();
{% endif %}
document.querySelector('[data-bs-dismiss="modal"]').disabled = false;
} else {
progressBar.style.width = `${progress}%`;
progressBar.setAttribute('aria-valuenow', progress);
}
}, 500);
}
}
{% if system_info %}

View File

@@ -9,12 +9,23 @@ The converted PDF is then processed by the main upload workflow for 4K image gen
import os
import subprocess
import logging
import signal
import time
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def cleanup_libreoffice_processes():
"""Clean up any hanging LibreOffice processes"""
try:
subprocess.run(['pkill', '-f', 'soffice'], capture_output=True, timeout=10)
time.sleep(1) # Give processes time to terminate
except Exception as e:
logger.warning(f"Failed to cleanup LibreOffice processes: {e}")
def pptx_to_pdf_libreoffice(pptx_path, output_dir):
"""
Convert PPTX to PDF using LibreOffice for highest quality.
@@ -30,6 +41,9 @@ def pptx_to_pdf_libreoffice(pptx_path, output_dir):
str: Path to the generated PDF file, or None if conversion failed
"""
try:
# Clean up any existing LibreOffice processes
cleanup_libreoffice_processes()
# Ensure output directory exists
os.makedirs(output_dir, exist_ok=True)
@@ -39,14 +53,19 @@ def pptx_to_pdf_libreoffice(pptx_path, output_dir):
'--headless',
'--convert-to', 'pdf',
'--outdir', output_dir,
'--invisible', # Run without any UI
'--nodefault', # Don't start with default template
pptx_path
]
logger.info(f"Converting PPTX to PDF using LibreOffice: {pptx_path}")
result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
# Increase timeout to 300 seconds (5 minutes) for large presentations
result = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
if result.returncode != 0:
logger.error(f"LibreOffice conversion failed: {result.stderr}")
logger.error(f"LibreOffice stdout: {result.stdout}")
cleanup_libreoffice_processes() # Clean up on failure
return None
# Find the generated PDF file
@@ -55,16 +74,22 @@ def pptx_to_pdf_libreoffice(pptx_path, output_dir):
if os.path.exists(pdf_path):
logger.info(f"PDF conversion successful: {pdf_path}")
cleanup_libreoffice_processes() # Clean up after success
return pdf_path
else:
logger.error(f"PDF file not found after conversion: {pdf_path}")
cleanup_libreoffice_processes() # Clean up on failure
return None
except subprocess.TimeoutExpired:
logger.error("LibreOffice conversion timed out (120s)")
logger.error("LibreOffice conversion timed out (300s)")
cleanup_libreoffice_processes() # Clean up on timeout
return None
except Exception as e:
logger.error(f"Error in PPTX to PDF conversion: {e}")
import traceback
logger.error(f"Traceback: {traceback.format_exc()}")
cleanup_libreoffice_processes() # Clean up on error
return None

View File

@@ -12,10 +12,10 @@ def add_image_to_playlist(app, file, filename, duration, target_type, target_id)
"""
Save the image file and add it to the playlist database.
"""
# Ensure we use absolute path for upload folder
# Use simple path resolution for containerized environment
upload_folder = app.config['UPLOAD_FOLDER']
if not os.path.isabs(upload_folder):
upload_folder = os.path.abspath(upload_folder)
# In container, working directory is /app, so static/uploads resolves correctly
print(f"Upload folder config: {upload_folder}")
# Ensure upload folder exists
if not os.path.exists(upload_folder):
@@ -52,86 +52,43 @@ def add_image_to_playlist(app, file, filename, duration, target_type, target_id)
# Video conversion functions
def convert_video(input_file, output_folder):
"""
Converts a video file to MP4 format with H.264 codec.
"""
# Ensure we use absolute path for output folder
if not os.path.isabs(output_folder):
output_folder = os.path.abspath(output_folder)
print(f"Converted output folder to absolute path: {output_folder}")
if not os.path.exists(output_folder):
os.makedirs(output_folder, exist_ok=True)
print(f"Created output folder: {output_folder}")
# Generate the output file path
base_name = os.path.splitext(os.path.basename(input_file))[0]
output_file = os.path.join(output_folder, f"{base_name}.mp4")
print(f"Converting video: {input_file} -> {output_file}")
# FFmpeg command to convert the video
command = [
"ffmpeg",
"-i", input_file, # Input file
"-c:v", "libx264", # Video codec: H.264
"-preset", "fast", # Encoding speed/quality tradeoff
"-crf", "23", # Constant Rate Factor (quality, lower is better)
"-vf", "scale=-1:1080", # Scale video to 1080p (preserve aspect ratio)
"-r", "30", # Frame rate: 30 FPS
"-c:a", "aac", # Audio codec: AAC
"-b:a", "128k", # Audio bitrate
output_file # Output file
]
try:
# Run the FFmpeg command
subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(f"Video converted successfully: {output_file}")
return output_file
except subprocess.CalledProcessError as e:
print(f"Error converting video: {e.stderr.decode()}")
return None
print(f"Video conversion skipped for: {input_file}")
return input_file
def convert_video_and_update_playlist(app, file_path, original_filename, target_type, target_id, duration):
"""
Converts a video and updates the playlist database.
"""
print(f"Starting video conversion for: {file_path}")
# Ensure we use absolute path for upload folder
upload_folder = app.config['UPLOAD_FOLDER']
if not os.path.isabs(upload_folder):
upload_folder = os.path.abspath(upload_folder)
print(f"Converted upload folder to absolute path: {upload_folder}")
converted_file = convert_video(file_path, upload_folder)
if converted_file:
converted_filename = os.path.basename(converted_file)
print(f"Video converted successfully: {converted_filename}")
import shutil
import tempfile
print(f"Starting video normalization for: {file_path}")
# Only process .mp4 files
if not file_path.lower().endswith('.mp4'):
print(f"Skipping non-mp4 file: {file_path}")
return None
# Use the application context to interact with the database
with app.app_context():
# Update the database with the converted filename
if target_type == 'group':
group = Group.query.get_or_404(target_id)
for player in group.players:
content = Content.query.filter_by(player_id=player.id, file_name=original_filename).first()
if content:
content.file_name = converted_filename
elif target_type == 'player':
content = Content.query.filter_by(player_id=target_id, file_name=original_filename).first()
if content:
content.file_name = converted_filename
db.session.commit()
print(f"Database updated with converted video: {converted_filename}")
# Delete the original file only if it exists
if os.path.exists(file_path):
os.remove(file_path)
print(f"Original file deleted: {file_path}")
else:
print(f"Video conversion failed for: {file_path}")
# Prepare temp output file
temp_dir = tempfile.gettempdir()
temp_output = os.path.join(temp_dir, f"normalized_{os.path.basename(file_path)}")
ffmpeg_cmd = [
'ffmpeg', '-y', '-i', file_path,
'-c:v', 'libx264', '-profile:v', 'main',
# Bitrate is not forced, so we allow lower bitrates
'-vf', 'scale=1920:1080,fps=29.97',
'-c:a', 'copy',
temp_output
]
print(f"Running ffmpeg: {' '.join(ffmpeg_cmd)}")
try:
result = subprocess.run(ffmpeg_cmd, capture_output=True, text=True, timeout=1800)
if result.returncode != 0:
print(f"ffmpeg error: {result.stderr}")
return None
# Replace original file with normalized one
shutil.move(temp_output, file_path)
print(f"Video normalized and replaced: {file_path}")
except Exception as e:
print(f"Error during video normalization: {e}")
return None
# No need to update playlist, as filename remains the same
return True
# PDF conversion functions
def convert_pdf_to_images(pdf_file, output_folder, delete_pdf=True, dpi=300):
@@ -140,25 +97,7 @@ def convert_pdf_to_images(pdf_file, output_folder, delete_pdf=True, dpi=300):
Uses standard 300 DPI for reliable conversion.
"""
print(f"Converting PDF to JPG images: {pdf_file} at {dpi} DPI")
print(f"Original output folder: {output_folder}")
# Force absolute path resolution to ensure we use the app directory
if not os.path.isabs(output_folder):
# If relative path, resolve from the current working directory
output_folder = os.path.abspath(output_folder)
print(f"Converted relative path to absolute: {output_folder}")
else:
print(f"Using provided absolute path: {output_folder}")
# Ensure we're using the app static folder, not workspace root
if output_folder.endswith('static/uploads'):
# Check if we're accidentally using workspace root instead of app folder
expected_app_path = '/opt/digiserver/app/static/uploads'
if output_folder != expected_app_path:
print(f"WARNING: Correcting path from {output_folder} to {expected_app_path}")
output_folder = expected_app_path
print(f"Final output folder: {output_folder}")
print(f"Output folder: {output_folder}")
try:
# Ensure output folder exists
@@ -266,11 +205,6 @@ def process_pdf(input_file, output_folder, duration, target_type, target_id):
print(f"Processing PDF file: {input_file}")
print(f"Output folder: {output_folder}")
# Ensure we have absolute path for output folder
if not os.path.isabs(output_folder):
output_folder = os.path.abspath(output_folder)
print(f"Converted output folder to absolute path: {output_folder}")
# Ensure output folder exists
if not os.path.exists(output_folder):
os.makedirs(output_folder, exist_ok=True)
@@ -306,11 +240,6 @@ def process_pptx(input_file, output_folder, duration, target_type, target_id):
print(f"Processing PPTX file using PDF workflow: {input_file}")
print(f"Output folder: {output_folder}")
# Ensure we have absolute path for output folder
if not os.path.isabs(output_folder):
output_folder = os.path.abspath(output_folder)
print(f"Converted output folder to absolute path: {output_folder}")
# Ensure output folder exists
if not os.path.exists(output_folder):
os.makedirs(output_folder, exist_ok=True)
@@ -318,21 +247,33 @@ def process_pptx(input_file, output_folder, duration, target_type, target_id):
try:
# Step 1: Convert PPTX to PDF using LibreOffice for vector quality
print("Step 1: Converting PPTX to PDF...")
from utils.pptx_converter import pptx_to_pdf_libreoffice
pdf_file = pptx_to_pdf_libreoffice(input_file, output_folder)
if not pdf_file:
print("Error: Failed to convert PPTX to PDF")
print("This could be due to:")
print("- LibreOffice not properly installed")
print("- Corrupted PPTX file")
print("- Insufficient memory")
print("- File permission issues")
return False
print(f"PPTX successfully converted to PDF: {pdf_file}")
# Step 2: Use the same PDF to images workflow as direct PDF uploads
print("Step 2: Converting PDF to JPG images...")
# Convert PDF to JPG images (300 DPI, same as PDF workflow)
image_filenames = convert_pdf_to_images(pdf_file, output_folder, delete_pdf=True, dpi=300)
if not image_filenames:
print("Error: Failed to convert PDF to images")
print("This could be due to:")
print("- poppler-utils not properly installed")
print("- PDF corruption during conversion")
print("- Insufficient disk space")
print("- Memory issues during image processing")
return False
print(f"Generated {len(image_filenames)} JPG images from PPTX → PDF")
@@ -341,11 +282,14 @@ def process_pptx(input_file, output_folder, duration, target_type, target_id):
if os.path.exists(input_file):
os.remove(input_file)
print(f"Original PPTX file deleted: {input_file}")
# Step 4: Update playlist with generated images in sequential order
print("Step 3: Adding images to playlist...")
success = update_playlist_with_files(image_filenames, duration, target_type, target_id)
if success:
print(f"Successfully processed PPTX: {len(image_filenames)} images added to playlist")
else:
print("Error: Failed to add images to playlist database")
return success
except Exception as e:
@@ -377,10 +321,9 @@ def process_uploaded_files(app, files, media_type, duration, target_type, target
# Generate a secure filename and save the file
filename = secure_filename(file.filename)
# Ensure we use absolute path for upload folder
# Use simple path resolution for containerized environment
upload_folder = app.config['UPLOAD_FOLDER']
if not os.path.isabs(upload_folder):
upload_folder = os.path.abspath(upload_folder)
print(f"Upload folder: {upload_folder}")
# Ensure upload folder exists
if not os.path.exists(upload_folder):

View File

@@ -0,0 +1,336 @@
import os
import json
import requests
import bcrypt
import re
import datetime
from logging_config import Logger
def send_player_feedback(config, message, status="active", playlist_version=None, error_details=None):
"""
Send feedback to the server about player status.
Args:
config (dict): Configuration containing server details
message (str): Main feedback message
status (str): Player status - "active", "playing", "error", "restarting"
playlist_version (int, optional): Current playlist version being played
error_details (str, optional): Error details if status is "error"
Returns:
bool: True if feedback sent successfully, False otherwise
"""
try:
server = config.get("server_ip", "")
host = config.get("screen_name", "")
quick = config.get("quickconnect_key", "")
port = config.get("port", "")
# Construct server URL
ip_pattern = r'^\d+\.\d+\.\d+\.\d+$'
if re.match(ip_pattern, server):
feedback_url = f'http://{server}:{port}/api/player-feedback'
else:
feedback_url = f'http://{server}/api/player-feedback'
# Prepare feedback data
feedback_data = {
'player_name': host,
'quickconnect_code': quick,
'message': message,
'status': status,
'timestamp': datetime.datetime.now().isoformat(),
'playlist_version': playlist_version,
'error_details': error_details
}
Logger.info(f"Sending feedback to {feedback_url}: {feedback_data}")
# Send POST request
response = requests.post(feedback_url, json=feedback_data, timeout=10)
if response.status_code == 200:
Logger.info(f"Feedback sent successfully: {message}")
return True
else:
Logger.warning(f"Feedback failed with status {response.status_code}: {response.text}")
return False
except requests.exceptions.RequestException as e:
Logger.error(f"Failed to send feedback: {e}")
return False
except Exception as e:
Logger.error(f"Unexpected error sending feedback: {e}")
return False
def send_playlist_check_feedback(config, playlist_version=None):
"""
Send feedback when playlist is checked for updates.
Args:
config (dict): Configuration containing server details
playlist_version (int, optional): Current playlist version
Returns:
bool: True if feedback sent successfully, False otherwise
"""
player_name = config.get("screen_name", "unknown")
version_info = f"v{playlist_version}" if playlist_version else "unknown"
message = f"player {player_name}, is active, Playing {version_info}"
return send_player_feedback(
config=config,
message=message,
status="active",
playlist_version=playlist_version
)
def send_playlist_restart_feedback(config, playlist_version=None):
"""
Send feedback when playlist loop ends and restarts.
Args:
config (dict): Configuration containing server details
playlist_version (int, optional): Current playlist version
Returns:
bool: True if feedback sent successfully, False otherwise
"""
player_name = config.get("screen_name", "unknown")
version_info = f"v{playlist_version}" if playlist_version else "unknown"
message = f"player {player_name}, playlist loop completed, restarting {version_info}"
return send_player_feedback(
config=config,
message=message,
status="restarting",
playlist_version=playlist_version
)
def send_player_error_feedback(config, error_message, playlist_version=None):
"""
Send feedback when an error occurs in the player.
Args:
config (dict): Configuration containing server details
error_message (str): Description of the error
playlist_version (int, optional): Current playlist version
Returns:
bool: True if feedback sent successfully, False otherwise
"""
player_name = config.get("screen_name", "unknown")
message = f"player {player_name}, error occurred"
return send_player_feedback(
config=config,
message=message,
status="error",
playlist_version=playlist_version,
error_details=error_message
)
def send_playing_status_feedback(config, playlist_version=None, current_media=None):
"""
Send feedback about current playing status.
Args:
config (dict): Configuration containing server details
playlist_version (int, optional): Current playlist version
current_media (str, optional): Currently playing media file
Returns:
bool: True if feedback sent successfully, False otherwise
"""
player_name = config.get("screen_name", "unknown")
version_info = f"v{playlist_version}" if playlist_version else "unknown"
media_info = f" - {current_media}" if current_media else ""
message = f"player {player_name}, is active, Playing {version_info}{media_info}"
return send_player_feedback(
config=config,
message=message,
status="playing",
playlist_version=playlist_version
)
def is_playlist_up_to_date(local_playlist_path, config):
"""
Compare the version of the local playlist with the server playlist.
Returns True if up-to-date, False otherwise.
"""
import json
if not os.path.exists(local_playlist_path):
Logger.info(f"Local playlist file not found: {local_playlist_path}")
return False
with open(local_playlist_path, 'r') as f:
local_data = json.load(f)
local_version = local_data.get('version', 0)
server_data = fetch_server_playlist(config)
server_version = server_data.get('version', 0)
Logger.info(f"Local playlist version: {local_version}, Server playlist version: {server_version}")
return local_version == server_version
def fetch_server_playlist(config):
"""Fetch the updated playlist from the server using a config dict."""
server = config.get("server_ip", "")
host = config.get("screen_name", "")
quick = config.get("quickconnect_key", "")
port = config.get("port", "")
try:
ip_pattern = r'^\d+\.\d+\.\d+\.\d+$'
if re.match(ip_pattern, server):
server_url = f'http://{server}:{port}/api/playlists'
else:
server_url = f'http://{server}/api/playlists'
params = {
'hostname': host,
'quickconnect_code': quick
}
Logger.info(f"Fetching playlist from URL: {server_url} with params: {params}")
response = requests.get(server_url, params=params)
if response.status_code == 200:
response_data = response.json()
Logger.info(f"Server response: {response_data}")
playlist = response_data.get('playlist', [])
version = response_data.get('playlist_version', None)
hashed_quickconnect = response_data.get('hashed_quickconnect', None)
if version is not None and hashed_quickconnect is not None:
if bcrypt.checkpw(quick.encode('utf-8'), hashed_quickconnect.encode('utf-8')):
Logger.info("Fetched updated playlist from server.")
return {'playlist': playlist, 'version': version}
else:
Logger.error("Quickconnect code validation failed.")
else:
Logger.error("Failed to retrieve playlist or hashed quickconnect from the response.")
else:
Logger.error(f"Failed to fetch playlist. Status Code: {response.status_code}")
except requests.exceptions.RequestException as e:
Logger.error(f"Failed to fetch playlist: {e}")
return {'playlist': [], 'version': 0}
def save_playlist_with_version(playlist_data, playlist_dir):
version = playlist_data.get('version', 0)
playlist_file = os.path.join(playlist_dir, f'server_playlist_v{version}.json')
with open(playlist_file, 'w') as f:
json.dump(playlist_data, f, indent=2)
print(f"Playlist saved to {playlist_file}")
return playlist_file
def download_media_files(playlist, media_dir):
"""Download media files from the server and save them to media_dir."""
if not os.path.exists(media_dir):
os.makedirs(media_dir)
Logger.info(f"Created directory {media_dir} for media files.")
updated_playlist = []
for media in playlist:
file_name = media.get('file_name', '')
file_url = media.get('url', '')
duration = media.get('duration', 10)
local_path = os.path.join(media_dir, file_name)
Logger.info(f"Preparing to download {file_name} from {file_url}...")
if os.path.exists(local_path):
Logger.info(f"File {file_name} already exists. Skipping download.")
else:
try:
response = requests.get(file_url, timeout=10)
if response.status_code == 200:
with open(local_path, 'wb') as file:
file.write(response.content)
Logger.info(f"Successfully downloaded {file_name} to {local_path}")
else:
Logger.error(f"Failed to download {file_name}. Status Code: {response.status_code}")
continue
except requests.exceptions.RequestException as e:
Logger.error(f"Error downloading {file_name}: {e}")
continue
updated_media = {
'file_name': file_name,
'url': os.path.relpath(local_path, os.path.dirname(media_dir)),
'duration': duration
}
updated_playlist.append(updated_media)
return updated_playlist
def delete_old_playlists_and_media(current_version, playlist_dir, media_dir, keep_versions=1):
"""
Delete old playlist files and media files not referenced by the latest playlist version.
keep_versions: number of latest versions to keep (default 1)
"""
# Find all playlist files
playlist_files = [f for f in os.listdir(playlist_dir) if f.startswith('server_playlist_v') and f.endswith('.json')]
# Keep only the latest N versions
versions = sorted([int(f.split('_v')[-1].split('.json')[0]) for f in playlist_files], reverse=True)
keep = set(versions[:keep_versions])
# Delete old playlist files
for f in playlist_files:
v = int(f.split('_v')[-1].split('.json')[0])
if v not in keep:
os.remove(os.path.join(playlist_dir, f))
# Collect all media files referenced by the kept playlists
referenced = set()
for v in keep:
path = os.path.join(playlist_dir, f'server_playlist_v{v}.json')
if os.path.exists(path):
with open(path, 'r') as f:
data = json.load(f)
for item in data.get('playlist', []):
referenced.add(item.get('file_name'))
# Delete media files not referenced
for f in os.listdir(media_dir):
if f not in referenced:
try:
os.remove(os.path.join(media_dir, f))
except Exception as e:
Logger.warning(f"Failed to delete media file {f}: {e}")
def update_playlist_if_needed(local_playlist_path, config, media_dir, playlist_dir):
"""
Fetch the server playlist once, compare versions, and update if needed.
Returns True if updated, False if already up to date.
Also sends feedback to server about playlist check.
"""
import json
server_data = fetch_server_playlist(config)
server_version = server_data.get('version', 0)
if not os.path.exists(local_playlist_path):
local_version = 0
else:
with open(local_playlist_path, 'r') as f:
local_data = json.load(f)
local_version = local_data.get('version', 0)
Logger.info(f"Local playlist version: {local_version}, Server playlist version: {server_version}")
# Send feedback about playlist check
send_playlist_check_feedback(config, server_version if server_version > 0 else local_version)
if local_version != server_version:
if server_data and server_data.get('playlist'):
updated_playlist = download_media_files(server_data['playlist'], media_dir)
server_data['playlist'] = updated_playlist
save_playlist_with_version(server_data, playlist_dir)
# Delete old playlists and unreferenced media
delete_old_playlists_and_media(server_version, playlist_dir, media_dir)
# Send feedback about playlist update
player_name = config.get("screen_name", "unknown")
update_message = f"player {player_name}, playlist updated to v{server_version}"
send_player_feedback(config, update_message, "active", server_version)
return True
else:
Logger.warning("No playlist data fetched from server or playlist is empty.")
# Send error feedback
send_player_error_feedback(config, "No playlist data fetched from server or playlist is empty", local_version)
return False
else:
Logger.info("Local playlist is already up to date.")
return False

1091
player/player.py Normal file

File diff suppressed because it is too large.

13
docker-compose.yml Executable file → Normal file
View File

@@ -1,6 +1,5 @@
# DigiServer - Digital Signage Management Platform
# Version: 1.1.0
# Build Date: 2025-06-29
# Production Docker Compose Configuration
# Use this for production deployment
services:
digiserver:
@@ -12,10 +11,14 @@ services:
environment:
- FLASK_APP=app.py
- FLASK_RUN_HOST=0.0.0.0
- DEFAULT_USER=admin
- DEFAULT_PASSWORD=Initial01!
- FLASK_ENV=production
- FLASK_DEBUG=0
- ADMIN_USER=admin
- ADMIN_PASSWORD=Initial01!
- SECRET_KEY=Ma_Duc_Dupa_Merele_Lui_Ana
volumes:
# Mount app code
- ./app:/app
# Persistent data volumes
- ./data/instance:/app/instance
- ./data/uploads:/app/static/uploads