"""
|
|
3D Video Animation Generator
|
|
Creates professional Google Earth-style 3D video animations from GPS route data
|
|
"""
|
|
|
|
import json
|
|
import os
|
|
import math
|
|
import requests
|
|
import cv2
|
|
import numpy as np
|
|
from PIL import Image, ImageDraw, ImageFont, ImageFilter
|
|
import tempfile
|
|
import shutil
|
|
from datetime import datetime
|
|
import random
|
|
|
|
def generate_3d_video_animation(project_name, resources_folder, label_widget, progress_widget, popup_widget, clock_module, test_mode=False):
"""
Generate a 3D video animation similar to Relive

Args:
project_name: Name of the project
resources_folder: Path to resources folder
label_widget: Kivy label for status updates
progress_widget: Kivy progress bar
popup_widget: Kivy popup to dismiss when done
clock_module: Kivy Clock module for scheduling
test_mode: If True, generates the video at 720p for faster testing (default: False for 2K quality)
"""
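# Illustrative call sketch (assumption: a Kivy app supplies the widgets below;
# these names are hypothetical and not defined in this module):
#
#   from kivy.clock import Clock
#   generate_3d_video_animation(
#       project_name="morning_ride",
#       resources_folder=app.user_data_dir,
#       label_widget=status_label,
#       progress_widget=progress_bar,
#       popup_widget=progress_popup,
#       clock_module=Clock,
#       test_mode=True,  # 720p/30 fps preview instead of 2K/60 fps
#   )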
def update_progress(progress_val, status_text):
|
|
"""Update UI from background thread"""
|
|
def _update(dt):
|
|
progress_widget.value = progress_val
|
|
label_widget.text = status_text
|
|
clock_module.schedule_once(_update, 0)
|
|
|
|
def finish_generation(success, message, output_path=None):
|
|
"""Finish the generation process"""
|
|
def _finish(dt):
|
|
if popup_widget:
|
|
popup_widget.dismiss()
|
|
|
|
# Show result popup
|
|
from kivy.uix.popup import Popup
|
|
from kivy.uix.boxlayout import BoxLayout
|
|
from kivy.uix.button import Button
|
|
from kivy.uix.label import Label
|
|
|
|
result_layout = BoxLayout(orientation='vertical', spacing=10, padding=10)
|
|
|
|
if success:
|
|
result_label = Label(
|
|
text=f"3D Video Generated Successfully!\n\nSaved to:\n{output_path}",
|
|
color=(0, 1, 0, 1),
|
|
halign="center"
|
|
)
|
|
open_btn = Button(
|
|
text="Open Video Folder",
|
|
size_hint_y=None,
|
|
height=40,
|
|
background_color=(0.2, 0.7, 0.2, 1)
|
|
)
|
|
# NOTE: xdg-open is Linux-only; other platforms would need os.startfile (Windows) or "open" (macOS)
open_btn.bind(on_press=lambda x: (os.system(f"xdg-open '{os.path.dirname(output_path)}'"), result_popup.dismiss()))
|
|
result_layout.add_widget(result_label)
|
|
result_layout.add_widget(open_btn)
|
|
else:
|
|
result_label = Label(
|
|
text=f"Generation Failed:\n{message}",
|
|
color=(1, 0, 0, 1),
|
|
halign="center"
|
|
)
|
|
result_layout.add_widget(result_label)
|
|
|
|
close_btn = Button(
|
|
text="Close",
|
|
size_hint_y=None,
|
|
height=40,
|
|
background_color=(0.3, 0.3, 0.3, 1)
|
|
)
|
|
|
|
result_layout.add_widget(close_btn)
|
|
|
|
result_popup = Popup(
|
|
title="3D Video Generation Result",
|
|
content=result_layout,
|
|
size_hint=(0.9, 0.6),
|
|
auto_dismiss=False
|
|
)
|
|
|
|
close_btn.bind(on_press=lambda x: result_popup.dismiss())
|
|
result_popup.open()
|
|
|
|
clock_module.schedule_once(_finish, 0)
|
|
|
|
def run_generation():
|
|
"""Main generation function"""
|
|
try:
|
|
# Step 1: Load route data
|
|
update_progress(10, "Loading route data...")
|
|
project_folder = os.path.join(resources_folder, "projects", project_name)
|
|
positions_path = os.path.join(project_folder, "positions.json")
|
|
|
|
if not os.path.exists(positions_path):
|
|
finish_generation(False, "No route data found!")
|
|
return
|
|
|
|
with open(positions_path, "r") as f:
|
|
positions = json.load(f)
|
|
|
|
if len(positions) < 10:
|
|
finish_generation(False, "Route too short for 3D animation (minimum 10 points)")
|
|
return
|
|
|
|
# Step 2: Calculate route bounds and center
|
|
update_progress(20, "Calculating route boundaries...")
|
|
lats = [pos['latitude'] for pos in positions]
|
|
lons = [pos['longitude'] for pos in positions]
|
|
|
|
center_lat = sum(lats) / len(lats)
|
|
center_lon = sum(lons) / len(lons)
|
|
|
|
min_lat, max_lat = min(lats), max(lats)
|
|
min_lon, max_lon = min(lons), max(lons)
|
|
|
|
# Step 3: Generate frames with space entry sequence
|
|
update_progress(30, "Generating 3D frames with space entry...")
|
|
|
|
# Create temporary directory for frames
|
|
temp_dir = tempfile.mkdtemp()
|
|
frames_dir = os.path.join(temp_dir, "frames")
|
|
os.makedirs(frames_dir)
|
|
|
|
# Video settings - Test mode vs Production quality
|
|
if test_mode:
|
|
# Test mode: 720p resolution for faster generation
width, height = 1280, 720  # 720p for testing
fps = 30  # Standard frame rate for testing
entry_frames = 60  # 2 seconds at 30fps for a quicker space entry
total_frames = entry_frames + len(positions) * 2  # progress estimate only; the route loop below writes one frame per position
quality_suffix = "720p_test"
update_progress(30, "Creating 720p test video...")
|
|
else:
|
|
# Production mode: 2K resolution for high quality
width, height = 2560, 1440  # 1440p (2K) for much higher quality
fps = 60  # Higher frame rate for smoother motion
entry_frames = 120  # 2 seconds at 60fps for the space entry sequence
total_frames = entry_frames + len(positions) * 3  # progress estimate only; the route loop below writes one frame per position
quality_suffix = "2K_production"
update_progress(30, "Creating 2K production video...")
|
|
|
|
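# Expected output length is roughly (entry_frames + len(positions) + 1) / fps seconds,
# since the writer below emits one frame per generated PNG (entry frames, one frame
# per GPS position, plus the single transition bridge frame).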
frame_counter = 0
|
|
|
|
# Generate space entry sequence
|
|
mode_text = "720p test" if test_mode else "2K production"
|
|
update_progress(30, f"Creating space entry sequence ({mode_text})...")
|
|
for i in range(entry_frames):
|
|
progress = 30 + (i / total_frames) * 40
|
|
update_progress(progress, f"Space entry frame {i+1}/{entry_frames} ({mode_text})...")
|
|
|
|
try:
|
|
frame = create_space_entry_frame(
|
|
positions[0], center_lat, center_lon,
|
|
min_lat, max_lat, min_lon, max_lon,
|
|
width, height, i, entry_frames
|
|
)
|
|
|
|
frame_path = os.path.join(frames_dir, f"frame_{frame_counter:06d}.png")
|
|
cv2.imwrite(frame_path, frame)
|
|
frame_counter += 1
|
|
|
|
except Exception as e:
|
|
print(f"Error generating space entry frame {i}: {e}")
|
|
# Create a simple fallback frame
|
|
fallback_frame = np.zeros((height, width, 3), dtype=np.uint8)
|
|
fallback_frame[:] = (0, 0, 50) # Space-like background
|
|
frame_path = os.path.join(frames_dir, f"frame_{frame_counter:06d}.png")
|
|
cv2.imwrite(frame_path, fallback_frame)
|
|
frame_counter += 1
|
|
|
|
# Generate route following frames
|
|
for i, pos in enumerate(positions):
|
|
progress = 30 + ((entry_frames + i) / total_frames) * 40
|
|
update_progress(progress, f"Route frame {i+1}/{len(positions)}...")
|
|
|
|
try:
|
|
frame = create_3d_frame(
|
|
pos, positions, i, center_lat, center_lon,
|
|
min_lat, max_lat, min_lon, max_lon,
|
|
width, height
|
|
)
|
|
|
|
# Save frame
|
|
frame_path = os.path.join(frames_dir, f"frame_{frame_counter:06d}.png")
|
|
cv2.imwrite(frame_path, frame)
|
|
frame_counter += 1
|
|
|
|
except Exception as e:
|
|
print(f"Error generating route frame {i}: {e}")
|
|
# Create a simple fallback frame to continue generation
|
|
fallback_frame = np.zeros((height, width, 3), dtype=np.uint8)
|
|
fallback_frame[:] = (50, 50, 100) # Dark blue background
|
|
frame_path = os.path.join(frames_dir, f"frame_{frame_counter:06d}.png")
|
|
cv2.imwrite(frame_path, fallback_frame)
|
|
frame_counter += 1
|
|
|
|
# Add transition bridge frame (note: as written this is appended after the route
# frames, so it ends up at the tail of the video rather than between the space
# entry and the route sequence)
|
|
try:
|
|
update_progress(progress, "Creating transition bridge...")
|
|
transition_frame = create_transition_bridge_frame(
|
|
positions[0], center_lat, center_lon,
|
|
min_lat, max_lat, min_lon, max_lon,
|
|
width, height
|
|
)
|
|
frame_path = os.path.join(frames_dir, f"frame_{frame_counter:06d}.png")
|
|
cv2.imwrite(frame_path, transition_frame)
|
|
frame_counter += 1
|
|
except Exception as e:
|
|
print(f"Warning: Could not create transition bridge frame: {e}")
|
|
|
|
# Step 4: Create video
|
|
update_progress(75, f"Compiling {quality_suffix} video...")
|
|
|
|
# Output path with quality indicator
|
|
output_filename = f"{project_name}_3d_animation_{quality_suffix}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
|
|
output_path = os.path.join(project_folder, output_filename)
|
|
|
|
# Create video writer
|
|
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
|
video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
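# Defensive-check sketch (assumption: failing fast through the existing popup is acceptable).
# cv2.VideoWriter can fail silently if the 'mp4v' codec is unavailable:
#
#   if not video_writer.isOpened():
#       finish_generation(False, "Could not open video writer ('mp4v' codec unavailable?)")
#       return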
|
|
|
|
# Add frames to video
|
|
frame_files = sorted([f for f in os.listdir(frames_dir) if f.endswith('.png')])
|
|
for frame_file in frame_files:
|
|
frame_path = os.path.join(frames_dir, frame_file)
|
|
frame = cv2.imread(frame_path)
|
|
video_writer.write(frame)
|
|
|
|
video_writer.release()
|
|
|
|
# Step 5: Finishing touches (audio is not added in this version)
update_progress(90, "Adding finishing touches...")
|
|
|
|
# Clean up
|
|
shutil.rmtree(temp_dir)
|
|
|
|
update_progress(100, "3D Video generated successfully!")
|
|
finish_generation(True, "Success!", output_path)
|
|
|
|
except Exception as e:
|
|
finish_generation(False, str(e))
|
|
|
|
# Start generation in background
|
|
import threading
|
|
thread = threading.Thread(target=run_generation)
|
|
thread.daemon = True
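# daemon=True means the render thread is killed if the app exits mid-generation;
# in that case the temporary frames directory is left behind (assumed acceptable here).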
|
|
thread.start()
|
|
|
|
def create_3d_frame(current_pos, all_positions, frame_index, center_lat, center_lon,
|
|
min_lat, max_lat, min_lon, max_lon, width, height):
|
|
"""
|
|
Create a Google Earth-style 3D frame with camera following the route
|
|
"""
|
|
# Create canvas
|
|
frame = np.zeros((height, width, 3), dtype=np.uint8)
|
|
|
|
# Enhanced camera following system
|
|
camera_pos, camera_target, camera_bearing = calculate_dynamic_camera_position(
|
|
current_pos, all_positions, frame_index, min_lat, max_lat, min_lon, max_lon
|
|
)
|
|
|
|
# Google Earth-style perspective parameters with improved aerial view
|
|
base_camera_height = 1500 + 1000 * math.sin(frame_index * 0.02)  # ~500-2500 m base range
camera_height = base_camera_height + 500 * math.sin(frame_index * 0.05)  # extra variation, ~0-3000 m overall
view_distance = 3000  # Increased view distance for better aerial perspective
tilt_angle = 65 + 8 * math.sin(frame_index * 0.03)  # Dynamic tilt for cinematic effect
fov = 75  # Slightly wider field of view for aerial shots (not consumed by world_to_screen_3d below)
|
|
|
|
# Create enhanced terrain background
|
|
create_terrain_background(frame, width, height, camera_pos['latitude'], camera_pos['longitude'], camera_bearing, tilt_angle)
|
|
|
|
# Transform all route points to 3D camera space
|
|
route_points_3d = []
|
|
for i, pos in enumerate(all_positions):
|
|
# Calculate distance from camera
|
|
dist_to_camera = calculate_distance(camera_pos['latitude'], camera_pos['longitude'],
|
|
pos['latitude'], pos['longitude'])
|
|
|
|
if dist_to_camera > view_distance * 2: # Skip points too far away
|
|
continue
|
|
|
|
# Get elevation for this point
|
|
elevation = get_simulated_elevation(pos['latitude'], pos['longitude'], i)
|
|
|
|
# Convert to 3D screen coordinates
|
|
screen_x, screen_y, is_visible = world_to_screen_3d(
|
|
pos['latitude'], pos['longitude'], elevation,
|
|
camera_pos['latitude'], camera_pos['longitude'], camera_height,
|
|
camera_bearing, tilt_angle, width, height, view_distance
|
|
)
|
|
|
|
if is_visible:
|
|
# Mark points as past, current, or future
|
|
# Ensure at least the current position (frame_index) is marked as past
|
|
is_past_or_current = i <= frame_index
|
|
route_points_3d.append((screen_x, screen_y, is_past_or_current))
|
|
|
|
# Draw route with enhanced 3D effects
|
|
draw_3d_route(frame, route_points_3d, frame_index)
|
|
|
|
# Add Google Earth-style UI overlays
|
|
add_google_earth_ui(frame, current_pos, camera_bearing, width, height, frame_index, len(all_positions))
|
|
|
|
# Add atmospheric effects
|
|
add_atmospheric_perspective(frame, width, height)
|
|
|
|
return frame
|
|
|
|
def calculate_bearing(lat1, lon1, lat2, lon2):
|
|
"""Calculate bearing between two GPS points"""
|
|
lat1_rad = math.radians(lat1)
|
|
lat2_rad = math.radians(lat2)
|
|
dlon_rad = math.radians(lon2 - lon1)
|
|
|
|
y = math.sin(dlon_rad) * math.cos(lat2_rad)
|
|
x = math.cos(lat1_rad) * math.sin(lat2_rad) - math.sin(lat1_rad) * math.cos(lat2_rad) * math.cos(dlon_rad)
|
|
|
|
bearing = math.atan2(y, x)
|
|
bearing = math.degrees(bearing)
|
|
bearing = (bearing + 360) % 360
|
|
|
|
return bearing
|
|
|
|
def create_terrain_background(frame, width, height, camera_lat, camera_lon, bearing, tilt_angle):
|
|
"""Create a professional Google Earth-style terrain background"""
|
|
|
|
# Enhanced sky gradient with realistic atmospheric scattering
|
|
for y in range(int(height * 0.35)): # Sky takes upper 35%
|
|
sky_intensity = y / (height * 0.35)
|
|
|
|
# Realistic sky colors with atmospheric perspective
|
|
horizon_r, horizon_g, horizon_b = 255, 248, 220 # Warm horizon
|
|
zenith_r, zenith_g, zenith_b = 135, 206, 235 # Sky blue
|
|
|
|
r = int(horizon_r + (zenith_r - horizon_r) * sky_intensity)
|
|
g = int(horizon_g + (zenith_g - horizon_g) * sky_intensity)
|
|
b = int(horizon_b + (zenith_b - horizon_b) * sky_intensity)
|
|
|
|
frame[y, :] = (b, g, r) # BGR format for OpenCV
|
|
|
|
# Realistic terrain with multiple layers and textures
|
|
terrain_start_y = int(height * 0.35)
|
|
create_enhanced_terrain_layer(frame, width, height, terrain_start_y, camera_lat, camera_lon)
|
|
|
|
# Add atmospheric haze for depth
|
|
add_atmospheric_haze(frame, width, height, terrain_start_y)
|
|
|
|
# Add realistic cloud shadows
|
|
add_cloud_shadows(frame, width, height, terrain_start_y)
|
|
|
|
def create_enhanced_terrain_layer(frame, width, height, start_y, camera_lat, camera_lon):
|
|
"""Create enhanced terrain with realistic colors and textures"""
|
|
|
|
for y in range(start_y, height):
|
|
distance_factor = (y - start_y) / (height - start_y)
|
|
|
|
for x in range(width):
|
|
# Multiple noise layers for realistic terrain variation
|
|
terrain_color = generate_enhanced_terrain_color(x, y, camera_lat, camera_lon, width, height, distance_factor)
|
|
frame[y, x] = terrain_color
|
|
|
|
def generate_enhanced_terrain_color(x, y, camera_lat, camera_lon, width, height, distance_factor):
|
|
"""Generate enhanced terrain color with realistic geographic features"""
|
|
|
|
# Base terrain using multiple octaves of noise
|
|
noise_scale1 = 0.01
|
|
noise_scale2 = 0.005
|
|
noise_scale3 = 0.002
|
|
|
|
# Primary terrain features
|
|
terrain1 = math.sin(x * noise_scale1) * math.sin(y * noise_scale1)
|
|
terrain2 = math.sin(x * noise_scale2 + 100) * math.sin(y * noise_scale2 + 100) * 0.7
|
|
terrain3 = math.sin(x * noise_scale3 + 200) * math.sin(y * noise_scale3 + 200) * 0.3
|
|
|
|
combined_terrain = terrain1 + terrain2 + terrain3
|
|
|
|
# Simulate geographic coordinate influence
|
|
lat_influence = math.sin(camera_lat * 0.1) * 0.5
|
|
lon_influence = math.cos(camera_lon * 0.1) * 0.3
|
|
geographic_factor = lat_influence + lon_influence
|
|
|
|
final_terrain = combined_terrain + geographic_factor
|
|
|
|
# Classify terrain types based on noise
|
|
if final_terrain > 1.2:
|
|
# High mountains - snow-capped peaks
|
|
base_color = (240, 248, 255) # Alice blue
|
|
elif final_terrain > 0.8:
|
|
# Mountains - rocky gray/brown
|
|
base_color = (105, 105, 105) # Dim gray
|
|
elif final_terrain > 0.4:
|
|
# Hills - forest green
|
|
base_color = (34, 139, 34) # Forest green
|
|
elif final_terrain > 0.1:
|
|
# Plains - grassland
|
|
base_color = (124, 252, 0) # Lawn green
|
|
elif final_terrain > -0.2:
|
|
# Agricultural areas - golden
|
|
base_color = (255, 215, 0) # Gold
|
|
elif final_terrain > -0.5:
|
|
# Desert/arid - sandy brown
|
|
base_color = (244, 164, 96) # Sandy brown
|
|
else:
|
|
# Water bodies - deep blue
|
|
base_color = (25, 25, 112) # Midnight blue
|
|
|
|
# Apply distance-based atmospheric perspective
|
|
atmosphere_fade = 1.0 - (distance_factor * 0.4)
|
|
final_color = tuple(int(c * atmosphere_fade + 200 * (1 - atmosphere_fade)) for c in base_color)
|
|
|
|
# Add subtle texture variation
|
|
texture_noise = (math.sin(x * 0.1) * math.sin(y * 0.1)) * 10
|
|
final_color = tuple(max(0, min(255, c + int(texture_noise))) for c in final_color)
|
|
|
|
return final_color
|
|
|
|
def add_atmospheric_haze(frame, width, height, terrain_start_y):
|
|
"""Add realistic atmospheric haze for depth perception"""
|
|
|
|
haze_overlay = np.zeros_like(frame)
|
|
|
|
for y in range(terrain_start_y, height):
|
|
distance_factor = (y - terrain_start_y) / (height - terrain_start_y)
|
|
haze_intensity = distance_factor * 0.3 # Stronger haze in distance
|
|
|
|
if haze_intensity > 0:
|
|
haze_color = int(220 * haze_intensity) # Light blue-gray haze
|
|
haze_overlay[y, :] = (haze_color, haze_color, haze_color)
|
|
|
|
# Blend haze with terrain
|
|
cv2.addWeighted(frame, 1.0, haze_overlay, 0.3, 0, frame)
|
|
|
|
def add_cloud_shadows(frame, width, height, terrain_start_y):
|
|
"""Add realistic cloud shadows on terrain"""
|
|
|
|
shadow_overlay = np.zeros_like(frame)
|
|
|
|
# Generate cloud shadow patterns
|
|
for shadow_id in range(3):
|
|
shadow_center_x = int(width * (0.2 + shadow_id * 0.3))
|
|
shadow_center_y = int(terrain_start_y + (height - terrain_start_y) * 0.3)
|
|
|
|
shadow_radius = 80 + shadow_id * 30
|
|
|
|
# Create soft circular shadows
|
|
for y in range(max(terrain_start_y, shadow_center_y - shadow_radius),
|
|
min(height, shadow_center_y + shadow_radius)):
|
|
for x in range(max(0, shadow_center_x - shadow_radius),
|
|
min(width, shadow_center_x + shadow_radius)):
|
|
|
|
distance = math.sqrt((x - shadow_center_x)**2 + (y - shadow_center_y)**2)
|
|
|
|
if distance < shadow_radius:
|
|
shadow_intensity = 1.0 - (distance / shadow_radius)
|
|
shadow_intensity *= 0.3 # Subtle shadows
|
|
|
|
shadow_value = int(50 * shadow_intensity)
|
|
shadow_overlay[y, x] = (shadow_value, shadow_value, shadow_value)
|
|
|
|
# Apply shadows
|
|
frame_dark = frame.astype(np.int32) - shadow_overlay.astype(np.int32)
|
|
frame[:] = np.clip(frame_dark, 0, 255).astype(np.uint8)
|
|
|
|
def calculate_visible_bounds(camera_lat, camera_lon, bearing, view_distance, width, height):
|
|
"""Calculate the bounds of the visible area"""
|
|
# This is a simplified calculation for the demo
|
|
# In a real implementation, you'd use proper 3D projection math
|
|
lat_offset = view_distance / 111000 # Rough conversion to degrees
|
|
lon_offset = view_distance / (111000 * math.cos(math.radians(camera_lat)))
|
|
|
|
return {
|
|
'min_lat': camera_lat - lat_offset,
|
|
'max_lat': camera_lat + lat_offset,
|
|
'min_lon': camera_lon - lon_offset,
|
|
'max_lon': camera_lon + lon_offset
|
|
}
|
|
|
|
def world_to_screen_3d(world_lat, world_lon, elevation, camera_lat, camera_lon, camera_height,
|
|
bearing, tilt_angle, screen_width, screen_height, view_distance):
|
|
"""Transform world coordinates to 3D screen coordinates"""
|
|
# Calculate relative position
|
|
lat_diff = world_lat - camera_lat
|
|
lon_diff = world_lon - camera_lon
|
|
|
|
# Convert to meters (approximate)
|
|
x_meters = lon_diff * 111000 * math.cos(math.radians(camera_lat))
|
|
y_meters = lat_diff * 111000
|
|
z_meters = elevation - camera_height
|
|
|
|
# Rotate based on bearing
|
|
bearing_rad = math.radians(-bearing) # Negative for correct rotation
|
|
rotated_x = x_meters * math.cos(bearing_rad) - y_meters * math.sin(bearing_rad)
|
|
rotated_y = x_meters * math.sin(bearing_rad) + y_meters * math.cos(bearing_rad)
|
|
|
|
# Check if point is in front of camera
|
|
if rotated_y < 0:
|
|
return 0, 0, False
|
|
|
|
# Apply perspective projection
|
|
perspective_scale = view_distance / max(rotated_y, 1)
|
|
|
|
# Convert to screen coordinates
|
|
screen_x = int(screen_width / 2 + rotated_x * perspective_scale * 0.5)
|
|
|
|
# Apply tilt for vertical positioning
|
|
tilt_factor = math.sin(math.radians(tilt_angle))
|
|
horizon_y = screen_height * 0.4 # Horizon line
|
|
screen_y = int(horizon_y + (z_meters * perspective_scale * tilt_factor * 0.1) +
|
|
(rotated_y * perspective_scale * 0.2))
|
|
|
|
# Check if point is visible on screen
|
|
is_visible = (0 <= screen_x < screen_width and 0 <= screen_y < screen_height)
|
|
|
|
return screen_x, screen_y, is_visible
|
|
|
|
def get_simulated_elevation(lat, lon, frame_index):
|
|
"""Generate simulated elevation data"""
|
|
# Create varied terrain using sine waves
|
|
elevation = (
|
|
50 * math.sin(lat * 100) +
|
|
30 * math.sin(lon * 80) +
|
|
20 * math.sin((lat + lon) * 60) +
|
|
10 * math.sin(frame_index * 0.1) # Dynamic element
|
|
)
|
|
return max(0, elevation) # Ensure non-negative elevation
|
|
|
|
def draw_3d_route(frame, route_points_3d, current_frame_index):
|
|
"""Draw the route with 3D perspective effects"""
|
|
if len(route_points_3d) < 2:
|
|
return
|
|
|
|
# Draw route segments
|
|
for i in range(1, len(route_points_3d)):
|
|
x1, y1, is_past1 = route_points_3d[i-1]
|
|
x2, y2, is_past2 = route_points_3d[i]
|
|
|
|
# Color based on position relative to current point
|
|
if is_past1 and is_past2:
|
|
# Past route - solid cyan-ish (BGR)
color = (255, 200, 100)
thickness = 4
else:
# Future route - solid red-ish (BGR)
color = (100, 100, 255)
thickness = 3
|
|
|
|
# Draw line with shadow for depth
|
|
cv2.line(frame, (x1+2, y1+2), (x2+2, y2+2), (50, 50, 50), thickness+2)
|
|
cv2.line(frame, (x1, y1), (x2, y2), color, thickness)
|
|
|
|
# Draw current position marker
|
|
if route_points_3d:
|
|
# Find the current position - look for the last "past" point, or use the first point
|
|
current_x, current_y = None, None
|
|
|
|
# Try to find the last past point
|
|
for x, y, is_past in route_points_3d:
|
|
if is_past:
|
|
current_x, current_y = x, y
|
|
|
|
# If no past points found (beginning of route), use the first point
|
|
if current_x is None and len(route_points_3d) > 0:
|
|
current_x, current_y, _ = route_points_3d[0]
|
|
|
|
# Only draw marker if we have a valid position
|
|
if current_x is not None and current_y is not None:
|
|
# Pulsing current position marker
|
|
pulse_size = int(12 + 8 * math.sin(current_frame_index * 0.3))
|
|
|
|
# Shadow
|
|
cv2.circle(frame, (current_x+3, current_y+3), pulse_size, (0, 0, 0), -1)
|
|
# Outer ring
|
|
cv2.circle(frame, (current_x, current_y), pulse_size, (0, 255, 255), -1)
|
|
# Inner ring
|
|
cv2.circle(frame, (current_x, current_y), pulse_size-4, (255, 255, 255), 2)
|
|
# Center dot
|
|
cv2.circle(frame, (current_x, current_y), 3, (255, 0, 0), -1)
|
|
|
|
def add_google_earth_ui(frame, current_pos, bearing, width, height, frame_index, total_frames):
|
|
"""Add Google Earth-style UI elements"""
|
|
# Speed and info panel (top-left)
|
|
panel_width = 250
|
|
panel_height = 120
|
|
overlay = frame.copy()
|
|
|
|
# Semi-transparent panel
|
|
cv2.rectangle(overlay, (10, 10), (panel_width, panel_height), (50, 50, 50), -1)
|
|
cv2.addWeighted(overlay, 0.7, frame, 0.3, 0, frame)
|
|
|
|
# Panel border
|
|
cv2.rectangle(frame, (10, 10), (panel_width, panel_height), (200, 200, 200), 2)
|
|
|
|
# Text information
|
|
speed = current_pos.get('speed', 0)
|
|
timestamp = current_pos.get('deviceTime', '')
|
|
|
|
y_pos = 35
|
|
cv2.putText(frame, f"Speed: {speed:.1f} km/h", (20, y_pos),
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
|
|
|
|
y_pos += 25
|
|
cv2.putText(frame, f"Bearing: {bearing:.0f}°", (20, y_pos),
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
|
|
|
|
y_pos += 25
|
|
if timestamp:
|
|
cv2.putText(frame, f"Time: {timestamp[:16]}", (20, y_pos),
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
|
|
|
|
y_pos += 25
|
|
progress = (frame_index + 1) / total_frames * 100
|
|
cv2.putText(frame, f"Progress: {progress:.1f}%", (20, y_pos),
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
|
|
|
|
# Compass (top-right)
|
|
compass_center_x = width - 80
|
|
compass_center_y = 80
|
|
compass_radius = 40
|
|
|
|
# Compass background
|
|
cv2.circle(frame, (compass_center_x, compass_center_y), compass_radius, (50, 50, 50), -1)
|
|
cv2.circle(frame, (compass_center_x, compass_center_y), compass_radius, (200, 200, 200), 2)
|
|
|
|
# North indicator
|
|
north_x = compass_center_x + int((compass_radius - 10) * math.sin(math.radians(-bearing)))
|
|
north_y = compass_center_y - int((compass_radius - 10) * math.cos(math.radians(-bearing)))
|
|
cv2.arrowedLine(frame, (compass_center_x, compass_center_y), (north_x, north_y), (0, 0, 255), 3)
|
|
|
|
# N label
|
|
cv2.putText(frame, "N", (compass_center_x - 8, compass_center_y - compass_radius - 10),
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
|
|
|
|
# Progress bar (bottom)
|
|
progress_bar_width = width - 40
|
|
progress_bar_height = 10
|
|
progress_bar_x = 20
|
|
progress_bar_y = height - 30
|
|
|
|
# Background
|
|
cv2.rectangle(frame, (progress_bar_x, progress_bar_y),
|
|
(progress_bar_x + progress_bar_width, progress_bar_y + progress_bar_height),
|
|
(100, 100, 100), -1)
|
|
|
|
# Progress fill
|
|
progress_width = int(progress_bar_width * progress / 100)
|
|
cv2.rectangle(frame, (progress_bar_x, progress_bar_y),
|
|
(progress_bar_x + progress_width, progress_bar_y + progress_bar_height),
|
|
(0, 255, 100), -1)
|
|
|
|
# Border
|
|
cv2.rectangle(frame, (progress_bar_x, progress_bar_y),
|
|
(progress_bar_x + progress_bar_width, progress_bar_y + progress_bar_height),
|
|
(200, 200, 200), 1)
|
|
|
|
def add_atmospheric_perspective(frame, width, height):
|
|
"""Add distance fog effect for realism"""
|
|
# Create fog gradient overlay
|
|
fog_overlay = np.zeros_like(frame)
|
|
|
|
# Fog is stronger towards the horizon
|
|
horizon_y = int(height * 0.4)
|
|
for y in range(horizon_y, height):
|
|
fog_intensity = min(0.3, (y - horizon_y) / (height - horizon_y) * 0.3)
|
|
fog_color = int(200 * fog_intensity)
|
|
fog_overlay[y, :] = (fog_color, fog_color, fog_color)
|
|
|
|
# Blend fog with frame
|
|
cv2.addWeighted(frame, 1.0, fog_overlay, 0.5, 0, frame)
|
|
|
|
def get_elevation_data(lat, lon):
|
|
"""
|
|
Get elevation data for a coordinate (optional enhancement)
|
|
"""
|
|
try:
|
|
# Using a free elevation API
|
|
url = f"https://api.open-elevation.com/api/v1/lookup?locations={lat},{lon}"
|
|
response = requests.get(url, timeout=5)
|
|
if response.status_code == 200:
|
|
data = response.json()
|
|
return data['results'][0]['elevation']
|
|
except Exception:
|
|
pass
|
|
return 0 # Default elevation
|
|
|
|
def calculate_dynamic_camera_position(current_pos, all_positions, frame_index, min_lat, max_lat, min_lon, max_lon):
|
|
"""
|
|
Calculate dynamic camera position that follows the route smoothly
|
|
"""
|
|
camera_lat = current_pos['latitude']
|
|
camera_lon = current_pos['longitude']
|
|
|
|
# Dynamic look-ahead based on speed and terrain
|
|
speed = current_pos.get('speed', 0)
|
|
base_look_ahead = max(3, min(10, int(speed / 10))) # Adjust based on speed
|
|
|
|
# Look ahead in the route for camera direction
|
|
look_ahead_frames = min(base_look_ahead, len(all_positions) - frame_index - 1)
|
|
|
|
if look_ahead_frames > 0:
|
|
target_pos = all_positions[frame_index + look_ahead_frames]
|
|
target_lat = target_pos['latitude']
|
|
target_lon = target_pos['longitude']
|
|
else:
|
|
# Use previous points to maintain direction
|
|
if frame_index > 0:
|
|
prev_pos = all_positions[frame_index - 1]
|
|
# Extrapolate forward
|
|
lat_diff = camera_lat - prev_pos['latitude']
|
|
lon_diff = camera_lon - prev_pos['longitude']
|
|
target_lat = camera_lat + lat_diff
|
|
target_lon = camera_lon + lon_diff
|
|
else:
|
|
target_lat = camera_lat
|
|
target_lon = camera_lon
|
|
|
|
# Calculate smooth bearing with momentum
|
|
bearing = calculate_bearing(camera_lat, camera_lon, target_lat, target_lon)
|
|
|
|
# Add slight camera offset for better viewing angle
|
|
offset_distance = 50 # meters
|
|
offset_angle = bearing + 45 # 45 degrees offset for better perspective
|
|
|
|
# Calculate offset position
|
|
offset_lat = camera_lat + (offset_distance / 111000) * math.cos(math.radians(offset_angle))
|
|
offset_lon = camera_lon + (offset_distance / (111000 * math.cos(math.radians(camera_lat)))) * math.sin(math.radians(offset_angle))
|
|
|
|
camera_pos = {
|
|
'latitude': offset_lat,
|
|
'longitude': offset_lon
|
|
}
|
|
|
|
camera_target = {
|
|
'latitude': target_lat,
|
|
'longitude': target_lon
|
|
}
|
|
|
|
return camera_pos, camera_target, bearing
|
|
|
|
def calculate_distance(lat1, lon1, lat2, lon2):
|
|
"""Calculate distance between two GPS points in meters"""
|
|
# Haversine formula
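#   a = sin^2(dphi / 2) + cos(phi1) * cos(phi2) * sin^2(dlambda / 2)
#   d = 2 * R * atan2(sqrt(a), sqrt(1 - a)),  with R ~ 6,371,000 m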
|
|
R = 6371000 # Earth's radius in meters
|
|
phi1 = math.radians(lat1)
|
|
phi2 = math.radians(lat2)
|
|
delta_phi = math.radians(lat2 - lat1)
|
|
delta_lambda = math.radians(lon2 - lon1)
|
|
|
|
a = math.sin(delta_phi/2)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(delta_lambda/2)**2
|
|
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
|
|
|
|
return R * c
|
|
|
|
def world_to_camera_screen(world_lat, world_lon, elevation, camera_pos, camera_target, camera_height,
|
|
bearing, tilt_angle, fov, screen_width, screen_height):
|
|
"""
|
|
Advanced 3D transformation from world coordinates to screen coordinates
|
|
"""
|
|
# Convert GPS to local coordinates relative to camera
|
|
lat_diff = world_lat - camera_pos['latitude']
|
|
lon_diff = world_lon - camera_pos['longitude']
|
|
|
|
# Convert to meters (more accurate conversion)
|
|
x_meters = lon_diff * 111320 * math.cos(math.radians(camera_pos['latitude']))
|
|
y_meters = lat_diff * 110540
|
|
z_meters = elevation - camera_height
|
|
|
|
# Apply camera rotation based on bearing
|
|
bearing_rad = math.radians(-bearing)
|
|
tilt_rad = math.radians(tilt_angle)
|
|
|
|
# Rotate around Z axis (bearing)
|
|
rotated_x = x_meters * math.cos(bearing_rad) - y_meters * math.sin(bearing_rad)
|
|
rotated_y = x_meters * math.sin(bearing_rad) + y_meters * math.cos(bearing_rad)
|
|
rotated_z = z_meters
|
|
|
|
# Apply tilt rotation
|
|
final_y = rotated_y * math.cos(tilt_rad) - rotated_z * math.sin(tilt_rad)
|
|
final_z = rotated_y * math.sin(tilt_rad) + rotated_z * math.cos(tilt_rad)
|
|
final_x = rotated_x
|
|
|
|
# Check if point is in front of camera
|
|
if final_y <= 0:
|
|
return 0, 0, float('inf'), False
|
|
|
|
# Perspective projection
|
|
fov_rad = math.radians(fov)
|
|
f = (screen_width / 2) / math.tan(fov_rad / 2) # Focal length
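# Pinhole-camera projection: a point at depth final_y projects to
#   screen_x = cx + f * final_x / final_y,  screen_y = cy - f * final_z / final_y
# with f chosen so the horizontal field of view spans the screen width.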
|
|
|
|
# Project to screen
|
|
screen_x = int(screen_width / 2 + (final_x * f) / final_y)
|
|
screen_y = int(screen_height / 2 - (final_z * f) / final_y)
|
|
|
|
# Calculate depth for sorting
|
|
depth = final_y
|
|
|
|
# Check if point is visible on screen
|
|
is_visible = (0 <= screen_x < screen_width and 0 <= screen_y < screen_height)
|
|
|
|
return screen_x, screen_y, depth, is_visible
|
|
|
|
def get_enhanced_elevation(lat, lon, point_index, frame_index):
|
|
"""
|
|
Generate more realistic elevation data with variation
|
|
"""
|
|
# Base elevation using multiple harmonics
|
|
base_elevation = (
|
|
100 * math.sin(lat * 50) +
|
|
70 * math.sin(lon * 40) +
|
|
50 * math.sin((lat + lon) * 30) +
|
|
30 * math.sin(lat * 200) * math.cos(lon * 150) +
|
|
20 * math.sin(point_index * 0.1) # Smooth variation along route
|
|
)
|
|
|
|
# Add temporal variation for dynamic feel
|
|
time_variation = 10 * math.sin(frame_index * 0.05 + point_index * 0.2)
|
|
|
|
# Ensure realistic elevation range
|
|
elevation = max(0, min(500, base_elevation + time_variation))
|
|
|
|
return elevation
|
|
|
|
def create_space_entry_frame(start_pos, center_lat, center_lon, min_lat, max_lat, min_lon, max_lon,
|
|
width, height, frame_index, total_entry_frames):
|
|
"""
|
|
Create ultra-realistic Google Earth-style space entry frame with high detail
|
|
"""
|
|
# Create ultra-high-resolution canvas
|
|
frame = np.zeros((height, width, 3), dtype=np.uint8)
|
|
|
|
# Calculate entry progress (0 to 1)
|
|
entry_progress = frame_index / total_entry_frames
|
|
|
|
# Ultra-realistic altitude progression
|
|
max_altitude = 100000 # 100km - true edge of space
|
|
min_altitude = 1500 # 1.5km - final aerial altitude
|
|
|
|
# Advanced descent curve with multiple phases
|
|
if entry_progress < 0.3:
    # Phase 1: slow initial descent from space (cubic ease-in, ending at 0.3**3 = 0.027
    # so the curve is continuous with phase 2)
    altitude_progress = entry_progress ** 3
elif entry_progress < 0.7:
    # Phase 2: faster descent through the atmosphere (0.027 -> 0.727)
    phase_progress = (entry_progress - 0.3) / 0.4
    altitude_progress = 0.027 + phase_progress ** 1.5 * 0.7
else:
    # Phase 3: deceleration to the aerial view (0.727 -> 1.0)
    phase_progress = (entry_progress - 0.7) / 0.3
    altitude_progress = 0.727 + phase_progress ** 2 * 0.273
|
|
|
|
current_altitude = max_altitude - (max_altitude - min_altitude) * altitude_progress
|
|
|
|
# Create photorealistic Earth background
|
|
create_photorealistic_earth_view(frame, width, height, current_altitude, center_lat, center_lon, entry_progress)
|
|
|
|
# Add ultra-detailed geographic features
|
|
add_ultra_detailed_geography(frame, width, height, center_lat, center_lon, current_altitude, entry_progress)
|
|
|
|
# Add realistic weather systems
|
|
add_realistic_weather_systems(frame, width, height, current_altitude, entry_progress)
|
|
|
|
# Add city lights for night side
|
|
if entry_progress > 0.5:
|
|
add_city_lights(frame, width, height, center_lat, center_lon, current_altitude)
|
|
|
|
# Add ultra-realistic atmospheric layers
|
|
add_ultra_realistic_atmosphere(frame, width, height, current_altitude, entry_progress)
|
|
|
|
# Add route visualization with high detail
|
|
if entry_progress > 0.6:
|
|
draw_ultra_detailed_route_preview(frame, min_lat, max_lat, min_lon, max_lon,
|
|
center_lat, center_lon, width, height,
|
|
current_altitude, entry_progress)
|
|
|
|
# Add cinema-quality UI
|
|
add_cinema_quality_ui(frame, current_altitude, entry_progress, start_pos, width, height, frame_index)
|
|
|
|
# Add satellite overlay effects
|
|
add_satellite_overlay_effects(frame, width, height, current_altitude, entry_progress)
|
|
|
|
return frame
|
|
|
|
def create_photorealistic_earth_view(frame, width, height, altitude, center_lat, center_lon, progress):
|
|
"""Create photorealistic Earth view with ultra-high detail"""
|
|
|
|
if altitude > 50000: # True space view
|
|
create_ultra_realistic_space_view(frame, width, height, altitude, center_lat, center_lon)
|
|
elif altitude > 25000: # High atmosphere
|
|
create_high_atmosphere_view(frame, width, height, altitude, center_lat, center_lon, progress)
|
|
elif altitude > 10000: # Mid atmosphere
|
|
create_mid_atmosphere_view(frame, width, height, altitude, center_lat, center_lon, progress)
|
|
else: # Low atmosphere - aerial view
|
|
create_ultra_detailed_aerial_view(frame, width, height, altitude, center_lat, center_lon, progress)
|
|
|
|
def create_ultra_realistic_space_view(frame, width, height, altitude, center_lat, center_lon):
|
|
"""Create ultra-realistic space view with detailed Earth"""
|
|
|
|
# Deep space background with nebula effects
|
|
create_deep_space_background(frame, width, height, altitude)
|
|
|
|
# Ultra-detailed Earth sphere
|
|
earth_radius = int(min(width, height) * 0.35) # Larger Earth for more detail
|
|
earth_center_x = width // 2
|
|
earth_center_y = int(height * 0.65) # Position Earth in lower portion
|
|
|
|
# Create high-resolution Earth surface
|
|
create_high_resolution_earth_surface(frame, earth_center_x, earth_center_y, earth_radius,
|
|
center_lat, center_lon, width, height)
|
|
|
|
# Add realistic cloud patterns
|
|
add_realistic_cloud_patterns(frame, earth_center_x, earth_center_y, earth_radius, width, height)
|
|
|
|
# Add Earth's atmospheric layers
|
|
add_detailed_atmospheric_layers(frame, earth_center_x, earth_center_y, earth_radius, width, height)
|
|
|
|
# Add auroras at poles
|
|
add_auroras(frame, earth_center_x, earth_center_y, earth_radius, width, height)
|
|
|
|
def create_deep_space_background(frame, width, height, altitude):
|
|
"""Create realistic deep space background"""
|
|
|
|
# Deep space gradient
|
|
for y in range(height):
|
|
space_intensity = y / height
|
|
r = int(2 + (8 - 2) * space_intensity)
|
|
g = int(2 + (12 - 2) * space_intensity)
|
|
b = int(8 + (20 - 8) * space_intensity)
|
|
frame[y, :] = (b, g, r)
|
|
|
|
# Add detailed star field with varying brightness and colors
|
|
np.random.seed(42) # Consistent stars
|
|
num_stars = int(2000 + altitude / 100) # More stars at higher altitude
|
|
|
|
for _ in range(num_stars):
|
|
x = np.random.randint(0, width)
|
|
y = np.random.randint(0, height)
|
|
|
|
# Vary star properties
|
|
brightness = np.random.randint(80, 255)
|
|
star_type = np.random.choice(['white', 'blue', 'yellow', 'red'], p=[0.7, 0.1, 0.15, 0.05])
|
|
size = np.random.choice([1, 2, 3], p=[0.85, 0.12, 0.03])
|
|
|
|
# Set star color
|
|
if star_type == 'blue':
|
|
color = (brightness, brightness//2, brightness//3)
|
|
elif star_type == 'yellow':
|
|
color = (brightness//3, brightness//2, brightness)
|
|
elif star_type == 'red':
|
|
color = (brightness//4, brightness//4, brightness)
|
|
else: # white
|
|
color = (brightness, brightness, brightness)
|
|
|
|
# Draw star
|
|
if size == 1:
|
|
if 0 <= y < height and 0 <= x < width:
|
|
frame[y, x] = color
|
|
else:
|
|
cv2.circle(frame, (x, y), size, color, -1)
|
|
|
|
# Add distant galaxies/nebulae
|
|
add_distant_galaxies(frame, width, height)
|
|
|
|
def create_high_resolution_earth_surface(frame, center_x, center_y, radius, center_lat, center_lon, width, height):
|
|
"""Create ultra-high resolution Earth surface with realistic geography"""
|
|
|
|
# Create detailed Earth mask
|
|
y_coords, x_coords = np.ogrid[:height, :width]
|
|
earth_mask = (x_coords - center_x)**2 + (y_coords - center_y)**2 <= radius**2
|
|
|
|
# Generate ultra-detailed surface colors
|
|
earth_surface = create_ultra_detailed_earth_colors(width, height, center_x, center_y, radius, center_lat, center_lon)
|
|
|
|
# Apply Earth surface to frame
|
|
frame[earth_mask] = earth_surface[earth_mask]
|
|
|
|
# Add continental details
|
|
add_continental_details(frame, center_x, center_y, radius, center_lat, center_lon, width, height)
|
|
|
|
def create_ultra_detailed_earth_colors(width, height, center_x, center_y, radius, center_lat, center_lon):
|
|
"""Generate ultra-detailed Earth surface colors"""
|
|
|
|
colors = np.zeros((height, width, 3), dtype=np.uint8)
|
|
|
|
for y in range(height):
|
|
for x in range(width):
|
|
distance = math.sqrt((x - center_x)**2 + (y - center_y)**2)
|
|
|
|
if distance <= radius:
|
|
# Convert pixel position to lat/lon on Earth
|
|
angle_x = (x - center_x) / radius * math.pi
|
|
angle_y = (y - center_y) / radius * math.pi
|
|
|
|
# Simulate realistic geographic coordinates
|
|
sim_lat = center_lat + angle_y * 180 / math.pi * 0.5
|
|
sim_lon = center_lon + angle_x * 180 / math.pi * 0.5
|
|
|
|
# Generate realistic terrain based on coordinates
|
|
terrain_color = generate_realistic_geographic_color(sim_lat, sim_lon, distance, radius)
|
|
colors[y, x] = terrain_color
|
|
|
|
return colors
|
|
|
|
def generate_realistic_geographic_color(lat, lon, distance_from_center, radius):
|
|
"""Generate realistic color based on geographic coordinates"""
|
|
|
|
# Multiple layers of geographic simulation
|
|
primary_terrain = (
|
|
math.sin(lat * 0.1) * math.sin(lon * 0.08) +
|
|
math.sin(lat * 0.05) * math.sin(lon * 0.12) * 0.7 +
|
|
math.sin(lat * 0.02) * math.sin(lon * 0.03) * 0.3
|
|
)
|
|
|
|
# Latitude-based climate zones
|
|
abs_lat = abs(lat)
|
|
if abs_lat > 60: # Polar regions
|
|
climate_factor = -0.5
|
|
elif abs_lat > 30: # Temperate regions
|
|
climate_factor = 0.3
|
|
else: # Tropical regions
|
|
climate_factor = 0.1
|
|
|
|
combined_terrain = primary_terrain + climate_factor
|
|
|
|
# Altitude simulation
|
|
elevation = combined_terrain + math.sin(lat * 0.3) * math.sin(lon * 0.25) * 0.4
|
|
|
|
# Water vs land determination
|
|
if combined_terrain < -0.2:
|
|
# Ocean - varying depths
|
|
if elevation < -0.5:
|
|
return (139, 69, 19) # Deep ocean - dark blue
|
|
elif elevation < -0.2:
|
|
return (180, 130, 70) # Medium ocean - blue
|
|
else:
|
|
return (205, 170, 125) # Shallow water - light blue
|
|
|
|
# Land terrain types
|
|
if elevation > 0.8:
|
|
# High mountains - snow and rock
|
|
return (248, 248, 255) # Snow white
|
|
elif elevation > 0.5:
|
|
# Mountains - rock and sparse vegetation
|
|
return (139, 137, 137) # Dark gray
|
|
elif elevation > 0.2:
|
|
# Hills - forests and mixed terrain
|
|
if abs_lat < 30: # Tropical
|
|
return (34, 139, 34) # Forest green
|
|
else: # Temperate
|
|
return (107, 142, 35) # Olive green
|
|
elif elevation > 0:
|
|
# Plains - agriculture and grassland
|
|
if abs_lat > 40: # Northern/Southern
|
|
return (255, 228, 181) # Wheat/farmland
|
|
else:
|
|
return (144, 238, 144) # Light green
|
|
else:
|
|
# Low areas - wetlands and river deltas
|
|
return (95, 158, 160) # Cadet blue
|
|
|
|
def add_continental_details(frame, center_x, center_y, radius, center_lat, center_lon, width, height):
|
|
"""Add detailed continental features"""
|
|
|
|
# Major continental outlines
|
|
continents = [
|
|
# North America
|
|
[(center_x - radius//3, center_y - radius//4), (center_x - radius//6, center_y - radius//8),
|
|
(center_x - radius//8, center_y + radius//6)],
|
|
# Europe
|
|
[(center_x - radius//8, center_y - radius//3), (center_x + radius//8, center_y - radius//4)],
|
|
# Asia
|
|
[(center_x + radius//8, center_y - radius//3), (center_x + radius//3, center_y - radius//6),
|
|
(center_x + radius//4, center_y + radius//8)],
|
|
]
|
|
|
|
for continent in continents:
|
|
if len(continent) > 2:
|
|
# Draw continental outline
|
|
for i in range(len(continent)):
|
|
start = continent[i]
|
|
end = continent[(i + 1) % len(continent)]
|
|
cv2.line(frame, start, end, (101, 67, 33), 2) # Brown outline
|
|
|
|
def add_realistic_cloud_patterns(frame, center_x, center_y, radius, width, height):
|
|
"""Add ultra-realistic cloud patterns"""
|
|
|
|
# Create cloud layer
|
|
cloud_overlay = np.zeros((height, width, 3), dtype=np.uint8)
|
|
|
|
# Generate realistic cloud systems
|
|
for cloud_system in range(5):
|
|
# Cloud center
|
|
cloud_center_x = center_x + int((np.random.random() - 0.5) * radius * 1.5)
|
|
cloud_center_y = center_y + int((np.random.random() - 0.5) * radius * 1.5)
|
|
|
|
# Generate cloud pattern using multiple octaves
|
|
for y in range(max(0, cloud_center_y - 100), min(height, cloud_center_y + 100)):
|
|
for x in range(max(0, cloud_center_x - 100), min(width, cloud_center_x + 100)):
|
|
# Check if within Earth
|
|
earth_distance = math.sqrt((x - center_x)**2 + (y - center_y)**2)
|
|
if earth_distance > radius:
|
|
continue
|
|
|
|
# Multi-scale cloud noise
|
|
cloud_noise = (
|
|
math.sin((x - cloud_center_x) * 0.02) * math.sin((y - cloud_center_y) * 0.02) +
|
|
math.sin((x - cloud_center_x) * 0.05) * math.sin((y - cloud_center_y) * 0.05) * 0.5 +
|
|
math.sin((x - cloud_center_x) * 0.1) * math.sin((y - cloud_center_y) * 0.1) * 0.25
|
|
)
|
|
|
|
# Distance falloff
|
|
distance_from_cloud_center = math.sqrt((x - cloud_center_x)**2 + (y - cloud_center_y)**2)
|
|
distance_factor = max(0, 1 - distance_from_cloud_center / 100)
|
|
|
|
cloud_intensity = max(0, (cloud_noise + 0.5) * distance_factor)
|
|
|
|
if cloud_intensity > 0.4:
|
|
cloud_alpha = min(0.7, cloud_intensity)
|
|
cloud_color = (255, 255, 255)
|
|
|
|
# Blend with existing
|
|
current = frame[y, x].astype(np.float32)
|
|
cloud_overlay[y, x] = (current * (1 - cloud_alpha) + np.array(cloud_color) * cloud_alpha).astype(np.uint8)
|
|
|
|
# Apply cloud overlay
|
|
mask = np.any(cloud_overlay > 0, axis=2)
|
|
frame[mask] = cloud_overlay[mask]
|
|
|
|
def add_distant_galaxies(frame, width, height):
|
|
"""Add distant galaxies and nebulae effects"""
|
|
|
|
# Add a few distant galaxies
|
|
np.random.seed(123) # Different seed for galaxies
|
|
for _ in range(3):
|
|
galaxy_x = np.random.randint(width//4, 3*width//4)
|
|
galaxy_y = np.random.randint(height//4, height//2)
|
|
|
|
# Create small galaxy spiral
|
|
for angle in np.linspace(0, 4*math.pi, 50):
|
|
radius = angle * 2
|
|
if radius > 30:
|
|
break
|
|
|
|
spiral_x = int(galaxy_x + radius * math.cos(angle))
|
|
spiral_y = int(galaxy_y + radius * math.sin(angle) * 0.3) # Flattened
|
|
|
|
if 0 <= spiral_x < width and 0 <= spiral_y < height:
|
|
brightness = int(30 + 20 * (1 - radius/30))
|
|
frame[spiral_y, spiral_x] = (brightness//2, brightness//2, brightness)
|
|
|
|
def add_detailed_atmospheric_layers(frame, center_x, center_y, radius, width, height):
|
|
"""Add detailed atmospheric layers around Earth"""
|
|
|
|
# Multiple atmospheric layers
|
|
for layer in range(3):
|
|
layer_radius = radius + 15 + layer * 8
|
|
layer_intensity = 0.4 - layer * 0.1
|
|
|
|
for y in range(max(0, center_y - layer_radius - 10), min(height, center_y + layer_radius + 10)):
|
|
for x in range(max(0, center_x - layer_radius - 10), min(width, center_x + layer_radius + 10)):
|
|
distance = math.sqrt((x - center_x)**2 + (y - center_y)**2)
|
|
|
|
if layer_radius - 3 < distance < layer_radius + 3:
|
|
glow_factor = 1.0 - abs(distance - layer_radius) / 3.0
|
|
if glow_factor > 0:
|
|
glow_intensity = int(80 * glow_factor * layer_intensity)
|
|
current_color = frame[y, x].astype(np.int32)
|
|
|
|
# Blue atmospheric glow
|
|
frame[y, x] = np.clip(current_color + [glow_intensity//4, glow_intensity//2, glow_intensity], 0, 255)
|
|
|
|
def add_auroras(frame, center_x, center_y, radius, width, height):
|
|
"""Add realistic auroras at Earth's poles"""
|
|
|
|
# Northern aurora
|
|
aurora_center_y = center_y - int(radius * 0.8)
|
|
|
|
for i in range(30):
|
|
# Wavy aurora pattern
|
|
wave_x = center_x + int(40 * math.sin(i * 0.3))
|
|
wave_y = aurora_center_y + i * 2
|
|
|
|
if 0 <= wave_x < width and 0 <= wave_y < height:
|
|
# Aurora colors - green and blue
|
|
intensity = 100 + int(50 * math.sin(i * 0.2))
|
|
aurora_color = (intensity//3, intensity, intensity//2) # Green-dominant
|
|
|
|
# Draw aurora streak
|
|
cv2.circle(frame, (wave_x, wave_y), 3, aurora_color, -1)
|
|
|
|
# Southern aurora (smaller)
|
|
aurora_center_y = center_y + int(radius * 0.8)
|
|
|
|
for i in range(20):
|
|
wave_x = center_x + int(30 * math.sin(i * 0.4 + math.pi))
|
|
wave_y = aurora_center_y - i * 2
|
|
|
|
if 0 <= wave_x < width and 0 <= wave_y < height:
|
|
intensity = 80 + int(40 * math.sin(i * 0.25))
|
|
aurora_color = (intensity, intensity//2, intensity//3) # Blue-dominant
|
|
cv2.circle(frame, (wave_x, wave_y), 2, aurora_color, -1)
|
|
|
|
def add_ultra_detailed_geography(frame, width, height, center_lat, center_lon, altitude, progress):
|
|
"""Add ultra-detailed geographic features"""
|
|
|
|
if altitude < 30000: # Detailed features visible below 30km
|
|
detail_level = 1.0 - (altitude / 30000)
|
|
|
|
# Major mountain ranges
|
|
add_mountain_ranges(frame, width, height, center_lat, center_lon, detail_level)
|
|
|
|
# Major river systems
|
|
add_major_rivers(frame, width, height, center_lat, center_lon, detail_level)
|
|
|
|
# Desert regions
|
|
add_desert_regions(frame, width, height, center_lat, center_lon, detail_level)
|
|
|
|
# Forest regions
|
|
add_forest_regions(frame, width, height, center_lat, center_lon, detail_level)
|
|
|
|
def add_mountain_ranges(frame, width, height, center_lat, center_lon, detail_level):
|
|
"""Add realistic mountain ranges"""
|
|
|
|
if detail_level > 0.3:
|
|
# Simulate major mountain ranges
|
|
ranges = [
|
|
# Himalayas
|
|
[(width//2 + 100, height//2 - 50), (width//2 + 200, height//2 - 30)],
|
|
# Rockies
|
|
[(width//2 - 150, height//2 - 100), (width//2 - 120, height//2 + 50)],
|
|
# Andes
|
|
[(width//2 - 200, height//2), (width//2 - 180, height//2 + 150)],
|
|
]
|
|
|
|
for mountain_range in ranges:
|
|
if len(mountain_range) >= 2:
|
|
for i in range(len(mountain_range) - 1):
|
|
start = mountain_range[i]
|
|
end = mountain_range[i + 1]
|
|
|
|
# Draw mountain ridge
|
|
cv2.line(frame, start, end, (139, 137, 137), int(3 * detail_level))
|
|
|
|
# Add peaks along the ridge
|
|
distance = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
|
|
num_peaks = int(distance / 20)
|
|
|
|
for peak in range(num_peaks):
|
|
t = peak / num_peaks
|
|
peak_x = int(start[0] + t * (end[0] - start[0]))
|
|
peak_y = int(start[1] + t * (end[1] - start[1]))
|
|
|
|
# Random peak height
|
|
peak_height = int(5 + 10 * detail_level)
|
|
cv2.circle(frame, (peak_x, peak_y - peak_height), 2, (248, 248, 255), -1)
|
|
|
|
def add_major_rivers(frame, width, height, center_lat, center_lon, detail_level):
|
|
"""Add major river systems"""
|
|
|
|
if detail_level > 0.4:
|
|
rivers = [
|
|
# Amazon
|
|
[(width//2 - 100, height//2 + 80), (width//2 - 50, height//2 + 85), (width//2, height//2 + 90)],
|
|
# Nile
|
|
[(width//2 + 50, height//2 - 20), (width//2 + 55, height//2 + 100)],
|
|
# Mississippi
|
|
[(width//2 - 80, height//2 - 50), (width//2 - 75, height//2 + 80)],
|
|
]
|
|
|
|
for river in rivers:
|
|
# Draw meandering river
|
|
for i in range(len(river) - 1):
|
|
start = river[i]
|
|
end = river[i + 1]
|
|
cv2.line(frame, start, end, (100, 149, 237), max(1, int(2 * detail_level)))
|
|
|
|
def add_city_lights(frame, width, height, center_lat, center_lon, altitude):
|
|
"""Add realistic city lights for night side"""
|
|
|
|
if altitude < 40000: # City lights visible below 40km
|
|
# Major city clusters
|
|
cities = [
|
|
(width//2 - 80, height//2 + 40), # South America
|
|
(width//2 + 60, height//2 - 30), # Europe
|
|
(width//2 + 120, height//2 - 10), # Asia
|
|
(width//2 - 120, height//2 + 10), # North America
|
|
]
|
|
|
|
for city_x, city_y in cities:
|
|
# Create city light cluster
|
|
for _ in range(20):
|
|
light_x = city_x + np.random.randint(-15, 15)
|
|
light_y = city_y + np.random.randint(-15, 15)
|
|
|
|
if 0 <= light_x < width and 0 <= light_y < height:
|
|
brightness = np.random.randint(150, 255)
|
|
cv2.circle(frame, (light_x, light_y), 1, (brightness//3, brightness//2, brightness), -1)
|
|
|
|
def add_realistic_weather_systems(frame, width, height, altitude, progress):
|
|
"""Add realistic weather systems like hurricanes and storm fronts"""
|
|
|
|
if altitude < 25000: # Weather visible below 25km
|
|
# Hurricane spiral
|
|
hurricane_x = width//2 + 100
|
|
hurricane_y = height//2 + 50
|
|
|
|
for spiral_turn in range(3):
|
|
for angle in np.linspace(0, 2*math.pi, 30):
|
|
radius = 20 + spiral_turn * 15 + angle * 5
|
|
if radius > 60:
|
|
break
|
|
|
|
spiral_x = int(hurricane_x + radius * math.cos(angle + spiral_turn * 2))
|
|
spiral_y = int(hurricane_y + radius * math.sin(angle + spiral_turn * 2))
|
|
|
|
if 0 <= spiral_x < width and 0 <= spiral_y < height:
|
|
cloud_intensity = 200 - int(radius * 2)
|
|
if cloud_intensity > 100:
|
|
frame[spiral_y, spiral_x] = (cloud_intensity, cloud_intensity, cloud_intensity)
|
|
|
|
def add_ultra_realistic_atmosphere(frame, width, height, altitude, progress):
|
|
"""Add ultra-realistic atmospheric effects"""
|
|
|
|
if altitude > 80000:
|
|
# Space effects
|
|
add_space_effects(frame, width, height, altitude)
|
|
elif altitude > 50000:
|
|
# Stratosphere
|
|
add_stratosphere_effects(frame, width, height, altitude, progress)
|
|
elif altitude > 20000:
|
|
# High atmosphere
|
|
add_high_atmosphere_effects(frame, width, height, altitude, progress)
|
|
else:
|
|
# Troposphere
|
|
add_troposphere_effects(frame, width, height, altitude, progress)
|
|
|
|
def add_space_effects(frame, width, height, altitude):
|
|
"""Add space-specific atmospheric effects"""
|
|
# Very thin atmospheric glow
|
|
space_glow_intensity = max(0, 1.0 - (altitude - 80000) / 20000)
|
|
|
|
if space_glow_intensity > 0:
|
|
# Horizontal atmospheric band
|
|
glow_y = int(height * 0.7)
|
|
glow_height = int(20 * space_glow_intensity)
|
|
|
|
for y in range(max(0, glow_y - glow_height), min(height, glow_y + glow_height)):
|
|
glow_factor = 1.0 - abs(y - glow_y) / glow_height
|
|
blue_glow = int(30 * glow_factor * space_glow_intensity)
|
|
|
|
frame[y, :, 2] = np.minimum(255, frame[y, :, 2] + blue_glow)
|
|
|
|
def add_cinema_quality_ui(frame, altitude, progress, start_pos, width, height, frame_index):
|
|
"""Add cinema-quality UI with advanced graphics"""
|
|
|
|
# Create semi-transparent panels with gradients
|
|
overlay = frame.copy()
|
|
|
|
# Main telemetry panel (top-left) - larger and more detailed
|
|
panel_width = 380
|
|
panel_height = 180
|
|
|
|
# Gradient panel background
|
|
for y in range(20, 20 + panel_height):
|
|
for x in range(20, 20 + panel_width):
|
|
if y < height and x < width:
|
|
gradient_factor = (y - 20) / panel_height
|
|
bg_intensity = int(20 + 25 * gradient_factor)
|
|
overlay[y, x] = (bg_intensity, bg_intensity, bg_intensity)
|
|
|
|
cv2.addWeighted(overlay, 0.85, frame, 0.15, 0, frame)
|
|
|
|
# Panel border with glow effect
|
|
cv2.rectangle(frame, (20, 20), (20 + panel_width, 20 + panel_height), (100, 150, 255), 3)
|
|
cv2.rectangle(frame, (18, 18), (22 + panel_width, 22 + panel_height), (50, 100, 200), 1)
|
|
|
|
# Ultra-detailed telemetry
|
|
font_scale = 0.7
|
|
font_thickness = 2
|
|
|
|
# Altitude with large display
|
|
altitude_text = f"{altitude/1000:.2f} km"
|
|
cv2.putText(frame, "ALTITUDE", (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (150, 150, 150), 1)
|
|
cv2.putText(frame, altitude_text, (35, 85), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (100, 255, 100), 3)
|
|
|
|
# Velocity calculation
|
|
velocity = int((1 - progress) * 25000 + 800) # Simulated descent velocity
|
|
cv2.putText(frame, f"VELOCITY: {velocity} m/s", (35, 115), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 200, 100), 2)
|
|
|
|
# Coordinates with higher precision
|
|
cv2.putText(frame, f"LAT: {start_pos['latitude']:.6f}", (35, 140), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), 1)
|
|
cv2.putText(frame, f"LON: {start_pos['longitude']:.6f}", (35, 160), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), 1)
|
|
|
|
# Mission time
|
|
mission_time = frame_index / 60  # assumes 60 fps (production mode); test mode runs at 30 fps
|
|
cv2.putText(frame, f"T+ {mission_time:.1f}s", (35, 185), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (100, 200, 255), 2)
|
|
|
|
# Atmospheric data panel (top-right)
|
|
atmo_panel_x = width - 320
|
|
atmo_panel_width = 300
|
|
atmo_panel_height = 140
|
|
|
|
# Atmospheric panel background
|
|
cv2.rectangle(overlay, (atmo_panel_x, 20), (atmo_panel_x + atmo_panel_width, 20 + atmo_panel_height), (30, 30, 30), -1)
|
|
cv2.addWeighted(overlay, 0.8, frame, 0.2, 0, frame)
|
|
cv2.rectangle(frame, (atmo_panel_x, 20), (atmo_panel_x + atmo_panel_width, 20 + atmo_panel_height), (255, 150, 100), 2)
|
|
|
|
# Atmospheric data
|
|
if altitude > 50000:
|
|
atmo_layer = "THERMOSPHERE"
|
|
atmo_color = (255, 100, 100)
|
|
elif altitude > 20000:
|
|
atmo_layer = "STRATOSPHERE"
|
|
atmo_color = (255, 200, 100)
|
|
else:
|
|
atmo_layer = "TROPOSPHERE"
|
|
atmo_color = (100, 255, 100)
|
|
|
|
cv2.putText(frame, "ATMOSPHERIC LAYER", (atmo_panel_x + 10, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 150), 1)
|
|
cv2.putText(frame, atmo_layer, (atmo_panel_x + 10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.8, atmo_color, 2)
|
|
|
|
# Temperature and pressure simulation
|
|
temp = int(250 - altitude * 0.002) # Rough temperature model
|
|
pressure = max(0.1, 1013 * math.exp(-altitude / 8000)) # Barometric formula
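# Isothermal barometric formula: P = P0 * exp(-h / H), with P0 = 1013 hPa at sea
# level and scale height H ~ 8 km.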
|
|
|
|
cv2.putText(frame, f"TEMP: {temp}K", (atmo_panel_x + 10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 200, 255), 1)
|
|
cv2.putText(frame, f"PRESSURE: {pressure:.1f} hPa", (atmo_panel_x + 10, 125), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 200, 255), 1)
|
|
|
|
# Enhanced status messages (center-bottom)
|
|
status_messages = [
|
|
(0.0, 0.25, "INITIATING ORBITAL DESCENT", (100, 200, 255)),
|
|
(0.25, 0.5, "ENTERING EARTH'S ATMOSPHERE", (255, 200, 100)),
|
|
(0.5, 0.75, "ATMOSPHERIC ENTRY COMPLETE", (255, 150, 100)),
|
|
(0.75, 1.0, "APPROACHING TARGET COORDINATES", (100, 255, 100))
|
|
]
|
|
|
|
current_message = None
|
|
for start_prog, end_prog, message, color in status_messages:
|
|
if start_prog <= progress <= end_prog:
|
|
current_message = (message, color)
|
|
break
|
|
|
|
if current_message:
|
|
message, color = current_message
|
|
text_size = cv2.getTextSize(message, cv2.FONT_HERSHEY_SIMPLEX, 1.0, 3)[0]
|
|
text_x = (width - text_size[0]) // 2
|
|
text_y = height - 100
|
|
|
|
# Message background with glow
|
|
cv2.rectangle(frame, (text_x - 20, text_y - 40), (text_x + text_size[0] + 20, text_y + 15), (0, 0, 0), -1)
|
|
cv2.rectangle(frame, (text_x - 22, text_y - 42), (text_x + text_size[0] + 22, text_y + 17), color, 2)
|
|
cv2.putText(frame, message, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 3)
|
|
|
|
# Ultra-detailed progress bar (bottom)
|
|
progress_bar_width = width - 120
|
|
progress_bar_height = 12
|
|
progress_bar_x = 60
|
|
progress_bar_y = height - 50
|
|
|
|
# Multi-segment progress bar
|
|
cv2.rectangle(frame, (progress_bar_x, progress_bar_y),
|
|
(progress_bar_x + progress_bar_width, progress_bar_y + progress_bar_height),
|
|
(40, 40, 40), -1)
|
|
|
|
# Filled progress with color transitions
|
|
progress_width = int(progress_bar_width * progress)
|
|
for x in range(progress_width):
|
|
segment_progress = x / progress_width if progress_width > 0 else 0
|
|
|
|
# Color transition: red -> yellow -> green
|
|
if segment_progress < 0.5:
|
|
r = 255
|
|
g = int(255 * segment_progress * 2)
|
|
b = 0
|
|
else:
|
|
r = int(255 * (1 - (segment_progress - 0.5) * 2))
|
|
g = 255
|
|
b = 0
|
|
|
|
cv2.rectangle(frame, (progress_bar_x + x, progress_bar_y),
|
|
(progress_bar_x + x + 1, progress_bar_y + progress_bar_height),
|
|
(b, g, r), -1)
|
|
|
|
# Progress bar border
|
|
cv2.rectangle(frame, (progress_bar_x, progress_bar_y),
|
|
(progress_bar_x + progress_bar_width, progress_bar_y + progress_bar_height),
|
|
(200, 200, 200), 2)
|
|
|
|
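# Illustrative sketch (not called by the generator): the HUD pressure readout above
# uses a simplified barometric formula, p = p0 * exp(-h / H), with a sea-level
# pressure of 1013 hPa and a scale height of 8000 m. The helper below only makes that
# relationship easy to sanity-check in isolation; the function name is hypothetical.
def _example_barometric_pressure(altitude_m):
    """Return approximate pressure in hPa for a given altitude in metres."""
    return max(0.1, 1013 * math.exp(-altitude_m / 8000))

# Expected values under this simplified model:
#   _example_barometric_pressure(0)     -> ~1013.0 hPa
#   _example_barometric_pressure(8000)  -> ~372.7 hPa (one scale height)
#   _example_barometric_pressure(80000) -> 0.1 hPa (clamped floor)

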
def add_desert_regions(frame, width, height, center_lat, center_lon, detail_level):
    """Add realistic desert regions based on geographic location"""

    # Desert patterns based on latitude zones (min_lat, max_lat, min_lon, max_lon)
    desert_regions = [
        # Sahara region (North Africa)
        (15, 35, -20, 50),
        # Arabian Peninsula
        (15, 35, 35, 60),
        # Gobi Desert (Asia)
        (35, 50, 90, 120),
        # Southwestern US
        (25, 40, -120, -100),
        # Australian Outback
        (-35, -15, 110, 155),
        # Kalahari (Southern Africa)
        (-30, -15, 15, 30)
    ]

    for min_lat, max_lat, min_lon, max_lon in desert_regions:
        # Check if current view overlaps with desert region
        if (min_lat <= center_lat <= max_lat and min_lon <= center_lon <= max_lon):
            # Add desert coloring and texture
            desert_overlay = np.zeros((height, width, 3), dtype=np.uint8)

            # Desert sand color variations (BGR order, as expected by OpenCV drawing)
            sand_colors = [
                (128, 178, 194),  # Light sand
                (140, 180, 210),  # Tan
                (173, 203, 238),  # Peach puff
                (135, 184, 222),  # Burlywood
            ]

            # Generate desert texture using noise
            np.random.seed(42)  # Consistent pattern
            for y in range(0, height, 4):
                for x in range(0, width, 4):
                    # Create sandy texture
                    noise_val = np.random.random()
                    if noise_val > 0.3:  # 70% desert coverage in desert regions
                        color_idx = int(noise_val * len(sand_colors)) % len(sand_colors)
                        color = sand_colors[color_idx]

                        # Add some variation
                        variation = int((np.random.random() - 0.5) * 40)
                        color = tuple(max(0, min(255, c + variation)) for c in color)

                        # Draw small desert patch
                        cv2.rectangle(desert_overlay, (x, y), (x + 4, y + 4), color, -1)

            # Blend desert overlay with frame
            mask = (desert_overlay[:, :, 0] > 0) | (desert_overlay[:, :, 1] > 0) | (desert_overlay[:, :, 2] > 0)
            frame[mask] = cv2.addWeighted(frame, 0.6, desert_overlay, 0.4, 0)[mask]


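# Illustrative sketch (not called by the generator): both add_desert_regions and
# add_forest_regions decide whether to draw an overlay by testing the view centre
# against a list of (min_lat, max_lat, min_lon, max_lon) bounding boxes. The helper
# below isolates that test; the function name is hypothetical.
def _example_point_in_regions(lat, lon, regions):
    """Return True if (lat, lon) falls inside any (min_lat, max_lat, min_lon, max_lon) box."""
    return any(
        min_lat <= lat <= max_lat and min_lon <= lon <= max_lon
        for min_lat, max_lat, min_lon, max_lon in regions
    )

# Example with the Sahara box (15, 35, -20, 50):
#   _example_point_in_regions(25.0, 10.0, [(15, 35, -20, 50)]) -> True
#   _example_point_in_regions(48.0, 2.0,  [(15, 35, -20, 50)]) -> False

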
def add_forest_regions(frame, width, height, center_lat, center_lon, detail_level):
    """Add realistic forest regions based on geographic location"""

    # Major forest regions (min_lat, max_lat, min_lon, max_lon)
    forest_regions = [
        # Amazon Rainforest
        (-10, 5, -75, -45),
        # Congo Basin
        (-5, 5, 10, 30),
        # Taiga (Northern forests)
        (50, 70, -180, 180),
        # European forests
        (45, 65, -10, 40),
        # Southeast Asian forests
        (-10, 25, 90, 140),
        # North American forests
        (30, 60, -130, -60)
    ]

    for min_lat, max_lat, min_lon, max_lon in forest_regions:
        # Check if current view overlaps with forest region
        if (min_lat <= center_lat <= max_lat and min_lon <= center_lon <= max_lon):
            # Add forest coloring
            forest_overlay = np.zeros((height, width, 3), dtype=np.uint8)

            # Forest green variations (BGR order, as expected by OpenCV drawing)
            forest_colors = [
                (34, 139, 34),   # Forest green
                (0, 100, 0),     # Dark green
                (35, 142, 107),  # Olive drab
                (47, 107, 85),   # Dark olive green
                (50, 125, 46),   # Medium green
            ]

            # Generate forest texture
            np.random.seed(43)  # Different seed from desert
            for y in range(0, height, 3):
                for x in range(0, width, 3):
                    noise_val = np.random.random()
                    if noise_val > 0.2:  # 80% forest coverage in forest regions
                        color_idx = int(noise_val * len(forest_colors)) % len(forest_colors)
                        color = forest_colors[color_idx]

                        # Add variation for realistic forest texture
                        variation = int((np.random.random() - 0.5) * 30)
                        color = tuple(max(0, min(255, c + variation)) for c in color)

                        # Draw forest patch
                        cv2.rectangle(forest_overlay, (x, y), (x + 3, y + 3), color, -1)

            # Blend forest overlay
            mask = (forest_overlay[:, :, 0] > 0) | (forest_overlay[:, :, 1] > 0) | (forest_overlay[:, :, 2] > 0)
            frame[mask] = cv2.addWeighted(frame, 0.5, forest_overlay, 0.5, 0)[mask]


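# Illustrative sketch (not called by the generator): the desert/forest texture loops
# above draw one small cv2.rectangle per grid cell, which is slow at 2K resolution.
# A vectorized NumPy alternative with the same idea (seeded noise grid, palette
# lookup, coverage threshold, cell upscaling) might look like this; the name,
# defaults, and thresholds are assumptions, not part of the original pipeline.
def _example_noise_texture(height, width, palette, coverage=0.7, cell=4, seed=42):
    """Return an HxWx3 uint8 overlay filled with palette colours on ~coverage of cells."""
    rng = np.random.default_rng(seed)
    grid_h = (height + cell - 1) // cell
    grid_w = (width + cell - 1) // cell

    noise = rng.random((grid_h, grid_w))
    palette_arr = np.asarray(palette, dtype=np.int16)
    color_idx = (noise * len(palette)).astype(int) % len(palette)

    cells = palette_arr[color_idx]               # (grid_h, grid_w, 3) colour per cell
    cells[noise < (1.0 - coverage)] = 0          # leave uncovered cells black
    overlay = np.clip(cells, 0, 255).astype(np.uint8)

    # Expand each grid cell to cell x cell pixels and crop to the frame size
    overlay = np.repeat(np.repeat(overlay, cell, axis=0), cell, axis=1)
    return overlay[:height, :width]

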
def add_stratosphere_effects(frame, width, height, altitude, progress):
    """Add realistic stratosphere visual effects"""

    if 10000 < altitude < 50000:  # Stratosphere range
        # Create stratosphere haze
        haze_overlay = np.zeros((height, width, 3), dtype=np.uint8)

        # Stratosphere is characterized by stable layers
        haze_intensity = int(80 * (1 - (altitude - 10000) / 40000))

        # Add horizontal layered haze
        for y in range(0, height, 8):
            layer_intensity = haze_intensity + int(math.sin(y * 0.1) * 20)
            layer_intensity = max(0, min(255, layer_intensity))

            # Stratosphere has a bluish tint
            haze_color = (layer_intensity, layer_intensity // 2, layer_intensity // 4)
            cv2.rectangle(haze_overlay, (0, y), (width, y + 4), haze_color, -1)

        # Apply haze effect
        cv2.addWeighted(frame, 0.8, haze_overlay, 0.2, 0, frame)

        # Add occasional noctilucent clouds (high altitude clouds)
        if altitude > 30000 and np.random.random() > 0.7:
            cloud_x = np.random.randint(0, width // 2)
            cloud_y = np.random.randint(height // 4, 3 * height // 4)
            cv2.ellipse(frame, (cloud_x, cloud_y), (50, 20), 0, 0, 360, (200, 200, 255), -1)


def add_high_atmosphere_effects(frame, width, height, altitude, progress):
    """Add realistic high atmosphere visual effects"""

    if 50000 < altitude < 100000:  # High atmosphere/thermosphere
        # Create aurora-like effects at very high altitude
        aurora_overlay = np.zeros((height, width, 3), dtype=np.uint8)

        # Aurora intensity based on altitude and progress
        aurora_intensity = int(100 * (altitude - 50000) / 50000)

        # Create wavy aurora patterns
        for y in range(height):
            wave_x = int(math.sin(y * 0.02 + progress * 6.28) * 100)
            aurora_x = width // 2 + wave_x

            if 0 <= aurora_x < width:
                # Aurora colors (green, blue, purple) in BGR order
                if y < height // 3:
                    color = (aurora_intensity // 2, aurora_intensity, aurora_intensity // 3)  # Green
                elif y < 2 * height // 3:
                    color = (aurora_intensity, aurora_intensity // 2, aurora_intensity // 3)  # Blue
                else:
                    color = (aurora_intensity, aurora_intensity // 3, aurora_intensity)  # Purple

                # Draw aurora strip
                cv2.circle(aurora_overlay, (aurora_x, y), 15, color, -1)

        # Apply aurora effect with transparency
        cv2.addWeighted(frame, 0.9, aurora_overlay, 0.1, 0, frame)

        # Add atmospheric glow around Earth's limb
        earth_glow_overlay = np.zeros((height, width, 3), dtype=np.uint8)
        center_x, center_y = width // 2, height // 2

        for y in range(height):
            for x in range(width):
                distance_from_center = math.sqrt((x - center_x) ** 2 + (y - center_y) ** 2)
                earth_radius = min(width, height) * 0.35

                # Create glow effect around Earth's edge
                if earth_radius * 0.9 <= distance_from_center <= earth_radius * 1.3:
                    glow_intensity = int(150 * (1 - abs(distance_from_center - earth_radius) / (earth_radius * 0.4)))
                    glow_intensity = max(0, min(255, glow_intensity))

                    # Atmospheric glow is blue-white
                    earth_glow_overlay[y, x] = (glow_intensity, glow_intensity // 2, glow_intensity // 4)

        # Apply Earth's atmospheric glow
        cv2.addWeighted(frame, 0.85, earth_glow_overlay, 0.15, 0, frame)


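# Illustrative sketch (not called by the generator): the per-pixel Python loop above
# that builds the Earth-limb glow performs width * height math.sqrt calls, which is
# very slow at 2K. The same ring-shaped intensity falloff can be computed with a few
# vectorized NumPy operations; the helper name is hypothetical.
def _example_limb_glow(height, width):
    """Return an HxWx3 uint8 overlay with a blue-white glow ring around the Earth limb."""
    yy, xx = np.ogrid[:height, :width]
    center_x, center_y = width // 2, height // 2
    dist = np.sqrt((xx - center_x) ** 2 + (yy - center_y) ** 2)

    earth_radius = min(width, height) * 0.35
    ring = (dist >= earth_radius * 0.9) & (dist <= earth_radius * 1.3)

    intensity = np.zeros((height, width), dtype=np.float32)
    intensity[ring] = 150 * (1 - np.abs(dist[ring] - earth_radius) / (earth_radius * 0.4))
    intensity = np.clip(intensity, 0, 255)

    # Blue-dominant glow, matching the (B, G/2, R/4) channel weighting used above
    overlay = np.zeros((height, width, 3), dtype=np.uint8)
    overlay[..., 0] = intensity.astype(np.uint8)
    overlay[..., 1] = (intensity / 2).astype(np.uint8)
    overlay[..., 2] = (intensity / 4).astype(np.uint8)
    return overlay

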
def add_troposphere_effects(frame, width, height, altitude, progress):
    """Add realistic troposphere visual effects"""

    if altitude < 15000:  # Troposphere range
        # Add weather patterns and clouds
        cloud_overlay = np.zeros((height, width, 3), dtype=np.uint8)

        # Cloud density based on altitude
        cloud_density = 1 - (altitude / 15000)  # More clouds at lower altitude

        # Generate realistic cloud formations
        np.random.seed(int(progress * 100))  # Change clouds over time

        for cloud_system in range(int(10 * cloud_density)):
            cloud_x = np.random.randint(0, width)
            cloud_y = np.random.randint(0, height)
            cloud_size = np.random.randint(30, 100)

            # Cloud brightness varies with altitude
            cloud_brightness = int(180 + (255 - 180) * (1 - cloud_density))
            cloud_color = (cloud_brightness, cloud_brightness, cloud_brightness)

            # Draw fluffy clouds
            cv2.ellipse(cloud_overlay, (cloud_x, cloud_y),
                        (cloud_size, cloud_size // 2),
                        np.random.randint(0, 360), 0, 360, cloud_color, -1)

            # Add smaller cloud puffs around main cloud
            for puff in range(3):
                puff_x = cloud_x + np.random.randint(-cloud_size, cloud_size)
                puff_y = cloud_y + np.random.randint(-cloud_size // 2, cloud_size // 2)
                puff_size = cloud_size // 3

                if 0 <= puff_x < width and 0 <= puff_y < height:
                    cv2.circle(cloud_overlay, (puff_x, puff_y), puff_size, cloud_color, -1)

        # Apply cloud layer
        cv2.addWeighted(frame, 0.7, cloud_overlay, 0.3, 0, frame)

        # Add atmospheric perspective (haze increasing with distance)
        haze_overlay = np.zeros((height, width, 3), dtype=np.uint8)
        for y in range(height):
            # More haze towards horizon
            haze_intensity = int(50 * (y / height) * cloud_density)
            if haze_intensity > 0:
                cv2.rectangle(haze_overlay, (0, y), (width, y + 1),
                              (haze_intensity, haze_intensity, haze_intensity), -1)

        cv2.addWeighted(frame, 0.9, haze_overlay, 0.1, 0, frame)


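# Illustrative sketch (not called by the generator): the troposphere pass above scales
# both the number of cloud systems and their brightness linearly with altitude via
# cloud_density = 1 - altitude / 15000. The helper below just tabulates that mapping;
# the function name is hypothetical.
def _example_cloud_parameters(altitude_m):
    """Return (cloud_density, cloud_system_count, cloud_brightness) for a troposphere altitude."""
    cloud_density = max(0.0, 1 - (altitude_m / 15000))
    cloud_count = int(10 * cloud_density)
    cloud_brightness = int(180 + (255 - 180) * (1 - cloud_density))
    return cloud_density, cloud_count, cloud_brightness

# Expected values under this model:
#   _example_cloud_parameters(0)     -> (1.0, 10, 180)  dense, slightly darker deck
#   _example_cloud_parameters(7500)  -> (0.5, 5, 217)
#   _example_cloud_parameters(15000) -> (0.0, 0, 255)   no clouds at the top of the range

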
def create_transition_bridge_frame(frame1, frame2, transition_progress, width, height):
    """Create smooth transition between two frames with cinematic effects"""

    # Create transition frame
    transition_frame = np.zeros((height, width, 3), dtype=np.uint8)

    # Smooth blend between frames
    alpha = transition_progress  # 0 to 1
    beta = 1 - alpha

    # Basic blend
    cv2.addWeighted(frame1, beta, frame2, alpha, 0, transition_frame)

    # Add cinematic transition effects
    if transition_progress < 0.5:
        # First half: fade out effect on frame1
        fade_progress = transition_progress * 2  # 0 to 1

        # Add motion blur effect
        blur_intensity = int(fade_progress * 15)
        if blur_intensity > 0:
            transition_frame = cv2.GaussianBlur(transition_frame,
                                                (blur_intensity * 2 + 1, blur_intensity * 2 + 1), 0)

        # Add vignette effect for dramatic transition
        vignette_overlay = np.zeros((height, width, 3), dtype=np.uint8)
        center_x, center_y = width // 2, height // 2
        max_distance = math.sqrt(center_x ** 2 + center_y ** 2)

        for y in range(height):
            for x in range(width):
                distance = math.sqrt((x - center_x) ** 2 + (y - center_y) ** 2)
                vignette_strength = (distance / max_distance) * fade_progress
                vignette_darkness = int(255 * vignette_strength * 0.7)
                vignette_overlay[y, x] = (vignette_darkness, vignette_darkness, vignette_darkness)

        # Apply vignette
        transition_frame = cv2.subtract(transition_frame, vignette_overlay)

    else:
        # Second half: fade in effect on frame2
        fade_progress = (transition_progress - 0.5) * 2  # 0 to 1

        # Add zoom effect for dynamic transition
        zoom_factor = 1 + fade_progress * 0.1  # Slight zoom
        zoom_matrix = cv2.getRotationMatrix2D((width // 2, height // 2), 0, zoom_factor)
        transition_frame = cv2.warpAffine(transition_frame, zoom_matrix, (width, height))

        # Add brightness enhancement for dramatic effect
        brightness_boost = int(fade_progress * 30)
        if brightness_boost > 0:
            bright_overlay = np.full((height, width, 3), brightness_boost, dtype=np.uint8)
            transition_frame = cv2.add(transition_frame, bright_overlay)

    # Add film grain for cinematic quality
    grain_overlay = np.random.randint(0, 20, (height, width, 3), dtype=np.uint8)
    grain_overlay = cv2.GaussianBlur(grain_overlay, (3, 3), 0)
    cv2.addWeighted(transition_frame, 0.95, grain_overlay, 0.05, 0, transition_frame)

    return transition_frame


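# Illustrative usage sketch (not part of the pipeline): create_transition_bridge_frame
# blends two already-rendered frames, so inserting a short bridge between two video
# segments is just a matter of sampling transition_progress from 0 to 1. The helper
# name, argument names, and frame count below are assumptions for demonstration only.
def _example_bridge_frames(last_frame_of_segment_a, first_frame_of_segment_b, width, height, num_frames=30):
    """Return a list of num_frames blended frames bridging two segments."""
    bridge = []
    for i in range(num_frames):
        progress = i / max(1, num_frames - 1)  # 0.0 .. 1.0 inclusive
        bridge.append(create_transition_bridge_frame(
            last_frame_of_segment_a, first_frame_of_segment_b, progress, width, height))
    return bridge

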
def generate_3d_video_animation_test_mode(project_name, resources_folder, label_widget, progress_widget, popup_widget, clock_module):
    """
    Generate a 3D video animation in test mode (720p resolution for faster generation)

    This is a convenience wrapper for testing purposes that automatically enables test mode.

    Args:
        project_name: Name of the project
        resources_folder: Path to resources folder
        label_widget: Kivy label for status updates
        progress_widget: Kivy progress bar
        popup_widget: Kivy popup to dismiss when done
        clock_module: Kivy Clock module for scheduling
    """
    return generate_3d_video_animation(
        project_name, resources_folder, label_widget, progress_widget,
        popup_widget, clock_module, test_mode=True
    )


def generate_3d_video_animation_production_mode(project_name, resources_folder, label_widget, progress_widget, popup_widget, clock_module):
    """
    Generate a 3D video animation in production mode (2K resolution for high quality)

    This is a convenience wrapper that explicitly enables production mode.

    Args:
        project_name: Name of the project
        resources_folder: Path to resources folder
        label_widget: Kivy label for status updates
        progress_widget: Kivy progress bar
        popup_widget: Kivy popup to dismiss when done
        clock_module: Kivy Clock module for scheduling
    """
    return generate_3d_video_animation(
        project_name, resources_folder, label_widget, progress_widget,
        popup_widget, clock_module, test_mode=False
    )


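# Illustrative usage sketch (assumption: a Kivy app that has already built the popup,
# status label, and progress bar, and that passes in kivy.clock.Clock). The wrappers
# above block while frames are rendered, so a caller would typically run them on a
# background thread; this helper is hypothetical and is not invoked by this module.
def _example_launch_in_background(project_name, resources_folder, label_widget, progress_widget, popup_widget, clock_module):
    """Start test-mode generation on a daemon thread so the Kivy UI thread stays responsive."""
    import threading

    worker = threading.Thread(
        target=generate_3d_video_animation_test_mode,
        args=(project_name, resources_folder, label_widget, progress_widget, popup_widget, clock_module),
        daemon=True,
    )
    worker.start()
    return worker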