# blender GLGS  (stray note left at top of file; commented out — it was a SyntaxError)
import glfw
from OpenGL.GL import *
import numpy as np
import librosa
import subprocess
import time
import sys
import os
from tkinter import Tk, filedialog
import cv2
# =============================
# 1. SCELTA FILE CON TKINTER
# =============================
# Interactive file pickers run first so a cancelled dialog aborts the
# script before any heavy audio/GL work starts.
root = Tk()
root.withdraw()  # hide the empty Tk root window; only the dialogs appear

print("🎵 Seleziona il file audio...")
audio_path = filedialog.askopenfilename(
    title="Seleziona il file audio",
    filetypes=[("Audio Files", "*.wav *.mp3 *.flac *.ogg *.m4a")],
)
if not audio_path:
    print("✗ Nessun file audio selezionato.")
    sys.exit(0)

print("🎬 Seleziona il video di sfondo...")
video_path = filedialog.askopenfilename(
    title="Seleziona video di sfondo",
    filetypes=[("Video Files", "*.mp4 *.avi *.mov *.mkv *.webm *.flv *.wmv")],
)
if not video_path:
    print("✗ Nessun video selezionato.")
    sys.exit(0)

print("💾 Seleziona dove salvare il video renderizzato...")
output_path = filedialog.asksaveasfilename(
    title="Salva file video",
    defaultextension=".mp4",
    filetypes=[("Video MP4", "*.mp4")],
)
if not output_path:
    print("✗ Nessun percorso di output selezionato.")
    sys.exit(0)

# FIX: release the hidden Tk root once all dialogs are done; the original
# left it alive (with its event loop resources) for the whole render.
root.destroy()
# =============================
# 2. ANALISI AUDIO (LIBROSA)
# =============================
# Extract per-frame audio descriptors; each one later drives a shader uniform.
print(f"\n=== ANALISI AUDIO DI {os.path.basename(audio_path)} ===")
try:
    # sr=None keeps the file's native sample rate; mono=True collapses channels.
    y, sr = librosa.load(audio_path, sr=None, mono=True)
    print(f"✓ Durata audio: {len(y)/sr:.1f}s, {sr} Hz")
except Exception as e:
    print(f"✗ Errore caricamento audio: {e}")
    sys.exit(1)

hop_length = 512  # samples per analysis frame; also used to index features in the render loop

# Harmonic/percussive separation — only the harmonic part feeds the chroma
# below (y_perc is currently unused).
y_harm, y_perc = librosa.effects.hpss(y)
rms = librosa.feature.rms(y=y, hop_length=hop_length)[0]  # per-frame loudness
centroid = librosa.feature.spectral_centroid(y=y, sr=sr, hop_length=hop_length)[0]  # brightness proxy
flux = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)  # transient/onset energy
chroma = librosa.feature.chroma_cens(y=y_harm, sr=sr, hop_length=hop_length)
# Dominant pitch class per frame, scaled to [0, 11/12], plus its chroma weight.
note = np.argmax(chroma, axis=0) / 12.0
strength = np.max(chroma, axis=0)
def normalize(x):
    """Rescale array *x* to approximately [0, 1].

    A small epsilon in the denominator means a constant input maps to all
    zeros instead of raising a division-by-zero warning.
    """
    lo = np.min(x)
    # FIX: the original evaluated np.min(x) twice; hoist it once.
    return (x - lo) / (np.max(x) - lo + 1e-6)
# Normalised [0, 1] control signals, one value per analysis frame; each is
# uploaded as a shader uniform every rendered frame.
u_energy = normalize(rms)
u_brightness = normalize(centroid)
u_flux = normalize(flux)
u_note = normalize(note)
u_strength = normalize(strength)
print("✓ Analisi audio completata")
print(f" - Energy (RMS): {len(u_energy)} campioni")
print(f" - Flux (onset): {len(u_flux)} campioni")
print(f" - Note (chroma): {len(u_note)} campioni")
# =============================
# 3. ANALISI VIDEO DI SFONDO
# =============================
# Probe the background video's metadata and verify it can actually decode.
print(f"\n=== ANALISI VIDEO DI {os.path.basename(video_path)} ===")
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
    print("✗ Impossibile aprire il video")
    sys.exit(1)

# FIX: keep the FPS as float — int() truncated fractional rates
# (e.g. 29.97 -> 29), slowly drifting the audio->video frame mapping.
video_fps = float(cap.get(cv2.CAP_PROP_FPS))
video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# FIX: some containers report 0 FPS / 0 frames; that would raise
# ZeroDivisionError here and again in the render loop.
if video_fps <= 0 or video_frame_count <= 0:
    print("✗ Metadati video non validi (FPS o numero frame = 0)")
    sys.exit(1)
video_duration = video_frame_count / video_fps
print(f"✓ Video: {video_width}x{video_height}, {video_fps:g} FPS")
print(f"✓ Durata video: {video_duration:.1f}s ({video_frame_count} frames)")

audio_duration = len(y) / sr
print(f"\n📊 Confronto durate:")
print(f" Audio: {audio_duration:.1f}s")
print(f" Video: {video_duration:.1f}s")
if video_duration < audio_duration:
    print("⚠️ Video più corto dell'audio: verrà loopato")
elif video_duration > audio_duration:
    print("⚠️ Video più lungo dell'audio: verrà troncato")

# Leggi un frame per test — makes sure the codec actually decodes.
ret, test_frame = cap.read()
if ret:
    print("✓ Video caricato correttamente")
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # Rewind
else:
    print("✗ Errore lettura video")
    sys.exit(1)
# =============================
# 4. SHADER CON BLENDING AVANZATO
# =============================
# GLSL 1.30 (OpenGL 3.0) sources. The strings are handed verbatim to the
# GL driver, so their text must not be reformatted.

# Vertex stage: full-screen quad already in clip space; uv derived in [0, 1].
vertex_shader = """
#version 130
in vec2 position;
out vec2 uv;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
uv = position * 0.5 + 0.5;
}
"""

# Fragment stage: samples the background video texture (audio-distorted UVs),
# synthesizes an fbm-based "thermal" blob driven by the audio features, and
# blends the two according to u_blend_mode
# (0=mix, 1=add, 2=multiply, 3=screen, anything else=overlay).
fragment_shader = """
#version 130
in vec2 uv;
out vec4 fragColor;
uniform float u_time, u_energy, u_brightness, u_flux, u_note, u_strength;
uniform vec2 u_res;
uniform sampler2D tex_video;
uniform float u_video_time;
uniform int u_blend_mode;
float hash(vec2 p) {
return fract(sin(dot(p, vec2(12.9898, 78.233))) * 43758.5453);
}
float noise(vec2 p) {
vec2 i = floor(p);
vec2 f = fract(p);
float a = hash(i);
float b = hash(i + vec2(1.0, 0.0));
float c = hash(i + vec2(0.0, 1.0));
float d = hash(i + vec2(1.0, 1.0));
vec2 u = f * f * (3.0 - 2.0 * f);
return mix(a, b, u.x) + (c - a) * u.y * (1.0 - u.x) + (d - b) * u.x * u.y;
}
float fbm(vec2 p) {
float value = 0.0;
float amplitude = 0.5;
for (int i = 0; i < 4; i++) {
value += amplitude * noise(p);
p *= 2.0;
amplitude *= 0.5;
}
return value;
}
vec3 thermal_color(float t, float hue, float str) {
float h = hue * 6.28318;
vec3 c = vec3(
0.5 + 0.5 * cos(h),
0.5 + 0.5 * cos(h + 2.094),
0.5 + 0.5 * cos(h + 4.188)
);
c *= 0.6 + str * 0.4;
if (t > 0.8) return mix(c, vec3(1.0, 1.0, 0.9), (t - 0.8) * 5.0);
if (t > 0.4) return mix(vec3(0.05, 0.1, 0.4) * c, c, (t - 0.4) * 2.5);
return mix(vec3(0.0, 0.0, 0.05), vec3(0.05, 0.1, 0.4) * c, t * 2.5);
}
// --- FUNZIONE MODIFICATA PER ALLARGARE LA MACCHIA ---
vec3 generate_thermal_effect(vec2 uv, float time, float energy,
float brightness, float flux,
float note, float strength) {
float distort = fbm(uv * 1.5 + time * 0.1);
vec2 uv_dist = uv + distort * (energy * 0.3 + flux * 0.2);
float d = length(uv_dist);
// MODIFICA: aumentato il raggio da 0.7 a 1.4 per espandere la macchia
float base_t = smoothstep(1.4 + brightness * 0.4, 0.0, d);
float temp = base_t * (0.3 + energy * 0.7);
temp += fbm(uv_dist * 5.0 - time * 0.15) * flux * 0.3;
vec3 col = thermal_color(temp, note, strength);
float phase = sin(time * (5.0 + note * 10.0)) * 0.5 + 0.5;
col *= mix(0.9, 1.1, phase);
col += col * (0.03 / (d + 0.03)) * (energy + 0.5);
col += hash(uv * time) * 0.04 * flux;
// MODIFICA: allargato il cutoff ai bordi
col *= smoothstep(1.5, 0.5, length(uv));
return col;
}
vec3 blend_mix(vec3 a, vec3 b, float t) { return mix(a, b, t); }
vec3 blend_add(vec3 a, vec3 b, float t) { return a + b * t; }
vec3 blend_multiply(vec3 a, vec3 b, float t) { return a * mix(vec3(1.0), b, t); }
vec3 blend_screen(vec3 a, vec3 b, float t) { return 1.0 - (1.0 - a) * (1.0 - b * t); }
vec3 blend_overlay(vec3 a, vec3 b, float t) {
b *= t;
vec3 result = vec3(0.0);
for (int i = 0; i < 3; i++) {
result[i] = a[i] < 0.5 ? 2.0 * a[i] * b[i] : 1.0 - 2.0 * (1.0 - a[i]) * (1.0 - b[i]);
}
return mix(a, result, t);
}
// --- IL "MAIN" DELLO SHADER ---
void main() {
vec2 fragCoord = gl_FragCoord.xy;
vec2 pixel_uv = fragCoord / u_res;
vec2 video_uv = vec2(pixel_uv.x, 1.0 - pixel_uv.y);
float distortion_strength = u_flux * 0.15 + u_energy * 0.05;
vec2 distorted_uv = video_uv + vec2(
sin(video_uv.y * 15.0 + u_time * 2.5) * 0.01 * distortion_strength,
cos(video_uv.x * 12.0 + u_time * 2.0) * 0.01 * distortion_strength
);
vec3 video_color = texture(tex_video, distorted_uv).rgb;
// Effetto saturazione e tint
float luminance = dot(video_color, vec3(0.299, 0.587, 0.114));
video_color = mix(vec3(luminance), video_color, 1.0 + u_note * 0.3);
vec2 centered_uv = (fragCoord - 0.5 * u_res) / u_res.y;
vec3 thermal_col = generate_thermal_effect(centered_uv, u_time,
u_energy, u_brightness,
u_flux, u_note, u_strength);
// MODIFICA: Aumentata la trasparenza di base (0.5 invece di 0.0)
// Più alto è questo valore, più si vedrà sempre il video originale
float blend_strength = clamp(u_energy * 0.4 + u_flux * 0.2 + 0.5, 0.0, 1.0);
vec3 final_color;
if (u_blend_mode == 0) final_color = blend_mix(thermal_col, video_color, blend_strength);
else if (u_blend_mode == 1) final_color = blend_add(thermal_col, video_color, blend_strength * 0.7);
else if (u_blend_mode == 2) final_color = blend_multiply(thermal_col, video_color, blend_strength);
else if (u_blend_mode == 3) final_color = blend_screen(thermal_col, video_color, blend_strength);
else final_color = blend_overlay(thermal_col, video_color, blend_strength);
// Flash e vibrazione finale
if (u_energy > 0.9) final_color = mix(final_color, video_color * 2.0, smoothstep(0.9, 1.0, u_energy) * 0.7);
final_color *= 1.0 + hash(fragCoord * 0.1 + u_time) * (u_flux * 0.02);
fragColor = vec4(clamp(final_color, 0.0, 1.0), 1.0);
}
"""
# =============================
# 5. RENDER OFFLINE
# =============================
# Output resolution / frame rate of the rendered file (not of the source video).
WIDTH, HEIGHT, FPS = 1280, 720, 30
duration = len(y) / sr  # the render length follows the audio track
total_frames = int(duration * FPS)
print(f"\n=== IMPOSTAZIONI RENDER ===")
print(f"Risoluzione: {WIDTH}x{HEIGHT}")
print(f"Frame rate: {FPS} FPS")
print(f"Frame totali: {total_frames}")
print(f"Durata render: {duration:.1f}s")
# Configura FFmpeg per output video
def setup_ffmpeg(output_path, audio_path):
    """Launch an FFmpeg process that muxes raw RGB frames (fed via stdin)
    with the original audio track into an H.264/AAC MP4 at *output_path*.

    Returns the running subprocess.Popen; write frame bytes to its stdin.
    """
    raw_input_opts = [
        "-f", "rawvideo",
        "-vcodec", "rawvideo",
        "-s", f"{WIDTH}x{HEIGHT}",
        "-pix_fmt", "rgb24",
        "-r", str(FPS),
        "-i", "-",           # frames arrive on stdin
        "-i", audio_path,    # second input: the audio file
    ]
    encode_opts = [
        "-c:v", "libx264",
        "-preset", "veryfast",
        "-crf", "20",
        "-c:a", "aac",
        "-b:a", "192k",
        "-shortest",          # stop when the shorter stream ends
        "-pix_fmt", "yuv420p",
    ]
    argv = ["ffmpeg", "-y"] + raw_input_opts + encode_opts + [output_path]
    # stderr is discarded on purpose: FFmpeg's progress chatter would
    # interleave with the render-progress line printed by the main loop.
    return subprocess.Popen(argv, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)
# =============================
# 6. INIZIALIZZAZIONE OPENGL
# =============================
if not glfw.init():
    print("✗ GLFW init fallito")
    sys.exit()
# Hidden window: we only need an offscreen GL context for rendering.
glfw.window_hint(glfw.VISIBLE, glfw.FALSE)
window = glfw.create_window(WIDTH, HEIGHT, "Render Audio-Video", None, None)
if not window:
    print("✗ Finestra GLFW non creata")
    glfw.terminate()
    sys.exit()
glfw.make_context_current(window)
# Compila shader
def compile_program(vs, fs):
    """Compile and link a GLSL program from vertex/fragment sources.

    Raises RuntimeError with the driver's info log if compilation or
    linking fails — the original ignored the status flags and silently
    returned a broken program object.
    """
    def _compile(kind, src):
        # Compile one shader stage and verify its compile status.
        shader = glCreateShader(kind)
        glShaderSource(shader, src)
        glCompileShader(shader)
        if not glGetShaderiv(shader, GL_COMPILE_STATUS):
            log = glGetShaderInfoLog(shader)
            raise RuntimeError(log if isinstance(log, str) else log.decode(errors="replace"))
        return shader

    vertex = _compile(GL_VERTEX_SHADER, vs)
    fragment = _compile(GL_FRAGMENT_SHADER, fs)
    prog = glCreateProgram()
    glAttachShader(prog, vertex)
    glAttachShader(prog, fragment)
    glLinkProgram(prog)
    if not glGetProgramiv(prog, GL_LINK_STATUS):
        log = glGetProgramInfoLog(prog)
        raise RuntimeError(log if isinstance(log, str) else log.decode(errors="replace"))
    # Shader objects are no longer needed once linked into the program.
    glDeleteShader(vertex)
    glDeleteShader(fragment)
    return prog
prog = compile_program(vertex_shader, fragment_shader)
glUseProgram(prog)

# Setup geometria: a full-screen quad as a 4-vertex triangle strip.
vao = glGenVertexArrays(1)
glBindVertexArray(vao)
vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
verts = np.array([-1, -1, 1, -1, -1, 1, 1, 1], dtype=np.float32)
glBufferData(GL_ARRAY_BUFFER, verts.nbytes, verts, GL_STATIC_DRAW)
pos = glGetAttribLocation(prog, "position")
glEnableVertexAttribArray(pos)
glVertexAttribPointer(pos, 2, GL_FLOAT, GL_FALSE, 0, None)

# Uniform locations — resolved once here so the render loop avoids
# per-frame name lookups.
locs = {}
uniform_names = ["u_time", "u_energy", "u_brightness", "u_flux", "u_note",
                 "u_strength", "u_res", "tex_video", "u_video_time", "u_blend_mode"]
for name in uniform_names:
    locs[name] = glGetUniformLocation(prog, name)

# Crea texture per il video — refreshed every frame with glTexSubImage2D.
tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, tex)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# Alloca spazio per la texture (storage only; pixel data uploaded per frame)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, WIDTH, HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, None)
glUniform1i(locs["tex_video"], 0)  # Texture unit 0

# Imposta blend mode (0=mix, 1=add, 2=multiply, 3=screen, 4=overlay)
blend_mode = 0  # starting mode; the render loop may switch it dynamically
glUniform1i(locs["u_blend_mode"], blend_mode)
print("\n=== INIZIO RENDERING ===")
print("\n=== INIZIO RENDERING ===")
# =============================
# 7. LOOP DI RENDERING PRINCIPALE
# =============================
proc = setup_ffmpeg(output_path, audio_path)
start_time = time.time()
try:
for frame in range(total_frames):
t = frame / FPS
idx = min(int(t * sr / hop_length), len(u_energy) - 1)
# Leggi il frame corrente del video
video_frame_pos = int((t % video_duration) * video_fps) % video_frame_count
cap.set(cv2.CAP_PROP_POS_FRAMES, video_frame_pos)
ret, video_frame = cap.read()
if ret:
# Converti BGR→RGB e ridimensiona se necessario
video_frame_rgb = cv2.cvtColor(video_frame, cv2.COLOR_BGR2RGB)
if video_frame_rgb.shape[1] != WIDTH or video_frame_rgb.shape[0] != HEIGHT:
video_frame_rgb = cv2.resize(video_frame_rgb, (WIDTH, HEIGHT))
# Aggiorna texture OpenGL
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, tex)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, WIDTH, HEIGHT,
GL_RGB, GL_UNSIGNED_BYTE, video_frame_rgb)
# Tempo normalizzato del video (0-1)
video_time_normalized = (t % video_duration) / video_duration
# Poll events e clear
glfw.poll_events()
glClear(GL_COLOR_BUFFER_BIT)
# Imposta uniformi
glUniform1f(locs["u_time"], t)
glUniform1f(locs["u_energy"], u_energy[idx])
glUniform1f(locs["u_brightness"], u_brightness[idx])
glUniform1f(locs["u_flux"], u_flux[idx])
glUniform1f(locs["u_note"], u_note[idx])
glUniform1f(locs["u_strength"], u_strength[idx])
glUniform2f(locs["u_res"], WIDTH, HEIGHT)
glUniform1f(locs["u_video_time"], video_time_normalized)
# Render
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4)
# Leggi pixel e invia a FFmpeg
data = glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGB, GL_UNSIGNED_BYTE)
frame_data = np.frombuffer(data, dtype=np.uint8).reshape(HEIGHT, WIDTH, 3)
frame_data = np.flipud(frame_data) # Flip verticale
try:
proc.stdin.write(frame_data.tobytes())
except BrokenPipeError:
print("\n⚠️ Pipe FFmpeg chiusa")
break
# Progresso
if frame % max(1, total_frames // 50) == 0 or frame == total_frames - 1:
progress = (frame + 1) / total_frames * 100
elapsed = time.time() - start_time
eta = elapsed / (frame + 1) * (total_frames - frame - 1)
print(f"\rProgresso: {progress:5.1f}% | "
f"Frame: {frame+1}/{total_frames} | "
f"Tempo: {elapsed:.0f}s | ETA: {eta:.0f}s", end="")
# Aggiorna blend mode in base all'audio (opzionale)
if frame % (FPS * 2) == 0: # Ogni 2 secondi
avg_energy = np.mean(u_energy[max(0, idx-10):idx+10])
if avg_energy > 0.8:
new_blend_mode = 1 # Additive per picchi forti
elif avg_energy < 0.3:
new_blend_mode = 2 # Multiply per parti soft
else:
new_blend_mode = 0 # Mix normale
if new_blend_mode != blend_mode:
blend_mode = new_blend_mode
glUniform1i(locs["u_blend_mode"], blend_mode)
# Piccola pausa per evitare sovraccarico CPU
time.sleep(0.001)
except KeyboardInterrupt:
print("\n\n⏹️ Rendering interrotto dall'utente")
except Exception as e:
print(f"\n\n❌ Errore durante il rendering: {e}")
import traceback
traceback.print_exc()
# =============================
# 8. PULIZIA FINALE
# =============================
print("\n\n📦 Finalizzazione...")
# Closing FFmpeg's stdin signals end-of-stream so it can finalize the MP4;
# wait() reaps the process.
try:
    proc.stdin.close()
    proc.wait()
except Exception:
    # FIX: was a bare `except:` — keep the best-effort shutdown but stop
    # swallowing SystemExit/KeyboardInterrupt as well.
    pass
cap.release()
cv2.destroyAllWindows()
glfw.terminate()

# Verifica file generato — report size/duration or an explicit failure.
if os.path.exists(output_path):
    size = os.path.getsize(output_path) / (1024 * 1024)
    print(f"✅ Video creato con successo!")
    print(f" Percorso: {output_path}")
    print(f" Dimensione: {size:.1f} MB")
    print(f" Durata: {duration:.1f} secondi")
    print(f" Tempo totale: {time.time() - start_time:.1f} secondi")
    # Info blend mode utilizzato (last mode active when the loop ended)
    blend_modes = ["Mix", "Additivo", "Moltiplicativo", "Screen", "Overlay"]
    print(f" Blend mode: {blend_modes[blend_mode % 5]}")
else:
    print("✗ Errore: file video non generato")
print("\n🎉 Operazione completata!")
# Commenti  (stray trailing word; commented out — it raised NameError at runtime)