// js/pattern_audio.js

import * as Tone from "https://esm.sh/tone";

import { appState } from "../state.js";
import { highlightStep } from "./pattern_ui.js";
import { getTotalSteps } from "../utils.js";
import { initializeAudioContext, getMainGainNode } from "../audio.js";

import { TripleOscillator } from "../../audio/plugins/TripleOscillator.js";
import { Nes } from "../../audio/plugins/Nes.js";
import { SuperSaw } from "../../audio/plugins/SuperSaw.js";
import { Lb302 } from "../../audio/plugins/Lb302.js";
import { Kicker } from "../../audio/plugins/Kicker.js";

const MAX_VOL = 1.5;
const clamp = (n, a, b) => Math.max(a, Math.min(b, Number(n) || 0));

// Finds the bassline (pattern) track whose patternIndex matches `pi`.
function getBasslineByPatternIndex(pi) {
  return (appState.pattern.tracks || []).find(
    (t) => t.type === "bassline" && Number(t.patternIndex) === Number(pi)
  );
}

// Lazily builds (and caches) the per-pattern mix bus for a track:
// instrument volume/pan -> pattern volume/pan -> main gain.
function getSongMix(track, patternIndex) {
  track._songMix ??= {};
  if (track._songMix[patternIndex]) return track._songMix[patternIndex];

  const instVol = new Tone.Volume(0);
  const instPan = new Tone.Panner(0);
  const pattVol = new Tone.Volume(0);
  const pattPan = new Tone.Panner(0);

  instVol.connect(instPan);
  instPan.connect(pattVol);
  pattVol.connect(pattPan);
  pattPan.connect(getMainGainNode());

  track._songMix[patternIndex] = { instVol, instPan, pattVol, pattPan };
  return track._songMix[patternIndex];
}

// Pushes the current track + bassline volume/pan/mute values into the bus.
function refreshSongMixFor(track, patternIndex) {
  const b = getBasslineByPatternIndex(patternIndex);
  const mix = getSongMix(track, patternIndex);

  const instMuted = !!(track.isMuted || track.muted);
  const pattMuted = !!(b?.isMuted || b?.muted);

  const instV = clamp(track.volume ?? 1, 0, MAX_VOL);
  const pattV = clamp(b?.volume ?? 1, 0, MAX_VOL);

  mix.instVol.volume.value = (instMuted || instV === 0) ? -Infinity : Tone.gainToDb(instV);
  mix.pattVol.volume.value = (pattMuted || pattV === 0) ? -Infinity : Tone.gainToDb(pattV);

  mix.instPan.pan.value = clamp(track.pan ?? 0, -1, 1);
  mix.pattPan.pan.value = clamp(b?.pan ?? 0, -1, 1);

  return mix;
}

function getActivePatternForTrack(track) {
  const idx = appState.pattern?.activePatternIndex ?? track.activePatternIndex ?? 0;
  return track.patterns?.[idx] ?? null;
}

const TICKS_PER_STEP = 12; // LMMS: 12 ticks per 1/16 step
const STEPS_PER_BAR = 16; // 4/4 counted in 1/16 steps

// Map for dynamic plugin instantiation
const PLUGIN_CLASSES = {
  tripleoscillator: TripleOscillator,
  nes: Nes,
  supersaw: SuperSaw,
  lb302: Lb302,
  kicker: Kicker,
};
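
// Worked example of the timing constants above (illustrative only): at
// 120 BPM one 1/16 step lasts 60 / (120 * 4) = 0.125 s, so a note at
// pos = 24 ticks (24 / TICKS_PER_STEP = 2 steps) starts 0.25 s into the
// pattern, and one 4/4 bar (STEPS_PER_BAR = 16 steps) lasts 2 s.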

const timerDisplay = document.getElementById("timer-display");

// Holds the Tone.js "Parts" (melodic sequences) currently scheduled
let activeParts = [];

// =====================================================
// Guard: don't "leak" the Pattern Editor loop into the Song
// =====================================================
let _transportLoopSnapshot = null;

function snapshotTransportLoopOnce() {
  if (_transportLoopSnapshot) return;
  _transportLoopSnapshot = {
    loop: Tone.Transport.loop,
    loopStart: Tone.Transport.loopStart,
    loopEnd: Tone.Transport.loopEnd,
  };
}

function restoreTransportLoop() {
  if (!_transportLoopSnapshot) return;
  Tone.Transport.loop = _transportLoopSnapshot.loop;
  Tone.Transport.loopStart = _transportLoopSnapshot.loopStart;
  Tone.Transport.loopEnd = _transportLoopSnapshot.loopEnd;
  _transportLoopSnapshot = null;
}

let currentStep = 0;

function updateStepHighlight(step) {
  // reuses the existing helper from pattern_ui
  highlightStep(step, true);
  setTimeout(() => highlightStep(step, false), 60);
}

function formatTime(milliseconds) {
  const totalSeconds = Math.floor(milliseconds / 1000);
  const minutes = Math.floor(totalSeconds / 60).toString().padStart(2, "0");
  const seconds = (totalSeconds % 60).toString().padStart(2, "0");
  const centiseconds = Math.floor((milliseconds % 1000) / 10).toString().padStart(2, "0");
  return `${minutes}:${seconds}:${centiseconds}`;
}

export function playMetronomeSound(isDownbeat) {
  initializeAudioContext();
  const synth = new Tone.Synth().toDestination();
  const freq = isDownbeat ? 1000 : 800;
  synth.triggerAttackRelease(freq, "8n", Tone.now());
  // free the throwaway synth once the click has faded out
  setTimeout(() => synth.dispose(), 1000);
}

// Triggers a track's sample, making sure the player is routed correctly
export function playSample(filePath, trackId) {
  initializeAudioContext();

  const track = trackId
    ? appState.pattern.tracks.find((t) => t.id == trackId)
    : null;

  // If the track exists and has a player/preload
  if (track && (track.previewPlayer || track.player)) {
    const playerToUse = track.previewPlayer || track.player;

    if (playerToUse.loaded) {
      // Refresh volume/pan on trigger
      if (track.volumeNode) {
        track.volumeNode.volume.value =
          track.volume === 0 ? -Infinity : Tone.gainToDb(track.volume);
      }
      if (track.pannerNode) {
        track.pannerNode.pan.value = track.pan ?? 0;
      }

      // route playerToUse -> volumeNode
      try { playerToUse.disconnect(); } catch {}
      if (track.volumeNode) playerToUse.connect(track.volumeNode);

      // Trigger (the preview does not interfere with the playlist player)
      try {
        playerToUse.start(Tone.now());
      } catch (e) {
        console.warn("Failed to play preview/sample:", track.name, e);
      }
    } else {
      console.warn(`Player for track "${track.name}" not loaded yet — skipping.`);
    }
    return;
  }

  // Fallback: preview without a trackId
  if (!trackId && filePath) {
    const previewPlayer = new Tone.Player(filePath).toDestination();
    previewPlayer.autostart = true;
    // dispose the one-shot preview once it finishes
    previewPlayer.onstop = () => previewPlayer.dispose();
  }
}
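
// Usage sketch for playSample (hypothetical call sites; the real UI wiring
// lives elsewhere):
//   playSample(null, someTrack.id);          // re-preview an already-loaded track
//   playSample("samples/kick.wav", null);    // one-off preview of a file path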

// Lazily creates one Tone.Player per (sampler track, patternIndex) for step hits
function getSongStepPlayer(track, patternIndex) {
  if (track.type !== "sampler" || !track.buffer) return null;

  track._songStepPlayer ??= {};
  if (track._songStepPlayer[patternIndex]) return track._songStepPlayer[patternIndex];

  const mix = refreshSongMixFor(track, patternIndex);
  const p = new Tone.Player(track.buffer);
  p.connect(mix.instVol);

  track._songStepPlayer[patternIndex] = p;
  return p;
}

function _getPluginKey(track) {
  return String(
    track.pluginName ||
    track.instrumentName ||
    track.instrument?.constructor?.name ||
    ""
  ).toLowerCase();
}

function getSongInstrument(track, patternIndex) {
  if (track.type !== "plugin") return null;

  track._songInstrument ??= {};
  if (track._songInstrument[patternIndex]) return track._songInstrument[patternIndex];

  const key = _getPluginKey(track);
  const Cls = PLUGIN_CLASSES[key];
  if (!Cls) {
    console.warn("[Song] Plugin not found:", key, "track:", track.name);
    return null;
  }

  const mix = refreshSongMixFor(track, patternIndex);
  const inst = new Cls(Tone.getContext(), track.params || track.pluginData || {});
  inst.connect(mix.instVol);

  track._songInstrument[patternIndex] = inst;
  return inst;
}

function playSamplerNoteAtTime(track, midi, time, durationSec, destinationNode = null) {
  if (!track?.buffer) return;

  const base = track.baseNote ?? 60;
  const semitones = midi - base;
  const rate = Math.pow(2, semitones / 12);

  const player = new Tone.Player(track.buffer);
  player.playbackRate = rate;

  // destination: song bus (mix.instVol) OR the pattern editor's normal chain
  const dest = destinationNode || track.volumeNode || getMainGainNode();
  player.connect(dest);

  player.start(time);

  if (durationSec && durationSec > 0) player.stop(time + durationSec);
  player.onstop = () => player.dispose();
}
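
// Repitch math example (illustrative): with baseNote = 60 (middle C) and
// midi = 72, semitones = 12 and rate = 2 ** (12 / 12) = 2 — the sample plays
// one octave up at twice the speed.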

let stepEventId = null;

function tick(time) {
  const totalSteps = getTotalSteps();
  updateStepHighlight(currentStep);

  appState.pattern.tracks.forEach((track) => {
    const pat = getActivePatternForTrack(track);
    if (!pat) return;

    // Plugin/sampler tracks with piano-roll notes are already scheduled via schedulePianoRoll()
    const hasNotes = Array.isArray(pat.notes) && pat.notes.length > 0;
    if (hasNotes) return;

    // Step sequencer (one-shots)
    if (pat.steps?.[currentStep] && track.type === "sampler" && track.buffer) {
      // no midi -> play the base note (drum one-shot)
      playSamplerNoteAtTime(track, track.baseNote ?? 60, time, null);
    }
  });

  currentStep = (currentStep + 1) % totalSteps;
}

export function startPlayback() {
  if (appState.global.isPlaying) return;

  appState.global.isPlaying = true;
  currentStep = 0;

  Tone.Transport.stop();
  Tone.Transport.cancel();
  stopScheduledPianoRoll();
  schedulePianoRoll();

  stepEventId = Tone.Transport.scheduleRepeat(tick, "16n");
  Tone.Transport.start();
}

export function stopPlayback(rewind = true) {
  if (!appState.global.isPlaying) return;

  appState.global.isPlaying = false;

  if (stepEventId) {
    Tone.Transport.clear(stepEventId);
    stepEventId = null;
  }

  Tone.Transport.stop();
  Tone.Transport.cancel();
  stopScheduledPianoRoll();
  restoreTransportLoop();

  // ✅ Pattern Editor: stop only the preview (don't touch the playlist's track.player)
  appState.pattern.tracks.forEach((track) => {
    try { track.previewPlayer?.stop(); } catch {}
  });

  if (rewind) {
    currentStep = 0;
    updateStepHighlight(currentStep);
  }
}

export function rewindPlayback() {
  const lastStep =
    appState.global.currentStep > 0
      ? appState.global.currentStep - 1
      : getTotalSteps() - 1;
  appState.global.currentStep = 0;
  currentStep = 0; // also reset the step counter used by tick()

  Tone.Transport.position = 0; // reset the Tone.js transport time

  if (!appState.global.isPlaying) {
    if (timerDisplay) timerDisplay.textContent = "00:00:00";
    highlightStep(lastStep, false);
  }
}

export function togglePlayback() {
  initializeAudioContext();
  if (appState.global.isPlaying) {
    stopPlayback();
  } else {
    appState.global.currentStep = 0;
    startPlayback();
  }
}
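
// Usage sketch (hypothetical wiring; the element ids are assumptions):
//   document.getElementById("play-btn")?.addEventListener("click", togglePlayback);
//   document.getElementById("stop-btn")?.addEventListener("click", () => stopPlayback(true));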

// 2. Piano Roll scheduler (melody)
export function schedulePianoRoll() {
  stopScheduledPianoRoll(); // clear previous schedules

  const bpm = parseFloat(document.getElementById("bpm-input").value) || 120;
  const stepSec = 60 / (bpm * 4); // 1/16

  // LMMS: 1 bar (4/4) = 192 ticks, 1 step (1/16) = 12 ticks
  const TICKS_PER_BAR = 192;

  // 1) Work out how many bars are needed (largest note "end")
  let barsNeeded = parseInt(document.getElementById("bars-input")?.value, 10) || 1;

  appState.pattern.tracks.forEach((track) => {
    const pat = getActivePatternForTrack(track);
    if (!pat?.notes?.length) return;

    let maxEndTick = 0;
    pat.notes.forEach((n) => {
      const end = (n.pos ?? 0) + (n.len ?? 0);
      if (end > maxEndTick) maxEndTick = end;
    });

    const barsForThis = Math.max(1, Math.ceil(maxEndTick / TICKS_PER_BAR));
    if (barsForThis > barsNeeded) barsNeeded = barsForThis;
  });

  // 2) Sync the UI + Transport loop to that length
  const barsInput = document.getElementById("bars-input");
  if (barsInput) {
    barsInput.value = String(barsNeeded);
    barsInput.dispatchEvent(new Event("input", { bubbles: true }));
  }

  snapshotTransportLoopOnce();

  Tone.Transport.loop = true;
  Tone.Transport.loopStart = 0;
  Tone.Transport.loopEnd = `${barsNeeded}m`;

  // 3) Schedule the notes (plugins + samplers)
  appState.pattern.tracks.forEach((track) => {
    const pat = getActivePatternForTrack(track);
    if (!pat?.notes?.length) return;

    // plugin -> track.instrument
    // sampler -> track.buffer
    const canPlay =
      (track.type === "plugin" && track.instrument) ||
      (track.type === "sampler" && track.buffer);

    if (!canPlay) return;

    const events = pat.notes.map((note) => {
      const posSteps = (note.pos ?? 0) / TICKS_PER_STEP;
      const durSteps = (note.len ?? TICKS_PER_STEP) / TICKS_PER_STEP;

      return {
        time: posSteps * stepSec,
        midi: note.key,
        duration: Math.max(stepSec / 4, durSteps * stepSec),
        velocity: (note.vol ?? 100) / 100,
      };
    });

    const part = new Tone.Part((time, value) => {
      if (track.type === "sampler") {
        playSamplerNoteAtTime(track, value.midi, time, value.duration);
      } else {
        const freq = Tone.Frequency(value.midi, "midi").toFrequency();
        track.instrument.triggerAttackRelease(freq, value.duration, time, value.velocity);
      }
    }, events).start(0);

    part.loop = true;
    part.loopEnd = `${barsNeeded}m`;

    activeParts.push(part);
  });
}

function stopScheduledPianoRoll() {
  activeParts.forEach((p) => {
    try { p.stop(); } catch {}
    try { p.dispose(); } catch {}
  });
  activeParts = [];
}

// =========================================================================
// Render the current Pattern to an audio Blob
// =========================================================================

export async function renderActivePatternToBlob() {
  initializeAudioContext();

  const bpm = parseInt(document.getElementById("bpm-input").value, 10) || 120;

  // =========================================================
  // 1. SMART DURATION CALCULATION
  // =========================================================
  const stepInterval = 60 / (bpm * 4);
  const activePatternIndex =
    appState.pattern.tracks[0]?.activePatternIndex || 0;
  let maxStepFound = getTotalSteps(); // minimum: the on-screen length

  // Scan every track for the last note or step
  appState.pattern.tracks.forEach((track) => {
    const p = track.patterns[activePatternIndex];
    if (!p) return;

    // A. Steps (drums)
    if (p.steps && p.steps.includes(true)) {
      const lastIdx = p.steps.lastIndexOf(true);
      if (lastIdx + 1 > maxStepFound) maxStepFound = lastIdx + 1;
    }

    // B. Notes (piano roll) — assuming 1/16 steps of 12 ticks
    if (p.notes && p.notes.length > 0) {
      p.notes.forEach((n) => {
        const endTick = n.pos + n.len;
        const endStep = Math.ceil(endTick / 12);
        if (endStep > maxStepFound) maxStepFound = endStep;
      });
    }
  });

  // Round up to the next full bar (multiple of 16)
  const stepsPerBar = 16;
  const totalSteps = Math.ceil(maxStepFound / stepsPerBar) * stepsPerBar;
  const duration = totalSteps * stepInterval;

  // =========================================================
  // 2. OFFLINE RENDERING
  // =========================================================
  const buffer = await Tone.Offline(async ({ transport }) => {
    const rawCtx = Tone.getContext().rawContext;
    const masterGain = new Tone.Gain(1);
    masterGain.connect(rawCtx.destination);

    // Loop over every track in the project
    appState.pattern.tracks.forEach((track) => {
      const pattern = track.patterns[activePatternIndex];
      // Skip tracks without a pattern, and muted/empty tracks
      if (!pattern || track.muted) return;

      // Check for content (audio buffer OR MIDI notes OR active steps)
      const hasAudio = track.buffer;
      const hasNotes = pattern.notes && pattern.notes.length > 0;
      const hasSteps = pattern.steps && pattern.steps.includes(true);

      if (!hasAudio && !hasNotes && !hasSteps) return;

      // Create a volume/pan channel for this track in the offline world
      const panner = new Tone.Panner(track.pan || 0).connect(masterGain);
      const volume = new Tone.Volume(
        track.volume === 0 ? -100 : Tone.gainToDb(track.volume)
      ).connect(panner);

      // --- CASE A: a SAMPLE (recorded audio) ---
      if (track.samplePath && track.buffer) {
        // Original step logic for samples
        if (pattern.steps) {
          const events = [];
          pattern.steps.forEach((isActive, stepIndex) => {
            if (isActive) events.push(stepIndex * stepInterval);
          });

          if (events.length > 0) {
            new Tone.Part((time) => {
              const buf = track.buffer?.get?.() || track.buffer; // ✅ unwrap the AudioBuffer if it's a ToneAudioBuffer
              new Tone.Player(buf).connect(volume).start(time);
            }, events).start(0);
          }
        }
      }

      // --- CASE B: a PLUGIN (synthesizer) ---
      else if (track.type === "plugin") {
        // Normalize the name (e.g. "TripleOscillator" -> "tripleoscillator").
        // Try 'pluginName' first, then the instrument's constructor name.
        const pluginName = (
          track.pluginName ||
          track.instrument?.constructor?.name ||
          ""
        ).toLowerCase();

        const PluginClass = PLUGIN_CLASSES[pluginName];

        if (PluginClass) {
          // INSTANTIATE THE PLUGIN IN THE OFFLINE WORLD.
          // Pass 'track.params' or 'track.pluginData' (adjust to however your appState stores the data)
          const instrumentInstance = new PluginClass(Tone.getContext(), track.params || track.pluginData || {});

          // Hook it into the offline audio chain
          instrumentInstance.connect(volume);

          // 1. Schedule piano roll notes
          if (hasNotes) {
            const TICKS_PER_STEP = 12; // ✅ 12 ticks per 1/16
            const events = pattern.notes.map((note) => ({
              time: (note.pos / TICKS_PER_STEP) * stepInterval,
              midi: note.key,
              duration: (note.len / TICKS_PER_STEP) * stepInterval,
              velocity: (note.vol || 100) / 100,
            }));

            new Tone.Part((time, val) => {
              const freq = Tone.Frequency(val.midi, "midi").toFrequency();
              instrumentInstance.triggerAttackRelease(freq, val.duration, time, val.velocity);
            }, events).start(0);
          }

          // 2. Schedule steps (in case a synth is used as a drum/effect in the step sequencer)
          else if (hasSteps) {
            const stepEvents = [];
            pattern.steps.forEach((isActive, idx) => {
              if (isActive) stepEvents.push(idx * stepInterval);
            });

            new Tone.Part((time) => {
              // Play a default C5 for steps without an assigned note
              instrumentInstance.triggerAttackRelease("C5", 0.1, time);
            }, stepEvents).start(0);
          }
        } else {
          console.warn(
            `Render: plugin not supported or not found: ${pluginName}`
          );
        }
      }
    });

    // Configure and start the offline Transport
    transport.bpm.value = bpm;
    transport.start();
  }, duration);

  const ch = buffer.getChannelData(0);
  let peak = 0;
  for (let i = 0; i < ch.length; i++) peak = Math.max(peak, Math.abs(ch[i]));
  console.log("[Render] peak =", peak);

  const blob = bufferToWave(buffer);
  return blob;
}

// =========================================================================
// UTILITY: convert an AudioBuffer to a WAV Blob
// =========================================================================

function bufferToWave(abuffer) {
  let numOfChan = abuffer.numberOfChannels;
  let length = abuffer.length * numOfChan * 2 + 44;
  let buffer = new ArrayBuffer(length);
  let view = new DataView(buffer);
  let channels = [],
    i,
    sample;
  let pos = 0;

  function setAll(data) {
    for (i = 0; i < data.length; i++) {
      view.setUint8(pos + i, data[i]);
    }
    pos += data.length;
  }
  function setString(s) {
    setAll(s.split("").map((c) => c.charCodeAt(0)));
  }

  // RIFF/WAVE header
  setString("RIFF");
  view.setUint32(pos, length - 8, true); // file length - 8
  pos += 4;
  setString("WAVE");

  // "fmt " chunk: 16-bit PCM
  setString("fmt ");
  view.setUint32(pos, 16, true); // chunk size
  pos += 4;
  view.setUint16(pos, 1, true); // format: PCM
  pos += 2;
  view.setUint16(pos, numOfChan, true);
  pos += 2;
  view.setUint32(pos, abuffer.sampleRate, true);
  pos += 4;
  view.setUint32(pos, abuffer.sampleRate * 2 * numOfChan, true); // byte rate
  pos += 4;
  view.setUint16(pos, numOfChan * 2, true); // block align
  pos += 2;
  view.setUint16(pos, 16, true); // bits per sample
  pos += 2;

  // "data" chunk
  setString("data");
  view.setUint32(pos, length - 44, true);
  pos += 4;

  for (i = 0; i < numOfChan; i++) {
    channels.push(abuffer.getChannelData(i));
  }

  // Interleave channels, quantizing floats in [-1, 1] to 16-bit PCM
  for (i = 0; i < abuffer.length; i++) {
    for (let j = 0; j < numOfChan; j++) {
      sample = Math.max(-1, Math.min(1, channels[j][i]));
      sample = (0.5 + sample * 32767.5) | 0;
      view.setInt16(pos, sample, true);
      pos += 2;
    }
  }

  return new Blob([buffer], { type: "audio/wav" });
}
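
// Header layout produced by bufferToWave (16-bit PCM, little-endian):
//   bytes  0-11  "RIFF" <fileSize - 8> "WAVE"
//   bytes 12-35  "fmt " 16, format=1 (PCM), channels, sampleRate,
//                byteRate = sampleRate * 2 * channels, blockAlign, bitsPerSample=16
//   bytes 36-43  "data" <dataSize = fileSize - 44>, then interleaved samples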

// ===============================
// Song/Playlist Pattern Scheduler
// (plays patterns arranged in the Playlist)
// ===============================

const LMMS_TICKS_PER_STEP = 12;

function ticksToSec(ticks, stepIntervalSec) {
  // stepIntervalSec = duration of one step (1/16) in seconds
  // LMMS_TICKS_PER_STEP = 12 ticks per 1/16 (48 ticks per quarter note, 192 per 4/4 bar)
  return (Number(ticks) / LMMS_TICKS_PER_STEP) * stepIntervalSec;
}
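
// Example: at 120 BPM stepIntervalSec = 0.125, so
// ticksToSec(24, 0.125) === 0.25 — 24 ticks is exactly two 1/16 steps.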

let songPatternScheduleId = null;

export function startSongPatternPlaybackOnTransport() {
  initializeAudioContext();
  if (songPatternScheduleId !== null) return;

  songPatternScheduleId = Tone.Transport.scheduleRepeat((time) => {
    const bpm = parseInt(document.getElementById("bpm-input")?.value, 10) || 120;
    const stepIntervalSec = 60 / (bpm * 4);

    const transportSec = Tone.Transport.getSecondsAtTime
      ? Tone.Transport.getSecondsAtTime(time)
      : Tone.Transport.seconds;

    const songStep = Math.floor(transportSec / stepIntervalSec + 1e-6);
    const songTick = songStep * LMMS_TICKS_PER_STEP;

    // Patterns active on this tick (via the basslines / playlist clips)
    const basslineTracks = appState.pattern.tracks.filter(
      (t) => t.type === "bassline" && !(t.isMuted || t.muted)
    );

    const activePatternHits = [];
    for (const b of basslineTracks) {
      const clips = b.playlist_clips || [];
      const clip = clips.find((c) => songTick >= c.pos && songTick < c.pos + c.len);
      if (!clip) continue;

      const localStep = Math.floor((songTick - clip.pos) / LMMS_TICKS_PER_STEP);
      activePatternHits.push({ patternIndex: b.patternIndex, localStep });
    }

    // Which patternIndex values are active RIGHT NOW (have a clip playing on this tick)
    const activePatternSet = new Set(activePatternHits.map(h => Number(h.patternIndex)));

    // Always refresh existing buses, and "close the gate" when a pattern is not active
    for (const tr of appState.pattern.tracks || []) {
      if (tr.type === "bassline") continue;

      const mixMap = tr._songMix;
      if (!mixMap) continue;

      for (const piStr of Object.keys(mixMap)) {
        const pi = Number(piStr);

        // refresh instrument vol/pan/mute + pattern vol/pan/mute
        const mix = refreshSongMixFor(tr, pi);

        // 🔑 gate: if this patternIndex is NOT active on this tick, cut it IMMEDIATELY
        if (!activePatternSet.has(pi)) {
          mix.pattVol.volume.value = -Infinity;
        }
      }
    }

    // Only now: with no hits, bail out (the sustain has already been gated)
    if (activePatternHits.length === 0) return;

    // Trigger the real tracks (samplers/plugins)
    for (const track of appState.pattern.tracks) {
      if (track.type === "bassline") continue;
      if (track.muted || track.isMuted) continue;

      for (const hit of activePatternHits) {
        const patt = track.patterns?.[hit.patternIndex];
        if (!patt) continue;

        // pattern length in ticks (notes take priority, then steps)
        let pattLenTicksByNotes = 0;
        if (Array.isArray(patt.notes) && patt.notes.length > 0) {
          for (const n of patt.notes) {
            const pos = Number(n.pos) || 0;
            const rawLen = Number(n.len) || 0;
            const len = rawLen < 0 ? LMMS_TICKS_PER_STEP : Math.max(rawLen, LMMS_TICKS_PER_STEP);
            pattLenTicksByNotes = Math.max(pattLenTicksByNotes, pos + len);
          }
        }
        const pattLenTicksBySteps =
          (patt.steps?.length || 0) * LMMS_TICKS_PER_STEP;

        const pattLenTicks = Math.max(
          pattLenTicksByNotes,
          pattLenTicksBySteps,
          LMMS_TICKS_PER_STEP
        );

        // current tick inside the pattern (inner loop when a clip is stretched)
        const tickInPattern =
          (hit.localStep * LMMS_TICKS_PER_STEP) % pattLenTicks;

        // step index (for step patterns)
        const pattLenSteps = patt.steps?.length || 0;
        const stepInPattern =
          pattLenSteps > 0
            ? (Math.floor(tickInPattern / LMMS_TICKS_PER_STEP) % pattLenSteps)
            : hit.localStep;

        // ✅ 1) PLUGIN with a piano roll (notes) — USE the per-patternIndex instrument
        if (
          track.type === "plugin" &&
          Array.isArray(patt.notes) &&
          patt.notes.length > 0
        ) {
          // create/fetch the plugin instance for this patternIndex
          const inst = getSongInstrument(track, hit.patternIndex);
          if (!inst) continue;

          // make sure the mix/bus exists and is up to date
          refreshSongMixFor(track, hit.patternIndex);

          const stepStartTick = tickInPattern;
          const stepEndTick = stepStartTick + LMMS_TICKS_PER_STEP;

          for (const n of patt.notes) {
            const nPos = Number(n.pos) || 0;

            const wraps = stepEndTick > pattLenTicks;
            const inWindow = wraps
              ? (nPos >= stepStartTick || nPos < (stepEndTick - pattLenTicks))
              : (nPos >= stepStartTick && nPos < stepEndTick);

            if (!inWindow) continue;

            const offsetTicks =
              wraps && nPos < stepStartTick
                ? (pattLenTicks - stepStartTick) + nPos
                : nPos - stepStartTick;

            const t2 = time + ticksToSec(offsetTicks, stepIntervalSec);

            const rawLen = Number(n.len) || 0;
            const lenTicks = rawLen < 0 ? LMMS_TICKS_PER_STEP : Math.max(rawLen, LMMS_TICKS_PER_STEP);
            const durSec = Math.max(0.01, ticksToSec(lenTicks, stepIntervalSec));
            const vel = (Number(n.vol) || 100) / 100;

            const midi = Number(n.key) || 0;
            const freq = Tone.Frequency(midi, "midi").toFrequency();

            try {
              inst.triggerAttackRelease(freq, durSec, t2, vel);
            } catch (e) {
              console.warn("[Playlist] Failed to play plugin note:", track.name, e);
            }
          }

          continue; // don't fall through to the step logic
        }

        // ✅ 1b) SAMPLER with a piano roll (notes)
        if (
          track.type === "sampler" &&
          track.buffer &&
          Array.isArray(patt.notes) &&
          patt.notes.length > 0
        ) {
          const stepStartTick = tickInPattern;
          const stepEndTick = stepStartTick + LMMS_TICKS_PER_STEP;

          for (const n of patt.notes) {
            const nPos = Number(n.pos) || 0;

            const wraps = stepEndTick > pattLenTicks;
            const inWindow = wraps
              ? (nPos >= stepStartTick || nPos < (stepEndTick - pattLenTicks))
              : (nPos >= stepStartTick && nPos < stepEndTick);

            if (!inWindow) continue;

            const offsetTicks =
              wraps && nPos < stepStartTick
                ? (pattLenTicks - stepStartTick) + nPos
                : nPos - stepStartTick;

            const t2 = time + ticksToSec(offsetTicks, stepIntervalSec);

            const rawLen = Number(n.len) || 0;
            const lenTicks = rawLen < 0 ? LMMS_TICKS_PER_STEP : Math.max(rawLen, LMMS_TICKS_PER_STEP);
            const durSec = Math.max(0.01, ticksToSec(lenTicks, stepIntervalSec));

            const mix = refreshSongMixFor(track, hit.patternIndex);
            playSamplerNoteAtTime(track, Number(n.key) || 0, t2, durSec, mix.instVol);
          }

          continue; // don't fall through to the step logic
        }

        // ✅ 2) STEP (sampler/plugin without notes)
        if (!patt.steps) continue;

        if (patt.steps[stepInPattern]) {
          refreshSongMixFor(track, hit.patternIndex);

          // sampler step -> one player per patternIndex
          if (track.type === "sampler" && track.buffer) {
            const p = getSongStepPlayer(track, hit.patternIndex);
            if (p) {
              try {
                if (typeof p.restart === "function") {
                  p.restart(time);
                } else {
                  if (p.state === "started") p.stop(time);
                  p.start(time);
                }
              } catch (e) {
                console.warn("[Playlist] Failed to retrigger step:", track.name, e);
              }
            }
          }

          // plugin step -> one instrument per patternIndex
          else if (track.type === "plugin") {
            const inst = getSongInstrument(track, hit.patternIndex);
            if (inst) {
              try {
                inst.triggerAttackRelease("C5", "16n", time);
              } catch {}
            }
          }
        }
      }
    }
  }, "16n");
}
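
// Worked example of the wrap window used above (illustrative): with
// pattLenTicks = 48 and stepStartTick = 40, stepEndTick = 52 > 48, so the
// window wraps and covers notes with pos >= 40 OR pos < 4 — the tail and the
// head of the looped pattern within this single 1/16 slice.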

export function stopSongPatternPlaybackOnTransport() {
  if (songPatternScheduleId === null) return;
  for (const tr of appState.pattern.tracks || []) {
    // mix nodes
    if (tr._songMix) {
      for (const pi of Object.keys(tr._songMix)) {
        const m = tr._songMix[pi];
        try { m.instVol?.dispose(); } catch {}
        try { m.instPan?.dispose(); } catch {}
        try { m.pattVol?.dispose(); } catch {}
        try { m.pattPan?.dispose(); } catch {}
      }
      tr._songMix = null;
    }

    // per-pattern plugin instances
    if (tr._songInstrument) {
      for (const pi of Object.keys(tr._songInstrument)) {
        try { tr._songInstrument[pi]?.dispose?.(); } catch {}
      }
      tr._songInstrument = null;
    }

    // per-pattern step players
    if (tr._songStepPlayer) {
      for (const pi of Object.keys(tr._songStepPlayer)) {
        try { tr._songStepPlayer[pi]?.dispose?.(); } catch {}
      }
      tr._songStepPlayer = null;
    }
  }
  try {
    Tone.Transport.clear(songPatternScheduleId);
  } catch {}
  songPatternScheduleId = null;
}

// =========================================================================
// Render the ENTIRE project (Playlist patterns + Audio Timeline) to WAV
// =========================================================================

function _n(v, def = 0) {
  const x = Number(v);
  return Number.isFinite(x) ? x : def;
}

function _secondsPerStep(bpm) {
  return 60 / (bpm * 4); // 1/16
}

function _ticksToSeconds(ticks, stepSec) {
  // LMMS: 12 ticks per 1/16 step
  return (_n(ticks, 0) / 12) * stepSec;
}

function _dbFromVol(vol, muted) {
  const v = clamp(vol ?? 1, 0, MAX_VOL);
  if (muted || v <= 0) return -Infinity;
  return Tone.gainToDb(v);
}

function _sanitizeFileName(name) {
  return String(name || "project")
    .trim()
    .replace(/[<>:"/\\|?*\x00-\x1F]+/g, "_")
    .replace(/\s+/g, "_")
    .slice(0, 80);
}
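
// Example: _sanitizeFileName('my beat: v2?') -> "my_beat__v2_"
// (runs of reserved characters become "_", then runs of whitespace become "_").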

function _patternLengthTicks(patt) {
  const T = 12;

  let byNotes = 0;
  if (Array.isArray(patt?.notes) && patt.notes.length) {
    for (const n of patt.notes) {
      const pos = _n(n.pos, 0);
      const rawLen = _n(n.len, T);
      const len = rawLen < 0 ? T : Math.max(rawLen, T);
      byNotes = Math.max(byNotes, pos + len);
    }
  }

  const bySteps = (patt?.steps?.length || 0) * T;

  return Math.max(byNotes, bySteps, T);
}
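
// Example: a 16-step pattern whose last note ends at tick 200 yields
// _patternLengthTicks -> max(200, 16 * 12, 12) = 200 ticks.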

function _collectArrangements() {
  const basslines = (appState.pattern?.tracks || []).filter(t => t.type === "bassline");
  const arr = [];

  for (const b of basslines) {
    const clips = (b.playlist_clips || []).filter(c => _n(c.len, 0) > 0);
    if (clips.length) arr.push(b);
  }

  // Fallback: with no playlist_clips, render the active pattern for N bars
  if (arr.length === 0) {
    const bars = parseInt(document.getElementById("bars-input")?.value, 10) || 1;
    const activePi = _n(appState.pattern?.activePatternIndex, 0);
    arr.push({
      patternIndex: activePi,
      volume: 1,
      pan: 0,
      muted: false,
      isMuted: false,
      playlist_clips: [{ pos: 0, len: bars * 192 }], // 192 ticks per 4/4 bar
    });
  }

  return arr;
}
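
// Clip shape assumed throughout this section: { pos, len } in LMMS ticks,
// e.g. { pos: 0, len: 384 } covers bars 1-2 (192 ticks per 4/4 bar).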

function _projectDurationSeconds(bpm) {
  const stepSec = _secondsPerStep(bpm);

  // 1) end time from the playlist (ticks)
  let maxTick = 0;
  for (const b of _collectArrangements()) {
    for (const c of (b.playlist_clips || [])) {
      const end = _n(c.pos, 0) + _n(c.len, 0);
      if (end > maxTick) maxTick = end;
    }
  }
  const playlistEndSec = _ticksToSeconds(maxTick, stepSec);

  // 2) end time from the audio editor (seconds)
  let audioEndSec = 0;
  for (const c of (appState.audio?.clips || [])) {
    const end = _n(c.startTimeInSeconds, 0) + _n(c.durationInSeconds, 0);
    if (end > audioEndSec) audioEndSec = end;
  }

  return Math.max(playlistEndSec, audioEndSec, stepSec);
}

async function _fetchAudioBuffer(url, audioCtx) {
  try {
    const res = await fetch(url);
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
    const arr = await res.arrayBuffer();
    // slice(0) avoids "detached" ArrayBuffer issues in some browsers
    return await audioCtx.decodeAudioData(arr.slice(0));
  } catch (e) {
    console.warn("[Render] Failed to load audio:", url, e);
    return null;
  }
}

function _playOneShot(buffer, time, dest, stopTime = null, playbackRate = 1) {
  const p = new Tone.Player(buffer);
  p.playbackRate = playbackRate;
  p.connect(dest);
  p.start(time);

  if (stopTime != null && stopTime > time) {
    try { p.stop(stopTime); } catch {}
  }

  p.onstop = () => {
    try { p.dispose(); } catch {}
  };
}
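
// Usage sketch (hypothetical values): retrigger a drum hit at transport
// time 1.5 s, cut it at 1.75 s, pitched up an octave:
//   _playOneShot(kickBuffer, 1.5, mix.instVol, 1.75, 2);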

export async function renderProjectToBlob({ tailSec = 0.25 } = {}) {
  initializeAudioContext();

  // (Optional, but helps in some browsers)
  try { await Tone.start(); } catch {}

  const bpm = parseInt(document.getElementById("bpm-input")?.value, 10) || 120;
  const stepSec = _secondsPerStep(bpm); // duration of one 1/16 step, used by the scheduling below
  const duration = _projectDurationSeconds(bpm) + Math.max(0, Number(tailSec) || 0);

  // =========================================================
  // 1) PRELOAD BUFFERS (OUTSIDE Tone.Offline) ✅
  // =========================================================
  const decodeCtx = Tone.getContext().rawContext;

  const urls = new Set();

  // audio timeline
  for (const clip of (appState.audio?.clips || [])) {
    const url = clip.sourcePath || clip.src || clip.url;
    if (url) urls.add(String(url));
  }

  // sampler tracks
  const samplerTracks = (appState.pattern?.tracks || []).filter(
    t => t.type === "sampler" && t.samplePath
  );
  for (const t of samplerTracks) {
    urls.add(String(t.samplePath));
  }

  const bufByUrl = new Map();
  await Promise.all([...urls].map(async (url) => {
    const b = await _fetchAudioBuffer(url, decodeCtx);
    if (b) bufByUrl.set(url, b);
  }));

  const samplerBufByTrackId = new Map();
  for (const t of samplerTracks) {
    samplerBufByTrackId.set(String(t.id), bufByUrl.get(String(t.samplePath)) || null);
  }

  console.log("[Render] buffers loaded:", bufByUrl.size);

  // =========================================================
  // 2) OFFLINE RENDER (NO await inside here) ✅
  // =========================================================
  const buffer = await Tone.Offline(({ transport }) => {
    transport.bpm.value = bpm;

    const rawCtx = Tone.getContext().rawContext;

    // master gain in the OFFLINE context
    const master = new Tone.Gain(1);
    master.connect(rawCtx.destination);

    let scheduledAudio = 0;
    let scheduledNotes = 0;
    let scheduledSteps = 0;

    // ------------------------------------------------------------
    // (A) AUDIO TIMELINE
    // ------------------------------------------------------------
    for (const clip of (appState.audio?.clips || [])) {
      const muted = !!clip.muted || (_n(clip.volume, 1) <= 0);
      if (muted) continue;

      const url = clip.sourcePath || clip.src || clip.url;
      if (!url) continue;

      const buf = bufByUrl.get(String(url));
      if (!buf) continue;

      const start = _n(clip.startTimeInSeconds, 0);
      const dur = _n(clip.durationInSeconds, 0);
      if (dur <= 0.0001) continue;

      const offset = Math.max(0, _n(clip.offset, 0));
      const vol = clamp(clip.volume ?? 1, 0, MAX_VOL);
      const pan = clamp(clip.pan ?? 0, -1, 1);

      const volNode = new Tone.Volume(vol <= 0 ? -Infinity : Tone.gainToDb(vol));
      const panNode = new Tone.Panner(pan);

      volNode.connect(panNode);
      panNode.connect(master);

      const player = new Tone.Player();
      player.buffer = buf; // ✅ use the pre-decoded AudioBuffer
      player.connect(volNode);

      player.start(start, offset, dur);
      player.stop(start + dur + 0.01);

      player.onstop = () => {
        try { player.dispose(); } catch {}
        try { volNode.dispose(); } catch {}
        try { panNode.dispose(); } catch {}
      };

      scheduledAudio++;
    }

    // ------------------------------------------------------------
    // (B) PLAYLIST (patterns)
    // ------------------------------------------------------------
    const arrangements = _collectArrangements();
    const instrumentTracks = (appState.pattern?.tracks || []).filter(t => t.type !== "bassline");

    const mixCache = new Map();
    const pluginCache = new Map();

    const getMix = (track, bassline) => {
      const pi = _n(bassline.patternIndex, 0);
      const key = `${track.id}::${pi}`;
      if (mixCache.has(key)) return mixCache.get(key);

      const instMuted = !!(track.isMuted || track.muted) || clamp(track.volume ?? 1, 0, MAX_VOL) <= 0;
      const pattMuted = !!(bassline.isMuted || bassline.muted) || clamp(bassline.volume ?? 1, 0, MAX_VOL) <= 0;

      const instVol = new Tone.Volume(_dbFromVol(track.volume ?? 1, instMuted));
      const instPan = new Tone.Panner(clamp(track.pan ?? 0, -1, 1));
      const pattVol = new Tone.Volume(_dbFromVol(bassline.volume ?? 1, pattMuted));
      const pattPan = new Tone.Panner(clamp(bassline.pan ?? 0, -1, 1));

      instVol.connect(instPan);
      instPan.connect(pattVol);
      pattVol.connect(pattPan);
      pattPan.connect(master);

      const m = { instVol, instPan, pattVol, pattPan };
      mixCache.set(key, m);
      return m;
    };

    const getPluginInst = (track, bassline, mix) => {
      const pi = _n(bassline.patternIndex, 0);
      const key = `${track.id}::${pi}`;
      if (pluginCache.has(key)) return pluginCache.get(key);

      const plugKey = _getPluginKey(track);
      const Cls = PLUGIN_CLASSES[plugKey];
      if (!Cls) {
        console.warn("[Render] Plugin not found:", plugKey, "track:", track.name);
        pluginCache.set(key, null);
        return null;
      }

      // ✅ IMPORTANT: create it in the current (OFFLINE) context
      const inst = new Cls(Tone.getContext(), track.params || track.pluginData || {});
      inst.connect(mix.instVol);

      pluginCache.set(key, inst);
      return inst;
    };

    for (const b of arrangements) {
      const pi = _n(b.patternIndex, 0);

      const pattMuted = !!(b.isMuted || b.muted) || clamp(b.volume ?? 1, 0, MAX_VOL) <= 0;
      if (pattMuted) continue;

      const clips = (b.playlist_clips || []).filter(c => _n(c.len, 0) > 0);
      if (!clips.length) continue;

      for (const clip of clips) {
        const clipStartTick = _n(clip.pos, 0);
        const clipEndTick = clipStartTick + _n(clip.len, 0);
        const clipEndSec = _ticksToSeconds(clipEndTick, stepSec);

        for (const track of instrumentTracks) {
          const instMuted = !!(track.isMuted || track.muted) || clamp(track.volume ?? 1, 0, MAX_VOL) <= 0;
          if (instMuted) continue;

          const patt = track.patterns?.[pi];
          if (!patt) continue;

          const pattLenTicks = _patternLengthTicks(patt);
          const mix = getMix(track, b);

          const pluginInst = (track.type === "plugin") ? getPluginInst(track, b, mix) : null;
          const samplerBuf = (track.type === "sampler") ? (samplerBufByTrackId.get(String(track.id)) || null) : null;

          // Piano roll
          if (Array.isArray(patt.notes) && patt.notes.length > 0) {
            for (const n of patt.notes) {
              const notePos = _n(n.pos, 0);
              const rawLen = _n(n.len, 12);
              const lenTicks = rawLen < 0 ? 12 : Math.max(rawLen, 12);
              const vel = _n(n.vol, 100) / 100;
              const midi = _n(n.key, 60);

              for (let startTick = clipStartTick + notePos; startTick < clipEndTick; startTick += pattLenTicks) {
                const tSec = _ticksToSeconds(startTick, stepSec);

                let durSec = _ticksToSeconds(lenTicks, stepSec);
                durSec = Math.min(durSec, Math.max(0, clipEndSec - tSec));
                if (durSec <= 0.0001) continue;

                if (track.type === "plugin" && pluginInst) {
                  const freq = Tone.Frequency(midi, "midi").toFrequency();
                  try { pluginInst.triggerAttackRelease(freq, durSec, tSec, vel); } catch {}
                  scheduledNotes++;
                } else if (track.type === "sampler" && samplerBuf) {
                  const base = _n(track.baseNote, 60);
                  const rate = Math.pow(2, (midi - base) / 12);
                  _playOneShot(samplerBuf, tSec, mix.instVol, tSec + durSec, rate);
                  scheduledNotes++;
                }
              }
            }
          }

          // Step sequencer
          else if (Array.isArray(patt.steps) && patt.steps.length > 0) {
            for (let s = 0; s < patt.steps.length; s++) {
              if (!patt.steps[s]) continue;

              const stepTick = s * 12;

              for (let startTick = clipStartTick + stepTick; startTick < clipEndTick; startTick += pattLenTicks) {
                const tSec = _ticksToSeconds(startTick, stepSec);

                if (track.type === "plugin" && pluginInst) {
                  try { pluginInst.triggerAttackRelease("C5", stepSec, tSec); } catch {}
                  scheduledSteps++;
                } else if (track.type === "sampler" && samplerBuf) {
                  _playOneShot(samplerBuf, tSec, mix.instVol, clipEndSec, 1);
                  scheduledSteps++;
                }
              }
            }
          }
        }
      }
    }

    transport.start();

    console.log("[Render] scheduled:", { scheduledAudio, scheduledNotes, scheduledSteps });
  }, duration);

  // debug peak
  const ch = buffer.getChannelData(0);
  let peak = 0;
  for (let i = 0; i < ch.length; i++) peak = Math.max(peak, Math.abs(ch[i]));
  console.log("[Render] peak =", peak);

  return bufferToWave(buffer);
}

export async function renderProjectAndDownload() {
  try {
    const blob = await renderProjectToBlob({ tailSec: 0.35 });

    const projectName =
      appState.global?.currentBeatBasslineName ||
      appState.global?.projectName ||
      "project";

    const fileName = `${_sanitizeFileName(projectName)}.wav`;

    const url = URL.createObjectURL(blob);
    const a = document.createElement("a");
    a.href = url;
    a.download = fileName;
    document.body.appendChild(a);
    a.click();
    a.remove();

    setTimeout(() => URL.revokeObjectURL(url), 1500);
  } catch (e) {
    console.error("Failed to render project:", e);
    alert("Failed to render the project. See the console for details.");
  }
}
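
// Usage sketch (hypothetical; the element id is an assumption):
//   document.getElementById("export-wav-btn")
//     ?.addEventListener("click", renderProjectAndDownload);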