+
75
-

回答

这个可以实现:支持多音轨混流合并音频,并可拖拽调整片段的时间位置

800_auto

完整代码

<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>HTML5 音频编辑器</title>
    <style>
        /* Page chrome and toolbar buttons */
        body { font-family: sans-serif; margin: 20px; background-color: #f4f4f4; color: #333; }
        .controls button { margin: 5px; padding: 10px 15px; background-color: #007bff; color: white; border: none; cursor: pointer; border-radius: 4px; }
        .controls button:hover { background-color: #0056b3; }
        /* Scrollable wrapper that holds the sticky ruler and all tracks */
        .tracks-container {
            margin-top: 20px;
            border: 1px solid #ccc;
            padding: 10px;
            background-color: #fff;
            position: relative; /* For absolute positioning of ruler */
            overflow-x: auto; /* Allow horizontal scrolling for long timelines */
        }
        .timeline-ruler {
            height: 20px;
            background-color: #e0e0e0;
            position: sticky; /* Make ruler sticky */
            top: 0;
            z-index: 10;
            display: flex;
        }
        /* One tick per second; width is kept in sync with PIXELS_PER_SECOND in JS */
        .ruler-mark {
            min-width: 50px; /* Corresponds to 1 second if pixelsPerSecond is 50 */
            border-left: 1px solid #aaa;
            font-size: 10px;
            text-align: right;
            padding-right: 2px;
            box-sizing: border-box;
        }
        .track {
            border: 1px dashed #ddd;
            margin-bottom: 10px;
            min-height: 60px; /* Min height for dropping files */
            position: relative; /* Crucial for absolute positioning of clips */
            background-color: #f9f9f9;
            padding: 5px 0; /* Add some padding for clips */
        }
        .track-header {
            font-size: 0.9em;
            color: #555;
            padding: 2px 5px;
            background-color: #eee;
            display: flex;
            justify-content: space-between;
            align-items: center;
        }
        .track-header input[type="file"] { display: none; }
        /* Draggable clip rectangle; its left/width encode start time/duration */
        .audio-clip {
            position: absolute;
            height: 50px;
            background-color: lightcoral;
            border: 1px solid darkred;
            border-radius: 3px;
            cursor: move;
            display: flex;
            align-items: center;
            justify-content: center;
            font-size: 0.8em;
            color: white;
            overflow: hidden;
            text-overflow: ellipsis;
            white-space: nowrap;
            box-sizing: border-box;
            padding: 0 5px;
        }
        /* Highlight applied to a track while a drag hovers over it */
        .drop-zone-active {
            border: 2px dashed dodgerblue !important;
            background-color: #e6f7ff !important;
        }
        /* Centered busy overlay, toggled from JS via showLoading() */
        #loading-indicator {
            display: none;
            position: fixed;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            padding: 20px;
            background: rgba(0,0,0,0.7);
            color: white;
            border-radius: 5px;
            z-index: 1000;
        }
    </style>
</head>
<body>
    <h1>简易音频混音器</h1>

    <!-- Global toolbar: add a track / render the mix -->
    <div class="controls">
        <button id="addTrackBtn">添加音轨</button>
        <button id="mergeAndDownloadBtn">合并并下载 (WAV)</button>
    </div>

    <!-- Timeline: sticky second ruler on top, tracks stacked below -->
    <div class="tracks-container">
        <div class="timeline-ruler" id="timelineRuler">
            <!-- Ruler marks will be generated by JS -->
        </div>
        <div id="tracks">
            <!-- Audio tracks will be added here -->
        </div>
    </div>

    <!-- Busy overlay toggled by showLoading() -->
    <div id="loading-indicator">处理中,请稍候...</div>

    <script>
       document.addEventListener('DOMContentLoaded', () => {
    // Static UI elements, looked up once.
    const addTrackBtn = document.getElementById('addTrackBtn');
    const mergeAndDownloadBtn = document.getElementById('mergeAndDownloadBtn');
    const tracksContainer = document.getElementById('tracks');
    const timelineRuler = document.getElementById('timelineRuler');
    const loadingIndicator = document.getElementById('loading-indicator');

    // Shared editor state, captured by every function below.
    let audioContext; // lazily created by initAudioContext()
    let tracksData = []; // [{ id, clips: [{ id, name, buffer, startTime, duration, trackId }] }]
    let trackIdCounter = 0; // suffix for generated "track-N" element ids
    let clipIdCounter = 0; // suffix for generated "clip-N" element ids
    const PIXELS_PER_SECOND = 50; // horizontal scale: 50 px == 1 second
    const MAX_TIMELINE_SECONDS = 180; // Max length of timeline ruler

    // Lazily create the shared AudioContext.
    // Returns true when a context is available; alerts the user and returns
    // false when the browser does not support Web Audio.
    function initAudioContext() {
        if (!audioContext) {
            // Bug fix: the original called `new (window.AudioContext || window.webkitAudioContext)()`
            // unconditionally, so on browsers without Web Audio it threw
            // `TypeError: undefined is not a constructor` before the alert
            // branch could ever run. Check for the constructor first.
            const Ctor = window.AudioContext || window.webkitAudioContext;
            if (!Ctor) {
                alert("浏览器不支持 Web Audio API");
                return false;
            }
            audioContext = new Ctor();
        }
        return true;
    }

    // Draw the second-by-second ruler along the top of the timeline and widen
    // the tracks area so it spans the whole ruler.
    function generateTimelineRuler() {
        timelineRuler.innerHTML = '';
        const marks = document.createDocumentFragment();
        for (let second = 0; second < MAX_TIMELINE_SECONDS; second++) {
            const tick = document.createElement('div');
            tick.classList.add('ruler-mark');
            tick.style.minWidth = `${PIXELS_PER_SECOND}px`;
            tick.textContent = `${second}s`;
            marks.appendChild(tick);
        }
        timelineRuler.appendChild(marks);
        // Keep the tracks area at least as wide as the full ruler.
        tracksContainer.style.minWidth = `${MAX_TIMELINE_SECONDS * PIXELS_PER_SECOND}px`;
    }


    // Toolbar: "添加音轨" appends an empty track (audio context must exist
    // first because dropped files are decoded through it).
    addTrackBtn.addEventListener('click', () => {
        if (!initAudioContext()) return;
        createTrackElement();
    });

    // Build one track row: header with a file picker, plus drag-and-drop
    // handling for both external audio files and existing clips.
    function createTrackElement() {
        const trackId = `track-${trackIdCounter++}`;

        const trackDiv = document.createElement('div');
        trackDiv.id = trackId;
        trackDiv.classList.add('track');

        const trackHeader = document.createElement('div');
        trackHeader.classList.add('track-header');
        trackHeader.innerHTML = `
            <span>音轨 ${trackIdCounter}</span>
            <button class="add-audio-btn">添加音频</button>
            <input type="file" accept="audio/*" multiple style="display:none;">
        `;
        trackDiv.appendChild(trackHeader);
        tracksContainer.appendChild(trackDiv);

        // Mirror the new track in the data model.
        tracksData.push({ id: trackId, clips: [] });

        // The visible button proxies clicks to the hidden file input.
        const fileInput = trackHeader.querySelector('input[type="file"]');
        trackHeader.querySelector('.add-audio-btn').onclick = () => fileInput.click();
        fileInput.onchange = (e) => handleFiles(e.target.files, trackId);

        // Highlight while something hovers over the track...
        trackDiv.addEventListener('dragover', (e) => {
            e.preventDefault();
            trackDiv.classList.add('drop-zone-active');
        });
        trackDiv.addEventListener('dragleave', () => {
            trackDiv.classList.remove('drop-zone-active');
        });
        // ...and on drop, route files to the decoder and clip drags to the
        // reposition handler.
        trackDiv.addEventListener('drop', (e) => {
            e.preventDefault();
            trackDiv.classList.remove('drop-zone-active');
            const droppedFiles = e.dataTransfer.files;
            if (droppedFiles.length > 0) {
                handleFiles(droppedFiles, trackId);
            } else {
                handleClipDrop(e, trackId);
            }
        });
    }

    // Decode each selected/dropped file and add it to the track as a clip.
    // Non-audio files and undecodable files are reported but do not abort the
    // rest of the batch.
    async function handleFiles(files, trackId) {
        if (!initAudioContext()) return;
        showLoading(true);
        for (const file of files) {
            if (!file.type.startsWith('audio/')) {
                alert(`文件 ${file.name} 不是支持的音频格式。`);
                continue;
            }
            try {
                const rawBytes = await file.arrayBuffer();
                const decoded = await audioContext.decodeAudioData(rawBytes);
                addClipToTrack(file.name, decoded, trackId);
            } catch (error) {
                console.error("Error decoding audio data:", error);
                alert(`无法解码音频文件: ${file.name}`);
            }
        }
        showLoading(false);
    }

    // Register a decoded buffer as a new clip in the track's data model and
    // render it on the timeline. startTime is in seconds from the origin.
    function addClipToTrack(fileName, audioBuffer, trackId, startTime = 0) {
        const owner = tracksData.find(t => t.id === trackId);
        if (!owner) return;

        const clip = {
            id: `clip-${clipIdCounter++}`,
            name: fileName,
            buffer: audioBuffer,
            startTime: startTime, // seconds from the timeline origin
            duration: audioBuffer.duration, // seconds
            trackId: trackId
        };
        owner.clips.push(clip);
        renderClip(clip);
    }

    // Create the clip's DOM element on first call, then (re)position it so
    // left/width reflect startTime/duration at the current pixel scale.
    function renderClip(clipData) {
        const trackElement = document.getElementById(clipData.trackId);
        if (!trackElement) return;

        let clipElement = document.getElementById(clipData.id);
        if (!clipElement) {
            clipElement = document.createElement('div');
            clipElement.id = clipData.id;
            clipElement.classList.add('audio-clip');
            clipElement.draggable = true;
            const label = clipData.name.length > 20
                ? clipData.name.substring(0, 17) + '...'
                : clipData.name;
            clipElement.textContent = label;
            clipElement.title = clipData.name;
            trackElement.appendChild(clipElement);

            // Record which clip is dragged and where inside it the user
            // grabbed, so the drop handler can keep the grab point stable.
            clipElement.addEventListener('dragstart', (e) => {
                e.dataTransfer.setData('text/plain', clipData.id);
                e.dataTransfer.effectAllowed = 'move';
                const rect = clipElement.getBoundingClientRect();
                e.dataTransfer.setData('offsetX', String(e.clientX - rect.left));
            });
        }

        clipElement.style.left = `${clipData.startTime * PIXELS_PER_SECOND}px`;
        clipElement.style.width = `${clipData.duration * PIXELS_PER_SECOND}px`;
    }

    // Reposition an existing clip after a drag: derive its new start time from
    // the drop point, and when it lands on a different track, migrate it both
    // in the per-track data arrays and in the DOM. The data move must happen
    // before renderClip so the element is positioned against the new parent.
    function handleClipDrop(e, targetTrackId) {
        e.preventDefault();
        const clipId = e.dataTransfer.getData('text/plain');
        // offsetX = grab point inside the clip, recorded at dragstart, so the
        // clip does not jump under the cursor on drop.
        const offsetX = parseFloat(e.dataTransfer.getData('offsetX')) || 0;
        const clipData = findClipById(clipId);
        const targetTrackElement = document.getElementById(targetTrackId);

        if (!clipData || !targetTrackElement) return;

        // Calculate new startTime based on drop position relative to the track
        const trackRect = targetTrackElement.getBoundingClientRect();
        const dropXInTrack = e.clientX - trackRect.left - offsetX; // Adjust for mouse click offset within clip
        let newStartTime = Math.max(0, dropXInTrack / PIXELS_PER_SECOND); // Ensure not negative

        // Update data model first.
        // If changing tracks, remove from old trackData and add to new
        if (clipData.trackId !== targetTrackId) {
            const oldTrackData = tracksData.find(t => t.id === clipData.trackId);
            if (oldTrackData) {
                oldTrackData.clips = oldTrackData.clips.filter(c => c.id !== clipId);
            }
            const newTrackData = tracksData.find(t => t.id === targetTrackId);
            if (newTrackData) {
                clipData.trackId = targetTrackId;
                newTrackData.clips.push(clipData);
            }
        }
        clipData.startTime = newStartTime;

        // Re-render (or simply move the DOM element if staying in same track visual parent)
        const clipElement = document.getElementById(clipId);
        if (clipElement.parentElement.id !== targetTrackId) {
            targetTrackElement.appendChild(clipElement); // Move DOM element to new track
        }
        renderClip(clipData); // Update position and potentially parent
    }


    // Look up a clip by id across every track; returns null when absent.
    function findClipById(clipId) {
        const everyClip = tracksData.flatMap(track => track.clips);
        return everyClip.find(c => c.id === clipId) || null;
    }

    // Merge and Download: mix every clip on every track into one buffer via an
    // OfflineAudioContext, encode it as 16-bit WAV and trigger a download.
    mergeAndDownloadBtn.addEventListener('click', async () => {
        if (!initAudioContext()) return;
        if (tracksData.every(track => track.clips.length === 0)) {
            alert("请先添加一些音频片段到音轨。");
            return;
        }

        showLoading(true);

        // Total mix length = latest clip end time across all tracks.
        let totalDuration = 0;
        tracksData.forEach(track => {
            track.clips.forEach(clip => {
                totalDuration = Math.max(totalDuration, clip.startTime + clip.duration);
            });
        });

        if (totalDuration === 0) {
            alert("没有可合并的音频内容 (总时长为0)。");
            showLoading(false);
            return;
        }

        // Bug fix: the OfflineAudioContext `length` argument is a frame count
        // and must be an integer; `sampleRate * totalDuration` is generally
        // fractional (clip durations are floats), so round up to keep the
        // whole mix.
        const offlineCtx = new OfflineAudioContext(
            audioContext.destination.channelCount, // Use same channel count as main context
            Math.ceil(audioContext.sampleRate * totalDuration),
            audioContext.sampleRate
        );

        // Schedule each clip at its timeline offset; overlapping sources are
        // summed by the destination node.
        tracksData.forEach(track => {
            track.clips.forEach(clip => {
                const source = offlineCtx.createBufferSource();
                source.buffer = clip.buffer;
                source.connect(offlineCtx.destination);
                source.start(clip.startTime); // Start at its specific time
            });
        });

        try {
            const renderedBuffer = await offlineCtx.startRendering();
            const wavBlob = audioBufferToWav(renderedBuffer);
            downloadBlob(wavBlob, 'mixed_audio.wav');
        } catch (error) {
            console.error("Error rendering audio:", error);
            alert("合并音频时发生错误: " + error.message);
        } finally {
            showLoading(false);
        }
    });

    // Show or hide the centered "processing" overlay.
    function showLoading(show) {
        if (show) {
            loadingIndicator.style.display = 'block';
        } else {
            loadingIndicator.style.display = 'none';
        }
    }

    // --- WAV Encoding Helper ---
    // Encode an AudioBuffer (or any object exposing numberOfChannels, length,
    // sampleRate and getChannelData) as a 16-bit PCM RIFF/WAVE Blob:
    // a 44-byte header followed by interleaved little-endian samples.
    // (Simplified encoder; for production a robust library might be better.)
    function audioBufferToWav(buffer) {
        const numOfChan = buffer.numberOfChannels;
        const bytesPerSample = 2; // 16-bit PCM
        const dataLength = buffer.length * numOfChan * bytesPerSample;
        const totalLength = 44 + dataLength; // header + payload
        const view = new DataView(new ArrayBuffer(totalLength));
        let offset = 0;

        function setUint16(data) {
            view.setUint16(offset, data, true); // little-endian
            offset += 2;
        }

        function setUint32(data) {
            view.setUint32(offset, data, true); // little-endian
            offset += 4;
        }

        // RIFF container header.
        setUint32(0x46464952); // "RIFF"
        setUint32(totalLength - 8); // file length - 8
        setUint32(0x45564157); // "WAVE"

        // "fmt " sub-chunk describing the PCM stream.
        setUint32(0x20746d66); // "fmt "
        setUint32(16); // sub-chunk size
        setUint16(1); // audio format 1 = PCM (uncompressed)
        setUint16(numOfChan);
        setUint32(buffer.sampleRate);
        setUint32(buffer.sampleRate * bytesPerSample * numOfChan); // avg. bytes/sec
        setUint16(numOfChan * bytesPerSample); // block align
        setUint16(16); // bits per sample

        // "data" sub-chunk with the interleaved samples.
        setUint32(0x61746164); // "data"
        setUint32(dataLength);

        const channels = [];
        for (let ch = 0; ch < numOfChan; ch++) {
            channels.push(buffer.getChannelData(ch));
        }

        for (let frame = 0; frame < buffer.length; frame++) {
            for (let ch = 0; ch < numOfChan; ch++) {
                // Clamp to [-1, 1], then scale to signed 16-bit. The two
                // halves use different factors so -1 maps to -32768 and +1 to
                // 32767. Bug fix: round to the nearest integer instead of
                // letting DataView truncate toward zero (the original comment
                // "muting negative values" was also wrong — it clamps).
                let sample = Math.max(-1, Math.min(1, channels[ch][frame]));
                sample = Math.round(sample < 0 ? sample * 0x8000 : sample * 0x7FFF);
                view.setInt16(offset, sample, true);
                offset += 2;
            }
        }

        return new Blob([view], { type: 'audio/wav' });
    }


    // Offer a Blob to the user as a named file download by clicking a
    // temporary hidden link, then release the object URL.
    function downloadBlob(blob, filename) {
        const url = URL.createObjectURL(blob);
        const link = document.createElement('a');
        link.style.display = 'none';
        link.href = url;
        link.download = filename;
        document.body.appendChild(link);
        link.click();
        link.remove();
        URL.revokeObjectURL(url);
    }

    // Initial setup: prime the audio context, draw the ruler, and start the
    // editor with one empty track so files can be dropped immediately.
    initAudioContext();
    generateTimelineRuler();
    createTrackElement(); // Create one track by default
});
    </script>
</body>
</html>

网友回复

我知道答案,我要回答