I'm interested in using the JavaScript Web Audio API to detect the tempo of a song and then render it in a canvas.
I can handle the canvas part, but I'm not much of an audio person and really don't understand how to build a beat detector in JavaScript.
I've tried following this article, but for the life of me I can't connect the dots between the functions to get a working program.
I know I should show you some code, but honestly I don't have any; all of my attempts have been miserable failures, and the relevant code is in the aforementioned article.
Anyway, I'd really appreciate some guidance, or better yet an actual demo of how to detect the tempo of a song with the Web Audio API.
Thanks!
The main thing to note about the article by Joe Sullivan referenced in the question is that even though it provides a lot of source code, it is far from final, complete code. Getting to a working solution still requires some coding and debugging skills.
This answer takes most of its code from the referenced article; the original license applies where appropriate.
Below is a naive example implementation that uses the functions described in the article; you will still need to work out suitable thresholds.
The code includes preparation code written for this answer:
- reading a local file through the FileReader API
- decoding the file into audio data with the AudioContext API
and then, as described in the article:
- filtering the audio, in this example with a lowpass filter
- calculating peaks using a threshold
- counting interval groups and, from those, tempo groups
For the threshold I used an arbitrary value of 0.98 of the range between the minimum and maximum values; when grouping, I added some extra checks and arbitrary rounding to avoid possible infinite loops and to keep this an easy-to-follow sample.
Note that comments are sparse to keep the sample implementation short, because:
- the logic behind the processing is explained in the referenced article
- the syntax can be looked up in the API documentation of the methods involved
audio_file.onchange = function() {
  var file = this.files[0];
  var reader = new FileReader();
  var context = new (window.AudioContext || window.webkitAudioContext)();
  reader.onload = function() {
    context.decodeAudioData(reader.result, function(buffer) {
      prepare(buffer);
    });
  };
  reader.readAsArrayBuffer(file);
};
function prepare(buffer) {
  var offlineContext = new OfflineAudioContext(1, buffer.length, buffer.sampleRate);
  var source = offlineContext.createBufferSource();
  source.buffer = buffer;
  var filter = offlineContext.createBiquadFilter();
  filter.type = "lowpass";
  source.connect(filter);
  filter.connect(offlineContext.destination);
  source.start(0);
  offlineContext.startRendering();
  offlineContext.oncomplete = function(e) {
    process(e);
  };
}
function process(e) {
  var filteredBuffer = e.renderedBuffer;
  // If you want to analyze both channels, use the other channel later
  var data = filteredBuffer.getChannelData(0);
  var max = arrayMax(data);
  var min = arrayMin(data);
  var threshold = min + (max - min) * 0.98;
  var peaks = getPeaksAtThreshold(data, threshold);
  var intervalCounts = countIntervalsBetweenNearbyPeaks(peaks);
  var tempoCounts = groupNeighborsByTempo(intervalCounts);
  tempoCounts.sort(function(a, b) {
    return b.count - a.count;
  });
  if (tempoCounts.length) {
    output.innerHTML = tempoCounts[0].tempo;
  }
}
// http://tech.beatport.com/2014/web-audio/beat-detection-using-web-audio/
function getPeaksAtThreshold(data, threshold) {
  var peaksArray = [];
  var length = data.length;
  for (var i = 0; i < length;) {
    if (data[i] > threshold) {
      peaksArray.push(i);
      // Skip forward ~ 1/4s to get past this peak.
      i += 10000;
    }
    i++;
  }
  return peaksArray;
}
function countIntervalsBetweenNearbyPeaks(peaks) {
  var intervalCounts = [];
  peaks.forEach(function(peak, index) {
    for (var i = 0; i < 10; i++) {
      var interval = peaks[index + i] - peak;
      var foundInterval = intervalCounts.some(function(intervalCount) {
        if (intervalCount.interval === interval) return intervalCount.count++;
      });
      // Additional checks to avoid infinite loops in later processing
      if (!isNaN(interval) && interval !== 0 && !foundInterval) {
        intervalCounts.push({
          interval: interval,
          count: 1
        });
      }
    }
  });
  return intervalCounts;
}
function groupNeighborsByTempo(intervalCounts) {
  var tempoCounts = [];
  intervalCounts.forEach(function(intervalCount) {
    // Convert an interval (in samples) to a tempo, assuming a 44100 Hz sample rate
    var theoreticalTempo = 60 / (intervalCount.interval / 44100);
    theoreticalTempo = Math.round(theoreticalTempo);
    if (theoreticalTempo === 0) {
      return;
    }
    // Adjust the tempo to fit within the 90-180 BPM range
    while (theoreticalTempo < 90) theoreticalTempo *= 2;
    while (theoreticalTempo > 180) theoreticalTempo /= 2;
    var foundTempo = tempoCounts.some(function(tempoCount) {
      if (tempoCount.tempo === theoreticalTempo) return tempoCount.count += intervalCount.count;
    });
    if (!foundTempo) {
      tempoCounts.push({
        tempo: theoreticalTempo,
        count: intervalCount.count
      });
    }
  });
  return tempoCounts;
}
// http://stackoverflow.com/questions/1669190/javascript-min-max-array-values
function arrayMin(arr) {
  var len = arr.length,
    min = Infinity;
  while (len--) {
    if (arr[len] < min) {
      min = arr[len];
    }
  }
  return min;
}

function arrayMax(arr) {
  var len = arr.length,
    max = -Infinity;
  while (len--) {
    if (arr[len] > max) {
      max = arr[len];
    }
  }
  return max;
}
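The comment in process hints that you may also want to look at the second channel. One option, which is my own addition and not part of the referenced article, is to mix both channels down to mono before peak detection; note that prepare would then have to create the OfflineAudioContext with 2 channels instead of 1 for a second channel to exist at all.
// Sketch: average both channels into one mono Float32Array before peak detection
function mixToMono(renderedBuffer) {
  if (renderedBuffer.numberOfChannels < 2) return renderedBuffer.getChannelData(0);
  var left = renderedBuffer.getChannelData(0);
  var right = renderedBuffer.getChannelData(1);
  var mixed = new Float32Array(left.length);
  for (var i = 0; i < left.length; i++) {
    mixed[i] = (left[i] + right[i]) / 2;
  }
  return mixed;
}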
<input id="audio_file" type="file" accept="audio/*" />
<audio id="audio_player"></audio>
<p>
  Most likely tempo: <span id="output"></span>
</p>
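Since the question also asks about rendering in a canvas, here is a minimal sketch of how the peaks array returned by getPeaksAtThreshold could be drawn as vertical markers. The canvas element with id "canvas" and the drawPeaks helper are my own additions, meant only as a starting point; you could call it from process, e.g. drawPeaks(peaks, data.length).
// Sketch: draw each detected peak as a 1px vertical line, scaled to the canvas width
function drawPeaks(peaks, totalSamples) {
  var canvas = document.getElementById("canvas");
  var ctx = canvas.getContext("2d");
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  peaks.forEach(function(peak) {
    var x = Math.floor((peak / totalSamples) * canvas.width);
    ctx.fillRect(x, 0, 1, canvas.height);
  });
}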
I wrote a tutorial here that shows how to do this with the JavaScript Web Audio API:
https://askmacgyver.com/blog/tutorial/how-to-implement-tempo-detection-in-your-application
Overview of the steps
- Convert the audio file to an array buffer
- Run the array buffer through a low-pass filter
- Trim a 10-second clip from the array buffer
- Down-sample the data
- Normalize the data
- Count the flat-line groupings
- Infer the tempo from the grouping count
The code below does the heavy lifting.
Load the audio file into an array buffer and run it through a low-pass filter
function createBuffers(url) {
  // Fetch Audio Track via AJAX with URL
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  request.onload = function() {
    var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    var audioData = request.response;
    audioCtx.decodeAudioData(audioData, function(buffer) {
      // Save the original, unfiltered channel data in 'originalBuffer'
      window.originalBuffer = buffer.getChannelData(0);
      // Arguments: Channels, Length (in sample frames), Sample Rate
      var offlineCtx = new OfflineAudioContext(1, buffer.length, buffer.sampleRate);
      var source = offlineCtx.createBufferSource();
      source.buffer = buffer;
      // Create a Low Pass Filter to Isolate the Low End Beat
      var filter = offlineCtx.createBiquadFilter();
      filter.type = "lowpass";
      filter.frequency.value = 140;
      source.connect(filter);
      filter.connect(offlineCtx.destination);
      source.start(0);
      // Render the low-pass-filtered signal offline and save it in 'lowPassBuffer'
      offlineCtx.startRendering().then(function(lowPassAudioBuffer) {
        window.lowPassBuffer = lowPassAudioBuffer.getChannelData(0);
        console.log("Low Pass Buffer Rendered!");
      });
    }, function(e) {
      console.error("Unable to decode audio data", e);
    });
  };
  request.send();
}
createBuffers('https://askmacgyver.com/test/Maroon5-Moves-Like-Jagger-128bpm.mp3');
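Note that the offline rendering is asynchronous, so the snippets in the remaining steps must only run after window.lowPassBuffer has been filled. As a rough sketch (the whenBufferReady helper is my own addition, not part of the tutorial), you could poll for the buffer and then run the rest of the pipeline:
// Sketch: wait for the offline rendering to finish before running the later steps
function whenBufferReady(callback) {
  var timer = setInterval(function() {
    if (window.lowPassBuffer) {
      clearInterval(timer);
      callback();
    }
  }, 100);
}

whenBufferReady(function() {
  // The clipping, down-sampling, normalizing and counting snippets below go here
});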
You now have an array buffer of the low-pass-filtered song (and of the original). It contains one entry per sample, i.e. 44100 times the number of seconds in the song.
window.lowPassBuffer // Low Pass Array Buffer
window.originalBuffer // Original Non Filtered Array Buffer
Trim a 10-second clip from the song
function getClip(length, startTime, data) {
  var clip_length = length * 44100;
  var section = startTime * 44100;
  var newArr = [];
  for (var i = 0; i < clip_length; i++) {
    newArr.push(data[section + i]);
  }
  return newArr;
}

// Overwrite our array buffer with a 10 second clip starting from 00:10s
window.lowPassBuffer = getClip(10, 10, window.lowPassBuffer);
Down-sample the clip
function getSampleClip(data, samples) {
  var newArray = [];
  var modulus_coefficient = Math.round(data.length / samples);
  for (var i = 0; i < data.length; i++) {
    if (i % modulus_coefficient == 0) {
      newArray.push(data[i]);
    }
  }
  return newArray;
}

// Overwrite our array with the down-sampled array.
lowPassBuffer = getSampleClip(lowPassBuffer, 300);
Normalize your data
function normalizeArray(data) {
  var newArray = [];
  // Use the absolute difference between neighbouring samples, scaled up to integers
  for (var i = 0; i < data.length - 1; i++) {
    newArray.push(Math.abs(Math.round((data[i + 1] - data[i]) * 1000)));
  }
  return newArray;
}

// Overwrite our array with the normalized array
lowPassBuffer = normalizeArray(lowPassBuffer);
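As a quick illustration with made-up numbers, normalization turns the differences between adjacent samples into small positive integers, so sudden jumps in the filtered signal stand out:
// [0.010, 0.012, 0.190, 0.191] -> absolute differences * 1000 -> [2, 178, 1]
console.log(normalizeArray([0.010, 0.012, 0.190, 0.191])); // [2, 178, 1]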
Count the flat-line groupings
function countFlatLineGroupings(data) {
  var newArray = normalizeArray(data);

  function getMax(a) {
    var m = -Infinity,
      i = 0,
      n = a.length;
    for (; i != n; ++i) {
      if (a[i] > m) {
        m = a[i];
      }
    }
    return m;
  }

  function getMin(a) {
    var m = Infinity,
      i = 0,
      n = a.length;
    for (; i != n; ++i) {
      if (a[i] < m) {
        m = a[i];
      }
    }
    return m;
  }

  var max = getMax(newArray);
  var min = getMin(newArray);
  var count = 0;
  var threshold = Math.round((max - min) * 0.2);
  // Count a grouping whenever a spike above the threshold is followed by a flat stretch below it
  for (var i = 0; i < newArray.length; i++) {
    if (newArray[i] > threshold && newArray[i + 1] < threshold && newArray[i + 2] < threshold && newArray[i + 3] < threshold && newArray[i + 6] < threshold) {
      count++;
    }
  }
  return count;
}

// Count the Groupings
countFlatLineGroupings(lowPassBuffer);
Scale the 10-second grouping count up to 60 seconds to arrive at beats per minute
var final_tempo = countFlatLineGroupings(lowPassBuffer);
// final_tempo will be 21
final_tempo = final_tempo * 6;
console.log("Tempo: " + final_tempo);
// final_tempo will be 126
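The ×6 factor is simply 60 seconds divided by the 10-second clip length. A small generalisation for other clip lengths (the tempoFromGroupCount helper is my own addition, not part of the tutorial) could look like this:
// Scale a grouping count from an arbitrary clip length up to one minute
function tempoFromGroupCount(count, clipSeconds) {
  return Math.round(count * (60 / clipSeconds));
}

console.log("Tempo: " + tempoFromGroupCount(21, 10)); // 126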