Python: Getting frequencies from a song



I am writing code for a light show with 4 bulbs that switch combinations based on a song, so I need to extract the song's frequencies. I found this code, which is basically what I want:

# Read in a WAV and find the freq's
import pyaudio
import wave
import numpy as np

chunk = 2024
# open up a wave
wf = wave.open('Audio_3.wav', 'rb')
swidth = wf.getsampwidth()
RATE = wf.getframerate()
# use a Blackman window
window = np.blackman(chunk)
# open stream
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=RATE,
                output=True)
# read some data
data = wf.readframes(chunk)
# play stream and find the frequency of each chunk
print(len(data))
print(chunk * swidth)
while len(data) == chunk * swidth:
    # write data out to the audio stream
    stream.write(data)
    # unpack the data and multiply by the Blackman window
    indata = np.array(wave.struct.unpack("%dh" % (len(data) // swidth),
                                         data)) * window
    # Take the fft and square each value
    fftData = abs(np.fft.rfft(indata)) ** 2
    # find the maximum
    which = fftData[1:].argmax() + 1
    # use quadratic interpolation around the max
    if which != len(fftData) - 1:
        y0, y1, y2 = np.log(fftData[which - 1:which + 2:])
        x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
        # find the frequency and output it
        thefreq = (which + x1) * RATE / chunk
        print("The freq is %f Hz." % (thefreq))
    else:
        thefreq = which * RATE / chunk
        print("The freq is %f Hz." % (thefreq))
    # read some more data
    data = wf.readframes(chunk)

# flush any leftover partial chunk, then clean up
if data:
    stream.write(data)
stream.close()
p.terminate()
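For reference, the quadratic (parabolic) interpolation step in the loop above can be checked in isolation. The sketch below is my own test, not part of the original code: it synthesizes one chunk of a sine tone (the 440.7 Hz frequency and 44100 Hz rate are arbitrary assumptions), applies the same Blackman window, and refines the FFT peak with the same formula.

import numpy as np

RATE = 44100          # assumed sample rate
chunk = 2048          # analysis window length
f_true = 440.7        # assumed test frequency, deliberately off a bin center

# synthesize one windowed chunk of a sine tone
t = np.arange(chunk) / RATE
samples = np.sin(2 * np.pi * f_true * t) * np.blackman(chunk)

# power spectrum; skip the DC bin when locating the peak
fftData = np.abs(np.fft.rfft(samples)) ** 2
which = fftData[1:].argmax() + 1

# parabolic interpolation of the log-power around the peak bin
y0, y1, y2 = np.log(fftData[which - 1:which + 2])
x1 = (y2 - y0) * 0.5 / (2 * y1 - y2 - y0)
thefreq = (which + x1) * RATE / chunk

print("true %.2f Hz, estimated %.2f Hz" % (f_true, thefreq))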

However, the problem is that this code only works for simple sounds. When I try it with a song, the loop condition becomes false and nothing is processed. Is there code that gets frequencies from a song, or some other way to do it?
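(For the light-show side, once a dominant frequency per chunk is available it could be bucketed into four bands, one per bulb. A minimal sketch of that mapping, where the band edges are made up and set_bulbs() is a hypothetical placeholder for whatever GPIO/relay call actually drives the lights:)

# hypothetical mapping from a dominant frequency to a 4-bulb pattern
BANDS = [250.0, 1000.0, 4000.0]   # assumed band edges in Hz

def bulbs_for_freq(freq):
    # return a tuple of 4 on/off states, one per bulb
    if freq < BANDS[0]:
        return (1, 0, 0, 0)   # bass
    elif freq < BANDS[1]:
        return (0, 1, 0, 0)   # low mids
    elif freq < BANDS[2]:
        return (0, 0, 1, 0)   # high mids
    else:
        return (0, 0, 0, 1)   # treble

def set_bulbs(states):
    # placeholder: replace with the real GPIO/relay control code
    print("bulbs:", states)

# e.g. inside the analysis loop:
# set_bulbs(bulbs_for_freq(thefreq))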

I think the stereo file is causing the trouble, because the data size ends up being sample width * num_samples * num_channels. The following seems to work, giving a consistent 440 ± 1 Hz for a 440 Hz tone generated by Audacity (the ±1 Hz error comes either from the FFT or from Audacity's generator):

# Read in a WAV and find the freq's
import pyaudio
import wave
import numpy as np

chunk = 1024
# open up a wave
wf = wave.open('/home/jeremy/Music/wav1.wav', 'rb')
swidth = wf.getsampwidth()
RATE = wf.getframerate()
# use a Blackman window
window = np.blackman(chunk)
# open stream
p = pyaudio.PyAudio()
channels = wf.getnchannels()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=channels,
                rate=RATE,
                output=True)
# read some data
data = wf.readframes(chunk)
# play stream and find the frequency of each chunk
print('swidth {} chunk {} data {} ch {}'.format(swidth, chunk, len(data), channels))
# a full chunk of frames is chunk * sampwidth * channels bytes
while len(data) == chunk * swidth * channels:
    # write data out to the audio stream
    stream.write(data)
    # unpack the data (assumes 16-bit samples)
    #    indata = np.array(wave.struct.unpack("%dh"%(len(data)/(swidth)),data))*window
    indata = np.frombuffer(data, dtype='int16')
    # deinterleave, select 1 channel
    channel0 = indata[0::channels]
    # apply the Blackman window, take the fft and square each value
    fftData = abs(np.fft.rfft(channel0 * window)) ** 2
    # find the maximum
    which = fftData[1:].argmax() + 1
    # use quadratic interpolation around the max
    if which != len(fftData) - 1:
        y0, y1, y2 = np.log(fftData[which - 1:which + 2:])
        x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
        # find the frequency and output it
        thefreq = (which + x1) * RATE / chunk
        print("The freq is %f Hz." % (thefreq))
    else:
        thefreq = which * RATE / chunk
        print("The freq is %f Hz." % (thefreq))
    # read some more data
    data = wf.readframes(chunk)

# flush any leftover partial chunk, then clean up
if data:
    stream.write(data)
stream.close()
p.terminate()
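For a real song, a single "the frequency" per chunk is often not very meaningful, since many notes sound at once; per-band energy usually drives lights better. A sketch of that approach using only wave and numpy (no playback), where the band edges are assumptions and the file name is just the one from the question:

import wave
import numpy as np

CHUNK = 1024
BANDS = [250.0, 1000.0, 4000.0]           # assumed band edges in Hz

wf = wave.open('Audio_3.wav', 'rb')        # file name taken from the question
rate = wf.getframerate()
channels = wf.getnchannels()
swidth = wf.getsampwidth()
window = np.blackman(CHUNK)

data = wf.readframes(CHUNK)
while len(data) == CHUNK * swidth * channels:
    # assumes 16-bit samples; keep channel 0 only, then window it
    samples = np.frombuffer(data, dtype='int16')[0::channels] * window
    spectrum = np.abs(np.fft.rfft(samples)) ** 2
    freqs = np.fft.rfftfreq(CHUNK, d=1.0 / rate)

    # total energy per band: <250 Hz, 250-1000 Hz, 1000-4000 Hz, >4000 Hz
    edges = [0.0] + BANDS + [rate / 2.0]
    energies = [spectrum[(freqs >= lo) & (freqs < hi)].sum()
                for lo, hi in zip(edges[:-1], edges[1:])]
    print(energies)    # e.g. pick the loudest band to choose a bulb pattern

    data = wf.readframes(CHUNK)
wf.close()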
