Low-latency input/output audio



I have two iOS AudioQueues: one input that feeds samples straight into one output. Unfortunately, there is an echo effect that is quite noticeable :(

Is it possible to do low-latency audio using AudioQueues, or do I really need to use AudioUnits? (I have tried the Novocaine framework, which uses AudioUnits, and there the latency is much smaller. I have also noticed that this framework seems to use less CPU. Unfortunately, I was not able to use it in my Swift project without major changes.)

Here are some excerpts from my code, mostly done in Swift, except for the callbacks, which had to be implemented in C.

import AudioToolbox
import AVFoundation

private let audioStreamBasicDescription = AudioStreamBasicDescription(
    mSampleRate: 16000,
    mFormatID: AudioFormatID(kAudioFormatLinearPCM),
    mFormatFlags: AudioFormatFlags(kAudioFormatFlagsNativeFloatPacked),
    mBytesPerPacket: 4,
    mFramesPerPacket: 1,
    mBytesPerFrame: 4,
    mChannelsPerFrame: 1,
    mBitsPerChannel: 32,
    mReserved: 0)
private let numberOfBuffers = 80
private let bufferSize: UInt32 = 256
private var active = false
private var inputQueue: AudioQueueRef = nil
private var outputQueue: AudioQueueRef = nil
private var inputBuffers = [AudioQueueBufferRef]()
private var outputBuffers = [AudioQueueBufferRef]()
// free output buffers are chained into a singly-linked list through mUserData
private var headOfFreeOutputBuffers: AudioQueueBufferRef = nil
// callbacks implemented in Swift
private func audioQueueInputCallback(inputBuffer: AudioQueueBufferRef) {
    if active {
        if headOfFreeOutputBuffers != nil {
            let outputBuffer = headOfFreeOutputBuffers
            headOfFreeOutputBuffers = AudioQueueBufferRef(outputBuffer.memory.mUserData)
            outputBuffer.memory.mAudioDataByteSize = inputBuffer.memory.mAudioDataByteSize
            memcpy(outputBuffer.memory.mAudioData, inputBuffer.memory.mAudioData, Int(inputBuffer.memory.mAudioDataByteSize))
            assert(AudioQueueEnqueueBuffer(outputQueue, outputBuffer, 0, nil) == 0)
        } else {
            println(__FUNCTION__ + ": out-of-output-buffers!")
        }
        assert(AudioQueueEnqueueBuffer(inputQueue, inputBuffer, 0, nil) == 0)
    }
}
private func audioQueueOutputCallback(outputBuffer: AudioQueueBufferRef) {
    if active {
        outputBuffer.memory.mUserData = UnsafeMutablePointer<Void>(headOfFreeOutputBuffers)
        headOfFreeOutputBuffers = outputBuffer
    }
}
func start() {
    var error: NSError?
    audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, withOptions: .allZeros, error: &error)
    dumpError(error, functionName: "AVAudioSessionCategoryPlayAndRecord")
    audioSession.setPreferredSampleRate(16000, error: &error)
    dumpError(error, functionName: "setPreferredSampleRate")
    audioSession.setPreferredIOBufferDuration(0.005, error: &error)
    dumpError(error, functionName: "setPreferredIOBufferDuration")
    audioSession.setActive(true, error: &error)
    dumpError(error, functionName: "setActive(true)")
    assert(active == false)
    active = true
    // cannot provide callbacks to AudioQueueNewInput/AudioQueueNewOutput from Swift and so need to interface C functions
    assert(MyAudioQueueConfigureInputQueueAndCallback(audioStreamBasicDescription, &inputQueue, audioQueueInputCallback) == 0)
    assert(MyAudioQueueConfigureOutputQueueAndCallback(audioStreamBasicDescription, &outputQueue, audioQueueOutputCallback) == 0)
    for (var i = 0; i < numberOfBuffers; i++) {
        var audioQueueBufferRef: AudioQueueBufferRef = nil
        assert(AudioQueueAllocateBuffer(inputQueue, bufferSize, &audioQueueBufferRef) == 0)
        assert(AudioQueueEnqueueBuffer(inputQueue, audioQueueBufferRef, 0, nil) == 0)
        inputBuffers.append(audioQueueBufferRef)
        assert(AudioQueueAllocateBuffer(outputQueue, bufferSize, &audioQueueBufferRef) == 0)
        outputBuffers.append(audioQueueBufferRef)
        audioQueueBufferRef.memory.mUserData = UnsafeMutablePointer<Void>(headOfFreeOutputBuffers)
        headOfFreeOutputBuffers = audioQueueBufferRef
    }
    assert(AudioQueueStart(inputQueue, nil) == 0)
    assert(AudioQueueStart(outputQueue, nil) == 0)
}
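
Not shown above is the matching teardown. A minimal sketch, assuming the current AudioToolbox signatures (the stop() function is a name I am introducing, not part of the project above):

func stop() {
    active = false
    // second argument true = act immediately instead of draining queued buffers
    AudioQueueStop(inputQueue, true)
    AudioQueueStop(outputQueue, true)
    // disposing a queue also frees the buffers allocated with AudioQueueAllocateBuffer
    AudioQueueDispose(inputQueue, true)
    AudioQueueDispose(outputQueue, true)
}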

And then my C code, which wires the callbacks back into Swift:

// trampolines: recover the Swift closure stored in inUserData and invoke it
static void MyAudioQueueAudioInputCallback(void * inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp * inStartTime,
                                   UInt32 inNumberPacketDescriptions, const AudioStreamPacketDescription * inPacketDescs) {
    void(^block)(AudioQueueBufferRef) = (__bridge void(^)(AudioQueueBufferRef))inUserData;
    block(inBuffer);
}
static void MyAudioQueueAudioOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
    void(^block)(AudioQueueBufferRef) = (__bridge void(^)(AudioQueueBufferRef))inUserData;
    block(inBuffer);
}
// [callback copy] moves the block to the heap; __bridge_retained keeps it alive
// for the queue's lifetime (it is never released in this excerpt)
OSStatus MyAudioQueueConfigureInputQueueAndCallback(AudioStreamBasicDescription inFormat, AudioQueueRef *inAQ, void(^callback)(AudioQueueBufferRef)) {
    return AudioQueueNewInput(&inFormat, MyAudioQueueAudioInputCallback, (__bridge_retained void *)([callback copy]), nil, nil, 0, inAQ);
}
OSStatus MyAudioQueueConfigureOutputQueueAndCallback(AudioStreamBasicDescription inFormat, AudioQueueRef *inAQ, void(^callback)(AudioQueueBufferRef)) {
    return AudioQueueNewOutput(&inFormat, MyAudioQueueAudioOutputCallback, (__bridge_retained void *)([callback copy]), nil, nil, 0, inAQ);
}

After some time, I found this great post on using AudioUnits instead of AudioQueues. I just ported it to Swift and then simply added:

audioSession.setPreferredIOBufferDuration(0.005, error: &error)
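
For reference, the same session setup with the current throwing AVAudioSession API looks like this; a sketch only, using the same numbers as above (the system treats the preferred buffer duration as a hint and may grant a larger value):

import AVFoundation

let session = AVAudioSession.sharedInstance()
do {
    try session.setCategory(.playAndRecord, mode: .default, options: [])
    try session.setPreferredSampleRate(16000)
    // ask for roughly 5 ms of audio per I/O cycle
    try session.setPreferredIOBufferDuration(0.005)
    try session.setActive(true)
} catch {
    print("audio session setup failed: \(error)")
}
// read back what was actually granted rather than assuming 5 ms
print("IO buffer duration: \(session.ioBufferDuration) s")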

If you record audio from a microphone and play it back within earshot of that microphone, then, because the audio throughput is not instantaneous, some of your previous output will make it into the new input, hence the echo. This phenomenon is called feedback.
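
The length of that echo falls out of simple arithmetic on the buffering. With the constants from the question, each queue buffer holds only 4 ms of audio, so the delay you hear is roughly the number of buffers in flight times that figure:

let sampleRate = 16000.0
let bytesPerFrame = 4.0                                // mono, 32-bit float
let bufferSizeBytes = 256.0
let framesPerBuffer = bufferSizeBytes / bytesPerFrame  // 64 frames
let msPerBuffer = framesPerBuffer / sampleRate * 1000  // 4 ms per buffer
print("each buffer spans \(msPerBuffer) ms of audio")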

This is a structural problem, so changing the recording API won't help (although changing your recording/playback buffer sizes will give you control over the delay in the echo). You can either play the audio in such a way that the microphone can't hear it (e.g. not at all, or through headphones), or go down the rabbit hole of echo cancellation.
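
If the speaker path has to stay, iOS does ship a built-in echo canceller in the voice-processing I/O unit. A minimal sketch, assuming iOS 13+ and AVAudioEngine (the tap is illustrative, not from the answer):

import AVFoundation

let engine = AVAudioEngine()
do {
    // switches the underlying I/O unit to kAudioUnitSubType_VoiceProcessingIO,
    // which cancels the device's own output out of the microphone signal
    try engine.inputNode.setVoiceProcessingEnabled(true)
} catch {
    print("voice processing unavailable: \(error)")
}
let format = engine.inputNode.outputFormat(forBus: 0)
engine.inputNode.installTap(onBus: 0, bufferSize: 256, format: format) { buffer, _ in
    // echo-cancelled microphone samples arrive here
}
try? engine.start()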
