Microsoft Cognitive Speech continuous recognition using Swift



I am trying to use the Microsoft Cognitive Services speech-to-text SDK for iOS. So far I have managed to run the recognizeOnce function without any problems, but Swift samples are scarce and I cannot seem to get continuous recognition working in Swift. Keep in mind that I normally work in React Native/JS and have only a very basic understanding of Swift. That said, any help would be greatly appreciated. This is what I have so far:

import MicrosoftCognitiveServicesSpeech

func recognizeFromMic() {
    var speechConfig: SPXSpeechConfiguration?
    do {
        try speechConfig = SPXSpeechConfiguration(subscription: key, region: region)
    } catch {
        print("error \(error) happened")
        speechConfig = nil
    }
    speechConfig?.speechRecognitionLanguage = "en-US"

    // Passing nil selects the default microphone.
    let audioConfig = SPXAudioConfiguration(microphone: nil)
    let reco = try! SPXSpeechRecognizer(speechConfiguration: speechConfig!, audioConfiguration: audioConfig!)

    reco.addRecognizingEventHandler() { reco, evt in
        print("Received intermediate result event. SessionId: \(evt.sessionId), Recognition Result: \(evt.result.text ?? "(no result)"), Status: \(evt.result.reason), Offset: \(evt.result.offset) Duration: \(evt.result.duration), ResultID: \(evt.result.resultId)")
    }
    reco.addRecognizedEventHandler() { reco, evt in
        print("Received final result event. SessionId: \(evt.sessionId), Recognition Result: \(evt.result.text ?? "(no result)"), Status: \(evt.result.reason), Offset: \(evt.result.offset) Duration: \(evt.result.duration), ResultID: \(evt.result.resultId)")
    }
    reco.addSessionStoppedEventHandler() { reco, evt in
        print("Received session stopped event. SessionId: \(evt.sessionId)")
    }

    print("Listening...")
    do {
        try reco.startContinuousRecognition()
    } catch {
        print("error \(error) happened")
    }
}

When I run the function above, I get the following error:

Error Domain=SPXErrorDomain Code=21 "Error: Exception with an error code: 0x15 (SPXERR_MIC_ERROR)
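
A note before the fix: SPXERR_MIC_ERROR generally means the SDK could not open the microphone at all, and missing record permission is a common culprit. The check below is a hedged sketch, not from the original post; it assumes Info.plist already contains an NSMicrophoneUsageDescription entry, and ensureMicrophoneAccess is a hypothetical helper name.

import AVFoundation

// Hypothetical pre-flight check (not part of the original post):
// confirm record permission before the Speech SDK tries to open the mic.
func ensureMicrophoneAccess(_ completion: @escaping (Bool) -> Void) {
    AVAudioSession.sharedInstance().requestRecordPermission { granted in
        DispatchQueue.main.async {
            if !granted {
                print("Microphone permission denied; recognition cannot start.")
            }
            completion(granted)
        }
    }
}

If permission is granted and the error still appears on a real device, feeding the audio to the SDK yourself, as in the answer below, sidesteps its microphone handling entirely.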

So, after a huge struggle, I finally have a working function. Microsoft should definitely improve their examples. For anyone looking for the same answer, this is what I ended up doing: instead of handing the SDK a microphone configuration, the audio is captured with AVAudioEngine, converted to 16 kHz mono PCM, and pushed into an SPXPushAudioInputStream. Hope this helps someone :)

import AVFoundation
import MicrosoftCognitiveServicesSpeech

var audioEngine = AVAudioEngine()
var audioData = Data()
var inputNode: AVAudioInputNode?
var reco: SPXSpeechRecognizer?

func recognizeFromMic() {
    var stream: SPXPushAudioInputStream?

    // Copy the first (mono) channel of an Int16 PCM buffer into a Data blob.
    func toData(PCMBuffer: AVAudioPCMBuffer) -> Data {
        let channelCount = 1 // the converted buffer is mono
        let channels = UnsafeBufferPointer(start: PCMBuffer.int16ChannelData, count: channelCount)
        return Data(bytes: channels[0],
                    count: Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame))
    }

    // The Speech SDK expects 16 kHz, 16-bit, mono PCM.
    guard let outputFormat = AVAudioFormat(commonFormat: .pcmFormatInt16, sampleRate: 16000, channels: 1, interleaved: false) else {
        return
    }

    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
        try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
    } catch {
        print("audio session error: \(error)")
    }

    // Set up the push stream before installing the tap so the capture
    // callback always has somewhere to write.
    let audioFormat = SPXAudioStreamFormat(usingPCMWithSampleRate: 16000, bitsPerSample: 16, channels: 1)
    stream = SPXPushAudioInputStream(audioFormat: audioFormat!)

    inputNode = audioEngine.inputNode
    let inputFormat = inputNode!.outputFormat(forBus: 0)
    let converter = AVAudioConverter(from: inputFormat, to: outputFormat)

    // Tap the microphone, convert each buffer to the target format, and
    // push it into the stream the recognizer reads from.
    inputNode!.installTap(onBus: 0, bufferSize: 32000, format: inputFormat) { pcmBuffer, _ in
        guard let outputBuffer = AVAudioPCMBuffer(pcmFormat: outputFormat, frameCapacity: pcmBuffer.frameCapacity) else {
            return
        }
        outputBuffer.frameLength = pcmBuffer.frameLength
        let inputBlock: AVAudioConverterInputBlock = { _, outStatus in
            outStatus.pointee = .haveData
            return pcmBuffer
        }
        var error: NSError? = nil
        converter?.convert(to: outputBuffer, error: &error, withInputFrom: inputBlock)
        let chunk = toData(PCMBuffer: outputBuffer)
        self.audioData.append(chunk)   // keep a full copy of the session audio
        stream?.write(chunk)           // push only the new chunk, not the whole accumulated buffer
    }

    audioEngine.prepare()
    do {
        try audioEngine.start()
    } catch {
        print("\(error)")
    }

    var speechConfig: SPXSpeechConfiguration?
    do {
        try speechConfig = SPXSpeechConfiguration(subscription: key, region: region)
        speechConfig!.enableDictation()
    } catch {
        print("error \(error) happened")
        speechConfig = nil
    }
    speechConfig?.speechRecognitionLanguage = "en-US"

    let audioConfig = SPXAudioConfiguration(streamInput: stream!)
    reco = try! SPXSpeechRecognizer(speechConfiguration: speechConfig!, audioConfiguration: audioConfig!)

    reco!.addRecognizingEventHandler() { reco, evt in
        print("Received intermediate result event. SessionId: \(evt.sessionId), Recognition Result: \(evt.result.text ?? "(no result)"), Status: \(evt.result.reason), Offset: \(evt.result.offset) Duration: \(evt.result.duration), ResultID: \(evt.result.resultId)")
    }
    reco!.addRecognizedEventHandler() { reco, evt in
        print("Received final result event. SessionId: \(evt.sessionId), Recognition Result: \(evt.result.text ?? "(no result)"), Status: \(evt.result.reason), Offset: \(evt.result.offset) Duration: \(evt.result.duration), ResultID: \(evt.result.resultId)")
    }
    reco!.addSessionStoppedEventHandler() { reco, evt in
        print("Received session stopped event. SessionId: \(evt.sessionId)")
    }

    print("Listening...")
    // Start continuous recognition; audio keeps flowing in through the push stream.
    do {
        try reco!.startContinuousRecognition()
    } catch {
        print("error \(error) happened")
    }
}
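
One addition that is not in the original answer: with this setup the tap and the audio engine keep running independently of the recognizer, so stopping cleanly takes more than stopContinuousRecognition alone. A minimal stop routine under the same assumptions as above (the name stopRecognizing is illustrative):

// Hypothetical companion to recognizeFromMic(): stop recognition,
// then tear down the audio capture.
func stopRecognizing() {
    do {
        try reco?.stopContinuousRecognition()
    } catch {
        print("error \(error) happened")
    }
    inputNode?.removeTap(onBus: 0)
    audioEngine.stop()
}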
