I'm trying to get interim_results with node.js:
var params = {
  audio: fs.createReadStream('./out.wav'),
  content_type: 'audio/l16;rate=16000',
  continuous: true,
  interim_results: true,
  word_confidence: true
};
I create a session with speech_to_text.createSession and then try to start the recognize function:
function recognize(params, session_id, cookie_session) {
  setInterval(function() {
    observe_results(session_id, cookie_session);
  }, 1000);
  speech_to_text.recognize(params, function(err, transcript) {
    if (err) {
      console.log(err);
    } else {
      console.log("OK");
      console.log(JSON.stringify(transcript, null, 2));
      fs.writeFile('./transcript.txt', JSON.stringify(transcript), function(err) {
        if (err) {
          return console.log('err');
        }
      });
    }
  });
}
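(observe_results is not defined in the snippet above; a hypothetical sketch of what such a helper might look like, assuming it simply wraps the SDK's observeResult call for the open session:)

// Hypothetical sketch of the observe_results helper referenced above,
// assuming it wraps the SDK's observeResult call for the open session.
function observe_results(session_id, cookie_session) {
  speech_to_text.observeResult({
    interim_results: true,
    session_id: session_id,
    cookie_session: cookie_session
  }, function(err, result) {
    if (err) {
      console.log(err);
      return;
    }
    console.log(JSON.stringify(result, null, 2));
  });
}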
However, I only receive the final transcript, not the interim_results. Thanks in advance for your help.
UPDATE 11/4: As of v1.0, the watson-developer-cloud npm module supports websockets.
See: How to create a blob in node.js to use with websockets?
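A minimal sketch of the websocket route, assuming the createRecognizeStream API documented for v1.0 of the module (names and events may differ slightly across versions):

// Minimal websocket sketch, assuming watson-developer-cloud v1.0's
// createRecognizeStream API; check the module README for the exact surface.
var watson = require('watson-developer-cloud'),
    fs = require('fs');

var speechToText = watson.speech_to_text({
  username: '<username>',
  password: '<password>',
  version: 'v1'
});

var recognizeStream = speechToText.createRecognizeStream({
  content_type: 'audio/l16;rate=44100',
  interim_results: true
});

// pipe the audio in over the websocket
fs.createReadStream('audio.wav').pipe(recognizeStream);

// 'results' events carry the raw JSON, including interim results
recognizeStream.setEncoding('utf8');
recognizeStream.on('results', function(results) {
  console.log(JSON.stringify(results, null, 2));
});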
Original answer:
You are calling recognize instead of recognizeLive.
Take a look at the example below:
var watson = require('watson-developer-cloud'),
    fs = require('fs');

var speechToText = watson.speech_to_text({
  password: '<password>',
  username: '<username>',
  version: 'v1'
});

var noop = function() {};

var observeResult = function(err, transcript) {
  if (err) {
    console.log(err);
    return;
  }
  // print the transcript
  console.log(JSON.stringify(transcript, null, 2));
};

speechToText.createSession({}, function(err, session) {
  if (err) {
    console.log('error:', err);
    return;
  }
  var request = speechToText.recognizeLive({
    content_type: 'audio/l16;rate=44100',
    continuous: true,
    word_confidence: true,
    interim_results: true,
    session_id: session.session_id,
    cookie_session: session.cookie_session
  }, noop);

  // call observeResult to get interim results
  speechToText.observeResult({
    interim_results: true,
    session_id: session.session_id,
    cookie_session: session.cookie_session
  }, observeResult);

  // pipe the audio to the request;
  // once the stream is consumed it will call request.end()
  fs.createReadStream('audio.wav').pipe(request);
});
Replace username and password, and use this audio.
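With interim_results enabled, the observeResult callback fires repeatedly while the audio is being processed. If you want to tell interim hypotheses apart from finished text, you can check the final flag; a hedged sketch, assuming each message follows the standard Watson speech-to-text response shape ({ results: [{ alternatives: [...], final: <bool> }], result_index: n }):

// Sketch: separating interim hypotheses from final results, assuming each
// message carries the standard STT shape with a per-result `final` flag.
var observeResult = function(err, transcript) {
  if (err) {
    console.log(err);
    return;
  }
  (transcript.results || []).forEach(function(result) {
    var text = result.alternatives[0].transcript;
    console.log((result.final ? 'final:   ' : 'interim: ') + text);
  });
};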
I wrote up a listing for future reference; it also contains the output produced with the audio above.