我正在尝试从Firebase Storage下载一个音频文件(约250KB)，并使用Firebase Cloud函数(节点8)将其发送到IBM Cloud Speech to Text。我正在使用axios
向下载URL发送HTTP GET请求。axios
返回一个流,所以我使用fs.createReadStream(response)
将文件流式传输到IBM Cloud Speech to Text。我没有收到错误消息,而是没有向IBM Cloud Speech to Text发送任何内容。
exports.IBM_Speech_to_Text = functions.firestore.document('Users/{userID}/Pronunciation_Test/downloadURL').onUpdate((change, context) => { // this is the Firebase Cloud Functions trigger
const fs = require('fs');
const SpeechToTextV1 = require('ibm-watson/speech-to-text/v1');
const { IamAuthenticator } = require('ibm-watson/auth');
const speechToText = new SpeechToTextV1({
authenticator: new IamAuthenticator({
apikey: 'my-api-key',
}),
url: 'https://api.us-south.speech-to-text.watson.cloud.ibm.com/instances/01010101',
});
const axios = require('axios');
return axios({
method: 'get',
url: 'https://firebasestorage.googleapis.com/v0/b/languagetwo-cd94d.appspot.com/o/Users%2FbcmrZDO0X5N6kB38MqhUJZ11OzA3%2Faudio-file.flac?alt=media&token=871b9401-c6af-4c38-aaf3-889bb5952d0e', // the download URL for the audio file
responseType: 'stream' // is this creating a stream?
})
.then(function (response) {
var params = {
audio: fs.createReadStream(response),
contentType: 'audio/flac',
wordAlternativesThreshold: 0.9,
keywords: ['colorado', 'tornado', 'tornadoes'],
keywordsThreshold: 0.5,
};
speechToText.recognize(params)
.then(results => {
console.log(JSON.stringify(results, null, 2)); // undefined
})
.catch(function (error) {
console.log(error.error);
});
})
.catch(function (error) {
console.log(error.error);
});
});
问题是来自axios
的响应不会到达fs.createReadStream()
。
fs.createReadStream(path)
的文档显示为path <string> | <Buffer> | <URL>
。response
不是这些。我需要将response
写入缓冲区吗?我试过这个:
const responseBuffer = Buffer.from(response.data.pipe(fs.createWriteStream(responseBuffer)));
;
var params = {
audio: fs.createReadStream(responseBuffer),
但这也没用。第一行很臭。。。
还是我应该使用流(stream)？
exports.IBM_Speech_to_Text = functions.firestore.document('Users/{userID}/Pronunciation_Test/downloadURL').onUpdate((change, context) => {
const fs = require('fs');
const SpeechToTextV1 = require('ibm-watson/speech-to-text/v1');
const { IamAuthenticator } = require('ibm-watson/auth');
const speechToText = new SpeechToTextV1({
authenticator: new IamAuthenticator({
apikey: 'my-api-key',
}),
url: 'https://api.us-south.speech-to-text.watson.cloud.ibm.com/instances/01010101',
});
const axios = require('axios');
const path = require('path');
return axios({
method: 'get',
url: 'https://firebasestorage.googleapis.com/v0/b/languagetwo-cd94d.appspot.com/o/Users%2FbcmrZDO0X5N6kB38MqhUJZ11OzA3%2Faudio-file.flac?alt=media&token=871b9401-c6af-4c38-aaf3-889bb5952d0e',
responseType: 'stream'
})
.then(function (response) {
response.data.pipe(createWriteStream(audiofile));
var params = {
audio: fs.createReadStream(audiofile),
contentType: 'audio/flac',
wordAlternativesThreshold: 0.9,
keywords: ['colorado', 'tornado', 'tornadoes'],
keywordsThreshold: 0.5,
};
speechToText.recognize(params)
.then(results => {
console.log(JSON.stringify(results, null, 2));
})
.catch(function (error) {
console.log(error.error);
});
})
.catch(function (error) {
console.log(error.error);
});
});
这也不起作用。
问题是我从axios
传递response
,而它本应是response.data
。如果有Postman,我会在五分钟内解决这个问题,但Postman不处理流媒体。
另一个问题是,正如jfriend00所说,fs.createReadStream
是不必要的。正确的代码是:
audio: response.data,
无需这些线路:
const fs = require('fs');
response.data.pipe(createWriteStream(audiofile));