我想通过 WebRTC 发送自定义的音频字节,因此需要编写一个自定义的 getUserMedia 方法,使用我自己的 MediaStreamTrack。目前的代码如下:
exports.navigator.getUserMedia = getUserMedia;

/**
 * Shimmed getUserMedia: asks the native (WKWebView) side for track
 * descriptors via postMessage and builds a synthetic MediaStream whose
 * MediaStreamTracks are registered on exports.parent.navigator.tracks
 * so that getUserMedia._onmedia can route incoming media bytes to them.
 *
 * @param {Object} constraints - getUserMedia-style constraints, forwarded to the native side.
 * @param {Function} [successCallback] - legacy-style callback.
 *   NOTE(review): it is accepted but never invoked — callers must use the
 *   returned Promise; confirm whether legacy callers rely on it.
 * @returns {Promise<MediaStream>} resolves with the synthetic stream,
 *   rejects with the error forwarded by the native side.
 */
function getUserMedia(constraints, successCallback) {
  return new Promise(function (resolve, reject) {
    postMessage('WKWebViewGetUserMediaShim_MediaStream_new', constraints, function (trackData) {
      var stream = new MediaStream();
      // Ensure the global track registry exists before indexing into it
      // (the original code assumed it was already created elsewhere).
      var registry = exports.parent.navigator.tracks = exports.parent.navigator.tracks || {};
      for (var i in trackData) {
        var data = trackData[i];
        var track = new MediaStreamTrack();
        track.id = data.id;
        track.kind = data.kind;
        track._meta = data.meta;
        track._stream = stream;
        stream._tracks.push(track);
        // Register by kind then id so _onmedia can fan data out per kind.
        registry[track.kind] = registry[track.kind] || {};
        registry[track.kind][track.id] = track;
      }
      resolve(stream);
    }, function (error) { reject(error); });
  });
}
/**
 * Receives raw media bytes pushed from the native side and fans them out
 * to every registered track of the given kind.
 *
 * @param {string} kind - 'audio' or 'video'.
 * @param {string|*} data - for audio: a base64-encoded payload decoded into
 *   32-bit float PCM samples; for video: forwarded untouched.
 */
getUserMedia._onmedia = function (kind, data) {
  // Guard: the registry may not exist yet if getUserMedia was never called.
  var tracksByKind = (exports.parent.navigator.tracks || {})[kind];
  if (kind === 'audio') {
    // Decode the base64 payload into float PCM samples.
    data = new Float32Array(base64ToData(data));
  }
  // Video data needs no transformation (the original `data = data` branch
  // was a no-op and has been removed).
  for (var id in tracksByKind) {
    var track = tracksByKind[id];
    track._ondataavailable && track._ondataavailable(data);
  }
};
_onmedia 方法是我接收自定义字节的地方,而 getUserMedia 则是我试图替换(stub)的方法。但我不知道如何把这些自定义字节正确地作为流写入 MediaStreamTrack。
要在 Web 浏览器中生成音频,请使用 Web Audio API,它提供了多种生成合成声音的方法。
如果您希望控制每一个采样,可以使用 ScriptProcessorNode,并在其 audioprocess 事件的处理函数中填充 outputBuffer。
Web Audio API 还提供了 MediaStreamAudioDestinationNode,它可以把生成的音频输出到一个 MediaStream 对象中:
btn.onclick = start;

// Builds the Web Audio graph: a ScriptProcessorNode generates samples and
// a MediaStreamAudioDestinationNode exposes them as a MediaStream.
function start() {
  const AudioCtx = window.AudioContext || window.webkitAudioContext;
  const ctx = new AudioCtx();

  // 256-frame processing buffer, one input channel, one output channel.
  const processor = ctx.createScriptProcessor(256, 1, 1);
  processor.onaudioprocess = makeSomeNoise;

  // Route the generated audio into a MediaStream.
  const streamNode = ctx.createMediaStreamDestination();
  processor.connect(streamNode);
  const mediaStream = streamNode.stream;

  // for demo output to a MediaElement
  aud.srcObject = mediaStream;
  aud.volume = 0.5;
  aud.play();
}
// audioprocess handler: fills channel 0 of the output buffer with white
// noise scaled into the range [-0.25, 0.25).
function makeSomeNoise(evt) {
  const output = evt.outputBuffer.getChannelData(0);
  // Write every sample of this processing block (256 frames, per the
  // createScriptProcessor(256, 1, 1) call in start()).
  for (let i = 0; i < output.length; i += 1) {
    output[i] = (Math.random() - 0.5) * 0.5;
  }
}
<button id="btn">start</button>
<audio id="aud" controls></audio>