我正在尝试使用UWP的AudioGraph API来再现合成语音和短通知声音("耳机")的混合。
UWP有一个语音合成API,它为我提供了一个包含WAV文件的流,但我不想对参数(比特率、采样深度等)做太多假设,所以我们的想法是只要有一些语音要复制,就有一个AudioSubmixNode
并添加AudioFrameInputNode
s。把不同的话语排成一排,这样它们就不会重叠,这有一定的复杂性。
图形初始化为
private async Task InitAudioGraph()
{
var graphCreated = await AudioGraph.CreateAsync(new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Speech)
{
QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
});
if (graphCreated.Status != AudioGraphCreationStatus.Success) return;
_Graph = graphCreated.Graph;
var outputCreated = await _Graph.CreateDeviceOutputNodeAsync();
if (outputCreated.Status != AudioDeviceNodeCreationStatus.Success) return;
_Mixer = _Graph.CreateSubmixNode();
_Mixer.AddOutgoingConnection(outputCreated.DeviceOutputNode);
_Graph.Start();
}
然后用播放当前发音
class SpeechStreamPlayer : IDisposable
{
internal static void Play(AudioGraph graph, AudioSubmixNode mixer, SpeechSynthesisStream speechStream)
{
if (!speechStream.ContentType.Equals("audio/wav", StringComparison.OrdinalIgnoreCase)) throw new NotSupportedException("Content type: " + speechStream.ContentType);
var stream = speechStream.AsStreamForRead();
// Read the RIFF header
uint chunkId = stream.ReadUint(); // "RIFF" - but in little-endian
if (chunkId != 0x46464952) throw new NotSupportedException("Magic: " + chunkId);
uint chunkSize = stream.ReadUint(); // Length of rest of stream
uint format = stream.ReadUint(); // "WAVE"
if (format != 0x45564157) throw new NotSupportedException("Stream format: " + format);
// "fmt " sub-chunk
uint subchunkId = stream.ReadUint();
if (subchunkId != 0x20746d66) throw new NotSupportedException("Expected fmt sub-chunk, found " + subchunkId);
uint subchunkSize = stream.ReadUint();
uint subchunk2Off = (uint)stream.Position + subchunkSize;
uint audioFormat = (uint)stream.ReadShort();
uint chans = (uint)stream.ReadShort();
uint sampleRate = stream.ReadUint();
uint byteRate = stream.ReadUint();
uint blockSize = (uint)stream.ReadShort();
uint bitsPerSample = (uint)stream.ReadShort();
// Possibly extra stuff added, so...
stream.Seek(subchunk2Off, SeekOrigin.Begin);
subchunkId = stream.ReadUint(); // "data"
if (subchunkId != 0x61746164) throw new NotSupportedException("Expected data sub-chunk, found " + subchunkId);
subchunkSize = stream.ReadUint();
// Ok, the stream is in the correct place to start extracting data and we have the parameters.
var props = AudioEncodingProperties.CreatePcm(sampleRate, chans, bitsPerSample);
var frameInputNode = graph.CreateFrameInputNode(props);
frameInputNode.AddOutgoingConnection(mixer);
new SpeechStreamPlayer(frameInputNode, mixer, stream, blockSize);
}
internal event EventHandler StreamFinished;
private SpeechStreamPlayer(AudioFrameInputNode frameInputNode, AudioSubmixNode mixer, Stream stream, uint sampleSize)
{
_FrameInputNode = frameInputNode;
_Mixer = mixer;
_Stream = stream;
_SampleSize = sampleSize;
_FrameInputNode.QuantumStarted += Source_QuantumStarted;
_FrameInputNode.Start();
}
private AudioFrameInputNode _FrameInputNode;
private AudioSubmixNode _Mixer;
private Stream _Stream;
private readonly uint _SampleSize;
private unsafe void Source_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
if (args.RequiredSamples <= 0) return;
System.Diagnostics.Debug.WriteLine("Requested {0} samples", args.RequiredSamples);
var frame = new AudioFrame((uint)args.RequiredSamples * _SampleSize);
using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
{
using (var reference = buffer.CreateReference())
{
byte* pBuffer;
uint capacityBytes;
var directBuffer = reference as IMemoryBufferByteAccess;
((IMemoryBufferByteAccess)reference).GetBuffer(out pBuffer, out capacityBytes);
uint bytesRemaining = (uint)_Stream.Length - (uint)_Stream.Position;
uint bytesToCopy = Math.Min(capacityBytes, bytesRemaining);
for (uint i = 0; i < bytesToCopy; i++) pBuffer[i] = (byte)_Stream.ReadByte();
for (uint i = bytesToCopy; i < capacityBytes; i++) pBuffer[i] = 0;
if (bytesRemaining <= capacityBytes)
{
Dispose();
StreamFinished?.Invoke(this, EventArgs.Empty);
}
}
}
sender.AddFrame(frame);
}
public void Dispose()
{
if (_FrameInputNode != null)
{
_FrameInputNode.QuantumStarted -= Source_QuantumStarted;
_FrameInputNode.Dispose();
_FrameInputNode = null;
}
if (_Stream != null)
{
_Stream.Dispose();
_Stream = null;
}
}
}
这一次有效。当第一次发声结束时,StreamFinished?.Invoke(this, EventArgs.Empty);
通知队列管理系统应该播放下一次发声,并且线路
var frameInputNode = graph.CreateFrameInputNode(props);
抛出带有消息CCD_ 5的CCD_。经过一点挖掘,它对应于XAUDIO2_E_INVALID_CALL,但这不是很有描述性。
在这两种情况下,传递给AudioEncodingProperties.CreatePcm
的参数都是(22050, 1, 16)
。
我怎样才能找到问题的更多细节?在最坏的情况下,我想我可以把整个图扔掉,每次都构建一个新的图,但这似乎效率很低。
问题似乎在中
当第一次发声结束时,
StreamFinished?.Invoke(this, EventArgs.Empty);
通知队列管理系统应该播放下一次发声
尽管是AudioFrameInputNode的文档。QuantumStarted没有说任何关于禁止的动作,AudioGraph的文档。Quantum开始说
QuantumStarted事件是同步的,这意味着您不能更新AudioGraph的属性或状态,也不能更新该事件处理程序中的单个音频节点。尝试执行诸如停止音频图或添加、删除或启动单个音频节点之类的操作将导致引发异常。
这似乎也适用于节点的QuantumStarted
事件。
简单的解决方案是使用将图形操作转移到另一个线程
Task.Run(() => StreamFinished?.Invoke(this, EventArgs.Empty));