ActionScript double-buffered audio playback with upsampling



I have been working on a way to stream microphone data to a server, loop it back to the client, and play it back packet by packet. So far I have the client connection, handshaking, voice sending, voice receiving, buffer storage, and choppy playback working. The returned voice plays at the proper speed with no harsh noise, but it only ever plays about the first 1% of the voice buffer, loops back around, and plays the new first 1% again. I need the client to play the sound data it retrieves (resampled to the proper audio rate) exactly once and never again.

package Voip
{
    import flash.events.SampleDataEvent;
    import flash.events.TimerEvent;
    import flash.media.Sound;
    import flash.system.System;
    import flash.utils.ByteArray;
    import flash.utils.Timer;
    public class SoundObj
    {
        private var ID:int;
        public var sound:Sound;
        public var buf:ByteArray;
        public var _vbuf:ByteArray;
        public var _numSamples:int;
        public var _phase:Number = 0;
        public var killtimer:Timer = null;
        public var _delaytimer:Timer = new Timer(1000, 1);
        public function SoundObj(id:int)
        {
            ID = id;
            buf = new ByteArray();
            _vbuf = new ByteArray();
            sound = new Sound();
            sound.addEventListener(SampleDataEvent.SAMPLE_DATA, SoundBuffer, false, 0, true);
            sound.play();
        }
        public function receive(bytes:ByteArray):void {
            var i:int = _vbuf.position;
            _vbuf.position = _vbuf.length;
            _vbuf.writeBytes(bytes);
            _vbuf.position = i;
            _numSamples = _vbuf.length/4;
            /*var i:int = buf.position;
            buf.position = buf.length; // write to end
            buf.writeBytes(bytes);
            buf.position = i; // return to origin
            if (_delaytimer == null) {
                _delaytimer = new Timer(1000, 1);
                _delaytimer.addEventListener(TimerEvent.TIMER, finaldata);
                _delaytimer.start();
            }
            if (!_delaytimer.running) {
                // timer not running, dump buffer and reset.
                //var index:int = _vbuf.position;
                //_vbuf.position = _vbuf.length;
                //_vbuf.writeBytes(buf);
                _vbuf = buf;
                _vbuf.position = 0;
                buf = new ByteArray();
                //_vbuf.position = index;
                //sound.extract(_vbuf, int(_vbuf.length * 44.1));
                _phase = 0;
                _numSamples = _vbuf.length/4;
                // reset killtimer to silence timeout
                killtimer = new Timer(1000, 1);
                killtimer.addEventListener(TimerEvent.TIMER, killtimerEvent);
                killtimer.start();
            }*/
        }
        public function killtimerEvent(event:TimerEvent):void {
            _delaytimer = null;
        }
        // send remaining data
        public function finaldata(event:TimerEvent):void {
            if (buf.length > 0) {
                trace("adding final content");
                //var _buf:ByteArray = new ByteArray();
                //var index:int = int(_phase)*4;
                //if (index >= _vbuf.length)
                //  index = _vbuf.position;
                /*_buf.writeBytes(_vbuf, index, _vbuf.length-index);
                _buf.writeBytes(buf);
                buf = new ByteArray();*/
                //_vbuf = _buf;
                // add remaining buffer to playback
                var index:int = _vbuf.position;
                _vbuf.position = _vbuf.length;
                _vbuf.writeBytes(buf);
                _vbuf.position = index;
                // wipe buffer
                buf = new ByteArray();
                //sound.extract(_vbuf, int(_vbuf.length * 44.1));
                _phase = 0;
                //_numSamples = _vbuf.length/4;
                _numSamples = _vbuf.length/4;
                // reset killtimer to silence timeout
                killtimer = new Timer(1000, 1);
                killtimer.addEventListener(TimerEvent.TIMER, killtimerEvent);
                killtimer.start();
            }
        }
        public function SoundBuffer(event:SampleDataEvent):void {
            //try {
            //trace("[SoundBuffer:"+ID+"]");
            //sound.removeEventListener(SampleDataEvent.SAMPLE_DATA, SoundBuffer);
            // buffer 4KB of data
            for (var i:int = 0; i < 4096; i++)
            {
                var l:Number = 0;
                var r:Number = 0;
                if (_vbuf.length > int(_phase)*4) {
                    _vbuf.position = int(_phase)*4;
                    l = _vbuf.readFloat();
                    if (_vbuf.position < _vbuf.length)
                        r = _vbuf.readFloat();
                    else
                        r = l;
                }
                //if (_vbuf.position == _vbuf.length)
                    //_vbuf = new ByteArray();
                event.data.writeFloat(l);
                event.data.writeFloat(r);
                _phase += (16/44.1);
                if (_phase >= _numSamples) {
                    _phase -= _numSamples;
                }
            }
            System.gc();
        }
    }
}

The initial idea was to create a SoundObj on my stage and use obj.receive(bytes) to add incoming data to the buffer, so it would be played the next time the sound player asks for new data. Since then I have been trying to get it to work one way or another. The timers were meant to decide when to buffer more data, but they never really behaved as intended.
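
As a rough illustration of that wiring only (the socket endpoint and handler name below are placeholders, not my actual networking code):

import flash.events.ProgressEvent;
import flash.net.Socket;
import flash.utils.ByteArray;
import Voip.SoundObj;

var snd:SoundObj = new SoundObj(1);
var sock:Socket = new Socket("voip.example.com", 9000); // placeholder endpoint

sock.addEventListener(ProgressEvent.SOCKET_DATA, onVoiceData);
function onVoiceData(e:ProgressEvent):void {
    var packet:ByteArray = new ByteArray();
    sock.readBytes(packet, 0, sock.bytesAvailable); // grab whatever arrived
    snd.receive(packet); // queue it for the next SAMPLE_DATA request
}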

A proper double buffer, with proper playback:

package VoipOnline
{
    import flash.events.SampleDataEvent;
    import flash.events.TimerEvent;
    import flash.media.Sound;
    import flash.system.System;
    import flash.utils.ByteArray;
    import flash.utils.Timer;
    public class SoundObj
    {
        public var ID:int;
        public var sound:Sound;
        internal var _readBuf:ByteArray;
        internal var _writeBuf:ByteArray;
        internal var n:Number;
        internal var _phase:Number;
        internal var _numSamples:int;
        internal var myTimer:Timer;
        internal var bytes:int;
        public function SoundObj(id:int)
        {
            ID = id;
            _readBuf = new ByteArray();
            _writeBuf = new ByteArray();
            bytes = 0;
            myTimer = new Timer(10000, 0);
            myTimer.addEventListener(TimerEvent.TIMER, timerHandler);
            myTimer.start();
            sound = new Sound();
            sound.addEventListener(SampleDataEvent.SAMPLE_DATA, SoundBuffer);
            sound.play();
        }
        public function receive(bytes:ByteArray):void 
        {
            var i:int = _writeBuf.position;
            _writeBuf.position = _writeBuf.length;
            _writeBuf.writeBytes(bytes);
            _writeBuf.position = i;
            this.bytes += bytes.length;
        }
        private function timerHandler(e:TimerEvent):void{
            trace((bytes/10) + " bytes per second.");
            bytes = 0;
        }
        public function SoundBuffer(event:SampleDataEvent):void 
        {
            //trace((_readBuf.length/8)+" in buffer, and "+(_writeBuf.length/8)+" waiting.");
            for (var i:int = 0; i < 4096; i++)
            {
                var l:Number = 0; // silence
                var r:Number = 0; // silence
                if (_readBuf.length > int(_phase)*8) {
                    _readBuf.position = int(_phase)*8;
                    l = _readBuf.readFloat();
                    if (_readBuf.position < _readBuf.length)
                        r = _readBuf.readFloat();
                    else {
                        r = l;
                        Buffer();
                    }
                } else {
                    Buffer();
                }
                event.data.writeFloat(l);
                event.data.writeFloat(r);
                _phase += 0.181;
            }
        }
        private function Buffer():void {
            // needs new data
            // snip 4096 bytes
            var buf:ByteArray = new ByteArray();
            var len:int = (_writeBuf.length >= 4096 ? 4096 : _writeBuf.length);
            buf.writeBytes(_writeBuf, 0, len);
            // remove snippet
            var tmp:ByteArray = new ByteArray();
            tmp.writeBytes(_writeBuf, len, _writeBuf.length-len);
            _writeBuf = tmp;
            // plug in snippet
            _readBuf = buf;
            _readBuf.position = 0;
            _phase = 0;
        }
    }
}

These code snippets are built on top of this microphone setup:

mic = Microphone.getMicrophone();
mic.addEventListener(SampleDataEvent.SAMPLE_DATA, this.micParse); // raw mic data stream handler
mic.codec = SoundCodec.SPEEX;
mic.setUseEchoSuppression(true);
mic.gain = 100;
mic.rate = 44;
mic.setSilenceLevel(voicelimit.value, 1);

After a lot of testing, this seems to give the best results so far. Slightly grainy, but that is with the compression and filtering applied. Some of the issues I have run into appear to be the server's fault; I am only receiving roughly 30% of the bytes I send. That said, the code above works. You simply adjust the _phase increment to change the playback speed (0.181 == 16/44/2). Credit where credit is due: even though his sample did not completely solve the problem at hand, it was still a big step forward.
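
For reference, here is the arithmetic behind that constant written out as a small sketch; the helper name is mine, and treating the incoming data as effectively 16 kHz is taken straight from the 16/44/2 note above, not measured:

// illustrative only: the _phase increment is input rate / output rate / channels
function phaseStep(inputKHz:Number, outputKHz:Number, channels:int):Number {
    return inputKHz / outputKHz / channels;
}
trace(phaseStep(16, 44, 2)); // 0.1818..., the 0.181 used in SoundBuffer()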

I prepared some sample data and fed it into your example, and got nothing but noise. I have stripped your class down to just two buffers: one that receives samples and one that supplies them. Hopefully this will do the trick:

package  {
    import flash.events.*;
    import flash.media.*;
    import flash.utils.*;
    public class SoundObj
    {
        private var ID:int;
        public var sound:Sound;
        public var _readBuf:ByteArray;
        public var _writeBuf:ByteArray;
        public function SoundObj(id:int)
        {
            ID = id;
            _readBuf = new ByteArray();
            _writeBuf = new ByteArray();
            sound = new Sound();
            sound.addEventListener(SampleDataEvent.SAMPLE_DATA, SoundBuffer);
            sound.play();
        }
        public function receive(bytes:ByteArray):void
        {
            var i:int = _writeBuf.position;
            _writeBuf.position = _writeBuf.length;
            _writeBuf.writeBytes(bytes);
            _writeBuf.position = i;
            sound.play();
        }
        public function SoundBuffer(event:SampleDataEvent):void
        {
            for (var i:int = 0; i < 8192; i++)
            {
                if (_readBuf.position < _readBuf.length)
                    event.data.writeFloat(_readBuf.readFloat());
                else
                {
                    if (_writeBuf.length >= 81920)
                    {
                        _readBuf = _writeBuf;
                        _writeBuf = new ByteArray();
                    }
                    if (_readBuf.position < _readBuf.length)
                        event.data.writeFloat(_readBuf.readFloat());
                    else
                    {
                        //event.data.writeFloat( 0 );
                    }
                }
            }
        }
    }
}
    // microphone sample parsing with rate change
    function micParse(event:SampleDataEvent):void 
    {
        var soundBytes:ByteArray = new ByteArray();
        var i:uint = 0;
        var n:Number = event.data.bytesAvailable * 44 / mic.rate * 2; // *2 for stereo
        var f:Number = 0;
        while(event.data.bytesAvailable) 
        { 
            i++;
            var sample:Number = event.data.readFloat(); 
            for (; f <= i; f+= mic.rate / 2 / 44)
            {
                soundBytes.writeFloat(sample); 
            }
        }       
        snd.receive(soundBytes);
    }       
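
Note that micParse refers to mic and snd from the surrounding scope. A minimal, hypothetical wiring (the variable names and listener registration are assumptions, mirroring the microphone setup shown earlier) might look like:

import flash.events.SampleDataEvent;
import flash.media.Microphone;
import flash.media.SoundCodec;

var snd:SoundObj = new SoundObj(1); // playback object from the class above
var mic:Microphone = Microphone.getMicrophone();
mic.codec = SoundCodec.SPEEX;
mic.setUseEchoSuppression(true);
mic.gain = 100;
mic.rate = 44;
mic.addEventListener(SampleDataEvent.SAMPLE_DATA, micParse); // micParse then calls snd.receive()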
