libjingle 是在哪里添加音频流的?



我找不到把音频流送到"扬声器"的代码位置。我是否可以先修改流、之后再添加它?我感觉是 libjingle 在内部处理流并自动添加它的。

以下是我代码中与 libjingle 相关的部分:

import AVFoundation
import UIKit
// Tag used to namespace the WebRTC identifiers created by this controller.
let TAG = "ViewController"
// Identifier assigned to the locally created audio track.
let AUDIO_TRACK_ID = "\(TAG)AUDIO"
// Label of the local media stream that carries the audio track.
let LOCAL_MEDIA_STREAM_ID = "\(TAG)STREAM"
/// Audio-only WebRTC call controller built on the legacy libjingle-era
/// ObjC WebRTC API. Creates the local audio track/stream up front and
/// attaches it to each new peer connection in `prepareNewConnection()`.
/// NOTE(review): signaling helpers `sigConnect`/`sigSend` are defined
/// elsewhere in the project — not visible in this file.
class ViewController: UIViewController, RTCSessionDescriptionDelegate, RTCPeerConnectionDelegate {
    // Local media stream holding the audio track sent to the remote peer.
    var mediaStream: RTCMediaStream!
    // Audio track created locally in viewDidLoad() and added to `mediaStream`.
    var localAudioTrack: RTCAudioTrack!
    // Remote track placeholder (never assigned in the code shown here).
    var remoteAudioTrack: RTCAudioTrack!
    // Video renderers (declared but unused in this file).
    var renderer: RTCEAGLVideoView!
    var renderer_sub: RTCEAGLVideoView!
    var roomName: String!    
    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        initWebRTC();
        sigConnect(wsUrl: "http://192.168.1.59:3000");
        // Build the local audio track and stream once; prepareNewConnection()
        // later adds this stream to the peer connection. Playback of remote
        // audio is handled internally by the WebRTC stack, not by this class.
        localAudioTrack = peerConnectionFactory.audioTrack(withID: AUDIO_TRACK_ID)
        mediaStream = peerConnectionFactory.mediaStream(withLabel: LOCAL_MEDIA_STREAM_ID)
        mediaStream.addAudioTrack(localAudioTrack)
    }
    var peerConnectionFactory: RTCPeerConnectionFactory! = nil
    var peerConnection: RTCPeerConnection! = nil
    var pcConstraints: RTCMediaConstraints! = nil
    var audioConstraints: RTCMediaConstraints! = nil
    var mediaConstraints: RTCMediaConstraints! = nil
    var wsServerUrl: String! = nil
    var peerStarted: Bool = false
    /// Initializes the WebRTC factory and the constraint objects.
    /// Must run before any peer connection is created.
    func initWebRTC() {
        RTCPeerConnectionFactory.initializeSSL()
        peerConnectionFactory = RTCPeerConnectionFactory()
        pcConstraints = RTCMediaConstraints()
        audioConstraints = RTCMediaConstraints()
        // Audio-only call: only OfferToReceiveAudio is requested;
        // no video constraint is set anywhere in this file.
        mediaConstraints = RTCMediaConstraints(
            mandatoryConstraints: [
                RTCPair(key: "OfferToReceiveAudio", value: "true"),
            ],
            optionalConstraints: nil)
    }
    /// Creates a peer connection against Google's public STUN server and
    /// attaches the pre-built local media stream to it.
    /// - Returns: the newly created (and stored) `RTCPeerConnection`.
    func prepareNewConnection() -> RTCPeerConnection {
        var icsServers: [RTCICEServer] = []
        icsServers.append(RTCICEServer(uri: NSURL(string: "stun:stun.l.google.com:19302") as URL!, username: "",
        password: ""))
        // NOTE(review): rtcConfig is configured but never passed to the
        // factory call below, so these policies currently have no effect
        // with this libjingle-era initializer — verify intent.
        let rtcConfig: RTCConfiguration = RTCConfiguration()
        rtcConfig.tcpCandidatePolicy = RTCTcpCandidatePolicy.disabled
        rtcConfig.bundlePolicy = RTCBundlePolicy.maxBundle
        rtcConfig.rtcpMuxPolicy = RTCRtcpMuxPolicy.require
        peerConnection = peerConnectionFactory.peerConnection(withICEServers: icsServers, constraints: pcConstraints, delegate: self)
        peerConnection.add(mediaStream);
        return peerConnection;
    }

    // MARK: - RTCPeerConnectionDelegate (mostly empty stubs)
    func peerConnection(_ peerConnection: RTCPeerConnection!, signalingStateChanged stateChanged: RTCSignalingState) {
    }
    func peerConnection(_ peerConnection: RTCPeerConnection!, iceConnectionChanged newState: RTCICEConnectionState) {
    }
    func peerConnection(_ peerConnection: RTCPeerConnection!, iceGatheringChanged newState: RTCICEGatheringState) {
    }
    /// Forwards each locally gathered ICE candidate to the remote peer
    /// over the signaling channel as a JSON "candidate" message.
    func peerConnection(_ peerConnection: RTCPeerConnection!, gotICECandidate candidate: RTCICECandidate!) {
        if (candidate != nil) {
            print("iceCandidate: " + candidate.description)
            let json:[String: AnyObject] = [
                "type" : "candidate" as AnyObject,
                "sdpMLineIndex" : candidate.sdpMLineIndex as AnyObject,
                "sdpMid" : candidate.sdpMid as AnyObject,
                "candidate" : candidate.sdp as AnyObject
            ]
            sigSend(msg: json as NSDictionary)
        } else {
            // A nil candidate signals the end of gathering in this API version.
            print("End of candidates. -------------------")
        }
    }
    /// Called when the remote stream arrives. Note: nothing is done with the
    /// stream here — remote audio playback is performed by the WebRTC stack
    /// itself; this method only sanity-checks the track count.
    func peerConnection(_ peerConnection: RTCPeerConnection!, addedStream stream: RTCMediaStream!) {
        if (peerConnection == nil) {
            return
        }
        if (stream.audioTracks.count > 1) {
            print("Weird-looking stream: " + stream.description)
            return
        }
    }
    func peerConnection(_ peerConnection: RTCPeerConnection!, removedStream stream: RTCMediaStream!) {
    }
    func peerConnection(_ peerConnection: RTCPeerConnection!, didOpen dataChannel: RTCDataChannel!) {
    }
    func peerConnection(onRenegotiationNeeded peerConnection: RTCPeerConnection!) {
    }
}

我的想法是:可以在下面这个回调方法中捕获音频流。这样理解对吗?另外,我能手动把流"添加"到扬声器吗?

 /// Delegate callback fired when the remote peer's stream is added.
 /// The stream is not routed anywhere manually here — the WebRTC stack
 /// plays the remote audio on its own; this body only validates the
 /// track count and bails out on unexpected input.
 func peerConnection(_ peerConnection: RTCPeerConnection!, addedStream stream: RTCMediaStream!) {
            if (peerConnection == nil) {
                return
            }
            if (stream.audioTracks.count > 1) {
                print("Weird-looking stream: " + stream.description)
                return
            }
        }

WebRTC 通话建立后,WebRTC 协议栈会直接使用平台 API 播放和采集音频。您只能控制以下内容:

  1. 将音频流静音或取消静音;
  2. 使用系统 API 增大/减小音量,或更改音频会话配置。

您无法手动把流"添加"到扬声器,但可以把默认音频输出切换为扬声器或耳机,从而让 WebRTC 的音频被重定向到正确的输出设备。这可以通过 AVFoundation 的 AVAudioSession API 完成。

相关内容

  • 没有找到相关文章

最新更新