SwiftUI does not update state in an @ObservedObject camera view model object



I'm new to SwiftUI and manual camera controls, and I could really use some help.

I'm trying to build a SwiftUI camera view that wraps a UIKit camera. I want to control the focus lens position through a SwiftUI picker view, show the focus value below it, and eventually correlate AVCaptureDevice.lensPosition (which ranges from 0 to 1.0) with the feet values shown in the focus picker. For now, though, I just want to display that focus number on screen.

The problem is that when I try to observe the focus through the coordinator's KVO observer and assign it to the camera view model, nothing happens. Please help 🙌

Here is the code:

import SwiftUI
import AVFoundation
import Combine

struct ContentView: View {

    @State private var didTapCapture = false
    @State private var focusLensPosition: Float = 0
    @ObservedObject var cameraViewModel = CameraViewModel(focusLensPosition: 0)

    var body: some View {
        VStack {
            ZStack {
                CameraPreviewRepresentable(didTapCapture: $didTapCapture, cameraViewModel: cameraViewModel)
                    .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .center)

                VStack {
                    FocusPicker(selectedFocus: $focusLensPosition)

                    Text(String(cameraViewModel.focusLensPosition))
                        .foregroundColor(.red)
                        .font(.largeTitle)
                }
                .frame(maxWidth: .infinity, alignment: .leading)
            }
            .edgesIgnoringSafeArea(.all)

            Spacer()

            CaptureButton(didTapCapture: $didTapCapture)
                .frame(width: 100, height: 100, alignment: .center)
                .padding(.bottom, 20)
        }
    }
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}
struct CaptureButton: View {
    @Binding var didTapCapture: Bool

    var body: some View {
        Button {
            didTapCapture.toggle()
        } label: {
            Image(systemName: "photo")
                .font(.largeTitle)
                .padding(30)
                .background(Color.red)
                .foregroundColor(.white)
                .clipShape(Circle())
                .overlay(
                    Circle()
                        .stroke(Color.red)
                )
        }
    }
}
struct CameraPreviewRepresentable: UIViewControllerRepresentable {

    @Environment(\.presentationMode) var presentationMode
    @Binding var didTapCapture: Bool
    @ObservedObject var cameraViewModel: CameraViewModel

    let cameraController: CustomCameraController = CustomCameraController()

    func makeUIViewController(context: Context) -> CustomCameraController {
        cameraController.delegate = context.coordinator
        return cameraController
    }

    func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {
        if self.didTapCapture {
            cameraViewController.didTapRecord()
        }
    }

    func makeCoordinator() -> Coordinator {
        Coordinator(self, cameraViewModel: cameraViewModel)
    }

    class Coordinator: NSObject, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate {
        let parent: CameraPreviewRepresentable
        var cameraViewModel: CameraViewModel

        var focusLensPositionObserver: NSKeyValueObservation?

        init(_ parent: CameraPreviewRepresentable, cameraViewModel: CameraViewModel) {
            self.parent = parent
            self.cameraViewModel = cameraViewModel
            super.init()

            focusLensPositionObserver = self.parent.cameraController.currentCamera?.observe(\.lensPosition, options: [.new]) { [weak self] camera, _ in
                print(Float(camera.lensPosition))

                // announcing changes via Publisher
                self?.cameraViewModel.focusLensPosition = camera.lensPosition
            }
        }

        deinit {
            focusLensPositionObserver = nil
        }

        func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
            parent.didTapCapture = false

            if let imageData = photo.fileDataRepresentation(), let image = UIImage(data: imageData) {
                UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
            }

            parent.presentationMode.wrappedValue.dismiss()
        }
    }
}
class CameraViewModel: ObservableObject {
    @Published var focusLensPosition: Float = 0

    init(focusLensPosition: Float) {
        self.focusLensPosition = focusLensPosition
    }
}
class CustomCameraController: UIViewController {

    var image: UIImage?

    var captureSession = AVCaptureSession()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?

    //DELEGATE
    var delegate: AVCapturePhotoCaptureDelegate?

    func showFocusLensPosition() -> Float {
//        guard let camera = currentCamera else { return 0 }

//        try! currentCamera!.lockForConfiguration()
//        currentCamera!.focusMode = .autoFocus
////        currentCamera!.setFocusModeLocked(lensPosition: currentCamera!.lensPosition, completionHandler: nil)
//        currentCamera!.unlockForConfiguration()

        return currentCamera!.lensPosition
    }

    func didTapRecord() {
        let settings = AVCapturePhotoSettings()
        photoOutput?.capturePhoto(with: settings, delegate: delegate!)
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        setup()
    }

    func setup() {
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = .photo
    }

    func setupDevice() {
        let deviceDiscoverySession =
            AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                             mediaType: .video,
                                             position: .unspecified)
        for device in deviceDiscoverySession.devices {
            switch device.position {
            case .front:
                self.frontCamera = device
            case .back:
                self.backCamera = device
            default:
                break
            }
        }

        self.currentCamera = self.backCamera
    }

    func setupInputOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            captureSession.addOutput(photoOutput!)
        } catch {
            print(error)
        }
    }

    func setupPreviewLayer() {
        self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill

        let deviceOrientation = UIDevice.current.orientation
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation(rawValue: deviceOrientation.rawValue)!

        self.cameraPreviewLayer?.frame = self.view.frame
//        view.transform = CGAffineTransform(scaleX: 0.5, y: 0.5)
        self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }
}

struct FocusPicker: View {

    var feets = ["∞ ft", "30", "15", "10", "7", "5", "4", "3.5", "3", "2.5", "2", "1.5", "1", "0.5", "Auto"]

    @Binding var selectedFocus: Float

    var body: some View {
        Picker(selection: $selectedFocus, label: Text("")) {
            ForEach(0 ..< feets.count) {
                Text(feets[$0])
                    .foregroundColor(.white)
                    .font(.subheadline)
                    .fontWeight(.medium)
            }
            .animation(.none)
            .background(Color.clear)
            .pickerStyle(WheelPickerStyle())
        }
        .frame(width: 60, height: 200)
        .border(Color.gray, width: 5)
        .clipped()
    }
}

The problem with the code you provided is that selectedFocus in your FocusPicker view should be an Int rather than a Float: ForEach(0 ..< feets.count) tags each row with an Int index, so a Float selection never matches any row and never updates. One option, then, is to change that type to Int and find a way to represent AVCaptureDevice.lensPosition as an Int within the given range, as sketched below.
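For that first option, a minimal sketch of the mapping could look like the following. The FocusIndexMapper name and the linear index-to-position conversion are just assumptions for illustration (a real lens position does not map linearly to distances in feet):

struct FocusIndexMapper {
    let stepCount: Int   // e.g. feets.count

    // Convert a lens position in 0.0...1.0 to a picker row index.
    func index(for lensPosition: Float) -> Int {
        let clamped = min(max(lensPosition, 0), 1)
        return Int((clamped * Float(stepCount - 1)).rounded())
    }

    // Convert a picker row index back to a lens position in 0.0...1.0.
    func lensPosition(for index: Int) -> Float {
        Float(index) / Float(stepCount - 1)
    }
}

index(for:) would drive the Int picker selection from the observed lensPosition, and lensPosition(for:) would convert the chosen row back when the user spins the wheel.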

The second option is to replace the feets array with an enum. By making the enum conform to the CustomStringConvertible protocol, you can even give each case a proper description. See the example below.

I stripped your code down, since as a first step you only want to display the number; that makes the code easier to follow.

My working example:

import SwiftUI
import Combine

struct ContentView: View {
    @ObservedObject var cameraViewModel = CameraViewModel(focusLensPosition: 0.5)

    var body: some View {
        VStack {
            ZStack {
                VStack {
                    FocusPicker(selectedFocus: $cameraViewModel.focusLensPosition)

                    Text(String(self.cameraViewModel.focusLensPosition))
                        .foregroundColor(.red)
                        .font(.largeTitle)
                }
                .frame(maxWidth: .infinity, alignment: .leading)
            }
            .edgesIgnoringSafeArea(.all)
        }
    }
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}

class CameraViewModel: ObservableObject {
    @Published var focusLensPosition: Float

    init(focusLensPosition: Float) {
        self.focusLensPosition = focusLensPosition
    }
}

enum Feets: Float, CustomStringConvertible, CaseIterable, Identifiable {
    case case1 = 0.0
    case case2 = 0.5
    case case3 = 1.0

    var id: Float { self.rawValue }

    var description: String {
        switch self {
        case .case1:
            return "∞ ft"
        case .case2:
            return "4"
        case .case3:
            return "Auto"
        }
    }
}

struct FocusPicker: View {
    @Binding var selectedFocus: Float

    var body: some View {
        Picker(selection: $selectedFocus, label: Text("")) {
            ForEach(Feets.allCases) { feet in
                Text(feet.description)
            }
            .animation(.none)
            .background(Color.clear)
            .pickerStyle(WheelPickerStyle())
        }
        .frame(width: 60, height: 200)
        .border(Color.gray, width: 5)
        .clipped()
    }
}
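If you later want to push the selected value back to the camera (something you hinted at with the commented-out setFocusModeLocked call in your CustomCameraController), a rough sketch along these lines should work. The applyLensPosition helper is hypothetical and assumes the selected Float is already a valid lens position between 0.0 and 1.0:

import AVFoundation

// Hypothetical helper, not part of the example above: locks the focus
// at the given lens position (0.0...1.0) on the supplied capture device.
func applyLensPosition(_ position: Float, to camera: AVCaptureDevice) {
    guard camera.isLockingFocusWithCustomLensPositionSupported else { return }
    do {
        try camera.lockForConfiguration()
        camera.setFocusModeLocked(lensPosition: position, completionHandler: nil)
        camera.unlockForConfiguration()
    } catch {
        print("Could not lock the camera for configuration: \(error)")
    }
}

You could call it with your currentCamera whenever the picker selection changes.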
