// Edit the code below.

import UIKit
import Vision
import PlaygroundSupport

/// Displays a bundled image and runs a Core ML object-detection model on it
/// via Vision, printing the top result to the console.
class ImageViewController: UIViewController {
    override func viewDidLoad() {
        super.viewDidLoad()
        
        // Load the bundled image.
        guard let image = UIImage(named: "your_image_name.jpg") else {
            fatalError("Failed to load image.")
        }
        
        // Display the original image.
        let imageView = UIImageView(image: image)
        imageView.contentMode = .scaleAspectFit
        imageView.frame = CGRect(x: 0, y: 0, width: 500, height: 500)
        view.addSubview(imageView)
        
        // Convert the image into a 224x224 pixel buffer for the model.
        guard let pixelBuffer = image.pixelBuffer(width: 224, height: 224) else {
            fatalError("Failed to convert image to pixel buffer.")
        }
        
        // Wrap the Core ML model for use with Vision.
        guard let model = try? VNCoreMLModel(for: YourObjectDetectionModel().model) else {
            fatalError("Failed to load Core ML model.")
        }
        
        // Create a Vision request for object detection.
        let request = VNCoreMLRequest(model: model) { request, error in
            if let error = error {
                print("Object detection error: \(error)")
                return
            }
            
            // Object-detection models produce VNRecognizedObjectObservation;
            // classifier models produce VNClassificationObservation. The
            // original code only handled the classifier case, so a true
            // detection model would always print "No results found.".
            // Handle the detection case first, then fall back to classification.
            if let detections = request.results as? [VNRecognizedObjectObservation],
               let top = detections.first,
               let label = top.labels.first {
                print("Detected object: \(label.identifier), Confidence: \(label.confidence)")
                return
            }
            
            guard let results = request.results as? [VNClassificationObservation],
                  let topResult = results.first else {
                print("No results found.")
                return
            }
            
            print("Detected object: \(topResult.identifier), Confidence: \(topResult.confidence)")
            
            // Further processing/rendering of detections can be added here.
        }
        
        // Run object detection on the pixel buffer.
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: .up)
        do {
            try handler.perform([request])
        } catch {
            print("Object detection request failed: \(error)")
        }
    }
}

extension UIImage {
    /// Renders the image into a newly created 32ARGB `CVPixelBuffer` of the
    /// given size (scaled to fill), suitable as Core ML model input.
    /// - Parameters:
    ///   - width: Target buffer width in pixels.
    ///   - height: Target buffer height in pixels.
    /// - Returns: The filled pixel buffer, or `nil` if buffer or bitmap-context
    ///   creation fails.
    func pixelBuffer(width: Int, height: Int) -> CVPixelBuffer? {
        var pixelBuffer: CVPixelBuffer?
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
                     kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue]
        let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                         width,
                                         height,
                                         kCVPixelFormatType_32ARGB,
                                         attrs as CFDictionary,
                                         &pixelBuffer)
        
        guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
            return nil
        }
        
        CVPixelBufferLockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
        // `defer` guarantees the buffer is unlocked on every exit path —
        // the original leaked the lock when context creation failed.
        defer { CVPixelBufferUnlockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0)) }
        
        // Bug fix: the original force-unwrapped the context (`context!`),
        // which crashes if CGContext creation fails; return nil instead.
        guard let context = CGContext(data: CVPixelBufferGetBaseAddress(buffer),
                                      width: width,
                                      height: height,
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                      space: CGColorSpaceCreateDeviceRGB(),
                                      bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
            return nil
        }
        
        // Flip the coordinate system: CGContext's origin is bottom-left,
        // while UIKit drawing expects top-left.
        context.translateBy(x: 0, y: CGFloat(height))
        context.scaleBy(x: 1, y: -1)
        
        UIGraphicsPushContext(context)
        draw(in: CGRect(x: 0, y: 0, width: width, height: height))
        UIGraphicsPopContext()
        
        return buffer
    }
}

// Replace "YourObjectDetectionModel" with the actual name of your Core ML object-detection model.
/// Loads a compiled Core ML model ("YourObjectDetectionModel.mlmodelc") from
/// the main bundle and exposes it as `model`.
/// Calls `fatalError` if the model file is missing or fails to load —
/// acceptable here because a bundled resource that is absent is a programmer error.
class YourObjectDetectionModel {
    let model: MLModel
    
    init() {
        let modelConfig = MLModelConfiguration()
        guard let modelURL = Bundle.main.url(forResource: "YourObjectDetectionModel",
                                             withExtension: "mlmodelc") else {
            fatalError("Failed to find Core ML model file.")
        }
        
        do {
            model = try MLModel(contentsOf: modelURL, configuration: modelConfig)
        } catch {
            // Include the underlying error so the failure is diagnosable;
            // the original message discarded it.
            fatalError("Failed to load Core ML model: \(error)")
        }
    }
}

// Present the view controller as the playground's live view.
PlaygroundPage.current.liveView = ImageViewController()