import UIKit
import CoreML
import Vision
import PlaygroundSupport
/// Displays an image and runs a Core ML request on it via Vision.
class ImageViewController: UIViewController {
    override func viewDidLoad() {
        super.viewDidLoad()

        // Load the input image from the playground's Resources folder.
        guard let image = UIImage(named: "your_image_name.jpg") else {
            fatalError("Failed to load image.")
        }

        let imageView = UIImageView(image: image)
        imageView.contentMode = .scaleAspectFit
        imageView.frame = CGRect(x: 0, y: 0, width: 500, height: 500)
        view.addSubview(imageView)

        // Convert the UIImage to a CVPixelBuffer at the model's expected input size.
        guard let pixelBuffer = image.pixelBuffer(width: 224, height: 224) else {
            fatalError("Failed to convert image to pixel buffer.")
        }

        // Wrap the Core ML model for use with Vision.
        guard let model = try? VNCoreMLModel(for: YourObjectDetectionModel().model) else {
            fatalError("Failed to load Core ML model.")
        }

        let request = VNCoreMLRequest(model: model) { request, error in
            if let error = error {
                print("Object detection error: \(error)")
                return
            }
            // Classifier models return VNClassificationObservation; true object
            // detection models return VNRecognizedObjectObservation instead
            // (see the sketch after this class).
            guard let results = request.results as? [VNClassificationObservation],
                  let topResult = results.first else {
                print("No results found.")
                return
            }
            print("Detected object: \(topResult.identifier), confidence: \(topResult.confidence)")
        }

        // Perform the request synchronously; acceptable for a playground demo.
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: .up)
        do {
            try handler.perform([request])
        } catch {
            print("Object detection request failed: \(error)")
        }
    }
}
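// The completion handler above assumes a classifier. If the model really is an
// object detection model (e.g. a YOLO-style network), Vision delivers
// VNRecognizedObjectObservation values instead. A minimal sketch of handling
// those results might look like this helper (the name is illustrative):
func handleDetectionResults(_ request: VNRequest) {
    guard let results = request.results as? [VNRecognizedObjectObservation] else {
        print("No detection results found.")
        return
    }
    for observation in results {
        // Each observation carries a bounding box (normalized coordinates,
        // origin at the bottom-left) and a ranked list of labels.
        guard let topLabel = observation.labels.first else { continue }
        print("Detected \(topLabel.identifier) " +
              "(confidence: \(topLabel.confidence)) at \(observation.boundingBox)")
    }
}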
extension UIImage {
    /// Renders the image into a new 32ARGB CVPixelBuffer of the given size.
    func pixelBuffer(width: Int, height: Int) -> CVPixelBuffer? {
        var pixelBuffer: CVPixelBuffer?
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
                     kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue]
        let status = CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                                         kCVPixelFormatType_32ARGB,
                                         attrs as CFDictionary, &pixelBuffer)
        guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
            return nil
        }

        CVPixelBufferLockBaseAddress(buffer, [])
        // Unlock on every exit path, including the early return below.
        defer { CVPixelBufferUnlockBaseAddress(buffer, []) }

        guard let context = CGContext(data: CVPixelBufferGetBaseAddress(buffer),
                                      width: width,
                                      height: height,
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                      space: CGColorSpaceCreateDeviceRGB(),
                                      bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
            return nil
        }
        // Flip the coordinate system so UIKit's top-left origin maps onto
        // Core Graphics' bottom-left origin.
        context.translateBy(x: 0, y: CGFloat(height))
        context.scaleBy(x: 1, y: -1)

        UIGraphicsPushContext(context)
        draw(in: CGRect(x: 0, y: 0, width: width, height: height))
        UIGraphicsPopContext()

        return buffer
    }
}
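// The manual pixel-buffer conversion above is not strictly required: Vision can
// consume a CGImage directly and will scale or crop the input to the model's
// expected size according to the request's imageCropAndScaleOption. A sketch of
// that simpler path, assuming a request built as in viewDidLoad:
func classify(_ image: UIImage, with request: VNCoreMLRequest) {
    guard let cgImage = image.cgImage else { return }
    request.imageCropAndScaleOption = .centerCrop
    let handler = VNImageRequestHandler(cgImage: cgImage, orientation: .up)
    do {
        try handler.perform([request])
    } catch {
        print("Vision request failed: \(error)")
    }
}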
/// Loads a compiled Core ML model (.mlmodelc) from the playground's resources.
class YourObjectDetectionModel {
    let model: MLModel

    init() {
        let modelConfig = MLModelConfiguration()
        guard let modelURL = Bundle.main.url(forResource: "YourObjectDetectionModel",
                                             withExtension: "mlmodelc") else {
            fatalError("Failed to find Core ML model file.")
        }
        do {
            model = try MLModel(contentsOf: modelURL, configuration: modelConfig)
        } catch {
            fatalError("Failed to load Core ML model: \(error)")
        }
    }
}
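// Bundle.main.url(forResource:withExtension:) above expects an already-compiled
// .mlmodelc directory. If only a raw .mlmodel file is bundled, Core ML can
// compile it at runtime. A minimal sketch, assuming the same resource name:
func loadUncompiledModel() throws -> MLModel {
    guard let rawURL = Bundle.main.url(forResource: "YourObjectDetectionModel",
                                       withExtension: "mlmodel") else {
        fatalError("Failed to find raw .mlmodel file.")
    }
    // compileModel(at:) writes the compiled model to a temporary location.
    let compiledURL = try MLModel.compileModel(at: rawURL)
    return try MLModel(contentsOf: compiledURL)
}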
// Present the view controller as the playground's live view.
let viewController = ImageViewController()
PlaygroundPage.current.liveView = viewController