Commit a67016f7 authored by Nguyễn Văn An's avatar Nguyễn Văn An

up model card id

parent 7088d89b
No preview for this file type
This diff is collapsed.
<?xml version="1.0" encoding="UTF-8"?>
<Bucket
uuid = "B08B0591-1A45-4415-9C04-B83659FBFE35"
type = "0"
version = "2.0">
</Bucket>
...@@ -35,7 +35,8 @@ typealias FileInfo = (name: String, extension: String) ...@@ -35,7 +35,8 @@ typealias FileInfo = (name: String, extension: String)
/// Information about the MobileNet model. /// Information about the MobileNet model.
enum MobileNet { enum MobileNet {
static let modelInfo: FileInfo = (name: "liveness", extension: "tflite") static let modelInfo: FileInfo = (name: "liveness", extension: "tflite")
static let cardModel: FileInfo = (name: "valid_card_10102020", extension: "tflite") static let cardModel: FileInfo = (name: "idcard15072021", extension: "tflite")
static let landMarkModel: FileInfo = (name: "face_detection_front", extension: "tflite")
} }
/// This class handles all data preprocessing and makes calls to run inference on a given frame /// This class handles all data preprocessing and makes calls to run inference on a given frame
...@@ -75,7 +76,7 @@ class SBKModelDataHandler { ...@@ -75,7 +76,7 @@ class SBKModelDataHandler {
/// labels files are successfully loaded from the app's main bundle. Default `threadCount` is 1. /// labels files are successfully loaded from the app's main bundle. Default `threadCount` is 1.
init?(modelFileInfo: FileInfo, threadCount: Int = 1) { init?(modelFileInfo: FileInfo, threadCount: Int = 1) {
let modelFilename = modelFileInfo.name let modelFilename = modelFileInfo.name
// Construct the path to the model file. // Construct the path to the model file.
let bundle = Bundle(for: SBKRecordFace.self) let bundle = Bundle(for: SBKRecordFace.self)
guard let modelPath = bundle.path( guard let modelPath = bundle.path(
...@@ -86,7 +87,6 @@ class SBKModelDataHandler { ...@@ -86,7 +87,6 @@ class SBKModelDataHandler {
return nil return nil
} }
let delegate = MetalDelegate()
// Specify the options for the `Interpreter`. // Specify the options for the `Interpreter`.
self.threadCount = threadCount self.threadCount = threadCount
...@@ -94,7 +94,7 @@ class SBKModelDataHandler { ...@@ -94,7 +94,7 @@ class SBKModelDataHandler {
options.threadCount = threadCount options.threadCount = threadCount
do { do {
// Create the `Interpreter`. // Create the `Interpreter`.
interpreter = try Interpreter(modelPath: modelPath, options: options, delegates: [delegate]) interpreter = try Interpreter(modelPath: modelPath, options: options)
// Allocate memory for the model's input `Tensor`s. // Allocate memory for the model's input `Tensor`s.
try interpreter.allocateTensors() try interpreter.allocateTensors()
} catch let error { } catch let error {
...@@ -142,87 +142,63 @@ class SBKModelDataHandler { ...@@ -142,87 +142,63 @@ class SBKModelDataHandler {
// MARK: - Internal Methods // MARK: - Internal Methods
/// Performs image preprocessing, invokes the `Interpreter`, and processes the inference results. /// Performs image preprocessing, invokes the `Interpreter`, and processes the inference results.
func runModel(onFrame pixelBuffer: CVPixelBuffer) -> [Float]? { func runModel(onFrame pixelBuffer: CVPixelBuffer) -> [Float]? {
let sourcePixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer) let sourcePixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer)
assert(sourcePixelFormat == kCVPixelFormatType_32ARGB || assert(sourcePixelFormat == kCVPixelFormatType_32ARGB ||
sourcePixelFormat == kCVPixelFormatType_32BGRA || sourcePixelFormat == kCVPixelFormatType_32BGRA ||
sourcePixelFormat == kCVPixelFormatType_32RGBA) sourcePixelFormat == kCVPixelFormatType_32RGBA)
let imageChannels = 4
let imageChannels = 4 assert(imageChannels >= inputChannels)
assert(imageChannels >= inputChannels) let scaledSize = CGSize(width: inputWidth, height: inputHeight)
guard let thumbnailPixelBuffer = pixelBuffer.resized(to: scaledSize) else {
// Crops the image to the biggest square in the center and scales it down to model dimensions.
let scaledSize = CGSize(width: inputWidth, height: inputHeight)
guard let thumbnailPixelBuffer = pixelBuffer.centerThumbnail(ofSize: scaledSize) else {
return nil
}
let interval: TimeInterval
let outputTensor: Tensor
do {
let inputTensor = try interpreter.input(at: 0)
// Remove the alpha component from the image buffer to get the RGB data.
guard let rgbData = rgbDataFromBuffer(
thumbnailPixelBuffer,
byteCount: batchSize * inputWidth * inputHeight * inputChannels,
isModelQuantized: inputTensor.dataType == .uInt8
) else {
print("Failed to convert the image buffer to RGB data.")
return nil return nil
} }
let interval: TimeInterval
let imageCap = UIImage(data: rgbData) let outputTensor: Tensor
// self.fromImage(image: imageCap!, datas: rgbData, imagesss: imageCap!) do {
let inputTensor = try interpreter.input(at: 0)
// Copy the RGB data to the input `Tensor`. guard let rgbData = rgbDataFromBuffer(
try interpreter.copy(rgbData, toInputAt: 0) thumbnailPixelBuffer,
byteCount: batchSize * inputWidth * inputHeight * inputChannels,
// Run inference by invoking the `Interpreter`. isModelQuantized: inputTensor.dataType == .uInt8
let startDate = Date() ) else {
try interpreter.invoke() print("Failed to convert the image buffer to RGB data.")
interval = Date().timeIntervalSince(startDate) * 1000 return nil
}
// Get the output `Tensor` to process the inference results. try interpreter.copy(rgbData, toInputAt: 0)
outputTensor = try interpreter.output(at: 0) let startDate = Date()
} catch let error { try interpreter.invoke()
print("Failed to invoke the interpreter with error: \(error.localizedDescription)") interval = Date().timeIntervalSince(startDate) * 1000
return nil outputTensor = try interpreter.output(at: 0)
} } catch let error {
print("Failed to invoke the interpreter with error: \(error.localizedDescription)")
let results: [Float]
switch outputTensor.dataType {
case .uInt8:
guard let quantization = outputTensor.quantizationParameters else {
print("No results returned because the quantization values for the output tensor are nil.")
return nil return nil
} }
let quantizedResults = [UInt8](outputTensor.data)
results = quantizedResults.map { let results: [Float]
quantization.scale * Float(Int($0) - quantization.zeroPoint) switch outputTensor.dataType {
case .uInt8:
guard let quantization = outputTensor.quantizationParameters else {
print("No results returned because the quantization values for the output tensor are nil.")
return nil
}
let quantizedResults = [UInt8](outputTensor.data)
results = quantizedResults.map {
quantization.scale * Float(Int($0) - quantization.zeroPoint)
}
case .float32:
results = [Float32](unsafeData: outputTensor.data) ?? []
default:
print("Output tensor data type \(outputTensor.dataType) is unsupported for this example app.")
return nil
} }
case .float32:
results = [Float32](unsafeData: outputTensor.data) ?? [] return results
default:
print("Output tensor data type \(outputTensor.dataType) is unsupported for this example app.")
return nil
} }
return results
}
/// Returns the RGB data representation of the given image buffer with the specified `byteCount`.
///
/// - Parameters
/// - buffer: The pixel buffer to convert to RGB data.
/// - byteCount: The expected byte count for the RGB data calculated using the values that the
/// model was trained on: `batchSize * imageWidth * imageHeight * componentsCount`.
/// - isModelQuantized: Whether the model is quantized (i.e. fixed point values rather than
/// floating point values).
/// - Returns: The RGB data representation of the image buffer or `nil` if the buffer could not be
/// converted.
private func rgbDataFromBuffer( private func rgbDataFromBuffer(
_ buffer: CVPixelBuffer, _ buffer: CVPixelBuffer,
byteCount: Int, byteCount: Int,
...@@ -293,26 +269,12 @@ class SBKModelDataHandler { ...@@ -293,26 +269,12 @@ class SBKModelDataHandler {
// MARK: - Extensions // MARK: - Extensions
extension Data { extension Data {
/// Creates a new buffer by copying the buffer pointer of the given array.
///
/// - Warning: The given array's element type `T` must be trivial in that it can be copied bit
/// for bit with no indirection or reference-counting operations; otherwise, reinterpreting
/// data from the resulting buffer has undefined behavior.
/// - Parameter array: An array with elements of type `T`.
init<T>(copyingBufferOf array: [T]) { init<T>(copyingBufferOf array: [T]) {
self = array.withUnsafeBufferPointer(Data.init) self = array.withUnsafeBufferPointer(Data.init)
} }
} }
extension Array { extension Array {
/// Creates a new array from the bytes of the given unsafe data.
///
/// - Warning: The array's `Element` type must be trivial in that it can be copied bit for bit
/// with no indirection or reference-counting operations; otherwise, copying the raw bytes in
/// the `unsafeData`'s buffer to a new array returns an unsafe copy.
/// - Note: Returns `nil` if `unsafeData.count` is not a multiple of
/// `MemoryLayout<Element>.stride`.
/// - Parameter unsafeData: The data containing the bytes to turn into an array.
init?(unsafeData: Data) { init?(unsafeData: Data) {
guard unsafeData.count % MemoryLayout<Element>.stride == 0 else { return nil } guard unsafeData.count % MemoryLayout<Element>.stride == 0 else { return nil }
#if swift(>=5.0) #if swift(>=5.0)
...@@ -327,20 +289,3 @@ extension Array { ...@@ -327,20 +289,3 @@ extension Array {
#endif // swift(>=5.0) #endif // swift(>=5.0)
} }
} }
extension UIImage {
    /// Reads the RGBA color at `pos` from the raw byte buffer `dataImage`.
    ///
    /// Assumes `dataImage` is tightly-packed 4-bytes-per-pixel RGBA whose row
    /// width equals `image.size.width` in pixels — TODO confirm: `size.width`
    /// is in points, so this is only correct for scale-1 images.
    ///
    /// - Parameters:
    ///   - pos: Pixel coordinate to sample.
    ///   - dataImage: Raw pixel bytes the color is read from.
    ///   - image: Image used only for its width when computing the byte offset.
    /// - Returns: The sampled color with components normalized to 0...1.
    func getPixelColor(pos: CGPoint, dataImage: Data, image: UIImage) -> UIColor {
        // BUGFIX: removed unused `pixelData`/`data` locals that force-unwrapped
        // `image.cgImage!.dataProvider!.data` without ever being read — they
        // could crash on CIImage-backed images and did no work.
        let pixelInfo: Int = ((Int(image.size.width) * Int(pos.y)) + Int(pos.x)) * 4
        let r = CGFloat(dataImage[pixelInfo]) / CGFloat(255.0)
        let g = CGFloat(dataImage[pixelInfo + 1]) / CGFloat(255.0)
        let b = CGFloat(dataImage[pixelInfo + 2]) / CGFloat(255.0)
        let a = CGFloat(dataImage[pixelInfo + 3]) / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
//
// AnchorOption.swift
// OCR-SDK
//
// Created by annguyen on 12/03/2021.
// Copyright © 2021 itsol. All rights reserved.
//
import Foundation
/// Configuration for SSD-style anchor generation (sizes, scales, offsets and
/// per-layer feature-map geometry). Pure data holder plus count accessors.
class AnchorOption {
    var inputSizeWidth: Int
    var inputSizeHeight: Int
    var minScale: Double
    var maxScale: Double
    var anchorOffsetX: Double
    var anchorOffsetY: Double
    var numLayers: Int
    var featureMapWidth: [Int]
    var featureMapHeight: [Int]
    var strides: [Int]
    var aspectRatios: [Double]
    var reduceBoxesInLowestLayer: Bool
    var interpolatedScaleAspectRatio: Double
    var fixedAnchorSize: Bool

    init(inputSizeWidth: Int, inputSizeHeight: Int, minScale: Double, maxScale: Double, anchorOffsetX: Double, anchorOffsetY: Double, numLayers: Int, featureMapWidth: [Int], featureMapHeight: [Int], strides: [Int], aspectRatios: [Double], reduceBoxesInLowestLayer: Bool, interpolatedScaleAspectRatio: Double, fixedAnchorSize: Bool) {
        self.inputSizeWidth = inputSizeWidth
        self.inputSizeHeight = inputSizeHeight
        self.minScale = minScale
        self.maxScale = maxScale
        self.anchorOffsetX = anchorOffsetX
        self.anchorOffsetY = anchorOffsetY
        self.numLayers = numLayers
        self.featureMapWidth = featureMapWidth
        self.featureMapHeight = featureMapHeight
        self.strides = strides
        self.aspectRatios = aspectRatios
        self.reduceBoxesInLowestLayer = reduceBoxesInLowestLayer
        self.interpolatedScaleAspectRatio = interpolatedScaleAspectRatio
        self.fixedAnchorSize = fixedAnchorSize
    }

    /// Number of stride entries (one per layer).
    func stridesSize() -> Int { strides.count }

    /// Number of feature-map height entries.
    func featureMapHeightSize() -> Int { featureMapHeight.count }

    /// Number of feature-map width entries.
    func featureMapWidthSize() -> Int { featureMapWidth.count }
}
//
// Detection.swift
// OCR-SDK
//
// Created by annguyen on 12/03/2021.
// Copyright © 2021 itsol. All rights reserved.
//
import Foundation
/// One detector result: a confidence score, a bounding box given as
/// (xMin, yMin, width, height), its class id, and associated landmarks.
struct Detection {
    var score: Double
    var xMin: Double
    var yMin: Double
    var width: Double
    var height: Double
    var classID: Int
    var landMark: [Landmark]

    init(score: Double, xMin: Double, yMin: Double, width: Double, height: Double, classID: Int, landMark: [Landmark]) {
        self.score = score
        self.xMin = xMin
        self.yMin = yMin
        self.width = width
        self.height = height
        self.classID = classID
        self.landMark = landMark
    }
}
//
// EMSimilarity.swift
// SwiftSim
//
// Created by Evan Moss on 8/1/16.
// Copyright © 2016 Enterprising Technologies LLC. All rights reserved.
//
// The MIT License (MIT)
//
// Copyright (c) 2016 Evan Moss
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
import Foundation
/// The similarity / distance metrics supported by `EMSimilarity.compute`.
enum EMSimilarityMode {
/// Cosine similarity: dot(A, B) / (|A| * |B|).
case Cosine
/// Tanimoto coefficient: dot / (|A|^2 + |B|^2 - dot).
case Tanimoto
/// Ochiai coefficient over the vectors treated as sets.
case Ochiai
/// Jaccard index: |A ∩ B| / |A ∪ B| over the vectors treated as sets.
case JaccardIndex
/// 1 - Jaccard index.
case JaccardDistance
/// Dice coefficient: 2|A ∩ B| / (|A| + |B|) over the vectors treated as sets.
case Dice
/// Hamming distance: count of positions where the vectors differ.
case Hamming
}
/// How `EMSimilarity.compute` reacts when a metric that requires
/// equal-length vectors receives vectors of different lengths.
enum EMVectorSizeMismatchMode {
/// Return the error sentinel (-1) immediately.
case Bail
/// Truncate the longer vector to the shorter one's length and retry.
case Truncate
}
/// Computes vector similarity / distance metrics over `[Double]` inputs.
///
/// The active metric and the size-mismatch policy are each kept on a small
/// stack (push/pop), with `Cosine` and `Bail` as the defaults that can never
/// be popped off. `compute` returns -1.0 on any caught error condition.
class EMSimilarity {
    // MARK: - Similarity-mode stack

    /// Stack of metrics; the last element is the active one. Never empty.
    private var currentSimMode = [EMSimilarityMode.Cosine]

    /// Makes `mode` the active similarity metric.
    func pushSimMode(mode: EMSimilarityMode) {
        self.currentSimMode.append(mode)
    }

    /// Pops the active metric unless doing so would empty the stack.
    func popSimMode() {
        if self.currentSimMode.count > 1 {
            let _ = self.currentSimMode.popLast()
        }
    }

    /// The currently active similarity metric.
    func getCurrentSimMode() -> EMSimilarityMode? {
        return self.currentSimMode.last
    }

    // MARK: - Mismatch-mode stack

    /// Stack of mismatch policies; the last element is the active one.
    private var currentMismatchMode = [EMVectorSizeMismatchMode.Bail]

    /// Makes `mode` the active size-mismatch policy.
    func pushMismatchMode(mode: EMVectorSizeMismatchMode) {
        self.currentMismatchMode.append(mode)
    }

    /// Pops the active mismatch policy unless doing so would empty the stack.
    func popMismatchMode() {
        if self.currentMismatchMode.count > 1 {
            let _ = self.currentMismatchMode.popLast()
        }
    }

    /// The currently active size-mismatch policy.
    func getCurrentMismatchMode() -> EMVectorSizeMismatchMode? {
        return self.currentMismatchMode.last
    }

    // MARK: - Metric implementations

    /// Dot product of `A` and `B` (caller guarantees equal lengths).
    /// BUGFIX: iterates `A.indices` instead of `0...A.count-1`, which trapped
    /// on empty input (`0...(-1)` is an invalid ClosedRange).
    private func dot(A: [Double], B: [Double]) -> Double {
        var x: Double = 0
        for i in A.indices {
            x += A[i] * B[i]
        }
        return x
    }

    /// Euclidean (L2) magnitude of `A`.
    private func magnitude(A: [Double]) -> Double {
        var x: Double = 0
        for elt in A {
            x += elt * elt
        }
        return sqrt(x)
    }

    /// Cosine similarity: dot(A, B) / (|A| * |B|).
    private func cosineSim(A: [Double], B: [Double]) -> Double {
        return dot(A: A, B: B) / (magnitude(A: A) * magnitude(A: B))
    }

    /// Tanimoto similarity: dot / (|A|^2 + |B|^2 - dot).
    private func tanimotoSim(A: [Double], B: [Double]) -> Double {
        let Amag = magnitude(A: A)
        let Bmag = magnitude(A: B)
        let AdotB = dot(A: A, B: B)
        return AdotB / (Amag * Amag + Bmag * Bmag - AdotB)
    }

    /// Ochiai similarity over the vectors' unique elements.
    private func ochiaiSim(A: [Double], B: [Double]) -> Double {
        let a = Set(A)
        let b = Set(B)
        return Double(a.intersection(b).count) / sqrt(Double(a.count) * Double(b.count))
    }

    /// Jaccard index over the vectors' unique elements.
    private func jaccardIndex(A: [Double], B: [Double]) -> Double {
        let a = Set(A)
        let b = Set(B)
        return Double(a.intersection(b).count) / Double(a.union(b).count)
    }

    /// Jaccard distance: 1 - Jaccard index.
    private func jaccardDist(A: [Double], B: [Double]) -> Double {
        return 1.0 - jaccardIndex(A: A, B: B)
    }

    /// Dice coefficient over the vectors' unique elements.
    private func diceCoef(A: [Double], B: [Double]) -> Double {
        let a = Set(A)
        let b = Set(B)
        return 2.0 * Double(a.intersection(b).count) / (Double(a.count) + Double(b.count))
    }

    /// Hamming distance: number of positions where A and B differ.
    /// Empty input yields 0. Uses `A.indices` for the same empty-range safety
    /// as `dot`.
    private func hammingDist(A: [Double], B: [Double]) -> Double {
        var x: Double = 0
        if A.isEmpty {
            return x
        }
        for i in A.indices {
            if A[i] != B[i] {
                x += 1
            }
        }
        return x
    }

    // MARK: - Mode capability sets

    /// Metrics that index both vectors pairwise and need equal lengths.
    /// (Renamed from the original's misspelled `encforceEqualVectorSizes`.)
    private let enforceEqualVectorSizes: Set<EMSimilarityMode> = [.Cosine, .Tanimoto, .Hamming]
    /// Metrics that would divide by zero if either vector is empty.
    private let bailOnEmptyInput: Set<EMSimilarityMode> = [.Cosine, .Tanimoto, .Ochiai]
    /// Metrics that are well-defined when both vectors are empty.
    private let allowEmptyInputs: Set<EMSimilarityMode> = [.Hamming]

    /**
     * Main compute entry point for Double vectors.
     * Returns the similarity result, or -1.0 on any caught error
     * (empty input where disallowed, or a size mismatch under `.Bail`).
     */
    func compute(A: [Double], B: [Double]) -> Double {
        // Resolve the active metric (the stack is never empty in practice).
        var mode = EMSimilarityMode.Cosine
        if let _mode = self.getCurrentSimMode() {
            mode = _mode
        }
        else {
            return -1
        }
        // Both vectors empty -> divide by zero for most metrics -> D.N.E.
        if A.isEmpty && B.isEmpty && !allowEmptyInputs.contains(mode) {
            return -1
        }
        // One empty vector would cause a divide-by-zero for these metrics.
        if bailOnEmptyInput.contains(mode) && (A.isEmpty || B.isEmpty) {
            return -1
        }
        // Handle length mismatch for metrics that require equal lengths.
        if enforceEqualVectorSizes.contains(mode) && A.count != B.count {
            if let mismatchMode = self.getCurrentMismatchMode() {
                switch mismatchMode {
                case .Bail:
                    return -1
                case .Truncate:
                    // Truncate the longer vector to the shorter one's length.
                    let a = A.count < B.count ? A : B
                    let _b = A.count < B.count ? B : A
                    var b = [Double]()
                    if a.count > 0 {
                        for i in a.indices {
                            b.append(_b[i])
                        }
                    }
                    return compute(A: a, B: b)
                }
            }
            else {
                return -1
            }
        }
        switch mode {
        case .Cosine:
            return cosineSim(A: A, B: B)
        case .Tanimoto:
            return tanimotoSim(A: A, B: B)
        case .Ochiai:
            return ochiaiSim(A: A, B: B)
        case .JaccardIndex:
            return jaccardIndex(A: A, B: B)
        case .JaccardDistance:
            return jaccardDist(A: A, B: B)
        case .Dice:
            return diceCoef(A: A, B: B)
        case .Hamming:
            return hammingDist(A: A, B: B)
        }
    }
}
This diff is collapsed.
//
// LandMark.swift
// OCR-SDK
//
// Created by annguyen on 12/03/2021.
// Copyright © 2021 itsol. All rights reserved.
//
import Foundation
/// A single 2-D landmark point produced by the face-detection model.
/// Coordinate system (pixels vs. normalized) depends on the producer —
/// confirm against the detector's post-processing.
class Landmark {
    var x: Double
    var y: Double

    init(x: Double, y: Double) {
        self.x = x
        self.y = y
    }
}
//
// NormalizeOp.swift
// movanai
//
// Created by Nguyen Van An on 4/4/21.
//
import Foundation
/// A pair of normalization coefficients. The original code stores the two
/// values under positional labels; their exact roles (e.g. mean/std) are not
/// shown here — confirm at the call sites.
class NormalizeOp {
    var x: Float
    var y: Float

    init(_ x: Float, _ y: Float) {
        self.x = x
        self.y = y
    }
}
//
// OptionsFace.swift
// OCR-SDK
//
// Created by annguyen on 12/03/2021.
// Copyright © 2021 itsol. All rights reserved.
//
import Foundation
/// Decoder configuration for the face-detection model's raw output tensors:
/// box/keypoint tensor layout, score clipping and thresholding, and the
/// scale factors applied when converting raw values to box coordinates.
class OptionsFace {
    var numClasses: Int
    var numBoxes: Int
    var numCoords: Int
    var keypointCoordOffset: Int
    var ignoreClasses: [Int]
    var scoreClippingThresh: Double
    var minScoreThresh: Double
    var numKeypoints: Int
    var numValuesPerKeypoint: Int
    var boxCoordOffset: Int
    var xScale: Double
    var yScale: Double
    var wScale: Double
    var hScale: Double
    var applyExponentialOnBoxSize: Bool
    var reverseOutputOrder: Bool
    var sigmoidScore: Bool
    var flipVertically: Bool

    init(numClasses: Int, numBoxes: Int, numCoords: Int, keypointCoordOffset: Int, ignoreClasses: [Int], scoreClippingThresh: Double, minScoreThresh: Double, numKeypoints: Int, numValuesPerKeypoint: Int, boxCoordOffset: Int, xScale: Double, yScale: Double, wScale: Double, hScale: Double, applyExponentialOnBoxSize: Bool, reverseOutputOrder: Bool, sigmoidScore: Bool, flipVertically: Bool) {
        self.numClasses = numClasses
        self.numBoxes = numBoxes
        self.numCoords = numCoords
        self.keypointCoordOffset = keypointCoordOffset
        self.ignoreClasses = ignoreClasses
        self.scoreClippingThresh = scoreClippingThresh
        self.minScoreThresh = minScoreThresh
        self.numKeypoints = numKeypoints
        self.numValuesPerKeypoint = numValuesPerKeypoint
        self.boxCoordOffset = boxCoordOffset
        self.xScale = xScale
        self.yScale = yScale
        self.wScale = wScale
        self.hScale = hScale
        self.applyExponentialOnBoxSize = applyExponentialOnBoxSize
        self.reverseOutputOrder = reverseOutputOrder
        self.sigmoidScore = sigmoidScore
        self.flipVertically = flipVertically
    }
}
/// One prior/anchor box described by its center and extent.
class Anchor {
    var xCenter: Double
    var yCenter: Double
    var h: Double
    var w: Double

    init(xCenter: Double, yCenter: Double, h: Double, w: Double) {
        self.xCenter = xCenter
        self.yCenter = yCenter
        self.h = h
        self.w = w
    }
}
...@@ -11,6 +11,7 @@ class SBKResultCapture: UIViewController { ...@@ -11,6 +11,7 @@ class SBKResultCapture: UIViewController {
@IBOutlet weak var imgPhotoCard: UIImageView! @IBOutlet weak var imgPhotoCard: UIImageView!
public var imageData: Data? public var imageData: Data?
public var dataCrop: Data? public var dataCrop: Data?
var cropZone: CGRect?
@IBOutlet weak var btnNext: UIButton! @IBOutlet weak var btnNext: UIButton!
@IBOutlet weak var btnClose: UIButton! @IBOutlet weak var btnClose: UIButton!
@IBOutlet weak var lbDescription: UILabel! @IBOutlet weak var lbDescription: UILabel!
...@@ -61,7 +62,9 @@ class SBKResultCapture: UIViewController { ...@@ -61,7 +62,9 @@ class SBKResultCapture: UIViewController {
if UIDevice.current.userInterfaceIdiom == .pad { if UIDevice.current.userInterfaceIdiom == .pad {
cropImage = SBKValidateInput.shared.cropImageHorizontal(image: imageCap!, rect: CGRect(x: imageCap!.size.width * 1 / 10, y: imageCap!.size.height * 3 / 20, width: imageCap!.size.width * 8 / 10, height: imageCap!.size.height * 8 / 10), scale: 1.0)!.rotate(radians: .pi / 2) cropImage = SBKValidateInput.shared.cropImageHorizontal(image: imageCap!, rect: CGRect(x: imageCap!.size.width * 1 / 10, y: imageCap!.size.height * 3 / 20, width: imageCap!.size.width * 8 / 10, height: imageCap!.size.height * 8 / 10), scale: 1.0)!.rotate(radians: .pi / 2)
} else { } else {
cropImage = self.cropImage(image: imageCap!, rect: CGRect(x: imageCap!.size.width / 20, y: imageCap!.size.height / 8 + imageCap!.size.height / 50, width: imageCap!.size.width * 18 / 20, height: imageCap!.size.width * 18 / 20 * 3 / 4 ), scale: scale) if self.cropZone != nil {
cropImage = imageCap!.crop(rect: self.cropZone!, scale: 1.0)
}
} }
dataCrop = cropImage!.pngData() dataCrop = cropImage!.pngData()
...@@ -72,14 +75,7 @@ class SBKResultCapture: UIViewController { ...@@ -72,14 +75,7 @@ class SBKResultCapture: UIViewController {
fatalError("init(coder:) has not been implemented") fatalError("init(coder:) has not been implemented")
} }
//Xử lý ảnh hiển thị
func cropImage(image: UIImage, rect: CGRect, scale: CGFloat) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(CGSize(width: rect.width, height: rect.height), true, 0.0)
image.draw(at: CGPoint(x: -rect.origin.x , y: -rect.origin.y ))
let croppedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return croppedImage
}
func navigateToFace() { func navigateToFace() {
DispatchQueue.main.async { DispatchQueue.main.async {
...@@ -153,27 +149,3 @@ class SBKResultCapture: UIViewController { ...@@ -153,27 +149,3 @@ class SBKResultCapture: UIViewController {
} }
} }
extension UIImage {
func rotate(radians: Float) -> UIImage? {
var newSize = CGRect(origin: CGPoint.zero, size: self.size).applying(CGAffineTransform(rotationAngle: CGFloat(radians))).size
// Trim off the extremely small float value to prevent core graphics from rounding it up
newSize.width = floor(newSize.width)
newSize.height = floor(newSize.height)
UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
let context = UIGraphicsGetCurrentContext()!
// Move origin to middle
context.translateBy(x: newSize.width/2, y: newSize.height/2)
// Rotate around middle
context.rotate(by: CGFloat(radians))
// Draw the image at its center
self.draw(in: CGRect(x: -self.size.width/2, y: -self.size.height/2, width: self.size.width, height: self.size.height))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
}
//
// OverLayView.swift
// OCR-SDK
//
// Created by Nguyen Van An on 4/26/21.
// Copyright © 2021 itsol. All rights reserved.
//
import AVFoundation
import UIKit
/// Camera-overlay view that dims everything outside a rounded "card preview"
/// cutout and strokes a colored border around the cutout.
///
/// `vContainer` holds the dimmed backdrop (masked so the preview rect is
/// clear); `vContainer2` holds only the border stroke so the border color can
/// be swapped cheaply via `setBorderColor(color:)`.
class OverLayCardView: UIView {
    @IBInspectable var previewWidth: CGFloat = 100
    @IBInspectable var previewHeight: CGFloat = 100
    @IBInspectable var borderLength: CGFloat = 10
    @IBInspectable var borderPadding: CGFloat = 0
    @IBInspectable var borderWidth: CGFloat = 2
    @IBInspectable var borderColor: CGColor = UIColor.white.cgColor
    @IBInspectable var marginTop: CGFloat = 0
    @IBInspectable var marginLeft: CGFloat = 0
    @IBInspectable var connerRadius: CGFloat = 8

    /// Dimmed backdrop; its layer mask leaves the preview rectangle clear.
    let vContainer: UIView = {
        let view = UIView()
        view.backgroundColor = UIColor.black.withAlphaComponent(0.6)
        return view
    }()

    /// Transparent layer host for the border stroke only.
    let vContainer2: UIView = {
        let view = UIView()
        return view
    }()

    override init(frame: CGRect) {
        super.init(frame: frame)
        vContainer.frame = self.bounds
        self.addSubview(vContainer)
        vContainer2.frame = self.bounds
        self.addSubview(vContainer2)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    /// Top-left origin of the preview cutout; defaults to centered when the
    /// corresponding margin is 0.
    /// NOTE(review): `marginTop` feeds the x coordinate and `marginLeft`
    /// feeds y — this looks swapped, but it is preserved exactly as the
    /// original behaved; confirm the intended semantics before renaming.
    private func previewOrigin() -> CGPoint {
        var x = marginTop
        var y = marginLeft
        if marginTop == 0 {
            x = self.center.x - previewWidth / 2
        }
        if marginLeft == 0 {
            y = self.center.y - previewHeight / 2
        }
        return CGPoint(x: x, y: y)
    }

    /// Builds the rounded border stroke around the cutout at `origin`.
    /// (Extracted: this code was duplicated verbatim in `setLayer` and
    /// `setBorderColor`.)
    private func makeBorderShape(at origin: CGPoint) -> CAShapeLayer {
        let path = UIBezierPath(
            roundedRect: CGRect(x: origin.x + borderPadding,
                                y: origin.y + borderPadding,
                                width: previewWidth - borderPadding * 2,
                                height: previewHeight - borderPadding * 2),
            cornerRadius: connerRadius)
        let shape = CAShapeLayer()
        shape.path = path.cgPath
        shape.strokeColor = borderColor
        shape.lineWidth = borderWidth
        shape.fillColor = UIColor.clear.cgColor
        return shape
    }

    /// Installs the dimming mask and the initial border stroke.
    func setLayer() {
        let origin = previewOrigin()
        // Mask the dimming view: full-bounds path minus the preview rect
        // (appended reversed) leaves a clear window.
        let maskLayer = CALayer()
        maskLayer.frame = self.bounds
        let cutoutLayer = CAShapeLayer()
        cutoutLayer.frame = CGRect(x: 0, y: 0, width: self.frame.size.width, height: self.frame.size.height)
        let finalPath = UIBezierPath(roundedRect: CGRect(x: 0, y: 0, width: self.frame.size.width, height: self.frame.size.height), cornerRadius: 0)
        let rectPath = UIBezierPath(roundedRect: CGRect(x: origin.x, y: origin.y, width: previewWidth, height: previewHeight), cornerRadius: connerRadius)
        finalPath.append(rectPath.reversing())
        cutoutLayer.path = finalPath.cgPath
        maskLayer.addSublayer(cutoutLayer)
        vContainer.layer.mask = maskLayer
        // (Removed dead code: the original built an unused `clearLayer` here.)
        vContainer2.layer.addSublayer(makeBorderShape(at: origin))
    }

    /// Re-strokes the border in `color` if it changed.
    func setBorderColor(color: CGColor) {
        if borderColor != color {
            vContainer2.layer.sublayers = nil
            borderColor = color
            vContainer2.layer.addSublayer(makeBorderShape(at: previewOrigin()))
        }
    }
}
//
// ExtCGImage.swift
// OCR-SDK
//
// Created by Nguyen Van An on 4/26/21.
// Copyright © 2021 itsol. All rights reserved.
//
import Foundation
import UIKit
import CoreGraphics
import CoreImage
import VideoToolbox
// Conversions between CGImage and CVPixelBuffer.
extension CGImage{
/// Renders this image into a newly created 32BGRA `CVPixelBuffer`.
/// Returns `nil` if the buffer cannot be allocated. If drawing fails the
/// buffer is still returned (possibly blank) — the CGContext creation is
/// optional-chained, not checked.
func toCVPixelBuffer() -> CVPixelBuffer? {
let frameSize = CGSize(width: self.width, height: self.height)
var pixelBuffer:CVPixelBuffer? = nil
// Allocate a BGRA buffer matching the image dimensions.
let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(frameSize.width), Int(frameSize.height), kCVPixelFormatType_32BGRA , nil, &pixelBuffer)
if status != kCVReturnSuccess {
return nil
}
// Lock the base address while CPU-side drawing writes into the buffer;
// force-unwraps below are safe because status was checked above.
CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags.init(rawValue: 0))
let data = CVPixelBufferGetBaseAddress(pixelBuffer!)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
// byteOrder32Little + premultipliedFirst is the CGBitmapInfo pairing for
// 32BGRA pixel data.
let bitmapInfo = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
let context = CGContext(data: data, width: Int(frameSize.width), height: Int(frameSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: bitmapInfo.rawValue)
context?.draw(self, in: CGRect(x: 0, y: 0, width: self.width, height: self.height))
CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
return pixelBuffer
}
/// Creates a `CGImage` from a pixel buffer via VideoToolbox.
/// Returns `nil` when VideoToolbox cannot convert the buffer's format.
public static func create(pixelBuffer: CVPixelBuffer) -> CGImage? {
var cgImage: CGImage?
VTCreateCGImageFromCVPixelBuffer(pixelBuffer, options: nil, imageOut: &cgImage)
return cgImage
}
}
//
// ExtCIImage.swift
// OCR-SDK
//
// Created by Nguyen Van An on 4/26/21.
// Copyright © 2021 itsol. All rights reserved.
//
import Foundation
import UIKit
extension CIImage {
    /// Renders this CIImage to a UIImage.
    ///
    /// BUGFIX: the original force-unwrapped a deprecated
    /// `EAGLContext(api: .openGLES2)` (deprecated since iOS 12; can return
    /// nil where OpenGL ES is unavailable, crashing). A default `CIContext()`
    /// uses the best available renderer instead.
    /// Still traps (with a message) if rendering fails, matching the
    /// original's non-optional return type.
    func toUIImage() -> UIImage {
        let context = CIContext()
        guard let cgImage = context.createCGImage(self, from: self.extent) else {
            fatalError("CIContext failed to render CIImage of extent \(self.extent)")
        }
        return UIImage(cgImage: cgImage)
    }
}
// Copyright 2019 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
import UIKit import UIKit
import Accelerate import Accelerate
extension CVPixelBuffer { extension CVPixelBuffer {
/**
Returns thumbnail by cropping pixel buffer to biggest square and scaling the cropped image to
model dimensions.
*/
func centerThumbnail(ofSize size: CGSize ) -> CVPixelBuffer? { func centerThumbnail(ofSize size: CGSize ) -> CVPixelBuffer? {
let imageWidth = CVPixelBufferGetWidth(self) let imageWidth = CVPixelBufferGetWidth(self)
let imageHeight = CVPixelBufferGetHeight(self) let imageHeight = CVPixelBufferGetHeight(self)
let pixelBufferType = CVPixelBufferGetPixelFormatType(self) let pixelBufferType = CVPixelBufferGetPixelFormatType(self)
assert(pixelBufferType == kCVPixelFormatType_32BGRA) assert(pixelBufferType == kCVPixelFormatType_32BGRA)
let inputImageRowBytes = CVPixelBufferGetBytesPerRow(self) let inputImageRowBytes = CVPixelBufferGetBytesPerRow(self)
let imageChannels = 4 let imageChannels = 4
let thumbnailSize = min(imageWidth, imageHeight) let thumbnailSize = min(imageWidth, imageHeight)
CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0)) CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
...@@ -139,5 +115,76 @@ extension CVPixelBuffer { ...@@ -139,5 +115,76 @@ extension CVPixelBuffer {
return pixelBuffer return pixelBuffer
} }
/// Scales this pixel buffer to `size` with vImage and returns a new buffer
/// of the same pixel format, or `nil` on any failure.
/// Expects a 4-channel 32BGRA/32ARGB buffer (asserted).
func resized(to size: CGSize) -> CVPixelBuffer? {
    let imageWidth = CVPixelBufferGetWidth(self)
    let imageHeight = CVPixelBufferGetHeight(self)
    let pixelBufferType = CVPixelBufferGetPixelFormatType(self)
    assert(pixelBufferType == kCVPixelFormatType_32BGRA ||
           pixelBufferType == kCVPixelFormatType_32ARGB)
    let inputImageRowBytes = CVPixelBufferGetBytesPerRow(self)
    let imageChannels = 4
    CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
    guard let inputBaseAddress = CVPixelBufferGetBaseAddress(self) else {
        // BUGFIX: the original returned here while still holding the lock.
        CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
        return nil
    }
    // Wrap the source pixels in a vImage buffer (no copy).
    var inputVImageBuffer = vImage_Buffer(data: inputBaseAddress, height: UInt(imageHeight), width: UInt(imageWidth), rowBytes: inputImageRowBytes)
    let scaledImageRowBytes = Int(size.width) * imageChannels
    guard let scaledImageBytes = malloc(Int(size.height) * scaledImageRowBytes) else {
        // BUGFIX: unlock before bailing (original leaked the lock here too).
        CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
        return nil
    }
    // Destination vImage buffer over the freshly malloc'd storage.
    var scaledVImageBuffer = vImage_Buffer(data: scaledImageBytes, height: UInt(size.height), width: UInt(size.width), rowBytes: scaledImageRowBytes)
    // vImageScale_ARGB8888 works on any 4x8-bit interleaved layout, so it is
    // valid for both BGRA and ARGB data.
    let scaleError = vImageScale_ARGB8888(&inputVImageBuffer, &scaledVImageBuffer, nil, vImage_Flags(0))
    CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
    guard scaleError == kvImageNoError else {
        free(scaledImageBytes)  // BUGFIX: malloc'd bytes were leaked on scale failure
        return nil
    }
    // Hand ownership of the malloc'd bytes to the CVPixelBuffer; the release
    // callback frees them when the buffer is destroyed.
    let releaseCallBack: CVPixelBufferReleaseBytesCallback = { _, pointer in
        if let pointer = pointer {
            free(UnsafeMutableRawPointer(mutating: pointer))
        }
    }
    var scaledPixelBuffer: CVPixelBuffer?
    let conversionStatus = CVPixelBufferCreateWithBytes(nil, Int(size.width), Int(size.height), pixelBufferType, scaledImageBytes, scaledImageRowBytes, releaseCallBack, nil, nil, &scaledPixelBuffer)
    guard conversionStatus == kCVReturnSuccess else {
        // Creation failed, so the release callback will never run; free here.
        free(scaledImageBytes)
        return nil
    }
    return scaledPixelBuffer
}
/// Converts the pixel buffer to a UIImage via an intermediate CIImage.
func toUIImage() -> UIImage {
    return CIImage(cvPixelBuffer: self).toUIImage()
}
/// Crops the pixel buffer to `rect`.
/// - Parameters:
///   - rect: Crop region, in the buffer's pixel coordinates.
///   - scale: Currently unused; kept for call-site compatibility.
/// - Returns: A new pixel buffer containing the cropped region, or `nil` if
///   the buffer could not be converted or `rect` lies outside the image.
/// FIX: replaces two force-unwraps (CGImage conversion and `cropping(to:)`)
/// that could crash even though the return type is optional.
func crop(rect: CGRect, scale: CGFloat) -> CVPixelBuffer? {
    guard let cgImage = CGImage.create(pixelBuffer: self),
          let croppedImage = cgImage.cropping(to: rect) else {
        return nil
    }
    return croppedImage.toCVPixelBuffer()
}
} }
//
// ExtUIImage.swift
// OCR-SDK
//
// Created by Nguyen Van An on 4/26/21.
// Copyright © 2021 itsol. All rights reserved.
//
import Foundation
import UIKit
extension UIImage {
//Xử lý ảnh hiển thị
/// Renders the region `rect` of this image into a new image context.
/// - Parameters:
///   - rect: Region to keep, in scaled coordinates (divided by `scale`).
///   - scale: Factor applied to the drawing offset.
/// - Returns: The cropped image, or `nil` if rendering failed.
func crop(rect: CGRect, scale: CGFloat) -> UIImage? {
    let canvasSize = CGSize(width: rect.width, height: rect.height)
    UIGraphicsBeginImageContextWithOptions(canvasSize, true, 0.0)
    defer { UIGraphicsEndImageContext() }
    // Shift the drawing origin so that `rect` lands at (0, 0).
    let origin = CGPoint(x: -rect.minX / scale, y: -rect.minY / scale)
    draw(at: origin)
    return UIGraphicsGetImageFromCurrentImageContext()
}
/// Reads the RGBA colour of one pixel from a raw byte buffer.
/// - Parameters:
///   - pos: Pixel coordinate (x, y) to sample.
///   - dataImage: Raw image bytes, read as 4 bytes per pixel in R,G,B,A order.
///   - image: Image whose width determines the row stride.
/// - Returns: The sampled colour with components normalised to 0...1.
/// FIX: removed `pixelData`/`data`, which were built via force-unwraps
/// (`image.cgImage!`, `dataProvider!`) yet never read — a pure crash risk.
func getPixelColor(pos: CGPoint, dataImage: Data, image: UIImage) -> UIColor {
    // NOTE(review): stride uses image.size (points, not pixels); for images
    // with scale > 1 this under-counts the row width — confirm callers always
    // pass a 1x image alongside matching `dataImage` bytes.
    let pixelInfo: Int = ((Int(image.size.width) * Int(pos.y)) + Int(pos.x)) * 4
    let r = CGFloat(dataImage[pixelInfo]) / CGFloat(255.0)
    let g = CGFloat(dataImage[pixelInfo+1]) / CGFloat(255.0)
    let b = CGFloat(dataImage[pixelInfo+2]) / CGFloat(255.0)
    let a = CGFloat(dataImage[pixelInfo+3]) / CGFloat(255.0)
    return UIColor(red: r, green: g, blue: b, alpha: a)
}
/// Returns a copy of the image rotated by `radians` around its centre.
/// The canvas is enlarged (and floored to whole points) to contain the
/// rotated image.
/// - Parameter radians: Rotation angle in radians.
/// - Returns: The rotated image, or `nil` if no graphics context/image could
///   be produced.
func rotate(radians: Float) -> UIImage? {
    var newSize = CGRect(origin: CGPoint.zero, size: self.size)
        .applying(CGAffineTransform(rotationAngle: CGFloat(radians))).size
    // Trim to whole points to avoid sub-pixel blurring at the edges.
    newSize.width = floor(newSize.width)
    newSize.height = floor(newSize.height)

    UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
    defer { UIGraphicsEndImageContext() }
    // FIX: guard instead of force-unwrap — return nil rather than crash when
    // a context cannot be created (the return type is already optional).
    guard let context = UIGraphicsGetCurrentContext() else { return nil }

    // Move the origin to the canvas centre, rotate, then draw the image
    // centred on that origin.
    context.translateBy(x: newSize.width / 2, y: newSize.height / 2)
    context.rotate(by: CGFloat(radians))
    self.draw(in: CGRect(x: -self.size.width / 2, y: -self.size.height / 2,
                         width: self.size.width, height: self.size.height))
    return UIGraphicsGetImageFromCurrentImageContext()
}
/// Scales the image to fit within `targetSize`, preserving aspect ratio.
/// - Parameter targetSize: Bounding size the result must fit inside.
/// - Returns: The aspect-fit scaled image, rendered at 1x.
func resize(targetSize: CGSize) -> UIImage {
    // The smaller of the two ratios guarantees both dimensions fit.
    let ratio = min(targetSize.width / size.width,
                    targetSize.height / size.height)
    let fittedSize = CGSize(width: size.width * ratio,
                            height: size.height * ratio)

    UIGraphicsBeginImageContextWithOptions(fittedSize, false, 1.0)
    defer { UIGraphicsEndImageContext() }
    draw(in: CGRect(origin: .zero, size: fittedSize))
    return UIGraphicsGetImageFromCurrentImageContext()!
}
/// Renders the image through Core Image (GLES2-backed context) and converts
/// the resulting CGImage to a CVPixelBuffer.
/// Crashes if any intermediate conversion fails, matching the non-optional
/// return type.
func toCVPixel() -> CVPixelBuffer {
    let coreImage = CIImage(image: self)!
    let glContext = EAGLContext(api: .openGLES2)!
    let renderContext = CIContext(eaglContext: glContext)
    let rendered = renderContext.createCGImage(coreImage, from: coreImage.extent)!
    return rendered.toCVPixelBuffer()!
}
}
...@@ -37,4 +37,24 @@ extension UIViewController { ...@@ -37,4 +37,24 @@ extension UIViewController {
func validateImage(image: Data) -> Bool { func validateImage(image: Data) -> Bool {
return true return true
} }
/// Shows a Yes/No confirmation alert; choosing "Yes" pops the navigation
/// stack back to the first SBKCaptureCardVC (front-card capture screen).
func popupBackToCaptureCardFront() {
    let alert = UIAlertController(
        title: "Confirm".localized(),
        message: "You will go back to front card photography.".localized(),
        preferredStyle: .alert)

    alert.addAction(UIAlertAction(title: "No".localized(), style: .cancel, handler: nil))
    alert.addAction(UIAlertAction(title: "Yes".localized(), style: .default) { _ in
        let stack = self.navigationController!.viewControllers
        if let target = stack.first(where: { $0.isKind(of: SBKCaptureCardVC.self) }) {
            self.navigationController!.popToViewController(target, animated: true)
        }
    })

    present(alert, animated: true, completion: nil)
}
} }
...@@ -57,9 +57,7 @@ class SBKValidateInput { ...@@ -57,9 +57,7 @@ class SBKValidateInput {
guard (currentTimeMs - previousInferenceTimeMs) >= delayBetweenInferencesMs else { return .ERROR } guard (currentTimeMs - previousInferenceTimeMs) >= delayBetweenInferencesMs else { return .ERROR }
previousInferenceTimeMs = currentTimeMs previousInferenceTimeMs = currentTimeMs
// Pass the pixel buffer to TensorFlow Lite to perform inference.
result = modelDataHandler?.runModel(onFrame: pixelBuffer) result = modelDataHandler?.runModel(onFrame: pixelBuffer)
if result == nil { if result == nil {
return .ERROR return .ERROR
} }
...@@ -72,7 +70,7 @@ class SBKValidateInput { ...@@ -72,7 +70,7 @@ class SBKValidateInput {
return .IMAGE_BACK return .IMAGE_BACK
case 3: case 3:
return .PASSPORT return .PASSPORT
case 4: case 4,5,6,7,8,9:
return .IMAGE_FAKE return .IMAGE_FAKE
default: default:
return .ERROR return .ERROR
......
...@@ -6,13 +6,45 @@ target 'OCR-SDK' do ...@@ -6,13 +6,45 @@ target 'OCR-SDK' do
use_frameworks! use_frameworks!
# Pods for OCR-SDK # Pods for OCR-SDK
pod 'TensorFlowLiteSwift' pod 'TensorFlowLiteSwift', '~> 0.0.1-nightly'
#pod 'GoogleMobileVision/FaceDetector' #pod 'GoogleMobileVision/FaceDetector'
#pod 'GTMSessionFetcher' #pod 'GTMSessionFetcher'
end end
#post_install do |installer|
# installer.pods_project.build_configurations.each do |config|
# config.build_settings["EXCLUDED_ARCHS[sdk=iphonesimulator*]"] = "arm64"
# config.build_settings["BITCODE_GENERATION_MODE"] = "bitcode"
# end
#end
#bitcode enable
post_install do |installer| post_install do |installer|
installer.pods_project.build_configurations.each do |config| installer.pods_project.targets.each do |target|
config.build_settings["EXCLUDED_ARCHS[sdk=iphonesimulator*]"] = "arm64" target.build_configurations.each do |config|
# set valid architecture
config.build_settings['VALID_ARCHS'] = 'arm64 armv7 armv7s i386 x86_64'
# build active architecture only (Debug build all)
config.build_settings['ONLY_ACTIVE_ARCH'] = 'NO'
config.build_settings['ENABLE_BITCODE'] = 'YES'
if config.name == 'Release' || config.name == 'Pro'
config.build_settings['BITCODE_GENERATION_MODE'] = 'bitcode'
else # Debug
config.build_settings['BITCODE_GENERATION_MODE'] = 'marker'
end
cflags = config.build_settings['OTHER_CFLAGS'] || ['$(inherited)']
if config.name == 'Release' || config.name == 'Pro'
cflags << '-fembed-bitcode'
else # Debug
cflags << '-fembed-bitcode-marker'
end
config.build_settings['OTHER_CFLAGS'] = cflags
end
end end
end end
PODS: PODS:
- TensorFlowLiteC (2.2.0) - TensorFlowLiteC (0.0.1-nightly.20211102):
- TensorFlowLiteSwift (2.2.0): - TensorFlowLiteC/Core (= 0.0.1-nightly.20211102)
- TensorFlowLiteC (= 2.2.0) - TensorFlowLiteC/Core (0.0.1-nightly.20211102)
- TensorFlowLiteSwift (0.0.1-nightly.20211102):
- TensorFlowLiteSwift/Core (= 0.0.1-nightly.20211102)
- TensorFlowLiteSwift/Core (0.0.1-nightly.20211102):
- TensorFlowLiteC (= 0.0.1-nightly.20211102)
DEPENDENCIES: DEPENDENCIES:
- TensorFlowLiteSwift - TensorFlowLiteSwift (~> 0.0.1-nightly)
SPEC REPOS: SPEC REPOS:
trunk: trunk:
...@@ -12,9 +16,9 @@ SPEC REPOS: ...@@ -12,9 +16,9 @@ SPEC REPOS:
- TensorFlowLiteSwift - TensorFlowLiteSwift
SPEC CHECKSUMS: SPEC CHECKSUMS:
TensorFlowLiteC: b3ab9e867b0b71052ca102a32a786555b330b02e TensorFlowLiteC: 671507d2d839e633f3dc7ab464677d28857a37b6
TensorFlowLiteSwift: 2dd5e9c895e1819501f0fba3d8b69a536bda6c65 TensorFlowLiteSwift: 039777297cdea077fcdd2f8a0f000c1d651ac45f
PODFILE CHECKSUM: a8990648dc4761bcfc73655f0e8e51e3109f0e4f PODFILE CHECKSUM: 5d1bc9d5125d5fec48a2110d5d6596947a9bac74
COCOAPODS: 1.10.1 COCOAPODS: 1.10.2
PODS: PODS:
- TensorFlowLiteC (2.2.0) - TensorFlowLiteC (0.0.1-nightly.20211102):
- TensorFlowLiteSwift (2.2.0): - TensorFlowLiteC/Core (= 0.0.1-nightly.20211102)
- TensorFlowLiteC (= 2.2.0) - TensorFlowLiteC/Core (0.0.1-nightly.20211102)
- TensorFlowLiteSwift (0.0.1-nightly.20211102):
- TensorFlowLiteSwift/Core (= 0.0.1-nightly.20211102)
- TensorFlowLiteSwift/Core (0.0.1-nightly.20211102):
- TensorFlowLiteC (= 0.0.1-nightly.20211102)
DEPENDENCIES: DEPENDENCIES:
- TensorFlowLiteSwift - TensorFlowLiteSwift (~> 0.0.1-nightly)
SPEC REPOS: SPEC REPOS:
trunk: trunk:
...@@ -12,9 +16,9 @@ SPEC REPOS: ...@@ -12,9 +16,9 @@ SPEC REPOS:
- TensorFlowLiteSwift - TensorFlowLiteSwift
SPEC CHECKSUMS: SPEC CHECKSUMS:
TensorFlowLiteC: b3ab9e867b0b71052ca102a32a786555b330b02e TensorFlowLiteC: 671507d2d839e633f3dc7ab464677d28857a37b6
TensorFlowLiteSwift: 2dd5e9c895e1819501f0fba3d8b69a536bda6c65 TensorFlowLiteSwift: 039777297cdea077fcdd2f8a0f000c1d651ac45f
PODFILE CHECKSUM: a8990648dc4761bcfc73655f0e8e51e3109f0e4f PODFILE CHECKSUM: 5d1bc9d5125d5fec48a2110d5d6596947a9bac74
COCOAPODS: 1.10.1 COCOAPODS: 1.10.2
This diff is collapsed.
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<Scheme <Scheme
LastUpgradeVersion = "1100" LastUpgradeVersion = "1240"
version = "1.3"> version = "1.3">
<BuildAction <BuildAction
parallelizeBuildables = "YES" parallelizeBuildables = "YES"
......
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<Scheme <Scheme
LastUpgradeVersion = "1100" LastUpgradeVersion = "1240"
version = "1.3"> version = "1.3">
<BuildAction <BuildAction
parallelizeBuildables = "YES" parallelizeBuildables = "YES"
...@@ -14,9 +14,9 @@ ...@@ -14,9 +14,9 @@
buildForAnalyzing = "YES"> buildForAnalyzing = "YES">
<BuildableReference <BuildableReference
BuildableIdentifier = "primary" BuildableIdentifier = "primary"
BlueprintIdentifier = "DE1F4D51AD94C30627575AEE202FD099" BlueprintIdentifier = "AC559E53E13B6FBEF4F5CC310A73AFE6"
BuildableName = "Pods_OCR_SDK.framework" BuildableName = "TensorFlowLiteC"
BlueprintName = "Pods-OCR-SDK" BlueprintName = "TensorFlowLiteC"
ReferencedContainer = "container:Pods.xcodeproj"> ReferencedContainer = "container:Pods.xcodeproj">
</BuildableReference> </BuildableReference>
</BuildActionEntry> </BuildActionEntry>
......
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<Scheme <Scheme
LastUpgradeVersion = "1100" LastUpgradeVersion = "1240"
version = "1.3"> version = "1.3">
<BuildAction <BuildAction
parallelizeBuildables = "YES" parallelizeBuildables = "YES"
......
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1100"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForAnalyzing = "YES"
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "AC559E53E13B6FBEF4F5CC310A73AFE6"
BuildableName = "TensorFlowLiteC"
BlueprintName = "TensorFlowLiteC"
ReferencedContainer = "container:Pods.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES"
buildConfiguration = "Debug">
<AdditionalOptions>
</AdditionalOptions>
</TestAction>
<LaunchAction
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
buildConfiguration = "Debug"
allowLocationSimulation = "YES">
<AdditionalOptions>
</AdditionalOptions>
</LaunchAction>
<ProfileAction
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES"
buildConfiguration = "Release"
shouldUseLaunchSchemeArgsEnv = "YES">
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1100"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "10418167F619D6DA72BADAD10F9EC02B"
BuildableName = "TensorFlowLite.framework"
BlueprintName = "TensorFlowLiteSwift"
ReferencedContainer = "container:Pods.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES">
<Testables>
</Testables>
</TestAction>
<LaunchAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
allowLocationSimulation = "YES">
</LaunchAction>
<ProfileAction
buildConfiguration = "Release"
shouldUseLaunchSchemeArgsEnv = "YES"
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES">
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>SchemeUserState</key>
<dict>
<key>Pods-OCR-SDK.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>3</integer>
</dict>
<key>TensorFlowLiteC.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>1</integer>
</dict>
<key>TensorFlowLiteSwift.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>2</integer>
</dict>
</dict>
</dict>
</plist>
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>SchemeUserState</key>
<dict>
<key>Pods-OCR-SDK.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>2</integer>
</dict>
<key>TensorFlowLiteC.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>1</integer>
</dict>
<key>TensorFlowLiteSwift.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>3</integer>
</dict>
</dict>
</dict>
</plist>
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1100"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForAnalyzing = "YES"
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "AC559E53E13B6FBEF4F5CC310A73AFE6"
BuildableName = "TensorFlowLiteC"
BlueprintName = "TensorFlowLiteC"
ReferencedContainer = "container:Pods.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES"
buildConfiguration = "Debug">
<AdditionalOptions>
</AdditionalOptions>
</TestAction>
<LaunchAction
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
buildConfiguration = "Debug"
allowLocationSimulation = "YES">
<AdditionalOptions>
</AdditionalOptions>
</LaunchAction>
<ProfileAction
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES"
buildConfiguration = "Release"
shouldUseLaunchSchemeArgsEnv = "YES">
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>SchemeUserState</key>
<dict>
<key>Pods-OCR-SDK.xcscheme</key>
<dict>
<key>isShown</key>
<false/>
</dict>
<key>TensorFlowLiteC.xcscheme</key>
<dict>
<key>isShown</key>
<false/>
</dict>
<key>TensorFlowLiteSwift.xcscheme</key>
<dict>
<key>isShown</key>
<false/>
</dict>
</dict>
<key>SuppressBuildableAutocreation</key>
<dict/>
</dict>
</plist>
...@@ -3,8 +3,6 @@ This application makes use of the following third party libraries: ...@@ -3,8 +3,6 @@ This application makes use of the following third party libraries:
## TensorFlowLiteSwift ## TensorFlowLiteSwift
Copyright 2019 The TensorFlow Authors. All rights reserved.
Apache License Apache License
Version 2.0, January 2004 Version 2.0, January 2004
http://www.apache.org/licenses/ http://www.apache.org/licenses/
...@@ -207,4 +205,87 @@ Copyright 2019 The TensorFlow Authors. All rights reserved. ...@@ -207,4 +205,87 @@ Copyright 2019 The TensorFlow Authors. All rights reserved.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
------------------
Files: third_party/compute_library/...
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------
Files: ACKNOWLEDGEMENTS
LICENSE
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------
Files: third_party/hexagon
Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted (subject to the limitations in the
disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Generated by CocoaPods - https://cocoapods.org Generated by CocoaPods - https://cocoapods.org
...@@ -14,9 +14,7 @@ ...@@ -14,9 +14,7 @@
</dict> </dict>
<dict> <dict>
<key>FooterText</key> <key>FooterText</key>
<string>Copyright 2019 The TensorFlow Authors. All rights reserved. <string> Apache License
Apache License
Version 2.0, January 2004 Version 2.0, January 2004
http://www.apache.org/licenses/ http://www.apache.org/licenses/
...@@ -217,6 +215,89 @@ ...@@ -217,6 +215,89 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
------------------
Files: third_party/compute_library/...
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------
Files: ACKNOWLEDGEMENTS
LICENSE
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------
Files: third_party/hexagon
Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted (subject to the limitations in the
disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</string> </string>
<key>License</key> <key>License</key>
<string>Apache</string> <string>Apache</string>
......
${PODS_ROOT}/Target Support Files/Pods-OCR-SDK/Pods-OCR-SDK-resources.sh
${PODS_CONFIGURATION_BUILD_DIR}/GoogleMobileVision/GoogleMVFaceDetectorResources.bundle
\ No newline at end of file
${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/GoogleMVFaceDetectorResources.bundle
\ No newline at end of file
${PODS_ROOT}/Target Support Files/Pods-OCR-SDK/Pods-OCR-SDK-resources.sh
${PODS_CONFIGURATION_BUILD_DIR}/GoogleMobileVision/GoogleMVFaceDetectorResources.bundle
\ No newline at end of file
${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/GoogleMVFaceDetectorResources.bundle
\ No newline at end of file
#!/bin/sh
# CocoaPods "Copy Pods Resources" build phase script (generated by CocoaPods).
# Compiles/stages each pod resource (storyboards, xibs, bundles, models,
# asset catalogs, plain files) into the built product's resources folder.
set -e
set -u
set -o pipefail
# Report failures in Xcode's file:line error format.
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${UNLOCALIZED_RESOURCES_FOLDER_PATH+x} ]; then
# If UNLOCALIZED_RESOURCES_FOLDER_PATH is not set, then there's nowhere for us to copy
# resources to, so exit 0 (signalling the script phase was successful).
exit 0
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
# Manifest file listing plain resources for the final rsync pass.
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Map TARGETED_DEVICE_FAMILY to the --target-device flags ibtool/actool expect.
case "${TARGETED_DEVICE_FAMILY:-}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
3)
TARGET_DEVICE_ARGS="--target-device tv"
;;
4)
TARGET_DEVICE_ARGS="--target-device watch"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
# Compile or stage a single resource, dispatching on its file extension.
# Compilable types are processed immediately; asset catalogs are collected in
# XCASSET_FILES; everything else is appended to the rsync manifest.
install_resource()
{
if [[ "$1" = /* ]] ; then
RESOURCE_PATH="$1"
else
RESOURCE_PATH="${PODS_ROOT}/$1"
fi
if [[ ! -e "$RESOURCE_PATH" ]] ; then
cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
exit 1
fi
case $RESOURCE_PATH in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.framework)
echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\"" || true
xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH"
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
*)
echo "$RESOURCE_PATH" || true
echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
;;
esac
}
# Stage the GoogleMobileVision face detector bundle for both configurations.
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "${PODS_CONFIGURATION_BUILD_DIR}/GoogleMobileVision/GoogleMVFaceDetectorResources.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "${PODS_CONFIGURATION_BUILD_DIR}/GoogleMobileVision/GoogleMVFaceDetectorResources.bundle"
fi
# Copy all manifest-listed plain resources into the app bundle (and the
# install dir when archiving), then remove the manifest.
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
# Compile all collected asset catalogs in a single actool invocation.
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "${XCASSET_FILES:-}" ]
then
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find -L "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "${PODS_ROOT}*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
if [ -z ${ASSETCATALOG_COMPILER_APPICON_NAME+x} ]; then
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
else
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" --app-icon "${ASSETCATALOG_COMPILER_APPICON_NAME}" --output-partial-info-plist "${TARGET_TEMP_DIR}/assetcatalog_generated_info_cocoapods.plist"
fi
fi
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
<key>CFBundlePackageType</key> <key>CFBundlePackageType</key>
<string>FMWK</string> <string>FMWK</string>
<key>CFBundleShortVersionString</key> <key>CFBundleShortVersionString</key>
<string>2.2.0</string> <string>0.0.1</string>
<key>CFBundleSignature</key> <key>CFBundleSignature</key>
<string>????</string> <string>????</string>
<key>CFBundleVersion</key> <key>CFBundleVersion</key>
......
#import "builtin_ops.h"
#import "c_api.h" #import "c_api.h"
#import "c_api_experimental.h"
#import "common.h" #import "common.h"
#import "metal_delegate.h" #import "xnnpack_delegate.h"
#import "c_api_types.h"
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_
#define TENSORFLOW_LITE_BUILTIN_OPS_H_
// DO NOT EDIT MANUALLY: This file is automatically generated by
// `schema/builtin_ops_header/generator.cc`.
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
// The enum for builtin operators.
// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special
// ops which are not real built-in ops.
// NOTE(review): these numeric values come from the TFLite schema generator
// (see the "automatically generated" note above); do not renumber or edit by
// hand — regenerate via `schema/builtin_ops_header/generator.cc` instead.
typedef enum {
kTfLiteBuiltinAdd = 0,
kTfLiteBuiltinAveragePool2d = 1,
kTfLiteBuiltinConcatenation = 2,
kTfLiteBuiltinConv2d = 3,
kTfLiteBuiltinDepthwiseConv2d = 4,
kTfLiteBuiltinDepthToSpace = 5,
kTfLiteBuiltinDequantize = 6,
kTfLiteBuiltinEmbeddingLookup = 7,
kTfLiteBuiltinFloor = 8,
kTfLiteBuiltinFullyConnected = 9,
kTfLiteBuiltinHashtableLookup = 10,
kTfLiteBuiltinL2Normalization = 11,
kTfLiteBuiltinL2Pool2d = 12,
kTfLiteBuiltinLocalResponseNormalization = 13,
kTfLiteBuiltinLogistic = 14,
kTfLiteBuiltinLshProjection = 15,
kTfLiteBuiltinLstm = 16,
kTfLiteBuiltinMaxPool2d = 17,
kTfLiteBuiltinMul = 18,
kTfLiteBuiltinRelu = 19,
kTfLiteBuiltinReluN1To1 = 20,
kTfLiteBuiltinRelu6 = 21,
kTfLiteBuiltinReshape = 22,
kTfLiteBuiltinResizeBilinear = 23,
kTfLiteBuiltinRnn = 24,
kTfLiteBuiltinSoftmax = 25,
kTfLiteBuiltinSpaceToDepth = 26,
kTfLiteBuiltinSvdf = 27,
kTfLiteBuiltinTanh = 28,
kTfLiteBuiltinConcatEmbeddings = 29,
kTfLiteBuiltinSkipGram = 30,
kTfLiteBuiltinCall = 31,
kTfLiteBuiltinCustom = 32,
kTfLiteBuiltinEmbeddingLookupSparse = 33,
kTfLiteBuiltinPad = 34,
kTfLiteBuiltinUnidirectionalSequenceRnn = 35,
kTfLiteBuiltinGather = 36,
kTfLiteBuiltinBatchToSpaceNd = 37,
kTfLiteBuiltinSpaceToBatchNd = 38,
kTfLiteBuiltinTranspose = 39,
kTfLiteBuiltinMean = 40,
kTfLiteBuiltinSub = 41,
kTfLiteBuiltinDiv = 42,
kTfLiteBuiltinSqueeze = 43,
kTfLiteBuiltinUnidirectionalSequenceLstm = 44,
kTfLiteBuiltinStridedSlice = 45,
kTfLiteBuiltinBidirectionalSequenceRnn = 46,
kTfLiteBuiltinExp = 47,
kTfLiteBuiltinTopkV2 = 48,
kTfLiteBuiltinSplit = 49,
kTfLiteBuiltinLogSoftmax = 50,
kTfLiteBuiltinDelegate = 51,
kTfLiteBuiltinBidirectionalSequenceLstm = 52,
kTfLiteBuiltinCast = 53,
kTfLiteBuiltinPrelu = 54,
kTfLiteBuiltinMaximum = 55,
kTfLiteBuiltinArgMax = 56,
kTfLiteBuiltinMinimum = 57,
kTfLiteBuiltinLess = 58,
kTfLiteBuiltinNeg = 59,
kTfLiteBuiltinPadv2 = 60,
kTfLiteBuiltinGreater = 61,
kTfLiteBuiltinGreaterEqual = 62,
kTfLiteBuiltinLessEqual = 63,
kTfLiteBuiltinSelect = 64,
kTfLiteBuiltinSlice = 65,
kTfLiteBuiltinSin = 66,
kTfLiteBuiltinTransposeConv = 67,
kTfLiteBuiltinSparseToDense = 68,
kTfLiteBuiltinTile = 69,
kTfLiteBuiltinExpandDims = 70,
kTfLiteBuiltinEqual = 71,
kTfLiteBuiltinNotEqual = 72,
kTfLiteBuiltinLog = 73,
kTfLiteBuiltinSum = 74,
kTfLiteBuiltinSqrt = 75,
kTfLiteBuiltinRsqrt = 76,
kTfLiteBuiltinShape = 77,
kTfLiteBuiltinPow = 78,
kTfLiteBuiltinArgMin = 79,
kTfLiteBuiltinFakeQuant = 80,
kTfLiteBuiltinReduceProd = 81,
kTfLiteBuiltinReduceMax = 82,
kTfLiteBuiltinPack = 83,
kTfLiteBuiltinLogicalOr = 84,
kTfLiteBuiltinOneHot = 85,
kTfLiteBuiltinLogicalAnd = 86,
kTfLiteBuiltinLogicalNot = 87,
kTfLiteBuiltinUnpack = 88,
kTfLiteBuiltinReduceMin = 89,
kTfLiteBuiltinFloorDiv = 90,
kTfLiteBuiltinReduceAny = 91,
kTfLiteBuiltinSquare = 92,
kTfLiteBuiltinZerosLike = 93,
kTfLiteBuiltinFill = 94,
kTfLiteBuiltinFloorMod = 95,
kTfLiteBuiltinRange = 96,
kTfLiteBuiltinResizeNearestNeighbor = 97,
kTfLiteBuiltinLeakyRelu = 98,
kTfLiteBuiltinSquaredDifference = 99,
kTfLiteBuiltinMirrorPad = 100,
kTfLiteBuiltinAbs = 101,
kTfLiteBuiltinSplitV = 102,
kTfLiteBuiltinUnique = 103,
kTfLiteBuiltinCeil = 104,
kTfLiteBuiltinReverseV2 = 105,
kTfLiteBuiltinAddN = 106,
kTfLiteBuiltinGatherNd = 107,
kTfLiteBuiltinCos = 108,
kTfLiteBuiltinWhere = 109,
kTfLiteBuiltinRank = 110,
kTfLiteBuiltinElu = 111,
kTfLiteBuiltinReverseSequence = 112,
kTfLiteBuiltinMatrixDiag = 113,
kTfLiteBuiltinQuantize = 114,
kTfLiteBuiltinMatrixSetDiag = 115,
kTfLiteBuiltinRound = 116,
kTfLiteBuiltinHardSwish = 117,
kTfLiteBuiltinIf = 118,
kTfLiteBuiltinWhile = 119,
kTfLiteBuiltinNonMaxSuppressionV4 = 120,
kTfLiteBuiltinNonMaxSuppressionV5 = 121,
kTfLiteBuiltinScatterNd = 122,
kTfLiteBuiltinSelectV2 = 123,
kTfLiteBuiltinDensify = 124,
kTfLiteBuiltinSegmentSum = 125,
kTfLiteBuiltinBatchMatmul = 126,
kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127,
kTfLiteBuiltinCumsum = 128,
kTfLiteBuiltinCallOnce = 129,
kTfLiteBuiltinBroadcastTo = 130,
kTfLiteBuiltinRfft2d = 131,
kTfLiteBuiltinConv3d = 132,
kTfLiteBuiltinImag = 133,
kTfLiteBuiltinReal = 134,
kTfLiteBuiltinComplexAbs = 135,
kTfLiteBuiltinHashtable = 136,
kTfLiteBuiltinHashtableFind = 137,
kTfLiteBuiltinHashtableImport = 138,
kTfLiteBuiltinHashtableSize = 139,
kTfLiteBuiltinReduceAll = 140,
kTfLiteBuiltinConv3dTranspose = 141,
kTfLiteBuiltinVarHandle = 142,
kTfLiteBuiltinReadVariable = 143,
kTfLiteBuiltinAssignVariable = 144,
kTfLiteBuiltinBroadcastArgs = 145,
kTfLiteBuiltinRandomStandardNormal = 146,
kTfLiteBuiltinBucketize = 147,
kTfLiteBuiltinRandomUniform = 148,
} TfLiteBuiltinOperator;
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif // TENSORFLOW_LITE_BUILTIN_OPS_H_
...@@ -17,8 +17,9 @@ limitations under the License. ...@@ -17,8 +17,9 @@ limitations under the License.
#include <stdarg.h> #include <stdarg.h>
#include <stdint.h> #include <stdint.h>
#include <stdlib.h>
#include "common.h" #include "c_api_types.h" // IWYU pragma: export
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
/// C API for TensorFlow Lite. /// C API for TensorFlow Lite.
...@@ -66,34 +67,39 @@ limitations under the License. ...@@ -66,34 +67,39 @@ limitations under the License.
/// TfLiteInterpreterOptionsDelete(options); /// TfLiteInterpreterOptionsDelete(options);
/// TfLiteModelDelete(model); /// TfLiteModelDelete(model);
#ifdef SWIG
#define TFL_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#endif // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif // __cplusplus #endif // __cplusplus
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// TfLiteVersion returns a string describing version information of the // Opaque types used by the C API.
// TensorFlow Lite library. TensorFlow Lite uses semantic versioning.
TFL_CAPI_EXPORT extern const char* TfLiteVersion(void);
// --------------------------------------------------------------------------
// TfLiteModel wraps a loaded TensorFlow Lite model. // TfLiteModel wraps a loaded TensorFlow Lite model.
typedef struct TfLiteModel TfLiteModel; typedef struct TfLiteModel TfLiteModel;
// TfLiteInterpreterOptions allows customized interpreter configuration.
typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions;
// Allows delegation of nodes to alternative backends.
typedef struct TfLiteDelegate TfLiteDelegate;
// TfLiteInterpreter provides inference from a provided model.
typedef struct TfLiteInterpreter TfLiteInterpreter;
// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
typedef struct TfLiteTensor TfLiteTensor;
// --------------------------------------------------------------------------
// TfLiteVersion returns a string describing version information of the
// TensorFlow Lite library. TensorFlow Lite uses semantic versioning.
TFL_CAPI_EXPORT extern const char* TfLiteVersion(void);
// Returns a model from the provided buffer, or null on failure. // Returns a model from the provided buffer, or null on failure.
//
// NOTE: The caller retains ownership of the `model_data` and should ensure that
// the lifetime of the `model_data` must be at least as long as the lifetime
// of the `TfLiteModel`.
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreate(const void* model_data, TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreate(const void* model_data,
size_t model_size); size_t model_size);
...@@ -104,10 +110,6 @@ TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFile( ...@@ -104,10 +110,6 @@ TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFile(
// Destroys the model instance. // Destroys the model instance.
TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model); TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model);
// --------------------------------------------------------------------------
// TfLiteInterpreterOptions allows customized interpreter configuration.
typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions;
// Returns a new interpreter options instances. // Returns a new interpreter options instances.
TFL_CAPI_EXPORT extern TfLiteInterpreterOptions* TFL_CAPI_EXPORT extern TfLiteInterpreterOptions*
TfLiteInterpreterOptionsCreate(); TfLiteInterpreterOptionsCreate();
...@@ -134,17 +136,13 @@ TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddDelegate( ...@@ -134,17 +136,13 @@ TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddDelegate(
// //
// * `reporter` takes the provided `user_data` object, as well as a C-style // * `reporter` takes the provided `user_data` object, as well as a C-style
// format string and arg list (see also vprintf). // format string and arg list (see also vprintf).
// * `user_data` is optional. If provided, it is owned by the client and must // * `user_data` is optional. If non-null, it is owned by the client and must
// remain valid for the duration of the interpreter lifetime. // remain valid for the duration of the interpreter lifetime.
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetErrorReporter( TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetErrorReporter(
TfLiteInterpreterOptions* options, TfLiteInterpreterOptions* options,
void (*reporter)(void* user_data, const char* format, va_list args), void (*reporter)(void* user_data, const char* format, va_list args),
void* user_data); void* user_data);
// --------------------------------------------------------------------------
// TfLiteInterpreter provides inference from a provided model.
typedef struct TfLiteInterpreter TfLiteInterpreter;
// Returns a new interpreter using the provided model and options, or null on // Returns a new interpreter using the provided model and options, or null on
// failure. // failure.
// //
...@@ -176,7 +174,11 @@ TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteInterpreterGetInputTensor( ...@@ -176,7 +174,11 @@ TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteInterpreterGetInputTensor(
// //
// NOTE: After a resize, the client *must* explicitly allocate tensors before // NOTE: After a resize, the client *must* explicitly allocate tensors before
// attempting to access the resized tensor data or invoke the interpreter. // attempting to access the resized tensor data or invoke the interpreter.
//
// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor) // REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor)
//
// This function makes a copy of the input dimensions, so the client can safely
// deallocate `input_dims` immediately after this function returns.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResizeInputTensor( TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResizeInputTensor(
TfLiteInterpreter* interpreter, int32_t input_index, const int* input_dims, TfLiteInterpreter* interpreter, int32_t input_index, const int* input_dims,
int32_t input_dims_size); int32_t input_dims_size);
...@@ -191,9 +193,34 @@ TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterAllocateTensors( ...@@ -191,9 +193,34 @@ TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterAllocateTensors(
// Runs inference for the loaded graph. // Runs inference for the loaded graph.
// //
// Before calling this function, the caller should first invoke
// TfLiteInterpreterAllocateTensors() and should also set the values for the
// input tensors. After successfully calling this function, the values for the
// output tensors will be set.
//
// NOTE: It is possible that the interpreter is not in a ready state to // NOTE: It is possible that the interpreter is not in a ready state to
// evaluate (e.g., if a ResizeInputTensor() has been performed without a call to // evaluate (e.g., if AllocateTensors() hasn't been called, or if a
// ResizeInputTensor() has been performed without a subsequent call to
// AllocateTensors()). // AllocateTensors()).
//
// If the (experimental!) delegate fallback option was enabled in the
// interpreter options, then the interpreter will automatically fall back to
// not using any delegates if execution with delegates fails. For details, see
// TfLiteInterpreterOptionsSetEnableDelegateFallback in c_api_experimental.h.
//
// Returns one of the following status codes:
// - kTfLiteOk: Success. Output is valid.
// - kTfLiteDelegateError: Execution with delegates failed, due to a problem
// with the delegate(s). If fallback was not enabled, output is invalid.
// If fallback was enabled, this return value indicates that fallback
// succeeded, the output is valid, and all delegates previously applied to
// the interpreter have been undone.
// - kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that
// the problem was not with the delegate itself, but rather was
// due to an incompatibility between the delegate(s) and the
// interpreter or model.
// - kTfLiteError: Unexpected/runtime failure. Output is invalid.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterInvoke( TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterInvoke(
TfLiteInterpreter* interpreter); TfLiteInterpreter* interpreter);
...@@ -202,7 +229,7 @@ TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount( ...@@ -202,7 +229,7 @@ TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount(
const TfLiteInterpreter* interpreter); const TfLiteInterpreter* interpreter);
// Returns the tensor associated with the output index. // Returns the tensor associated with the output index.
// REQUIRES: 0 <= input_index < TfLiteInterpreterGetOutputTensorCount(tensor) // REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(tensor)
// //
// NOTE: The shape and underlying data buffer for output tensors may be not // NOTE: The shape and underlying data buffer for output tensors may be not
// be available until after the output tensor has been both sized and allocated. // be available until after the output tensor has been both sized and allocated.
......
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_
#define TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_
#include "builtin_ops.h"
#include "c_api.h"
#include "common.h"
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
/// Resets all variable tensors to zero.
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResetVariableTensors(
TfLiteInterpreter* interpreter);
/// Adds an op registration for a builtin operator.
///
/// Op registrations are used to map ops referenced in the flatbuffer model
/// to executable function pointers (`TfLiteRegistration`s).
///
/// NOTE: The interpreter will make a shallow copy of `registration` internally,
/// so the caller should ensure that its contents (function pointers, etc...)
/// remain valid for the duration of the interpreter's lifetime. A common
/// practice is making the provided `TfLiteRegistration` instance static.
///
/// Code that uses this function should NOT call
/// `TfLiteInterpreterOptionsSetOpResolver` on the same options object.
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddBuiltinOp(
TfLiteInterpreterOptions* options, TfLiteBuiltinOperator op,
const TfLiteRegistration* registration, int32_t min_version,
int32_t max_version);
/// Adds an op registration for a custom operator.
///
/// Op registrations are used to map ops referenced in the flatbuffer model
/// to executable function pointers (`TfLiteRegistration`s).
///
/// NOTE: The interpreter will make a shallow copy of `registration` internally,
/// so the caller should ensure that its contents (function pointers, etc...)
/// remain valid for the duration of any created interpreter's lifetime. A
/// common practice is making the provided `TfLiteRegistration` instance static.
///
/// The lifetime of the string pointed to by `name` must be at least as long
/// as the lifetime of the `TfLiteInterpreterOptions`.
///
/// Code that uses this function should NOT call
/// `TfLiteInterpreterOptionsSetOpResolver` on the same options object.
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddCustomOp(
TfLiteInterpreterOptions* options, const char* name,
const TfLiteRegistration* registration, int32_t min_version,
int32_t max_version);
/// Registers callbacks for resolving builtin or custom operators.
///
/// The `TfLiteInterpreterOptionsSetOpResolver` function provides an alternative
/// method for registering builtin ops and/or custom ops, by providing operator
/// resolver callbacks. Unlike using `TfLiteInterpreterOptionsAddBuiltinOp`
/// and/or `TfLiteInterpreterOptionsAddCustomOp`, these let you register all
/// the operators in a single call.
///
/// Code that uses this function should NOT call
/// `TfLiteInterpreterOptionsAddBuiltinOp` or
/// `TfLiteInterpreterOptionsAddCustomOp` on the same options object.
///
/// If `op_resolver_user_data` is non-null, its lifetime must be at least as
/// long as the lifetime of the `TfLiteInterpreterOptions`.
///
/// WARNING: This is an experimental API and subject to change.
void TfLiteInterpreterOptionsSetOpResolver(
TfLiteInterpreterOptions* options,
const TfLiteRegistration* (*find_builtin_op)(void* user_data,
TfLiteBuiltinOperator op,
int version),
const TfLiteRegistration* (*find_custom_op)(void* user_data,
const char* custom_op,
int version),
void* op_resolver_user_data);
/// Returns a new interpreter using the provided model and options, or null on
/// failure, where the model uses only the operators explicitly added to the
/// options. This is the same as `TFLiteInterpreterCreate` from `c_api.h`,
/// except that the only operators that are supported are the ones registered
/// in `options` via calls to `TfLiteInterpreterOptionsSetOpResolver`,
/// `TfLiteInterpreterOptionsAddBuiltinOp`, and/or
/// `TfLiteInterpreterOptionsAddCustomOp`.
///
/// * `model` must be a valid model instance. The caller retains ownership of
/// the object, and can destroy it immediately after creating the interpreter;
/// the interpreter will maintain its own reference to the underlying model
/// data.
/// * `options` should not be null. The caller retains ownership of the object,
/// and can safely destroy it immediately after creating the interpreter.
///
/// NOTE: The client *must* explicitly allocate tensors before attempting to
/// access input tensor data or invoke the interpreter.
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern TfLiteInterpreter*
TfLiteInterpreterCreateWithSelectedOps(const TfLiteModel* model,
const TfLiteInterpreterOptions* options);
/// Enable or disable the NN API delegate for the interpreter (true to enable).
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetUseNNAPI(
TfLiteInterpreterOptions* options, bool enable);
/// Enable or disable CPU fallback for the interpreter (true to enable).
/// If enabled, TfLiteInterpreterInvoke will do automatic fallback from
/// executing with delegate(s) to regular execution without delegates
/// (i.e. on CPU).
///
/// Allowing the fallback is suitable only if both of the following hold:
/// - The caller is known not to cache pointers to tensor data across
/// TfLiteInterpreterInvoke calls.
/// - The model is not stateful (no variables, no LSTMs) or the state isn't
/// needed between batches.
///
/// When delegate fallback is enabled, TfLiteInterpreterInvoke will
/// behave as follows:
/// If one or more delegates were set in the interpreter options
/// (see TfLiteInterpreterOptionsAddDelegate),
/// AND inference fails,
/// then the interpreter will fall back to not using any delegates.
/// In that case, the previously applied delegate(s) will be automatically
/// undone, and an attempt will be made to return the interpreter to an
/// invokable state, which may invalidate previous tensor addresses,
/// and the inference will be attempted again, using input tensors with
/// the same value as previously set.
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetEnableDelegateFallback(
TfLiteInterpreterOptions* options, bool enable);
/// Sets whether buffer handle output is allowed.
///
/// When using hardware delegation, Interpreter will make the data of output
/// tensors available in `tensor->data` by default. If the application can
/// consume the buffer handle directly (e.g. reading output from OpenGL
/// texture), it can set this flag to false, so Interpreter won't copy the
/// data from buffer handle to CPU memory. WARNING: This is an experimental
/// API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteSetAllowBufferHandleOutput(
const TfLiteInterpreter* interpreter, bool allow_buffer_handle_output);
/// Allow a delegate to look at the graph and modify the graph to handle
/// parts of the graph themselves. After this is called, the graph may
/// contain new nodes that replace one or more of the original nodes.
/// 'delegate' must outlive the interpreter.
/// Use `TfLiteInterpreterOptionsAddDelegate` instead of this unless
/// absolutely required.
/// Returns one of the following three status codes:
/// 1. kTfLiteOk: Success.
/// 2. kTfLiteDelegateError: Delegation failed due to an error in the
/// delegate. The Interpreter has been restored to its pre-delegation state.
/// NOTE: This undoes all delegates previously applied to the Interpreter.
/// 3. kTfLiteError: Unexpected/runtime failure.
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterModifyGraphWithDelegate(
const TfLiteInterpreter* interpreter, TfLiteDelegate* delegate);
/// Returns the tensor index corresponding to the input tensor
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorIndex(
const TfLiteInterpreter* interpreter, int32_t input_index);
/// Returns the tensor index corresponding to the output tensor
///
/// WARNING: This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorIndex(
const TfLiteInterpreter* interpreter, int32_t output_index);
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif // TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file declares types used by the pure C inference API defined in c_api.h,
// some of which are also used in the C++ and C kernel and interpreter APIs.
#ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_
#define TENSORFLOW_LITE_C_C_API_TYPES_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
// library.
#ifdef SWIG
#define TFL_CAPI_EXPORT
#elif defined(TFL_STATIC_LIBRARY_BUILD)
// Static builds need no import/export annotations.
#define TFL_CAPI_EXPORT
#else // not defined TFL_STATIC_LIBRARY_BUILD
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#endif // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
// Note that new error status values may be added in future in order to
// indicate more fine-grained internal states, therefore, applications should
// not rely on status values being members of the enum.
typedef enum TfLiteStatus {
// Success.
kTfLiteOk = 0,
// Generally referring to an error in the runtime (i.e. interpreter)
kTfLiteError = 1,
// Generally referring to an error from a TfLiteDelegate itself.
kTfLiteDelegateError = 2,
// Generally referring to an error in applying a delegate due to
// incompatibility between runtime and delegate, e.g., this error is returned
// when trying to apply a TF Lite delegate onto a model graph that's already
// immutable.
kTfLiteApplicationError = 3,
// Generally referring to serialized delegate data not being found.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataNotFound = 4,
// Generally referring to data-writing issues in delegate serialization.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataWriteError = 5,
// Generally referring to data-reading issues in delegate serialization.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataReadError = 6,
// Generally referring to issues when the TF Lite model has ops that cannot be
// resolved at runtime. This could happen when the specific op is not
// registered or built with the TF Lite framework.
kTfLiteUnresolvedOps = 7,
} TfLiteStatus;
// The data types that a tensor can hold. Values are stable identifiers used
// by the serialized model format, so they must not be renumbered.
typedef enum {
kTfLiteNoType = 0,
kTfLiteFloat32 = 1,
kTfLiteInt32 = 2,
kTfLiteUInt8 = 3,
kTfLiteInt64 = 4,
kTfLiteString = 5,
kTfLiteBool = 6,
kTfLiteInt16 = 7,
kTfLiteComplex64 = 8,
kTfLiteInt8 = 9,
kTfLiteFloat16 = 10,
kTfLiteFloat64 = 11,
kTfLiteComplex128 = 12,
kTfLiteUInt64 = 13,
kTfLiteResource = 14,
kTfLiteVariant = 15,
kTfLiteUInt32 = 16,
} TfLiteType;
// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
// If per-layer quantization is specified this field will still be populated in
// addition to TfLiteAffineQuantization.
// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
// real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteQuantizationParams {
// Multiplier applied when dequantizing.
float scale;
// Quantized value that corresponds to real 0.0.
int32_t zero_point;
} TfLiteQuantizationParams;
#ifdef __cplusplus
} // extern C
#endif
#endif // TENSORFLOW_LITE_C_C_API_TYPES_H_
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
...@@ -13,66 +13,43 @@ See the License for the specific language governing permissions and ...@@ -13,66 +13,43 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
==============================================================================*/ ==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_METAL_DELEGATE_H_ #ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_XNNPACK_DELEGATE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_METAL_DELEGATE_H_ #define TENSORFLOW_LITE_DELEGATES_XNNPACK_XNNPACK_DELEGATE_H_
#ifdef SWIG #include "common.h"
#define TFL_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#endif // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#else
// For "C" 'bool' is not built-in type.
#include <stdbool.h>
#endif // __cplusplus #endif // __cplusplus
typedef struct TfLiteDelegate TfLiteDelegate;
typedef enum {
// waitUntilCompleted
TFLGpuDelegateWaitTypePassive,
// Minimize latency. It uses active spinning instead of mutex and consumes
// additional CPU resources.
TFLGpuDelegateWaitTypeActive,
// Useful when the output is used with GPU pipeline then or if external
// command encoder is set.
TFLGpuDelegateWaitTypeDoNotWait,
// Tries to avoid GPU sleep mode.
TFLGpuDelegateWaitTypeAggressive,
} TFLGpuDelegateWaitType;
// Creates a new delegate instance that need to be destroyed with
// DeleteFlowDelegate when delegate is no longer used by tflite.
typedef struct { typedef struct {
// Allows to quantify tensors, downcast values, process in float16 etc. // Number of threads to use in the thread pool.
bool allow_precision_loss; // 0 or negative value means no thread pool used.
TFLGpuDelegateWaitType wait_type; int32_t num_threads;
} TFLGpuDelegateOptions; } TfLiteXNNPackDelegateOptions;
// Returns a structure with the default XNNPack delegate options.
TFL_CAPI_EXPORT TfLiteXNNPackDelegateOptions
TfLiteXNNPackDelegateOptionsDefault();
// Creates a new delegate instance that need to be destroyed with // Creates a new delegate instance that need to be destroyed with
// `TFLDeleteTfLiteGpuDelegate` when delegate is no longer used by TFLite. // `TfLiteXNNPackDelegateDelete` when delegate is no longer used by TFLite.
// When `options` is set to `nullptr`, the following default values are used: // When `options` is set to `nullptr`, the following default values are used:
// .precision_loss_allowed = false, TFL_CAPI_EXPORT TfLiteDelegate* TfLiteXNNPackDelegateCreate(
// .wait_type = kPassive, const TfLiteXNNPackDelegateOptions* options);
TFL_CAPI_EXPORT extern TfLiteDelegate* TFLGpuDelegateCreate(
const TFLGpuDelegateOptions* options); // Returns the pthreadpool_t object used for parallelization in XNNPACK.
// Can return NULL if the XNNPack delegate is single-threaded.
//
// WARNING: This API is experimental and subject to change.
TFL_CAPI_EXPORT void* TfLiteXNNPackDelegateGetThreadPool(
TfLiteDelegate* delegate);
// Destroys a delegate created with `TFLGpuDelegateCreate` call. // Destroys a delegate created with `TfLiteXNNPackDelegateCreate` call.
TFL_CAPI_EXPORT extern void TFLGpuDelegateDelete(TfLiteDelegate* delegate); TFL_CAPI_EXPORT void TfLiteXNNPackDelegateDelete(TfLiteDelegate* delegate);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" }
#endif // __cplusplus #endif // __cplusplus
#endif // TENSORFLOW_LITE_DELEGATES_GPU_METAL_DELEGATE_H_ #endif // TENSORFLOW_LITE_DELEGATES_XNNPACK_XNNPACK_DELEGATE_H_
...@@ -2,5 +2,4 @@ framework module TensorFlowLiteC { ...@@ -2,5 +2,4 @@ framework module TensorFlowLiteC {
umbrella header "TensorFlowLiteC.h" umbrella header "TensorFlowLiteC.h"
export * export *
module * { export * } module * { export * }
link framework "Metal"
} }
This diff is collapsed.
This diff is collapsed.
...@@ -44,7 +44,7 @@ extension InterpreterError: LocalizedError { ...@@ -44,7 +44,7 @@ extension InterpreterError: LocalizedError {
case .failedToCreateInterpreter: case .failedToCreateInterpreter:
return "Failed to create the interpreter." return "Failed to create the interpreter."
case .failedToResizeInputTensor(let index): case .failedToResizeInputTensor(let index):
return "Failed to resize input tesnor at index \(index)." return "Failed to resize input tensor at index \(index)."
case .failedToCopyDataToInputTensor: case .failedToCopyDataToInputTensor:
return "Failed to copy data to input tensor." return "Failed to copy data to input tensor."
case .failedToAllocateTensors: case .failedToAllocateTensors:
......
...@@ -73,6 +73,8 @@ extension Tensor { ...@@ -73,6 +73,8 @@ extension Tensor {
case float16 case float16
/// A 32-bit single precision floating point. /// A 32-bit single precision floating point.
case float32 case float32
/// A 64-bit double precision floating point.
case float64
/// Creates a new instance from the given `TfLiteType` or `nil` if the data type is unsupported /// Creates a new instance from the given `TfLiteType` or `nil` if the data type is unsupported
/// or could not be determined because there was an error. /// or could not be determined because there was an error.
...@@ -94,6 +96,8 @@ extension Tensor { ...@@ -94,6 +96,8 @@ extension Tensor {
self = .float16 self = .float16
case kTfLiteFloat32: case kTfLiteFloat32:
self = .float32 self = .float32
case kTfLiteFloat64:
self = .float64
case kTfLiteNoType: case kTfLiteNoType:
fallthrough fallthrough
default: default:
......
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment