// silica-viewer/SilicaViewer/Document.swift

import Cocoa
import ZIPFoundation
import CoreFoundation
import Accelerate
import CoreMedia
struct SilicaChunk {
var x: Int = 0
var y: Int = 0
var image: CGImage?
}
// all supported Procreate blend modes
enum BlendMode : Int {
case Normal = 0,
Multiply = 1,
Screen = 2,
Add = 3,
Lighten = 4,
Exclusion = 5,
Difference = 6,
Subtract = 7,
LinearBurn = 8,
ColorDodge = 9,
ColorBurn = 10,
Overlay = 11,
HardLight = 12,
Color = 13,
Luminosity = 14,
Hue = 15,
Saturation = 16,
SoftLight = 17,
// what is this mysterious 18?
Darken = 19,
// extended modes
HardMix = 20,
VividLight = 21,
LinearLight = 22,
PinLight = 23,
LighterColor = 24,
DarkerColor = 25,
Divide = 26
}
struct SilicaLayerData {
var blendMode: BlendMode = .Normal
var chunks: [SilicaChunk] = []
var opacity: Double = 1.0
var hidden: Bool = false
}
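// Equality is shallow (name and clipped only); it appears to exist just so
// getAllClippingLayers(layer:) can locate a layer via firstIndex(of:).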
struct SilicaLayer : Equatable {
static func == (lhs: SilicaLayer, rhs: SilicaLayer) -> Bool {
return lhs.name == rhs.name && lhs.clipped == rhs.clipped
}
var name: String = ""
var data: SilicaLayerData = SilicaLayerData()
var mask: SilicaLayerData?
var clipped: Bool = false
}
struct SilicaDocument {
var trackedTime: Int = 0
var tileSize: Int = 0
var orientation: Int = 0
var flippedHorizontally: Bool = false
var flippedVertically: Bool = false
var name: String = ""
var authorName: String = ""
var strokeCount: Int = 0
var backgroundColor: CGColor = .white
var colorSpace: CGColorSpace = CGColorSpaceCreateDeviceRGB()
var width: Int = 0
var height: Int = 0
var layers: [SilicaLayer] = []
var videoFrame: (Int, Int) = (0, 0)
lazy var nsSize: NSSize = {
return NSSize(width: width, height: height)
}()
lazy var cgSize: CGSize = {
return CGSize(width: width, height: height)
}()
lazy var cgRect: CGRect = {
return CGRect(origin: .zero, size: cgSize)
}()
}
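// Extracts the integer value of a CFKeyedArchiverUID object reference.
// valueForKeyedArchiverUID is assumed to be a small helper defined elsewhere in
// this project, since CFKeyedArchiverUID has no public Swift API.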
func objectRefGetValue2(_ objectRef: CFTypeRef) -> UInt32 {
let val = unsafeBitCast(objectRef, to: UInt64.self)
return valueForKeyedArchiverUID(val)
}
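// NSDocument subclass for .procreate files: a ZIP archive containing an
// NSKeyedArchiver plist (document metadata and the layer tree) plus
// LZO-compressed tile chunks, which this class parses and composites.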
class Document: NSDocument {
var data: Data? // the whole file is kept in memory so worker threads can reopen the archive from it
var dict: NSDictionary?
var info = SilicaDocument()
var rows: Int = 0
var columns: Int = 0
var remainderWidth: Int = 0
var remainderHeight: Int = 0
struct SilicaParsingError: Error, LocalizedError {
enum Kind {
case invalid
}
let kind: Kind
let filename: URL?
public var errorDescription: String? {
switch self.kind {
case .invalid:
return filename!.lastPathComponent + " is an invalid Silica Document."
}
}
}
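// Presents a parsing error on the main queue; reading happens off the main
// thread (see canConcurrentlyReadDocuments), hence the synchronous hop.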
func throwError(_ error: SilicaParsingError.Kind) {
DispatchQueue.main.sync {
let _ = presentError(SilicaParsingError(kind: error, filename: fileURL))
}
}
override init() {
super.init()
}
override func makeWindowControllers() {
let storyboard = NSStoryboard(name: NSStoryboard.Name("Main"), bundle: nil)
let windowController = storyboard.instantiateController(withIdentifier: NSStoryboard.SceneIdentifier("Document Window Controller")) as! NSWindowController
self.addWindowController(windowController)
}
override class func canConcurrentlyReadDocuments(ofType: String) -> Bool {
return ofType == "com.procreate"
}
func getIdealFilename() -> String {
if (!(info.name.isEmpty)) {
return info.name
} else {
return fileURL!.deletingPathExtension().lastPathComponent
}
}
/*
Pass in an object from the $objects array; every archived object carries a $class key referencing its class entry.
*/
func getDocumentClassName(dict: NSDictionary) -> String? {
let objectsArray = self.dict?["$objects"] as! NSArray
if let value = dict["$class"] {
let classObjectId = objectRefGetValue2(value as CFTypeRef)
let classObject = objectsArray[Int(classObjectId)] as! NSDictionary
return classObject["$classname"] as? String
}
return nil
}
/*
Returns the actual tile size at tile coordinate (x, y), trimming edge tiles by the remainder between the tile grid and the image size.
*/
func getTileSize(_ x: Int, _ y: Int) -> (Int, Int) {
var width: Int = info.tileSize
var height: Int = info.tileSize
if((x + 1) == columns) {
width = info.tileSize - remainderWidth
}
if(y == rows) {
height = info.tileSize - remainderHeight
}
return (width, height)
}
/*
Converts a CFKeyedArchiverUID from an NSKeyedArchiver plist into an Int for indexing the $objects array or a dictionary.
*/
func getClassID(id: Any?) -> Int {
return Int(objectRefGetValue2(id! as CFTypeRef))
}
/*
Parses a chunk filename such as "1~1.chunk" into integer tile coordinates, here (1, 2): the y component is shifted down by one row.
*/
func parseChunkFilename(_ filename: String) -> (Int, Int)? {
let pathURL = URL(fileURLWithPath: filename)
let pathComponents = pathURL.lastPathComponent.replacingOccurrences(of: ".chunk", with: "").components(separatedBy: "~")
if let x = Int(pathComponents[0]), let y = Int(pathComponents[1]) {
return (x, y + 1)
} else {
return nil
}
}
func getChunkRect(_ chunk: SilicaChunk) -> NSRect {
let x = chunk.x
var y = chunk.y
let (width, height) = getTileSize(x, y)
if y == rows {
y = 0
}
return NSRect(x: info.tileSize * x, y: info.height - (info.tileSize * y), width: width, height: height)
}
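// Procreate appears to split blend modes across two archived fields: "blend" for
// the classic modes and "extendedBlend" for the newer ones. When "blend" is 0
// (Normal) but "extendedBlend" disagrees, the extended value wins.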
func parseRawBlendMode(blendMode: Int, extendedBlend: Int) -> BlendMode? {
if blendMode == 0 && blendMode != extendedBlend {
return BlendMode(rawValue: extendedBlend)
} else {
return BlendMode(rawValue: blendMode)
}
}
func getBlendKernel(_ layer: SilicaLayer) -> CIBlendKernel? {
switch(layer.data.blendMode) {
case .Normal:
return .sourceOver
case .Multiply:
return .multiply
case .Screen:
return .screen
case .Add:
return .componentAdd
case .Lighten:
return .lighten
case .Exclusion:
return .exclusion
case .Difference:
return .difference
case .Subtract:
return .subtract
case .LinearBurn:
return .linearBurn
case .ColorDodge:
return .colorDodge
case .ColorBurn:
return .colorBurn
case .Overlay:
return .overlay
case .HardLight:
return .hardLight
case .Color:
return .color
case .Luminosity:
return .luminosity
case .Hue:
return .hue
case .Saturation:
return .saturation
case .SoftLight:
return .softLight
case .Darken:
return .darken
case .HardMix:
return .hardMix
case .VividLight:
return .vividLight
case .LinearLight:
return .linearLight
case .PinLight:
return .pinLight
case .LighterColor:
return .lighterColor
case .DarkerColor:
return .darkerColor
case .Divide:
return .divide
}
}
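// Decodes a single archived layer: reads its metadata, recursively decodes its
// mask (as a one-channel layer), then finds every chunk entry in the ZIP whose
// path contains the layer's UUID and LZO-decompresses it into a CGImage.
// Decompression runs in parallel; each worker reopens the archive from the
// in-memory data rather than sharing one Archive across threads.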
func parseSilicaLayer(archive: Archive, dict: NSDictionary, isMask: Bool) -> SilicaLayer? {
let objectsArray = self.dict?["$objects"] as! NSArray
if getDocumentClassName(dict: dict) == LayerClassName {
var layer = SilicaLayer()
if let val = dict["name"] {
let NameClassID = getClassID(id: val)
let NameClass = objectsArray[NameClassID] as! NSString
layer.name = NameClass as String
}
let UUIDKey = dict["UUID"]
let UUIDClassID = getClassID(id: UUIDKey)
let UUIDClass = objectsArray[UUIDClassID] as! NSString
let maskKey = dict["mask"]
let maskClassID = getClassID(id: maskKey)
let maskClass = objectsArray[maskClassID]
layer.data.blendMode = parseRawBlendMode(blendMode: (dict["blend"] as? NSNumber)!.intValue, extendedBlend: (dict["extendedBlend"] as? NSNumber)!.intValue)!
layer.data.opacity = (dict["opacity"] as? NSNumber)!.doubleValue
layer.data.hidden = (dict["hidden"] as? Bool)!
layer.clipped = (dict["clipped"] as? Bool)!
if maskClassID != 0 {
layer.mask = parseSilicaLayer(archive: archive, dict: maskClass as! NSDictionary, isMask: true)?.data
}
var chunkPaths: [String] = []
archive.forEach { (entry: Entry) in
if entry.path.contains(String(UUIDClass)) {
chunkPaths.append(entry.path)
}
}
layer.data.chunks = Array(repeating: SilicaChunk(), count: chunkPaths.count)
let dispatchGroup = DispatchGroup()
let queue = DispatchQueue(label: "imageWork")
DispatchQueue.concurrentPerform(iterations: chunkPaths.count) { (i: Int) in
guard let threadArchive = Archive(data: self.data!, accessMode: .read) else {
return
}
let threadEntry = threadArchive[chunkPaths[i]]
guard let (x, y) = parseChunkFilename(threadEntry!.path) else {
return
}
let (width, height) = getTileSize(x, y)
let numChannels = isMask ? 1 : 4
let byteSize = width * height * numChannels
let uncompressedMemory = UnsafeMutablePointer<UInt8>.allocate(capacity: byteSize)
defer { uncompressedMemory.deallocate() } // Data(bytes:count:) copies, so the scratch buffer can be freed on every exit path
guard let lzoData = readData(archive: threadArchive, entry: threadEntry!) else {
return
}
lzoData.withUnsafeBytes({ (bytes: UnsafeRawBufferPointer) -> Void in
var len = lzo_uint(byteSize)
lzo1x_decompress_safe(bytes.baseAddress!.assumingMemoryBound(to: uint8.self), lzo_uint(lzoData.count), uncompressedMemory, &len, nil)
})
let imageData = Data(bytes: uncompressedMemory, count: byteSize)
let render: CGColorRenderingIntent = .defaultIntent
let rgbColorSpace = isMask ? CGColorSpaceCreateDeviceGray() : info.colorSpace
let bitmapInfo = CGBitmapInfo(rawValue: (isMask ? CGImageAlphaInfo.none : CGImageAlphaInfo.premultipliedLast).rawValue).union(.byteOrder32Big)
let providerRef: CGDataProvider? = CGDataProvider(data: imageData as CFData)
guard let cgimage = CGImage(width: width, height: height, bitsPerComponent: 8, bitsPerPixel: 8 * numChannels, bytesPerRow: width * numChannels, space: rgbColorSpace, bitmapInfo: bitmapInfo, provider: providerRef!, decode: nil, shouldInterpolate: false, intent: render) else {
return
}
queue.async(group: dispatchGroup) {
layer.data.chunks[i].image = cgimage
layer.data.chunks[i].x = x
layer.data.chunks[i].y = y
}
}
dispatchGroup.wait()
return layer
}
return nil
}
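// Parses an archived pair string such as "{2048, 2732}" into (2048, 2732).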
func parsePairString(_ str: String) -> (Int, Int)? {
let sizeComponents = str.replacingOccurrences(of: "{", with: "").replacingOccurrences(of: "}", with: "").components(separatedBy: ", ")
if sizeComponents.count == 2, let width = Int(sizeComponents[0]), let height = Int(sizeComponents[1]) {
return (width, height)
} else {
return nil
}
}
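// Reads the root SilicaDocument object: canvas and tile geometry, orientation and
// flip flags, video frame size, color profile, background color, stroke count,
// name/author, and finally every layer in the layer list.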
func parseSilicaDocument(archive: Archive, dict: NSDictionary) {
let objectsArray = self.dict?["$objects"] as! NSArray
if getDocumentClassName(dict: dict) == DocumentClassName {
info.trackedTime = (dict[TrackedTimeKey] as! NSNumber).intValue
info.tileSize = (dict[TileSizeKey] as! NSNumber).intValue
info.orientation = (dict[OrientationKey] as! NSNumber).intValue
info.flippedHorizontally = (dict[FlippedHorizontallyKey] as! NSNumber).boolValue
info.flippedVertically = (dict[FlippedVerticallyKey] as! NSNumber).boolValue
let videoResolutionClassKey = dict["SilicaDocumentVideoSegmentInfoKey"]
let videoResolutionClassID = getClassID(id: videoResolutionClassKey)
let videoResolution = objectsArray[videoResolutionClassID] as! NSDictionary
let frameSizeClassKey = videoResolution["frameSize"]
let frameSizeClassID = getClassID(id: frameSizeClassKey)
let frameSize = objectsArray[frameSizeClassID] as! String
info.videoFrame = parsePairString(frameSize)!
let colorProfileClassKey = dict["colorProfile"]
if colorProfileClassKey != nil {
let colorProfileClassID = getClassID(id: colorProfileClassKey)
let colorProfile = objectsArray[colorProfileClassID] as! NSDictionary
let colorProfileNameClassKey = colorProfile["SiColorProfileArchiveICCNameKey"]
let colorProfileNameClassID = getClassID(id: colorProfileNameClassKey)
let colorProfileName = objectsArray[colorProfileNameClassID] as! NSString
// we only support the basic "Display P3" color space... does Procreate actually store the ICC data??
if colorProfileName == "Display P3" {
info.colorSpace = CGColorSpace(name: CGColorSpace.displayP3)!
}
} else {
info.colorSpace = CGColorSpace(name: CGColorSpace.displayP3)!
}
let backgroundClassKey = dict["backgroundColor"]
let backgroundClassID = getClassID(id: backgroundClassKey)
let background = objectsArray[backgroundClassID] as! NSData
var backgroundArray: [Float] = [0.0, 0.0, 0.0, 0.0]
background.getBytes(&backgroundArray, length: 16)
let backgroundCgArray: [CGFloat] = [CGFloat(backgroundArray[0]), CGFloat(backgroundArray[1]), CGFloat(backgroundArray[2]), CGFloat(backgroundArray[3])]
info.backgroundColor = CGColor(colorSpace: info.colorSpace, components: backgroundCgArray)!
let strokeClassKey = dict[StrokeCountKey]
let strokeClassID = getClassID(id: strokeClassKey)
let strokeCount = objectsArray[strokeClassID] as! NSNumber
info.strokeCount = Int(truncating: strokeCount)
let nameClassKey = dict[NameKey]
let nameClassID = getClassID(id: nameClassKey)
let nameString = objectsArray[nameClassID] as! NSString
if nameString != "$null" {
info.name = nameString as String
}
let authorClassKey = dict[AuthorNameKey]
if authorClassKey != nil {
let authorClassID = getClassID(id: authorClassKey)
let authorString = objectsArray[authorClassID] as! String
if authorString != "$null" {
info.authorName = authorString
}
}
let sizeClassKey = dict[SizeKey]
let sizeClassID = getClassID(id: sizeClassKey)
let sizeString = objectsArray[sizeClassID] as! String
let (width, height) = parsePairString(sizeString)!
info.width = width
info.height = height
columns = Int(ceil(Float(info.width) / Float(info.tileSize)))
rows = Int(ceil(Float(info.height) / Float(info.tileSize))) + 1 // TODO: figure out why the extra row is needed
if info.width % info.tileSize != 0 {
remainderWidth = (columns * info.tileSize) - info.width
}
if info.height % info.tileSize != 0 {
remainderHeight = (rows * info.tileSize) - info.height
}
let layersClassKey = dict[LayersKey]
let layersClassID = getClassID(id: layersClassKey)
let layersClass = objectsArray[layersClassID] as! NSDictionary
let array = layersClass["NS.objects"] as! NSArray
for object in array {
let layerClassID = getClassID(id: object)
let layerClass = objectsArray[layerClassID] as! NSDictionary
guard let layer = parseSilicaLayer(archive: archive, dict: layerClass, isMask: false) else { return }
info.layers.append(layer)
}
}
}
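// Validates the keyed-archive version, then follows $top.root into the $objects
// table to reach the root SilicaDocument dictionary.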
func parseDocument(archive: Archive, dict: NSDictionary) {
// sanity-check that this really is an NSKeyedArchiver plist of the version we expect
if let value = dict["$version"] {
if (value as! Int) != NSKeyedArchiveVersion {
throwError(.invalid)
return
}
self.dict = dict
let objectsArray = dict["$objects"] as! NSArray
// follow the $top root reference, which always points at the SilicaDocument object.
let topObject = dict["$top"] as! NSDictionary
let topClassID = objectRefGetValue2(topObject["root"] as CFTypeRef)
let topObjectClass = objectsArray[Int(topClassID)] as! NSDictionary
parseSilicaDocument(archive: archive, dict: topObjectClass)
}
}
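// NSDocument entry point: opens the .procreate ZIP, pulls out the archived plist
// at DocumentArchivePath (presumably Document.archive inside the ZIP), decodes it
// as a binary property list, and hands it to parseDocument.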
override func read(from data: Data, ofType typeName: String) throws {
self.data = data
guard let archive = Archive(data: data, accessMode: Archive.AccessMode.read) else {
throwError(.invalid)
return
}
guard let documentEntry = archive[DocumentArchivePath] else {
throwError(.invalid)
return
}
guard let documentData = readData(archive: archive, entry: documentEntry) else {
throwError(.invalid)
return
}
var plistFormat = PropertyListSerialization.PropertyListFormat.binary
guard let propertyList = try? PropertyListSerialization.propertyList(from: documentData, options: [], format: &plistFormat) else {
throwError(.invalid)
return
}
parseDocument(archive: archive, dict: propertyList as! NSDictionary)
}
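// Renders a layer's mask chunks into a full-canvas 16-bit grayscale image.
// Note: this force-unwraps the mask context, so it assumes the layer has a mask.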
func makeBlendImage(_ layer: SilicaLayer) -> CGImage {
var maskContext: CGContext?
if layer.mask != nil {
let grayColorSpace = CGColorSpaceCreateDeviceGray()
let maskBitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue).union(.byteOrder16Big)
maskContext = CGContext(data: nil, width: info.width, height: info.height, bitsPerComponent: 16, bytesPerRow: 0, space: grayColorSpace, bitmapInfo: maskBitmapInfo.rawValue)
maskContext?.setFillColor(.white)
maskContext?.fill(info.cgRect)
for chunk in layer.mask!.chunks {
maskContext?.draw(chunk.image!, in: getChunkRect(chunk))
}
}
return (maskContext?.makeImage())!
}
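// Flattens a layer's chunks into one full-canvas CGImage, ignoring its opacity
// and blend mode.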
func simpleDrawLayer(_ layer : SilicaLayer) -> CGImage? {
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue).union(.byteOrder32Big)
// draw the layer's chunks into a single full-canvas context
let layerContext = CGContext(data: nil, width: info.width, height: info.height, bitsPerComponent: 8, bytesPerRow: info.width * 4, space: info.colorSpace, bitmapInfo: bitmapInfo.rawValue)
layerContext?.clear(info.cgRect)
for chunk in layer.data.chunks {
layerContext?.setAlpha(1.0)
layerContext?.setBlendMode(.normal)
if !layer.data.hidden {
layerContext?.draw(chunk.image!, in: getChunkRect(chunk))
}
}
return layerContext?.makeImage()
}
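// Rasterizes the layer at its own opacity and blends it over previousImage with
// the layer's CIBlendKernel. previousImage is not actually mutated despite being inout.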
func blendLayer(_ layer : SilicaLayer, previousImage : inout CGImage?) -> CIImage {
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue).union(.byteOrder32Big)
// rasterize the layer into a full-canvas context before blending
let layerContext = CGContext(data: nil, width: info.width, height: info.height, bitsPerComponent: 8, bytesPerRow: info.width * 4, space: info.colorSpace, bitmapInfo: bitmapInfo.rawValue)
layerContext?.clear(info.cgRect)
let kernel = getBlendKernel(layer)
Swift.print(layer.name + " - " + kernel!.name)
for chunk in layer.data.chunks {
layerContext?.setAlpha(CGFloat(layer.data.opacity))
layerContext?.setBlendMode(.normal)
if !layer.data.hidden {
layerContext?.draw(chunk.image!, in: getChunkRect(chunk))
}
}
let layerImage = layerContext?.makeImage()
// apply image
return kernel!.apply(foreground: CIImage(cgImage: layerImage!), background: previousImage == nil ? CIImage(color: .clear) : CIImage(cgImage: previousImage!), colorSpace: info.colorSpace)!
}
// Returns all of the layers that clip onto this one: the contiguous run of clipped layers directly above it.
func getAllClippingLayers(layer: SilicaLayer) -> [SilicaLayer] {
var clippingLayers : [SilicaLayer] = []
let layers : [SilicaLayer] = info.layers.reversed()
let index = layers.firstIndex(of: layer)! + 1
if index >= layers.count {
return clippingLayers
}
for layerIndex in index..<layers.count {
//Swift.print("checking " + layers[layerIndex].name + " is clipping = " + layers[layerIndex].clipped.description)
if(layers[layerIndex].clipped) {
clippingLayers.append(layers[layerIndex])
} else {
break
}
}
return clippingLayers
}
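// Variant of blendLayer(_:previousImage:) that lets the caller choose whether the
// layer's own opacity is baked into the rasterized image.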
func blendLayer(_ layer : SilicaLayer, previousImage : inout CGImage?, applyOpacity : Bool) -> CIImage {
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue).union(.byteOrder32Big)
// rasterize the layer into a full-canvas context before blending
let layerContext = CGContext(data: nil, width: info.width, height: info.height, bitsPerComponent: 8, bytesPerRow: info.width * 4, space: info.colorSpace, bitmapInfo: bitmapInfo.rawValue)
layerContext?.clear(info.cgRect)
let kernel = getBlendKernel(layer)
for chunk in layer.data.chunks {
if applyOpacity {
layerContext?.setAlpha(CGFloat(layer.data.opacity))
} else {
layerContext?.setAlpha(1.0)
}
layerContext?.setBlendMode(.normal)
if !layer.data.hidden {
layerContext?.draw(chunk.image!, in: getChunkRect(chunk))
}
}
let layerImage = layerContext?.makeImage()
// apply image
return kernel!.apply(foreground: CIImage(cgImage: layerImage!), background: previousImage == nil ? CIImage(color: .clear) : CIImage(cgImage: previousImage!), colorSpace: info.colorSpace)!
}
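// Builds the final composite: fills the background color, then walks the layers
// bottom-up, skipping hidden and clipped ones; each remaining layer is rasterized,
// has its clipping layers blended on with .sourceAtop, is masked if needed, and is
// blended onto the running image with its CIBlendKernel. The result is then
// rotated/flipped according to the document's orientation flags.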
func makeComposite() -> NSImage? {
// create the final composite output image
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue).union(.byteOrder32Big)
let ccgContext = CGContext(data: nil, width: info.width, height: info.height, bitsPerComponent: 8, bytesPerRow: info.width * 4, space: info.colorSpace, bitmapInfo: bitmapInfo.rawValue)
ccgContext?.setFillColor(info.backgroundColor)
ccgContext?.fill(info.cgRect)
let context = CIContext()
guard let cgImage = ccgContext?.makeImage() else {
return nil
}
var masterImage = CIImage(cgImage: cgImage)
var previousImage: CGImage? = nil
for layer in info.layers.reversed() {
if !layer.data.hidden && !layer.clipped {
// start by creating a new layer composite image, needed for image masking
let layerContext = CGContext(data: nil, width: info.width, height: info.height, bitsPerComponent: 8, bytesPerRow: info.width * 4, space: info.colorSpace, bitmapInfo: bitmapInfo.rawValue)
layerContext?.clear(info.cgRect)
var maskContext: CGContext?
let kernel = getBlendKernel(layer)
if layer.mask != nil {
let grayColorSpace = CGColorSpaceCreateDeviceGray()
let maskBitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue).union(.byteOrder16Big)
maskContext = CGContext(data: nil, width: info.width, height: info.height, bitsPerComponent: 16, bytesPerRow: 0, space: grayColorSpace, bitmapInfo: maskBitmapInfo.rawValue)
maskContext?.setFillColor(.white)
maskContext?.fill(info.cgRect)
for chunk in layer.mask!.chunks {
maskContext?.draw(chunk.image!, in: getChunkRect(chunk))
}
}
for chunk in layer.data.chunks {
layerContext?.setAlpha(CGFloat(layer.data.opacity))
layerContext?.setBlendMode(.normal)
if !layer.data.hidden {
layerContext?.draw(chunk.image!, in: getChunkRect(chunk))
}
}
let clippingLayers = getAllClippingLayers(layer: layer)
if !clippingLayers.isEmpty {
let layerImage = layerContext?.makeImage()
var clippedMaster: CGImage? = layerImage
for layer in clippingLayers {
// to clip, gather all of the clipping layers in order and blend them onto this layer first...
let temporaryClippedMaster = blendLayer(layer, previousImage: &clippedMaster)
clippedMaster = context.createCGImage(temporaryClippedMaster, from: info.cgRect, format: .RGBA8, colorSpace: info.colorSpace)
}
layerContext?.setAlpha(1.0)
layerContext?.setBlendMode(.sourceAtop)
layerContext?.draw(clippedMaster!, in: info.cgRect)
}
let layerImage = layerContext?.makeImage()
if layer.mask != nil && maskContext != nil {
let maskImage = (maskContext?.makeImage())!
let newImage = layerImage!.masking(maskImage)!
previousImage = newImage
} else {
previousImage = layerImage
}
// apply image
masterImage = kernel!.apply(foreground: CIImage(cgImage: previousImage!), background: masterImage, colorSpace: info.colorSpace)!
}
}
guard let finalCgImage = context.createCGImage(masterImage, from: info.cgRect, format: .RGBA8, colorSpace: info.colorSpace) else {
return nil
}
var image = NSImage(cgImage: finalCgImage, size: info.nsSize)
if info.orientation == 3 {
image = image.imageRotatedByDegreess(degrees: 90)
} else if info.orientation == 4 {
image = image.imageRotatedByDegreess(degrees: -90)
}
if info.flippedHorizontally && (info.orientation == 1 || info.orientation == 2) {
image = image.flipHorizontally()
} else if info.flippedHorizontally && (info.orientation == 3 || info.orientation == 4) {
image = image.flipVertically()
} else if info.flippedVertically && (info.orientation == 1 || info.orientation == 2) {
image = image.flipVertically()
} else if !info.flippedVertically && (info.orientation == 3 || info.orientation == 4) {
image = image.flipHorizontally()
}
return image
}
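// Returns the pre-rendered thumbnail stored in the archive at ThumbnailPath,
// avoiding a full composite.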
func makeThumbnail() -> NSImage? {
guard let archive = Archive(data: data!, accessMode: Archive.AccessMode.read) else {
return nil
}
guard let entry = archive[ThumbnailPath] else {
return nil
}
guard let thumbnailData = readData(archive: archive, entry: entry) else {
return nil
}
return NSImage(data: thumbnailData)
}
}
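// NSImage helpers used above to honor the document's orientation and flip flags.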
public extension NSImage {
func imageRotatedByDegreess(degrees:CGFloat) -> NSImage {
var imageBounds = NSMakeRect(0.0, 0.0, size.width, size.height)
let pathBounds = NSBezierPath(rect: imageBounds)
var transform = NSAffineTransform()
transform.rotate(byDegrees: degrees)
pathBounds.transform(using: transform as AffineTransform)
let rotatedBounds:NSRect = NSMakeRect(NSZeroPoint.x, NSZeroPoint.y, pathBounds.bounds.size.width, pathBounds.bounds.size.height )
let rotatedImage = NSImage(size: rotatedBounds.size)
imageBounds.origin.x = NSMidX(rotatedBounds) - (NSWidth(imageBounds) / 2)
imageBounds.origin.y = NSMidY(rotatedBounds) - (NSHeight(imageBounds) / 2)
transform = NSAffineTransform()
transform.translateX(by: +(NSWidth(rotatedBounds) / 2 ), yBy: +(NSHeight(rotatedBounds) / 2))
transform.rotate(byDegrees: degrees)
transform.translateX(by: -(NSWidth(rotatedBounds) / 2 ), yBy: -(NSHeight(rotatedBounds) / 2))
rotatedImage.lockFocus()
transform.concat()
self.draw(in: imageBounds, from: .zero, operation: .copy, fraction: 1.0)
rotatedImage.unlockFocus()
return rotatedImage
}
func flipHorizontally() -> NSImage {
let flippedImage = NSImage(size: size)
flippedImage.lockFocus()
let transform = NSAffineTransform()
transform.translateX(by: size.width, yBy: 0.0)
transform.scaleX(by: -1.0, yBy: 1.0)
transform.concat()
let rect = NSMakeRect(0, 0, size.width, size.height)
self.draw(at: .zero, from: rect, operation: .sourceOver, fraction: 1.0)
flippedImage.unlockFocus()
return flippedImage
}
func flipVertically() -> NSImage {
let flippedImage = NSImage(size: size)
flippedImage.lockFocus()
let transform = NSAffineTransform()
transform.translateX(by: 0.0, yBy: size.height)
transform.scaleX(by: 1.0, yBy: -1.0)
transform.concat()
let rect = NSMakeRect(0, 0, size.width, size.height)
self.draw(at: .zero, from: rect, operation: .sourceOver, fraction: 1.0)
flippedImage.unlockFocus()
return flippedImage
}
}
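// CIFilter wrapper around a custom Metal color kernel ("colorFilterKernel" in the
// app's default.metallib); not referenced elsewhere in this file.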
class ColorFilter: CIFilter {
// the input image to be filtered
var inputImage: CIImage?
// lazily compile the custom Metal color kernel from the bundled metallib
static var kernel: CIKernel = { () -> CIColorKernel in
guard let url = Bundle.main.url(forResource: "default",
withExtension: "metallib"),
let data = try? Data(contentsOf: url) else {
fatalError("Unable to load metallib")
}
guard let kernel = try? CIColorKernel(
functionName: "colorFilterKernel",
fromMetalLibraryData: data) else {
fatalError("Unable to create color kernel")
}
return kernel
}()
// run the kernel over the input image's full extent
override var outputImage: CIImage? {
guard let inputImage = inputImage else { return nil }
return ColorFilter.kernel.apply(
extent: inputImage.extent,
roiCallback: { _, rect in
return rect
},
arguments: [inputImage])
}
}
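// Redraws the image into a 16-bit grayscale context, e.g. to produce a masking image.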
extension CGImage {
func toGrayscale() -> CGImage? {
let rect = CGRect(x: 0, y: 0, width: width, height: height)
let grayColorSpace = CGColorSpaceCreateDeviceGray()
let maskBitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue).union(.byteOrder16Big)
let ccgContext = CGContext(data: nil, width: width, height: height, bitsPerComponent: 16, bytesPerRow: width * 2, space: grayColorSpace, bitmapInfo: maskBitmapInfo.rawValue)
ccgContext?.setFillColor(.black)
ccgContext?.fill(rect)
ccgContext?.draw(self, in: rect)
return ccgContext?.makeImage()
}
}