[AltStore] Loads images remotely rather than including them in app bundle

This commit is contained in:
Riley Testut
2019-08-20 19:06:03 -05:00
parent a1c9049b4b
commit 7d48b831ed
58 changed files with 5168 additions and 334 deletions
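For context, the app-side call that this vendored code enables looks roughly like the sketch below (a hypothetical call site, not part of the diff): it loads an icon over the network through the `ImagePipeline` API added in Pods/Nuke/Sources/ImagePipeline.swift instead of reading it from the app bundle. The URL and the image view are placeholders.

import Nuke
import UIKit

// Hypothetical call site (not part of the commit); URL and image view are placeholders.
func loadAppIcon(into imageView: UIImageView) {
    let url = URL(string: "https://example.com/apps/delta/icon.png")!
    ImagePipeline.shared.loadImage(with: url) { response, error in
        // Nuke delivers the completion on the main queue once the load finishes or fails.
        if let image = response?.image {
            imageView.image = image
        } else {
            print("Image load failed:", error?.localizedDescription ?? "unknown error")
        }
    }
}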

441
Pods/Nuke/Sources/DataCache.swift generated Normal file

@@ -0,0 +1,441 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
// MARK: - DataCaching
/// Data cache.
///
/// - warning: The implementation must be thread safe.
public protocol DataCaching {
/// Retrieves data from cache for the given key.
func cachedData(for key: String) -> Data?
/// Stores data for the given key.
/// - note: The implementation must return immediately and store data
/// asynchronously.
func storeData(_ data: Data, for key: String)
}
// MARK: - DataCache
/// Data cache backed by a local storage.
///
/// The DataCache uses LRU cleanup policy (least recently used items are removed
/// first). The elements stored in the cache are automatically discarded if
/// either *cost* or *count* limit is reached. The sweeps are performed periodically.
///
/// DataCache always writes and removes data asynchronously. It also allows for
/// reading and writing data in parallel. This is implemented using a "staging"
/// area which stores changes until they are flushed to disk:
///
/// // Schedules data to be written asynchronously and returns immediately
/// cache[key] = data
///
/// // The data is returned from the staging area
/// let data = cache[key]
///
/// // Schedules data to be removed asynchronously and returns immediately
/// cache[key] = nil
///
/// // Data is nil
/// let data = cache[key]
///
/// Thread-safe.
///
/// - warning: It's possible to have more than one instance of `DataCache` with
/// the same `path` but it is not recommended.
public final class DataCache: DataCaching {
/// A cache key.
public typealias Key = String
/// The maximum number of items. `1000` by default.
///
/// Changes to `countLimit` will take effect when the next LRU sweep is run.
public var countLimit: Int = 1000
/// Size limit in bytes. `100 MB` by default.
///
/// Changes to `sizeLimit` will take effect when the next LRU sweep is run.
public var sizeLimit: Int = 1024 * 1024 * 100
/// When performing a sweep, the cache will remove entries until the size of
/// the remaining items is lower than or equal to `sizeLimit * trimRatio` and
/// the total count is lower than or equal to `countLimit * trimRatio`. `0.7`
/// by default.
internal var trimRatio = 0.7
/// The path for the directory managed by the cache.
public let path: URL
/// The number of seconds between each LRU sweep. 30 by default.
/// The first sweep is performed right after the cache is initialized.
///
/// Sweeps are performed in the background and can run in parallel
/// with reading.
public var sweepInterval: TimeInterval = 30
/// The delay after which the initial sweep is performed. 15 by default.
/// The initial sweep is performed after a delay to avoid competing with
/// other subsystems for the resources.
private var initialSweepDelay: TimeInterval = 15
// Staging
private let _lock = NSLock()
private var _staging = Staging()
/* testable */ let _wqueue = DispatchQueue(label: "com.github.kean.Nuke.DataCache.WriteQueue")
/// A function which generates a filename for the given key. A good candidate
/// for a filename generator is a _cryptographic_ hash function like SHA1.
///
/// The reason a filename needs to be generated in the first place is
/// that filesystems have a size limit for filenames (e.g. 255 UTF-8 characters
/// in APFS) and do not allow certain characters to be used in filenames.
public typealias FilenameGenerator = (_ key: String) -> String?
private let _filenameGenerator: FilenameGenerator
/// Creates a cache instance with a given `name`. The cache creates a directory
/// with the given `name` in a `.cachesDirectory` in `.userDomainMask`.
/// - parameter filenameGenerator: Generates a filename for the given URL.
/// The default implementation generates a filename using SHA1 hash function.
public convenience init(name: String, filenameGenerator: @escaping (String) -> String? = DataCache.filename(for:)) throws {
guard let root = FileManager.default.urls(for: .cachesDirectory, in: .userDomainMask).first else {
throw NSError(domain: NSCocoaErrorDomain, code: NSFileNoSuchFileError, userInfo: nil)
}
try self.init(path: root.appendingPathComponent(name, isDirectory: true), filenameGenerator: filenameGenerator)
}
/// Creates a cache instance with a given path.
/// - parameter filenameGenerator: Generates a filename for the given URL.
/// The default implementation generates a filename using SHA1 hash function.
public init(path: URL, filenameGenerator: @escaping (String) -> String? = DataCache.filename(for:)) throws {
self.path = path
self._filenameGenerator = filenameGenerator
try self._didInit()
}
/// A `FilenameGenerator` implementation which uses SHA1 hash function to
/// generate a filename from the given key.
public static func filename(for key: String) -> String? {
return key.sha1
}
private func _didInit() throws {
try FileManager.default.createDirectory(at: path, withIntermediateDirectories: true, attributes: nil)
_wqueue.asyncAfter(deadline: .now() + initialSweepDelay) { [weak self] in
self?._performAndScheduleSweep()
}
}
// MARK: DataCaching
/// Retrieves data for the given key. The lookup is performed synchronously;
/// returns `nil` if there is no cached data for the given key.
public func cachedData(for key: Key) -> Data? {
_lock.lock()
if let change = _staging.change(for: key) {
_lock.unlock()
switch change {
case let .add(data):
return data
case .remove:
return nil
}
}
_lock.unlock()
guard let url = _url(for: key) else {
return nil
}
return try? Data(contentsOf: url)
}
/// Stores data for the given key. The method returns instantly and the data
/// is written asynchronously.
public func storeData(_ data: Data, for key: Key) {
_lock.sync {
let change = _staging.add(data: data, for: key)
_wqueue.async {
if let url = self._url(for: key) {
try? data.write(to: url)
}
self._lock.sync {
self._staging.flushed(change)
}
}
}
}
/// Removes data for the given key. The method returns instantly, the data
/// is removed asynchronously.
public func removeData(for key: Key) {
_lock.sync {
let change = _staging.removeData(for: key)
_wqueue.async {
if let url = self._url(for: key) {
try? FileManager.default.removeItem(at: url)
}
self._lock.sync {
self._staging.flushed(change)
}
}
}
}
/// Removes all items. The method returns instantly, the data is removed
/// asynchronously.
public func removeAll() {
_lock.sync {
let change = _staging.removeAll()
_wqueue.async {
try? FileManager.default.removeItem(at: self.path)
try? FileManager.default.createDirectory(at: self.path, withIntermediateDirectories: true, attributes: nil)
self._lock.sync {
self._staging.flushed(change)
}
}
}
}
/// Accesses the data associated with the given key for reading and writing.
///
/// When you assign a new data for a key and the key already exists, the cache
/// overwrites the existing data.
///
/// When assigning or removing data, the subscript adds a requested operation
/// in a staging area and returns immediately. The staging area allows for
/// reading and writing data in parallel.
///
/// // Schedules data to be written asynchronously and returns immediately
/// cache[key] = data
///
/// // The data is returned from the staging area
/// let data = cache[key]
///
/// // Schedules data to be removed asynchronously and returns immediately
/// cache[key] = nil
///
/// // Data is nil
/// let data = cache[key]
///
public subscript(key: Key) -> Data? {
get {
return cachedData(for: key)
}
set {
if let data = newValue {
storeData(data, for: key)
} else {
removeData(for: key)
}
}
}
// MARK: Managing URLs
/// Uses the `FilenameGenerator` that the cache was initialized with to
/// generate and return a filename for the given key.
public func filename(for key: Key) -> String? {
return _filenameGenerator(key)
}
/* testable */ func _url(for key: Key) -> URL? {
guard let filename = self.filename(for: key) else {
return nil
}
return self.path.appendingPathComponent(filename, isDirectory: false)
}
// MARK: Flush Changes
/// Synchronously waits on the caller's thread until all outstanding disk IO
/// operations are finished.
func flush() {
_wqueue.sync {}
}
// MARK: Sweep
private func _performAndScheduleSweep() {
_sweep()
_wqueue.asyncAfter(deadline: .now() + sweepInterval) { [weak self] in
self?._performAndScheduleSweep()
}
}
/// Schedules a cache sweep to be performed immediately.
public func sweep() {
_wqueue.async {
self._sweep()
}
}
/// Discards the least recently used items first.
private func _sweep() {
var items = contents(keys: [.contentAccessDateKey, .totalFileAllocatedSizeKey])
guard !items.isEmpty else {
return
}
var size = items.reduce(0) { $0 + ($1.meta.totalFileAllocatedSize ?? 0) }
var count = items.count
let sizeLimit = self.sizeLimit / Int(1 / trimRatio)
let countLimit = self.countLimit / Int(1 / trimRatio)
guard size > sizeLimit || count > countLimit else {
return // All good, no need to perform any work.
}
// Most recently accessed items first
let past = Date.distantPast
items.sort { // Sort in place
($0.meta.contentAccessDate ?? past) > ($1.meta.contentAccessDate ?? past)
}
// Remove the items until we satisfy both size and count limits.
while (size > sizeLimit || count > countLimit), let item = items.popLast() {
size -= (item.meta.totalFileAllocatedSize ?? 0)
count -= 1
try? FileManager.default.removeItem(at: item.url)
}
}
// MARK: Contents
struct Entry {
let url: URL
let meta: URLResourceValues
}
func contents(keys: [URLResourceKey] = []) -> [Entry] {
guard let urls = try? FileManager.default.contentsOfDirectory(at: path, includingPropertiesForKeys: keys, options: .skipsHiddenFiles) else {
return []
}
let _keys = Set(keys)
return urls.compactMap {
guard let meta = try? $0.resourceValues(forKeys: _keys) else {
return nil
}
return Entry(url: $0, meta: meta)
}
}
// MARK: Inspection
/// The total number of items in the cache.
/// - warning: Requires disk IO, avoid using from the main thread.
public var totalCount: Int {
return contents().count
}
/// The total file size of items written on disk.
///
/// Uses `URLResourceKey.fileSizeKey` to calculate the size of each entry.
/// The total allocated size on disk (see `totalAllocatedSize`) might
/// actually be bigger.
///
/// - warning: Requires disk IO, avoid using from the main thread.
public var totalSize: Int {
return contents(keys: [.fileSizeKey]).reduce(0) {
$0 + ($1.meta.fileSize ?? 0)
}
}
/// The total file allocated size of all the items written on disk.
///
/// Uses `URLResourceKey.totalFileAllocatedSizeKey`.
///
/// - warning: Requires disk IO, avoid using from the main thread.
public var totalAllocatedSize: Int {
return contents(keys: [.totalFileAllocatedSizeKey]).reduce(0) {
$0 + ($1.meta.totalFileAllocatedSize ?? 0)
}
}
// MARK: - Staging
/// DataCache allows for parallel reads and writes. This is made possible by
/// the `Staging` area.
///
/// For example, when data is added to the cache, it is first added to staging
/// and is removed from staging only after the data is written to disk. Removal
/// works the same way.
private final class Staging {
private var changes = [String: Change]()
private var changeRemoveAll: ChangeRemoveAll?
struct ChangeRemoveAll {
let id: Int
}
struct Change {
let key: String
let id: Int
let type: ChangeType
}
enum ChangeType {
case add(Data)
case remove
}
private var nextChangeId = 0
// MARK: Changes
func change(for key: String) -> ChangeType? {
if let change = changes[key] {
return change.type
}
if changeRemoveAll != nil {
return .remove
}
return nil
}
// MARK: Register Changes
func add(data: Data, for key: String) -> Change {
return _makeChange(.add(data), for: key)
}
func removeData(for key: String) -> Change {
return _makeChange(.remove, for: key)
}
private func _makeChange(_ type: ChangeType, for key: String) -> Change {
nextChangeId += 1
let change = Change(key: key, id: nextChangeId, type: type)
changes[key] = change
return change
}
func removeAll() -> ChangeRemoveAll {
nextChangeId += 1
let change = ChangeRemoveAll(id: nextChangeId)
changeRemoveAll = change
changes.removeAll()
return change
}
// MARK: Flush Changes
func flushed(_ change: Change) {
if let index = changes.index(forKey: change.key),
changes[index].value.id == change.id {
changes.remove(at: index)
}
}
func flushed(_ change: ChangeRemoveAll) {
if changeRemoveAll?.id == change.id {
changeRemoveAll = nil
}
}
}
}
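The staging behaviour described above can be exercised directly. A minimal usage sketch (not part of the diff; the cache name and key are illustrative):

import Foundation
import Nuke

do {
    // The cache creates a directory with this name inside Caches; the name is illustrative.
    let cache = try DataCache(name: "com.example.ImageData")
    cache.sizeLimit = 50 * 1024 * 1024   // tighten the default 100 MB limit
    cache.countLimit = 500

    let key = "https://example.com/icon.png"      // any stable string key works
    cache[key] = Data([0x89, 0x50, 0x4E, 0x47])   // schedules an asynchronous write
    _ = cache[key]                                // served from the staging area until flushed
    cache[key] = nil                              // schedules an asynchronous removal
} catch {
    print("Failed to create DataCache:", error)
}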

160
Pods/Nuke/Sources/DataLoader.swift generated Normal file

@@ -0,0 +1,160 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
public protocol Cancellable: class {
func cancel()
}
public protocol DataLoading {
/// - parameter didReceiveData: Can be called multiple times if streaming
/// is supported.
/// - parameter completion: Must be called once after all (or none in case
/// of an error) `didReceiveData` closures have been called.
func loadData(with request: URLRequest,
didReceiveData: @escaping (Data, URLResponse) -> Void,
completion: @escaping (Error?) -> Void) -> Cancellable
}
extension URLSessionTask: Cancellable {}
/// Provides basic networking using `URLSession`.
public final class DataLoader: DataLoading {
public let session: URLSession
private let _impl: _DataLoader
/// Initializes `DataLoader` with the given configuration.
/// - parameter configuration: `URLSessionConfiguration.default` with
/// `URLCache` with 0 MB memory capacity and 150 MB disk capacity.
public init(configuration: URLSessionConfiguration = DataLoader.defaultConfiguration,
validate: @escaping (URLResponse) -> Swift.Error? = DataLoader.validate) {
self._impl = _DataLoader()
self.session = URLSession(configuration: configuration, delegate: _impl, delegateQueue: _impl.queue)
self._impl.session = self.session
self._impl.validate = validate
}
/// Returns a default configuration which has a `sharedUrlCache` set
/// as its `urlCache`.
public static var defaultConfiguration: URLSessionConfiguration {
let conf = URLSessionConfiguration.default
conf.urlCache = DataLoader.sharedUrlCache
return conf
}
/// Validates `HTTP` responses by checking that the status code is 2xx. If
/// it's not, returns `DataLoader.Error.statusCodeUnacceptable`.
public static func validate(response: URLResponse) -> Swift.Error? {
guard let response = response as? HTTPURLResponse else { return nil }
return (200..<300).contains(response.statusCode) ? nil : Error.statusCodeUnacceptable(response.statusCode)
}
#if !os(macOS)
private static let cachePath = "com.github.kean.Nuke.Cache"
#else
private static let cachePath: String = {
let cachePaths = NSSearchPathForDirectoriesInDomains(.cachesDirectory, .userDomainMask, true)
if let cachePath = cachePaths.first, let identifier = Bundle.main.bundleIdentifier {
return cachePath.appending("/" + identifier)
}
return ""
}()
#endif
/// Shared URL cache used by the default `DataLoader`. The cache is
/// initialized with 0 MB memory capacity and 150 MB disk capacity.
public static let sharedUrlCache = URLCache(
memoryCapacity: 0,
diskCapacity: 150 * 1024 * 1024, // 150 MB
diskPath: cachePath
)
public func loadData(with request: URLRequest, didReceiveData: @escaping (Data, URLResponse) -> Void, completion: @escaping (Swift.Error?) -> Void) -> Cancellable {
return _impl.loadData(with: request, didReceiveData: didReceiveData, completion: completion)
}
/// Errors produced by `DataLoader`.
public enum Error: Swift.Error, CustomDebugStringConvertible {
/// Validation failed.
case statusCodeUnacceptable(Int)
/// Either the response or body was empty.
@available(*, deprecated, message: "This error case is not used any more")
case responseEmpty
public var debugDescription: String {
switch self {
case let .statusCodeUnacceptable(code): return "Response status code was unacceptable: " + code.description // compiles faster than interpolation
case .responseEmpty: return "Either the response or body was empty."
}
}
}
}
// Actual data loader implementation. We hide NSObject inheritance, hide
// URLSessionDataDelegate conformance, and break retain cycle between URLSession
// and URLSessionDataDelegate.
private final class _DataLoader: NSObject, URLSessionDataDelegate {
weak var session: URLSession! // This is safe.
var validate: (URLResponse) -> Swift.Error? = DataLoader.validate
let queue = OperationQueue()
private var handlers = [URLSessionTask: _Handler]()
override init() {
self.queue.maxConcurrentOperationCount = 1
}
/// Loads data with the given request.
func loadData(with request: URLRequest, didReceiveData: @escaping (Data, URLResponse) -> Void, completion: @escaping (Error?) -> Void) -> Cancellable {
let task = session.dataTask(with: request)
let handler = _Handler(didReceiveData: didReceiveData, completion: completion)
queue.addOperation { // `URLSession` is configured to use this same queue
self.handlers[task] = handler
}
task.resume()
return task
}
// MARK: URLSessionDelegate
func urlSession(_ session: URLSession, dataTask: URLSessionDataTask, didReceive response: URLResponse, completionHandler: @escaping (URLSession.ResponseDisposition) -> Void) {
guard let handler = handlers[dataTask] else {
completionHandler(.cancel)
return
}
// Validate the response as soon as we receive it so that we can cancel the request if necessary
if let error = validate(response) {
handler.completion(error)
completionHandler(.cancel)
return
}
completionHandler(.allow)
}
func urlSession(_ session: URLSession, task: URLSessionTask, didCompleteWithError error: Error?) {
guard let handler = handlers[task] else { return }
handlers[task] = nil
handler.completion(error)
}
// MARK: URLSessionDataDelegate
func urlSession(_ session: URLSession, dataTask: URLSessionDataTask, didReceive data: Data) {
guard let handler = handlers[dataTask], let response = dataTask.response else { return }
// We don't store data anywhere, just send it to the pipeline.
handler.didReceiveData(data, response)
}
private final class _Handler {
let didReceiveData: (Data, URLResponse) -> Void
let completion: (Error?) -> Void
init(didReceiveData: @escaping (Data, URLResponse) -> Void, completion: @escaping (Error?) -> Void) {
self.didReceiveData = didReceiveData
self.completion = completion
}
}
}
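A minimal sketch of driving `DataLoader` on its own, outside the pipeline (the URL is illustrative); `didReceiveData` may fire multiple times while streaming, and `completion` fires once at the end:

import Foundation
import Nuke

let loader = DataLoader()   // uses defaultConfiguration with the 150 MB shared URL cache
let request = URLRequest(url: URL(string: "https://example.com/image.jpg")!)

var buffer = Data()
let task = loader.loadData(
    with: request,
    didReceiveData: { chunk, _ in
        buffer.append(chunk)                 // streaming: may be called multiple times
    },
    completion: { error in
        if let error = error {
            print("Load failed:", error)     // e.g. Error.statusCodeUnacceptable(404)
        } else {
            print("Loaded \(buffer.count) bytes")
        }
    })
// `task` conforms to Cancellable (it is the underlying URLSessionTask):
// task.cancel()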

300
Pods/Nuke/Sources/ImageCache.swift generated Normal file

@@ -0,0 +1,300 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
#if !os(macOS)
import UIKit
#else
import Cocoa
#endif
/// In-memory image cache.
///
/// The implementation must be thread safe.
public protocol ImageCaching: class {
/// Returns the `ImageResponse` stored in the cache with the given request.
func cachedResponse(for request: ImageRequest) -> ImageResponse?
/// Stores the given `ImageResponse` in the cache using the given request.
func storeResponse(_ response: ImageResponse, for request: ImageRequest)
/// Remove the response for the given request.
func removeResponse(for request: ImageRequest)
}
/// Convenience subscript.
public extension ImageCaching {
/// Accesses the image associated with the given request.
subscript(request: ImageRequest) -> Image? {
get {
return cachedResponse(for: request)?.image
}
set {
if let newValue = newValue {
storeResponse(ImageResponse(image: newValue, urlResponse: nil), for: request)
} else {
removeResponse(for: request)
}
}
}
}
/// Memory cache with LRU cleanup policy (least recently used are removed first).
///
/// The elements stored in cache are automatically discarded if either *cost* or
/// *count* limit is reached. The default cost limit represents a number of bytes
/// and is calculated based on the amount of physical memory available on the
/// device. The default count limit is set to `Int.max`.
///
/// `Cache` automatically removes all stored elements when it receives a
/// memory warning. It also automatically removes *most* of the cached elements
/// when the app enters the background.
public final class ImageCache: ImageCaching {
private let _impl: _Cache<ImageRequest.CacheKey, ImageResponse>
/// The maximum total cost that the cache can hold.
public var costLimit: Int {
get { return _impl.costLimit }
set { _impl.costLimit = newValue }
}
/// The maximum number of items that the cache can hold.
public var countLimit: Int {
get { return _impl.countLimit }
set { _impl.countLimit = newValue }
}
/// Default TTL (time to live) for each entry. Can be used to make sure that
/// the entries get validated at some point. `0` (never expire) by default.
public var ttl: TimeInterval {
get { return _impl.ttl }
set { _impl.ttl = newValue }
}
/// The total cost of items in the cache.
public var totalCost: Int {
return _impl.totalCost
}
/// The total number of items in the cache.
public var totalCount: Int {
return _impl.totalCount
}
/// Shared `Cache` instance.
public static let shared = ImageCache()
/// Initializes `Cache`.
/// - parameter costLimit: Default value represents a number of bytes and is
/// calculated based on the amount of physical memory available on the device.
/// - parameter countLimit: `Int.max` by default.
public init(costLimit: Int = ImageCache.defaultCostLimit(), countLimit: Int = Int.max) {
_impl = _Cache(costLimit: costLimit, countLimit: countLimit)
}
/// Returns a recommended cost limit which is computed based on the amount
/// of physical memory available on the device.
public static func defaultCostLimit() -> Int {
let physicalMemory = ProcessInfo.processInfo.physicalMemory
let ratio = physicalMemory <= (536_870_912 /* 512 Mb */) ? 0.1 : 0.2
let limit = physicalMemory / UInt64(1 / ratio)
return limit > UInt64(Int.max) ? Int.max : Int(limit)
}
/// Returns the `ImageResponse` stored in the cache with the given request.
public func cachedResponse(for request: ImageRequest) -> ImageResponse? {
return _impl.value(forKey: ImageRequest.CacheKey(request: request))
}
/// Stores the given `ImageResponse` in the cache using the given request.
public func storeResponse(_ response: ImageResponse, for request: ImageRequest) {
_impl.set(response, forKey: ImageRequest.CacheKey(request: request), cost: self.cost(for: response.image))
}
/// Removes response stored with the given request.
public func removeResponse(for request: ImageRequest) {
_impl.removeValue(forKey: ImageRequest.CacheKey(request: request))
}
/// Removes all cached images.
public func removeAll() {
_impl.removeAll()
}
/// Removes least recently used items from the cache until the total cost
/// of the remaining items is less than the given cost limit.
public func trim(toCost limit: Int) {
_impl.trim(toCost: limit)
}
/// Removes least recently used items from the cache until the total count
/// of the remaining items is less than the given count limit.
public func trim(toCount limit: Int) {
_impl.trim(toCount: limit)
}
/// Returns cost for the given image by approximating its bitmap size in bytes in memory.
func cost(for image: Image) -> Int {
#if !os(macOS)
let dataCost = ImagePipeline.Configuration.isAnimatedImageDataEnabled ? (image.animatedImageData?.count ?? 0) : 0
// bytesPerRow * height gives a rough estimation of how much memory
// the image uses in bytes. In practice this heuristic combined with a
// conservative default cost limit works OK.
guard let cgImage = image.cgImage else {
return 1 + dataCost
}
return cgImage.bytesPerRow * cgImage.height + dataCost
#else
return 1
#endif
}
}
internal final class _Cache<Key: Hashable, Value> {
// We don't use `NSCache` because it's not LRU
private var map = [Key: LinkedList<Entry>.Node]()
private let list = LinkedList<Entry>()
private let lock = NSLock()
var costLimit: Int {
didSet { lock.sync(_trim) }
}
var countLimit: Int {
didSet { lock.sync(_trim) }
}
private(set) var totalCost = 0
var ttl: TimeInterval = 0
var totalCount: Int {
return map.count
}
init(costLimit: Int, countLimit: Int) {
self.costLimit = costLimit
self.countLimit = countLimit
#if os(iOS) || os(tvOS)
NotificationCenter.default.addObserver(self, selector: #selector(removeAll), name: UIApplication.didReceiveMemoryWarningNotification, object: nil)
NotificationCenter.default.addObserver(self, selector: #selector(didEnterBackground), name: UIApplication.didEnterBackgroundNotification, object: nil)
#endif
}
deinit {
#if os(iOS) || os(tvOS)
NotificationCenter.default.removeObserver(self)
#endif
}
func value(forKey key: Key) -> Value? {
lock.lock(); defer { lock.unlock() }
guard let node = map[key] else {
return nil
}
guard !node.value.isExpired else {
_remove(node: node)
return nil
}
// bubble node up to make it last added (most recently used)
list.remove(node)
list.append(node)
return node.value.value
}
func set(_ value: Value, forKey key: Key, cost: Int = 0, ttl: TimeInterval? = nil) {
lock.lock(); defer { lock.unlock() }
let ttl = ttl ?? self.ttl
let expiration = ttl == 0 ? nil : (Date() + ttl)
let entry = Entry(value: value, key: key, cost: cost, expiration: expiration)
_add(entry)
_trim() // _trim is extremely fast, it's OK to call it each time
}
@discardableResult
func removeValue(forKey key: Key) -> Value? {
lock.lock(); defer { lock.unlock() }
guard let node = map[key] else { return nil }
_remove(node: node)
return node.value.value
}
private func _add(_ element: Entry) {
if let existingNode = map[element.key] {
_remove(node: existingNode)
}
map[element.key] = list.append(element)
totalCost += element.cost
}
private func _remove(node: LinkedList<Entry>.Node) {
list.remove(node)
map[node.value.key] = nil
totalCost -= node.value.cost
}
@objc dynamic func removeAll() {
lock.sync {
map.removeAll()
list.removeAll()
totalCost = 0
}
}
private func _trim() {
_trim(toCost: costLimit)
_trim(toCount: countLimit)
}
@objc private dynamic func didEnterBackground() {
// Remove most of the stored items when entering background.
// This behavior is similar to `NSCache` (which removes all
// items). This feature is not documented and may be subject
// to change in future Nuke versions.
lock.sync {
_trim(toCost: Int(Double(costLimit) * 0.1))
_trim(toCount: Int(Double(countLimit) * 0.1))
}
}
func trim(toCost limit: Int) {
lock.sync { _trim(toCost: limit) }
}
private func _trim(toCost limit: Int) {
_trim(while: { totalCost > limit })
}
func trim(toCount limit: Int) {
lock.sync { _trim(toCount: limit) }
}
private func _trim(toCount limit: Int) {
_trim(while: { totalCount > limit })
}
private func _trim(while condition: () -> Bool) {
while condition(), let node = list.first { // least recently used
_remove(node: node)
}
}
private struct Entry {
let value: Value
let key: Key
let cost: Int
let expiration: Date?
var isExpired: Bool {
guard let expiration = expiration else { return false }
return expiration.timeIntervalSinceNow < 0
}
}
}
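The memory cache can also be read and written directly, e.g. for the synchronous cache check that `ImagePipeline` expects callers to do themselves. A sketch (not part of the diff; the URL is illustrative and `someDecodedImage` is hypothetical):

import Foundation
import Nuke

let cache = ImageCache.shared
cache.costLimit = 64 * 1024 * 1024   // cap decoded bitmaps at roughly 64 MB
cache.ttl = 120                      // entries expire after two minutes

let request = ImageRequest(url: URL(string: "https://example.com/icon.png")!)
if let image = cache[request] {      // convenience subscript declared on ImageCaching
    print("Memory cache hit:", image)
}
// Storing goes through the same subscript (or storeResponse(_:for:)):
// cache[request] = someDecodedImage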

219
Pods/Nuke/Sources/ImageDecoding.swift generated Normal file

@@ -0,0 +1,219 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
#if !os(macOS)
import UIKit
#else
import Cocoa
#endif
#if os(watchOS)
import WatchKit
#endif
// MARK: - ImageDecoding
/// Decodes image data.
public protocol ImageDecoding {
/// Produces an image from the image data. A decoder is a one-shot object
/// created for a single image decoding session. If the image pipeline has
/// progressive decoding enabled, the `decode(data:isFinal:)` method gets
/// called each time the data buffer has new data available. The decoder may
/// decide whether or not to produce a new image based on the previous scans.
func decode(data: Data, isFinal: Bool) -> Image?
}
// An image decoder that uses native APIs. Supports progressive decoding.
// The decoder is stateful.
public final class ImageDecoder: ImageDecoding {
// `nil` if the decoder hasn't yet determined whether the image is progressive.
private(set) internal var isProgressive: Bool?
// Number of scans that the decoder has found so far. The last scan might be
// incomplete at this point.
private(set) internal var numberOfScans = 0
private var lastStartOfScan: Int = 0 // Index of the last Start of Scan that we found
private var scannedIndex: Int = -1 // Index at which previous scan was finished
public init() { }
public func decode(data: Data, isFinal: Bool) -> Image? {
let format = ImageFormat.format(for: data)
guard !isFinal else { // Just decode the data.
let image = _decode(data)
if ImagePipeline.Configuration.isAnimatedImageDataEnabled, case .gif? = format { // Keep original data around in case of GIF
image?.animatedImageData = data
}
return image
}
// Determine (if we haven't yet) whether the image supports progressive
// decoding or not (only progressive JPEG is allowed for now, but you can
// add support for other formats by implementing your own decoder).
isProgressive = isProgressive ?? format?.isProgressive
guard isProgressive == true else { return nil }
// Check if there is more data to scan.
guard (scannedIndex + 1) < data.count else { return nil }
// Start scanning from where we left off the previous time.
var index = (scannedIndex + 1)
var numberOfScans = self.numberOfScans
while index < (data.count - 1) {
scannedIndex = index
// 0xFF, 0xDA - Start Of Scan
if data[index] == 0xFF, data[index+1] == 0xDA {
lastStartOfScan = index
numberOfScans += 1
}
index += 1
}
// Found more scans than the previous time
guard numberOfScans > self.numberOfScans else { return nil }
self.numberOfScans = numberOfScans
// `> 1` checks that we've received the first scan (SOS) and then also
// received a second scan (SOS). This way we know that we have
// at least one full scan available.
return (numberOfScans > 1 && lastStartOfScan > 0) ? _decode(data[0..<lastStartOfScan]) : nil
}
}
// Image initializers are documented as fully thread-safe:
//
// > The immutable nature of image objects also means that they are safe
// to create and use from any thread.
//
// However, there are some versions of iOS which violated this.
// `UIImage` is supposedly fully thread-safe again starting with iOS 10.
//
// The `queue.sync` call below prevents the majority of the potential
// crashes that could happen on the previous versions of iOS.
//
// See also https://github.com/AFNetworking/AFNetworking/issues/2572
private let _queue = DispatchQueue(label: "com.github.kean.Nuke.DataDecoder")
internal func _decode(_ data: Data) -> Image? {
return _queue.sync {
#if os(macOS)
return NSImage(data: data)
#else
#if os(iOS) || os(tvOS)
let scale = UIScreen.main.scale
#else
let scale = WKInterfaceDevice.current().screenScale
#endif
return UIImage(data: data, scale: scale)
#endif
}
}
// MARK: - ImageDecoderRegistry
/// A registry of image codecs (decoding only).
public final class ImageDecoderRegistry {
/// A shared registry.
public static let shared = ImageDecoderRegistry()
private var matches = [(ImageDecodingContext) -> ImageDecoding?]()
/// Returns a decoder which matches the given context.
public func decoder(for context: ImageDecodingContext) -> ImageDecoding {
for match in matches {
if let decoder = match(context) {
return decoder
}
}
return ImageDecoder() // Return default decoder if couldn't find a custom one.
}
/// Registers a decoder to be used in a given decoding context. The closure
/// is going to be executed before all other already registered closures.
public func register(_ match: @escaping (ImageDecodingContext) -> ImageDecoding?) {
matches.insert(match, at: 0)
}
func clear() {
matches = []
}
}
/// Image decoding context used when selecting which decoder to use.
public struct ImageDecodingContext {
public let request: ImageRequest
internal let urlResponse: URLResponse?
public let data: Data
}
// MARK: - Image Formats
enum ImageFormat: Equatable {
/// `isProgressive` is nil if we determined that it's a jpeg, but we don't
/// know if it is progressive or baseline yet.
case jpeg(isProgressive: Bool?)
case png
case gif
// Returns `nil` if not enough data.
static func format(for data: Data) -> ImageFormat? {
// JPEG magic numbers https://en.wikipedia.org/wiki/JPEG
if _match(data, [0xFF, 0xD8, 0xFF]) {
var index = 3 // start scanning right after magic numbers
while index < (data.count - 1) {
// An example of the first few bytes of a progressive JPEG image:
// FF D8 FF E0 00 10 4A 46 49 46 00 01 01 00 00 48 00 ...
//
// 0xFF, 0xC0 - Start Of Frame (baseline DCT)
// 0xFF, 0xC2 - Start Of Frame (progressive DCT)
// https://en.wikipedia.org/wiki/JPEG
if data[index] == 0xFF {
if data[index+1] == 0xC2 { return .jpeg(isProgressive: true) } // progressive
if data[index+1] == 0xC0 { return .jpeg(isProgressive: false) } // baseline
}
index += 1
}
// It's a jpeg but we don't know if progressive or not yet.
return .jpeg(isProgressive: nil)
}
// GIF magic numbers https://en.wikipedia.org/wiki/GIF
if _match(data, [0x47, 0x49, 0x46]) {
return .gif
}
// PNG Magic numbers https://en.wikipedia.org/wiki/Portable_Network_Graphics
if _match(data, [0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]) {
return .png
}
// Either not enough data, or we just don't know this format yet.
return nil
}
var isProgressive: Bool? {
if case let .jpeg(isProgressive) = self { return isProgressive }
return false
}
private static func _match(_ data: Data, _ numbers: [UInt8]) -> Bool {
guard data.count >= numbers.count else { return false }
return !zip(numbers.indices, numbers).contains { (index, number) in
data[index] != number
}
}
}
// MARK: - Animated Images
private var _animatedImageDataAK = "Nuke.AnimatedImageData.AssociatedKey"
extension Image {
// Animated image data. Only non-`nil` when the image data actually contains
// an animated image.
public var animatedImageData: Data? {
get { return objc_getAssociatedObject(self, &_animatedImageDataAK) as? Data }
set { objc_setAssociatedObject(self, &_animatedImageDataAK, newValue, .OBJC_ASSOCIATION_RETAIN_NONATOMIC) }
}
}
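The registry above is the hook for custom formats. A minimal sketch of registering a match closure (not part of the diff; it only demonstrates the mechanism by returning the stock `ImageDecoder` for PNG data and deferring to the default lookup otherwise):

import Foundation
import Nuke

// Closures registered here run before the built-in lookup; returning nil
// falls through to the default ImageDecoder.
ImageDecoderRegistry.shared.register { context in
    let pngMagic: [UInt8] = [0x89, 0x50, 0x4E, 0x47]   // same magic numbers as ImageFormat
    let isPNG = context.data.count >= pngMagic.count &&
        Array(context.data.prefix(pngMagic.count)) == pngMagic
    return isPNG ? ImageDecoder() : nil
}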

911
Pods/Nuke/Sources/ImagePipeline.swift generated Normal file

@@ -0,0 +1,911 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
// MARK: - ImageTask
/// A task performed by the `ImagePipeline`. The pipeline maintains a strong
/// reference to the task until the request finishes or fails; you do not need
/// to maintain a reference to the task unless it is useful to do so for your
/// app's internal bookkeeping purposes.
public /* final */ class ImageTask: Hashable {
/// An identifier that uniquely identifies the task within a given pipeline. It is
/// only unique within that pipeline.
public let taskId: Int
fileprivate weak var delegate: ImageTaskDelegate?
/// The request with which the task was created. The request might change
/// during the execution of a task. When you update the priority of the task,
/// the request's priority also gets updated.
public private(set) var request: ImageRequest
/// The number of bytes that the task has received.
public fileprivate(set) var completedUnitCount: Int64 = 0
/// A best-guess upper bound on the number of bytes the client expects to receive.
public fileprivate(set) var totalUnitCount: Int64 = 0
/// Returns a progress object for the task. The object is created lazily.
public var progress: Progress {
if _progress == nil { _progress = Progress() }
return _progress!
}
fileprivate private(set) var _progress: Progress?
/// A completion handler to be called when task finishes or fails.
public typealias Completion = (_ response: ImageResponse?, _ error: ImagePipeline.Error?) -> Void
/// A progress handler to be called periodically during the lifetime of a task.
public typealias ProgressHandler = (_ response: ImageResponse?, _ completed: Int64, _ total: Int64) -> Void
// internal stuff associated with a task
fileprivate var metrics: ImageTaskMetrics
fileprivate weak var session: ImageLoadingSession?
internal init(taskId: Int, request: ImageRequest) {
self.taskId = taskId
self.request = request
self.metrics = ImageTaskMetrics(taskId: taskId, startDate: Date())
}
// MARK: - Priority
/// Updates the priority of the task, even if the task is already running.
public func setPriority(_ priority: ImageRequest.Priority) {
request.priority = priority
delegate?.imageTask(self, didUpdatePrioity: priority)
}
// MARK: - Cancellation
fileprivate var isCancelled: Bool {
return _isCancelled.value
}
private var _isCancelled = Atomic(false)
/// Marks task as being cancelled.
///
/// The pipeline will immediately cancel any work associated with a task
/// unless there is an equivalent outstanding task running (see
/// `ImagePipeline.Configuration.isDeduplicationEnabled` for more info).
public func cancel() {
// Make sure that we ignore it if `cancel` is called more than once.
if _isCancelled.swap(to: true, ifEqual: false) {
delegate?.imageTaskWasCancelled(self)
}
}
// MARK: - Hashable
public func hash(into hasher: inout Hasher) {
hasher.combine(ObjectIdentifier(self).hashValue)
}
public static func == (lhs: ImageTask, rhs: ImageTask) -> Bool {
return ObjectIdentifier(lhs) == ObjectIdentifier(rhs)
}
}
protocol ImageTaskDelegate: class {
func imageTaskWasCancelled(_ task: ImageTask)
func imageTask(_ task: ImageTask, didUpdatePrioity: ImageRequest.Priority)
}
// MARK: - ImageResponse
/// Represents an image response.
public final class ImageResponse {
public let image: Image
public let urlResponse: URLResponse?
// The response is only nil when the new disk cache is enabled (it only stores
// data for now, but this might change in the future).
public init(image: Image, urlResponse: URLResponse?) {
self.image = image; self.urlResponse = urlResponse
}
}
// MARK: - ImagePipeline
/// `ImagePipeline` will load and decode image data, process loaded images and
/// store them in caches.
///
/// See [Nuke's README](https://github.com/kean/Nuke) for a detailed overview of
/// the image pipeline and all of the related classes.
///
/// `ImagePipeline` is created with a configuration (`Configuration`).
///
/// `ImagePipeline` is thread-safe.
public /* final */ class ImagePipeline: ImageTaskDelegate {
public let configuration: Configuration
// This is a queue on which we access the sessions.
private let queue = DispatchQueue(label: "com.github.kean.Nuke.ImagePipeline")
// Image loading sessions. One or more tasks can be handled by the same session.
private var sessions = [AnyHashable: ImageLoadingSession]()
private var nextTaskId = Atomic<Int>(0)
// Unlike `nextTaskId`, this doesn't need to be atomic because it's only accessed on the pipeline's queue
private var nextSessionId: Int = 0
private let rateLimiter: RateLimiter
/// Shared image pipeline.
public static var shared = ImagePipeline()
/// The closure that gets called each time the task is completed (or cancelled).
/// Guaranteed to be called on the main thread.
public var didFinishCollectingMetrics: ((ImageTask, ImageTaskMetrics) -> Void)?
public struct Configuration {
/// Image cache used by the pipeline.
public var imageCache: ImageCaching?
/// Data loader used by the pipeline.
public var dataLoader: DataLoading
/// Data loading queue. Default maximum concurrent task count is 6.
public var dataLoadingQueue = OperationQueue()
/// Data cache used by the pipeline.
public var dataCache: DataCaching?
/// Data caching queue. Default maximum concurrent task count is 2.
public var dataCachingQueue = OperationQueue()
/// Default implementation uses shared `ImageDecoderRegistry` to create
/// a decoder that matches the context.
internal var imageDecoder: (ImageDecodingContext) -> ImageDecoding = {
return ImageDecoderRegistry.shared.decoder(for: $0)
}
/// Image decoding queue. Default maximum concurrent task count is 1.
public var imageDecodingQueue = OperationQueue()
/// This is here just for backward compatibility with `Loader`.
internal var imageProcessor: (Image, ImageRequest) -> AnyImageProcessor? = { $1.processor }
/// Image processing queue. Default maximum concurrent task count is 2.
public var imageProcessingQueue = OperationQueue()
/// `true` by default. If `true` the pipeline will combine the requests
/// with the same `loadKey` into a single request. The request only gets
/// cancelled when all the registered requests are.
public var isDeduplicationEnabled = true
/// `true` by default. If `true` the pipeline will rate-limit the requests
/// to prevent thrashing of the underlying systems (e.g. `URLSession`).
/// The rate limiter only comes into play when the requests are started
/// and cancelled at a high rate (e.g. scrolling through a collection view).
public var isRateLimiterEnabled = true
/// `false` by default. If `true` the pipeline will try to produce a new
/// image each time it receives a new portion of data from data loader.
/// The decoder used by the image loading session determines whether
/// to produce a partial image or not.
public var isProgressiveDecodingEnabled = false
/// If the data task is terminated (either because of a failure or a
/// cancellation) and the image was partially loaded, the next load will
/// resume where it was left off. Supports both validators (`ETag`,
/// `Last-Modified`). Resumable downloads are enabled by default.
public var isResumableDataEnabled = true
/// If `true` the pipeline will detect GIFs and set `animatedImageData`
/// (`UIImage` property). It will also disable processing of such images,
/// and alter the way cache cost is calculated. However, this will not
/// enable actual animated image rendering. To do that take a look at
/// satellite projects (FLAnimatedImage and Gifu plugins for Nuke).
/// `false` by default (to preserve resources).
public static var isAnimatedImageDataEnabled = false
/// Creates default configuration.
/// - parameter dataLoader: `DataLoader()` by default.
/// - parameter imageCache: `Cache.shared` by default.
public init(dataLoader: DataLoading = DataLoader(), imageCache: ImageCaching? = ImageCache.shared) {
self.dataLoader = dataLoader
self.imageCache = imageCache
self.dataLoadingQueue.maxConcurrentOperationCount = 6
self.dataCachingQueue.maxConcurrentOperationCount = 2
self.imageDecodingQueue.maxConcurrentOperationCount = 1
self.imageProcessingQueue.maxConcurrentOperationCount = 2
}
}
/// Initializes `ImagePipeline` instance with the given configuration.
/// - parameter configuration: `Configuration()` by default.
public init(configuration: Configuration = Configuration()) {
self.configuration = configuration
self.rateLimiter = RateLimiter(queue: queue)
}
public convenience init(_ configure: (inout ImagePipeline.Configuration) -> Void) {
var configuration = ImagePipeline.Configuration()
configure(&configuration)
self.init(configuration: configuration)
}
// MARK: Loading Images
/// Loads an image with the given url.
@discardableResult
public func loadImage(with url: URL, progress: ImageTask.ProgressHandler? = nil, completion: ImageTask.Completion? = nil) -> ImageTask {
return loadImage(with: ImageRequest(url: url), progress: progress, completion: completion)
}
/// Loads an image for the given request using image loading pipeline.
@discardableResult
public func loadImage(with request: ImageRequest, progress: ImageTask.ProgressHandler? = nil, completion: ImageTask.Completion? = nil) -> ImageTask {
let task = ImageTask(taskId: getNextTaskId(), request: request)
task.delegate = self
queue.async {
// Fast memory cache lookup. We do this asynchronously because we
// expect users to check memory cache synchronously if needed.
if task.request.memoryCacheOptions.isReadAllowed,
let response = self.configuration.imageCache?.cachedResponse(for: task.request) {
task.metrics.isMemoryCacheHit = true
self._didCompleteTask(task, response: response, error: nil, completion: completion)
return
}
// Memory cache lookup failed -> start loading.
self._startLoadingImage(
for: task,
handlers: ImageLoadingSession.Handlers(progress: progress, completion: completion)
)
}
return task
}
private func getNextTaskId() -> Int {
return nextTaskId.increment()
}
private func getNextSessionId() -> Int {
nextSessionId += 1
return nextSessionId
}
private func _startLoadingImage(for task: ImageTask, handlers: ImageLoadingSession.Handlers) {
// Create a new image loading session or register with an existing one.
let session = _createSession(with: task.request)
task.session = session
task.metrics.session = session.metrics
task.metrics.wasSubscibedToExistingSession = !session.tasks.isEmpty
// Register handler with a session.
session.tasks[task] = handlers
session.updatePriority()
// If the final image has already been loaded and decoded (and processing
// started for previously registered tasks), start processing for this task too.
if let image = session.decodedFinalImage {
_session(session, processImage: image, for: task)
}
}
// MARK: ImageTaskDelegate
func imageTaskWasCancelled(_ task: ImageTask) {
queue.async {
self._didCancelTask(task)
}
}
func imageTask(_ task: ImageTask, didUpdatePrioity: ImageRequest.Priority) {
queue.async {
guard let session = task.session else { return }
session.updatePriority()
session.processingSessions[task]?.updatePriority()
}
}
// MARK: ImageLoadingSession (Managing)
private func _createSession(with request: ImageRequest) -> ImageLoadingSession {
// Check if session for the given key already exists.
//
// This part is more clever than I would like. The reason why we need a
// key even when deduplication is disabled is to have a way to retain
// a session by storing it in `sessions` dictionary.
let key: AnyHashable = configuration.isDeduplicationEnabled ? ImageRequest.LoadKey(request: request) : UUID()
if let session = sessions[key] {
return session
}
let session = ImageLoadingSession(sessionId: getNextSessionId(), request: request, key: key)
sessions[key] = session
_loadImage(for: session) // Start the pipeline
return session
}
private func _cancelSession(for task: ImageTask) {
guard let session = task.session else { return }
session.tasks[task] = nil
// When all registered tasks are cancelled, the session is deallocated
// and the underlying operation is cancelled automatically.
let processingSession = session.processingSessions.removeValue(forKey: task)
processingSession?.tasks.remove(task)
// Cancel the session when there are no remaining tasks.
if session.tasks.isEmpty {
_tryToSaveResumableData(for: session)
session.cts.cancel()
session.metrics.wasCancelled = true
_didFinishSession(session)
} else {
// We're not cancelling the task session yet because there are
// still tasks registered to it, but we need to update the priority.
session.updatePriority()
processingSession?.updatePriority()
}
}
// MARK: Pipeline (Loading Data)
private func _loadImage(for session: ImageLoadingSession) {
// Use the rate limiter to prevent thrashing of the underlying systems
if configuration.isRateLimiterEnabled {
// Rate limiter is synchronized on pipeline's queue. Delayed work is
// executed asynchronously also on this same queue.
rateLimiter.execute(token: session.cts.token) { [weak self, weak session] in
guard let session = session else { return }
self?._checkDiskCache(for: session)
}
} else { // Start loading immediately.
_checkDiskCache(for: session)
}
}
private func _checkDiskCache(for session: ImageLoadingSession) {
guard let cache = configuration.dataCache, let key = session.request.urlString else {
_loadData(for: session) // Skip disk cache lookup, load data
return
}
session.metrics.checkDiskCacheStartDate = Date()
let operation = BlockOperation { [weak self, weak session] in
guard let session = session else { return }
let data = cache.cachedData(for: key)
session.metrics.checkDiskCacheEndDate = Date()
self?.queue.async {
if let data = data {
self?._decodeFinalImage(for: session, data: data)
} else {
self?._loadData(for: session)
}
}
}
configuration.dataCachingQueue.enqueue(operation, for: session)
}
private func _loadData(for session: ImageLoadingSession) {
guard !session.token.isCancelling else { return } // Preflight check
// Wrap data request in an operation to limit maximum number of
// concurrent data tasks.
let operation = Operation(starter: { [weak self, weak session] finish in
guard let session = session else { finish(); return }
self?.queue.async {
self?._actuallyLoadData(for: session, finish: finish)
}
})
configuration.dataLoadingQueue.enqueue(operation, for: session)
}
// This method gets called inside the data loading operation (`Operation`).
private func _actuallyLoadData(for session: ImageLoadingSession, finish: @escaping () -> Void) {
session.metrics.loadDataStartDate = Date()
var urlRequest = session.request.urlRequest
// Read and remove resumable data from cache (we're going to insert it
// back in the cache if the request fails to complete again).
if configuration.isResumableDataEnabled,
let resumableData = ResumableData.removeResumableData(for: urlRequest) {
// Update headers to add "Range" and "If-Range" headers
resumableData.resume(request: &urlRequest)
// Save resumable data so that we can use it later (we need to
// verify that the server returns "206 Partial Content" before using it).
session.resumableData = resumableData
// Collect metrics
session.metrics.wasResumed = true
session.metrics.resumedDataCount = resumableData.data.count
}
let task = configuration.dataLoader.loadData(
with: urlRequest,
didReceiveData: { [weak self, weak session] (data, response) in
self?.queue.async {
guard let session = session else { return }
self?._session(session, didReceiveData: data, response: response)
}
},
completion: { [weak self, weak session] (error) in
finish() // Important! Mark Operation as finished.
self?.queue.async {
guard let session = session else { return }
self?._session(session, didFinishLoadingDataWithError: error)
}
})
session.token.register {
task.cancel()
finish() // Make sure we always finish the operation.
}
}
private func _session(_ session: ImageLoadingSession, didReceiveData chunk: Data, response: URLResponse) {
// Check if this is the first response.
if session.urlResponse == nil {
// See if the server confirmed that we can use the resumable data.
if let resumableData = session.resumableData {
if ResumableData.isResumedResponse(response) {
session.data = resumableData.data
session.resumedDataCount = Int64(resumableData.data.count)
session.metrics.serverConfirmedResume = true
}
session.resumableData = nil // Get rid of resumable data
}
}
// Append data and save response
session.data.append(chunk)
session.urlResponse = response
// Collect metrics
session.metrics.downloadedDataCount = ((session.metrics.downloadedDataCount ?? 0) + chunk.count)
// Update tasks' progress and call progress closures if any
let (completed, total) = (Int64(session.data.count), response.expectedContentLength + session.resumedDataCount)
let tasks = session.tasks
DispatchQueue.main.async {
for (task, handlers) in tasks where !task.isCancelled {
(task.completedUnitCount, task.totalUnitCount) = (completed, total)
handlers.progress?(nil, completed, total)
task._progress?.completedUnitCount = completed
task._progress?.totalUnitCount = total
}
}
// Check if progressive decoding is enabled (disabled by default)
if configuration.isProgressiveDecodingEnabled {
// Check if we haven't loaded an entire image yet. We give decoder
// an opportunity to decide whether to decode this chunk or not.
// In case `expectedContentLength` is undetermined (e.g. 0) we
// don't allow progressive decoding.
guard session.data.count < response.expectedContentLength else { return }
_setNeedsDecodePartialImage(for: session)
}
}
private func _session(_ session: ImageLoadingSession, didFinishLoadingDataWithError error: Swift.Error?) {
session.metrics.loadDataEndDate = Date()
if let error = error {
_tryToSaveResumableData(for: session)
_session(session, didFailWithError: .dataLoadingFailed(error))
return
}
let data = session.data
session.data.removeAll() // We no longer need the data stored in session.
_decodeFinalImage(for: session, data: data)
}
// MARK: Pipeline (Decoding)
private func _setNeedsDecodePartialImage(for session: ImageLoadingSession) {
guard session.decodingOperation == nil else {
return // Already enqueued an operation.
}
let operation = BlockOperation { [weak self, weak session] in
guard let session = session else { return }
self?._actuallyDecodePartialImage(for: session)
}
_enqueueDecodingOperation(operation, for: session)
}
private func _actuallyDecodePartialImage(for session: ImageLoadingSession) {
// As soon as we get a chance to execute, grab the latest available
// data, create a decoder (if necessary) and decode the data.
let (data, decoder): (Data, ImageDecoding?) = queue.sync {
let data = session.data
let decoder = _decoder(for: session, data: data)
return (data, decoder)
}
// Produce partial image
if let image = decoder?.decode(data: data, isFinal: false) {
let scanNumber: Int? = (decoder as? ImageDecoder)?.numberOfScans
queue.async {
let container = ImageContainer(image: image, isFinal: false, scanNumber: scanNumber)
for task in session.tasks.keys {
self._session(session, processImage: container, for: task)
}
}
}
}
private func _decodeFinalImage(for session: ImageLoadingSession, data: Data) {
// Basic sanity checks, should never happen in practice.
guard !data.isEmpty, let decoder = _decoder(for: session, data: data) else {
_session(session, didFailWithError: .decodingFailed)
return
}
let metrics = session.metrics
let operation = BlockOperation { [weak self, weak session] in
guard let session = session else { return }
metrics.decodeStartDate = Date()
let image = autoreleasepool {
decoder.decode(data: data, isFinal: true) // Produce final image
}
metrics.decodeEndDate = Date()
self?.queue.async {
let container = image.map {
ImageContainer(image: $0, isFinal: true, scanNumber: nil)
}
self?._session(session, didDecodeFinalImage: container, data: data)
}
}
_enqueueDecodingOperation(operation, for: session)
}
private func _enqueueDecodingOperation(_ operation: Foundation.Operation, for session: ImageLoadingSession) {
configuration.imageDecodingQueue.enqueue(operation, for: session)
session.decodingOperation?.cancel()
session.decodingOperation = operation
}
// Lazily creates a decoder if necessary.
private func _decoder(for session: ImageLoadingSession, data: Data) -> ImageDecoding? {
guard !session.isDecodingDisabled else {
return nil
}
// Return the existing decoder in case it has already been created.
if let decoder = session.decoder {
return decoder
}
// Basic sanity checks.
guard !data.isEmpty else {
return nil
}
let context = ImageDecodingContext(request: session.request, urlResponse: session.urlResponse, data: data)
let decoder = configuration.imageDecoder(context)
session.decoder = decoder
return decoder
}
private func _tryToSaveResumableData(for session: ImageLoadingSession) {
// Try to save resumable data in case the task was cancelled
// (`URLError.cancelled`) or failed to complete with other error.
if configuration.isResumableDataEnabled,
let response = session.urlResponse, !session.data.isEmpty,
let resumableData = ResumableData(response: response, data: session.data) {
ResumableData.storeResumableData(resumableData, for: session.request.urlRequest)
}
}
private func _session(_ session: ImageLoadingSession, didDecodeFinalImage image: ImageContainer?, data: Data) {
session.decoder = nil // Decoding session completed, no longer need decoder.
session.decodedFinalImage = image
guard let image = image else {
_session(session, didFailWithError: .decodingFailed)
return
}
// Store data in the data cache (if enabled)
if !data.isEmpty, let dataCache = configuration.dataCache, let key = session.request.urlString {
dataCache.storeData(data, for: key)
}
for task in session.tasks.keys {
_session(session, processImage: image, for: task)
}
}
// MARK: Pipeline (Processing)
/// Processes the input image for each of the given tasks. The image is processed
/// only once for the equivalent processors.
/// - parameter completion: Will get called synchronously if processing is not
/// required. If it is, it will get called on `self.queue` when processing finishes.
private func _session(_ session: ImageLoadingSession, processImage image: ImageContainer, for task: ImageTask) {
let isFinal = image.isFinal
guard let processor = _processor(for: image.image, request: task.request) else {
_session(session, didProcessImage: image.image, isFinal: isFinal, metrics: TaskMetrics(), for: task)
return // No processing needed.
}
if !image.isFinal && session.processingSessions[task] != nil {
return // Back pressure - we're already busy processing another partial image
}
// Find existing session or create a new one.
let processingSession = _processingSession(for: image, processor: processor, session: session, task: task)
// Register task with a processing session.
processingSession.tasks.insert(task)
session.processingSessions[task] = processingSession
processingSession.updatePriority()
}
private func _processingSession(for image: ImageContainer, processor: AnyImageProcessor, session: ImageLoadingSession, task: ImageTask) -> ImageProcessingSession {
func findExistingSession() -> ImageProcessingSession? {
return session.processingSessions.values.first {
$0.processor == processor && $0.image.image === image.image
}
}
if let processingSession = findExistingSession() {
return processingSession
}
let processingSession = ImageProcessingSession(processor: processor, image: image)
let isFinal = image.isFinal
let operation = BlockOperation { [weak self, weak session, weak processingSession] in
var metrics = TaskMetrics.started()
let output: Image? = autoreleasepool {
processor.process(image: image, request: task.request)
}
metrics.end()
self?.queue.async {
guard let session = session else { return }
for task in (processingSession?.tasks ?? []) {
if session.processingSessions[task] === processingSession {
session.processingSessions[task] = nil
}
self?._session(session, didProcessImage: output, isFinal: isFinal, metrics: metrics, for: task)
}
}
}
operation.queuePriority = task.request.priority.queuePriority
session.priority.observe { [weak operation] in
operation?.queuePriority = $0.queuePriority
}
configuration.imageProcessingQueue.addOperation(operation)
processingSession.operation = operation
return processingSession
}
private func _processor(for image: Image, request: ImageRequest) -> AnyImageProcessor? {
if Configuration.isAnimatedImageDataEnabled && image.animatedImageData != nil {
return nil // Don't process animated images.
}
return configuration.imageProcessor(image, request)
}
private func _session(_ session: ImageLoadingSession, didProcessImage image: Image?, isFinal: Bool, metrics: TaskMetrics, for task: ImageTask) {
if isFinal {
task.metrics.processStartDate = metrics.startDate
task.metrics.processEndDate = metrics.endDate
let error: Error? = (image == nil ? .processingFailed : nil)
_session(session, didCompleteTask: task, image: image, error: error)
} else {
guard let image = image else { return }
_session(session, didProducePartialImage: image, for: task)
}
}
// MARK: ImageLoadingSession (Callbacks)
private func _session(_ session: ImageLoadingSession, didProducePartialImage image: Image, for task: ImageTask) {
// Check if we haven't completed the session yet by producing a final image
// or cancelling the task.
guard sessions[session.key] === session else { return }
let response = ImageResponse(image: image, urlResponse: session.urlResponse)
if let handler = session.tasks[task], let progress = handler.progress {
DispatchQueue.main.async {
guard !task.isCancelled else { return }
progress(response, task.completedUnitCount, task.totalUnitCount)
}
}
}
private func _session(_ session: ImageLoadingSession, didCompleteTask task: ImageTask, image: Image?, error: Error?) {
let response = image.map {
ImageResponse(image: $0, urlResponse: session.urlResponse)
}
// Store response in memory cache if allowed.
if let response = response, task.request.memoryCacheOptions.isWriteAllowed {
configuration.imageCache?.storeResponse(response, for: task.request)
}
if let handlers = session.tasks.removeValue(forKey: task) {
_didCompleteTask(task, response: response, error: error, completion: handlers.completion)
}
if session.tasks.isEmpty {
_didFinishSession(session)
}
}
private func _session(_ session: ImageLoadingSession, didFailWithError error: Error) {
for task in session.tasks.keys {
_session(session, didCompleteTask: task, image: nil, error: error)
}
}
private func _didFinishSession(_ session: ImageLoadingSession) {
// Check if session is still registered.
guard sessions[session.key] === session else { return }
session.metrics.endDate = Date()
sessions[session.key] = nil
}
// Cancel the session in case all handlers were removed.
private func _didCancelTask(_ task: ImageTask) {
task.metrics.wasCancelled = true
task.metrics.endDate = Date()
_cancelSession(for: task)
guard let didCollectMetrics = didFinishCollectingMetrics else { return }
DispatchQueue.main.async {
didCollectMetrics(task, task.metrics)
}
}
private func _didCompleteTask(_ task: ImageTask, response: ImageResponse?, error: Error?, completion: ImageTask.Completion?) {
task.metrics.endDate = Date()
DispatchQueue.main.async {
guard !task.isCancelled else { return }
completion?(response, error)
self.didFinishCollectingMetrics?(task, task.metrics)
}
}
// MARK: Errors
/// Represents all possible image pipeline errors.
public enum Error: Swift.Error, CustomDebugStringConvertible {
/// Data loader failed to load image data with a wrapped error.
case dataLoadingFailed(Swift.Error)
/// Decoder failed to produce a final image.
case decodingFailed
/// Processor failed to produce a final image.
case processingFailed
public var debugDescription: String {
switch self {
case let .dataLoadingFailed(error): return "Failed to load image data: \(error)"
case .decodingFailed: return "Failed to create an image from the image data"
case .processingFailed: return "Failed to process the image"
}
}
}
}
// MARK: - ImageLoadingSession
/// An image loading session. During the lifetime of a session, handlers can
/// subscribe to and unsubscribe from it.
private final class ImageLoadingSession {
let sessionId: Int
/// The original request with which the session was created.
let request: ImageRequest
let key: AnyHashable // loading key
let cts = CancellationTokenSource()
var token: CancellationToken { return cts.token }
// Registered image tasks.
var tasks = [ImageTask: Handlers]()
struct Handlers {
let progress: ImageTask.ProgressHandler?
let completion: ImageTask.Completion?
}
// Data loading session.
var urlResponse: URLResponse?
var resumableData: ResumableData?
var resumedDataCount: Int64 = 0
lazy var data = Data()
// Decoding session.
var decoder: ImageDecoding?
var decodedFinalImage: ImageContainer? // Decoding result
weak var decodingOperation: Foundation.Operation?
// Processing sessions.
var processingSessions = [ImageTask: ImageProcessingSession]()
// Metrics that we collect during the lifetime of a session.
let metrics: ImageTaskMetrics.SessionMetrics
let priority: Property<ImageRequest.Priority>
deinit {
decodingOperation?.cancel()
}
init(sessionId: Int, request: ImageRequest, key: AnyHashable) {
self.sessionId = sessionId
self.request = request
self.key = key
self.metrics = ImageTaskMetrics.SessionMetrics(sessionId: sessionId)
self.priority = Property(value: request.priority)
}
func updatePriority() {
priority.update(with: tasks.keys)
}
var isDecodingDisabled: Bool {
return !tasks.keys.contains {
!$0.request.isDecodingDisabled
}
}
}
private final class ImageProcessingSession {
let processor: AnyImageProcessor
let image: ImageContainer
var tasks = Set<ImageTask>()
weak var operation: Foundation.Operation?
let priority = Property<ImageRequest.Priority>(value: .normal)
deinit {
operation?.cancel()
}
init(processor: AnyImageProcessor, image: ImageContainer) {
self.processor = processor; self.image = image
}
// Update priority for processing operations (those are per image task,
// not per image session).
func updatePriority() {
priority.update(with: tasks)
}
}
struct ImageContainer {
let image: Image
let isFinal: Bool
let scanNumber: Int?
}
// MARK: - Extensions
private extension Property where T == ImageRequest.Priority {
func update<Tasks: Sequence>(with tasks: Tasks) where Tasks.Element == ImageTask {
if let newPriority = tasks.map({ $0.request.priority }).max(), self.value != newPriority {
self.value = newPriority
}
}
}
private extension Foundation.OperationQueue {
func enqueue(_ operation: Foundation.Operation, for session: ImageLoadingSession) {
operation.queuePriority = session.priority.value.queuePriority
session.priority.observe { [weak operation] in
operation?.queuePriority = $0.queuePriority
}
session.token.register { [weak operation] in
operation?.cancel()
}
addOperation(operation)
}
}

172
Pods/Nuke/Sources/ImagePreheater.swift generated Normal file
View File

@@ -0,0 +1,172 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
/// Prefetches and caches image in order to eliminate delays when you request
/// individual images later.
///
/// To start preheating call `startPreheating(with:)` method. When you
/// need an individual image just start loading an image using `Loading` object.
/// When preheating is no longer necessary call `stopPreheating(with:)` method.
///
/// All `Preheater` methods are thread-safe.
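///
/// A minimal usage sketch (the URL is a placeholder):
///
///     let preheater = ImagePreheater(pipeline: ImagePipeline.shared)
///     let urls = [URL(string: "https://example.com/image.jpeg")!]
///     preheater.startPreheating(with: urls)
///     // ... later, when the images are no longer needed:
///     preheater.stopPreheating(with: urls)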
public final class ImagePreheater {
private let pipeline: ImagePipeline
private let queue = DispatchQueue(label: "com.github.kean.Nuke.Preheater")
private let preheatQueue = OperationQueue()
private var tasks = [PreheatKey: Task]()
private let destination: Destination
/// Prefetching destination.
public enum Destination {
/// Prefetches the image and stores it in both the memory and disk caches
/// (assuming they are enabled; naturally, there is no reason to prefetch
/// unless they are).
case memoryCache
/// Prefetches image data and stores it in the disk cache. Will not decode
/// the image data and will therefore use less CPU.
case diskCache
}
/// Initializes the `Preheater` instance.
/// - parameter pipeline: `ImagePipeline.shared` by default.
/// - parameter destination: `.memoryCache` by default.
/// - parameter maxConcurrentRequestCount: 2 by default.
public init(pipeline: ImagePipeline = ImagePipeline.shared, destination: Destination = .memoryCache, maxConcurrentRequestCount: Int = 2) {
self.pipeline = pipeline
self.destination = destination
self.preheatQueue.maxConcurrentOperationCount = maxConcurrentRequestCount
}
/// Starts preheating images for the given URLs.
/// - note: See `func startPreheating(with requests: [ImageRequest])` for more info
public func startPreheating(with urls: [URL]) {
startPreheating(with: _requests(for: urls))
}
/// Starts preheating images for the given requests.
///
/// When you call this method, `Preheater` starts to load and cache images
/// for the given requests. At any time afterward, you can create tasks
/// for individual images with equivalent requests.
public func startPreheating(with requests: [ImageRequest]) {
queue.async {
for request in requests {
self._startPreheating(with: self._updatedRequest(request))
}
}
}
private func _startPreheating(with request: ImageRequest) {
let key = PreheatKey(request: request)
// Check if we've already started preheating.
guard tasks[key] == nil else { return }
// Check if the image is already in memory cache.
guard pipeline.configuration.imageCache?.cachedResponse(for: request) == nil else {
return // already in memory cache
}
let task = Task(request: request, key: key)
let token = task.cts.token
let operation = Operation(starter: { [weak self] finish in
let task = self?.pipeline.loadImage(with: request) { [weak self] _, _ in
self?._remove(task)
finish()
}
token.register {
task?.cancel()
finish()
}
})
preheatQueue.addOperation(operation)
token.register { [weak operation] in operation?.cancel() }
tasks[key] = task
}
private func _remove(_ task: Task) {
queue.async {
guard self.tasks[task.key] === task else { return }
self.tasks[task.key] = nil
}
}
/// Stops preheating images for the given URLs.
public func stopPreheating(with urls: [URL]) {
stopPreheating(with: _requests(for: urls))
}
/// Stops preheating images for the given requests and cancels outstanding
/// requests.
public func stopPreheating(with requests: [ImageRequest]) {
queue.async {
for request in requests {
self._stopPreheating(with: self._updatedRequest(request))
}
}
}
private func _stopPreheating(with request: ImageRequest) {
if let task = tasks[PreheatKey(request: request)] {
tasks[task.key] = nil
task.cts.cancel()
}
}
/// Stops all preheating tasks.
public func stopPreheating() {
queue.async {
self.tasks.forEach { $0.1.cts.cancel() }
self.tasks.removeAll()
}
}
private func _requests(for urls: [URL]) -> [ImageRequest] {
return urls.map {
var request = ImageRequest(url: $0)
request.priority = .low
return request
}
}
private func _updatedRequest(_ request: ImageRequest) -> ImageRequest {
guard destination == .diskCache else {
return request // Avoid creating a new copy
}
var request = request
// Under the hood we disable decoding for the requests that are not meant
// to be stored in the memory cache.
request.isDecodingDisabled = (destination == .diskCache)
return request
}
private final class Task {
let key: PreheatKey
let request: ImageRequest
let cts = CancellationTokenSource()
init(request: ImageRequest, key: PreheatKey) {
self.request = request
self.key = key
}
}
private struct PreheatKey: Hashable {
let cacheKey: ImageRequest.CacheKey
let loadKey: ImageRequest.LoadKey
init(request: ImageRequest) {
self.cacheKey = ImageRequest.CacheKey(request: request)
self.loadKey = ImageRequest.LoadKey(request: request)
}
}
}

193
Pods/Nuke/Sources/ImageProcessing.swift generated Normal file
View File

@@ -0,0 +1,193 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
/// Performs image processing.
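///
/// A hypothetical conforming type (a sketch only; `applyFilter` is assumed to exist):
///
///     struct SepiaProcessor: ImageProcessing {
///         func process(image: Image, context: ImageProcessingContext) -> Image? {
///             return applyFilter(to: image) // return nil if filtering fails
///         }
///         static func == (lhs: SepiaProcessor, rhs: SepiaProcessor) -> Bool {
///             return true // stateless, so all instances are equivalent
///         }
///     }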
public protocol ImageProcessing: Equatable {
/// Returns processed image.
func process(image: Image, context: ImageProcessingContext) -> Image?
}
/// Image processing context used when selecting which processor to use.
public struct ImageProcessingContext {
public let request: ImageRequest
public let isFinal: Bool
public let scanNumber: Int? // need a more general purpose way to implement this
}
/// Composes multiple processors.
internal struct ImageProcessorComposition: ImageProcessing {
private let processors: [AnyImageProcessor]
/// Composes multiple processors.
public init(_ processors: [AnyImageProcessor]) {
self.processors = processors
}
/// Processes the given image by applying each processor in the order in
/// which they were added. If one of the processors fails to produce
/// an image, processing stops and `nil` is returned.
func process(image: Image, context: ImageProcessingContext) -> Image? {
return processors.reduce(image) { image, processor in
return autoreleasepool {
image.flatMap { processor.process(image: $0, context: context) }
}
}
}
/// Returns true if the underlying processors are pairwise-equivalent.
public static func == (lhs: ImageProcessorComposition, rhs: ImageProcessorComposition) -> Bool {
return lhs.processors == rhs.processors
}
}
/// Type-erased image processor.
public struct AnyImageProcessor: ImageProcessing {
private let _process: (Image, ImageProcessingContext) -> Image?
private let _processor: Any
private let _equals: (AnyImageProcessor) -> Bool
public init<P: ImageProcessing>(_ processor: P) {
self._process = { processor.process(image: $0, context: $1) }
self._processor = processor
self._equals = { ($0._processor as? P) == processor }
}
public func process(image: Image, context: ImageProcessingContext) -> Image? {
return self._process(image, context)
}
public static func == (lhs: AnyImageProcessor, rhs: AnyImageProcessor) -> Bool {
return lhs._equals(rhs)
}
}
internal struct AnonymousImageProcessor<Key: Hashable>: ImageProcessing {
private let _key: Key
private let _closure: (Image) -> Image?
init(_ key: Key, _ closure: @escaping (Image) -> Image?) {
self._key = key; self._closure = closure
}
func process(image: Image, context: ImageProcessingContext) -> Image? {
return self._closure(image)
}
static func == (lhs: AnonymousImageProcessor, rhs: AnonymousImageProcessor) -> Bool {
return lhs._key == rhs._key
}
}
extension ImageProcessing {
func process(image: ImageContainer, request: ImageRequest) -> Image? {
let context = ImageProcessingContext(request: request, isFinal: image.isFinal, scanNumber: image.scanNumber)
return process(image: image.image, context: context)
}
}
#if !os(macOS)
import UIKit
/// Decompresses and (optionally) scales down input images. Maintains
/// original aspect ratio.
///
/// Decompressing compressed image formats (such as JPEG) can significantly
/// improve drawing performance as it allows a bitmap representation to be
/// created in the background rather than on the main thread.
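///
/// A usage sketch (`imageView` and `url` are placeholders) that pairs the
/// decompressor with an `ImageRequest` using the `targetSize(for:)` helper below:
///
///     let size = ImageDecompressor.targetSize(for: imageView)
///     let request = ImageRequest(url: url, targetSize: size, contentMode: .aspectFill)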
public struct ImageDecompressor: ImageProcessing {
/// An option for how to resize the image.
public enum ContentMode {
/// Scales the image so that it completely fills the target size.
/// Doesn't clip images.
case aspectFill
/// Scales the image so that it fits the target size.
case aspectFit
}
/// Size to pass to disable resizing.
public static let MaximumSize = CGSize(
width: CGFloat.greatestFiniteMagnitude,
height: CGFloat.greatestFiniteMagnitude
)
private let targetSize: CGSize
private let contentMode: ContentMode
private let upscale: Bool
/// Initializes `Decompressor` with the given parameters.
/// - parameter targetSize: Size in pixels. `MaximumSize` by default.
/// - parameter contentMode: An option for how to resize the image
/// to the target size. `.aspectFill` by default.
public init(targetSize: CGSize = MaximumSize, contentMode: ContentMode = .aspectFill, upscale: Bool = false) {
self.targetSize = targetSize
self.contentMode = contentMode
self.upscale = upscale
}
/// Decompresses and scales the image.
public func process(image: Image, context: ImageProcessingContext) -> Image? {
return decompress(image, targetSize: targetSize, contentMode: contentMode, upscale: upscale)
}
/// Returns true if both have the same `targetSize` and `contentMode`.
public static func == (lhs: ImageDecompressor, rhs: ImageDecompressor) -> Bool {
return lhs.targetSize == rhs.targetSize && lhs.contentMode == rhs.contentMode
}
#if !os(watchOS)
/// Returns the target size in pixels for the given view. Takes the main
/// screen scale into account.
public static func targetSize(for view: UIView) -> CGSize { // in pixels
let scale = UIScreen.main.scale
let size = view.bounds.size
return CGSize(width: size.width * scale, height: size.height * scale)
}
#endif
}
internal func decompress(_ image: UIImage, targetSize: CGSize, contentMode: ImageDecompressor.ContentMode, upscale: Bool) -> UIImage {
guard let cgImage = image.cgImage else { return image }
let bitmapSize = CGSize(width: cgImage.width, height: cgImage.height)
let scaleHor = targetSize.width / bitmapSize.width
let scaleVert = targetSize.height / bitmapSize.height
let scale = contentMode == .aspectFill ? max(scaleHor, scaleVert) : min(scaleHor, scaleVert)
return decompress(image, scale: CGFloat(upscale ? scale : min(scale, 1)))
}
internal func decompress(_ image: UIImage, scale: CGFloat) -> UIImage {
guard let cgImage = image.cgImage else { return image }
let size = CGSize(
width: round(scale * CGFloat(cgImage.width)),
height: round(scale * CGFloat(cgImage.height))
)
// For more info see:
// - Quartz 2D Programming Guide
// - https://github.com/kean/Nuke/issues/35
// - https://github.com/kean/Nuke/issues/57
let alphaInfo: CGImageAlphaInfo = isOpaque(cgImage) ? .noneSkipLast : .premultipliedLast
guard let ctx = CGContext(
data: nil,
width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: 0,
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: alphaInfo.rawValue) else {
return image
}
ctx.draw(cgImage, in: CGRect(origin: CGPoint.zero, size: size))
guard let decompressed = ctx.makeImage() else { return image }
return UIImage(cgImage: decompressed, scale: image.scale, orientation: image.imageOrientation)
}
private func isOpaque(_ image: CGImage) -> Bool {
let alpha = image.alphaInfo
return alpha == .none || alpha == .noneSkipFirst || alpha == .noneSkipLast
}
#endif

335
Pods/Nuke/Sources/ImageRequest.swift generated Normal file
View File

@@ -0,0 +1,335 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
#if !os(macOS)
import UIKit
#endif
/// Represents an image request.
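///
/// A minimal request (the URL is a placeholder):
///
///     var request = ImageRequest(url: URL(string: "https://example.com/image.jpeg")!)
///     request.priority = .high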
public struct ImageRequest {
// MARK: Parameters of the Request
internal var urlString: String? {
return _ref._urlString
}
/// The `URLRequest` used for loading an image.
public var urlRequest: URLRequest {
get { return _ref.resource.urlRequest }
set {
_mutate {
$0.resource = Resource.urlRequest(newValue)
$0._urlString = newValue.url?.absoluteString
}
}
}
/// Processor to be applied to the image. `Decompressor` by default.
///
/// Decompressing compressed image formats (such as JPEG) can significantly
/// improve drawing performance as it allows a bitmap representation to be
/// created in the background rather than on the main thread.
public var processor: AnyImageProcessor? {
get {
// The default processor is nil on macOS and Decompressor on other platforms
#if !os(macOS)
return _ref._isDefaultProcessorUsed ? ImageRequest.decompressor : _ref._processor
#else
return _ref._isDefaultProcessorUsed ? nil : _ref._processor
#endif
}
set {
_mutate {
$0._isDefaultProcessorUsed = false
$0._processor = newValue
}
}
}
/// The policy to use when reading or writing images to the memory cache.
public struct MemoryCacheOptions {
/// `true` by default.
public var isReadAllowed = true
/// `true` by default.
public var isWriteAllowed = true
public init() {}
}
/// `MemoryCacheOptions()` (read allowed, write allowed) by default.
public var memoryCacheOptions: MemoryCacheOptions {
get { return _ref.memoryCacheOptions }
set { _mutate { $0.memoryCacheOptions = newValue } }
}
/// The execution priority of the request.
public enum Priority: Int, Comparable {
case veryLow = 0, low, normal, high, veryHigh
internal var queuePriority: Operation.QueuePriority {
switch self {
case .veryLow: return .veryLow
case .low: return .low
case .normal: return .normal
case .high: return .high
case .veryHigh: return .veryHigh
}
}
public static func < (lhs: Priority, rhs: Priority) -> Bool {
return lhs.rawValue < rhs.rawValue
}
}
/// The relative priority of the operation. This value is used to influence
/// the order in which requests are executed. `.normal` by default.
public var priority: Priority {
get { return _ref.priority }
set { _mutate { $0.priority = newValue }}
}
/// Returns a key that compares requests with regard to caching images.
///
/// The default key considers two requests equivalent if they have the same
/// `URLRequests` and the same processors. `URLRequests` are compared
/// just by their `URLs`.
public var cacheKey: AnyHashable? {
get { return _ref.cacheKey }
set { _mutate { $0.cacheKey = newValue } }
}
/// Returns a key that compares requests with regard to loading images.
///
/// The default key considers two requests equivalent if they have the same
/// `URLRequests` and the same processors. `URLRequests` are compared by
/// their `URL`, `cachePolicy`, and `allowsCellularAccess` properties.
public var loadKey: AnyHashable? {
get { return _ref.loadKey }
set { _mutate { $0.loadKey = newValue } }
}
/// If decoding is disabled, when the image data is loaded, the pipeline is
/// not going to create an image from it and will produce the `.decodingFailed`
/// error instead. `false` by default.
var isDecodingDisabled: Bool {
// This is only used by `ImagePreheater` right now
get { return _ref.isDecodingDisabled }
set { _mutate { $0.isDecodingDisabled = newValue } }
}
/// Custom info passed alongside the request.
public var userInfo: Any? {
get { return _ref.userInfo }
set { _mutate { $0.userInfo = newValue }}
}
// MARK: Initializers
/// Initializes a request with the given URL.
public init(url: URL) {
_ref = Container(resource: Resource.url(url))
_ref._urlString = url.absoluteString
// Creating `.absoluteString` takes 50% of the time of request creation,
// but it's still faster than using URLs as cache keys.
}
/// Initializes a request with the given request.
public init(urlRequest: URLRequest) {
_ref = Container(resource: Resource.urlRequest(urlRequest))
_ref._urlString = urlRequest.url?.absoluteString
}
#if !os(macOS)
/// Initializes a request with the given URL.
/// - parameter processor: Custom image processor.
public init<Processor: ImageProcessing>(url: URL, processor: Processor) {
self.init(url: url)
self.processor = AnyImageProcessor(processor)
}
/// Initializes a request with the given request.
/// - parameter processor: Custom image processor.
public init<Processor: ImageProcessing>(urlRequest: URLRequest, processor: Processor) {
self.init(urlRequest: urlRequest)
self.processor = AnyImageProcessor(processor)
}
/// Initializes a request with the given URL.
/// - parameter targetSize: Size in pixels.
/// - parameter contentMode: An option for how to resize the image
/// to the target size.
public init(url: URL, targetSize: CGSize, contentMode: ImageDecompressor.ContentMode, upscale: Bool = false) {
self.init(url: url, processor: ImageDecompressor(
targetSize: targetSize,
contentMode: contentMode,
upscale: upscale
))
}
/// Initializes a request with the given request.
/// - parameter targetSize: Size in pixels.
/// - parameter contentMode: An option for how to resize the image
/// to the target size.
public init(urlRequest: URLRequest, targetSize: CGSize, contentMode: ImageDecompressor.ContentMode, upscale: Bool = false) {
self.init(urlRequest: urlRequest, processor: ImageDecompressor(
targetSize: targetSize,
contentMode: contentMode,
upscale: upscale
))
}
fileprivate static let decompressor = AnyImageProcessor(ImageDecompressor())
#endif
// CoW:
private var _ref: Container
private mutating func _mutate(_ closure: (Container) -> Void) {
if !isKnownUniquelyReferenced(&_ref) {
_ref = Container(container: _ref)
}
closure(_ref)
}
/// Just like many Swift built-in types, `ImageRequest` uses a CoW approach to
/// avoid memberwise retains/releases when `ImageRequest` is passed around.
private class Container {
var resource: Resource
var _urlString: String? // memoized absoluteString
// true unless the user set a custom one; this allows us not to store the
// default processor anywhere in the `Container` and to skip equality tests
// when the default processor is used
var _isDefaultProcessorUsed: Bool = true
var _processor: AnyImageProcessor?
var memoryCacheOptions = MemoryCacheOptions()
var priority: ImageRequest.Priority = .normal
var cacheKey: AnyHashable?
var loadKey: AnyHashable?
var isDecodingDisabled: Bool = false
var userInfo: Any?
/// Creates a resource with a default processor.
init(resource: Resource) {
self.resource = resource
}
/// Creates a copy.
init(container ref: Container) {
self.resource = ref.resource
self._urlString = ref._urlString
self._isDefaultProcessorUsed = ref._isDefaultProcessorUsed
self._processor = ref._processor
self.memoryCacheOptions = ref.memoryCacheOptions
self.priority = ref.priority
self.cacheKey = ref.cacheKey
self.loadKey = ref.loadKey
self.isDecodingDisabled = ref.isDecodingDisabled
self.userInfo = ref.userInfo
}
}
/// Resource representation (either URL or URLRequest).
private enum Resource {
case url(URL)
case urlRequest(URLRequest)
var urlRequest: URLRequest {
switch self {
case let .url(url): return URLRequest(url: url) // create lazily
case let .urlRequest(urlRequest): return urlRequest
}
}
}
}
public extension ImageRequest {
/// Appends a processor to the request. You can append arbitrary number of
/// processors to the request.
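///
/// For example (`MyFilter` stands in for any `ImageProcessing` type):
///
///     var request = ImageRequest(url: url)
///     request.process(with: MyFilter())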
mutating func process<P: ImageProcessing>(with processor: P) {
guard let existing = self.processor else {
self.processor = AnyImageProcessor(processor)
return
}
// Chain new processor and the existing one.
self.processor = AnyImageProcessor(ImageProcessorComposition([existing, AnyImageProcessor(processor)]))
}
/// Appends a processor to the request. You can append arbitrary number of
/// processors to the request.
func processed<P: ImageProcessing>(with processor: P) -> ImageRequest {
var request = self
request.process(with: processor)
return request
}
/// Appends a processor to the request. You can append arbitrary number of
/// processors to the request.
mutating func process<Key: Hashable>(key: Key, _ closure: @escaping (Image) -> Image?) {
process(with: AnonymousImageProcessor<Key>(key, closure))
}
/// Appends a processor to the request. You can append arbitrary number of
/// processors to the request.
func processed<Key: Hashable>(key: Key, _ closure: @escaping (Image) -> Image?) -> ImageRequest {
return processed(with: AnonymousImageProcessor<Key>(key, closure))
}
}
internal extension ImageRequest {
struct CacheKey: Hashable {
let request: ImageRequest
func hash(into hasher: inout Hasher) {
if let customKey = request._ref.cacheKey {
hasher.combine(customKey)
} else {
hasher.combine(request._ref._urlString?.hashValue ?? 0)
}
}
static func == (lhs: CacheKey, rhs: CacheKey) -> Bool {
let lhs = lhs.request, rhs = rhs.request
if let lhsCustomKey = lhs._ref.cacheKey, let rhsCustomKey = rhs._ref.cacheKey {
return lhsCustomKey == rhsCustomKey
}
guard lhs._ref._urlString == rhs._ref._urlString else {
return false
}
return (lhs._ref._isDefaultProcessorUsed && rhs._ref._isDefaultProcessorUsed)
|| (lhs.processor == rhs.processor)
}
}
struct LoadKey: Hashable {
let request: ImageRequest
func hash(into hasher: inout Hasher) {
if let customKey = request._ref.loadKey {
hasher.combine(customKey)
} else {
hasher.combine(request._ref._urlString?.hashValue ?? 0)
}
}
static func == (lhs: LoadKey, rhs: LoadKey) -> Bool {
func isEqual(_ lhs: URLRequest, _ rhs: URLRequest) -> Bool {
return lhs.cachePolicy == rhs.cachePolicy
&& lhs.allowsCellularAccess == rhs.allowsCellularAccess
}
let lhs = lhs.request, rhs = rhs.request
if let lhsCustomKey = lhs._ref.loadKey, let rhsCustomKey = rhs._ref.loadKey {
return lhsCustomKey == rhsCustomKey
}
return lhs._ref._urlString == rhs._ref._urlString
&& isEqual(lhs.urlRequest, rhs.urlRequest)
}
}
}

115
Pods/Nuke/Sources/ImageTaskMetrics.swift generated Normal file
View File

@@ -0,0 +1,115 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
public struct ImageTaskMetrics: CustomDebugStringConvertible {
public let taskId: Int
public internal(set) var wasCancelled: Bool = false
public internal(set) var session: SessionMetrics?
public let startDate: Date
public internal(set) var processStartDate: Date?
public internal(set) var processEndDate: Date?
public internal(set) var endDate: Date? // failed or completed
public var totalDuration: TimeInterval? {
guard let endDate = endDate else { return nil }
return endDate.timeIntervalSince(startDate)
}
/// Returns `true` if the task wasn't the one that initiated image loading.
public internal(set) var wasSubscibedToExistingSession: Bool = false
public internal(set) var isMemoryCacheHit: Bool = false
init(taskId: Int, startDate: Date) {
self.taskId = taskId; self.startDate = startDate
}
public var debugDescription: String {
var printer = Printer()
printer.section(title: "Task Information") {
$0.value("Task ID", taskId)
$0.timeline("Duration", startDate, endDate, isReversed: false)
$0.timeline("Process", processStartDate, processEndDate)
$0.value("Was Cancelled", wasCancelled)
$0.value("Is Memory Cache Hit", isMemoryCacheHit)
$0.value("Was Subscribed To Existing Image Loading Session", wasSubscibedToExistingSession)
}
printer.section(title: "Image Loading Session") {
$0.string(session.map({ $0.debugDescription }) ?? "nil")
}
return printer.output()
}
// Download session metrics. One or more tasks can share the same
// session metrics.
public final class SessionMetrics: CustomDebugStringConvertible {
/// - important: Data loading might start prior to `timeResumed` if the task gets
/// coalesced with another task.
public let sessionId: Int
public internal(set) var wasCancelled: Bool = false
// MARK: - Timeline
public let startDate = Date()
public internal(set) var checkDiskCacheStartDate: Date?
public internal(set) var checkDiskCacheEndDate: Date?
public internal(set) var loadDataStartDate: Date?
public internal(set) var loadDataEndDate: Date?
public internal(set) var decodeStartDate: Date?
public internal(set) var decodeEndDate: Date?
@available(*, deprecated, message: "Please use the same property on `ImageTaskMetrics` instead.")
public internal(set) var processStartDate: Date?
@available(*, deprecated, message: "Please use the same property on `ImageTaskMetrics` instead.")
public internal(set) var processEndDate: Date?
public internal(set) var endDate: Date? // failed or completed
public var totalDuration: TimeInterval? {
guard let endDate = endDate else { return nil }
return endDate.timeIntervalSince(startDate)
}
// MARK: - Resumable Data
public internal(set) var wasResumed: Bool?
public internal(set) var resumedDataCount: Int?
public internal(set) var serverConfirmedResume: Bool?
public internal(set) var downloadedDataCount: Int?
public var totalDownloadedDataCount: Int? {
guard let downloaded = self.downloadedDataCount else { return nil }
return downloaded + (resumedDataCount ?? 0)
}
init(sessionId: Int) { self.sessionId = sessionId }
public var debugDescription: String {
var printer = Printer()
printer.section(title: "Session Information") {
$0.value("Session ID", sessionId)
$0.value("Total Duration", Printer.duration(totalDuration))
$0.value("Was Cancelled", wasCancelled)
}
printer.section(title: "Timeline") {
$0.timeline("Total", startDate, endDate)
$0.line(String(repeating: "-", count: 36))
$0.timeline("Check Disk Cache", checkDiskCacheStartDate, checkDiskCacheEndDate)
$0.timeline("Load Data", loadDataStartDate, loadDataEndDate)
$0.timeline("Decode", decodeStartDate, decodeEndDate)
}
printer.section(title: "Resumable Data") {
$0.value("Was Resumed", wasResumed)
$0.value("Resumable Data Count", resumedDataCount)
$0.value("Server Confirmed Resume", serverConfirmedResume)
}
return printer.output()
}
}
}

496
Pods/Nuke/Sources/ImageView.swift generated Normal file
View File

@@ -0,0 +1,496 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
#if !os(macOS)
import UIKit.UIImage
/// Alias for `UIImage`.
public typealias Image = UIImage
#else
import AppKit.NSImage
/// Alias for `NSImage`.
public typealias Image = NSImage
#endif
#if !os(watchOS)
/// Displays images. Adopt this protocol in views to make them compatible with
/// Nuke APIs.
///
/// The protocol is defined as `@objc` to enable users to override its methods
/// in extensions (e.g. you can override `display(image:)` in `UIImageView` subclass).
@objc public protocol ImageDisplaying {
@objc func display(image: Image?)
}
#if !os(macOS)
import UIKit
/// A `UIView` that implements `ImageDisplaying` protocol.
public typealias ImageDisplayingView = UIView & ImageDisplaying
extension UIImageView: ImageDisplaying {
/// Displays an image.
open func display(image: Image?) {
self.image = image
}
}
#else
import Cocoa
/// An `NSView` that implements `ImageDisplaying` protocol.
public typealias ImageDisplayingView = NSView & ImageDisplaying
extension NSImageView: ImageDisplaying {
/// Displays an image.
open func display(image: Image?) {
self.image = image
}
}
#endif
/// Loads an image into the view.
///
/// Before loading the new image, this function prepares the view for reuse by
/// cancelling any outstanding requests and removing previously displayed images (if any).
///
/// If the image is stored in memory cache, the image is displayed immediately.
/// If not, the image is loaded using an image pipeline. Displays a `placeholder`
/// if it was provided. When the request completes the loaded image is displayed
/// (or `failureImage` in case of an error).
///
/// Nuke keeps a weak reference to the view. If the view is deallocated
/// the associated request automatically gets cancelled.
///
/// - parameter options: `ImageLoadingOptions.shared` by default.
/// - parameter progress: A closure to be called periodically on the main thread
/// when the progress is updated. `nil` by default.
/// - parameter completion: A closure to be called on the main thread when the
/// request is finished. Gets called synchronously if the response was found in
/// memory cache. `nil` by default.
/// - returns: An image task or `nil` if the image was found in memory cache.
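///
/// A typical call site (the URL and `imageView` are placeholders):
///
///     Nuke.loadImage(with: URL(string: "https://example.com/image.jpeg")!, into: imageView)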
@discardableResult
public func loadImage(with url: URL,
options: ImageLoadingOptions = ImageLoadingOptions.shared,
into view: ImageDisplayingView,
progress: ImageTask.ProgressHandler? = nil,
completion: ImageTask.Completion? = nil) -> ImageTask? {
return loadImage(with: ImageRequest(url: url), options: options, into: view, progress: progress, completion: completion)
}
/// Loads an image into the view.
///
/// Before loading the new image, this function prepares the view for reuse by
/// cancelling any outstanding requests and removing previously displayed images (if any).
///
/// If the image is stored in memory cache, the image is displayed immediately.
/// If not, the image is loaded using an image pipeline. Displays a `placeholder`
/// if it was provided. When the request completes the loaded image is displayed
/// (or `failureImage` in case of an error).
///
/// Nuke keeps a weak reference to the view. If the view is deallocated
/// the associated request automatically gets cancelled.
///
/// - parameter options: `ImageLoadingOptions.shared` by default.
/// - parameter progress: A closure to be called periodically on the main thread
/// when the progress is updated. `nil` by default.
/// - parameter completion: A closure to be called on the main thread when the
/// request is finished. Gets called synchronously if the response was found in
/// memory cache. `nil` by default.
/// - returns: An image task or `nil` if the image was found in memory cache.
@discardableResult
public func loadImage(with request: ImageRequest,
options: ImageLoadingOptions = ImageLoadingOptions.shared,
into view: ImageDisplayingView,
progress: ImageTask.ProgressHandler? = nil,
completion: ImageTask.Completion? = nil) -> ImageTask? {
assert(Thread.isMainThread)
let controller = ImageViewController.controller(for: view)
return controller.loadImage(with: request, options: options, progress: progress, completion: completion)
}
/// Cancels an outstanding request associated with the view.
public func cancelRequest(for view: ImageDisplayingView) {
assert(Thread.isMainThread)
ImageViewController.controller(for: view).cancelOutstandingTask()
}
// MARK: - ImageLoadingOptions
/// A range of options that control how the image is loaded and displayed.
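///
/// A configuration sketch (the asset name is a placeholder):
///
///     ImageLoadingOptions.shared.placeholder = Image(named: "placeholder")
///     ImageLoadingOptions.shared.transition = .fadeIn(duration: 0.33)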
public struct ImageLoadingOptions {
/// Shared options.
public static var shared = ImageLoadingOptions()
/// Placeholder to be displayed when the image is loading. `nil` by default.
public var placeholder: Image?
/// The image transition animation performed when displaying a loaded image.
/// Only runs when the image was not found in memory cache. `nil` by default.
public var transition: Transition?
/// Image to be displayed when the request fails. `nil` by default.
public var failureImage: Image?
/// The image transition animation performed when displaying a failure image.
/// `nil` by default.
public var failureImageTransition: Transition?
/// If true, the requested image will always appear with a transition, even
/// when loaded from the cache.
public var alwaysTransition = false
/// If true, every time you request a new image for a view, the view will be
/// automatically prepared for reuse: image will be set to `nil`, and animations
/// will be removed. `true` by default.
public var isPrepareForReuseEnabled = true
/// Custom pipeline to be used. `nil` by default.
public var pipeline: ImagePipeline?
#if !os(macOS)
/// Content modes to be used for each image type (placeholder, success,
/// failure). `nil` by default (don't change content mode).
public var contentModes: ContentModes?
/// Custom content modes to be used for each image type (placeholder, success,
/// failure).
public struct ContentModes {
/// Content mode to be used for the loaded image.
public var success: UIView.ContentMode
/// Content mode to be used when displaying a `failureImage`.
public var failure: UIView.ContentMode
/// Content mode to be used when displaying a `placeholder`.
public var placeholder: UIView.ContentMode
/// - parameter success: A content mode to be used with a loaded image.
/// - parameter failure: A content mode to be used with a `failureImage`.
/// - parameter placeholder: A content mode to be used with a `placeholder`.
public init(success: UIView.ContentMode, failure: UIView.ContentMode, placeholder: UIView.ContentMode) {
self.success = success; self.failure = failure; self.placeholder = placeholder
}
}
/// - parameter placeholder: Placeholder to be displayed when the image is
/// loading. `nil` by default.
/// - parameter transition: The image transition animation performed when
/// displaying a loaded image. Only runs when the image was not found in
/// memory cache. `nil` by default (no animations).
/// - parameter failureImage: Image to be displayed when the request fails.
/// `nil` by default.
/// - parameter failureImageTransition: The image transition animation
/// performed when displaying a failure image. `nil` by default.
/// - parameter contentModes: Content modes to be used for each image type
/// (placeholder, success, failure). `nil` by default (don't change content mode).
public init(placeholder: Image? = nil, transition: Transition? = nil, failureImage: Image? = nil, failureImageTransition: Transition? = nil, contentModes: ContentModes? = nil) {
self.placeholder = placeholder
self.transition = transition
self.failureImage = failureImage
self.failureImageTransition = failureImageTransition
self.contentModes = contentModes
}
#else
public init(placeholder: Image? = nil, transition: Transition? = nil, failureImage: Image? = nil, failureImageTransition: Transition? = nil) {
self.placeholder = placeholder
self.transition = transition
self.failureImage = failureImage
self.failureImageTransition = failureImageTransition
}
#endif
/// An animated image transition.
public struct Transition {
var style: Style
struct Parameters { // internal representation
let duration: TimeInterval
#if !os(macOS)
let options: UIView.AnimationOptions
#endif
}
enum Style { // internal representation
case fadeIn(parameters: Parameters)
case custom((ImageDisplayingView, Image) -> Void)
}
#if !os(macOS)
/// Fade-in transition (cross-fade in case the image view is already
/// displaying an image).
public static func fadeIn(duration: TimeInterval, options: UIView.AnimationOptions = .allowUserInteraction) -> Transition {
return Transition(style: .fadeIn(parameters: Parameters(duration: duration, options: options)))
}
#else
/// Fade-in transition.
public static func fadeIn(duration: TimeInterval) -> Transition {
return Transition(style: .fadeIn(parameters: Parameters(duration: duration)))
}
#endif
/// Custom transition. Only runs when the image was not found in memory cache.
public static func custom(_ closure: @escaping (ImageDisplayingView, Image) -> Void) -> Transition {
return Transition(style: .custom(closure))
}
}
public init() {}
}
// MARK: - ImageViewController
/// Manages image requests on behalf of an image view.
///
/// - note: With a few modifications this might become public at some point,
/// however, as it stands today, `ImageViewController` is just a helper class;
/// making it public wouldn't expose any additional functionality to users.
private final class ImageViewController {
// Ideally should be `unowned` but can't because of the Swift bug
// https://bugs.swift.org/browse/SR-7369
private weak var imageView: ImageDisplayingView?
private weak var task: ImageTask?
private var taskId: Int = 0
// Automatically cancel the request when the view is deallocated.
deinit {
cancelOutstandingTask()
}
init(view: /* weak */ ImageDisplayingView) {
self.imageView = view
}
// MARK: - Associating Controller
static var controllerAK = "ImageViewController.AssociatedKey"
// Lazily create a controller for a given view and associate it with a view.
static func controller(for view: ImageDisplayingView) -> ImageViewController {
if let controller = objc_getAssociatedObject(view, &ImageViewController.controllerAK) as? ImageViewController {
return controller
}
let controller = ImageViewController(view: view)
objc_setAssociatedObject(view, &ImageViewController.controllerAK, controller, .OBJC_ASSOCIATION_RETAIN)
return controller
}
// MARK: - Loading Images
func loadImage(with request: ImageRequest,
options: ImageLoadingOptions,
progress: ImageTask.ProgressHandler? = nil,
completion: ImageTask.Completion? = nil) -> ImageTask? {
cancelOutstandingTask()
guard let imageView = imageView else {
return nil
}
if options.isPrepareForReuseEnabled { // enabled by default
#if !os(macOS)
imageView.layer.removeAllAnimations()
#else
imageView.layer?.removeAllAnimations()
#endif
}
let pipeline = options.pipeline ?? ImagePipeline.shared
// Quick synchronous memory cache lookup
if request.memoryCacheOptions.isReadAllowed,
let imageCache = pipeline.configuration.imageCache,
let response = imageCache.cachedResponse(for: request) {
handle(response: response, error: nil, fromMemCache: true, options: options)
completion?(response, nil)
return nil
}
// Display a placeholder.
if let placeholder = options.placeholder {
imageView.display(image: placeholder)
#if !os(macOS)
if let contentMode = options.contentModes?.placeholder {
imageView.contentMode = contentMode
}
#endif
} else {
if options.isPrepareForReuseEnabled {
imageView.display(image: nil) // Remove previously displayed images (if any)
}
}
// Makes sure that view reuse is handled correctly.
let taskId = self.taskId
// Start the request.
self.task = pipeline.loadImage(
with: request,
progress: { [weak self] response, completed, total in
guard self?.taskId == taskId else { return }
self?.handle(partialImage: response, options: options)
progress?(response, completed, total)
},
completion: { [weak self] response, error in
guard self?.taskId == taskId else { return }
self?.handle(response: response, error: error, fromMemCache: false, options: options)
completion?(response, error)
}
)
return self.task
}
func cancelOutstandingTask() {
taskId += 1
task?.cancel()
task = nil
}
// MARK: - Handling Responses
#if !os(macOS)
private func handle(response: ImageResponse?, error: Error?, fromMemCache: Bool, options: ImageLoadingOptions) {
if let image = response?.image {
_display(image, options.transition, options.alwaysTransition, fromMemCache, options.contentModes?.success)
} else if let failureImage = options.failureImage {
_display(failureImage, options.failureImageTransition, options.alwaysTransition, fromMemCache, options.contentModes?.failure)
}
self.task = nil
}
private func handle(partialImage response: ImageResponse?, options: ImageLoadingOptions) {
guard let image = response?.image else { return }
_display(image, options.transition, options.alwaysTransition, false, options.contentModes?.success)
}
private func _display(_ image: Image, _ transition: ImageLoadingOptions.Transition?, _ alwaysTransition: Bool, _ fromMemCache: Bool, _ newContentMode: UIView.ContentMode?) {
guard let imageView = imageView else { return }
if !fromMemCache || alwaysTransition, let transition = transition {
switch transition.style {
case let .fadeIn(params):
_runFadeInTransition(image: image, params: params, contentMode: newContentMode)
case let .custom(closure):
// The user is responsible for both displaying an image and performing
// animations.
closure(imageView, image)
}
} else {
imageView.display(image: image)
}
if let newContentMode = newContentMode {
imageView.contentMode = newContentMode
}
}
// Image view used for cross-fade transition between images with different
// content modes.
private lazy var transitionImageView = UIImageView()
private func _runFadeInTransition(image: Image, params: ImageLoadingOptions.Transition.Parameters, contentMode: UIView.ContentMode?) {
guard let imageView = imageView else { return }
// Special case where we animate between content modes, only works
// on imageView subclasses.
if let contentMode = contentMode, imageView.contentMode != contentMode, let imageView = imageView as? UIImageView, imageView.image != nil {
_runCrossDissolveWithContentMode(imageView: imageView, image: image, params: params)
} else {
_runSimpleFadeIn(image: image, params: params)
}
}
private func _runSimpleFadeIn(image: Image, params: ImageLoadingOptions.Transition.Parameters) {
guard let imageView = imageView else { return }
UIView.transition(
with: imageView,
duration: params.duration,
options: params.options.union(.transitionCrossDissolve),
animations: {
imageView.display(image: image)
},
completion: nil
)
}
/// Performs a cross-dissolve animation alongside a transition to a new content
/// mode. This isn't a natively supported feature and it requires a second
/// image view. There might be better ways to implement it.
private func _runCrossDissolveWithContentMode(imageView: UIImageView, image: Image, params: ImageLoadingOptions.Transition.Parameters) {
// Lazily create a transition view.
let transitionView = self.transitionImageView
// Create a transition view which mimics current view's contents.
transitionView.image = imageView.image
transitionView.contentMode = imageView.contentMode
imageView.addSubview(transitionView)
transitionView.frame = imageView.bounds
// "Manual" cross-fade.
transitionView.alpha = 1
imageView.alpha = 0
imageView.image = image // Display new image in current view
UIView.animate(
withDuration: params.duration,
delay: 0,
options: params.options,
animations: {
transitionView.alpha = 0
imageView.alpha = 1
},
completion: { isCompleted in
if isCompleted {
transitionView.removeFromSuperview()
}
}
)
}
#else
private func handle(response: ImageResponse?, error: Error?, fromMemCache: Bool, options: ImageLoadingOptions) {
// NSImageView doesn't support content mode, unfortunately.
if let image = response?.image {
_display(image, options.transition, options.alwaysTransition, fromMemCache)
} else if let failureImage = options.failureImage {
_display(failureImage, options.failureImageTransition, options.alwaysTransition, fromMemCache)
}
self.task = nil
}
private func handle(partialImage response: ImageResponse?, options: ImageLoadingOptions) {
guard let image = response?.image else { return }
_display(image, options.transition, options.alwaysTransition, false)
}
private func _display(_ image: Image, _ transition: ImageLoadingOptions.Transition?, _ alwaysTransition: Bool, _ fromMemCache: Bool) {
guard let imageView = imageView else { return }
if !fromMemCache || alwaysTransition, let transition = transition {
switch transition.style {
case let .fadeIn(params):
_runFadeInTransition(image: image, params: params)
case let .custom(closure):
// The user is responsible for both displaying an image and performing
// animations.
closure(imageView, image)
}
} else {
imageView.display(image: image)
}
}
private func _runFadeInTransition(image: Image, params: ImageLoadingOptions.Transition.Parameters) {
let animation = CABasicAnimation(keyPath: "opacity")
animation.duration = params.duration
animation.fromValue = 0
animation.toValue = 1
imageView?.layer?.add(animation, forKey: "imageTransition")
imageView?.display(image: image)
}
#endif
}
#endif

620
Pods/Nuke/Sources/Internal.swift generated Normal file
View File

@@ -0,0 +1,620 @@
// The MIT License (MIT)
//
// Copyright (c) 2015-2019 Alexander Grebenyuk (github.com/kean).
import Foundation
// MARK: - Lock
extension NSLock {
func sync<T>(_ closure: () -> T) -> T {
lock(); defer { unlock() }
return closure()
}
}
// MARK: - RateLimiter
/// Controls the rate at which the work is executed. Uses the classic [token
/// bucket](https://en.wikipedia.org/wiki/Token_bucket) algorithm.
///
/// The main use case for the rate limiter is to support large (infinite) collections
/// of images by preventing thrashing of the underlying systems, primarily `URLSession`.
///
/// The implementation supports quick bursts of requests which can be executed
/// without any delays when "the bucket is full". This is important to prevent
/// rate limiter from affecting "normal" requests flow.
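///
/// For example, with the default `rate` of 80 and `burst` of 25, up to 25
/// requests can start immediately; after that the bucket refills at 80 tokens
/// per second, i.e. roughly one new request every 12.5 ms.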
internal final class RateLimiter {
private let bucket: TokenBucket
private let queue: DispatchQueue
private var pending = LinkedList<Task>() // fast append, fast remove first
private var isExecutingPendingTasks = false
private typealias Task = (CancellationToken, () -> Void)
/// Initializes the `RateLimiter` with the given configuration.
/// - parameter queue: Queue on which to execute pending tasks.
/// - parameter rate: Maximum number of requests per second. 80 by default.
/// - parameter burst: Maximum number of requests which can be executed without
/// any delays when "bucket is full". 25 by default.
init(queue: DispatchQueue, rate: Int = 80, burst: Int = 25) {
self.queue = queue
self.bucket = TokenBucket(rate: Double(rate), burst: Double(burst))
}
func execute(token: CancellationToken, _ closure: @escaping () -> Void) {
let task = Task(token, closure)
if !pending.isEmpty || !_execute(task) {
pending.append(task)
_setNeedsExecutePendingTasks()
}
}
private func _execute(_ task: Task) -> Bool {
guard !task.0.isCancelling else {
return true // No need to execute
}
return bucket.execute(task.1)
}
private func _setNeedsExecutePendingTasks() {
guard !isExecutingPendingTasks else { return }
isExecutingPendingTasks = true
// Compute a delay such that by the time the closure is executed the
// bucket is refilled to a point that is able to execute at least one
// pending task. With a rate of 100 tasks we expect a refill every 10 ms.
let delay = Int(1.15 * (1000 / bucket.rate)) // 14 ms for rate 80 (default)
let bounds = min(100, max(15, delay)) // Make sure the delay stays within reasonable bounds
queue.asyncAfter(deadline: .now() + .milliseconds(bounds), execute: _executePendingTasks)
}
private func _executePendingTasks() {
while let node = pending.first, _execute(node.value) {
pending.remove(node)
}
isExecutingPendingTasks = false
if !pending.isEmpty { // Not all pending items were executed
_setNeedsExecutePendingTasks()
}
}
private final class TokenBucket {
let rate: Double
private let burst: Double // maximum bucket size
private var bucket: Double
private var timestamp: TimeInterval // last refill timestamp
/// - parameter rate: Rate (tokens/second) at which bucket is refilled.
/// - parameter burst: Bucket size (maximum number of tokens).
init(rate: Double, burst: Double) {
self.rate = rate
self.burst = burst
self.bucket = burst
self.timestamp = CFAbsoluteTimeGetCurrent()
}
/// Returns `true` if the closure was executed, `false` if dropped.
func execute(_ closure: () -> Void) -> Bool {
refill()
guard bucket >= 1.0 else {
return false // bucket is empty
}
bucket -= 1.0
closure()
return true
}
private func refill() {
let now = CFAbsoluteTimeGetCurrent()
bucket += rate * max(0, now - timestamp) // rate * (time delta)
timestamp = now
if bucket > burst { // prevent bucket overflow
bucket = burst
}
}
}
}
// MARK: - Operation
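/// An asynchronous operation that runs the given `starter` closure and remains
/// executing until the closure calls `finish`. A usage sketch (the work inside
/// the closure is illustrative):
///
///     let operation = Operation(starter: { finish in
///         DispatchQueue.global().async {
///             // ... perform the work ...
///             finish()
///         }
///     })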
internal final class Operation: Foundation.Operation {
private var _isExecuting = false
private var _isFinished = false
private var isFinishCalled = Atomic(false)
override var isExecuting: Bool {
set {
guard _isExecuting != newValue else {
fatalError("Invalid state, operation is already (not) executing")
}
willChangeValue(forKey: "isExecuting")
_isExecuting = newValue
didChangeValue(forKey: "isExecuting")
}
get {
return _isExecuting
}
}
override var isFinished: Bool {
set {
guard !_isFinished else {
fatalError("Invalid state, operation is already finished")
}
willChangeValue(forKey: "isFinished")
_isFinished = newValue
didChangeValue(forKey: "isFinished")
}
get {
return _isFinished
}
}
typealias Starter = (_ finish: @escaping () -> Void) -> Void
private let starter: Starter
init(starter: @escaping Starter) {
self.starter = starter
}
override func start() {
guard !isCancelled else {
isFinished = true
return
}
isExecuting = true
starter { [weak self] in
self?._finish()
}
}
private func _finish() {
// Make sure that calling `finish` more than once is ignored.
if isFinishCalled.swap(to: true, ifEqual: false) {
isExecuting = false
isFinished = true
}
}
}
// MARK: - LinkedList
/// A doubly linked list.
internal final class LinkedList<Element> {
// first <-> node <-> ... <-> last
private(set) var first: Node?
private(set) var last: Node?
deinit {
removeAll()
}
var isEmpty: Bool {
return last == nil
}
/// Adds an element to the end of the list.
@discardableResult
func append(_ element: Element) -> Node {
let node = Node(value: element)
append(node)
return node
}
/// Adds a node to the end of the list.
func append(_ node: Node) {
if let last = last {
last.next = node
node.previous = last
self.last = node
} else {
last = node
first = node
}
}
func remove(_ node: Node) {
node.next?.previous = node.previous // node.previous is nil if node=first
node.previous?.next = node.next // node.next is nil if node=last
if node === last {
last = node.previous
}
if node === first {
first = node.next
}
node.next = nil
node.previous = nil
}
func removeAll() {
// avoid recursive Nodes deallocation
var node = first
while let next = node?.next {
node?.next = nil
next.previous = nil
node = next
}
last = nil
first = nil
}
final class Node {
let value: Element
fileprivate var next: Node?
fileprivate var previous: Node?
init(value: Element) {
self.value = value
}
}
}
// MARK: - CancellationTokenSource
/// Manages cancellation tokens and signals them when cancellation is requested.
///
/// All `CancellationTokenSource` methods are thread safe.
internal final class CancellationTokenSource {
/// Returns `true` if cancellation has been requested.
var isCancelling: Bool {
return lock.sync { observers == nil }
}
/// Creates a new token associated with the source.
var token: CancellationToken {
return CancellationToken(source: self)
}
private var lock = NSLock()
private var observers: [() -> Void]? = []
/// Initializes the `CancellationTokenSource` instance.
init() {}
fileprivate func register(_ closure: @escaping () -> Void) {
if !_register(closure) {
closure()
}
}
private func _register(_ closure: @escaping () -> Void) -> Bool {
lock.lock()
defer { lock.unlock() }
observers?.append(closure)
return observers != nil
}
/// Communicates a request for cancellation to the managed tokens.
func cancel() {
if let observers = _cancel() {
observers.forEach { $0() }
}
}
private func _cancel() -> [() -> Void]? {
lock.lock()
defer { lock.unlock() }
let observers = self.observers
self.observers = nil // transition to `isCancelling` state
return observers
}
}
/// Enables cooperative cancellation of operations.
///
/// You create a cancellation token by instantiating a `CancellationTokenSource`
/// object and accessing its `token` property. You then pass the token to any
/// number of threads, tasks, or operations that should receive notice of
/// cancellation. When the owning object calls `cancel()`, the `isCancelling`
/// property on every copy of the cancellation token is set to `true`.
/// The registered objects can respond in whatever manner is appropriate.
///
/// All `CancellationToken` methods are thread safe.
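///
/// A minimal sketch of the flow described above:
///
/// ```swift
/// let cts = CancellationTokenSource()
/// let token = cts.token
/// token.register {
///     // Clean up, e.g. cancel an in-flight URLSessionTask.
/// }
/// cts.cancel() // runs registered closures; `token.isCancelling` is now `true`
/// ```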
internal struct CancellationToken {
fileprivate let source: CancellationTokenSource? // no-op when `nil`
/// Returns `true` if cancellation has been requested for this token.
/// Returns `false` if the source was deallocated.
var isCancelling: Bool {
return source?.isCancelling ?? false
}
/// Registers the closure that will be called when the token is canceled.
/// If this token is already cancelled, the closure will be run immediately
/// and synchronously.
func register(_ closure: @escaping () -> Void) {
source?.register(closure)
}
}
// MARK: - ResumableData
/// Resumable data support. For more info see:
/// - https://developer.apple.com/library/content/qa/qa1761/_index.html
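///
/// A rough sketch of the expected flow (the `request`, `response`, and
/// `partialData` values below are illustrative assumptions):
///
/// ```swift
/// // When a download fails, keep whatever was received so far:
/// if let resumable = ResumableData(response: response, data: partialData) {
///     ResumableData.storeResumableData(resumable, for: request)
/// }
///
/// // When retrying, attach the validator and the byte range to a new request:
/// var retryRequest = request
/// if let resumable = ResumableData.removeResumableData(for: request) {
///     resumable.resume(request: &retryRequest)
/// }
/// ```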
internal struct ResumableData {
let data: Data
let validator: String // Either Last-Modified or ETag
init?(response: URLResponse, data: Data) {
// Check if "Accept-Ranges" is present and the response is valid.
guard !data.isEmpty,
let response = response as? HTTPURLResponse,
response.statusCode == 200 /* OK */ || response.statusCode == 206, /* Partial Content */
let acceptRanges = response.allHeaderFields["Accept-Ranges"] as? String,
acceptRanges.lowercased() == "bytes",
let validator = ResumableData._validator(from: response) else {
return nil
}
// NOTE: https://developer.apple.com/documentation/foundation/httpurlresponse/1417930-allheaderfields
// HTTP headers are case insensitive. To simplify your code, certain
// header field names are canonicalized into their standard form.
// For example, if the server sends a content-length header,
// it is automatically adjusted to be Content-Length.
self.data = data; self.validator = validator
}
private static func _validator(from response: HTTPURLResponse) -> String? {
if let entityTag = response.allHeaderFields["ETag"] as? String {
return entityTag // Prefer ETag
}
// There appears to be a bug where HTTPURLResponse canonicalizes the header
// name to "Etag" instead of "ETag":
// https://bugs.swift.org/browse/SR-2429
if let entityTag = response.allHeaderFields["Etag"] as? String {
return entityTag // Prefer ETag
}
if let lastModified = response.allHeaderFields["Last-Modified"] as? String {
return lastModified
}
return nil
}
func resume(request: inout URLRequest) {
var headers = request.allHTTPHeaderFields ?? [:]
// "bytes=1000-" means bytes from 1000 up to the end (inclusive)
headers["Range"] = "bytes=\(data.count)-"
headers["If-Range"] = validator
request.allHTTPHeaderFields = headers
}
// Check if the server decided to resume the response.
static func isResumedResponse(_ response: URLResponse) -> Bool {
// "206 Partial Content" (server accepted "If-Range")
return (response as? HTTPURLResponse)?.statusCode == 206
}
// MARK: Storing Resumable Data
/// Shared between multiple pipelines. Thread safe. A future version might
/// offer more customization options.
static var _cache = _Cache<String, ResumableData>(costLimit: 32 * 1024 * 1024, countLimit: 100) // internal only for testing purposes
static func removeResumableData(for request: URLRequest) -> ResumableData? {
guard let url = request.url?.absoluteString else { return nil }
return _cache.removeValue(forKey: url)
}
static func storeResumableData(_ data: ResumableData, for request: URLRequest) {
guard let url = request.url?.absoluteString else { return }
_cache.set(data, forKey: url, cost: data.data.count)
}
}
// MARK: - Printer
/// Helper type for printing nice debug descriptions.
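///
/// A minimal usage sketch:
///
/// ```swift
/// let start: Date? = Date()
/// let end: Date? = start?.addingTimeInterval(0.25)
/// var printer = Printer()
/// printer.section(title: "Timeline") { printer in
///     printer.value("Is Completed", true)
///     printer.timeline("Duration", start, end, isReversed: false)
/// }
/// print(printer.output())
/// ```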
internal struct Printer {
private(set) internal var _out = String()
private let timelineFormatter: DateFormatter
init(_ string: String = "") {
self._out = string
timelineFormatter = DateFormatter()
timelineFormatter.dateFormat = "HH:mm:ss.SSS"
}
func output(indent: Int = 0) -> String {
return _out.components(separatedBy: .newlines)
.map { $0.isEmpty ? "" : String(repeating: " ", count: indent) + $0 }
.joined(separator: "\n")
}
mutating func string(_ str: String) {
_out.append(str)
}
mutating func line(_ str: String) {
_out.append(str)
_out.append("\n")
}
mutating func value(_ key: String, _ value: CustomStringConvertible?) {
let val = value.map { String(describing: $0) }
line(key + " - " + (val ?? "nil"))
}
/// For producing nicely formatted timelines like this:
///
/// 11:45:52.737 - Data Loading Start Date
/// 11:45:52.739 - Data Loading End Date
/// nil - Decoding Start Date
mutating func timeline(_ key: String, _ date: Date?) {
let value = date.map { timelineFormatter.string(from: $0) }
self.value((value ?? "nil "), key) // Switch key with value so the timestamp comes first
}
mutating func timeline(_ key: String, _ start: Date?, _ end: Date?, isReversed: Bool = true) {
let duration = _duration(from: start, to: end)
let value = "\(_string(from: start)) \(_string(from: end)) (\(duration))"
if isReversed {
self.value(value.padding(toLength: 36, withPad: " ", startingAt: 0), key)
} else {
self.value(key, value)
}
}
mutating func section(title: String, _ closure: (inout Printer) -> Void) {
_out.append(contentsOf: title)
_out.append(" {\n")
var printer = Printer()
closure(&printer)
_out.append(printer.output(indent: 4))
_out.append("}\n")
}
// MARK: Formatters
private func _string(from date: Date?) -> String {
return date.map { timelineFormatter.string(from: $0) } ?? "nil"
}
private func _duration(from: Date?, to: Date?) -> String {
guard let from = from else { return "nil" }
guard let to = to else { return "unknown" }
return Printer.duration(to.timeIntervalSince(from)) ?? "nil"
}
static func duration(_ duration: TimeInterval?) -> String? {
guard let duration = duration else { return nil }
let m: Int = Int(duration) / 60
let s: Int = Int(duration) % 60
let ms: Int = Int(duration * 1000) % 1000
var output = String()
if m > 0 { output.append("\(m):") }
output.append(output.isEmpty ? "\(s)." : String(format: "%02d.", s))
output.append(String(format: "%03ds", ms))
return output
}
}
// MARK: - Misc
struct TaskMetrics {
var startDate: Date? = nil
var endDate: Date? = nil
static func started() -> TaskMetrics {
var metrics = TaskMetrics()
metrics.start()
return metrics
}
mutating func start() {
startDate = Date()
}
mutating func end() {
endDate = Date()
}
}
/// A simple observable property. Not thread safe.
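///
/// A minimal usage sketch:
///
/// ```swift
/// let progress = Property<Double>(value: 0)
/// progress.observe { print("progress:", $0) }
/// progress.value = 0.5 // notifies all observers
/// ```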
final class Property<T> {
var value: T {
didSet {
for observer in observers {
observer(value)
}
}
}
init(value: T) {
self.value = value
}
private var observers = [(T) -> Void]()
// For our use cases we can ignore unsubscribing for now.
func observe(_ closure: @escaping (T) -> Void) {
observers.append(closure)
}
}
// MARK: - Atomic
/// A thread-safe value wrapper.
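///
/// A minimal usage sketch (`swap(to:ifEqual:)` and `increment()` are defined
/// in the extensions below):
///
/// ```swift
/// let isFinished = Atomic(false)
/// if isFinished.swap(to: true, ifEqual: false) {
///     // First caller wins; later calls return `false`.
/// }
/// let counter = Atomic(0)
/// _ = counter.increment() // returns 1
/// ```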
final class Atomic<T> {
private var _value: T
private let lock = NSLock()
init(_ value: T) {
self._value = value
}
var value: T {
get {
lock.lock()
let value = _value
lock.unlock()
return value
}
set {
lock.lock()
_value = newValue
lock.unlock()
}
}
}
extension Atomic where T: Equatable {
/// "Compare and Swap"
func swap(to newValue: T, ifEqual oldValue: T) -> Bool {
lock.lock()
defer { lock.unlock() }
guard _value == oldValue else {
return false
}
_value = newValue
return true
}
}
extension Atomic where T == Int {
/// Atomically increments the value and returns the incremented value.
func increment() -> Int {
lock.lock()
defer { lock.unlock() }
_value += 1
return _value
}
}
// MARK: - Misc
import CommonCrypto
extension String {
/// Calculates SHA1 from the given string and returns its hex representation.
///
/// ```swift
/// print("http://test.com".sha1)
/// // prints "50334ee0b51600df6397ce93ceed4728c37fee4e"
/// ```
var sha1: String? {
guard let input = self.data(using: .utf8) else { return nil }
#if swift(>=5.0)
let hash = input.withUnsafeBytes { (bytes: UnsafeRawBufferPointer) -> [UInt8] in
var hash = [UInt8](repeating: 0, count: Int(CC_SHA1_DIGEST_LENGTH))
CC_SHA1(bytes.baseAddress, CC_LONG(input.count), &hash)
return hash
}
#else
var hash = [UInt8](repeating: 0, count: Int(CC_SHA1_DIGEST_LENGTH))
input.withUnsafeBytes {
_ = CC_SHA1($0, CC_LONG(input.count), &hash)
}
#endif
return hash.map({ String(format: "%02x", $0) }).joined()
}
}