delayed the creation of StableDiffusionPipeline until the first image generation.

main
Yasuhito Nagatomo 2 years ago
parent 509a72fda0
commit 853679a826

1
.gitignore vendored

@ -3,6 +3,7 @@
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore # gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
# avoid checking CoreML models # avoid checking CoreML models
CoreMLModels
merges.txt merges.txt
vocab.json vocab.json
*.mlmodelc *.mlmodelc

@ -5,8 +5,8 @@
A minimal iOS app that generates images using Stable Diffusion v2. A minimal iOS app that generates images using Stable Diffusion v2.
You can create images specifying any prompt (text) such as "a photo of an astronaut riding a horse on mars". You can create images specifying any prompt (text) such as "a photo of an astronaut riding a horse on mars".
- macOS 13.1 beta 4 or newer, Xcode 14.1 - macOS 13.1 RC or newer, Xcode 14.2 or newer
- iPhone 12+ / iOS 16.2 beta 4 or newer, iPad Pro with M1/M2 / iPadOS 16.2 beta 4 or newer - iPhone 12+ / iOS 16.2 RC or newer, iPad Pro with M1/M2 / iPadOS 16.2 RC or newer
You can run the app on above mobile devices. You can run the app on above mobile devices.
And you can run the app on Mac, building as a Designed for iPad app. And you can run the app on Mac, building as a Designed for iPad app.
@ -24,6 +24,16 @@ You can see how it works through the simple sample code.
![Image](images/ss0_1280.png) ![Image](images/ss0_1280.png)
## Change Log
- [1.0.1 (2)] - 2022-12-08 `[Changed]`
- Changed to delay creation of `StableDiffusionPipeline` until the first image
generation and execute it in a background task.
- This eliminates the freeze when starting the app, but it takes time
to generate the first image.
- Tested with Xcode 14.2 RC and macOS 13.1 RC.
- With the release of Xcode 14.2 RC, the correct Target OS 16.2 was specified.
## Convert CoreML models ## Convert CoreML models
Convert the PyTorch SD2 model to CoreML models, following Apple's instructions. Convert the PyTorch SD2 model to CoreML models, following Apple's instructions.

@ -305,7 +305,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
CODE_SIGN_STYLE = Automatic; CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 1; CURRENT_PROJECT_VERSION = 2;
DEVELOPMENT_ASSET_PATHS = "\"imggensd2/Preview Content\""; DEVELOPMENT_ASSET_PATHS = "\"imggensd2/Preview Content\"";
DEVELOPMENT_TEAM = J5CY9Q9UP5; DEVELOPMENT_TEAM = J5CY9Q9UP5;
ENABLE_PREVIEWS = YES; ENABLE_PREVIEWS = YES;
@ -320,7 +320,7 @@
"$(inherited)", "$(inherited)",
"@executable_path/Frameworks", "@executable_path/Frameworks",
); );
MARKETING_VERSION = 1.0; MARKETING_VERSION = 1.0.1;
PRODUCT_BUNDLE_IDENTIFIER = com.atarayosd.imggensd2; PRODUCT_BUNDLE_IDENTIFIER = com.atarayosd.imggensd2;
PRODUCT_NAME = "$(TARGET_NAME)"; PRODUCT_NAME = "$(TARGET_NAME)";
SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_EMIT_LOC_STRINGS = YES;
@ -335,7 +335,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
CODE_SIGN_STYLE = Automatic; CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 1; CURRENT_PROJECT_VERSION = 2;
DEVELOPMENT_ASSET_PATHS = "\"imggensd2/Preview Content\""; DEVELOPMENT_ASSET_PATHS = "\"imggensd2/Preview Content\"";
DEVELOPMENT_TEAM = J5CY9Q9UP5; DEVELOPMENT_TEAM = J5CY9Q9UP5;
ENABLE_PREVIEWS = YES; ENABLE_PREVIEWS = YES;
@ -350,7 +350,7 @@
"$(inherited)", "$(inherited)",
"@executable_path/Frameworks", "@executable_path/Frameworks",
); );
MARKETING_VERSION = 1.0; MARKETING_VERSION = 1.0.1;
PRODUCT_BUNDLE_IDENTIFIER = com.atarayosd.imggensd2; PRODUCT_BUNDLE_IDENTIFIER = com.atarayosd.imggensd2;
PRODUCT_NAME = "$(TARGET_NAME)"; PRODUCT_NAME = "$(TARGET_NAME)";
SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_EMIT_LOC_STRINGS = YES;

@ -49,25 +49,19 @@ final class ImageGenerator: ObservableObject {
@Published var generationState: GenerationState = .idle @Published var generationState: GenerationState = .idle
@Published var generatedImages: GeneratedImages? @Published var generatedImages: GeneratedImages?
private let sdpipeline: StableDiffusionPipeline private var sdPipeline: StableDiffusionPipeline?
init() { init() {
guard let path = Bundle.main.path(forResource: "CoreMLModels", ofType: nil, inDirectory: nil) else {
fatalError("Fatal error: failed to find the CoreML models.")
}
let resourceURL = URL(fileURLWithPath: path)
// TODO: move the pipeline creation to background task because it's heavy
if let pipeline = try? StableDiffusionPipeline(resourcesAt: resourceURL) {
sdpipeline = pipeline
} else {
fatalError("Fatal error: failed to create the Stable-Diffusion-Pipeline.")
}
} }
func setState(_ state: GenerationState) { // for actor isolation func setState(_ state: GenerationState) { // for actor isolation
generationState = state generationState = state
} }
func setPipeline(_ pipeline: StableDiffusionPipeline) { // for actor isolation
sdPipeline = pipeline
}
func setGeneratedImages(_ images: GeneratedImages) { // for actor isolation func setGeneratedImages(_ images: GeneratedImages) { // for actor isolation
generatedImages = images generatedImages = images
} }
@ -76,12 +70,29 @@ final class ImageGenerator: ObservableObject {
guard generationState == .idle else { return } guard generationState == .idle else { return }
Task.detached(priority: .high) { Task.detached(priority: .high) {
await self.setState(.generating(progressStep: 0)) await self.setState(.generating(progressStep: 0))
if await self.sdPipeline == nil {
guard let path = Bundle.main.path(forResource: "CoreMLModels", ofType: nil, inDirectory: nil) else {
fatalError("Fatal error: failed to find the CoreML models.")
}
let resourceURL = URL(fileURLWithPath: path)
if let pipeline = try? StableDiffusionPipeline(resourcesAt: resourceURL) {
await self.setPipeline(pipeline)
} else {
fatalError("Fatal error: failed to create the Stable-Diffusion-Pipeline.")
}
}
if let sdPipeline = await self.sdPipeline {
do { do {
// API:
// generateImages(prompt: String, imageCount: Int = 1, stepCount: Int = 50, seed: Int = 0, // generateImages(prompt: String, imageCount: Int = 1, stepCount: Int = 50, seed: Int = 0,
// disableSafety: Bool = false, // disableSafety: Bool = false,
// progressHandler: (StableDiffusionPipeline.Progress) -> Bool = { _ in true }) throws -> [CGImage?] // progressHandler: (StableDiffusionPipeline.Progress) -> Bool = { _ in true })
// throws -> [CGImage?]
// TODO: use the progressHandler // TODO: use the progressHandler
let cgImages = try self.sdpipeline.generateImages(prompt: parameter.prompt, let cgImages = try sdPipeline.generateImages(prompt: parameter.prompt,
imageCount: parameter.imageCount, imageCount: parameter.imageCount,
stepCount: parameter.stepCount, stepCount: parameter.stepCount,
seed: parameter.seed, seed: parameter.seed,
@ -98,8 +109,10 @@ final class ImageGenerator: ObservableObject {
disableSafety: parameter.disableSafety, disableSafety: parameter.disableSafety,
images: uiImages.map { uiImage in GeneratedImage(uiImage: uiImage) })) images: uiImages.map { uiImage in GeneratedImage(uiImage: uiImage) }))
} catch { } catch {
print("failed.") print("failed to generate images.")
}
} }
await self.setState(.idle) await self.setState(.idle)
} }
} }

Loading…
Cancel
Save