Fix typos: Successfully facilitate getting pipeline overridden (#30)

Christian Clauss 1 year ago committed by GitHub
parent ec9fe68259
commit d6a54fc2b0

@@ -240,7 +240,7 @@ Differences may be less or more pronounced for different inputs. Please see the
</details>
<details>
-<summary> <b> <a name="low-mem-conversion"></a> Q3: </b> My Mac has 8GB RAM and I am converting models to Core ML using the example command. The process is geting killed because of memory issues. How do I fix this issue? </summary>
+<summary> <b> <a name="low-mem-conversion"></a> Q3: </b> My Mac has 8GB RAM and I am converting models to Core ML using the example command. The process is getting killed because of memory issues. How do I fix this issue? </summary>
<b> A3: </b> In order to minimize the memory impact of the model conversion process, please execute the following command instead:
@@ -313,7 +313,7 @@ On iOS, depending on the iPhone model, Stable Diffusion model versions, selected
<b> 4. Weights and Activations Data Type </b>
-When quantizing models from float32 to lower-precision data types such as float16, the generated images are [known to vary slightly](https://lambdalabs.com/blog/inference-benchmark-stable-diffusion) in semantics even when using the same PyTorch model. Core ML models generated by coremltools have float16 weights and activations by default [unless explicitly overriden](https://github.com/apple/coremltools/blob/main/coremltools/converters/_converters_entry.py#L256). This is not expected to be a major source of difference.
+When quantizing models from float32 to lower-precision data types such as float16, the generated images are [known to vary slightly](https://lambdalabs.com/blog/inference-benchmark-stable-diffusion) in semantics even when using the same PyTorch model. Core ML models generated by coremltools have float16 weights and activations by default [unless explicitly overridden](https://github.com/apple/coremltools/blob/main/coremltools/converters/_converters_entry.py#L256). This is not expected to be a major source of difference.
</details>
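The FAQ line corrected above refers to coremltools' float16 default. As a minimal illustrative sketch (not part of this commit; the toy model and input shape below are placeholders, not the repo's pipeline), that default can be overridden through the documented `compute_precision` argument of `coremltools.convert`:

```python
# Illustrative only: overriding coremltools' default float16 precision.
# The Conv2d "model" and input shape are placeholders for demonstration.
import coremltools as ct
import torch

example_input = torch.rand(1, 3, 224, 224)
traced = torch.jit.trace(torch.nn.Conv2d(3, 8, kernel_size=3).eval(), example_input)

mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(shape=example_input.shape)],
    convert_to="mlprogram",
    # Default is ct.precision.FLOAT16; FLOAT32 keeps full precision at the
    # cost of larger weights and typically slower on-device inference.
    compute_precision=ct.precision.FLOAT32,
)
```

Leaving `compute_precision` unset yields the float16 weights and activations that the FAQ paragraph describes.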

@@ -576,7 +576,7 @@ def convert_unet(pipe, args):
# Set the output descriptions
coreml_unet.output_description["noise_pred"] = \
"Same shape and dtype as the `sample` input. " \
"The predicted noise to faciliate the reverse diffusion (denoising) process"
"The predicted noise to facilitate the reverse diffusion (denoising) process"
_save_mlpackage(coreml_unet, out_path)
logger.info(f"Saved unet into {out_path}")

@@ -75,7 +75,7 @@ public extension StableDiffusionPipeline {
safetyChecker = SafetyChecker(modelAt: urls.safetyCheckerURL, configuration: config)
}
-// Construct pipelien
+// Construct pipeline
self.init(textEncoder: textEncoder,
unet: unet,
decoder: decoder,

@@ -74,23 +74,23 @@ class TestStableDiffusionForTextToImage(unittest.TestCase):
with self.subTest(model="vae_decoder"):
logger.info("Converting vae_decoder")
torch2coreml.convert_vae_decoder(self.pytorch_pipe, self.cli_args)
logger.info("Successfuly converted vae_decoder")
logger.info("Successfully converted vae_decoder")
with self.subTest(model="unet"):
logger.info("Converting unet")
torch2coreml.convert_unet(self.pytorch_pipe, self.cli_args)
logger.info("Successfuly converted unet")
logger.info("Successfully converted unet")
with self.subTest(model="text_encoder"):
logger.info("Converting text_encoder")
torch2coreml.convert_text_encoder(self.pytorch_pipe, self.cli_args)
logger.info("Successfuly converted text_encoder")
logger.info("Successfully converted text_encoder")
with self.subTest(model="safety_checker"):
logger.info("Converting safety_checker")
torch2coreml.convert_safety_checker(self.pytorch_pipe,
self.cli_args)
logger.info("Successfuly converted safety_checker")
logger.info("Successfully converted safety_checker")
def test_end_to_end_image_generation_speed(self):
""" Tests:
