parent
2caf938730
commit
18c9d175a9
@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const fs = require('fs');
|
||||
const util = require('util');
|
||||
const exec = util.promisify(require('child_process').exec);
|
||||
const readFile = util.promisify(fs.readFile);
|
||||
|
||||
/**
 * Batch-runs every prompt in prompts.txt against each model resource
 * directory via the StableDiffusionSample CLI, re-reading the prompt file
 * after each run so prompts appended while the batch is in flight are
 * picked up.
 *
 * Fix over the original: lines are filtered for blank/whitespace-only
 * entries on both reads. Previously a trailing newline in prompts.txt
 * produced an empty final prompt that was still executed, and the blank
 * line skewed the length comparison used to detect newly appended prompts.
 */
async function run() {
    // Model resource directories passed to --resource-path, in run order.
    const models = ['runwaymlsd15/Resources', 'openjourneyv2/Resources', 'compiled'];

    // One prompt per non-blank line.
    let prompts = (await readFile('prompts.txt', 'utf8'))
        .split('\n')
        .filter((line) => line.trim() !== '');
    let index = 0;

    while (index < prompts.length) {
        for (const model of models) {
            const prompt = prompts[index];
            console.log(`Running ${model} on prompt: ${prompt}...`);

            // Run the sample binary for this model/prompt pair.
            // NOTE(review): the prompt is interpolated into a shell command
            // string; a prompt containing `"`, `$`, or backticks would break
            // or inject into the shell. Acceptable only because prompts.txt
            // is trusted local input — confirm before reusing elsewhere.
            const { stdout, stderr } = await exec(
                `./StableDiffusionSample --resource-path ${model} "${prompt}" --step-count 120`
            );
            console.log(stdout);
            console.error(stderr);
            console.log('Done.');
            console.log();

            // Pick up any prompts appended to the file since the last read.
            // Only a strictly longer list replaces the current one, so
            // in-place edits to already-processed lines are ignored.
            const updatedPrompts = (await readFile('prompts.txt', 'utf8'))
                .split('\n')
                .filter((line) => line.trim() !== '');
            if (updatedPrompts.length > prompts.length) {
                prompts = updatedPrompts;
            }
        }
        index++;
    }
}
|
||||
|
||||
run().catch(error => console.error(error));
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
@ -1,63 +0,0 @@
|
||||
// For licensing see accompanying LICENSE.md file.
|
||||
// Copyright (C) 2022 Apple Inc. All Rights Reserved.
|
||||
|
||||
import XCTest
|
||||
import CoreML
|
||||
@testable import StableDiffusion
|
||||
|
||||
@available(iOS 16.2, macOS 13.1, *)
final class StableDiffusionTests: XCTestCase {

    /// URL of the BPE tokenizer vocabulary JSON bundled with the test target.
    /// Halts the test run if the resource is missing, since every tokenizer
    /// test depends on it.
    var vocabFileInBundleURL: URL {
        guard let url = Bundle.module.url(forResource: "vocab", withExtension: "json") else {
            fatalError("BPE tokenizer vocabulary file is missing from bundle")
        }
        return url
    }

    /// URL of the BPE tokenizer merges list bundled with the test target.
    var mergesFileInBundleURL: URL {
        guard let url = Bundle.module.url(forResource: "merges", withExtension: "txt") else {
            fatalError("BPE tokenizer merges file is missing from bundle")
        }
        return url
    }

    /// Tokenizes known prompts and checks the produced ids against
    /// reference values (which include the start/end-of-text tokens).
    func testBPETokenizer() throws {
        let tokenizer = try BPETokenizer(mergesAt: mergesFileInBundleURL, vocabularyAt: vocabFileInBundleURL)

        // (prompt, expected token ids) reference pairs.
        let cases: [(prompt: String, expectedIds: [Int])] = [
            ("a photo of an astronaut riding a horse on mars",
             [49406, 320, 1125, 539, 550, 18376, 6765, 320, 4558, 525, 7496, 49407]),
            ("Apple CoreML developer tools on a Macbook Air are fast",
             [49406, 3055, 19622, 5780, 10929, 5771, 525, 320, 20617,
              1922, 631, 1953, 49407]),
        ]

        for (prompt, expectedIds) in cases {
            let (tokens, ids) = tokenizer.tokenize(input: prompt)

            // Dump both actual and expected forms to aid debugging failures.
            print("Tokens = \(tokens)\n")
            print("Expected tokens = \(expectedIds.map({ tokenizer.token(id: $0) }))")
            print("ids = \(ids)\n")
            print("Expected Ids = \(expectedIds)\n")

            XCTAssertEqual(ids,expectedIds)
        }
    }

    /// Verifies that the seeded Gaussian sampler reproduces NumPy's
    /// Mersenne-Twister normal stream for the same seed.
    func test_randomNormalValues_matchNumPyRandom() {
        var source = NumPyRandomSource(seed: 12345)
        let draws = source.normalArray(count: 10_000)

        // Reference: numpy.random.seed(12345); print(numpy.random.randn(10000)[-5:])
        let reference = [-0.86285345, 2.15229409, -0.00670556, -1.21472309, 0.65498866]

        for (actual, want) in zip(draws.suffix(5), reference) {
            XCTAssertEqual(actual, want, accuracy: .ulpOfOne.squareRoot())
        }
    }
}
|
Loading…
Reference in new issue