From b0a11594aec50892a02cd8d129eee2dfe93a8bb8 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 25 Sep 2022 21:23:15 +0300 Subject: [PATCH] Initial release --- .gitignore | 3 + Makefile | 109 + convert-pt-to-ggml.py | 328 ++ dr_wav.h | 6434 +++++++++++++++++++++++++++++++++++++++ ggml.c | 6689 +++++++++++++++++++++++++++++++++++++++++ ggml.h | 527 ++++ main.cpp | 2116 +++++++++++++ models/.gitignore | 1 + samples/.gitignore | 1 + samples/jfk.wav | Bin 0 -> 352078 bytes 10 files changed, 16208 insertions(+) create mode 100644 .gitignore create mode 100644 Makefile create mode 100644 convert-pt-to-ggml.py create mode 100644 dr_wav.h create mode 100644 ggml.c create mode 100644 ggml.h create mode 100644 main.cpp create mode 100644 models/.gitignore create mode 100644 samples/.gitignore create mode 100644 samples/jfk.wav diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b23f03e --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +sync.sh +main +*.o diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..50b68e1 --- /dev/null +++ b/Makefile @@ -0,0 +1,109 @@ +main: ggml.o main.o + g++ -o main ggml.o main.o + +ggml.o: ggml.c ggml.h + gcc -O3 -mavx -mavx2 -mfma -mf16c -c ggml.c + +main.o: main.cpp ggml.h + g++ -O3 -std=c++11 -c main.cpp + +# clean up the directory +clean: + rm -f *.o main + +# run the program +run: main + ./main + +# download the following audio samples into folder "./samples": +.PHONY: samples +samples: + @echo "Downloading samples..." + mkdir -p samples + @wget --quiet --show-progress -O samples/gb0.ogg https://upload.wikimedia.org/wikipedia/commons/2/22/George_W._Bush%27s_weekly_radio_address_%28November_1%2C_2008%29.oga + @wget --quiet --show-progress -O samples/gb1.ogg https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg + @wget --quiet --show-progress -O samples/hp0.ogg https://upload.wikimedia.org/wikipedia/en/d/d4/En.henryfphillips.ogg + @echo "Converting to 16-bit WAV ..." 
+ @ffmpeg -loglevel -0 -y -i samples/gb0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb0.wav + @ffmpeg -loglevel -0 -y -i samples/gb1.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb1.wav + @ffmpeg -loglevel -0 -y -i samples/hp0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/hp0.wav + +.PHONY: tiny.en +tiny.en: main + @echo "Downloading tiny.en (75 MB just once)" + mkdir -p models + @if [ ! -f models/ggml-tiny.en.bin ]; then \ + wget --quiet --show-progress -O models/ggml-tiny.en.bin https://ggml.ggerganov.com/ggml-model-whisper-tiny.en.bin ; \ + fi + @echo "===============================================" + @echo "Running tiny.en on all samples in ./samples ..." + @echo "===============================================" + @echo "" + @for f in samples/*.wav; do \ + echo "----------------------------------------------" ; \ + echo "[+] Running base.en on $$f ... (run 'ffplay $$f' to listen)" ; \ + echo "----------------------------------------------" ; \ + echo "" ; \ + ./main -m models/ggml-tiny.en.bin -f $$f ; \ + echo "" ; \ + done + +.PHONY: base.en +base.en: main + @echo "Downloading base.en (142 MB just once)" + mkdir -p models + @if [ ! -f models/ggml-base.en.bin ]; then \ + wget --quiet --show-progress -O models/ggml-base.en.bin https://ggml.ggerganov.com/ggml-model-whisper-base.en.bin ; \ + fi + @echo "===============================================" + @echo "Running base.en on all samples in ./samples ..." + @echo "===============================================" + @echo "" + @for f in samples/*.wav; do \ + echo "----------------------------------------------" ; \ + echo "[+] Running base.en on $$f ... (run 'ffplay $$f' to listen)" ; \ + echo "----------------------------------------------" ; \ + echo "" ; \ + ./main -m models/ggml-base.en.bin -f $$f ; \ + echo "" ; \ + done + +.PHONY: small.en +small.en: main + @echo "Downloading small.en (466 MB just once)" + mkdir -p models + @if [ ! 
-f models/ggml-small.en.bin ]; then \ + wget --quiet --show-progress -O models/ggml-small.en.bin https://ggml.ggerganov.com/ggml-model-whisper-small.en.bin ; \ + fi + @echo "===============================================" + @echo "Running small.en on all samples in ./samples ..." + @echo "===============================================" + @echo "" + @for f in samples/*.wav; do \ + echo "----------------------------------------------" ; \ + echo "[+] Running base.en on $$f ... (run 'ffplay $$f' to listen)" ; \ + echo "----------------------------------------------" ; \ + echo "" ; \ + ./main -m models/ggml-small.en.bin -f $$f ; \ + echo "" ; \ + done + +.PHONY: medium.en +medium.en: main + @echo "Downloading medium.en (1.5 GB just once)" + mkdir -p models + @if [ ! -f models/ggml-medium.en.bin ]; then \ + wget --quiet --show-progress -O models/ggml-medium.en.bin https://ggml.ggerganov.com/ggml-model-whisper-medium.en.bin ; \ + fi + @echo "===============================================" + @echo "Running medium.en on all samples in ./samples ..." + @echo "===============================================" + @echo "" + @for f in samples/*.wav; do \ + echo "----------------------------------------------" ; \ + echo "[+] Running base.en on $$f ... 
(run 'ffplay $$f' to listen)" ; \ + echo "----------------------------------------------" ; \ + echo "" ; \ + ./main -m models/ggml-medium.en.bin -f $$f ; \ + echo "" ; \ + done diff --git a/convert-pt-to-ggml.py b/convert-pt-to-ggml.py new file mode 100644 index 0000000..22bd12e --- /dev/null +++ b/convert-pt-to-ggml.py @@ -0,0 +1,328 @@ +# Convert Whisper transformer model from PyTorch to ggml format +# +# Usage: python convert-pt-to-ggml.py ~/.cache/whisper/medium.pt ~/path/to/repo/whisper/ ./models/whisper-medium +# +# You need to clone the original repo in ~/path/to/repo/whisper/ +# +# git clone https://github.com/openai/whisper ~/path/to/repo/whisper/ +# +# It is used to various assets needed by the algorithm: +# +# - tokenizer +# - mel filters +# +# Also, you need to have the original models in ~/.cache/whisper/ +# See the original repo for more details. +# +# This script loads the specified model and whisper assets and saves them in ggml format. +# The output is a single binary file containing the following information: +# +# - hparams +# - mel filters +# - tokenizer vocab +# - model variables +# +# For each variable, write the following: +# +# - Number of dimensions (int) +# - Name length (int) +# - Dimensions (int[n_dims]) +# - Name (char[name_length]) +# - Data (float[n_dims]) +# + +import io +import os +import sys +import struct +import json +import code +import torch +import numpy as np + +from transformers import GPTJForCausalLM +from transformers import GPT2TokenizerFast + +# ref: https://github.com/openai/whisper/blob/8cf36f3508c9acd341a45eb2364239a3d81458b9/whisper/tokenizer.py#L10-L110 +LANGUAGES = { + "en": "english", + "zh": "chinese", + "de": "german", + "es": "spanish", + "ru": "russian", + "ko": "korean", + "fr": "french", + "ja": "japanese", + "pt": "portuguese", + "tr": "turkish", + "pl": "polish", + "ca": "catalan", + "nl": "dutch", + "ar": "arabic", + "sv": "swedish", + "it": "italian", + "id": "indonesian", + "hi": "hindi", + "fi": 
"finnish", + "vi": "vietnamese", + "iw": "hebrew", + "uk": "ukrainian", + "el": "greek", + "ms": "malay", + "cs": "czech", + "ro": "romanian", + "da": "danish", + "hu": "hungarian", + "ta": "tamil", + "no": "norwegian", + "th": "thai", + "ur": "urdu", + "hr": "croatian", + "bg": "bulgarian", + "lt": "lithuanian", + "la": "latin", + "mi": "maori", + "ml": "malayalam", + "cy": "welsh", + "sk": "slovak", + "te": "telugu", + "fa": "persian", + "lv": "latvian", + "bn": "bengali", + "sr": "serbian", + "az": "azerbaijani", + "sl": "slovenian", + "kn": "kannada", + "et": "estonian", + "mk": "macedonian", + "br": "breton", + "eu": "basque", + "is": "icelandic", + "hy": "armenian", + "ne": "nepali", + "mn": "mongolian", + "bs": "bosnian", + "kk": "kazakh", + "sq": "albanian", + "sw": "swahili", + "gl": "galician", + "mr": "marathi", + "pa": "punjabi", + "si": "sinhala", + "km": "khmer", + "sn": "shona", + "yo": "yoruba", + "so": "somali", + "af": "afrikaans", + "oc": "occitan", + "ka": "georgian", + "be": "belarusian", + "tg": "tajik", + "sd": "sindhi", + "gu": "gujarati", + "am": "amharic", + "yi": "yiddish", + "lo": "lao", + "uz": "uzbek", + "fo": "faroese", + "ht": "haitian creole", + "ps": "pashto", + "tk": "turkmen", + "nn": "nynorsk", + "mt": "maltese", + "sa": "sanskrit", + "lb": "luxembourgish", + "my": "myanmar", + "bo": "tibetan", + "tl": "tagalog", + "mg": "malagasy", + "as": "assamese", + "tt": "tatar", + "haw": "hawaiian", + "ln": "lingala", + "ha": "hausa", + "ba": "bashkir", + "jw": "javanese", + "su": "sundanese", +} + +# ref: https://github.com/openai/whisper/blob/8cf36f3508c9acd341a45eb2364239a3d81458b9/whisper/tokenizer.py#L273-L292 +def build_tokenizer(path_to_whisper_repo: str, name: str = "gpt2"): + os.environ["TOKENIZERS_PARALLELISM"] = "false" + path = os.path.join(path_to_whisper_repo, "whisper/assets", name) + tokenizer = GPT2TokenizerFast.from_pretrained(path) + + specials = [ + "<|startoftranscript|>", + *[f"<|{lang}|>" for lang in 
LANGUAGES.keys()], + "<|translate|>", + "<|transcribe|>", + "<|startoflm|>", + "<|startofprev|>", + "<|nocaptions|>", + "<|notimestamps|>", + ] + + tokenizer.add_special_tokens(dict(additional_special_tokens=specials)) + return tokenizer + +# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +if len(sys.argv) < 4: + print("Usage: convert-pt-to-ggml.py model.pt path-to-whisper-repo dir-output [use-f32]\n") + sys.exit(1) + +fname_inp = sys.argv[1] +dir_whisper = sys.argv[2] +dir_out = sys.argv[3] + +# try to load PyTorch binary data +try: + model_bytes = open(fname_inp, "rb").read() + with io.BytesIO(model_bytes) as fp: + checkpoint = torch.load(fp, map_location="cpu") +except: + print("Error: failed to load PyTorch model file: %s" % fname_inp) + sys.exit(1) + +hparams = checkpoint["dims"] +print("hparams:", hparams) + +list_vars = checkpoint["model_state_dict"] + +#print(list_vars['encoder.positional_embedding']) +#print(list_vars['encoder.conv1.weight']) +#print(list_vars['encoder.conv1.weight'].shape) + +# load mel filters +n_mels = hparams["n_mels"] +with np.load(os.path.join(dir_whisper, 
"whisper/assets", "mel_filters.npz")) as f: + filters = torch.from_numpy(f[f"mel_{n_mels}"]) + #print (filters) + +#code.interact(local=locals()) + +multilingual = hparams["n_vocab"] == 51865 +tokenizer = build_tokenizer(dir_whisper, multilingual and "multilingual" or "gpt2") + +#print(tokenizer) +#print(tokenizer.name_or_path) +#print(len(tokenizer.additional_special_tokens)) +dir_tokenizer = tokenizer.name_or_path + +# output in the same directory as the model +fname_out = dir_out + "/ggml-model.bin" + +with open(dir_tokenizer + "/vocab.json", "r") as f: + tokens = json.load(f) + +# use 16-bit or 32-bit floats +use_f16 = True +if len(sys.argv) > 4: + use_f16 = False + fname_out = dir_out + "/ggml-model-f32.bin" + +fout = open(fname_out, "wb") + +fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex +fout.write(struct.pack("i", hparams["n_vocab"])) +fout.write(struct.pack("i", hparams["n_audio_ctx"])) +fout.write(struct.pack("i", hparams["n_audio_state"])) +fout.write(struct.pack("i", hparams["n_audio_head"])) +fout.write(struct.pack("i", hparams["n_audio_layer"])) +fout.write(struct.pack("i", hparams["n_text_ctx"])) +fout.write(struct.pack("i", hparams["n_text_state"])) +fout.write(struct.pack("i", hparams["n_text_head"])) +fout.write(struct.pack("i", hparams["n_text_layer"])) +fout.write(struct.pack("i", hparams["n_mels"])) +fout.write(struct.pack("i", use_f16)) + +# write mel filters +fout.write(struct.pack("i", filters.shape[0])) +fout.write(struct.pack("i", filters.shape[1])) +for i in range(filters.shape[0]): + for j in range(filters.shape[1]): + fout.write(struct.pack("f", filters[i][j])) + +byte_encoder = bytes_to_unicode() +byte_decoder = {v:k for k, v in byte_encoder.items()} + +fout.write(struct.pack("i", len(tokens))) + +for key in tokens: + text = bytearray([byte_decoder[c] for c in key]).decode('utf-8', errors='replace').encode('utf-8') + fout.write(struct.pack("i", len(text))) + fout.write(text) + +for name in list_vars.keys(): + data = 
list_vars[name].squeeze().numpy() + print("Processing variable: " + name + " with shape: ", data.shape) + + # reshape conv bias from [n] to [n, 1] + if name == "encoder.conv1.bias" or \ + name == "encoder.conv2.bias": + data = data.reshape(data.shape[0], 1) + print(" Reshaped variable: " + name + " to shape: ", data.shape) + + n_dims = len(data.shape); + + # looks like the whisper models are in f16 by default + # so we need to convert the small tensors to f32 until we fully support f16 in ggml + # ftype == 0 -> float32, ftype == 1 -> float16 + ftype = 1; + if use_f16: + if n_dims < 2 or \ + name == "encoder.conv1.bias" or \ + name == "encoder.conv2.bias" or \ + name == "encoder.positional_embedding" or \ + name == "decoder.positional_embedding": + ftype = 0 + data = data.astype(np.float32) + print(" Converting to float32") + data = data.astype(np.float32) + ftype = 0 + else: + data = data.astype(np.float32) + ftype = 0 + + #if name.startswith("encoder"): + # if name.endswith("mlp.0.weight") or \ + # name.endswith("mlp.2.weight"): + # print(" Transposing") + # data = data.transpose() + + # header + str = name.encode('utf-8') + fout.write(struct.pack("iii", n_dims, len(str), ftype)) + for i in range(n_dims): + fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) + fout.write(str); + + # data + data.tofile(fout) + +fout.close() + +print("Done. Output file: " + fname_out) +print("") diff --git a/dr_wav.h b/dr_wav.h new file mode 100644 index 0000000..fd3e95b --- /dev/null +++ b/dr_wav.h @@ -0,0 +1,6434 @@ +/* +WAV audio loader and writer. Choice of public domain or MIT-0. See license statements at the end of this file. +dr_wav - v0.12.16 - 2020-12-02 + +David Reid - mackron@gmail.com + +GitHub: https://github.com/mackron/dr_libs +*/ + +/* +RELEASE NOTES - VERSION 0.12 +============================ +Version 0.12 includes breaking changes to custom chunk handling. 
+ + +Changes to Chunk Callback +------------------------- +dr_wav supports the ability to fire a callback when a chunk is encounted (except for WAVE and FMT chunks). The callback has been updated to include both the +container (RIFF or Wave64) and the FMT chunk which contains information about the format of the data in the wave file. + +Previously, there was no direct way to determine the container, and therefore no way to discriminate against the different IDs in the chunk header (RIFF and +Wave64 containers encode chunk ID's differently). The `container` parameter can be used to know which ID to use. + +Sometimes it can be useful to know the data format at the time the chunk callback is fired. A pointer to a `drwav_fmt` object is now passed into the chunk +callback which will give you information about the data format. To determine the sample format, use `drwav_fmt_get_format()`. This will return one of the +`DR_WAVE_FORMAT_*` tokens. +*/ + +/* +Introduction +============ +This is a single file library. To use it, do something like the following in one .c file. + + ```c + #define DR_WAV_IMPLEMENTATION + #include "dr_wav.h" + ``` + +You can then #include this file in other parts of the program as you would with any other header file. Do something like the following to read audio data: + + ```c + drwav wav; + if (!drwav_init_file(&wav, "my_song.wav", NULL)) { + // Error opening WAV file. + } + + drwav_int32* pDecodedInterleavedPCMFrames = malloc(wav.totalPCMFrameCount * wav.channels * sizeof(drwav_int32)); + size_t numberOfSamplesActuallyDecoded = drwav_read_pcm_frames_s32(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames); + + ... 
+ + drwav_uninit(&wav); + ``` + +If you just want to quickly open and read the audio data in a single operation you can do something like this: + + ```c + unsigned int channels; + unsigned int sampleRate; + drwav_uint64 totalPCMFrameCount; + float* pSampleData = drwav_open_file_and_read_pcm_frames_f32("my_song.wav", &channels, &sampleRate, &totalPCMFrameCount, NULL); + if (pSampleData == NULL) { + // Error opening and reading WAV file. + } + + ... + + drwav_free(pSampleData); + ``` + +The examples above use versions of the API that convert the audio data to a consistent format (32-bit signed PCM, in this case), but you can still output the +audio data in its internal format (see notes below for supported formats): + + ```c + size_t framesRead = drwav_read_pcm_frames(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames); + ``` + +You can also read the raw bytes of audio data, which could be useful if dr_wav does not have native support for a particular data format: + + ```c + size_t bytesRead = drwav_read_raw(&wav, bytesToRead, pRawDataBuffer); + ``` + +dr_wav can also be used to output WAV files. This does not currently support compressed formats. To use this, look at `drwav_init_write()`, +`drwav_init_file_write()`, etc. Use `drwav_write_pcm_frames()` to write samples, or `drwav_write_raw()` to write raw data in the "data" chunk. + + ```c + drwav_data_format format; + format.container = drwav_container_riff; // <-- drwav_container_riff = normal WAV files, drwav_container_w64 = Sony Wave64. + format.format = DR_WAVE_FORMAT_PCM; // <-- Any of the DR_WAVE_FORMAT_* codes. + format.channels = 2; + format.sampleRate = 44100; + format.bitsPerSample = 16; + drwav_init_file_write(&wav, "data/recording.wav", &format, NULL); + + ... + + drwav_uint64 framesWritten = drwav_write_pcm_frames(pWav, frameCount, pSamples); + ``` + +dr_wav has seamless support the Sony Wave64 format. 
The decoder will automatically detect it and it should Just Work without any manual intervention. + + +Build Options +============= +#define these options before including this file. + +#define DR_WAV_NO_CONVERSION_API + Disables conversion APIs such as `drwav_read_pcm_frames_f32()` and `drwav_s16_to_f32()`. + +#define DR_WAV_NO_STDIO + Disables APIs that initialize a decoder from a file such as `drwav_init_file()`, `drwav_init_file_write()`, etc. + + + +Notes +===== +- Samples are always interleaved. +- The default read function does not do any data conversion. Use `drwav_read_pcm_frames_f32()`, `drwav_read_pcm_frames_s32()` and `drwav_read_pcm_frames_s16()` + to read and convert audio data to 32-bit floating point, signed 32-bit integer and signed 16-bit integer samples respectively. Tested and supported internal + formats include the following: + - Unsigned 8-bit PCM + - Signed 12-bit PCM + - Signed 16-bit PCM + - Signed 24-bit PCM + - Signed 32-bit PCM + - IEEE 32-bit floating point + - IEEE 64-bit floating point + - A-law and u-law + - Microsoft ADPCM + - IMA ADPCM (DVI, format code 0x11) +- dr_wav will try to read the WAV file as best it can, even if it's not strictly conformant to the WAV format. +*/ + +#ifndef dr_wav_h +#define dr_wav_h + +#ifdef __cplusplus +extern "C" { +#endif + +#define DRWAV_STRINGIFY(x) #x +#define DRWAV_XSTRINGIFY(x) DRWAV_STRINGIFY(x) + +#define DRWAV_VERSION_MAJOR 0 +#define DRWAV_VERSION_MINOR 12 +#define DRWAV_VERSION_REVISION 16 +#define DRWAV_VERSION_STRING DRWAV_XSTRINGIFY(DRWAV_VERSION_MAJOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_MINOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_REVISION) + +#include /* For size_t. */ + +/* Sized types. 
*/ +typedef signed char drwav_int8; +typedef unsigned char drwav_uint8; +typedef signed short drwav_int16; +typedef unsigned short drwav_uint16; +typedef signed int drwav_int32; +typedef unsigned int drwav_uint32; +#if defined(_MSC_VER) + typedef signed __int64 drwav_int64; + typedef unsigned __int64 drwav_uint64; +#else + #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wlong-long" + #if defined(__clang__) + #pragma GCC diagnostic ignored "-Wc++11-long-long" + #endif + #endif + typedef signed long long drwav_int64; + typedef unsigned long long drwav_uint64; + #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic pop + #endif +#endif +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + typedef drwav_uint64 drwav_uintptr; +#else + typedef drwav_uint32 drwav_uintptr; +#endif +typedef drwav_uint8 drwav_bool8; +typedef drwav_uint32 drwav_bool32; +#define DRWAV_TRUE 1 +#define DRWAV_FALSE 0 + +#if !defined(DRWAV_API) + #if defined(DRWAV_DLL) + #if defined(_WIN32) + #define DRWAV_DLL_IMPORT __declspec(dllimport) + #define DRWAV_DLL_EXPORT __declspec(dllexport) + #define DRWAV_DLL_PRIVATE static + #else + #if defined(__GNUC__) && __GNUC__ >= 4 + #define DRWAV_DLL_IMPORT __attribute__((visibility("default"))) + #define DRWAV_DLL_EXPORT __attribute__((visibility("default"))) + #define DRWAV_DLL_PRIVATE __attribute__((visibility("hidden"))) + #else + #define DRWAV_DLL_IMPORT + #define DRWAV_DLL_EXPORT + #define DRWAV_DLL_PRIVATE static + #endif + #endif + + #if defined(DR_WAV_IMPLEMENTATION) || defined(DRWAV_IMPLEMENTATION) + #define DRWAV_API DRWAV_DLL_EXPORT + #else + #define DRWAV_API DRWAV_DLL_IMPORT + #endif + #define 
DRWAV_PRIVATE DRWAV_DLL_PRIVATE + #else + #define DRWAV_API extern + #define DRWAV_PRIVATE static + #endif +#endif + +typedef drwav_int32 drwav_result; +#define DRWAV_SUCCESS 0 +#define DRWAV_ERROR -1 /* A generic error. */ +#define DRWAV_INVALID_ARGS -2 +#define DRWAV_INVALID_OPERATION -3 +#define DRWAV_OUT_OF_MEMORY -4 +#define DRWAV_OUT_OF_RANGE -5 +#define DRWAV_ACCESS_DENIED -6 +#define DRWAV_DOES_NOT_EXIST -7 +#define DRWAV_ALREADY_EXISTS -8 +#define DRWAV_TOO_MANY_OPEN_FILES -9 +#define DRWAV_INVALID_FILE -10 +#define DRWAV_TOO_BIG -11 +#define DRWAV_PATH_TOO_LONG -12 +#define DRWAV_NAME_TOO_LONG -13 +#define DRWAV_NOT_DIRECTORY -14 +#define DRWAV_IS_DIRECTORY -15 +#define DRWAV_DIRECTORY_NOT_EMPTY -16 +#define DRWAV_END_OF_FILE -17 +#define DRWAV_NO_SPACE -18 +#define DRWAV_BUSY -19 +#define DRWAV_IO_ERROR -20 +#define DRWAV_INTERRUPT -21 +#define DRWAV_UNAVAILABLE -22 +#define DRWAV_ALREADY_IN_USE -23 +#define DRWAV_BAD_ADDRESS -24 +#define DRWAV_BAD_SEEK -25 +#define DRWAV_BAD_PIPE -26 +#define DRWAV_DEADLOCK -27 +#define DRWAV_TOO_MANY_LINKS -28 +#define DRWAV_NOT_IMPLEMENTED -29 +#define DRWAV_NO_MESSAGE -30 +#define DRWAV_BAD_MESSAGE -31 +#define DRWAV_NO_DATA_AVAILABLE -32 +#define DRWAV_INVALID_DATA -33 +#define DRWAV_TIMEOUT -34 +#define DRWAV_NO_NETWORK -35 +#define DRWAV_NOT_UNIQUE -36 +#define DRWAV_NOT_SOCKET -37 +#define DRWAV_NO_ADDRESS -38 +#define DRWAV_BAD_PROTOCOL -39 +#define DRWAV_PROTOCOL_UNAVAILABLE -40 +#define DRWAV_PROTOCOL_NOT_SUPPORTED -41 +#define DRWAV_PROTOCOL_FAMILY_NOT_SUPPORTED -42 +#define DRWAV_ADDRESS_FAMILY_NOT_SUPPORTED -43 +#define DRWAV_SOCKET_NOT_SUPPORTED -44 +#define DRWAV_CONNECTION_RESET -45 +#define DRWAV_ALREADY_CONNECTED -46 +#define DRWAV_NOT_CONNECTED -47 +#define DRWAV_CONNECTION_REFUSED -48 +#define DRWAV_NO_HOST -49 +#define DRWAV_IN_PROGRESS -50 +#define DRWAV_CANCELLED -51 +#define DRWAV_MEMORY_ALREADY_MAPPED -52 +#define DRWAV_AT_END -53 + +/* Common data formats. 
*/ +#define DR_WAVE_FORMAT_PCM 0x1 +#define DR_WAVE_FORMAT_ADPCM 0x2 +#define DR_WAVE_FORMAT_IEEE_FLOAT 0x3 +#define DR_WAVE_FORMAT_ALAW 0x6 +#define DR_WAVE_FORMAT_MULAW 0x7 +#define DR_WAVE_FORMAT_DVI_ADPCM 0x11 +#define DR_WAVE_FORMAT_EXTENSIBLE 0xFFFE + +/* Constants. */ +#ifndef DRWAV_MAX_SMPL_LOOPS +#define DRWAV_MAX_SMPL_LOOPS 1 +#endif + +/* Flags to pass into drwav_init_ex(), etc. */ +#define DRWAV_SEQUENTIAL 0x00000001 + +DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor, drwav_uint32* pRevision); +DRWAV_API const char* drwav_version_string(void); + +typedef enum +{ + drwav_seek_origin_start, + drwav_seek_origin_current +} drwav_seek_origin; + +typedef enum +{ + drwav_container_riff, + drwav_container_w64, + drwav_container_rf64 +} drwav_container; + +typedef struct +{ + union + { + drwav_uint8 fourcc[4]; + drwav_uint8 guid[16]; + } id; + + /* The size in bytes of the chunk. */ + drwav_uint64 sizeInBytes; + + /* + RIFF = 2 byte alignment. + W64 = 8 byte alignment. + */ + unsigned int paddingSize; +} drwav_chunk_header; + +typedef struct +{ + /* + The format tag exactly as specified in the wave file's "fmt" chunk. This can be used by applications + that require support for data formats not natively supported by dr_wav. + */ + drwav_uint16 formatTag; + + /* The number of channels making up the audio data. When this is set to 1 it is mono, 2 is stereo, etc. */ + drwav_uint16 channels; + + /* The sample rate. Usually set to something like 44100. */ + drwav_uint32 sampleRate; + + /* Average bytes per second. You probably don't need this, but it's left here for informational purposes. */ + drwav_uint32 avgBytesPerSec; + + /* Block align. This is equal to the number of channels * bytes per sample. */ + drwav_uint16 blockAlign; + + /* Bits per sample. */ + drwav_uint16 bitsPerSample; + + /* The size of the extended data. Only used internally for validation, but left here for informational purposes. 
*/ + drwav_uint16 extendedSize; + + /* + The number of valid bits per sample. When is equal to WAVE_FORMAT_EXTENSIBLE, + is always rounded up to the nearest multiple of 8. This variable contains information about exactly how + many bits are valid per sample. Mainly used for informational purposes. + */ + drwav_uint16 validBitsPerSample; + + /* The channel mask. Not used at the moment. */ + drwav_uint32 channelMask; + + /* The sub-format, exactly as specified by the wave file. */ + drwav_uint8 subFormat[16]; +} drwav_fmt; + +DRWAV_API drwav_uint16 drwav_fmt_get_format(const drwav_fmt* pFMT); + + +/* +Callback for when data is read. Return value is the number of bytes actually read. + +pUserData [in] The user data that was passed to drwav_init() and family. +pBufferOut [out] The output buffer. +bytesToRead [in] The number of bytes to read. + +Returns the number of bytes actually read. + +A return value of less than bytesToRead indicates the end of the stream. Do _not_ return from this callback until +either the entire bytesToRead is filled or you have reached the end of the stream. +*/ +typedef size_t (* drwav_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead); + +/* +Callback for when data is written. Returns value is the number of bytes actually written. + +pUserData [in] The user data that was passed to drwav_init_write() and family. +pData [out] A pointer to the data to write. +bytesToWrite [in] The number of bytes to write. + +Returns the number of bytes actually written. + +If the return value differs from bytesToWrite, it indicates an error. +*/ +typedef size_t (* drwav_write_proc)(void* pUserData, const void* pData, size_t bytesToWrite); + +/* +Callback for when data needs to be seeked. + +pUserData [in] The user data that was passed to drwav_init() and family. +offset [in] The number of bytes to move, relative to the origin. Will never be negative. +origin [in] The origin of the seek - the current position or the start of the stream. 
+ +Returns whether or not the seek was successful. + +Whether or not it is relative to the beginning or current position is determined by the "origin" parameter which will be either drwav_seek_origin_start or +drwav_seek_origin_current. +*/ +typedef drwav_bool32 (* drwav_seek_proc)(void* pUserData, int offset, drwav_seek_origin origin); + +/* +Callback for when drwav_init_ex() finds a chunk. + +pChunkUserData [in] The user data that was passed to the pChunkUserData parameter of drwav_init_ex() and family. +onRead [in] A pointer to the function to call when reading. +onSeek [in] A pointer to the function to call when seeking. +pReadSeekUserData [in] The user data that was passed to the pReadSeekUserData parameter of drwav_init_ex() and family. +pChunkHeader [in] A pointer to an object containing basic header information about the chunk. Use this to identify the chunk. +container [in] Whether or not the WAV file is a RIFF or Wave64 container. If you're unsure of the difference, assume RIFF. +pFMT [in] A pointer to the object containing the contents of the "fmt" chunk. + +Returns the number of bytes read + seeked. + +To read data from the chunk, call onRead(), passing in pReadSeekUserData as the first parameter. Do the same for seeking with onSeek(). The return value must +be the total number of bytes you have read _plus_ seeked. + +Use the `container` argument to discriminate the fields in `pChunkHeader->id`. If the container is `drwav_container_riff` or `drwav_container_rf64` you should +use `id.fourcc`, otherwise you should use `id.guid`. + +The `pFMT` parameter can be used to determine the data format of the wave file. Use `drwav_fmt_get_format()` to get the sample format, which will be one of the +`DR_WAVE_FORMAT_*` identifiers. + +The read pointer will be sitting on the first byte after the chunk's header. You must not attempt to read beyond the boundary of the chunk. 
+*/ +typedef drwav_uint64 (* drwav_chunk_proc)(void* pChunkUserData, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_chunk_header* pChunkHeader, drwav_container container, const drwav_fmt* pFMT); + +typedef struct +{ + void* pUserData; + void* (* onMalloc)(size_t sz, void* pUserData); + void* (* onRealloc)(void* p, size_t sz, void* pUserData); + void (* onFree)(void* p, void* pUserData); +} drwav_allocation_callbacks; + +/* Structure for internal use. Only used for loaders opened with drwav_init_memory(). */ +typedef struct +{ + const drwav_uint8* data; + size_t dataSize; + size_t currentReadPos; +} drwav__memory_stream; + +/* Structure for internal use. Only used for writers opened with drwav_init_memory_write(). */ +typedef struct +{ + void** ppData; + size_t* pDataSize; + size_t dataSize; + size_t dataCapacity; + size_t currentWritePos; +} drwav__memory_stream_write; + +typedef struct +{ + drwav_container container; /* RIFF, W64. */ + drwav_uint32 format; /* DR_WAVE_FORMAT_* */ + drwav_uint32 channels; + drwav_uint32 sampleRate; + drwav_uint32 bitsPerSample; +} drwav_data_format; + + +/* See the following for details on the 'smpl' chunk: https://sites.google.com/site/musicgapi/technical-documents/wav-file-format#smpl */ +typedef struct +{ + drwav_uint32 cuePointId; + drwav_uint32 type; + drwav_uint32 start; + drwav_uint32 end; + drwav_uint32 fraction; + drwav_uint32 playCount; +} drwav_smpl_loop; + + typedef struct +{ + drwav_uint32 manufacturer; + drwav_uint32 product; + drwav_uint32 samplePeriod; + drwav_uint32 midiUnityNotes; + drwav_uint32 midiPitchFraction; + drwav_uint32 smpteFormat; + drwav_uint32 smpteOffset; + drwav_uint32 numSampleLoops; + drwav_uint32 samplerData; + drwav_smpl_loop loops[DRWAV_MAX_SMPL_LOOPS]; +} drwav_smpl; + +typedef struct +{ + /* A pointer to the function to call when more data is needed. */ + drwav_read_proc onRead; + + /* A pointer to the function to call when data needs to be written. 
Only used when the drwav object is opened in write mode. */ + drwav_write_proc onWrite; + + /* A pointer to the function to call when the wav file needs to be seeked. */ + drwav_seek_proc onSeek; + + /* The user data to pass to callbacks. */ + void* pUserData; + + /* Allocation callbacks. */ + drwav_allocation_callbacks allocationCallbacks; + + + /* Whether or not the WAV file is formatted as a standard RIFF file or W64. */ + drwav_container container; + + + /* Structure containing format information exactly as specified by the wav file. */ + drwav_fmt fmt; + + /* The sample rate. Will be set to something like 44100. */ + drwav_uint32 sampleRate; + + /* The number of channels. This will be set to 1 for monaural streams, 2 for stereo, etc. */ + drwav_uint16 channels; + + /* The bits per sample. Will be set to something like 16, 24, etc. */ + drwav_uint16 bitsPerSample; + + /* Equal to fmt.formatTag, or the value specified by fmt.subFormat if fmt.formatTag is equal to 65534 (WAVE_FORMAT_EXTENSIBLE). */ + drwav_uint16 translatedFormatTag; + + /* The total number of PCM frames making up the audio data. */ + drwav_uint64 totalPCMFrameCount; + + + /* The size in bytes of the data chunk. */ + drwav_uint64 dataChunkDataSize; + + /* The position in the stream of the first byte of the data chunk. This is used for seeking. */ + drwav_uint64 dataChunkDataPos; + + /* The number of bytes remaining in the data chunk. */ + drwav_uint64 bytesRemaining; + + + /* + Only used in sequential write mode. Keeps track of the desired size of the "data" chunk at the point of initialization time. Always + set to 0 for non-sequential writes and when the drwav object is opened in read mode. Used for validation. + */ + drwav_uint64 dataChunkDataSizeTargetWrite; + + /* Keeps track of whether or not the wav writer was initialized in sequential mode. */ + drwav_bool32 isSequentialWrite; + + + /* smpl chunk. 
*/ + drwav_smpl smpl; + + + /* A hack to avoid a DRWAV_MALLOC() when opening a decoder with drwav_init_memory(). */ + drwav__memory_stream memoryStream; + drwav__memory_stream_write memoryStreamWrite; + + /* Generic data for compressed formats. This data is shared across all block-compressed formats. */ + struct + { + drwav_uint64 iCurrentPCMFrame; /* The index of the next PCM frame that will be read by drwav_read_*(). This is used with "totalPCMFrameCount" to ensure we don't read excess samples at the end of the last block. */ + } compressed; + + /* Microsoft ADPCM specific data. */ + struct + { + drwav_uint32 bytesRemainingInBlock; + drwav_uint16 predictor[2]; + drwav_int32 delta[2]; + drwav_int32 cachedFrames[4]; /* Samples are stored in this cache during decoding. */ + drwav_uint32 cachedFrameCount; + drwav_int32 prevFrames[2][2]; /* The previous 2 samples for each channel (2 channels at most). */ + } msadpcm; + + /* IMA ADPCM specific data. */ + struct + { + drwav_uint32 bytesRemainingInBlock; + drwav_int32 predictor[2]; + drwav_int32 stepIndex[2]; + drwav_int32 cachedFrames[16]; /* Samples are stored in this cache during decoding. */ + drwav_uint32 cachedFrameCount; + } ima; +} drwav; + + +/* +Initializes a pre-allocated drwav object for reading. + +pWav [out] A pointer to the drwav object being initialized. +onRead [in] The function to call when data needs to be read from the client. +onSeek [in] The function to call when the read position of the client data needs to move. +onChunk [in, optional] The function to call when a chunk is enumerated at initialized time. +pUserData, pReadSeekUserData [in, optional] A pointer to application defined data that will be passed to onRead and onSeek. +pChunkUserData [in, optional] A pointer to application defined data that will be passed to onChunk. +flags [in, optional] A set of flags for controlling how things are loaded. + +Returns true if successful; false otherwise. + +Close the loader with drwav_uninit(). 
+ +This is the lowest level function for initializing a WAV file. You can also use drwav_init_file() and drwav_init_memory() +to open the stream from a file or from a block of memory respectively. + +Possible values for flags: + DRWAV_SEQUENTIAL: Never perform a backwards seek while loading. This disables the chunk callback and will cause this function + to return as soon as the data chunk is found. Any chunks after the data chunk will be ignored. + +drwav_init() is equivalent to "drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0);". + +The onChunk callback is not called for the WAVE or FMT chunks. The contents of the FMT chunk can be read from pWav->fmt +after the function returns. + +See also: drwav_init_file(), drwav_init_memory(), drwav_uninit() +*/ +DRWAV_API drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); + +/* +Initializes a pre-allocated drwav object for writing. + +onWrite [in] The function to call when data needs to be written. +onSeek [in] The function to call when the write position needs to move. +pUserData [in, optional] A pointer to application defined data that will be passed to onWrite and onSeek. + +Returns true if successful; false otherwise. + +Close the writer with drwav_uninit(). + +This is the lowest level function for initializing a WAV file. You can also use drwav_init_file_write() and drwav_init_memory_write() +to open the stream from a file or from a block of memory respectively. + +If the total sample count is known, you can use drwav_init_write_sequential(). 
This avoids the need for dr_wav to perform +a post-processing step for storing the total sample count and the size of the data chunk which requires a backwards seek. + +See also: drwav_init_file_write(), drwav_init_memory_write(), drwav_uninit() +*/ +DRWAV_API drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); + +/* +Utility function to determine the target size of the entire data to be written (including all headers and chunks). + +Returns the target size in bytes. + +Useful if the application needs to know the size to allocate. + +Only writing to the RIFF chunk and one data chunk is currently supported. + +See also: drwav_init_write(), drwav_init_file_write(), drwav_init_memory_write() +*/ +DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount); + +/* +Uninitializes the given drwav object. + +Use this only for objects initialized with drwav_init*() functions (drwav_init(), drwav_init_ex(), drwav_init_write(), drwav_init_write_sequential()). +*/ +DRWAV_API drwav_result drwav_uninit(drwav* pWav); + + +/* +Reads raw audio data. + +This is the lowest level function for reading audio data. It simply reads the given number of +bytes of the raw internal sample data. 
+
+Consider using drwav_read_pcm_frames_s16(), drwav_read_pcm_frames_s32() or drwav_read_pcm_frames_f32() for
+reading sample data in a consistent format.
+
+pBufferOut can be NULL in which case a seek will be performed.
+
+Returns the number of bytes actually read.
+*/
+DRWAV_API size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut);
+
+/*
+Reads up to the specified number of PCM frames from the WAV file.
+
+The output data will be in the file's internal format, converted to native-endian byte order. Use
+drwav_read_pcm_frames_s16/f32/s32() to read data in a specific format.
+
+If the return value is less than <framesToRead> it means the end of the file has been reached or
+you have requested more PCM frames than can possibly fit in the output buffer.
+
+This function will only work when sample data is of a fixed size and uncompressed. If you are
+using a compressed format consider using drwav_read_raw() or drwav_read_pcm_frames_s16/s32/f32().
+
+pBufferOut can be NULL in which case a seek will be performed.
+*/
+DRWAV_API drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
+DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
+DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
+
+/*
+Seeks to the given PCM frame.
+
+Returns true if successful; false otherwise.
+*/
+DRWAV_API drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetFrameIndex);
+
+
+/*
+Writes raw audio data.
+
+Returns the number of bytes actually written. If this differs from bytesToWrite, it indicates an error.
+*/
+DRWAV_API size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData);
+
+/*
+Writes PCM frames.
+
+Returns the number of PCM frames written.
+
+Input samples need to be in native-endian byte order. On big-endian architectures the input data will be converted to
+little-endian. 
Use drwav_write_raw() to write raw audio data without performing any conversion.
+*/
+DRWAV_API drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
+DRWAV_API drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
+DRWAV_API drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
+
+
+/* Conversion Utilities */
+#ifndef DR_WAV_NO_CONVERSION_API
+
+/*
+Reads a chunk of audio data and converts it to signed 16-bit PCM samples.
+
+pBufferOut can be NULL in which case a seek will be performed.
+
+Returns the number of PCM frames actually read.
+
+If the return value is less than <framesToRead> it means the end of the file has been reached.
+*/
+DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
+DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
+DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
+
+/* Low-level function for converting unsigned 8-bit PCM samples to signed 16-bit PCM samples. */
+DRWAV_API void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount);
+
+/* Low-level function for converting signed 24-bit PCM samples to signed 16-bit PCM samples. */
+DRWAV_API void drwav_s24_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount);
+
+/* Low-level function for converting signed 32-bit PCM samples to signed 16-bit PCM samples. */
+DRWAV_API void drwav_s32_to_s16(drwav_int16* pOut, const drwav_int32* pIn, size_t sampleCount);
+
+/* Low-level function for converting IEEE 32-bit floating point samples to signed 16-bit PCM samples. 
*/ +DRWAV_API void drwav_f32_to_s16(drwav_int16* pOut, const float* pIn, size_t sampleCount); + +/* Low-level function for converting IEEE 64-bit floating point samples to signed 16-bit PCM samples. */ +DRWAV_API void drwav_f64_to_s16(drwav_int16* pOut, const double* pIn, size_t sampleCount); + +/* Low-level function for converting A-law samples to signed 16-bit PCM samples. */ +DRWAV_API void drwav_alaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount); + +/* Low-level function for converting u-law samples to signed 16-bit PCM samples. */ +DRWAV_API void drwav_mulaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount); + + +/* +Reads a chunk of audio data and converts it to IEEE 32-bit floating point samples. + +pBufferOut can be NULL in which case a seek will be performed. + +Returns the number of PCM frames actually read. + +If the return value is less than it means the end of the file has been reached. +*/ +DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut); +DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut); +DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32be(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut); + +/* Low-level function for converting unsigned 8-bit PCM samples to IEEE 32-bit floating point samples. */ +DRWAV_API void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); + +/* Low-level function for converting signed 16-bit PCM samples to IEEE 32-bit floating point samples. */ +DRWAV_API void drwav_s16_to_f32(float* pOut, const drwav_int16* pIn, size_t sampleCount); + +/* Low-level function for converting signed 24-bit PCM samples to IEEE 32-bit floating point samples. */ +DRWAV_API void drwav_s24_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); + +/* Low-level function for converting signed 32-bit PCM samples to IEEE 32-bit floating point samples. 
*/ +DRWAV_API void drwav_s32_to_f32(float* pOut, const drwav_int32* pIn, size_t sampleCount); + +/* Low-level function for converting IEEE 64-bit floating point samples to IEEE 32-bit floating point samples. */ +DRWAV_API void drwav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount); + +/* Low-level function for converting A-law samples to IEEE 32-bit floating point samples. */ +DRWAV_API void drwav_alaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); + +/* Low-level function for converting u-law samples to IEEE 32-bit floating point samples. */ +DRWAV_API void drwav_mulaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); + + +/* +Reads a chunk of audio data and converts it to signed 32-bit PCM samples. + +pBufferOut can be NULL in which case a seek will be performed. + +Returns the number of PCM frames actually read. + +If the return value is less than it means the end of the file has been reached. +*/ +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut); +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut); +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut); + +/* Low-level function for converting unsigned 8-bit PCM samples to signed 32-bit PCM samples. */ +DRWAV_API void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); + +/* Low-level function for converting signed 16-bit PCM samples to signed 32-bit PCM samples. */ +DRWAV_API void drwav_s16_to_s32(drwav_int32* pOut, const drwav_int16* pIn, size_t sampleCount); + +/* Low-level function for converting signed 24-bit PCM samples to signed 32-bit PCM samples. 
*/ +DRWAV_API void drwav_s24_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); + +/* Low-level function for converting IEEE 32-bit floating point samples to signed 32-bit PCM samples. */ +DRWAV_API void drwav_f32_to_s32(drwav_int32* pOut, const float* pIn, size_t sampleCount); + +/* Low-level function for converting IEEE 64-bit floating point samples to signed 32-bit PCM samples. */ +DRWAV_API void drwav_f64_to_s32(drwav_int32* pOut, const double* pIn, size_t sampleCount); + +/* Low-level function for converting A-law samples to signed 32-bit PCM samples. */ +DRWAV_API void drwav_alaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); + +/* Low-level function for converting u-law samples to signed 32-bit PCM samples. */ +DRWAV_API void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); + +#endif /* DR_WAV_NO_CONVERSION_API */ + + +/* High-Level Convenience Helpers */ + +#ifndef DR_WAV_NO_STDIO +/* +Helper for initializing a wave file for reading using stdio. + +This holds the internal FILE object until drwav_uninit() is called. Keep this in mind if you're caching drwav +objects because the operating system may restrict the number of file handles an application can have open at +any given time. 
+*/ +DRWAV_API drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* filename, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); + +/* +Helper for initializing a wave file for writing using stdio. + +This holds the internal FILE object until drwav_uninit() is called. Keep this in mind if you're caching drwav +objects because the operating system may restrict the number of file handles an application can have open at +any given time. +*/ +DRWAV_API drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* 
pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks); +#endif /* DR_WAV_NO_STDIO */ + +/* +Helper for initializing a loader from a pre-allocated memory buffer. + +This does not create a copy of the data. It is up to the application to ensure the buffer remains valid for +the lifetime of the drwav object. + +The buffer should contain the contents of the entire wave file, not just the sample data. +*/ +DRWAV_API drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); + +/* +Helper for initializing a writer which outputs data to a memory buffer. + +dr_wav will manage the memory allocations, however it is up to the caller to free the data with drwav_free(). + +The buffer will remain allocated even after drwav_uninit() is called. The buffer should not be considered valid +until after drwav_uninit() has been called. 
+*/ +DRWAV_API drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks); + + +#ifndef DR_WAV_NO_CONVERSION_API +/* +Opens and reads an entire wav file in a single operation. + +The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer. +*/ +DRWAV_API drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +#ifndef DR_WAV_NO_STDIO +/* +Opens and decodes an entire wav file in a single operation. + +The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer. 
+*/ +DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +#endif +/* +Opens and decodes an entire wav file from a block of memory in a single operation. + +The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer. 
+*/ +DRWAV_API drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +DRWAV_API drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); +#endif + +/* Frees data that was allocated internally by dr_wav. */ +DRWAV_API void drwav_free(void* p, const drwav_allocation_callbacks* pAllocationCallbacks); + +/* Converts bytes from a wav stream to a sized type of native endian. */ +DRWAV_API drwav_uint16 drwav_bytes_to_u16(const drwav_uint8* data); +DRWAV_API drwav_int16 drwav_bytes_to_s16(const drwav_uint8* data); +DRWAV_API drwav_uint32 drwav_bytes_to_u32(const drwav_uint8* data); +DRWAV_API drwav_int32 drwav_bytes_to_s32(const drwav_uint8* data); +DRWAV_API drwav_uint64 drwav_bytes_to_u64(const drwav_uint8* data); +DRWAV_API drwav_int64 drwav_bytes_to_s64(const drwav_uint8* data); + +/* Compares a GUID for the purpose of checking the type of a Wave64 chunk. */ +DRWAV_API drwav_bool32 drwav_guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16]); + +/* Compares a four-character-code for the purpose of checking the type of a RIFF chunk. 
*/ +DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b); + +#ifdef __cplusplus +} +#endif +#endif /* dr_wav_h */ + + +/************************************************************************************************************************************************************ + ************************************************************************************************************************************************************ + + IMPLEMENTATION + + ************************************************************************************************************************************************************ + ************************************************************************************************************************************************************/ +#if defined(DR_WAV_IMPLEMENTATION) || defined(DRWAV_IMPLEMENTATION) +#ifndef dr_wav_c +#define dr_wav_c + +#include +#include /* For memcpy(), memset() */ +#include /* For INT_MAX */ + +#ifndef DR_WAV_NO_STDIO +#include +#include +#endif + +/* Standard library stuff. */ +#ifndef DRWAV_ASSERT +#include +#define DRWAV_ASSERT(expression) assert(expression) +#endif +#ifndef DRWAV_MALLOC +#define DRWAV_MALLOC(sz) malloc((sz)) +#endif +#ifndef DRWAV_REALLOC +#define DRWAV_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef DRWAV_FREE +#define DRWAV_FREE(p) free((p)) +#endif +#ifndef DRWAV_COPY_MEMORY +#define DRWAV_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef DRWAV_ZERO_MEMORY +#define DRWAV_ZERO_MEMORY(p, sz) memset((p), 0, (sz)) +#endif +#ifndef DRWAV_ZERO_OBJECT +#define DRWAV_ZERO_OBJECT(p) DRWAV_ZERO_MEMORY((p), sizeof(*p)) +#endif + +#define drwav_countof(x) (sizeof(x) / sizeof(x[0])) +#define drwav_align(x, a) ((((x) + (a) - 1) / (a)) * (a)) +#define drwav_min(a, b) (((a) < (b)) ? (a) : (b)) +#define drwav_max(a, b) (((a) > (b)) ? 
(a) : (b)) +#define drwav_clamp(x, lo, hi) (drwav_max((lo), drwav_min((hi), (x)))) + +#define DRWAV_MAX_SIMD_VECTOR_SIZE 64 /* 64 for AVX-512 in the future. */ + +/* CPU architecture. */ +#if defined(__x86_64__) || defined(_M_X64) + #define DRWAV_X64 +#elif defined(__i386) || defined(_M_IX86) + #define DRWAV_X86 +#elif defined(__arm__) || defined(_M_ARM) + #define DRWAV_ARM +#endif + +#ifdef _MSC_VER + #define DRWAV_INLINE __forceinline +#elif defined(__GNUC__) + /* + I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when + the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some + case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the + command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue + I am using "__inline__" only when we're compiling in strict ANSI mode. 
+ */ + #if defined(__STRICT_ANSI__) + #define DRWAV_INLINE __inline__ __attribute__((always_inline)) + #else + #define DRWAV_INLINE inline __attribute__((always_inline)) + #endif +#elif defined(__WATCOMC__) + #define DRWAV_INLINE __inline +#else + #define DRWAV_INLINE +#endif + +#if defined(SIZE_MAX) + #define DRWAV_SIZE_MAX SIZE_MAX +#else + #if defined(_WIN64) || defined(_LP64) || defined(__LP64__) + #define DRWAV_SIZE_MAX ((drwav_uint64)0xFFFFFFFFFFFFFFFF) + #else + #define DRWAV_SIZE_MAX 0xFFFFFFFF + #endif +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + #define DRWAV_HAS_BYTESWAP16_INTRINSIC + #define DRWAV_HAS_BYTESWAP32_INTRINSIC + #define DRWAV_HAS_BYTESWAP64_INTRINSIC +#elif defined(__clang__) + #if defined(__has_builtin) + #if __has_builtin(__builtin_bswap16) + #define DRWAV_HAS_BYTESWAP16_INTRINSIC + #endif + #if __has_builtin(__builtin_bswap32) + #define DRWAV_HAS_BYTESWAP32_INTRINSIC + #endif + #if __has_builtin(__builtin_bswap64) + #define DRWAV_HAS_BYTESWAP64_INTRINSIC + #endif + #endif +#elif defined(__GNUC__) + #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define DRWAV_HAS_BYTESWAP32_INTRINSIC + #define DRWAV_HAS_BYTESWAP64_INTRINSIC + #endif + #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) + #define DRWAV_HAS_BYTESWAP16_INTRINSIC + #endif +#endif + +DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor, drwav_uint32* pRevision) +{ + if (pMajor) { + *pMajor = DRWAV_VERSION_MAJOR; + } + + if (pMinor) { + *pMinor = DRWAV_VERSION_MINOR; + } + + if (pRevision) { + *pRevision = DRWAV_VERSION_REVISION; + } +} + +DRWAV_API const char* drwav_version_string(void) +{ + return DRWAV_VERSION_STRING; +} + +/* +These limits are used for basic validation when initializing the decoder. If you exceed these limits, first of all: what on Earth are +you doing?! (Let me know, I'd be curious!) Second, you can adjust these by #define-ing them before the dr_wav implementation. 
+*/ +#ifndef DRWAV_MAX_SAMPLE_RATE +#define DRWAV_MAX_SAMPLE_RATE 384000 +#endif +#ifndef DRWAV_MAX_CHANNELS +#define DRWAV_MAX_CHANNELS 256 +#endif +#ifndef DRWAV_MAX_BITS_PER_SAMPLE +#define DRWAV_MAX_BITS_PER_SAMPLE 64 +#endif + +static const drwav_uint8 drwavGUID_W64_RIFF[16] = {0x72,0x69,0x66,0x66, 0x2E,0x91, 0xCF,0x11, 0xA5,0xD6, 0x28,0xDB,0x04,0xC1,0x00,0x00}; /* 66666972-912E-11CF-A5D6-28DB04C10000 */ +static const drwav_uint8 drwavGUID_W64_WAVE[16] = {0x77,0x61,0x76,0x65, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; /* 65766177-ACF3-11D3-8CD1-00C04F8EDB8A */ +/*static const drwav_uint8 drwavGUID_W64_JUNK[16] = {0x6A,0x75,0x6E,0x6B, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};*/ /* 6B6E756A-ACF3-11D3-8CD1-00C04F8EDB8A */ +static const drwav_uint8 drwavGUID_W64_FMT [16] = {0x66,0x6D,0x74,0x20, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; /* 20746D66-ACF3-11D3-8CD1-00C04F8EDB8A */ +static const drwav_uint8 drwavGUID_W64_FACT[16] = {0x66,0x61,0x63,0x74, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; /* 74636166-ACF3-11D3-8CD1-00C04F8EDB8A */ +static const drwav_uint8 drwavGUID_W64_DATA[16] = {0x64,0x61,0x74,0x61, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; /* 61746164-ACF3-11D3-8CD1-00C04F8EDB8A */ +static const drwav_uint8 drwavGUID_W64_SMPL[16] = {0x73,0x6D,0x70,0x6C, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; /* 6C706D73-ACF3-11D3-8CD1-00C04F8EDB8A */ + +static DRWAV_INLINE drwav_bool32 drwav__guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16]) +{ + int i; + for (i = 0; i < 16; i += 1) { + if (a[i] != b[i]) { + return DRWAV_FALSE; + } + } + + return DRWAV_TRUE; +} + +static DRWAV_INLINE drwav_bool32 drwav__fourcc_equal(const drwav_uint8* a, const char* b) +{ + return + a[0] == b[0] && + a[1] == b[1] && + a[2] == b[2] && + a[3] == b[3]; +} + + + +static DRWAV_INLINE int drwav__is_little_endian(void) +{ +#if defined(DRWAV_X86) || 
defined(DRWAV_X64)
+    return DRWAV_TRUE;
+#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN
+    return DRWAV_TRUE;
+#else
+    /* Runtime fallback: inspect the first byte of an int to detect host endianness. */
+    int n = 1;
+    return (*(char*)&n) == 1;
+#endif
+}
+
+/* The drwav__bytes_to_* helpers below decode little-endian byte sequences into native integers.
+   They assemble the value byte-by-byte with shifts, so they are correct on any host endianness. */
+static DRWAV_INLINE drwav_uint16 drwav__bytes_to_u16(const drwav_uint8* data)
+{
+    return (data[0] << 0) | (data[1] << 8);
+}
+
+static DRWAV_INLINE drwav_int16 drwav__bytes_to_s16(const drwav_uint8* data)
+{
+    return (short)drwav__bytes_to_u16(data);
+}
+
+static DRWAV_INLINE drwav_uint32 drwav__bytes_to_u32(const drwav_uint8* data)
+{
+    return (data[0] << 0) | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
+}
+
+static DRWAV_INLINE drwav_int32 drwav__bytes_to_s32(const drwav_uint8* data)
+{
+    return (drwav_int32)drwav__bytes_to_u32(data);
+}
+
+static DRWAV_INLINE drwav_uint64 drwav__bytes_to_u64(const drwav_uint8* data)
+{
+    return
+        ((drwav_uint64)data[0] << 0) | ((drwav_uint64)data[1] << 8) | ((drwav_uint64)data[2] << 16) | ((drwav_uint64)data[3] << 24) |
+        ((drwav_uint64)data[4] << 32) | ((drwav_uint64)data[5] << 40) | ((drwav_uint64)data[6] << 48) | ((drwav_uint64)data[7] << 56);
+}
+
+static DRWAV_INLINE drwav_int64 drwav__bytes_to_s64(const drwav_uint8* data)
+{
+    return (drwav_int64)drwav__bytes_to_u64(data);
+}
+
+/* Copies a 16-byte GUID; used for Wave64 chunk identification. */
+static DRWAV_INLINE void drwav__bytes_to_guid(const drwav_uint8* data, drwav_uint8* guid)
+{
+    int i;
+    for (i = 0; i < 16; ++i) {
+        guid[i] = data[i];
+    }
+}
+
+
+/* Byte-swap helpers: use a compiler intrinsic when available (detected above via
+   DRWAV_HAS_BYTESWAP*_INTRINSIC), otherwise fall back to portable shift-and-mask code. */
+static DRWAV_INLINE drwav_uint16 drwav__bswap16(drwav_uint16 n)
+{
+#ifdef DRWAV_HAS_BYTESWAP16_INTRINSIC
+    #if defined(_MSC_VER)
+        return _byteswap_ushort(n);
+    #elif defined(__GNUC__) || defined(__clang__)
+        return __builtin_bswap16(n);
+    #else
+        #error "This compiler does not support the byte swap intrinsic."
+    #endif
+#else
+    return ((n & 0xFF00) >> 8) |
+           ((n & 0x00FF) << 8);
+#endif
+}
+
+static DRWAV_INLINE drwav_uint32 drwav__bswap32(drwav_uint32 n)
+{
+#ifdef DRWAV_HAS_BYTESWAP32_INTRINSIC
+    #if defined(_MSC_VER)
+        return _byteswap_ulong(n);
+    #elif defined(__GNUC__) || defined(__clang__)
+        #if defined(DRWAV_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(DRWAV_64BIT) /* <-- 64-bit inline assembly has not been tested, so disabling for now. */
+            /* Inline assembly optimized implementation for ARM. In my testing, GCC does not generate optimized code with __builtin_bswap32(). */
+            drwav_uint32 r;
+            __asm__ __volatile__ (
+            #if defined(DRWAV_64BIT)
+                "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) /* <-- This is untested. If someone in the community could test this, that would be appreciated! */
+            #else
+                "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n)
+            #endif
+            );
+            return r;
+        #else
+            return __builtin_bswap32(n);
+        #endif
+    #else
+        #error "This compiler does not support the byte swap intrinsic."
+    #endif
+#else
+    return ((n & 0xFF000000) >> 24) |
+           ((n & 0x00FF0000) >>  8) |
+           ((n & 0x0000FF00) <<  8) |
+           ((n & 0x000000FF) << 24);
+#endif
+}
+
+static DRWAV_INLINE drwav_uint64 drwav__bswap64(drwav_uint64 n)
+{
+#ifdef DRWAV_HAS_BYTESWAP64_INTRINSIC
+    #if defined(_MSC_VER)
+        return _byteswap_uint64(n);
+    #elif defined(__GNUC__) || defined(__clang__)
+        return __builtin_bswap64(n);
+    #else
+        #error "This compiler does not support the byte swap intrinsic."
+    #endif
+#else
+    /* Weird "<< 32" bitshift is required for C89 because it doesn't support 64-bit constants. Should be optimized out by a good compiler.
+ */
+    return ((n & ((drwav_uint64)0xFF000000 << 32)) >> 56) |
+           ((n & ((drwav_uint64)0x00FF0000 << 32)) >> 40) |
+           ((n & ((drwav_uint64)0x0000FF00 << 32)) >> 24) |
+           ((n & ((drwav_uint64)0x000000FF << 32)) >>  8) |
+           ((n & ((drwav_uint64)0xFF000000      )) <<  8) |
+           ((n & ((drwav_uint64)0x00FF0000      )) << 24) |
+           ((n & ((drwav_uint64)0x0000FF00      )) << 40) |
+           ((n & ((drwav_uint64)0x000000FF      )) << 56);
+#endif
+}
+
+
+/* Signed/float wrappers around the unsigned swaps, plus in-place bulk swappers for sample buffers. */
+static DRWAV_INLINE drwav_int16 drwav__bswap_s16(drwav_int16 n)
+{
+    return (drwav_int16)drwav__bswap16((drwav_uint16)n);
+}
+
+static DRWAV_INLINE void drwav__bswap_samples_s16(drwav_int16* pSamples, drwav_uint64 sampleCount)
+{
+    drwav_uint64 iSample;
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        pSamples[iSample] = drwav__bswap_s16(pSamples[iSample]);
+    }
+}
+
+
+/* 24-bit samples occupy 3 bytes; swapping endianness just exchanges the first and last byte. */
+static DRWAV_INLINE void drwav__bswap_s24(drwav_uint8* p)
+{
+    drwav_uint8 t;
+    t = p[0];
+    p[0] = p[2];
+    p[2] = t;
+}
+
+static DRWAV_INLINE void drwav__bswap_samples_s24(drwav_uint8* pSamples, drwav_uint64 sampleCount)
+{
+    drwav_uint64 iSample;
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        drwav_uint8* pSample = pSamples + (iSample*3);
+        drwav__bswap_s24(pSample);
+    }
+}
+
+
+static DRWAV_INLINE drwav_int32 drwav__bswap_s32(drwav_int32 n)
+{
+    return (drwav_int32)drwav__bswap32((drwav_uint32)n);
+}
+
+static DRWAV_INLINE void drwav__bswap_samples_s32(drwav_int32* pSamples, drwav_uint64 sampleCount)
+{
+    drwav_uint64 iSample;
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        pSamples[iSample] = drwav__bswap_s32(pSamples[iSample]);
+    }
+}
+
+
+/* Type-puns through a union so the float's bit pattern can be byte-swapped as a 32-bit integer. */
+static DRWAV_INLINE float drwav__bswap_f32(float n)
+{
+    union {
+        drwav_uint32 i;
+        float f;
+    } x;
+    x.f = n;
+    x.i = drwav__bswap32(x.i);
+
+    return x.f;
+}
+
+static DRWAV_INLINE void drwav__bswap_samples_f32(float* pSamples, drwav_uint64 sampleCount)
+{
+    drwav_uint64 iSample;
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        pSamples[iSample] = drwav__bswap_f32(pSamples[iSample]);
+    }
+}
+
+
+static 
DRWAV_INLINE double drwav__bswap_f64(double n) +{ + union { + drwav_uint64 i; + double f; + } x; + x.f = n; + x.i = drwav__bswap64(x.i); + + return x.f; +} + +static DRWAV_INLINE void drwav__bswap_samples_f64(double* pSamples, drwav_uint64 sampleCount) +{ + drwav_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = drwav__bswap_f64(pSamples[iSample]); + } +} + + +static DRWAV_INLINE void drwav__bswap_samples_pcm(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample) +{ + /* Assumes integer PCM. Floating point PCM is done in drwav__bswap_samples_ieee(). */ + switch (bytesPerSample) + { + case 2: /* s16, s12 (loosely packed) */ + { + drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount); + } break; + case 3: /* s24 */ + { + drwav__bswap_samples_s24((drwav_uint8*)pSamples, sampleCount); + } break; + case 4: /* s32 */ + { + drwav__bswap_samples_s32((drwav_int32*)pSamples, sampleCount); + } break; + default: + { + /* Unsupported format. */ + DRWAV_ASSERT(DRWAV_FALSE); + } break; + } +} + +static DRWAV_INLINE void drwav__bswap_samples_ieee(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample) +{ + switch (bytesPerSample) + { + #if 0 /* Contributions welcome for f16 support. */ + case 2: /* f16 */ + { + drwav__bswap_samples_f16((drwav_float16*)pSamples, sampleCount); + } break; + #endif + case 4: /* f32 */ + { + drwav__bswap_samples_f32((float*)pSamples, sampleCount); + } break; + case 8: /* f64 */ + { + drwav__bswap_samples_f64((double*)pSamples, sampleCount); + } break; + default: + { + /* Unsupported format. 
*/ + DRWAV_ASSERT(DRWAV_FALSE); + } break; + } +} + +static DRWAV_INLINE void drwav__bswap_samples(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample, drwav_uint16 format) +{ + switch (format) + { + case DR_WAVE_FORMAT_PCM: + { + drwav__bswap_samples_pcm(pSamples, sampleCount, bytesPerSample); + } break; + + case DR_WAVE_FORMAT_IEEE_FLOAT: + { + drwav__bswap_samples_ieee(pSamples, sampleCount, bytesPerSample); + } break; + + case DR_WAVE_FORMAT_ALAW: + case DR_WAVE_FORMAT_MULAW: + { + drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount); + } break; + + case DR_WAVE_FORMAT_ADPCM: + case DR_WAVE_FORMAT_DVI_ADPCM: + default: + { + /* Unsupported format. */ + DRWAV_ASSERT(DRWAV_FALSE); + } break; + } +} + + +static void* drwav__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return DRWAV_MALLOC(sz); +} + +static void* drwav__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return DRWAV_REALLOC(p, sz); +} + +static void drwav__free_default(void* p, void* pUserData) +{ + (void)pUserData; + DRWAV_FREE(p); +} + + +static void* drwav__malloc_from_callbacks(size_t sz, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + + /* Try using realloc(). */ + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + + return NULL; +} + +static void* drwav__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + + /* Try emulating realloc() in terms of malloc()/free(). 
*/ + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + + if (p != NULL) { + DRWAV_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + + return p2; + } + + return NULL; +} + +static void drwav__free_from_callbacks(void* p, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} + + +static drwav_allocation_callbacks drwav_copy_allocation_callbacks_or_defaults(const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + /* Copy. */ + return *pAllocationCallbacks; + } else { + /* Defaults. */ + drwav_allocation_callbacks allocationCallbacks; + allocationCallbacks.pUserData = NULL; + allocationCallbacks.onMalloc = drwav__malloc_default; + allocationCallbacks.onRealloc = drwav__realloc_default; + allocationCallbacks.onFree = drwav__free_default; + return allocationCallbacks; + } +} + + +static DRWAV_INLINE drwav_bool32 drwav__is_compressed_format_tag(drwav_uint16 formatTag) +{ + return + formatTag == DR_WAVE_FORMAT_ADPCM || + formatTag == DR_WAVE_FORMAT_DVI_ADPCM; +} + +static unsigned int drwav__chunk_padding_size_riff(drwav_uint64 chunkSize) +{ + return (unsigned int)(chunkSize % 2); +} + +static unsigned int drwav__chunk_padding_size_w64(drwav_uint64 chunkSize) +{ + return (unsigned int)(chunkSize % 8); +} + +static drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut); +static drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut); +static drwav_bool32 drwav_init_write__internal(drwav* pWav, const 
drwav_data_format* pFormat, drwav_uint64 totalSampleCount); + +static drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_chunk_header* pHeaderOut) +{ + if (container == drwav_container_riff || container == drwav_container_rf64) { + drwav_uint8 sizeInBytes[4]; + + if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) { + return DRWAV_AT_END; + } + + if (onRead(pUserData, sizeInBytes, 4) != 4) { + return DRWAV_INVALID_FILE; + } + + pHeaderOut->sizeInBytes = drwav__bytes_to_u32(sizeInBytes); + pHeaderOut->paddingSize = drwav__chunk_padding_size_riff(pHeaderOut->sizeInBytes); + *pRunningBytesReadOut += 8; + } else { + drwav_uint8 sizeInBytes[8]; + + if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) { + return DRWAV_AT_END; + } + + if (onRead(pUserData, sizeInBytes, 8) != 8) { + return DRWAV_INVALID_FILE; + } + + pHeaderOut->sizeInBytes = drwav__bytes_to_u64(sizeInBytes) - 24; /* <-- Subtract 24 because w64 includes the size of the header. */ + pHeaderOut->paddingSize = drwav__chunk_padding_size_w64(pHeaderOut->sizeInBytes); + *pRunningBytesReadOut += 24; + } + + return DRWAV_SUCCESS; +} + +static drwav_bool32 drwav__seek_forward(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData) +{ + drwav_uint64 bytesRemainingToSeek = offset; + while (bytesRemainingToSeek > 0) { + if (bytesRemainingToSeek > 0x7FFFFFFF) { + if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) { + return DRWAV_FALSE; + } + bytesRemainingToSeek -= 0x7FFFFFFF; + } else { + if (!onSeek(pUserData, (int)bytesRemainingToSeek, drwav_seek_origin_current)) { + return DRWAV_FALSE; + } + bytesRemainingToSeek = 0; + } + } + + return DRWAV_TRUE; +} + +static drwav_bool32 drwav__seek_from_start(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData) +{ + if (offset <= 0x7FFFFFFF) { + return onSeek(pUserData, (int)offset, drwav_seek_origin_start); + } + + /* Larger than 32-bit seek. 
*/ + if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_start)) { + return DRWAV_FALSE; + } + offset -= 0x7FFFFFFF; + + for (;;) { + if (offset <= 0x7FFFFFFF) { + return onSeek(pUserData, (int)offset, drwav_seek_origin_current); + } + + if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) { + return DRWAV_FALSE; + } + offset -= 0x7FFFFFFF; + } + + /* Should never get here. */ + /*return DRWAV_TRUE; */ +} + + +static drwav_bool32 drwav__read_fmt(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_fmt* fmtOut) +{ + drwav_chunk_header header; + drwav_uint8 fmt[16]; + + if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + + + /* Skip non-fmt chunks. */ + while (((container == drwav_container_riff || container == drwav_container_rf64) && !drwav__fourcc_equal(header.id.fourcc, "fmt ")) || (container == drwav_container_w64 && !drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT))) { + if (!drwav__seek_forward(onSeek, header.sizeInBytes + header.paddingSize, pUserData)) { + return DRWAV_FALSE; + } + *pRunningBytesReadOut += header.sizeInBytes + header.paddingSize; + + /* Try the next header. */ + if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + } + + + /* Validation. 
*/ + if (container == drwav_container_riff || container == drwav_container_rf64) { + if (!drwav__fourcc_equal(header.id.fourcc, "fmt ")) { + return DRWAV_FALSE; + } + } else { + if (!drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT)) { + return DRWAV_FALSE; + } + } + + + if (onRead(pUserData, fmt, sizeof(fmt)) != sizeof(fmt)) { + return DRWAV_FALSE; + } + *pRunningBytesReadOut += sizeof(fmt); + + fmtOut->formatTag = drwav__bytes_to_u16(fmt + 0); + fmtOut->channels = drwav__bytes_to_u16(fmt + 2); + fmtOut->sampleRate = drwav__bytes_to_u32(fmt + 4); + fmtOut->avgBytesPerSec = drwav__bytes_to_u32(fmt + 8); + fmtOut->blockAlign = drwav__bytes_to_u16(fmt + 12); + fmtOut->bitsPerSample = drwav__bytes_to_u16(fmt + 14); + + fmtOut->extendedSize = 0; + fmtOut->validBitsPerSample = 0; + fmtOut->channelMask = 0; + memset(fmtOut->subFormat, 0, sizeof(fmtOut->subFormat)); + + if (header.sizeInBytes > 16) { + drwav_uint8 fmt_cbSize[2]; + int bytesReadSoFar = 0; + + if (onRead(pUserData, fmt_cbSize, sizeof(fmt_cbSize)) != sizeof(fmt_cbSize)) { + return DRWAV_FALSE; /* Expecting more data. */ + } + *pRunningBytesReadOut += sizeof(fmt_cbSize); + + bytesReadSoFar = 18; + + fmtOut->extendedSize = drwav__bytes_to_u16(fmt_cbSize); + if (fmtOut->extendedSize > 0) { + /* Simple validation. */ + if (fmtOut->formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { + if (fmtOut->extendedSize != 22) { + return DRWAV_FALSE; + } + } + + if (fmtOut->formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { + drwav_uint8 fmtext[22]; + if (onRead(pUserData, fmtext, fmtOut->extendedSize) != fmtOut->extendedSize) { + return DRWAV_FALSE; /* Expecting more data. 
*/ + } + + fmtOut->validBitsPerSample = drwav__bytes_to_u16(fmtext + 0); + fmtOut->channelMask = drwav__bytes_to_u32(fmtext + 2); + drwav__bytes_to_guid(fmtext + 6, fmtOut->subFormat); + } else { + if (!onSeek(pUserData, fmtOut->extendedSize, drwav_seek_origin_current)) { + return DRWAV_FALSE; + } + } + *pRunningBytesReadOut += fmtOut->extendedSize; + + bytesReadSoFar += fmtOut->extendedSize; + } + + /* Seek past any leftover bytes. For w64 the leftover will be defined based on the chunk size. */ + if (!onSeek(pUserData, (int)(header.sizeInBytes - bytesReadSoFar), drwav_seek_origin_current)) { + return DRWAV_FALSE; + } + *pRunningBytesReadOut += (header.sizeInBytes - bytesReadSoFar); + } + + if (header.paddingSize > 0) { + if (!onSeek(pUserData, header.paddingSize, drwav_seek_origin_current)) { + return DRWAV_FALSE; + } + *pRunningBytesReadOut += header.paddingSize; + } + + return DRWAV_TRUE; +} + + +static size_t drwav__on_read(drwav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, drwav_uint64* pCursor) +{ + size_t bytesRead; + + DRWAV_ASSERT(onRead != NULL); + DRWAV_ASSERT(pCursor != NULL); + + bytesRead = onRead(pUserData, pBufferOut, bytesToRead); + *pCursor += bytesRead; + return bytesRead; +} + +#if 0 +static drwav_bool32 drwav__on_seek(drwav_seek_proc onSeek, void* pUserData, int offset, drwav_seek_origin origin, drwav_uint64* pCursor) +{ + DRWAV_ASSERT(onSeek != NULL); + DRWAV_ASSERT(pCursor != NULL); + + if (!onSeek(pUserData, offset, origin)) { + return DRWAV_FALSE; + } + + if (origin == drwav_seek_origin_start) { + *pCursor = offset; + } else { + *pCursor += offset; + } + + return DRWAV_TRUE; +} +#endif + + + +static drwav_uint32 drwav_get_bytes_per_pcm_frame(drwav* pWav) +{ + /* + The bytes per frame is a bit ambiguous. It can either be based on the bits per sample, or the block align. 
The way I'm doing it here + is that if the bits per sample is a multiple of 8, use floor(bitsPerSample*channels/8), otherwise fall back to the block align. + */ + if ((pWav->bitsPerSample & 0x7) == 0) { + /* Bits per sample is a multiple of 8. */ + return (pWav->bitsPerSample * pWav->fmt.channels) >> 3; + } else { + return pWav->fmt.blockAlign; + } +} + +DRWAV_API drwav_uint16 drwav_fmt_get_format(const drwav_fmt* pFMT) +{ + if (pFMT == NULL) { + return 0; + } + + if (pFMT->formatTag != DR_WAVE_FORMAT_EXTENSIBLE) { + return pFMT->formatTag; + } else { + return drwav__bytes_to_u16(pFMT->subFormat); /* Only the first two bytes are required. */ + } +} + +static drwav_bool32 drwav_preinit(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL || onRead == NULL || onSeek == NULL) { + return DRWAV_FALSE; + } + + DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav)); + pWav->onRead = onRead; + pWav->onSeek = onSeek; + pWav->pUserData = pReadSeekUserData; + pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + + if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) { + return DRWAV_FALSE; /* Invalid allocation callbacks. */ + } + + return DRWAV_TRUE; +} + +static drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags) +{ + /* This function assumes drwav_preinit() has been called beforehand. */ + + drwav_uint64 cursor; /* <-- Keeps track of the byte position so we can seek to specific locations. */ + drwav_bool32 sequential; + drwav_uint8 riff[4]; + drwav_fmt fmt; + unsigned short translatedFormatTag; + drwav_bool32 foundDataChunk; + drwav_uint64 dataChunkSize = 0; /* <-- Important! Don't explicitly set this to 0 anywhere else. 
Calculation of the size of the data chunk is performed in different paths depending on the container. */ + drwav_uint64 sampleCountFromFactChunk = 0; /* Same as dataChunkSize - make sure this is the only place this is initialized to 0. */ + drwav_uint64 chunkSize; + + cursor = 0; + sequential = (flags & DRWAV_SEQUENTIAL) != 0; + + /* The first 4 bytes should be the RIFF identifier. */ + if (drwav__on_read(pWav->onRead, pWav->pUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) { + return DRWAV_FALSE; + } + + /* + The first 4 bytes can be used to identify the container. For RIFF files it will start with "RIFF" and for + w64 it will start with "riff". + */ + if (drwav__fourcc_equal(riff, "RIFF")) { + pWav->container = drwav_container_riff; + } else if (drwav__fourcc_equal(riff, "riff")) { + int i; + drwav_uint8 riff2[12]; + + pWav->container = drwav_container_w64; + + /* Check the rest of the GUID for validity. */ + if (drwav__on_read(pWav->onRead, pWav->pUserData, riff2, sizeof(riff2), &cursor) != sizeof(riff2)) { + return DRWAV_FALSE; + } + + for (i = 0; i < 12; ++i) { + if (riff2[i] != drwavGUID_W64_RIFF[i+4]) { + return DRWAV_FALSE; + } + } + } else if (drwav__fourcc_equal(riff, "RF64")) { + pWav->container = drwav_container_rf64; + } else { + return DRWAV_FALSE; /* Unknown or unsupported container. */ + } + + + if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) { + drwav_uint8 chunkSizeBytes[4]; + drwav_uint8 wave[4]; + + /* RIFF/WAVE */ + if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return DRWAV_FALSE; + } + + if (pWav->container == drwav_container_riff) { + if (drwav__bytes_to_u32(chunkSizeBytes) < 36) { + return DRWAV_FALSE; /* Chunk size should always be at least 36 bytes. */ + } + } else { + if (drwav__bytes_to_u32(chunkSizeBytes) != 0xFFFFFFFF) { + return DRWAV_FALSE; /* Chunk size should always be set to -1/0xFFFFFFFF for RF64. 
The actual size is retrieved later. */ + } + } + + if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { + return DRWAV_FALSE; + } + + if (!drwav__fourcc_equal(wave, "WAVE")) { + return DRWAV_FALSE; /* Expecting "WAVE". */ + } + } else { + drwav_uint8 chunkSizeBytes[8]; + drwav_uint8 wave[16]; + + /* W64 */ + if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return DRWAV_FALSE; + } + + if (drwav__bytes_to_u64(chunkSizeBytes) < 80) { + return DRWAV_FALSE; + } + + if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { + return DRWAV_FALSE; + } + + if (!drwav__guid_equal(wave, drwavGUID_W64_WAVE)) { + return DRWAV_FALSE; + } + } + + + /* For RF64, the "ds64" chunk must come next, before the "fmt " chunk. */ + if (pWav->container == drwav_container_rf64) { + drwav_uint8 sizeBytes[8]; + drwav_uint64 bytesRemainingInChunk; + drwav_chunk_header header; + drwav_result result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + + if (!drwav__fourcc_equal(header.id.fourcc, "ds64")) { + return DRWAV_FALSE; /* Expecting "ds64". */ + } + + bytesRemainingInChunk = header.sizeInBytes + header.paddingSize; + + /* We don't care about the size of the RIFF chunk - skip it. */ + if (!drwav__seek_forward(pWav->onSeek, 8, pWav->pUserData)) { + return DRWAV_FALSE; + } + bytesRemainingInChunk -= 8; + cursor += 8; + + + /* Next 8 bytes is the size of the "data" chunk. */ + if (drwav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { + return DRWAV_FALSE; + } + bytesRemainingInChunk -= 8; + dataChunkSize = drwav__bytes_to_u64(sizeBytes); + + + /* Next 8 bytes is the same count which we would usually derived from the FACT chunk if it was available. 
*/ + if (drwav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { + return DRWAV_FALSE; + } + bytesRemainingInChunk -= 8; + sampleCountFromFactChunk = drwav__bytes_to_u64(sizeBytes); + + + /* Skip over everything else. */ + if (!drwav__seek_forward(pWav->onSeek, bytesRemainingInChunk, pWav->pUserData)) { + return DRWAV_FALSE; + } + cursor += bytesRemainingInChunk; + } + + + /* The next bytes should be the "fmt " chunk. */ + if (!drwav__read_fmt(pWav->onRead, pWav->onSeek, pWav->pUserData, pWav->container, &cursor, &fmt)) { + return DRWAV_FALSE; /* Failed to read the "fmt " chunk. */ + } + + /* Basic validation. */ + if ((fmt.sampleRate == 0 || fmt.sampleRate > DRWAV_MAX_SAMPLE_RATE) || + (fmt.channels == 0 || fmt.channels > DRWAV_MAX_CHANNELS) || + (fmt.bitsPerSample == 0 || fmt.bitsPerSample > DRWAV_MAX_BITS_PER_SAMPLE) || + fmt.blockAlign == 0) { + return DRWAV_FALSE; /* Probably an invalid WAV file. */ + } + + + /* Translate the internal format. */ + translatedFormatTag = fmt.formatTag; + if (translatedFormatTag == DR_WAVE_FORMAT_EXTENSIBLE) { + translatedFormatTag = drwav__bytes_to_u16(fmt.subFormat + 0); + } + + + /* + We need to enumerate over each chunk for two reasons: + 1) The "data" chunk may not be the next one + 2) We may want to report each chunk back to the client + + In order to correctly report each chunk back to the client we will need to keep looping until the end of the file. + */ + foundDataChunk = DRWAV_FALSE; + + /* The next chunk we care about is the "data" chunk. This is not necessarily the next chunk so we'll need to loop. */ + for (;;) + { + drwav_chunk_header header; + drwav_result result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != DRWAV_SUCCESS) { + if (!foundDataChunk) { + return DRWAV_FALSE; + } else { + break; /* Probably at the end of the file. Get out of the loop. */ + } + } + + /* Tell the client about this chunk. 
*/ + if (!sequential && onChunk != NULL) { + drwav_uint64 callbackBytesRead = onChunk(pChunkUserData, pWav->onRead, pWav->onSeek, pWav->pUserData, &header, pWav->container, &fmt); + + /* + dr_wav may need to read the contents of the chunk, so we now need to seek back to the position before + we called the callback. + */ + if (callbackBytesRead > 0) { + if (!drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData)) { + return DRWAV_FALSE; + } + } + } + + + if (!foundDataChunk) { + pWav->dataChunkDataPos = cursor; + } + + chunkSize = header.sizeInBytes; + if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) { + if (drwav__fourcc_equal(header.id.fourcc, "data")) { + foundDataChunk = DRWAV_TRUE; + if (pWav->container != drwav_container_rf64) { /* The data chunk size for RF64 will always be set to 0xFFFFFFFF here. It was set to its true value earlier. */ + dataChunkSize = chunkSize; + } + } + } else { + if (drwav__guid_equal(header.id.guid, drwavGUID_W64_DATA)) { + foundDataChunk = DRWAV_TRUE; + dataChunkSize = chunkSize; + } + } + + /* + If at this point we have found the data chunk and we're running in sequential mode, we need to break out of this loop. The reason for + this is that we would otherwise require a backwards seek which sequential mode forbids. + */ + if (foundDataChunk && sequential) { + break; + } + + /* Optional. Get the total sample count from the FACT chunk. This is useful for compressed formats. */ + if (pWav->container == drwav_container_riff) { + if (drwav__fourcc_equal(header.id.fourcc, "fact")) { + drwav_uint32 sampleCount; + if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCount, 4, &cursor) != 4) { + return DRWAV_FALSE; + } + chunkSize -= 4; + + if (!foundDataChunk) { + pWav->dataChunkDataPos = cursor; + } + + /* + The sample count in the "fact" chunk is either unreliable, or I'm not understanding it properly. For now I am only enabling this + for Microsoft ADPCM formats. 
+ */ + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { + sampleCountFromFactChunk = sampleCount; + } else { + sampleCountFromFactChunk = 0; + } + } + } else if (pWav->container == drwav_container_w64) { + if (drwav__guid_equal(header.id.guid, drwavGUID_W64_FACT)) { + if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) { + return DRWAV_FALSE; + } + chunkSize -= 8; + + if (!foundDataChunk) { + pWav->dataChunkDataPos = cursor; + } + } + } else if (pWav->container == drwav_container_rf64) { + /* We retrieved the sample count from the ds64 chunk earlier so no need to do that here. */ + } + + /* "smpl" chunk. */ + if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) { + if (drwav__fourcc_equal(header.id.fourcc, "smpl")) { + drwav_uint8 smplHeaderData[36]; /* 36 = size of the smpl header section, not including the loop data. */ + if (chunkSize >= sizeof(smplHeaderData)) { + drwav_uint64 bytesJustRead = drwav__on_read(pWav->onRead, pWav->pUserData, smplHeaderData, sizeof(smplHeaderData), &cursor); + chunkSize -= bytesJustRead; + + if (bytesJustRead == sizeof(smplHeaderData)) { + drwav_uint32 iLoop; + + pWav->smpl.manufacturer = drwav__bytes_to_u32(smplHeaderData+0); + pWav->smpl.product = drwav__bytes_to_u32(smplHeaderData+4); + pWav->smpl.samplePeriod = drwav__bytes_to_u32(smplHeaderData+8); + pWav->smpl.midiUnityNotes = drwav__bytes_to_u32(smplHeaderData+12); + pWav->smpl.midiPitchFraction = drwav__bytes_to_u32(smplHeaderData+16); + pWav->smpl.smpteFormat = drwav__bytes_to_u32(smplHeaderData+20); + pWav->smpl.smpteOffset = drwav__bytes_to_u32(smplHeaderData+24); + pWav->smpl.numSampleLoops = drwav__bytes_to_u32(smplHeaderData+28); + pWav->smpl.samplerData = drwav__bytes_to_u32(smplHeaderData+32); + + for (iLoop = 0; iLoop < pWav->smpl.numSampleLoops && iLoop < drwav_countof(pWav->smpl.loops); ++iLoop) { + drwav_uint8 smplLoopData[24]; /* 24 = size of a loop section in the smpl chunk. 
*/ + bytesJustRead = drwav__on_read(pWav->onRead, pWav->pUserData, smplLoopData, sizeof(smplLoopData), &cursor); + chunkSize -= bytesJustRead; + + if (bytesJustRead == sizeof(smplLoopData)) { + pWav->smpl.loops[iLoop].cuePointId = drwav__bytes_to_u32(smplLoopData+0); + pWav->smpl.loops[iLoop].type = drwav__bytes_to_u32(smplLoopData+4); + pWav->smpl.loops[iLoop].start = drwav__bytes_to_u32(smplLoopData+8); + pWav->smpl.loops[iLoop].end = drwav__bytes_to_u32(smplLoopData+12); + pWav->smpl.loops[iLoop].fraction = drwav__bytes_to_u32(smplLoopData+16); + pWav->smpl.loops[iLoop].playCount = drwav__bytes_to_u32(smplLoopData+20); + } else { + break; /* Break from the smpl loop for loop. */ + } + } + } + } else { + /* Looks like invalid data. Ignore the chunk. */ + } + } + } else { + if (drwav__guid_equal(header.id.guid, drwavGUID_W64_SMPL)) { + /* + This path will be hit when a W64 WAV file contains a smpl chunk. I don't have a sample file to test this path, so a contribution + is welcome to add support for this. + */ + } + } + + /* Make sure we seek past the padding. */ + chunkSize += header.paddingSize; + if (!drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData)) { + break; + } + cursor += chunkSize; + + if (!foundDataChunk) { + pWav->dataChunkDataPos = cursor; + } + } + + /* If we haven't found a data chunk, return an error. */ + if (!foundDataChunk) { + return DRWAV_FALSE; + } + + /* We may have moved past the data chunk. If so we need to move back. If running in sequential mode we can assume we are already sitting on the data chunk. */ + if (!sequential) { + if (!drwav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData)) { + return DRWAV_FALSE; + } + cursor = pWav->dataChunkDataPos; + } + + + /* At this point we should be sitting on the first byte of the raw audio data. 
*/ + + pWav->fmt = fmt; + pWav->sampleRate = fmt.sampleRate; + pWav->channels = fmt.channels; + pWav->bitsPerSample = fmt.bitsPerSample; + pWav->bytesRemaining = dataChunkSize; + pWav->translatedFormatTag = translatedFormatTag; + pWav->dataChunkDataSize = dataChunkSize; + + if (sampleCountFromFactChunk != 0) { + pWav->totalPCMFrameCount = sampleCountFromFactChunk; + } else { + pWav->totalPCMFrameCount = dataChunkSize / drwav_get_bytes_per_pcm_frame(pWav); + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { + drwav_uint64 totalBlockHeaderSizeInBytes; + drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; + + /* Make sure any trailing partial block is accounted for. */ + if ((blockCount * fmt.blockAlign) < dataChunkSize) { + blockCount += 1; + } + + /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */ + totalBlockHeaderSizeInBytes = blockCount * (6*fmt.channels); + pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; + } + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { + drwav_uint64 totalBlockHeaderSizeInBytes; + drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; + + /* Make sure any trailing partial block is accounted for. */ + if ((blockCount * fmt.blockAlign) < dataChunkSize) { + blockCount += 1; + } + + /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */ + totalBlockHeaderSizeInBytes = blockCount * (4*fmt.channels); + pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; + + /* The header includes a decoded sample for each channel which acts as the initial predictor sample. */ + pWav->totalPCMFrameCount += blockCount; + } + } + + /* Some formats only support a certain number of channels. 
*/ + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { + if (pWav->channels > 2) { + return DRWAV_FALSE; + } + } + +#ifdef DR_WAV_LIBSNDFILE_COMPAT + /* + I use libsndfile as a benchmark for testing, however in the version I'm using (from the Windows installer on the libsndfile website), + it appears the total sample count libsndfile uses for MS-ADPCM is incorrect. It would seem they are computing the total sample count + from the number of blocks, however this results in the inclusion of extra silent samples at the end of the last block. The correct + way to know the total sample count is to inspect the "fact" chunk, which should always be present for compressed formats, and should + always include the sample count. This little block of code below is only used to emulate the libsndfile logic so I can properly run my + correctness tests against libsndfile, and is disabled by default. + */ + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { + drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; + pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (6*pWav->channels))) * 2)) / fmt.channels; /* x2 because two samples per byte. 
*/ + } + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { + drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; + pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (4*pWav->channels))) * 2) + (blockCount * pWav->channels)) / fmt.channels; + } +#endif + + return DRWAV_TRUE; +} + +DRWAV_API drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (!drwav_preinit(pWav, onRead, onSeek, pReadSeekUserData, pAllocationCallbacks)) { + return DRWAV_FALSE; + } + + return drwav_init__internal(pWav, onChunk, pChunkUserData, flags); +} + + +static drwav_uint32 drwav__riff_chunk_size_riff(drwav_uint64 dataChunkSize) +{ + drwav_uint64 chunkSize = 4 + 24 + dataChunkSize + drwav__chunk_padding_size_riff(dataChunkSize); /* 4 = "WAVE". 24 = "fmt " chunk. */ + if (chunkSize > 0xFFFFFFFFUL) { + chunkSize = 0xFFFFFFFFUL; + } + + return (drwav_uint32)chunkSize; /* Safe cast due to the clamp above. */ +} + +static drwav_uint32 drwav__data_chunk_size_riff(drwav_uint64 dataChunkSize) +{ + if (dataChunkSize <= 0xFFFFFFFFUL) { + return (drwav_uint32)dataChunkSize; + } else { + return 0xFFFFFFFFUL; + } +} + +static drwav_uint64 drwav__riff_chunk_size_w64(drwav_uint64 dataChunkSize) +{ + drwav_uint64 dataSubchunkPaddingSize = drwav__chunk_padding_size_w64(dataChunkSize); + + return 80 + 24 + dataChunkSize + dataSubchunkPaddingSize; /* +24 because W64 includes the size of the GUID and size fields. 
*/ +} + +static drwav_uint64 drwav__data_chunk_size_w64(drwav_uint64 dataChunkSize) +{ + return 24 + dataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */ +} + +static drwav_uint64 drwav__riff_chunk_size_rf64(drwav_uint64 dataChunkSize) +{ + drwav_uint64 chunkSize = 4 + 36 + 24 + dataChunkSize + drwav__chunk_padding_size_riff(dataChunkSize); /* 4 = "WAVE". 36 = "ds64" chunk. 24 = "fmt " chunk. */ + if (chunkSize > 0xFFFFFFFFUL) { + chunkSize = 0xFFFFFFFFUL; + } + + return chunkSize; +} + +static drwav_uint64 drwav__data_chunk_size_rf64(drwav_uint64 dataChunkSize) +{ + return dataChunkSize; +} + + +static size_t drwav__write(drwav* pWav, const void* pData, size_t dataSize) +{ + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(pWav->onWrite != NULL); + + /* Generic write. Assumes no byte reordering required. */ + return pWav->onWrite(pWav->pUserData, pData, dataSize); +} + +static size_t drwav__write_u16ne_to_le(drwav* pWav, drwav_uint16 value) +{ + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(pWav->onWrite != NULL); + + if (!drwav__is_little_endian()) { + value = drwav__bswap16(value); + } + + return drwav__write(pWav, &value, 2); +} + +static size_t drwav__write_u32ne_to_le(drwav* pWav, drwav_uint32 value) +{ + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(pWav->onWrite != NULL); + + if (!drwav__is_little_endian()) { + value = drwav__bswap32(value); + } + + return drwav__write(pWav, &value, 4); +} + +static size_t drwav__write_u64ne_to_le(drwav* pWav, drwav_uint64 value) +{ + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(pWav->onWrite != NULL); + + if (!drwav__is_little_endian()) { + value = drwav__bswap64(value); + } + + return drwav__write(pWav, &value, 8); +} + + +static drwav_bool32 drwav_preinit_write(drwav* pWav, const drwav_data_format* pFormat, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL || onWrite == NULL) { + 
return DRWAV_FALSE; + } + + if (!isSequential && onSeek == NULL) { + return DRWAV_FALSE; /* <-- onSeek is required when in non-sequential mode. */ + } + + /* Not currently supporting compressed formats. Will need to add support for the "fact" chunk before we enable this. */ + if (pFormat->format == DR_WAVE_FORMAT_EXTENSIBLE) { + return DRWAV_FALSE; + } + if (pFormat->format == DR_WAVE_FORMAT_ADPCM || pFormat->format == DR_WAVE_FORMAT_DVI_ADPCM) { + return DRWAV_FALSE; + } + + DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav)); + pWav->onWrite = onWrite; + pWav->onSeek = onSeek; + pWav->pUserData = pUserData; + pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + + if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) { + return DRWAV_FALSE; /* Invalid allocation callbacks. */ + } + + pWav->fmt.formatTag = (drwav_uint16)pFormat->format; + pWav->fmt.channels = (drwav_uint16)pFormat->channels; + pWav->fmt.sampleRate = pFormat->sampleRate; + pWav->fmt.avgBytesPerSec = (drwav_uint32)((pFormat->bitsPerSample * pFormat->sampleRate * pFormat->channels) / 8); + pWav->fmt.blockAlign = (drwav_uint16)((pFormat->channels * pFormat->bitsPerSample) / 8); + pWav->fmt.bitsPerSample = (drwav_uint16)pFormat->bitsPerSample; + pWav->fmt.extendedSize = 0; + pWav->isSequentialWrite = isSequential; + + return DRWAV_TRUE; +} + +static drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount) +{ + /* The function assumes drwav_preinit_write() was called beforehand. */ + + size_t runningPos = 0; + drwav_uint64 initialDataChunkSize = 0; + drwav_uint64 chunkSizeFMT; + + /* + The initial values for the "RIFF" and "data" chunks depends on whether or not we are initializing in sequential mode or not. 
In + sequential mode we set this to its final values straight away since they can be calculated from the total sample count. In non- + sequential mode we initialize it all to zero and fill it out in drwav_uninit() using a backwards seek. + */ + if (pWav->isSequentialWrite) { + initialDataChunkSize = (totalSampleCount * pWav->fmt.bitsPerSample) / 8; + + /* + The RIFF container has a limit on the number of samples. drwav is not allowing this. There's no practical limits for Wave64 + so for the sake of simplicity I'm not doing any validation for that. + */ + if (pFormat->container == drwav_container_riff) { + if (initialDataChunkSize > (0xFFFFFFFFUL - 36)) { + return DRWAV_FALSE; /* Not enough room to store every sample. */ + } + } + } + + pWav->dataChunkDataSizeTargetWrite = initialDataChunkSize; + + + /* "RIFF" chunk. */ + if (pFormat->container == drwav_container_riff) { + drwav_uint32 chunkSizeRIFF = 28 + (drwav_uint32)initialDataChunkSize; /* +28 = "WAVE" + [sizeof "fmt " chunk] */ + runningPos += drwav__write(pWav, "RIFF", 4); + runningPos += drwav__write_u32ne_to_le(pWav, chunkSizeRIFF); + runningPos += drwav__write(pWav, "WAVE", 4); + } else if (pFormat->container == drwav_container_w64) { + drwav_uint64 chunkSizeRIFF = 80 + 24 + initialDataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */ + runningPos += drwav__write(pWav, drwavGUID_W64_RIFF, 16); + runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeRIFF); + runningPos += drwav__write(pWav, drwavGUID_W64_WAVE, 16); + } else if (pFormat->container == drwav_container_rf64) { + runningPos += drwav__write(pWav, "RF64", 4); + runningPos += drwav__write_u32ne_to_le(pWav, 0xFFFFFFFF); /* Always 0xFFFFFFFF for RF64. Set to a proper value in the "ds64" chunk. */ + runningPos += drwav__write(pWav, "WAVE", 4); + } + + + /* "ds64" chunk (RF64 only). 
*/ + if (pFormat->container == drwav_container_rf64) { + drwav_uint32 initialds64ChunkSize = 28; /* 28 = [Size of RIFF (8 bytes)] + [Size of DATA (8 bytes)] + [Sample Count (8 bytes)] + [Table Length (4 bytes)]. Table length always set to 0. */ + drwav_uint64 initialRiffChunkSize = 8 + initialds64ChunkSize + initialDataChunkSize; /* +8 for the ds64 header. */ + + runningPos += drwav__write(pWav, "ds64", 4); + runningPos += drwav__write_u32ne_to_le(pWav, initialds64ChunkSize); /* Size of ds64. */ + runningPos += drwav__write_u64ne_to_le(pWav, initialRiffChunkSize); /* Size of RIFF. Set to true value at the end. */ + runningPos += drwav__write_u64ne_to_le(pWav, initialDataChunkSize); /* Size of DATA. Set to true value at the end. */ + runningPos += drwav__write_u64ne_to_le(pWav, totalSampleCount); /* Sample count. */ + runningPos += drwav__write_u32ne_to_le(pWav, 0); /* Table length. Always set to zero in our case since we're not doing any other chunks than "DATA". */ + } + + + /* "fmt " chunk. */ + if (pFormat->container == drwav_container_riff || pFormat->container == drwav_container_rf64) { + chunkSizeFMT = 16; + runningPos += drwav__write(pWav, "fmt ", 4); + runningPos += drwav__write_u32ne_to_le(pWav, (drwav_uint32)chunkSizeFMT); + } else if (pFormat->container == drwav_container_w64) { + chunkSizeFMT = 40; + runningPos += drwav__write(pWav, drwavGUID_W64_FMT, 16); + runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeFMT); + } + + runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.formatTag); + runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.channels); + runningPos += drwav__write_u32ne_to_le(pWav, pWav->fmt.sampleRate); + runningPos += drwav__write_u32ne_to_le(pWav, pWav->fmt.avgBytesPerSec); + runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.blockAlign); + runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.bitsPerSample); + + pWav->dataChunkDataPos = runningPos; + + /* "data" chunk. 
*/ + if (pFormat->container == drwav_container_riff) { + drwav_uint32 chunkSizeDATA = (drwav_uint32)initialDataChunkSize; + runningPos += drwav__write(pWav, "data", 4); + runningPos += drwav__write_u32ne_to_le(pWav, chunkSizeDATA); + } else if (pFormat->container == drwav_container_w64) { + drwav_uint64 chunkSizeDATA = 24 + initialDataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */ + runningPos += drwav__write(pWav, drwavGUID_W64_DATA, 16); + runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeDATA); + } else if (pFormat->container == drwav_container_rf64) { + runningPos += drwav__write(pWav, "data", 4); + runningPos += drwav__write_u32ne_to_le(pWav, 0xFFFFFFFF); /* Always set to 0xFFFFFFFF for RF64. The true size of the data chunk is specified in the ds64 chunk. */ + } + + /* + The runningPos variable is incremented in the section above but is left unused which is causing some static analysis tools to detect it + as a dead store. I'm leaving this as-is for safety just in case I want to expand this function later to include other tags and want to + keep track of the running position for whatever reason. The line below should silence the static analysis tools. + */ + (void)runningPos; + + /* Set some properties for the client's convenience. 
*/ + pWav->container = pFormat->container; + pWav->channels = (drwav_uint16)pFormat->channels; + pWav->sampleRate = pFormat->sampleRate; + pWav->bitsPerSample = (drwav_uint16)pFormat->bitsPerSample; + pWav->translatedFormatTag = (drwav_uint16)pFormat->format; + + return DRWAV_TRUE; +} + + +DRWAV_API drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (!drwav_preinit_write(pWav, pFormat, DRWAV_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) { + return DRWAV_FALSE; + } + + return drwav_init_write__internal(pWav, pFormat, 0); /* DRWAV_FALSE = Not Sequential */ +} + +DRWAV_API drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (!drwav_preinit_write(pWav, pFormat, DRWAV_TRUE, onWrite, NULL, pUserData, pAllocationCallbacks)) { + return DRWAV_FALSE; + } + + return drwav_init_write__internal(pWav, pFormat, totalSampleCount); /* DRWAV_TRUE = Sequential */ +} + +DRWAV_API drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return DRWAV_FALSE; + } + + return drwav_init_write_sequential(pWav, pFormat, totalPCMFrameCount*pFormat->channels, onWrite, pUserData, pAllocationCallbacks); +} + +DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount) +{ + /* Casting totalSampleCount to drwav_int64 for VC6 compatibility. No issues in practice because nobody is going to exhaust the whole 63 bits. 
*/ + drwav_uint64 targetDataSizeBytes = (drwav_uint64)((drwav_int64)totalSampleCount * pFormat->channels * pFormat->bitsPerSample/8.0); + drwav_uint64 riffChunkSizeBytes; + drwav_uint64 fileSizeBytes = 0; + + if (pFormat->container == drwav_container_riff) { + riffChunkSizeBytes = drwav__riff_chunk_size_riff(targetDataSizeBytes); + fileSizeBytes = (8 + riffChunkSizeBytes); /* +8 because WAV doesn't include the size of the ChunkID and ChunkSize fields. */ + } else if (pFormat->container == drwav_container_w64) { + riffChunkSizeBytes = drwav__riff_chunk_size_w64(targetDataSizeBytes); + fileSizeBytes = riffChunkSizeBytes; + } else if (pFormat->container == drwav_container_rf64) { + riffChunkSizeBytes = drwav__riff_chunk_size_rf64(targetDataSizeBytes); + fileSizeBytes = (8 + riffChunkSizeBytes); /* +8 because WAV doesn't include the size of the ChunkID and ChunkSize fields. */ + } + + return fileSizeBytes; +} + + +#ifndef DR_WAV_NO_STDIO + +/* drwav_result_from_errno() is only used for fopen() and wfopen() so putting it inside DR_WAV_NO_STDIO for now. If something else needs this later we can move it out. 
*/ +#include <errno.h> +static drwav_result drwav_result_from_errno(int e)
DRWAV_BAD_SEEK; + #endif + #ifdef EROFS + case EROFS: return DRWAV_ACCESS_DENIED; + #endif + #ifdef EMLINK + case EMLINK: return DRWAV_TOO_MANY_LINKS; + #endif + #ifdef EPIPE + case EPIPE: return DRWAV_BAD_PIPE; + #endif + #ifdef EDOM + case EDOM: return DRWAV_OUT_OF_RANGE; + #endif + #ifdef ERANGE + case ERANGE: return DRWAV_OUT_OF_RANGE; + #endif + #ifdef EDEADLK + case EDEADLK: return DRWAV_DEADLOCK; + #endif + #ifdef ENAMETOOLONG + case ENAMETOOLONG: return DRWAV_PATH_TOO_LONG; + #endif + #ifdef ENOLCK + case ENOLCK: return DRWAV_ERROR; + #endif + #ifdef ENOSYS + case ENOSYS: return DRWAV_NOT_IMPLEMENTED; + #endif + #ifdef ENOTEMPTY + case ENOTEMPTY: return DRWAV_DIRECTORY_NOT_EMPTY; + #endif + #ifdef ELOOP + case ELOOP: return DRWAV_TOO_MANY_LINKS; + #endif + #ifdef ENOMSG + case ENOMSG: return DRWAV_NO_MESSAGE; + #endif + #ifdef EIDRM + case EIDRM: return DRWAV_ERROR; + #endif + #ifdef ECHRNG + case ECHRNG: return DRWAV_ERROR; + #endif + #ifdef EL2NSYNC + case EL2NSYNC: return DRWAV_ERROR; + #endif + #ifdef EL3HLT + case EL3HLT: return DRWAV_ERROR; + #endif + #ifdef EL3RST + case EL3RST: return DRWAV_ERROR; + #endif + #ifdef ELNRNG + case ELNRNG: return DRWAV_OUT_OF_RANGE; + #endif + #ifdef EUNATCH + case EUNATCH: return DRWAV_ERROR; + #endif + #ifdef ENOCSI + case ENOCSI: return DRWAV_ERROR; + #endif + #ifdef EL2HLT + case EL2HLT: return DRWAV_ERROR; + #endif + #ifdef EBADE + case EBADE: return DRWAV_ERROR; + #endif + #ifdef EBADR + case EBADR: return DRWAV_ERROR; + #endif + #ifdef EXFULL + case EXFULL: return DRWAV_ERROR; + #endif + #ifdef ENOANO + case ENOANO: return DRWAV_ERROR; + #endif + #ifdef EBADRQC + case EBADRQC: return DRWAV_ERROR; + #endif + #ifdef EBADSLT + case EBADSLT: return DRWAV_ERROR; + #endif + #ifdef EBFONT + case EBFONT: return DRWAV_INVALID_FILE; + #endif + #ifdef ENOSTR + case ENOSTR: return DRWAV_ERROR; + #endif + #ifdef ENODATA + case ENODATA: return DRWAV_NO_DATA_AVAILABLE; + #endif + #ifdef ETIME + case ETIME: return 
DRWAV_TIMEOUT; + #endif + #ifdef ENOSR + case ENOSR: return DRWAV_NO_DATA_AVAILABLE; + #endif + #ifdef ENONET + case ENONET: return DRWAV_NO_NETWORK; + #endif + #ifdef ENOPKG + case ENOPKG: return DRWAV_ERROR; + #endif + #ifdef EREMOTE + case EREMOTE: return DRWAV_ERROR; + #endif + #ifdef ENOLINK + case ENOLINK: return DRWAV_ERROR; + #endif + #ifdef EADV + case EADV: return DRWAV_ERROR; + #endif + #ifdef ESRMNT + case ESRMNT: return DRWAV_ERROR; + #endif + #ifdef ECOMM + case ECOMM: return DRWAV_ERROR; + #endif + #ifdef EPROTO + case EPROTO: return DRWAV_ERROR; + #endif + #ifdef EMULTIHOP + case EMULTIHOP: return DRWAV_ERROR; + #endif + #ifdef EDOTDOT + case EDOTDOT: return DRWAV_ERROR; + #endif + #ifdef EBADMSG + case EBADMSG: return DRWAV_BAD_MESSAGE; + #endif + #ifdef EOVERFLOW + case EOVERFLOW: return DRWAV_TOO_BIG; + #endif + #ifdef ENOTUNIQ + case ENOTUNIQ: return DRWAV_NOT_UNIQUE; + #endif + #ifdef EBADFD + case EBADFD: return DRWAV_ERROR; + #endif + #ifdef EREMCHG + case EREMCHG: return DRWAV_ERROR; + #endif + #ifdef ELIBACC + case ELIBACC: return DRWAV_ACCESS_DENIED; + #endif + #ifdef ELIBBAD + case ELIBBAD: return DRWAV_INVALID_FILE; + #endif + #ifdef ELIBSCN + case ELIBSCN: return DRWAV_INVALID_FILE; + #endif + #ifdef ELIBMAX + case ELIBMAX: return DRWAV_ERROR; + #endif + #ifdef ELIBEXEC + case ELIBEXEC: return DRWAV_ERROR; + #endif + #ifdef EILSEQ + case EILSEQ: return DRWAV_INVALID_DATA; + #endif + #ifdef ERESTART + case ERESTART: return DRWAV_ERROR; + #endif + #ifdef ESTRPIPE + case ESTRPIPE: return DRWAV_ERROR; + #endif + #ifdef EUSERS + case EUSERS: return DRWAV_ERROR; + #endif + #ifdef ENOTSOCK + case ENOTSOCK: return DRWAV_NOT_SOCKET; + #endif + #ifdef EDESTADDRREQ + case EDESTADDRREQ: return DRWAV_NO_ADDRESS; + #endif + #ifdef EMSGSIZE + case EMSGSIZE: return DRWAV_TOO_BIG; + #endif + #ifdef EPROTOTYPE + case EPROTOTYPE: return DRWAV_BAD_PROTOCOL; + #endif + #ifdef ENOPROTOOPT + case ENOPROTOOPT: return DRWAV_PROTOCOL_UNAVAILABLE; + #endif + 
#ifdef EPROTONOSUPPORT + case EPROTONOSUPPORT: return DRWAV_PROTOCOL_NOT_SUPPORTED; + #endif + #ifdef ESOCKTNOSUPPORT + case ESOCKTNOSUPPORT: return DRWAV_SOCKET_NOT_SUPPORTED; + #endif + #ifdef EOPNOTSUPP + case EOPNOTSUPP: return DRWAV_INVALID_OPERATION; + #endif + #ifdef EPFNOSUPPORT + case EPFNOSUPPORT: return DRWAV_PROTOCOL_FAMILY_NOT_SUPPORTED; + #endif + #ifdef EAFNOSUPPORT + case EAFNOSUPPORT: return DRWAV_ADDRESS_FAMILY_NOT_SUPPORTED; + #endif + #ifdef EADDRINUSE + case EADDRINUSE: return DRWAV_ALREADY_IN_USE; + #endif + #ifdef EADDRNOTAVAIL + case EADDRNOTAVAIL: return DRWAV_ERROR; + #endif + #ifdef ENETDOWN + case ENETDOWN: return DRWAV_NO_NETWORK; + #endif + #ifdef ENETUNREACH + case ENETUNREACH: return DRWAV_NO_NETWORK; + #endif + #ifdef ENETRESET + case ENETRESET: return DRWAV_NO_NETWORK; + #endif + #ifdef ECONNABORTED + case ECONNABORTED: return DRWAV_NO_NETWORK; + #endif + #ifdef ECONNRESET + case ECONNRESET: return DRWAV_CONNECTION_RESET; + #endif + #ifdef ENOBUFS + case ENOBUFS: return DRWAV_NO_SPACE; + #endif + #ifdef EISCONN + case EISCONN: return DRWAV_ALREADY_CONNECTED; + #endif + #ifdef ENOTCONN + case ENOTCONN: return DRWAV_NOT_CONNECTED; + #endif + #ifdef ESHUTDOWN + case ESHUTDOWN: return DRWAV_ERROR; + #endif + #ifdef ETOOMANYREFS + case ETOOMANYREFS: return DRWAV_ERROR; + #endif + #ifdef ETIMEDOUT + case ETIMEDOUT: return DRWAV_TIMEOUT; + #endif + #ifdef ECONNREFUSED + case ECONNREFUSED: return DRWAV_CONNECTION_REFUSED; + #endif + #ifdef EHOSTDOWN + case EHOSTDOWN: return DRWAV_NO_HOST; + #endif + #ifdef EHOSTUNREACH + case EHOSTUNREACH: return DRWAV_NO_HOST; + #endif + #ifdef EALREADY + case EALREADY: return DRWAV_IN_PROGRESS; + #endif + #ifdef EINPROGRESS + case EINPROGRESS: return DRWAV_IN_PROGRESS; + #endif + #ifdef ESTALE + case ESTALE: return DRWAV_INVALID_FILE; + #endif + #ifdef EUCLEAN + case EUCLEAN: return DRWAV_ERROR; + #endif + #ifdef ENOTNAM + case ENOTNAM: return DRWAV_ERROR; + #endif + #ifdef ENAVAIL + case ENAVAIL: return 
DRWAV_ERROR; + #endif + #ifdef EISNAM + case EISNAM: return DRWAV_ERROR; + #endif + #ifdef EREMOTEIO + case EREMOTEIO: return DRWAV_IO_ERROR; + #endif + #ifdef EDQUOT + case EDQUOT: return DRWAV_NO_SPACE; + #endif + #ifdef ENOMEDIUM + case ENOMEDIUM: return DRWAV_DOES_NOT_EXIST; + #endif + #ifdef EMEDIUMTYPE + case EMEDIUMTYPE: return DRWAV_ERROR; + #endif + #ifdef ECANCELED + case ECANCELED: return DRWAV_CANCELLED; + #endif + #ifdef ENOKEY + case ENOKEY: return DRWAV_ERROR; + #endif + #ifdef EKEYEXPIRED + case EKEYEXPIRED: return DRWAV_ERROR; + #endif + #ifdef EKEYREVOKED + case EKEYREVOKED: return DRWAV_ERROR; + #endif + #ifdef EKEYREJECTED + case EKEYREJECTED: return DRWAV_ERROR; + #endif + #ifdef EOWNERDEAD + case EOWNERDEAD: return DRWAV_ERROR; + #endif + #ifdef ENOTRECOVERABLE + case ENOTRECOVERABLE: return DRWAV_ERROR; + #endif + #ifdef ERFKILL + case ERFKILL: return DRWAV_ERROR; + #endif + #ifdef EHWPOISON + case EHWPOISON: return DRWAV_ERROR; + #endif + default: return DRWAV_ERROR; + } +} + +static drwav_result drwav_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode) +{ +#if _MSC_VER && _MSC_VER >= 1400 + errno_t err; +#endif + + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return DRWAV_INVALID_ARGS; + } + +#if _MSC_VER && _MSC_VER >= 1400 + err = fopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return drwav_result_from_errno(err); + } +#else +#if defined(_WIN32) || defined(__APPLE__) + *ppFile = fopen(pFilePath, pOpenMode); +#else + #if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS == 64 && defined(_LARGEFILE64_SOURCE) + *ppFile = fopen64(pFilePath, pOpenMode); + #else + *ppFile = fopen(pFilePath, pOpenMode); + #endif +#endif + if (*ppFile == NULL) { + drwav_result result = drwav_result_from_errno(errno); + if (result == DRWAV_SUCCESS) { + result = DRWAV_ERROR; /* Just a safety check to make sure we never ever return success when pFile == NULL. 
*/ + } + + return result; + } +#endif + + return DRWAV_SUCCESS; +} + +/* +_wfopen() isn't always available in all compilation environments. + + * Windows only. + * MSVC seems to support it universally as far back as VC6 from what I can tell (haven't checked further back). + * MinGW-64 (both 32- and 64-bit) seems to support it. + * MinGW wraps it in !defined(__STRICT_ANSI__). + * OpenWatcom wraps it in !defined(_NO_EXT_KEYS). + +This can be reviewed as compatibility issues arise. The preference is to use _wfopen_s() and _wfopen() as opposed to the wcsrtombs() +fallback, so if you notice your compiler not detecting this properly I'm happy to look at adding support. +*/ +#if defined(_WIN32) + #if defined(_MSC_VER) || defined(__MINGW64__) || (!defined(__STRICT_ANSI__) && !defined(_NO_EXT_KEYS)) + #define DRWAV_HAS_WFOPEN + #endif +#endif + +static drwav_result drwav_wfopen(FILE** ppFile, const wchar_t* pFilePath, const wchar_t* pOpenMode, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return DRWAV_INVALID_ARGS; + } + +#if defined(DRWAV_HAS_WFOPEN) + { + /* Use _wfopen() on Windows. */ + #if defined(_MSC_VER) && _MSC_VER >= 1400 + errno_t err = _wfopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return drwav_result_from_errno(err); + } + #else + *ppFile = _wfopen(pFilePath, pOpenMode); + if (*ppFile == NULL) { + return drwav_result_from_errno(errno); + } + #endif + (void)pAllocationCallbacks; + } +#else + /* + Use fopen() on anything other than Windows. Requires a conversion. This is annoying because fopen() is locale specific. The only real way I can + think of to do this is with wcsrtombs(). Note that wcstombs() is apparently not thread-safe because it uses a static global mbstate_t object for + maintaining state. 
I've checked this with -std=c89 and it works, but if somebody gets a compiler error I'll look into improving compatibility.
SEEK_CUR : SEEK_SET) == 0; +} + +DRWAV_API drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_file_ex(pWav, filename, NULL, NULL, 0, pAllocationCallbacks); +} + + +static drwav_bool32 drwav_init_file__internal_FILE(drwav* pWav, FILE* pFile, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav_bool32 result; + + result = drwav_preinit(pWav, drwav__on_read_stdio, drwav__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != DRWAV_TRUE) { + fclose(pFile); + return result; + } + + result = drwav_init__internal(pWav, onChunk, pChunkUserData, flags); + if (result != DRWAV_TRUE) { + fclose(pFile); + return result; + } + + return DRWAV_TRUE; +} + +DRWAV_API drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (drwav_fopen(&pFile, filename, "rb") != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + + /* This takes ownership of the FILE* object. */ + return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* filename, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_file_ex_w(pWav, filename, NULL, NULL, 0, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (drwav_wfopen(&pFile, filename, L"rb", pAllocationCallbacks) != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + + /* This takes ownership of the FILE* object. 
*/ + return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); +} + + +static drwav_bool32 drwav_init_file_write__internal_FILE(drwav* pWav, FILE* pFile, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav_bool32 result; + + result = drwav_preinit_write(pWav, pFormat, isSequential, drwav__on_write_stdio, drwav__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != DRWAV_TRUE) { + fclose(pFile); + return result; + } + + result = drwav_init_write__internal(pWav, pFormat, totalSampleCount); + if (result != DRWAV_TRUE) { + fclose(pFile); + return result; + } + + return DRWAV_TRUE; +} + +static drwav_bool32 drwav_init_file_write__internal(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (drwav_fopen(&pFile, filename, "wb") != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + + /* This takes ownership of the FILE* object. */ + return drwav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks); +} + +static drwav_bool32 drwav_init_file_write_w__internal(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (drwav_wfopen(&pFile, filename, L"wb", pAllocationCallbacks) != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + + /* This takes ownership of the FILE* object. 
*/ + return drwav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_file_write__internal(pWav, filename, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_file_write__internal(pWav, filename, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return DRWAV_FALSE; + } + + return drwav_init_file_write_sequential(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_file_write_w__internal(pWav, filename, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_file_write_w__internal(pWav, filename, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const 
drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return DRWAV_FALSE; + } + + return drwav_init_file_write_sequential_w(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks); +} +#endif /* DR_WAV_NO_STDIO */ + + +static size_t drwav__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + drwav* pWav = (drwav*)pUserData; + size_t bytesRemaining; + + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(pWav->memoryStream.dataSize >= pWav->memoryStream.currentReadPos); + + bytesRemaining = pWav->memoryStream.dataSize - pWav->memoryStream.currentReadPos; + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + + if (bytesToRead > 0) { + DRWAV_COPY_MEMORY(pBufferOut, pWav->memoryStream.data + pWav->memoryStream.currentReadPos, bytesToRead); + pWav->memoryStream.currentReadPos += bytesToRead; + } + + return bytesToRead; +} + +static drwav_bool32 drwav__on_seek_memory(void* pUserData, int offset, drwav_seek_origin origin) +{ + drwav* pWav = (drwav*)pUserData; + DRWAV_ASSERT(pWav != NULL); + + if (origin == drwav_seek_origin_current) { + if (offset > 0) { + if (pWav->memoryStream.currentReadPos + offset > pWav->memoryStream.dataSize) { + return DRWAV_FALSE; /* Trying to seek too far forward. */ + } + } else { + if (pWav->memoryStream.currentReadPos < (size_t)-offset) { + return DRWAV_FALSE; /* Trying to seek too far backwards. */ + } + } + + /* This will never underflow thanks to the clamps above. */ + pWav->memoryStream.currentReadPos += offset; + } else { + if ((drwav_uint32)offset <= pWav->memoryStream.dataSize) { + pWav->memoryStream.currentReadPos = offset; + } else { + return DRWAV_FALSE; /* Trying to seek too far forward. 
*/ + } + } + + return DRWAV_TRUE; +} + +static size_t drwav__on_write_memory(void* pUserData, const void* pDataIn, size_t bytesToWrite) +{ + drwav* pWav = (drwav*)pUserData; + size_t bytesRemaining; + + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(pWav->memoryStreamWrite.dataCapacity >= pWav->memoryStreamWrite.currentWritePos); + + bytesRemaining = pWav->memoryStreamWrite.dataCapacity - pWav->memoryStreamWrite.currentWritePos; + if (bytesRemaining < bytesToWrite) { + /* Need to reallocate. */ + void* pNewData; + size_t newDataCapacity = (pWav->memoryStreamWrite.dataCapacity == 0) ? 256 : pWav->memoryStreamWrite.dataCapacity * 2; + + /* If doubling wasn't enough, just make it the minimum required size to write the data. */ + if ((newDataCapacity - pWav->memoryStreamWrite.currentWritePos) < bytesToWrite) { + newDataCapacity = pWav->memoryStreamWrite.currentWritePos + bytesToWrite; + } + + pNewData = drwav__realloc_from_callbacks(*pWav->memoryStreamWrite.ppData, newDataCapacity, pWav->memoryStreamWrite.dataCapacity, &pWav->allocationCallbacks); + if (pNewData == NULL) { + return 0; + } + + *pWav->memoryStreamWrite.ppData = pNewData; + pWav->memoryStreamWrite.dataCapacity = newDataCapacity; + } + + DRWAV_COPY_MEMORY(((drwav_uint8*)(*pWav->memoryStreamWrite.ppData)) + pWav->memoryStreamWrite.currentWritePos, pDataIn, bytesToWrite); + + pWav->memoryStreamWrite.currentWritePos += bytesToWrite; + if (pWav->memoryStreamWrite.dataSize < pWav->memoryStreamWrite.currentWritePos) { + pWav->memoryStreamWrite.dataSize = pWav->memoryStreamWrite.currentWritePos; + } + + *pWav->memoryStreamWrite.pDataSize = pWav->memoryStreamWrite.dataSize; + + return bytesToWrite; +} + +static drwav_bool32 drwav__on_seek_memory_write(void* pUserData, int offset, drwav_seek_origin origin) +{ + drwav* pWav = (drwav*)pUserData; + DRWAV_ASSERT(pWav != NULL); + + if (origin == drwav_seek_origin_current) { + if (offset > 0) { + if (pWav->memoryStreamWrite.currentWritePos + offset > 
pWav->memoryStreamWrite.dataSize) { + offset = (int)(pWav->memoryStreamWrite.dataSize - pWav->memoryStreamWrite.currentWritePos); /* Trying to seek too far forward. */ + } + } else { + if (pWav->memoryStreamWrite.currentWritePos < (size_t)-offset) { + offset = -(int)pWav->memoryStreamWrite.currentWritePos; /* Trying to seek too far backwards. */ + } + } + + /* This will never underflow thanks to the clamps above. */ + pWav->memoryStreamWrite.currentWritePos += offset; + } else { + if ((drwav_uint32)offset <= pWav->memoryStreamWrite.dataSize) { + pWav->memoryStreamWrite.currentWritePos = offset; + } else { + pWav->memoryStreamWrite.currentWritePos = pWav->memoryStreamWrite.dataSize; /* Trying to seek too far forward. */ + } + } + + return DRWAV_TRUE; +} + +DRWAV_API drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_memory_ex(pWav, data, dataSize, NULL, NULL, 0, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (data == NULL || dataSize == 0) { + return DRWAV_FALSE; + } + + if (!drwav_preinit(pWav, drwav__on_read_memory, drwav__on_seek_memory, pWav, pAllocationCallbacks)) { + return DRWAV_FALSE; + } + + pWav->memoryStream.data = (const drwav_uint8*)data; + pWav->memoryStream.dataSize = dataSize; + pWav->memoryStream.currentReadPos = 0; + + return drwav_init__internal(pWav, onChunk, pChunkUserData, flags); +} + + +static drwav_bool32 drwav_init_memory_write__internal(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (ppData == NULL || pDataSize == NULL) { + return DRWAV_FALSE; + } + + *ppData = NULL; /* Important 
because we're using realloc()! */ + *pDataSize = 0; + + if (!drwav_preinit_write(pWav, pFormat, isSequential, drwav__on_write_memory, drwav__on_seek_memory_write, pWav, pAllocationCallbacks)) { + return DRWAV_FALSE; + } + + pWav->memoryStreamWrite.ppData = ppData; + pWav->memoryStreamWrite.pDataSize = pDataSize; + pWav->memoryStreamWrite.dataSize = 0; + pWav->memoryStreamWrite.dataCapacity = 0; + pWav->memoryStreamWrite.currentWritePos = 0; + + return drwav_init_write__internal(pWav, pFormat, totalSampleCount); +} + +DRWAV_API drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks); +} + +DRWAV_API drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return DRWAV_FALSE; + } + + return drwav_init_memory_write_sequential(pWav, ppData, pDataSize, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks); +} + + + +DRWAV_API drwav_result drwav_uninit(drwav* pWav) +{ + drwav_result result = DRWAV_SUCCESS; + + if (pWav == NULL) { + return DRWAV_INVALID_ARGS; + } + + /* + If the drwav object was opened in write mode we'll need to finalize a few things: + - Make sure the "data" chunk is aligned to 16-bits for RIFF containers, or 64 bits for W64 containers. 
+ - Set the size of the "data" chunk. + */ + if (pWav->onWrite != NULL) { + drwav_uint32 paddingSize = 0; + + /* Padding. Do not adjust pWav->dataChunkDataSize - this should not include the padding. */ + if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) { + paddingSize = drwav__chunk_padding_size_riff(pWav->dataChunkDataSize); + } else { + paddingSize = drwav__chunk_padding_size_w64(pWav->dataChunkDataSize); + } + + if (paddingSize > 0) { + drwav_uint64 paddingData = 0; + drwav__write(pWav, &paddingData, paddingSize); /* Byte order does not matter for this. */ + } + + /* + Chunk sizes. When using sequential mode, these will have been filled in at initialization time. We only need + to do this when using non-sequential mode. + */ + if (pWav->onSeek && !pWav->isSequentialWrite) { + if (pWav->container == drwav_container_riff) { + /* The "RIFF" chunk size. */ + if (pWav->onSeek(pWav->pUserData, 4, drwav_seek_origin_start)) { + drwav_uint32 riffChunkSize = drwav__riff_chunk_size_riff(pWav->dataChunkDataSize); + drwav__write_u32ne_to_le(pWav, riffChunkSize); + } + + /* the "data" chunk size. */ + if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos + 4, drwav_seek_origin_start)) { + drwav_uint32 dataChunkSize = drwav__data_chunk_size_riff(pWav->dataChunkDataSize); + drwav__write_u32ne_to_le(pWav, dataChunkSize); + } + } else if (pWav->container == drwav_container_w64) { + /* The "RIFF" chunk size. */ + if (pWav->onSeek(pWav->pUserData, 16, drwav_seek_origin_start)) { + drwav_uint64 riffChunkSize = drwav__riff_chunk_size_w64(pWav->dataChunkDataSize); + drwav__write_u64ne_to_le(pWav, riffChunkSize); + } + + /* The "data" chunk size. 
*/ + if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos + 16, drwav_seek_origin_start)) { + drwav_uint64 dataChunkSize = drwav__data_chunk_size_w64(pWav->dataChunkDataSize); + drwav__write_u64ne_to_le(pWav, dataChunkSize); + } + } else if (pWav->container == drwav_container_rf64) { + /* We only need to update the ds64 chunk. The "RIFF" and "data" chunks always have their sizes set to 0xFFFFFFFF for RF64. */ + int ds64BodyPos = 12 + 8; + + /* The "RIFF" chunk size. */ + if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 0, drwav_seek_origin_start)) { + drwav_uint64 riffChunkSize = drwav__riff_chunk_size_rf64(pWav->dataChunkDataSize); + drwav__write_u64ne_to_le(pWav, riffChunkSize); + } + + /* The "data" chunk size. */ + if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 8, drwav_seek_origin_start)) { + drwav_uint64 dataChunkSize = drwav__data_chunk_size_rf64(pWav->dataChunkDataSize); + drwav__write_u64ne_to_le(pWav, dataChunkSize); + } + } + } + + /* Validation for sequential mode. */ + if (pWav->isSequentialWrite) { + if (pWav->dataChunkDataSize != pWav->dataChunkDataSizeTargetWrite) { + result = DRWAV_INVALID_FILE; + } + } + } + +#ifndef DR_WAV_NO_STDIO + /* + If we opened the file with drwav_open_file() we will want to close the file handle. We can know whether or not drwav_open_file() + was used by looking at the onRead and onSeek callbacks. + */ + if (pWav->onRead == drwav__on_read_stdio || pWav->onWrite == drwav__on_write_stdio) { + fclose((FILE*)pWav->pUserData); + } +#endif + + return result; +} + + + +DRWAV_API size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut) +{ + size_t bytesRead; + + if (pWav == NULL || bytesToRead == 0) { + return 0; + } + + if (bytesToRead > pWav->bytesRemaining) { + bytesToRead = (size_t)pWav->bytesRemaining; + } + + if (pBufferOut != NULL) { + bytesRead = pWav->onRead(pWav->pUserData, pBufferOut, bytesToRead); + } else { + /* We need to seek. 
If we fail, we need to read-and-discard to make sure we get a good byte count. */ + bytesRead = 0; + while (bytesRead < bytesToRead) { + size_t bytesToSeek = (bytesToRead - bytesRead); + if (bytesToSeek > 0x7FFFFFFF) { + bytesToSeek = 0x7FFFFFFF; + } + + if (pWav->onSeek(pWav->pUserData, (int)bytesToSeek, drwav_seek_origin_current) == DRWAV_FALSE) { + break; + } + + bytesRead += bytesToSeek; + } + + /* When we get here we may need to read-and-discard some data. */ + while (bytesRead < bytesToRead) { + drwav_uint8 buffer[4096]; + size_t bytesSeeked; + size_t bytesToSeek = (bytesToRead - bytesRead); + if (bytesToSeek > sizeof(buffer)) { + bytesToSeek = sizeof(buffer); + } + + bytesSeeked = pWav->onRead(pWav->pUserData, buffer, bytesToSeek); + bytesRead += bytesSeeked; + + if (bytesSeeked < bytesToSeek) { + break; /* Reached the end. */ + } + } + } + + pWav->bytesRemaining -= bytesRead; + return bytesRead; +} + + + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut) +{ + drwav_uint32 bytesPerFrame; + drwav_uint64 bytesToRead; /* Intentionally uint64 instead of size_t so we can do a check that we're not reading too much on 32-bit builds. */ + + if (pWav == NULL || framesToRead == 0) { + return 0; + } + + /* Cannot use this function for compressed formats. */ + if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) { + return 0; + } + + bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + /* Don't try to read more samples than can potentially fit in the output buffer. */ + bytesToRead = framesToRead * bytesPerFrame; + if (bytesToRead > DRWAV_SIZE_MAX) { + bytesToRead = (DRWAV_SIZE_MAX / bytesPerFrame) * bytesPerFrame; /* Round the number of bytes to read to a clean frame boundary. */ + } + + /* + Doing an explicit check here just to make it clear that we don't want to be attempt to read anything if there's no bytes to read. 
There + *could* be a time where it evaluates to 0 due to overflowing. + */ + if (bytesToRead == 0) { + return 0; + } + + return drwav_read_raw(pWav, (size_t)bytesToRead, pBufferOut) / bytesPerFrame; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut) +{ + drwav_uint64 framesRead = drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); + + if (pBufferOut != NULL) { + drwav__bswap_samples(pBufferOut, framesRead*pWav->channels, drwav_get_bytes_per_pcm_frame(pWav)/pWav->channels, pWav->translatedFormatTag); + } + + return framesRead; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut) +{ + if (drwav__is_little_endian()) { + return drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); + } else { + return drwav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); + } +} + + + +DRWAV_API drwav_bool32 drwav_seek_to_first_pcm_frame(drwav* pWav) +{ + if (pWav->onWrite != NULL) { + return DRWAV_FALSE; /* No seeking in write mode. */ + } + + if (!pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos, drwav_seek_origin_start)) { + return DRWAV_FALSE; + } + + if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) { + pWav->compressed.iCurrentPCMFrame = 0; + + /* Cached data needs to be cleared for compressed formats. */ + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { + DRWAV_ZERO_OBJECT(&pWav->msadpcm); + } else if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { + DRWAV_ZERO_OBJECT(&pWav->ima); + } else { + DRWAV_ASSERT(DRWAV_FALSE); /* If this assertion is triggered it means I've implemented a new compressed format but forgot to add a branch for it here. */ + } + } + + pWav->bytesRemaining = pWav->dataChunkDataSize; + return DRWAV_TRUE; +} + +DRWAV_API drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetFrameIndex) +{ + /* Seeking should be compatible with wave files > 2GB. 
*/ + + if (pWav == NULL || pWav->onSeek == NULL) { + return DRWAV_FALSE; + } + + /* No seeking in write mode. */ + if (pWav->onWrite != NULL) { + return DRWAV_FALSE; + } + + /* If there are no samples, just return DRWAV_TRUE without doing anything. */ + if (pWav->totalPCMFrameCount == 0) { + return DRWAV_TRUE; + } + + /* Make sure the sample is clamped. */ + if (targetFrameIndex >= pWav->totalPCMFrameCount) { + targetFrameIndex = pWav->totalPCMFrameCount - 1; + } + + /* + For compressed formats we just use a slow generic seek. If we are seeking forward we just seek forward. If we are going backwards we need + to seek back to the start. + */ + if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) { + /* TODO: This can be optimized. */ + + /* + If we're seeking forward it's simple - just keep reading samples until we hit the sample we're requesting. If we're seeking backwards, + we first need to seek back to the start and then just do the same thing as a forward seek. + */ + if (targetFrameIndex < pWav->compressed.iCurrentPCMFrame) { + if (!drwav_seek_to_first_pcm_frame(pWav)) { + return DRWAV_FALSE; + } + } + + if (targetFrameIndex > pWav->compressed.iCurrentPCMFrame) { + drwav_uint64 offsetInFrames = targetFrameIndex - pWav->compressed.iCurrentPCMFrame; + + drwav_int16 devnull[2048]; + while (offsetInFrames > 0) { + drwav_uint64 framesRead = 0; + drwav_uint64 framesToRead = offsetInFrames; + if (framesToRead > drwav_countof(devnull)/pWav->channels) { + framesToRead = drwav_countof(devnull)/pWav->channels; + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { + framesRead = drwav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, devnull); + } else if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { + framesRead = drwav_read_pcm_frames_s16__ima(pWav, framesToRead, devnull); + } else { + DRWAV_ASSERT(DRWAV_FALSE); /* If this assertion is triggered it means I've implemented a new compressed format but forgot to add a branch for it here. 
*/ + } + + if (framesRead != framesToRead) { + return DRWAV_FALSE; + } + + offsetInFrames -= framesRead; + } + } + } else { + drwav_uint64 totalSizeInBytes; + drwav_uint64 currentBytePos; + drwav_uint64 targetBytePos; + drwav_uint64 offset; + + totalSizeInBytes = pWav->totalPCMFrameCount * drwav_get_bytes_per_pcm_frame(pWav); + DRWAV_ASSERT(totalSizeInBytes >= pWav->bytesRemaining); + + currentBytePos = totalSizeInBytes - pWav->bytesRemaining; + targetBytePos = targetFrameIndex * drwav_get_bytes_per_pcm_frame(pWav); + + if (currentBytePos < targetBytePos) { + /* Offset forwards. */ + offset = (targetBytePos - currentBytePos); + } else { + /* Offset backwards. */ + if (!drwav_seek_to_first_pcm_frame(pWav)) { + return DRWAV_FALSE; + } + offset = targetBytePos; + } + + while (offset > 0) { + int offset32 = ((offset > INT_MAX) ? INT_MAX : (int)offset); + if (!pWav->onSeek(pWav->pUserData, offset32, drwav_seek_origin_current)) { + return DRWAV_FALSE; + } + + pWav->bytesRemaining -= offset32; + offset -= offset32; + } + } + + return DRWAV_TRUE; +} + + +DRWAV_API size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData) +{ + size_t bytesWritten; + + if (pWav == NULL || bytesToWrite == 0 || pData == NULL) { + return 0; + } + + bytesWritten = pWav->onWrite(pWav->pUserData, pData, bytesToWrite); + pWav->dataChunkDataSize += bytesWritten; + + return bytesWritten; +} + + +DRWAV_API drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint64 framesToWrite, const void* pData) +{ + drwav_uint64 bytesToWrite; + drwav_uint64 bytesWritten; + const drwav_uint8* pRunningData; + + if (pWav == NULL || framesToWrite == 0 || pData == NULL) { + return 0; + } + + bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8); + if (bytesToWrite > DRWAV_SIZE_MAX) { + return 0; + } + + bytesWritten = 0; + pRunningData = (const drwav_uint8*)pData; + + while (bytesToWrite > 0) { + size_t bytesJustWritten; + drwav_uint64 bytesToWriteThisIteration; + + 
bytesToWriteThisIteration = bytesToWrite; + DRWAV_ASSERT(bytesToWriteThisIteration <= DRWAV_SIZE_MAX); /* <-- This is checked above. */ + + bytesJustWritten = drwav_write_raw(pWav, (size_t)bytesToWriteThisIteration, pRunningData); + if (bytesJustWritten == 0) { + break; + } + + bytesToWrite -= bytesJustWritten; + bytesWritten += bytesJustWritten; + pRunningData += bytesJustWritten; + } + + return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels; +} + +DRWAV_API drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 framesToWrite, const void* pData) +{ + drwav_uint64 bytesToWrite; + drwav_uint64 bytesWritten; + drwav_uint32 bytesPerSample; + const drwav_uint8* pRunningData; + + if (pWav == NULL || framesToWrite == 0 || pData == NULL) { + return 0; + } + + bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8); + if (bytesToWrite > DRWAV_SIZE_MAX) { + return 0; + } + + bytesWritten = 0; + pRunningData = (const drwav_uint8*)pData; + + bytesPerSample = drwav_get_bytes_per_pcm_frame(pWav) / pWav->channels; + + while (bytesToWrite > 0) { + drwav_uint8 temp[4096]; + drwav_uint32 sampleCount; + size_t bytesJustWritten; + drwav_uint64 bytesToWriteThisIteration; + + bytesToWriteThisIteration = bytesToWrite; + DRWAV_ASSERT(bytesToWriteThisIteration <= DRWAV_SIZE_MAX); /* <-- This is checked above. */ + + /* + WAV files are always little-endian. We need to byte swap on big-endian architectures. Since our input buffer is read-only we need + to use an intermediary buffer for the conversion. 
+ */ + sampleCount = sizeof(temp)/bytesPerSample; + + if (bytesToWriteThisIteration > ((drwav_uint64)sampleCount)*bytesPerSample) { + bytesToWriteThisIteration = ((drwav_uint64)sampleCount)*bytesPerSample; + } + + DRWAV_COPY_MEMORY(temp, pRunningData, (size_t)bytesToWriteThisIteration); + drwav__bswap_samples(temp, sampleCount, bytesPerSample, pWav->translatedFormatTag); + + bytesJustWritten = drwav_write_raw(pWav, (size_t)bytesToWriteThisIteration, temp); + if (bytesJustWritten == 0) { + break; + } + + bytesToWrite -= bytesJustWritten; + bytesWritten += bytesJustWritten; + pRunningData += bytesJustWritten; + } + + return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels; +} + +DRWAV_API drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData) +{ + if (drwav__is_little_endian()) { + return drwav_write_pcm_frames_le(pWav, framesToWrite, pData); + } else { + return drwav_write_pcm_frames_be(pWav, framesToWrite, pData); + } +} + + +static drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) +{ + drwav_uint64 totalFramesRead = 0; + + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(framesToRead > 0); + + /* TODO: Lots of room for optimization here. */ + + while (framesToRead > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) { + /* If there are no cached frames we need to load a new block. */ + if (pWav->msadpcm.cachedFrameCount == 0 && pWav->msadpcm.bytesRemainingInBlock == 0) { + if (pWav->channels == 1) { + /* Mono. 
*/ + drwav_uint8 header[7]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + + pWav->msadpcm.predictor[0] = header[0]; + pWav->msadpcm.delta[0] = drwav__bytes_to_s16(header + 1); + pWav->msadpcm.prevFrames[0][1] = (drwav_int32)drwav__bytes_to_s16(header + 3); + pWav->msadpcm.prevFrames[0][0] = (drwav_int32)drwav__bytes_to_s16(header + 5); + pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][0]; + pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.cachedFrameCount = 2; + } else { + /* Stereo. */ + drwav_uint8 header[14]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + + pWav->msadpcm.predictor[0] = header[0]; + pWav->msadpcm.predictor[1] = header[1]; + pWav->msadpcm.delta[0] = drwav__bytes_to_s16(header + 2); + pWav->msadpcm.delta[1] = drwav__bytes_to_s16(header + 4); + pWav->msadpcm.prevFrames[0][1] = (drwav_int32)drwav__bytes_to_s16(header + 6); + pWav->msadpcm.prevFrames[1][1] = (drwav_int32)drwav__bytes_to_s16(header + 8); + pWav->msadpcm.prevFrames[0][0] = (drwav_int32)drwav__bytes_to_s16(header + 10); + pWav->msadpcm.prevFrames[1][0] = (drwav_int32)drwav__bytes_to_s16(header + 12); + + pWav->msadpcm.cachedFrames[0] = pWav->msadpcm.prevFrames[0][0]; + pWav->msadpcm.cachedFrames[1] = pWav->msadpcm.prevFrames[1][0]; + pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[1][1]; + pWav->msadpcm.cachedFrameCount = 2; + } + } + + /* Output anything that's cached. 
*/ + while (framesToRead > 0 && pWav->msadpcm.cachedFrameCount > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) { + if (pBufferOut != NULL) { + drwav_uint32 iSample = 0; + for (iSample = 0; iSample < pWav->channels; iSample += 1) { + pBufferOut[iSample] = (drwav_int16)pWav->msadpcm.cachedFrames[(drwav_countof(pWav->msadpcm.cachedFrames) - (pWav->msadpcm.cachedFrameCount*pWav->channels)) + iSample]; + } + + pBufferOut += pWav->channels; + } + + framesToRead -= 1; + totalFramesRead += 1; + pWav->compressed.iCurrentPCMFrame += 1; + pWav->msadpcm.cachedFrameCount -= 1; + } + + if (framesToRead == 0) { + return totalFramesRead; + } + + + /* + If there's nothing left in the cache, just go ahead and load more. If there's nothing left to load in the current block we just continue to the next + loop iteration which will trigger the loading of a new block. + */ + if (pWav->msadpcm.cachedFrameCount == 0) { + if (pWav->msadpcm.bytesRemainingInBlock == 0) { + continue; + } else { + static drwav_int32 adaptationTable[] = { + 230, 230, 230, 230, 307, 409, 512, 614, + 768, 614, 512, 409, 307, 230, 230, 230 + }; + static drwav_int32 coeff1Table[] = { 256, 512, 0, 192, 240, 460, 392 }; + static drwav_int32 coeff2Table[] = { 0, -256, 0, 64, 0, -208, -232 }; + + drwav_uint8 nibbles; + drwav_int32 nibble0; + drwav_int32 nibble1; + + if (pWav->onRead(pWav->pUserData, &nibbles, 1) != 1) { + return totalFramesRead; + } + pWav->msadpcm.bytesRemainingInBlock -= 1; + + /* TODO: Optimize away these if statements. */ + nibble0 = ((nibbles & 0xF0) >> 4); if ((nibbles & 0x80)) { nibble0 |= 0xFFFFFFF0UL; } + nibble1 = ((nibbles & 0x0F) >> 0); if ((nibbles & 0x08)) { nibble1 |= 0xFFFFFFF0UL; } + + if (pWav->channels == 1) { + /* Mono. 
*/ + drwav_int32 newSample0; + drwav_int32 newSample1; + + newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; + newSample0 += nibble0 * pWav->msadpcm.delta[0]; + newSample0 = drwav_clamp(newSample0, -32768, 32767); + + pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8; + if (pWav->msadpcm.delta[0] < 16) { + pWav->msadpcm.delta[0] = 16; + } + + pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.prevFrames[0][1] = newSample0; + + + newSample1 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; + newSample1 += nibble1 * pWav->msadpcm.delta[0]; + newSample1 = drwav_clamp(newSample1, -32768, 32767); + + pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[0]) >> 8; + if (pWav->msadpcm.delta[0] < 16) { + pWav->msadpcm.delta[0] = 16; + } + + pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.prevFrames[0][1] = newSample1; + + + pWav->msadpcm.cachedFrames[2] = newSample0; + pWav->msadpcm.cachedFrames[3] = newSample1; + pWav->msadpcm.cachedFrameCount = 2; + } else { + /* Stereo. */ + drwav_int32 newSample0; + drwav_int32 newSample1; + + /* Left. 
*/ + newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; + newSample0 += nibble0 * pWav->msadpcm.delta[0]; + newSample0 = drwav_clamp(newSample0, -32768, 32767); + + pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8; + if (pWav->msadpcm.delta[0] < 16) { + pWav->msadpcm.delta[0] = 16; + } + + pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.prevFrames[0][1] = newSample0; + + + /* Right. */ + newSample1 = ((pWav->msadpcm.prevFrames[1][1] * coeff1Table[pWav->msadpcm.predictor[1]]) + (pWav->msadpcm.prevFrames[1][0] * coeff2Table[pWav->msadpcm.predictor[1]])) >> 8; + newSample1 += nibble1 * pWav->msadpcm.delta[1]; + newSample1 = drwav_clamp(newSample1, -32768, 32767); + + pWav->msadpcm.delta[1] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[1]) >> 8; + if (pWav->msadpcm.delta[1] < 16) { + pWav->msadpcm.delta[1] = 16; + } + + pWav->msadpcm.prevFrames[1][0] = pWav->msadpcm.prevFrames[1][1]; + pWav->msadpcm.prevFrames[1][1] = newSample1; + + pWav->msadpcm.cachedFrames[2] = newSample0; + pWav->msadpcm.cachedFrames[3] = newSample1; + pWav->msadpcm.cachedFrameCount = 1; + } + } + } + } + + return totalFramesRead; +} + + +static drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) +{ + drwav_uint64 totalFramesRead = 0; + drwav_uint32 iChannel; + + static drwav_int32 indexTable[16] = { + -1, -1, -1, -1, 2, 4, 6, 8, + -1, -1, -1, -1, 2, 4, 6, 8 + }; + + static drwav_int32 stepTable[89] = { + 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, + 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, + 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, + 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, + 337, 371, 408, 449, 494, 544, 598, 658, 724, 796, + 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, + 2272, 2499, 2749, 3024, 3327, 
3660, 4026, 4428, 4871, 5358, + 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899, + 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767 + }; + + DRWAV_ASSERT(pWav != NULL); + DRWAV_ASSERT(framesToRead > 0); + + /* TODO: Lots of room for optimization here. */ + + while (framesToRead > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) { + /* If there are no cached samples we need to load a new block. */ + if (pWav->ima.cachedFrameCount == 0 && pWav->ima.bytesRemainingInBlock == 0) { + if (pWav->channels == 1) { + /* Mono. */ + drwav_uint8 header[4]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + + if (header[2] >= drwav_countof(stepTable)) { + pWav->onSeek(pWav->pUserData, pWav->ima.bytesRemainingInBlock, drwav_seek_origin_current); + pWav->ima.bytesRemainingInBlock = 0; + return totalFramesRead; /* Invalid data. */ + } + + pWav->ima.predictor[0] = drwav__bytes_to_s16(header + 0); + pWav->ima.stepIndex[0] = header[2]; + pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[0]; + pWav->ima.cachedFrameCount = 1; + } else { + /* Stereo. */ + drwav_uint8 header[8]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + + if (header[2] >= drwav_countof(stepTable) || header[6] >= drwav_countof(stepTable)) { + pWav->onSeek(pWav->pUserData, pWav->ima.bytesRemainingInBlock, drwav_seek_origin_current); + pWav->ima.bytesRemainingInBlock = 0; + return totalFramesRead; /* Invalid data. 
*/ + } + + pWav->ima.predictor[0] = drwav__bytes_to_s16(header + 0); + pWav->ima.stepIndex[0] = header[2]; + pWav->ima.predictor[1] = drwav__bytes_to_s16(header + 4); + pWav->ima.stepIndex[1] = header[6]; + + pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 2] = pWav->ima.predictor[0]; + pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[1]; + pWav->ima.cachedFrameCount = 1; + } + } + + /* Output anything that's cached. */ + while (framesToRead > 0 && pWav->ima.cachedFrameCount > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) { + if (pBufferOut != NULL) { + drwav_uint32 iSample; + for (iSample = 0; iSample < pWav->channels; iSample += 1) { + pBufferOut[iSample] = (drwav_int16)pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + iSample]; + } + pBufferOut += pWav->channels; + } + + framesToRead -= 1; + totalFramesRead += 1; + pWav->compressed.iCurrentPCMFrame += 1; + pWav->ima.cachedFrameCount -= 1; + } + + if (framesToRead == 0) { + return totalFramesRead; + } + + /* + If there's nothing left in the cache, just go ahead and load more. If there's nothing left to load in the current block we just continue to the next + loop iteration which will trigger the loading of a new block. + */ + if (pWav->ima.cachedFrameCount == 0) { + if (pWav->ima.bytesRemainingInBlock == 0) { + continue; + } else { + /* + From what I can tell with stereo streams, it looks like every 4 bytes (8 samples) is for one channel. So it goes 4 bytes for the + left channel, 4 bytes for the right channel. 
+ */ + pWav->ima.cachedFrameCount = 8; + for (iChannel = 0; iChannel < pWav->channels; ++iChannel) { + drwav_uint32 iByte; + drwav_uint8 nibbles[4]; + if (pWav->onRead(pWav->pUserData, &nibbles, 4) != 4) { + pWav->ima.cachedFrameCount = 0; + return totalFramesRead; + } + pWav->ima.bytesRemainingInBlock -= 4; + + for (iByte = 0; iByte < 4; ++iByte) { + drwav_uint8 nibble0 = ((nibbles[iByte] & 0x0F) >> 0); + drwav_uint8 nibble1 = ((nibbles[iByte] & 0xF0) >> 4); + + drwav_int32 step = stepTable[pWav->ima.stepIndex[iChannel]]; + drwav_int32 predictor = pWav->ima.predictor[iChannel]; + + drwav_int32 diff = step >> 3; + if (nibble0 & 1) diff += step >> 2; + if (nibble0 & 2) diff += step >> 1; + if (nibble0 & 4) diff += step; + if (nibble0 & 8) diff = -diff; + + predictor = drwav_clamp(predictor + diff, -32768, 32767); + pWav->ima.predictor[iChannel] = predictor; + pWav->ima.stepIndex[iChannel] = drwav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble0], 0, (drwav_int32)drwav_countof(stepTable)-1); + pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+0)*pWav->channels + iChannel] = predictor; + + + step = stepTable[pWav->ima.stepIndex[iChannel]]; + predictor = pWav->ima.predictor[iChannel]; + + diff = step >> 3; + if (nibble1 & 1) diff += step >> 2; + if (nibble1 & 2) diff += step >> 1; + if (nibble1 & 4) diff += step; + if (nibble1 & 8) diff = -diff; + + predictor = drwav_clamp(predictor + diff, -32768, 32767); + pWav->ima.predictor[iChannel] = predictor; + pWav->ima.stepIndex[iChannel] = drwav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble1], 0, (drwav_int32)drwav_countof(stepTable)-1); + pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+1)*pWav->channels + iChannel] = predictor; + } + } + } + } + } + + return totalFramesRead; +} + + +#ifndef DR_WAV_NO_CONVERSION_API +static unsigned short g_drwavAlawTable[256] = { + 
0xEA80, 0xEB80, 0xE880, 0xE980, 0xEE80, 0xEF80, 0xEC80, 0xED80, 0xE280, 0xE380, 0xE080, 0xE180, 0xE680, 0xE780, 0xE480, 0xE580, + 0xF540, 0xF5C0, 0xF440, 0xF4C0, 0xF740, 0xF7C0, 0xF640, 0xF6C0, 0xF140, 0xF1C0, 0xF040, 0xF0C0, 0xF340, 0xF3C0, 0xF240, 0xF2C0, + 0xAA00, 0xAE00, 0xA200, 0xA600, 0xBA00, 0xBE00, 0xB200, 0xB600, 0x8A00, 0x8E00, 0x8200, 0x8600, 0x9A00, 0x9E00, 0x9200, 0x9600, + 0xD500, 0xD700, 0xD100, 0xD300, 0xDD00, 0xDF00, 0xD900, 0xDB00, 0xC500, 0xC700, 0xC100, 0xC300, 0xCD00, 0xCF00, 0xC900, 0xCB00, + 0xFEA8, 0xFEB8, 0xFE88, 0xFE98, 0xFEE8, 0xFEF8, 0xFEC8, 0xFED8, 0xFE28, 0xFE38, 0xFE08, 0xFE18, 0xFE68, 0xFE78, 0xFE48, 0xFE58, + 0xFFA8, 0xFFB8, 0xFF88, 0xFF98, 0xFFE8, 0xFFF8, 0xFFC8, 0xFFD8, 0xFF28, 0xFF38, 0xFF08, 0xFF18, 0xFF68, 0xFF78, 0xFF48, 0xFF58, + 0xFAA0, 0xFAE0, 0xFA20, 0xFA60, 0xFBA0, 0xFBE0, 0xFB20, 0xFB60, 0xF8A0, 0xF8E0, 0xF820, 0xF860, 0xF9A0, 0xF9E0, 0xF920, 0xF960, + 0xFD50, 0xFD70, 0xFD10, 0xFD30, 0xFDD0, 0xFDF0, 0xFD90, 0xFDB0, 0xFC50, 0xFC70, 0xFC10, 0xFC30, 0xFCD0, 0xFCF0, 0xFC90, 0xFCB0, + 0x1580, 0x1480, 0x1780, 0x1680, 0x1180, 0x1080, 0x1380, 0x1280, 0x1D80, 0x1C80, 0x1F80, 0x1E80, 0x1980, 0x1880, 0x1B80, 0x1A80, + 0x0AC0, 0x0A40, 0x0BC0, 0x0B40, 0x08C0, 0x0840, 0x09C0, 0x0940, 0x0EC0, 0x0E40, 0x0FC0, 0x0F40, 0x0CC0, 0x0C40, 0x0DC0, 0x0D40, + 0x5600, 0x5200, 0x5E00, 0x5A00, 0x4600, 0x4200, 0x4E00, 0x4A00, 0x7600, 0x7200, 0x7E00, 0x7A00, 0x6600, 0x6200, 0x6E00, 0x6A00, + 0x2B00, 0x2900, 0x2F00, 0x2D00, 0x2300, 0x2100, 0x2700, 0x2500, 0x3B00, 0x3900, 0x3F00, 0x3D00, 0x3300, 0x3100, 0x3700, 0x3500, + 0x0158, 0x0148, 0x0178, 0x0168, 0x0118, 0x0108, 0x0138, 0x0128, 0x01D8, 0x01C8, 0x01F8, 0x01E8, 0x0198, 0x0188, 0x01B8, 0x01A8, + 0x0058, 0x0048, 0x0078, 0x0068, 0x0018, 0x0008, 0x0038, 0x0028, 0x00D8, 0x00C8, 0x00F8, 0x00E8, 0x0098, 0x0088, 0x00B8, 0x00A8, + 0x0560, 0x0520, 0x05E0, 0x05A0, 0x0460, 0x0420, 0x04E0, 0x04A0, 0x0760, 0x0720, 0x07E0, 0x07A0, 0x0660, 0x0620, 0x06E0, 0x06A0, + 0x02B0, 0x0290, 0x02F0, 0x02D0, 0x0230, 0x0210, 
0x0270, 0x0250, 0x03B0, 0x0390, 0x03F0, 0x03D0, 0x0330, 0x0310, 0x0370, 0x0350 +}; + +static unsigned short g_drwavMulawTable[256] = { + 0x8284, 0x8684, 0x8A84, 0x8E84, 0x9284, 0x9684, 0x9A84, 0x9E84, 0xA284, 0xA684, 0xAA84, 0xAE84, 0xB284, 0xB684, 0xBA84, 0xBE84, + 0xC184, 0xC384, 0xC584, 0xC784, 0xC984, 0xCB84, 0xCD84, 0xCF84, 0xD184, 0xD384, 0xD584, 0xD784, 0xD984, 0xDB84, 0xDD84, 0xDF84, + 0xE104, 0xE204, 0xE304, 0xE404, 0xE504, 0xE604, 0xE704, 0xE804, 0xE904, 0xEA04, 0xEB04, 0xEC04, 0xED04, 0xEE04, 0xEF04, 0xF004, + 0xF0C4, 0xF144, 0xF1C4, 0xF244, 0xF2C4, 0xF344, 0xF3C4, 0xF444, 0xF4C4, 0xF544, 0xF5C4, 0xF644, 0xF6C4, 0xF744, 0xF7C4, 0xF844, + 0xF8A4, 0xF8E4, 0xF924, 0xF964, 0xF9A4, 0xF9E4, 0xFA24, 0xFA64, 0xFAA4, 0xFAE4, 0xFB24, 0xFB64, 0xFBA4, 0xFBE4, 0xFC24, 0xFC64, + 0xFC94, 0xFCB4, 0xFCD4, 0xFCF4, 0xFD14, 0xFD34, 0xFD54, 0xFD74, 0xFD94, 0xFDB4, 0xFDD4, 0xFDF4, 0xFE14, 0xFE34, 0xFE54, 0xFE74, + 0xFE8C, 0xFE9C, 0xFEAC, 0xFEBC, 0xFECC, 0xFEDC, 0xFEEC, 0xFEFC, 0xFF0C, 0xFF1C, 0xFF2C, 0xFF3C, 0xFF4C, 0xFF5C, 0xFF6C, 0xFF7C, + 0xFF88, 0xFF90, 0xFF98, 0xFFA0, 0xFFA8, 0xFFB0, 0xFFB8, 0xFFC0, 0xFFC8, 0xFFD0, 0xFFD8, 0xFFE0, 0xFFE8, 0xFFF0, 0xFFF8, 0x0000, + 0x7D7C, 0x797C, 0x757C, 0x717C, 0x6D7C, 0x697C, 0x657C, 0x617C, 0x5D7C, 0x597C, 0x557C, 0x517C, 0x4D7C, 0x497C, 0x457C, 0x417C, + 0x3E7C, 0x3C7C, 0x3A7C, 0x387C, 0x367C, 0x347C, 0x327C, 0x307C, 0x2E7C, 0x2C7C, 0x2A7C, 0x287C, 0x267C, 0x247C, 0x227C, 0x207C, + 0x1EFC, 0x1DFC, 0x1CFC, 0x1BFC, 0x1AFC, 0x19FC, 0x18FC, 0x17FC, 0x16FC, 0x15FC, 0x14FC, 0x13FC, 0x12FC, 0x11FC, 0x10FC, 0x0FFC, + 0x0F3C, 0x0EBC, 0x0E3C, 0x0DBC, 0x0D3C, 0x0CBC, 0x0C3C, 0x0BBC, 0x0B3C, 0x0ABC, 0x0A3C, 0x09BC, 0x093C, 0x08BC, 0x083C, 0x07BC, + 0x075C, 0x071C, 0x06DC, 0x069C, 0x065C, 0x061C, 0x05DC, 0x059C, 0x055C, 0x051C, 0x04DC, 0x049C, 0x045C, 0x041C, 0x03DC, 0x039C, + 0x036C, 0x034C, 0x032C, 0x030C, 0x02EC, 0x02CC, 0x02AC, 0x028C, 0x026C, 0x024C, 0x022C, 0x020C, 0x01EC, 0x01CC, 0x01AC, 0x018C, + 0x0174, 0x0164, 0x0154, 0x0144, 0x0134, 
0x0124, 0x0114, 0x0104, 0x00F4, 0x00E4, 0x00D4, 0x00C4, 0x00B4, 0x00A4, 0x0094, 0x0084, + 0x0078, 0x0070, 0x0068, 0x0060, 0x0058, 0x0050, 0x0048, 0x0040, 0x0038, 0x0030, 0x0028, 0x0020, 0x0018, 0x0010, 0x0008, 0x0000 +}; + +static DRWAV_INLINE drwav_int16 drwav__alaw_to_s16(drwav_uint8 sampleIn) +{ + return (short)g_drwavAlawTable[sampleIn]; +} + +static DRWAV_INLINE drwav_int16 drwav__mulaw_to_s16(drwav_uint8 sampleIn) +{ + return (short)g_drwavMulawTable[sampleIn]; +} + + + +static void drwav__pcm_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + unsigned int i; + + /* Special case for 8-bit sample data because it's treated as unsigned. */ + if (bytesPerSample == 1) { + drwav_u8_to_s16(pOut, pIn, totalSampleCount); + return; + } + + + /* Slightly more optimal implementation for common formats. */ + if (bytesPerSample == 2) { + for (i = 0; i < totalSampleCount; ++i) { + *pOut++ = ((const drwav_int16*)pIn)[i]; + } + return; + } + if (bytesPerSample == 3) { + drwav_s24_to_s16(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 4) { + drwav_s32_to_s16(pOut, (const drwav_int32*)pIn, totalSampleCount); + return; + } + + + /* Anything more than 64 bits per sample is not supported. */ + if (bytesPerSample > 8) { + DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } + + + /* Generic, slow converter. 
*/ + for (i = 0; i < totalSampleCount; ++i) { + drwav_uint64 sample = 0; + unsigned int shift = (8 - bytesPerSample) * 8; + + unsigned int j; + for (j = 0; j < bytesPerSample; j += 1) { + DRWAV_ASSERT(j < 8); + sample |= (drwav_uint64)(pIn[j]) << shift; + shift += 8; + } + + pIn += j; + *pOut++ = (drwav_int16)((drwav_int64)sample >> 48); + } +} + +static void drwav__ieee_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + if (bytesPerSample == 4) { + drwav_f32_to_s16(pOut, (const float*)pIn, totalSampleCount); + return; + } else if (bytesPerSample == 8) { + drwav_f64_to_s16(pOut, (const double*)pIn, totalSampleCount); + return; + } else { + /* Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome for 16-bit float. */ + DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } +} + +static drwav_uint64 drwav_read_pcm_frames_s16__pcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) +{ + drwav_uint32 bytesPerFrame; + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + + /* Fast path. 
*/
+ /* 16-bit PCM needs no conversion; a NULL output buffer is forwarded to the raw reader as-is. */ if ((pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 16) || pBufferOut == NULL) {
+ return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut);
+ }
+
+ bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
+ return 0;
+ }
+
+ totalFramesRead = 0;
+
+ /* Chunked conversion: pull at most sizeof(sampleData)/bytesPerFrame whole frames per pass, convert, advance. */ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
+ break;
+ }
+
+ drwav__pcm_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels);
+
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
+ }
+
+ return totalFramesRead;
+ }
+
+ /* s16 read path for IEEE-float sources: raw frames are read into a small stack buffer and converted per chunk by drwav__ieee_to_s16 (32- and 64-bit float only; other widths are zeroed). */ static drwav_uint64 drwav_read_pcm_frames_s16__ieee(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
+ {
+ drwav_uint64 totalFramesRead;
+ drwav_uint8 sampleData[4096];
+ drwav_uint32 bytesPerFrame;
+
+ if (pBufferOut == NULL) {
+ return drwav_read_pcm_frames(pWav, framesToRead, NULL);
+ }
+
+ bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
+ return 0;
+ }
+
+ totalFramesRead = 0;
+
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
+ break;
+ }
+
+ drwav__ieee_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels);
+
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
+ }
+
+ return totalFramesRead;
+ }
+
+ /* s16 read path for A-law sources: 8-bit companded samples are read into a stack buffer and expanded via drwav_alaw_to_s16's lookup table. */ static drwav_uint64 drwav_read_pcm_frames_s16__alaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
+ {
+ drwav_uint64 totalFramesRead;
+ drwav_uint8 sampleData[4096];
+ drwav_uint32 bytesPerFrame;
+
+ if (pBufferOut == NULL) {
+ return drwav_read_pcm_frames(pWav, framesToRead, NULL);
+ }
+
+ bytesPerFrame =
drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav_alaw_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels)); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_s16__mulaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + drwav_uint32 bytesPerFrame; + + if (pBufferOut == NULL) { + return drwav_read_pcm_frames(pWav, framesToRead, NULL); + } + + bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav_mulaw_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels)); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) +{ + if (pWav == NULL || framesToRead == 0) { + return 0; + } + + if (pBufferOut == NULL) { + return drwav_read_pcm_frames(pWav, framesToRead, NULL); + } + + /* Don't try to read more samples than can potentially fit in the output buffer. 
*/ + if (framesToRead * pWav->channels * sizeof(drwav_int16) > DRWAV_SIZE_MAX) { + framesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int16) / pWav->channels; + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) { + return drwav_read_pcm_frames_s16__pcm(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) { + return drwav_read_pcm_frames_s16__ieee(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) { + return drwav_read_pcm_frames_s16__alaw(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) { + return drwav_read_pcm_frames_s16__mulaw(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { + return drwav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { + return drwav_read_pcm_frames_s16__ima(pWav, framesToRead, pBufferOut); + } + + return 0; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) +{ + drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_FALSE) { + drwav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels); + } + + return framesRead; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) +{ + drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_TRUE) { + drwav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels); + } + + return framesRead; +} + + +DRWAV_API void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + int x = pIn[i]; + r = x << 8; + r = r - 32768; + pOut[i] = 
(short)r; + } +} + +DRWAV_API void drwav_s24_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + int x = ((int)(((unsigned int)(((const drwav_uint8*)pIn)[i*3+0]) << 8) | ((unsigned int)(((const drwav_uint8*)pIn)[i*3+1]) << 16) | ((unsigned int)(((const drwav_uint8*)pIn)[i*3+2])) << 24)) >> 8; + r = x >> 8; + pOut[i] = (short)r; + } +} + +DRWAV_API void drwav_s32_to_s16(drwav_int16* pOut, const drwav_int32* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + int x = pIn[i]; + r = x >> 16; + pOut[i] = (short)r; + } +} + +DRWAV_API void drwav_f32_to_s16(drwav_int16* pOut, const float* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + float x = pIn[i]; + float c; + c = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); + c = c + 1; + r = (int)(c * 32767.5f); + r = r - 32768; + pOut[i] = (short)r; + } +} + +DRWAV_API void drwav_f64_to_s16(drwav_int16* pOut, const double* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + double x = pIn[i]; + double c; + c = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); + c = c + 1; + r = (int)(c * 32767.5); + r = r - 32768; + pOut[i] = (short)r; + } +} + +DRWAV_API void drwav_alaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + for (i = 0; i < sampleCount; ++i) { + pOut[i] = drwav__alaw_to_s16(pIn[i]); + } +} + +DRWAV_API void drwav_mulaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + for (i = 0; i < sampleCount; ++i) { + pOut[i] = drwav__mulaw_to_s16(pIn[i]); + } +} + + + +static void drwav__pcm_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample) +{ + unsigned int i; + + /* Special case for 8-bit sample data because it's treated as unsigned. 
*/ + if (bytesPerSample == 1) { + drwav_u8_to_f32(pOut, pIn, sampleCount); + return; + } + + /* Slightly more optimal implementation for common formats. */ + if (bytesPerSample == 2) { + drwav_s16_to_f32(pOut, (const drwav_int16*)pIn, sampleCount); + return; + } + if (bytesPerSample == 3) { + drwav_s24_to_f32(pOut, pIn, sampleCount); + return; + } + if (bytesPerSample == 4) { + drwav_s32_to_f32(pOut, (const drwav_int32*)pIn, sampleCount); + return; + } + + + /* Anything more than 64 bits per sample is not supported. */ + if (bytesPerSample > 8) { + DRWAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut)); + return; + } + + + /* Generic, slow converter. */ + for (i = 0; i < sampleCount; ++i) { + drwav_uint64 sample = 0; + unsigned int shift = (8 - bytesPerSample) * 8; + + unsigned int j; + for (j = 0; j < bytesPerSample; j += 1) { + DRWAV_ASSERT(j < 8); + sample |= (drwav_uint64)(pIn[j]) << shift; + shift += 8; + } + + pIn += j; + *pOut++ = (float)((drwav_int64)sample / 9223372036854775807.0); + } +} + +static void drwav__ieee_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample) +{ + if (bytesPerSample == 4) { + unsigned int i; + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ((const float*)pIn)[i]; + } + return; + } else if (bytesPerSample == 8) { + drwav_f64_to_f32(pOut, (const double*)pIn, sampleCount); + return; + } else { + /* Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome for 16-bit float. 
*/ + DRWAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut)); + return; + } +} + + +static drwav_uint64 drwav_read_pcm_frames_f32__pcm(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + + drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav__pcm_to_f32(pBufferOut, sampleData, (size_t)framesRead*pWav->channels, bytesPerFrame/pWav->channels); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_f32__msadpcm(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) +{ + /* + We're just going to borrow the implementation from the drwav_read_s16() since ADPCM is a little bit more complicated than other formats and I don't + want to duplicate that code. + */ + drwav_uint64 totalFramesRead = 0; + drwav_int16 samples16[2048]; + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16); + if (framesRead == 0) { + break; + } + + drwav_s16_to_f32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. 
*/
+
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
+ }
+
+ return totalFramesRead;
+ }
+
+ /* f32 read path for IMA (DVI) ADPCM: reuses the s16 decoder into a 2048-sample scratch buffer, then rescales with drwav_s16_to_f32. */ static drwav_uint64 drwav_read_pcm_frames_f32__ima(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
+ {
+ /*
+ We're just going to borrow the implementation from the drwav_read_s16() since IMA-ADPCM is a little bit more complicated than other formats and I don't
+ want to duplicate that code.
+ */
+ drwav_uint64 totalFramesRead = 0;
+ drwav_int16 samples16[2048];
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16);
+ if (framesRead == 0) {
+ break;
+ }
+
+ drwav_s16_to_f32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. */
+
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
+ }
+
+ return totalFramesRead;
+ }
+
+ /* f32 read path for IEEE-float sources: 32-bit float input is returned directly by the fast path; other widths are read into a stack buffer and converted per chunk by drwav__ieee_to_f32. */ static drwav_uint64 drwav_read_pcm_frames_f32__ieee(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
+ {
+ drwav_uint64 totalFramesRead;
+ drwav_uint8 sampleData[4096];
+ drwav_uint32 bytesPerFrame;
+
+ /* Fast path. 
*/ + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT && pWav->bitsPerSample == 32) { + return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut); + } + + bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav__ieee_to_f32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_f32__alaw(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav_alaw_to_f32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels)); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_f32__mulaw(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + + drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + 
break;
+ }
+
+ drwav_mulaw_to_f32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels));
+
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
+ }
+
+ return totalFramesRead;
+ }
+
+ /* Public f32 reader: validates arguments, clamps framesToRead so the total output byte count cannot exceed DRWAV_SIZE_MAX, then dispatches on translatedFormatTag (PCM, MS-ADPCM, IEEE float, A-law, mu-law, IMA/DVI ADPCM). Unknown formats return 0. */ DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
+ {
+ if (pWav == NULL || framesToRead == 0) {
+ return 0;
+ }
+
+ if (pBufferOut == NULL) {
+ return drwav_read_pcm_frames(pWav, framesToRead, NULL);
+ }
+
+ /* Don't try to read more samples than can potentially fit in the output buffer. */
+ if (framesToRead * pWav->channels * sizeof(float) > DRWAV_SIZE_MAX) {
+ framesToRead = DRWAV_SIZE_MAX / sizeof(float) / pWav->channels;
+ }
+
+ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) {
+ return drwav_read_pcm_frames_f32__pcm(pWav, framesToRead, pBufferOut);
+ }
+
+ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
+ return drwav_read_pcm_frames_f32__msadpcm(pWav, framesToRead, pBufferOut);
+ }
+
+ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) {
+ return drwav_read_pcm_frames_f32__ieee(pWav, framesToRead, pBufferOut);
+ }
+
+ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) {
+ return drwav_read_pcm_frames_f32__alaw(pWav, framesToRead, pBufferOut);
+ }
+
+ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) {
+ return drwav_read_pcm_frames_f32__mulaw(pWav, framesToRead, pBufferOut);
+ }
+
+ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
+ return drwav_read_pcm_frames_f32__ima(pWav, framesToRead, pBufferOut);
+ }
+
+ return 0;
+ }
+
+ /* Explicit little-endian variant: byte-swap the converted samples only when the host is big-endian. */ DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
+ {
+ drwav_uint64 framesRead = drwav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut);
+ if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_FALSE) {
+ drwav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels);
+ }
+
+ return framesRead;
+ }
+
+ DRWAV_API drwav_uint64
drwav_read_pcm_frames_f32be(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) +{ + drwav_uint64 framesRead = drwav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_TRUE) { + drwav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels); + } + + return framesRead; +} + + +DRWAV_API void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + +#ifdef DR_WAV_LIBSNDFILE_COMPAT + /* + It appears libsndfile uses slightly different logic for the u8 -> f32 conversion to dr_wav, which in my opinion is incorrect. It appears + libsndfile performs the conversion something like "f32 = (u8 / 256) * 2 - 1", however I think it should be "f32 = (u8 / 255) * 2 - 1" (note + the divisor of 256 vs 255). I use libsndfile as a benchmark for testing, so I'm therefore leaving this block here just for my automated + correctness testing. This is disabled by default. 
+ */ + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (pIn[i] / 256.0f) * 2 - 1; + } +#else + for (i = 0; i < sampleCount; ++i) { + float x = pIn[i]; + x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ + + *pOut++ = x; + } +#endif +} + +DRWAV_API void drwav_s16_to_f32(float* pOut, const drwav_int16* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = pIn[i] * 0.000030517578125f; + } +} + +DRWAV_API void drwav_s24_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + double x; + drwav_uint32 a = ((drwav_uint32)(pIn[i*3+0]) << 8); + drwav_uint32 b = ((drwav_uint32)(pIn[i*3+1]) << 16); + drwav_uint32 c = ((drwav_uint32)(pIn[i*3+2]) << 24); + + x = (double)((drwav_int32)(a | b | c) >> 8); + *pOut++ = (float)(x * 0.00000011920928955078125); + } +} + +DRWAV_API void drwav_s32_to_f32(float* pOut, const drwav_int32* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (float)(pIn[i] / 2147483648.0); + } +} + +DRWAV_API void drwav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (float)pIn[i]; + } +} + +DRWAV_API void drwav_alaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = drwav__alaw_to_s16(pIn[i]) / 32768.0f; + } +} + +DRWAV_API void drwav_mulaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = drwav__mulaw_to_s16(pIn[i]) / 32768.0f; + } 
+} + + + +static void drwav__pcm_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + unsigned int i; + + /* Special case for 8-bit sample data because it's treated as unsigned. */ + if (bytesPerSample == 1) { + drwav_u8_to_s32(pOut, pIn, totalSampleCount); + return; + } + + /* Slightly more optimal implementation for common formats. */ + if (bytesPerSample == 2) { + drwav_s16_to_s32(pOut, (const drwav_int16*)pIn, totalSampleCount); + return; + } + if (bytesPerSample == 3) { + drwav_s24_to_s32(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 4) { + for (i = 0; i < totalSampleCount; ++i) { + *pOut++ = ((const drwav_int32*)pIn)[i]; + } + return; + } + + + /* Anything more than 64 bits per sample is not supported. */ + if (bytesPerSample > 8) { + DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } + + + /* Generic, slow converter. */ + for (i = 0; i < totalSampleCount; ++i) { + drwav_uint64 sample = 0; + unsigned int shift = (8 - bytesPerSample) * 8; + + unsigned int j; + for (j = 0; j < bytesPerSample; j += 1) { + DRWAV_ASSERT(j < 8); + sample |= (drwav_uint64)(pIn[j]) << shift; + shift += 8; + } + + pIn += j; + *pOut++ = (drwav_int32)((drwav_int64)sample >> 32); + } +} + +static void drwav__ieee_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + if (bytesPerSample == 4) { + drwav_f32_to_s32(pOut, (const float*)pIn, totalSampleCount); + return; + } else if (bytesPerSample == 8) { + drwav_f64_to_s32(pOut, (const double*)pIn, totalSampleCount); + return; + } else { + /* Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome for 16-bit float. 
*/ + DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } +} + + +static drwav_uint64 drwav_read_pcm_frames_s32__pcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + drwav_uint32 bytesPerFrame; + + /* Fast path. */ + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 32) { + return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut); + } + + bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav__pcm_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_s32__msadpcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + /* + We're just going to borrow the implementation from the drwav_read_s16() since ADPCM is a little bit more complicated than other formats and I don't + want to duplicate that code. + */ + drwav_uint64 totalFramesRead = 0; + drwav_int16 samples16[2048]; + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16); + if (framesRead == 0) { + break; + } + + drwav_s16_to_s32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. 
*/ + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_s32__ima(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + /* + We're just going to borrow the implementation from the drwav_read_s16() since IMA-ADPCM is a little bit more complicated than other formats and I don't + want to duplicate that code. + */ + drwav_uint64 totalFramesRead = 0; + drwav_int16 samples16[2048]; + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16); + if (framesRead == 0) { + break; + } + + drwav_s16_to_s32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. */ + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_s32__ieee(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + + drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav__ieee_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_s32__alaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + + drwav_uint32 
bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav_alaw_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels)); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +static drwav_uint64 drwav_read_pcm_frames_s32__mulaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + drwav_uint64 totalFramesRead; + drwav_uint8 sampleData[4096]; + + drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + + totalFramesRead = 0; + + while (framesToRead > 0) { + drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData); + if (framesRead == 0) { + break; + } + + drwav_mulaw_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels)); + + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + + return totalFramesRead; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + if (pWav == NULL || framesToRead == 0) { + return 0; + } + + if (pBufferOut == NULL) { + return drwav_read_pcm_frames(pWav, framesToRead, NULL); + } + + /* Don't try to read more samples than can potentially fit in the output buffer. 
*/ + if (framesToRead * pWav->channels * sizeof(drwav_int32) > DRWAV_SIZE_MAX) { + framesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int32) / pWav->channels; + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) { + return drwav_read_pcm_frames_s32__pcm(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { + return drwav_read_pcm_frames_s32__msadpcm(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) { + return drwav_read_pcm_frames_s32__ieee(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) { + return drwav_read_pcm_frames_s32__alaw(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) { + return drwav_read_pcm_frames_s32__mulaw(pWav, framesToRead, pBufferOut); + } + + if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { + return drwav_read_pcm_frames_s32__ima(pWav, framesToRead, pBufferOut); + } + + return 0; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + drwav_uint64 framesRead = drwav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_FALSE) { + drwav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); + } + + return framesRead; +} + +DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) +{ + drwav_uint64 framesRead = drwav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_TRUE) { + drwav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); + } + + return framesRead; +} + + +DRWAV_API void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = 
((int)pIn[i] - 128) << 24; + } +} + +DRWAV_API void drwav_s16_to_s32(drwav_int32* pOut, const drwav_int16* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = pIn[i] << 16; + } +} + +DRWAV_API void drwav_s24_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + unsigned int s0 = pIn[i*3 + 0]; + unsigned int s1 = pIn[i*3 + 1]; + unsigned int s2 = pIn[i*3 + 2]; + + drwav_int32 sample32 = (drwav_int32)((s0 << 8) | (s1 << 16) | (s2 << 24)); + *pOut++ = sample32; + } +} + +DRWAV_API void drwav_f32_to_s32(drwav_int32* pOut, const float* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (drwav_int32)(2147483648.0 * pIn[i]); + } +} + +DRWAV_API void drwav_f64_to_s32(drwav_int32* pOut, const double* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (drwav_int32)(2147483648.0 * pIn[i]); + } +} + +DRWAV_API void drwav_alaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ((drwav_int32)drwav__alaw_to_s16(pIn[i])) << 16; + } +} + +DRWAV_API void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) +{ + size_t i; + + if (pOut == NULL || pIn == NULL) { + return; + } + + for (i= 0; i < sampleCount; ++i) { + *pOut++ = ((drwav_int32)drwav__mulaw_to_s16(pIn[i])) << 16; + } +} + + + +static drwav_int16* drwav__read_pcm_frames_and_close_s16(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount) +{ + drwav_uint64 sampleDataSize; + drwav_int16* pSampleData; + drwav_uint64 
framesRead; + + DRWAV_ASSERT(pWav != NULL); + + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(drwav_int16); + if (sampleDataSize > DRWAV_SIZE_MAX) { + drwav_uninit(pWav); + return NULL; /* File's too big. */ + } + + pSampleData = (drwav_int16*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */ + if (pSampleData == NULL) { + drwav_uninit(pWav); + return NULL; /* Failed to allocate memory. */ + } + + framesRead = drwav_read_pcm_frames_s16(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + drwav_uninit(pWav); + return NULL; /* There was an error reading the samples. */ + } + + drwav_uninit(pWav); + + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + + return pSampleData; +} + +static float* drwav__read_pcm_frames_and_close_f32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount) +{ + drwav_uint64 sampleDataSize; + float* pSampleData; + drwav_uint64 framesRead; + + DRWAV_ASSERT(pWav != NULL); + + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(float); + if (sampleDataSize > DRWAV_SIZE_MAX) { + drwav_uninit(pWav); + return NULL; /* File's too big. */ + } + + pSampleData = (float*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */ + if (pSampleData == NULL) { + drwav_uninit(pWav); + return NULL; /* Failed to allocate memory. 
*/ + } + + framesRead = drwav_read_pcm_frames_f32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + drwav_uninit(pWav); + return NULL; /* There was an error reading the samples. */ + } + + drwav_uninit(pWav); + + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + + return pSampleData; +} + +static drwav_int32* drwav__read_pcm_frames_and_close_s32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount) +{ + drwav_uint64 sampleDataSize; + drwav_int32* pSampleData; + drwav_uint64 framesRead; + + DRWAV_ASSERT(pWav != NULL); + + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(drwav_int32); + if (sampleDataSize > DRWAV_SIZE_MAX) { + drwav_uninit(pWav); + return NULL; /* File's too big. */ + } + + pSampleData = (drwav_int32*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */ + if (pSampleData == NULL) { + drwav_uninit(pWav); + return NULL; /* Failed to allocate memory. */ + } + + framesRead = drwav_read_pcm_frames_s32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + drwav_uninit(pWav); + return NULL; /* There was an error reading the samples. 
*/ + } + + drwav_uninit(pWav); + + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + + return pSampleData; +} + + + +DRWAV_API drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if 
(!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +#ifndef DR_WAV_NO_STDIO +DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); 
+} + + +DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#endif + +DRWAV_API drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* 
totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} + +DRWAV_API drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + drwav wav; + + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + + if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + + return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#endif /* DR_WAV_NO_CONVERSION_API */ + + +DRWAV_API void drwav_free(void* p, const drwav_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + drwav__free_from_callbacks(p, pAllocationCallbacks); + } else { + drwav__free_default(p, NULL); + } +} + +DRWAV_API drwav_uint16 
drwav_bytes_to_u16(const drwav_uint8* data) +{ + return drwav__bytes_to_u16(data); +} + +DRWAV_API drwav_int16 drwav_bytes_to_s16(const drwav_uint8* data) +{ + return drwav__bytes_to_s16(data); +} + +DRWAV_API drwav_uint32 drwav_bytes_to_u32(const drwav_uint8* data) +{ + return drwav__bytes_to_u32(data); +} + +DRWAV_API drwav_int32 drwav_bytes_to_s32(const drwav_uint8* data) +{ + return drwav__bytes_to_s32(data); +} + +DRWAV_API drwav_uint64 drwav_bytes_to_u64(const drwav_uint8* data) +{ + return drwav__bytes_to_u64(data); +} + +DRWAV_API drwav_int64 drwav_bytes_to_s64(const drwav_uint8* data) +{ + return drwav__bytes_to_s64(data); +} + + +DRWAV_API drwav_bool32 drwav_guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16]) +{ + return drwav__guid_equal(a, b); +} + +DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b) +{ + return drwav__fourcc_equal(a, b); +} + +#endif /* dr_wav_c */ +#endif /* DR_WAV_IMPLEMENTATION */ + +/* +RELEASE NOTES - v0.11.0 +======================= +Version 0.11.0 has breaking API changes. + +Improved Client-Defined Memory Allocation +----------------------------------------- +The main change with this release is the addition of a more flexible way of implementing custom memory allocation routines. The +existing system of DRWAV_MALLOC, DRWAV_REALLOC and DRWAV_FREE are still in place and will be used by default when no custom +allocation callbacks are specified. + +To use the new system, you pass in a pointer to a drwav_allocation_callbacks object to drwav_init() and family, like this: + + void* my_malloc(size_t sz, void* pUserData) + { + return malloc(sz); + } + void* my_realloc(void* p, size_t sz, void* pUserData) + { + return realloc(p, sz); + } + void my_free(void* p, void* pUserData) + { + free(p); + } + + ... 
+ + drwav_allocation_callbacks allocationCallbacks; + allocationCallbacks.pUserData = &myData; + allocationCallbacks.onMalloc = my_malloc; + allocationCallbacks.onRealloc = my_realloc; + allocationCallbacks.onFree = my_free; + drwav_init_file(&wav, "my_file.wav", &allocationCallbacks); + +The advantage of this new system is that it allows you to specify user data which will be passed in to the allocation routines. + +Passing in null for the allocation callbacks object will cause dr_wav to use defaults which is the same as DRWAV_MALLOC, +DRWAV_REALLOC and DRWAV_FREE and the equivalent of how it worked in previous versions. + +Every API that opens a drwav object now takes this extra parameter. These include the following: + + drwav_init() + drwav_init_ex() + drwav_init_file() + drwav_init_file_ex() + drwav_init_file_w() + drwav_init_file_w_ex() + drwav_init_memory() + drwav_init_memory_ex() + drwav_init_write() + drwav_init_write_sequential() + drwav_init_write_sequential_pcm_frames() + drwav_init_file_write() + drwav_init_file_write_sequential() + drwav_init_file_write_sequential_pcm_frames() + drwav_init_file_write_w() + drwav_init_file_write_sequential_w() + drwav_init_file_write_sequential_pcm_frames_w() + drwav_init_memory_write() + drwav_init_memory_write_sequential() + drwav_init_memory_write_sequential_pcm_frames() + drwav_open_and_read_pcm_frames_s16() + drwav_open_and_read_pcm_frames_f32() + drwav_open_and_read_pcm_frames_s32() + drwav_open_file_and_read_pcm_frames_s16() + drwav_open_file_and_read_pcm_frames_f32() + drwav_open_file_and_read_pcm_frames_s32() + drwav_open_file_and_read_pcm_frames_s16_w() + drwav_open_file_and_read_pcm_frames_f32_w() + drwav_open_file_and_read_pcm_frames_s32_w() + drwav_open_memory_and_read_pcm_frames_s16() + drwav_open_memory_and_read_pcm_frames_f32() + drwav_open_memory_and_read_pcm_frames_s32() + +Endian Improvements +------------------- +Previously, the following APIs returned little-endian audio data. 
These now return native-endian data. This improves compatibility +on big-endian architectures. + + drwav_read_pcm_frames() + drwav_read_pcm_frames_s16() + drwav_read_pcm_frames_s32() + drwav_read_pcm_frames_f32() + drwav_open_and_read_pcm_frames_s16() + drwav_open_and_read_pcm_frames_s32() + drwav_open_and_read_pcm_frames_f32() + drwav_open_file_and_read_pcm_frames_s16() + drwav_open_file_and_read_pcm_frames_s32() + drwav_open_file_and_read_pcm_frames_f32() + drwav_open_file_and_read_pcm_frames_s16_w() + drwav_open_file_and_read_pcm_frames_s32_w() + drwav_open_file_and_read_pcm_frames_f32_w() + drwav_open_memory_and_read_pcm_frames_s16() + drwav_open_memory_and_read_pcm_frames_s32() + drwav_open_memory_and_read_pcm_frames_f32() + +APIs have been added to give you explicit control over whether or not audio data is read or written in big- or little-endian byte +order: + + drwav_read_pcm_frames_le() + drwav_read_pcm_frames_be() + drwav_read_pcm_frames_s16le() + drwav_read_pcm_frames_s16be() + drwav_read_pcm_frames_f32le() + drwav_read_pcm_frames_f32be() + drwav_read_pcm_frames_s32le() + drwav_read_pcm_frames_s32be() + drwav_write_pcm_frames_le() + drwav_write_pcm_frames_be() + +Removed APIs +------------ +The following APIs were deprecated in version 0.10.0 and have now been removed: + + drwav_open() + drwav_open_ex() + drwav_open_write() + drwav_open_write_sequential() + drwav_open_file() + drwav_open_file_ex() + drwav_open_file_write() + drwav_open_file_write_sequential() + drwav_open_memory() + drwav_open_memory_ex() + drwav_open_memory_write() + drwav_open_memory_write_sequential() + drwav_close() + + + +RELEASE NOTES - v0.10.0 +======================= +Version 0.10.0 has breaking API changes. There are no significant bug fixes in this release, so if you are affected you do +not need to upgrade. 
+ +Removed APIs +------------ +The following APIs were deprecated in version 0.9.0 and have been completely removed in version 0.10.0: + + drwav_read() + drwav_read_s16() + drwav_read_f32() + drwav_read_s32() + drwav_seek_to_sample() + drwav_write() + drwav_open_and_read_s16() + drwav_open_and_read_f32() + drwav_open_and_read_s32() + drwav_open_file_and_read_s16() + drwav_open_file_and_read_f32() + drwav_open_file_and_read_s32() + drwav_open_memory_and_read_s16() + drwav_open_memory_and_read_f32() + drwav_open_memory_and_read_s32() + drwav::totalSampleCount + +See release notes for version 0.9.0 at the bottom of this file for replacement APIs. + +Deprecated APIs +--------------- +The following APIs have been deprecated. There is a confusing and completely arbitrary difference between drwav_init*() and +drwav_open*(), where drwav_init*() initializes a pre-allocated drwav object, whereas drwav_open*() will first allocate a +drwav object on the heap and then initialize it. drwav_open*() has been deprecated which means you must now use a pre- +allocated drwav object with drwav_init*(). If you need the previous functionality, you can just do a malloc() followed by +a call to one of the drwav_init*() APIs. + + drwav_open() + drwav_open_ex() + drwav_open_write() + drwav_open_write_sequential() + drwav_open_file() + drwav_open_file_ex() + drwav_open_file_write() + drwav_open_file_write_sequential() + drwav_open_memory() + drwav_open_memory_ex() + drwav_open_memory_write() + drwav_open_memory_write_sequential() + drwav_close() + +These APIs will be removed completely in a future version. The rationale for this change is to remove confusion between the +two different ways to initialize a drwav object. +*/ + +/* +REVISION HISTORY +================ +v0.12.16 - 2020-12-02 + - Fix a bug when trying to read more bytes than can fit in a size_t. + +v0.12.15 - 2020-11-21 + - Fix compilation with OpenWatcom. + +v0.12.14 - 2020-11-13 + - Minor code clean up. 
+ +v0.12.13 - 2020-11-01 + - Improve compiler support for older versions of GCC. + +v0.12.12 - 2020-09-28 + - Add support for RF64. + - Fix a bug in writing mode where the size of the RIFF chunk incorrectly includes the header section. + +v0.12.11 - 2020-09-08 + - Fix a compilation error on older compilers. + +v0.12.10 - 2020-08-24 + - Fix a bug when seeking with ADPCM formats. + +v0.12.9 - 2020-08-02 + - Simplify sized types. + +v0.12.8 - 2020-07-25 + - Fix a compilation warning. + +v0.12.7 - 2020-07-15 + - Fix some bugs on big-endian architectures. + - Fix an error in s24 to f32 conversion. + +v0.12.6 - 2020-06-23 + - Change drwav_read_*() to allow NULL to be passed in as the output buffer which is equivalent to a forward seek. + - Fix a buffer overflow when trying to decode invalid IMA-ADPCM files. + - Add include guard for the implementation section. + +v0.12.5 - 2020-05-27 + - Minor documentation fix. + +v0.12.4 - 2020-05-16 + - Replace assert() with DRWAV_ASSERT(). + - Add compile-time and run-time version querying. + - DRWAV_VERSION_MINOR + - DRWAV_VERSION_MAJOR + - DRWAV_VERSION_REVISION + - DRWAV_VERSION_STRING + - drwav_version() + - drwav_version_string() + +v0.12.3 - 2020-04-30 + - Fix compilation errors with VC6. + +v0.12.2 - 2020-04-21 + - Fix a bug where drwav_init_file() does not close the file handle after attempting to load an erroneous file. + +v0.12.1 - 2020-04-13 + - Fix some pedantic warnings. + +v0.12.0 - 2020-04-04 + - API CHANGE: Add container and format parameters to the chunk callback. + - Minor documentation updates. + +v0.11.5 - 2020-03-07 + - Fix compilation error with Visual Studio .NET 2003. + +v0.11.4 - 2020-01-29 + - Fix some static analysis warnings. + - Fix a bug when reading f32 samples from an A-law encoded stream. + +v0.11.3 - 2020-01-12 + - Minor changes to some f32 format conversion routines. + - Minor bug fix for ADPCM conversion when end of file is reached. 
+ +v0.11.2 - 2019-12-02 + - Fix a possible crash when using custom memory allocators without a custom realloc() implementation. + - Fix an integer overflow bug. + - Fix a null pointer dereference bug. + - Add limits to sample rate, channels and bits per sample to tighten up some validation. + +v0.11.1 - 2019-10-07 + - Internal code clean up. + +v0.11.0 - 2019-10-06 + - API CHANGE: Add support for user defined memory allocation routines. This system allows the program to specify their own memory allocation + routines with a user data pointer for client-specific contextual data. This adds an extra parameter to the end of the following APIs: + - drwav_init() + - drwav_init_ex() + - drwav_init_file() + - drwav_init_file_ex() + - drwav_init_file_w() + - drwav_init_file_w_ex() + - drwav_init_memory() + - drwav_init_memory_ex() + - drwav_init_write() + - drwav_init_write_sequential() + - drwav_init_write_sequential_pcm_frames() + - drwav_init_file_write() + - drwav_init_file_write_sequential() + - drwav_init_file_write_sequential_pcm_frames() + - drwav_init_file_write_w() + - drwav_init_file_write_sequential_w() + - drwav_init_file_write_sequential_pcm_frames_w() + - drwav_init_memory_write() + - drwav_init_memory_write_sequential() + - drwav_init_memory_write_sequential_pcm_frames() + - drwav_open_and_read_pcm_frames_s16() + - drwav_open_and_read_pcm_frames_f32() + - drwav_open_and_read_pcm_frames_s32() + - drwav_open_file_and_read_pcm_frames_s16() + - drwav_open_file_and_read_pcm_frames_f32() + - drwav_open_file_and_read_pcm_frames_s32() + - drwav_open_file_and_read_pcm_frames_s16_w() + - drwav_open_file_and_read_pcm_frames_f32_w() + - drwav_open_file_and_read_pcm_frames_s32_w() + - drwav_open_memory_and_read_pcm_frames_s16() + - drwav_open_memory_and_read_pcm_frames_f32() + - drwav_open_memory_and_read_pcm_frames_s32() + Set this extra parameter to NULL to use defaults which is the same as the previous behaviour. 
Setting this NULL will use + DRWAV_MALLOC, DRWAV_REALLOC and DRWAV_FREE. + - Add support for reading and writing PCM frames in an explicit endianness. New APIs: + - drwav_read_pcm_frames_le() + - drwav_read_pcm_frames_be() + - drwav_read_pcm_frames_s16le() + - drwav_read_pcm_frames_s16be() + - drwav_read_pcm_frames_f32le() + - drwav_read_pcm_frames_f32be() + - drwav_read_pcm_frames_s32le() + - drwav_read_pcm_frames_s32be() + - drwav_write_pcm_frames_le() + - drwav_write_pcm_frames_be() + - Remove deprecated APIs. + - API CHANGE: The following APIs now return native-endian data. Previously they returned little-endian data. + - drwav_read_pcm_frames() + - drwav_read_pcm_frames_s16() + - drwav_read_pcm_frames_s32() + - drwav_read_pcm_frames_f32() + - drwav_open_and_read_pcm_frames_s16() + - drwav_open_and_read_pcm_frames_s32() + - drwav_open_and_read_pcm_frames_f32() + - drwav_open_file_and_read_pcm_frames_s16() + - drwav_open_file_and_read_pcm_frames_s32() + - drwav_open_file_and_read_pcm_frames_f32() + - drwav_open_file_and_read_pcm_frames_s16_w() + - drwav_open_file_and_read_pcm_frames_s32_w() + - drwav_open_file_and_read_pcm_frames_f32_w() + - drwav_open_memory_and_read_pcm_frames_s16() + - drwav_open_memory_and_read_pcm_frames_s32() + - drwav_open_memory_and_read_pcm_frames_f32() + +v0.10.1 - 2019-08-31 + - Correctly handle partial trailing ADPCM blocks. + +v0.10.0 - 2019-08-04 + - Remove deprecated APIs. + - Add wchar_t variants for file loading APIs: + drwav_init_file_w() + drwav_init_file_ex_w() + drwav_init_file_write_w() + drwav_init_file_write_sequential_w() + - Add drwav_target_write_size_bytes() which calculates the total size in bytes of a WAV file given a format and sample count. 
+ - Add APIs for specifying the PCM frame count instead of the sample count when opening in sequential write mode: + drwav_init_write_sequential_pcm_frames() + drwav_init_file_write_sequential_pcm_frames() + drwav_init_file_write_sequential_pcm_frames_w() + drwav_init_memory_write_sequential_pcm_frames() + - Deprecate drwav_open*() and drwav_close(): + drwav_open() + drwav_open_ex() + drwav_open_write() + drwav_open_write_sequential() + drwav_open_file() + drwav_open_file_ex() + drwav_open_file_write() + drwav_open_file_write_sequential() + drwav_open_memory() + drwav_open_memory_ex() + drwav_open_memory_write() + drwav_open_memory_write_sequential() + drwav_close() + - Minor documentation updates. + +v0.9.2 - 2019-05-21 + - Fix warnings. + +v0.9.1 - 2019-05-05 + - Add support for C89. + - Change license to choice of public domain or MIT-0. + +v0.9.0 - 2018-12-16 + - API CHANGE: Add new reading APIs for reading by PCM frames instead of samples. Old APIs have been deprecated and + will be removed in v0.10.0. 
Deprecated APIs and their replacements: + drwav_read() -> drwav_read_pcm_frames() + drwav_read_s16() -> drwav_read_pcm_frames_s16() + drwav_read_f32() -> drwav_read_pcm_frames_f32() + drwav_read_s32() -> drwav_read_pcm_frames_s32() + drwav_seek_to_sample() -> drwav_seek_to_pcm_frame() + drwav_write() -> drwav_write_pcm_frames() + drwav_open_and_read_s16() -> drwav_open_and_read_pcm_frames_s16() + drwav_open_and_read_f32() -> drwav_open_and_read_pcm_frames_f32() + drwav_open_and_read_s32() -> drwav_open_and_read_pcm_frames_s32() + drwav_open_file_and_read_s16() -> drwav_open_file_and_read_pcm_frames_s16() + drwav_open_file_and_read_f32() -> drwav_open_file_and_read_pcm_frames_f32() + drwav_open_file_and_read_s32() -> drwav_open_file_and_read_pcm_frames_s32() + drwav_open_memory_and_read_s16() -> drwav_open_memory_and_read_pcm_frames_s16() + drwav_open_memory_and_read_f32() -> drwav_open_memory_and_read_pcm_frames_f32() + drwav_open_memory_and_read_s32() -> drwav_open_memory_and_read_pcm_frames_s32() + drwav::totalSampleCount -> drwav::totalPCMFrameCount + - API CHANGE: Rename drwav_open_and_read_file_*() to drwav_open_file_and_read_*(). + - API CHANGE: Rename drwav_open_and_read_memory_*() to drwav_open_memory_and_read_*(). + - Add built-in support for smpl chunks. + - Add support for firing a callback for each chunk in the file at initialization time. + - This is enabled through the drwav_init_ex(), etc. family of APIs. + - Handle invalid FMT chunks more robustly. + +v0.8.5 - 2018-09-11 + - Const correctness. + - Fix a potential stack overflow. + +v0.8.4 - 2018-08-07 + - Improve 64-bit detection. + +v0.8.3 - 2018-08-05 + - Fix C++ build on older versions of GCC. + +v0.8.2 - 2018-08-02 + - Fix some big-endian bugs. + +v0.8.1 - 2018-06-29 + - Add support for sequential writing APIs. + - Disable seeking in write mode. + - Fix bugs with Wave64. + - Fix typos. + +v0.8 - 2018-04-27 + - Bug fix. + - Start using major.minor.revision versioning. 
+ +v0.7f - 2018-02-05 + - Restrict ADPCM formats to a maximum of 2 channels. + +v0.7e - 2018-02-02 + - Fix a crash. + +v0.7d - 2018-02-01 + - Fix a crash. + +v0.7c - 2018-02-01 + - Set drwav.bytesPerSample to 0 for all compressed formats. + - Fix a crash when reading 16-bit floating point WAV files. In this case dr_wav will output silence for + all format conversion reading APIs (*_s16, *_s32, *_f32 APIs). + - Fix some divide-by-zero errors. + +v0.7b - 2018-01-22 + - Fix errors with seeking of compressed formats. + - Fix compilation error when DR_WAV_NO_CONVERSION_API + +v0.7a - 2017-11-17 + - Fix some GCC warnings. + +v0.7 - 2017-11-04 + - Add writing APIs. + +v0.6 - 2017-08-16 + - API CHANGE: Rename dr_* types to drwav_*. + - Add support for custom implementations of malloc(), realloc(), etc. + - Add support for Microsoft ADPCM. + - Add support for IMA ADPCM (DVI, format code 0x11). + - Optimizations to drwav_read_s16(). + - Bug fixes. + +v0.5g - 2017-07-16 + - Change underlying type for booleans to unsigned. + +v0.5f - 2017-04-04 + - Fix a minor bug with drwav_open_and_read_s16() and family. + +v0.5e - 2016-12-29 + - Added support for reading samples as signed 16-bit integers. Use the _s16() family of APIs for this. + - Minor fixes to documentation. + +v0.5d - 2016-12-28 + - Use drwav_int* and drwav_uint* sized types to improve compiler support. + +v0.5c - 2016-11-11 + - Properly handle JUNK chunks that come before the FMT chunk. + +v0.5b - 2016-10-23 + - A minor change to drwav_bool8 and drwav_bool32 types. + +v0.5a - 2016-10-11 + - Fixed a bug with drwav_open_and_read() and family due to incorrect argument ordering. + - Improve A-law and mu-law efficiency. + +v0.5 - 2016-09-29 + - API CHANGE. Swap the order of "channels" and "sampleRate" parameters in drwav_open_and_read*(). Rationale for this is to + keep it consistent with dr_audio and dr_flac. + +v0.4b - 2016-09-18 + - Fixed a typo in documentation. + +v0.4a - 2016-09-18 + - Fixed a typo. 
+ - Change date format to ISO 8601 (YYYY-MM-DD) + +v0.4 - 2016-07-13 + - API CHANGE. Make onSeek consistent with dr_flac. + - API CHANGE. Rename drwav_seek() to drwav_seek_to_sample() for clarity and consistency with dr_flac. + - Added support for Sony Wave64. + +v0.3a - 2016-05-28 + - API CHANGE. Return drwav_bool32 instead of int in onSeek callback. + - Fixed a memory leak. + +v0.3 - 2016-05-22 + - Lots of API changes for consistency. + +v0.2a - 2016-05-16 + - Fixed Linux/GCC build. + +v0.2 - 2016-05-11 + - Added support for reading data as signed 32-bit PCM for consistency with dr_flac. + +v0.1a - 2016-05-07 + - Fixed a bug in drwav_open_file() where the file handle would not be closed if the loader failed to initialize. + +v0.1 - 2016-05-04 + - Initial versioned release. +*/ + +/* +This software is available as a choice of the following licenses. Choose +whichever you prefer. + +=============================================================================== +ALTERNATIVE 1 - Public Domain (www.unlicense.org) +=============================================================================== +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. + +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to <http://unlicense.org/> + +=============================================================================== +ALTERNATIVE 2 - MIT No Attribution +=============================================================================== +Copyright 2020 David Reid + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ diff --git a/ggml.c b/ggml.c new file mode 100644 index 0000000..c29422c --- /dev/null +++ b/ggml.c @@ -0,0 +1,6689 @@ +#include "ggml.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define GGML_DEBUG 0 +#define GGML_MEM_ALIGN 16 + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define MIN(a, b) ((a) < (b) ?
(a) : (b)) + +#define UNUSED(x) (void)(x) +#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0) + +#define GGML_ASSERT(x) assert(x) + +#ifdef GGML_USE_ACCELERATE +#include +#endif + +// floating point type used to accumulate sums +typedef double ggml_float; + +// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t +#ifdef __ARM_NEON + +// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example: +// +// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ +// +#include <arm_neon.h> + +float ggml_fp16_to_fp32(ggml_fp16_t x) { + return x; +} + +ggml_fp16_t ggml_fp32_to_fp16(float x) { + return x; +} + +#else + +#include + +static inline float fp32_from_bits(uint32_t w) { + union { + uint32_t as_bits; + float as_value; + } fp32 = { w }; + return fp32.as_value; +} + +static inline uint32_t fp32_to_bits(float f) { + union { + float as_value; + uint32_t as_bits; + } fp32 = { f }; + return fp32.as_bits; +} + +float ggml_fp16_to_fp32(ggml_fp16_t h) { + const uint32_t w = (uint32_t) h << 16; + const uint32_t sign = w & UINT32_C(0x80000000); + const uint32_t two_w = w + w; + + const uint32_t exp_offset = UINT32_C(0xE0) << 23; +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const float exp_scale = 0x1.0p-112f; +#else + const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); +#endif + const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; + + const uint32_t magic_mask = UINT32_C(126) << 23; + const float magic_bias = 0.5f; + const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; + + const uint32_t denormalized_cutoff = UINT32_C(1) << 27; + const uint32_t result = sign | + (two_w < denormalized_cutoff ?
fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); + return fp32_from_bits(result); +} + +ggml_fp16_t ggml_fp32_to_fp16(float f) { +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const float scale_to_inf = 0x1.0p+112f; + const float scale_to_zero = 0x1.0p-110f; +#else + const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); + const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); +#endif + float base = (fabsf(f) * scale_to_inf) * scale_to_zero; + + const uint32_t w = fp32_to_bits(f); + const uint32_t shl1_w = w + w; + const uint32_t sign = w & UINT32_C(0x80000000); + uint32_t bias = shl1_w & UINT32_C(0xFF000000); + if (bias < UINT32_C(0x71000000)) { + bias = UINT32_C(0x71000000); + } + + base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; + const uint32_t bits = fp32_to_bits(base); + const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); + const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); + const uint32_t nonsign = exp_bits + mantissa_bits; + return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? 
UINT16_C(0x7E00) : nonsign); +} +#endif + +// +// timing +// + +int64_t ggml_time_ms(void) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000; +} + +int64_t ggml_time_us(void) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000; +} + +int64_t ggml_cycles(void) { + return clock(); +} + +int64_t ggml_cycles_per_ms(void) { + return CLOCKS_PER_SEC/1000; +} + +#ifdef GGML_PERF +#define ggml_perf_time_ms() ggml_time_ms() +#define ggml_perf_time_us() ggml_time_us() +#define ggml_perf_cycles() ggml_cycles() +#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms() +#else +#define ggml_perf_time_ms() 0 +#define ggml_perf_time_us() 0 +#define ggml_perf_cycles() 0 +#define ggml_perf_cycles_per_ms() 0 +#endif + +// +// cache line +// + +#if defined(__cpp_lib_hardware_interference_size) + const size_t CACHE_LINE_SIZE = hardware_destructive_interference_size; +#else + const size_t CACHE_LINE_SIZE = 64; +#endif + +const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); + +// +// fundamental operations +// + +inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } + +inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } + +inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } + +inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } + +inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } +inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } +inline static void ggml_vec_acc1_f32(const int n, float * y, 
const float v) { for (int i = 0; i < n; ++i) y[i] += v; } +inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } +inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } +inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } +inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } +inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } + +inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { + ggml_float sumf = 0.0; +#ifdef __ARM_NEON + // NEON 128-bit + const int n16 = (n & ~15); + + float32x4_t sum0 = vdupq_n_f32(0); + float32x4_t sum1 = vdupq_n_f32(0); + float32x4_t sum2 = vdupq_n_f32(0); + float32x4_t sum3 = vdupq_n_f32(0); + + float32x4_t x0, x1, x2, x3; + float32x4_t y0, y1, y2, y3; + + for (int i = 0; i < n16; i += 16) { + x0 = vld1q_f32(x + i + 0); + x1 = vld1q_f32(x + i + 4); + x2 = vld1q_f32(x + i + 8); + x3 = vld1q_f32(x + i + 12); + + y0 = vld1q_f32(y + i + 0); + y1 = vld1q_f32(y + i + 4); + y2 = vld1q_f32(y + i + 8); + y3 = vld1q_f32(y + i + 12); + + sum0 = vfmaq_f32(sum0, x0, y0); + sum1 = vfmaq_f32(sum1, x1, y1); + sum2 = vfmaq_f32(sum2, x2, y2); + sum3 = vfmaq_f32(sum3, x3, y3); + } + + // reduce sum0..sum3 to sum0 + sum0 = vaddq_f32(sum0, sum1); + sum2 = vaddq_f32(sum2, sum3); + sum0 = vaddq_f32(sum0, sum2); + + float32x2_t sumf32 = vadd_f32(vget_low_f32(sum0), vget_high_f32(sum0)); + sumf = vget_lane_f32(sumf32, 0) + vget_lane_f32(sumf32, 1); + + // leftovers + 
for (int i = n16; i < n; ++i) { + sumf += x[i]*y[i]; + } +#elif defined(__AVX2__) + // AVX 256-bit (unroll 4) + const int n32 = (n & ~31); + + __m256 sum0 = _mm256_setzero_ps(); + __m256 sum1 = _mm256_setzero_ps(); + __m256 sum2 = _mm256_setzero_ps(); + __m256 sum3 = _mm256_setzero_ps(); + + __m256 x0, x1, x2, x3; + __m256 y0, y1, y2, y3; + + for (int i = 0; i < n32; i += 32) { + x0 = _mm256_loadu_ps(x + i + 0); + x1 = _mm256_loadu_ps(x + i + 8); + x2 = _mm256_loadu_ps(x + i + 16); + x3 = _mm256_loadu_ps(x + i + 24); + + y0 = _mm256_loadu_ps(y + i + 0); + y1 = _mm256_loadu_ps(y + i + 8); + y2 = _mm256_loadu_ps(y + i + 16); + y3 = _mm256_loadu_ps(y + i + 24); + + sum0 = _mm256_fmadd_ps(x0, y0, sum0); + sum1 = _mm256_fmadd_ps(x1, y1, sum1); + sum2 = _mm256_fmadd_ps(x2, y2, sum2); + sum3 = _mm256_fmadd_ps(x3, y3, sum3); + } + + sum0 = _mm256_add_ps(sum0, sum1); + sum2 = _mm256_add_ps(sum2, sum3); + sum0 = _mm256_add_ps(sum0, sum2); + + const __m128 r4 = _mm_add_ps(_mm256_castps256_ps128(sum0), _mm256_extractf128_ps(sum0, 1)); + const __m128 r2 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4)); + const __m128 r1 = _mm_add_ss(r2, _mm_movehdup_ps(r2)); + + sumf = _mm_cvtss_f32(r1); + + // leftovers + for (int i = n32; i < n; ++i) { + sumf += x[i]*y[i]; + } +#else + // scalar + for (int i = 0; i < n; ++i) { + sumf += x[i]*y[i]; + } +#endif + + *s = sumf; +} + +inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) { + ggml_float sumf = 0.0; +#ifdef __ARM_NEON + const int n32 = (n & ~31); + + float16x8_t sum0 = vdupq_n_f16(0); + float16x8_t sum1 = vdupq_n_f16(0); + float16x8_t sum2 = vdupq_n_f16(0); + float16x8_t sum3 = vdupq_n_f16(0); + + float16x8_t x0, x1, x2, x3; + float16x8_t y0, y1, y2, y3; + + for (int i = 0; i < n32; i += 32) { + x0 = vld1q_f16(x + i + 0 ); + x1 = vld1q_f16(x + i + 8 ); + x2 = vld1q_f16(x + i + 16); + x3 = vld1q_f16(x + i + 24); + + y0 = vld1q_f16(y + i + 0 ); + y1 = vld1q_f16(y + i + 8 ); + 
y2 = vld1q_f16(y + i + 16); + y3 = vld1q_f16(y + i + 24); + + sum0 = vfmaq_f16(sum0, x0, y0); + sum1 = vfmaq_f16(sum1, x1, y1); + sum2 = vfmaq_f16(sum2, x2, y2); + sum3 = vfmaq_f16(sum3, x3, y3); + } + + // reduce sum0..sum3 to sum0 + sum0 = vaddq_f16(sum0, sum1); + sum2 = vaddq_f16(sum2, sum3); + sum0 = vaddq_f16(sum0, sum2); + + // load sum0 into 2 float32x4_t + float32x4_t sum0f32 = vcvt_f32_f16(vget_low_f16(sum0)); + float32x4_t sum1f32 = vcvt_f32_f16(vget_high_f16(sum0)); + + // reduce sum0f32 and sum1f32 to sumf + sum0f32 = vaddq_f32(sum0f32, sum1f32); + + float32x2_t sumf32 = vadd_f32(vget_low_f32(sum0f32), vget_high_f32(sum0f32)); + sumf = vget_lane_f32(sumf32, 0) + vget_lane_f32(sumf32, 1); + + // leftovers + for (int i = n32; i < n; ++i) { + GGML_ASSERT(false); // should not end up here + sumf += ggml_fp16_to_fp32(x[i])*ggml_fp16_to_fp32(y[i]); + } +#elif defined(__AVX2__) + // AVX 256-bit (unroll 4) + const int n32 = (n & ~31); + + __m256 sum0 = _mm256_setzero_ps(); + __m256 sum1 = _mm256_setzero_ps(); + __m256 sum2 = _mm256_setzero_ps(); + __m256 sum3 = _mm256_setzero_ps(); + + __m256 x0, x1, x2, x3; + __m256 y0, y1, y2, y3; + + for (int i = 0; i < n32; i += 32) { + x0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 0 ))); + x1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 8 ))); + x2 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 16))); + x3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 24))); + + y0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 0 ))); + y1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 8 ))); + y2 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 16))); + y3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 24))); + + sum0 = _mm256_fmadd_ps(x0, y0, sum0); + sum1 = _mm256_fmadd_ps(x1, y1, sum1); + sum2 = _mm256_fmadd_ps(x2, y2, sum2); + sum3 = _mm256_fmadd_ps(x3, y3, sum3); + } + + const __m256 sum01 = _mm256_add_ps(sum0, sum1); + const __m256 sum23 = _mm256_add_ps(sum2, sum3); + const __m256 
sum0123 = _mm256_add_ps(sum01, sum23); + + const __m128 r4 = _mm_add_ps(_mm256_castps256_ps128(sum0123), _mm256_extractf128_ps(sum0123, 1)); + const __m128 r2 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4)); + const __m128 r1 = _mm_add_ss(r2, _mm_movehdup_ps(r2)); + + sumf = _mm_cvtss_f32(r1); + + // leftovers + for (int i = n32; i < n; ++i) { + GGML_ASSERT(false); + sumf += ggml_fp16_to_fp32(x[i])*ggml_fp16_to_fp32(y[i]); + } +#else + for (int i = 0; i < n; ++i) { + sumf += ggml_fp16_to_fp32(x[i])*ggml_fp16_to_fp32(y[i]); + } +#endif + + *s = sumf; +} + +inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) { +#ifdef __ARM_NEON + // NEON 128-bit + const int n16 = (n & ~15); + + const float32x4_t v4 = vdupq_n_f32(v); + + float32x4_t x0, x1, x2, x3; + float32x4_t y0, y1, y2, y3; + + for (int i = 0; i < n16; i += 16) { + x0 = vld1q_f32(x + i + 0); + x1 = vld1q_f32(x + i + 4); + x2 = vld1q_f32(x + i + 8); + x3 = vld1q_f32(x + i + 12); + + y0 = vld1q_f32(y + i + 0); + y1 = vld1q_f32(y + i + 4); + y2 = vld1q_f32(y + i + 8); + y3 = vld1q_f32(y + i + 12); + + y0 = vfmaq_f32(y0, x0, v4); + y1 = vfmaq_f32(y1, x1, v4); + y2 = vfmaq_f32(y2, x2, v4); + y3 = vfmaq_f32(y3, x3, v4); + + vst1q_f32(y + i + 0, y0); + vst1q_f32(y + i + 4, y1); + vst1q_f32(y + i + 8, y2); + vst1q_f32(y + i + 12, y3); + } + + // leftovers + for (int i = n16; i < n; ++i) { + y[i] += x[i]*v; + } +#elif defined(__AVX2__) + // AVX 256-bit (unroll 4) + const int n32 = (n & ~31); + + const __m256 v4 = _mm256_set1_ps(v); + + __m256 x0, x1, x2, x3; + __m256 y0, y1, y2, y3; + + for (int i = 0; i < n32; i += 32) { + x0 = _mm256_loadu_ps(x + i + 0); + x1 = _mm256_loadu_ps(x + i + 8); + x2 = _mm256_loadu_ps(x + i + 16); + x3 = _mm256_loadu_ps(x + i + 24); + + y0 = _mm256_loadu_ps(y + i + 0); + y1 = _mm256_loadu_ps(y + i + 8); + y2 = _mm256_loadu_ps(y + i + 16); + y3 = _mm256_loadu_ps(y + i + 24); + + y0 = _mm256_fmadd_ps(x0, v4, y0); + y1 = _mm256_fmadd_ps(x1, v4, 
y1); + y2 = _mm256_fmadd_ps(x2, v4, y2); + y3 = _mm256_fmadd_ps(x3, v4, y3); + + _mm256_storeu_ps(y + i + 0, y0); + _mm256_storeu_ps(y + i + 8, y1); + _mm256_storeu_ps(y + i + 16, y2); + _mm256_storeu_ps(y + i + 24, y3); + } + + // leftovers + for (int i = n32; i < n; ++i) { + y[i] += x[i]*v; + } +#else + // scalar + for (int i = 0; i < n; ++i) { + y[i] += x[i]*v; + } +#endif +} + +inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_fp16_t * restrict x, const float v) { +#ifdef __ARM_NEON + // NEON 128-bit + const int n32 = (n & ~31); + + const float16x8_t v8 = vdupq_n_f16(v); + + float16x8_t x0, x1, x2, x3; + float16x8_t y0, y1, y2, y3; + + for (int i = 0; i < n32; i += 32) { + y0 = vld1q_f16(y + i + 0 ); + y1 = vld1q_f16(y + i + 8 ); + y2 = vld1q_f16(y + i + 16); + y3 = vld1q_f16(y + i + 24); + + x0 = vld1q_f16(x + i + 0 ); + x1 = vld1q_f16(x + i + 8 ); + x2 = vld1q_f16(x + i + 16); + x3 = vld1q_f16(x + i + 24); + + y0 = vfmaq_f16(y0, x0, v8); + y1 = vfmaq_f16(y1, x1, v8); + y2 = vfmaq_f16(y2, x2, v8); + y3 = vfmaq_f16(y3, x3, v8); + + vst1q_f16(y + i + 0 , y0); + vst1q_f16(y + i + 8 , y1); + vst1q_f16(y + i + 16, y2); + vst1q_f16(y + i + 24, y3); + } + + // leftovers + for (int i = n32; i < n; ++i) { + GGML_ASSERT(false); + y[i] = ggml_fp32_to_fp16(ggml_fp16_to_fp32(y[i]) + ggml_fp16_to_fp32(x[i])*v); + } +#elif defined(__AVX2__) + // AVX 256-bit + const int n32 = (n & ~31); + + const __m256 v8 = _mm256_set1_ps(v); + + __m256 x0, x1, x2, x3; + __m256 y0, y1, y2, y3; + + for (int i = 0; i < n32; i += 32) { + y0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 0 ))); + y1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 8 ))); + y2 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 16))); + y3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(y + i + 24))); + + x0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 0 ))); + x1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 8 ))); + x2 = 
_mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 16))); + x3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 24))); + + y0 = _mm256_fmadd_ps(x0, v8, y0); + y1 = _mm256_fmadd_ps(x1, v8, y1); + y2 = _mm256_fmadd_ps(x2, v8, y2); + y3 = _mm256_fmadd_ps(x3, v8, y3); + + _mm_storeu_si128((__m128i*)(y + i + 0 ), _mm256_cvtps_ph(y0, 0)); + _mm_storeu_si128((__m128i*)(y + i + 8 ), _mm256_cvtps_ph(y1, 0)); + _mm_storeu_si128((__m128i*)(y + i + 16), _mm256_cvtps_ph(y2, 0)); + _mm_storeu_si128((__m128i*)(y + i + 24), _mm256_cvtps_ph(y3, 0)); + } + + // leftovers + for (int i = n32; i < n; ++i) { + GGML_ASSERT(false); + y[i] = ggml_fp32_to_fp16(ggml_fp16_to_fp32(y[i]) + ggml_fp16_to_fp32(x[i])*v); + } +#else + for (int i = 0; i < n; ++i) { + y[i] = ggml_fp32_to_fp16(ggml_fp16_to_fp32(y[i]) + ggml_fp16_to_fp32(x[i])*v); + } +#endif +} + +inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } +inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrt(*s); } +inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } +inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrt(x[i]); } +inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } +inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } +inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } +inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 
x[i] : 0.f; } + +const ggml_float GELU_COEF_A = 0.044715; +const ggml_float SQRT_2_OVER_PI = 0.79788456080286535587989211986876; + +inline static void ggml_vec_gelu_f32 (const int n, float * y, const float * x) { + for (int i = 0; i < n; ++i) { + //y[i] = 0.5f*x[i]*(1.f + tanhf(SQRT_2_OVER_PI*(x[i] + 0.044715f*x[i]*x[i]*x[i]))); + //0.5*x*(1+tf.tanh(np.sqrt(2/np.pi)*(x+0.044715*tf.pow(x, 3)))) + const ggml_float xx = x[i]; + y[i] = 0.5*xx*(1.0 + tanh(SQRT_2_OVER_PI*xx*(1.0 + GELU_COEF_A*xx*xx))); + } +} + +inline static void ggml_vec_sum_f32 (const int n, float * s, const float * x) { ggml_float sum = 0.0; for (int i = 0; i < n; ++i) sum += x[i]; *s += sum; } +inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) { ggml_vec_norm_f32(n, s, x); *s = 1./(*s); } + +// +// logging +// + +#if (GGML_DEBUG >= 1) +#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG(...) +#endif + +#if (GGML_DEBUG >= 5) +#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG_5(...) +#endif + +#if (GGML_DEBUG >= 10) +#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG_10(...) +#endif + +#define GGML_PRINT(...) 
printf(__VA_ARGS__) + +// +// data types +// + +const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { + sizeof(int8_t ), + sizeof(int16_t), + sizeof(int32_t), + sizeof(ggml_fp16_t), + sizeof(float ), +}; + +const char * GGML_OP_LABEL[GGML_OP_COUNT] = { + "NONE", + + "DUP", + "ADD", + "SUB", + "MUL", + "DIV", + "SQR", + "SQRT", + "SUM", + "MEAN", + "REPEAT", + "ABS", + "SGN", + "NEG", + "STEP", + "RELU", + "GELU", + "NORM", + + "MUL_MAT", + + "SCALE", + "CPY", + "RESHAPE", + "VIEW", + "PERMUTE", + "TRANSPOSE", + "GET_ROWS", + "DIAG_MASK_INF", + "SOFT_MAX", + "ROPE", + "CONV_1D_1S", + "CONV_1D_2S", +}; + +const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { + "none", + + "x", + "x+y", + "x-y", + "x*y", + "x/y", + "x^2", + "√x", + "Σx", + "Σx/n", + "repeat(x)", + "abs(x)", + "sgn(x)", + "-x", + "step(x)", + "relu(x)", + "gelu(x)", + "norm(x)", + + "X*Y", + + "x*v", + "x-\\>y", + "reshape(x)", + "view(x)", + "permute(x)", + "transpose(x)", + "get_rows(x)", + "diag_mask_inf(x)", + "soft_max(x)", + "rope(x)", + "conv_1d_1s(x)", + "conv_1d_2s(x)", +}; + +// +// ggml object +// + +struct ggml_object { + size_t offset; + size_t size; + + struct ggml_object * next; + + char padding[8]; +}; + +const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object); + +static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); +static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN"); + +// +// ggml context +// + +struct ggml_context { + size_t mem_size; + void * mem_buffer; + bool mem_buffer_owned; + + int n_objects; + + struct ggml_object * objects_begin; + struct ggml_object * objects_end; +}; + +struct ggml_context_container { + bool used; + + struct ggml_context context; +}; + +// +// compute types +// + +enum ggml_task_type { + GGML_TASK_INIT = 0, + GGML_TASK_COMPUTE, + GGML_TASK_FINALIZE, +}; + +struct ggml_compute_params { + enum ggml_task_type type; + + int ith, nth; + + 
// work buffer for all threads + size_t wsize; + void * wdata; +}; + +// +// ggml state +// + +struct ggml_state { + struct ggml_context_container contexts[GGML_MAX_CONTEXTS]; +}; + +// global state +struct ggml_state g_state; + +//////////////////////////////////////////////////////////////////////////////// + +void ggml_print_object(const struct ggml_object * obj) { + GGML_PRINT(" - ggml_object: offset = %zu, size = %zu, next = %p\n", + obj->offset, obj->size, (const void *) obj->next); +} + +void ggml_print_objects(const struct ggml_context * ctx) { + struct ggml_object * obj = ctx->objects_begin; + + GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx); + + while (obj != NULL) { + ggml_print_object(obj); + obj = obj->next; + } + + GGML_PRINT("%s: --- end ---\n", __func__); +} + +int ggml_nelements(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; +} + +int ggml_nrows(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; +} + +size_t ggml_nbytes(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type]; +} + +size_t ggml_type_size(enum ggml_type type) { + return GGML_TYPE_SIZE[type]; +} + +size_t ggml_element_size(const struct ggml_tensor * tensor) { + return GGML_TYPE_SIZE[tensor->type]; +} + +bool ggml_is_scalar(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; +} + +bool ggml_is_vector(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - 
update this function"); + + return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; +} + +bool ggml_is_matrix(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return tensor->ne[2] == 1 && tensor->ne[3] == 1; +} + +bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return + (t0->ne[0] == t1->ne[0]) && + (t0->ne[2] == t1->ne[2]) && + (t0->ne[3] == t1->ne[3]); +} + +bool ggml_is_contiguous(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return + tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] && + tensor->nb[1] == tensor->nb[0]*tensor->ne[0] && + tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && + tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; +} + +bool ggml_is_padded_1d(const struct ggml_tensor * tensor) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return + tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] && + tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && + tensor->nb[3] == tensor->nb[2]*tensor->ne[2];; +} + +bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return + (t0->ne[0] == t1->ne[0] ) && + (t0->ne[1] == t1->ne[1] ) && + (t0->ne[2] == t1->ne[2] ) && + (t0->ne[3] == t1->ne[3] ); +} + +// check if t1 can be represented as a repeatition of t0 +bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return + (t1->ne[0]%t0->ne[0] == 0) && + (t1->ne[1]%t0->ne[1] == 0) && + (t1->ne[2]%t0->ne[2] == 0) && + (t1->ne[3]%t0->ne[3] == 0); +} + +int ggml_up32(int n) { + return (n + 31) & ~31; +} + 
+int ggml_up64(int n) { + return (n + 63) & ~63; +} + +// assert that pointer is aligned to GGML_MEM_ALIGN +#define ggml_assert_aligned(ptr) \ + assert(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0) + +//////////////////////////////////////////////////////////////////////////////// + +struct ggml_context * ggml_init(struct ggml_init_params params) { + // find non-used context in g_state + struct ggml_context * ctx = NULL; + + static bool first_time = true; + if (first_time) { + for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { + g_state.contexts[i].used = false; + } + first_time = false; + } + + for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { + if (!g_state.contexts[i].used) { + g_state.contexts[i].used = true; + ctx = &g_state.contexts[i].context; + + GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i); + break; + } + } + + if (ctx == NULL) { + GGML_PRINT_DEBUG("%s\n", "ggml_init: no unused context found"); + return NULL; + } + + *ctx = (struct ggml_context) { + .mem_size = params.mem_size, + .mem_buffer = params.mem_buffer ? params.mem_buffer : malloc(params.mem_size), + .mem_buffer_owned = params.mem_buffer ? false : true, + .n_objects = 0, + .objects_begin = NULL, + .objects_end = NULL, + }; + + ggml_assert_aligned(ctx->mem_buffer); + + return ctx; +} + +void ggml_free(struct ggml_context * ctx) { + for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { + if (&g_state.contexts[i].context == ctx) { + g_state.contexts[i].used = false; + + GGML_PRINT_DEBUG("ggml_free: context %d with %d objects has been freed. 
memory used = %zu\n", + i, ctx->n_objects, ctx->objects_end->offset + ctx->objects_end->size); + + if (ctx->mem_buffer_owned) { + free(ctx->mem_buffer); + } + + return; + } + } + + GGML_PRINT_DEBUG("%s: context not found\n", __func__); +} + +size_t ggml_used_mem(const struct ggml_context * ctx) { + return ctx->objects_end->offset + ctx->objects_end->size; +} + +//////////////////////////////////////////////////////////////////////////////// + +struct ggml_tensor * ggml_new_tensor_impl( + struct ggml_context * ctx, + enum ggml_type type, + int n_dims, + const int* ne, + void* data) { + // always insert objects at the end of the context's memory pool + struct ggml_object * obj_cur = ctx->objects_end; + + const size_t cur_offset = obj_cur == NULL ? 0 : obj_cur->offset; + const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size; + const size_t cur_end = cur_offset + cur_size; + + size_t size_needed = 0; + + if (data == NULL) { + size_needed += GGML_TYPE_SIZE[type]; + for (int i = 0; i < n_dims; i++) { + size_needed *= ne[i]; + } + // align to GGML_MEM_ALIGN + size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN; + + } + size_needed += sizeof(struct ggml_tensor); + + if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) { + GGML_PRINT("%s: not enough space in the context's memory pool\n", __func__); + assert(false); + return NULL; + } + + char * const mem_buffer = ctx->mem_buffer; + + struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end); + + *obj_new = (struct ggml_object) { + .offset = cur_end + GGML_OBJECT_SIZE, + .size = size_needed, + .next = NULL, + }; + + if (obj_cur != NULL) { + obj_cur->next = obj_new; + } else { + // this is the first object in this context + ctx->objects_begin = obj_new; + } + + ctx->objects_end = obj_new; + + //GGML_PRINT_DEBUG("%s: inserted new object at %zu\n", __func__, cur_end); + + struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offset); 
+ + ggml_assert_aligned(result); + + *result = (struct ggml_tensor) { + /*.type =*/ type, + /*.n_dims =*/ n_dims, + /*.ne =*/ { 1, 1, 1, 1 }, + /*.nb =*/ { 0, 0, 0, 0 }, + /*.op =*/ GGML_OP_NONE, + /*.is_param =*/ false, + /*.grad =*/ NULL, + /*.src0 =*/ NULL, + /*.src1 =*/ NULL, + /*.n_tasks =*/ 0, + /*.perf_runs =*/ 0, + /*.perf_cycles =*/ 0, + /*.perf_time_us =*/ 0, + /*.data =*/ data == NULL ? (void *)(result + 1) : data, + /*.pad =*/ { 0 }, + }; + + ggml_assert_aligned(result->data); + + for (int i = 0; i < n_dims; i++) { + result->ne[i] = ne[i]; + } + + result->nb[0] = GGML_TYPE_SIZE[type]; + for (int i = 1; i < GGML_MAX_DIMS; i++) { + result->nb[i] = result->nb[i - 1]*result->ne[i - 1]; + } + + ctx->n_objects++; + + return result; +} + +struct ggml_tensor * ggml_new_tensor( + struct ggml_context * ctx, + enum ggml_type type, + int n_dims, + const int* ne) { + return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL); +} + +struct ggml_tensor * ggml_new_tensor_1d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0) { + return ggml_new_tensor(ctx, type, 1, &ne0); +} + +struct ggml_tensor * ggml_new_tensor_2d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0, + int ne1) { + const int ne[2] = { ne0, ne1 }; + return ggml_new_tensor(ctx, type, 2, ne); +} + +struct ggml_tensor * ggml_new_tensor_3d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0, + int ne1, + int ne2) { + const int ne[3] = { ne0, ne1, ne2 }; + return ggml_new_tensor(ctx, type, 3, ne); +} + +struct ggml_tensor * ggml_new_tensor_4d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0, + int ne1, + int ne2, + int ne3) { + const int ne[4] = { ne0, ne1, ne2, ne3 }; + return ggml_new_tensor(ctx, type, 4, ne); +} + +struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) { + struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); + + ggml_set_f32(result, value); + + return result; +} + +struct ggml_tensor * 
ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) { + return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL); +} + +struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) { + memset(tensor->data, 0, ggml_nbytes(tensor)); + return tensor; +} + +struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { + const int n = ggml_nrows(tensor); + const int nc = tensor->ne[0]; + const size_t n1 = tensor->nb[1]; + + char * const data = tensor->data; + + switch (tensor->type) { + case GGML_TYPE_I8: + { + assert(tensor->nb[0] == sizeof(int8_t)); + for (int i = 0; i < n; i++) { + ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); + } + } break; + case GGML_TYPE_I16: + { + assert(tensor->nb[0] == sizeof(int16_t)); + for (int i = 0; i < n; i++) { + ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); + } + } break; + case GGML_TYPE_I32: + { + assert(tensor->nb[0] == sizeof(int32_t)); + for (int i = 0; i < n; i++) { + ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); + } + } break; + case GGML_TYPE_F16: + { + assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + for (int i = 0; i < n; i++) { + ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value); + } + } break; + case GGML_TYPE_F32: + { + assert(tensor->nb[0] == sizeof(float)); + for (int i = 0; i < n; i++) { + ggml_vec_set_f32(nc, (float *)(data + i*n1), value); + } + } break; + case GGML_TYPE_COUNT: + { + assert(false); + } break; + } + + return tensor; +} + +float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { + switch (tensor->type) { + case GGML_TYPE_I8: + { + assert(tensor->nb[0] == sizeof(int8_t)); + return ((int8_t *)(tensor->data))[i]; + } break; + case GGML_TYPE_I16: + { + assert(tensor->nb[0] == sizeof(int16_t)); + return ((int16_t *)(tensor->data))[i]; + } break; + case GGML_TYPE_I32: + { + assert(tensor->nb[0] == sizeof(int32_t)); + return ((int32_t *)(tensor->data))[i]; + } break; + case GGML_TYPE_F16: + { + 
assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + return ggml_fp16_to_fp32(((ggml_fp16_t *)(tensor->data))[i]); + } break; + case GGML_TYPE_F32: + { + assert(tensor->nb[0] == sizeof(float)); + return ((float *)(tensor->data))[i]; + } break; + case GGML_TYPE_COUNT: + { + assert(false); + } break; + } + + assert(false); + return 0.0f; +} + +void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { + switch (tensor->type) { + case GGML_TYPE_I8: + { + assert(tensor->nb[0] == sizeof(int8_t)); + ((int8_t *)(tensor->data))[i] = value; + } break; + case GGML_TYPE_I16: + { + assert(tensor->nb[0] == sizeof(int16_t)); + ((int16_t *)(tensor->data))[i] = value; + } break; + case GGML_TYPE_I32: + { + assert(tensor->nb[0] == sizeof(int32_t)); + ((int32_t *)(tensor->data))[i] = value; + } break; + case GGML_TYPE_F16: + { + assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + ((ggml_fp16_t *)(tensor->data))[i] = ggml_fp32_to_fp16(value); + } break; + case GGML_TYPE_F32: + { + assert(tensor->nb[0] == sizeof(float)); + ((float *)(tensor->data))[i] = value; + } break; + case GGML_TYPE_COUNT: + { + assert(false); + } break; + } +} + +void * ggml_get_data(const struct ggml_tensor * tensor) { + return tensor->data; +} + +float * ggml_get_data_f32(const struct ggml_tensor * tensor) { + assert(tensor->type == GGML_TYPE_F32); + return (float *)(tensor->data); +} + +struct ggml_tensor * ggml_view_tensor( + struct ggml_context * ctx, + const struct ggml_tensor * src) { + return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data); +} + +//////////////////////////////////////////////////////////////////////////////// + +// ggml_dup + +struct ggml_tensor * ggml_dup_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? 
ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_DUP; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_dup( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_dup_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_dup_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_dup_impl(ctx, a, true); +} + +// ggml_add + +struct ggml_tensor * ggml_add_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool inplace) { + assert(ggml_are_same_shape(a, b)); + + bool is_node = false; + + if (!inplace && (a->grad || b->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_ADD; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +struct ggml_tensor * ggml_add( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_add_impl(ctx, a, b, false); +} + +struct ggml_tensor * ggml_add_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_add_impl(ctx, a, b, true); +} + +// ggml_sub + +struct ggml_tensor * ggml_sub_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool inplace) { + assert(ggml_are_same_shape(a, b)); + + bool is_node = false; + + if (!inplace && (a->grad || b->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_SUB; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +struct ggml_tensor * ggml_sub( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_sub_impl(ctx, a, b, false); +} + +struct ggml_tensor * ggml_sub_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_sub_impl(ctx, a, b, true); +} + +// ggml_mul + +struct ggml_tensor * ggml_mul_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool inplace) { + assert(ggml_are_same_shape(a, b)); + + bool is_node = false; + + if (!inplace && (a->grad || b->grad)) { + is_node = true; + } + + if (inplace) { + assert(is_node == false); + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_MUL; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +struct ggml_tensor * ggml_mul( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_mul_impl(ctx, a, b, false); +} + +struct ggml_tensor * ggml_mul_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_mul_impl(ctx, a, b, true); +} + +// ggml_div + +struct ggml_tensor * ggml_div_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool inplace) { + assert(ggml_are_same_shape(a, b)); + + bool is_node = false; + + if (!inplace && (a->grad || b->grad)) { + is_node = true; + } + + if (inplace) { + assert(is_node == false); + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_DIV; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +struct ggml_tensor * ggml_div( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_div_impl(ctx, a, b, false); +} + +struct ggml_tensor * ggml_div_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_div_impl(ctx, a, b, true); +} + +// ggml_sqr + +struct ggml_tensor * ggml_sqr_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_SQR; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_sqr( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_sqr_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_sqr_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_sqr_impl(ctx, a, true); +} + +// ggml_sqrt + +struct ggml_tensor * ggml_sqrt_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_SQRT; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_sqrt( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_sqrt_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_sqrt_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_sqrt_impl(ctx, a, true); +} + +// ggml_sum + +struct ggml_tensor * ggml_sum( + struct ggml_context * ctx, + struct ggml_tensor * a) { + bool is_node = false; + + if (a->grad) { + is_node = true; + } + + struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1); + + result->op = GGML_OP_SUM; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +// ggml_mean + +struct ggml_tensor * ggml_mean( + struct ggml_context * ctx, + struct ggml_tensor * a) { + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement + is_node = true; + } + + int ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne); + + result->op = GGML_OP_MEAN; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +// ggml_repeat + +struct ggml_tensor * ggml_repeat( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + assert(ggml_can_repeat(a, b)); + + bool is_node = false; + + if (a->grad) { + is_node = true; + } + + if (ggml_are_same_shape(a, b) && !is_node) { + return a; + } + + struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne); + + result->op = GGML_OP_REPEAT; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +// ggml_abs + +struct ggml_tensor * ggml_abs_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_ABS; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_abs( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_abs_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_abs_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_abs_impl(ctx, a, true); +} + + +// ggml_sgn + +struct ggml_tensor * ggml_sgn_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_SGN; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_sgn( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_sgn_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_sgn_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_sgn_impl(ctx, a, true); +} + +// ggml_neg + +struct ggml_tensor * ggml_neg_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_NEG; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_neg( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_neg_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_neg_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_neg_impl(ctx, a, true); +} + +// ggml_step + +struct ggml_tensor * ggml_step_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_STEP; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_step( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_step_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_step_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_step_impl(ctx, a, true); +} + +// ggml_relu + +struct ggml_tensor * ggml_relu_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_RELU; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_relu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_relu_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_relu_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_relu_impl(ctx, a, true); +} + +// ggml_gelu + +struct ggml_tensor * ggml_gelu_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_GELU; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_gelu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_gelu_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_gelu_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_gelu_impl(ctx, a, true); +} + +// ggml_norm + +struct ggml_tensor * ggml_norm_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + bool inplace) { + bool is_node = false; + + if (!inplace && (a->grad)) { + assert(false); // TODO: implement backward + is_node = true; + } + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_NORM; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; // TODO: maybe store epsilon here? 
+ + return result; +} + +struct ggml_tensor * ggml_norm( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_norm_impl(ctx, a, false); +} + +struct ggml_tensor * ggml_norm_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_norm_impl(ctx, a, true); +} + +// ggml_mul_mat + +struct ggml_tensor * ggml_mul_mat( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + assert(ggml_can_mul_mat(a, b)); + + bool is_node = false; + + if (a->grad || b->grad) { + is_node = true; + } + + const int ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne); + + result->op = GGML_OP_MUL_MAT; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +// ggml_scale + +struct ggml_tensor * ggml_scale_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool inplace) { + assert(ggml_is_scalar(b)); + assert(ggml_is_padded_1d(a)); + + bool is_node = false; + + if (!inplace && (a->grad || b->grad)) { + assert(false); // TODO: implement backward + is_node = true; + } + + // TODO: when implement backward, fix this: + //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + result->op = GGML_OP_SCALE; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +struct ggml_tensor * ggml_scale( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_scale_impl(ctx, a, b, false); +} + +struct ggml_tensor * ggml_scale_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_scale_impl(ctx, a, b, true); +} + +// ggml_cpy + +struct ggml_tensor * ggml_cpy_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool inplace) { + assert(ggml_nelements(a) == ggml_nelements(b)); + + bool is_node = false; + + if (!inplace && (a->grad || b->grad)) { + assert(false); // TODO: implement backward + is_node = true; + } + + // make a view of the destination + struct ggml_tensor * result = ggml_view_tensor(ctx, b); + + result->op = GGML_OP_CPY; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +struct ggml_tensor * ggml_cpy( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_cpy_impl(ctx, a, b, false); +} + +struct ggml_tensor * ggml_cpy_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_cpy_impl(ctx, a, b, true); +} + +// ggml_reshape + +struct ggml_tensor * ggml_reshape( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + assert(ggml_is_contiguous(a)); + assert(ggml_is_contiguous(b)); + assert(ggml_nelements(a) == ggml_nelements(b)); + + bool is_node = false; + + if (a->grad || b->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data); + + result->op = GGML_OP_RESHAPE; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_reshape_2d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1) { + assert(ggml_is_contiguous(a)); + assert(ggml_nelements(a) == ne0*ne1); + + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + const int ne[2] = { ne0, ne1 }; + struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data); + + result->op = GGML_OP_RESHAPE; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +struct ggml_tensor * ggml_reshape_3d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + int ne2) { + assert(ggml_is_contiguous(a)); + assert(ggml_nelements(a) == ne0*ne1*ne2); + + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + const int ne[3] = { ne0, ne1, ne2 }; + struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data); + + result->op = GGML_OP_RESHAPE; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +// ggml_view_1d + +struct ggml_tensor * ggml_view_1d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + size_t offset) { + if (a->grad) { + assert(false); // gradient propagation is not supported + } + + struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset); + + result->op = GGML_OP_VIEW; + result->grad = NULL; + result->src0 = a; + result->src1 = NULL; // TODO: maybe store the offset here? 
+ + return result; +} + +// ggml_view_2d + +struct ggml_tensor * ggml_view_2d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + size_t nb1, + size_t offset) { + if (a->grad) { + assert(false); // gradient propagation is not supported + } + + const int ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 }; + + struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset); + + result->nb[1] = nb1; + result->nb[2] = result->nb[1]*ne1; + result->nb[3] = result->nb[2]; + + result->op = GGML_OP_VIEW; + result->grad = NULL; + result->src0 = a; + result->src1 = NULL; // TODO: maybe store the offset here? + + return result; +} + +// ggml_permute + +struct ggml_tensor * ggml_permute( + struct ggml_context * ctx, + struct ggml_tensor * a, + int axis0, + int axis1, + int axis2, + int axis3) { + assert(axis0 >= 0 && axis0 < GGML_MAX_DIMS); + assert(axis1 >= 0 && axis1 < GGML_MAX_DIMS); + assert(axis2 >= 0 && axis2 < GGML_MAX_DIMS); + assert(axis3 >= 0 && axis3 < GGML_MAX_DIMS); + + assert(axis0 != axis1); + assert(axis0 != axis2); + assert(axis0 != axis3); + assert(axis1 != axis2); + assert(axis1 != axis3); + assert(axis2 != axis3); + + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + int ne[GGML_MAX_DIMS]; + int nb[GGML_MAX_DIMS]; + + ne[axis0] = a->ne[0]; + ne[axis1] = a->ne[1]; + ne[axis2] = a->ne[2]; + ne[axis3] = a->ne[3]; + + nb[axis0] = a->nb[0]; + nb[axis1] = a->nb[1]; + nb[axis2] = a->nb[2]; + nb[axis3] = a->nb[3]; + + result->ne[0] = ne[0]; + result->ne[1] = ne[1]; + result->ne[2] = ne[2]; + result->ne[3] = ne[3]; + + result->nb[0] = nb[0]; + result->nb[1] = nb[1]; + result->nb[2] = nb[2]; + result->nb[3] = nb[3]; + + result->op = GGML_OP_PERMUTE; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; // TODO: maybe store the permutation here? 
+ + return result; +} + +// ggml_transpose + +struct ggml_tensor * ggml_transpose( + struct ggml_context * ctx, + struct ggml_tensor * a) { + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + result->ne[0] = a->ne[1]; + result->ne[1] = a->ne[0]; + + result->nb[0] = a->nb[1]; + result->nb[1] = a->nb[0]; + + result->op = GGML_OP_TRANSPOSE; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +// ggml_get_rows + +struct ggml_tensor * ggml_get_rows( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + assert(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32); + + bool is_node = false; + + if (a->grad || b->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + // TODO: implement non F32 return + //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); + struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]); + + result->op = GGML_OP_GET_ROWS; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +// ggml_diag_mask_inf + +struct ggml_tensor * ggml_diag_mask_inf( + struct ggml_context * ctx, + struct ggml_tensor * a, + int n_past) { + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + // TODO: when implement backward, fix this: + //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1); + ((int32_t *) b->data)[0] = n_past; + + result->op = GGML_OP_DIAG_MASK_INF; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +// ggml_soft_max + +struct ggml_tensor * ggml_soft_max( + struct ggml_context * ctx, + struct ggml_tensor * a) { + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + // TODO: when implement backward, fix this: + //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + result->op = GGML_OP_SOFT_MAX; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = NULL; + + return result; +} + +// ggml_rope + +struct ggml_tensor * ggml_rope( + struct ggml_context * ctx, + struct ggml_tensor * a, + int n_past, + int n_dims, + int mode) { + assert(n_past >= 0); + bool is_node = false; + + if (a->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + // TODO: when implement backward, fix this: + //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3); + ((int32_t *) b->data)[0] = n_past; + ((int32_t *) b->data)[1] = n_dims; + ((int32_t *) b->data)[2] = mode; + + result->op = GGML_OP_ROPE; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +// ggml_conv_1d_1s + +struct ggml_tensor * ggml_conv_1d_1s( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + assert(ggml_is_matrix(b)); + assert(a->ne[1] == b->ne[1]); + assert(a->ne[3] == 1); + bool is_node = false; + + if (a->grad || b->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + const int ne[4] = { b->ne[0], a->ne[2], 1, 1, }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); + + result->op = GGML_OP_CONV_1D_1S; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src0 = a; + result->src1 = b; + + return result; +} + +// ggml_conv_1d_2s + +struct ggml_tensor * ggml_conv_1d_2s( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + assert(ggml_is_matrix(b)); + assert(a->ne[1] == b->ne[1]); + assert(a->ne[3] == 1); + bool is_node = false; + + if (a->grad || b->grad) { + assert(false); // TODO: implement backward + is_node = true; + } + + const int ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); + + result->op = GGML_OP_CONV_1D_2S; + result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

////////////////////////////////////////////////////////////////////////////////

// Mark a tensor as a trainable parameter and allocate its gradient tensor
// (same shape/type as the tensor itself). Must be called at most once per
// tensor: asserts that no gradient has been assigned yet.
void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor * tensor) {
    tensor->is_param = true;

    assert(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
}

// ggml_compute_forward_dup

// dup (copy) with an F16 source. Single-threaded (ith == 0 only).
// Only the fully-contiguous same-type case is implemented (one memcpy);
// any other layout aborts with a TODO.
void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0); // not parallelized
    assert(ggml_is_contiguous(dst));
    assert(ggml_nelements(dst) == ggml_nelements(src0));

    // no work in the INIT/FINALIZE phases
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    //const int ne00 = src0->ne[0];
    //const int ne01 = src0->ne[1];
    //const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    //const size_t nb00 = src0->nb[0];
    //const size_t nb01 = src0->nb[1];
    //const size_t nb02 = src0->nb[2];
    //const size_t nb03 = src0->nb[3];

    if (ggml_is_contiguous(src0) && src0->type == dst->type) {
        memcpy(dst->data, src0->data, ggml_nelements(dst) * GGML_TYPE_SIZE[src0->type]);
        return;
    }

    GGML_ASSERT(false); // TODO: implement
}

// dup (copy) with an F32 source. Single-threaded. Handles:
//   - contiguous same-type: one memcpy
//   - contiguous rows (nb[0] == sizeof(float)): per-row memcpy or
//     element-wise F32 -> F16 conversion
//   - fully strided source: slow element-wise fallback (with a warning)
void ggml_compute_forward_dup_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0); // not parallelized
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    // fast path: identical memory layout -> single memcpy
    if (ggml_is_contiguous(src0) && src0->type == dst->type) {
        memcpy(dst->data, src0->data, ggml_nelements(dst) * GGML_TYPE_SIZE[src0->type]);
        return;
    }

    if (src0->nb[0] == sizeof(float)) {
        // rows are contiguous, but may be strided between rows
        if (dst->type == GGML_TYPE_F32) {
            int id = 0;
            const size_t rs = ne00*nb00; // row size in bytes

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                        char * dst_ptr = (char *) dst->data + id*rs;

                        memcpy(dst_ptr, src0_ptr, rs);

                        id++;
                    }
                }
            }
        } else if (dst->type == GGML_TYPE_F16) {
            // element-wise conversion to half precision
            int id = 0;
            ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = ggml_fp32_to_fp16(*src0_ptr);
                            id++;
                        }
                    }
                }
            }
        } else {
            GGML_ASSERT(false); // TODO: implement
        }
    } else {
        // fully strided source: element-by-element copy (slow path)
        printf("%s: this is not optimal - fix me\n", __func__);

        if (dst->type == GGML_TYPE_F32) {
            int id = 0;
            float * dst_ptr = (float *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = *src0_ptr;
                            id++;
                        }
                    }
                }
            }
        } else if (dst->type == GGML_TYPE_F16) {
            int id = 0;
            ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = ggml_fp32_to_fp16(*src0_ptr);
                            id++;
                        }
                    }
                }
            }
        } else {
            GGML_ASSERT(false); // TODO: implement
        }
    }
}

// dispatch dup by source type; only F16 and F32 sources are implemented
void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add

// element-wise add, F32: dst = src0 + src1. Single-threaded; all three
// tensors must have the same shape. src0/dst rows must be contiguous;
// src1 may have a strided innermost dimension (handled element-wise).
void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        // src1 rows contiguous -> vectorized add, one row at a time
        for (int j = 0; j < n; j++) {
            ggml_vec_add_f32(nc,
                    (float *) ((char *) dst->data  + j*nb1),
                    (float *) ((char *) src0->data + j*nb01),
                    (float *) ((char *) src1->data + j*nb11));
        }
    } else {
        // src1 is not contiguous
        for (int j = 0; j < n; j++) {
            float * dst_ptr  = (float *) ((char *) dst->data  + j*nb1);
            float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
            for (int i = 0; i < nc; i++) {
                float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10);

                dst_ptr[i] = src0_ptr[i] + *src1_ptr;
            }
        }
    }
}

// dispatch add by src0 type; only F32 is implemented
void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor *
src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_sub

// element-wise subtract, F32: dst = src0 - src1. Single-threaded;
// same-shape operands with contiguous innermost dimension required.
void ggml_compute_forward_sub_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    // vectorized subtract, row by row
    for (int i = 0; i < n; i++) {
        ggml_vec_sub_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

// dispatch sub by src0 type; only F32 is implemented
void ggml_compute_forward_sub(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sub_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_mul

// element-wise multiply, F32: dst = src0 * src1. Single-threaded;
// same-shape operands with contiguous innermost dimension required.
void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    // vectorized multiply, row by row
    for (int i = 0; i < n; i++) {
        ggml_vec_mul_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

// dispatch mul by src0 type; only F32 is implemented
void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_div

// element-wise divide, F32: dst = src0 / src1. Single-threaded;
// same-shape operands with contiguous innermost dimension required.
// NOTE(review): no guard against division by zero — src1 elements are
// presumably expected to be non-zero.
void ggml_compute_forward_div_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    // vectorized divide, row by row
    for (int i = 0; i < n; i++) {
        ggml_vec_div_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

// dispatch div by src0 type; only F32 is implemented
void ggml_compute_forward_div(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_div_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_sqr

// element-wise square, F32: dst = src0^2. Single-threaded.
void ggml_compute_forward_sqr_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqr_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

// dispatch sqr by src0 type; only F32 is implemented
void ggml_compute_forward_sqr(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqr_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_sqrt

// element-wise square root, F32: dst = sqrt(src0). Single-threaded.
void ggml_compute_forward_sqrt_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqrt_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

// dispatch sqrt by src0 type; only F32 is implemented
void ggml_compute_forward_sqrt(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqrt_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_sum

// reduce to a scalar, F32: dst[0] = sum of all src0 elements.
// Single-threaded; dst must be a 1-element tensor.
void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(ggml_is_scalar(dst)); // (duplicates the assert above)
    assert(src0->nb[0] == sizeof(float));

    // accumulate one row at a time into the scalar output
    *(float *) (dst->data) = 0.0f;

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) (dst->data),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
            }
        }
    }
}

// dispatch sum by src0 type; only F32 is implemented
void ggml_compute_forward_sum(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_mean

// mean along the innermost dimension, F32: dst has ne[0] == 1 and the
// remaining dims equal to src0's. Single-threaded.
void ggml_compute_forward_mean_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    const int ne3 = dst->ne[3];

    // dst shape: reduce dim 0 to size 1, keep the rest
    assert(ne0 == 1);
    assert(ne1 == ne01);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    UNUSED(ne0);
    UNUSED(ne1);
    UNUSED(ne2);
    UNUSED(ne3);

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = 0; i01 < ne01; i01++) {
                // sum the row, then divide by the row length
                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) = 0.0f;

                ggml_vec_sum_f32(ne00,
                        (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));

                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
            }
        }
    }
}

// dispatch mean by src0 type; only F32 is implemented
void ggml_compute_forward_mean(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mean_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_repeat

// tile src0 to fill dst, F32 (the inverse of broadcasting for backprop).
// Single-threaded; rank <= 2 only for now.
void ggml_compute_forward_repeat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: implement support for rank > 2 tensors
    assert(src0->ne[2] == 1);
    assert(src0->ne[3] == 1);
    assert( dst->ne[2] == 1);
    assert( dst->ne[3] == 1);

    const int nc  = dst->ne[0];
    const int nr  = dst->ne[1];
    const int nc0 = src0->ne[0];
    const int nr0 = src0->ne[1];
    const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_can_repeat

    // TODO: support for transposed / permuted tensors
    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    // TODO: maybe this is not optimal?
    for (int i = 0; i < nrr; i++) {
        for (int j = 0; j < ncr; j++) {
            for (int k = 0; k < nr0; k++) {
                // copy source row k into repeat (i, j) of dst
                ggml_vec_cpy_f32(nc0,
                        (float *) ((char *)  dst->data + (i*nr0 + k)*( dst->nb[1]) + j*nc0*( dst->nb[0])),
                        (float *) ((char *) src0->data + (        k)*(src0->nb[1])));
            }
        }
    }
}

// dispatch repeat by src0 type; only F32 is implemented
void ggml_compute_forward_repeat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_abs

// element-wise absolute value, F32. Single-threaded.
void ggml_compute_forward_abs_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_abs_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

// dispatch abs by src0 type; only F32 is implemented
void ggml_compute_forward_abs(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_abs_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_sgn

// element-wise sign, F32. Single-threaded.
void ggml_compute_forward_sgn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sgn_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

// dispatch sgn by src0 type; only F32 is implemented
void ggml_compute_forward_sgn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sgn_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_neg

// element-wise negate, F32. Single-threaded.
void ggml_compute_forward_neg_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_neg_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

// dispatch neg by src0 type; only F32 is implemented
void ggml_compute_forward_neg(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_neg_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_step

// element-wise Heaviside step, F32. Single-threaded.
void ggml_compute_forward_step_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_step_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

// dispatch step by src0 type; only F32 is implemented
void ggml_compute_forward_step(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_step_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_relu

// element-wise ReLU, F32. Single-threaded.
void ggml_compute_forward_relu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_relu_f32(nc,
                (float *) ((char *) dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

// dispatch relu by src0 type; only F32 is implemented
void ggml_compute_forward_relu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_relu_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_gelu

// element-wise GELU, F32. Unlike the ops above this one IS multithreaded:
// rows are split evenly across nth threads, thread ith handles [ir0, ir1).
// Requires contiguous src0 and dst.
void ggml_compute_forward_gelu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_f32(nc,
                (float *) ((char *) dst->data  + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        // debug-only sanity check: GELU output must be finite
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

// dispatch gelu by src0 type; only F32 is implemented
void ggml_compute_forward_gelu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_norm

// layer normalization along the innermost dimension, F32:
// for each row, subtract the mean and scale by 1/sqrt(var + eps).
// Single-threaded. Mean/variance accumulated in ggml_float (double?)
// for accuracy — see the ggml_float typedef earlier in the file.
void ggml_compute_forward_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const ggml_float eps = 1e-5f; // TODO: make this a parameter

    // TODO: optimize
    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = 0; i01 < ne01; i01++) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                // pass 1: mean of the row
                ggml_float mean = 0.0;
                for (int i00 = 0; i00 < ne00; i00++) {
                    mean += x[i00];
                }

                mean /= ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                // pass 2: center the row, accumulating the variance sum
                ggml_float sum2 = 0.0;
                for (int i00 = 0; i00 < ne00; i00++) {
                    ggml_float v = x[i00] - mean;
                    y[i00] = v;
                    sum2 += v*v;
                }

                const float scale = 1.0/sqrt(sum2/ne00 + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

// dispatch norm by src0 type; only F32 is implemented
void ggml_compute_forward_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_norm_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_mul_mat

void ggml_compute_forward_mul_mat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor *
src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    // F32 matrix multiplication: dst = src0 * src1 (per ggml convention,
    // src0 rows dotted with src1 rows). Multithreaded. Two strategies:
    //   nb01 >= nb00 (src0 not transposed): each thread computes a range of
    //     src0 rows with ggml_vec_dot_f32; INIT/FINALIZE are no-ops.
    //   nb00 < nb01 (src0 transposed): each thread accumulates into its own
    //     slice of params->wdata with ggml_vec_mad_f32 (INIT zeroes wdata),
    //     and FINALIZE reduces all per-thread slices into dst.
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    const int ne12 = src1->ne[2];
    const int ne13 = src1->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    const int ne3 = dst->ne[3];
    const int ne  = ne0*ne1*ne2*ne3; // total number of dst elements

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    // batch dims of src0/src1/dst must match
    assert(ne02 == ne12);
    assert(ne03 == ne13);
    assert(ne2  == ne12);
    assert(ne3  == ne13);

    // TODO: we don't support permuted src0
    assert(nb00 == sizeof(float) || nb01 == sizeof(float));

    // dst cannot be transposed or permuted
    assert(nb0 == sizeof(float));
    assert(nb0 <= nb1);
    assert(nb1 <= nb2);
    assert(nb2 <= nb3);

    assert(ne0 == ne01);
    assert(ne1 == ne11);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows
    //
    // nb00 <  nb01 - src0 is transposed
    //   compute by src0 columns

    if (params->type == GGML_TASK_INIT) {
        if (nb01 >= nb00) {
            return; // row path needs no per-thread work buffer
        }

        // column path: zero the per-thread accumulation buffers
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        if (nb01 >= nb00) {
            return; // row path wrote directly into dst
        }

        // column path: reduce the per-thread wdata slices into dst
        // TODO: fix this memset (wsize is overestimated)
        //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth);

        float * const wdata = params->wdata;

        // cols per thread
        const int dc = (ne + nth - 1)/nth;

        // col range for this thread
        const int ic0 = dc*ith;
        const int ic1 = MIN(ic0 + dc, ne);

        // thread 0's slice is copied, the others are accumulated on top;
        // each per-thread slice is padded by CACHE_LINE_SIZE_F32 floats
        ggml_vec_cpy_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + ic0);

        for (int k = 1; k < nth; k++) {
            ggml_vec_acc_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + (ne + CACHE_LINE_SIZE_F32)*k + ic0);
        }

        return;
    }

//#ifdef GGML_USE_ACCELERATE
//    // try to use BLAS
//
//    if (nb01 >= nb00 && ne0 > 1024 && ne1 > 1024) {
//        if (params->ith != 0) return;
//        printf("XXXXXXXX\n");
//
//        GGML_ASSERT(ggml_is_contiguous(src0));
//        GGML_ASSERT(ggml_is_contiguous(src1));
//
//        printf("ne00 = %d, ne01 = %d, ne02 = %d, ne03 = %d\n", ne00, ne01, ne02, ne03);
//        printf("ne10 = %d, ne11 = %d, ne12 = %d, ne13 = %d\n", ne10, ne11, ne12, ne13);
//        printf("ne0 = %d, ne1 = %d, ne2 = %d, ne3 = %d\n", ne0, ne1, ne2, ne3);
//
//        printf("nb00 = %d, nb01 = %d, nb02 = %d, nb03 = %d\n", nb00, nb01, nb02, nb03);
//        printf("nb10 = %d, nb11 = %d, nb12 = %d, nb13 = %d\n", nb10, nb11, nb12, nb13);
//        printf("nb0 = %d, nb1 = %d, nb2 = %d, nb3 = %d\n", nb0, nb1, nb2, nb3);
//
//        float * const wdata = params->wdata;
//
//        int64_t tsum = 0.0;
//        for (int i03 = 0; i03 < ne03; i03++) {
//            for (int i02 = 0; i02 < ne02; i02++) {
//                const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
//                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
//                float * z = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
//
//                // transpose src1
//                for (int j = 0; j < ne11; ++j) {
//                    for (int i = 0; i < ne10; ++i) {
//                        wdata[i*ne11 + j] = y[j*ne10 + i];
//                    }
//                }
//
//                {
//                    const int64_t tt0 = ggml_time_us();
//                    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
//                            1500, 1500, 64,
//                            1.0, x, 64,
//                            wdata, 1500,
//                            0.0, z, 1500);
//                    const int64_t tt1 = ggml_time_us();
//                    tsum += tt1 - tt0;
//                }
//
//                // transpose z
//                for (int j = 0; j < ne1; ++j) {
//                    for (int i = 0; i < ne0; ++i) {
//                        wdata[i*ne1 + j] = z[j*ne0 + i];
//                    }
//                }
//
//                memcpy(z, wdata, ne0*ne1*sizeof(float));
//
//                //cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
//                //        ne0, ne1, 64,
//                //        1.0f,
//                //        x, ne00,
//                //        y, ne11,
//                //        0.0f,
//                //        z, 1500);
//            }
//        }
//        printf("time = %f ms\n", tsum/1000.0);
//        return;
//    } else {
//        //cblas_sgemv(CblasRowMajor, CblasTrans, ne00, ne01, 1.0, src0->data, ne01, src1->data, 1, 0.0, dst->data, 1);
//    }
//
//#endif


    if (nb01 >= nb00) {
        // TODO: do not support transposed src1
        assert(nb10 == sizeof(float));

        // parallelize by src0 rows using ggml_vec_dot_f32

        // total rows in src0
        const int nr = ne01*ne02*ne03;

        // rows per thread
        const int dr = (nr + nth - 1)/nth;

        // row range for this thread
        const int ir0 = dr*ith;
        const int ir1 = MIN(ir0 + dr, nr);

        for (int ir = ir0; ir < ir1; ++ir) {
            // src0 indices (unflatten ir into i03/i02/i01)
            const int i03 = ir/(ne02*ne01);
            const int i02 = (ir - i03*ne02*ne01)/ne01;
            const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

            for (int ic = 0; ic < ne11; ++ic) {
                // src1 indices
                const int i13 = i03;
                const int i12 = i02;
                const int i11 = ic;

                // dst indices
                const int i0 = i01;
                const int i1 = i11;
                const int i2 = i02;
                const int i3 = i03;

                ggml_vec_dot_f32(ne00,
                        (float *) ((char *)  dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)),
                        (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)));
            }
        }
    } else {
        // parallelize by src1 columns using ggml_vec_mad_f32
        // each thread has its own work data
        // during FINALIZE we accumulate all work data into dst

        // total columns in src1
        const int nc = ne10;

        // columns per thread
        const int dc = (nc + nth - 1)/nth;

        // column range for this thread
        const int ic0 = dc*ith;
        const int ic1 = MIN(ic0 + dc, nc);

        // work data for thread (offset in floats into wdata)
        const int wo = (ne + CACHE_LINE_SIZE_F32)*ith;
        float * const wdata = params->wdata;

        for (int i13 = 0; i13 < ne13; ++i13) {
            for (int i12 = 0; i12 < ne12; ++i12) {
                for (int i11 = 0; i11 < ne11; ++i11) {
                    for (int ic = ic0; ic < ic1; ++ic) {
                        // src1 indices
                        const int i10 = ic;

                        // src0 indices
                        const int i03 = i13;
                        const int i02 = i12;
                        const int i00 = ic;

                        // dst indices
                        const int i1 = i11;
                        const int i2 = i12;
                        const int i3 = i13;

                        assert(sizeof(float)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize);

                        // accumulate: wdata_row += src0_col * src1[ic]
                        ggml_vec_mad_f32(ne01,
                                (float *) (wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0),
                                (float *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)),
                               *(float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)));
                    }
                }
            }
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);

    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}

// mixed-precision matrix multiplication: F16 src0 with F32 src1.
// In the non-transposed path, INIT converts src1 to F16 into params->wdata
// so the inner loops can use half-precision dot products.
// (definition continues past this chunk)
void ggml_compute_forward_mul_mat_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    const int ne12 = src1->ne[2];
    const int ne13 = src1->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    const int ne3 = dst->ne[3];
    const int ne  = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    assert(ne02 == ne12);
    assert(ne03 == ne13);
    assert(ne2  == ne12);
    assert(ne3  == ne13);

    // TODO: we don't support permuted src0
    assert(nb00 == sizeof(ggml_fp16_t) || nb01 == sizeof(ggml_fp16_t));

    // dst cannot be transposed or permuted
    assert(nb0 == sizeof(float));
    assert(nb0 <= nb1);
    assert(nb1 <= nb2);
    assert(nb2 <= nb3);

    assert(ne0 == ne01);
    assert(ne1 == ne11);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows
    //
    // nb00 <  nb01 - src0 is transposed
    //   compute by src0 columns

    if (params->type == GGML_TASK_INIT) {
        if (nb01 >= nb00) {
            // convert all of src1 (F32) to F16 into the work buffer
            ggml_fp16_t * const wdata = params->wdata;

            int id = 0;
            for (int i13 = 0; i13 < ne13; ++i13) {
                for (int i12 = 0; i12 < ne12; ++i12) {
                    for (int i11 = 0; i11 < ne11; ++i11) {
                        for (int i10 = 0; i10 < ne10; ++i10) {
                            wdata[id++] = ggml_fp32_to_fp16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
                        }
                    }
                }
            }

            GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize);

            return;
        }

        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        if (nb01 >= nb00) {
            return;
        }

        // TODO: fix this memset (wsize is overestimated)
        //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth);

        ggml_fp16_t * const wdata = params->wdata;

        // cols per thread
        const int dc = (ne +
nth - 1)/nth; + + // col range for this thread + const int ic0 = dc*ith; + const int ic1 = MIN(ic0 + dc, ne); + + for (int i = ic0; i < ic1; ++i) { + ((float *) dst->data)[i] = ggml_fp16_to_fp32(wdata[i]); + } + + for (int k = 1; k < nth; k++) { + for (int i = ic0; i < ic1; ++i) { + ((float *) dst->data)[i] += ggml_fp16_to_fp32(wdata[(ne + CACHE_LINE_SIZE_F32)*k + i]); + } + } + + return; + } + + if (nb01 >= nb00) { + // fp16 -> half the size, so divide by 2 + // TODO: do not support transposed src1 + assert(nb10/2 == sizeof(ggml_fp16_t)); + + // parallelize by src0 rows using ggml_vec_dot_f32 + + // total rows in src0 + const int nr = ne01*ne02*ne03; + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + ggml_fp16_t * wdata = params->wdata; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i03 = ir/(ne02*ne01); + const int i02 = (ir - i03*ne02*ne01)/ne01; + const int i01 = (ir - i03*ne02*ne01 - i02*ne01); + + const int i13 = i03; + const int i12 = i02; + + const int i0 = i01; + const int i2 = i02; + const int i3 = i03; + + ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); + ggml_fp16_t * src1_col = wdata + (i13*ne12*ne11 + i12*ne11 + 0)*ne00; + + float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); + + for (int ic = 0; ic < ne11; ++ic) { + assert(ne00 % 32 == 0); + + ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00); + } + } + } else { + // parallelize by src1 columns using ggml_vec_mad_f32 + // each thread has its own work data + // during FINALIZE we accumulate all work data into dst + + // total columns in src1 + const int nc = ne10; + + // columns per thread + const int dc = (nc + nth - 1)/nth; + + // column range for this thread + const int ic0 = dc*ith; + const int ic1 = MIN(ic0 + dc, nc); + + // work data for thread + const int wo 
= (ne + CACHE_LINE_SIZE_F32)*ith; + ggml_fp16_t * const wdata = params->wdata; + + for (int i13 = 0; i13 < ne13; ++i13) { + for (int i12 = 0; i12 < ne12; ++i12) { + for (int i11 = 0; i11 < ne11; ++i11) { + // dst indices + const int i1 = i11; + const int i2 = i12; + const int i3 = i13; + + ggml_fp16_t * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0; + + for (int ic = ic0; ic < ic1; ++ic) { + // src1 indices + const int i10 = ic; + + // src0 indices + const int i03 = i13; + const int i02 = i12; + const int i00 = ic; + + assert(sizeof(ggml_fp16_t)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); + + ggml_fp16_t * src0_col = (ggml_fp16_t *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)); + float src1_val = * (float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); + + ggml_vec_mad_f16(ne01, dst_row, src0_col, src1_val); + } + } + } + } + } + + //int64_t t1 = ggml_time_us(); + //static int64_t acc = 0; + //acc += t1 - t0; + //if (t1 - t0 > 10) { + // printf("\n"); + // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); + // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); + // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); + + // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); + //} +} + +void ggml_compute_forward_mul_mat( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst); + } break; + case GGML_TYPE_F32: + { + ggml_compute_forward_mul_mat_f32(params, src0, src1, dst); + } break; + case GGML_TYPE_I8: + case GGML_TYPE_I16: + case GGML_TYPE_I32: + case GGML_TYPE_COUNT: + { + assert(false); + } break; + } +} + +// 
// ggml_compute_forward_scale

// Multiply every element by the scalar held in src1, writing into dst.
// NOTE(review): the data pointer passed to ggml_vec_scale_f32 is dst->data,
// not src0->data — this looks like an in-place op where dst is expected to
// alias src0 (e.g. via a view); confirm against the callers.
void ggml_compute_forward_scale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    // nothing to prepare or reduce for this op
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scale factor
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), v);
    }
}

// Type dispatcher for the SCALE op (F32 only).
void ggml_compute_forward_scale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_scale_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_cpy

// CPY is implemented as a duplication into the (possibly differently-typed) dst.
void ggml_compute_forward_cpy(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_reshape

// Reshape only changes tensor metadata; no data movement is required.
void ggml_compute_forward_reshape(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
    UNUSED(dst);
}

// ggml_compute_forward_view

// View only changes tensor metadata; no data movement is required.
void ggml_compute_forward_view(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_permute

// Permute only changes tensor metadata; no data movement is required.
void ggml_compute_forward_permute(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_transpose

// Transpose only changes tensor metadata; no data movement is required.
void ggml_compute_forward_transpose(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_get_rows

// Gather rows of the F16 matrix src0 selected by the int32 indices in src1,
// converting each element to F32 in dst. Single-threaded (ith == 0 only).
void ggml_compute_forward_get_rows_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        // element-wise copy with fp16 -> fp32 conversion
        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = ggml_fp16_to_fp32(v);
        }
    }
}

// Gather rows of the F32 matrix src0 selected by the int32 indices in src1.
// Single-threaded (ith == 0 only).
void ggml_compute_forward_get_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        // whole-row copy, no conversion needed
        ggml_vec_cpy_f32(nc,
                (float *) ((char *) dst->data + i*dst->nb[1]),
                (float *) ((char *) src0->data + r*src0->nb[1]));
    }
}

// Type dispatcher for the GET_ROWS op (F16 and F32).
void ggml_compute_forward_get_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_diag_mask_inf

// Causal attention mask: set dst elements above the diagonal (offset by
// n_past, taken from src1) to -INF. Writes only into dst->data.
// NOTE(review): src0's values are never read here — presumably dst aliases
// src0 so the op is in-place; confirm against the callers.
void ggml_compute_forward_diag_mask_inf_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(src1->type == GGML_TYPE_I32);
    assert(ggml_nelements(src1) == 1);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) src1->data)[0];

    // TODO: handle transposed/permuted matrices

    const int n = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;   // number of 2D slices

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int k = 0; k < nz; k++) {
        for (int j = 0; j < nr; j++) {
            // columns i with n_past <= i and i > n_past + j are "future"
            // positions for row j and get masked out
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = -INFINITY;
                }
            }
        }
    }
}

// Type dispatcher for the DIAG_MASK_INF op (F32 only).
void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

//
// ggml_compute_forward_soft_max

// Row-wise numerically-stable softmax, parallelized over rows.
// NOTE(review): the values are read from and written to dst->data, not
// src0->data — dst is presumably a view aliasing src0 (in-place softmax);
// confirm against the callers. -INFINITY inputs (from diag_mask_inf) map to 0.
void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float *p = (float *)((char *) dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(p[i]));
        }
#endif

        // subtract the row max before exponentiating for numerical stability
        float max = -INFINITY;
        for (int i = 0; i < nc; i++) {
            max = MAX(max, p[i]);
        }

        ggml_float sum = 0.0;
        for (int i = 0; i < nc; i++) {
            // exp(-INF - max) would be exp(-INF) = 0; short-circuit it
            const ggml_float v = (p[i] == -INFINITY) ? 0.0 : exp(p[i] - max);
            sum += v;
            p[i] = v;
        }

        assert(sum > 0.0f);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, p, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(p[i]));
            assert(!isinf(p[i]));
        }
#endif
    }
}

// Type dispatcher for the SOFT_MAX op (F32 only).
void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, src0, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_rope

// Rotary position embedding: rotate consecutive pairs (x0, x1) of the first
// n_dims elements of each row by an angle p*theta, where theta decays as
// 10000^(-i0/n_dims). n_past/n_dims/mode come from src1 (3 int32 values).
// Single-threaded (ith == 0 only).
void ggml_compute_forward_rope_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(src1->type == GGML_TYPE_I32);
    assert(ggml_nelements(src1) == 3);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode = ((int32_t *) src1->data)[2];

    //const int ne0 = src0->ne[0];
    const int ne1 = src0->ne[1];
    const int ne2 = src0->ne[2];
    const int ne3 = src0->ne[3];

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    const int nb3 = src0->nb[3];

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(float));

    // TODO: optimize
    for (int i3 = 0; i3 < ne3; i3++) {
        // mode 0: process all positions with absolute position n_past + i2;
        // otherwise: process only positions from n_past on, position = i2
        for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int p = (mode == 0 ? n_past + i2 : i2);
            for (int i1 = 0; i1 < ne1; i1++) {
                for (int i0 = 0; i0 < n_dims; i0 += 2) {
                    const double theta = pow(10000.0, ((double)-i0)/n_dims);

                    const double cos_theta = cos(p*theta);
                    const double sin_theta = sin(p*theta);

                    const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
                    float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

                    double x0 = src[0];
                    double x1 = src[1];

                    // 2D rotation of the (x0, x1) pair
                    dst_data[0] = x0*cos_theta - x1*sin_theta;
                    dst_data[1] = x0*sin_theta + x1*cos_theta;
                }
            }
        }
    }
}

// Type dispatcher for the ROPE op (F32 only).
void ggml_compute_forward_rope(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                assert(false);
            } break;
    }
}

// ggml_compute_forward_conv_1d_1s

// 1D convolution, stride 1, F16 kernel (src0) with F32 input (src1).
// INIT repacks kernel and (fp16-converted, nh-padded) input into wdata so
// that the inner product over channels is contiguous; the compute phase
// parallelizes over output rows (ne02 filters).
void ggml_compute_forward_conv_1d_1s_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;   // kernel size
    const int nh = nk/2;   // half-kernel ("same" padding)

    const int ew0 = ggml_up32(ne01);   // channel count rounded up to 32

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    // WHISPER
    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    // transpose to (tap, channel) layout, channel-contiguous
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                // shift by nh so negative kernel offsets stay in bounds
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = ggml_fp32_to_fp16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                // dot over the ew0 (padded-channel) dimension
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}

// 1D convolution, stride 1, all-F32 variant of the function above.
void ggml_compute_forward_conv_1d_1s_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;   // kernel size
    const int nh = nk/2;   // half-kernel ("same" padding)

    const int ew0 = ggml_up32(ne01);   // channel count rounded up to 32

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    // WHISPER
    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    // transpose to (tap, channel) layout, channel-contiguous
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                // shift by nh so negative kernel offsets stay in bounds
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                // dot over the ew0 (padded-channel) dimension
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}

// Type dispatcher for the CONV_1D_1S op (F16 kernel or F32 kernel).
void ggml_compute_forward_conv_1d_1s(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_1s_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_1s_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_conv_1d_2s

// 1D convolution, stride 2, F16 kernel (src0) with F32 input (src1).
// Identical to the 1s variant except the output loop advances i0 by 2 and
// writes to dst_data[i0/2] (output length is halved).
void ggml_compute_forward_conv_1d_2s_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;   // kernel size
    const int nh = nk/2;   // half-kernel ("same" padding)

    const int ew0 = ggml_up32(ne01);   // channel count rounded up to 32

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    // WHISPER
    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    // transpose to (tap, channel) layout, channel-contiguous
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                // shift by nh so negative kernel offsets stay in bounds
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = ggml_fp32_to_fp16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        // stride 2: only every other input position produces an output
        for (int i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                // dot over the ew0 (padded-channel) dimension
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}

// 1D convolution, stride 2, all-F32 variant of the function above.
void ggml_compute_forward_conv_1d_2s_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;   // kernel size
    const int nh = nk/2;   // half-kernel ("same" padding)

    const int ew0 = ggml_up32(ne01);   // channel count rounded up to 32

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    // WHISPER
    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    // transpose to (tap, channel) layout, channel-contiguous
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                // shift by nh so negative kernel offsets stay in bounds
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        // stride 2: only every other input position produces an output
        for (int i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                // dot over the ew0 (padded-channel) dimension
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}

// Type dispatcher for the CONV_1D_2S op (F16 kernel or F32 kernel).
void ggml_compute_forward_conv_1d_2s(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_2s_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_2s_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

/////////////////////////////////

// Top-level forward dispatcher: route a graph node to the compute routine
// for its op. Runs once per node per task phase (INIT/COMPUTE/FINALIZE).
void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    assert(params);

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                ggml_compute_forward_dup(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ADD:
            {
                ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SUB:
            {
                ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_MUL:
            {
                ggml_compute_forward_mul(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_DIV:
            {
                ggml_compute_forward_div(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SQR:
            {
                ggml_compute_forward_sqr(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SQRT:
            {
                ggml_compute_forward_sqrt(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SUM:
            {
                ggml_compute_forward_sum(params, tensor->src0, tensor);
            } break;
        case GGML_OP_MEAN:
            {
                ggml_compute_forward_mean(params, tensor->src0, tensor);
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_compute_forward_repeat(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ABS:
            {
                ggml_compute_forward_abs(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SGN:
            {
                ggml_compute_forward_sgn(params, tensor->src0, tensor);
            } break;
        case GGML_OP_NEG:
            {
                ggml_compute_forward_neg(params, tensor->src0, tensor);
            } break;
        case GGML_OP_STEP:
            {
                ggml_compute_forward_step(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RELU:
            {
                ggml_compute_forward_relu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_GELU:
            {
                ggml_compute_forward_gelu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_NORM:
            {
                ggml_compute_forward_norm(params, tensor->src0, tensor);
            } break;
        case GGML_OP_MUL_MAT:
            {
                ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SCALE:
            {
                ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CPY:
            {
                ggml_compute_forward_cpy(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RESHAPE:
            {
                ggml_compute_forward_reshape(params, tensor->src0, tensor);
            } break;
        case GGML_OP_VIEW:
            {
                ggml_compute_forward_view(params, tensor->src0);
            } break;
        case GGML_OP_PERMUTE:
            {
                ggml_compute_forward_permute(params, tensor->src0);
            } break;
        case GGML_OP_TRANSPOSE:
            {
                ggml_compute_forward_transpose(params, tensor->src0);
            } break;
        case GGML_OP_GET_ROWS:
            {
                ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SOFT_MAX:
            {
                ggml_compute_forward_soft_max(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ROPE:
            {
                ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CONV_1D_1S:
            {
                ggml_compute_forward_conv_1d_1s(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CONV_1D_2S:
            {
                ggml_compute_forward_conv_1d_2s(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                assert(false);
            } break;
    };
}
+//////////////////////////////////////////////////////////////////////////////// + +void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) { + struct ggml_tensor * src0 = tensor->src0; + struct ggml_tensor * src1 = tensor->src1; + + switch (tensor->op) { + case GGML_OP_DUP: + { + if (src0->grad) { + src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); + } + } break; + case GGML_OP_ADD: + { + if (src0->grad) { + src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); + } + if (src1->grad) { + src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace); + } + } break; + case GGML_OP_SUB: + { + if (src0->grad) { + src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); + } + if (src1->grad) { + src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace); + } + } break; + case GGML_OP_MUL: + { + if (src0->grad) { + src0->grad = + ggml_add_impl(ctx, + src0->grad, + ggml_mul(ctx, src1, tensor->grad), + inplace); + } + if (src1->grad) { + src1->grad = + ggml_add_impl(ctx, + src1->grad, + ggml_mul(ctx, src0, tensor->grad), + inplace); + } + } break; + case GGML_OP_DIV: + { + if (src0->grad) { + src0->grad = + ggml_add_impl(ctx, + src0->grad, + ggml_div(ctx, tensor->grad, src1), + inplace); + } + if (src1->grad) { + src1->grad = + ggml_sub_impl(ctx, + src1->grad, + ggml_mul(ctx, + tensor->grad, + ggml_div(ctx, tensor, src1)), + inplace); + } + } break; + case GGML_OP_SQR: + { + if (src0->grad) { + src0->grad = + ggml_add_impl(ctx, + src0->grad, + ggml_mul(ctx, + ggml_mul(ctx, src0, tensor->grad), + ggml_repeat(ctx, ggml_new_f32(ctx, 2.0f), src0)), + inplace); + } + } break; + case GGML_OP_SQRT: + { + if (src0->grad) { + src0->grad = + ggml_add_impl(ctx, + src0->grad, + ggml_div(ctx, + ggml_repeat(ctx, ggml_new_f32(ctx, 0.5f), tensor), + tensor), + inplace); + } + } break; + case GGML_OP_SUM: + { + if (src0->grad) { + src0->grad = + ggml_add_impl(ctx, + src0->grad, + 
ggml_repeat(ctx, tensor->grad, src0->grad), + inplace); + } + } break; + case GGML_OP_MEAN: + { + assert(false); // TODO: implement + } break; + case GGML_OP_REPEAT: + { + if (src0->grad) { + src0->grad = + ggml_add_impl(ctx, + src0->grad, + ggml_sum(ctx, tensor->grad), + inplace); + } + } break; + case GGML_OP_ABS: + { + if (src0->grad) { + src0->grad = + ggml_add_impl(ctx, + src0->grad, + ggml_mul(ctx, + ggml_sgn(ctx, src0), + tensor->grad), + inplace); + } + } break; + case GGML_OP_SGN: + { + if (src0->grad) { + // noop + } + } break; + case GGML_OP_NEG: + { + if (src0->grad) { + src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace); + } + } break; + case GGML_OP_STEP: + { + if (src0->grad) { + // noop + } + } break; + case GGML_OP_RELU: + { + if (src0->grad) { + src0->grad = ggml_sub_impl(ctx, + src0->grad, + ggml_mul(ctx, + ggml_step(ctx, src0), + tensor->grad), + inplace); + } + } break; + case GGML_OP_GELU: + { + assert(false); // TODO: not implemented + } break; + case GGML_OP_NORM: + { + assert(false); // TODO: not implemented + } break; + case GGML_OP_MUL_MAT: + { + if (src0->grad) { + // TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad); + assert(false); + } + if (src1->grad) { + src1->grad = + ggml_add_impl(ctx, + src1->grad, + // TODO: fix transpose, the node will break the graph connections + ggml_mul_mat(ctx, ggml_transpose(ctx, src0), tensor->grad), + inplace); + } + } break; + case GGML_OP_SCALE: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_CPY: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_RESHAPE: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_VIEW: + { + GGML_ASSERT(false); // not supported + } break; + case GGML_OP_PERMUTE: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_TRANSPOSE: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_GET_ROWS: + { + 
GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_DIAG_MASK_INF: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_SOFT_MAX: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_ROPE: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_CONV_1D_1S: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_CONV_1D_2S: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_NONE: + { + // nop + } break; + case GGML_OP_COUNT: + { + GGML_ASSERT(false); + } break; + }; +} + +void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { + if (node->grad == NULL) { + // this usually happens when we generate intermediate nodes from constants in the backward pass + // it can also happen during forward pass, if the user performs computations with constants + if (node->op != GGML_OP_NONE) { + //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op); + } + } + + // check if already visited + for (int i = 0; i < cgraph->n_nodes; i++) { + if (cgraph->nodes[i] == node) { + return; + } + } + + for (int i = 0; i < cgraph->n_leafs; i++) { + if (cgraph->leafs[i] == node) { + return; + } + } + + if (node->src0) { + ggml_visit_parents(cgraph, node->src0); + } + + if (node->src1) { + ggml_visit_parents(cgraph, node->src1); + } + + if (node->op == GGML_OP_NONE && node->grad == NULL) { + // reached a leaf node, not part of the gradient graph (e.g. 
a constant) + assert(cgraph->n_leafs < GGML_MAX_NODES); + + cgraph->leafs[cgraph->n_leafs] = node; + cgraph->n_leafs++; + } else { + assert(cgraph->n_nodes < GGML_MAX_NODES); + + cgraph->nodes[cgraph->n_nodes] = node; + cgraph->grads[cgraph->n_nodes] = node->grad; + cgraph->n_nodes++; + } +} + +void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) { + if (!expand) { + cgraph->n_nodes = 0; + cgraph->n_leafs = 0; + } + + const int n0 = cgraph->n_nodes; + UNUSED(n0); + + ggml_visit_parents(cgraph, tensor); + + const int n_new = cgraph->n_nodes - n0; + GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new); + + if (n_new > 0) { + // the last added node should always be starting point + assert(cgraph->nodes[cgraph->n_nodes - 1] == tensor); + } +} + +void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) { + ggml_build_forward_impl(cgraph, tensor, true); +} + +struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) { + struct ggml_cgraph result = { + /*.n_nodes =*/ 0, + /*.n_leafs =*/ 0, + /*.n_threads =*/ 0, + /*.work_size =*/ 0, + /*.work =*/ NULL, + /*.nodes =*/ { NULL }, + /*.grads =*/ { NULL }, + /*.leafs =*/ { NULL }, + /*.perf_runs =*/ 0, + /*.perf_cycles =*/ 0, + /*.perf_time_us =*/ 0, + }; + + ggml_build_forward_impl(&result, tensor, false); + + return result; +} + +struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) { + struct ggml_cgraph result = *gf; + + assert(gf->n_nodes > 0); + + // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph + if (keep) { + for (int i = 0; i < gf->n_nodes; i++) { + struct ggml_tensor * node = gf->nodes[i]; + + if (node->grad) { + node->grad = ggml_dup_tensor(ctx, node); + gf->grads[i] = node->grad; + } + } + } + + for (int i = gf->n_nodes - 1; i >= 0; i--) { + struct ggml_tensor * node = gf->nodes[i]; + + // because we detached the grad 
nodes from the original graph, we can afford inplace operations + if (node->grad) { + ggml_compute_backward(ctx, node, keep); + } + } + + for (int i = gf->n_nodes - 1; i >= 0; i--) { + struct ggml_tensor * node = gf->nodes[i]; + + if (node->is_param) { + GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node); + ggml_build_forward_impl(&result, node->grad, true); + } + } + + return result; +} + +// +// thread data +// +// synchronization is done via busy loops +// I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops +// + +#ifdef __APPLE__ + +//#include + +//typedef os_unfair_lock ggml_lock_t; +// +//#define ggml_lock_init(x) UNUSED(x) +//#define ggml_lock_destroy(x) UNUSED(x) +//#define ggml_lock_lock os_unfair_lock_lock +//#define ggml_lock_unlock os_unfair_lock_unlock +// +//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT + +typedef int ggml_lock_t; + +#define ggml_lock_init(x) UNUSED(x) +#define ggml_lock_destroy(x) UNUSED(x) +#define ggml_lock_lock(x) UNUSED(x) +#define ggml_lock_unlock(x) UNUSED(x) + +#define GGML_LOCK_INITIALIZER 0 + +#else + +//typedef pthread_spinlock_t ggml_lock_t; + +//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE) +//#define ggml_lock_destroy pthread_spin_destroy +//#define ggml_lock_lock pthread_spin_lock +//#define ggml_lock_unlock pthread_spin_unlock + +typedef int ggml_lock_t; + +#define ggml_lock_init(x) UNUSED(x) +#define ggml_lock_destroy(x) UNUSED(x) +#define ggml_lock_lock(x) UNUSED(x) +#define ggml_lock_unlock(x) UNUSED(x) + +#define GGML_LOCK_INITIALIZER 0 + +#endif + +struct ggml_compute_state_shared { + ggml_lock_t spin; + + int n_threads; + + // synchronization primitives + atomic_int n_ready; + atomic_bool has_work; + atomic_bool stop; // stop all threads +}; + +struct ggml_compute_state { + pthread_t thrd; + + struct ggml_compute_params params; + struct ggml_tensor * node; + + struct ggml_compute_state_shared * shared; 
+}; + +// function used by each compute thread +void * ggml_graph_compute_one(void * data) { + struct ggml_compute_state * state = (struct ggml_compute_state *) data; + + ggml_compute_forward(&state->params, state->node); + + return NULL; +} + +void * ggml_graph_compute_thread(void * data) { + struct ggml_compute_state * state = (struct ggml_compute_state *) data; + + const int n_threads = state->shared->n_threads; + + while (true) { + if (atomic_fetch_add(&state->shared->n_ready, 1) == n_threads - 1) { + atomic_store(&state->shared->has_work, false); + } else { + while (atomic_load(&state->shared->has_work)) { + if (atomic_load(&state->shared->stop)) { + return NULL; + } + ggml_lock_lock (&state->shared->spin); + ggml_lock_unlock(&state->shared->spin); + } + } + + atomic_fetch_sub(&state->shared->n_ready, 1); + + // wait for work + while (!atomic_load(&state->shared->has_work)) { + if (atomic_load(&state->shared->stop)) { + return NULL; + } + ggml_lock_lock (&state->shared->spin); + ggml_lock_unlock(&state->shared->spin); + } + + // check if we should stop + if (atomic_load(&state->shared->stop)) { + break; + } + + if (state->node) { + ggml_compute_forward(&state->params, state->node); + state->node = NULL; + } else { + break; + } + } + + return NULL; +} + +void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) { + if (cgraph->n_threads <= 0) { + cgraph->n_threads = 8; + } + + const int n_threads = cgraph->n_threads; + + struct ggml_compute_state_shared state_shared = { + /*.spin =*/ GGML_LOCK_INITIALIZER, + /*.n_threads =*/ n_threads, + /*.n_ready =*/ 0, + /*.has_work =*/ false, + /*.stop =*/ false, + }; + struct ggml_compute_state * workers = n_threads > 1 ? 
alloca(sizeof(struct ggml_compute_state)*(n_threads - 1)) : NULL; + + // create thread pool + if (n_threads > 1) { + ggml_lock_init(&state_shared.spin); + + atomic_store(&state_shared.has_work, true); + + for (int j = 0; j < n_threads - 1; j++) { + workers[j] = (struct ggml_compute_state) { + .thrd = 0, + .params = { + .type = GGML_TASK_COMPUTE, + .ith = j + 1, + .nth = n_threads, + .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, + .wdata = cgraph->work ? cgraph->work->data : NULL, + }, + .node = NULL, + .shared = &state_shared, + }; + int rc = pthread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]); + assert(rc == 0); + UNUSED(rc); + } + } + + // initialize tasks + work buffer + { + size_t work_size = 0; + + // thread scheduling for the different operations + for (int i = 0; i < cgraph->n_nodes; i++) { + struct ggml_tensor * node = cgraph->nodes[i]; + + switch (node->op) { + case GGML_OP_DUP: + case GGML_OP_ADD: + case GGML_OP_SUB: + case GGML_OP_MUL: + case GGML_OP_DIV: + case GGML_OP_SQR: + case GGML_OP_SQRT: + case GGML_OP_SUM: + case GGML_OP_MEAN: + case GGML_OP_REPEAT: + case GGML_OP_ABS: + case GGML_OP_SGN: + case GGML_OP_NEG: + case GGML_OP_STEP: + case GGML_OP_RELU: + { + node->n_tasks = 1; + } break; + case GGML_OP_GELU: + { + node->n_tasks = MIN(n_threads, ggml_nrows(node->src0)); + } break; + case GGML_OP_NORM: + { + node->n_tasks = 1; + } break; + case GGML_OP_MUL_MAT: + { + // TODO: use different scheduling for different matrix sizes + node->n_tasks = n_threads; + + size_t cur = 0; + + // TODO: better way to determine if the matrix is transposed + if (node->src0->nb[1] < node->src0->nb[0]) { + cur = ggml_nbytes(node)*node->n_tasks; // TODO: this can become (n_tasks-1) + } else { + if (node->src0->type == GGML_TYPE_F16 && + node->src1->type == GGML_TYPE_F32) { + cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1); + } else if (node->src0->type == GGML_TYPE_F32 && + node->src1->type == GGML_TYPE_F32) { + cur = 0; + } else 
{ + GGML_ASSERT(false); + } + } + + work_size = MAX(work_size, cur); + } break; + case GGML_OP_SCALE: + { + node->n_tasks = MIN(n_threads, ggml_nrows(node->src0)); + } break; + case GGML_OP_CPY: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + case GGML_OP_GET_ROWS: + case GGML_OP_DIAG_MASK_INF: + { + node->n_tasks = 1; + } break; + case GGML_OP_SOFT_MAX: + { + node->n_tasks = MIN(n_threads, ggml_nrows(node->src0)); + } break; + case GGML_OP_ROPE: + { + node->n_tasks = 1; + } break; + case GGML_OP_CONV_1D_1S: + case GGML_OP_CONV_1D_2S: + { + // WHISPER + node->n_tasks = n_threads; + + GGML_ASSERT(node->src0->ne[3] == 1); + GGML_ASSERT(node->src1->ne[2] == 1); + GGML_ASSERT(node->src1->ne[3] == 1); + + size_t cur = 0; + const int nk = node->src0->ne[0]; + + if (node->src0->type == GGML_TYPE_F16 && + node->src1->type == GGML_TYPE_F32) { + cur = sizeof(ggml_fp16_t)*( + nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] + + ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1] + ); + } else if (node->src0->type == GGML_TYPE_F32 && + node->src1->type == GGML_TYPE_F32) { + cur = sizeof(float)*( + nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] + + ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1] + ); + } else { + GGML_ASSERT(false); + } + + work_size = MAX(work_size, cur); + } break; + case GGML_OP_NONE: + { + node->n_tasks = 1; + } break; + case GGML_OP_COUNT: + { + assert(false); + } break; + }; + } + + if (cgraph->work != NULL && work_size > cgraph->work_size) { + assert(false); // TODO: better handling + } + + if (work_size > 0 && cgraph->work == NULL) { + cgraph->work_size = work_size + CACHE_LINE_SIZE*(n_threads - 1); + + GGML_PRINT_DEBUG("%s: allocating work buffer for graph (%zu bytes)\n", __func__, cgraph->work_size); + cgraph->work = ggml_new_tensor_1d(ctx, GGML_TYPE_I8, cgraph->work_size); + } + } + + const int64_t perf_start_cycles = ggml_perf_cycles(); + const int64_t perf_start_time_us = ggml_perf_time_us(); + + 
for (int i = 0; i < cgraph->n_nodes; i++) { + GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, i, cgraph->n_nodes); + + struct ggml_tensor * node = cgraph->nodes[i]; + + // TODO: this could be used to avoid unnecessary computations, but it needs to be improved + //if (node->grad == NULL && node->perf_runs > 0) { + // continue; + //} + + const int64_t perf_node_start_cycles = ggml_perf_cycles(); + const int64_t perf_node_start_time_us = ggml_perf_time_us(); + + // INIT + struct ggml_compute_params params = { + /*.type =*/ GGML_TASK_INIT, + /*.ith =*/ 0, + /*.nth =*/ n_threads, + /*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0, + /*.wdata =*/ cgraph->work ? cgraph->work->data : NULL, + }; + + ggml_compute_forward(¶ms, node); + + // COMPUTE + if (node->n_tasks > 1) { + if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { + atomic_store(&state_shared.has_work, false); + } + + while (atomic_load(&state_shared.has_work)) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + + // launch thread pool + for (int j = 0; j < n_threads - 1; j++) { + workers[j].params = (struct ggml_compute_params) { + .type = GGML_TASK_COMPUTE, + .ith = j + 1, + .nth = n_threads, + .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, + .wdata = cgraph->work ? 
cgraph->work->data : NULL, + }; + workers[j].node = node; + } + + atomic_fetch_sub(&state_shared.n_ready, 1); + + while (atomic_load(&state_shared.n_ready) > 0) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + + atomic_store(&state_shared.has_work, true); + } + + params.type = GGML_TASK_COMPUTE; + ggml_compute_forward(¶ms, node); + + // wait for thread pool + if (node->n_tasks > 1) { + if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { + atomic_store(&state_shared.has_work, false); + } + + while (atomic_load(&state_shared.has_work)) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + + atomic_fetch_sub(&state_shared.n_ready, 1); + + while (atomic_load(&state_shared.n_ready) != 0) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + } + + // FINALIZE + if (node->n_tasks > 1) { + if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { + atomic_store(&state_shared.has_work, false); + } + + while (atomic_load(&state_shared.has_work)) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + + // launch thread pool + for (int j = 0; j < n_threads - 1; j++) { + workers[j].params = (struct ggml_compute_params) { + .type = GGML_TASK_FINALIZE, + .ith = j + 1, + .nth = n_threads, + .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, + .wdata = cgraph->work ? 
cgraph->work->data : NULL, + }; + workers[j].node = node; + } + + atomic_fetch_sub(&state_shared.n_ready, 1); + + while (atomic_load(&state_shared.n_ready) > 0) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + + atomic_store(&state_shared.has_work, true); + } + + params.type = GGML_TASK_FINALIZE; + ggml_compute_forward(¶ms, node); + + // wait for thread pool + if (node->n_tasks > 1) { + if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { + atomic_store(&state_shared.has_work, false); + } + + while (atomic_load(&state_shared.has_work)) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + + atomic_fetch_sub(&state_shared.n_ready, 1); + + while (atomic_load(&state_shared.n_ready) != 0) { + ggml_lock_lock (&state_shared.spin); + ggml_lock_unlock(&state_shared.spin); + } + } + + // performance stats (node) + { + int64_t perf_cycles_cur = ggml_perf_cycles() - perf_node_start_cycles; + int64_t perf_time_us_cur = ggml_perf_time_us() - perf_node_start_time_us; + + node->perf_runs++; + node->perf_cycles += perf_cycles_cur; + node->perf_time_us += perf_time_us_cur; + } + } + + // join thread pool + if (n_threads > 1) { + atomic_store(&state_shared.stop, true); + atomic_store(&state_shared.has_work, true); + + for (int j = 0; j < n_threads - 1; j++) { + int rc = pthread_join(workers[j].thrd, NULL); + assert(rc == 0); + UNUSED(rc); + } + + ggml_lock_destroy(&state_shared.spin); + } + + // performance stats (graph) + { + int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles; + int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us; + + cgraph->perf_runs++; + cgraph->perf_cycles += perf_cycles_cur; + cgraph->perf_time_us += perf_time_us_cur; + + GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n", + __func__, cgraph->perf_runs, + (double) perf_cycles_cur / (double) ggml_cycles_per_ms(), + (double) cgraph->perf_cycles / (double) 
ggml_cycles_per_ms() / (double) cgraph->perf_runs, + (double) perf_time_us_cur / 1000.0, + (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs); + } +} + +void ggml_graph_reset(struct ggml_cgraph * cgraph) { + for (int i = 0; i < cgraph->n_nodes; i++) { + struct ggml_tensor * grad = cgraph->grads[i]; + + if (grad) { + ggml_set_zero(grad); + } + } +} + +void ggml_graph_print(const struct ggml_cgraph * cgraph) { + int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0}; + + GGML_PRINT("=== GRAPH ===\n"); + + GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads); + GGML_PRINT_DEBUG("total work size = %zu bytes\n",cgraph->work_size); + + GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes); + for (int i = 0; i < cgraph->n_nodes; i++) { + struct ggml_tensor * node = cgraph->nodes[i]; + + perf_total_per_op_us[node->op] += node->perf_time_us; + + GGML_PRINT(" - %3d: [ %6d, %6d] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", + i, + node->ne[0], node->ne[1], + GGML_OP_LABEL[node->op], node->is_param ? "x" : node->grad ? 
"g" : " ", node->perf_runs, + (double) node->perf_cycles / (double) ggml_cycles_per_ms(), + (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs, + (double) node->perf_time_us / 1000.0, + (double) node->perf_time_us / 1000.0 / node->perf_runs); + } + + GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs); + for (int i = 0; i < cgraph->n_leafs; i++) { + struct ggml_tensor * node = cgraph->leafs[i]; + + GGML_PRINT(" - %3d: [ %6d, %6d] %8s\n", + i, + node->ne[0], node->ne[1], + GGML_OP_LABEL[node->op]); + } + + for (int i = 0; i < GGML_OP_COUNT; i++) { + GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", GGML_OP_LABEL[i], (double) perf_total_per_op_us[i] / 1000.0); + } + + GGML_PRINT("========================================\n"); +} + +// check if node is part of the graph +bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { + if (cgraph == NULL) { + return true; + } + + for (int i = 0; i < cgraph->n_nodes; i++) { + if (cgraph->nodes[i] == node) { + return true; + } + } + + return false; +} + +struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { + for (int i = 0; i < cgraph->n_nodes; i++) { + struct ggml_tensor * parent = cgraph->nodes[i]; + + if (parent->grad == node) { + return parent; + } + } + + return NULL; +} + +void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) { + char color[16]; + + FILE * fp = fopen(filename, "w"); + assert(fp); + + fprintf(fp, "digraph G {\n"); + fprintf(fp, " newrank = true;\n"); + fprintf(fp, " rankdir = LR;\n"); + + for (int i = 0; i < gb->n_nodes; i++) { + struct ggml_tensor * node = gb->nodes[i]; + + if (ggml_graph_get_parent(gb, node) != NULL) { + continue; + } + + if (node->is_param) { + snprintf(color, sizeof(color), "yellow"); + } else if (node->grad) { + if (ggml_graph_find(gf, node)) { + snprintf(color, sizeof(color), "green"); + } else { + 
snprintf(color, sizeof(color), "lightblue"); + } + } else { + snprintf(color, sizeof(color), "white"); + } + + fprintf(fp, " \"%p\" [ \ +style = filled; fillcolor = %s; shape = record; \ +label=\"%d [%d, %d] | %s", + (void *) node, color, + i, node->ne[0], node->ne[1], + GGML_OP_SYMBOL[node->op]); + + if (node->grad) { + fprintf(fp, " | %s\"; ]\n", GGML_OP_SYMBOL[node->grad->op]); + } else { + fprintf(fp, "\"; ]\n"); + } + } + + for (int i = 0; i < gb->n_leafs; i++) { + struct ggml_tensor * node = gb->leafs[i]; + + snprintf(color, sizeof(color), "pink"); + + if (ggml_nelements(node) == 1) { + fprintf(fp, " \"%p\" [ \ +style = filled; fillcolor = %s; shape = record; \ +label=\"%.1e\"; ]\n", + (void *) node, color, ggml_get_f32_1d(node, 0)); + } else { + fprintf(fp, " \"%p\" [ \ +style = filled; fillcolor = %s; shape = record; \ +label=\"CONST %d [%d, %d]\"; ]\n", + (void *) node, color, + i, node->ne[0], node->ne[1]); + } + } + + for (int i = 0; i < gb->n_nodes; i++) { + struct ggml_tensor * node = gb->nodes[i]; + + struct ggml_tensor * parent = ggml_graph_get_parent(gb, node); + + if (node->src0) { + struct ggml_tensor * parent0 = ggml_graph_get_parent(gb, node->src0); + + fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"x\"; ]\n", + parent0 ? (void *) parent0 : (void *) node->src0, + parent0 ? "g" : "x", + parent ? (void *) parent : (void *) node, + parent ? "g" : "x", + parent ? "empty" : "vee", + parent ? "dashed" : "solid"); + } + + if (node->src1) { + struct ggml_tensor * parent1 = ggml_graph_get_parent(gb, node->src1); + + fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"y\"; ]\n", + parent1 ? (void *) parent1 : (void *) node->src1, + parent1 ? "g" : "x", + parent ? (void *) parent : (void *) node, + parent ? "g" : "x", + parent ? "empty" : "vee", + parent ? 
"dashed" : "solid"); + } + } + + for (int i = 0; i < gb->n_leafs; i++) { + struct ggml_tensor * node = gb->leafs[i]; + + if (node->src0) { + fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"x\"; ]\n", + (void *) node->src0, "x", + (void *) node, "x"); + } + + if (node->src1) { + fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"y\"; ]\n", + (void *) node->src1, "x", + (void *) node, "x"); + } + } + + fprintf(fp, "}\n"); + + fclose(fp); + + GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename); +} + +//////////////////////////////////////////////////////////////////////////////// + +void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) { + int i = 0; + for (int p = 0; p < np; ++p) { + const int ne = ggml_nelements(ps[p]) ; + // TODO: add function to set tensor from array + for (int j = 0; j < ne; ++j) { + ggml_set_f32_1d(ps[p], j, x[i++]); + } + } +} + +void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) { + int i = 0; + for (int p = 0; p < np; ++p) { + const int ne = ggml_nelements(ps[p]) ; + // TODO: add function to get all elements at once + for (int j = 0; j < ne; ++j) { + x[i++] = ggml_get_f32_1d(ps[p], j); + } + } +} + +void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) { + int i = 0; + for (int p = 0; p < np; ++p) { + const int ne = ggml_nelements(ps[p]) ; + // TODO: add function to get all elements at once + for (int j = 0; j < ne; ++j) { + g[i++] = ggml_get_f32_1d(ps[p]->grad, j); + } + } +} + +// +// ADAM +// +// ref: https://arxiv.org/pdf/1412.6980.pdf +// + +enum ggml_opt_result ggml_opt_adam( + struct ggml_context * ctx, + struct ggml_opt_params params, + struct ggml_tensor * f, + struct ggml_cgraph * gf, + struct ggml_cgraph * gb) { + assert(ggml_is_scalar(f)); + + gf->n_threads = params.n_threads; + gb->n_threads = params.n_threads; + + // these will store the parameters we want to optimize + struct ggml_tensor * 
ps[GGML_MAX_PARAMS]; + + int np = 0; + int nx = 0; + for (int i = 0; i < gf->n_nodes; ++i) { + if (gf->nodes[i]->is_param) { + GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); + + assert(np < GGML_MAX_PARAMS); + + ps[np++] = gf->nodes[i]; + nx += ggml_nelements(gf->nodes[i]); + } + } + + // constants + const float alpha = params.adam.alpha; + const float beta1 = params.adam.beta1; + const float beta2 = params.adam.beta2; + const float eps = params.adam.eps; + + float * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // view of the parameters + float * g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient + float * g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient squared + float * m = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment + float * v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment + float * mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment hat + float * vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment hat + + float * pf = params.past > 0 ? 
ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values + + // initialize + ggml_vec_set_f32(nx, m, 0.0f); + ggml_vec_set_f32(nx, v, 0.0f); + + // update view + ggml_opt_get_params(np, ps, x); + + // compute the function value + ggml_graph_reset (gf); + ggml_set_f32 (f->grad, 1.0f); + ggml_graph_compute(ctx, gb); + + float fx_prev = ggml_get_f32_1d(f, 0); + if (pf) { + pf[0] = fx_prev; + } + + int n_no_improvement = 0; + float fx_best = fx_prev; + + // run the optimizer + for (int t = 0; t < params.adam.n_iter; ++t) { + GGML_PRINT_DEBUG ("=== iter %d ===\n", t); + + GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0)); + GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0)); + GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0)); + + for (int i = 0; i < np; ++i) { + GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i, + ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0)); + } + + const int64_t t_start_wall = ggml_time_us(); + const int64_t t_start_cpu = ggml_cycles(); + UNUSED(t_start_wall); + UNUSED(t_start_cpu); + + { + // update the gradient + ggml_opt_get_grad(np, ps, g1); + + // m_t = beta1*m_t-1 + (1 - beta1)*g_t + ggml_vec_scale_f32(nx, m, beta1); + ggml_vec_mad_f32 (nx, m, g1, 1.0f - beta1); + + // g2 = g1^2 + ggml_vec_sqr_f32 (nx, g2, g1); + + // v_t = beta2*v_t-1 + (1 - beta2)*g_t^2 + ggml_vec_scale_f32(nx, v, beta2); + ggml_vec_mad_f32 (nx, v, g2, 1.0f - beta2); + + // m^hat = m_t / (1 - beta1^t) + // v^hat = v_t / (1 - beta2^t) + // x_t = x_t-1 - alpha*m^hat/(sqrt(v^hat) + eps) + ggml_vec_cpy_f32 (nx, mh, m); + ggml_vec_cpy_f32 (nx, vh, v); + + ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, t + 1))); + ggml_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, t + 1))); + + ggml_vec_sqrt_f32 (nx, vh, vh); + ggml_vec_acc1_f32 (nx, vh, eps); + + ggml_vec_div_f32 (nx, mh, mh, vh); + ggml_vec_sub_f32 (nx, x, x, mh); + + // update the parameters + ggml_opt_set_params(np, ps, 
x); + } + + ggml_graph_reset (gf); + ggml_set_f32 (f->grad, 1.0f); + ggml_graph_compute(ctx, gb); + + const float fx = ggml_get_f32_1d(f, 0); + + // check convergence + if (fabsf(fx - fx_prev)/fx < params.adam.eps_f) { + GGML_PRINT_DEBUG("converged\n"); + + return GGML_OPT_OK; + } + + // delta-based convergence test + if (pf != NULL) { + // need at least params.past iterations to start checking for convergence + if (params.past <= t) { + const float rate = (pf[t%params.past] - fx)/fx; + + if (fabs(rate) < params.delta) { + return GGML_OPT_OK; + } + } + + pf[t%params.past] = fx; + } + + // check for improvement + if (params.max_no_improvement > 0) { + if (fx_best > fx) { + fx_best = fx; + n_no_improvement = 0; + } else { + ++n_no_improvement; + + if (n_no_improvement >= params.max_no_improvement) { + return GGML_OPT_OK; + } + } + } + + fx_prev = fx; + + { + const int64_t t_end_cpu = ggml_cycles(); + GGML_PRINT_DEBUG("time iter: %5.3f s\n", (t_end_cpu - t_start_cpu)/CLOCKS_PER_SEC); + UNUSED(t_end_cpu); + + const int64_t t_end_wall = ggml_time_us(); + GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6); + UNUSED(t_end_wall); + } + } + + return GGML_OPT_DID_NOT_CONVERGE; +} + +// +// L-BFGS +// +// the L-BFGS implementation below is based on the following implementation: +// +// https://github.com/chokkan/liblbfgs +// + +struct ggml_lbfgs_iteration_data { + float alpha; + float ys; + float * s; + float * y; +}; + +static enum ggml_opt_result linesearch_backtracking( + struct ggml_context * ctx, + const struct ggml_opt_params * params, + int nx, + float * x, + float * fx, + float * g, + float * d, + float * step, + const float * xp, + struct ggml_tensor * f, + struct ggml_cgraph * gf, + struct ggml_cgraph * gb, + const int np, + struct ggml_tensor * ps[]) { + int count = 0; + + float width = 0.0f; + float dg = 0.0f; + float finit = 0.0f; + float dginit = 0.0f; + float dgtest = 0.0f; + + const float dec = 0.5f; + const float inc = 2.1f; + + if 
(*step <= 0.) { + return GGML_LINESEARCH_INVALID_PARAMETERS; + } + + // compute the initial gradient in the search direction + ggml_vec_dot_f32(nx, &dginit, g, d); + + // make sure that d points to a descent direction + if (0 < dginit) { + return GGML_LINESEARCH_FAIL; + } + + // initialize local variables + finit = *fx; + dgtest = params->lbfgs.ftol*dginit; + + while (true) { + ggml_vec_cpy_f32(nx, x, xp); + ggml_vec_mad_f32(nx, x, d, *step); + + // evaluate the function and gradient values + { + ggml_opt_set_params(np, ps, x); + + ggml_graph_reset (gf); + ggml_set_f32 (f->grad, 1.0f); + ggml_graph_compute(ctx, gb); + + ggml_opt_get_grad(np, ps, g); + + *fx = ggml_get_f32_1d(f, 0); + } + + ++count; + + if (*fx > finit + (*step)*dgtest) { + width = dec; + } else { + // Armijo condition is satisfied + if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) { + return count; + } + + ggml_vec_dot_f32(nx, &dg, g, d); + + // check the Wolfe condition + if (dg < params->lbfgs.wolfe * dginit) { + width = inc; + } else { + if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) { + // regular Wolfe conditions + return count; + } + + if(dg > -params->lbfgs.wolfe*dginit) { + width = dec; + } else { + // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) + return count; + } + return count; + } + } + + if (*step < params->lbfgs.min_step) { + return GGML_LINESEARCH_MINIMUM_STEP; + } + if (*step > params->lbfgs.max_step) { + return GGML_LINESEARCH_MAXIMUM_STEP; + } + if (params->lbfgs.max_linesearch <= count) { + return GGML_LINESEARCH_MAXIMUM_ITERATIONS; + } + + (*step) *= width; + } + + return GGML_LINESEARCH_FAIL; +} + +enum ggml_opt_result ggml_opt_lbfgs( + struct ggml_context * ctx, + struct ggml_opt_params params, + struct ggml_tensor * f, + struct ggml_cgraph * gf, + struct ggml_cgraph * gb) { + if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE || + params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) 
{ + if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1. <= params.lbfgs.wolfe) { + return GGML_OPT_INVALID_WOLFE; + } + } + + gf->n_threads = params.n_threads; + gb->n_threads = params.n_threads; + + const int m = params.lbfgs.m; + + // these will store the parameters we want to optimize + struct ggml_tensor * ps[GGML_MAX_PARAMS]; + + int np = 0; + int nx = 0; + for (int i = 0; i < gf->n_nodes; ++i) { + if (gf->nodes[i]->is_param) { + GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); + + assert(np < GGML_MAX_PARAMS); + + ps[np++] = gf->nodes[i]; + nx += ggml_nelements(gf->nodes[i]); + } + } + + float * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current parameters + float * xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous parameters + float * g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current gradient + float * gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous gradient + float * d = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // search direction + + float * pf = params.past > 0 ? 
ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values + + float fx = 0.0f; // cost function value + float xnorm = 0.0f; // ||x|| + float gnorm = 0.0f; // ||g|| + float step = 0.0f; + + // initialize x from the graph nodes + ggml_opt_get_params(np, ps, x); + + // the L-BFGS memory + struct ggml_lbfgs_iteration_data * lm = alloca(sizeof(struct ggml_lbfgs_iteration_data)*m); + + for (int i = 0; i < m; ++i) { + lm[i].alpha = 0.0f; + lm[i].ys = 0.0f; + lm[i].s = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; + lm[i].y = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; + } + + // evaluate the function value and its gradient + { + ggml_opt_set_params(np, ps, x); + + ggml_graph_reset (gf); + ggml_set_f32 (f->grad, 1.0f); + ggml_graph_compute(ctx, gb); + + ggml_opt_get_grad(np, ps, g); + + fx = ggml_get_f32_1d(f, 0); + } + + if (pf) { + pf[0] = fx; + } + + float fx_best = fx; + + // search direction = -gradient + ggml_vec_neg_f32(nx, d, g); + + // ||x||, ||g|| + ggml_vec_norm_f32(nx, &xnorm, x); + ggml_vec_norm_f32(nx, &gnorm, g); + + if (xnorm < 1.0f) { + xnorm = 1.0f; + } + + // already optimized + if (gnorm/xnorm <= params.lbfgs.eps) { + return GGML_OPT_OK; + } + + // initial step + ggml_vec_norm_inv_f32(nx, &step, d); + + int j = 0; + int k = 1; + int ls = 0; + int end = 0; + int bound = 0; + int n_no_improvement = 0; + + float ys = 0.0f; + float yy = 0.0f; + float beta = 0.0f; + + while (true) { + // store the current position and gradient vectors + ggml_vec_cpy_f32(nx, xp, x); + ggml_vec_cpy_f32(nx, gp, g); + + ls = linesearch_backtracking(ctx, ¶ms, nx, x, &fx, g, d, &step, xp, f, gf, gb, np, ps); + + if (ls < 0) { + // linesearch failed - go back to the previous point and return + ggml_vec_cpy_f32(nx, x, xp); + ggml_vec_cpy_f32(nx, g, gp); + + return ls; + } + + ggml_vec_norm_f32(nx, &xnorm, x); + ggml_vec_norm_f32(nx, &gnorm, g); + + GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0)); + + if (xnorm < 1.0) { + xnorm = 1.0; 
+ } + if (gnorm/xnorm <= params.lbfgs.eps) { + // converged + return GGML_OPT_OK; + } + + // delta-based convergence test + if (pf != NULL) { + // need at least params.past iterations to start checking for convergence + if (params.past <= k) { + const float rate = (pf[k%params.past] - fx)/fx; + + if (fabs(rate) < params.delta) { + return GGML_OPT_OK; + } + } + + pf[k%params.past] = fx; + } + + // check for improvement + if (params.max_no_improvement > 0) { + if (fx < fx_best) { + fx_best = fx; + n_no_improvement = 0; + } else { + n_no_improvement++; + + if (n_no_improvement >= params.max_no_improvement) { + return GGML_OPT_OK; + } + } + } + + if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < k + 1) { + // reached the maximum number of iterations + return GGML_OPT_DID_NOT_CONVERGE; + } + + // update vectors s and y: + // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}. + // y_{k+1} = g_{k+1} - g_{k}. + // + ggml_vec_sub_f32(nx, lm[end].s, x, xp); + ggml_vec_sub_f32(nx, lm[end].y, g, gp); + + // compute scalars ys and yy: + // ys = y^t \cdot s -> 1 / \rho. + // yy = y^t \cdot y. + // + ggml_vec_dot_f32(nx, &ys, lm[end].y, lm[end].s); + ggml_vec_dot_f32(nx, &yy, lm[end].y, lm[end].y); + + lm[end].ys = ys; + + // find new search direction + // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS + + bound = (m <= k) ? 
m : k; + k++; + end = (end + 1)%m; + + // initialize search direction with -g + ggml_vec_neg_f32(nx, d, g); + + j = end; + for (int i = 0; i < bound; ++i) { + j = (j + m - 1) % m; + // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1} + ggml_vec_dot_f32(nx, &lm[j].alpha, lm[j].s, d); + lm[j].alpha /= lm[j].ys; + // q_{i} = q_{i+1} - \alpha_{i} y_{i} + ggml_vec_mad_f32(nx, d, lm[j].y, -lm[j].alpha); + } + + ggml_vec_scale_f32(nx, d, ys/yy); + + for (int i = 0; i < bound; ++i) { + // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i} + ggml_vec_dot_f32(nx, &beta, lm[j].y, d); + beta /= lm[j].ys; + // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j} + ggml_vec_mad_f32(nx, d, lm[j].s, lm[j].alpha - beta); + j = (j + 1)%m; + } + + step = 1.0; + } + + return GGML_OPT_DID_NOT_CONVERGE; +} + +struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { + struct ggml_opt_params result; + + switch (type) { + case GGML_OPT_ADAM: + { + result = (struct ggml_opt_params) { + .type = GGML_OPT_ADAM, + .n_threads = 1, + .past = 0, + .delta = 1e-5f, + + .max_no_improvement = 100, + + .print_forward_graph = true, + .print_backward_graph = true, + + .adam = { + .n_iter = 10000, + .alpha = 0.001f, + .beta1 = 0.9f, + .beta2 = 0.999f, + .eps = 1e-8f, + .eps_f = 1e-5f, + .eps_g = 1e-3f, + }, + }; + } break; + case GGML_OPT_LBFGS: + { + result = (struct ggml_opt_params) { + .type = GGML_OPT_LBFGS, + .n_threads = 1, + .past = 0, + .delta = 1e-5f, + + .max_no_improvement = 0, + + .print_forward_graph = true, + .print_backward_graph = true, + + .lbfgs = { + .m = 6, + .n_iter = 100, + .max_linesearch = 20, + + .eps = 1e-5f, + .ftol = 1e-4f, + .wolfe = 0.9f, + .min_step = 1e-20f, + .max_step = 1e+20f, + + .linesearch = GGML_LINESEARCH_DEFAULT, + }, + }; + } break; + } + + return result; +} + +enum ggml_opt_result ggml_opt( + struct ggml_context * ctx, + struct ggml_opt_params params, + struct ggml_tensor * f) { + bool free_ctx = false; + if (ctx == NULL) { + struct 
ggml_init_params params_ctx = { + .mem_size = 16*1024*1024, + .mem_buffer = NULL, + }; + + ctx = ggml_init(params_ctx); + if (ctx == NULL) { + return GGML_OPT_NO_CONTEXT; + } + + free_ctx = true; + } + + enum ggml_opt_result result = GGML_OPT_OK; + + // build forward + backward compute graphs + struct ggml_cgraph gf = ggml_build_forward (f); + struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false); + + switch (params.type) { + case GGML_OPT_ADAM: + { + result = ggml_opt_adam(ctx, params, f, &gf, &gb); + } break; + case GGML_OPT_LBFGS: + { + result = ggml_opt_lbfgs(ctx, params, f, &gf, &gb); + } break; + } + + if (params.print_forward_graph) { + ggml_graph_print (&gf); + ggml_graph_dump_dot(&gf, NULL, "opt-forward.dot"); + } + + if (params.print_backward_graph) { + ggml_graph_print (&gb); + ggml_graph_dump_dot(&gb, &gf, "opt-backward.dot"); + } + + if (free_ctx) { + ggml_free(ctx); + } + + return result; +} + +//////////////////////////////////////////////////////////////////////////////// diff --git a/ggml.h b/ggml.h new file mode 100644 index 0000000..1078fbe --- /dev/null +++ b/ggml.h @@ -0,0 +1,527 @@ +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +#define GGML_MAX_DIMS 4 +#define GGML_MAX_NODES 4096 +#define GGML_MAX_PARAMS 16 +#define GGML_MAX_CONTEXTS 16 + +#ifdef __ARM_NEON +// we use the built-in 16-bit float type +typedef __fp16 ggml_fp16_t; +#else +typedef uint16_t ggml_fp16_t; +#endif + +float ggml_fp16_to_fp32(ggml_fp16_t x); +ggml_fp16_t ggml_fp32_to_fp16(float x); + +struct ggml_object; +struct ggml_context; + +enum ggml_type { + GGML_TYPE_I8, + GGML_TYPE_I16, + GGML_TYPE_I32, + GGML_TYPE_F16, + GGML_TYPE_F32, + GGML_TYPE_COUNT, +}; + +enum ggml_op { + GGML_OP_NONE = 0, + + GGML_OP_DUP, + GGML_OP_ADD, + GGML_OP_SUB, + GGML_OP_MUL, + GGML_OP_DIV, + GGML_OP_SQR, + GGML_OP_SQRT, + GGML_OP_SUM, + GGML_OP_MEAN, + GGML_OP_REPEAT, + GGML_OP_ABS, + GGML_OP_SGN, + GGML_OP_NEG, + GGML_OP_STEP, + GGML_OP_RELU, + 
GGML_OP_GELU, + GGML_OP_NORM, // normalize + + GGML_OP_MUL_MAT, + + GGML_OP_SCALE, + GGML_OP_CPY, + GGML_OP_RESHAPE, + GGML_OP_VIEW, + GGML_OP_PERMUTE, + GGML_OP_TRANSPOSE, + GGML_OP_GET_ROWS, + GGML_OP_DIAG_MASK_INF, + GGML_OP_SOFT_MAX, + GGML_OP_ROPE, + GGML_OP_CONV_1D_1S, + GGML_OP_CONV_1D_2S, + + GGML_OP_COUNT, +}; + +// n-dimensional tensor +struct ggml_tensor { + enum ggml_type type; + + int n_dims; + int ne[GGML_MAX_DIMS]; // number of elements + size_t nb[GGML_MAX_DIMS]; // stride in bytes: + // nb[0] = sizeof(type) + // nb[1] = nb[0] * ne[0] + padding + // nb[i] = nb[i-1] * ne[i-1] + + // compute data + enum ggml_op op; + + bool is_param; + + struct ggml_tensor * grad; + struct ggml_tensor * src0; + struct ggml_tensor * src1; + + // thread scheduling + int n_tasks; + + // performance + int perf_runs; + int64_t perf_cycles; + int64_t perf_time_us; + + void * data; + char pad[8]; +}; + +// computation graph +struct ggml_cgraph { + int n_nodes; + int n_leafs; + int n_threads; + + size_t work_size; + struct ggml_tensor * work; + + struct ggml_tensor * nodes[GGML_MAX_NODES]; + struct ggml_tensor * grads[GGML_MAX_NODES]; + struct ggml_tensor * leafs[GGML_MAX_NODES]; + + // performance + int perf_runs; + int64_t perf_cycles; + int64_t perf_time_us; +}; + +struct ggml_init_params { + // memory pool + size_t mem_size; // bytes + void * mem_buffer; // if NULL, memory will be allocated internally +}; + +int64_t ggml_time_ms(void); +int64_t ggml_time_us(void); +int64_t ggml_cycles(void); +int64_t ggml_cycles_per_ms(void); + +void ggml_print_object (const struct ggml_object * obj); +void ggml_print_objects(const struct ggml_context * ctx); + +int ggml_nelements(const struct ggml_tensor * tensor); +size_t ggml_nbytes (const struct ggml_tensor * tensor); + +size_t ggml_type_size (enum ggml_type type); +size_t ggml_element_size(const struct ggml_tensor * tensor); + +struct ggml_context * ggml_init(struct ggml_init_params params); +void ggml_free(struct ggml_context * 
ctx); + +size_t ggml_used_mem(const struct ggml_context * ctx); + +struct ggml_tensor * ggml_new_tensor( + struct ggml_context * ctx, + enum ggml_type type, + int n_dims, + const int *ne); + +struct ggml_tensor * ggml_new_tensor_1d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0); + +struct ggml_tensor * ggml_new_tensor_2d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0, + int ne1); + +struct ggml_tensor * ggml_new_tensor_3d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0, + int ne1, + int ne2); + +struct ggml_tensor * ggml_new_tensor_4d( + struct ggml_context * ctx, + enum ggml_type type, + int ne0, + int ne1, + int ne2, + int ne3); + +struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); + +struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); +struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src); + +struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); +struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value); + +float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i); +void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value); + + void * ggml_get_data (const struct ggml_tensor * tensor); +float * ggml_get_data_f32(const struct ggml_tensor * tensor); + +// +// operations on tensors with backpropagation +// + +struct ggml_tensor * ggml_dup( + struct ggml_context * ctx, + struct ggml_tensor * a); + +struct ggml_tensor * ggml_add( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +struct ggml_tensor * ggml_sub( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +struct ggml_tensor * ggml_mul( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +struct ggml_tensor * ggml_div( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + 
+struct ggml_tensor * ggml_sqr( + struct ggml_context * ctx, + struct ggml_tensor * a); + +struct ggml_tensor * ggml_sqrt( + struct ggml_context * ctx, + struct ggml_tensor * a); + +// return scalar +// TODO: compute sum along rows +struct ggml_tensor * ggml_sum( + struct ggml_context * ctx, + struct ggml_tensor * a); + +// mean along rows +struct ggml_tensor * ggml_mean( + struct ggml_context * ctx, + struct ggml_tensor * a); + +// if a is the same shape as b, and a is not parameter, return a +// otherwise, return a new tensor: repeat(a) to fit in b +struct ggml_tensor * ggml_repeat( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +struct ggml_tensor * ggml_abs( + struct ggml_context * ctx, + struct ggml_tensor * a); + +struct ggml_tensor * ggml_sgn( + struct ggml_context * ctx, + struct ggml_tensor * a); + +struct ggml_tensor * ggml_neg( + struct ggml_context * ctx, + struct ggml_tensor * a); + +struct ggml_tensor * ggml_step( + struct ggml_context * ctx, + struct ggml_tensor * a); + +struct ggml_tensor * ggml_relu( + struct ggml_context * ctx, + struct ggml_tensor * a); + +// TODO: double-check this computation is correct +struct ggml_tensor * ggml_gelu( + struct ggml_context * ctx, + struct ggml_tensor * a); + +// normalize along rows +// TODO: eps is hardcoded to 1e-5 for now +struct ggml_tensor * ggml_norm( + struct ggml_context * ctx, + struct ggml_tensor * a); + +// A: m rows, n columns +// B: p rows, n columns (i.e. 
we transpose it internally) +// result is m columns, p rows +struct ggml_tensor * ggml_mul_mat( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +// +// operations on tensors without backpropagation +// + +// in-place, returns view(a) +struct ggml_tensor * ggml_scale( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +// a -> b, return view(b) +struct ggml_tensor * ggml_cpy( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +// return view(a), b specifies the new shape +// TODO: when we start computing gradient, make a copy instead of view +struct ggml_tensor * ggml_reshape( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +// return view(a) +// TODO: when we start computing gradient, make a copy instead of view +struct ggml_tensor * ggml_reshape_2d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1); + +// return view(a) +// TODO: when we start computing gradient, make a copy instead of view +struct ggml_tensor * ggml_reshape_3d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + int ne2); + +// offset in bytes +struct ggml_tensor * ggml_view_1d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + size_t offset); + +struct ggml_tensor * ggml_view_2d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + size_t nb1, // row stride in bytes + size_t offset); + +struct ggml_tensor * ggml_permute( + struct ggml_context * ctx, + struct ggml_tensor * a, + int axis0, + int axis1, + int axis2, + int axis3); + +// alias for ggml_permute(ctx, a, 1, 0, 2, 3) +struct ggml_tensor * ggml_transpose( + struct ggml_context * ctx, + struct ggml_tensor * a); + +struct ggml_tensor * ggml_get_rows( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +// set elements above the diagonal to -INF +// in-place, returns view(a) 
+struct ggml_tensor * ggml_diag_mask_inf( + struct ggml_context * ctx, + struct ggml_tensor * a, + int n_past); + +// in-place, returns view(a) +struct ggml_tensor * ggml_soft_max( + struct ggml_context * ctx, + struct ggml_tensor * a); + +// rotary position embedding +// in-place, returns view(a) +// if mode == 1, skip n_past elements +// TODO: avoid creating a new tensor every time +struct ggml_tensor * ggml_rope( + struct ggml_context * ctx, + struct ggml_tensor * a, + int n_past, + int n_dims, + int mode); + +// padding = 1 +// TODO: we don't support extra parameters for now +// that's why we are hard-coding the stride, padding, and dilation +// not great .. +struct ggml_tensor * ggml_conv_1d_1s( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +struct ggml_tensor * ggml_conv_1d_2s( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + +// +// automatic differentiation +// + +void ggml_set_param( + struct ggml_context * ctx, + struct ggml_tensor * tensor); + +void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); + +struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); +struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); + +void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph); +void ggml_graph_reset (struct ggml_cgraph * cgraph); + +// print info and performance information for the graph +void ggml_graph_print(const struct ggml_cgraph * cgraph); + +// dump the graph into a file using the dot format +void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename); + +// +// optimization +// + +// optimization methods +enum ggml_opt_type { + GGML_OPT_ADAM, + GGML_OPT_LBFGS, +}; + +// linesearch methods +enum ggml_linesearch { + GGML_LINESEARCH_DEFAULT = 1, + + GGML_LINESEARCH_BACKTRACKING_ARMIJO = 0, + 
GGML_LINESEARCH_BACKTRACKING_WOLFE = 1, + GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2, +}; + +// optimization return values +enum ggml_opt_result { + GGML_OPT_OK = 0, + GGML_OPT_DID_NOT_CONVERGE, + GGML_OPT_NO_CONTEXT, + GGML_OPT_INVALID_WOLFE, + GGML_OPT_FAIL, + + GGML_LINESEARCH_FAIL = -128, + GGML_LINESEARCH_MINIMUM_STEP, + GGML_LINESEARCH_MAXIMUM_STEP, + GGML_LINESEARCH_MAXIMUM_ITERATIONS, + GGML_LINESEARCH_INVALID_PARAMETERS, +}; + +// optimization parameters +// +// see ggml.c (ggml_opt_default_params) for default values +// +struct ggml_opt_params { + enum ggml_opt_type type; + + int n_threads; + + // delta-based convergence test + // + // if past == 0 - disabled + // if past > 0: + // stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|) + // + int past; + float delta; + + // maximum number of iterations without improvement + // + // if 0 - disabled + // if > 0: + // assume convergence if no cost improvement in this number of iterations + // + int max_no_improvement; + + bool print_forward_graph; + bool print_backward_graph; + + union { + // ADAM parameters + struct { + int n_iter; + + float alpha; // learning rate + float beta1; + float beta2; + float eps; // epsilon for numerical stability + float eps_f; // epsilon for convergence test + float eps_g; // epsilon for convergence test + } adam; + + // LBFGS parameters + struct { + int m; // number of corrections to approximate the inv. 
Hessian + int n_iter; + int max_linesearch; + + float eps; // convergence tolerance + float ftol; // line search tolerance + float wolfe; + float min_step; + float max_step; + + enum ggml_linesearch linesearch; + } lbfgs; + }; +}; + +struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type); + +// optimize the function defined by the tensor f +enum ggml_opt_result ggml_opt( + struct ggml_context * ctx, + struct ggml_opt_params params, + struct ggml_tensor * f); + +#ifdef __cplusplus +} +#endif diff --git a/main.cpp b/main.cpp new file mode 100644 index 0000000..acbaa91 --- /dev/null +++ b/main.cpp @@ -0,0 +1,2116 @@ +#include "ggml.h" + +// third-party utilities +// use your favorite implementations +#define DR_WAV_IMPLEMENTATION +#include "dr_wav.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum e_model { + MODEL_UNKNOWN, + MODEL_TINY, + MODEL_BASE, + MODEL_SMALL, + MODEL_MEDIUM, + MODEL_LARGE, +}; + +const size_t MB = 1024*1024; + +const std::map MEM_REQ_MODEL = { + { MODEL_TINY, 100ull*MB }, + { MODEL_BASE, 190ull*MB }, + { MODEL_SMALL, 610ull*MB }, + { MODEL_MEDIUM, 1900ull*MB }, + { MODEL_LARGE, 3600ull*MB }, +}; + +const std::map MEM_REQ_ENCODE = { + { MODEL_TINY, 80ull*MB }, + { MODEL_BASE, 128ull*MB }, + { MODEL_SMALL, 300ull*MB }, + { MODEL_MEDIUM, 680ull*MB }, + { MODEL_LARGE, 1100ull*MB }, +}; + +const std::map MEM_REQ_ENCODE_LAYER = { + { MODEL_TINY, 170ull*MB }, + { MODEL_BASE, 230ull*MB }, + { MODEL_SMALL, 350ull*MB }, + { MODEL_MEDIUM, 450ull*MB }, + { MODEL_LARGE, 570ull*MB }, +}; + +const std::map MEM_REQ_DECODE = { + { MODEL_TINY, 190ull*MB }, + { MODEL_BASE, 190ull*MB }, + { MODEL_SMALL, 190ull*MB }, + { MODEL_MEDIUM, 200ull*MB }, + { MODEL_LARGE, 200ull*MB }, +}; + +const std::map MEM_REQ_DECODE_LAYER = { + { MODEL_TINY, 32ull*MB }, + { MODEL_BASE, 44ull*MB }, + { MODEL_SMALL, 64ull*MB }, + { MODEL_MEDIUM, 84ull*MB }, + { MODEL_LARGE, 110ull*MB }, +}; + +const int 
SAMPLE_RATE = 16000; +const int N_FFT = 400; +const int N_MEL = 80; +const int HOP_LENGTH = 160; +const int CHUNK_SIZE = 30; // seconds + +struct whisper_mel { + int n_len; + int n_mel; + + std::vector data; +}; + +struct whisper_filters { + int32_t n_mel; + int32_t n_fft; + + std::vector data; +}; + +struct whisper_vocab { + using id = int32_t; + using token = std::string; + + int n_vocab = 51864; + + std::map token_to_id; + std::map id_to_token; + + id token_eot = 50256; + id token_sot = 50257; + id token_prev = 50360; + id token_solm = 50361; // ?? + id token_beg = 50363; + + bool is_multilingual() const { + return n_vocab == 51865; + } +}; + +// command-line parameters +struct whisper_params { + int32_t seed = -1; // RNG seed + int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); + + int32_t max_tokens_per_iter = 64; + + bool verbose = false; + bool print_special_tokens = false; + + std::string model = "models/whisper-tiny.en/ggml-model.bin"; // model path + + std::string fname_inp = "default.wav"; +}; + +void whisper_print_usage(int argc, char ** argv, const whisper_params & params); + +bool whisper_params_parse(int argc, char ** argv, whisper_params & params) { + for (int i = 1; i < argc; i++) { + std::string arg = argv[i]; + + if (arg == "-s" || arg == "--seed") { + params.seed = std::stoi(argv[++i]); + } else if (arg == "-t" || arg == "--threads") { + params.n_threads = std::stoi(argv[++i]); + } else if (arg == "-T" || arg == "--tokens") { + params.max_tokens_per_iter = std::stoi(argv[++i]); + } else if (arg == "-v" || arg == "--verbose") { + params.verbose = true; + } else if (arg == "-ps" || arg == "--print_special") { + params.print_special_tokens = true; + } else if (arg == "-m" || arg == "--model") { + params.model = argv[++i]; + } else if (arg == "-f" || arg == "--file") { + params.fname_inp = argv[++i]; + } else if (arg == "-h" || arg == "--help") { + whisper_print_usage(argc, argv, params); + exit(0); + } else { + 
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); + whisper_print_usage(argc, argv, params); + exit(0); + } + } + + return true; +} + +void whisper_print_usage(int argc, char ** argv, const whisper_params & params) { + fprintf(stderr, "usage: %s [options]\n", argv[0]); + fprintf(stderr, "\n"); + fprintf(stderr, "options:\n"); + fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n"); + fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + fprintf(stderr, " -T N, --tokens N maximum number of tokens to generate per iteration (default: %d)\n", params.max_tokens_per_iter); + fprintf(stderr, " -v, --verbose verbose output\n"); + fprintf(stderr, " -ps, --print_special print special tokens\n"); + fprintf(stderr, " -m FNAME, --model FNAME\n"); + fprintf(stderr, " model path (default: %s)\n", params.model.c_str()); + fprintf(stderr, " -f FNAME, --file FNAME\n"); + fprintf(stderr, " input WAV file path (default: %s)\n", params.fname_inp.c_str()); + fprintf(stderr, "\n"); +} + + +// medium +// hparams: { +// 'n_mels': 80, +// 'n_vocab': 51864, +// 'n_audio_ctx': 1500, +// 'n_audio_state': 1024, +// 'n_audio_head': 16, +// 'n_audio_layer': 24, +// 'n_text_ctx': 448, +// 'n_text_state': 1024, +// 'n_text_head': 16, +// 'n_text_layer': 24 +// } +// +// default hparams (Whisper tiny) +struct whisper_hparams { + int32_t n_vocab = 51864; + int32_t n_audio_ctx = 1500; + int32_t n_audio_state = 384; + int32_t n_audio_head = 6; + int32_t n_audio_layer = 4; + int32_t n_text_ctx = 448; + int32_t n_text_state = 384; + int32_t n_text_head = 6; + int32_t n_text_layer = 4; + int32_t n_mels = 80; + int32_t f16 = 1; +}; + +// audio encoding layer +struct whisper_layer_encoder { + // encoder.blocks.*.attn_ln + struct ggml_tensor * attn_ln_0_w; + struct ggml_tensor * attn_ln_0_b; + + // encoder.blocks.*.attn.out + struct ggml_tensor * 
attn_ln_1_w; + struct ggml_tensor * attn_ln_1_b; + + // encoder.blocks.*.attn.query + struct ggml_tensor * attn_q_w; + struct ggml_tensor * attn_q_b; + + // encoder.blocks.*.attn.key + struct ggml_tensor * attn_k_w; + + // encoder.blocks.*.attn.value + struct ggml_tensor * attn_v_w; + struct ggml_tensor * attn_v_b; + + // encoder.blocks.*.mlp_ln + struct ggml_tensor * mlp_ln_w; + struct ggml_tensor * mlp_ln_b; + + // encoder.blocks.*.mlp.0 + struct ggml_tensor * mlp_0_w; + struct ggml_tensor * mlp_0_b; + + // encoder.blocks.*.mlp.2 + struct ggml_tensor * mlp_1_w; + struct ggml_tensor * mlp_1_b; +}; + +// token decoding layer +struct whisper_layer_decoder { + // decoder.blocks.*.attn_ln + struct ggml_tensor * attn_ln_0_w; + struct ggml_tensor * attn_ln_0_b; + + // decoder.blocks.*.attn.out + struct ggml_tensor * attn_ln_1_w; + struct ggml_tensor * attn_ln_1_b; + + // decoder.blocks.*.attn.query + struct ggml_tensor * attn_q_w; + struct ggml_tensor * attn_q_b; + + // decoder.blocks.*.attn.key + struct ggml_tensor * attn_k_w; + + // decoder.blocks.*.attn.value + struct ggml_tensor * attn_v_w; + struct ggml_tensor * attn_v_b; + + // decoder.blocks.*.cross_attn_ln + struct ggml_tensor * cross_attn_ln_0_w; + struct ggml_tensor * cross_attn_ln_0_b; + + // decoder.blocks.*.cross_attn.out + struct ggml_tensor * cross_attn_ln_1_w; + struct ggml_tensor * cross_attn_ln_1_b; + + // decoder.blocks.*.cross_attn.query + struct ggml_tensor * cross_attn_q_w; + struct ggml_tensor * cross_attn_q_b; + + // decoder.blocks.*.cross_attn.key + struct ggml_tensor * cross_attn_k_w; + + // decoder.blocks.*.cross_attn.value + struct ggml_tensor * cross_attn_v_w; + struct ggml_tensor * cross_attn_v_b; + + // decoder.blocks.*.mlp_ln + struct ggml_tensor * mlp_ln_w; + struct ggml_tensor * mlp_ln_b; + + // decoder.blocks.*.mlp.0 + struct ggml_tensor * mlp_0_w; + struct ggml_tensor * mlp_0_b; + + // decoder.blocks.*.mlp.2 + struct ggml_tensor * mlp_1_w; + struct ggml_tensor * mlp_1_b; +}; + +struct 
whisper_model { + e_model type = MODEL_UNKNOWN; + + whisper_hparams hparams; + whisper_filters filters; + + // encoder.positional_embedding + struct ggml_tensor * e_pe; + + // encoder.conv1 + struct ggml_tensor * e_conv_1_w; + struct ggml_tensor * e_conv_1_b; + + // encoder.conv2 + struct ggml_tensor * e_conv_2_w; + struct ggml_tensor * e_conv_2_b; + + // encoder.ln_post + struct ggml_tensor * e_ln_w; + struct ggml_tensor * e_ln_b; + + // decoder.positional_embedding + struct ggml_tensor * d_pe; // DD + + // decoder.token_embedding + struct ggml_tensor * d_te; // DD + + // decoder.ln + struct ggml_tensor * d_ln_w; // DD + struct ggml_tensor * d_ln_b; // DD + + std::vector layers_encoder; + std::vector layers_decoder; + + // key + value memory + struct ggml_tensor * memory_k; + struct ggml_tensor * memory_v; + + struct ggml_tensor * memory_cross_k; + struct ggml_tensor * memory_cross_v; + + // + struct ggml_context * ctx; + std::map tensors; +}; + +// load the model from a ggml file +// +// file format: +// +// - hparams +// - pre-computed mel filters +// - vocab +// - weights +// +// see the convert-pt-to-ggml.py script for details +// +bool whisper_model_load(const std::string & fname, whisper_model & model, whisper_vocab & vocab) { + printf("%s: loading model from '%s'\n", __func__, fname.c_str()); + + auto fin = std::ifstream(fname, std::ios::binary); + if (!fin) { + fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); + return false; + } + + // verify magic + { + uint32_t magic; + fin.read((char *) &magic, sizeof(magic)); + if (magic != 0x67676d6c) { + fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); + return false; + } + } + + //load hparams + { + auto & hparams = model.hparams; + + fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + fin.read((char *) &hparams.n_audio_ctx, sizeof(hparams.n_audio_ctx)); + fin.read((char *) &hparams.n_audio_state, sizeof(hparams.n_audio_state)); + fin.read((char 
*) &hparams.n_audio_head, sizeof(hparams.n_audio_head)); + fin.read((char *) &hparams.n_audio_layer, sizeof(hparams.n_audio_layer)); + fin.read((char *) &hparams.n_text_ctx, sizeof(hparams.n_text_ctx)); + fin.read((char *) &hparams.n_text_state, sizeof(hparams.n_text_state)); + fin.read((char *) &hparams.n_text_head, sizeof(hparams.n_text_head)); + fin.read((char *) &hparams.n_text_layer, sizeof(hparams.n_text_layer)); + fin.read((char *) &hparams.n_mels, sizeof(hparams.n_mels)); + fin.read((char *) &hparams.f16, sizeof(hparams.f16)); + + assert(hparams.n_text_state == hparams.n_audio_state); + + if (hparams.n_audio_layer == 4) { + model.type = e_model::MODEL_TINY; + } + + if (hparams.n_audio_layer == 6) { + model.type = e_model::MODEL_BASE; + } + + if (hparams.n_audio_layer == 12) { + model.type = e_model::MODEL_SMALL; + } + + if (hparams.n_audio_layer == 24) { + model.type = e_model::MODEL_MEDIUM; + } + + if (hparams.n_audio_layer == 32) { + model.type = e_model::MODEL_LARGE; + } + + printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); + printf("%s: n_audio_ctx = %d\n", __func__, hparams.n_audio_ctx); + printf("%s: n_audio_state = %d\n", __func__, hparams.n_audio_state); + printf("%s: n_audio_head = %d\n", __func__, hparams.n_audio_head); + printf("%s: n_audio_layer = %d\n", __func__, hparams.n_audio_layer); + printf("%s: n_text_ctx = %d\n", __func__, hparams.n_text_ctx); + printf("%s: n_text_state = %d\n", __func__, hparams.n_text_state); + printf("%s: n_text_head = %d\n", __func__, hparams.n_text_head); + printf("%s: n_text_layer = %d\n", __func__, hparams.n_text_layer); + printf("%s: n_mels = %d\n", __func__, hparams.n_mels); + printf("%s: f16 = %d\n", __func__, hparams.f16); + printf("%s: type = %d\n", __func__, model.type); + + const size_t mem_required = + MEM_REQ_MODEL.at(model.type) + + MEM_REQ_ENCODE.at(model.type) + + MEM_REQ_ENCODE_LAYER.at(model.type) + + MEM_REQ_DECODE.at(model.type) + + MEM_REQ_DECODE_LAYER.at(model.type); + + printf("%s: 
mem_required = %.2f MB\n", __func__, mem_required / 1024.0 / 1024.0); + } + + // load mel filters + { + auto & filters = model.filters; + + fin.read((char *) &filters.n_mel, sizeof(filters.n_mel)); + fin.read((char *) &filters.n_fft, sizeof(filters.n_fft)); + + filters.data.resize(filters.n_mel * filters.n_fft); + fin.read((char *) filters.data.data(), filters.data.size() * sizeof(float)); + } + + // load vocab + { + int32_t n_vocab = 0; + fin.read((char *) &n_vocab, sizeof(n_vocab)); + + //if (n_vocab != model.hparams.n_vocab) { + // fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", + // __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); + // return false; + //} + + std::string word; + for (int i = 0; i < n_vocab; i++) { + uint32_t len; + fin.read((char *) &len, sizeof(len)); + + word.resize(len); + fin.read((char *) word.data(), len); + + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + + //printf("%s: vocab[%d] = '%s'\n", __func__, i, word.c_str()); + } + + vocab.n_vocab = model.hparams.n_vocab; + if (vocab.is_multilingual()) { + vocab.token_eot++; + vocab.token_sot++; + vocab.token_prev++; + vocab.token_solm++; + vocab.token_beg++; + } + + if (n_vocab < model.hparams.n_vocab) { + printf("%s: adding %d extra tokens\n", __func__, model.hparams.n_vocab - n_vocab); + for (int i = n_vocab; i < model.hparams.n_vocab; i++) { + if (i > vocab.token_beg) { + word = "[_TT_" + std::to_string(i - vocab.token_beg) + "]"; + } else if (i == vocab.token_eot) { + word = "[_EOT_]"; + } else if (i == vocab.token_sot) { + word = "[_SOT_]"; + } else if (i == vocab.token_prev) { + word = "[_PREV_]"; + } else if (i == vocab.token_beg) { + word = "[_BEG_]"; + } else { + word = "[_extra_token_" + std::to_string(i) + "]"; + } + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + } + } + } + + // for the big tensors, we have the option to store the data in 16-bit floats + // in order to save memory and also to speed up the computation 
+ const ggml_type wtype = model.hparams.f16 ? GGML_TYPE_F16 : GGML_TYPE_F32; + + auto & ctx = model.ctx; + + size_t ctx_size = 0; + + { + const auto & hparams = model.hparams; + + const int n_vocab = hparams.n_vocab; + + const int n_audio_ctx = hparams.n_audio_ctx; + const int n_audio_state = hparams.n_audio_state; + const int n_audio_layer = hparams.n_audio_layer; + + const int n_text_ctx = hparams.n_text_ctx; + const int n_text_state = hparams.n_text_state; + const int n_text_layer = hparams.n_text_layer; + + const int n_mels = hparams.n_mels; + + // encoder + { + // TODO: F16 .. maybe not? + ctx_size += n_audio_ctx*n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_pe; + + ctx_size += 3*n_mels*n_audio_state*ggml_type_size(wtype); // e_conv_1_w + ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_conv_1_b + + ctx_size += 3*n_audio_state*n_audio_state*ggml_type_size(wtype); // e_conv_2_w + ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_conv_2_b + + ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_ln_w; + ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_ln_b; + } + + // decoder + { + // TODO: F16 .. maybe not? 
+ ctx_size += n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F32); // d_pe; + + ctx_size += n_vocab*n_text_state*ggml_type_size(wtype); // d_te; + + ctx_size += n_text_state*ggml_type_size(GGML_TYPE_F32); // d_ln_w; + ctx_size += n_text_state*ggml_type_size(GGML_TYPE_F32); // d_ln_b; + } + + // encoder layers + { + ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_w + ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_b + + ctx_size += n_audio_layer*(4*n_audio_state*n_audio_state*ggml_type_size(wtype)); // mlp_0_w + ctx_size += n_audio_layer*( 4*n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_0_b + + ctx_size += n_audio_layer*(4*n_audio_state*n_audio_state*ggml_type_size(wtype)); // mlp_1_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_1_b + + ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_w + ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_b + + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_q_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_q_b + + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_k_w + + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_v_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_v_b + + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_ln_1_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_1_b + } + + // decoder layers + { + ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_w + ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_b + + ctx_size += n_text_layer*(4*n_text_state*n_text_state*ggml_type_size(wtype)); // mlp_0_w + ctx_size += 
n_text_layer*( 4*n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_0_b + + ctx_size += n_text_layer*(4*n_text_state*n_text_state*ggml_type_size(wtype)); // mlp_1_w + ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_1_b + + ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_w + ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_b + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_q_w + ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_q_b + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_k_w + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_v_w + ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_v_b + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_ln_1_w + ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_1_b + // + ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_0_w + ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_0_b + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_q_w + ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_q_b + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_k_w + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_v_w + ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_v_b + + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_ln_1_w + ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_1_b + } + + ctx_size += 
n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F32); // memory_k + ctx_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F32); // memory_v + + ctx_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F32); // memory_cross_k + ctx_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F32); // memory_cross_v + + ctx_size += (15 + 15*n_audio_layer + 24*n_text_layer)*256; // object overhead + + printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); + } + + // create the ggml context + { + struct ggml_init_params params = { + .mem_size = ctx_size, + .mem_buffer = NULL, + }; + + model.ctx = ggml_init(params); + if (!model.ctx) { + fprintf(stderr, "%s: ggml_init() failed\n", __func__); + return false; + } + } + + // prepare memory for the weights + { + const auto & hparams = model.hparams; + + const int n_vocab = hparams.n_vocab; + + const int n_audio_ctx = hparams.n_audio_ctx; + const int n_audio_state = hparams.n_audio_state; + const int n_audio_layer = hparams.n_audio_layer; + + const int n_text_ctx = hparams.n_text_ctx; + const int n_text_state = hparams.n_text_state; + const int n_text_layer = hparams.n_text_layer; + + const int n_mels = hparams.n_mels; + + model.layers_encoder.resize(n_audio_layer); + model.layers_decoder.resize(n_text_layer); + + // encoder + { + model.e_pe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_audio_state, n_audio_ctx); + + model.e_conv_1_w = ggml_new_tensor_3d(ctx, wtype, 3, n_mels, n_audio_state); + model.e_conv_1_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1, n_audio_state); + + model.e_conv_2_w = ggml_new_tensor_3d(ctx, wtype, 3, n_audio_state, n_audio_state); + model.e_conv_2_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1, n_audio_state); + + model.e_ln_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + model.e_ln_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + + // map by name + model.tensors["encoder.positional_embedding"] = 
model.e_pe; + + model.tensors["encoder.conv1.weight"] = model.e_conv_1_w; + model.tensors["encoder.conv1.bias"] = model.e_conv_1_b; + + model.tensors["encoder.conv2.weight"] = model.e_conv_2_w; + model.tensors["encoder.conv2.bias"] = model.e_conv_2_b; + + model.tensors["encoder.ln_post.weight"] = model.e_ln_w; + model.tensors["encoder.ln_post.bias"] = model.e_ln_b; + + for (int i = 0; i < n_audio_layer; ++i) { + auto & layer = model.layers_encoder[i]; + + layer.mlp_ln_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + layer.mlp_ln_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + + layer.mlp_0_w = ggml_new_tensor_2d(ctx, wtype, n_audio_state, 4*n_audio_state); + layer.mlp_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_audio_state); + + layer.mlp_1_w = ggml_new_tensor_2d(ctx, wtype, 4*n_audio_state, n_audio_state); + layer.mlp_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + + layer.attn_ln_0_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + layer.attn_ln_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + + layer.attn_q_w = ggml_new_tensor_2d(ctx, wtype, n_audio_state, n_audio_state); + layer.attn_q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + + layer.attn_k_w = ggml_new_tensor_2d(ctx, wtype, n_audio_state, n_audio_state); + + layer.attn_v_w = ggml_new_tensor_2d(ctx, wtype, n_audio_state, n_audio_state); + layer.attn_v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + + layer.attn_ln_1_w = ggml_new_tensor_2d(ctx, wtype, n_audio_state, n_audio_state); + layer.attn_ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); + + // map by name + model.tensors["encoder.blocks." + std::to_string(i) + ".mlp_ln.weight"] = layer.mlp_ln_w; + model.tensors["encoder.blocks." + std::to_string(i) + ".mlp_ln.bias"] = layer.mlp_ln_b; + + model.tensors["encoder.blocks." + std::to_string(i) + ".mlp.0.weight"] = layer.mlp_0_w; + model.tensors["encoder.blocks." 
+ std::to_string(i) + ".mlp.0.bias"] = layer.mlp_0_b; + + model.tensors["encoder.blocks." + std::to_string(i) + ".mlp.2.weight"] = layer.mlp_1_w; + model.tensors["encoder.blocks." + std::to_string(i) + ".mlp.2.bias"] = layer.mlp_1_b; + + model.tensors["encoder.blocks." + std::to_string(i) + ".attn_ln.weight"] = layer.attn_ln_0_w; + model.tensors["encoder.blocks." + std::to_string(i) + ".attn_ln.bias"] = layer.attn_ln_0_b; + + model.tensors["encoder.blocks." + std::to_string(i) + ".attn.query.weight"] = layer.attn_q_w; + model.tensors["encoder.blocks." + std::to_string(i) + ".attn.query.bias"] = layer.attn_q_b; + + model.tensors["encoder.blocks." + std::to_string(i) + ".attn.key.weight"] = layer.attn_k_w; + + model.tensors["encoder.blocks." + std::to_string(i) + ".attn.value.weight"] = layer.attn_v_w; + model.tensors["encoder.blocks." + std::to_string(i) + ".attn.value.bias"] = layer.attn_v_b; + + model.tensors["encoder.blocks." + std::to_string(i) + ".attn.out.weight"] = layer.attn_ln_1_w; + model.tensors["encoder.blocks." 
+ std::to_string(i) + ".attn.out.bias"] = layer.attn_ln_1_b; + } + } + + // decoder + { + model.d_pe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_text_state, n_text_ctx); + + model.d_te = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_vocab); + + model.d_ln_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + model.d_ln_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + // map by name + model.tensors["decoder.positional_embedding"] = model.d_pe; + + model.tensors["decoder.token_embedding.weight"] = model.d_te; + + model.tensors["decoder.ln.weight"] = model.d_ln_w; + model.tensors["decoder.ln.bias"] = model.d_ln_b; + + for (int i = 0; i < n_text_layer; ++i) { + auto & layer = model.layers_decoder[i]; + + layer.mlp_ln_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + layer.mlp_ln_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.mlp_0_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, 4*n_text_state); + layer.mlp_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_text_state); + + layer.mlp_1_w = ggml_new_tensor_2d(ctx, wtype, 4*n_text_state, n_text_state); + layer.mlp_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.attn_ln_0_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + layer.attn_ln_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.attn_q_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + layer.attn_q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.attn_k_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + + layer.attn_v_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + layer.attn_v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.attn_ln_1_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + layer.attn_ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.cross_attn_ln_0_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + layer.cross_attn_ln_0_b = 
ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.cross_attn_q_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + layer.cross_attn_q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.cross_attn_k_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + + layer.cross_attn_v_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + layer.cross_attn_v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + layer.cross_attn_ln_1_w = ggml_new_tensor_2d(ctx, wtype, n_text_state, n_text_state); + layer.cross_attn_ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_text_state); + + // map by name + model.tensors["decoder.blocks." + std::to_string(i) + ".mlp_ln.weight"] = layer.mlp_ln_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".mlp_ln.bias"] = layer.mlp_ln_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".mlp.0.weight"] = layer.mlp_0_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".mlp.0.bias"] = layer.mlp_0_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".mlp.2.weight"] = layer.mlp_1_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".mlp.2.bias"] = layer.mlp_1_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".attn_ln.weight"] = layer.attn_ln_0_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".attn_ln.bias"] = layer.attn_ln_0_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".attn.query.weight"] = layer.attn_q_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".attn.query.bias"] = layer.attn_q_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".attn.key.weight"] = layer.attn_k_w; + + model.tensors["decoder.blocks." + std::to_string(i) + ".attn.value.weight"] = layer.attn_v_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".attn.value.bias"] = layer.attn_v_b; + + model.tensors["decoder.blocks." 
+ std::to_string(i) + ".attn.out.weight"] = layer.attn_ln_1_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".attn.out.bias"] = layer.attn_ln_1_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn_ln.weight"] = layer.cross_attn_ln_0_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn_ln.bias"] = layer.cross_attn_ln_0_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn.query.weight"] = layer.cross_attn_q_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn.query.bias"] = layer.cross_attn_q_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn.key.weight"] = layer.cross_attn_k_w; + + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn.value.weight"] = layer.cross_attn_v_w; + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn.value.bias"] = layer.cross_attn_v_b; + + model.tensors["decoder.blocks." + std::to_string(i) + ".cross_attn.out.weight"] = layer.cross_attn_ln_1_w; + model.tensors["decoder.blocks." 
+ std::to_string(i) + ".cross_attn.out.bias"] = layer.cross_attn_ln_1_b; + } + } + } + + // key + value memory + { + const auto & hparams = model.hparams; + + const int n_text_state = hparams.n_text_state; + const int n_text_layer = hparams.n_text_layer; + const int n_text_ctx = hparams.n_text_ctx; + + { + const int n_mem = n_text_layer*n_text_ctx; + const int n_elements = n_text_state*n_mem; + + model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); + model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); + } + + { + const int n_audio_ctx = hparams.n_audio_ctx; + + const int n_mem = n_text_layer*n_audio_ctx; + const int n_elements = n_text_state*n_mem; + + model.memory_cross_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); + model.memory_cross_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); + } + + const size_t memory_size = + ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v) + + ggml_nbytes(model.memory_cross_k) + ggml_nbytes(model.memory_cross_v); + + printf("%s: memory size = %8.2f MB \n", __func__, memory_size/1024.0/1024.0); + } + + // load weights + { + size_t total_size = 0; + + while (true) { + int32_t n_dims; + int32_t length; + int32_t ftype; + + fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + fin.read(reinterpret_cast(&length), sizeof(length)); + fin.read(reinterpret_cast(&ftype), sizeof(ftype)); + + if (fin.eof()) { + break; + } + + int32_t nelements = 1; + int32_t ne[3] = { 1, 1, 1 }; + for (int i = 0; i < n_dims; ++i) { + fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); + nelements *= ne[i]; + } + + std::string name(length, 0); + fin.read(&name[0], length); + + if (model.tensors.find(name.data()) == model.tensors.end()) { + fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); + return false; + } + + auto tensor = model.tensors[name.data()]; + if (ggml_nelements(tensor) != nelements) { + fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, 
name.data()); + return false; + } + + if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1] || tensor->ne[2] != ne[2]) { + fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d, %d], expected [%d, %d, %d]\n", + __func__, name.data(), tensor->ne[0], tensor->ne[1], tensor->ne[2], ne[0], ne[1], ne[2]); + return false; + } + + const size_t bpe = (ftype == 0) ? sizeof(float) : sizeof(ggml_fp16_t); + + if (nelements*bpe != ggml_nbytes(tensor)) { + fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", + __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); + return false; + } + + fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); + + //printf("%24s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); + total_size += ggml_nbytes(tensor); + } + + printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); + } + + fin.close(); + + return true; +} + +// evaluate the encoder +// +// given audio recording (more specifically, its log mel spectrogram), runs forward pass of the encoder +// part of the transformer model and returns the encoded features +// +// - model: the model +// - n_threads: number of threads to use +// - mel_offset: offset in the mel spectrogram (i.e. 
audio offset)
// - mel_inp:    input mel spectrogram
// - features:   output encoded features (n_audio_ctx x n_audio_state floats, flattened)
//
// Returns true on success. The scratch buffers below are function-local statics
// that are allocated once and reused on every call, so this function is not
// safe to call concurrently from multiple threads.
bool whisper_encode(
        const whisper_model & model,
        const int n_threads,
        const int mel_offset,
        const whisper_mel & mel_inp,
        std::vector<float> & features) {
    const auto & hparams = model.hparams;

    const int n_ctx   = hparams.n_audio_ctx;
    const int n_state = hparams.n_audio_state;
    const int n_head  = hparams.n_audio_head;
    const int n_layer = hparams.n_audio_layer;

    const int N = n_ctx;

    const int n_mels = hparams.n_mels;
    assert(mel_inp.n_mel == n_mels);

    // scratch memory for the main (non per-layer) graph
    // NOTE: allocated once and reused across calls - intentionally never freed
    struct ggml_init_params params;

    {
        static size_t buf_size = MEM_REQ_ENCODE.at(model.type);
        static void * buf      = malloc(buf_size);

        params = {
            .mem_size   = buf_size,
            .mem_buffer = buf,
        };
    }

    struct ggml_context * ctx0 = ggml_init(params);

    // copy a 2*n_ctx window of the mel spectrogram starting at mel_offset into
    // the input tensor, zero-padding past the end of the recording
    struct ggml_tensor * mel = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 2*n_ctx, n_mels);
    assert(mel->type == GGML_TYPE_F32);
    {
        float * dst = (float *) mel->data;
        memset(dst, 0, ggml_nbytes(mel));

        const int i0 = std::min(mel_offset, mel_inp.n_len);
        const int i1 = std::min(mel_offset + 2*n_ctx, mel_inp.n_len);

        for (int j = 0; j < mel_inp.n_mel; ++j) {
            for (int i = i0; i < i1; ++i) {
                dst[j*2*n_ctx + (i - i0)] = mel_inp.data[j*mel_inp.n_len + i];
            }
        }
    }

    struct ggml_tensor * cur;

    // two 1D convolutions + GELU (the second convolution has stride 2)
    {
        cur = ggml_conv_1d_1s(ctx0, model.e_conv_1_w, mel);
        cur = ggml_add(ctx0,
                ggml_repeat(ctx0, model.e_conv_1_b, cur),
                cur);

        cur = ggml_gelu(ctx0, cur);

        cur = ggml_conv_1d_2s(ctx0, model.e_conv_2_w, cur);
        cur = ggml_add(ctx0,
                ggml_repeat(ctx0, model.e_conv_2_b, cur),
                cur);

        cur = ggml_gelu(ctx0, cur);
    }

    // add the positional embedding
    cur = ggml_add(ctx0, model.e_pe, ggml_transpose(ctx0, cur));

    struct ggml_tensor * inpL = cur;

    for (int il = 0; il < n_layer; ++il) {
        const auto & layer = model.layers_encoder[il];

        // create separate context for each layer to reduce memory usage
        struct ggml_init_params paramsL;
        {
            static size_t buf_size = MEM_REQ_ENCODE_LAYER.at(model.type);
            static void * buf      = malloc(buf_size);

            paramsL = {
                .mem_size   = buf_size,
                .mem_buffer = buf,
            };
        }

        struct ggml_context * ctxL = ggml_init(paramsL);

        // pre-attention layer norm: cur = ln_0_w*norm(inpL) + ln_0_b
        {
            cur = ggml_norm(ctxL, inpL);

            cur = ggml_add(ctxL,
                    ggml_mul(ctxL,
                        ggml_repeat(ctxL, layer.attn_ln_0_w, cur),
                        cur),
                    ggml_repeat(ctxL, layer.attn_ln_0_b, cur));
        }

        // self-attention
        {
            // Q and K are both pre-scaled by (n_state/n_head)^-0.25 so that
            // K*Q carries the usual 1/sqrt(head_dim) attention scaling overall
            struct ggml_tensor * Qcur = ggml_mul_mat(ctxL, layer.attn_q_w, cur);
            Qcur = ggml_add(ctxL, ggml_repeat(ctxL, layer.attn_q_b, Qcur), Qcur);
            Qcur = ggml_scale(ctxL, Qcur, ggml_new_f32(ctxL, pow(float(n_state)/n_head, -0.25)));

            // no bias for Key
            struct ggml_tensor * Kcur = ggml_mul_mat(ctxL, layer.attn_k_w, cur);
            Kcur = ggml_scale(ctxL, Kcur, ggml_new_f32(ctxL, pow(float(n_state)/n_head, -0.25)));

            struct ggml_tensor * Vcur = ggml_mul_mat(ctxL, layer.attn_v_w, cur);
            Vcur = ggml_add(ctxL, ggml_repeat(ctxL, layer.attn_v_b, Vcur), Vcur);

            // split into attention heads
            struct ggml_tensor * Q =
                ggml_permute(ctxL,
                        ggml_cpy(ctxL,
                            Qcur,
                            ggml_new_tensor_3d(ctxL, GGML_TYPE_F32, n_state/n_head, n_head, N)),
                        0, 2, 1, 3);

            struct ggml_tensor * K =
                ggml_permute(ctxL,
                        ggml_cpy(ctxL,
                            Kcur,
                            ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, N)), // F16 !
                        0, 2, 1, 3);

            // K * Q
            struct ggml_tensor * KQ = ggml_mul_mat(ctxL, K, Q);

            // no extra scaling here - Q and K were pre-scaled above
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctxL, KQ);

            struct ggml_tensor * V =
                ggml_cpy(ctxL,
                        ggml_permute(ctxL,
                            ggml_reshape_3d(ctxL,
                                Vcur,
                                n_state/n_head, n_head, N),
                            0, 2, 1, 3),
                        ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, N, n_head)); // F16 !

            struct ggml_tensor * KQV = ggml_mul_mat(ctxL, ggml_transpose(ctxL, V), KQ_soft_max);

            // merge the heads back: cur = KQV_merged.contiguous().view(n_state, N)
            struct ggml_tensor * KQV_merged = ggml_permute(ctxL, KQV, 0, 2, 1, 3);

            cur = ggml_cpy(ctxL,
                    KQV_merged,
                    ggml_new_tensor_2d(ctxL, GGML_TYPE_F32, n_state, N));
        }

        // output projection
        {
            cur = ggml_mul_mat(ctxL, layer.attn_ln_1_w, cur);

            cur = ggml_add(ctxL,
                    ggml_repeat(ctxL, layer.attn_ln_1_b, cur),
                    cur);
        }

        // residual connection
        cur = ggml_add(ctxL, cur, inpL);

        struct ggml_tensor * inpFF = cur;

        // feed-forward network
        {
            // pre-FFN layer norm: cur = mlp_ln_w*norm(inpFF) + mlp_ln_b
            {
                cur = ggml_norm(ctxL, inpFF);

                cur = ggml_add(ctxL,
                        ggml_mul(ctxL,
                            ggml_repeat(ctxL, layer.mlp_ln_w, cur),
                            cur),
                        ggml_repeat(ctxL, layer.mlp_ln_b, cur));
            }

            // fully connected
            cur = ggml_mul_mat(ctxL, layer.mlp_0_w, cur);
            cur = ggml_add(ctxL, ggml_repeat(ctxL, layer.mlp_0_b, cur), cur);

            // GELU activation
            cur = ggml_gelu(ctxL, cur);

            // projection
            cur = ggml_mul_mat(ctxL, layer.mlp_1_w, cur);
            cur = ggml_add(ctxL, ggml_repeat(ctxL, layer.mlp_1_b, cur), cur);
        }

        // residual connection - output of this layer
        struct ggml_tensor * inpO = ggml_add(ctxL, cur, inpFF);

        {
            struct ggml_cgraph gf = { .n_threads = n_threads };

            ggml_build_forward_expand(&gf, inpO);
            ggml_graph_compute       (ctxL, &gf);
        }

        // TODO: this is a hack to have per-layer computation graphs - need to come up with something better
        // input for next layer (inpO -> inpL)
        memcpy(inpL->data, inpO->data, ggml_nbytes(inpL));
        inpL->op   = GGML_OP_NONE;
        inpL->src0 = NULL;
        inpL->src1 = NULL;

        ggml_free(ctxL);
    }

    cur = inpL;

    // final layer norm: cur = ln_f_g*norm(cur) + ln_f_b
    {
        cur = ggml_norm(ctx0, cur);

        cur = ggml_add(ctx0,
                ggml_mul(ctx0,
                    ggml_repeat(ctx0, model.e_ln_w, cur),
                    cur),
                ggml_repeat(ctx0, model.e_ln_b, cur));
    }

    // run the computation
    {
        struct ggml_cgraph gf = { .n_threads = n_threads };

        ggml_build_forward_expand(&gf, cur);
        ggml_graph_compute       (ctx0, &gf);
    }

    // pre-compute the cross-attention K/V for every decoder layer once per
    // encoded segment, storing them in model.memory_cross_k / memory_cross_v
    {
        struct ggml_cgraph gf = { .n_threads = n_threads };

        // TODO: hack to disconnect the encoded features from the previous graph
        cur->op   = GGML_OP_NONE;
        cur->src0 = NULL;
        cur->src1 = NULL;

        for (int il = 0; il < model.hparams.n_text_layer; ++il) {
            auto & layer = model.layers_decoder[il];

            struct ggml_tensor * Kcross = ggml_mul_mat(ctx0, layer.cross_attn_k_w, cur);
            Kcross = ggml_scale(ctx0, Kcross, ggml_new_f32(ctx0, pow(float(n_state)/n_head, -0.25)));

            struct ggml_tensor * Vcross = ggml_mul_mat(ctx0, layer.cross_attn_v_w, cur);
            Vcross = ggml_add(ctx0,
                    ggml_repeat(ctx0, layer.cross_attn_v_b, Vcross),
                    Vcross);

            struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_cross_k, n_state*n_ctx, (ggml_element_size(model.memory_cross_k)*n_state)*(il*n_ctx));
            struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_cross_v, n_state*n_ctx, (ggml_element_size(model.memory_cross_v)*n_state)*(il*n_ctx));

            ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcross, k));
            ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcross, v));
        }

        ggml_graph_compute(ctx0, &gf);
    }

    // output the features
    assert(cur->type == GGML_TYPE_F32);
    features.resize(cur->ne[0]*cur->ne[1]);
    memcpy(features.data(), cur->data, features.size()*sizeof(float));

    //printf("%s: used_mem = %f
MB\n", __func__, ggml_used_mem(ctx0)/1024.0/1024.0); + + ggml_free(ctx0); + + return true; +} + +// evaluate the decoder +// +// given text prompt + audio features -> predicts the probabilities for the next token +// +// - model: the model +// - n_threads: number of threads to use +// - n_past: prompt length +// - prompt: text prompt +// - logits_out: output logits +// - probs_out: output probabilities +// +bool whisper_decode( + const whisper_model & model, + const int n_threads, + const int n_past, + const std::vector & prompt, + std::vector & logits_out, + std::vector & probs_out) { + const auto & hparams = model.hparams; + + const int n_vocab = hparams.n_vocab; + + const int n_ctx = hparams.n_text_ctx; + const int n_state = hparams.n_text_state; + const int n_head = hparams.n_text_head; + const int n_layer = hparams.n_text_layer; + + const int N = prompt.size(); + const int M = hparams.n_audio_ctx; + + struct ggml_init_params params; + + { + static size_t buf_size = MEM_REQ_DECODE.at(model.type); + static void * buf = malloc(buf_size); + + params = { + .mem_size = buf_size, + .mem_buffer = buf, + }; + } + + struct ggml_context * ctx0 = ggml_init(params); + + struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + memcpy(embd->data, prompt.data(), N*ggml_element_size(embd)); + + struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + for (int i = 0; i < N; ++i) { + ((int32_t *) position->data)[i] = n_past + i; + } + + // wte + wpe + struct ggml_tensor * cur = + ggml_add(ctx0, + ggml_get_rows(ctx0, model.d_te, embd), + ggml_get_rows(ctx0, model.d_pe, position)); + + struct ggml_tensor * inpL = cur; + + for (int il = 0; il < n_layer; ++il) { + const auto & layer = model.layers_decoder[il]; + + struct ggml_init_params paramsL; + + { + static size_t buf_size = MEM_REQ_DECODE_LAYER.at(model.type); + static void * buf = malloc(buf_size); + + paramsL = { + .mem_size = buf_size, + .mem_buffer = buf, + }; + } + + struct 
ggml_context * ctxL = ggml_init(paramsL); + struct ggml_cgraph gf = { .n_threads = n_threads }; + + // norm + { + cur = ggml_norm(ctxL, inpL); + + // cur = ln_0_w*cur + ln_0_b + cur = ggml_add(ctxL, + ggml_mul(ctxL, + ggml_repeat(ctxL, layer.attn_ln_0_w, cur), + cur), + ggml_repeat(ctxL, layer.attn_ln_0_b, cur)); + } + + // self-attention + { + struct ggml_tensor * Qcur = ggml_mul_mat(ctxL, + layer.attn_q_w, + cur); + + Qcur = ggml_add(ctxL, + ggml_repeat(ctxL, + layer.attn_q_b, + Qcur), + Qcur); + + Qcur = ggml_scale(ctxL, Qcur, ggml_new_f32(ctxL, pow(float(n_state)/n_head, -0.25))); + + // no bias for Key + struct ggml_tensor * Kcur = ggml_mul_mat(ctxL, + layer.attn_k_w, + cur); + + Kcur = ggml_scale(ctxL, Kcur, ggml_new_f32(ctxL, pow(float(n_state)/n_head, -0.25))); + + struct ggml_tensor * Vcur = ggml_mul_mat(ctxL, + layer.attn_v_w, + cur); + + Vcur = ggml_add(ctxL, + ggml_repeat(ctxL, + layer.attn_v_b, + Vcur), + Vcur); + + // store key and value to memory + { + struct ggml_tensor * k = ggml_view_1d(ctxL, model.memory_k, N*n_state, (ggml_element_size(model.memory_k)*n_state)*(il*n_ctx + n_past)); + struct ggml_tensor * v = ggml_view_1d(ctxL, model.memory_v, N*n_state, (ggml_element_size(model.memory_v)*n_state)*(il*n_ctx + n_past)); + + ggml_build_forward_expand(&gf, ggml_cpy(ctxL, Kcur, k)); + ggml_build_forward_expand(&gf, ggml_cpy(ctxL, Vcur, v)); + } + + // ------ + + struct ggml_tensor * Q = + ggml_permute(ctxL, + ggml_cpy(ctxL, + Qcur, + ggml_new_tensor_3d(ctxL, GGML_TYPE_F32, n_state/n_head, n_head, N)), + 0, 2, 1, 3); + + struct ggml_tensor * K = + ggml_permute(ctxL, + ggml_reshape_3d(ctxL, + ggml_view_1d(ctxL, model.memory_k, (n_past + N)*n_state, il*n_ctx*ggml_element_size(model.memory_k)*n_state), + n_state/n_head, n_head, n_past + N), + 0, 2, 1, 3); + + // K * Q + struct ggml_tensor * KQ = ggml_mul_mat(ctxL, K, Q); + + //struct ggml_tensor * KQ_scaled = + // ggml_scale(ctxL, + // KQ, + // ggml_new_f32(ctxL, 1.0f/sqrt(float(n_state)/n_head)) + // ); 
+ + struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctxL, KQ, n_past); + + struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctxL, KQ_masked); + + struct ggml_tensor * V_trans = + ggml_permute(ctxL, + ggml_reshape_3d(ctxL, + ggml_view_1d(ctxL, model.memory_v, (n_past + N)*n_state, il*n_ctx*ggml_element_size(model.memory_v)*n_state), + n_state/n_head, n_head, n_past + N), + 1, 2, 0, 3); + + struct ggml_tensor * KQV = ggml_mul_mat(ctxL, V_trans, KQ_soft_max); + + struct ggml_tensor * KQV_merged = ggml_permute(ctxL, KQV, 0, 2, 1, 3); + + cur = ggml_cpy(ctxL, + KQV_merged, + ggml_new_tensor_2d(ctxL, GGML_TYPE_F32, n_state, N)); + } + + { + cur = ggml_mul_mat(ctxL, + layer.attn_ln_1_w, + cur); + + cur = ggml_add(ctxL, + ggml_repeat(ctxL, layer.attn_ln_1_b, cur), + cur); + } + + // add the input + struct ggml_tensor * inpCA = ggml_add(ctxL, cur, inpL); + + // norm + { + cur = ggml_norm(ctxL, inpCA); // Note we use inpCA here + + // cur = ln_0_w*cur + ln_0_b + cur = ggml_add(ctxL, + ggml_mul(ctxL, + ggml_repeat(ctxL, layer.cross_attn_ln_0_w, cur), + cur), + ggml_repeat(ctxL, layer.cross_attn_ln_0_b, cur)); + } + + // cross-attention + { + struct ggml_tensor * Qcur = ggml_mul_mat(ctxL, + layer.cross_attn_q_w, + cur); + + Qcur = ggml_add(ctxL, + ggml_repeat(ctxL, + layer.cross_attn_q_b, + Qcur), + Qcur); + + Qcur = ggml_scale(ctxL, Qcur, ggml_new_f32(ctxL, pow(float(n_state)/n_head, -0.25))); + + // Kcross is already scaled + struct ggml_tensor * Kcross = + ggml_reshape_3d(ctxL, + ggml_view_1d(ctxL, model.memory_cross_k, M*n_state, il*M*ggml_element_size(model.memory_cross_k)*n_state), + n_state/n_head, n_head, M); + + struct ggml_tensor * Vcross = + ggml_reshape_3d(ctxL, + ggml_view_1d(ctxL, model.memory_cross_v, M*n_state, il*M*ggml_element_size(model.memory_cross_v)*n_state), + n_state/n_head, n_head, M); + + // ------ + + struct ggml_tensor * Q = + ggml_permute(ctxL, + ggml_cpy(ctxL, + Qcur, + ggml_new_tensor_3d(ctxL, GGML_TYPE_F32, n_state/n_head, n_head, N)), + 0, 
2, 1, 3); + + struct ggml_tensor * K = ggml_permute(ctxL, Kcross, 0, 2, 1, 3); + + // K * Q + struct ggml_tensor * KQ = ggml_mul_mat(ctxL, K, Q); + + //struct ggml_tensor * KQ_scaled = + // ggml_scale(ctxL, + // KQ, + // ggml_new_f32(ctxL, 1.0f/sqrt(float(n_state)/n_head)) + // ); + + // no masking for cross-attention + //struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctxL, KQ_scaled, n_past); + + struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctxL, KQ); + + struct ggml_tensor * V_trans = ggml_permute(ctxL, Vcross, 1, 2, 0, 3); + + struct ggml_tensor * KQV = ggml_mul_mat(ctxL, V_trans, KQ_soft_max); + + struct ggml_tensor * KQV_merged = ggml_permute(ctxL, KQV, 0, 2, 1, 3); + + // cur = KQV_merged.contiguous().view(n_state, N) + cur = ggml_cpy(ctxL, + KQV_merged, + ggml_new_tensor_2d(ctxL, GGML_TYPE_F32, n_state, N)); + } + + // projection + { + cur = ggml_mul_mat(ctxL, + layer.cross_attn_ln_1_w, + cur); + + cur = ggml_add(ctxL, + ggml_repeat(ctxL, layer.cross_attn_ln_1_b, cur), + cur); + } + + + // add the input + cur = ggml_add(ctxL, cur, inpCA); + + struct ggml_tensor * inpFF = cur; + + // feed-forward network + { + // norm + { + cur = ggml_norm(ctxL, inpFF); + + // cur = ln_2_g*cur + ln_2_b + // [ 768, N] + cur = ggml_add(ctxL, + ggml_mul(ctxL, + ggml_repeat(ctxL, layer.mlp_ln_w, cur), + cur), + ggml_repeat(ctxL, layer.mlp_ln_b, cur)); + } + + // fully connected + cur = ggml_mul_mat(ctxL, + layer.mlp_0_w, + cur); + + cur = ggml_add(ctxL, + ggml_repeat(ctxL, layer.mlp_0_b, cur), + cur); + + // GELU activation + cur = ggml_gelu(ctxL, cur); + + // projection + cur = ggml_mul_mat(ctxL, + layer.mlp_1_w, + cur); + + cur = ggml_add(ctxL, + ggml_repeat(ctxL, layer.mlp_1_b, cur), + cur); + } + + // output from this layer + struct ggml_tensor * inpO = ggml_add(ctxL, cur, inpFF); + + { + ggml_build_forward_expand(&gf, inpO); + ggml_graph_compute (ctxL, &gf); + + //ggml_graph_print(&gf); + } + + // TODO: this is a hack to have per-layer computation graphs - need to 
come up with something better + // input for next layer (inpO -> inpL) + memcpy(inpL->data, inpO->data, ggml_nbytes(inpL)); + inpL->op = GGML_OP_NONE; + inpL->src0 = NULL; + inpL->src1 = NULL; + + if (N > 1) { + //printf("%s: - used_mem(%d) = %f MB\n", __func__, il, ggml_used_mem(ctxL)/1024.0/1024.0); + } + + ggml_free(ctxL); + } + + cur = inpL; + + // norm + { + cur = ggml_norm(ctx0, cur); + + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.d_ln_w, cur), + cur), + ggml_repeat(ctx0, model.d_ln_b, cur)); + } + + struct ggml_tensor * logits = ggml_mul_mat(ctx0, model.d_te, cur); + + // logits -> probs + cur = ggml_dup(ctx0, logits); + cur = ggml_soft_max(ctx0, cur); // in-place + + // run the computation + { + struct ggml_cgraph gf = { .n_threads = n_threads }; + + ggml_build_forward_expand(&gf, cur); + ggml_graph_compute (ctx0, &gf); + } + + logits_out.resize(N*n_vocab); + memcpy(logits_out.data(), ggml_get_data(logits), sizeof(float)*N*n_vocab); + + probs_out.resize(N*n_vocab); + memcpy(probs_out.data(), ggml_get_data(cur), sizeof(float)*N*n_vocab); + + //if (N > 1) { + // const float mem_per_token = ggml_used_mem(ctx0)/1024.0/1024.0/N; + // printf("%s: used_mem = %f MB / %f per token\n", __func__, ggml_used_mem(ctx0)/1024.0/1024.0, mem_per_token); + // printf("%s: max mem = %f MB\n", __func__, mem_per_token*model.hparams.n_text_ctx); + //} + + ggml_free(ctx0); + + return true; +} + +// the most basic sampling scheme - select the top token +// TODO: beam search +// TODO: temperature +whisper_vocab::id whisper_sample_best( + const whisper_vocab & vocab, + const float * probs, + double temp, + int offset = 0) { + int n_logits = vocab.id_to_token.size(); + + std::vector> probs_id; + probs_id.reserve(n_logits); + + for (int i = offset; i < n_logits; i++) { + probs_id.push_back(std::make_pair(probs[i], i)); + } + + const int top_k = 10; + + // find the top K tokens + std::partial_sort( + probs_id.begin(), + probs_id.begin() + top_k, probs_id.end(), + 
[](const std::pair & a, const std::pair & b) { + return a.first > b.first; + }); + + probs_id.resize(top_k); + + //printf("\n"); + //for (int i = 0; i < (int) probs_id.size(); i++) { + // printf("%d: '%s' %f, %d\n", i, vocab.id_to_token.at(probs_id[i].second).c_str(), probs_id[i].first, probs_id[i].second); + //} + + int res = 0; + while (probs_id[res].second == vocab.token_solm && res < (int) probs_id.size() - 1) { + res++; + } + + return probs_id[res].second; +} + +// Cooley-Tukey FFT +// poor man's implmentation - use something better +// input is real-valued +// output is complex-valued +void fft(const std::vector & in, std::vector & out) { + out.resize(in.size()*2); + + int N = in.size(); + + if (N == 1) { + out[0] = in[0]; + out[1] = 0; + return; + } + + std::vector even; + std::vector odd; + + for (int i = 0; i < N; i++) { + if (i % 2 == 0) { + even.push_back(in[i]); + } else { + odd.push_back(in[i]); + } + } + + std::vector even_fft; + std::vector odd_fft; + + fft(even, even_fft); + fft(odd, odd_fft); + + for (int k = 0; k < N/2; k++) { + float theta = 2*M_PI*k/N; + + float re = cos(theta); + float im = -sin(theta); + + float re_odd = odd_fft[2*k + 0]; + float im_odd = odd_fft[2*k + 1]; + + out[2*k + 0] = even_fft[2*k + 0] + re*re_odd - im*im_odd; + out[2*k + 1] = even_fft[2*k + 1] + re*im_odd + im*re_odd; + + out[2*(k + N/2) + 0] = even_fft[2*k + 0] - re*re_odd + im*im_odd; + out[2*(k + N/2) + 1] = even_fft[2*k + 1] - re*im_odd - im*re_odd; + } +} + +// ref: https://github.com/openai/whisper/blob/main/whisper/audio.py#L92-L124 +bool log_mel_spectrogram( + const std::vector sf32, + const int sample_rate, + const int fft_size, + const int fft_step, + const int n_mel, + const int n_threads, + const whisper_filters & filters, + whisper_mel & mel) { + const int n_sample = sf32.size(); + const float * samples = sf32.data(); + + // Hanning window + std::vector hann; + hann.resize(fft_size); + for (int i = 0; i < fft_size; i++) { + hann[i] = 0.5*(1.0 - 
cos((2.0*M_PI*i)/(fft_size))); + } + + mel.n_mel = n_mel; + mel.n_len = (n_sample)/fft_step; + mel.data.resize(mel.n_mel*mel.n_len); + + const int n_fft = 1 + fft_size/2; + + printf("%s: n_sample = %d, n_len = %d\n", __func__, n_sample, mel.n_len); + printf("%s: recording length: %f s\n", __func__, (float) n_sample/sample_rate); + + std::vector workers(n_threads); + for (int iw = 0; iw < n_threads; ++iw) { + workers[iw] = std::thread([&](int ith) { + std::vector fft_in; + fft_in.resize(fft_size); + for (int i = 0; i < fft_size; i++) { + fft_in[i] = 0.0; + } + + std::vector fft_out; + fft_out.resize(2*fft_size); + + for (int i = ith; i < mel.n_len; i += n_threads) { + const int offset = i*fft_step; + + // apply Hanning window + for (int j = 0; j < fft_size; j++) { + if (offset + j < n_sample) { + fft_in[j] = hann[j]*samples[offset + j]; + } else { + fft_in[j] = 0.0; + } + } + + // FFT -> mag^2 + fft(fft_in, fft_out); + + for (int j = 0; j < n_fft; j++) { + fft_out[j] = (fft_out[2*j + 0]*fft_out[2*j + 0] + fft_out[2*j + 1]*fft_out[2*j + 1]); + } + + // mel spectrogram + for (int j = 0; j < mel.n_mel; j++) { + double sum = 0.0; + + for (int k = 0; k < n_fft; k++) { + sum += fft_out[k]*filters.data[j*n_fft + k]; + } + if (sum < 1e-10) { + sum = 1e-10; + } + + sum = log10(sum); + + mel.data[j*mel.n_len + i] = sum; + } + } + }, iw); + } + + for (int iw = 0; iw < n_threads; ++iw) { + workers[iw].join(); + } + + // clamping and normalization + double mmax = -1e20; + for (int i = 0; i < mel.n_mel*mel.n_len; i++) { + if (mel.data[i] > mmax) { + mmax = mel.data[i]; + } + } + + mmax -= 8.0; + + for (int i = 0; i < mel.n_mel*mel.n_len; i++) { + if (mel.data[i] < mmax) { + mel.data[i] = mmax; + } + + mel.data[i] = (mel.data[i] + 4.0)/4.0; + } + + return true; +} + +int main(int argc, char ** argv) { + const int64_t t_main_start_us = ggml_time_us(); + + whisper_params params; + params.model = "models/whisper-tiny.en/ggml-model.bin"; + + if (whisper_params_parse(argc, argv, 
params) == false) { + return 1; + } + + if (params.seed < 0) { + params.seed = time(NULL); + } + + // Model loading + + //printf("%s: seed = %d\n", __func__, params.seed); + + int64_t t_load_us = 0; + int64_t t_mel_us = 0; + int64_t t_sample_us = 0; + int64_t t_encode_us = 0; + int64_t t_decode_us = 0; + + whisper_vocab vocab; + whisper_model model; + + // load the model + { + const int64_t t_start_us = ggml_time_us(); + + if (!whisper_model_load(params.model, model, vocab)) { + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + return 1; + } + + t_load_us = ggml_time_us() - t_start_us; + } + + // WAV input + std::vector pcmf32; + { + drwav wav; + if (!drwav_init_file(&wav, params.fname_inp.c_str(), NULL)) { + fprintf(stderr, "%s: failed to open WAV file '%s' - check your input\n", argv[0], params.fname_inp.c_str()); + return 2; + } + + if (wav.channels != 1) { + fprintf(stderr, "%s: WAV file '%s' must be mono\n", argv[0], params.fname_inp.c_str()); + return 3; + } + + if (wav.sampleRate != SAMPLE_RATE) { + fprintf(stderr, "%s: WAV file '%s' must be 16 kHz\n", argv[0], params.fname_inp.c_str()); + return 4; + } + + if (wav.bitsPerSample != 16) { + fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", argv[0], params.fname_inp.c_str()); + return 5; + } + + std::vector pcm16; + pcm16.resize(wav.totalPCMFrameCount); + drwav_read_pcm_frames_s16(&wav, wav.totalPCMFrameCount, pcm16.data()); + drwav_uninit(&wav); + + // convert to float + pcmf32.resize(pcm16.size()); + for (size_t i = 0; i < pcm16.size(); i++) { + pcmf32[i] = float(pcm16[i])/32768.0f; + } + } + + // compute log mel spectrogram + whisper_mel mel_inp; + { + const int64_t t_start_us = ggml_time_us(); + + log_mel_spectrogram(pcmf32, SAMPLE_RATE, N_FFT, HOP_LENGTH, N_MEL, params.n_threads, model.filters, mel_inp); + + t_mel_us = ggml_time_us() - t_start_us; + } + + std::vector prompt_past = { }; + + // main loop + int seek = 0; + while (true) { + if (seek >= 
mel_inp.n_len) { + break; + } + + // encode audio features starting at offset seek + std::vector features; + { + const int64_t t_start_us = ggml_time_us(); + + if (!whisper_encode(model, params.n_threads, seek, mel_inp, features)) { + fprintf(stderr, "%s: failed to eval\n", __func__); + return 1; + } + + t_encode_us = ggml_time_us() - t_start_us; + } + + std::vector probs; + std::vector logits; + + // SOT + // ref: https://github.com/openai/whisper/blob/15ab54826343c27cfaf44ce31e9c8fb63d0aa775/whisper/decoding.py#L506-L526 + // TODO: use different initial tokens for different tasks + std::vector prompt = { vocab.token_sot }; + + int n_past = 0; + + if (prompt_past.size() > 0) { + int n_take = std::min(model.hparams.n_text_ctx/2, int(prompt_past.size())); + + prompt = { vocab.token_prev }; + prompt.insert(prompt.end(), prompt_past.end() - n_take, prompt_past.end()); + prompt.push_back(vocab.token_sot); + + prompt_past.clear(); + prompt_past.insert(prompt_past.end(), prompt.begin() + 1, prompt.end() - 1); + } + + bool done = false; + int seek_delta = 100*CHUNK_SIZE; + whisper_vocab::id last_id = 0; + + //for (int i = 0; i < prompt.size(); i++) { + // printf("%s: prompt[%d] = %s\n", __func__, i, vocab.id_to_token[prompt[i]].c_str()); + //} + + printf("\n"); + for (int i = 0; i < model.hparams.n_text_ctx/2; ++i) { + // decode + if (prompt.size() > 0) { + const int64_t t_start_us = ggml_time_us(); + + if (!whisper_decode(model, params.n_threads, n_past, prompt, logits, probs)) { + fprintf(stderr, "%s: failed to eval\n", __func__); + return 1; + } + + t_decode_us += ggml_time_us() - t_start_us; + } + + n_past += prompt.size(); + prompt.clear(); + + { + // sample next token + const float temp = 1.0; // TODO + + const int n_vocab = model.hparams.n_vocab; + + whisper_vocab::id id = 0; + + { + const int64_t t_start_sample_us = ggml_time_us(); + + id = whisper_sample_best(vocab, probs.data() + (probs.size() - n_vocab), temp, i > params.max_tokens_per_iter ? 
vocab.token_beg : 0); + + t_sample_us += ggml_time_us() - t_start_sample_us; + } + + // end of text token + if (id == vocab.token_eot) { + break; + } + + // 2 consecutive time tokens + if (id > vocab.token_beg && last_id > vocab.token_beg) { + seek_delta = 2*(id - vocab.token_beg); + done = true; + } + last_id = id; + + // add it to the context + prompt.push_back(id); + prompt_past.push_back(id); + } + + // display text + for (auto id : prompt) { + if (params.print_special_tokens == false && id >= vocab.token_eot) { + continue; + } + printf("%s", vocab.id_to_token[id].c_str()); + } + fflush(stdout); + + if (done) { + break; + } + } + + seek += seek_delta; + } + + // report timing + { + const int64_t t_main_end_us = ggml_time_us(); + + printf("\n\n"); + printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); + printf("%s: mel time = %8.2f ms\n", __func__, t_mel_us/1000.0f); + printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); + printf("%s: encode time = %8.2f ms / %.2f ms per layer\n", __func__, t_encode_us/1000.0f, t_encode_us/1000.0f/model.hparams.n_audio_layer); + printf("%s: decode time = %8.2f ms\n", __func__, t_decode_us/1000.0f); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); + } + + ggml_free(model.ctx); + + return 0; +} diff --git a/models/.gitignore b/models/.gitignore new file mode 100644 index 0000000..a8a0dce --- /dev/null +++ b/models/.gitignore @@ -0,0 +1 @@ +*.bin diff --git a/samples/.gitignore b/samples/.gitignore new file mode 100644 index 0000000..72e8ffc --- /dev/null +++ b/samples/.gitignore @@ -0,0 +1 @@ +* diff --git a/samples/jfk.wav b/samples/jfk.wav new file mode 100644 index 0000000000000000000000000000000000000000..3184d372cd2f8b804d3a540c70ec50d927b335d2 GIT binary patch literal 352078 zcmeEuRd^Ij7j0GdXxxR61W5=9K?4LhSRlB&ySuylK@aZkaBz2bmp}+fh`Y~Zyu0eI z!2Pd0+}Hc`GheJ{y1HuD-fK&(P4{-KTelvfBz>ZKw;DZZW(5~Q2*qn@Q~a`s5P`Um 
zj_rE%EQdeab#C3IU60m1o$!0dVY5fqYgnUBWR2RjYLSt{W)345<-h;_D}nz?;J*_1 zuLS;oN`UkK_tQB3{qJ|_?_bNV{JHw)xBq;i4EO)M{&Pp!=l{=ZWuyJ)(|>;d&tHE= z{?C2?_s{=xg#Y^;#u?pVhHvK^`E_2v zhml=m4(URgkhbJ6%shjuNEOnIj3gV#VzQTOftojUo(BgkMf8uREwI+F6F7}wA9OME_` zjuCUQ+9Vze`Ay}6`Cq&ecf$9{cy;1Bc8G0Y2Qa=9Bw5TZu~}>_JIQpc5wFQrT;SFD zGU&r3!pS6hf}W=@=~cRf?xas?0ZpNcs6G8co|3O5pH!tSs1MebPTr9$5=Y*X(_|E+ zB=Jnh*us6Fk#8}dBm5*kgqd~VgRr|pc?6H-Z^MT{&NCpjJ^VTM#vFUXPR^4HpG90 z`?0c<;_xTcCbulhn-dNOy+>~eud1g@|CcpL0Een|A9L-u-BHP7G_)p zE6?B$@Tn~5#(k`@H*W{KyM%pl;tqT~e*<}4gSC4BE!;_E5)8~Kz@Ft|_m5#-cVUO0prr-8 z0vUkME68U&FAX;O5L*2Ybm0W7;Sv6N$)7^H1^hGa$l(Ey!+yx*1&Jpm#EXXg*1tWZ z7jP(p>w$vlkopyVhTrCUu>)tYMhmZsovnu5&c!$Zz@#2z8c?VQc6%UZ_yp_T1HGEU z8(^)4OoGMDVRg#@QVwLzS$j4GGOiALoC>+_&wlvya>#2QcEA(+)gCguh}qeb8RR1=N0-5xcF}1-g{Ij5zv)r>j2dZq zAzC;qNCFkPP%OLl=QuF|hOV><1j37Hd7khO=nai1lKduo5e449%JctC@f? zB7qQz*yBm0F4j90*Pmeaw}B*C&@E@uAJS`q*^GwGm4`mmBAa3Np;%8p;KOO4#dhfO zR(c()Y(aG-gGfXJJ^4%$Nic1WYrSX}tm-)#PxO2V(0mH-%w1q->w&bC+~sX~G#?Gr zD`uD35$uWry6Md;@Ibs2yc8NA0NEbK>Jx$OzhK2qq#I_j8`c{GjD1S3!HSk+rqP(^ za`GJSw8L!w!PPzZ-T?ls67--kiRW9eYXhG+xnEzSDv5g%h#!|?!Z@Ly@Rv|Qa1%0UEVO10 ztw7IXU&7(p(y+6U*xNhUpBC_kec*xr1?mmp2l#i`>R~>YH-ty=gzS1k8!td3X28dE zgD+?YZFr7Z_ya}L;kSG9id=+GNQHi6Lmy7F>+Car-@|${S7w$gv%ai58_ABc$LtEb z!_2HX?BEuj@(knN#`EIv{Sto-f4mY_wG93-1-h364fKE(^#z`{MPcCuDNo1HQ*=Ca zqLj*{AN@qFG=@HaS2+j?bbv+Fp%VE~ao`72Q2IKHm4+xWTxNj+jO*hV&hErU#RSOWB+goWV#FOW_S@F0*x0Wb4l7e%Bt zEW#d$ng#8B1bjV2&X59JZB0kg`9O<7G@1^E-PE9dh$dX9I}OFB4e+_(+5xg#9kOt75L6F5-NO}eD0c+U@q}~dwi-c!vj+X~d!3tyGg+h1} zpkNn7QnjG_=lKU{%6DK#b#jTgVD2f{;ddmJsId|s?D$mdN^RPaE(S6*f?w>0_4cRL zXaRYE^+nR5vjCm_duvgNl=s|Wur@%pgt9`HD2p8o)B^9A((B=+;DM9x)za- z5&AI~KE{a^%W3j6d5gS2ULjwSHL$Ro%#GKB2fmJZ_QNiXhqw9xe^Hm#gG`3d&4|3J z3wFYLBn@qZ1;Qa=xzJ5;75v~i8X?w9L4@`Q_M{PFATe)5WF?T^Vfa}UP~r(Bumy+^ zf|(D+OnO6qL%4~lfV@U%?^Sq$X)FTs`XyIDY`Btjh4n9D87vsqwE;NW3Yb|H>uCke z35Ew4K*qoyErxYhg-7DN0cj3obw^CU8E6#73!w2itOEDt*}#%A&9BR=Af6fsi9Ljb 
z?!njZgI`^V-J-zPvC!dju>4d~7g*SuRtDM;`j9-t&Zod4&OmQx;k9|O>zQhq3j~QMd0r;K@AG;HNpaNvnA2Mr&xI7!)N6$2nnTp$UB@aOCkPWO~ z0_&g?uNK4!SyVY7XEm}2*1Zh# zz6!1G$z2gQxMD`%;fq%z+Drwyt;1^fvDe7JD#P~-;ybXqvMBf%tR)Un*n9ZUw#aZg zVdQYcUuWQ1_w(Jrf{wf~*0h12MqHjCMafeY*4 zEgH~rKn;6D$AyFuk!AuN`@ueIBZ3@-oz-K+P)>k9x1nie(Qyp?SR^8{vp}fMkZezQ z_891>7MSP>eGP{OH^N@5LB#M1sJVg+!e6I}nfOo@p5;T!Q!ApibJ(GW=^jk z2av@Kpx`3lXgqX$7iK;THrE4k-w55A51eTUzo}zJro_*V(33mAWArER2CpESEwItk z(D1GJo&@wv;i1S-^gzJ($Zs2ym6+!p_>3K}%mF~;vPwdK_^EEcEod592)${FD+#12 zq)?3-kvHvwU5CPo1>nO2V9OYMt;XCAVJ?2aqUP{ib1~DhmDT3{h$j6p4_D~ZdSFX^ zGIX~BokY7L@;QiHgkjGm#Bi;!m&4$_hX2msJ;+VW%$~b* zH(rLxdk|41uvUnAM&ns`QH!_%pOyN1WmQolI0A%T0qv>|jmhB?5n+8HNd#pFL{|pb zN-KB{H)=;C=qTt$Cm~h1C3F%5;R(G%_0WtfzyvqcCh9_87SOr4Zz?p?6YKiFi2?7ONrEkAd@2ed!g!&FP)YC+d=V{6h@l(8hO>b9jbQ2Bfq0XF zY$e#G@36M-@b*ST^>Ns#D)3pAV2f9Q*&mT1Jz+NF(=y8g(z#=I$6%dhE3X5zX$B9K zf~ch(G8|uehh!56*q#kIF&Ekz2EFsbuE$~g3y2eHVGT`T3G)zfZ-BltJgWz0k&AV& zMtt%TR`V5-I0Wg<#{MpWH~fUkfdh0S3qJ84z6Swwd=TNJLr0@IW4oCTJ0Q1`?d7I& zUwINbm>`-T9R@<@f-PsR$hdf6fFOQU)%LC=r@_ji*PLWltHS53{uyGivJTz(q zGR1Nj{WbFj)-1r7F2HjWsYZJsmuQE0`2_t&Z8Svi7CzC(sIkd z9=VB%%wo)7JgjssYI95AxBN*y;`6eK!5ZxDXGGSWq51EDeYauX;nJ>E_};y*pmUCF$a5EmN|@tXD7hfj>rjDVZ6^kj;PA2wtihkohQVp&GQw1$XPACo#~MpLj1FcWi)dz1hxRLov;#5 z_}3B8<-*_ox)-a+?3hV*WmVW9Xpagy=JmUBco$e{K<051)u}qL@p`b8htQR}$idnp zliyDh=~&^eunp0rg+8H25Jl{#Z=p#W=w#$1P2rOT=z)op`ROo3=$(*hm8UPU?n>k~ z_BrTxRmC0^kTQ!>K@Ua%|34$HdILKwJ7d-Z_ZPtPG(rrq8hCITm7jQ^q89d8jEZLn zYAaRQ{3ahKePRaM*d;{viGbeG^>!8$DXzVj&Da+T@DCv z8(ty|o+BE26OGvZ7@~{H(1Spl2TgE9jOmP3Bm@0xqE?lT>ToDLRyZtJi(Pw#9UlTT zZi)EeBW(2!=6(cmnFNj20Q0hVAa;BrH0S}jg7~F8-A&!#XBP|Yg|Bo49SCa?=yk*q z7Z97?MRa=!GaG}PxDw)*s))IAfuINQly|WFx~Qgf0PZeFRo5MLt+znC6NtV$AZuR@ z30#2o%*QI9Lr?U`-%Xg)CE(#GAZ|M18XffH96yTB?FELm1_C+r1XOwFK~wbbBZ-Ux zrN_gA?S&p(#ZJ6~OwZ!W#G^4cC*XpLDzTrz^e$qeX2`uOAuhUzeDMS%bdl(YJG@C2 zVm2*SHXBu*Nx+dSzoU$Ds8l4u#?H|7sPWXMrSRVkA^YaQq4uzrfAGXI%zcj9K@WK1 z2*|OlK6@Vpw`PdM%jyLmfs1pn+P{E^G042)5EEn|QfvVGSOB^8gx~D|JlqGZ5&27? 
z%6s+}k=Ps5EcT)TGX*)!Hq>Cwu(RwF^MQtxqIwhoOS^-$9tRrVhUPh7gb-kLQ)Chg zp%FK*Gushi)WLigAPPJN>*etJMq~nN1Z3R_A;RNfcfdOlENDPo~*(6)V;{T_V2HDbITz^@uWQ3;s(7+TT} z8kG%i+ypz{5nB5T*f$!x=#L#(hDhu>iNFqL10Bu)3I4|J52l;wF+`f1P-WRb7vkDt zXhlQ%4VCm*qQJR_1AK8M)LtSehaQcAlun{<-w(1YizCya%Vp;?*YG_Q{v-@nrvNPk z=txDxZ2pL^`eNRu_L)8#hB?2$ju%&D&>YwuwTQ@-UuNhW98k*S>*uM+5TvliOmsrVQ#M5Cy2fQp; z-!k~g@u*(4hUd&hWoa2=q|w0koHLJqha8g?Jm_zghAMu`2Qp^t&kxs8Z-kK$CPF7k|v(EoDqL|yQgid;n8@5_H7 z7OIcA4np>R6h6KkbZrgnv@Le0I%f2Me1c_WK^K1k+smrPyO32!0Qn~3DFdNh-yzvX zkWm@3j)gWXhwtiu`i&D}-Lvo{4N&!N%_=iLRuz6DSN3N;*(lUEW1tzutQ>S@6%g7U z-l95A2P=+2$HG{S=B-QUk#sViS;%Eh91PY?I5pii2Pll881-trtFjaPL7jb%g^Nu z{EU$eau{;jQ?LL#!9!p*lkxnu znBidLCE@UYt)PLQu=;4!6H1{qGf=?@fmOAGRIj4mw-h?Q4H`cle$Na)Q+D3+9JYT5 zzo!BtD?pwd;bCrLZjXT$wfHwku2|k9ca}qBtz1JsBzq&uo5O}dXL_>fY&)WKC;0eV zybIPENmrw~DKZ}rCorhNEAhY(vXU4Z<+FiVGNQl%h?wfqvGDRuP_cZ7JbOQI?i=>ljJPNU)s=a$ z>RRv^k;rW}!sk3d?zjW-IJ=?i<#F%+@V7iHyWBy7K2YlzEO!E177yQng{cARC)G8CJo_<=c97701~t3*HDc( zgIYut)CfXRH@uBAVcxM^SB>O?vPq@=DuCX%jyv zJQiyS?RklmZ#yO>vLkevXeSEPo9}1m_&1zow83fU65`CC$}QOu(pW4Oy@ap)H7Yl| z;f-8KE%?7-yqbJm8qMlZ7qO+dN?3(BYcBL?C7&n%Ycty_G7nf}Ev}NcSp6)_`VX0; z{%&vQ&_h#R_-LzQzF^r$`fD#c&3C9M?l6rhdQ`AcSIu@+Jfdl(2~hOsjimxx9UiN! 
z<(TCBx4pYaq;KXz^IO~Bq`l&$aNg!%2sCuERiY~43J+z0K>7pn4^}J$sc)(0DkA7* zauF3bCsK-xqM^LR_R?0LHx*+{cUYyxoVziIbeNc z`ymZw+jtmqidn)Y#U$k+g&yZtgN4IlW3eVTn?nt^jeBfG(2VicqsB#sM&@7g1>v%o zirrnIN^tn%taYraR*P%IDeC%mVVXJgt3_=v>4zICnj6`g%Z=n`ws2clsVBQinkg!1 zPdH@TpHYufbW$d0?%6%i)K?tG`X^gvnQX>d=AD*w%Q?$t>uTwcd{%zRkEo)Z4Q>jj zU5fj1s#J@-7hfnEP-(B^H1@)uDxT*E2vQN z%KDQ3E&ViF^%r#yi^PIwMb#{o702x2?8d2s6eq+vs%*QhPQRQhIUUvZ74zivwjs70 z%W*^ZlG5VP(jP{%v8}0%Es(wtM+%SVOJ%5ifBQMA8Nvf$n0S@D+G)h6BV&Gej}eY{-wxYM6B@i{|bvNsudQ%GHUoAD1 zXB{G(?3_v*25V{vi8R3Ou-jPIF6!}ejA^>AO74O5J=udxefUp0TpFSqlT)1i-FVn} zaFuD*YgQso8M2q-J*Kybl7=~_(r!l_jUp`SgzS_r*eK{|C~)P_RQXwaVKkFag2Oc=_vQey7rF>H-zRRvMV)z0ChQY&%)Pv6LYh(?&Xd%tx;t*RC|zl%ZQ*9A^Bc*zYR1?Y3*JUE_Mgr_eLV>AN;V-BOrjbC#1-|7u%X`X?@Z zGv$3+>MpXleB-eA0LuHNv+RlXhU!1-1q3BHDujn}JJ zThE?)l%{Y`ev(dA3Pjf#!D=a5$^PhK2Oy5At!DjJtjwB}XpKFZ5SVYz zjGA1#RhlWvrQ$I42G8c9OCwn56SsKjOVOGfA?J9>dcIT~SvoXnW5SE#5h_>pC&P)X zn^{u9rXp5QuO!hDB@EYiJ0I}$^R(F!oYwm}cdq0a-nsH5_50MISH&+wQ@s@~-X~lS z+WMq_j7v}LBK7s&PSfz)bf3>aE>r}4exy|X2Yc;Pdw;Gzhtde!LF`-~gdU4w8f^EjurTud~(l2H# z$!VQGK`#rv+^cx^aj>v@wr=#c-6N0q!0lCv{nLeGsU1Jx{4t}XzujkdrfQKpm^N^Hr}rAK}4_vvoRnbKF5Nd2b5m%4ac zIn_b;_m$^{dsgY>8}GHSLj9mzkLoPkM8sr=6w#q1F1uo3e(|&{eSF)5)9JIa3R3r{ z&n~^9nrm+p#z;M>m*aV#RYB)#Wz;zkFwtBR`|Zn^>~e}jF87rBZ12~nZk2mRvaI$= z{^7p&9G+^XyDaf*T`4>`yz1?+4?#Pf2Nvy&Iq|_O;Zfe5g6FC3Kg!3BOCG43>S(OS zn(S@xELdCqVWp_hUx9U9^v=Tr2LzpQIG8;oc6)No!sGcHQ?4eAPI69K`*YE+@5N)3 zOC8KA53zxEwddE0FT!uvITkv>p-=XcZ$2pl*k%tm_fo4V?$~4Jhi77T8aKMS_-=H5 zPbXV%NW0b7eFxNV{cBBx>^Y%$U2M7UscB=2{d8aQJkyUQe2hJmm|&Rb8(GJnGJ`9rikg8L$WK|;>m;J)Wl=41HLtTd-7?w4W_-HQ^fuBIu{I;Ak;oBNCO*u`S45Ot`@wF9{+7Pv@W^SpZo zR;poFhXr<4RW&vJtI)6hW zuiBAqTe>%DQ*n^IDQ^Fp>Ob!o>j+y**2Q|goAUK}^5DG3ri=DlD%J}B6g1Cmq_~Ex zR~@yVuDvIiZISvK=>=c5e5jSImd`oQa5|}YLoSNtRI4>dJSK<5G)h6<&6nE!9dG99 zlO$bB$70uMlAL0U?V5Ne-6!X6$!x_u-xigEecZ)s`Xi-(2}_;2JD*aSZ2=|GY2Cli z|2{fL5N0^ERJM>FvoEU6>iyy^yGs>y^=!3seR_!dg(#3pv3mf)Duf#238v|v-c!`%eSrnJELr9Yj2 
zEzEVHoqhifc^%~D*3tYeE2W^8vYWTyepOD%z7{|BOSz9vt^+P^;AJO98GpDyq}kAV;^JMUwo~&e2(vrn77s5TVjsKCuRDRE`eF$7yONM zv0=5~6mX@%>b5mod4xK#!-?yDF3zXc3x<2yb5d`mduO?44$69`d#=3|4R1>0h((*Iyh#3X@CaU3_iXq*U+!|s zvxfIxZGR&xTw1!ZbXe-gm)~z+ezY^DxFFE}cyPVyoqem>1{V7&&xgKh?bA)wdS`Gs z%d8(eV&4|dB^|A`vpyv3NUEP!Ep=(8i*=9JkZL8>`&X#$U?*)fa@#HC1p6=Qe=U2{ z4DUZbZ}s(Z!D(d8`G&D2HI2cx8uB~k6u;Qo%6iKxc{#sw+Fm}lVu^bScQZ{e$wfIo z#g`>dM}HI&$L94BvdV{6QM;w%q5 zzuA6hGolP!Im9!)qOyvQUzoyIcUgZ?eC@o*X^g6c+^5j{$JEztV{Yo7IxKSNA~i30 zTw*RAZEm6r3-}Q+E@D-H(Qb`hB_C^Gk#~KKqrBTF=KIFGzAyJSAm(gJb=^(XSMTB8 z4TN7gDH&5_Mdh5R1MP*#mQG7DBjZZ)nC7cTAMH(}BL92#$&3rB#aTs`7`MPGQq{da zdu5;O6X}Ob{t<3CxH(=BUlzCinf5;RTfKr{@r(T+?Lu*cv{3)G=(DuMw@aO0b-ewj zX|mP%9uB^P>^oXh44cdyixoc?ysZ3c@%Pa5<;78|jg^npu~oWG>lF+%?)7+FuV;sX zb_c_n83(`S4|l{*qN_b-IQ}p<&&)|Uo_ITJwDql9uh6184?W<41MXisx!{2)XX@k9xk+Qx!*snC%Sx zj0<(G(yd>&zrXb5YkYdf0aF`~IyJR*2l(HV=HzbDFLA%yu((U3=pgUxq@@poUN~mj z(P7F6>!f0H!GpY)>9>*x7HFJ$M>Ip3|Aynk^haNGzrNah+y{Dg74~Pe{^0m(#P^E2 zoesNwqC6KlG*!JOZ!Gn!^_;5L3T&P9S9`x+bc*eyw8@xWm|wEOvO&K!q1B6Rk1D)A z5i1l;^pF~wTKpAs*vON56p!_Fiu&AreXEN8rFoOzc6j?L?YdM~d4OLtT`Gyn?UK4J z^^^Xon^<#2{pvx9ik0~lv-XvElX2Q-_D@vtrr9Y`pLTp{lx?9gE`!|;IZPJDmQKxU zVC?JFw*Js2o2&VF-*CC;TG>8LzG8}&Hb|TEhkdtuFMZh@cj?ER_-@5J+y;kl55MAl znQzs%x4w1%RNuX`cl1_om(1V~}#p^!pdA~R5b7?Ax7J}t4eOUVWuMOYMiXACE3Y*qyb;Efs8?&On*Gv|zOTDtg za;v>}`;;TT>;7SPZhOsW&%ti>G}~;;3yRZ(oK5_s--~en>Pstyc$U{3m985lYplxN zv6yOd_kZj2;nAlnu}d?<>8J|l>U0hBbl8|5oAS(%9@w^B*FF(VZ`lt|OnvS61qD{) zZknQ=qHbzyo>>?(B_<==(XMjHf?DAf^>kEPNX*?Im2$J$5tsVDVXjThx`b+9Rwb3# zK6}*)3aj|m<$>i;)|M=P>2L2rRa2|}42<${QUw@&3$jY9YqLGqIo#LHj$8fK{%w!A z*9Kpo!L>c>4hrZd&&!;jTc8cBJGg6Fr;{}lic`spKA!%Zo)T$t(TsFDqpo9Uo6zi) z`|HbDoqW8S9Bt}e;cxSX#E{rCsaGw#-PQycT%B~qF^TU|V}o)ISYBx3y|R6JJ6@AA zN|U5Or+fbSRc41-sxjW(v`*aR&(@z&d9`heH72Kcl|#|TuWMfA$0k|3`)>-LSov?e zkwwcAz2e=nC$JvwxqgS;o=ZKmlH>O!Bxb#_+MK48%k-=zo-FK}z0Vlu{3PgL^{wH9 zg6?`0lP$WMx^Ysz_?55Lolk8Y^Xl!k&kJ*U+eKD-T{+ivoLt{DO;JB!Rs-kOmL?M_ zFI7x0Xqp`IGc93p-Yr_g?YZ*^YuBV+A3w$D3v0V>3;7k~?=+>fUdF)G>Dh}7Lsbs$ 
zKiyQyBwg3cyQ!zM2A6JC9&xj{mBOx?l$1B;sJuMm1KI@ttnxe{#c{9P&Q!_TQFgFN zr2`6Xru~R}82dKeL$qzVoUwx+_bExIq!-B%xm~I7l9W^Yyy%*_ws=&PO&dx1l2K9D=^s}o=M9ei z-Ol=U^wWCQv;QcJWkqr$*1)>3_(tZz^lte*&0aJ{Y$oLJjpUGOi)+X7jungilRck0 zW~zL6GeevFojI$E-dm?CG8NY?OLDT4>tvM}9`WX^hw)a)D}A8pqH&`Bbjc0fTf;b; zSTGJ^;rc)Lr0}|`|^#-J#i0oGTTWiu=CCR4ed-TY<@T)IVKxT z){>D$UL`;E*GlvCkMu7}GfP$a3dMN^PYXPXw;DFe?uuaLS<=T=!OA3plNUE$MBG$; z?YZL(r@k)s?z22byKCJBxD~kGa%`(^FC1nG<~AiE`HS*875eGZ&Gp$kRP68ZD?+x` z-nEuTmirO64=!2ucf@WIGrzR#k%oaG+7w!yRXn($X5pw3vwmdhaNX#lPX*13(si?p zEi7i61MexkP`Inwsah%v#0WI8`3cTsF_@jLgc+(z+DN<4c1da>%^MD<*|9oliK6Ji4)fhgD;>mo~k>s#9@ zTZE;uskyPaf#_QldlvO9KCQ2A8f`vq9HGBbvR)r<_G6N0Rr_gHC{E$T(n)Yp^cQ;& z2l=)&))Ho|Dmjo5inZ#`noioyc216GTmn3*dfL0EJH4}8rM|0(5niLFeHABOx463; zXsc_RDGijH$giaoX^-S?`)STI4mWNvow7vQ;;qdsf##Ox(Uy$v=sqL*9rPe9WixyOgFQL->94EPI)S4f%AX#H`wCpzDu)MNtGbs%dO0OH$ z)?!;T>8UN-8fZOjttkhRU~rx^f)_a}*Rq9JTA9mRR$6yhOU&DhA%@O|Oyfz5N=lNl zr33ODevpn7Un(3_3F_K*(GHg!nd4K(JjZ@cbDXX^x;tFawp1IHLzEjK$6PvxEc{&u zzslP1L0}I2keg#iDw;l*?$jIg(T1x=tEs$YjHSN$v{7lgXdTO4gdxHKFm4{xv-A<~ zD*0I3T0dHsSko=jEicUXOp{Er%mZzqWSQcKs-rqieO*&oJ6L2l>!qo#eWpf#XMROysmMAmoSu^l6zZgnaUe3mVP%> zGBq)EHx4wsHpnKuwU(R?uFw>zuPxvD#u{avW$A8NX|XpOOPA|TmRu;Qr%TaaH|i{P z_38mk3yG!zmPnXQmA2O^lRkG~2#@b#= zAEX*mPpK{V(DQ85&6078>8*K_<)mdUW*2WMwjJV1#c=fxO$Tk7wu=2rhwqMO9h*3; zvzx8mp_!%bs0vX{SG#Fbw0E?%v;#Dms;kO8Wt4iMrl01NYMCNc_&|^06!kn?Dj6+_ z<|xYrOG8V9d9bOGX_)Dlv6kUvX}qDF`MTw&rKV+wd5F2O<(lO-_U>(|Sn6chYwBn@ zZysqnW;$iv%P)%G6*U!06n#{OGz;uZb_VS}%@1{sdcDR+>#Ye>_7_gj{z9f;5Gsk= z#c0J~MH6dseu&Z>8 zK3VrpU)M-1!8R&emaaR}RLE9o1hkzmBR1WmCmeQ7N<}yTH4sgY(W3u;eS^?ClZekz*hIhM1pXIIQpBoqR1cJTahb4B=r2wc+lrrs z5Sq_A$T_wP)~RUhjkZ0rYOIee4Xh(<@1+9yIJ>|;;QZ;4E!OI3onT#r6Q`NBYSshh zRMTAZ56g8MVGcNtWH|N9=NfuSIHs7WIIZ;1xvQnYmuGXo1mHotOqD?%hc&w-hw0?w><*hW17J|Lj zMi?xHD^5V#jv@yqHj};6!J3<-SE>t!g_ihmb0C8L7+HqC4#wNy8afs^X1krQP8F za!58fqp!dX=)$+-Z0!)3DvNL)8_8>sPxOo6C@vOfDf%j{N{i|ePI$`fb-p4<5uvOP zTW%$up%ut&oL67r3vt$8kKdL%$i3No?2-&F;4itYJPYSzXQX=4W?QLsgRL{7zPa)d 
z>6_GDej#^Y<=8=a40{W^(&1FM1*;DaFc9pRDYBdN*mh8=A^XaY<@)esgXk5(L-AGl zRDDtXuTrCYrWBMr#pmL7g+WnAxkYh7uz;h|n|2qJqL=tDShw}z)pme6b&AL&oVKK| zaoT%@EtV6c>T+YYiAUgca|P>ylk}|^Um&g63hAV6i_JlrB_-GvSo>OrnHQQynWmc5 zruC)*^EOKx>rq=hDNcI929hz(2L!p5n?yX)kmZR*3h-WMQH( zjrIm3!;d_W7f4T})l!sHSF%_yS{_^ct&uiCHcESJ=WP>hUe*q_DD)QiGl6xHM@YA& zJ#s%51Xf}4$MgNgGJ-9iId5OZW*!S*-+ zR!j(3MLI5m(Q5-&pc(kf={y}B0UyCH@S+dE-iv-jp?4%f@RYz`u%hd`YG(gvfS z5SEBe!b(`_c=0K<7Y7PUcq>*_zF~a>SqE9W**-`ErC-uA@YdqlIC2uPjlEbT+6xcK z2=I4DGkeyNPhr>Omu{2R=tAlepv{;}xEYrz$)Js5lOwjH)!@?yCnGw_?N zm+hmqrCi89uu+~gVZJg^y~!LfM3JE3df9Xh=7z!(J^oyg3OZY8tin`|Xm z2)<+v&EvW9C|(cjy9D$RO7tSQH9mATxJlJ%lHe)yfo2wwJhq=t21XTgA9Ryd6WWVZ z2q91CUSTAiDjXM@f~Vpx%p&J_Td*`J7~1>5B@w_}jfH$5 ztUcX}ejW?BWwYrpalY6U9b`3y=2T0Mh6r0W<&hnubA>ciB0Rv|geF;vIy)aa0NPh8u zfeKmRt!`sRISBgmL2f6-*qD48_qFH4vAZML3jPIbsGZ~<`dDk1@#HbLSoQ*)c1w|g zMDVUGsf-KC`ts%I`g>1JbPIST4Z%)IrWv$834tG-1|Cr`>BxUD@Ho+Bc@VvV@2I0V zPtilXD$G-OD4K)6v_e=-e~?II5fNlOxHPYUMB{)b%fbD;0wnLjUBFx&4qlGTljTQj z0vKmz$a4@22b*fF++I?$!R&{0PR`_uzxBPR^3vhn0J4+mpwJTbCsQV-cApGU;+ zA`g{ASyM2RQh8Zt|2eQmyMj5mir%J9!Yt}c`as_ofPv)-WLryWa(DFndXPde-ztD7 z?g;+B3yFevPehMwA-L`F=s!45`qRPuI@`iO0PTk0lPdVl!{Btn4oGG64ZP)#i3-S4 z3p>1=*OszvIr3U|7W!;u8|A8eFr?^$j*?Q$<~cnCmeVfUKq#RfzzaGHPT>l$r$^F> zLIGIo-zi3Y|>F$6-_yIkDUBKAdOgu1;iX;uZ z&R0|+4o60sC+wk3!O7Z7TMDxUC2T2)P66*M7=H5x#;OIYTm?xz=I@vjc>vaY3RuQ- z(I>SK(ZFM9<0WwMB}6qxz?X}_6W2rHL(tRm0KKxC!BO`BW=td9nN_ZUF5MMu2NR2kbB4%6VCdzC9iKVt)a>4}sx2lP62Ny#~&wH(SeRk{7Iy2MXu}k+tCO zMuDxmnN^ejl_!I}zZ@LHhrByE1csdtwHHp)31HDL1WF4$6Taf5JeOtjE$FQIivAf{ zu7hrpg;;52n#wD~DjZ2Gu<*aJuhKPnD0?kkk)E)X+zuJ)J5He4*Tt)16Y+*n04y#9 z!#D@=R}Jb-vJ@!3kQV?$f4fTcjP7bho|7}g27Pi z0KRTHG81f4EqliLBjX_aj{Jr_CQH$`^O=qjwu39T1ToW6K8EjRJ(v%?^KSHHO#|~g z6rbW0-3jQLX(fE8pTTk8PZT^wUXQM@Vax)C7y&=6oxBs>0T0;}vVk&iGVSS5(vY`g z_n>omz|JwO3Z%Az=MobQ2dn(C5Gz~&v%Nj-2+ycRZWAfy(B&kah6_Is2lWPOL@|@p zUydd<>2ZFI3=@*UQEe+G2{Jt0QFP|lNAK}fc1G6AlaRZX12T7{3gKUVK(5J?5PA79 zBbh)SFmxsgbxB1ziuWOrVE1^iZ@0<~WJRh2!)xyzzWG 
z5&o$?s+%wPY*^=Fxkz>c1GEjdLlpQG9a1?!!rG)1{%;6V$VcQ?@Ck>Z!%D$Q)8GMS zpuaE~{pRSqCfTe4d&OqSNzyU?4`H$b+1F9)3R@5Sj|F?#Y{aC8_;BPcxri@q==`|@ zF77_`7lzZFhzUx;+G~d^OX)Qp%;L#45&vfiGR6?`9O5lkAjm>Qy(P$O`-oSFBRUzH zpp!Y91$fAd=}aLKHrijPfqHbbxJB?_d!!~<%SCw>%aqGW(X1&w#+yj%tvQx+ zwx)=f5>WMnx0WCC`}8=e53l!~9puSEBgJ|$2)n%lY+Ce_qKozm7_ajXlXYf&nczRZtf%@iO_@6A|`3i9GYhs+i*z+y&d-UdYmy6Ije}wtN z9uA>XWUIJNC}CB}NFkKOu$j2~0{m15u7_UVmPg1R(T$MFR*?5>jAWK~^8;k0I7GaR zigSP>Of2TB$up`Thu9Gy={WhOe3{gveV}Kq(Cb-XI4?kFNng4{tfLq};n@(uSmBqM z5JcSB4df(~qzvmK^wPbR9sqAM1hdqc8~E-OcE$Lf@^thAdQmQ#vRnD5i;ZX*?{kGJ>??lhBQ^a107Cd(R``=30YW(+?A_oTl^m=hY_QD@l?r6nlHVS&aqU~mK#tr zYPzT__DLEix}-JK-m*!v8yp9mQg*B%jzmUL*)A zS!orT@))UuG(^_P7o?-MRH-4dkK3%ad>K*YYC);=SNM~$(j_UAJXVg?%uyc~eaUKh ztF71;ijKXFsF?eRo0KaQeTmh!#j@LWPz-k1SGAyGo%cQEuWNEve>b)*-h3wyjbXd4aXE zsTTM@yXB!|1+q6+gbKx>^9lQ*$MXZ9PAuN+Q+EpD(9%Qss_Z)+SlCN(#yKsmTEg} z4L7ybZ7GT4cHQJ5`pztW(cdMYbwG*#3BM*DJ=6oOUG=SW zNxJ*RKXP-@CjD~$IWji#$F+p6Y4dWKt{0nZchObl?%{5B&+r)JS?aOJt)Y{h)=BY` zSF=VGFGyD>-2SonM|51xgq7)!N><5f3U~X9u1manddGPNd$$V63JI;*DeX?5s>)e>*TlDuYSD$C)St}oOjr8T72d9!CzNtWaSb-A-pTLB#|CG<*PSxQ*kXhr$F*LS11eX^snIF& zd*kv=el+Y~cTCuGzr_w)=w4~NrK!$4)BUI7Q{v0lPdYqtc;y-J zZ~bI-wD;30jq5jws@J+j+l%eSwC&Y=O6?jI*SV#OPYvsG2d0MqobvI_Q}26jcS|2M ze3A3HIIX_a((7_~%O)4vTxqu|s(jt#&}+e)D;EU#d8KO8q{O1{$)7&mfAZv}-K9C_ z!Y|alVR{~tu+{Lu`F_CMn+pV2PuT(7wKIh|CtmaFO(B`)G?X?)x=TF(~O3wkm45HTCTuIhFb#vsTvg#Ww3LVz;a>TeR81 z2Q{g%liDi5daz!pcq9(yZ0_eZT9$_9r2ORkz!iLGlg*wYykZ;)B}Z+Hf1jXSUF~UN z_8yQue^12W7EFPr_Xy?UWs-pT>+|=;R+G2bHa%&hKj+t@&R9QTZ3!W-=B{s9Y35HR zJA=9enG=z`p9iT5y6VdL3Uo#+sb#pfhFH7ul3eSGYC-UgTcOQ4kRqy;<*9i z<3x;^r%wD~hqd2pUD-1`etWh{dl$A7ISH(qvmSNr-9I!VE`vNUQBh90eO>f4vOLVo zNrrTjnogT=w)5Z4^(($;{B&4#;_T`1#4dEbW%;*K|C%#>HuGWl?~XS@{MR4ddSM%2 z9oW-_cwBLZGuC&p718{(SH~+<7ejBMUn7;;_59?4W$blhiL)2==TMI=)-Y2;ZifSX zPFsF~{HqbD9~<_DbK0i>I}lX&W|lcBY$MhY*8Q-XE$1)s1VTp0XUCoK$ zh`sdps134XlWTfIJ7b1AXLkd?Auc1%f^6j|j*PA9zB0UCcu7eC?<8(?xa@DXsMQN^ zTZMa#cn@!f>|AKj_-ie63}peI%I>WhDefyk7Uh(EZeAEGSLWb}j=)7#8)4h65|=LP 
zadN;3mA|Gcx^@5UcI(6d~&s-8r$HwXYts^grps@U&0?a zx#Mt(u(5wy;%ewE>@l5e8ORDI1HaR@&N)tBpSV4l$m^O*Sb!Rxcg*pR4}Rl5PjRrM zSv?~S;J<*{)G{?(w^y0Zi|BT(ykE4dP+9VCHMOH*e49dmTj|IOtyx!=a69h$%Edl$ zHhqw%f{D)hniq{WLy0pHie#mYEKK}jI&h+K?DXUUZ**>lPKpCM{NsBoz{*W+{~xu0 zw%qQ4sS*m-+Z(88r1BLfp<{n}d;Z0oj{Kss|1~cj-LBX}{ErCFuwg(%~_jNZWPV7zAteT)5n z^k=jFlbj7RmJ%4#BCF;S?f)- z(tx^o*E%N~1YM7(uP8<}J*v_99AVf5Y53S^3ddT=k&FVKn!k373)vOe#HzM?XW!;z zL%(bB47nQ=48-a#%q9)R)#nybGdoiwGE08tG?WZ+r8jZsST7>qC%W%ClX!i(n3ZVU zu4awxY+7Aw)>XkhEbCXnR4e3Y$uUv2V3#OQJR_kS1eQg%KRwp@{c>TE51RdD384&H z-NpY0Yk=)RP=QqOaW=1VL*B{s-5HSu!pct_zb8gjPNp3mw^yxA3fx}4i4bP%IE`H- z&Fj-Q)U*gkcF+En+s!W)hYR*|Ws{FMPXxHRl}ZpS#>(GA9-JTG;}U0|Z+FB2L^U;* zfIW4`7y1BM@~E-O`ZEQaQ?Gv8kTzd*t{&LeEc8bjGx*DB3GD5ZExXpG1X9kvy)VNpmf?E=D@XWeq3GQAl# zCKLqlF`W0D)M+WtQWPY~m31j0;BDqvh&Q9ikY16dLw45D&5k`>AeD zIyE2v^)=Hm^KSm3Kaz%^p2?}pI=$rqzY}X}6SgHi**L!>l(mESR`R&}Le076?W0kG z!`!G*yV2|kFehy~Tm+gsFuzoP*2siu9;6S@vf8NsQ2m_YF2nZEi6;#AFg%O^h!h2M z?J6tE+nl$gFto(CQrsdLiB-%QpLcgyuG`3s_lWUZ5${Dc$0&~va2f;JbH@Wk$9NmZ ze~y3RM(~Avn#glDNxD>X5mRRW#wXgp&c)J>LgCX}7`Lg}xD3PQ@D}*!Lb=GRJN3^{ zUTU^^&h32P(v*hU!R2x*Q-$Y`m7VcTTbCu?UVAdI-$0HqM#Ec-JJyfO_=2hDBd5kH zIc3uacp~Agxv}|Dbv)9WV(x9`Z^ZgSzF_m3KFxe*7lE%c@XVDEG@wr)Yr9_Tl3AY` zo!*v1{|#zz8c2|O7%lbiU6C0}*ixK$e#3a^6Z>~y<`k^UhMmGC&wQI&F-RI(HgTQf z%oXs8#CPRt{Q|au_T5kIr*;goT4j6FWrg=<=Zhvw;f;tzed@o3i&}P2R^)(25v>oo#sgMeq zY!PWaEn)eV!<+wJUEm&xE120n_+n7V`Ni8iys91A=E`myY91sED#wVT9QAALEr+vy z)&85@Rxuqs*9A0sD`?A2exSRM2cSH;=g9Z!YuP>D?|;9V7M-grJ=Uxp`A?}b`^(#X z1$9$+;*O-N8$1KcE&X(Cp2-xBf7Sr?9qJ%92X+44|G$1a_Rax!jz~sA4``uadHl&8|f+clsE1C5p50dfe_! 
z$fdas4Uh~WZ1T*suc&`&r0aQIMm?b;xyQMCW6$o9tDfq&mAKgBCZC>Cvh}`27`|nb9+QzTmDe>PjGp_v}ZaXyn1nfyk3|PH#OZ4uzFR@N5 zSGpDAP34z(NBCx98~)b*x~6lD5gi3R-aXh}_(-_0R^v`cbGq)o*Vojg(WTM9@%B9W%_QD)d0m1dC?- z#PX@-Rikq4>(L#xt+`d-87a!Ig0!Zhgr@L`#o95mGWX`CymgP`8WY#VmMuAFp9`^> zUC-Mtc**}dyrcP9HKIw4(*6(ycii_ChoSB=& zU*_Ei?99Fn;qS*;?4Or^#$^l?{8!^O5H1bCPS6R#6)Q&9!(+vpik6Qt?a*1X9TSfy z{ds0n2YZI<;~T&BwvB!sy)c5~mdiH4f=TTx7vDW@8rldw-|Kr&y_XgBwNV{m%=iTM zx$^wrsmfhBYf~M5LehQnX8(}de{gDmVYbiwlOk`dWo>*ID_CC=46^2^v6DxK^2TpW zsRm-2hw7?25=R@RnkVWft!Hcv>C!5ypVv9>Ovfv>){f3TS^hXC!ps}B7h^$q2RE1O z?cMxmHoGu=F5NSG?AM_>zrJK~Gsc@aw)pXy^&7f2RmU7(P4?trl>$6_Pw%JE#Ie+_ z`Fd`PerRI)0ska#T6jhF5twDXlaUb6>GRpqg?iDs%;%i%FZwDX0QL#K2sx?$hZooG zQ@Sfxm-RS%eSTe;Tg%7M?W)V>v0l$2zC;a2*T=4jxg9yqya=U_J!$@{4LO)R@T$48 z`d;hi(QiD6AY3>xyF#G_5RDa#Z-H_CN0`<20;gH8UY{y@C?Nzq2>t-QtcjR9(d77x zlC>fIb$Vg;<`O{T>)|-1#?;zl$I{%juVZkV{S($j4S6xKAEvIh+;8<7cs8hNbErqR zn~t3n&}W9lTcy+TOWHlC1Pa~rhnJ1h1M)7apUX8bj?-_WRD-SkEo?vVo~WqPx@=Xh zEOUDnF7NJdR%6+asr;1DR~JgSV0~2rW$V(d9WkYg4q0xR19rV_^lzW<>}^8T+-zJo zaGB>d`$qap@kFy^VH@I!<#yLrpA?rGJDlALrh`|NOP!?|Vl$`&Y6OXqof~>y!ztLD zh0SovjL&~u9?)LS2?l)}dD0iv!K0H1VceH*M+OcqjCz^%uEIpL^4z#%qlaYu#%ew@^mp zGers)?KZ$|kRM0_O*Y<15B3OWZKSNRxkNW}m$FK%5J+1P3+e^o$>Ya^TAj*+3m#?m zrpYsT#gLX4ljFKq76&~dBYs5xOuVvw7Ne+|N3ASOY0^W~AF@8kE=q35H_f$Mx@o3v7?NqT;P;ZN6OI3@euDr6K{%8LNVW$kB zq^VA6uIbYuzQiFa&1H~bMlrTZp7H#x%iqppDT53sS+6p3*8<5jgvJ`oRow z{@R+>!5p~~-%jriJh|a~{+L9X`9(3L!Tvk1p+O@lf(rw=IrSVOVE#FAxrPFD1 z7J?uh@E|P3!o-2&+(vz9cEwP#-A)yS&e!+g8k?N>EPlnxFjofJLXg?Jqoc0!Mu~qmiBR6!RV#5fzKELo>di3`v--=U< z3*sB+r|LoXtV!-iL$RwoV>}W+#(^W785E`%jf+KL6+Dlq)Ih)WyH9 zw@Ub(@Da*uFV5nrs4WS*w(Z}1DN@Ebi#jt?H1MJ0cXLv0QMp}pXQ$iLv)TX2|IrG- z{;0k9ujVUg!LAG!FZ*C?7A49V%G^c~5cz05PJufOsucS5kSmRHjecxTVSYQ6eyP~L z!)7)VN2Z_gD_Hh?lk>Ji+jqrYSW-!YfOk)kJB^x7)G5p7OWZ00yG^HKq!9IOP$X4W)mff3|ADeuOU|{)X3UhNnec7b|b)H)KSmzsR_oOQ_)X z;T3ozN5?JxPa~Ja97+0|SP%_eY(~TM-gtk#r?#?WVEChiMnVJ>1G(zCoM6x;C{E)c9)PwV-W2Qym4mh3dz% 
zji)UaJJc|X9H(pmmYue2I)PSXS_1n7nt)w}eO9?nj4kpl9UwXhDK9p?e)QNNIgfFP(;uK>C$!sa45k&9(DA!Q-OodBX}th(Z{zw4yks zx*u}AZnwvBw@oc=oHkCPU|L`~sB75A&~eFzqIbRruJvo=OH+UC0PbokJ({{v5Ru; z_arl2ZR;(U*v8ujke?8J5m|`$*l0}k0!%o?{?Oq6=TE_=tk>!1vfdVRnpK?tfo82{ z-8i8ct4OhDl6J=*k9_75PPnerO-~NZwd1RA{Vx9#)O3vfVmwu3si}eABN|ybJAQPZ zbNy=n#$0dLDNwcWmY`p?apU2Vj^(y><}4~-qXo_L51XE8k) zEGxSt+cUH#M=s@*Q7imE_y-IEg9U67diHN^v93}6KANx3;$|-|#x?zz0O?cA;+Z`` z-H}nzrxNDlE!X_^)>~Wz?U&xSt}M!3y(oP!0d+wC{9ep_I__dSDYp4VYuO(=9HH zyJjqQcj8p+_Q+)SMzc`h!tBb4w(iIEmn*s|me=uGul4_&j!@Wu(=jRLAr3jNZLD3i z`xFW-(P_KGN=rMu9_@htjBkUjkw2I^#6HkeT6Qq6A=5m2zTkbu>Mk?UI)pW~!Y6jw z(&z(;FSnR&`m-p7wio?TA>m%^yVPj$C-zrbaaqNtmIp&GM5UTNh*zeEX@7Za@V?I+ zrNXI~=$=kzY;O?qVIJ^5=m3ucEHoYvhFfTVEzx8)yqw5G(d6hWg=offu z#Z*klHtg1{q}%ssvEx7kT%L&)0I zzhZ_q{}&gxiseVQ6X7El>_o@<`x^bK+$zE<`E?69rdQy%{N?k{B#tIW8$ZS1o0G&Ym+%jhYig#CouhfM)rmrRVfw<#(+iiWb@rJYXSpI1;(*bNn41vy(RcY%iq*G$DY zCX{ZRUYg~y+-wuXMRAC~FraK{t8K2~)Y&)R?cOtPHoHgjH#D0_r|e=LaZP6qIKN|R zU3!@N>~5Rk2>y6Cj2iG%tR8Y}y;q?r*p~@OU7Y$m$FFh=`>nVQHe<8Y>u&ggsJ~-w zCnRr5U%J4&Wo!&QHZPeT?;mWsTUAiGtM+KKV{hu@^jv`^5%QHFv0cGPVLYKX(nDBX z?vGqU9m1_>X89&%1Y>BbWa-GQR^O^`#S1wH(&Rszb1qf+_9xDrLcAhZdt423UQLMk zl3=#+=wgKv$oMPBRJMkDwC`+-x^7k7o`w~z&)Kx8;JLdR32c|Ct38$B;hapLb1Y)n zyB#*nJ8fp~jcN&V=-+_C*`blKHdWQ8lJ2~7m-@zc+tZaeCe9p&%IrV@kc^6}9BmYDzRN z=KXrNC99qH8FuzCK6xmpRa=!-39ip=zSR1rGjMpRP@u3}2!d|Iy|PTAzjY1v+U-N| z4e;^s%wze{02U7rL8=hpxsjV~rwvo}^_;gkcXAIE9<03Bvw5Z#hPF|-u@*gFd1u4Z zn01>xR(}Y%XPXS+i8IH4^maB?*96udX?xHu>je*8<911Pnym|K!C1_&d6nH$MuXvB z`#qmLFOXZE^N^j&%mNlF8{uLH&$l#|b>!M+3Nn6VozG`hAbRp<;^Cn-#cu4-Zz~DW zdt)EQEQx9hHl{5?CCMQ$*fLoo|Ljx-ZTSgA^Udb}`0LV5}w5iIj)K%}} z7%=63$j8Rxl`D?9lkzV%P4!Z6Yn;PwY78m+S%@q=SHvzht$?)7jlEW=Fm05-Ja;Yn z7%{kP)pF!=&|(vh7}6!+WpVU)SGS_!ORaZvSJ#T(yzY~|1*6`gZW%^JA{;e&JoFy`%x%m*P@iD>fBzhN!W1Th`~T*|=oH^`5y7cziZ{w4q&61FjO*wlwv%9Botg+@9#4^-w1R$I$yp zEe>!ud%ty|=fX}0WqIYf-eSD6EkRGo;-)pqU5v2-~X z<7&6myL)lM%12SXYr~>amiPFN+5JF&SFGdEyJxFz8NQ6ue-i5+G>O}e^?#h+Ah!b$ 
z5R=4a8#|{Cw?MzNknGU8fYaV-?td70HfYon*`LWJz4ndPf4T~ya}?R@3$~Qm)mL`i zo&2Hvic7XX>GL8Sy5`pUhz*JB0IRkxg0mKx{Hr_2-`wY4-&#^ySo_ zrOx$3-7lu>RF3#wTBRRxS#s2ub*iYdt1Fi4gP$>6jaxN?-0eSIr}fmuCS2g<8TgQ-`EmKd!sprW z%l3QUxn3m>t=^Hho?BYW^~xuq>Z9w43-{nl^se5UD-B=0Vxt zWzp47nvZms4MDgX|g|_61 zhyuIJ9cdHkhjQ1JLYg)XzMs(pFPd(0atv@-c5KbLD2J7`OJspXo+FMnq(}r!8OOUc zAa5zHK3Acx8mK?t;@0(R(1UYp#z%1k{M4wI!es_~2Lz3Wfx|Ij4FOzF8dGdrgwN2_ zar!&+tL_&WWuHs0NZ*;WyM$i{X7i`ts`sEvECZc)_>#jamOff~VDXxe3;qW^?$KXZ z^d;-XrA zksI9pmC;c^>rCLtMEid=y5CO<_ZEj9U&J?H?)+!Y(SB{aS#x~DtGdeC;yOgrwRV@jh6()K-~t9;VMAu_^8Fdw zyd-?d%1}T5DK{Q1+S~(iSJ^(z@3Uwwsw9=#mX!Q{R&l!FWOx1e#H>g^ifOWZ;JDIb zpWlhVJwYP@Dc-(@E1Mh18@~f0Q4Y_zbIr$+2VQr3wtZ+yZY*h%w(`4IjQr-mmfM2O zh{3jQOof+aU|EQHXiVToPY{d1f#N#KGz zCcxq|HQ7n%qW1Xc_0_Z5EuOj2fk9qu){H#@-Kt~B4hpFgxc=oGV@?F30@lP<2UAdhxWl@3wpJVD@IP5_V4d_)4ZGkP1MMs$-7K|2F=85Y%T_Y4t#Q zCVTe{228XGcFJFaQgC{UAnH@cKTL{C83XT7VAE}yi`@_5t8R+7ags*#{gl3=ef0xL zqeyP^OoO~u{{|+(dYXK)IA(Rk<{xqi`Jzp(CB%HO@h454sz&G|?>g%w*Jf z%vj3!30hp&UJR*oUweHwi+Ztyu!UO3Nld}mm1wC44`*I9JD`W59ij;xCu(8 zo4L=X&-3?*zRnfM-)l&K?T}Q&OH3YqlxS+qH3}i7;nrh5pj?p&hPm}RC=qZ#zeoE- ztyeb7Kh6(GIrFiKB=ve-h9TwMsM}yD(X@t}q7I|W&^yo$r~pI@^gPH_w^ptaO9XrP z|MDLR*g`*XgtShPY9LCNLM|h`(F>ThI3cbUw+6QgdjgF^u7jCFPzD0#Jt!Nv3E;4B zPQO@hqTjDurv0NnrUIyl>U!;4L%!G#fHwHGivVxI$6$Q8C*mi38B7FL0UC7M)OY1~ zC6VF`kzTZ4{CbWoyQ}KfJp&p+Zy-X@A25X&4tg!>5&{C3K~12xkRi|&U@hP;AP{^4 zvI&|9Jr03@e**6V>huS+KI-R6xH3U?Q>$9=0wD}EBN1}JKyqdnl5`IZ9^IGvA8L%k zKx587kuH|x&6>{YW&@-rzV-lB?EE&0h|TZ!cQUhBkd6TV1GkS7?OkF zg&_l_7HPpibP`~aR&%Yqb#Fq#MAgPYz<}rtw(N!FF^XiZ@@bY zZ#oK4=)%-3@`v-iQpfqB`7N?I8Cm8oGn2O~xT-)+kmkGkgZe+sfR?5^uKlEWuSw7@ z*Et$o+-LREI;u{qtyowc>O;y3MUb*q>7;a!=Nj1Gs}kb8OqQc4 zQtmepiN|#}09!ydpk079`fP2l=A7n{CQHj&_!qPmhC*$^!U*%kVB=2X!^Q}s$M|&^ zE95bl6087J=rS}58cV&m!5sZoU#elM8Wqh7p3>ZK^u(%S6i4J%iW((U{Zr+s+^RUC zbkP6}EY4O#Q%&%~M+1i@)LzyoR2j+u!`o7|AV48F3w9fsiAlo85*!FC2{FVZBcf3> z!3g&X-HwQdeS};!7(+u1#D$BY|H2z^MXypXSB%U@&A&8I%W}4A39C7%_-&LB2-^;@1#-afdJx>h;eu}{1luG`wmNo>k%Ym4T6D^V;~0SY!~`BF3dR3WQvfD;z7MYOSCi6 
zyP}`MPH~6yy|QG%25}Jk8@n5mg`r~ENH!3yREW#?8@P`qi^jks>Y;x}A>7|Hdt^o$ z3!ohS5eGA#F|jwRHD5=%K=QIkHtRG|5Px7yV3&YP3=F|@U9s9;g9i9Qd!gYVPyLMg zv%Giqtk7I^NAg|u5Ksn};|b<-q|&jfSQS9oNe zshE{j$#{xdWsUr{46T|3tVWI!`YrU70eX@XmTqd-V_j#m9J!z#6EqELy5rkPtv2ae)XzCjFPnHV9N+g3 z+kF%-x~Rz1^l1GJw8Bm8GSzFpUto8u%IR1)JrwPNDiT)Rut^)vDU`x=}_)?QWONMQc?wTDE*n85K?;q)N!}&A`gSK2C z&wU*(Z;q?ETPJ8b-TJKi;M6s3IWdQ9MN4#`*d4I|pzi8~$}CMZFa~Zgw^Lu+@6Vo!SB)zMQbh)?58{RViO(i+s3+}F)OS=LM=I;H zVJMmZ(!yjhL<^&_|b*Hn{oVnGpt7$XI_><$bGTsDg&f{-g}cc}PY@F^=NC#fQLU_h=%r&x_K z!UL8sGWLJ%B9A{uqdleQ)VQr#ii`S_~_c?BaNhyy(wldkrK_ zZks<0Pc|c3y)v0WG2m>}L;Oa38|EHilT9>pi~IkW2Wf|G{D{vqgs}sSj%B|Kd3ip? z@0vaNi7+H-ojrrK&L`aOAk)P>4k}Z|3AYU%X8FPIg>IPN^#{u5~jNuyGka4KFBYD_)tM`4MgDke~knt(Vj3~A1 zRk^m%rC_p9P%i1=%%h00c9qVPuC?AuZU^Vj|7IzO@RHWUEMrPpRj!|%l;(Qq1nBeOb}Q0AZg4xO2D}PYVU}Ze+qr}V z^EmA%3;5{aOG(Dv0gTL54VyK-EQObhlm=HFY~Xi1^{WeUaLs*xXC-Xfz?$6NZf@plU2i|!yCDO=2dzr;E}!% zKt^A+NTzzxmN~>S-g_i^Zg&)!ZiYQl-QaKQ&8X&-6#UMs9BlqL;-$K8+~uHQFXB~7tVCjt9pjApg2zO5W>igMU zKd_&7Ql74j(p_7iA<9j@*rq##I?0%8y@CSH`QRPnO+#RL3f1K2)}pc#zifWLsl3=) zJJGKzFb#VhBCi<~O%VdkjIW zyD?|XyEVq?8||v;c{@s;nO1Hz5JG>$b%Z9XtqvcU&8`!kCIQAl-5yFZ5WPZ;;(h9@ zD_xYcF30UxZPl@E_{>*Sv7N13hsPUFxz{%L5c?hi8@NV!M|6Lvr?t2B=Wyqofnr9A zjFr>_OpM{Zegl&wNj%B;(?M!4yL;6@GDjr=XwMtSs>8@1Mk~obo%Aj%+#9`#f$@IL zjz5X-^*_bF!y|Rph1lG(Uq`E%owvDWU?`U*uA!& zb*1e=Q+MSMAQ<(R`5}jG&MR$sSRcJmx?c2yyK+2zIBukNDt?xteq%8AI{VVh~}bsnEw0Ci`&A-#L|L=v+z2jIqDH+q+_M|J7~7TR~*C(nE(wR9Lyhe;y2GH zY2exg%{ma2P+*(QNOK$UIvTh!G|CS{`;6hJss&90fckI6tm5h_mv-<-@*Eq%AcLLx zPBspWc3HLu%n|5rz+FwKOv#;NJ9QJr(j}exH0VZrD%qJC$hc!w3xBMr6#e8zkHz#? 
z_59Pnek$7FEjBasVLSvI9I#|-M!c(yyPMC4K!)EodIBz3mB{}vSk~xPQl9_z_o~L_ z1BjXZ5SC?{Bb2d=zTfV+%}$fU2&sOz`ljqA&#!N5JF9=G&|mEcEJxZ~Rxr$53n?75 zwfesq4(H#IaQ3Dyd2ho+jx-p^gO4Cypzn)yKfQ`Z9(bOsI|)UKk(ni-i-KyW*(fn6BRbC?Q3I1r;H1 z=KGx_uI2XjxN5^|og4=au)BggntMo-qw*H`J;G9ACSj>*9eJ&zFJr{Hm}T$P;br8Q zK`2wV@|^nH>r_R5a(#+p>+1)0N~+N)yJfB%R|n@ZyVF*#gkDgms!a})I&our7BwI5 z!gD*6`wgYs5HiOl+&hMuXKoIE>o#KPLwoAuRmH;nm1IcY~C_FHdl@;wH;;9oX=BHR=Z3$V*Un>X<~IS zop`Q(1ks(>zeM1xSqXn@a@66R*M47X7MS!K#1-=UE!wJ^-nC43nT;pOpabUlAO*Ha#avsOrNL*U3fTuLuFXcH!fcpfE%;F=VmYyQ?8lc zBpM+<>NYBz)kZqEdDNJ7XLt{pV=w0eALFy^CcM7;-*EM`x&y(9g4qum+vpS;A+(5X0!KtihRak&bmAw zpxKB55;_-_O$4<{TdoiPoq3{D!Q%xj=q#zRD!D;*j=4|x5g zR>FOi50%wgd(9%X37CxYL`6u$*)t74+fGc3sNqIoj!*njg70}GTg)jpu<^ykKTz2p zT6T#Mus0?S_64q+n4e8AYaWZ)3LErS8+#VTZ&%=)>mF06GH&E@fyoi_vKA>8zn?x96b6|^sYrtGkoV1m_q~=Y1=*WI` zva!zjXV6p_*K5YyO@6uiOVOVn{W&YTALv@m^T{$RjrnOpF5vM@`}8eEw-J%GI0z7i z3&c4EVKPL!J2#bo_!Uy|wKr`3Bo0J&BTI?Nka56K=oc6j_;leua-->06A)5tV9>2} z>3nY6UzG{XGVW4n6`AMtA}lDR&=HSVF|BP}nKzX7rMyKjYTQD*PT6VRLHr-8TNf=o zuGo#Kc1#KJkF1GE@v*l&qOKg8tFQHgkenrYaOIj)ErSlzpmSb=ZLa6-ojyee!6db-q>Rt(y6{n+7N7 ze*`+r5rPAGYWJ$K5R`chQy!GP73Mp@pckPwl{3iVc!?l5A|!NyjtEC zmS;;FuF1#Egd~5o*5F%V0=Gf#qryz}_Cz1s@a4-a0#{Nspa@P`ySkyPU;@V@d|hg<86Mx;f3#| z#jub-##w@4AKLeC{gS4GL$aA2^3eH*;u6tc;zCJ{vKrQ5lj(Wc=Kw7b{}jyB2Px*H z`*a(y1B6?8(N}JMP;Ng|*HBm-UZ`rynrEBFTD?KW%f5*! 
zqz`~rL>R@(O&W46Vt?pU=Q3R3{K*kvC#J_?{Dz1wCU8rJ!Ur2CKF-LLNwDRX0WJ&f z^VBWaBfulNFRD)p`-N0&fyqlmpAg;gsD4c!No8!6;N1}N*t5m7Wu7_+Z62t|D+hE1 zDUO)oY+j;6=S`;NLh}WgDbE?<|1eA$R^_vrnvB{hPvTw~?Vo^%_DX7nHbYCB$^AJQNDW zPLec1*5IL*Uyab-L(?+(1yC8$kM`I_MSE_%2-yH1g9d@Kkm;mMyC~DM^Sq9-${j83 z0u(OX$=Tx)O$=A?<^4~3OuJP*tsHykBb)y$O&5H}6MHe^iCUNy#{Il^l`GPj=HO#H zYsSWHffZ}M&hF*KPwgGDX;oHw*ZtkE7yOb66&u0dOk!>AtUZjlm`Ut!JdZf=|1CbG zB-mP>aoderS(j{XkPt_z7gL!>sKO`2V$}_%Uvs_jP5)PpLU3DZ z1Ab-ll$=DqVxcz5BJ3eHnYdb2Qx03NL4@-^wDTL${d@@n^WJ)cRRmPP-PxZsi8i$2 zl&WdkXHXbM3PI0TaPl})QZ}O2+L&HLb+RZoIbeC#zSa4*^8?#AXsoP&qaIHko$0M^ z+0%Ay;Mk;ytD6Z}XdtHBtfZ1`msnWh$*8Ni5R#tKV=YB~5xiLR6@7vD8tY)@OyqLyG_RN2uS(rLMto^r1bWn_K;c&+AIyZPpPk<{sHjFHMFO9wYAk zCVZJL3`*A@5PuuK)Xr;h={qRM)|+581Uuvxov%6@gdmb^a%qK5J`P#NXB8G>L&NK4 z^q_x?r54}Or-gydfNBIgO!^e4Unp11iEJhxkFkX>7tr`WmJxIl<`D`DcN?~ou$@}W zg1A!bEzp@W<|B6}Wm1VoEjQuomO}nAe@XXtHSyAV zZFX)q-mABrdnzEzEYk=uGe$eX_orx$D=PAvzfYc1Zqei9alG-#F;TAWC+eZ`e>U^Z zOpjQXal0Q@o2*CeU@o_PrregAo|d={9~pI@nNl@t(q^7?Rg}aOY^d_>J;sH}vJGY@ zKh+N91?XL?J+8feT>z2sggA;%F z&!^bDs?%C$y?k;p{B{jj?=bv+4j?_%} zis`cR!o7XDbvLWvE&d~k;%K=}@}KyU{1t4_%#wP=5#aidm$T1S&kU9kEtOV`q)7f?gNh_0Ob(^rXQRAA;L!nP;zc3UjIQ*zT?J z{_QPwUqU}=285kcxC{4jL;2rl{uV;UnO(zmH>=BO6sH!)$jx| zSHLCY4x?PlYCAK=57*~z_g(M1tYvo5*%Xq+zet**nfGpb`%KnM{$yo0zV2Gtv-0(A zVUr(R z($E#wU{}Q`53hu@0Ve;IJdqD6&gmH_Z}Up~O|D11FL>{Bk7u1_U>Vg;2Wd%G<(Nh4 zplR@+fSo?nFiGQ@jy>(RZi;Lo_sxqA1An7u&_Rd>L=9n`%`N9=o{#+aJ_#%~<&$Nn z#VNC3lS@V$P_FXKLBnFGcBbiR&x;}DKv+AdCa5~J{qFe8oWK4XEC{(3`3twrvddwV zW$l6Ssxc&xznEVpWa3WY^Y9WwVQt?`O<#F^am`%&Z8##dX2X%ULQKvWQ=L;E{q>!>!ew-4Z% znYch4BDlMIOR2kW-M!tdck8y)-QC^Ype=R7-61#xLP&_)$a{b9;T-;hOlI!AW6$&X z<}A(6tDM!ez5S7LtFo+pk#co!6aIwFak=Dv-Y3%E5wI@!X6W_Myr2@_i*7Fj#q=cN zre&V~lUAa^yDqlOtb19tqIyNcU#i{KH26F6jnJcRBrBaPRJy(LB?YbvznmF3G@h10`eJW11?rD!bf81ft)+iUO8XF%xOcuuW}62rq*UYp z1O(l2}HJIJaxytG1#Nv$+^_6^+eU`75^#Re;1$-A?q?3a&kMe<(?`Sn2)i2PV z=$Wma**&8hQxELTFs`uO!wx_nD5IHbPNwr!?o;kH=UU(b*D^2CLn%?vCQN7_V6M^? 
ztAje#?R`4r%2moWomW&UHLA(fms%U~^9YCjg}sIQo|n(-<$H-1yPR-c=dxX}mvfC) zj}*f_@Kcxqv9Jz{7`WvV%nZ8%{e_jHV;w<`wO9o-kJ>~3L>JSZQm4{iF=N zw8h9yVm{hqTW5J}RO|A!4>j{OPc>V$TYCLfD|wt%e7ksB*W6t2l)1c5JiD?P>N@`yQL!l3^}2cUuoSj-j=VJ@#^tgEPe;2KH4W zNMbdE9Q5(TOS}Tpp^0dQgW;HGA7KC2ei-~Nu(0l0VjOC11^aH>u-bSuM79p;%H^~*W`JP1muskdfe~hc~H+VAs6x#^I`AfjE zYbOnZ^1wcx4E(2yKu2;1bsDKQXM35w8)(%@j&uhL%||z4YRnzDR9k^{Iu)dOyMY{D zHqh1lLG25K)WchW=dB{2A-5qjkO;&KK4KtFpC% zI)GM34(1HB-7CP-y8~3`G^hq7Ixhh38IR;i_~VnYTGWCb1{u-{tQPz%#@?cNs0}+$ zaDc%j0ZQBwpugV7hk*QB9Y{AX1wP*qxD?@nqufCWqE4i4qvRuT@HZ$8*!hzXFQg9~ z3B3gg!DE3`wHxvWTIFlfO6V*ci$oz(;MX36X>bil!{tL+KpmR`(zmyP@5>>fz?iK7 z8X8E_#-E`&dx@>iy3k6p?Y0j{6-i-&I+Gdd391OA) z($Gr|9|zxY*HH_E?rLC5_XjEBAwc;|2MR9>@k6E~%aOB4B~pfbLB^5&DWfShWEMFD zc@Je1a@-D{-uH1hIh6Jv;8>P8%%}*TNt`F01*WhIQiJ>j#(6x|LVW=|>j_8}u&_bW zEl3Lf1hN5y4hK;8x7cjJgSh}+mLhsb`{=5P0NOhPYVSMb?H9BDLE41XaH zqi&`yr^J(^5DaM76NzLz0BZwd<&=G<{gr*OqaDofztKu`4aNqk!(zfjNWcs=faLf= zz{EZTeFZgDbAV8~2zb03K#Ey1ke|Q7iEtnUlA3|@nh50Awb(i!eD5H6LVBRr^U06M z;gmNN61A0*MLA3nP!5pGz$y>|4~G_z{E1&!G1>?H>39bu?WgEJSYP}e$m90GNw^MU z;)_8l@)02J%>>WM3Zeb*F?b-X0^<5rXbkiNsKF+jfh*A6juv~PUFx{%c#fK}27D!< zA;ypjNC%-5I0uXqFXUf<6-_5w5Iy_`at4Xi&qzOs9rzgR1FAr6;KvhNjh)7Z0^4^j z`WDrov#_Q35~2(E#BHSC&>q+sd4sGWXOYv$i;*?(6v%`02mcTIhW-Y7YA_gkm%%!b z3o3i$AaR=uoXSj+6r_YJfXTie)PPBl0mv@oCh`}u2wns=lAe+_0xvg!ID+HA?afF3 zMdMLVbS}!my0B-!ok<4?-AjSPeiRtnBM=HXkh~r^)BiwiqyfOaJrBfGF8&Q`08*a` z9|%&^pMeB$7veU-1i9%bunE7wVgv!o zRsb6?4rE*)Kv-lESBX2Me2_LBgv25Jfs;4|WD2~1XTYf-6|b>X$jCZ#iVK=)5ie4&J{lo z*0X0=KRgFGmM=kmdLCF^)_^^%C5d4I-VWjzH-mZEhwOk)17-C-sD?ZU%u6j+jpYJg z{xzBejt_w@1fT2$k_W}?VZ=2-`{AFP%S zfUb>UN-PXN3KY- z`U<}aR;zd^yO3$_@flHUYb>k(kBehP9W#zS4uMDXb_NR+$) z%_c1awRJ=B49pFqVQawX^uvea(?AM)8b}8)fNPD%;QXW}&VV%bSYS-wftP|=DMi-7 ze}Kb$2FieHp#-p^X9B&u9VG4yB{;+rd^<4P`J@HVZy-R&f%Js^;2XY!!r<@lZ+IO% z9sUQL36B98d=Rjk?-4e9C!UUf#GMIWV7Xoaq9hORK`)~WuqPoPz(#`{+o50%8~`Id z1^oAU_(R}Yp95m=G2r260eRE{g!=E`h^@t*gL=r__z#d3Pk?V=BEABvayhgWtRCaR 
z*kXazWd@K<&jUgH50HT;6Q@Cj{Qtb>A7Bfgk?sKd{W%aHSCbxr8q{0xS;!Tvw$TIz zR*48=5$P~+p(#)p$Okz^;P_j-3*Sp{NOICw-~(%c^V~|z2iIj4Ko4F6O!J{&or@wG ziT{A#83`QhpWtd?2C#8YK|PQsFo3DB9;iDR&`luLU%;t&0!{^Ucn-KKSV}4bYqTG< zgJc6|l3*aJo&`F4Jd^}X-%321AcHiDIv}uzL#v@Bq{VnW+K7tr7-BS7QzW2Lati(( zj|F?u3wY48z~@~A*N-c~@p=Y@gPD66RPOF0Jp~!{1MqOX2|o`y-=#pqR{%R51Gxfw z@#FXyu$TJ~A;4FUB&{XRffV_<#7q(m9Lvcd=cgK+_2YrE%mn8aIvfV4K*hu?aL&FC ztpJHGb>Nt%K z4FG$k;Bxw}BbF0c?>4xW7Z;M{ou_2#$jm6Q6;+Itx9Bx`FeQ z73k#@k`q4G@xyk_UJK0UnM4Oj$;tvY_af45XcZJp97Xp!W}rq)2G;zO;QD~T_TtUN zR8lf<^*^9%v3bA|e+ssGBZ)>#1EU9Io4`?UFHp!`;C5s-rJCYS*@5JMx~JdZ?=!$v z@o?bc?!=U6HufLk0j>x1KsfcKo}~nnM<5TuS{wn7B5tCK9K+EJpr;2w9mGBChvO7T z5;;tag$m&q@=(eSiV9(qc<4(z7kdRw2FXum#@h-jn;sL%(1?K~X__As~>8ASRA`^%ncU2bJM5{X9mEK&&E z>%XvMAbHORBE2s)l5!Rri2b(b*mt8*gd8}se#jnhpD_#D3ld}OxR$gT)UN)Z97IZh zp+5q1Lqi>l?M?O=EP*70EnxhEfHZI}TSer9(PRaAH6fH=6esdR5(7KzSmnq^M}uqP z`yhoT7xMub71h8pT||z94&(n~I5w9!2+c(XA|oI(IIoQ$PQp_tMrsD_Je7@{!p>SB zSUx${LnA<<&@J)`C=O3SYaFjp3zql4jBt8C)+2^D`2zOU8fxw_F>HM79ro0I*Wz#e z=`dne#|Y~N^H0kTbQDreU&f4~N0E1uN`Ub_7~FgAM>^0lGu4!5j<@si0%#bLMo=tG zy-#!sD+e*KM+kGo8~Aa|M@Ty9e<|yH^9pM-F`vd~R#Cl)kEZ{?EB(Q`S}TVXMt)D( zLG_}2rX8Ru$Pj5h`Wa(TbWSPa6)sGEBz+zhVK$pC+6yp)W0K{9(bMeg_zhCX7LxOc zqt@^GO@=3EU)EU3I4_N7l%$mXosgLq>e$+0>S?Ovo+jg0$8L0r`CL!FGFMsEbJ9AJ zJcd)qj}$+09Uvtn0+(AnKId%U0%0KF0m93rD>7h>{ zUNh32eYw|J2goy_PHMXFo6oG!%;1BbuXriYP(7`4V5_8MT+5QSvt1js?|aj`O|9z2 zKCPKO6zsUuboXz8kHRKJBu1o!Ee~AaIm1Q8r8sZr=5X_w1JQitgfjP^)=%@kZzz0Q z%WO8aZ0d;Bjzn9TV_ojLfA=^io9yyXQsQ$W@<-y8gk_-MsNGGpo84|26OFG#RC2w&0KFj*u@AL7@kH9Ue2iPX-~8CnDk zGV(9|sQ-%p+*-s@u#{QK7tKix39YWZccE1xOCXe}>NjhEeLzG12Pv`PXJn(9D@iAT z-+avy-f_8LLSaUG(iL=#>8T>2C78?=sBY2S8YA&03ib-?2 z;e3}>N@~*1tMAQg{9g7o=0|2OR5MVm!7fr=DTj$b{0%vszry!;{G!3%M)Ss=9x-je zgaogEzqvB2TFGyV?-I*F1AAXvthQDi-28XRzxnq|R@PKC+-e_VJjifJ+ue_gBU#zxkyIb& zzHYH$l$7%RPg8}lsK46vU)pKCs4=K)Wyy<*vYMV+tolcJTTxQD1+#{b$d`-csXK*h5t6 zndx?byBoe}yK9?@oDo=kk4K9VXT|;#lHj#N?8Z1}DQTNlt*8oYJln=-8CP?%xF+vh 
z!IWyZ9x*+|>v!b#xSw&4V#~q`+<8tA#R`wYYSrgzCY7$LyQ=m?7g5QKm!#(2y!NQh zOx+GMZj3f#q#S2A@k%Ex{!O<=^Ujh%Crj~w#nC4dS0{Ev!2xoKk-Dwdw_aPyu7Voh zHtwl@TkKIdqy(?Y>kP#cT;GIWN!pwAGPWeN$Nd8<$UaK%(EaQ=*fh4htV-9t7qRk2 z2?d-5q#Dya!%53-yTUBd8THeM>#T5A4;tA0w1wWKw!LSCO5Xd-3inTJN#P_M4gT9j zO4M~6ukxr&Qs|om^?k};7w<01s?BL1Z=TN4`@5yE2R%v~8AJ0dWC>R4yHEZD zwrQqyELYy_+6%nfekIhpd#06wAAWG)VvjGvt(+Q$2j!Y=obtcQ#*$Zc`&CDKuc~w{ zYum4xPdNMgB}aKCl%?$McRFoQiaPX+u+fxJZT@-e^Sv*suXn#*`Zd22?O`ENT#kenPd>@aa_1LXjxFBuTJuU z_0c-DB{5g_Hsay$2d=Lxf7UCWJMK&W#4Z@HV<0>APhw3}q0d66^Oi^L;;LKul!A&n z@7_dPsIId$p=}Rvj+Xfii_S~f5$_)sDz{PHwEXf1AGSTIe9`{Pv-yLif_Bv9Yj9|v z2?NZ@%flb}yZasVP4{34HzAWWZj~p#j(y|(lJk1p$7zLYdw%eeBJ29kALcXCca+Bv zPRu^hJj>6UV zQLd2=@6)V>y*`zqug6|(dqH|1lryv8l=(GpW*9UeaQMVQ8lb6G#$O8EF1yF{K=-Rt zYv<-{`ohop_;L4FOYzvA2+pd2>xsh$<&D}qx}smMPb*Z?cC^$vr}q~>|9IW*Zh!p% zU5d#_sh2GestsKdzCU_N%&dS3oEw_XvdqshFP=O;_VUb^U1j&w39OsJjQ-ujb`L#0 z7#Yk+yAbu*4Pni*-EWtbjQJMzVfTkcpYpyBEZM0_rGNB398VrxF(z)@t$`zgkFvZAoE5!|X0|?#FjQ^!=|7Js~t zEN=7Q&7-akPKur9v5C3Kw7uE8)R4EZa8Y?hoo6Qn-681olY~nnTcgfJ$%7JHevwXe zf31CAX#Fk!(Eud#gB8IlCrBY$s@L}zVy?(<~! z3vSkr;wjx*$Sx9PuwTlQA(Ej#Q}DnOqG#|X&A8f4d2fDY{I1Fsl*PBHv12Zy0*^+Z zF{cweV{3imIIB!ETDz)i%36y0=HWTI(z=eRIGOj@ZEE1^7*StCe|GTo==a+C9JezO_C$bSLY{_-_yD7JyqGgJ*DTMZd`Ymy&$LUU|H9 z&O`;OC3Wjds|s!vtS>*&-i4;}Hn~6WV~5bgGlN#k7Sc4PrK%Hcn;Y#_Z%QtdJgEJr zbD&8=(3mF4=inoW7y8wv;r$-;-5i(Vy_kMn^S;KEeLi!}lb;X8Z+L&!G~Bn26_f=R zBN8qwpe(W7?Im{~ z?Vz-LsqI&hUcI=rs+Vf_hGwt}+}&g9`$Z0VHSp`eH)&YZEtg39hSu0p{9DkgOAiyC zTR(Hk8aoL{CLbO7Dfw2PMJc!9-UR$5*ox<9<2oW0v&%e6(iFkEjS!!a=Co7%ulr>8 z8-mf~c=Kg-xbjEaq_*JJH_bQNA~gQi=irW@hdM&MH(*PQcao{k!v6RBPf2k1@}}HV zRh8fU5&Uk`(|1qSzN-Q)=+UHYk`o~V_!i&S}y}rF{OK=Nle0!pHK!I!C53n{Rf7u3vZ2W zNDfPk3i`_5gO(|)O0R#>y;}Ng!;2~JkLE$0TWDS$X(9hbJxzSkcVOa+fWx9thKyL! 
z3)R2Nk1Jf)uwFaWYQqN77l~cn7fHv>2g-R z_>RW#fXBA}V-}z>laTDzwsuIGsUJsQuX3WtgExLyn~;b}B> z59&J7RjWGNQ*BBjujb+6IPrht2I*7J9IrL#%OU=7lkW)Q^)P zSk6_``VeWB-m>JNQcnWdV!u7U$d=05nT(v3LYuzMwLUN8M8gcgJi4V>d1E?U7(gAgNCMeUlW zS!0l!##p{!t0;>Y$7#dhP)DGCUuQtm*7^*Ew0T?mydJT`i}l!*=h^9#5VSL*E-EbS zx!+UIwQe3_Hr;Mn+`Xm^YRawsQgN{CPr0?yrDk=#sr7MhH$H+z73tiiK@Y;+!-fQ? zy;QR4ynaxZuC%*O1NNrL+x*PdL$XsVsg0yH)=|Cs&ezS;>uGgU8cwxO={<&TbUG=o z^IH*;9{D-$Xu_^&ZJ@7bu=E#yI0e@)YkgkVzvgk})snCI+wy3I?@P)mD(llb9p;3K zI&K-qYdKvm&BN+e)D<)~wodE})pN)bT(|ol4BHvqku)_;+|L|4J+Q_jK+?eJaWK1T z>o-?1%PR`Qa*a9r3WTM~%Fc!!)k3SD{15k!d~lFY^w$`E#KIu8Z$I}HT<}aoMQ(bk zc-AOyozWT4`?qx|eg_%?Gl`Mri0<%KS@W3IH|^1#t27SB5T|&LsF3l|QxejX2PFr` z^$jt2Y!yvr1rkejvi5=X@~YUfkwv6}m4*3bgKB>^8M_)R8d@I_%|jCSAZlyEzp;v- z8}9wYS)8FHy@m#=@3vIFtnN|hlm({6u$g|BmF|Q(r!&*h`kp}@C)+1>pzTY$GEL(c zc6oFNKlVeSIY}8G9kJY327rm7ls+1=7LRaY9W zwOnY;=ya zUxeQAm@a!HpFE7-VUj#buz5|dx90DzB;|F@2aLp_N>2C$g{}_Y8#LE@jC{JVKW)3k ztDD}=X~|SnR(Dnj6}Hw(%Hy4TRlS-whCx;V!DS9|VS3*Vnip9Ue=ad4685A!C%~ze zyIuWjrA6WScMHZ9*H^VEspvH37VamZQu;^!L6D9-G_`4Nb`R;=)g5RmhrPI$vkJHm|W~;B#-yieNVvO z;cw%1CkG~O4b7BJpzpWFcOP$|zywNg02t-<$}Uq82DJO(3=Y{XH+&d%?xhdUxwOS(369OxLOa_$kS zuc$KAWxAObF@AutK~fS(joFhJ-3LzfNje{Nk-rzEsVf@%N>68h`TZ-evD8w_>io;G z#A&!>r`+hi+542|TcJO>&hkXh)qU!nVvNM%=|gxV*9C5G-BVomvHFsDj$G?drPM`WmpD`pZAeyhbNtri4N2s1x$7;0roNzP zEYAIX>StD-y>d?zRkhQqVCp5CJ)OOxJmQ z@s`XKxG@&Pec*NEzDO8;+CJSu#VTyRx`Rq>(=0`1?TMNV^-tQK^?t-))-Fj-;N{q= zq&rFT6COmL@VE1BIBL50w60g!swP!_sD0P8vdyb&x*-cf*>$`sLB061B*itA_n1~d z2+#wL0CWmE8TG_#;Szd3wvrh@6H|PsOnL`x4COuKOVF@!)*sr(?YWBY)urY4N{5#7 z>Nd5P=?)Qg&O`aOAg^d++{pNUVuyznyS3A=n~!v#YOSt4TW+s7THo9h+?t`fW;;d? 
z;N^;Xq;e0V+W?n!PXAHHptFoKdWY$58n;{G9s6K^PNn#P%wHZT>nCNop5Xg2Z;=jK z<8*FaKf(NZP*G8~sob-AS;MT3E!y*T4()`XuSZGX@Q9G;tSEZEVCV*=dpz&!fIb z;FFO1A!;ACsDk3{*l)0@@|%Mc5ehvJpVv1I=qbWR&|Nr_1()Rgybj49^A1ptpbPZ7 zRjysvHCxSRu#wbl{3{-l1Lp-Jfe(F_y1~NRv}5)xExq$cvtNB^^}R|;)%NPTI$FzL zUAQLCP-H(rap%96TYb1eJHqBfB}Xq0?e*~BPo#~(wTAXiT~lFQUPWw4cX>!lhOQ49 zhn!(=bY1UB^=xtd>Euq1v41oSG!C-mU>~9J43&7e@5rE&ej7Y0qkj=vgQqnXGeNP;>*5(+ z2mD3;TfF-y7_S{qrpurjq~di|QFQyq6S7fp+7cN`yxv9v@k>bk@; z(tlv+wWy$&rD0n<`TXq^tu;$^Ua_P6QGw>qzrX(~BosO72bNU0+W9CjRQvdv+@Fhc zn9bM;v(h}#E_3X6_#(Ny$sVhGUw9ps6XJo+!DPO5Nl#H*V#D$pMb+He%Ek>q10oFV zj&;aRdId9={gykxCBc22w~w#Ce|6CC5Es8N*N@Chf@2)ev9LD1EU!?P9rL@mpijdn zO}*6!?RVNI+wBc`E8P5D&asAJLkv_s)AZWZXriM=`b<%r?5ymQWR|$m*+|X8oQ&CB zj~bcP)^d+Z-`eCxs6AXg%#=)gr4C{~VOp90IGF_FTmz*IvbmmoKh!tDquAvVS4mxO z@77S2VNKGiu;TF2?s{X_B4a9klR3lXpvM)jUT{oZcx%WH?JKNf?4|Yr;5l~>yH+yD z>$CT4Pnm2w|0r!F`n`8X`;b~)N#8=ZqRz5;wQbEyx(8W4Ad@+}_(udLzC?fuOu}^G z1_2?MC|W3D3C22u95qB`uhCsr?QV6g+gwgAFRC5cK0u4uN|EPIxR@lnFPkqJCK$}j z0J)E=NlhTf@d<2U8AQKj8hN#Jy!e$<1m&gUn|@8#?#7{2UrTqCdR1W6|29ZFzG%nU z^ASDspz|NTuXwoRku*^{Sz0ceEccaO6#o#6;T~jMB;B{(*X4BXQHaYg6}OgduO85J zMzzm86h6nEE^3vXcH1uz3wAplVGEdQ$}8j-Rp4}41j#J!|9F%r5?(*O_}I|8+6B`a9_0zi+FB!&oB)hopHPYOg6? 
z6Ftv)ioG}}dgaW?%Ci}Q}2Y71a-(0D}NluSwuAp<>&9JpAr{V9)JEh5G zk1ChfUu(Oi*<^cyykTDvTom6FjR*O2i0GwAB4~G>%e^6NbhS!CU6+YI@?zQR$!+!p zz4zM>*7=uRDq!Z7fzWu!yx)({R zg<7XKv^dg8+ezJS<>vbS<)cfUl#Z@6*74i#^nQ1+C~a(!;GSr@u$aGHc-|!gRIOI< zYlOF4-$`#t$4d&tWB52LiyVu>dWmwIBBYX4GO>_fIHLG@`INdxZB^Q*_Laytrj6$$ zx!``@%j9kH+2^~{r`prUy;b(NYbsyHoC0rfxEflNH|j-|TPl86<Se4)(Hl>=GZ z!02$k&l}7g3_7_YUDV<|!pFjquH)sX8}8=khDweIikUAVg~eCBsqtL-{DNsYtn4j0 zvkII+Du+i~xi-}iP9DzcHGPImiHMqX9ElTqU*l4q;TUnzy%bgPVy)I<=5BW~{7WW|UV&AX6qrCriSGdmM z9ijUZ-lmbND~$&#X~k3XH|0$(m|PN471Y?#Y0%ppBgsG5St7c-n=d^O4qg}hBxq$o ziO)a}Tv9CPVP{cp;SBR()yRgm6^lzxm(8qP!9$L(QqZLCVtluNAAxk5xUY8_~2~ zIaNn@%!04eZ#f;}PZDev#EO~Hl~Niw?lWB0$Rj-B+_Po#B%eji&ZFtG39}JI0oSiA z^DnrY17|m7I~Ar^#k4Nagj=H_HN%HLP{Q;G^BL_|%oHV=?$p<%z z`(pV^smY~;|Ah4n{?BqmRiv0xwj{qcyX{YW&i4Xp<>jWc-3Lt5@YmE7=YcL)+&VoE zc@6Z=^7+U2r1x+4rLuU}EPga|Dv4}4(i7cszXHuWmFis$k&2}<_8{_)Y-RyP5tz3=-(dW3ej^?x{VDNtu?ZyH zh0HIo1?0`imUTda*R2v3)faAHNWmzYOkaq z&U3^y@>$-UegplZd@H=UzUTeU`IO235tgyFWP|;s_EP&_io+FO3No{kas)-}>h-PP z^?N7-UAFmr3jP+pC45!DVp%`#V}>_nE8zobzl+JkoiXuNX}oN$w8|xqw}^JY7NrWW z-(GSf_v`PhUvsht7wKw7DIc1?z!N#`!hfZMJV-v1ee=8zcwY4U?z7kbv#-FtP4tB0 zf&`i>JNR`wOYUax`*!Kev!5f2$cigHGRQ8J_~nIZg&)H9`wvIJ!?hWq^ zwYZXB@&?QEyubMRd+(I3D2KVJUy%W*HcQ@=`Ow2q+`b8B7FWn0{K zc=ULdcx;l}B>B=o?tgj4xeXNASV!S?=8g`}>L-QtT-)z{zlY_uR}SmAZ+=c+Ab%fr zBH?_}jfD2-B>|p-I&`pRn`%vWhsbd1ydM|L9OP^vn(J{8_aGvSc z!{4oWkIIKMC25M#TMQjPTC!iRk`0h>UBe{nB}-hSq9&0=a!p?9y3k1g*O=7GPgTA- zN4}l?di&eqpDzoN8y@LyAu>@|peSi%+Lu0a;v8XLynNU>hO2GUTbFji>Rr9R?H3sX zT;6#O^7HcHNg$@5eQ)>XhLxowa*#h$@{22)nl|)+7asIMyr-hST_;H=yME>WcBTo6 zUGK^QWRSea)8<#|bJJxO)n?3ZbuAtDee8$#AIE%q@aJgh-|god)11QHriOZ^Jna7{ zWkCe)J4u>D57O^w9?~{2V;>Zg85~6AnpT)Ta?Un}%Pcw7ubHP*Vso=TOU?XA2;bpA%e7nm? 
zX_1)6cEv{+LVMLTf9abotgLMrqCJQ^oCsN|@9ogVv46+cgw=a26G6^% zD4C{c< z+j62^?c6A-JXt{b_3=kq(Zi+=^-#kD$0vrf>m1K`-$_C1!=<6UUe7@X=`v>>^sTq5 zV@BsNjhB9#>764T31=Px{WAt3;Zz0Q4r}ygO;(q+CA@*wFuM7EyHoc$-CRq$;|$5h z=n>6#XL$v9-}2iU^eJeT-&l`Fl4-mJNT=>v%gw6mp#L@GNB_L+y1^=U-8isfra1p6 zo92@r)F)CK)jwpq_er-nS3TpGaeP})Gui>EvovBO6-%IEoZ&*D%XOPCnT(U)74%2oA@ zvK_hee#hst>J(iUBZ>Ma_>BKWZx8WbP#=x5V~wgo8*g}S zd5nvxE$n>$YSAz7lxvjbM9acrgzDGs+)o&=jm7kIQtMI0xp?jqMnQ@W*DEY4QC&^)-kg$Ex zPa>8D=J`~5s0E2cuj*5CL2Gj-vxj2n>(~ej=`ozO+(+y>Qn7x1*P-^Ctyi1BH;roP zYMa%0TXW4~12tuc)z5YdzQw5$jB?rJ7Uka>GB4zQK%7*mJc8=ont*?o{KG}EpW2(H%fTk34x`7 z=lpN@j`SP=-nvcDK589Vzp;EZsD}7YF}tg==a%-2c?LX^V;3^qo(CjH2*YH4V?FI| zLq)#idP9c)vKkw!ZCzN~G0U z;>a{-w#x($c~E@Bg3uMdf80${g2zD)n9ms|S}xh91G};inodinPoOSDNCaw*=}88a zGurmGo$~G`%>)D2Qer!VzQR2T79NDphOaS}a_dBoWG}t0`@QvB;*;U|RW^=0iFmE) z-}<_4eZ{%b%*wM(AG#pzQ^Q`zI&7pq|EJ{p{f3G-wOuH*>JFfPY(XL|7**6Lj(xao;=Ncfwgy>6K&-Jeg>7tgsB# zFY6i9RoYH!``Ws>?Nj?E<(sZd_2FKDvBGS!COZ=Ga;QJ`0P7p~fpCYoN_ zm&H*tDfE5hOi~aw&>m^+W2P8`^;qvK?Qczhmgt>l$TvEhL?(-AxaFDkh}{XRCuYGM zijIDVbA`7}FiLQNSK~B_6-S$Z>?8g-j#$^4b4^#w(bk8yv-a~2XRI0TAn9Q`#SWfI z+G*LezVthcYF0Og=4^Hv%b_waPzX{uw#u%tq?%X;cl}sB)9}XNZX9mBZtP>ym{yt} zm>-#En#JbF<`dRP$5HGWQ4Jj=kEKO1WXuE1B<4r@aq3-i0I~>zi4ZIZ!|_7WQP9;A z0`GtW;dCe!RBQ*qeUUkoG4x($0c#)g6a64<2yFn3N&7>!P_mITq!$I-S;|evj0X&f2D72b)N1_~?I!+(e&)LFB;Iy(GESNcgwv|#v zUQ2$15O5ltL7IXOLAL>8b`@%Ih#W}{AM^wI30;V;bzHGcv`jH&8$Xzo<_{Ky^`#xg zW)S-zE6fH}p9-wf_R<_?+G>n8elQ+0Z?;*`k3<3K4rvFd%f3reK*Py{X%`qq<{-A6 za}y}PqqvuxzdA8F8pcrS8iWHA&{KFKvIW@$OG(o(9_Waef>nUtm9gkJN4DMFF18J| z(t+|@Ywd4OLnCns;R33kcjIz=4z>vGu(w&&=Dwz123$`V`kK?MS8X1S!I+Xb3)3i# z)X%grv=-`H+8Ra-E1aFqMu7=C)cGCvEN>(4K6j`y&Te7cr;a6WL9QXkL2rqMB*Hh? 
zS6U{TubXqsf#zkVT_(hG%QgrN!v_-^hz!u#gF9fyVVl*gHiYZ{(aQ}{h8q1*JpyW1 zEWI|}aATwShPAR5v zvlFOyNE5MG&>?gIR6ir6U&H{=d(@6j!+Sul;F444Vy0oFX%ldo+ZrBC3=Bpvdbkcmra@6|JuEnpB8yOOgjWdoTV$WrnS-wu~ zT#=wnpy$W%E<5Wv0(K=sP75Ya1oo}~HGq!ZP`HQ^L5G2A-9VX${0l86^#`@iT=cBt z5Be0pO;Cw0bd_zg>9Q_NyAE`O=V`JuyR~<8Lyhico0(^sWhOwcY^Wj8NHr%~$&NyN z2(l6g!S`7#b~kGq%gXd+&0tG8CRRKnj2cgVhtwiB5Km+-+!wNe=YE4o#mH6qT=pT( z9`+cfAMIZxfs}w}pq=&uKz^;YUqSo)uagTav}ah7OrggAj7Lm&&Ce_+tdnh>U-*5 z+AO*k1E+h_9+3ATU&%{p9~l2KCo;;ZzmY7`VEjIs%I9p z!dPP^p=)st^cm>Rb(B5i=`a`ahO!_YQbY-3oMesUq&jcmy%1yw9|*Q^b6NMO$KjJu z6|#VKiZPwxLn}hqkQsD|Xuw}oAsfjg3PEwBF=%<@J)}^_DDx6Sv;KqOsEdpE>ibP-(fs$UIWf!tME0XF`!E> z5cDWLAXbv5l7-AoPKTZ6I!)(XU5kEyX0*HT!f+gCR`ktsiUM2W@BUa=OOz5un^zj3g2Xlf$1G9vmT?V4R>AQJctj zDLa`|E~wdMAE(QxV&pmIW~16ONqrc{o#*h!^Euow)*j@hW3yRm(wc8O2EY`$oOz1+ z2jlB^baX2Cwe__h8a}sms9ENrpbN0qw!oC%%hbHkp0xy05aBAfe&CJeIOnUB2aYh4 zyS0sM=6`cNEga2y1Ut}p2ZNYFbK!;wDg5E=dsGXl+S*^o(>2*YGj53k+~0X39^+h< z9GKjK868g8R?=e1JjN7;4*u7Cuw!`@y1kDRN?Wy2jrzjnZ%}34euxs?!P720JkXX{$;8r)exmmZ8$`<*!%K0vgSZI*F z)R1K0Lr1PLj&;sN17Hv#POiQ?HCx?$w$wGKCuy?VO6{=-#SUsDw5|$O(%` zI35}0@r!<5*WK*VdR0FVv9o+RC_N2NRZpm_`u;ZaXV$tRQuiCm4AF8SnV#jC2n1~k zeZCY5AJO;ifUSLQhWC*h$zI)9)t>U<3PwkvimSg1?sBJ_x3?{+?y4Q5>VSCeixcMc z&yRf~8Bg#vP}`x#eH|ohxU4a1bo`9qBO+(~y^7Yprgs}Mg!f(emAjs1u(o$BEpPqx z;VUoarDBo=V{PGAGUhru^oge9q|^Lk0nDUjX$O-}ge;d1rMnoojZT%Jb*I{syRn{H z<7klPP^M-zysF`}ZMN3)62fxQN>d;CeJAf$Gn*$Xb~K(=Poy03m=`uV@PTL_KC?@z zSk|0oddG?e3C>HME|dPzPE-6{nExx}cXP=zWi076Z;bFTqZ)l{^(9WSW$yKnL;AT6 zcoe_RH%@qr{9kXNBD>PJF+$~~IjH?^=>q*;PWpn@JuN=ktH@88U);(eclr>K_L1o#tqA^}n4g>#p6IsINkMYeys24J+#e`VquM<>XuZj#gH)sA znfg=bK!Y=7oupqxZNIS8#eS~H92K+qRl}i%b;@`=Q%VjS9aiP>hP~FF-y`jrX~)?0 zVg>IN#l@bc9nwN7-<1>d=Y7$e=GoS7te?DjjFTY!V<#OIUi63v4v6K(1qCrBjqG8F z!sy-nr*vKE_ZqTdPlH>h-2Q~7BR84P^!(AA;Irb{!55RCrB03D^3CRj%1w>j+NApR znoc$nK#558o68@Lztp&PdFUR%FmEw`GGi+it?zC>R5d#{=GXE3t_GUrBd1AvR~pT) zXI@$-wm3^Jg zQ+v6K#5|7JuYAuZn5^{(j!^er7^<|ilW!}WmOeZsH|##e8EQKm%1%x&*md#`YFdU;~Tj~i85%EyTg9vf=xz1sSLwC{d 
z4%r4K^b%D|?aItIACzy>K6hm%S8P}LnGEkH!kY~1RcGbS!V){zV7ET%-jn?w7-h7Z z7Hs=*BRM&pDDTckb-NgKsWEvl8UEI1kZ+@zAQgG&~>S9{yO7?gh5$M{YGeXu> z{iyhJ*^Som#0|-PdaLqR)4_`Exm78nlEQMsI{TP^4k?MTjoA?rV>6vvq&UME8Vz$< z=ho9oE;uRkuI01e#b19u;AQJq`=V#^Y?CQ|QzK}fYWR%J$Zm$-yihD|L z@ouZ`b~6lVrhn7N;oj@x?K5?7YX8iTZgxB9`&}bs$LYbQm&|AA%vG+e8&fbZb;_qcALb`@ z72T7y8m{y9=rJ#(%<^AVUBg1zMT2}#+wiPVOIJHxwz(`jC9(1Qo!=wdl5~8WLxNKy zW(O~FsulaT?#o}AF)PQW_M0+;&u3z~?CS;>TK`;>Ijp2eQE2vWKuh1efrI;P3VdT3 z##gkD?<{3Ei0_NH@GrYw*6Qawr1UZ|;%f6=z;5aM8|DE>S#H{HyA&e$r`CwJvi)!io4P zX(uY~uu5BH_sRVxh2L;!mCU9O>igL(M~AwlT#y%%8l4=N{iC*uu`qUW*7`(Z zX4qJ#{rdHal=8Hn*%Q5l*j)rs#(hqve69`_wL?*XrJxD3mwnu zRco(z)HU9yjj5g8nAp-**04s19#G-0-dLW#gh|zIJTUke_is zOffgUC$rf4T=%*GpZlzKn=PR)<93&_F~62(jLE%Sb+oIJyUx4HyVYhEohn(-(k>kBFgtKakGoO6 z5m~;^>>8xM$uF8uvTbcsnq`gu)R~oi$ex_)o76KsymYgwkJaNakHO3P*ZOMA=Ib6Z zthJ-O=J;K3e{M>vh8C{>GUlz%=byhfbUl*B+0Ae{>u7Af-(-MvExl9Ot8rrKkWL-|bDfl1xl;_#Bi%}K(ZPAT2b#M~IVd2mtW?r#6-|HJK69POOfX4gET{!(Rg z!RVi%DF(^unU^arYsXkG4DF2m(96*~)MBE(+z91Yx0UV#9iQrpWLbIbpZ(uF`(&Q$ z*{PJeJ4)SboZ>AGLu2_Qu~hkY%f9OUrBjQil+CLjB+tMkgoASDMs;mtyPx2o^*!Il zp66ohqmKriu{7Y%wY4>dwG3`w*KW}wtqm`FoB2KI+4roBQRQn@6($F~Eh5UhXL{VV zjxx)$Dsx%vv(|5z`!x*WYAxTJd?4=d`#WjjH8Os`*(8TjI}>w8Iv01$oweU(-fi`b zd+M|5BU?>W*NNxkQ*~jhQ{&Y3L;MqyCvMWPzxyqT7IaUx+b`hd6T0>&=V?!B_b5fJ zo6EAGZrdl}Q*wTRPRB>xux_hDbHmU3opy}0nrM^eZ0MQb+3LK=Bvmt|Y*5nfw;x|O zeRV57qf9d#=D5d2VykPYBe{Y<tD>?C4X$n+}`kxcCotbeAcnke3#YW} zhWE{9yS_1pL?iSsOY5j{GQMt2)rp2Cm7ji~^R%Ga$OB>ap2y7=(Vl7yBT%Ze3prD* zeYh)qT>EPt6-8T=OOQ0~kod1%{) zCjaIUok!RP-96@eEG7E-n*DWY#rsM>wQLg}b2uBkI%-M8IuAo*bFQoFZr2&jIDv&; zZygoW(l9$OA0C;Uu3xrVHOX{>=kmZqULxE748I#}x3uqe+Gm;Xi*5nN&s4?bm(u5a zx$?y{^I=tQ&0A@Wxs~~BgCOxRa)@@f>~zc4`nPrF&9~%b)I`I%W`X*vwX5nf3+Cj% zsSeV-G9Kif9pWEe>>uIKpx2Yzqk5w5&&z}Y0{QQLcWjN13iM%9r#e1DG*tJH>w; zM9b5LUk&c<@yr3eBJ!n--O}C8Edkd;!~H8A)Y8dZjv`m#&m_{$ z5<9_5<-c`x`R_AFWX#W7+B8OZ-u{r^uhg$-zm{i} zbo9&DG~;rARf+V8)obhRCX9q5_1PQBoc2eJ!|KM?oobpYd(0P0hUxAlZ+D)oDJoxI 
z_e;4_>g>42CnsQuuflnj>2={iW{ygruFx*xB3Y_gtq>a0H3ny$#|3$WfANpAoTu7cc;Z`LT;k^qxmVj4N}?@vtul;KMX~%At+Bd5 zezoIl%l@YJ7L41Y!u7kGIO?bHC)$ZR_xdzhr{uD|uV3HL)j`MIV$JV}LRq!ikaoYdP@5VY4n$+5fP67c|OZ|m*uYClsS2fOF=fcj-W zZ4%CYlBO;y8?Lm{BQ1_taE21e7*eTO+|{dTNDao()XizvXEuw=rQan{Y|pk!)n6)) zHEm#a8ZC1<=hr{5)caq@1IA~B-?tD3=}=HzUgtvZ?*Sl$MeQVg7VF7u-gpt6Z$6%Z(FZ%d+l4}on&V(%xS!w z!+js{t$Vt*^jxQs9H5tApfFfzkS6UW$>V#;>#e5E=kH!Um`LOBh(5~b^uu-RevJVa*WL>~kj4D{?fJQw)LoVM#ZbY3lb zp62k)>w8kxo2ocP5AiG0o3`g1{;`cQK1YaJ1ImXKH&o?yCQ$kM6OAHuNUeW!AUc5G z+9=|ap@VIZGuvk&CLSrL;lfU$4u%eca<%3I)*Y=ZV3EWJ6*sX9i^62LxwK}N3MD&_EgL!^m zsmmfW;b-;jI`7{L(&AHoWsRu5%PqHg-ba}0tcHtfn4QQ7u_XXO_=QUrBo6QP?jCNRTqwaY5AINg4t z8v1YY6=vSd;g2Z~)d%DaPO1G8{ryqS#TG|>cc08CWlw#7V}}u@riMDkjh0ItcH0jz zKEf=nE6wvspOg6Ri)U(R$$yGqgBk~W&w8)WZUaqyxwa;cs%zEirfQi1Ye+i^O4;ZR zvid>^RoT?$PU)FRT#kC%`pxw8a`RG&sA|deO1Ycx`&)Q6QGb}PvDoKj8e$rB(sind1(DPtYHVtl(r&1(V>`9x zig9f(>&BKhmasMUDwMtU$352hj|vt9YFuYo?9qEGn8nO!KUQ<6Voaqg?ug&WFwPYG z-dvJYYoi)(^vO*Sa-he%K0bZM1l_R=Bbrq;e6;8VwMuoi!C zwZZk>D08`9f+(7Jthms!vhIAv@&aL2M8=Gq2jv%9CThAQldP7z{_Qh9FeChA?>jw4 zdK{MSRC+<3U|{R^PDi;_yJ6MhoC|64R6*9#lIYG=`f(l;BV3~Th823AF*~MxU40}! 
zy}-5dV1u@mk&RRb$e%Zyt{70Q-!YB8Y{__Ejx^}AKQhT@k4-3mF(+?kvrxCU`ihK=oZO!TCYEFN2Yz0$uvxn;V1jrNv$T-(p8 zk7b#44jLolsh*3X;s%cFH^INwJd;msCmM?yel;uG4BK}#ch!n2UKKYMZ>#>*5ly%o zwA;qIFY=z}GsRyTvMKn3Tcv(R*U<7yzr*sywY1unnx!V|Z1a!*T$jQ9ime>T+_O6C zKdz@?-yJ*`H}^r)$CrOEZ;8bc#B6OMO4(aI#+d-e#Apr$@ENSYKt}IbWHr zt*F23Tm7WUl*&yF8Lc5r#^vjB*JlM}m*wYI&sOa;4D7Zx;Ar=4VIF}YF2*LSg?HJ- z%5EKGOJ>t*+#bm<8v!fadRPY=OxEjVs&AKXby%FyzPQ9W z%i_nz^w^w=($IR#X7iTy9g*4>;`L^aT)u@IiVhi&6n@|Fp3tLpV{v}o=xSrtztnl= zSW9}TS62DA!CwTwVmdCGzVxr@wV{t;$VdBWk`ncjF0O-?yE0|uX8ykHc-_1bvpnP9 zQDt9c{}?ay*%@gUdDyqnqL^!Irxa6l#@hXMX|aAQN@=G`Gk$JP9h|;2zgO+=_8KL{ zX$AA7N|Vploz54$2Lv^T7zdy7-eWz6n^2*jnU`)?Fsc5y{FU~${6bZJ=GG+BpM15N zH*(k(u`haHn7w10bTK`eIiqaVI*R|3T8g%6lpQ0Q)>S0rjm&>vt7Y0vS9p95%?cyD zN0@$P>f1~@)NX7nqR5&1#@*T2^za1D@*bw=DyGWSyim;VR+cN&F4jcso!IlC&r6VEga-% zVT4|ZR4VGOoL`$;Q1)wemgcuxRdo9`rBwYubA~9F7#d|;>~r*Rw{#omP-L~yD$Qz_ z8KcXx-mNE!ebQHb_e)PN8Ps7%$@Ls{A2F|6mNxn;z4TYR+D48Y@VoC;uU5&Nmic8t z7113U@m8x|Rx`yBZNCc+XSDxHsC6KRTFKo7J_#;y#y6=)+*5&xeue2ltC?1@2F==e z4X;bG^A6_@C{LsEz{}h>WJ%#(!>ZH42D}Avz6K8n7uYg)}Ll%Vlv*ix4{(g zQ|OjjRHT$fR4lG8s5faNx(0V_ZhhB2N;#0fA--)ySXtX-TMEo8Ou~(H4XO+#8e1Fp zmFkIB@tYKt&3Sc64aaa-?z-j(zmZJB1kC3orIT!2X`SUj+8;L^FEZEM?3md)tvyrL zOLbozu1+uAonl?;PgmVg(?gX3 zCFvIBX@ypPOZiqahLciug7LI1l}{}dj+R{0yJYBK(#u%aAW1h*JY47_s1*DoyhJDQ ztxTOZQTu>N;AaagM0+VA|b)rj(hF)U2l~8m^x0#P1W3&|LVNkrK3`^BLuvz!tj|7kyO18k>-^AdZ%ZXLUEhfOTH027R?af6a5hWCpseW z(=E^?^iJtWMZd^PTsrez8>aQs=4#?pSLOR;^@^L?7{W{7CtfaDfXVb5Fw^xRwTKY0 zm1<6*Bg^QR*iqK$CGS#bWhr&DQTT89e7oV`Vu(V5Rnh#Kw+)2?2j+$pb=c`H<^&)UcA8%~4yw~br~p(Hcu z)57l1eddIVXlQ*f>G30UY85PguxW{CZA1M6MeaO=9+eawv(Af6w#-I6NG~Wr>G&seQrGG#a9zo zs6DilCMiQqGwws|BtkG5@FCI8`Laz~OKq!$V9MDwyqE~)SE2jf2Q!MFGX-oX*TyGf z`tmELQJbRGYRhnoXf&ozzT#@w>udn`AG+kp^i#oI!9z?%Hz(ioR@@8bF z_hL+PHp0Zz1;lec4Z7`0W;bSj*0A0AGejzRg5oF_%qC2xyfIgG6Q(_{C-)FHxMj>h z%@I|w@}?p{`ARia7u64v|&%n9zx`mp`k zx9lj)d2|P__MCPU&J^khcL`eQ0rYB2B`)TZ*%s|5%|lHu?J%Z|4I~0FKm0m<5L1r{ zVUuu?C{^@LR3!8kNGUBJ#y!JX$Ted%jQXYeoVr=PS2IMro^j_WBAztEtZ5A$Oq)<` 
zm}P6lPvY*dU5pVkO#4$K*Cc8KFo&`~=}$irv7C&iD%Vd9>mM}kzUg&4=@vb*q| z12wNO!F!+DLPKl!GM2msno33cx!hs0?gz6f;qB(Q>l~(_GP%Rzv#Qj zLoCKr{|4O6D+ze23OsEEtb(MKq$#Fo-XdHv=hcrq zPH6b0nDv{;Rx*0beCj#QJBX&j9<-_u`TRUc)>a1Ht9=AsW|c^<~d8SS}P(2 z(|bA0Z0>=%@Jra2Ock?@b>#}VBEFnBLW-$n)IKVddQXO9uYHLLnCI)q`EsiEJAI{f(o|C10?YU&2#fN>%yk~l0J670j-c!|zWs)moC7&%9$Q zF|#;|31IeOy7+PIvm^1DxI)gLo>H0A1x(5PNj@OYkP+k(Vif-`ru|OCB;PxP3nie> z&`yH$f@Hw~JlRH?qK8xe;$)s+PU&34<3sFw<|SrO7jgZeoZwHrfq%P7&=tkVFJweXHwBViOAlM$J?y^^ypW2(+4(%#Ttc_zMFlTrR zkx4$GBIr#}K=_V{^lvdqc?u=~%ekJITpGu8XNE8|tZg4Aoj>LiiMyENA4IREm(gK# zJmy`0A&&CHxxVZ*Or?Db4TCk9DSI5V=e@`y&v?#}h59#WlDF*Th!MD8PI@QX2} zel`5#GG^;GV`lIMO!D0Wjez^`zY+8hT1*#F->Cm6hHS_D;Ovqg(Na$;5a*p8d_-V=ai&6e$o0 z#?j-bxnv$DZ5v@ua3AbS5I5L?U%e5ldGf2otWk>C8tv}=vkPDpH4J#C)gl%D0`V*gYR#} zN-wg**qu$(zP(r&P0MW;Q5>S(1W82{sgD`7b2b2GgVv2bHCO?md&8)zF{fqBhN=)L1 za4*!#BH^QXWCfz~ZD?M2QWWKl)lVi)VPgFi;uk-GTZKvTd*QL=m{EO@b6{hb zwamY4Hm672gQu@R9CzSp&KUO)gSb=t22B0$&MVpN>=aHyc#;}K(ovXo?uEJPL&+J4 zyFd6R|DUUV0I@WZi)OXBovmPNxGugHYt$zv<2MVacq$V5D;9JsS%VBV6Bgq~&;)K= zapMrrG~7+>em?f;B@sg<(n|y$f&p|pPTCN<5|`-Tz!%%eS==IRpe9|rifiY6G5xv} zcD{x@LdmGjR6ePQgzg_VQzK8{323%3UG4QF|!`_SXZae3LYx z&7lM#q-bI=aM}QEyymjzf~HDyRO`if02g&>*D#w|Id_O?#0>Cc=$-UIM7~HZqUIAT z*%MkHMvwnUjTN30y3jtn7xWW+_)hX9@?8uSMI40Ez)WZv%piLlNAY z*g(pu-h!ue5ZTBr0$JBa&_TL6s(HCF|U_K@wZ*`oZdIH5Yq z%qO!2-Gyb;7rvg2PNH9;Fig4DqjvHe8L@VN)`qcT_ppbV@mg=~ zL+xQ^1M9;b;f`UJx(Pp)cmTAuo;bu+G6`C#_Jzh19@w13Ok+a;HMBzkeyDkA>2jsDP~c;(fuYV7T%|i)7yod z__8ENqy>_iMTG-zSF=*)wdRC2n!U~1va^{+%|ms%CYp;Q2hicbaQn#7rn;=X$uFdJMdf01on)PUx+C>|N$=~A z)8C;NDhU+)A`Wm9n4{|33T@Y+j-~Bf$^cN7+D#Cyt#`IAQkP#iHS|x3EU6HDd+A&Or_7Zk)F8`I*k<8E?r29hhLnK4K zM?Jw$XIE>LYNhIls#x`xYPRC5Y^Hpg>N2yDn2jtSiJ9jC^gn_=;#Qsgx(2$ZaFcn9 zPJ+lwz>`AEDt^Hx;&i0yB??Emp~70VSKX+2%QlhGqK7)a_44$3=)4fQOMXj77@aZd zVc;XtgF0Bb%t`jN^LU4DXMN|$u4i2_vL&)C*;A#A-6#y#8)PuZFvlRo;H`nHk)d&u zp+w3E&+}HAEO~C{gm!Jq+UB#(_gWWqILO)+JJb?}<6T9^q$i9n8NW5EGT1GZ>6{dw z6b=_;Q@e=}ZXuJd&XO}70qw3G6`~P_`67j->b&Zvic 
z06kf7lm1LT<<0rt1V_rKP1Fd|lknyJfE~n_E-h)kE_zAT7j>4dNC&C=aSt}w_XOSZrAo=?l95JRqYz>ULfpDCW$RT#b^ptv=OU( zOx03uvnL9Xjfg}103Z-G7lEBIhoVF}ut^6oh1dyomO!X3Idbbc z88n)%K`E#QaRPcwuYjZKiFkCrCh%Qc4b)3GPKed_<@;f$9`F^sA!@$w!~s;ydw@wN z!WNfPv#36(pa@b*=<))t7n={IohLxEXW72MF8Qd;w?prWqgF#j!kDf{7Of5VT;#zyh(vGcW}k__|Npssb8`j;}M29Pg^y~I1B0_qwjsQNbGow)$u$unFf7mqjAo&X7K zq?S;DR4Hm8d$I>`%yZ5j8b5CA8oc`xsU@_b+G6b`yrVTAN=nWAE@+GNMiugd986uq z%HL3vp_KBBI0^k1Gp+zCFK@Z={9FDTAJ2d1e}QGHgRaO6;vi{6*-)9JEfg_sK)bDw z+sN(bzTz}P_z_T+nF9Th{rr2>fK~hoUW>D~0&1(~O0d&2`9f$Mr4uE2>Bc?8#wC?s)c#{D(ur* zpi@0Oa~3g!%myA_1ofU^dIarDpQT=qS%eCDN#po+V5*9-zRzH)A`z>0;?E^~FL>2; za8+ff5E8gU+*4=}ZHJmn8W9S_A51EU{e&mfc8=oQ!ifV=rlX--H5q#~mv4Z&PXpdb zc7wLdK<*b;GN(fLVgyY2G;PENYhkAw8?u0r_4tIrn$fYU zd4HhcWvHh$z`#bZm223;OZ;Zw?^nR6<@`J%pJ>6J#-U#L5B^aNwImnRA(_P7|JNDU z2_6``gN&diPzzxnxzO)t1BZCkj=Ic2E=)O z7Ca~p3O&W_M&NK$RB2aHdtb(zlBIlqVhjB32KG3I(1T8p20s%AO`4&ol3ST^P{V7} z&V)KzHai;jp^Iv)f)>Jrh|k0abooM;SK3_14#DqAPGcR1ZJKARtgy&Z_N><3*O zC*lM=wSo=C3J5j|-0WC*!5?cAAyP8LV{#=mhUTFQ=pfibtEtP#c<&G|_M;vSgGw5~ zW?|K9k>eOHi+=_WSOC>1UA!SW8h&F;`Xe?8z@~%)ksI-cv2uc249zJAGz~vqxDuG`1w%CD{B|DBumk{td$_e}d29 zP*05mpECnHEJ2+=8;qBRAA~B}6VH8==!JK&-KhY|glfjSmTK5@6lnqbA5LySjQ<7n z@EU(p086zbiul*ip!|#QCZ7OT+$R%I5h|fWb__dO0GmHX1VP_xJ+$?VU{{CXUC#I^ z4WG#GM(sZozxy0K{Tb}|A(R=PLgh;z69&wo-bI0lH6;%5&U__Q-llSq$g0n{B2R}sK!DO6)w|*jyfazZZ=5HW;G@bAz=aVCGo+`E3DWY9*&&Ls=2Hh^uUOXxiBzQZV>!v-y7!8Eo*Ia$&hIp;=}{ zJcK5wE7mrUzW{wO7x?v8M9nq)b<7UFNDL&?v2z#Tze!N}TLT5WDL@&~L@3T72KuNC z_%s{*do>Y6Hj)2Oa;iV@&QdVbKVb_cu;xE#ZU($^6CzVIR$<2f1sx}bzeez5kB=0U738aK|8_S z(m)p-z!5UM$*Ba>Gy^9pA)A1eRv`l00C{{O2=Gdi$YJn_X5sQs;sg)f zxpHLHCcJmt4ldN={~59b+}?012e>4N{Eaxajkp6g&xyE(mHY?H7LT7hOjx3aaTPdp zCnEJ$Sju7O1op<0FD9}OU+%(^*FmN39Xd$C{4V|jP?|r#g=5%H&^h|XCbAT_1kv&f zR}5R2kNrMH2+6gG`8?T7CL)gngVD%`4P1iiAWghO#w$hkdkDU4JZv=+n4%FLJCU$O zo*D$Lu^)(h2Y6GwnY<1hbt7BPMkD+B1D!8N?&(RWiF>3L(dsq!LynAg2e!5p`%wz- zZ9xv42)5=9>~SqLz?Qc?WyktH8)|2=Gi|>Q!0&Y0Te10Ec2o8P$ 
zc@66}09#%FE@&?uOaG=^z>BJ38TPQzQta1YA`>3D1W_Rt(O^09%SoI`8!S15r~os& zl3UB(1IIZFO!ZwxpWOzIcn>rhKe2{fAAI#g{8|Uy&N0BHC%8OL0UfQLh?w`m{Xc;R zJ;r{R0#h6WPZ2`oVs&CLr=iftwSd2h2rppKzrgEVL>BCYZq;POf+t`bt-!PP$Gn0Y z$dY(R5xbI$XR3fTbO*XRi!YM^Q%9Na{Rl_>EAY{6O;X%2BY&*H8!i8w~${ zKm~a?p$|~6{B!a-;J24zg`fE6z$^WM{ud(h#$v@Cz&|6Qsc8$_-v>pvT;2dmX)LOO z&FB`iLH#&|?FJiXcoFcWFXe^$C=9z<3FP+(9{3fRcQbSxZ(^4gvddsk*5K+NLs9QP z)GhOYa=pO5#*(gN3ZB>zn!=sHp87yPKKu>D_EPQ)`g^C5nPbq|aKzId1-mv4yP*#o z9}A?R8Fx*IFVT+F|i z1gvRAWx*=`==$}6w&5>)x)NMs1tQ>3ZaTM{>j{={BP{Sel+OHtG%i5<%o-MP9(~T? z=p#-BoYWuq5)1Bh=PPMhTO5LMl{CaB07p*{~rKf(9^8i$$a;d&zSI}^qHXsA&C z;{N3(foTrrRBRIB;Ce)ra75dlz~gV=Nxjh-ng_d~2!AMBFGpk}kdeQgb)UsO0cEB;VY+zO0v3t4k1xc_8uu$>I5W%eXaErs8K-$ekKX$T#h*)yCn zGB4J!314H7F~!hubjO~X6W?J$gOPVt55^AjFcFNGa6=otFUeFrGFQ^~iGA#!K|R z&p}7L7)-)=%@WoUvEeMN${d}?NY(->p+DHY+!~y2GL>+AkPVg09@sqx^d7>ohn~RC@j#*5xefeA)KqGs7`gEQBHm2wiw^M7S*-XP z5V;Ap0#)WijdL4z}k5W;^yv2%5pu5+_K7#N5h7G*NL;`cvrtXOLjzAk$Ksj6B>u*qT^yPK% zQz=|4^2Gu!i5r1j{-;hn1+-rV)!0hx$u&ZY$W=-dpkK>E-#Y^t?me)zBf2=9(Czd` zeJRH$%mvc6zz#}L!Dj-WheHQ60RI03PwR>f{u-!LCZi6#k9ac~NW_A;52Qa2IU)j4 zzZ+_hFF5%i^gS0qV^NK0@Tbdym(O@(XmqOZgl`f5>Y?BJoZAbmh-h1|Z&P6(pAjXizz;k_Z8ZpceF>*fiArS#Zj3Z> zVqmFuFdP-Yq#T}PI6SV6m<+_J1B;&mkGI4WAPLY=D)kKgdM!B>HP8mkb~%iwa{)0H z9cSRh!H7Lp$W0r7n5UEG=)kTtz=?Z{E%U4(fg31%jiXh zg0+f5M4Hbp11H)Gb9vsQd!2+`%;$&V|13fk@EP@`FO`anD<+2l2S)=x`=AbUm*(JhUMfi67Ylfw0g`3W;v_FokV8T1xD^d>|2FM z-3uOa0%tlCNcA!D$^t|NFM{VM62IY*_NXX$(u(Ry?S`6iGB}tF)cBi`&k67U4g-(W!2jY9>CYjeUc}vi79gk<@XN_SE?ZC= zD!|BD-~_AK5zwfej%vykCwGsW3PkXU4eRR2v(QDuZ6d%pfB8p$h(&erE(}! 
zlma`z9%hhtVGpa3VLXT_ya|4%6Tg=m4g45@n^i`*AF%`Ldw`v=LFacM5F!cw{taTF z5vtcZoFk7tt_701hsPln z-NmWdBZk~Xym*f6+8258EKue9|Km~%WWqlazLThnz>q<>k#h&t!a`uQ704`Q#2iF` z8PMC7fz$jGPlBKYJC?kK3=)d_A`^&bU@Z$#3%^HfdV;lxP`lZnCY_1BPs6OJ3`D*Y zhz%~NRqvuo$;4Lz_Wdw0ng%@MP|h35=Xa28G{Dcx@N@0l5d3TaZw4LQhuEvnh^CE* zg(>jjkHB3kke8J3qSx@~vGCNj;2QJs6EpFDP5>z_1{PS4)9a47I~>pQ7*XjvEM+C| zR4!u6b@<|bpoC?xlpctkThIyU#9Xp`=-7+F!mok$^F*)-&#>w#!0mQG+{7_RTInJj`Ndy$`jI<)9AJfqH1 z9I$%~IT$s^J)p0-!@VBh&CXa{>VUbsFz1Ud)k1Uj#~B)QYwZOti(wk z0vad40yjZ*dN$b04_rEG(tcP~B=(m>_KU+>o4F}Kqdig0jRY5Q5$xbYaCC*p)7QA? z$bHX%m0qLrJ%ei55#F~7HWvuyqyW7QSFB(y&OQ+cDi67EFwieUu0|AAAW8%SW%9uM z@37)?sAcW2<{WUtb?7%eB1DLM%dmgzNlWry)KIfw>4W+6_${Yl2QB!yr^pVb$lx<^ zB6VQ(uERQ9!Nbi$lqf`Xb(T$MZGe+;&j-G-gF66(zYP5eW9;cd;J+H=X#x!V9oSBH zE(%fOHtL)qxUsSqHaH&a+8!WQPuv8mNA6yY)$PFquS`@wQ^40zuqGd{S^cr2W~eNp zFmWjq7+?kv%69nRHel~U)D}-*ZwaU#CgTbF!Um=R_uN1f&O`nPLXMw_Q@IWw-NV_S z%Vfpf`g6YryvJZvY(e1PzMv+mj;;mEZwH?!QpI-UAP_ z61nU$>al)c#LgnG$H0eDfTKLHLw$fIt^naL0$<>Zyto7%y4OJFH^@0aN$vlSdVhLF zNAR64I7@59jyNExDY>2snVXh%Vg`ReF;Yd5-9gr%M1IegiQ~!AF40e*@lo0@y$?aN0w} z;$-SAZkZ6+Gj~Ll@vu2vUlj3|Phf{f0p)K*wQ2!;au>E`23{r> zm4q+;iH7&BM9im=d92V|+>1T!1m;={CTI+h@Dp%7zqnd3R}rWnmV?`}MHV$eZzlnD z+%4S0_y_2IHrJP%i3*_z2-*a7xHXXeZp2aCG(?POf!BM1m2w2@@|B!P-9c@t0Me`i zq6h(-@(!!)B%D!4uRZQ+%Ri6yA?(%^jykjq>V z)h6P<%iz@+;3zf&O|62RzCa#3i|8vryS;Ll~)?-AfxUL#|s09j68 zXM>Zt$NpxA!t!gm{#ZEyHTmg4MP}%HTm^PIg%cZ(|LWkvkU#IhVh$mXhaiewgSVOE z98wU~z7suA)g%B(-arOEfxVoBoPe8JIE#6>&2$J6d>IhkYQ&KD*w1=cLOS|P-#HQy zC=DJQ22YG2d~k+=h$|zJC;V{Z=shw?GotcitmHGg!?}o3sih7NhG88QR zK&+t(CwdMnQ2+mDhD3PYM{o}(;R!LY7MiaGI`hKa)SckDUV!D=$~u9wjX*s(4|}r> z9Nbhy!a&^FI)yy=1E01D$YVDm$#QgMo4J$VEq#$UoRIUbKyU3WR&bN*2iD~=zAm5# zJp%Y}BNa@UQT5;=>d>XwfOi_~X-oP8`ojY1I{454bWMiv`q)1nnC3Y;k%Kr7+*e(T ze03XjVmMaN2R`Hp&S@=ByCaabEBx*a^4tl;`KdTf^gB@v*&~a!1MLdQ2B6+=KyX)( z9d-fDzC^4a3uZ=vTThRW6O=#;Gw^J)VS!6wQ^jD(ctlB_n~vYH3$~twulJ~Xf>DQ$ zhs~M5W{q)rai~|;<9ik$R?E;sYX=q#0K;a8Y;_O6=NbB|MZksCz;|Q7YaYNS4+M8G z9a&rqMkpF}yYaSl@LZy~&8Z`(T%MfNCrd9VPIkt%w%S 
zu&bwV`qPm|#AGIL^d90oVs8Ozp~0vZBWWfADdC*g`lwS%?aJ zEO1y3ZYl>L&y7JXqJm8vMfDVjy7?Z^8OiD5esLr$#tb(M4Ulv9a(596z5>CEvG-2E zs<*kpoR+->ZyU)@g7u`Z9c(BNMj!MUw_~-wK#FhhRSq`U103K^c-G&D8Df${o-lz$ zj{#@R;nwjiDuQYOE0cqJoWbbv90T_~7uBnjdWT!x%fR2SMQ`69tmHz(UmtiuC3bEu zIz&y#SNDN%FW_l-c#bu8Z#tg)9d@IU$Oh{)06p78pyQi(!k36(neh2eFf|-2=6Kc- zw$lp{_9*chC!hxQ3`I1b45auSpL__Xwj5S}75U{V&8BPP!f z-B5SM@HL34?-6Zw!rPu8j^0Q0`{xd045IN|#0wq7?g5A<&Zx-VA)_ooR$GMpA>&8l z?tv>WeZgyaP-@(ePk;??M63?IPrH3!*dH1cCP zkf#I{iv_URKd6$d&@J78>UShQZ6SDm7jzYF<96*8vL~3{8#t*yC~O<7)fx9{&Vk#C zrgl-=si8ReWaNyIbnr{#vBg4l`^-vWkZ z8>+*-_>3p~MdXbbbTVp?+2e5z67U~A;PvmplaI#dTm@dJ!`B-ynmvH2Tfy=q<7Wrs z(}Vt?Tf`VYM408s4;$g7KXF5O0sLSY{4^W+!wJ6Lh)j1MJ2(~C?+7s086cVQ@YZ3l zzWrc=?*J1mM0WE-efJvnbqM%r3%oHC44yq0tW;o$IHDU6!D!?KCwP`VZbE-YZv2B| zafb%GI|=OZ8uW!S$WHXf(oxYS;T#SjQ(uJd?*o(M3-8~8lkAC=4~Mm9A#ZI%A|m_%7kLEvs1Wt=edLNqsK#2+=@5c-HAehe z4OBK5)o2y+V+fcCM?76`>|QE*|550FK(Yc{$W(9}XMpy8;41x1t-8#KVd)P5CMV_%T~imR)MeS4($CHb(t|bgKDrF5>!)0 zn4>)k-P9lW?j#_36L`Z1^nsmlOR5ex)Azw%OHq&Z$Ni8k)B#jj?x=$nqKdi?mRNx} zmq|`QzIh8z__J37;P=+R{9QonxyVN*@YVyUIzM3-K7$*G!#?bSon1n0R|CHG8}|Ma zp1J-1ezO+2AObO>1l(FD&eM?-pmxb&ud##KGTb?;V<_Abux1CMa{Y>|XMAO4o^~Zs3BkuAHrjM5M%gw zFlF{!Hn_kqU`FHF4|uceHG0sk=scQ$7t=-N%0<7n8QjxZ&Il1E65Y>k=%AcNl=<^U zPY$s7d7P;MxedAM51+mYm3BYe+L?m9770v~1^(m^;@ed)G9=hA2OysUVjTGhwNfNd z<4PcqXT(UnMR6Kv-j7N^9n_4S(E;K#qh{kC-&f#{RKzutVMlX%SlwFA6p^45 zzP1$kYYwtVD}I{}{~eY33-D@lf#~m}YRW)dHbyUPEv)4ZI%qO*ec6~v>jHk!0YC8{ zqV-{TjyLN4&)C;~$Rckrhy5$NmKy~Yc|5p4C(KCyNZ+Lg(S^W|_2h1>oFE&4K)sL; zuLEJ0;}+*qurns)ZFm8LyzU4L)DNG#A2BNjd@1h9B8G*-V>)qi=BOS1a2A`8_oeUv zJDlEi_@53k@(sMjq(ywI0gv?z*>^ggZ!>oF8ayi*-&2hmz#6=^H{x>@EO!Tf+iGC> zSbTm5kmD2}yJ9@k9>kK%*!uxM(Ve)Pvkm*w3D4Mx6=mW^g%3Q-4;di{^^6D%dMv8Q zJ~+3_$b038u4|D&X5u{W5Y6cN$$)MCyxnjgF{TL>TNJRe13C(~*fs14yw}tLexf_- z{3EDEpj?CfUWo{wjQbfM@TSvU@KU32Pi+}Ez9PiZ5xBvZh`*bFKB1IK1j@XGznP2L zjz_L7LbP6rPgsVvzQ;}wh^uz6@RdNNIk;@3eeJoOp~}BLhZD2Uc2u zowNeKkOUiD0;~{(XWx(2jzZ^S4p^;Zb}zdeH`i9O(^+5K$ZEn}to`VY_yE!01I9=| 
zc3X(dz6mwf4&dbLU{UOlC61zoj0GAS15fXQr{2LD-=On;8a8wYXD|WLS&1hf2Y$=~ zr&W&(IT!52QGC58?eH$rVMLP2lmS(a=TFDZ_5=3Wj`*7m936?TA?T#-17ZnBy*d)H zcNnb28oj#&ME7Xy=p5LyE20R23V#+d(K>L`zPP8>g801=bxHzq;S*qJL!e}j|MO6n z5ZnIL38R4SLvgm_u{wSJB5a)CzOZvyOI86EXC_;~Zsb^Q8ho^a4~6x`qNXSZFCoDF zmk6>4dZYhi=`6sa?4CBhL3blch$0{$V7FMn@~kClbzK!qvt|rqcWJ6gKjb+^P`!J_jeQPloITQpd4drW$+C11H)s z_hF#GThz5TXvpu#TOPxHj>5tnVGv0?uQR$+9eTzj>S$8x>}Oy--N4|dYUNgrzdaG}(_wc=9xMMr?_Ov5Hkk5WIK3_TNjf>?G-hZwUe_O=wBg@K`l+`t#sPdSg}J;U zj#?2rC&9a#5haaLbO+J(^n#g_g1#cl+)LaKA}2k>Xpgh%Lm25DB2qlhTFLqk#;4{I zOM6p`C?NKUdP$_>Iv#u)^be&wCy^TAA+SD>d20yA9L=bvvszL*VarsujI9(*{l+`y zk%i9$9W3Yo`oq4rB9L&nv=%N=q*h=j@XS|yRtFfK1Bf>O4kPwXr-3|esdOh28^U=; z2suw%o*_k>UBoPog5xAm_Zr75+Y#l*p(fe#XbcnN%pVx4VB4Zz?8y4fA?rPaR;epr zCf_4JCO;|P%wFe9bgplbrx8(n>BmvRLEmF}yW#gLvfz#Qb}%zKhM2pXPnZT$mytV- z#vc8!NKM^N09b#IRn*&nS==mu}F@C2-+5`4EI-i?Q6 zda=UJYKCnNCRj=Tk~6M@RZ8dp=mYclsb0M(#V19)LV`BUDOqH9rF8tHlI@GyYA5h& z09U!4SH^=#=2%)37&!+8X*Yjykm%B*R0U%#Cet}bM$(A-NDWo)o2W`RP;X1=gh^2M zX?1{YECFFez3UB`Zy7##kQK3H?bh(QnW~mt;YKPILFgOi=-cP8x4z70GFHRZW5xZ5ad+wL%aCZH-v)ywZO9mZP&f`utp<_prso zf1}G?U=$y)@{!yxL7N)9ir(}Y*zA7ZdoYZ>FVT1;nc8RSaDRx-r$E;}Xce(|{snMr zE{wPrp4A7KUpr-Ubv2thl6bXAN8yQYE)3c3tjLB>PO$3f;mPBxWD zC*%_5AwtcF#64#kf)#7H#tGoz4^+l13#c0d;4zOF`v4(Ec5Eae-Ml2Kv^pfA^1M zqBN3j@(QUhb?9}}AK%l{yAB@Lm71v^el~@=$N{N@o!G;O&2!K{P&*UaZZ{*M7OPO)0`!KdCR7oSit{Nc(^=&^}F zB_6~m$Etag9o5or{8q z^p0I9TVd;WU=#JErC6WnxmwIK1F1`t!r8^m5wD}*&!k(r7dQO?YKrUT+4-YunW;>~+MAf^qV0@e(xbwpub!tCs&CbXEUqNue9 z3tPoC2M{ZEpp#arrhpI@u&49vZYX2zo?%%Yj zLOh3h-ZXU&juZLcC6MD1=oG_z&jm?5`1}Q+O>5>wbUQwT;~2r(-(eeu?AF((dRidW zBjZ>o+fO#)Pi>@$>?O*H2azL`_&r~mg`a%q3iY@WZ?vCM7{N!Z&4Jq366zht`K)f} zIUlh-CD~a7JaGfw@`_#7C+QN|2Qmy&TGN4lRdH7lLC!TV7NCqI6jc^zYXtfjF%ttp@Q!%vRQU05Fme^_sxx}WJ+A)-ub4^w z-Wi`yp(gx>xbi}ckp0nm6HsY#6}`ZcDtdR;@y^G=7bPsL6|V}yqwUC0M2G!w-ZcoN zr3F20518R{aLpEmvWj@BFLfse%|(fs!U_)r?=FIZn!HaFGJqB!geRYGuQDPV$W!Xk z;gf?>@D2Tb5gEp3V)R>9@-6>YgZI1fheSA89KK+L!sPuukIe5Vb@pibS`_%+ zH))}?0ab>U>~q_NCACy@`Vh&Ye3n*%*;wKQ3am9 
z1v_f#)Ev*<4yTjh-%)4byU0+2(Xb+!pC|Odq_We-oO=8%PQVz#j+3S2TsE-%nJ`c% zJky&jz>Z2y7W*f{i5VL51bW97$X0-QyRhX`RB3il+1bz6T7E0eEYKzMxjaN}F4v=}OBrfc+NJqI4T;(My&uiw|7j%9Fe+p+DxiH5U zii3)Cil*@83!s`0tmrIU;w8D~GQ89c1*RJnoD0~1(5X58NNGe=X-jko#-q;T^CQuG zo-#*;*sEZN*U?Ca!p0kczGu|DV=>BGEvQvRM$!tMC8T#ztwvr9`K0?;Kp6>$pY`IlAgmpB+@`spIoBZGHl)qb`uWYt7OK+ z4zUTSB>v2`m2@$jdL5R05+9k34_?4GUeZC~0eff!OYlT-YYg`cqhHOK{VshJkJ;1t zfSoee6h(?|RC9j7%LZes9=yIYzR?49WC33;pyO_$y9*h=JsRr*yhP|D;=WIw@SKGp z=pph59Tef0(r~QU9vk>84dY7YFr&v&$SC48@z5!W)JYt%d3@It~A#r#B=j#qd*K zo@>tfo2e4WQ-8ucOF++hDCHS+<^_`P{Q-R*f_N|Z>>2QVx#SN#XC?3E3FopPaU>tVWyXUT`vTM)5B318U|lEke>>ttL-GrEdLnFjcyJxsvQ4rS znYBEZe%_6;S=4zKqJJjQZ*mf(@J9K1Kt}3Dk3c?3$Xoc<4J_?3mcNhq-->>Ujm+sp zeBK*A{2#2Q6M35-&x=MkTunr2fV~C7XF|zkZDHj`%-=5jtT#XXBj3Npif_jrAA-Rl z*jp}@@D`wU1?c@9z54|_2V<~2!IoO9S*krN)dx2Iom|ElUh7H)Ckd9jlDyMMRmZxv zz#l>xZybpKp3FRv*(jxZUWM*ZOsstXVt5cSimBgoUrSbhHvD`!_P7Pb{xyhHi#K(^ zpB{pWt6&04SyxVK0g})+1FlTz-(fu(A=h=tZPtR zIzt^Shu+R0{+cDgZ%)tir0$6bRifLPu7!Pq~lG`nR{JwjCgqQDq`IsSk_1~|6^GEbe`9jn(8|I zwJpr+6HL24C^3aA+z*3~VV%B$T{dtDTly|~f^xaUy&~#-EwGDxaUKS{3up4Lf&4!E zhDxL{U}QekKyB$GEPFgx@`sUk!qaWY;LWKg*rGhxfl;HG)fgCi5vcnQPJf2y&8OO9 zP4(g%5oA5}em`(Uj-Tt(HQ)+go=q(80Q2)jGpdL83zeoh>+gfr4Fc~Zsx#Q#UMf{G zs#kMiD|d)2hv5Q6C}$Sb5lqolmJ``#g46?93z6qZ`CTw*@sS)nj(w=sWXSL z8pW(T`oIyaJQ}NcLpS$gK0Os1$;GRlD<$Nh*1X$nP~8{rO~8_puoNe-^fLHwgiQ}Y zLDS=FI2ELEM1rOG-V!oYV|29=Dgsw{=4!RNszshGaC#j`n+ImJWZrMX8aj}t2az+( z;#%9HIk>*8aqb}BJkGy^@$1>F;Yo1n4@$)+IPP!yr8`LtBfzmhf8V{KMqJuR4F2|4mExcx}EZjV20zkDo^_`&%m5AA!`o(q3_a<>&i$WAlrS+J1nC^UVfX+(EjYD4LG*%@_q-H1#fmXZF#{_ZmJKaBeTzZr!` zo&<|a$YG}7x7ysv@&ol%SL~t{8HyerT})n3gx?QhwrjA5U-+mEUbBe1s)>GDrV0TVvpuFM02WoSJbOT`QWdiwL+|0Ga6!dUSF? 
zXB8MM>LHD#iLj9U*fSmN^s`=ox!gbty2rb(A#zOT=i5}K-ZPF!aPmBvR|c_DMpVCq z@05XUq2w0k#Pg_RIz&Bu})ti^;T4x5UwhW%HJn$7E=&cTUPC~}Na9H+ygiTX z)Qes>CH-$}WtUO3u8`4<28s895|j9U7nr=9Zk%uM%w@!zE@U1$*pKLq4&a?`z~;wN zD`^5-yG8Cj2g}n##ra59e2@7tqcY$NB8(z0=}oWOQ0l?S^fBdA=UYrqULaivzT_*; z)Kmwd2hAp8pT=gs;7j>btU9qCuc#Pgg6}t&(a+5E2k_pWne&I0r@}(VvNvX|y6*K! zYDHywl59Bm<}961aCe{J$n%xN>h3bK3fQ4b|q$AqPi4`9^pVQ({HNQ&RF|EHScVqO2Q7F;9yFTo@D{+%_GvkfPYnU`o?JbdDEmLWH+(u{j#g@iegUFJdZ7RVNXo~D%@nM zl8MxTwXmIW>?Z0CzSvSN@s!4(8|UI-$5_WeYC506q&x5!12Aj~nDtVvsAXf1!LW}r zM1yO@l1DIWkuSH!7mQ%4r|~x%b)7*DCrFmG!FL={QHTI2X|J0 z;xcs%cR~wGgPT7GVJ%s|B%YN^Rx2{qR(zr}D$ol2*aCYrhBJ%>c@{9{0r1E6up5CC z{&dplVl96T`b7V4U0Gy@Ro;#l&+za;if_u5xl_O?q*zah2L!??hsVI6)_t zwUcuVrXXk>NIahCv;&S?fEIp%zLpHE;{ds98vNoHnaT;ZZ7!iIpO4a5BWXc)wu1<> zlDRdICJ}=!!^Di)lMzccevkMTg$MsfE}h3{PhzoIFoNx{i(u59Fc{-U*iIibn2R8G zH9yA?JtQcxZ>g^qk&D%1rifvPn7X7{5zQL-ycN5BD_Kc>X}tf`P=QX{?bS7{3Zm?nm@K0>c~5Pjks=ykVz4JpT&O!GxMfE~t2kR|!wZ zf{hPDDGFw_53?VjE0Me}-8nJD_*2+NQ>yU`iB5-M9HPtY0M8pnZjuCkY~sDM8D(Ge zWFvZ;UQr1wRJ20__am}-V}pCiV9tTO6;uQ5h;Kfy>lJ+3NoG-Wx*cW)GtqivoXa$U zn9-l_o-(gv(E?J){L=BtM3CAX1*jvQrX)WSdz>=ql=Fd;*s;$b78Y~>Y_$S4^wp@k zoa=4E*v5dOe%QbkBDW<>>mH~vfW9jk73UXdTIZ-cmnzJOGiLM|y+-BCV;A0W=KBKm z^i9N|Whmwc;h!mZa9=zl1*W|itjNOm4NwZF(1#g7C($ERsu1kh8A}+%wdB)}^OE0x zMA59~?5^AJkD;)qH^j?lXtZ(6=x8#EuJ}kDtD*sFyoJGj=j#%z{2zNCj8VkBQTbIU z1X-}LI1r^bOgV<0+b3Y*D;}YIcLLpL5#QOf`aj70lptpmk5AyhJD630Y8*MeT%AMz z;&a6=!9Vzz(B>_qsVF@5l5}`iTUL7%YulBdMQz)K5p*J21)&@?;(K=(XeI9^&g}Sw z*F`fT&Zgok9sXMfVt)tGreIa(SVaYK>n#;XOLnpva9%>4!k9??A2S+AHu8;1O|r5- z&ku*?>eCyMCFu-je?a{BN~GEXN_^)0lQQh14;6DxRVBuZr2C^Bl)fg>rarxqxZ}>8 z&PM%e2__#Wmb}8EW|QT*(HX}brP&|*gy??>3z$M3kV+x-9!>J;_25D{JnI}DGKQ7y z0`rK$uMFr;F;$7vv)+RXGs#5SuqR{-5i%Z(8$(~54^bx!zVaOQqz#L*Ac`&~@`~(S zNfywNJn9naup4=E3EHa(8u1jyG?@5MgFV+s2B7F(#R?*bj~%2RVc3gM=mvnuPr-J7 z*zt0rLk#no4??Vgk^SSdw8$p*gTgn-I_GdDf^Q_F7CL|zv#|61M2!d7jX1@23bmEJ zaI8Ii*8qeppca&l9~=PPf8qU_tZi3V<8-*qPI!#)=XmB~DvWOy(MaHwKWuC^@A(aW 
z@(&L61MYZ%d7Fe5xfRy(3X2S5G%37WGGh*8^slkGpRDOR(DM$T>4@DpV%hf@^AsXg zB@8K_oT`irxg5Ovr_`rzXN9(sMwDtrG!(U(>#U5D{Yj(g?wO9J6^r(7hC1hm9$Zbw zRtIuXGg%Jm_-j=1gY@z=$Bq+G1V#LIBJ-aQvMfX65bB~A%4h{D(hZnfAlb-VvU~?v z@pqW|9jssp+IJgpbS0J|b}ZLNwaaA;Bj9nOC&vwru@hV9haG#ugMwf~{jldWdLANB zLxU7^VIcRYJw&6I-=$U}*yv^$n%HBrj9IaP?cF4TWwJ;8G@4@wIrA;44HfpYoX{k? z)O*65kI|dem3$>^lZ%^Y%%0z9xE-&vz$B{SkCpkZ%pAr2&cj7`s0=YJd3nbaaU_Fno> zYLtnvic0MG3sE@^wiQa0?o2#fLj^Sq4RH-R^cHfLF=SXh>AvU;KWU0u!ToS~yDfEbJ6ioIa;dW`=ptnepm5Q2A31@nwmPtk;eu<~o@8Y=v6 z1va!2j%NisY6ByL6V7W>FRrM|>!?L)aPf^|QKU-pA}&f%3ZaC#%yV+>=x3!exgdwLA_ zSxrAfBedln;BK^HuVNh9axC?S&3N1vVt18d5|R3k(hMJ-!c~Qn|BC%8A(C!r5Ou`e z7;0v*s95ir&8_4jr{S41h#i+fkIy__(~}j)9-KRzYN10HRH{1DZq6Q@RCs7UT38YP zUnAy^gKZk%9s2CX+DtBQMK^^3=wCo(?-SADgSvlS0kiCjXEj5`Nd?n1R2f8y2V99S zIa4?Oou)oPE1rn5AImt$&Y5j+jf)^lcjCMA z!4AifRrbIRhof3Np}O%OQBa)AlL7h~qx?@IC$LAm=?-Uj409J9qxH}MqQK1VVB0Bm z#`=~R6$WabXT4KUEZSfXhU}X-NUYchesp6dYRE#q!K*gHq+VicznQB6L=YeRz5uON zL3D2l=hSAjQ;6&PS(TUAd@NC{hT8gdkb4uh;()3wYGUIUvkl#PW#riUFxwuCrwN~9 zOst&7XlhYbCFtNP&IaiRs%e2FVpl{ISNBq_0Q4afzsY_B8+t1yv&(D&nz$3Hii+_( zgay~|brow}!1JboB6AqYHEjMkt1=lzY(pndBDmR**u4`xTFG-}vud7r$3}WYC&B7& z<7K9-{|kCuN>KO~qii;(r%yxqgu2-%PWISJ$Av=SKs-4LUYzDU@E8;1Vw0F~azmQ!~rJTHX6kB>rY)u9c4&dL%hyjyf5hIvI6V*pn=`_|^%}7dNYDcil z$?(gKAlN+e?xCRBSRR9jgSuoFhf$HoQVXafM=?cdjNn=#loN>_S3!j&7CEsRzZyq^NcXbx-aLDx!WMzEUPG=%YFaE^vIv3U|ZW5cK`8FNx+C%A_MwXh-m zTiZAv;|&#+dZ=14@M=r$5Alk;VK<-l5#JvJnu?tN6H)UM{&E2S5>L@&$Pa)G4~P)!v4nlFdu?pI2n+85daoh37N;kbl5sn+T3gBR&cpb(!mi4> zl2**-Q|#7ET1N%Y4XfS-+I>NfwW2>nQx*wUbb>!@0xROFs?P-D3gDyFVC775*%9#6 zhU{|aPOgy;ifZFOH{qeF@E0ra;{xn*AKo3L=TQ4apK_ z;3==c_iNz&67VCByx=bD)&lD=Cl5;_FUa5;IuM!qZ?IXOTyapM@zSc5-&BU5$c)3s64j)QLoRAu%vuL&sN zLzzD_DkCmrB!|?IV*`ln1IO&c|2@b~eYv~R9nh-}V_C8Y#=zQ^GbNpPO?hpKJ z2Ur#gf}Mf=<|(wnwKD$yg+lm%-y|!Vp)6KY^&7yd-lXp53KzQx54#WQAAz;)h1*Yr zLk!{ffuQC>*x5k1Lp2sq40bE&?GpXAU8%ioqz-n0Rj(i>|D&rV1S_k>ry^jl5uh-4 zKfsnVse_k+ilWvT1-6Xi)eVT)JBUE(%K9+hEVB8@_?j=A!VX2#3x+WbkBf!LeWXs) 
zg!;lQSY`ze71rhp{tInnH=loqudR&xJlazZeOVf;`eY)EDydexqf*pqEcU zEkQ{(dJ=4&$73Bhy@fd4hzO@*gyEo?IL##j|D8qLZOXWwGh2svY^18y9;7`83J0^M z`#^+pwZ?WC7SV$odOR`I8eXpC-%;4{Y0xYUA6rXKDDWZy+-L^7> z1>}3;?8whBb_-sA32o;d)-a0v)t@=mA#uU))TFd4uxG$*kW?jd=gI9IbT*+vB(J`xsj6z=7~ zt`!sX<1Dn|EJYl;kQ+0-9Yn5HiqpdOGM{?XfvTAG{$OAxENvV+I^xie$E!O>eAS4w z2RwRA4N3I2EFxq5D>dQ_E;)53Gj)IGUszfw`1_Q8+G|us#(@h4u=gZv$_b6p7v10= zHa3?T^+9iW1+Th~m5WnxhGW6uWEeuh`UC$x$>TcyAFcSz{s3QvzQUUws1rE3_oE_; z-4v_o=-Wz`T8v*Nu>X4o3RoDL=WW!4_2f8J%-T5Y`UO#73Y<%a-ESsXu7r5OZ64X( z)Pp_gJ*kh$II~~`8u)G51y1*QuI@}7O)PZeno_vpgYZz%by&bW+f#S^j8%@IqOg*z z`8|8xB2nA!k}YQ_blJ1jm3(Lo-9cACK4S8rOnQa34g)y0lEmk_0 zEJ>U=IUY8-i&?$~zTE>^?w}ofgi-Cn_C)`JJ8?~%u^~DG{PC=5c-m@Ir15CqsUYkf z_^h}SfHzrj6ggxzpQ8dd>gZlCN6`}bXDc$1L$JuRC=*HOZxz^OClFy4IFLd;uLF#} znkvFCEXW5>Q_%epPR~mX*b;@U{=z2%!0(e-@D@1uMZC|SOr$OG&;Z^dcE;Z%(rIB6 zV)uO-=+zAbhy*$2u^Uy?PL)bu=4C(e`aZEH7!GnAG#2L$>k<)Lk-yZ!px!H;xw6Ud zIMEZao9Cs$0Gi>?!62%HlWJnYhJi$}Mbvig!_{^Xd9U#ZL(4A!?X;<>e<5cLBKif8 zp=^RpC*WzLh;m~8!ffW-K;ln@!8(2LH zMMHEN>EWwaS)n6X);*MhAM^yt@H9hq^XlU_pO}};d`>I0gHB*iIsS4PH5mE^5L%CjJ68Q^Ves7P*w`Xs-EYZHR?r6?^B(m51^Sh9 zJuWC6v$5>wTuC)+@5RoAv+(#v@ULM+i!JnNwu5POA!EG`O6;TZZX(U3BV#%%;6W49lC2tmp2_(BQds9g&d%>Z z+?x-=iT#rI@Ws9S`zU1+ zc%Lg4Ukis@fR~k_1lh^{@ysVE0nYRIdi%4wyGPfvop2ZAaMT* zI>UKzf=92@AO8OCEeT>!)ANLWnGqa~+=Sd-t)=C6;Ktpc*9K3MlOSVRzN z*-`8^i#($fdD~1@bqNgUBnnG0eKt0-Lio=>a?oe&G_0Wy{3LhHJPWJr!+2}xz!06j z;+}NJc&!iGY!zAFY4ErayzezUcrt4$&JaEc2li%%M>lk{LBx?w*k=@ov6gGOigs*8 z#k36fEj(CbVv*ZxnGnr`B5!jj$c|i=C-5&Uq zH}g}Um^6X8Q79*Y9A^0cFk-}Z9-D~~f2sGyQ}ek4Q@X_Ohrk`C@fmT{fW+>&Vd!vn z(opzRL(~y*GNl(QI}tpY51+qHmx3SMvRrAy=UTzbgz}b0uG|UV6sWw5dF>CwnFt2Y z#1iv~A7WS6dyu3%$hLydjzgLH3TqUq5O;&3o2Hsw*~j>4AV}2;u5=APy%?my>N61zlqKUs#JmXG`mUQ0x;g{Cr4Po$j zZ{lwyJHy3^57$`lm-J{=5bJ&u6Wf9h3DP#4c<}}md@sA8%E_4egMj7iSX1lLxut>%Y~#VM~ZZ zLx=^}S;^jHwF(~D3DtrH%&Isc^&Xu>wT$`=Rh4>- zNpxG)p*TJzXXwi(FNE>lCGMzL!BhDCIo7BLRdorcDh8>u_ttbmzJhJuA-BIpls<ZQfkm%V8?SlUl(QMHE7ot@9$3BE@X5mM1TyU^fcN9KLv(* 
zs6;34KvZ!fc-BYqL`#&i3FIC1d9-DH8sQse)D4Yz?r`$4yJRyORJ%J68BgGG2gn(B zqSy678SMeTcn0^gLbtz<7kt30GvIl_SotJ8`!sfWoYA_{75s=RS;NW*z2zgm*aZvO zgYwpzj`l@hbANWTXrjo8n7oWwCVIQlP-I5Jh_WR;h$!#D+ZAd(>j(YPcI>n8WB%mQ zH)P8N?7^sE-j5R929q`2r_LS&*W61ylClF)#n{fWGU8N#1TsE7tauT#)eC%Xi?!Ck zei~pYd$ACARU)x)3yOUJ6<=pYrB8-Ek=U}3T)PeUy&6W5q;$gzR`QrnE+z80Ca}*m znCnd#s01|p%)Wzdpj0lpY#$VR2ibUfPG{0l_L%*r>9QgoHJtbMnr^cL=$Zv^(K+O% zE#OKM$v?cM;b=E!@tD@ckn_ycKeC`#?1tB6cTm2tI_1 z6}uGH^AhvA0Uo%Nl@Ein?&4kU^L|^gs`FS`EM1+Gi7orVm`q|$0v`Vy+?h|FF_cI< z7$oYBUVoAm^?|Kk#Ez!nGolkO2^$*>rq!atBw>$n;OrlElNnCdZoat(vX zqzX~gE%B&9FraMg@|l|Be8MWtU5si0^;N6N}*I+Nk*bnNe%ClC_;^ zC3b~8V)i~zZI`H7#Aq^-xy(X)o@GQ8L+lYbhpuC!66azC<8Kd@GQ4sxme~$=+lWXK zz+)L+d6K$+D_-eL+;k$F>p<+A#u_dl3sOmTadPt^9$VpRk#x9yrq``G_jhiH`gaz_ zxgABg6TIm*mOhu-Xg#iZ3o-Q+Yx|B-yaYwp5__XKBixVt;WZ3AoH4C{ou#6rJ!2i4 zV~?T_t_KQNZ#eH>Jitx$3+$Rr6bxnb{pk&OEG8Dh{w-}$NN~V3E-3^QEd{LXAri08y({*R{f32X%xB0E3y)Q6s%IX zP$fF1yts(|}{0%F29A8Lg-Ok{%i@0i$C+hP)9YDN;M8BncRtBEDQe8P+j$KW| zKIVbF+sSOlk+nu}h4)y+S$NC;b_)$Acl`l(nuhW zg)p9nU{*9;WxI&BbMgOll(nf`(^uwtGq`+(J|P4Cx{?aQMY6tu^x?du56+2MJ^V|IuUt zP3V=q3*Lwmy*7Yu3wg(4@NN~cUogqV*g;=Zls&}QQCMqRc*Styi5X)O=WK|*vLaKh zV6-cV9=>EE&xVnAnbAfL>B zEOoAL)WzPQP{`4B1n}+|ho=#CeP(}J_Xa(^sg zG^1?7s^@}R32<6>?%O#OyPO3RQBXznLq++32NsZ##?iHOhZ$T#eW|OuF4md+=`PVC zpTCNeWej<(U>N7{X-#<9L|BszbI}#gTFw}RPSpiH;wF7;Uc}-%vI2T||8g(Te=y7? 
z*oL@siKy7cGTvylt~~|@GL^WdiSiat?^-t4c^N$!3tOWs~vXpk^1CzvceVE^Jp^I zbvz?cX^X$Tq6X{<>PG25fQAL9Z+o{eS2I($?bkEQLrCdcX{9NdRC&}kBS#=p_FvhaV ztMUCva*R5DpM8fepYcG9QHCX%^OaA4ipI> z0F2l}UeXY>%_Ig1hAw5L%*j`Dz?n6y$~oq#5s_mMJS>#@kqxP5z34IXzP{`F;+1>ZQ!*tg&r zlUadTcwA$ukqM0G3KlE6VQ*p;6Nqi=VMhLZzX}yRkWqi2Lni}kumn^77nAoBL*Bt; z5}4!0VAT-V?kL!u3f;Ra{`Hlf6n84V6JVaJ)G_2UDCkD4h(WQa0jD?cy4TE*4-xb- zDc9#&^Tm zJCm)2(1S9F+Oj_9&t=jzp9n8INuR}BqOdq0ISt^F$^@)Gls((c3w{++A!xpCCm;LeQE99bg zLCa&rjU4iYm2jfnSWXQqkg3*LyqJx(izkqWZt##Jfx z$jy54nftL$OYmeoip34^Kt^v=Z#Yv6_~r~)la};3YWQ-H&{c9u)likg`p3dCs$ol6 zjPeVdLhPv6K-RwoU(8pE&Y!W2H3`eVkHt>ss$=`lm`{XMK{w8WMX%|`nk2m?BgEuW(HA4>l5iC%!V@QRn@ zP;*dU#68f=$OD70-dr;Ca54ldRXhszMznzC@cnMglF)j(uqKY=4d(CwO?dHeGS5Qt zo(g90G2g!gJD#z6XL+3;_M+gliDl$LFR`*Ktgz_Et;X_hP^<8TWmx||qpJgb-oiCi z)Sxc&i6ijlExh9gILBJ-C=%|cQd>?GS#LU;gV?81Mt-X$c}PS)!??AH3lI6+E95($ zL8?@cQpK~x-D*C=?lo11iNu}g)^wq#qCNL6?}q{t1838q1LPps^_u5JaV7ceG*|>O z4uTQ;5%1&I6@be1@|Ic%#O%4bdCASoY+T}fUTS$(|Sq9HH41tGP2Y6 zT#E+xMHz=@{^S#H!_~vcKPq5h#pI@<>!w^e0&Ltv6dVrLZB%=gCU&3C49{kq&&iWs zVaY8(>je0lJH1&&-2Zqat91{S^@gbP0-p6$oyC@*Gh~8#->6%(g)1)wQQLvx+wd_9 zVo3`8-HTYzlX&8)?s_cdUf~Z3}=w!j3Ufv-C8(2%vOC6D; zA#qkzVl%Mu?=bcCXj}vW#{}K z_O;wWf%}FHbfDK}I99TUEZUO&OuF=g#K13F;=|KnLrHku5~ANj&WCM4mz6eWtIWXH z%#~48T(T5;bd_#`;nXU-v9769=z4;$)A6u=WSky6nvm6XSJz;4hzoo1x9QC9A=vPE zp49?&5{6wE@~T1TX0dc$HGs>^18v3m2O8`cm4bVJVLdnDG)3qI-?6DAc*iwnA(QKG z#w!niJ-X6AJV)G5N$k?OfJJA*nAVZu^add`iD~XI$*riw)6szXU<+;0WIxd__LC@l zlUR6%hXiKwgbs@Bc=}97w**-ru`mGE)gUP$2WfQ$&Yv( zAa`&i>qw;@J`*f+LrcCz4|)hSaWk^%d1Qj0&|ID2lu~M9eeuYJ_+&Ti{}@<*2yatT z<8|QuzmO?(Mt^f9@(;$se{w>k0jt~qoR}j$LkGG6yxECITe7HjWTlx@NS|XXW%Q7Aql*k+9s}_9UrLdKjv;57hcAh9k445R?z?lBSg{SxQ6dkz@?h2KBs>i}_Q4q0{qy;Ox{QNO^%1hB^xlpKt*dlBTX4}TvFVlM(8`runu zV3Yz*YC&`nJI5l`%F{jO?Jm(JfKUC&UnBU8LgI=Z`K>$t(F?B*hVe(jNyL7UUep7> zFwfhFMvkf&s$pk{7gxyOexvgQlHqM(CqOd0C8jd|AXdg0Us#E@bO_rNwYlb4^msC+ zKr-DM*qahN>4z;uqWgu@+y9m>c@0?_S-y(i-P`c@9?W7myjGmOdW?MMDa>dao#j!) 
z^{;r`0DQrf`H3Vun8S*8AY;&hg;|4WZm27-7@HMQ!h@fOu?CHZ(>DCR7KAp2-H1Il z=ZKf1VchOm{Ae)vH>zkPn7kK;ahYgS!;b%t@PAi6Z9198Z?p*$x)qm@=bQy28W6{( z6ALHNYjYHioIvd8PW@{vxpgD@Z1Q2$j?$mt?@I7C1WyrnHW9qH97RX$Zu4N>#xQCV zPgE#N2+w+p)Z6qFRqOB<3WbJWPYfI`P`&#H3HE5qN$Mr^srfqeNooPhijwK?L!= z$o#)B?m#?XJEPxC?AXD3O~9&jsM;(9IXm)hZBUhq(UZkF30Kis{Ft@xbf!E6qd&rh zf0Os?<8c-I{W;G|M4z=pTOY(&mT;{<$gvlb#bi^*vLvqRqYgXJYc-PU=UMDy3!24d ze9H+Y?+Js~Mjfm`Ek2AwC{CB^L`LEXPI!>-nNs;bMMUWjx<7$?YoGvS;w$gbOM=i; zW5IwcjO{tGVg$QL?O{GM$cIkwSJ6+JK-35WL9TNJzj$S1B2If4>q#=VYeeq_`0FFE zryLXw!FT>piLjw6KNa+p!GVpb)(OQjfUb#0?2W9UBfX5=`X{-Dg8Q9*gF);j@>xn> z;}=u$NI8scGTK-x6t!q{%0X0BCZhVCLoX>r@2$`37ILD%O0{q2!wIA5fiWk__ajDl z5_8sqGr?%hGeFL1tW9fhQtSl}gslxmx$?(jo5GU^z?5BJsamW=28b~lW#lMdPhfae z#0Oh)vVKJM3E;m!?DRbLeGngZ#Rg0HdnM=~2T6oZ*BXzOfC#3rR!uth%2>sRVAe>q zuqN1Y4VEyR9?g10=LKLzl2T69zgnpaW46NsACTc)#R^t|6vsfeRIo<_tFi(m=E2OO zQ6Iixje%rC_duXbm}C~+c!$&#QA24CvFjH-N&k_Neg%8~!(uFXSd)E}!$3-@{1?z; zRS!jAw6uUMs1M$I3QO*c()S#+aD_ueW067BW@mw4AMpG4aJX0ywG>;pK<2!Ze>332 z`YIJXu9BS7gj_X(v5!IP7AN-(QlArlguODWc&BK(g>GRr&0%&yaPq+*ol?>d)je5# z{?$F%M_Cz||fbuAQadh=e1+;lHrIAdt-;WUFHw z5Yw zoM zS+|F~6~c4vs0xVsKu2P=xNk-=&mD^uzhIR^@y>kgBo36gPwbnIVsE1SLe07vxUz`T z2?q0Xx>hs%f`hnUkPeEtP(J*yT?H z*4z_bwiaYqz)W9Y-NlKxA!veM)wSws6k1n0BzA%vtFeR^%-2#rcNr1N16-@ZUTdl0 z7x1M+yb$-0?7{DiU=re9r=r#@l;gfU+l+`d9j4wEOTP_Hhh8X3-KcA?{lD~9D>K`kT+JcE360Ft3DAX59Ps;@3+D5^XXA} z18X=B)-EM89gDS$$DU$PCFbHuZbY*8yx&k}(+(ao3(c{p#bevoWJ&!h$}i|dw|EE$?s#arWtr;JSvsg110X!)CKFcfoX2QE(XHLHo_m? 
z(;@H8-nwN}J+$en`b>T79T}-79r4Dp@9>DZSi~Ia(Lv~j6Hy|Pu=ZZeOC|3v_F%j6 ziQ&v*AeF68^c{{P@^{A;JLA!Vh_WVJUko}-0qkG`mK_O)jKC(26QUWU3V_A$C)3FfLS)_xNxv4+QF&exqtG;Yc` zW|7JD=l5wuWmnd~m(}RTb#8>?yaInS$%OR4cy~DOS`c?9UcZ$1DbU>wf0@9)^;vOo zhM)x&A?{Ht_Mv^n8skx_dXZt>=j!g0e>7;>XBcD@mc;{`zT_8 zE9zPZii0JLP1Irn$Zn&sk|TJUAG-P{vh!?^NJ7>w_(mSOy4VMAfcDWEPHch|Ci0HQ z@yTkm6c-|*m+Bw$pQP*nrq2i2-_xsffXqRPI%!WWsWF+m3AyETGJ9|IVLz;60iA1Y z$mcAG9apKN#)Easv2s_G>U(%g2=O=o@3}-o5_cK62*M7o za(h`?)!2N?tbV!AOX6!awFj6+n&-0xz@#+3s-bRc%h_hJ@{{E|i*}b5RAiMsEI5(# zCVzfqu;iZ1L1n5m($qKoVDrqz*=(;)Gg-KFidL1LtL_$Mhoa+I^YbrNHB}C(sVqBQ zKB;DvVgcGmZ<$i3so`0J`FcKjvHF(`z8j1(erFzMK1#n>DX&~wzOCH5bXeh?{2oOw z${tlttXf^ET`{q;t7NfBn}+7jwRVa6?W+q5Z0Dx0 zW?0=@ISp=mi2Bf3t+&QwtiM_o*HrgZO_bi2 z-_>fS^H8U!{$k^ICIto$wY{_kY2VgNksMHZOD}4?(>Nw|Pz;I@h?gu&lg% zTU{&7hgu(03u+#zMj5WO`DxqJ`n*AR$-lCuh385vYBiOY%KK%`N|dIin{+QBC2#{bDQO1%r#l;oAiRv6WVY24Rr zSEHZbJasxWmaFAVaeQ({+Qr~pP z89?Wbhh`7MMW)$$oRU|4R1vQ^+Mv*MsC7T9w>pmH({q27BuGALb(Wdc^RrEc_BDYhaRnelH18IBy z-7BAIxZ2*{v8?_M%V@I)CPgO4Ezj3$U^1#^YU=Jkv;R&@Kbx~X|3}fzVtMYARL}I+ z<%TJ;6W1dq#w872>e{Ah-a`hHg2b4rt$yDjpE%U8Yd;Jan)g?wN|D$0S z=P^wV7(J?ZU6ilt(BNF_bzOs-A2n%Hey1?1>WJ!2omQD~?#eW?c#A*(qSt2(SNt~p zV|&}K%DRDRhGBt;bN!D_*PVl{U2C0^XZ+HR-I}&4=UV>g!qIu>QX9lwN-d~*YIx3U zwZ5;;H4{DC+YY1bj@hkic+0k@QHip;dP?2#Due9UX#FVLzjw1EtJ`S!%XG>&WFE<$ zpenVU)jYD*Z})762G(25<{I1SS}C`b4lCG}6Ox=CRrO{3AG4fwbq3ly4J^#8>s8uT zG;;9R(4xj&+xoDwy6|nmlaiao-Z>r8OjFmU%}nW$@F8hV-hVayR8Go~vgSqx_4d}c zv778X->cZ8z~Q`kslJ=aFvIxEkPl_C{mW2bs-9%p##lv7PF$jhaG33Vw&|Ptr>#yn z%y*saY-Q_bx>u*Ju0#Hk_zRI;J`eg4kR&VWQ@0y$yDzsfl-W=9s%bl`g_Wb3(Jc9S zDW@1zh0-dNlKeJdS4^K@M$zAr{d0O1OKNou+SoR1XjnJoY9e;Nvis67D^@4c!N0N0X0pjmi@6OOy4-A%>^RPDl^J&d%hHU! 
z`g?2q+03Vf1B#aBYGg}`=1N{$Ty&Y)>SL?R&XyK>29YM)Ek77fl@nu1ALkY%2L6ox zuK(vo!rYAVk}~NngPF!tjFT)wn^?E`*Y26Cr?ErbjmiW?L(MdeMO8J)b3Ubn-3Z^1 zwzTT9BA464&d@w(7HWH`aiL3r^CkObrunifWqq z4VpaiI_{a)IMb%0UW9!I$7MGA^g7jzEb>YX{xRv}#)z4}+r&?a4@v4=utWCQWOcpe zjf-0hY3JxR+G?%&r^Y{*?Us{#g9ndn&VvATVs>SVe40xqmBPFzFuGH+N)U~ zr)!obIwrL<^F!0-r(Mjd%j_KgE%I5^m&`oLPUB(b>E@@*4jFXN&NRN~dfBI|&l;zB zs`)9SqBi}C&sd^#)*fFOnDF?AQ{1ln&UIOu=0@+#@0bnMi&B}@)YSQEyO=FEzoa#@ zbWvva?9*idvPXvgCMR_a>yA`5mUcF^Yfx*`+uXpwRQ{&IH19>$hitPY}UDR}yEA^+_b@%AmypdOXcYUXu=Fe)cXC@}iPTro>KW<}Ow}1Y{ z*GvCqD^p%%Z&FRM_Hp;@6y)pEVX|dqj?dq7`Kydh*wr)ZUTvQm{Igrs?c_6+TDohD z<{F0Uh0Ek+nmH><#_Jrn3^J&x@XZ*Pq)e!n9#}ESsGCi)(V)7;<=Z90&5IpxHjVK( zy_5_e%sY8f}Dc&M;3-8q{k#JdRO)Mw>a)`Ty9#|(hZvJO}^?|sr+lF>+NxD?7iOmuv<(MBL}(h ziYjGRzwF7yMy1#Dva@3ImKPjN-S~Y&q(y#l{ihvWx~6!hng6LsOg)e}Lu-P|U5_eL z)1td64ms8}k(#3n8=GmH&eGYY_*QYT{AQJpYKxYU_QaaN^n{-)qs}CJ%KcP1S7Iku zDQDMO%1q7WPG%n6JeIjla&BjnE4iCho21MfRpMIuE8iveMD~{Ct3S4X?U34EFT>ln z=Utzf&RZozl7_{etC;Dyzx9{K$5r8(wwY&&-`3`8elguwZ@Rfe>sEDx()bdK8f{|z z2L1l!Zn0xNYKHfU(=3hA=%i~Pol`lcy0^{(o3&0GJkneHcKFb2k8yZ`UgECQz~Xw` zVO*OE*r6saS8ftmU;P>!qrUvP}KzewrP% zGqk%K#2a?j-d$%_?ozh8dc6FvK^MaS<>P;G-#dMM7Qd$KiN*|#HGEeEgqOHDmj)o zAZ@K|zKdhaPYop1nb}TdLp84HZO~h0(9_6DN4NHA(VK!Xr3Y&-$Q2r$Yu0D%_}Mf( zBcj9K)uo1dwTA9>i!+C2Ptrc=VbaZ{+l98_O@l4w)m<%YRIy(pUVBMxdX9gxbL{J{ zChvPiUM%iw+p+E4w*Q=5^s`H@{BtczF#FS5(LTNYw9-q-B=9O6kG~Y6Khu*{?)%wU6J#=#L?(s)AMbEJILEDa35#V zu6SH(T;5Yzgn_TrDrdy+b03$!8b!m>I?YW@Z@0VQdBEJOd{kOc_F%0eu8rD{_Gn~K zRoFGVMdbv8UkwgB{I)XIEG~ZhFChJ3{*yXm{gVchYrZ6m|Ge$%nz$R;t1ERizv$%F zcFgNhJlv>t=R##i;M;KX1 z?Xn}Hl0OU!FO98Ns?h0fG(_?`e{_yUouAb=_hs#`ch&6}?--~Wkr!O*sO4wCF5Kb< z86NSW-~NxJvw)5&Yqs#UmbkmSyAeXr!5s#7cLsNNcV=)MY;cDVB*X{_A@1(((%skI z&i@vRQPO?yk*ZyFPJOj2CMhYsIn?goL3#s}RvI1u=D#UwRBgAf5Az9lqVu6ew{a$4 zq480-$Y8Fimtn40S!G&STAiSJFSAtdNAXS5GuOxbN|2OW63_Je8|{&2w=b<8)4EFg zsOR@SXZxgjmFgX6`%)TPyHp&l5l60V7+w&Z#iafCRh1pesd;S~-Wi^5RMF6$y|+lo zarUEwFFMt79z`+5{goMd<18z!S}mU$zm<*cu5Q&)IY}334p5uGE-jlJx8>ce&s6sQ 
z&a-MG<-Q$F#UZ6l>|L9Xu$1J0Jf5T>Og#Khh)UQ`*=f(Be^t<$?DoU1X+9g;jVVOM6G@~vyfATFd{b7sl1`TY%z^Kf^9Hq6?k&mfa zTe!5gNq*YBwcofQwf!x8Z47<8E$ZXCZ)*25sg)-;d1kHsp7S;)@<-_{vw%VK2VOQ` zSRb42R=7!c;c{@$n?9EM8s#OK>znWCIoeII9c9&Ge2RWnQ=cDF_g41Ca+vuVD!sHk zmVa;hd0M7k_n+Fo)#tPh%J(ihsXSvjy;s68%aLiJZpLbDj#V2J{}}jL)~H7`wx<_H zjr!pD#kydR*5R@rW&vFpytJ@S2G#Nb%xk{m8det~r@8=qBI^+Dat>&F1^VQ2ascM_;E zO<%k6buI1wGRaZx&sUEg_|ERr!L0uDYp=b1f4UzO7vv89?OJ(WTMW87XqksLA6{^& zcszI1zgz8{j*I%vuF3gFVy}FW{8&&pylaGHpXgs#l^;`G zu6EX|ZP1cY(L)#eCaRsPYp6}8$D4mL9>Q4^n0}YMKK+(W=}U(C$p`QD-9!5qP0ISW zK3CT(=)_?209G=mI494m{kK-7#WMSJhoPqDgk9CUDnE8l)QU3MVxXd`YQH3_NA`={ zlj7VMPWTJA8cGXGOGhXzO(zB(8j(Kn>K`i0XPs2TTjHv*j-fM`Qfl#={yy^a?WqJ(Ze{Yq;u2R8?W>2-QS%fhA`+!f80 z2BUQQO1(Omyp>UU5h*_nYuwr2UBAk7GaCxCR1Yj&LX3uH3=ixbV}4Nht}@p;Y-wd= z%snkr``zm+`EhQ7Y1>_!he0Z*bT%i$Cvii?KE2zavxf}zS-?!Hj0ceJKci>1QBE1o zqWMN5zuKgHQ|lZ0g4SxyX+(RiO-jtyz0v#f=5%kD8IltkmJ|-G2ow+7*7gn=c7Je* zPppPhqk8QqVWz<_Jtx)H;(JLGzi;^dI3rJ0=j7REuXng~VX-V-T76IZFz{a^oX7I_ zOF}DbL~qkByLfwl`v6NnO?PE+^NP;(OpyL|!%j(EZGGb0kGhdo=|h`$aLP8#lC!yV zt(LUFP9x}A|ENKOdl^|BBZ9k&C8XIr)8T|^>HeP&zmicOGtC4mXU2cInN{n_ENRZj z7Fy5Cvp%q|gQFN!7Fae&)unG|J;Uykz1(uKJhrW^;iKw}R+{l%{h7?==55)dV-H2A zr=P4ptIF)!UO6fEOW7pikX=NGw0} z!+om#O$`33p?{ZWyz30M(dc6oZecf28CiU%{0sX~FVDQz`i{c|8z=dy+S;Od?PUg= zos`zGvfMg0#VE!x@q6h5MWgto<5Kzi0>>H`%G*{y0{47oRZZ7Yn&6meAf7>UN?>@AuFnN(yTMJ znkH*5b>srG`YaB9ZYxvEqR!~Ou>WbZkhxOa99{o0De89qDSE04=lMW$e(|m!(V3%_ zp_YVCzDJtj<<98h!6o-Q{?+s9+2y-MzE+br&i{to+a>$5W7x}=oKX!V~q zr7smfIkURR^gStat2(7cc4GrJg{Sv6a#G2!6IzBF-B&m-le(0=iQN@(@awDGK*rWp z>EWQs6(_`nr5;guJA4gs^nRc>rhQb&{*u4jL$)i! 
zJZaLZB2`xAE4&AKxEl;>X-N4ex;gpZx>r=8WPj_<%$gq^Sw#03GrypOKjw$iR?(_m zofma)dVllVV7$6XKPf$ejv86~OWob`lV_vai@ZO-4N7|5{>6gyo$5&#OS%jSf^s#R z*J!S@b#Y~#ZWzl|GfVmvcc`}5bo<}(c(1p*%_qk>>3-&cmOg5c>dV_&GSU*83x9Hp z&7S&Z_e<=3(Q=Y1xXo3*0sCu)TE8mcXZ5!$i9c({>n-qF;vO%%SU4_mb7333%)!$8 zq_dGhvm&EhtxVoAKqJl~-=W;@o33By?=qvhlM+`O52qIKrv zLe0s{bve;ZbL5w-j{Cjpw>$K*)pc5j-ekPhzQT}ik4!g+{vA6!H(oK#OyX8z7T=zi z=AIJXuwKv6{gd}$J73wdx(fv}t8rdx^wVa%LzvYB`HaS#(wVI{bb32Yb$Vc6&WbyXGH+Mj$XMWfsK(e>YCdU5#uBf<@ zNVRbHWj@jRk1Km6WTc%`+Ss>+Yz=m`Rp$%}&SV~`bCpTVWscij|FwQf{#pLKu%gq% z#Mmv%{;1l6>db`H&({+d)R#({rN>&n=kCoJ*VLe~!*O=ds_>^lp;pJ3M}$Hz%Wj^z zDJy2^MVxqjHkz$+&`GTN6CD_fv0k z!zg~b*q}e&%1!H2U1Gw<_e(#lO7`y@Z286Gl*!gc_g_W7;u|~+qrKM!SUdhFZ)})Y z#8f;~jM6z_Q|B_sDOvwcS7yb~#bxeM=U;cIbUG8gD?3=nNhI`z-ESp**@ySN1NsRrj|TYcj!WL1?7!b)DNaC-OG6{$my&;Nkb# zIHYyd?`>a06INCTs(x&Bt!Ck<^6E}4nX|FVbw%izUaOsY8MNx}vfOJ1PFms2@54U) z5v^OUr?J%ji*ak?yx47#!wUR#qWsAq(dMM=LZ?psp4y-7Lu9us*16SqbXY%P*3_S@ zJ|u3mI~7pl{z%8DB`(|P_wl^%t-DE6_GyJG!?0j)yPk%ZRlMhipn+a3#>?b}x^wI- z9op3URH+iCfB%-E-yEoS$Em?Gw!8Y*+^@Bc-4Y}}vk@74BlboetxvWZ8{Fc)UFQJ1qg~Ow zq18`ZZxZbFI`D6g4TclDeX3@(zA<N!beUQGqp?~bl zHwWWgyUm^Z23gn}NKLw)w^pG$Ooaq6GPCFa+~ zYbM(IT4b`T)6Yl#N*LR@&CcJykJC2oi~Ne#ma1JWEeTFCD^bj;+1}Qq z73Y-WvPbV!^We1L7|Vpm1-DvD6_=amSLjvWRJdzJSzdDg(yK8n-u;E{DJD^4iT-_h zV9DsnxM$D4hcx|VcG7FHty~eEaP#%yAG5eI?hk_k?ZRY3R5soB+K0E+E3fKZ@#!4s z(`TJoTeC~itd<}niO+M7OWMn-4dOK-uEn;N-zBz4p0=#Yy`Ej$_(*G>dv(8A!w!TE zwR9JY6j9Pt-BhN!a!Oq5dr9=GvR$%tn{>M@&GmINey>l7X_lKt`XqRFSo>&LDK^vu zmI-x#lgq5FgZqR9JAWeWYF0GLH0C)g-5wi0YM+yN=zH_GkhIwPM&-D+^wRG|hnkaR zkF4DMFZ~fVtkU04S6`LNnX2DYvuXX1B)l&F=vFM#;XIRFw$ooTUqr;k#55UN{^c{u zL0$cbqOv`!{bSpq&K(jhhpxUe2fcDx$t@~LshThA=UV9RZl}-XrZ4}P_Aw^YhC8M= zSvtA;SlaBoK}siUN%)pA6NVdjo{}|pY-0bDM@f!0Yh@wSh z*I^6V`;L>69-a3y)-rdJ+V}vi-X^xCgn9ep_Uo;eYtpK}@naoR2A;;F=kV5*qwAdI z6I~w!nAtzz)w9-qtbgPFYk8Z!OrP(rIGEqR{(!unyqa`u0_8ZkUEq5hQ4MwbA`Yt;DcPtkYtc4{SuB!;UytYJunjbdu+ zx)Q_O(~bR1ox&{!%(atrZ7RB0yGql@t2*dUhh60JqWoC(Z$YX4&2h{GiLGK>b8CmG 
z#LuM4Wo)o|ShJ_SF43LPc(UnR{k774Sw5-LGv=3GSGJq59?Kk;Qf@_5MpS*1f#5UE z$JyLPx?X6&*_l(3YjK7$#%^FACZwOqz%E%sBj;@D<+Rmonvh+-EoFCfBL~iLS|c2&a;g|yQ&svodF`i?55>O?RbBQD0q(X-yZ%hy zoYL0P%XVP@+VC?r-QtRlh24ug4phb!ENdEN9Mfxh?{}u38<*t;Hinvn^-AtlZ0^+l zDn0D`#&3^*N7v33lbDS{v+{2-U2~O1v724+nqD$DW39KHV;e^_IW*Li^hvoEEs6VG z7|X15ogTQ=ZZLnY#G$xaxxqTg-_m`&UNzc})T-V!zAjW0O;SbK8}_XaeP+I2A!}rX zVOCSU%5A5LpYxKxo_ei(-}hH#{buP%{dnzXl6_>ntj#pc_xXUPem~saX$@EIuXoG0 zi2e7a15o%JhJ{?$7QyO8LYF}n6+5tB=Z~T zKiXl2j=$t4ZN%-ZHcDRl`AOud#L$`+%|ATM{7Ov@x0vNRmJgIzJ5Bca>=a1nms z8f99Ix{J(?y8avZ)9-|>pq(Mf;4W4_jWd3d^0+)V{_=)xv=n=dS1wtp?mxYHAQ`AeK&L3adt4{Bj|l>82xMM=hNR_ zrdn6(w!Y~24)E#(@qW+%k|ym3GMM2Dep4=YyRY4 zN$IgJBkaa8f0q>fU=x?u6&OwqD(~~!Ypd~Q%B1ak*`B-y<%vY9lYbwZAkt)u;&=no z)v3POO55SM*;I*51DT!{wes7fUs|P&o%wPLqhzDGI+wM>EuMJJ@Jn}O4fS=7=$w=H zZs#&zfBh6$oE+X%tfgzW(E2(PP^OpCnZJtNVe;PI(RQubY=igeD>$3Rmg?4y$9fUI zL;E)d53;Zjg;ufi3E?Bvr_ZzcZGV^VPaPZ^6)R7jUOs|dp&w=S&fL`Ko_V%=d(ddF z`9>S5M?{2VG=ID}|C7U$&7T)nUDxU3^wT+GnKjjx4;AfX2{nB=(&aFPbIZSCI zJFlZ53u)e+)t>M@=|_QYm$TkRM~#kr4FoGFSE6NYr#u>Tl~xE%D+2IH0-S}xey+^Jy*M2HJq;)TyeU3C$Yflq{kJ9 zZq2J*_i7%M_AXKs^eM6~yH$0h+Nn%8FFku(>8Y;sx)s>R_dNSKk97Iq6XieH<&mD8 zk88eLomTQapUMr&eDX^cXAobN^HH_c#xZb_?>XxlO>44^^%QjFWybfMZuqS6Nw?`m z-6#u9A5eT-_-&qGKf&~;Jcm2osw!*FZY${3c3QGTy0LpfWnRwVto_9YTMC)ECi5Jo zxfHr|xR3MHu@BK1MY~C!%HxE=^<^b%ssb9?OP?i~f6M+kw0^rj<#Vyu5#R5oyA+qo zo>jK7;|<*H^E~AKj$U(20=v%?Dsw+IQW`lne)bOy_H(lW(|7(Ce%*L%w+qFwpHKs5pYjR3{Qta=n^baYXi38HA z8a^55_R1SMWLUV5jiyK2;pSsxZ&Nd;RHwJL+suw@sJFe!%K!DWXcc$DsMUIeiL-Q3 zXL;?;vZw+sZ*-YqOTFS{=a%~S<>SiT>a&$`8V1%r-WH+u{o4B954+=8W#lZbYss(e zUlEi)Fnx38gRk;{^*7lm*8lOgAwy%+o_ZB}N zv#;%;nLiUh=b0%4!))hFmrW)H{Qa_jlS)2M`yBbpqcX6Y5|424?Qa{iTGsOMdUhUf z2mLYe^PgrT)&-ZD$Fon%u4cF7d?@&ypPlm~FSFFA`fAnk^2ZfloA)xd)|J7%h7KQW z?z`D&AZ^O7Rs^eT$&u=7^^coSMmJ?u?Y8B;3a^*FsJCs)?22al3F}l_nirRpXEJIJd$kJXzD9IG-x%PYxCxIGw*QcW|edgx3bBtYJW{x z^SQ1^sx8XzZO7`4l^-g)THsjdQL?D6hWkTjfT_C4zj}{!bc{~f6u8Cs)&;ymg!Rcy zYEdmc(iUCHW#?ylXSJl){4W1BHceJCxMQx|!SoONN6voEZT2B{51gEQE(eYBALVdL 
z{aZ`Fl1+JI^J7XHtGye;TltQJ_VIO(ist0ylq5BpEC1qyID6Gv+LouGyG!sT5p1ujfRWn2c4US)+QE)qqUYv?QzR>eHK3+7r6(@@pjeT3ZZs%}T6}*==^1XW!3CYWRoDRq5K~ zSesk(q&Bf8z4Ae+P0^&n&&A;ttLmn=k?ahbQy-@LKv&SHkmOPG83*}5^&x6EWFO_5 zHGgW4(<)LQqJBtygocgQdaW|`&+;L1P1$T_B2li4Yu9h9Zj-gE+D^AMwWPJo=(wsh z6|NKC$ZJfd+9$2&+AFkQYMs{%)UcF~r2iA=@vD_D+Z!95Yo?T&l!li6QE|6!c*~vE z?pCkP?W!z7LygwIXJ%tF$gbWx$&}D*QA?BzV?NWd^n4~<5+QYz=Sn|QrNU(1QE(;m zm`zf5=_APyW;J<3Xk&8}^Sg$1>}vIB{@!TQFu%d9^;x$%zlqFYOl5g$XEaZ11!+fW zvl{l&T?EC2D%?Ab+MYF4)RT2Vwe#z4H*9UZ*f_SOx$Cp|ul%)+iJ^nZck>u)Lpwja z`L++N35)B-v3iYa=fsQMnjIN!+q%c$myvP>yNJi#clwTe1J=qW`B&K!>1sMdC|12zJnLT5`A4g?v7$D&`gnC~ zO?RDn)6=#M-8a^seo@|ArXlN8`=S-Cy;f_z z<~ogZ`6x*Q`nyc0&6r5)7WpsLLj3Sf;I?w2c!HU{ zY#LejrS419n6?=m|8)JSWL2T;f4J-VMOY)ck!FmUG(Z~9oFYeaM%~H|_b#scq~e2O zw{pE|E4!TEEuO=DTvv&s`cj=!di{0h>(uG&)8VyMI-_*MwNupdrC#(?kab3rt3(@g zS{RIb(_guX$`IwhoE3SQo=X*?@|qGiqOb2A{sDK3T2m=+y80z z?ab?b-z|1Hcg*VAqBy7AqZ+KbrA$|y;cpQp%mL|q=}JkvL{k>39kLzcf>(t9D%Nr(8|`Ru(E9&0J=}rQ4;>5`o%EWZ(|{ zF3v^erp!__bQN^ucC>efcJ=9$cl7S0lw-NOd<`ey9&w>?gL+IIBqI1q)f;7+^19+~ zm!flgXK{y4x3x-(ozIOR`p|l`30)%Dpcbp4qcu$Py4nkQwb~~&J2i6+56uvHD&54i z%a5xk$Y)D^q+xPSo+8zv_X&^KCJs~$eh^=QioI>Pznw0Wq1LW|Kg#)Vc3@f7aSS_7 zIbLzPtDyZ#+s$^@u7ye?cB}Ggx4SZzL%l94NC)FKeFNri3iFkkMeG+M#KW|Wp&)7=#lUag__)oG`<*yE@r#9 zK>`?7{2t{`r9EFGtVX4861S4G7sF9!_mX@mGJ+nlnm*6u(*F=QxhtF=(I2MJrDQT7m8+lu8QNw6Cr#E z7c9<3Mc+^=MGR(-vT4L8iJdf_nn_NgEoggUF=`X9qNeU8e?ys}d?>n-o7g8^{kkJm zLr|GBmG8xCp|WkKAQ#l|bdHjTvQx}ss+!qIk!+%}Ui?g*CftSBoQaT6?Z#95DGa0! zNn9jXn9YPfSHfF>uzH?MrT3HB{1B0+%;~kLVD~`ZV=bnQnMZErl>{xTmP|p#&=~3| zYMl~Lb+0Wt@oU&?!e(j^F_do?)}R8Z9NmY12%q?N@H=*jMZ_0jnJP%NhWo(35O)!Q z{1;`G@*=xMAgI}tA!?eNXibTXK1%*YwbS2lGkh(<@?TZ4$^!O2zl*=Zg>cjPZ{mJ( zE2=-t=%dsd`lBRC`dqq}xl1Gnny8)lNTiWG|4?yE5uuWE$--gcGk=?#N!Zah=^xBH zT3clJsRYCHp&y8^gmdH~dN2B(OeGR|SK_C%Om>}eMFliT7Nhd$jBrCWk~KgT_f}z` zID|+PCU9@~lhg%fH8p@7kGkw`VgNOjZWm9nzN{Yt*T%J)k^ld;13G)3lPrr zh+*O?s!GDrwPG7Fl3}S9aR`}5Z58ixXT%ZIXxtsUz?~HC;?~qYB8p5GPVgRNIMbg_ z0Z+IAV{w8RElH!pXm`d^LP|`j&HOC? 
z+b}VOi(tX8W2(pwVI_Hz>87;6mAFn?(k=7>IskPtcgapMA02RFgg9|7Q6Q9ZA>w3u z1!X~uq&6^au)Q$t(m;u?^rP&A^a`_&(UyqRbfGUhl$CQ&xV3yPcbJu;qt6mmR99Tb zvhH?%2-(j6WX%Xa$xq21TAlb4T|1^x;ml^rlvsdTmJ|FMm8IgS!j|jls*=X-<1PxX z#nYgN)T2IZFr6WDmW`o*3ZFQ8a__2xsvCe~~W|Q>jlh2w!Z5>OD`1Yj`suf;>j8 z6~lN^K%Xd7qaGH|32(?!W+@fHon?9PC{-i6qvLjy@I$l&-{3atNB$xi;SIY;u%>b_ zqQ-m%w}N*?Et3=K@n)^_8Ppt-AY27azD*cF{zLo_^~j!H znQpX-&Zhe_A0(3`ze#YS$kX(9a-49QbLY>9M^UvKNmP@W#6JEn-h`~A_c6WbT&$J_ zY`7{_DdTv-3Uxl{X~<`DZTtfv0bNtjRZu7qdGZN$mWbrbxp$b?tL!{wwrT|7An7a3 zVH_xHakTKS7(@J|9!fjpPVywFt>gg35Z?T7)pIt1+$znG<}t@eN1>X_*hrs9*Gj7-qZw0DB3$6iId`ss zk0I2kox~G<4|`0tTvem`!d(OJ~{?Wlw+vu zqlgq<&b?EOR@y4AblWI;sVM$BYP3U8NtGasAcoS9+<5WL^1iEIzg|cBgoUJ%I*&$+XxU7 zOo+3v)_GClI_QfG1!s{@~-d4A}SG@Lz7I3>glhPdG;Q7nlP7fQPUiRD@{M1Rh7#T~D9X z8W3^*LItiZ?8qzB88(3u6oGd=N4~^J>%!Luzy|CCso@u4iCR$uFfyisg>{V>24dz} zRCE6zMuL#&M(jkbvym79&W{c%tgk}THS+eyhULFjw}M3Xc$;&0pK`&1Sj~j^!B_ufw)D=tg9bFH~ z+rN0WpP*%T@P~vC;(9!n9;ySAP@DPy^~qKE^QW-dT!~0jUS@zR(L;)TiE71Jn8R_f ziH5`$R7Fk!8)P`@<@51rXM{-TP2@39DVQmIL;e0c)ICa3FE~^DhnR>y40}+!Y64DU zHh8nEP?Nn8<9!#^lwSB=1#0|1p{DZ@2#^C%`AVaPvb_@m-|J1R_<;O}Pn3;cj>4;-H!UsYB35-1_eg%JUA)~!S$F8O4LF4oBgQA{)&0i1f@iZ*+?LU zfE?9GPD9Q1OjK~UgYYTh$sb`2&4UH=0%PJoP?atUTIgc%MKAz;0(C|IBZ*|AV)dxF zLts&9eNXU0jb<`t&Jerr4(J{RP?9}W{XNy&Jw)jWkf#bzUtEP=#pbAKJ&5)D5H-Uy z&`s|x2<5-(I_nm}wc?C>@FpNhr_SAgr^%+=`gTRhBj7s3iuoIoA znyp0jX(ImGf|^o4yvq?V4_VB+H(8Eq|0dY3H0-eu?4wy?KG+2lFsdqH44$$alYzr?B7F||;>gD`d$*f#{I0lMhdEMPT!5IG1*&&DcGzQ7ai@ctu>jl*g|HbFzAS2qPlMahjB4Y#VD=rsUh&2! zCW9ey1J%;AQLlU!?{EyCFdN@<3YE4l7^mA9?{cun8c_+q9W}*i7?noU&*q{Uf0r;1 zd@URN>G|+TuRt=Hffd=)8+#(?R(2TK$6yN_6eH2Uu^G{<3{~76SRHEEO>aTAY8Q5) zKg|n}Q0hSz+6KL8k2hX{?!9+GWmo`Pa|XnxR9KTE1Y$Wbkutzvn2)hrjFq|(^~ww? 
z(bqydS5e0F2Xx2y1fs)rkam5*KBG{{{}dI*tMR^`;J^$)t@L`-3unUCw_x5jqcVFh zcHmujHBImwTcH{aP_yg?=GY0a3Kk3Au+~{@5}U`yvqh{mw~kBaZeiaYAnM6%>I&_{ zbkffBNf0Hjg3M?DHcBFFq8nza4@QF{)L{iblOsXWH2|Bf7x^dHp*sj4Flr`|QFw1# zyk`b1)FhDmHSmsC#mmAmzJU9P-j<)avD_DS3N+FrHkHli0)+kIarE8n>8}$HUPvOoddehi;4~4=&0mysrbSQ>a?lU#@!SGv z_XXhcX_6l?Lh8hC?4L^P;xzstZwO8c30~ZEsEJVcyNag{M{V*U;tW zA-dQ-;=iEhUIcpUed2<-4O}l?8${F|en1tdduuWO$#_nMI1ZG&_uv(L1z~O<=#vxS zmGAHx=!g-8^=StFMlUe<`_NKmKhw>~B{!H@dKDE+?!fiY8 zXlSsPpv27w)A1<{neDB9u^=3?hOg?|}|`P>L!>kJ#Vg3y5- zHKO)XF;pYFBGa}#(aJZil#4K~Od)>s=+`ChQY?5H=;A!qT9i$GGi z1wKO)>h^njC2a@4$e1+1_}NnXVEwki-|qycA|1Nv5aQOqFrEt7F?~>P=7F8B0F|yz zEC#RRJ-RlYN5qkXSviNEjrCj|HxNA1Q1rwY3zq3dSpC}=tsD6JPFR~7)b=k0F9aF_ zW1tTW`VBT}GBg;J7;I_~teX_`QAs+3uxmv*pnJ}A>IynF-oT3IU`=j-^JjoLJ%nyU zVWJD{R1Zt#ENo;?Z<-!rv=hd95CPSL?vf|?7H$(apS#aZLBDl7Fds^Va8Sld#g(wA z*2HYs_jgc_p`z91jM_4Sc>F>c6K;+a&m;7O)Xg!FDefMuXwfM|>*m zL>Ld=nYa6nhTH*-dGL(EZzg3(;&IhH%m+Xdz z`5}HMhEspjXXy*nW@z-@7{l`*FkZm=iiPqiLOin(wzL_m+?Zl$eOj9uMnpk}^hNKb zk%S&%##v|@RVbjV1~kTE=vI=9M$T;ld6(EH?!mVX29*&>AE<>Pc2gUfaX zM3IgEW6t%5%1uIq;tDp(RS+gPL@Y~$f5g4;`ENnEU5(w|!@so}aZK##Z+sLq5j~KvzLDn;A@`!x2pTGoq#n{MY3y+1 z=N?Xr-}Y3jZhHSEwT-v=)7kSixtr_qlQ6XR?NxKr7G@#GhOTPMs-O zw<2PiSs=h=fJYLC_q>Am{{}DPRh){S42vdUS0zG2s)UhHA>W{q4~WP3EnE{fLueN} zv1UUEA7lzDg(NYbc!z$YBat~8^*>Hz9Qax>Ux#ji`~?J^FEZYdOY2001L zohR^-g&-fUgr?XlUcjC{M$8k9g=zc}X#3TmSa*W<9LNvhejqC{2cLA7d7os4*|>;4#o=94uEVahn{79EprN04h{Dd6@F0Ly+nF7Z&Oy z^sWo|Ro4-po`si-234eoJHrz_-S{REzlBTu0Uo3xeCAwniEx2mhnQ`OV2!bx2fsQL zzO))Ye*wKIlX`;+ zKL(1S4ywZt_FoNgg&Jr-)u5TALCv0qmGguedkSXn3w)O^B~sJqaQZpopvmxib+Evn zKy#9VrnMXVo2iJM4uGlHCCrB694*ER<3QgV2P#fARFw%aN;&BhA))G3^07Q;7LEgl2yQNFNX5n2QOR*on4HWqZiI9 z+(7r6FZy7AP61Og4Ex>;Jn|!i7c5Bv-f%JO>;$O66~bel;EOpQ^uoRh+RrO^X)9zx zy+Ff>2bEBZ%)`9ZV>eENGR=TCGXX8^BG{@FxI?jZ89 zg`GWv^?U(iDTgZR=6&GxED^bwVAf5+@r^=PJ0tQERKh{nfI4X1rQo6(QIDbJ{(KXCi0=dp#2y@@eBgNCky_G2KnkC*vg;qIa&B8EAk@zr&QDzB)lEBhc#fU zRdRL}@^EK3bAAyzgHiCfS;QaUBRvDRDIdD5r+ZBkEbb4?)&}r(df1&=!W<~OF2pbq 
z=!y0ky)KQAx9FiU-X{+uE0amS!aK?lCmw>mnF(Lz2JN^Ck(mSLu%3uVSImtd;A>M4 zU|&MPb_{?Y*CYCgn+0_t6F%b!)PO#^=}m`c;Ds>63?Fd@^dDApPY1f5erY`n(qvF4 z)e&8WBJvqVoQ2Z+6XRIV?}3gmLYBjxZ{RHXQg|pMVikFn+C^J1yO=`8Q6ezckt6&~ zo6@_%yR;fy_!h{~TVnlRL^y;?8p$@cdiRdtn)9WpfZo8$pk4N52gRkTlK7Ygp@{ zh^?Wf!KR&o*hC%g(}A~HiAebu@@mysE3>fj_LJAZs`Eu|s0q={Qv6#wM)wW;#Z6@F zHp3pogJ*XF9a{z?k2VBs^|8=@b77Mj5#6|A$2LMI(TJh-5p(wkm97wR!&k7*jiDyK zgQZmqACv*IcC{eK`$=Klt1(Yn#7#Vv7S?tTmoX1S$x2}&R<}7;*%pw@y0KGVfDBjz zPgVu18BT7;e*cA7urJj3Flf;$P_V`5W)X?lem&xrx&LEUKSMkcja}f6bDr^tD{mrx z84q7M2fW^W$muM{dKrpmD->E0?G1qjuEcXWLGjdzn?ab`jvQbpxe1i38{k*HLl$5U z{`@;+Ag=QRct_;>p7Dc)9C)+;?-B6>tByckp#ygKKb&OcpzlE)qR~#_B$#Vqc-9_@ z{x&E~8qakUdipf7)E~)E@1nV{mv8WPpNIg7c z2rQ2|qGb`vCkr0*5Pa|$@byiQzcGdCeuKSbPhN&)TZ=eT7xC?5a3F)h275?MBRj$Q z^@0Kz3U1muD3b~B;%>-q)q-z+1tiGF_?{As?^KMy1MrAHVV$i6?Jx&1|6%aB`$O%1 z0)M)j=g|#dI>vD>c3=>47L(yw7JUjJaYh)z~9Ilb9jTDAb9$U!@!oz!Mk3CrRfcY z8U|*b0k~>^!3*z3CMyIHPByH25VDtdF#8j*YF0q0421HuruySo6xPW=@YLI2;g7*% zA4Q(37gWT5WC-;Jy`eTEs??!!VZrBN-ZsDnUx62!2)%0tpIQdxU;}QNfb5JCpXx(K zAcL?Seq^!*?~3p4mpCp;PWoW*d4=2Jj0V4BHQp9 zs;dK;!+SUt(1-o;!rQrHk2YdlhvSW|VI6$KK6(Hx(+{zxB{5o@fJoN^`LkK*G^>lT z&VpU+M?OVvR}Kc?xuMDiriFsI!}( z-u2Lrd-i+|W;O_GCI@85so3Sc5qliKX+RdNmjO;FPC+v!AkX&>KeG!v`#0jRdXo5ca7R@^!b7@j8#Z`x?X<#drp1ycvu5 zLZ6T2PU0K?ghz~l_D{!djD^ko0tL|@yU7}zjE}%B_H=gY=_h**tLG->s01-=AYzd( zi2IJ>&3-_=TEN4Z5Fe4PJ%t!#7nGPg*3@Zu#zpW7gRtXz|F0M7CgftCVTaqHuRs`b zUp{0n+{icx`?L?)gJ7H|xxf}KgvFW06nEy=MpuJ^`eD8awJ3_EjgI;4XY*2A*OU zh}CyM?ca!JeTsiNjmTjDX67LL%m8FmGQm{j;Qa@{QoADN9R$mo0ycU&6mUJPX%Z-i zOOVrjjx)TF|9Qe=SV>V(^xcSA4&mfs05W--peHSmpPLO9<{g~LwZUV@!g4J{ywXD( zjv+RqxAb9j>j|PRkhz$@VW3&B$4qu&KZN6SYZ3naIAWO*$Vx4TB~T#-{SK9P4{tgI zbT;VevREP<6oc^nZs3ISSl5x56;nKa9`=tuv_mQS zd=rQfPU9Ar3K`w27_rreL|@{GcVp(f@iQXv8n%Fgf;U92Hx>%18QH!-%<~el3hy5V zFWDc=WL@L})sX?NBX3a#bT_=k5p+e##T$?R-%s|u?_qHuP7hAN#vTWAv!^Fyk6McY z>2fsur5(@U_uQSFf+mF*A9?EMHV#ImS zAWy`2=>6e*GiQyx>W;mtgILxH#tzfR+1eVM zxjo09`UTZ-5O)(JF($JSA68?}>Inn+MV!EDaodoyTEw%+B4-IhVUs*z_r~MAS`Yl( 
zUDQvk*wQW!SheV`Tsj;$RGG8Db}$A zw7^@eRxi;>n9G-PvD{m34L1rW5ry1AeuO~7pDaSOG8?w<73j%}p$e4fOZFYvn_kot zsQnYvL{Mp)a1!zlGWY7@CQyX`Eb}q2EY&%)b$-xl?~o|A9QdhdxjDrO#63c+wnr@F1N2Btow!gmONg>&Fe^ z{^RnwJKRD%?PzW@I=2e@D^)=_qng1l1edx3mVYH;FLUgXaOf;g?AjgJ>*<6up5_hm ziT?P6vAB=(i9W?#ghceiP2hTJBz=y)4`TW>~7pToJz>_XL|nbcH0<(GxWQtGAo}LJy}?K>eMJ9xkQOUHb?S%aN_Q z%%8v~jpoPkQ*esCnCCE01CUoA1I~Lctm0+N|7*eqIr1{}k$Ok8A}hTVaj*$2)^_Bh zwqvdB!L5hEG=-ax&XP*TpP5T1;ai?Tx%Tj#pFy!sL58C@pU&OpR&xY*jXlUdVT)NA z?h&PO93O_fu^BlS`>hVAes;u0tat)?e>AL}zIYJczz0$Md+sSt;VQUw(3Dr8!75&)T7879$vt#xSEG%X1V+K6FprrF%p4{k=WRXx zVKUd$jaKp`k$K`ijw6ZXkcjD;>q$Gkp9 zubno;k(%hgqX|C!8R!%jA-4_lXo@r z*V+#%zB(;Z#pqcpqeo%K)-hKlpCyAOD&`gA#$2E^>8(@-GDVhP5+~p+uUOay1+oL4 zcpPp+9i|4MBj^caZ*0KPHzTzO8FHhIn9qBh3#X17c&7Y#%hW>ZyjRB2?AvG`<#j1PXbs z1<>jbkl`Fk=0S<1V0C&y4{t?$Rwc|AzT!5951-GUf!az%*5QdL#jT2Ev?ep3*~1u1 zG^Hb@za+;bt0Z$U!Z)!diX^2{H`xN*%r0i`GYc4kxegY68}SooSlvk9KRoN;Rb~>xWcBq!%ZnYgh5a+NVLOdeG6n;5B6>-TK zVGwc)J3+1AhZQyk8viiviG88Q&`I4Tg8*a0qPesL7FHHkfqA3<^AM$<>S=$sr6Trt98f^$Y;v5r5dQFm`AUJ%9sI< zIhKmVeKj>iWsK;CRdo=%bRoZtO;Qd~%2m%)eyUx{M&%#uDtLv%TsGT^S?;gQRYWPq zDW|Enuo-MGjPq62pO@jBB!aC{HLFtDYVHRgEd*lqEuk(_-gGb{!Hvho^6BbN)nBPC zkXy(Cq{hkr_sXk^dCEHFTBV)xkkUalN_9hNqnOwIS;49{vG0^}MVP`^)y-bR9RLe> zh_n0+Ay+iQZBHvEL>8m|Leog|oZ5QnZsr=ZPqIwXOdo+=44^OLbby1NtEBEqv*qXH z!Ll7#Y0+|bwM$ZTwb8+T2nVv3>Qm574GIt4A zM9P8WM50LuA#^2|)f+U`G!kVirE6uz@@>+)%wdUz+6m2tTB9{c*+=p(?tVAZ>D8rB zrgGzXTb!H?<8oDX%2;KiVpo^g-nZjlMVzpkawn&-V*8<{$&DLZ_H@2iRT0+mYE2)F zmGm1`Vb^-4uegsklNd@5$qHpdCC@}sxuoNI$6JLuZoey;Q<|>~1{&ShouU3%rlgAFkTa>RYQ-b*K7Oy?yJE?iyZ(`=crB zk@lI5d_CDbqLUD&$V2sAO*a}lYmK7vxuLj^JOtGmByOUfCM=Yvo7Po6DhV$cU#?v@ zy2FBS(D-1m#7x)H$x<-iXAz33(0HFEezhL$wxI^@(xKeI&gf>gzJLAIhRBAZ+M3FN z6_2XOhPc)NU4JRRs_c{px)?>S=%<#V<*NNnC*AP0=^n%La*plntnNI}d7)L&P*qoXu*01#= z$nEWJjr9!`jZuw18iSfsn$OlxD^1J@N^FilktofyulUftMI+v1)S&ZkBwG;e9iO(Q~a|67;-czp@*s zo0``RkC+ay7-T)sYMFr=Ro*(ZzPo8(*Im{N{r#2*rmC9_w{wFM_rw<^zDpaPpIDaw)_Njlt&g z91LwpJ!?Aq|2R4eu&S~y4BvAv-QA56f`Wj7f!+DrjvZsi*gf{x?bzL2V|Sne3Q9MK 
zG)Onxd(Qt}A0B6%QMl*qz2aMIukYK9=5@ifAq}5o5A-%!%yVqktaCFDyJb3BbwWv( zUzQctbsj3Fd~}ssW=#W%o}?a#9v1QGQ)q-$a@(>dO*iYwuI`=(y#Mv6b$o02$l{BQ zwQY$>s>bT3$m%w~!n1azu1G(flT!RgRkajpR_VUlYpkQKURC|05+T1wZd1|U^;-Il z&Yk@)v^(h6#jc}VQF-o{Y1!l&L2RRYz{1To$Rxh;?#~_3^L@EmSksH>}ZuL`J}S2d|_soc`&j_DJf)^)4$ z2Ba>G&Iw)iBJ0hGxD^$;hR@vlwz2Fwq1&eRZ@h+E$7!9f-B7USXV?7dvbWX!YZt>~ z`d>r)^7!H%)wi{e*uV6&_i}LTroXR#O5x^A|J1Q*KDmP%tgRHSY=X|T80hS3Q(?G7 z*{HDMn-WNZA@zNJ|SK9gO`iVqa`+_O*(ksy4cPjjFWfsWgR?COaKY*sm}fr1M1e zu-qo6I=w0Lac+-a{p()p-F5lX_e_hK9^syIJic2`RxD|JAdS;cWe7gUyriL7Ch4Edz zAsU8y2KM7UX1mNVaZ?OvbS@v5ElZh@GBUfg?4i`c!ocf!>pp(#y+3;ByX-aGSf5(y zF9zxk)H>1lsr+ccx-^SO*Eb&?J3WeVpPSMF#8PCUrUCHpMuQnUh(zx5j ze#L##Lk9bOus&X2m2&0ls+hw+Ez3sM{7|;hPSA^$H*f4JTW7J<-N9L9lqu|N)T)fh zZXS2x>)9BMoX1Te79YLp+O+9l-gcHxnaw7#J~t^{D6DSyF7%MrHlC@iF71)|J)+Ai zueWc$_sI2a_+r?ynGkH*Ygn&NohSQEvaM=NNwbO6i&^#aRe4lHgW{0MTgwgxyTtv{ z2UA^lh1&tsNV%l^T)QJbHg3k3{V~eiXH7#)nm7N|)~w6TPK5!sZo_pCmM+WKm1$7C zvHC&H^2+t)8%s=nK8ia1dfcnsU!pTK%Y3ALmX&@hdph)A*fqM9XuC-kll?HN+t=x- z9m;mAvUNIGX*eIXzNIlpsiSA&l;?Wg++Gphw7-5*Nl41+$cHg&b7wVOH-7AXq3xpX zS9%QZWZ7z^_3I{OZepfKe$VpQN_mA#<=dLI6>~G^hWC7v_c1TNW8TMF7o(BBwLM1; z2X|FN<2?kJ@~hmCYshIkw-mx+|a8|6M91 zPyO6A;@^zwYJZ)zPAl5k_qFa99dgs}fPH{al{X;yTiVV1aizB_H0uvYBYL8ncw^W@9Tw>zFcElHyZWUs*2I-D5YHqMWbgG#m ztG83M(ryuCZKyq8*{>uoG5fP=SntH6CGOfE+`Kz3?yKE*dPuOp%AR{Tvp>d*DcOb1 zYW{2BJ|K;bvg*PSkw;$r`p_-$V8QU(xq5d!9)@`Il(#K*Q5sEB{gmZA~hRLP>=ABLm`n)LoZDt>_ukHDk%{qzQ?Ys5~ zI_~1Br6@{|SH!)}327Xw-C2K_{(4;vp|n^hTKoOpPfiJ|^A6M=*7xyP*=cBx*1@IC zz8b~XU&(6r-7|V*j(^?o2EX#cQrCu)(q+|Qd7MpYz?OC!oK~sIi+AQ-Obh%vC3IAb zS4BtT3GO-pW?hc;(h0fmb=T0pYI9mjbb8A3%Kf^1OhfgbXwA_qZu*gv_jUir&aqqa zS5zBGBg{N}#&rw|UgG)8bgFoz*d$GrEEIW5$F=%N`VAN1hFxj+q8e=au*Ic-SI&#& z4JB3CIw>Zz>hH+Cc`xd&Dh#dKwL05raoZyIDQ3~K^1{r7%mk03)1r;WOI2h2q()76 zI?NNBTKM#DyTfyW?y54^pCO4xANM}dfB)&{N6lr9hg<#`Vm9zrze_>6)^b&0o@?@} zl#RcB$!ksOEV9iv8kQ+DvOL3{yfOW9Dx!OHWDMN}cE4H}RbeP|D zdD|NAYmP&8vx`0BI>r2%y{%&IuV(2FlJav;Hw@AH+kR}|p?*@oDBmLa`0Rl(!#}z` 
zPkFgF!BQ3JFt^o!4r6=$(|c~4wbu3xcd{y0ky2Np?;rg;Or~*~I zGK`v#(Y5rW_8xDeZY#PfTE{p0>hMsnJH_Ci|-j6;Vy))}c^;k{8?pfQw zL9TrUxL>bdly*I8^ZWlEAAj*Eb)k+&3(XG8y4Ce=)AN;oN23J=tE1c_`Xn7K@sbx? z^!2cAy{kok?r@pC&SATk{+j)2hdd81G1V)}O8M_gjZCvp-B+i>2cDv2jo>Q|2Oias|cZL2eNZ0PW=&-LC5 zx?B(3>Fgz&lG-*jFMMXsK547j2G^LD8GZv@ehP{oV?Q-KH-Bj#dLX8$Sbru8aRM>s1w_qV7HK`}pI_ zqNtf=tF7C%_3mcZwRgywcH3M}t5&3L`EocWxL~;yYZ~Q}<2&8&t;-0Ft9d`7--a1S zRK^U-+)-ww^TK^iz(~J;yn1^scWI`Rmw!EBNs99iy~Nb8d0#%~Y}CHxemk%(cx3xz z-&hNs>V@&0-%o#)@%g{3A)0ocH#<(~dcD(^HfG)`GrgL|#LDov=%smyjm-_T96z)e z=3DMyAfK1lF}B^8!C#t3-%QUdPtl2Y$?#hf(9i#Q%SN|{I)4@1PWYPK=f|Ry`tSe5 zPb`knYH9E3QRutX|FO4?+5EB}(ODk?-`@|LlM-G3#J(Z0AlSEUU|^q?pY7X76SKUc z2PTdyUZ(1*V_|OVF7tcqS+1{H)IO%;*LSgpGiDWzX}DtAx`k6v_m+E`1-m}8s&2AP zcl;KeQ1hd6+WeU9Ni!>c7?j(0cV6a(){o~S^CuObq9?uU_|`i-J8h@(fs1d)Uft_E zD%zN}d}_B)xh!qpm-TVmN-t>UniyIC<1)$ny}P^qt>R5_ufMj4`IUA&zkBs&Ej|0L zo}E0-yWVx#W4lC=RB$DAcgC@-o=HZLqhd}MJeA+Gy5N@To!;`AcZv1CO;H&eBSyWw z`F3M;Ue#Q?lR-jAb?5mVZu*V4^;8yQ7Jd0Ie0Hi|O{T7|^+-1(-@jVKSXfqlN<0~* zj6a_Ju{68hPIs=eyzqIP`CJq1mkf3W^sDh%=(NQwQfEZtl)Uix_1~joD-t#( z+NMXAj?fwJu)}?Rvz?w}y@uNdN;5MPJ`a3#<4vomyZO&_M)>sW<2Zct0I_{jv%Q9o zOM6C-|JV?{tL&~(7q=ar+nVon-DQ)lB`a^9>h|3@es+FuWw3#f?ThAjTTO1+!>yTR zh@OksxpZ5`viL!9m*T%BEXr6@u|xZ*d0)#QYZvEacLUq!V&^Q!$n~NBd!O*BA@N7e z2-{x4KL=#=|1)^5i=eGh9+P%AdRhF9qAj{tT<3Z$w%=-Gp|wJFxy&Q8NBoDBw`B)( zMZ3wax0)^X?Cw6-I$iIe6kHWhpq;Ik9h}|xBRq3V?$ydS!cRRT!wp6s%-&nZnC{RB zE4!L9BhozL!Pl*^hB>wJU0%I=ju~LrrK9&H^Ag$XQsc~i$ z7`0M-sC@flWPEOFdbPIkA$xC^wO;#LPxtHRd`a(fhJJKo&^xu5_>V zNLlz#lUVIRy&( zf{goI?o}V~)uFS|CB@N-u1WL5TPHlP54ExNc;4(!r>mBOb+6ZV%UK#bIc|FKY@IvS zMK)!2weBI_tDLXtjcBN^99Yp*yf$Z4mQ8l++@tv$OBXh{$_q4?=o~RrSsFRNa49zZ zQEi&+^d&hwE3$k1^K2wR>^8Q$(CdAOowtU?9GyR8epLr@re!Y76RTb-W}C&>+%=ET z@26L!yIZDTx#d@Fje*Vsn{;Pghkq?|EJ`h2>iN}QFLEr*Ey*wIR9v2YIcs;`wvxe> zzO|7}a)q;=v&jZ?J5yinpLI=n(FtYY-9EO6oSvyulc`_j@x7bZpr|gs%`WO|G~Fn^ zl=3j@bo76@ONBqJ8(ojNk9T@+wo~!Cx~Onh!N8hldR2BcPD>n)+YPh*VUea^EemR} zs`IXUSv911OV-}mBu7@I%vTgMLzKmU<9w)m$s 
z($dz~vYXMs)jgU#^L2DJa?hc#=rUL=02|995!0M*1S`8KXXD> z&&q+C2TjM=ymCx+dFAljBtvmi>00krnx3BbebBd+@heh7(^K;oDq~C!+iTe+*~B;{ zHe2L$L8q}eG1}zKx~IL~k55jm{$w=E)1yo4A!i0|4{|Y`TrMVEjmQYy6+SP;thSe> ztM7<_y)BM7$;~t5TE9w@pQkvKb=5!Y@~inGr(i2Ri=pOW29C1c)z8aDR9IBj|MJQ1 zle{<9JRv9BzUi375wGQ~kN9-4c2aug`KDe2(HX3Efw`GxOdk7H&g%KD;L*=O@Z zs;A5Q>IP}&$y4MxIxEdC**>vLwYgw>#3{rr&wiC|*UI>eptuRKy2)`r?iG5}jM7+d z)y69#Ah*?A`wHPzxnpU&s*^SDRTbs&m4Wq5DlN_?`pU-1*J!rTaB13DQvG8?(#yD3 z38ORil=av3Y_=?@qIH?WS4pR6aL$w>x9XN+nf49iR5Lx(FoW~jqqqgv*6@>a|CXBG zo6NlGoNn<^%LJGOvzm)KD7AeqVL(+cZtsihX^V0N9echaRc+)HzWyzG+gzv zEgw3(HUC$7R3;W{l;{;Ha+9*$GBeZP=E&>kna}av+2LNN(`~{$S6k)@=2hcLyObX- zU6tD<-R|eR>N;&Jvo$tP-Sq;WhpY|R=eu4{H|I#$(bqTM?06sjaou;vqQ6Zh2Da$7 zvHc;B63gD2x66`~TSPxjlhuUl=~-x*yPAX;rx}jXsS@AT##I)VnHIgvdYW7o@07Z> zc$>o0eyMM0yE&aF2A}f^HS{R&nwSwf`PsQ=yTbS88yXk29Xf2(=)@is&JImwSy2h! zV!OniPr6tL+-(xu#%g~efr0396bVpR&e#PoYLyWZ864F& zJ-MQ-Mv?JJiy3B_I&+$qmk0goPTP(q{wpWO04Fm}Fm=nl?10G*e#ELHcCc%h|#Gr|kqS z-Ky4w6AL5CE;Sz4U2pH=`!&d?b#GToZQp#mh-1&{UbKzck$bY?g>Hy-mGl3cwwp%C z`!&69m{q^FCaA*c*Xn{X`6Kcd7oMxI61rQqYO$z8dB}yJYcBD6*)>}-?|rU%{QgVRasp@W3^O{ zddTEj?UMAe&f*T^i}pudYntzC@!or?!+XW>f(hRhZ}+{v8-22-pTmhD*LIh^Yb+Dx zw<}YA9*&KRvdXNMt#S`+Gs$nBb!NkeoI7c%^yr@>OBM)I9gEw)=$+kZxzn+R(BwYv zdp!B`&F-Wpb>Zd{Jrcd2JB-sbDW8=WlHa~~LCL(*s)}>-IPLpy!}ETcV$LqA;qqjm}H+J?1HfSM+-t$aLB(JC@HaEpE7FP~%YLcFJ)u z5$ILT&aAAc)t}?P=jMCKlgzTrbhTfIU9>#xs(lJt^>O0n`5#tM1HUYdy^jbxexhIbOnq;s_U_9a z&KjQ(N@}7@x)jxwv}rKcEY(faHc<5CUPqT&qw2%8tz{=o>fOfr-S*e=7-@33+C9xN za_i@3;fLb~lzcSE@;(qexQ)5fIhA|%p0t@I>veS;r`dngdjIQI+QPJ$?4gBY%WNe} zJEyjp-QC-4GVNM4BKlp_yR;|2>T6CYGqpX@+;wq(ZCqRXG%qFlQQ^_b9MvWLa*Jc8 zo8;XquI48eysKO&KWwJ5>uvo-?}_S7?SX3V@~8r<{C$;SI>R0NdUW*&b$euErtzex zDd9}ik8fiVzWrEU(MGp>vwj`!beqtothn-wL8`8c*o1O|CM(A`sB^OU&4N-)y~pcB#QwJeY3NYl^K)D z%`|2iZ8lh~c-QFAa8k9I8{>ZnQ}i{gPFVXGY*bFJovMuD#MDgnw&95Qhwgvovu*!p z``YxOJip?PTzPtQqLdVrGqfSttaGHZ=2YNn&G?L22{89dC48>w2`IK4Jam@sWRJuBe);_1*fgds?$l z+mo8Z%l)(NrAnD2ilZ9V=*%_!$7qRSSkrXnXN@(+)9tF=KQ|v{drW4YKRcysX4hi> 
zs>6+=^-7#1+?=A4b`_^8uQq<6MMehocQbvYeQd9_t!2IO~t zp4a)HE-i){o+>VkTNM+Vxu;wq9@X1qo9i1#x87im@DWX{G#EFtWV?h9P3uejvj5Jps2m|a;%4SK zLRfdBg?OtVFZzO69&!zDV6U(&o z`{rILep+XzvD84zvmFb@l-;TcSI*F}v+{7B?y|$-f7Tf$ z2HG_Z&r3HK&n_KT;aXkPpe5g-9jp_qeNQ*XaEyVLBCy`EvTsF7rC&|UhIq+BbAm=E zrX>qCn&}+t}K^T_NZSXDD>;i-dc^dY-KWC_m_s3W`*W8p>>0M z)#u8U)sEHwR-Ug~*)T^~ro{sS zH_k14RrnkW{kJJl2xHR6LSur~c&%mfQL1-Mn$i`fMCLMs_E2M;UVno|o$K>Neh&#nwBc}ii?G_ z8kX9dw3}&OkWGe@;ijH=6!qcTwnmBMlF(eUQ72U2)F4#XS}RZk;m`?smVsBr|{I6s%S2}76Vl4 znaR2#R&isuwqlRq$W(q3{E0Qp{9c1s){a}|YnVvX5SBAjl7s@9gYZmh&nbN$)i;=j zd!;^bb^@4}^kA-YgZQ;+PSbjo4ilzhWc#J}=oLl7N{CSTHhDCDX=)I?nUb6<)0K{( zRWn+;h0@76$zQIeVWlxtE|V=pDeyh&NHSroY!)-fFNH@64~?_(Zm1!qY(a+&5;6Cc}>O2>(WPgp+*PIwTkw_cs@HwSSXtzo>pE|P7&>x z+zJqrm=gS?NXT>x{A9OrYn66vT{A{^+Um669ze3i6Su0z(-Wpi*RKZwYEWDOEN!^%E+bcbm zRS9bpn>Bni;^ec0nX(XRo@!as@y6awFO}7*$zm7g!>7q+$UPK6@*>zfW6&PxCR~%; z#lKW1Ra^KB3)w-b06y3wX)m+5yWnIuqG$11nWfw>E@Q&eNoK`FY&{czfvVfe?W!{I zA*_LI;vpssA1RlqGQ>VGiMKFa{1G%3q}^wx z!-JTloWYd(|CpyuX0H9Av|dgm?gW6CR7lU z#0TN8?1V?)3+u<4f4`GW5uVE%727myG&gHjYwS_z%Xi40i^eJwCGnzZA?(Xs)jVmb zaE9lbAv2RMFk?Lj?rGbm0ZrqSH&lz6jIM{Fd=y5qF*D{F%qrhuD&GcH{yW%yCd~U; z$u=!Q|3e5uUmICg|PJClD zS{oB&nlNNf!Z8Yg>+lt3NlzGDEtuur3-jQqECHpvHz*k2m4}gEOoge$ipofp zHdDLd%%~lKH(U&_)=PShj*l&#KS(@-XFOr{t}p+mN<08dWh$%`{ofi2k6;({Wc|wV zoeXq9%a}K|LBp!O;L1GVEjU^Z@UBvr7rr6iD=!qn;0`~K+?do}3nxB+xzgLP09(PL z_Lb){Te^lRSXVd!#mtq>h6SxB-4Mf2#l6Z@=?|uS_2AELk#>pqR8wFm+=ffoAGH)Y z%A)zOL&q|m{FXPC(9X#~W9kW>d>N*eg2}nv=x+=Z+px!vVR$v7QM!PYzs}5PApDwG z^ph;nCDLbtKL=&UV=#{XVLpEd)A!?G;dsD7@`WQLkS%0n(k{##e}p+21T*EP%#umt z2JB`cjJE6O8#yw~`cgE6MKPY~Nn^3sZxtX9{zeFKH<{UW4^dZ@uDq&RD~=WmP>7k$ zw0Mzd%gU;mH9YCwsvHYx8*jY5O?KpfOV{r`Je=G4cd$$};r&?`5 z2aUwhu!roW|1rVs#=P=oST-l&s6OUT|ABQfn^{{FZ(ssPGM)PbZtq-dayt943T9Ri zab!Nxdb?06bb%`{n(5UepdIESk70th9h~%Wctac$ zu?jd&I#|j;xO5AJPjKz~v*Qm`ca+_jMf)il!9K}_H8&B)$qd;9$q8#%Azqf2GMjBH zAI9A7cX_-JPPX+RlXYVn_X_OaB}^TMzz@iT_j4W{lTXYqYQcu?^4kaXh)UmKN{--X 
z9!FCxPu5wgWtvx)Idx0ku3TEhyB{EO+M}P@j|t^O@i|(30q`HwP^w5lYhfas31`tl zH2@Wy>C$j~ZzA)wuH=v1%z{oQ9^EI8cY#geM7%xD3K^i=avO&01$ZeVU_EBz)8CkZ zJk6YM74z$J$OT1GXVFrnU{ZIjG#mTzL3632ybH=k7s%O}@X|&|h44vlz^WgQD(D=( zZ>vx$>x%ByB3K8-Fiw_;1)?)NgMRS%_o6nql8Nqp*kGRWo$|SAmQ*17B#e?@LL0~r z?TSEVa(K@w$ul6kiS?$R4og>t{D zyR=!Bj+M`aeREg71LcDKLK}E^&xr^1*w9KW`ZSygIXXZiVFum;!F7>~#0yJUT|0RT zdvrSdWLMyZAAzTO0sV~c@I@wr1kb=V@Fk**gtt6Y9ID!`j8tA0KjSZpVJzJv?#X3m z;8-714N;8{6>#n%Wq(LUFy0qQs|9blqY#6pND46x{Xx_rkHg0^mQRu&5K^VTM16ea zf3UHBp(oQC7GxJ;oP4b!mRZAK6mo{csJP42>R(vw6`2!Cbbp~X;VC`jZEz&&qL3lJKT~~xJRLqDR|q5$@Ku$ z8Kn+N9>ZmS32v~%r^;sv_ds_qglBO4{*+`)Zr>G^VmW-*lc;Ef!sy-uo4OFi5KSueBOS6bYN`xV9UPUw~_MTK@g+Ly`TfL7!im|ny!A6Ru~ zU{ZdEi{2Nea!azw7TJ8B(;r@%3lVY)%)VlA5pS~>UfC69=D+h)FJx`V3m3^4YSqR+ z;2``DTRkAT!ML;_hqi%dv=j#VK{$q*YV}Nh1q!`kd~AjjumooHLNe7BETb>^W*#;& zk|;f2S_un6SA4DNqB4RFWhSnHlkr~mgE??XJS^#eAPV3Cf91*M!G}#kYsi~-D5Bmd zK`XKa-!n-32cBgh9OAvOOz#NpYJLqG7Omk4e88GKe)~x$JmnG!bNz%6S##n=g7i%$ zg2~eP|NUe)h0`z%e!|uchjlieD#Qqe`aak}D$u`DW-1>bw-9DXlVL`;pyJvOZ+MH! 
ziTJ1`Rf@hS(aj}lPKQ-_8?}>J;$zijV%b7*J31RLQE=JyoF771Xmf0x6qx` z10zm>=lcYnU!a(Ve%E6xbv*2y#qf=0!An9BLCjL^MQ`et7y@2@N7f$0|6PyP%YE*Vv$^!tJRR)Z=`WRu~jFM;Woi(cns zv{_f87BZF?^aalBRbrG0Op8>a=s1{k_gPDK$p>Y#li~f5 zmb=S-sJxWTn-Uwxqq|b18bnQCF7K(xPz+a?$?wSgrH`lpE>+%9UQiVhzvjV%I*N{0 zs3@VgH&m5D)zMb9SuB>~iSvutK_C34C$*iQxCIu-6R^|2k|BCE3l;a^ppBQ0M5QR7 zyz>SPl?UhrjfOE2BGZu1Q24`BX+~`wM3vPAA4`%gAwHc(9WI!HI{2g3N3|<47LaL(feo|S`edpgDv_Ht)5fet%d7tNsV4k71bY&j11z@ zSt^B5Xn-69RcOh_P_dQpNgd%or%8IZ`L5$!F)}_O5;FeU=6q}5!~U?Ogt?$$!uAp zZSt+c1ld}Aa5?*?L=|`|mj4fXd{=mcI%H3I4`Dv>`YOKr4L<(`IO+*7Cg;Mq{7Syw zC|&0hFQI;rDJrF0p7K9gYdp9OY}{~hAUa@eQ2kg8QdvixU?A|Gpz>5A_h{l?DB8}; zh;VxF)~CTpGsTWh@*La6Aa&;goLeYsM_n|5=y?J3K9cBaMYZ0Siaeh8*rr;9Zt^K1 zRGu$)lluq<$t%5x?T4fbFjH-?f>w$X3M0hZ$zhr7d4Ss2sSr~6%&YLHR-iW&h?d14ti%hLv}zq47gn`D+BTKMpS`eV zZwe>r@mwUP$;rU?h`hV>mXf5wM32z#3`_T4(sO*4jRUHOd{jesdPZA27KQ^>dZA%vsKbe*!h_- z)fc1UrB*dnka<3$0Fnmps~%?NZIoR6RI^bn>?IwMT@{+6>2Vx2h@sS4r%|a{Dn_Wb zqq1frJ{FgQqVAv$;3aP$r^v~8MO1LD@q!a@XZ=A=JLHE@wwS~UOoUk z%7>5Lh<4;c(2Ph1Y8?`HRa?D|-d=_k@yJa1_+P;iZ4W>sG6fZX~AFtDdPMPyv_?{#=K) zj32DzH|ROU5RvYK#&&=$G-O6-i|%EI8p*R~s29eO#g5TKc+6*ABWewx8!S+fZln5n zEG;0zz`tf?lc+*d=|u+8j~k70h&IgD$<#vS@Tg zKDCoLGzuMuu~?}i^{73FtE;SzpEsH}{YtfNF8haSzB^j&-l{6J$Vb3Fodr@d0M)mF zJ$XR>1613Bw>%E6&4HPlEp7oLU7@;`u(#Rd-pOQZJ*wLp@^L&?kR?o`i}8r)n}I+7 zfr4BmN)@h>k1R>hPy{Mop{}+YEr~Hi(rVP|Zm9l38RC|BTPl=kf?h81OD@*~iS|Wl zVK%A|^H^z3s$FCHKl7=*AMk!M=_cwoBiLgrD(WcSbOb)4R`(7do-anlxF0q%2eqfZ zL3MFYC-Ri2_KK_`(Es#CXQDzF0lL27g9`K%7|e>!$PLig75;1wItY8n7sWhhA?tb`?W`i+_BvI{NMh|%Z2deQ^Dh|K zL%4-XkNbP$~Z4SEjiLGXL9@H`^oaByWhzyFMG(^0%~p@(y{Wdc#L)XhyL6GeDPmtHJ;HAzl^0r8cMI{9eF4i&5K>s$Zz=swc1|+dnAAe&r(Y} zqKEVc%DVT#K2c~+ou}86Lw}+@4Dtzh#caBe-l)MWB>vry1%ta63;op*klw3`3}b^T z(JXS}>fbfo3_5pp?4pAFXhLSWgMIX1&!&=jdV!CE(4uyhKA`>NFKSXpd={Tbr-`aV zg%B+FIJ#33YjK_Lnk=0o1HP8!Q)!!%sZ9AaEmUF5scI^CcLjZqkHmlttoBma=9M5C zOP+Ha=N`tt^U^o6+8N%vFJ9Gy_uNIiJp>v#g97m^qLwFr8cvzd%E zhmq@=)0Y-I zqi3q_^(p9@bSLi*=B}3EXxuCyFUAUsSjWG?$wR^8Nmv>Y6Gg9;?53vBhhE%z?6o6G 
zANz?z2GpJn(n!{QEcSW@R1gO$SU}fUkN)Ro@_Z}wI$n}N!c=`#!KkEYv&TC4RtMIr z5wwv2j$DPNq>SEn1ua! ziVlGWDiPXP(-xj(I!b*rKtKIugQY{L-ISoa(2~^~iF(5yR4)G5$sUwmU!#9g#kr3T zIBftmTYvJ&RIrF1p1(wj6b;a{?oA!C0PK`SzhMEWB_F+t3E0R#JbxYcoAhQMqWQUl z!GQhfD6XM~_(F&IUm~a${nzejfkmLmT0%V7NOyV$rzh9xr(7i8No3H;M6RRk{W*Tt zDQx;SRl-5)|DNb(&BcDN5VdC0fBg&Fyp9LnoM_3pl*r+;YJb3 z7J%GkyvLtdMLcWroyR}8!nPwLJ!VC2k)eXwwXfWN;EpAjf)Lga z`LANpfz)R8Ac73E-<4oo_3bBWO_f#DNP+bBorr8<__jIKpIVjg4bQuX>?GnhmpCC* ztC_pw5%-9Ep2W}x})O@T8JIj;C}K-Vr;t^9Ey4@VP^WbT2l& zfEs5SU65?C6+Mz1Rh7yJ?ax!hl%b$Awd#B^HK#wSTL(}>nnJ`HjVfg#zWafMK2~w>Ck(qrAjzQtf*r}7t+ZxCbpDQ_q-(= zyuuE5W0yAQd(_jNd=3KtpsK-+hJv`%lUav%9y9aUOExoRToaD_V)+T}H`tp6x==X== z7bSF)H&J;;vadFHfAe!Nh&<2_r_^xj zGuGzGN*hyOcI9h0xHOflvX2}sa%!Q?|G5t8kh3qDbaZcES?h_4E7{3?ypt(!{~o)| z_`L!P>ASxdKcF+@$O*@NFm!ut%8+jGHc6k_^)~7oYs7bC@@wGQUmyj2PK7400_szt zg<$Pe{(k~KV#w)fH8G`%zUXu8*o9}*Aj%xXl5X>*zK7=kClXJwUM;kZUU1e@LQRzM zTRE&fQR^J&;Xc+|MYlEpi{3!SLbHqSE2qjd1@|lIM@Q2QUyXL$ULyWXqCggTX9E3( zEu6*eBI~yx7A!(3X^ZR-f3k$A;7=?$g@vj$+Pvwco<|up1>HGq;U(7N!4nV zTd3rI&~^WW{fBa&LMN)h_3ZXNqUkAocE1PK@h>`9|KSfMbQku~cTg)!Z^CMCV3FlaYkVVK{s8Yz;LOD4cYS>jTUo)n z_M^M>2o$5lBhS)hDHP|@&u)j!jV3O{;Cqf_0WG5G6cn)TP}$8U`-Tx23yDtu(XY%y zM{~Y>DJKLZAlRsVK3#{r)Dr{|Zu4dAM9!*>}Cu%qm$M*27@7Pa0=@iopSNOF9 zJ1iGFgVSe{V`h?LBoIj%xk=(}r_Asxc3ew-Px`Y7Hd10Sn% zLONEyk3Umm$XCRLozxx#OZSa(ISE< z>Q1+hd0=WUGxX-lQ5x?`gm5NiZsK_tlhHSUT&H6*lR=V)@vZAb?4$ht6v)F2Tk)Vu zvt$+{Q8tE{*$s7B32bu-Y`g})A4>kM;wu(kG80zd^%i6bKl*61sD=)qBG(*CI!7Nr zlJmBu%&d%)_d>;SIPpM6ozk1$;XM4!k8Z{$Y<)8EbT99Bn!InwjyK@zH>t`xv!Wfb zmE&aP{UDhd^c-XHhS&W12V}4cOH3jg4&vUFX5w)1yXXflISB?)*T8+q3wuR#F_Uuw z4EyfqSovLI$$M7fJ#}9fVo4w29A97$Dv!ggn;X?Z zXRLM#XiY&s?-O}jsT!yHsVqiKD-Ye7OW?`*cvXSehHmgMR&)n8xrJxDg8FMV*xQaL z`b<7408viD&y?WrZP?c;{$0Yl&L&@NAk!Yj?gtWU-*RF%jD6N;`e#0N)SO8*B?w^# zeXO6vI3u$E0eV2)h{8jtDo+wKyou98u#r(L!5JU3+WW;P0&>*%lErEVKc_Bu}-nTfa5i|V_KVnuH{0}i}@Km58$ z<|?R9Ce*VF?9zg}(8?5I|2AfEx_7_Un!$(*{6R2S_ns0ZxXSfqF zS77BLu>)h8(!tFVZ=--Q~|M9n+$=u)Re&z7r8gvf^5j9b}MRPKipF9twzl(~f 
z6Mc!ksQ&&3`fM$~g38-E`n+qXp8n?jHeu7pc#=#`A+zXKUF3WJ6e>A++dz&pX4Mt+ zZ>@Ohqh!h$Dz;sqfC+fcCG37O?|2;xDP__jm;2D}(P)#~Ws^(4#WGBYq24AYUl zI)v5l;VkVu8kZfYEuG1RiS(=*!E60d>+MVAZ%1|W5^oCNXZB}jbLgDdVtXCQQ~uQD z)3Cu^tltwVVjXPMnlFE06}a*|wN#OMW)Y1-9mNe)35|uh)L>QEracp9O0g^5t~+?E z1(p9r&Uv-y%&x`8mF(nWB3n37tqoQ@1$(|gukSAXuRUZQ89o1S;=pBc$R6U^990|D zTveuO7`vm1XP(A$=CjYQ*!etAn!48#g3sJ!O$E^J5oQJ#3eKFgKg8S2h-0zb*BvN- zB;*qtl89FC@#V9+%b$*s4?buwOQc`)o%}orYU zo!o@Z-nJqd+{Vit=?+d{XM_0Mcv)-SV*wqxFYLm8o@EQ3y_Tn$Mns##o12pt)taO8 zQLGyee(OWlM}3G$n@&c#Mm_O{j$J8V+1SKo{62@y`~hOZZPvyM+;S4Vv~PUcZuYJ} z`Pdp9aFzbSKH|4MSbH};Ttl8{2VIuS{M2G%&Ob~@tIy1y(d+8Rr(|LK?!=+Lu+XPi zdUN`_XXz+B!7J>kMJ92AA0mIn+1OM0LMGb;YKZ>{&-k<&PURM0c>|>B;$77=w0Hlb z&Ne}b_a>jZh@YH>mAO;1O=V3Cc)FfM{#pD}0d`)*+uooK%)w`8fsy^FYwDOBio(*) z!hR?rp7?>y)g7^J#4vrVr!CR!f9yjY)-jOoW(`>MQj1F?-{o|yyX7I z)$&BSqrzKZhj#loc_~@B7gHTiL2%JBOR)5H6h75|a~-p(-`Uw;zw?AKJw^v|$T86F zJ)U6}b8xmGt^e_S->LG{TKF0CA|pY1PlyVBpbhn0U?g_yM3uW8)L=-w>cnJQKm6$_ zHs{J7s83S=Br6ZXr{>^cgXp`q=bzp2lBeXUQS8Nhyx|W#pq?6Ak4UnTi1H0w7)Iyi zJcw`#)xjW8hg`UXH9Ktuc}rvwRD2h-HT|op5o@* zj+}xjK=oOyU?%v@mikD2Pv&eYnG0CsBD$pKh_`B$>u3D#C~>Wf^Dt{V7HW;@>pZm< zXGSydgwI%LB3(#pCW{O>N9;iTltBbY;#}%5^}7=`GaDp(9`u<728;&X_v1bjHL5uY z7FZHl`}8OsxdB+0uEZ_+<_gYo zYFYig*u-S^#g6zh3yXLG0_;gl&>P+a}R3_2A5JFq2YhY{?Wj8Bj)N%!in1Og|%^^X9FB9(m>?eX>(bhb3|o>zSqc#F?i^pQpFjhqIZ^Ofw9kN4y2! 
z+eUxw7`F6DwC8(1v7bZevBohOe2lte6S&uzH9A9`y_z?e&z>n|b2tTBPn=M4QgjyV zkxTs?E_~oSMl&(w&fJ?szvT-^wo&?d{2P^G$)(}#cp%9!803MMaQ z3aJZ-{s^|Qo-^D2{MQhk`ZfqC2>)DxPxS=>ULZP7;lyD(xh{qHRmimVY*u|J{hJz| zI-J^~8{L9js*QU@^?5{y5@MOT)S45bo*?%7WQmPDXAO3dCCaHa2D9g3;EfI7oN#8$ zPT*bY`$Tpz19OXLCu!E#f$OM z7IYdqfy=ac+Ktp2)tsG=#-7#Za?6OneOT$%yw?Hh$5kNvo;-gh{=A<0wU=~`=&*%8 zrKie-6JuZ1U{$ zBGsBdovZV#*+)*#bg30GS;67pmj8Il%?;{VF z>nq-Ia`=#^Kg5aDAf~Uv!DUYJB5-je*06xeDV4t5IgoJ|?DiGjZ;b`5=e<^e>?2q; zIbD={Ai#Q2eK(5w7FTVuZWNJT;4DjhvqT-8=m_G`1?qrw%DoA%#3^`*`nKnj)OvU6T-+n>+-BYP6M^2+e{tsx zQn+ESo*Cd9#O)*Gk$0dBZLFjY``(3jULgML{2lYYV7t!X!zkkLM=W9uC!W8kh~B_o z^ia8jGR9C>*I?tf`Pu}A2_zoQ;w0@f`}K_qtPS>CBWnj9vY>Oh5+oD>#x20dcJp)} zIFZ*NQe2>8{Fv_eHge2u`s6{(E$-zhJ>;4EX+Ers@x<@mtfLkvt2H_EH7BbAxNRIZ z@)n%Cn7&UDUE!Pfz8s(KNQ_S51h-MOi*CyRB3~#q#{lf$t{4n5SVv#nmiXP4+~PrQ z4&^NS7&|_iK0`FQC5T*pk1ky`rwk@U-bugT$As)r!TIV$@}NJFI+Qi8r1L$R3j7sk zZT@l(V&4g&JssHxzj4wveyS2Qzn$FKk?(lLKB#XLzXS>>pcj*a?<8T5&R~>Zod0Oj zEnSYKoWipAbLZF?o+TVR8NyFvg7!=1k>&pVe%|*|Z&#oag9y zUj!x9kPrW&Z*iY@QxO3>fKK)JCNS6~&ZrlHneKxYVwu?Tf@Ri*tGy+L^f77hzh*{B=(3C zJ8kL;^~u&AP|*gWj27oXw$cOM#SQ+;H1_xewRubMYFAkv`@M?qvnS^c!gtl@mxJgM zs80x{@;obfdRHpRE?|9i=cF%jB36(y={|s)4L%be#?y)Yn~a=F6w&2xEg|kbAU4ME z&$)DHPZF>95g9k}2CkqBIXiy7w zR7P0$GqCVrBJnl$_&vR@Ih?6aCl7TX6Mdu$xE!Re!8CgrQQ$t&#TQ&~n*NkGD?0;> zV~rG5LHI8CS)ssXuQ3)#_t<;w{MxYq278D)50k_geO_J#S+SR{n$aSCHe+ zQPCJPqf<-;lEjI-F?T;pFb4Z^dq4#BT>%lekguoAk}qI3ek(UcH>f%jr?tV*%ZP&- z)aZ@utPeA$tH5OkiLuk<*X1$XqhbyZ=mu=Q^H};rx}OOksu5JvtvL-=aB6Eu1+Sh; zVCI;tD}k)15p{3V<5Ch`)YDVz$w1jyPBxvOW>}RE9fbWp5cl%THmqtG z{Gwy5Tr3{h_&cYo@8SFb;yg!Yxgw^q9@EJpL)fKD#F}JLU-D$n))4QNbTh+cy|By` zWVcOJ7X4tMsBeC}Kuwy!++_qEnI%j$4FaVV&@oif8IW@d7ze%^gcsfee^$~@oJbdF zI=O5c=g;4%=!#Vi)Hz$2WBmg!n?^QXLWavGj%}v9t*#`Nka<_(9n~P$73{Ej>TEZ^ zQ|J6)!qwx8`V8JQ;Q;YjxRMLzwdZ;J5mQ$3rbmCDHnpNJmC5SgB&rqA&wEeI)Fqyj(xLE!xj2y%M_b`B zp618uZY5g6dciIRaYFD6zli|9d?!-A=99035vK4)lgVV>SjS%0z9Zc=ANWs7l|5El z#IDxMLW_N2$b?bIGgAx?I8ne(8-1!09G5rYCF(F 
zmT_upPQEB&UQo^N{Xwp9V%O@3WW`v=QzC;kwkD%{a1u*ALnNNV*Z+k2-`yMwZgjjx z+!%x(s!uSFfR1Nyelmxs@{XU?1}yLZFFJ;Yu42VTks;Onm$P)cFR_vXm@!*I#1G;V zwfQ>_*o7!g$g=TgFFehP8bpuDS0AAejMtTJ&_CEpM|uxGn7BN~yG>zIWEx$p#i|>s zS9H85G09xa9Q;JqT!WZBm6a`Gf@Ce|uRkY>+xc~ZpKnSG`-2mLQ`A43K+Yk=2Tl4_ zt@)ja@PVgYz&;=QKTT%=7DfBD@tv7nmhKQlP*D*BySux)ySuyJ*Y57uPV8>%4lE2r z0YNFH*_fIA|Az1SE-n>`-I?b(=RWtj&w0-9^AlO=^|@V<|lN0&BC0 zh;9KP7GV)>h!%bDr;+SVKkR=A%ws6=pd4(}oBYHRoP0){@Fdgwf%p58|4oJ0e*rb$ zgB!QVQf$<$wI_`J!*6Tn*#@ADjNvy__^B5)%r3;xQ{d@xzB`q7Y74h_ChlFKX0iqL zTL~OcsB)!}3ywhfc?5clC{TW4QH!$RcV1)!!(jn?z;0L8)|-rcE3Z_G94Z|~k!P#H zKDnR)3?^s4OP<4hxpAVW1l+b8n(!`m+@F5Nj^vdcKukAQ@g1?|5KL23hc@F`wXl?bvG7b? z9W2}#4Q>ffnOlY){s+Bq8-ARCJvD=sIHA{QEN%iQvkOI{9KF6Fsz9ej6%>nD`Vn4( zwd=r*-DG?xVQ_be^H)$=7r;A4!;{Xz{}?gKj_CS#M7eCPYKsKTzXQSHG!&YCxB%N*Bcm~=;R?HRb_)T6Mtpm0!i+&yNT!<{mHg-sp$*^ zJ1b$2tI-+~u-wn^H!W)|jJ1mJT7P(FA=K^$;Ltm?m+yGne%@mqKDiNQvKs~wf!%CD zA#KXegk!t4@WbwWUYfe=XH=IiSn_lBc_KXubBHT$bgEfYd%o((RlFc7I+2_F12T^y zw|av&ejvW~;|1B|G}d|+w(Cb+bAfG6qc-A4o_q!# z8O|x}@l>7{p|g}D7jeTfj*wkcBTw>&Pik7)F!E=K-D`&Z^(NQp4aR1oJ{nM1tp(k* zF(5)i5bik9Uh}U&e!eKHG!>uy0Xutw>g|hNgo>Fc+vAAgk=)&UcHd9teIzQR zgEXu0=w-0HiTL(ltYii1(s1H$7^>?sy4zxvnposb6o4?kdy4$im%W&SFMKA3rJ+uj zqQat0D%4?_D|xq_WR4?HFIdr4Jr zDLTI^Ct{MxWm~W!#ZY_F=!UAv-qay0`N0*(64NwRR{{(!h5rs`t>zGy?h~a{n8RW` zcpcqfZt$`vJgrzmI2(9kMOpcRVwcD1gjC z<3Anw@Dg6aq;_DNO}XxR><2TU(D6S}TUvyMk;Q6-q5o-mV{@=H9;`V;j$5Boj_+Yt zaXitxs#u592aD)C*@$>luQ`-yEcS zV8Uy5?jCE|ju?85J6(Y9XqEO0uqK5QxheE59_LDzkT*neU8h*(1Mu;cL^JKr3Hlgz zu+mdtcHij?8At5sf%O(R8oH*rr(*XVP}p_0 zn{?s4Ag`^-H64YUP9-1x11dcRmmcBoyV&busHelp-&Pc4OBeC7G2|3R`f!S}-d0d9 z8NFr~8S7K-Yaz_IC)q?T5PTeb>K^{o1UnzX|AxVgYoH*M<9p0!A)i!F#j;RN$*K(%Qt`H2;!@h0mt94qNf z)VM=t+K5%k(zW7>2a;nnfyp-_ORfV$a1*p%mbG|d1h}hpS1cj2-6KcJ;*Q7B4N;vt z(USv~Bf96%IrW3x4&^NVQ8d2AWLP<5*IFEVt5zp7&OmQ00u#7FjJ?R~twPPM1a>9y z)1z63j^sqa^c@;(W#Ob{VHFl#H~eD_vFjjg{yOYE2YW5SJ7%MU{NjG>u?Gw4!Ax|j z6#9w#a1Vu8jf=c?IP6j-F18}KD+}@l!P6$PAJJIq4miIfJvawg?T!UigdtpSE)jP% 
zrv};+joX4TKY7|-HvKjyl=bvLDx8k52di&I9(53Pt0^4j3u@H>6uba-emvK?o-A%D zABRBId@AZQK)jDw;SNxs9=ZA;vZBiT-ZLWh57^5Q*k2sjbroM)2hYC9-4+%iIHA9r zJ>G#;tYTH1;5fyxmi=S}h49Hf#N8$ZoTL><{hHcgS@_%&5VRBd;Z&GfI93ycPTdNA zG>MqomNhNP*Xv`=HRx;E%v~-)DJqO@O~e+LvHlCWl7{Tc175EVxl()Z+Qh25P}QBl zeYM6WLQr0;x+*9aAyhfu!dS<^>l<-3T|ieeUObgtL#2B%7R&2E6x&Cy#1n8Jhu>;V zJ~0i<@4)Xa=N>$n3KL3&=?y(0GEAlz_FjbO?99KTu+m1@ZVxh)|M0>Dn>!egNR==h zt30Lqs_RVEsFnB+{O};^Nq@1fn1;?;jcDRRg|QZ$c#l~15Nd;8$PnkF!|Z0=wW(P{ zV7_mN)SW=Deb~%Az)1Y=4(i5Pw8|=EKt6P>EkSoTvfHCs!OgIZXRM(fm0&jS5ex^{aLyZV)9TZO zVIpTZQ(Bc+S(ttNObm?V&pUj163lNlYEEH%?>T%X4G+4>9>n4CK0+vncbpt?J6ZN2 zG#Jjzkjveo!Wdp~;;1BkoI?ch!>%iXE)K#M82(r=yAaV>Q+(Iqg?*{bo}_pD5gO!Y z;+)PFheH38uAK2i+u}s;C2+Cn;KfUR=P(hz7?JEgo~(6mG%CQ{=5&AT!8dpUD7NK8 zw7E?jnU4R2;M>cHiaqdv4%Ae#@VF*0&&S}^WMcMwB5f#LZcpKV>990EP^lHHZ65E~ z3GA?=v-mi_{~sQ>2t>{Y(T>7HCcq*V5tBdRNwHjYDKfPt=!#2;XYXJYj;v-cULl;T zkA>%I{U;NNMpL=})9|6L=wgjfuOfsZSVKB`uN7Pk#V_3G{CR}kzM@CwI#JUBtM%k6 zF5pX_v9$B-)@JT$Eo<=stelPA?dFxIp;ZKuv-^pCl>5E#L~aI6>DUNj#M`CmUcNk?i? 
z-SF>xasln6(^~pd2nLmc;0>3#a~r%agt*cZwwHkYMDml>h*+Wg<|tU~ zZY<`F&OrQWicL9#I|qrAxA=PlD^efioPxU84Hi8Fgh(KZ8%O<_$pb{nE+_+Gi^yARicuN@T3g%& z*ND_}*?AZCpgBE3nl1jvD+Uo=LTo=_o2S6S->UYE>nQRif6#Xd3@r*??+hk~&>>VC zyjTNg97UcQ3Q|ABLR(YCH~>CRz&pc0;Zn?0y-e;ipDQVb0(b(Zr*(+!rY`UqO-#?( z##ivZX+*1&tXy??+iHIQKR9lA@Uv<`o;njRDMVgjMBAB2o>7IUa}MSq;&;qIpc2~v zj<01mR($ngLC&gmW&8pIvaq18plmySrbfZaS0Jl2@be~e4V8P-p3gfGo45ng4J+{R zCdA*}u$xmv`vI`j$Ed-c@;3eEiPt8PA>OaFe6S1rB@KvoJI2n~Fke_w~i!)irn|w`&CjS~_2!(k(0gX*` zoew9w(9v`CRXNZ3`X`DVw($`?XawtOAj@e3qrV7xxS%6Op|*S@(PCl*>{XU^h%*0|H=u#>Q57lDkGpf*W)S~k_ zH`I-%HFra=a1rgqC)}wERy~BCvYU9nKp%MkS@K`S2eq|I0e9=bd8=`}YdrauE4Z+h zclJjwOCtgnXV-SIM~}Jl-7puegD)OrdjZ?}hm2z@zWvtbg57=q#a@Hl4cTjVvWu(W zNM-KMpK59ba|p`d9aq8VT0}TIux&V3UYn18cy2fqw+*O5y}|z~M3YhltTl!RbC`P3 zCY1Ux+Xt@EmnS$+$7l3J#pPnXZ6K06glS*=`D-je6mCGXovt!y!T`i0tu zxX=yU2_x>^q7Hk8)$2h7)^WmWmpFs9YA1H(^n$1Oj8$ICtg7FjLo;CyZ2cgS>?&Be z6}0rVMNr+kiNY|5r%10>F7Zz7=$Ly%6mZ6irVtN<>E8N+pPR|1TVdCYiA^D7V&!1h zapV(!3bN-9Xy9*%M1FKlzC^Fls2k=v^|J<2=qoMTi-A99|N#=K*EZatx z#yc{f|A^B=K%=ch_g(mnHsj(rURaxax&)fZZ!$W2w2n4J{S~MQMzEwIIm=Ym+eL^4 z4L@PSF2YT6q66e8ufg+Wm3dLZv=M^vK~oTbOzbb zW3tboWM%V+7q`i=f{EQe;Cwu4MlRO#2J83)Ptv|#jCy|y(6t-y(+~_R&pFE|EX{#y z?!}e11UpK@9<+X?(IAN@Rj3tM*a=uzJHFC|?`mC{4~dNytR;j@e;l=`?I?Z^>B8H@ z%(Ib9Feybx^f>I{x6%uwI166fhNaqw988&mvGwLEf>Ef7b4|_Asx-y9BJX(;#25h1 z)+YBXNnW%B?AS*4QbW+ABYh*gsG4u1(zF|1dKHFm6t=>iD#ObHxtCg?-!O9dr$p;G zc;6L1Zjz_GN8ftQ$78H*pnd@9sxjB5CeH=$ z&ODNhD43hbrcNt&IS*GzZ4L6>hdnO^bA4H>(yZEY5WY1r>@C`qi7BZi3qHy*&#o1J zd_^OZU#ieA6f8>OPjuQBpu{&a#0A8|I>g~;M9WZKsSB&@43E+{)Gd6wjm=DN%0V)` z2)ar+14GR_iSE(*#Jc5VE*|{+QJpV2TO`O6Al5^XaTK=^M_c1vEm)r?SXW07CG%{^YldO<+8K>q z)U&tJUGWC?JQ>dP7h66JN^XOhhQlWI!O`lYKxi3VZPxY_$ZFv1pt>KUbJd zHKsc2e~q8hdK9{%TW8TvQG#CYs;qrWD%DMyBw0ng57vc|bNay4PGDf4r zn~LRqYNuKrUIO;Cjo;M}A&V0x?NP*b@y=WDm3{Dw=ft;iLU9y97d~w4ZW}6z9f;!j z+(~!T*RS|(Jv4ud>c_v#P(}?7qRS-Pm&u4u{QCm6!j4!tg71caVqJ+RA*Zn3fNV0<2{@qIDl2X!;5#qIy2So z@HbD;ZyK7!F!1a?QPG35#xF!qskBr?3YPN4cj7*Aw%A^jsb|lp6XzBl)}M?r2Cb?G 
z_Y(_;ISUV~#wm_H_*Wff1-1pP(&=gxP=ktcI(7;_@rU!%;q=tyFcW(qymSM$nobRW z57~Wbe54xQQi6%0MX9gc24#EW*Y7!}*GPN_K1G76_2C`biR%qy=N7njCo1E9WWb54 zJ-+Y_EAK~~TLq7NjXj9u%_HeJ?+P|70NJCs-j-lOXY!#@M9XK~`!;g2b+E=g?6Di& zDRr4|){`^lMbIwZ$`SHaCYnB!pUI!(qOgjIbXnA;nzTTf2Oi7HRFLBb5x+5cl7Sqi z4c43t_ne0!^@GzWZsJ|4zZ1ze&XH%#;A&P;(|XStJgeX?b`+n9{!$~>*(4^ST@PXf z(%~e%>Fe)CG*YSOlmKa~No5f&6%~=5-PC)!b(K=JmQ&$6zct!59jShw;^o!l0 zuKSA_6_@EmpRe?1=0r861YM}1)O`2TX)16kuXI7b?_ISG7FZ3heS!B~z`u`!NwF~C z=VWA)!7?q2oJ%%5g7tf>D@k3iGr910tfeUV)EDY^=jhM6Om)tgsM4D`Koi6k%&~J4 z74CQ>D0h+=SCKvQ=1loNWEu1DswN+=k=v3G89z0&Ms;-m3;7XGx|bNdNxyulTE2? zPb6D#A{s<9#s0dkF?+L_YpV&5m*HAp$V3jJ12qEO?dZPPOVnj?W!>dlz`GFdC3+-q;t1<*U(I!BwAd(IS zS+;=8@2Eg%(~64F*E_yoJu9PFYD)eH*0(Bo-PBZ^N0=+MKL! zrw+to7NUEd1@*=k@b^MkS{SFpUct?`k~4neE{tf8`{-hcq0{`AkVej7V;0d~x&TJw zmDRul16^1Dp#-hrYA(WV%;ezXQJuFFZ(f1U?YM85*L()bo!|=F5X~dWkwU1~E~oqb z1X)fv41PGuNF0@d%UJeo^6M5TLRuexSNK2{Eb{MiCAtd=5T3V~usN z(K)==E^H-P=T09)9@(=t6?`(ba~+(KxJu2Uth%!7RTOh=qrmFru=_A#Om$-NU$VWE zaCM$KPh6cw9ViNn+z8joQno5Rltz5iQUa8AU|TF%Q#pF^_JVyT)tmV3O4R6q;!_PA z`34@lfKIc?=NDon7EXV!1Ys*6iE(xm{l5vKAa5})*w$7@e~hG{|9%Q z0MGH%T_d|+QV_wXgKI0ehiA{EqqXTXiLb3`%3+j3NL zK9jqNXk6pDqc3!HSK(E(iSRqH_6Fz#6UgAY!*3Q-qnt~J(;_mv`rKP95O^3o&<*}v z7A7%=`@9HVccPX(52a}y`#1q?n1n~2V(qol6(zXCh9F%EHuIdXC8H~}#SdD7g8_VW zArmbGGk--Ee3zbJCn|sY=wQ1-B;GC#r!HGw%p!*?$)u8tXics}t2@Npm8=0%JYZF0 zVJA1ZyIt(z0l3N)7}Z?X(vho+hmET2*(CgJH}CX}9H|y;)<@8oY)M}07Phzxh3q#T zWXE*Ai!k%6bU;_6zikgp&W0}U&#DE3Csr7cKk+4^Kudmv{c6#_IQ|~TJ>s0Y0Wpl{eB|nM^kJ z1*W2%(fCPhF9G-XLPlPg)dUX9S!;(Ys!4TA|DpQE}9#>k`>-!2UuD zw1mz?vCXXEQ}}5Nv0^J|WKW)X9;_D7gjz98X&>6j+JfGu>Vh__xilK=O^~)RDn=!^ zM-4KVio}UJ*wZduEh;)LZ-Dv*hLVCw;%4g0WVrj%&kfv&N=L64q3u!_~mf4^jXBZL@d;XwiwFm z)Ms6_=OTQfva1k5*Wk<9%(#mIA6vtd4%7ED5j+VKQn`;gtbR1FH-y?&Y3f}?u<^@u zp!`Gqpbr^NHumv}N(;|&WWSq%jas)xYuLx{DtI6ZKgxbcvwMr-l3J9LbXgpm}+>wkmSe_78f z}`(Ye3zM+cS$v-dSwp;86WOPjY>oS&IU z*V07vKr@vRCo-vIdUM0!JVvn+&#TNJU+z-y^CmQ=3&fHv5Woh{$wWD;!EB;b`HsAv 
z6U0N9QFKIpAji^OJPwV^fv%C>bOraerPv*20FMGs^@_ZBuZPNZobhqFOU~L9{IDZ}$HMrwp&j^8Sr>>R9`H#&xLrrQurI#zfvAy$ zWr<9jnFR~B=Z;RmHODgNWeeAT7jLT$hnUDUAH^!45@8k)`)6Q>8e1*}#*V_DE)&Ui zf{4BOS_#&D0hV_Qtb32WIC7OOu>WRoMNPBVK$PiC#on81-Ux?03I8cY6?!P_tuA?L z0{3u>9V$)q-o;6xzTy=0n?hnTyhNM%wyYr9jV6la!_(i8A+IIV+sIlUW(DVA;U8d+ zwO|<);PQTS@M~QY%?nT=nY|be%X>|a=>&48d1!Gf$ek{bpA=%JBFS6Jg5o>jM_Rwj zRTPS5T-yxJGm4@+b?M${*V$-Ff0?kbRs6sl_K)IHo>kR@DQU+zWwi@Fb(ktnKdx0z zMdUQ-mq^t#2E{&{{rrp;why~gsF7U<9j9R>e1(|Bk-ETI)jY!_5}?WEQTzW0$z-!1B{@9CK>2J*MYllGyE z=CcpKu&oHHH$PeTV1Dlhb18>n9a=~2QaT&nxT!$6yNJXv_ENK97Z5OyYlX!6i6{I$+KZz=HNupAJ?$H|y-UVO($|_!< zlX5$`PI=Hko1$stGhg(Do!GQC{do_(P^5dKCY)|0UgiWxbi~twi1f~A2Sxb)D*R|N z@82Gdb{HGe&U@YjrHWx`LqNc#?A%!a$%J-+9j*1)}TbrtB*-VX1{$o^`ww2?N-s-E?(izgX;1j% zEwtdB;AIx~*&N22LI(MVEGmoFXuI%_9Uy;0NR&~3Y-imk%q*N_oEU|sX%U$QfG#@@;X)Zt`h zAndjuk+?ipv7FtBL5DD-fs~{Na~#TKTi#*P_3&II@--COiauO_B$0R#UmeU&tVHp;f^yghtG!GvtL1khx!+{GTY*h# zC#`OPxLe@oC(ty$Q*{bf4#6znTlZO)S`S&nPGD^PXzCFAHt@5V-UF&ZtRK6g1Bp1X!@ZUUUr ziq>%h&Txc}yJ&1*K-H7%_du?^(>RM&Rw6!MqC%Tb9IL?o7p5Y64ldMz)!oV}7be4K2WluF$ijlm^cK62%X4jZ zQQg^s5|GZcl>lu113%wXu&38!2)#T9LG$Tgx|yz(S5#@!z>iSye+8M`e!Tw&jCUcm z$kyaZV^CL*5essuBvgcHtivNTwbGHQ@G^3hRm6}R)Hw{~%gmRBFZ@E=`hjA736$u^ z%2Z+X%fm;tRhojuEkI2i2-~p1T*E-^5Ul3}7FPxRZ3pWc0>e!QiLP=jt*EPB<7%1{ zwGxQ4PRetpOGe4@On_+wce{WV){NXJN$tp*U7}(i4U0I-eQe{(*6}eDlwXb~YrNwp z)r_K8LO6(ij&84?#Hmy4wjYe4BdqKg73N0NGxVGnEsQPKr3X42-~5GjH>6kKDg8e~ z`2D9uDs8UnJbDbYb1uDEyB{b%3z>W{i$2`?bX&y{4=Qr1=L(gLIe6j_totGB91cHf z4~uBT?;I?6(w94Dn${9suTU$L$)3HbQT^ZCoM1WzjuVxFh(A@~<7Ggt+f<>S!ayC^ z;rZA=F14<8aL_+wMaRI3iCp0*eli!F=}Uati;kpC=dXze+p%Xq=y$8YYZ=iw|D(RF zw;kq`!5b{`Av4j3vDZ!@>@%#dBYCxUPGcIc`xML$r@peE_nLrbFXA+bHkqNJ5XYMI zgH3K=zaQ~)>rp$s>G=0zjSqlw*YT&HFtfqrb@f@pqp-HJyykr@;08S69ZbTF+DLs6 zq8_@omAvU4b(7&-$FEkh8}-3rZTdwB$kdsv-Aq0;l6Q;e^$xImFJNyxYf7zwO`T#D(ui--M28A^ zU^%)P53stgU{l%?_?)=zo?PcWx;0dF1Z+E+Dpmx1Xf>Xcg*_f2(}?2)T@Jo=h40?Q z=V#)VQK&D~u(XTp;zQW&AXX`ovvpOer39h>uNSXz+Hxkx4?c4rA2(4oZbfwFDG$7ZKUwfw?qL>;?+!?IlnU=jy1ZlX%hPl@ 
z`k(-nh9jN>&+ntgX-`NP!5Zx#kJjqmU9iKkaM!``nZ3m8m0bOJKGt(Kay~X)mR;F_ z#@-V|tj5p3ffGis>MHBq3A9^{s<{m>uz=ZX$@;#r3y0CME5b!hAlo>yzHnkg5<1&8 zEc*f3=w0PG)uu`G2Wiu^2cpHUfXAn*?Rn(~*lPmH**n-?2d+q)*g1`V&%?THIy-P+ z7N_sMczVJ+e03S>NeIfC8#SgW*y}QIxh~PY6Lp*jREcORHd>!e6BzVK7<(!fFb}q0 z5$+QRJNrTGT8)mg4L-4(6*~^XO+#b1!EUno$X6P{GH#$HYtPv1O}_S?v(o**8m+I~ zlMMVC-anP>K6+Y1wp&P9?zFv-Z?#bsp$l2}}OqkHae-h|Os3hcoCI4dY z+UYSJnfwoGTIWFV+vw!4vA`>=fD5~&%@Vpu4&lmKKu0*g8w#P;YtVzKmVwMx&s2_} z{M1(}p>PkUZ(AhSUrk1uNlz8e|FR8Wx1&JS^PrcGwb=v5-a{;H2Mn^ zKC8JAb^8XHhjw=PGkZIRT3>{&FtMXOKd(*L&&DI>vB&A`(;+yHHlOeU9&{T|dI}Ct zVm*q}@mP(#*@XSy1oOPJjt*@kiuP8Fm9)-sJ~d5EuFM8~Md6-2U5#jxq?++Q3xyb=ZOg6J#N zE0|{OF4dNrNZ!(K^yL@gWS$1?$&&_m^8}8a=y8(xg^J!^A%~j(Tl86jFb?dFC*rvi zHO8T$UjS{UkXu^GeLj#|wBUacsKQ?Ol^6PDOAxOHd-9B&<09H|N%DgdRBlcD#Axt% zK2_6bdd#&sP_giaWO|7&lkFzxd{KbvlIv)F=MBl8+u&`x;WILl+ z_+gkv2XL_qIonO*$0xMSRv@KTN8lL<@SG3o0;Wj*CQENXyeJPBo{wg=6IE~#%-BV# z!e>W0PadUc6Ywe$Wm<7(LG0fEuJbr~d^NDd9!wagITIxp^R;84dP4Uyq>H=t1Ab5-_7L8iqErKMmfu9nbq0RuGFe zISzID0`<7>tYrk(9tE=RBRBsES9=b^>!{ORg)5h$zik@6C)iZh{03Ivk8AIRKmJ6+ zwi3zxVRP%ziq=pc(5k`9xSvt1iPoVzkbLO}HgBXGsBb}a+yMfMt zup+U?3{FgDu$E(~zH~wPd=LAmMb=*mWm9{i#J}9DRx5l!PS^{}c+TAy1tmMNgHBjP z4tH;a!&|6Ab!P{L5(!Np-xgS6d8Q_A;XL|w@{Ck^iOYh5TDL(FqU2*D#%VPB7uaqu zbhqXpR4i;vq*mA;-msSI8$&gvJbCVB@OCWuRdI6aZ{+3Uv7~>{gEpcf?m?rEBtC4w z(!Y~mD=?sj%-!5Tw$(!1#`78?@V@QnRes_Hknlb0wux7pgvR2*bk=%!Pdpz{MKKqpxg*DW4$% zKZc<+LZi@A!CcSpRitBbFYn;Ty}MCk8_ga6qBd3rO#TNiGwE88El!5FY1QT|odIuj zrA9VNC?;gV>_4&JDP)Huu%Pp*u$qQ(7UC=94C>Z?1uSQM9)7*R#1;A-eovP zdl@Zs6&ROVFpnmiT8TXs-l^cnKBjYNJx|TqUG4nBW*F5d?(7-}JB#j?O<-;`df@@~ zaT(UwA4X~WIgOu?J~U4Y@N04oj`tW+^7NRQA0 ze}e=@^x+ZM-hcR442)B&#+hNO+H>=YfSa3$Tc<%Z?J3A5QGEl^0y?3k7bivwoJ@Ys z_a?#FCXvN`!CM!w2W|PYoG5o1hGikf&E=X(QNf62WkWsbuL5r~?E@HKj^YFRF#@ zu!nq}uy>qH^CO%il#^krVfBU47gnMJ+mcZp)VAn3%fY^ za&0o_$J93#p>*%S(^jEMOvnFsz@Q?y`-5=J)>IYmah00B+Me2c8@Rz%n9Ll|Y6?}Q z8mxI`^wT$>>}P6khuG-=7{?YOf;L5Y1uW1FKKTr#rUR2gE=09FbfiKhpaoows_i%6bsLbUNDYSFjUD?Y6y2ji`JJjB1R1Cb) 
zvrKUHvveTTr_bOc_AQ~8Xw1GgtGyRqdJ@#Qi=I`5epe&iDJQXoqRg2bf+m{{6DW>i z8ci&?MYeYg|JlmEp8-?apjfu&fBX5f08X(QHn|6^T}l5;Ir8fRblIFB+unj&^@jUe z0k0eazD5x3dw?27SXw7md>MV0aXc?@GaX+>DF&bO5x-KW(7MBF!H;_qC1X*&dlJJ; zMBV^+(+k%51o*w5T>L%ndJ|UeNUW*MZg>@Ro4-U6DawlUhj%SuuMdG9d7wloEdL8y z!cJoFXHeucd0H5{Lk0NMacuJ{zMn<>c|?Zv8B7VsPp%P@JHyS(@~0lT$sts=TPRrp2`r&BhFRXjA|CQAy|;9%C33p>jgrZ> z?*bx!1kYo5BK{U7$;MM>qUph0U7)MRqC)KC#~IMKE%~M&)ekT1Kzo8+2T=PDR=1md)aHt|<Pw+qxy?!w+y(;w8l z;5=9jJZli>ae`}k#rwt5wJ?@kOJ=>jQ2)kJ&#}7}apimv)UC*#Xhro`W z@hlro}{yeZHfjjO`-jxrh=!w4slk23gR)xT`IBc^89B?Z3^%Dh;XP$%c zW-{y)^1{AkfR(rxBN<9Dw5kY@qY^kY9Ui!s2vLvJzA@l z=UvwYag}|@?!!Q?4_Isfoa`(SdOT~Ch6eVWOlkzt;2rtG9Q4T!Xj3cUfa8d|6WQBS z1-bb(B7mNCpM-CeBZ?emS4yJWY14ks!pSDssxSxQFUaQ#mbkzk788M1;c=y@{=Y?; zUqrTbftAh&H^Z^os=U88DdjFG=gO69=eFF4ayEEIYhuI6V7ti$ z9h;TtfsEujKH~R#(Nvn#@ADSBvVwe*u%S9wjdo&WCmH5N*yMS#%0uXHD~Jl(b68&y z?QSXl@P_Ne5_|HaKzLUdu%Rbf!*L?mJFYtvE0(dX@#GT$L|5kK!G@QylNaGa&sj@t zR{1#AB0*P}6Nx6i>OsEQ8)a=axxo(hXa$^XChE(5YU=R7NJ#fhe1-5gPUAs!{ zWh5w>LhVJs{;#m-yWli=LS3qH7kHk?TX8Y$EDh{kjWskAY(yVlGUFh4v6hoICL-IT z;w;84DhOtx!x!G?A=dE?ru_%DRGb)+$Th?j%!+*r8eD-BPiMs%6ZcA@+-lE|(@3(`aHcv3Ccl<-+&~5~ zTzn&zklIVlrDjq~$yfT1^BHDrArUrQ8b9y{zgFNQ4~W}~1%GiC+2(F^+QD#)qwxOS zXvEi9p&mrxujmY;SnDDn!4WbUZ6fSb(9xMw+$&i13GD7BdLTA48&j(?-lLoRjC_&G zR%=ePZsiGJm5Fj!*p)5BI)(aCXKE(#JQHpdo%kKaELQG1-Fg>5Bu6z!QRUX0I%|ts z_e9yK?zWxLofEo>vEp{ArhbP0jQ*^CnEt&qLQ17NgW^fHsRR6oEoC7f<%*dZ*Hb7R%G5-N*w2iKh*vJJJ^EDyLRT3Db1rY|+YVtFz^v z>3e>k{M5W@c?IsndfYZG;gz}D@BAK`a<@%9Z$MsyRY_I>ix*e&9j-i(Y3eZ zY(srrx+N)hb*7R&I<;d;YD(Yq-kI6Cr7aVbN4j!`=?-t45}jnnyAG25A;S@Irfrmb z(0n`JCI4Pt!@SSAfq7%{tD3i2PFl+y8C??S&_ zexZJXZw0R@uEXsA2m`F~IkW#prtVGt?{}}{1!)->g>s$E-xXOB9gn-Lb8YI{(zU2- zCFk+>FC|Xd%I=n*dC#-EX6iEJzeh6GWm>Y@!QyUs^|u*eXXPOBeUOSwEN?e7M;>GwRw8!j6K;EOqbLNcD-E6dzSE> z9*|qa7<{V8+kklA#vV%@1^o-9asIuG$kd_9=abV?m;RZYnU?$0x=t)_@8{gfP4evL zwcIPt)7j&?%L9j!b}z)Ys*`1RZcbKa=7!9L8O{D~|NC#|((KW>fu_mU2C9edyD(2* 
z%jo3r+2M?1Q|H|-g7tGf3fueRn`Q6?yneP2(x!|ig8-xSjOJV&LRct`dcD%CS(}XFC?!@TADcN*OZjE>CLkA=F`Fq zhn=oPy(;-91)VG=6nkFeaiQ@ZUyMi9E%~Q2V^Y5+CH*Y?>q%-_#=_hdmQZ1h!(q3n z9xvVNx`((oa=Ybx(!QZSK;X2v>Z2rBwwZGBP39WPa!#n;*G;BAG?AxKWH5CsU6>&agzG?j=)#<%4y!yhIsDmN7-0FK5X)Hb;F!`>oEM-F-Yy zc@Fh{Ql?+3i z)4Zn^jxACn$hGME;_k&K7T)TSXZ)s)%-xc@EAG^%BJa+>9scQQ{N(g&xp9hWzuWiU z;(4K8O63&qAM~NnOpkBI-}2?G?rB|9hoq42c*Y6WQgMXa<7WBP+{M?_1ld@A9 zXB^L&Y#uB0cdqX{DsV~BS|t~SxrI$FmgPOjc+0Z-@7$zC-`{?I@L|da>1#w{i_B1K zC%wSq*rt`R4^1iVQe?G%1;1w=wd~WBdwFg-VL30e6EnuAjY}?@BB!6t6tm?lV{WYV zw%r#G)xTv>=isTquZqSNy%zA^b&|dojBci_v8hyM&p)sJ1pNt0bx2;AJRxOx%8r!H zX)pdx$~uy}N@?uWAfR{2k7dVHoKmH0#SBEkw96ouzRtDng4O*d=e(EMgEAhZCMC5<2#=5Wxh!>WZYSHn zc3oT|eEtn8QgU!9$Kp|ervu&=8s+I?|5zPpDPnGxcP*n*dhLve{Ll0g*jsOzZsz<< z-LET0>_uKslH^Oz536L$Zd zVloKzoHqOH3iJrN86f(0@^0-HR9Fa@?dk6jBc4~M=5I``{r%R*L2oy|Y4^75mutU< z-R65x^nSJO57Ur0H?J9pTnM1 z>ry?i^heJ}f}80{M(vb{l$5-ij-88?2-B6{Q+iN5j~CVq zzwY6-x~H4WZO|tCu}_9BCg)!InxtkwuKrk>ye-GkazEFYQa|=@bV79Z*ovt$~a zgF``WtO;mc^^Xv$Rr$O6G-weqtRMVrb^BhA9mD78|ED-aazL`U+ z+a;HWwyl}v|J={~ma+f$j<`SZcao1}R4^YhHV9}Nb|5^pjIG#U?`L9APO*%Ax#i^n zw*69?(@6I~qnMcxz3t(mNbhHNzTL^)=+-&xW&QbW=C&Qxpk|3_LgTm#&niVq_fEaE z#Z*Y~%W0w9G$y&*yiD$^T`sx=xL@}C<-O78yGN$+KkJtqyZo=#tJW2!J?1#h;2p~? 
z^1JSj13yfOp??*1z4!gl%nAd_UI-H1zNtGh%cpnGnWby!u-&z?*B;Lf#!5M7f3$t; z`YQe7x#UW^H@9T#_NLvB$$ta$9%uDPYn{?Ly?xeW z^9bWlehmp`fgA3xZmGQHKj2Q{hfyEuOkqeV-mkQ#CjyPx0LXI zUae*0xLOItpBV>czD!!3o+J4AOfOuzP#dq&PGRQwZ;21qURx1q{kSH#gs-7$cJuu$ z0&CfqOm#0OZ%K;!xbuU1Lbu#CLV2-m-sbqAsH7LspX+3fcid7es!GecZnd74n-y?V znwPrhbDvKeQkLug^;zQM;9zfBn>gmZ?b+!Ut{-0~i1|UD6DzlFJ*d@`@KFwH(~R*$ zQuj#3 zvZB6|Gqc*q)p&8_R@*zh-roQFubXd~2^@y*7c$0uw%uZVW!-&KfaQjL?Lwpd1DtEf zcmMW_U-@qSi>B}X`SId+**`;-oI(>Sm#Vd<%vjHL%9M=U^s&~}Uj8As1AE$!%=7t>uF`i?&ly`4iK$?&xTRPJkI#-?4kyK0rUieO{Qmy^-KVlK+mq*I zyJz(K-9Ej9)G2@O+by$fv&DG?6&)0~((#gMK)T=0N}ogDe119eUDF>2 zvkN(W4*F8!M_^wsNAJBw9+pfm8C7JhYbA4Vg7o2jl=!~VhZi3%eX&n$n0Y}yZdbvj zwOdt>CH~_}2UHtfrDgC4scYuvjNX<{qN*RS#-z{xH1m1A*Joq;C6-T$O|D^1anqN0 zQSm`lYq_8PckD_@x4c}-?5y~(Sf0aeZfS;m^HZ4~fo8s%A+xHgdMOJolbZ^kUe+ zGW$#Di!}*}C|R+1;X>gK;dwLSO1$m#%I96+$2(C+p1pc@J^FoGU7?4&S3srU&Bbqr z9jX!7tY6cU6>Itw(yh#Ho9g$gdh*Wnuc@gCi@ptv+50W-M{G(P<+kU$kZ$1{t30Y4 zT&8uA%if#4R(ig%|DAv0*UgxPA18e%_w_)6HPP!w$Cy5eN6qhC#`y0DdKxk{Y(|;= zCGQ1aE!@QCp2Jq#c{$cPCCl&o(&uyUSAW*@v+ehm+^@35S|m3)%gr|4C$rS0GJ8ul zD!Q$(v(FIM2~Oi2ii)ixOY^`!onOWOed;zxsB zd~Q3W3Jv5k`R8(Cb4%qN$oP@;Hm>mRjGXOCjP0#-&iRDb2Cse|54@HZ8s(SdQc2QL zQIV`Ovflman4OY;Ht%@uNcpZY)#Zi5|n+7_9<=D)O#6GaDq z=d1QzY>7GgjD0zeb?z?i9$TFc8-wjPIQ4VdYWH4DMxRWy?#fNdJ#4LG8;+7!K}?eV zv1OYc>O>(5%(&yzyQiwEJsxDr!?Ncpe!eh{eCJRQx zbYnC7D)!O#n;ah4FE<{x@8kT@^}h3P;~ybL$*?|@4Z11%C}Ws?lp#{=Y&&HgZ#tGc zAZKrGl&Lq_z*y@jd5bzkC~c@=Ki=3w>ZltlEYx?^Z?|284HdGSuvVuobjsQ{e^K_y ztVTJGdCvJc`CrYs*7r(`uBc(M@wj1^@J9J;`D=KddXE#%uk^BP&QHrZl6mb9qvbb2-NVRT&u8SJ3c*)BwftI1{>n**%ooGDfaMQW0 z%LJ$A4(;qC$Qd?i{dcH0ztDnSpftW$i)xj8o)>6Va)cXp6CD>j`??0XJ$2dcu-MQ; zaulNFP5JGz+N1}i{7vcf*P8t$Z)SeKykog?{%|Ep@Rr=A-TF6n<&4$rY8fi(7mDR| zuN2kV$ucGXQ+BJYX*pL+Yvn!aP--9*#D)64b|dY*oPu5aT#MP}IJ@-9s@;S!WK&7dbpM-DJrR z&EKAPChvWInB|JHOqi`tGlbgpwVPo#+E7b+hdT9GxT?!ik8qm*mwa5FqLjD&hdQp> zt~2YQs_mH2P5;YK&hCn#w7!pUkclfrRi2h;eP(fx2UDj$Ew`6jDmQE^L}x>wah>s! 
zVX~AcO|ffkKh*x3@x7hV5G6(kqs7P4ZK`pUA!>}T#}-evw}?qwNb>1Ww&71W8k;bJNM1;aeM zJ$Cc$n%Z?XoRK`mS-MN=3HhdFl6i~iQU1yNQszq5Rq`vPrp`&aqhE>gGsfP{VTnVQ zL#+K-Lp^aE6|QZ}i`qy1qMPuSno=CkKJ=G%=|k+g+dVV%FU{^UGE zzRr}Ne>H!E>6y8i^{xC#?JD$>DxmSX8U(u=b{*_`>L*g6uOs#nW>bCn2bG|%^i?{l zuV%>7SJjV`N=eaDpka++h`xb1OqZ#SRZhz7l_0dp4)plND4W#^RH07kwDY+k!c*O5 zo(w$(uNz=kp|2oa5+ZHo)EIguHYl#jR_gdYmH$w)+o5oGM86(lyGDt>at(@qVI1wsIM$_<6O*ksj4(g&^om~Fz=!P&zi1BEz&6L zVFy?1rV69P*@B`TQ2te?(N#o`i`+xe<_c}3n_vy+$&MG= z7Ij;j=dg~Mh_%&xo1L&0dpak)kzB|khidg9sjn0*bZ3PlbytO<;yCd*^`53&YZ6mm zyNR#Fancj%oU}lyBW>V}_GB@eS+H|-ZPjtiSKNxVlqS+f3U#H{;xnp+wS{S%4f=+j ze^&4o8|aKY4|S;0)*5EHWzCb%Sj$_|EMdxEwZ1aQ+Di7*Ef-e`ZEbIP3e7roF1>TY&7#i7jT0NDm00~ES#Tb(u|WWM{cNFX7I2}5`1l()e$qLv-&zx zB=JQ@{V16F%{ALe)m2wpI7d}dSE<`{mDHz}TJk5`BF+Ka0q;uCgXBav zZ6ncSd!ekc*?1~ToYKJRZCxfWux-?hR+?C5SVL_NLUCJy#cXnyZ-XjRmBE(ta-8mr zFpTGm)KUj1Eo@A9ptk5pmF=f)ury4ZNbhA|ahZOb;i-NONL@yZ7FLUG408;n#I97J zwC8wN5r^s~DW2**D&+BMsBH^1?;Ls|+`01u>I=D+eAwnL6%!q8p=tx&5b2vx!SF2kNTRN$&s5alTm8SZ-U4Pe@Fa1Y< zrmwg{Dyx4i-n2DV+~vMX8)2aMKuI%OEkPL&%t} z+MZf}TQh9^q`~52rLSeU93fbxg<>9^3|(}Gg}35JVXWF-o*=ie?GOd&zOJgeN%qI? 
z@2i`wx#m9B?o?a;Qp>Banrxrxzx+fu$1Rw@9reTe;yKA**ePdNSK3wyH`GejBZsQ( zkp>tNrR!od@i_gkqv*A{WOJnxUQfN}k6^D$RVq=lN+9Rxs^(huTI53aF;GRf2LdX5tDUQY~AUVUS}&yr`H~}iZ#*FRvD}tB4!Hx z=+7A{7S=2JetK7_p+3j1m0fpn3O!u=Y&I%~Gq8f{@;Ujr(vr@hVx0S*End|xwHsl` z65hdzN(V@`tG9=*NpuzvGje>5#GLj=?FN#0ffWR?HmTcxk&~ zfPR{=5ysriHV&R+(`nD^qML#z!dIaSr;_xI2;Eka^rxhgVv22%JXS7a+dwzia&>{6 zZ~4!lMf`t{N}-4o@w+*7SA=IM{>KZ;d^|8aDd zVNq>;6hAr1Fm!iIDTsk$gD5KIHLzRk?(XjH?m)2-0|O8c0ZHlZnPK|m`}ckBmm6G} zIcJ}}_t|T$-};xLIBbMCva)8VE(_Y%ua#ES?F5ixWg)L**dOkK8AAweJ{nki)}7yP4AIzwf_R>-6$D9>g!lO&hPm1;hG$5f*i_ER@`Rmv z4@0&tnu#YLnaq=w2^`p$T3&ra-^`Zat%4ooB4h{kR)^_(&|8rtP@l6q68fa>rFPkpFDs+=N3?e zk)>YoBgyq5o*c%NXuVaVRSHdoaWm{z^Qkx5ld2Te6WuyCA6pFL(vrEa|EpcC_h;6j zeTbg8g1czEtec>pK#k$LVcm(d_+~T%x)_bvaRDkmBdL}iF!47jkOYZ`iONMQCF#-( zsfA>b@Hw#%8w;N7mynuxg;@|vA{rmeZ7>KyYyQqupv}ZjT+aQ_((NnT{8Wo|L#aM= zH^b(RCvDGKBa{;TJGKg(nnStUvJvH+7($#zj!Vlw!dcVjQfDWWlw3Q4KxC?3Jy zp>o(KI4vv^5rPgr-*8l2sA}#wt4Y!h&?&W9nr&)hN3+^im!>BSUIr_pCn#a3=xZ?M zjlLaf)h+cOU6Zkr z9!KXI*6JQ=pX#CvIff60IOA#RB~7q%AfYvq`D{>Xo^`~kV|AIvB}@w&2fT&xOdGX{ z)^lM*qx z2!3)#y`A>AzAJYXe@)y*?U}LK@rt`eV7l|qeR zuAoCOPcl=!&%B5Hwd5q(#$Ta-Fm7a)JjVKkMYE(FRT{&z+1dyun;0OL2&2$5w4-jM za!l)_)?|fT^GUy&&gT2#0r)`Pgc_i0(xhv*8y+zGkZN=WKg$@?VbXS?#jQPFEv9ZE z4g!U!O!yzshR(u96B`AG#j8wnty1iz4goewvv1Np0tbvlXA;HY`Ld0&ebNw7HDLi!v%G|!mT~8eTr(2Rz*Prhp<{|G)We_ z;~%I%^(2Lda*bvfg}_eEg6*%rt2)sBsJ%kvrT@sZqhkcSMLk7Bg?okf!3$9!dMi$q z(UwyCXh+Inq#bSvXPme{_a8eGT=qlYtSK6C=62B`)Fz6h+Km(S<22t@Q&mqi-t;5v zsVGx2STq@@xR-Q+VW3W;_0@l4%5T%E4_k8cv0%a@rYn9|a};$Gszk{DCka;AN}Q?65mW3hdXRlGEwf3Fd= z|7wnJ@^1Rnw4gbmc|=QTn_TT}SZmy%KdoJ-^Q9-?@v_A>u8uAC&n*v0J|gQ3-#QW$ zLly5--*tBE9W0$B1PFef4pxq++f<3yq&As!Jf?#L15L}#2FopFbERP-1e;BZi~-Cd z^fZ|&3N+a+|7;#^k!rrgVwaWBCeY@mO|<=F$3oj(CI##w)t1Ko)dwohR&A+&(WGe7 zx9rvM=xEbnRuYTe;w&!IXu@137n&Whaj>2%qp|76_>Spq0gZjDf0P|7>ssy5oTEO< zUd2!28~9iBGinOmhoRX9bcfJJ_TIeK3bQ$4B{i!O8IeNjgx*Y-uNki)470HrW{(}k z?iKETT$h6Knj$R_he@-sNguafLTpl(73lvYxHfnQ{SU@MrA>%Tgk1$n+3N@y3}=5k41V)ldUS8{5>vv 
z+;>siWm;@8IW8<9uL*Vwl-N0DkIuY?_CORY=Or<%hxdDV#(w&i4%PyK6nRtb$Ia>(S4^&6)S&qCj49&@cN zMCHge_MpK}IlFN}ZFEgx&6K*;4O^QUo3FS2Rmbe0oGx3w~El(&X&PvI!$ox zV?GOCtwS2u7J8?re7W&C>&uaEUot0`?r49{hy;sF-`E~;ed9I1lWk{7Xy3pj=Si}K zXr6I)homXH;(GDc!qdg0E6{p*3)6N-c|^AWtu-xi2=mxcMLtpd7iX}40;-T!YmF-vkL(p6w)Xe9891lw^X{j6*mmF4Mdg)Z>CAs? z8c;AdRsQMYhp8W1QtC1umHccwj!m%{>btvZOV61-kgmPD^bOwOx7zuF`6H=X8YxY} zcj%5crj`!LwM|?7`R2z{U&KE;GTTaCszfHQ-7P!Y^kDmZ8AL_7N5=Kt6smDvg>P)8 zih{Fi(_&KElKy+|m6DMAs_wq#9lIFsEsircbE@?=hGeZr9FsR6oq> z`265~`Rki6SHHRS^-hsMiQ-MB5!S8tWv(ZEngZ4Y_XzUq^ugQU`PFT=-Cgk`EmE1C zVe#Qc;>{P|UOjrRO1Yd9)^G#4Zl4?cqi?67JBIHZa<>1|9&-XeIr<9QwNiybd9|aY zeO8mE;bil)R^R3ob!}DmD+4M|R_|*=G2GtQe`444o=f{?4s03lp;v8Ci^ELJr}17+ zNXqMX^{+m?JpX1|%AJD!3K3aiF~)w5W4TkHtHDj__R~e+=x+DGw$!G##b%M#C~hh# z`jm0z>*z11zOG11{C%W+LPwSuaSskVJ>bYt>)~C8JnZ{BB+B!y#Zo@DEudyqjYH#w zrqzu}&2LoO^lxA_6E_X7HLw0wy{y@tjx?L)u|Mbqe6qBN*S%+S-Qs=IGLQYQ&gR#c z4?CaFc%1iW{`0q=1{63dvjpb0i`{9T?VZ8{?gy;#D|Hjw?vsbhewtt+8#cb(qvB=G zmDH^%x4xcCpONGBw^wrsUuu`rIjH}_p`S-Q9`QC>5@8jT>oi1qRj;kJ`fDgnthra8 zT=%WPQE^4vO*=)gqsg{@TkYaHpSD0Q#j@Qyxy$|T&Al)63+_`L8sHggW=n0V^2~7l z(EruQ#MVUWZMPrW|9)5Z6U*&adsg{9>lEjQ`SPBxo$gxXOTGwu3I?G?dh6B~6|??q z%%su>X8HelS(?x=QC}@>b$9M|bD%t?>u`1S*od`(^PS>N3{+6ls=xIGNB=&lzFq&O zadF!y?K1j4y-V+`o~jt#vZ3itdmpBg%*(OaYj@zdt}#8*!b$>6TqW|ClwZx^tmcoe z-rRk;`eo4jcWHym?`d-+j!whg?q0do-jm+%w`#cRB!b87+OQ9`@HMP5ui?no0oI(K{A|4x)LQs1XrmocsjwdqEuecqS+IG@>G|9Ot~=;GpUd&~U3*(9?lNfT?K%&M7Il#x}I zI`(HXvBYB&0`Z_vFa15mRFqMpHfe`@~wQh<>`%`eVhe1>qG1t@E`7hDzoS z7DQtFK6U@5qN=`sPZp?)2REttc~;5p3I5|l2le>c?M2WukFSP zH-J_gl*nf36;JncJoxQ8g8S8`AmS$zxa@E~Rb*@wTkZD?$o8TXZ_)R4bc-d{BKXm;Zc|6#vfc^Wn6c z*^`P-G_7MzN?qtba8ML*zGZIB)+L5t6OrH{+Ri(I2rk| zBu`OwO!ba<;*ii;)34{yL&F-Q?0T&A$TZDC#_7kmht`R!k(N31PGM(Bv2X`jgXS5| zv>d9kt>|CH)x>4b9nwQE9~~lTz!mGYc&% z-?yG;Jxs4Vymue%Y3JtRFx|$=zPIZ*j|$f~ha9U-vJ&KuVnO-Q-%a0zAET2Vefjp= zye3`qT=2?aM(0WWj}Dy@lM#8j=S6Qbv)x>O&8PNDO;hWN8p74i$SJZB+rtWtd)0Bx zRAociwdz^T^(rZ2W)kl*%l}yb?o;V9!PeZ=ntZ~~V+Lw_H4udpGi@@c-w8$AD_R;} 
zYEp2-{6B|s7YCQSc7MzZ<=?GlIUI9XY%P-)iWc$PRPU;@3Z7-DzQ6zcIpx3XE!Fi} zg;3>ir?YCn(&3Xv+>N&BLk6C;UWIg5SGPH|d~EQ8GM`doEaa>ovMz?ijvg)ZtNF4K z)k|7m>)s;KrUzY}gP(RC)J5w%)w#*co3!P6GkqwX%As~n!HQqke!coLsdR5mdP~0c zJ~CK3&}x?b5c_A=nHHf|ckI?Vu5t*mnJxDu?euRNo60^HOv;&=^&-odf2=M;zf1hn z;bOp@aQVQogEsXw>9*YGkA)K#+rFcIYpr+fmfB^FGLO`-Rcbbwoh#Mi!L*LhT8{?7KIJaeUn`cLrF%2>lNz7Jiv(ZC0NKw|5*Sf$U zA&X6<7XJ1XZU;RFyDqiuFRv%nRNoFpA?)a?oL>L3Xy~t#87H%E6pXFVG=*xfa92pV zY^qJ3bEWH87dw|HZeKlCyDxU`YNwP_sDrYo!uV(6uNmp>KPofwO9m+FG2H5z*O9Ob zeY^C#(Q|T_alUu$9|(qNJ~inZ3YuQDENfq{sfG9JZA`i$Kz~R3UfI9-P~%0#D*86@ zL_A48%Ffg6zWZ;de%9`?hxjn6Obh;Dwl6bTb+r0KVYffW3r3aBuREbU4Q}kwqA&}w zOP2Q)pF&TC`!lx?=N~p|`9bj(q@&|P&8Wf!zlUa3WIW5l3l7x|*N!B^t*g9;goz_& zNA&7xA1d!;>v%`J!C2A0qgmYCz2#Y(SH}*$7o1ECqwW}H=;o?sHh-!sY-m=jHEiT= z5aVS-Y}y>m9QxRHw{nxO5ZayI?cAVNMmfFj_OZ_JX z{tnpZ+s*lid>65h-^|)mBemr!S4DGEVzspVNBR1iYfZOQSD7iIdUGrL6jzb2Y2c)Q z7Vj}GxmF8A`>7L(dCiNPW;MNRKC5)3_Mp@83^a-VNSmr}G+eFNUOu7vX~Xb#ccWAo zY%$2Tr+u~48uvDLYiFVDa*MMj>DXh#2lZz4ZIz-qvbv^ZN&ckV+4+~Mc4`tt2OWYt z6?U}=x9xEuSn4UU^_L#wS7}SyyqaPfiW){V?`yMFQp(?oeQgul#;Xn)V$kjq#B!12 z4R?|EHJ^O%zHS*-7J}XSC(Uh@OG^)zzo;{`Cm6b+h+q@h4|QgS>u$F1uOD7*S-YnB zwz?0G${yJtbGzX7%{kR^g1wcMTJ}`pBJN3q(D@yM+kIPh)^DpGUahPdUwgMUt0A#{ zu;C%z#k|r5?<5I!4R-4^#+9*LB#FZHY&KO%Wiw}3F>}QDRQI4`THDWtwwm|Vook;r zP1GF0UYUJ$9Ok*mm-Qh$oE;skUE~JA0hZL6cCd*_1&@VCcUi^?Ytc-?O)jjSmw$c#G}X(AVw_0QXs8!oNr`!1KshebwT5k`W5xs zdU5kE<#q$~`ikPrQmljQ{OqIcci6tM6q`lKY$Zztdx=o;6S)P>!#%Mph@PINJ*gPe zf;P^qAKv(+ZHJ}@lSO z`c%1AZK{3KEbjQv*vQ`?%_VP4B4pR4uSGO@mG}bZ!V4w8O-sxf^C+_uGQDUOR!d9u zh=%M4Q(+yiG;<6m>0pjWq{JSW$Yg^gfJX){Q4_|jdd9>!bxA=-&*JM~aaf_9_+l@VwBxlDdFx*7jUE*EYQ zwvspT3+OEV5#tEi3#~pze^uwJUuzTrMbQir-+v&F*oQAiPU14bZlRaxnrNgrUHn&3|#Wo@`K7j*r5n_V1V=M4i_-CVKQEf0-=-743@g0*t(V z{46vDpD9Qb9gqx^&XRN!?IGJS0?mO`Xm6mgN+F-W7ATn0IX$>Hp3q0Ax5mN7$wpI3 zPi4{vnAPkpE(5MYK4jJ}qmLmGJ_k~XDj=|}0gZbCXU`_ktHJp*-B@RArYvj zzb+J*N!Ov@=P9(Zl`zS46_pNZ-6(hm^8|JMIQx-P@EyoU^f)#e?v*$G4pgl1Xc*$h zABS|I20o7|B!#;IweK|$=~qLZ{VEVWl0gNJhflE#C?i1OLH2|5pcM4bO3;fG*ed)c 
zz6U%Bm5?621u4B7+*|Oc80q!&3EH3WW_tp6=sRzPe!{GXi^Oo^CTQ@3u>(Li`o~@# z4SmJ`I#5hNSKR~I%2kjaE&@i^I_Soj1nKPf|1$_KLB{Y8e-K%S#)3j+i9f)Opwp2U zK7{jz)AKZX2;BnLV;ndeY{0Faf2ws?_<$uEq{f( z2b?c6wt{H{k6V9kHKYw6qgOCHyaV{U2jP=V1wWe^5bN$?orreAi=0NTC5^-joWvp! z;59I*l$dg(&QtT~3`P#@i3#8&_=4C$me3dSzscYusD!k6GNd%ifXL;{{sQODc{mmR z0V(-(&}9BKp)3O(ITRAjVUS)_0@Hap8je`-9^3)&v@8TVKE|&{zM(I$X}AWTNQ@$U ziQjk+d<@nLy#c(Cr@&;I2@EtlbSr$;ACMz;gv@*y5I{BnMfWz)BEet6PGK$BS|*hB z=HmE%KrL}X%=t5{C(}T^HhwkiGDI8qff_uQ-48^P7)Zu{=Z`=>H5+vC>-az-n+PWL zgcHn)6VXe2IUCJ*&jV+OpMh$0h-Nc;@yi;RS<{b0z*en7I&)4+FV z0tZAX&?y7>t8gVoLlW2*61>+TBWsC1f!z8%$o>8T{^@a`2`p#lLta%1>FE1h4cxs1 z@Xd|@*19J+ex4#XA!%!YZ2@l9zuti|;Nbj#6f|7UFf<3sgC4+H<=G$^v*rh$K(3_92r zV3@6j{;WALuT}#C>^^G_oP&Nqx;VqF1>$8Z5&?Jl9;B?5&`RGIuKOoQ*3Jii;2-c5 z97Q(sN1%yl6cAhkSr5(7vxUk&uETyTd>078}v_<-KPooR;amjsN6en9Fu0J_3ha5$|3TFq&28tg%TLWbQ3 zSYwO9MF%fJ(2{f;W~p1ifA|2O%oLKY2>u5@M&uKR2pb{|=E@=1G;{}$Gy6h1b_gV! z+knQ<0CUq!W&)i~ZKoE4qC0`^&kSID03$LSxB?=0DsDpR{T~Ox;-7~F65Bhut85$S z(78+)(4)Peq3t0z75EK>&`Wj^v4G5eFu1l@pbPwCV@Cl)AP;C7)4+e50Ia(rc;2T$ zt~?4TEVE%OFNXwr63_-)fRAqiWc+6E6}sRacqP^kI|EPFHb~&k;kLt5dz*O$Px=q` zDtCeJ4Q@~!at7@MUYhy%DSREQ2M6)Vcq))0k|4o94en(IWVSy7+u~o#g9G^Yp2O(s z%Uy(K)jjNPR>WRpPB1%|;Yobm8k`&<8vVqKOKBm6wGa@$RF@cp^zM}0RNRUXxMC6L{Yg*<-?I0Mc2S#V{%;F+mo@51U%K_2rt%!Ete zx6B1sV>NgqNG_b!Fav>M)5NX$hD2-+LXnt=L|>rIjAKx-2G!hg~C2_GP()+_)Y?Ocs0y7p1_pY0)(Yt_IW{q zR2bD1r08G3d_5Uaz;})XXXYPZ*i8dk#wfU+Z;(x}i*VHpQs`o$WvG(|Ad*ueC4KKmgFdbn%oP%teH#_J)BF!P74+b2a`0q zmxn%jR7#E!^dKZiD!5+LId5z{F`k%&&F9B6w#E%Qop!NdKeG(xsZW&7THw|G_ zH1n4WfWDhRWE2}ir_rN14H6BGpJ7NKcb#Er9Q++ixNYE7P5{Ea`WDd=!)5^hHfg?-c!V0F0x2MdM&nhhL_ z7clb`0&DDFyV^=%)_ee7Q6@a+bAb*&hP%Skhzx#m3Az*7b$HkTUjv@RW?&nHBR#-_ zdVx7f|Duzah2Z|)#(F^G=mhBADd09EJ>fStVhPALxI0aJCDxO?NJbE4s3l*?TxH5w zAnXBivH+6h=Zw+Rd?t=}#scwM=zcDg!I)z9333t7Cldsp$h-Ih^eH@l5kPd4q36LN zyBDZc3E=*-#|%gd_la3fZ=eg9HT)&aoAAfCfijQLSB#wTGK~QvD4l%(WKun2WL~oe zVF$bsYe1hN?)*ybDZd|BfHvTDKZuRQFXIgu32y`;XdhUY%8_xH6S$I=qR;sUtP|XU 
zA~qhqLV7|Xq>8wL?&W^Ld)Yf=6X7L1BAiIRLU(Z4%q`{)`+{fD4fs6pr9$TjGmS}Q zT|k|$K!<}nYZ?2DxeGhcCA24Z6CFvU6Wg$_+-mx?akcR-O(1e&8`*>S4}AeEgBtvo z>Fhve9F=7JO)ubAz&%j01vEkrrK^|){CeoSnGf^x5i}8b&c8rz;-%zDG6=I}BMi>E z3;Hc|CwL3mO{U_|Vb7kUb@XP=3vU+YiYeh+d=u}^<}x*`5_T>-V4pY$sAOCD6R<`f zf%|`xbKyTCm$9D2B60xP5BEh!0TCq`u>g1BVcdy$20o}kOqnr~8pxz@;oxZgw@Vns zO-5E@v+-Bphdl_SnY+kOa9E}QuL6g)dj zOy(SS0NDauiyPcT)`wX^FJZ5t#l#IVns|fUWWG}GXm`#MwIa?5`it(1<_VV){^((T zCE|ypg2%#*!ZD;hwuciygkzI&C=-Tk#TOA5usi&DCW#4~6)zJ@$1oGeDgLO~HK=9=kyj1Tga7$eFR9Ta@Sx^e5M zjmAuRF|w5$CF&uR64#LfY$)TyUgEy~vG5g)~M zW25P1#*fB3%p}ACzm9J}uW&S^6Lv5#u7MW&lbeIca4*6MpNKx@kFzV_tsBM@eSy=X zN??ziM#?yEro?!`c!rumpJAG~Dp&=g!2{&aTxT!x8_|LIW?~B-j+}=T{wC~7q3H~~ zky-e5@{?eUz>Tq@D&(W zZZO9lz?R~@h?!(h;dGHmIG3=6UDR2;AKo8};$!K}#zPd4n!xv|2Tz-vCFv-b0}@bA zd=z#P-GaR#u0i(5k(h;^g?-aN#23;zhxrfmDx;$@)p*S4uJ5BYH`wt9$vu#9SxRn0 z1+aIs0hVxQRLPm~%aPt_2fvcBFzzulQZu;{l!ATSUDOrcv#)aoW*aq4e@A1gp|pMm z3u9O6A@hh|gWV+j$xM73FmxTTyWkx>iJv6b38O`Y!b*Wa@Rq7#B_!VLezKcm)bBHCX!Z)n}xG1YjN zt3XR}kswFN36inLOr6fBW2It;;+JZh*4Mb5M%XW00@8-EK>N7@JGgD|cfavr!XA=p z$r;H_sfB5fe2h#knJk<`g!5s>RoZJuNb4KP!y}4>MXdq#2o=nrlS!s-e%Aih7hg2rd~ZqeM-Ggo2cxV5}%E#1Oc!u{f3U!}#{i)G>4iO;Q zA-IK1qSL6K+%J5c&_g=IY`OJ(+b>o_W#6#LMuR3%a~Ieo9j%v@Z>XW9yY!Frp6CYF zO#jijs}bdZ_Q&nll$bhC8*a$di!=+AV&$X`LYuBX&h*1Ki8E!mnXkFI*+@Q~XqfX%{jQVyH=zb&cywpGu!cF4?Az%!u}P&31~5>N53B z&2{FG_@?D6TT7c%Q!P;iYjP%Uz_ti)h+Ty*FpRsP%WN(ytS;vL!cAaioEey!{^qq4a=Gbb!!7xY;G-gwU}y$x{mX5@I|~P+W81*)!MSO z^rR&BxAT+d{}`C-QPZSZl9;VsXV zUFE~uy{mtf`BoTf=PLFvSH-Vw8axdFeSOm`0}a{;srFY1%fhEJ;hCl1?+FB-{1o zhfQltRc7ASQyiv&`(u1ZR_V#yclj?XpSNByk`l~`>QoRk*;it}gKSr7%5UZ5q$PX{ zN&jAmGy|E)V6Pua9VV}7h+>Z-l7JLPl3x0B}zhuJbIdnWi zUQ|!d56fJecIc-vdwTBS9OBQ_N+ja?M79Ya%l>U*vDS z3j73|V|4e;Lz2gHan<9;Mx$MRS!Aj`i?4mR{-Aqr^JU1dxT!?HwK zCK-7@hNkYz``dhsEOD*qwkvY)uq7jw#Y7Is?L5P_g<4Z}E@#e9yVNDAEm@&|XEe2F z{YcV2E9h3Q{XI+k4qCjV&o#Q0J};z8RMqR-bQCVlvpeW=*dfL;#r&xaxXGsHLG#Cr+Z&ztaR*?5eo-B2n%x?A-t@JsfaB(U)sNNPK|G4 za~rF&GDHctx;gdC>#yp*&@E8-LAkCnsJLIr`O1S0O{!^_qqUhwi~B3P4;Egw{;tDZ 
z-&%Dd%G7DKiTVC%Yd;hxK72OkRb$$`x}W%U*UjDaQG-Whj*v$m?)l9lTy(8{VyW97 zWA6QY>EBB=lJ<878R=zvKKRgp(F5-VUo&5(?^0J)e7~Sy*|sKk-Ftki)e7$~ftNft zTIj@m&AnW6JY{wbs9Vd)!u0RR>w6EY?-V{rd`ITDY4_O}LfQtp4B0n$^T6od--CMD zYtZ_(os}bt|NhbZHvF2Lt1gRG_7Wz#e~w6t-7~_X=MCFC`iP3=9KYNRWi748>9^t< z+XrrO&JX1p@cH}8n_Yr^Cb+$jI~zY#K1$#68hk18?@e z(S1+Ib>A2}nNXn{)A+n(aCS`kn4cqaE|j?`&q)sX`$okMFN!E~UoMPMeW;vLmejHb zdu^e%!EOAlH_JC7F3K5gHf)qtw&z8!6LxOojrO^Pqf+7%uRmV;RQ7)4@9pg-rh|P) zgq`npB-A4$F~lMCNAMv}m3)jbq;A2VgFkM5tp8k>xvtDinJ)_WtL~Qo$3ii}6UryG zS8IkVVo9|_oYyqhG|P`d0o~f(zm=zQZR)y==oK9-wf<#1UwJdV?(M|HV{c2o?$6Dt z%Vj#58yrs9Z?tW5F%5Q)UPB+E$aV|L`%HM!;Oyx5UI88lg|k#Is=|vR3zq-QsTy9p zw;ofy#=BbFG4n&(TZ9!66<)1((TDbhoo4wBu}vkrqm|@v(Nxhad6sKUuqt?gvlUU> z>QwS7uNKR@852$?2%_o>zYpF_9?zv9AEsUvaoH3!G|vu4z@A(?cXi0&#^w!LW5j)Nb}hY zU5t99@>c7chL*~if3wOawRB@$1o`|O)u4vty2Gtm^a_j9e$k=30!`eX+wZhCmsz5J zSXQ#iCDqGlEo4g@|0`Xeb3E1Q`-IG}g1FLkRjtiy^qb+WZ;>e7($`ztEu{aW0b6@{ z1U_{@q=Wbmx^~5&mP1WR4M|nvlFH)3I*~4yMU1KKvl`0 zOaI7dv%$B*r+L?yD~MdiLg%U4pcvQQ(HLIgQ`GCPMZ^1!&HACbu9{5sckMC8P7>nS z-%Gv?CnUi>8O(E^)N&bg-*sgz12feL}i~I(y5`5f2c8Ii;Ne&O1%@q0*H_KE;2_)9WkSmUMj7 zEY-a#o_Te1-|a|S^^|XsgphNXs`iZXMZYJcrKbuq%(D(< z336_gL^OWT-bDA9^s_B??-lSVRMay${6zPS!N)yM+80_(Gz}8>Mpx;+H`SJA=hkI) z`IVBhq~O`#t<|GjW@@MK6Gb6rCv0O}1APYv{ny1j#Jfvm&@SKYE{`pikts%8HK1up zWpu%n%&*^DQ#!tWO1+V_yP&Z8l=2JfEFobu?}A^Kf6p!mtl%5%I^wBYSfHdre$Mie%@~J=F4H|m zdZl}Qa+~FJ*7k+Dr{o=)X1u9Z!`-ZJ+uwG)^=Hef)*wZ9wXZ&hQgOTS>B3g=R%w(8 zVRAq+TXaD%QGkmWafR4U@RQ$be5lD*y;q)8ZcwgPj#jNx_t2H-9SpAx!|6M`Cq9qp zOcn^Inp`paVj;KaE7wThh~34<#W`Y3j0i6i$(Sp8kK01$0QDot=)p{Y9A+l(%fuQ? 
zf&S2{&CveR4%O#Vty~u%NIIf#_#SYowVj^|xl0qmfXs&!FhxC~9H=kGFr%|!zs_A- zqiNM1(?2$rF&~f>#2w)q@kmLY*i^hx6eog2jGz~B3x7dsMX#lAWXnwt%TlE6qU*#2 zI}+5dw7ZRQd>&aPDVHsj`^)1^t_gQy_WV`IW~DPOyMXYGIc#p)|q)4{J^sH>7$u(&pxRvuH7etZZ z#OjazMkn(}sWiX+qS^L27_xre-u zX{yOd>3#_+P7(~qdvePSr_>{qUJ9+^km`O1*Ab`Qskx@xVMN(C$Y*>A*+wkH%TY_D zjcuVPQR9u@4Dp6Wy-;7TP0?)E%+j9H73qH&-!hKK1AMWtPBKqcXIdw}WENnaX@0}P z*Xo6}mCa+TE9QB!AL7U4Z*&wlj^1r-Hy8|83?ub~ZlTsoYo!g-Uem_uV)X+JJB?!c zIx__lLqAa!R*69~3%q%)q(>PS>l?I2%|*>^&0WoKO}W}!Q=y%0{K$&2Es)-tCJiz* zm>soTV!hG&ilx%*n5o9(ic};SDOwAsJ)w9n^eyMiyfOM2O7u$oZ@qk_*l5$se1pmyI^DlfD!06eS941?{Amj3=ysKOx88pbwB|{C2JvYtNX_w$v)) z2xBi}y`h)kh`y74s$O82U~n^h)34Mo)t}U_*Ei|Bb@#Lnw0T;Qj@FqO^XWJ)5@|sn z;iJg4g5JVhVVuZJoFVQfnJeid=@8!(4-z+vG@|v8VEHcIBPtYhClBKZD1m&3#vD^< zklP3y%Z-p%pFzh1W#gjpsquu--I!=tZLl`{0*=fm!y>~lLnlL`K}M~i7l48m1-ylB zz|%bht%X;yzi^UeEvOa53Fiqr3A+kk33rQNMHIb)xAPeSn%ql%BRcR=R z2Xs}shx&O2wc(<1BXxq#VoX5a?+W^25j5Hy2ipHJ$VgAc-w~#QQo%~0m1vPDQuIu? zOW;ht!24hg(6!5hR%GOkgW{_N_Jck43A>9QAw;AnsU}VkHK6;4qu?1~Xb!uI zc}dTt&(Sf^#~R8F=9d8p*A*xl%LNyO{vr!ev~a562}uzIk%G-bLy-_T%^3h(f<#sf zDt|RM5?Vs~@eepDT#W^Ankj*6^OQe@3OVEvXP(0>6l#4ggR7*} zeF28)F5_cL&YWaT_#enctS_;SoFpKH(}e4VUEogOf-|5bc#*ZlWnwA({te&~xs46O zYC${h4E%=;xC=NEc%nbKkQ`4o5mCeuTnn961F+$!0Qn6*1`RWsIRrcvPwJ9!kdZc6 z8RN{ebE4c47*7gY*W5$uQv(VTB-z+)ez4Z-Vh^0s7B9P{(5VR?w{zVRVStufUp# zXL>R23`f7APeVSv2Q!Gd&J2aNfQ#@X&0sX(@90UNr9@PUkvG1fQsIR868n(DkQ8)2 zsJ@fPe9{hhDW?VZ1d|2tfQWOM;9z`KgFoj6Tn~R}L%j_y$(cru=gPis!m_2?O<{J=0JB(LPW4kep`az##Dw&(` zX&Y!8<{-4*+y$n?l^ zVF}}uCrrp_ayS`I29svw7s7*Bgr5n52lRk=c%9K}RwEFb2|~ zJE9!^@&R_f0w@d_@EM?E5Y8a4VO1Cn1cfG`=9m-TfQNzt>AN$Q4&!wh5UxAHx$zn7 z127XN;!kh|o`O5#E3mO}*B8J!rU01%YNuI;3gn4ivy$6)CBKjg|RTcCuW+pqG z8^ME;3CfOc{W<`WZegzN|I{T%QL%m-~c64cY-@JTwrF~fqEz8h5c2+##;aTeT5 z7qJnjAF=}+3^PFA{SHq}C1}rG*gMQwXk2oEWPfjTiaQuDLL}U%>iZg4g3k_%2)pr1&17 z_+14Lz-(ZwCqaXQ3%>`H%HiO6nF{WmEudrf1Kqy_I^(0*Q_MC7o_?4Qn}O|K!uT>7 zbQL^Pk<3EoD5jnvSMokU?q3W(t^c6=;slIG6ZkWSKvTwF z(ApMYtw0q!3ZIDvKha^rlXwfh1rz)jG%0j}7M3~CZ(sx8DC1xEE#C{2qX(exTR`_@ 
z0?+_X!CiO{45L2as!0P~rw^J5eD*WwK_J2RgJyz1(1-m48pFfjzgzN`VdR*?h#U>Q zM4Nyv^c6;07=X)iq3>rHYKkVn^O_5UqDFWY(xGqqF06vvnPU1Dy^9V4H&-zIoSw&Y zf%aBAehYE~6#LU~{W{>vb_XZj5cr)F;GFv;H;?NKdx{;<@0!By;Lh`#VdPh$pRsM= zMjVINz*qw!5~%z+p!=aPd*y>8WhHd$Fwm|Y2D8O=*bfAAui4AM>3IgeJR$TT1%S)P z6%=I#?nxhLaYz6z#T#(g{A(4D1meX9P>83)Siylv-vm#s9vY^q;0opN(5?b&iv+rX zL%;)Y3t9oTz)1hc7P<%=>xJ-7aacR>^Cv+I$6>@0{4NpLHS9UGxNU-`ZY^|Zhe1Qb z93+~@Ia_uflRyuq*V3)@CFU4=irWpo9~MU2cF?;A@Nc1K;RsNWPQpx*1nx3B@Hy<| zS?FSrK?B51So!BdKT038flmW39l{-gwN(#0j~ZxweE{qEYJL^sjP`=xxdyr$JHTmi z7uq|9z_l3;PPV(~YnVBvz&`2+tPh3YY>Gi2!4)tAQbz#Hy!+wrn&3D2!zX#ngBuOp zNKse}I3xw=J!orL3%-)k$QbZ49EZNgaj<_01HR4zehWVlp~0P$2K=lDxPvpGb>#v4 zl?roUH1`u`WmmoqdRO}Zv8XR}@O0;0Ab+psgy3y)Ms{uYvOQ3_-LVx0C-VIdQ(d;UIILvZqflIIkoC0Uin@9~b1U^9b!JKmw2oc|b8~Byq z3x9TZXhBQj_aZ*vq|4^xxhn86eFL)A1w@4o29ihqktF`08jr! ztO9$6e)?A zhr9wkxfOO0qnWqNAmDR}=$D2NpuY8D0$A{nf)_`CEPy%xCl`o~$AQ5KK8Y^eAn-*< z_{FFfCP4OsAE}*>0wW z&M$z5CNJPc9i_wR(cre64c&b{;4{_0{%jYt>h*>@co;l0kKrmV=1J5CR_V3SaI+p+ zj<&+AcMLy3e8Dx)29brF$0TGExsvFPjRYrTG1m)uh~*G#$isvRI|rV`6Z{J3P1S(D zdJ~?5ExZJJ5c9!_c%GdC+PWpvgZl{ge+u>mNPq@*2lJkuMBkv@nVmp~Hv&g#DeNtl z^8@)+;4?eHRlz7#!_1irs<$NuJ|J*dv@+xP)mSN!K_-Bs^e@hU7Cs-GV6li8yNwSZ zYVlQABF`~lFzV#U7hp_U!@hJpIvYrpk>IS;fWPi5yv>+`=ShnG;ODZgjD~s2TjL7i zB$16dBSYDIdL$#}+QB~*j68(9y&v9Up3|QgSB?iZA&GdhQ>hNa7^5HU#BBr$xRj{J z+|b>eHS-)?)~Vbb*v0eEmzECq>@?6ge(^KFUvL<_g3X*Wkoz{Fh1hF60@om++)?HL zg8*?X7kh>0Vo#7*aOj=ktU%3GKm(l|RM{@5H+Z^~@aMQfN@@%Jmf6lXV6#c!VPhw` zcKR{2BRJtxg}f+U)K72@yUjU)Py8s;lWzjAPA$yCOJMXr!&DfD%x32s59um(x2U81 zOI%Mnl3wT`=CCmx82ve%8#WIAi+1I@Q6Kbb-A(;7V;u7mIApOrXz}pO%*9W_$%VIQ znP3Hxi=QPIi##Rc#Y@ONe7;ejk2DlBdoY8bPNWpZ5y^ZpjT!qIUQsc848ED{Cx|0= z;{CwUB*XT@b+=N?B!;>?JMxi++2e@V*!0jjCEn(d%1E&`$ydp@$E8+VDvfu?Fw@Wfj_DSan zE6|;c5J(`I)D-R*Ivh90qtRk;4lHCAu)grVB?W(G7~Tp5&fVZQD&?L4_jHNzmQJgQ z)yfQ|bQHWdjss?!hhe++n`XE!&G-mr(eH>Uf0)@oFJ_}KJF%nugn5m8mvoR|Hmou? 
z$gWanGp+esIVIkS-K5@WI;j`welfRkS5clQO>hmri-i8`9YkY^e(>g-Av`YZE}Sp) z6ulI_Bc|}@3|%@lx4&qoRIjwp^#S@wO(*57R)^;O%`xrcHD;7GdyT;;qrSl~k$s8B zivLJE!x`)eelXA!Taa*JtZar^l4-H14f$(SY9HxZ=zh3PR3UK}+mUyWQ$YKhgnS}D zNR4tU^F(=ztktBuEW&iLDQR+BaE!leFsRpbDAdPvTMRD7Q3g`CwquN9M7xvnwtBM8 zSvOn#UAbL(Q5|c9o-{lk+}s7&2<$T6BB+;!nTxH*TK_RyE;4dM^{do!t&&=Ut|i?B zJBSnLBCapfj~UB%5TtaATyGv^nP+Kcwb^pNd9iFZP&Z}*vBhB6LvQBaVRC#hZ)GS} zENMDf|E^(Wi;d!iGE6y3aZqtpd9Y)vR&NMpB6tS-CG?YhHeY7tXnof5uzZ84f`4t8 zs=cjqq(hJkSRRr=hv|BCv?-r;oY(i_3x##27cEX&rdf`(s<4W;5?GSvF{WP9HsML~ z6yZZOVY86WOt`^AZQ9<^WZziR_@z0ko(f0iIZyhtJ#YAUQZ|erT=?)DJ*1EQZBfbzssKg)|2R6!=r*pd3lED~ zvSgWJ2sm+=9BRYNoHWeL%*+ilGgHIN%;3Z^GmClnJMZ@+U0qr2vaFf8_s-dSKf9bP z*vZP|A`(vOHV}V)Z6>OZWi`HhW5q8?JPPKvjho2YZ{H^J1VG|aI)@ByCOr_5Y zx)+Wso8v)+KC%(IXrou3r;XF5YCG%yH9Rr2)i2OQDTm0?CAj1ZxfF-?ayA-xJ~uq2 z?%y7-x5TsAxv%15siHJgw#re(iAe8MN&%sM>N5z z3rp!P{{P%|$7=fvyWF|WdBBdB$CV@$A1irMk?U?3n!qnczyU3-p&M!)78{rFEw)L- zIz1-ei>{!YfBrT*~p|MI`&l7)RM4*4fSHtMmmwW?5=uX?N9XB=rBWL6u8 zYc47d%ce-LP|3u9$S?Mm|Aq5OMPAvuvQ1@iWjQ7Li$3RPl3h@r_KF^K?<`yfOJvrNypPrUubf@ljn{ z6H=>n?@hGzTU5QM2UeZsfT^#ZRVBzG3A-?W=Dk&2Q4Xnnd&RV}Tg7PwJ@eM)RL+^3 zH>0GfE0UIqK{zG6!=Pivm=g28%JHhsxc=5fnzO`7cyGRsc9fJz8v-?8j^lgb?c68% ztsrMuh0VrW%hxDpD)fqzYSFmQ+Bm9t)PaaQ7PWDtY6=C-?)<6HRIlA3R1}vjE9p?! 
zH1B=Rm7L`{NjdLxPUkNwO>@Q3y#*_>A5EoXy1r3ost&FhR`YC?ff2XWA~78m@xNtd zN~z*Ey4E|SsB321^z`hVCAZxB+1L0SVErDF6iBnwRt0&)$UPewa=-|_!Fvua;Sjlwk$jASEH0RX|M9{+x%X8 za1nijj%McYA4p7}7uB@#;VQcL-H|O!=hXA%NwRLTUT}r>aXwGA@+}1qvI80YGsorV z^0V{a<($vCQz+PVp=aU_VuN&ydWg9&_D=O1wT32bNyrJGp!F;Et0MKJ@r-6Tambfj zh@~I@Jv_Z*zP;>~qs-mcFN7kvY2s}1gXVuO8f^`KwW;ym$j@P!x?!558l(1rauMD$ zxWR@Nt<9;GF)U4+aWCtB)~s}E>h08s>`$c%-$(8(mMfW~Zfy=$%CFWusc#|~+tGYn ztI;63L;CUBeCa`Mm}7FjI`dk_^qeMzlS)>UwX%Qr-eKm6)9|VC5>v;xj@9?nNUIti z=ZM%Fw#d-NP-0lCt08;Lj&o!deb4)py(eQMrd_SytG}*N5eI?;D=hhwvUX&-az7TnDEX&iw)AL=&ygRbXf2rb5 z!K|Fzoc#qWOSYH4bR_u;m^<85&V*PLC&G`!mnRffQbMQJcXL(4JdH)uNmEJQM=*Qp z+A=Dz@<9a~GP(4_nawlK{F?rKRZ6eyzsg#>zXz7EE5#;6s&c;dY_;6l>yj$R9kI+e znJqUXo=5x+D^y*={s{~ze~{NLr$>ILVs8mi7PQUrrUkc!E--b7Uxs?IT;=eD6|u7` zErl%fxv(x_QRZ5P+md=T>Kf$Sse&QaA zb8)MxPSmWV26e58lE{(9VWvQMT%;rXj3GhBGDmG^@}qJFf;d$EM_%dHwsiSaO z@y(Km;x7ed1;2{Bmv^!scYmbqWJ|-Js9y2a6VAptE8VJ;727PXdCYuEea&ZLCHpCm zAN0^~{VmHQvd8@?`qLnNMVdU#1O5g_$wnLDit#QAoZ#ll{E<~_KWWgfI$>R)FE>w) zbtU|&wBM4fS|(I?O(-2uI;*^MS&O1)`TYy~ln%7bc6ARBVlVky(~PK?n0Jvx#EZyp zl{|5VO4@L@HbyoORj~haY2FL^ZFml!x-b28c4=mDsy%gT_U*!*#cxV(+q!xOh7O8J zn!EAGYoAMu4L_m2qARne#RySdEK==WVsA+9ZsgQBf7((?yB19?m|c)jRIg&2N5iy4 zPs(Bq3D(mQufywF^)bHq7jbJNh8fefyHvF$CbS4rFO}${Wq)VZ%ea`owdiAZ-_(ph zdAWy*mKW|TTwLDVGcS~lDh;m_W+xhB+8P%C1>|VezZ1?x2dqns-K9^sW}!8~_1+V< zucd~v<)v7$y5zAVG4z-ZArX=`+CW%03vFI&EsXvYbIclLFly$gXDhbjLG~8y4F%lV zB57t?CZ2aWd+YDxKaQk^=X}oVpSQ7qFMZ?~7Qo1#me-Z9#mx%au4`xgS-H6Ct=PMf z@`!+GhrE{fhE5B1_PuxJ+85cm^1n)D70rF$_>TBhYJ%dR)@N7~_QLuq=6kF?dO-M3 z;}HEUjanW{DB!Jb9=qH%vhY^+?3{x+D>9y^o=*#8&&|7(`zOy(a@;1m=F{(FS=MI2 zsyV1KXf|2j#&3-86!kic)RBtKWKFa^_ru@CrF8hAQ*J@Ay;SDOWZR>k@lBFT%3s=} z#;4X1aqX)p<5x%AH&r+8GhEj^km_(ZvQGFXu)VBd?v5O)pnd-NOmkZQjPW@Q^U#9C z(yopw?s48cZnIhvT~KLU_&&peFf2+PQx`gx&kfB zvYqz-%kJWTqMR&F>(pv>Crs-j*T=*~ZZ^%-_EsNPx+Q0jdTb-+Fg@9OxwJ>#rMxGF z=L-hqglGTAYg2r>bXSGeJ5?`q=O@A>WOUEZ-|mwiNNo;U!rQU?@I)K}DmcAu#{;%d~d@EwLNYE;=uet~L^ 
zwukrr3`XH?SF%30Apc&8wd6ql#=KWW#_~QD`8KCp5u6wN5O^ZEwGS(qV*8t(K>Nmd zW1}#g=^s-EV|&#AydnRY!GoPV^*lbm!vDecv>3D94>iYrLDDo<(n#J$)mirha7{d|l0wx^mp(P_?A`Ti}!nx|U`jDZUd3e|pbu(fps2iQca3<2%!xtHn z1gE*4+nf0}qTOU(*$BygNXpz&IP@RQH_W$;NxGHF45~BCZ(pGzH!irtx6^aXKC|Ra z(XFx(_Hz|zNlp<_p5^%AYU6no$Yh_f?V(Fz3D!ZEWLaQ5sj`Bv!mXIA8)WWoS}(oI z%nHusOOOCN*uUE+4Zim;cYkq5G6K<2F-sAW&X!(QjyLwUF1F@`Ei?SkJe92{dXb-{ zy~$Q|Q|CBmxUZ3Sl)YC)v~!yKvwde->xwq+mHwZBvGft4GHT*vzU4j@rZxn^Hyd4w zmy(;3BH2PsO4t?i1m%C6FOVpFB%v#VUglpHd>(A+^}6x{XVD_*PU$@SiP#&%6&DTH zO%b{wimsBqxEY;?_mbA54$}K<)yq?yae93B_W6%S7gRep^ zlxC1iAWvCD4p4X2xzz8J2NWwLqhJDh5#K?4;-+}^IJ&rZd3$(fx>~!2y5>4pJC-{Z zIcIqKg?qtB z%Y0sa4t>Zhsze1OX#m9eqj!sMT<~`2O~CHgGIfz`tQ>ltvp`V^2SWZ0JXuj$ zuQyg`s>&vjZKQ`a6OGTb->AVbht0s>6LZjL$h7R|r}K}vBQVSVF6^PsD3T?gpl78L zeqY{NR)iklda%pcqcCs1Axxqhcp5k_xbyu?D3ZAvn&H3aT>{gTg5Y<5E&q@}T(D2D z18sp;mdBvXKSz@BHdMBJsP?mds-~8#8#P+iQH^UJ$eU12$&bJxF9KH*gS;iy5RJr@ z^bdL+>XW^eM-gd4KJt}pL#@Kl|8<0my+9k7CoE*2_(ywV{44z(y(7E@f$-2g-+cFb z@7JIu^wdAw|0bw~toUc)7fjJV!z2b&BBGZ3yXKcZT(41gm!Z{-x3$6qn8y$1w$pC{ z+XI!Eh5SKxO-L;Uq5+t;9cM08fX6dIzeL{F9p0yLB;Yhjcde zTxwGeR4tRP#CAdl_;}Fu{y+|<0kjv?5-5SD(?gYoID9=KXX?;lVr8NNKM%=k52V&l zLmuaE^fo_@o*XdxUVD;#@j+LhzsKsxbBy&m0-b}Jz%753U?$y-Ka0Mk2FlmSjN}yT z7`b2BML*Lx)v#34P#y-XUR>YRa9g#W3`07K*I>3f5{nW#u@3e-*O}eH$OIQ?{0|s! 
z=oG&Nze&C(M&sq^YIt|8j=#rGB7?YmaEmqcH}D?u4T4X=d-gdUPK7rza5(rVxHY&K z8k!)9j~gX(fE6L522rW9W9njESN$H%COJ-xBzsGb$;Zod2AyF_-_ChX8PaKtK<^_a`w|(57mJgaYIIXxi@gT^Lw_uUts+KK%O$U=2y!mCej6fhnD?PukQrOZ zg!$^ZQk^$lE;r*f_!GR(oei7|ytkNE=w-Y$D)M8+`{Xl4o~n;Zqa@^y!R_%#HXk~k zu8_s3A9}QhV+)91*mZtBbBIesHi$FX4YZt_4Xr|U=t6mk)g^8MfBqKn6hDVI2I|X7 zYym%<{lYcnbLjE@xnA1)(HHFx^B?uq^ErLH0@s6sLn4hrsmj{$(MG zdCbfM#i2eE$6V#YfI_fN{2N+r-t&4;!R^rVy-tV(kIrv&DrnE|!LiT}|3<8)46-YV zUP`BYo9u5{mh_ZlE|mn0M1SM2#Hm6uWFl4QUSTz~h!=z7#LV9TGRZ|y2&)Jid5f?M z^ovCP5VM)728vM)_CZJtJPag;XgYw~i4_?00qJ)cd6buN{~;oO#wE+A5fjf9Vw zNS&7E$-64gDtSekJW;+}-bE3qXenDnk;F4B9eV(cCd;rE$PpktoDt08KIo+h0GWF` z^O+|wKbC~-;tnxd_AOVRZ^oVp9SEKdMh0s`uxDgotDp6d?$Ms)z$ftUy$glGiXRlF zqQDm+nnCKTmSm9pfoh88iMl}fNO4ZFQSnZmFDsC?k!&XG5hx^y_1IUu0(5{^pnP@$ z9>-gxE%>V1;E%DZpxwq{I+Pb;IW;4PZm0L5-u@GwT=zV8gsY!pt{rjIc2;o>c1?E8 zb}N0$g2Ne#`@%ir?}`6nGswG=E;5&FsI0fNyYzw#QwZ|IvbS(=xXF9uA95tP*B(*b zsk>A&X;9Wx5m1yW_CTuYyfR+(P+6)lE6&JPN)AJg_CEF+IVH^Fq|Ec6KJd;r##`hb z;<7tX$0mEE{i)4hKjpaUvUsArU*I$JWOi`7g^S2MVC0X&%Rvv##(R)iqywn`tMNPn zp;nPTnB$k?Ur4JY8r*89rPJkKrCi8naxHptKr=C#;^1Lgf8;FzQMj?-+%tifi=Mnp&Fr=;AET`nj5MeniL9x8z>Yi z2^r{2dIY30!+}FWfJ0ykxB+-n33>D`l6q26T1S3Mp-`<-g{zh;2Pju5oyx_k6RNk$ z=kiI?osTn zT7r=u!A$}3&2FImH>9tHQ2G@O9pOxSP&nQE5y;uk0u8-8mV;*y2>F(HL#W6WBu5+~ zCJ^nR$Kg39!L|Zx0D<cq!t=8=&&H0oKw-(4z{0T+kf75AL5#$ie>& z+HE4V5w!yqCY;Y=Yq9g$THJds7HBk8!Ii^^Y0#IHh9skPvCrUhY6Q-lulN^ShVO^& zrbT!o@ZB6h-vQI02lx?e;3=3bd;m3c5>O}Z@V}t5svo49qeKJp71Wzq@Tra<3xR>~ zA9#v}p#8yrvlmphhsXuEhyMeo!WZ$5NCBbf1~^l;0?p?fFcciCk_Xf(-5GXTmzR)A8@uS2Jy2Z7b+AMfZMuBVRIyg`Q;2byx>Nx}YeKTOLq=3UB2^z-wp@-nR z-wDqITft}33g%d6xB~DJUF63L z&w$u5_5WQ+L*N@;0A!C8pzU~oXtElpMSJ0183#_ZkML7(LYK;JSVLB0U4a)71!?Ea z=puA5I0V}O1*A1{87LdK;hyOz#PQ3xBb=R6^4Z)m@WiIEBfx1jkBx$UrWx?l-*OW{ z*_$Se7uEEm0d>pK-7wsPHJDC*ja<69g6TKk*sxZ6@$K{scFK8^BRqANCMqV1o2s z=pyuU7l2ds30M?KkSN~EyTRADfSU*F+(`Z&JZnkNBQgmz%SiA8oB+@05b+=JCU`;i zpyRLtNYQtM=L<9r;wkZicuRzcF=RFwPoC7WZp7>2P2uYQ1+Fsza?{h%UBI>44K9g& 
z;E~x5PLluaK`W5P&;hjrt|9?stI42o{*4_(v%#@+9NZ~Cg+4+HASiui3C;~}rDV8f z82B8!^Yi)d@O(I62QC7~>;{!J>;bSz_QCh_9Na^%P&e`ss3B3{2J8v0hPh%l=vnL_HiUa< zE!=$v;OVQ6B*N7k1&+ft$Rp@A91i@Eec)H8F&;Psr{T{B@Iie;cLP6Z4?H=nC_ooo zG*F5f3LfC-JmNnC;iw9D7J3N3gjmrF3G-^=KzK7a1umnf&;}XF=X1&Y74WCDgQu~F zFdAH2ZTV>6jQRML(4=_}^79XXLNF0s2%b_Uu$pdSX86YELoeM=aN8UK@8$#`9Wn4p zH^7rNP|Oh9A_Q_sTm|<`RcL#=2cNq#&SOTb6?l?50jK6N@=W|s_`wf?#yf#42Tyh+ zH<>O7NuI!7%9N{EaOHXImOjCV1$O z8wGx&ePSE97Egd+bRQ@=pTK2^!*jV1RlsTF94^AQ{tN2`twrd0Kd!Z!p44gr3> zx8faeGS%mE_@BaXa4fF@debaqFIog=y)tAT(3s-TT4D*mNO&i%2l~z#$lT|EKSwY8 z0_R>+t}&>i`VT#akHmh$liU-$ELYJSWHG;yZ6kcZ1*#p{81nQQBmgeHo&0vj z%okzji7|)<{0Ab^6e|=CLr=#-WIWvYQ&0(32JE6lOaq@k8hp<-q(9%E-;0dE8O$XH zIW>OK&lZnCTcCu0Dt;l>NS08A@CHyu zEFnhVtFbEhAZVl7B79}Ov1%k9`%i4gy=C|D&ww2UU4GmEp%5{l_rdi$4xLUYDFaa( zDHgJj$B@Jy$5rFwP=dIDr3w$&G`c2DGg?6aZ{9vG#D(~C@JX(S2Y?`z1l>d(v8%v4 z8IHBXtKxsqnLu6Ihjs>M&~kAa`WQ~4WiGuP;6n+H-kKeRc)306SqK;f6WG6k!*C z0DMv-!5cLl?t1~eRy%|x;7INVD}$XK&Nx8vxyId~eSw5f7cK{O(3ae4_7~fgzl@X< z5t5xlw;pZ$D3!SCYp?$Q6 zNbpucjxK>F!(8wSz5{w-W!NwJfG4sOG%#ifBamx&8u^S!LwAVRgpSZobQiK37x@JC z20xIPB3l7`gV$UV+fbB99IB1#1CoA%jA_Orgid;zEae0KBF}gKMs_JUi{iLXp@`t1 z;0Ssi7Y^%cSKw#87H&gVRuxPtzJQZp3t>C7Jzn7a^tlj8FJ?E0*RchdAXuRPhsH_S zBv}qsLCk~eQ-kUUtum)64!rpbKrscE?B_B_6%0~Xzn2$ zP7h>evX@vJ*AksC*(r~bQsg7aI8AKWTgymG|FGBk2I`iIM#>ZF3MEI9;xw*=yUtBu zF!))=Y_}kV`xo0^-bDLawvhYnuIZW+7$HVbpQ$#;*I-+J0o@UafWBxKa+=xdZQ`h6 zZ{qG5Ok%o)2K)c@weVGPx3~Y}MCrwth8QJcY$V+&G?=N67iw!mhfY7uYq?t&6|1Ou zvF5Ub#}-l(DLqLP5~s*C)Itw&C%Ha(rn|e8%L*e4Pn4ba9>r^f^{@0UJWdi9T;to# zcaWuMFNR;C0|!b z(C}WjRq|S#$WeS6={D;Uk0o)jgG}#q?ajxdC&UA!IkvXtv$P={<@8sKvAJx~j-RfU z?y~Y>xgFBa=g8fHv~$e^?)`#7Q(;{jX)#@vFC`jFhnu#>cTc>Sur1=9 zF-9-dUDMRiT+w_{rI33<>E-qFc4i70@wq+9_XK8h6~PV;P3eSkHZVvvx>{oMn$5da z%hBXw&G6Q;GaANFN$-`MqF>s!=AX&vmFX)s`Zfo*rt5i>oPu>W>iyXy6^6ug3Nu!&-Y6O@Z%~hmXNbsrxKCUi`2(SFTEBa&? 
ze%tb)_4iGAeO&i_qNAkfcTQ$bV#z@NQRt~?2|v_g~JmrCB`#}!cy);HPfs#sUhKVx~y?Vk%X6Krng5_(OD3G{UYY$N?B zS;f%Eaua&CeoMa-h2Yezfow)!VJRpF4RnJ=4PlkUODwW}+Lxwzng_^{UyKZ&I8IowK(pJy9?>??BOe`%Z5| z?^)Zg{Ck<_^WM5TB0VJs$Pcejr2HD z5E|{*dk)xcK?pQUmTw%Oxr0;<1bnsGx034NFRDCDAap;1=S$Ru*NVTCPjhc&)**HI zBzIXse0Jl)quxKncg-KoD$*R>ZNKc$(sHVWx{)dmPYRLl=I%~`zqrxpKGIEH$L2F- z{yXe(*@3WLkxe3pS<}KMY8p}-uqToU>cNTwyv3!qr8-W)(_0X%M|IS{*L|Sm+{n-% zuBWtaggMDlDKGoFlI7Rvw$6vn zmZ2;7FjZIWSLrhPeZ`N`S?;HNBsrc;N9*z{LV-X7UZQwz*%rMqa-+p$nx;Xe>EwCk zeuGc7irrAQyP#cZ1E<_ygKr?Mty!#;5Ha8mcOnhtpRJ#2=<4*2Z6mqwDk@%8+SAGT zBj~+>2hNA3p9?L8ZHt{YI#f)qldr^9_|G_{J~!I|>nZ6YFQsO)dWWLaV80zaBL093 zWM5%OXq+qG@r&)LpB>vO`inM6mL|Wcyd$5cXr#;3jUkd<*5Yv$QvbeCF})bgQAXqe4=!774#Vh+2*G!kp{?11F$6~t`1@a$QDx;u(2)WYry4Qxcs(oQ2mRc``Mbe(9=m@Tx!yb= z=Crj$e?l9jpQ;=Ws1jng5sD$ea%TX2wd!hEcE+Bw3P7>G~LLe~m()XgsM2Ct^dR zJn`OL{=Wl*1Fm2tW+*#`FTmWgN6HV<0Ja?*z6Z3k^pjO4q7%K>73LV|u7Fp|Gq!1t zZ~hW`n7^kZ-?4~ZL9J5;CCPkGzXK>4^Q432{Um?ke}sYb2HzZCPi7wwwKpU8nc0Ct zU)$h#(WGo^{$za^)?e2`Yti4($<%KYpHvEsL!zXQ+ltFeTmu7FLwA_}V1iQu4u(^T zI(kNZo;rn{rT$Y@);5wa;kP;M#W#v8S4^}QmY*oivpowKg2{+hF&<;KWe{m>`ADl^Ya=*jctVzm-f>y;e7sPedOE zgYJo5E_9W(g{XjnZi-lmT}TzWtM8`&8h4dgAeoNUWf}$YLN|E=axagN8|Zl?latel zG{-+9w#h%Mx~XnUL)cxEkj~fVhBvXkG_BWd(F{_pm6G@;Bn8btnlL9_=JFq<{mRGM zN*&kSf_GlvDsxJlh@C)Zz*&Eh_!oAa948$l{ejoy4)|XJHKS3eFWr~v&eenru)wVh z&U79xcR4CC6)=0M%>N3V4y|DR0CiA_*C2~XoGJ$2@?lAZ{I>d@R?ws>!)5a&KV^g_ zQ=h00Q+HJqE1b&t@&vLU`UIw!llc$9XP)=2sUBHi68)TM#`x*CtV;+XDOvtPJq$-C;S6yR65>B4jipZ{t0d9V{!)>O1=XtrDE5g!3|#}$gHz~B z35)Fy2WEfSM>QC*K^p^FJ7s#qfZ@_BxNNmJsvrkz$q>?k~B|+X7SDfg&?JgomC4iJ&zwDX<_khrP*v z6XuIwkRog{F$%83IJg46aRYgUx=-a3`+y>N9P*j1Q4F~*RD(&M3erU^B&ucTXKV%W z05TH~u;bWw{4g1yT1#Y-T0kFIBqd~zWD`NNkjXLGaLFy`dnUvmJjoAWm2_lqk3Z2D z>$L(QFUwozjrZO0ef2H#P4W%#uMDePY`V&12Phe#}zn%Ss`DS4awmm;9)m{S-TDEN$!xCWhpY9 zybziawo5lci|a~aBvvHW+|G>A}chVQ;7yT`QD?*oO3FKXl^9W=a zk3jIl=5@dNRG4mq1MD z&zRo`;npw;|eVglKP z+D28S3dmwe!#^RvkY=hqRf)O|-(@5*9cIGKK_!TXZukmdJ*R>;2(>qm){MlTL2~CR 
zb`lb0t3Uzz3>?_eu&;E4H2!IJ9ovI-FvZLub}QSQjbviz$ssYw2KR=3)BV{F+Q}GU(+$qT2)`5RQJ36qsFQX)W1oBmV zASE3QiN!=9O&lkGQkx`4B?IA&W0~ZzbTp7^nn}J8Z7?ekmcIj4^eB6Q*+M@HO$$v3 z)d@*MXo!GbkM%)aFePw5@F4Im&@Xr>w3}(i&46j|ILPKcM8dG&pg`5Y&*Gbi_T+hT zH#EdoA=8Ln#58;gHVJhgDwrs~h5TnP$h6OZZ2T9v_iw_Kb_eADvfy4*!`ihCdxc+v z`9(`&FJ2Sq5)HA@kZ~1-PC!L{OFs{F4;_R?@~5Hu!6$*SfmMNTff1mo&!l@Wmzj5r zhq=Sf;SY<$ARU;1J%*pMA878YA#Fb&9R*p^WLTqX;_vW$93f^B81MkD5c`N4WFnP8 zQIZ1c88oZb2F`LD5+k<}<@hXo0%&WMh#>wQn9=o!FZeVdBiF>*K=StubYvI=5$1Gb zfmV4n_$L?>8Wq|dsvEi*{206dxs0Vu2bKh?f}GQ`@ys-uqCKHT^nbL0nav2ySJuk! z5C*_X-4{24#@&IeLd8k8NF0*gQngGaJ0?{?J4$1TpIS$ilTG0ntxpUB>i#(5Z^&Cq z2@CYzFC`LS_1g@;)i3M`W<%G&^zRSo518;Bw5}rHjnyX1DSSfu~>t`&VPh6Sy-T$`a*UU`^~QZzk_3 z_sUA8V}TEHn)(QDYF)_p#AndvOTiUXfRBV0kVe3Q=!IuNzPdj)3^KHdkW}WdWq2*T z5LTD^kjC!<+EznkA>{je@EEs?`J28DB-Oco#6Qz_-#f}X*PHL1?9=;y_%{Vs0S$aV zy`5PI`Ry){f;|AayAfz4+%;W+?ERTMK!%gqgr8_it|EVsUh*Q@m#j|KB-fBA)tt)w z-&ByS0~FSVkkvkqo&>(_U)XKPn&n`7;S+9xu8d@S1?bz?u|k+RC&Dy&4ro|=VG1h3 zq&6DJM`qZI=7ipa)O3G(7hMi6w7&}2?PtB4yCsR*|MXbJtj<3|T4-6g2NUbQ&|~m8xSy!TtnO^kHcq*kmw{L<)q$F%z=q|J+dBEOdSQX z?i*Q8xm3PE)d%r*d0Sb*`5G&02-P-IzuB1 z1uTCnhTzrU2^vUb;rp=!hk-1={POa3RfpKH#IVw*6->E57iSwVF^ zLGPwndIU3^8N^g&HIS-)!s+-5{wmx94M0`h0lMRHP=l9 zf_m5tG`|246;$9Y=m>6yDfny95gx*GoeVp`1bhN<5Bg8`;hDG}Xb=uqT_S-wa|!mG zsmKB`8K(D-(P;P$$D&)s8nCKl3->_*DS>R61BeDv@ie=Up2Fzl zpaX4S3fUK2klh2W2`85ex^;if$TbqOKrI;z9;#O8aeOb94$PR%n3SxHR|9g=a!f~c zCG^Nup#X`)-iqm*i|>NfB37bx1-Y;pp3r4H%UjVbY#id@^0_a-%TGkMfcs$)Xky** zsnD)-5j4OGATHDccgF`H_~=31zll8t&#RiV@+U<#wiUkyokpM0t6~+dC(Y4og@(i& z$l#^$k?c$M0cYiG%ujZzxDQ*6O8IBBf>ClbKOQ-Q*C#Jw1Na~GXqH4%VBKzm>7h%; zislQ;*>Yx#_=40)dlEZ^kIX0at5G|wLRH1)!avMYUyS>K2Vv@B z24XHR^Nz6@9QOm`#TfK&u76;qw;}YocjYbM>$m|s`b@C`x;MrNow&o|ORA58kxUd3 zx-Pv+m_z)Q%#u9CfAGyhmY{@Nf)A0_l1?O?;vQ~1`-NXfTvVbusTw2SvCZkt$WEog zI8`@P@{n)IOyr;8@zOq$b@&zV9>!1`34<8U&gX5!QF%uhg6*Z}`%IA4?TWGFPHYx^ z(mB4Ov8xKZk@!Lm5|;oCyS^XDYV1Jp3VtH(_z6Ky4+xloy_tK00vm|U76s;NuwiH( 
z%*0c{rLY9M4-FU}$##-vIx0q>5C@c;s{FxIZ>@R&bL!vS>*&;jS;$eLq+iT`svSd4%5*i~0USKSYIZ zAq~_Ce6RR~yUzt-9p2824D|sf_dwuz^yKP>=KJyBYJMBJN~WM9_!_RHrJ#{o1WNLd^?26B*L zY_V{N*d%Q$xhWUB$=a}Chv@o=PJX*0z;bviGoDqz!A|Dl+tp^LYY&7P=8BM zWee>dO;`B^GLw|a52&mvUb>8Eip|Acl3U83%5&r>zMnlGDAJ zwd`fM4ll$@SShthu2HZOC-H{-Am0hvM+?1CvsCGo|ArGkCX1CnRY+Cml#68p@uu8> z;ISahUBo3420zHo^0c%!w2ySoauv9~c^lBf_)6et)&$!4Ck3M=HI5pE+~rRSOVRz{SJ@)eqwo2@_=fp_IHZo2PP6A*U^oAS zxF*Y#ACkq$!W1ji_jMAZ#~3tJ*UeEsP_0(ys9&o(D1J&#;XA|*yp3-N&2&SM=3J*h zvU`>DxhvU|>{;PPJcYhvv_S|%&w?jqEBH!!0GodtfIU8vtr0ypnfZ&EOIHMrde6BL zcY>>?<6qk@TZui}dC*fg*pAP~8pGUUgS54rQGU@h(Z4W^HKrJv==W=vYPaY|nC_b9 z>lAVgz7gm@7YSbCmALUy{NX@9&lJyC|F@tsSS>gNp6~?p2L1#ZWtL+dfSp)J`k%a+ z!YVsLsKjMVi_qlYCx0tXl!K~xRkpOOYFX1#wzxy_)8dn5FP&^~lK2)U$sXia$}Cr^ z2k5pM_8C7K?->`F*s!_g3!qY-R*Fo; zS%PIt6O_2BgL0GnvUHL3seH4#icYC*t*AlXN7@Pn?9E_~d#+7Z_N(M->9z7G+kuKE zW#>yQ<+ogSg11>C&k3jT>GDpxY}4(qp~l9#zqA$lTjuo<<021-S2Hx1k3fflA2S0* zfLf5qU!hwBU-&k81XpKgyrZd07Zi|rvTv#z>TjwxN~7Ya;;OV#C#o&Mki)FOQ(KPNAc zTUCT^yQxO__wX`Pea$`DBS|?`8?ViVT$M_<=NISvl`|oyZO+#0M%i<7$O2!<9$SGk z&yyCMDUOo<(ats}L=;8Lh)9Smi{@iD$6t(l9$jt@*M5*UksDS0)YD}d!fWpnTWZzl))&c@YEXdXK* z0x{Lp9#SvUzS4`@%Ca=Rn=jf~-PWqKSAimTYi`q`>W->RESaoODMu)3s`}{#3l-BS zen3L6xUDh%=(jN|qK+7+QjTC#`-9^41();I=Eh_r=|_JT{l1v~Df>X~_FTw}7E4^f z7NK%g3A%y0wR)HNM@+rSbt>T;gqI<_JEA7|Z`5;u!^_MNRbV9+%tPg4Z z(;w$lDPJ7iij9<EJ!x;!{>z+^>&ly)>&pseMd$0vd-Id*45()86uC7%zw-RJdXY23KInD& zw#H%Rg0LuUZE`Jp&i~nUzN~$2??3lbYW=>Ed7~u4C*jXPFVbl0gYvxoU|7HK#u1Mq z{1M5~JK}aF+>O~|GzOy@J4D@DwNV~P zwiJ0bf*lz8>B=l(G75e-PVJX@pzw-a?f;9l;xAR>EvKS0V){lmjyz($Zyg_n#2$>@ z8W~|+qnskyfsJAj&)|}j%gGSs_?~tmV^Hq8Vs8cMYUn>L zq{t5$SBDRb_&0J*R9@8Ws0UF)BKw-XnhUZhYBHRS*M$&QVey*mf76Df&Cl$YL*x$5 z?NKnk{BLh-CIIOV6EX;GPs&y8%%)0j6T+*kt8%-_tjfdUmPB{4wl`)fcp#!gg}Qlj zD#jK}&zhJqCUaxXt-^NY3mi&M!_X*9uW}idgpD@`%{440%K}UP@X3}`!*`{Inu6?M zPJ4~E8-;jw9#G(osg={SGV$y&IX4RuD!zHX1Wz;3yaf4;-<5sT*N$>0+^O27>bNSg zmAA&(qqjyd=3V-ZipNApG)g?qwDg2ZSLE->X_<37Z+c04#|>|<&?IrH?3uo$Wr%f7 
z#9xs^BWp&Uh`b)z%W5#)Q_qoACB5RzP*10<_Xr17|5+@imYVL8J;+pkFN+3eI3r7E7Szf6 zoLiQEr=-M|?KxOdYs4<=3(GlUminZuf~<`da_RmA$FAZ} zId9Wb|3suaGw0^4&TCS1yCTn%A8f^xux>sAIgag;%+p*lSB}~hv#HXCN-v@pM>Mf0 z!zLQ!>Sa_HWFY^T8R)-k-&C@*uy?_@f^o(5Y}4EozFl-SdO|TvKhY$Hm0HZ!hvDtQ zUxag(EaMt=hV&&d8i`?Ucy3k@MaQz=rq%j$J#9{AlbmDuYfD$VQi7GZVL}6#SboP_ zQ;!v+jISa$#!iUum7q&F7dtI#P5{&mx9Ix2yE1(%R@p)=lP(CaK|`>M!an zyeD=H8N7>ai%P~6^)8xOve@SEdoDRh#nj<(>&04Q?JsMDU+ySVkwgn+~{@LCzrJ^`KS0vhuGoNz87%ZC52!+h zedd&KV+0oA3lCY;mg%M9k}IMSw6M!NOmAifXw{tjE&jb@?I46u}$}p ztW~UqO(R=MBjg*D545z&7v3atU{rF{u!tTO&e&BSsoku6Pjx~TunR&V-*4yj^1_mW zVrj{xvVWZG{X@Ya7lzf5&(YywsTQv_BjT?J!g|{>DD0;$S~(C-up5wB>>po_eOC#c z*D{MqFHAq3aVoQY_N=@*CAFQu12ed#NGIa11j6#lVVX(C+2Kzj8%Cdqz8XbEj57B& zF3~$Qx$*6|^Z_P*T-y_Lc-a zTu)+^a)f@jDb^Bh9d2y_gBp9-Z{thtX9Z|A#BF3LyVL){IkkLo(Vje8&W7B&`E3i` z#mmYEIeWwEEl2L)AE@@SWJQ_Mu0Ex^Wn63S6230H#7vl)>aS_*YTfELas$zgUmeQ! z-ElRyy)FGyJhpgZ=|FoIuPs=eYlgLx#i$k9$6C2=uy(onzETajpXo9=m4Vemy7Etf zedTbsvnxw`7PQNa$a$UfD(`aP|8aEI;Z0=U8_#5B;?}gOySvdADDGO^VR2_!92R#L zcXwZWad!%(KwIkWO`A%R#xs+V-}(L?p8bR6SvJkwx%b?2-uLrP)0~DB^a+ml91SKA z4v~&fJajs!->UJVR?+gIDYR*D3*QiIUqTP!HVPIg@>OMS{_f3gn_Qne%bX_4mrEOk zJ8&(M!&Hzjtb@9=6uaX zsB%{wayjUns|;0^D2~Y1NRlMorInIhLIECyaOiNGu6<6MN3%)2v%#w=R{PwPMyz0C z_&a5x)J#Y_NHBQM<_{l0oX}sqMN$(}&Y-h|5jMLkbv|pQB8`~OA)d$t@sXx^4 zw6Q~Ls_{0F#EIa|mK;?kx$X2A=Naym<(1;4b0DHRG#Y8FipTO}tYhlO)_VVDon z>`+;E8g6Q9n#XD;wDi<EG(O&oH0*5`6 z+-Xg-oU#0CFo1mk#3uLRcBaIT8CT!=~z%T_u)aNg~rc3H1#bV+s!5HH6o zkDbB0(ZhT<5i1Il?2v^xO;YAM zc{xQp&3Ag_RHIzz@|VkFr_<6QqB=f_jX)9^XKE5@0GHlD`%UoVHN(V9Mfy`i>0(;X zEP)Aw9Q_X;E_g0T;^)Eqr2_4M^rM&B6HWbe1uZGf*ENBfxlKKqc4^ct-L-S{W&_7E z8r=6oIh(LU0!F-F)~s0Wd{$*uJ$G?+ZgR?0M9U9LXG(m;2Ekn3bgThX{WQ3-6WB7S z-UhHsVZ!Uq1kfE|lKP1HM)jcMSU2eNx#Gn}1Y{(WQ@J8hj!QR) zxB?oVfk9F;q%!6qo=7a@ge!qbyp>x9tmZ&u9ON1PguZ+-n*^!QIpBPC0t&1vP=Uqt zHR$TblNW5CEzL0LdT4m0@2MN5MYO0++3o=j-6NJvdpUKPBg6iJQ|YXDsbs%YA@8o} zt5_iSksXn26M-upk3cP;Yq$t%|7dW$6o9+7h*bdHx*u=^o^xlS|G# zn7oQE=A95c5nY%3lE%pf${1-c=_bhw@m0|R;RnGM!3@C|!Dzv5P?=8>-WROpJMn&D 
z^UznobpOqYA^ma#u*zStG`)a21ikGva*utN^^EzEF}MAHsX9xvU9{tL*V`wUysVe( z#pFcFLEC@|y9qhK*~FHEgK7}-H}HkevVVa(X#sj2*!t7()i};u&b!L<=2!Fk3X(wo zk}4b}Y7s?dqyN&Yon6!b`IxZ{yP@SyC5iG>d&$Bu$IOE>7`*23z= zZ67T+%?dNu9AfTmjyHEUSD4>h9$NneLPHbrkE5K%5INR?cZa`8ut|_ANE37wwD4|X zS={5ubx5xE<1B~%>T7HvZ<3%NsE5CZog{Z9>m(84ESQnTz?5_qZ!WF@Pv~-thh0W1 zVD4YUzNR05PC{khYP}4rAlRI5Lcsmn$+E-x(UxvMKr|6Nay1zaGe0BDS6(>u!08@H z4TPMBD)24N12-oIZlN)VjoXUu!e-*Pc~OFkpvXuPy%WwCdH~1pDLjQQ+$coLrUE&= z73Lg)z??nInmCo9sE7pS#2Ii-#emkV1$g0~=>+CF>w;_p#XvQ1fS8nizR~k!5frCfAKu|-ItH+cq{nd_^tdR{xQCtXMuTeI6f05uC9=Av63$nOoPwH@}>Mn(BbvOQcx8n zlNO*N>;xzQUP6k%b5sI}(Y28I{tbw?Z>Vk#9?a;@*-_gmOLxl>P>Ia3?6K^!_Oc%) z5=n1J1XxJd(GpNE6)|%;y4^Tb)%U0Y2Rn3?MGn!oFvOgGyGahP9-Akakfp?zE*c@Hsr4C zwbs}k5|xfhNS#dsB7GRrh`ix4XcIn@U&cSrAICSsv%Jd>7N`XE;A(n{E(aAG4}XqM zha8K8km6AQGv5kU2rS)r$kgZs{l^7Z8Yp0%;Dhl>NcQf5y~QfwN&BM8I=F^<0)e(a%pW^Z3Lx2^q1V#y=vrnB{8uti+PHEDqJLu*_!MB3%*F0u?;!2> zJxm*4fcH-dj!-oy4!Xil)C>6G8-ZPFyaIOavB*N?5i%U+k&n?n*fB_o7!C~X zsodwF&PO;Q>?HWEo)SNaQeqo%k?7zEqSrA0usb*b@LjIt zt_Htl0Qwnx-!KnE@;G^%`QV`b!9HaFgL9#n?ax6FE9CHsK+!Z6lEZI+Kdlm7iErXb z_}BUIf`;TM4!rnDw}JqFHOt~+-#eEuUG4yTZL4Xp0~I+*SaSspWCF1*Td$f2Zu z!TQUmXF;C(NzQn9yG1}0-UAGV3Dhvh3Nn`X)BedmlGs9QC*~5-M7zDn?y&y{PXKO+ zR0BN<{N%;RFwg~E$3_8lqK$Wox0FZW0$ha!qI0;{Ak}#k>jlLA^_*LrrJOkUBo{yl za0i&`6`X6x4eok$A(n-YfIj|IAteyPs=bd7gcOnjpo`Z)&d@jqOOW1U8s+zd+YV3-fzMXRtmu)kly10Y*u2X6_U1p0$2q!(vA^O3skh=XUK zB?HLGgcG5H8T}*juwxWVCPmXhMm9+rA>~f zK)DDa!^uhDL;phNkZ}&m!GT@%5|BvZIK$us_kqlci`XT6Hg7#|0lb&#;3d?8#wHw% z<(eQl<2QJOkAjbU03haTPOy33VO@| zdXe4edF~ddDZaDk!0k&hkAS!JjlK>}_5dJY6|omNQQ#1t3f{fT=oj=Y`o9c@Z=kzz z2fB44TS+I;1liA_21UUhsuxYsr(s@y3OxSL;AuYK zoLUqDmy4t8rfb`Z1+2AIu1gv28oGZ|{F1HcoAW2`g^zq3Gc?sNFbJ;9Z`78F&J zVNQPp5|dKUnV{VJhcg^9w!hGaXlMEiHHexCs+7(^A@BgQ$Tc|GceDE-Gu)lK8{Yd% zkXvw@yAmAknc(>!hFkz8LkDC4#~bRXhoIaL)BCBOR2OiTFM$;KQ&cQum862}`4gxu zCvkcsTBv=Np})~mbQfB}Rlshwlk*ic2nQe^Xbdxv@dF0JRJxoh0zJr0&`%_TPk1H* z(|U9|b_1)&O0oGsQSJoER`0-Nyaqn~7I0|iLiN@i{OpIoFZ_dgOP!^%zJo~w 
zQWj9*bOfJvCKJs$j6CD|fe#Z$vmuAX8=RRxp!T^2T=|W#W;*|W8WKrw1}>9OaX(%*Zz_K)e+IviN8oPwY&d&_pvw3YSlF{TsgQT>0o2(MkSLK0?(R9D z`YD7|i8!VacC!c6PWV)R(#x1j>@pxxOaX<{3(!!!hJ=xq3IA4S1Lf*=jbQm2n2c34W0c18=(wXbN{Aji3lf zFOwi^=CvaWbV<{wdqA?tqqi~pAy+J2~ zcH#%8KfFakHj#M*esLDmkH^^Ga1WcrxeV&m3y?^1igOP<)m%``&f-jf9zj8EnuD}}`r{@j z-NbPA2Xn)bR`|^8neXrh{-&OQj%NWCLj49ksl@RObU(?CIEo9o4$tZJ;23RSA95~% zU%v@Xp&*#*-2r{fB*^?o;P&Ts;N~G>@QOWygqN<6R4|1-1mF2#P@K7OvSHHv3t0}& zu@5L?W`Hh*hnip)VY&IB@!AIG^;#$PCEb8N;MP2G1qX>X$N4K=GB#u4f0bW~g%>!`uHC zvjuiRGt`fBAjiEsoVDAKOvC^>%LVZLyo7Iv0)KCh|4(JP3v1R6XT1zK7dwD@zZkND zO5n3%kQlR(3ug#d3~usfxWf!Yp1|Fq8)R%4Aot}GYuav}crO^HGE+nadv0WG0KrWO;}jW~eiPmr@DYJa4!&!5f;4m2)Q{!y!ee5mZonA*JOdxXY!;N66Wk z58u`}PBqj%XOQ)vODc!#pKYKdT+5xu?F`TBG#3R9#Z5Rf{{f$N1#^mi0#5yGsy}Fx zIH34af)n5#`H19Dd2|3&Aip@@LG$edljf1&!H9%vWE7;*nBniv;I?z`aeE+o=r!cA zF7PJag_EWORP43PTObZN12?22tomh~L*M|HLo(NH(0JMLdAtDLEzmwJz}vuqFb$mw zRHt#kh4>76&sT6+-==Ml*3kreKbM{fN|#Td;9JAqft4*|JYZ+%f*S58lg%ig%8Q4J z#s@jW4hHvlBG7Q^U@||G?a3};N+DN+3#+6GcJkqDMi1m@!8< zuesZ?6xc%-V#iRR^a8c!ICm~6PXm#m92K1OdpN-AU=Zdb@GnL~)pj1TYK)*<%cm-- zRrEnd!~6pY4Sg9qy`7Q6PxK1vu2b|`*m>VmYHA!^$ZUguJ00{f8OT?tXy*VeM+Uhx zUeJTs&zZsX#u3=bk3+V^dvqmtEu<+$!K%7|q(H({2CT7SW)*XsZ2_g=9jHoYLv{2F z6ryS1G4BgH(#fDOA41=U|MNgZ#?`^THw4~6jNM67j(*_4Uqh!sy}?6bIAx3gs*u@q zXNn@z$W|aISx6|@83vxr09YTXoOzHhI1qXxS@4YdLQc>qU`e>Z+uF*V40$&w*9#H@ zqnUErgGIT)xI5p#FXsQiUvsy>ni&FX=m5J4GV5l7zaxq}54EBVpi3vHPEh}qlh;T; z$0I776>}eB*Fa5?&O3ruFpJ45TdHla?T&2*@s&Ew*@~w9PnD0)=6-~H$`|ZjY#3Ch z5BW>b^|a8QYF$bID+D{j8;S4dwy`r9AE5Qzf?8!ZZC$1e6XwpTbq_CQ(eJWIwyuiF+ImKh#d+8pnY zqu6(@2eXrWN(>>Fk)w$Y_OAbPR8Yf_3;Y?7FFOrg4&?tD`#+9!_BD1(z!i~#Z2nb2 zq39f>84ST1m=%s$j;ZVs-booSTUBqAyQL@ik=#G&T>CEbZ^JahO+!{YqZ_2d^j-B; z+9}#I?RnPjv;f^LT&{>x?QnhXy3_5hYk=Z4%2vxn?#>t&6%W?E+wIrL#e0pZoa{4xk?qKYa#O|ATrc^I^_DvS!7FgY+O85Hd>408esH=b9)r%JlHr^f z%mhFx%|J-`T8TiH#I{yHscC%8&9au_)ny-QXE(3ay6cj34fK~Qqc6kxDPs?cwxlb^Yt@~5AA<(T(NA zTwC4Tq3NO~TQ|%y-CEf`w)sv2S--F`qqU#8KYfGS1#<@u^$sQOw!^hh`9|(4>mvCg 
z9L;YPM0pq4EcCD`UhP>uzOmiDORDk=^jt5$%2{feY69ZD=z-f{ zUpL=}9udww$#UKR>>s><-&HtF)Ir=ybQ&k=G;670vmxIkw#_4@Q2jq~RNF_}dfPH= zmB89KWgTvLWW;n!HH+2%Htp42uy&=xxr=c%FIZp~4^bX>YjsO-Zjs)X805Jw``p*L zhd6!X{BG-A6HpIaSv*jFUbKxo*;rN*Q#zq(Bbm$JBWl8SQ#)-Z2@mv=+{b;8yGYp& z@=audbfL54lXRSPzQ~E!fxD4?2i5gs$l$DkWY0H*$-3C;PyC?8&=H`K!SqwxGUFy4 zSNlWPW;kq8f#UI->67Jx)nt80ykMjGU!-E^!LF}dfS8H?V0ApD^KmbKZ=1^?{!lBU zIp6fgbOw7NyD!mlG7Y!X1$A@VvS@+igiOvGOdd1l8Zzv9{Ij#hbBTMF{4AeA+psx; z-V&i4mxqb(V=oy`dJac~*Yo;dgP57NtH$TR>y1Tr3arBa(ARc}zP4pSTXXwS%TRl| zHKsjCbG+f2CebjfU` z7iV8(frvyRX%nNu9ip$2pMqwVu@LPm%mbN!`74B}$TCxb2GLwEu~Y_=1e2W*l3py@BOnQX_d%X3}RTq)UIfl5rMTm$w5ckz4fQY{Dr2igSDM%75S zW0JAd`F5F3WQugWf!PmYAJ(qW@eC@9&2|!8kwYMJE{ZwFxq?#oZ-H7eP7y2D3lg}K zfxaY=)XTq1TX`dxSX+)c!(PW($@_sWB!=non`da<%sXrs%-yxK8yoA-HI}v|n*yx& zOeH#87ib; zJh1j&L*iL?+J|lD$rO{7M%V)eaZX^vB?ZoV-4oqT$dp(R=LDW3U7&a`PJq)WmgYf% zHw6{-OZqn1&;F;i*!;`9#?}G)Kzzp@o1f*Ld4hE(Ie;?|`@tPRrPvk`(a3843PC46 z#%}}i?-!`ko**aDT;_^RY7T()lVeJ>MA-xEmBv4{3v}l!gD4R*ANtw8EKUyx|E?K* zAS@D+{9N$3380&HRRn3_uKx0)*m_Ps+$tU9^1;PZ9?Lreg#T~6?~?hFUOXrEFEWal zNO}R?@iM!S`p@PAtm)ad-qb9P4T0oON1FAVCB~6}{^aSouN{=_Z(;y=M7qOGdm`09 zOtS^pdJ$XUuHDrU;21;gqlS>>HVJW)PJoouhsa9wGu9C(z4LL0xR2Z<87iF1zkt1D z{g{E+Xz6FUw=jg02kCXEkSY9iKzY4@qSRbRCU=s!LMjm5#)6QaPzB#ZwsSFHb9s>C z?dO43I>(U?uAe`tu8`l@4$hi(<|9;!70@}Y1P^vU)CUc)n!1G+g2@c!h=Giv^7$jp)6g1pAxwivb=b z{C+BrV&BjtTg{b$hwmJhfvLh_I+BT@-5ife7wDcR0UP@`vB$9vZs|UB1nmY{=8qi& z?ZVj%sno4>fAHKyflo;W6N?_uRggP)P>nsd3y1^u{r1PiByt;Jw%;HZ(qEWfFo~Om z-RHUSy#>R?w`CjT{bW0(U8TFkR|KB?a{g_>MgD&LHl&~RVEpJRIPw3+x^lVznJSI^ zN!=fA~l2JQ#$C~9fp40MxZ~P1DckD^mP!_8P)-;zJ1Il>H_@v zE9wqsG5l;rjzff*SPuNHY`Bw{s8%AG=mvk510EKd?eM?84QCwayY~PQbqfY=CSiB+ zCSfbzOCS*p>sdmAL)bBF`v3CB~&3cN*6fPR}3yPCsA263(d+j9fwZ=`^0gbCOdw#~7e z?B-BWPaG3SS7NdKu051I2u!I|NWC*sIQ^B;AnEvZfrmhkokm$+yrf0?O1we1UYH?t z5yawt;C1YV#^Xu+p1cCiDYC-uOWgz4@Ofqc6-yo>wm9Oz`xx$6MKq8}aO*uv-)4L` zE$mSSg$lnAIvIS(iu8nz{#vR6I%sNg3NeznZl7XP+9x>vrJp;T?UQU;Vlkx!n&viY 
zBQWn|EQ8RHXFUOHN5e2Z9tqX%H3=!I5Zo4w5g(Pc$c~EF^FQFlko>&~J%{WAy5cva z0D4NE$PKEWL&4;OlKLQb7RQB|24u8+NGmq87h!^dL3d_6+(IK60=k^xRA(TBmH?@9 zEZnwTsTtsn9B999n*nV6lSIq^Qs_V}*~PZrGSS*#-$!PUt>hwbC|{r=pex4VctTdw zLU?mK@p(dbVVK|`e-wWS|D1ppT?R}lM|4HtElA|=#l5ioXm3;p-p6|A^<}fiXf3l4 zk}WGZUE%HA&)nngh6HmZl1$&DHn9$*9a+g4%64PAPzNC0{v3FHYRG?y3-$^27W)_C z05Qvc+P2p=&i33IZp#Bb;U7#rmFB2(=z(Co58T@;AivzgZs*R%&R|w>hdu)LO*0MG8ZO<-Fl|DQMI50w2*zVG1t;`r}9OWpGAe*a4tX z&W0{|1Y|o#z+`TZ1Euv2U!n)-BIeur!aqv8zdhEr&HAskqbCsx{STUB-; z`3W59m5x5tSMs2Jo83Tegd6=0`WC!r{owRCz{!BFSqk%nCfN1pdVC_j2qw29Py(IH z+bgUT%@wYIPHnkBB>bB{nD>kKlb^-c@b=9VvC8ew>^n~Kl9cJTZC631M(2(6gw6?WpANZ zbd0e=r}hn`YOaAdr3dxX9%%ElpCw|*9Ab<8h;^OijdeC*CJRUf`Q2``=2_Bgb4Yi} zLhiGVwe7WMJN|&qvW(sA_(Y7R_91KVr}!##FVJ|o+-`Uyuw2~44ZLgEQJe!lstJMv zqJPC7gf@HtnuK1!(}Xj{XMjGl1M%lB!v_oA3e-F+x&`RTx45bJVqP2$Bm%YO-nA#1gtqZ^SL-O#0OLQ#N)uz9LT)5eAob&h#e*0@ zEn-~Q6?A9vnZ2_km;KF^pci3hR6_SA5uYJ?BewCIxeuXo-Gu)uTqSjq`HSi>Pn6G_ zCHg2CBIzdN<9~4du^{0elAWR&d^(E4%!l*U<8xKWY+xh)!njy1B=g>JBv5s% z2?(Iz6t<6mypSeqjw6S?&8)VsH;y#yGPl~}sbp%sO=VcFn`;PjQ>aAZy*bMeYLr^9lfN87;w^Y_K3dLNH`~S%QB*YaenX-99S%EH zCKb$83olDG!VcV?u;UisD`i2-80kQCKYa^9B>kMFN`J8l>xF*B7mJR{rpvd8Pvhs1 z>qs$X5{we>6_%o7n6>n8jtG44E~t$2pW}ypkweXWB{1;+KpMbzB(hItM!~!!i_>6_ zFw&4^a>xFFs&Wjsy6OX4J~sE!eYG$^BfD&MZaQ!ud(6&9$KfX#zSU-GA#S1~$uap7$vaHVeB%TQ ztxkWr{;5(brV8VDD}^Bn1bPeCBmn=y#aId6TR1?Z=FQ;7A=PLq|F>X};3yEn?f`*d z06Ge2oGTFr^yqzA3!L}hB(>kRqPC0n7sMUnmMzYxZJE%N+4|7ZOfjUo{gS%8W~_Qt zdouhqR@)-&+-9HlNU9%xo<9RS%*e^(j(@P;@-pZ7ig}{1{E@;?={u)Qs&y(4r*4v~ zqLJbS;#s0!!Wdy5pW-2cBH;z$Q2sJ}81JNDrtmx8fGyy9KsRweo+nttAI|A*e`A?L zUPMNMKJynbTOX}SY943Sac2vb(APC+>6?<+Mu&AU=Pos0msNGAv~RTou}OAd_|Ef9xtfp_ZPkq`AKr5Bjx=RCdE&Mm;A28 zSyCh&FZYq}kxIn3`Ei&G`pZt(8dS-4C;x-|jVbJ92Vxmy$Tm_o8FLkx&OA4^HN3C3 zHYHi>=t^R~KCr2$I=lI@(Zz9>SwYF|m(7PweJv&;ij#_!pq=4+uVb!K10WGbjMfNq zMSi@Q><1#&-b7y!?RGom{oBn`mdWc2L`*-ADBp|T$*#Yoc|0DTE*z>bI~|kq1;cR+ zT}DHyt8p06nc(ivdx35yX0<-9y;EPHzYk=me&!X;>Gk8)xoszGZ`p-xm|bhwZMbcY 
zvJ0p_>}ZyC93)yu9{tXdY5zrRVrOClup^uY)HqNQy+Eu|spkeCscVSn0lJa5MV6^L z?2+ox;LH()00(V5zf874@m%UJ%E0$9)2%nnar84$jdP}Qs^F>pTl3NS9<7V0wZaR0 z4w58bQh4th`Q17){)E^r!YNuGAGkwtuoE$rC@NaiDp0GS5<%lQ#67^^P zh5N=W@)5d79^BhyFrCI-_9^V~K^Yrg)rDbP0A(H|(`R~W zL0Qo==sF_5bzc3whC#Z~)Jg$QtmFJ{$D8U~yD{!A2?1?>RnB@|2(lRKAoKUQ>ng$(gUN+#8GY8VjxI&Uqpqx{eWdBGIfZT&43f(Qx#sOP z!s3jI<86Z+(4R1lt=V62yF}Bnll2zPMi*N%wd>l|^lJH6uWnxL^3y=@xT?$u?j9Ex zjpQb zH}dK~Ru|RUnijMIbFgVw+kN{+{!$g|JKt}V>um94;R~1HVWX1gCch88qznh;heL7M z+r_6ucGGTA@2{$_8KRDE@@|^al&N{$`n$b@H5+m%UN{cn-&90Md@Lt&o>#tTKeCf6 z6Kzl&k|*GlX>6UYXidTJVy7}?*^!byrQ0gc)ca{VYV_)^kkUBaF;SN0=M%cl|AK0% zVvVaRSe_tGDUV+g^4>GUb%*B-KZVaYS$}(=I={Sg#f@6LDNDnu@6|r8{h|pnhcd%a ze|(VOkzDJU6lRRw88biVqw9RxHOXMvMVXUG&)m{uwTFw`bFXHf%qq>={##yjs$yu3 zs)nx0t@+$E)Z!4t`aX!-5sCQi1OMeT|5cIMu}`B!L28cz)oS-j?{%I6NsMJ>&EldN z#e%A(>fx==wPV`)w3M}uGG#H}MU>0mK1j&%D07@Lz9iBmu)yQE%N?gW*<(>Cwu5MG z=~}Tfx8%q0Z%OIQSLD~#g8Yibwb?cMD!G+R-NE*r=zG<*z?ETm=(nJ};H}|4QCOr; zNU?8>XC(AjGu>_~TG+FiWBH3R6MqiRPpK%WH)`^l6`Fj_;P&bCH8JBB7Wg$vmVBg( zu-moHRN}&@*pNejdAhE8ATe>Cnz&6Z70rcney9Dc%NUX2_2WfOTcN$|d6l5v-aOoV z39-lny{87g3d|2!7{m+ni5L+6I5atw4EZC->=mMThFA?<>dq9t&x!cml$TZ*U6NSw zU-iVgX^jEeMC&*$ZZIbDm7RrIWMs_0zdTJ53Ur57?h@S2_W>gY#>o=zCrIluFPB=6W!5zj*PA&H>@ zL1MSfh)H8A)c#oeHSvowed71=S<7?#6?sc!_b z4e8n0WrZtCv5HqUuFb2gkpkSy7LlEhnKZJaFcs@OE%AFqO7N9HGB7S^NWcq^j>1Ai z@AAKYjs72_pO^1e7QQoieV4XW zkl#deLP~V?~lcuYd1IRq4&N@zsvNB8YC9^ zh9q3-?%d~IkIkJr$1e%n<(KIpR|Uya`J0)=R+BciEt=HU+iP4I%4PjZ~bHX|XC|$m=)0<0+cVqc58n(NgZeXsME9^ZO8x;La==|D$Z^k3xuLB0W(Q}-;C z{+F666nq=>cE)Su-ONvhjI+N#mh7r>Z3x!xB((yK%W^+$RCMQiJs)%r?DQ*XRm#s! 
zpE`a|Y>jC0x+xfM4sD=H0)Lle-2ePn`to1n3hm|cx?38DHWDb4MQE1$_n5I=e)n|m zo!m>`B|ITKxVuNbLM{MRfOUoOi`J+9W?s+FEid*xJ@xF^tBIe6|5{kQx~5Un%lw4v zq^R_`80ZmGlbqGDOOh?&uOv~YD=CX&^8-oOpF#=g*^*zmp>S9B%kL=}Lw}tvyj4Xs zyl=MZewyQ`FahGZHKx3aOP@>qy7zPHwV>0w=rX_aE>}gMtFwJD#A_$3@e<;D{=4HZ zPrtCes`}{uYe#YK>huP#)9G`!Vc!TzTT{gf$7#$+J5z>g1X*F!Y;8FVQbs zzeb$pX3zo zndA(XOPw;4%`Yn~+1Ae=UyXb^?CHYTo~}ZPyf!lqYAxF3CCM+s`r$R&z<^9 z{iEpDq}+dtzLtHc@Tl5V`&e_@+=O*;?-KPswO`+5{o8x_cW#fN0#e=F<@rJ@eiAwD z@P~xb%O#_Jcz(G3;_TD4FD`xLXI(Ds-1x=R84*i5DQi@7J*WGR3>_ccD``x3w%^u% zfn8%`$NBb_Y_@lATvO_r-y@fsx2)iPaaGy4ireK&%f6S_)&;fqLb^JS2>y|{q+3d# zExi|aNsBKH(z(5nZGhgy0^SX-n}gEtt!pbdm|6XC{p*J>?!897?8-h=zOH$^%?15a zJj-dk+g6{{zyl$xBe!(Wc5~_bt@nj44RQba&JvUwuGG9NT%Ysh*RU*UuCeGzMPkji z+EsNi>Zz@RtWS9(J$pr0cDmDZbl-?Re|KG(APOn;IIS2Vpf~|kXX3e8*7mDrRMGtG z9~q}U&3vEsXGb4!@d9Mf4R?w&YL2O zJpH9Bk*PLX|GBND^?UQs2DajU;hC(X>F?hD@#@{%1>b(>_pbkDw)5|KybHM$^&)0W zj4+on9pMj~)^5+j%SZm~K*e%rDND{U77g7iP^Yd)s(kH^nf*kZDM; zPGo>6$gG# zdOz~b?GI-&$wGbY+Sa><1hdAJZ}PHlL^r#rLw}}VeO!hNN(&e`qU+u00K70kQ@a=&^{n`W8 zMf@A@+rcM(VZ>O!d_^`bYF=7YoKgMG_qF*pd8@g@xIIBcf_wnJ2 zQmT6;40|@{bXJA%DNd9VO6@?qB>#i_NvcivkE+am8RE!V!wR;vAl?G zdP*lc=LI#y{7SSZX2leRWCeT*c^*3=Y zMWxde@N zG|k&8Eu_L*Bg^O?3qAzBE`7^E^nHH`W@}R9YTe^sHcOVS4e-@~|pZ<)G5w!hU%ja-Za-l#H)GVVb}r-N!^s zOFow>?vxfUj1)y~js2D|C!UFP4f645lQ~!9^j}C-s9-i>g!DhLnGMX6x`LnHG7P+IhnFGK2Ktby9Et=Jm7h^ zUGYOgjgb9b;T`E!>9$tU7jv^tZ(dV%K3|b}=(bDn?iu+pgOG2>&;#qNvQ9q~CJ*Ue1|IY_3#4IN9a z{Z9DBes9l6`uRQYX8Gm1E=^?XFQb*n28I|dxTh3&pA6)L7sUFd{F6GZGrz;H2!DUd z^%zj1V);AqBIaLX$HutI$3^nrrp$tjj2~&aMCoDmPjheHBUe0dOxT==eUa9Px1kFH zDc_skh^LEdjniw{ez6AINGxc5S$RD-Afxg9=+}S0`TcoZ_T=(bO`dfOY7~!9yj6Zs zZS=_SzT|g0@K|U-^s2_2aCLhC=T&fv4hF6X0Nu@b;8o+zauh- zq@VbllHMWnNAB&)^{qRIMLd-g-(!d`7O*++eDKAvvWPE{>!P|uyF_=6ED22t6nf># z#~?|jAUT;U&rq3u(U%;yfA8D zNPj=ZEkia2pUI>-a)C&fY?bIcsr#3ybC`_hU;g=Q`?Bu)(cC4~bJ{DB{!TWZ&tbyY z>bS62Y4pQLX+&8_PJrC+h_BqQ#K*&MbRGiUUhKmHCwYS3a#q+9+!I 
z(Dqe7-qc_{=Ez396{zo^2-i4O{O4GgXgqAU-+tGBB=3PZzIrK?NdlpZRbQ_ip5uKBGyXgq0NVtoR6JI&nN5|#VSfM21-VQg5J z@aoX1fit|s&RXG6#D_#IYYo}j!E<%lwa_(MC2>MT+c+N9+BT!QvSv=ji<0@po~1J@ zQ|l)-OZ2_Wn~1l}RV+rLQg!xW1B*f@L~tWJgt?bvh(5PS&gdtA;9;(2r5rbj^Vtz#`L;ZaDs+ICCiiIu< zTrWB^Qa3>sXS%hq#jifSI;e6^DPC+Ud0myQPHZz7=Ghie1)OepAMsr0gI-$yAweBO zio#qXeuNGVU_E9zmGKca)85x|+(5NFsXJYvFFgRONnYqyus;8C0l(}}!*CZ)^Hrkd?EPGaX9xgW^?^yAc~%3~!7g*gSk;kNm- zwqMh5?QGLvqMq4+2J($!R{6`*C*Vp*O}HRZ9G({B?(<5OFZsqzBlt$QwsGonm6wa< zg;NX8=l{-|kbkmpb?L_{LQU%fs3rUd@_DYczKbKeCe|j?N!En+=pMmg9@X;CqD=mK zw1%M^uN`ZFO~chaYPM(wXpX3Z>z~(uZdA2=)Gjv#J0@`x`5nby6=&W4^YIMe2dV>D z|4TlU>tFKm!V>(Fqa z(b}RiE+Ka#%Xp;ln)JMKi2F|O9=;=eZ~1)nn&9r@GDTi56rdtUN0YHFM6;rPY}Kr? z%O&;2vf?|%M47Q#qBb{sx4*J~MEUYOw++5igJ(rtic&{bh5Lk_39xub6&v{lko)i( z+&LRKl`xaaBuAJd+e>xJ;9Yaqk1-rDMO#+e{+F{X1x-JKuH{XZ{C4WAy5c(8HO^&` z;+NQoKaN{YU9qMZI_pkrv$fT2Jz9#J;+h`8uLVt$H5*#Sw_VmAZl7(JqNUldgK_DSY(?SHl1Z9duTXlZJB-2AZRqi%@luyqv?N`FMI@WzW@%Evo*Qsubp zQKrl5CCh}TusEi`)@(Xz>}+V!Ry1F2Y_1Qhy-~flW?h}4VO-<9)|00G^iN(F`9ar8 z??2(xnG*FXW`9gx%Xf>e;}M)(GQ*Z@3nzkr-Hw|lwKuoEZ(60Es%}z0 zQ9oDD(U^4$YyoT}ZV~R1xjAEQUp>xxA|CTqX-;Y>CT96N&;is^Q$NV4k~ICN-qA3k zeobw8^^%(E`hPXUv^fTaErp(jDWxM^tv;Uvn?ph({G!WaEYZA(zXI=hC>0O+l}H9X zpBQWYsqd)k-JW7Hm^vG+`W4!|7NliB+b%uVu+{M1G>%xp3FO6#lBIn48aX3ZI&E;8 zs8}O?ERdlRb`E{X@y4bwu4tR7dDGapX+SgC8lg>V^Kbh@-^Y^VaOQ9^0spcvMfz0v zmq)&@ETGl@mH)Ybse#=CetGA)^cG*?##0AuYEz%~)V2c6ON|0{hf*6xOtz=k>MdO@ z;nr@TW7$H45Uvh2dj$U~q(oPQE&PxCRKYNQFMKms!ECXQv~;p&+B#WnhHzaE?M>YT zAoBG!Uoamr78%kZ=_iA81nq*4;_3KL#bHiATzh-<_qpf2!0V^iWS`DH91nq0gWxE4 zEIrrmX2~$!F}%~RY`vlrT3e|ToMp&dP9EKroM2mE&9HX1a?J;f<>p0XKh7*{H*W(! 
zP;gu@RZzxv7d+$h@SEVQ`-Xf)p0IZ*1u3-~EKf`yjhjpdO-~IM+QUum_DY8@oeC+h z9HbKaD(EUbuC%+paMuAHBGu=W?D)M~&p>;;sB=Lb>?4SSh(B36Twx-{m2YBO6$Y2WC#>j&!I=_Te=`ydCO@j=A+a$%a3>qM%wULpRk0s;bW2i^_5 z>|g8E#dV=mqQr{H=|Q#;rV8E0=56YK8Vc3-o9;AkX)9^pVQ~cxjdQ(hObW4oxxIR(8 zx1DDhL(X7EAosC6$Xf3yv&$zsk99xo-OG2TugTZw7wPBi#Z{SP$-`<3attKC+usuJ2nMt%J*bUr6!$8&A2K2KLMHcM(L|9& zlp#7I*o;j_PC>456z#D4T05Gr7|!bL?V~Lx>}%{M+eD>yN6Gdnx43zEiG2R@75MiK@C&H(-Qww{ zYL>m`-(ZFIIfg4Ob@j)p-j=T@kF2y-FRxz>$x62@D4`>kktvQz4j;#PM=agMtYrHE zO`#q1p}l1bWrt<&W&g?ENPme2@{+i%%rb`wm=HSi0#pC?bZrOyb(7KBWCQLLEl0Jy ze}x+H1gSwfL`KODDhTB&m5bX~w{7lu9(O%MJsMng%AG~`Q7#p1zMws)@lhw&=hi){ zx2StJpKbfIJ;*Ae2ZBSoh3P^kP_IdUP^}f(v+Y;x`^lbcf6SLZP0(NHDH;#Dfg14z zK?nQ|_c~kc@FZGoO^^f9-F(TiiWp1xX5Be!kaxg6PQw=BLSAoP2C%vN@=FC%#KWZ* zW!)9NO0}}l>4jV=og(VUUxel{8;Pmr5=e>~qpfVc(|V)rt+rC9G?KP5I+kMs1)dZ2 z7b&;LS12?<-B5q^pa>c|0JYSj-WYYKWnRL zsM%)eXB}jjWD(iM5ORvke&8s89Ug-%<9P@&g~#C&b(8;aYH_*fcGkVntwJSoj*w3l z9mb;QWXp7YbL%~gvpT=-pE_avt$IfN1Xj@{!z#lV*p2@(OVAnfc$%vro!o_s+cs-}%nD z=k(NlbI$Vg^lKlmKTs;rt>BD;JA*3u&-I??e#hyq&R6VXE0H%Kt3`UNw2f&a)30W{ z%knUm&pTn>Y+J-F$yvf;%>=^~r=>12ZqvM$_%#k}6C@Ou!<`?T>!-cj>Q zTLCd$Tf%V7VYySN%L>;!u7_QlyQR4g^IYM%&m-Rbp<73nIS%8r_k>%v!Fjhc=cc|+ z`kk;LzG(b`_{@Z~q=l*5sjm(*U9mQniz=y1PWG0|T6@@UGh3EhK3iWad!@1ZicTSJ zmpwDRd;0wGKJK0A_0eOwYjLNF294HRuvr7lEAxuxM&zu?S!Ar7J2m%pUL#8}?$bV@ zv;Q^qf_Q-`>VI_s4vn4vx(;^_@hs{6+53ZMaW}2=T!(F1qcYpv!Pq2IOg)qs4>H;k z-#Vdj;`^kHsb4b!v!imh=gsC`WlggBUoFpUBbn@)BVE#eaVp|I)cdbrfxuZojs!ixKLha z;-)8TPrjYjGSfGwfN7QOiCT>+y7Jtx|EQ~LSWI`Bl5Q?Lqq)&CY;hgTe+da|4X6zo!RPD-1Cv!UnfsPn6|C-MEPiGYpRvE$v87>U%EAQeu^d0KfX+C=)ax+rp6wN z+n>-WB|LLco`)=Iw2mKLdwJIOl>;M!w-y>3T(RKup!fc{UNQ7C?y4&*ex@GkKWmfx z*)gu%9Jf-d7j8*2&C1(se4Vu=iDr>dTmEiYU@l@hmV3?E$e5k;C8v zHqlVm`Kjw-_X8dkJR?0yx@%p-oE93+Yn+8z@_WlNrhl1oE@t1!+MJb;6`8d#D?Dp_ zws%f?&Jb*7n5irkqrI7zbV2^6&X>06bdG5*uAXT=4gp?4FM^H*od`VUzrbg!#}b!L z4!<;8)JSV*Q+iIHtWOzo`oi>Y>8&yi*++Al=9V*MnHyQ-tY&Tx{AYc`)F_?2NFJql z3!@|t-B&{ur~Y(XZ|LIg+Q;>R%T8wpr|Sk$w^i~LZreVaMjFp&XCk4i6=UMf8# z!*VkB 
z+CkB$YO|%@+_QC3ugSx0)vcqs6%kNXin8 zOxF3pjTI{F)j;l3zty(TUpMq{Om|x5e8Bmjvybz8M=u9geK?b_2g&6u9rOO?BxN1W z+?9DCYi!Qd+$*MLmW$N=PL!>(eHyn>o+@u)2b`#ux6yH;23-J}!f^yhr!>!X6&zYQ zZFer^GT3Fg%PyBbE|Z;4JDxP0(4EB+Skx@pXl-IyX>!aQ>^Q7m;FxR4j=VHse)~)iIZM1p<|3xm@Xs#iJ{_H(Mzl@xTp#8Ra<52RI`Wa zK&}r{f7PtCtPk1o#+jyH-0Qd?l+sFu&W`P!V_hz}E_7Y#66zf6bj=}GUq^RPql$Zk zpY)<@LN|(6LV{oxlIU@;Rd~mZ@fqAMDQfDF`#WcMwtv>1jDzVP(m!V8WH!n6HLl7l zVX@c*!6NO@T{l#5baPtgwA#tfX`f@7!)HTP{dR3%jZyp{loif%BmJ0KLkOqe!$NTy zGyWclR$}mXhB1)3`Kmp?R}4OMWJt)pXEj zJ5+YQ>vEMn?&HdR0O!vRJM|s3cf{7}Fy?r>S!-C%nP*WW8f>{_X=DwzR<%vCy|8)E zzhi*yxwWSCGFb1F#lgD8+S^uMu0_wIY+;tvMq7dTjJ+IQIanN0=mlpoR5Lu$DVku( zfW3{imA6^A6XMI{9g`Zy^uL;c zB8+9y%46Fi>s|DvpE=dE%2dtN!IWk?U=FZ+uspGj<$2QNaC#QpR5hH@aAva&k=8O1 zY9uRbplhk`XE^2X-f^2#6X#pbf1Hmxr#kg@Tw=Jd6SQ=j6%H!*w4TR&@mizAbJ zidsf6!6(U5#5&q~n=WPsdMhN@j?2}#X)=|2T4_wa(bLK0vh;v17Xz5^lq_x%JBd+3 zWhP*~QbLrTbckqgdrG8u(-Ofu>8!(f-c7aw_Ksp|H({(;P5L2i)U?sQ(-zj@#&sb~ zCL5)_%5<8M^q4xpteN^seYq(eCU%o!h%~2|mos^>yUkm6RLZIih34WSX%by-{OHlu zM1MzL(=deE`+kPHhP`xran^m*^p);mYaYw9Y$nSub6N9C(>YU=Da1VA{FZwNyDS0L zW7ZDL&iNzXPzum5;JR9qc|#-Uf7nJ!lg?_!X`OWUb=&kkxZ%{r!RWBk@uuT+`gQhq zxNrENkI-$^4yBKiD0nNmR)b}OX%3S%eR5ATgKSuC!Mqsy-fcJUxBRkJm&Ym1)F8T& zRiUHNcg+rM3Ec$UD&1PaatrP3LpADyK-*sfdZnoF4uPpWM}H$cgWRb zQFf#cf?2+)Oj48Sd3cxJDb29)N7TF6;t{Hqo+&4lqsj(4Mdi{5XqHX2rdac=T3aET zmo3Hm87*^^-Iz^yN;og>mOf}I={BKHMIDMed^3zNtfdQ8AzcUUU`-z>RNNx`V-8b@ zQci9Q1GA9ZU`f`2wmjQ3Is-1E-dhkdge}b1d&f<~cH#@6xG<6a6pv(gxi5DJXETR$ zfO$Hzt@@!0#lfdftRc3mw#jm9WsDjwl;t+bB;9@eSVN+r31`>e;j!VB{<3ZnoxIvh zL+Jwchi99wT$f$sQMPB+5!U9+LG!Rpu-R<4Q0}cE24+=UaTj2&|zu`^RZrVbE+7fQ|73v>6PXo znAN3pf!Rf;nLf%Gdhe`L3Mq4B3q4iJ+a6kvS`(Q#!c}P{kNZWcKxacqk*a9QFrR6F z^i7;7M$>nri+VuuQijU+>7#LqeqndzN%YQItg7@KyTr`FZ}bRRN4JtgbgsyvpVUPq zLiwpUs3p{K>PNMXKz|^iCOi6s-ZtNvjT9%&67SGEV?VRd=Bek@By}v4GOlvNr>oRc z>LCr2I!YFvsiCl(E=VqPSv;fkS5)~DeY;$UoOi0#gcrgdu|A#dhS4eNvRGEU&;C%3 zO%J1@e3fGUr>gc+&+~J=@x^VKu2x%V&+qlh`!EagqR^NwEDzYrmGt{KuRdmHYjYY8>3Or5jypfZSkBDA 
ztiJwK$1h?&mqfpi?cy3bs%+(KLWL}(d4k%732CCX536rnr zKIw%Gi_neJUDp1i=gJ?Z-1!LZ-1wYG_ou4d`wB42rjm60Y{NY@vw4O^=9Z$t)|=aX z2jq@e=>~LGi%?yJ?SdkVXY$u$y4fk3Z`u^yH>NH>H7qf_(d+evKo17;Bgcd>YE$MG z4zW$PhFPj{+jXe9lX-~wh}n^zYLmG$GejQEbH7l#2m;-VZc0@(3pB6bI2X{D#;mPH zr>kbV0=j?NMcOpYNbcYbl~#*=g%{igy<&a9Ow>Q-L+1PDnwAwzRfa z#i^|ADRUlsOIER^xCu$w%lyemZl-ouRw%JbwjwZfY!N5Cm99=@m}j<|ETqZumQk;Mr+>i9>gfrCA2dUqa@RqpnvsOoY6eeUN2!6l+*RtS&DUSiFr7uQJJd)oqeg>plsTchAf52hm0 zM!NSDmX?bulLSakQ8qoTsNzE*7B@xbG5ZelPaIiIb?v3Z~IV??!${LyORe zX?7R6`?8hZl+!c~u}Vhf%H`4H$wueLt7=hhMILAEKb7C~8p@%2<3BY|)uIg#&~9(^ zVK&u5iVGfZP%EuQ2a)p2$)kDhr$#PZ@_km48N zhOh~1x>@MOX|-Thq8{wBkIq-cm2=p?&gwR%;eO}ydI-;iI!xVLB0d)-sV0`&-j^$m zURJSUoKT0}gDcR^NAx9%z=OSJLSbd@O7^uau-&qmY@@NhS6I&-wS;g87PcQ#B_pv$ zTZu=mV{z&sCp(pq=#O;G>XokD)A)OC9YGutZ zdieE}UWh(oYx*fSQWKHEgY~*hb^Ob-B5Ld zVy3U^IQkY%qF3Gn`7xbGgOy42E8L-u1AX?E>T3#XPikdt8>WuFWUgW*eULtyTb8e| zYi%{9)BCFrQ#O0y=MS^ftHifrHvQ1PVrRUV zNb4kBVuExY+LuRnxYcxiJ4f%sbh>Q~lzV|z9>_YS96sc@QifhvrG#TbCFFUd_=l;$ z#TW+mgHDn=#YN(J@wZr9N@Q|wjD26|oaL#klncwJZQbdx-Gj4tw>fiDc@JI3{n(=b zG_DY5qNPvWbKxQ$-dn6sm#UTG7I8M+RXgI>{l(@;<{aL;wOAK_m&T_r5KiF-`wK_t zIJQ~oOsBaJMF3x|;_mohdY%=?JIO4(UI0F? zu`~l~qm?$&RlABPBfH<|gcpOXZd6(*S?HUivXbsROte=g5?}vEU*$ylSFR=^b^__? 
znTno-9@fBG^~5s&Vh(*DR=!m@Cwyj9b#JV|9jt9#Y{xPFZz_5_l-_Ng!VfI?Q`#k2Z%6&n@W9HwaX+l1b&g>5KVW zo-Oy|9OuYag zeO|+-*JCx66+h*rJOm3dQNGR8@sG?MHEL9TPh-bG4KZ$&}bB>MT|< z3ppPJ(h}90YI!<7-vQ&zMc1||?o8{};Awhdk2|q{&&1ozbGJupGo*I-1~FJVQgwAyu;R4vEnd+{FV{4vaBlSGrOa63}9WSo@N55)j)6>N=$(m|4S~Ky_FR*Gdq59e2?Oxx4T~7=Tw!#n&X@J4#8D=#b~mB-sqSe>;i0%HX_kdTX{&X3EKQ zDA>aaKHD;xz8XZYgQChjy4HDOF=w%ViaG`<3>6Ba$5oKY0ifJR`F%S}s2Y&NEHL#g zB?JGng6QTCvr&!mTe2}$xfN)uA$`URgDey2Boc~`IfJdJ$DW39^RhOPPCw4>E%G)3 zTfCJ%gqH-9&=9>ZO!vMpB>F6JJ(rG*jRZfwyEVO73t*F`sFUg7ryw^oK^HSX8X<}c zdUg+MTn<0ej`;F8{qyTGC%T-_9yHsWCpbr^<{9YG7W69x&vp$S;1ycZk3NyUVB2^# zm&ptbh%*-Q*-t^XJJ7Rb!c5|9UyxE?A%Z@;C9zAD#Vl;e6X7VBe}oV$+)>wapRzSN zT?{!41<^0SKkcHIsa1`^tDR9#BNcl3FZQPQE&cP*!DOW(7NRtF`Fp4{)OG45kobOe zAn&qCZ7rw*b#8QvJd!Vc(V(yprYGM-Hz%o;*n`Sy5jB`Td85EG?Ll18%1z$UQQZmN z(y~7y>+gjndk4dFmcBiGHDq_SZrZb&K^lL}J7$^JmTqB3-=Z6y!ex3Deq$Ho=#v>` z+hQAMYfev-UbfY?EZaT#0F#y5^M0l2MZTQrrc1HM^|?=f4Lyv(3mesPLU&;hJC{ow z6T+U?qO*M?b&Vop4YuJqeC20!Y`teI4hJ}qzMs|bm|?;)I%c1u4?{40Zs+oz|1o*^ zfN&FJc98u^P}1}9XeqU;x|vTAL7?$MTOx`mERqJByF@b5Z#swH&Ef>sbAU5$#7X_+ zCr;64StjdQm;J7%e5PkiQ!r+(+*cV#ztI=$R)#X2SbH8`!GH(*PPfd)XxeoqL;A8m zo22eiBjz7}#2UC^jn5(_;o!9K^dbFC$LF8&RwhEpObr!g@gz`eJM|a*;a(OJ>3Z(;G zGzVj)2dGD}2bV#*A?kBw2N7HoCbIXy*5=3!_}TMxaQ_3I2%u}^32?Cty537i-Q%F= zb?9en;>Px%8hdBulF|g}l9UJ^(o?fT^ODzp8b8f$sU*`~M}x2LA;+DC63l`q0fP6V z-*G9T-5mKM-7SmA-Mb~=k9-~ed8Aqp&wPUpFFjx+?_jN7fjy^+3i?+G z|9Jqt7zutUNGIS>e4Hnkt{Ch5E?ynqRpJh9CgDG1}uHulj3{#9h%5RiU>L)Toi={8z#BZvp#NN1SZb+@c zFME-!q9E7@;NacD9{g!lp*~+K*5ZY_7Syo@yHgXC@PT;Zi+Wz@FFGTk)ARSCg#iqg|=(r&Tpm zG`pnxc*L(nTz&D1`@rx|6nEtc&v%8Wd?)_>51m}6%BAIa+dBHGcEQ@*vPIJiJ4W6> zEYKP(Qx>n>NAwfj@mOz!^~l*4bu`v>Ke90!%UPPg{Z&`dHUB0^!yUP9uidPDru|MI z*okB!OK7^lcr_F%sY8`x@+VtO=AxVE*~M^z@luWGg5Kz^$+15Ua=``O*(j{4R||_so_D`hYxs}FM>{t zhyPCzqQP2KK|3vpz$(*~dMbMT2Xs>%U3x1YCz|Vl%`VGY3ewZRJQ!>*JDe<+C8A7$ z&1;QcsUjQ{d|3NUEO%d!3;NVIlC}{auIDvb`YCos-|yr5YY5BK>qO#bu!SGszq`nTgkGpaN3GRtalwcuhU@a2y0;PF}c&U)aDjkM->MKgb*_rU=_t5;Jc=RyP_Bc8W 
z%gR?e4aZ^ysuG7d5zj=*Cy>G8pz`6$K84k$ z6ST%cw#2j57RK=uiFl%6*z+Is&F!xq0l!w~IlC(Zu+cR^dR|zEMVw+E@hJTEZ5YMo zU^q!@(ww5d_Bhz*Em*w@*n=(LhbKf@M_|Oa5VP-qce%||&K66?is zvDd4>NHV#@?{Lb+VPB@pE$EG3TCOfvL5iBm|Cngd6)t)htfmK@^eY z(4RnkkyxbX`G0zbbnM4!>zPNi79X+}>t2ls5s7da3XydgK~KlU3E1~BXuv)-npnLz z(|xAHWNrni4x#Jv7_e6+GO`B5-;mzlok3@=?D1NBdvU3TB$BP$!I_o7Cv6j&A@?z$ zmFuh^7t~z`t8Qnsn!@)^A%-ge0#)F3{e%wceV$`GXSy1k9ZD>>8J%m#G=kr@r{uN* zk<%C83P(6mCv4Prc&1pPo_Ly=X^Z5gX+@6Z3Ejp^)6u;$U!rtO3=|g%CiPO!<*;ytN`rnhFY=AlN9{}`@qt1Ycf(k#*>@+?=xUhqGY1b3pp zEug`J^nrRNACwosIh>QD<=Vvf+rdx4_`}=a;F3r|C^Fm+ez+Tqa8>1(?5T871}RmP zc=-l10RoiC$`<0oW1L!FJnC=0Dk|@=F+Sky7fKOq<7#5BSkRtHO~Be+Vy_N?FC|b! zXK6V)Jsq#Kk{Jw3@Fqpz7bn3ArX$~#Kx9`ry{2j{u*GBE{{d`o0y>^+s|Kf_V**1k zzH_v28-(7UXY0nP8O2t3;A-@suSeYTkeK`wSmPzM6L{%5>avm$_1kN}}T<#@$gT~9lJ-iirN#F6o8;PmfN-A80eLl-9Jd>)HMY{XI zZ$@KD&7kqOVDY6OgSy1E8Jvm+Ol6eM$i11r@Pz(mp5WTIaI8Cs``3$Ar3fiS3dCnc z5v9dR>C$sJ^cmt_FxeAO-DOVl9M;TBP0jaM%Rz<<)HwB^;38JUvVXxA%djv$Smv){ z41WC-{{206@*TQ66HBumezqgnLx&$%ZD(ywZGXuk9kI2=XPiUlw<;~r(5FHnH2x^^ zJ4j4`BN_z{wM(^eYKfr1QXr^*#EB*w`5Ua~Id-rI{O$>&&mDN0N6f}xh}M)+)K}>?<4#&?_hTYPU z8F)nmRsh5{<=R*$ysn} zG@oyu{8tF?8!Ai`{$qc8z#EnUBSjK1?g8_skVzPcEy~806cqP>-b1B2;E!A}9{Gy` zxm6QmiCiOKwT~b@3(@XJ{Nx+ef!N%iyw7;}<)36%AL0r3f@Zvh6s%%<<}-ML#Om}J|h?*riUpq2#JWW%(zA~)0q+qjmvsirzt?F%>a9NTNx z_!&e`gVo~X+BzW<&vmt1IX*ooBO77pN4k13$!9rDm6%2=IY>8#PjDL#B=Pvf}lx=uTK^|L!AuaUCd=BH& z5{a1!tJ$8Iqljb^Ux45)!|x9uChH`&K*BqKOgf3Ju+UjVY%}qGHHB8}T6Mu$h{7kE zu<#DT39^}=v9(2*KeSUGFE@~5Kz9FZ&P zb19h!N3iQ6ut7_#cP6;4D1N*M@xXDAa%&>J4S0|*Vj!O55!Dmdqn5@WGMeVJh7iCtX{!}L$FDsIHcE@)p6%v&!|Vi$g00H!O? 
z*=W(>14N;->Bc+}i&CHGI--`r1{5aK)eT*E2dc_a+NuZfZq=~hl?4S|ibp>R;&Flm z7iMp51)pStii5d_Pz#S1h%EL;w=RMa{s>WmJ8|MaaRmA5c(}H=;L*;K3!c7_s1Xl> z-#-v5)@6;ilz#BZA>^F8U=ya1^~pr~OJE7Zh-5sO#qkHs^H04(e)X@Kk?+Io{96PJ zL?;-abKsl4@SOfwxi^&!hvX@;hFMMhZT_|o)~U=qTg42*-_}Azc){=- zDwWzv${MvENNE)L!z#oR1IS`mAkY6!{7I}4hCkfS*F-#SZ!l{;7&kxBN&Jqy`wCuU z59cEF9gwKsLJ(~COfiC|X-pl{NaVgVv!vzzC01yc;s#+_`f_gHs`QHk!V{UvDk97eS*XV|HFSSVL4YEiiJQ&_Rhpx7fFcNOhSPepd z*WnBHfC$^MLpKCZJVa}r;Wyg#h%;=;?lvRJFG7xDsnmqXd?hGLCl00-sfw9XM@{lcPA-3orLfW>scyR=3V7h-9Az#=Y4(iLR%D4B=}L^B_;d1v5s=i|pG zq6L*fDxtAGS>rgKzddoQeR`CNf6L?( z6Nu>3sQ4O-H`#;6)+H{mW5IpAOI4oa5i|9Y*~8DAWe4=IB&@0@D7A=cPz&Yz@s0SD zaafdQ_>neX;Bc&RZM^OkEWe%cRI%-`;M+Oqw@hrdnVqrmJ#OsEU9>TmcvNDt>REE5 zKH^pQ^K!(DMl^XKh`|g7{R!@QO@`iu&-Eo*J&QJXB*PMj_p*Xa3J8wa1wWp05bI9C zzwRbucOgHPtIW<9WM5-o{mR2e70+j%>k$)F<#|SKhUOH*yNKSjCFj;VlMdud*|MDV4iF2>;+aVW3in0B!%@lBAZ&! zS18%)B6zw0?BxLREnd_Kd}5^{mVX%>+6Z(i0b98Z>|IS*57*pPNoKCt0N6!WvVw88 zZD?hE<`+z2I@(F{=$+sV9w`4Imm38mtF`Z+O>`l8N(9Z008wlaw_zJ};x2YzGZFFH z{2Wk0&S)kPz)((Y3oP?VbR?Q;u*ulyJ}`_8;AYnmja7q<-40J#hkTpeHyuEy`?1IG zd9NDk_k84j98`OP+>=h_E_Z%jehJx(C-{P9Xr&6iA4l}GjQDS-?15)GP5g9=vy{21 z^$~pVg7030#N5J)7hv*TBoXiiaD}f>5(HcpPt+Do=qoJ7zK=z(u8>J@0T<$et^0$# z-r~uFSzljH+Evse`!0M%V1EiwwNanig~wp$i?9M~(V2Pq2>LX3p+6Wh7yEn=l-D0L zQ-W%Ty=d47=F>!g+J}&$+9M%8c!U#DQ_Wh_Bm>^LKWnKQVT9uwXFzxtn}Jko=f@!AG0LR*YWiHL%@B zWG^a18WR0qK%UEEMReHbMP!psA{G6mDbg|`&zVTaS}KlKBQK@l3?sp#>p0hz?AazP z*%QvWCU|TuS(M4h%u%9$MJzXkpqfwr7x?>>XiccJ^WknYo<)Sv861rTr(Y*ihs z^=W=W3 zHI#?*io~ML;3;0Pm*4o5ogjmUeBK3WwHD>uR}{) zC+oDTwXmzh*sn06lDcSdNi0rX;#qH49S^w5%I^Vpx3Si}nW!edBB1cjcCqOOeM0b#R)azZ!W~^UGVSYiAxrfYHB06&$%{%wKbi)4c0Vtm{qyxbM=`X9L)Reu5aiS5Mnukkt} zk(dQL@Qk^%5uD{?Ud*^7b61~VFO#v45CpVf5tw2nx}FSc^c39CkbSC+PpiaEmEq-0 zz0x*5*_BCf9_+^|{lg4u8)P6MziOg6vTq=g6~x#4TaYjT=6wjd z#VjLoR%4i*=>qb6j64*^Mihp)WwP+~3-Kj;&^!~CbttpY-CAgk42SPd zr541K3f@5Six2QxQ^2-;u=UMB2u0C?ConNdXiZ5_?ow$X$a+5&Tq~r8QaUvW~v_yOHxuTMHyiAaWf`K6DAuc{2FI6Hhx)_)C?uz3S?9 
zzF(U!t(Nw{O;y%Z;HJnb@N_huyCR;u0};$fFlBpmt}ane6D;m}a(H>@{8(b^Vqnx5 ztYI%u<}&=q1m-l@UPm2<38D}p48G>z#}zacXxtD;>g$fVwGAD z8+K>?rMW|JQ~E5uC(5fvPU|60;zgeB1lV3>cXRp4XZfh61=VZq@V!hGWbYE?X-Go^ z+A<8RFcqIujr>PBPUt!>XEu z5n^Mq2ot5|R5{O+?np1WQL~gGeRAbA?R z>>TP+PZN`bP$Afk&ua@Gx(f@i6I`RD?EJzok=?rZ0Id=LRcF7e#@RH{k2tzQ4uOh+?KDQ_p zoAmBRQudJfIIY&lGatix42FIB4zD{5#;_ODJ*!YV+KE$aiN1P?4?#r7k?u7-=WJm< zl2aB;=L|CufG!%5fVJ&Oe;$;xqWd7|?B3D)=i<=U13m z&x;D6WgwCraF~C%4`n~=ASARDa{nD$VqxC*XtZ)1nXPa%Bb7`veO;$e~lj4zZ|C@48|%0M^*?FHUq@u4QrHwZ0!Xloh3gr zFu#_?l?>@LQ1UA|OD+wg*%K{KRO%xIWjIZlIOjS3++Oj#8X5KxGCBFDc=P7?@*S*s z8TQT{d-N5Vzm$&x{%}U0dA4{g=>RefAHb-2VC*B=F5+DX)LQC&AifUb zPQnQomn`DE4rFls@e_qvc{#j$FXXu*c}#l_tN^tSj&MkM?D=n5mWyFqmy!(}L(cmG z6N!6*8VumIUf86MWQ=wY{pzufQ;2ig!vaTgDi-wYEt0;5h;|xsISMYM9Qv-u(_CQZ z&fdDPcbp86G;qCHg0PA1FYiF|tVVyqz&;Gz^o zVp}39v#INiLSLfbLdvt+9q4~`H5qHMm!CWZ&J~HXB0(9EXy+@kT6^K3LW!_z!%6nx z8D5Z^ZjR)>6h*4c8dJ^Y1q!l>AHbZk!~;#y{=Y$4LAZ%6~tJ#PRn@1+H$XCdKu9tCodG0F@q zU;rn*1gU>bC6EMqYfJn+4jg!mn&DNP%0TS&eDG2dmTfSeV;A3V=I5N@E87z%MRVp$ zIq_R$?|Sem4~y^u?l77-JMFoazBzj#HF7WG4c<3caPi-u9D}3MvYD-2?NpTGx z%gJ`$y4iZh`revrEn+Kd3$)d;ZM1zQcQTD?7A8qkn{ilZMpV&OdM$ZUovSjfJCe-7 zLFpv-J*E>MOaKj)BQmQmHbJ)<@f=$593)i-4b8vYtdvU7w_IE%|*)$w?Vro;z+Y8vQ4 z!5{V|P8vftd<9zH4TM|@JW>&hvVth|06*u8mso;L8-qXV#9oV7-)Efg0QSEDe*PH# z{S4m75e(tY%8F6JJ|W*jzTh-AV$rvAd&gesx|?@OT7!+5hAh++_pG%VlA7qb0dkb9}szbAj*pX|0m-K*OLiv#S>{k zYj@GRE}V8na=QXqpUGJJb)3#Lc4#R0^cj9?HVk|W409?q2f?h_o;zJnoNJH3Yr$1z z5Z4z#&!=Ez|Kq*v@ntal}r&i^^`Qz1kr&?wrza zbln5~%AI(#F}f6owf)EQWnn``Vr_@P6xWB_3I%i6QE^l5ZO%rwHe(^z5aUh8I!wX_ zwqW0b;FmY!KjYDxwrF)7*YlBkV52VXQ7#1=t3Sqtu^EgGpR+mRK$ z;D&df%?fm^BpC!hjemIrCYXYj&O!Q)kbfx-BC3f0uLNfjgwKD2b*Kpo*9mSdQ|v+f zxgIXWUMcw)-eM8VavUr6=Kjwv-ft9p{Tk0Y6hxVYXZ?mRYt0!iL9;5uV;VSl6)P1_ zork^Vzdce@fe70Q8h!{W+e>Dz4T#QzNsey#a5Gg-r{x*sCU+3|RfPdl6gx|^1$+O7 z8iP`x#b6kD?-|I9|LVdZWOxhN#Dh@K+O=-6JyFgD?I>&L^Ch{}ozkz-ojMFHIpb z34?11#LjwgBEe*X>ht?!SeV=Vu%7uDRh1am3IaHcrE7=oNyCt&rev)2z+9-e2m%-Z+;t1udbG5^)InxTR7ymHz3bbiA$SO8*+pgCy~f) 
z2G7?SockP{b^tUHfQS5{_^@Luu{O(iO#!R@BwN;u9oa&i!Vs{3D9B z4MtiA!h#1$&x!7i@V}noH~hpU{7oJ>R6$cKu&STrz`U_=J>lUllQntB>oI?tO}xFE zJJU-!gZ-e5L7=@Oc;15G@9I>c%wlhfs$Z1Lc!ie49h1Qn8C2j0kSiM@pF)n$68{N! zNuxZU{L>6F+5Y&C{ZwTC<(UQ(H8urjjwNG#oND3^$g2zMY024l!{a`Hk31k%)AZ0Z z;7;irXr~+67i9$h0fg5pG>tx11T0eIeNB;EMq+SwH)qf9yTp0zjml7QJxkK*5$yb6`UbRDbCKaIN)e>`Hc?_#Jo7EiWGIrn5-p9O zPW?0to(s>{oTu7|oQ#nl$}^~UK1YVS5H_P18XO45EP`DgLnKulT(OBfP9~PnAi0B% z_feB`g!tbm4nf9Sl0EA|BpHV_dcjF1z)7dT$`wM}17Vl#x$>&;RW@pJBrJ4GEYL}8 z)OupD=6Ik*=t5EPJzn!LnCKT4@ElTpAGx@Mh1-d?j)24b0jnDWcX^jT6=mjM+=KN_$L@va*K-NnqF4a-?0~QNipFIUcYG$NbB}vNE^@X_wTW0G2;UctB(LFQxphmlzYkd+#^+@TJ;C?sR8GgSqpyinW`ool6Cbt2 zlk6aFUW;Eo4NLKl>dngJrmsoAq#SC9KEvf#laAx<&w(w{KuewQPb+SeWklkXx}8VLZmcoZjH% zvha~#h_ufjCD&Qu1MI~vFyL<@xK-pUcA!%(uq{^nOeN$c6zTX)R(dgvU5I!AP4k0| zDUG((Lt^YaZy7vVL+sya^s74f^P6hH?sep|{MEbo@KG>whw#S3(K7=v>qxx!JW$&c zs!@&P7OqiueF}7zBhJFwP9|$`73;5+QmDRr3wr8-+UkUYegpY()L!QMB+1QP�`&S>P+J7v&d>|qH0)X z^@egvPP0YY3Q>2ED-VVpX%C-YnAK*2+jn9COTe2gpw=W<+6~Jdn(yyN<+FHnIid!z zfOs6c*os)NJ(l|t^@hi>XRGjqSNU5wwP#ha9T&K5upJpJjqj-sQqLheu``}i*$pqa zpgMT#IMEx6U5)5J65o;qj%=jtle_YyThaL;R8I^9*Ih*$mtgm1a3{Q$IFUW{0oi1t z{Y{bTzMzve#G0>(1H;gK?(`!SjX@Fn@qt%a!+Y*pCDM0kGiUY|X{(35e}&yHO?^}> z{_isgr7TkU6kK*&7>=}82lvh+7rF_}zQ7J-bEYN!tWj)!XrGJS&F4CLPnf~TSxu2U$mO_GR0xSlKpqc)Bi_K*xnX~9P=(zQCTlw} zk}uh23mMaapqlq+a7AvHb^;%jhmC(vR3(TT@EmsSD}XO6&$-%f+FWIYVf?tVLb8mBlK}FmEUC)XSlmkL{|&&C?Xzk&3ml9#W8 zC0)nv+=M%}*Hmx90yHD~xQ3ief-iZ(ow0v#nsUUAZxbY zOJ9Uf_vd_G!S&t7Yb-`PKHyaqtnOZTiw0B%9H#e5FTCA;Ivbsr*K+z>VdyKd@{M>s zJ@HlnuzxTd*GO{kzxl2MXdg4r!Jj?BO~>hPQI4CC?UA1QN+Yzu2`l;^-f$T8r5&Xq zVCHsYeY5C$(OFZ68u=1? 
zuZEM^hpcR6w;z$oG;p7O8vp0OXHA6@k3#;+KhOJk}M>>lf7sukoH6iJ)d8Pj1AlSMVf< z`03SX<~U;ZZ|u@A>}DivvYslIMAncC#@|WB#BpN6W@JMil0)5q3@oL7y*^*-$q?5g zYcdl4JC-MCMdqLhp6Rzxmrn`gYXi|!3KrgzJ1RchrufId6cnR5`6(al2GKnbpob z(G2XrJFH<0r`ZMU&;&*zTh@?0nuFinK;3IeEc7K<*jW6&2WL73+*%%MDf9OnvdUe- z|F`hi%dnB(`MxXsew)`Jkp3yoXE5ho31qm4{8lw-Bz4?}rHg1+14)MQOd?Y@PW;3w zKJwjj(TF%0%Wbd^g^;qHSj~ZW_bg&Cg?rs$*!WoXz7)H%1&=ugtU8Qcv*%4*U>xU@ zIXeOma1c2tOFx+)Dq$vr6?V{RYdUw(R&bNy3ulo`Ogez)v*%NmQ}6DH9qvY4{uqp8 z=W~o?EyG|Z3gKIG(Vw5#j2U3J=VTpTfs7Qe#!#Z}zp#*-LG3TeQ`hI@63Ij-ac(ix zJAES}bAa8oz-_Dm;atY9^+xsvV3+OONdx%mH^eG2NJ1l6&yH}gtMJ;TyGEO~(~`5x{L-efV7BqI@FM3EAhr#YX0jdQlwhxJ5~f{{EwcJ&1^KNw8B zf>R!YRQ8SchoXQO-LK-B%I(WuS6rcv0%Z;CsZu`Tc00NJ{wwgMrC z~>jl zP)A|n7Gsrvv)cB=R`)^t6RAX-B(J6h*dC*Nmq${!bDz67fABSKRL%_LNs4kddszDb zc#`e-XZsz1TtPw_H^LzHpuTGWD=RLhvFEeF>qC&tbKH44&6>(%x2u5l?U^et)?mL2 zcAnF_#0qDjq2F2WR^l}qn5q;^oe|x%!E7eLI4yrQ@DCZFJp$Y>)Y|pr95sYts z2ZQh(j653oE}GAPKS!p#=?$Qihl=NNlKitL>@ztTFb&PS82 z#8?g10yg3gAz7Lx-BA)v_p;AqO)yQ+zVRsNcfeztrfgoL>^tWA+7w5)s?>Z=`K-Ie>#v_R4WFXol&m+7H@Bj?Uh73$7~A+I@uRE>F`eaZYu)slRXAoIO76LAp72 zgEB>^V>^;JId7%)wz$|~nR{d3B)?;x%k?GXOSU7L+YaOPp+b~(i)m(-OY*P8$ypPW zByB;x*>J(Bh{Id;rFECo#bvFtMqQnKHfxvZj%|!wLUy*~WEM}3PX8-Z^(q*!#Pg(X zg0)2M{k$QnN$;=M+WKVm&-Bb~nO8mgNakhJ2JJoPOvhl&IEBtZ4wJllFd-<+d!_DN z-iwUJIkH?)s~4K*I3x-21Ct(Qnyer7u`U^oIl@?Lb0O9Fwoh%}XYLk<5NU(8z3H^I zvqtOiL^oP!jz26ZPsnYT8Jx4-+Ef{8u9)L!>}fu2IcCzD@+>oiw+^8`bAvq!3~~vT z*IBfhy-vj)Je5G>vrJvqjLe>C_mi$CrDt@pw$+@`E|BVJ?mIkjU*@|c&@t$$cbZ`p zHGdVfbD1*sHTOY^XTpg1G6^G-#$=SUn5C!MRcaAyu3~lQ>XGI#&V9YR-tD*Hs1&GI z-HZADbRVa`B>a_InmjTWr>`>Zvlg|!%&ng@AxlVI7~d~tv^v?}qvX)y&wR5rn=I>u zvz}uLHwv!ml42^KRwSiI%9o@$iQAL2GD_tx%&X7Dnr+!91TXJTrC{od?l}W#;B~FnYyGAv5S*~ji++NAB7$bPMQf^P zR=7;_tKsXf_szNx6Zx~zx5wXF{JW8xBknib;)X?!$O>}&>E6OY)?M}J8`#LJh`ycU zv>5xz^errP6q=Uvhw}mCri?wksowG2zo8d*F zr{#_1D;2gGu4lO>KgkF*`I~lUduEPHZFlvl9jwYH7TLa-wyFtEf-uu+;1By>dX!=97bpYb$Q-@ ziw1-=^w=h}x3tb%lzk@eqGRiz?w*fR-aQ#~*Z9fXl<76Q&^iB>LQL$rsFB7(1s+t7 
zuRg4JEAQ_P>ojfQMV_h)Y&G-3)8k_%(6DCs=c&ob_{{n1;wS6z|Q#TbjvD3Z@1uNCz1rO36<^0p|S=*w!q&cik=(=fKg*=s< zbv+n-CD`2Uo{Z)^BgXT#h+auh&XeXrKM%i?WcAl=vp8eEfQ+bZDoLpO&Mb6F2%Z+O z*nX~bl`P15tIY!W3SkOYhdnV=G%P9b_|sO@q~z-#(-)8zxEc2?@3RzjIUJlBa?3SX z8pypwWU*6B$LU|3b6B#hK;^SuN zUF{R%@EkF(H{~W}`sY|zWwxIIhwRVV@!Fr{WKn{vU3avHmwTryTJ*u+PDKmQBhZTXMG+YZy^(ojbQ+RlT$;oS7m_a2pb_KKzgCZuw^00>6_XM}1rEju0Vr z8Q;u58uKT&pXki5*qTxHV@uA4s+HQE($(&-d>8pn5Bw53AY2^0(znr*=QN6csp<3| z%k=AiY|Bfk_+^)iE|m9Fp3;|!ygduTCE=x>dDb?zjvi5dqrA2{4Y4A*f$f1sPk-Oc zA6wa{;&-kxqwdd)(zo^TU6&-!yk~~|@R{q(^?cdmYShZ$J}y<_5nThy-(|~6Bby(# zdRAFvn|?0HNXZSX+sp8*m%82Y`VdgnBQ4CjhphXUkPM$Lt4Y`yb!%Np@sr#)`9F&P zEfJS~s!DIl*1o|kt&jWG1ZM~Q@Sf~#`L0t~;u{+Ni0XU7A%WDmYqnu-=S) zU?Xy`cF{P0@?IG7q(?%~6u(Oz9oCI_o<`Q%y-Hhko9DWYU8Br)N9b9`!Gl+B7 zJi?)edzQ}+-&Yq6C}u>gO{bOCxoux}e?OVySmoDJ-lZb9Q8U@?;!LNFo`p`VO|-MCU!#Al z+cNV6mZ`g+>6-q!NYS#jL(^ESINubZn~#PFIp*_h4?Es>-r&f$j2ATGOF8Sf3go(8 z4Qt)0de<7C#`(IDP~FL;hSK{vJEZoO^>%8vbKc$EtSz>2rT8C?5r3pBRD8?tn-^TN zrYf@`SQ|#9iESKoYmwQIw2n^g;udkL?5ho z*p^uKwf^8}?KDUpgt>?_@?H4Q!mwqmQ6M3`7`rhHoD<2EmAC;B|*aVB2xTNidmeojklX? zHVAR+?Axqv8QRf9vt5@!nc%I%oJyyU^XFOZbXex#?6|=t(k0(ISX4`t5DLuMFk6{g zai&~UrK&<}PpQ&OBasn|3-U?y!se}=mu+u1@6apJ$9+6_-&$^0WR*H8YC5i(hN;CB zwb@U~($qFsD3OSlaF+^01>W4LJVKHnkCT~raY(gkseV6X&`dWhM^v12TnVl|4>_OB zPT9P(k97L!IKa}8tEBp{t2k58(H&k@Q%cuXh1B+~eWlFMTX7F?>&cg07b&%1wDm>L zhvE5g!vh|2lN+9uf2xb5!-xZh_RyEB-{Dl5|6|P6_)2~d0)M8=+WaAy? 
z>+mH^oNiVdudUfo&KH}TByQMdlNtSj>uX-`wA^=hfZk0gD`no8b|Hs3S%{;0NOgMt z{ruvJJuKIdon9hO6$RP};H5a3&K~d&(!3u4y_?-XwnT{uPIr6^eRc1oJO8ho2)52?F{3 z@a1}>IkZltbka^kS4aj~Jdjos7m=6TLiq@X8n=C(ZZ3&36}?mEWU8mOYJD0M7133T z6s?K_4f&cwJU{E(c9GW4G)1{2B=%%@7I+D}B$#+KT6ji&~W+&w7vL6#f9HMrZQW z7MROapQnDYZhSK!+sPqRv;KGUu!{KNv1Q>^&WaXgs@_9z-lE+q+-j!fCb@^K#A=)G z?3lvHL5@#!@>0&9TY0?||8=11Z?z-xzZ66&*Hc%x`GP#jHhH*g6+e#ik~dc<5M&X> z$Uy3_ws&(&%~VBG+i7esq^kVkxH8X>nSx#BQyshcoDBHxc1@|1qn1Y+a%O;fEZWJREEgo6@gd}PO z`<#Ew;;~Erz|4>hUSDLd+3`#;I@olv>1^4ulBCL2id4n5hK1@M=vPUyg^vx-cDnfk zL7H%q)B7;r*md3?(d^3kX%!zn{j{yWZOYY4YaI)Im0NVUAVSdyD^I6X2h4mr{~g|n zXlRD%ON`(xLC>q_D6W-GQTXX6i^p0xiA2~F{H8bdi^*-Dv-f=MoAIO8 z*EHVrrfyMoUUr7!t6s*BvYzOc@Ak}UFjoL9*BPA4UoDuzol0GAJzWt|lwY|@UBPvi z{UgX=d2|>aCwXP7@fsa+FxbUC$!rGa75avrs&Q|qDt}Qrr{Za~zIJo-tu7XwDE?_N z+P1`|-7G~oQts&86#Xz}tH(=bb2;|YIwP&>U{|uiLV2=qLBZDAdm5ZALuUr zeEF6?wX+Yl*L1qJq~K8H7VR9c4Thq|FoQt_@qlvIzHc-yw=dz< z_h%ZdtDJXQ74r^b+1&fG4-Sv~(?Uo3cbTm+c6IjCed)T@w6E-XVQ`7QthsWk^0UcJ z{25X!SKCNkn_PyQU*L9=pYK-K+b$fn9N(6lb>ZFp6m`KD-AZ}RcJ^@M+8we8w(jd)7h~O< z>$964Q=on4`*LKuu3->qra4|r{c0}xtQ#pVwkmK4bUWngWz#5{!@b5~xwCjac$QJG zT3HuePOs(aTZ)g(9WE%PegQaY^U zp8`SA%zB(!F3WKU^i1*d_fEETmn<<)^J$Jc98vBvk&Y_ie?;Hh`T4mPX)CHtD^4n3 zsM<{@@ls`PY$iG??7J+Tg^AQX%^Te-TrK>Ha9VZcw@ZK2v~@tDyUkGNV#@+f78}p6 zu=(yaFvuKmIA5WAnP&&y`uFMJ115ga%d3s%~u_!A`SW=RVV4+O5*N)aoMt zjO@4X{GN9rr`orvUuJH5wl-On!BOR!jCx0(tF1W2XFvi^9=D_6(0CHpvoQQ-dc&^1FLG$+@q!2U@2zF0UR_ zT3YxvKPdM_o>$ojWr0D&StNdL`Q77N=$y#Me&;Qhi|)$zIy`nrm;BI8E1CH*;br6Z zJJlAdQB8O1S9Vr$)|nY3t3)N1OFSP1-3@TF2NtsVO>KQMsTtojqWw%0+t|`_UK>P& z*k}8N_*=O2u}Zi6?vU-d$KRq`y)V};o*huHEYjsS=8VYj`#B?LQhA8ttD?54h%(6r zI)3y?i<*<52^(U)2`VS2<#A3IY|OBn+ACjAKFxc5s$hS|v95GgT+3kneLiU(YBSy? 
zFGw6!7<|KKA3d}EQ0v9ES#60e`x;l*{3@ANIJc(EaLxRN=P;ky&KE31@+Ed1enl}I zy+4Fe_9iN=fnQNoUS4Ka$Ycd)-u(SOXLrTrj!f+ z)zqdp4gWSKG#sjFZ1AKPJNyh99~kO#&gQe-dZ$h9rGXE^7jZV@J2OIJF2W_*7 z-}W7zqg;>L{IQC38si-hvLd=LYNhWx(UQ(MMPeyGr}A6CXI}cQywRo0%11P8H8xB3 zI*jrx3TlmW?;jeKWanh++q9symYg7ZAl_jrEN*%A{qDnOfjL$kX2cuGC)on2n~btK z>zxwb8Yd22Wq;SyT)8QGR<31bbz@xX^_ICxIKfv}sV)i$|D@Q}F*Ac=-A#7p77muy zuC4(k-u=utn)4-rKlXn}diUTfnV(r-*dl5=)%?bgBCqv(9T^$7Fn(J9=t6Q$dw9!Qt?FG`#%iqfk?NC*tv_BR_e}Q9 zaPO!V(RKpYTdrRnSR0+QAgDFwPs|bjCuWN@dkY&reR(_JyP@cD=Dlt`Ewll;Jmw*cGlH|bK{m@PVZemO!-k;GN5&pereYV)v&fX2;p27 zIihb@{NtY0VOb7RowO7!^U-7q4hd$c z;O)4LeY-_owx47wtX%i=#K)ko?uBWp1PY<*G^f=a$Uggiq5FG}kMZ}}AfJe3P!7a3 z%T(v}mcI-W%ZB|-{9f>*U$%Kwd*>B$O4shzqndu=BA+eMed7cBMZ`A+-4e#vEAm=O zTiT9ee9=?nZLRUs(gz-|mX`iPT`5u zW$k*A#^Iq{O1f(H;e(ue_OOhtjf;)cxs{oE_wSMF%}jxH!!`*!rB6`%Gr z4Y%qH+#P^8t`X7L2*EDr(~%ARTH`2J8+vlZ!|yZS1g2cc+Nf+KEie(Cuk$3`q+@;4 z<1*sW@H&TkY`t=O^)6Kt&&T>EZ%E_p-}Py)vj~u{Inh5vxTpuRl_`*2_8lCxG463s z=U&!fE;a^LN*0pAuj*+$CurgJZ~vOH=1K2I&wjl!e)M<}Ju`Z(?=z{ZF}^F35ZO+0 zZ+Ct#b?<5^ob!Iuvkj@v4Woq$7meF#t9!ie+>JJ$dfe=PCL!NnBV4H3R(LJrNkL`j z0>KMjkB+9|w>eHFCmS8;28VqS{iA$bsxe#ol;n^r&#%P)yWbnf&nAzoFVB~Jm{jGA z&aoU|Re%V}#VLDVKP*+4b;j%-Ff8JljhA6W{dDD6rr2h%x5}fRxxqkHR{q%W5&3nv zxl+2_!_RevlUSxpawww@dYCD@1`;9Gqjz@ad z1^Wahxr`SnH1&1w>#udFP2ZTFq(WU^KQ&*SmYi{=K`br^2#EISzQSpOG{WqSYj&_> z;1o-d?sbL!&(z=dOLEl{_>1``x@_tfG{kj96JKOgoG$ry_>Zu&Cq6R!ggm<~E{9zE zx`bOjC8^?*sck=YH0}{y@v!jp7iQMxq@Vd7TIOWjW47Ee(D|q3H{?+3in_VYAzi)b zOXve^9Wy|^QlTg_YtrJePOSkRLCd{*+8mR3nFqUX2^i%30ZDAURzAJVyWF})+%|_| z*!hNAYBQaj&S95e*JO^Kp8|D`pU{b#bi+q{8Sf7-UF;$s&vR^BSUB_#R&=C6#%v;bKw7;wNf;bkrkC0kRyCUOqrF3- zX7$<;s}52*g<1V$SL1xe`Y{uyK>rAT*8KiiTHTsOO~GDK6Et5s9jH{{1WQlb?{*5C z8tK1igvkpTF3~`u@f}&Gxxi|cpuDX*&puODeG*qV=eqfbQtFZKd)_zY+EdYPCqrxk zT|ACDZLwO33tMNF&#P!_{-EDtx~yT8Rn=+L?^+fj;kNBQ$9>C`EOL*qyMs!V zN6Ty~`YFEEBhAm-3~hR4eXXi#9vLFKCgqBRk|^tVM}hTxZaB$fh6>~|SFu!x$qeQV zf)Uya)q~1sG)|$MY{G*MgfLbWs$Th7B}4QNY=eRWqk4s3_q}Nq&ssI{N*3pbl>cl^ 
zU`n{(uy2N1jl^ij*=Rn^H6!R@IORXZBA0oieXJUx7*RC5U`z$l__O7n>UH~(cC2NP zGP`{hw%)3{L#)|5_O*VBf!2FzIy>rhH`y6j9(sw#GruTLWXfx&=kF_7)L|`c@z~-m zv+&S*m+K3QYqywAo7a0C>K+#189Lf)pm<-~rsCQH8DN*`=DzYKE9g{QMOwAHn#<+8@Jx){^Z&VDU9?GFtoBqL*YlE-bz@>azy<6w(&?{hu2 zMSY8$6u>d(Ys<=|Ip?zTN?tYAcJ|OrZcppr&@mFFO|8Rqr+!Y>wjok`+|@*>S2Yi= zT2>l&#}Q7!VfK?&KbhDg!tp07sI>o?o3TjIMz>fT_WlvwcVM4{uU=`?j?!P5vV#51 zVkAM(h%MA!Qr>BBY>#7H%$B*&2{|6d3+m^RDBP^BsGMGWv1X*E3P~lR!EG2vxw`pH zHYF^>(P!T?Z5O_L9W?e=`G$a zT+2bEw`|5Z>z&#x@==4TrfN`mhvG$Bi*W=l5nkbMGhJyoThZLq9}luxWx1B01eN!N zU25Gt`UDS=AF$|W^P0uab4?1F0FjPpNU<{wsGW_rnF7Cl6-kk4c%b(x)n-52IM zEsiMf*AZLikfZL(dHjX<`A?cdji?2@NZ z$myEXB(8r|d#jPulALn!LA@t<`);XUK=;artmt=?hqWKbw;Z}XE)6K($vB*WEKwqKL=|}0p zT4PFnf4BRvh1?EV9<;}IwNnpyqR>OsCAldcNpN+RjW^0~6)wyj zmb0NKrye)n7f!UAYwK&>PkvaUmlVn3q*ozpe5O<;50p$p`*ohHU0A-WOkZJF{a^LY znw<^Z+Q%Dqvg3)lf-mwuj`Q7*y3ThvV^6u1c^&f#c50O^!jg2`TW{1(t>{^HuN<%X zx09iWBq`EM7D~I_PT7uoY;Tzp(#xWWf^OVNcz^T+ETS7JxNimbL)sT)m4h;YbAs)2dmdJW)m*K8ft{$l7Xs>EF zcdaq*V*ep-@-%`5;YQ&UVSsp=tX4kIe6~E_%)u;B<}0OzEU^=mkU!7`$Sb;qyl>iM zXwg=6W_PA*dmAg5eZ&Sx{O!#pcv4}$?*@&r zv$;dvF+iQG-J%aQs*Hb3YU&R=7F5>upw?|fW#A$60cao=`H6*dmT)tO4ahNatzo5s zHl8CF(thA}T#qX8x15J?y<tgnA;&&@V(3_ciAlN5yI3_;FWp zXK;^kL%5;D3N#s1eoA24?$Q?+1v-)A!Q0L4!O17wiPMm-SIBw9>&?F%1m5a09Y)SK9@Ss!`bXPWTco|(rS9_6yPL+*i`W}LIXKd{OfKXdNWie87C=sX zgu%?1Zx9#=qYF8JIzrbY8;E+2fRljhu-%-Se5v3)Z!uRuJVj132FQ?$Vb6mz$5f<> zv1h>{jgus>6W*nh>*pDqj0X)@^mhys$q9@l z`W_c^_i){a7|@hGg?loTXuvdVvdL0s(#SOzH7|9n(He40(&(O`_cDTluZ(rV6hMYd z0+;j~sGdE+^rI(IM=1gG3zBy-k)fCxw};HXpTrx^Q?8i5Uf?727fj|>V&~Z!Y9wTR zTmq)&7kdXtf-OKs*&t$E4cU7HH;T8OC*!4XA|RPF92^)20Uw=2~y2}_yYC@MFgPBK^IlTj1PBJMADwJ$7N=&oJLQ2lg2hwL5whRA2 z*l@H&1l*I293N~WGZ={3(Qus~Vf6oJkQi{&AaKrIMW#^S+21gSB6*qo9{kh%F9L?& zi|5OoNI*IiD6sc{o6A|qak~Y&o+2RGtyxcWIi$fBgL?Bc=*aINO`zpl#1_!4)M1h~ z9yd-g{xDoNT2o)?{y^~@fNSieF>-yzW}K<2RRN(z`wr1$3Yc~{PhB5FCc&Q({sV;<^$-GtB?ZJ z29Ls%z`<-V7J+^RV22$QHns}%TDi&WCqxFm1RQJ%QcI^$?`S!ai?QGvYJsf=f9yA) 
z(Vc-!!Q=4r@FcgPVeCstZ|KGlps%|QYSuiM17D~$%mLIB*dZbMfq6@hW_JMz6o$1x zhEfo2hyDZk1CXM~{)SIvft+I!scO@9atvf{?5EdLMk*X~AJ;K^kTqBdHW?p71QX}* zCP>57fdAnP1Yu{>-ywJW8gN1jfoxv@?8kcaJWdjiiHF2G&I95VTFm^Qt$>1B4yr~A z@L}x_8s0GW@c%#gDtIGV1Zn9#un^F8qo@TgBnW&tO0fod7ujlDWa^+GmkQB?KJdD6 zpV624j+`d8aZmzwBV-P+Mb?ZZyAW{%S94qR1KnbpZ|pGKGCm`tm{Ev1xSf}vtJ!2o zgjmlOp)(;J#unWOI`5sp-cDnVkn>G_s7Xi^k<8%|FOf6M2-X=Bax%Fs+`F6(ycLpk z_Mzw4!QdKk6#Ttzvi+EJs)D-2j0QFTXcj!V*sG`oA>*{*iRd;MVSA7u%mv?ry@0g# zFYp|Gfr7P_84225KTzs3s2^mWb%2(A5Uhswm3pc!Hzr zf52AG0OfKCD1z7hCH7e#U_?qWN65Q^+7gWL1>ldE2O8yG$Zy#FR5T96YZ2(obEy54 zEfB0B$SRokgD?`ijQbKZA-8!2{9}Wg1Ff|m5_#enF@26aXZm6~Oh!XG`yQs5y$xFE zduR$a8{YsasS~kx@Qiyxw&y2s6?LK}l2b`9>JdE)d|5sN1vwv-*-CUBb`(g=Y1mBk z6YI>7)CU+z)8P}mrH(^R-Yp8F4>Ju&D;7qCb5O`|6>|GRPJA<@T)N>TkQJ%y1m+Nh zk}DxOG!Zg_!|1)tBT(zx!C7TCHW14I22BPTGe?1`IR+^vu-A}*adaz_foM?& zV2&nX=@<*V_HMi{J`@{@o{~ zgATWzs-z-l0%+WH*kvIv8qx*EVP%*Gd=U!aJAbh0u-o8}FI)hQ@AlLUNbR`-ioJankaJq(6p(tQN76a}0*FS#+ zb{qD!{=g5~fHGbOBj^~YZ=Y0kqBrShpKN@&Amu3Tduuz~#agXoPDpMx#O1Jp>pm5wJ1a z;gsf=F5V+e4rI8wnphtVGo31*XF48Ed!0xvxZ*ohrz z8}<-S#V_IGfT+3(RC*CyyC3q2IZitRx132IVU~d#c>>S{NyuI_9qYtz5cLE_yeGQw zZ0sVs3D(zQ(2?H)?rR?~;-`W4JjLu`Pk_5?Fi-*gfop#ci41qp1K={n0)aroN%I2i zY4$*M%IVRNY15a9U{4`==wWb%TMT=B0ni2yfYrYPJJ|_36Oz}2komj_cE7d2GA01q zPzc{Y8uDspf|qa-c<(5I!de4+?q$%scYsHR5?13Mzz|&qAB#9}Rlxpt<(I zId3E|=qrFse@O$0~Ebee{FgOaU0tVcj9{BW7a1R&@l*@0}ai0U-x*f=*THqG{ zGC&jI9$o`>i3{AvJlK`S!EcxeRmuqJ^=>lZ?>jAyAK>rnwb@FCHLWTeuNQ% z0=aexya{lidQI@05`k|_fzLA>nC0%kFAV}d<_5A49R)tT{@6Hl4XliO7;`+J1P8)> z-Uw@12@qLfKyeO&=kp5KmUnQXb^=1H5*#p=fj5T?{Q~5OBkUKWVLTQCeWQfCG8>4s zfxxz}1=kK5*#OV)6RhOtfbqHoCtDHB)oK_SLU5+o2kcK1eH4h?C*Z?#7Cy_rKs%iV z|Ajulo&H@%|B}iJ!3}HNb~4=gk#P3@>!ex$oMsX-6g8r5STaydmtY?J z<$V2t1V8%sYghneKBIUgdL42X{<^|Qfkx^FZUkn)V7_Ih!v69O{Fk1=m6)@WV0TD> z`J;yOSuLEYo`5g)9g3yW=yTvE6v-CCycNU!EQk5`3x0Am%+fVLeSQXh?k`i>6ZXiJ zz_pG8Qq>wLHdo-SZUFU_iq66&;m*WjI3aAq129NBV~+#7{(wCV>|`A52m8ULrYivp@|lhn&XE@R<&R4~HA1Mx6z3ko~Y~ABAVo39OD6vlY%69GJ@? 
z;5tKqM~VqJyE3@`pWwT31I{+laE8>w2wekfzZ($c1(2073HZ+UK*)sxhxHlqdIn(G z;0_~1BatBX0g<+86<{gVUFk|R!v7i+R|-m9(9i5FtLan5`3-U4Wbw9P)lHhUxGWi z9NtLR!TahTc*}MJ_Uk^Jr`upBP6x_vH{6>C;CTHA-d{(+?iLLGH(S9uWE-5?_5d-z z7Cu`6oUJFrm@$LQ@Pp_XOo0CYr{YR@_flZ9An&0E9*eyN@3I+ijvvKtVt&CIt)vC) zcO(!zq}6a<7lGIKYG{0c~~$R=y8#0zAjA0WY3CunIf^XJ;|& zA=}~lm%>^+5eQ!ejJS_*J)4073xbo@-vowF@C3iW+2aMg!#l$4SqI#03yl9{U`-o< zH4g>a+zb9Y7M%7T0Qpq}jM+k<4tKyltpJaY1aL|C3}d?|yrmohFY8>`PeOo^tw$b# z$M}A9J~)hr0!w%v7`Qj!(Zoag!I*XcPyg}s6{?o}NY1CS>FIDb-2!g_J>X3B8k}SQ z18=y$J1Bv#M5nQ5=q*$yX(aQg5au*niQEQ9@IA1nzC|YhqkIJTT>=>geyg4^_r}7B zM+Kbva+tYMK-pgY>*4?&I_rT@&Sn0vI>@*?1$VOy{LqBh9`G%CiCV*XArtA(R>1f_ zMCZeK?*M%m@^m&b(XbBR0;>2z4d_yB8?-1+Z?WvbT_P=rkC|WpM7gMC5XY zaXsNJNXfa%nMhp3#zRf^7Q3AuNs3J?jXwP|DuO&} z>SnSrd6=?|1C2Y4PfXt-S^0(Om$AT@X!>mWV_HU1)Jk?gc8$}GpDtJ?VprgMe?`bhmygVvZ#`oP^5AS?oeC6)r?co;TrHB;H ze4Amc&SeHuZlsdDO-F(|w*(R;wuArv0DK2F3^l;_G=cNdN$Mn7Y&vL4HHDE@xSxchWS7nNT3oi+Z#mnl&|WYb1Nk#2g|AMH?Ggt44tsoONg zv?B!8f}}A{Kq_{bP8lB>_=Y9=CHgtW8T10E8V`nCXD(VX37!9*KO-+mH%xiX36;-tCXtumJ?9$IhrF%tt#i?x)AiPG*1yybHNK^?kTv*BdQk7WsPL4IXZtcUXm^s*%~yN1r>crn$J@qs z%+gHLzof3=*}{jiK=}l-FzGOnnm3Dh%U&?1YFav?I+HuaokGyhj$z9&_xGTTh+3nm zBi?<12kt3$HMe;nZpCgJaMj0}eI*G6oAMtPE-OQ7%IiKij#6QU1TJCO?0msHB;ah| zQU8}7y=>BX?Z)AqR&7zLyDiPiiiY+^s>P}OmTGuYpwdazUmI)mrdQ)hq7rkxjmqJ$ zb5G~#_OC2wO4XbRrqtF6)%S|(^K|+4CAO7bbxYa|6fJ0wjW!FAPq38PMA^z5HoHyt z{Sn|97#5iBpXed68cKL}jcjB}2L9py3jB5H_vE~1WwYy-x6RgjVAe9OBjS@8>>d#k zeI#l^Xr@ny^(gE~r>S9B&GpKxijIoIm7dj^ibr+3Y6EM&)?RD#XJW-$ZBpC{{Qm@h z2woRh?Gd6W|0caT4*n=l6;lyl9yOx_U4Y| zu8Vx*y3GuD=9T8sXwz44U3;u%LcyG@0l$7^EG}5DxZCo+{fx%PxQkgp{3|VU%AJj$aVhfZCp@Y%a8IA1x|UTxlOs=dExnC#rmqkM#t8Gj((=K!a#>3zSTV* zM*oghcE9I!!FG;dXje_8^mpc0>hqT`mg&B^m#fR#4w6aOfbropi^*&S_x6Y=% z+nYY}p^Juj4)_qY(6c}^Lu0C3oV)E$YhG-rwqi)-o(g$YcwJp{WT(b(i!;r7r}u^) z<6_?T{1N5icglbFSj=k^bO}F-Zpd0} zwz#%<=-t-3Ja!Ioh?g(oBrxAhtGYI}*|vOW-LB#{t*%X~RW`kAYgJ{m%vIH^L#brZ zOt&AA(+3s}W%|E}it|o4)0nz7#utsxipn-G%WRn0`l*@Q@J^wv2b^kW0W8|`Re zDsf%v_R`VTx<;1}cM!LX 
zDCR26F1w2(M)$qY?^xt{=M!jb^TLXk#qW!cRXl1msqYx{U1q9(6#=Ei#fh~?j7u#a z24u%P>+`Z_b?6PR?Usq?#rAG>MKwwF+nUBGzf?~w7@Bb?{ahxuG)grQdv2NNdd6#! zcZSDX$2#+4;!Md6i}7|k=Xy^IZ;qpo@1-fM>sN8AB(`*0b+vM|%Fw#BHNEY;+C}F` zwqPouotd}gSVwpNi&0FU)x8^g#5m!^thVtrN#*a$+bZm;s;ZwU7APiH&MsbFaI(~< z=@V^cdDVvw^^3S09O2#IRBlCyy@;#SK^^D?R5guORipCbGA^ah%a9hiHjQ9X%)Z-Y z*alnj&0Wngi+`*M=S$vS{Y!iuJr6riwc5bBu5M}g+3>z4O?A9=TT5Zn{W{Ca=@p9l z`RWJMZDO!A#5T?Tu_YtzXMWttKVX04*I?NtOI>nb zWoBe0md|gOP}|vsXdhmSxKQjOx+a@rKi=E5$Htz51owyycE=5A)t?Fu=LeR>G{mZ= zDbcbY*`NPh%Wo_zZ9w&vf@$_ao;ST_xO7^q5p3cuF?;P4?e*C$(=q}Z(Kf2$Wp44G zl-%J(ze`6|ey=^;oYHY!m(COkzuN`*uIX_ia(z^{@K4?HJ^Q;PIREFg+qO=cOe9l3 zwRf5(RoE8(EMST{O4=)u>Q*V^>K|99*H1ANo4xkl8gVmjY*?aex-3T~aT?+q;ltPq z1P8l@)r~2+RXC)?u{^STPRW*n>3Q=ChLnx1ouppINx`Pw(tmMQMug_Fw}>1{s&>e;Jpe z?c!OskuEA{PunNb(cJT>7ON3lG}|v*MNHC48&}o@)kUfH8k{j}UNFaxt)#|+Z-k%y zIF~CfE}&n_la1uMBKz5W;B0!IB-JrZ!kVy(!&P~WkJW2T=F~=0rRfO!lKVnjWL9Fi z$nJ`x(ZOJSNw$^miT_RE+l)SB`t7)F+VQC@8Esn9R#JaH-8=xheU%Dju*nkM&a%_2W_U_7Dgf?MBPwrsIv8h7rn+=Hsev9l2_!u6kXj zK}JeJ4-&>T34>)@<&)&cWbKk;;#Pqt_Y(G$ZG*G5wehU+0QrnM(`7PT>f@Jhu|sSl2^?+M_AzNP$BZ2Swc5L=Z`f!(C6!d_1g@?OtUHQ zhK7s@b9x#1&~)3BZ1OQZH!RngYd@+dX_j=^7%WVsrUsLMJWPgD_oiN$bO zUZ?QAq+F&kn_yNUnIkOZp1{kHI5r1V2%by^)nb}q%rmHr8fqPK0DPk`=pdZJOn56t zz&|O_2&(ug+yle~3{)0u1XPy1=`1qIn4~|T8?4LK4KZY!DB2NC#)olIIoml-9GX}_ z{K9sEn+8jsHvX$Wt8>urfj7Pw<9Gu{|G4Y1c8^xkWimXawCp7e=Zxj`6}%P(iYH4_ zB?1X0T*Mb~jra!a2$~AMtPh#%pd>g?rkXyOQovXi2PevPs0n|_Sitl!MpV-{5t*%x(T1rVbJA^#?ByD7$HrQ!^sGeA=lHk>==X(_12@PE6PFA znRoPa>JmA|)W=w7U<|ItMMkm7hg?9pfOm0k$Rvc0CSwI%i5<{AF@x%r4k~}a;Q#uO zHAlNaP2Ud-0^ha!D}CTn1{>}TX(s8nplsv*BC4IPTSVG_WbrW4-7i;bh9uFz<@ zYAP`mkt69FOb+V+)s$Dz)4c{=huQGnzLug|5+4)E=r7i}3Co0r`bInke3g7?F^qaUH_Ih}=?7a3=qWH@3NX{q%U|_jlm-E;ZRK*f!u{o+hXV|_#;=ro8%6@2AAR2 zp#p4#PH_YLef9q<2GP(h^QR{>txy+Oi2Z`f$~y3MoCY2F5Of|mL(PHN+XA)ezkZ^_ z!IdrpYD=e}SI`cfsC#rV6-)huG&3J+pB>=$x(VudkD)tp82YdEP^Vu4E^0ie2y6sj zOHgVs(;zKl6;wUXLw)=P_~v~AhpoSE|6Fj?Lcl5QANUQI5Nq%)Tnvt-$v=NXMa{v?+B(ebJ)kmoL?SOvM6zH2L 
zf#yL4PIb3H8I?sJV%9_DSPZ(7ko|For?5jS3qjRNl2qC15FIZY@^%ha^_#~vE2xL z@*?yB{Eds?IiUOX#7_fSj!y zY#RO>dj*}I>)`85Ltp11C~6!*N%Rz|s&Bxzc?LK^(sVU50mk1r$i^uHwbDLNV)lma zTQk%p-?6(vskal964&XwP_gWQN?tB=4-`@ubQ<5_uAJS(bUYYzTg_Mx>?Jr;a z4X{J6!OOuj@hR$p%?Ah7CUh{khE2wOaV}Qz_nu%l{v3K*FK~iz1?5XAcoe<^=lOir z3M$3%s1y_>Q=!s43;eKKK_4T=H{;7uU-lZ)7dqar*tLuZeCGQzr)ft}ZTN%J@-TWR zRZfnjJHZLv2D*6LA(7x;SRr=8*!%^`2a-L=l*3593muMf=&OE$bQK)51C5NDrJyGb zdPTTr8zDhs8~PtS@gqg)G23F%!Ft*^jITEyG^Kz$Qb_Uy8hB+StLMa`QtbGAgzKy7XfiJMh^&1MYkS z=@X%3l|-<8 z#CGHgD2t@9clJU?GF#}^a1A5j=_}#BPh-DAYHA{+ExjYPrbi}>?toD_i*h9oQPH3V z1XUEdgo?p>ap&UuXbW14E$4>d*H{Cx0zZji$YS;u>O*je$>?wx6Av&2z8j-q#e9J$ zvEDF>&QLbSm!>Q>2R+1yOznnS)DuXgvSn&WIkOAxi#MQMv?cY1`2-09 zVF=FpfKKiaI|#a4Yrwg46mkrEjlV(%LU(Kj>PW1>%-KfT35mv`mj(URq1ao@02%`` z^f>eq4uD3k4k?2((HY9u6i3w|?=TPcU-B-s5!^m^umc$twI67yyU?v!%Y385nBIst zwgVNhducBCgQkPGULcGT89fT>-Q)i5QP9ud$mBwg^aCXOg|Xn8icNt2Ks4IK2BN;i zbWRZ-0cR{Vl7#I<$H7P;(1m~30VEn$)p|%@I1ej)0eB&%F&F7HmXCc!#I(VbMh%A5 zdK=WFC&Mg=h7`hbaM^x~^aKq|I-Ih;ftuwh)6Uv6OGq~=5=P7}hNZ0OSxg%2OA^qC z*gDS12gv9k3X9hJ5f!^vBjh$2|zU4juKOm^Y!p z&Ctv6nPw6rc|-6Ha4_GGsqmSI12|+>Lf?7-^O?$}?!dVu9#-~t*xS?T!_-RJ8Qq2V zM|P0IC>JyjV_A&8Pj%7;IALYNj&lT5LRQE!ybs<34Te>&0WHBBiOqO2QVq}Z8R#{& z>{2?CN~R+a7to*mW}5%U(OE!8akpK3eAYL+@sNas5Zs}-ySo)AMT)z-dvSMn*Wylr z0tJdoAnu;k@%iriogP{a4Uw6@KF__st5neQpp<%r#HRY_riJSgR`xMSX8Z|dvVoWZ zb<`nr#w(Ijs4F-vA0+!z4G0Dl%IA7bay*qpMuHsiyPBapA{emnjZLrpyq=Pg<>q+ku=bORWz*;970F zTw4)ArTmK!l+$v)7R0n+ZxI8Ot7*AEr zi`q{0BYfyVw;}Fe#q3Wcg5KPaSWD$#|7Z?vODQN=5^+|ngmk>6I8#)jqW-sA;e@@F zx=O5sE@d0B5hw41dOYThOe~^`i6Wd2s)J8Dhn$Sw`~xVsIE>aXC|r2WqlDrXVaJ?o zsm}$gX)$u3N{C4$^eIp~zr|T2P5mfel$WUOwF#=K5b8l?yqc>og4P6D2drZi={RN> zBO(ZV)NN!HbWkaJK68Kxrd4Qt%FwgfICchw6bX$X`XVE} zE_p&LtA|2e(S;a~B&b&8Kx#6vLLIM6)qFUggFCKOB>tkOvJ#E8LOV%xWWF*>sKcSjl+A*z|7^V*t0E?L0%ms3ix(yz?hk9djzrIf%D=ZOG zBt^M_be&CdO?jPq2B&~P!hyaqDAJUNVD#5G(XK&C?>N0V*#)2NCeagbdjY!E**JH8 zM;E#R)|x^!NpFF7KAj4JRw)lVg9r7Yjo=T@*6rF-==nnR`Ggg@er{-Y29rs*TbwDFNaF!6skQ7LW_c6ph1KHd$YXj(2}zx;9(=q5Ovq 
z`Y2_w+7`FHOG+$GLEW%=W~<}$_P8G>ksr0)Y6pEf5^+NGd8keNr;LIAVm{JIw&7gc zhRlXOt2Xq#Z=o(}L3SX{Lo4=9n@&upM>95hAGMeX;oBPi%-OcDO1)t@!}w z+Gg}BytPAEs~eJ`xYgT{_u#D}0A6miO><1VW{9S!+v{ zWh--YX@>ZKx2&m(z82cHhT1f#u8<>Clk4Gj@CpjJV%(s*f$MvPOd`tQ1iD00k_8}0FCh*fi9LZfk9+6m}J#4M*2BlERsG5}m=cWZyAYseiPA@_T8bIuw-gb82JE zfji1NHBlL^d{9%Nour_E*-FLHMyd)~g!Q-{-41t-CuDgd88^C0#60>E)1Mwlo`qZH zSNKSh_3B7>2q6AZ=7>-Is$fz#k}}qdaCwukN?f5t;M8@V`lcI{`qEvQ)ED4Z@)23s zv!NjK5W|`6{1QVO{xW-sDWJF03SG!NLcPvTozO_+KFomIWS#a!mZksQP5NUOe5G}Q zDh6qx z1xu__Ul7p`9;t}Xh@_~sh02~Go& zo9I0%<W8QpLy=7k^f`N5 z&aiLj&0MDG7u#C2w44mF)33 zO+DAJA{~{UXSe&d~Y9na8o;{ z{g5w+G5#>`C+|J6o#w#ZLgW(6DYo9W3fBAflOdI(J+WJ35=;FIA%h%2NgxqAomPmy*3DUmutF-%%|h|mS&4%eMs~0 zPhm?#vIA3{!9g8E&V*)#{s>8ShFYw=n;&M~6W}ucO5XDe?!&(CN+s$V$lK4d#P9dN zMWo*-=`M`~AAUgL?#xr^J@Zn0YxQX)P8L##KgCinv?qL)e?({lRTQ&xMKMYf3$em>%q~H85?o+zGg8e`D|w zSL6VZ5bG&9`UiE5OGw@OZPbssSx-wk_&*Axd`F7zWy{%9OJ2x6+?_pgoY+f97bZ*H zln2TFWE zdc=o_YT-3PjKM!bs)m&g`eZtduH85#P8*C0P^NgXAUUPq&yy)_a(9$e^fvHTa;+?Q zlsmJawZ|+~SH>&p@(`(`V1OE{v@qDe+q=enszfhZSQJpw(p}Hf+%wFZC;Y9}qSDzZ z#$(n*$Fm?lq-#h-=-Tl6k!0!7r8h=*j%*y(C}>U~6QYKkaYXTNm1O?{@srj8T4hqw z5FunrsFm8H@Qy!F>96b%k9iV``@4Dw^|anZJyg}N=obl})>w-49w;eQ+_RW<-E`M? 
zzbvYpw;{V}?wH~MLW#DOBANeKhbhDE4tyWj&`~d7os$Y}8~IyoTHJpzRl-)=`&cI1 zP6lMzP8c)kv8a#VL#DzdJ<;1IdqU!g#HU$KcZA#yH;%GmJ2IfCf@N5aQ#*Mv$%1|@5N<`dKF$M2r8&unCRlcdmq4*;l3GeSU)-aLB|69j$G%H zkRGMBMAwQLSbAr~(7;>PNZX9S(9o-a9%C>4j8NIXS$v`NQZ!G?Z2yng?@=ii^Iv)w zh`);y{1v=~9$5$`MzOcpCUiX{9{oU0VGlh5XO5fxlioV+`XzrBeJ=Pj-&zn`{L$M| zsR|{p0WQPQrrQoZL*GX(k3JU>?Tj&fqb z;-k>q%+J4-do1Tl&Y`?k#ov87$}^$}RhO5h!vS-Gng>@8_JoXxSP>ZzwI(VpvP{@% z`&UD>p@q4&We>kVuPwxR`ntz^T6>qdhUXS0&HeTzVOqwx;xB%%N)(UcR?cZF=!3?7 z)}z)u%WTUbb2sBCZXsQnsGv*~f_y_gfv!G9hYC&<%q=t*cXoFY#;HHZMcg`z&nbu0 z4b2S7avB162j36>9<`$M;>dSF$IXM;R?HwSgAd>~5z%5B_p6dhp4q-(o@e=$Q?GsN z^Yu#7sk}M9Kj3KID1G(s7aFOLs1e*&{ypE>P|>)`c%5HN?@@32CwcaJ&if>PkuTiy zve;diT;z3Mky=yJ4T7bGBO;_{ctH5WV4L%=fI-gCkkb)!qLt_x5z`&5jZ>L@sH{8m z+DegRQ+$d`Jm9LClakco>%>p*zF*3$r;b#cDmJeU9v68uS}H|`s)3_}|A;sl zb2r@b%q`=>!E z+gEtyKPa!mT`7r);p0p(mT1d+V>>oY4feJyY?wDJ?^FJ^f{O)X3*Hu-E~b6mEqC#Rf$2^Gm$GOT9X8ebXrvua?Z`YD<#T(rd z{qf>FuQQKK%=lE{OJwphthaCnvK9G7ToY~`7sU1WX;T{&fW zlAb&>ZFFW@cCFlrd5a1gcxZVs@sK{lwXnTNHiD1JV(i!-}HG-6iYS?^Qk&eF{$AoX2}-s$<#Tjc<4d`+`Yl zwow+M6F_q!x5l?-a^M_?@NNt}Mler~#f6+YmBky?sL2;ZiSjSFj`W(1B za#ySrTeEb7QXNAd1lfaYh8-`}Ke8k|H6$>oiTxPYLvgt?3TqZ`P8a zeU!O4?_ALZHziW~E!=ebQ1{p>wuWKHN=L_;%kGH_FH=1xxb)cQRgnY3{&P0BeKl7y zF`P#|>p4?M6mBlQ=z3D(&A*VInNaiV=N}8RCb@#;&Qz44sriNZvgIFJnr*vj9V+qF zs6UxZHj?{DcT?UMAIsdG(lB{j(!->csq?e$=Wi&n`X?#HnoYk#l;PI}^awv#dS%?n z_=)A#$3Kf}8MioYYRtm0QvvC=Q0MPKTOHL*<%q7Hui4S5ozpsJcgqP#kNWu;Dx0+_ zlXB*lobrE?W-9ZDCx$R*xlmW|o`By>lh}52D`pv|vGa&jzpbQeVP<}mxUQ+IxXH#GK7i-UZ3Bf;0cCQ_M#P844USn69UVO-N(i49xYBaXkY=c0YHdg- z2a8`z+U8rbBcMa=ke^gIvS50ymNPuRN>Q|HivN|C$4oSywe1fY7%ByRv`k?a>aFE1 z;(jSac`Oa`t}is^6l7dWdzM@o8nBkh6EfdsW2Y@{ShA*QbzWiSl=SbJn+mJ= zRdoT=*plevf-g85**+LfP%G7q3Q0_1>Tvs+Mr4W-;r~+9JKLDLE>TFFopwEMi0?V^ z%rMW=$}-*5%v50g9@Hc9aqNb&-Q(|-Z5NjrlO9zVek3r_^q8_E+o&h8NIQ%Z%nDCT z{Lo$PN98XM@$p^xC&lk7s8Rw6eZ>!~%=e{?^_IxUPUiyeMdseBU_X0&# zG>-`iE;TMnh?)^&jh|UzQiW&b4CTVh{aJQ>j5E?0_RM+S1njoL2%P(Re&g(2SwY#e 
zGJ=y|Bv1*Lf2>KY1Ksbdj0)Ku^P0JGmA(7}TY>XZ$g}Xz;g>>wI3L?rT3+zcw2@e> zio*30Ij1CTak4e3A@qmG(!#UEJi&E99?ncRk8nhV4vRb;v!v{n3PR<`Dl;p*jav~j zKWbSR>#Sr+=i?a%aa=J;!+kx9`{ry-H>O=qJ(sdJB|JmO4Jq1P22-Q~Hu zjfpq=g80%0%XO~2vRca;->R7_EslE+ocY0f{cWM6x4OIJckuKOhvn8DOT(^)4MJ{BkYjAKk=Gwg)w7HZ9vgMOY-Z)DwKn{+pcY-}e9YbO z>p`;vE?VDMc9{Db1~SPc9Anb5k~*0ef9(E}`Pue0`|Fu+841^t+h!#f9Co$#WVxi` zRj&2IO~L}E`l`VEuzr!A$iGT054+`9%5{{glvK;N<`(3v&!Y?fDqi7UZrTqA+Gi&auU9R?rYE#OQk-Hq9_!C;ZpLG|O94;{xKPU*wcjW%a z2uW`7qs)&MKVyD={F#wzESM+MBBye*je|`&yqR7EKZZa&BL~yp_(Rt6j=uxySabOR zvX+=!vN^vXXI4(#+&=ky$#&r|@f*9EyTv&T0j56Y-z?4TrGvJG_lSO6=1}>MmHSrx zS}CRMjmWUTU50Dw0{6cl`VP!!kZ#Wi%Bfg5x~N@V>x@Y$za+O#T9g=>ayEN`YpYb1 z7|QN54YBUFD5mYELgP`sfKB6DS~&Z2+X(Ak^LKtVnILAk$`#02YtsKs&&wW-J?4oz zjGo8mnGW0DIBoTRgiW{(@Ho7P%O2VIM(6a6vz z&*)82w@ZzPcv|WnPDy3L?e_NEYNC|9$eo#+o>niZB;n8RTYg+f8J|@(?_7RfLEYjt zE|;sk>$t0ycd&3s8l*a?Klt^gW#+AxNw#qNVe4^ow23tIVulhGq2+;_Ob(Z}i^s)> zVqK}dyj%%@Z~mp8fOg<&>(v0&IW)LoSe=L|5kJD$hNpyaAuS!j*6F6l{2MA$ZsrXy z=JKy*Z^`s!Jk6+@!Dl?otd*Nuz!%Re9$!4Q`1j&+#b=P_Gg*7YfM9DXZC??T8=4pT zAviwpSilpTVEt@8Z7~@gOseiwS4;o-0$mpicIB4Id6bipf40O8K6@MaqpovpY-a<9 zhR%g#Tab}G$vXPI$h3B_N~_4*2UI-<_d;?nPtRed9nYZ?~6ar zpW{{CL7wft2&u9fr7uROeF^u#+}}|W+$XGgSl`fDq36TqhBpoOhiXB2_WQ=Z>=mks zHqo1sOQd~HT>WE1LZhEMl8tFG>8CTR~u5gi;^IV{BKHWzZs$(y3P$egu1rE}84#9fJVlkca$$;m0$ zT2jOJ3apl0VwCTaw}o&II@m-t1ihddQ2y00^|05sKQ`Yo?la|?|22;{-QjQ1^Ysr3 zt7OV=q{YHU-$vgEL6l~o;%Fv*62DW|*b^qj_BFubDCe9S^eFg8(15^I&I1AenD4PJ z@}=HNPInE-J&-XheL-5;R3Y_a=K8#eMVs9Qv4=JU9iWZM?{HP^(uV8tBswRckD0k1 z#$%SVpynMh7npBaQY>%GbBxc}9%MW9v@}nwEm(aQ+?!km&oO^jxg0u^@3hHe4zt7X z$lTPr2Q0ny_Un$Yzz%_Lom(6s_EzR1?f^AJJ@1=RG$QAIMuqh7^t_BGIr|IyxK;mk zbmsODUtINH*yWvO@ZU){(2Xq3!&3Ls>(tVX(1`X@t?uFXmP-M#_(LMg^Fx<#2;@g@!nO~ZwnU0!Mtjq0+{nvmM0g?8P=7|Q1dq~O3 zRqvIeQTcmw_3Xl|C)wBYHljYU!J8sv%ionoa!0YD!28F6JU71NzKifAc}DsxC{@T4 zOgMkSSk2y@K<4EV&Tw#{N`a@(dJSQBb8_rq|LB znS1;*+))ZF4{Wmn;sa({-x?+MIniHgC@f z*ZnLi@vWd26QJn|Mn^J;>d35P8?gP^Q~V{f(cZ#QCeR+-Amn-Qv!IcIU+k9+n!Z9P zaK#kv%C!%ilfmhcCb6SoxIUF&vevO-zf7J 
z4JS+oEUC6XoGHOnXoujFPC8(jC4*a_R}}7*IP=S7ElV4cx+`r}X8YV3g>PL;e14&g z)KdN^caz%rOfGxzZ^he6X1c%lzQ{K6DLcy4)jmD&*Wg*fErTZo&2$XG$`ndR=@DS^ zzVSu-I|?hMj@o0Y9@oII*XT1HFgG$cF}*Pi=gaU_4JOk>OQ3zPqjBKSz}?QgfJfH< z3quQWza?H9G5RK{a1Uc z?W`@)+QvMJZ^L9z$Em^eBDRR{VLSr0@?VBpTyJWYdfvah#Gf}ZYg5{Ubm%bQIL7XO&gH+eJCKJ>V6&o7OfhC1X==jpFmZuIdV^8f)OnagEvQ)Dvxhl;T_A zDRR|usjg?$D6y7QB5hp9VfpcaGLu!5{Y1yI$Hz}Ck2 z&Ai$A4Xnj=h6vV7_0^jzCxpx1B+uVIR^p-R(8(H%gI&j*r#Da^bdNN^TTtAuFfBhb z&zsk=;7ZX>cY*Lk1MP_m<4f~N+*!7OGU`X9uD)#7jFK8ItJjUbSUIvHSIIKO!2}Nq zjSQzFvct%bF;2T}f#EJy2(?K&wSv-Deuh=7Y;+u>@sJ9Kr>5=VF2IpbG)tJYZF zBjk9ic&d1gdmei(ds_+>(3jk!L?~P2(opgqR4Rag&=MM*+uC~lEwP#U35D|#s6~K1 zR-a0CVTb2k@s)yr{BL=l{9#4YTx-3%gu!yQTALU^1%Lq7fql#f^Z;6>-ctb}r1fT- z@Jo!fECF`iaVcn8$hgpPp=CmP2Y$1^wkXDbxe4@WVu~t>KYd%h<$N{$#r_vUX{nri zS-z{B)LKC`e_Y$9)>MxxAeS>|Y{bhtG;TNc@rwPyeQT{c)A>McHv96aT zEnHpQ%RL$138*q}m&&Ma{ZA@`nZkcGt~I~5JhN`HeX$*~)wIF&X`NA# z%|=VG<+XXf`7QQ|?;OuOASHdMHdk3EMTs|s?&1<@vRp-Jq3#Ds=NG7)r|7xhl}%Hv zssr`vOrf7JM%XSK6fOwm#YtjIX*npD2BoUJTk0UCi>t(LA}`($+(M?<7asB3S|U+} z?#{;YoH5CiXntgFXD%?+HKiNY^A2tovy(nTZ(!=c>2QEu$xdKDv)NoRukaf8m|e|? zsIfI9+UbYXgYsZ0S&SA-{7d|i!ah8WIzkJfj}Qkb94=NCZwaLY&CiRn^bd#w zy|fo#@I2AqW7lj)Pp8MzJY5WCP@z5?TJ(G1gKUD{v>JJp+(f;guQOMe%FJ#08W>dn zGO=78{uuv?uWcwYTsAH>)in1opEmU~KIHA(8Kx0M~Ia%9>uPuJlri)t#Uf7U-9Vouoi@W&+qQ>^E`)o? 
z_C*J?99F|1GFU$jZ-+_t2}}KoZw~TgNH6V;_5J0yiow!w`Ju8DO5+r@Kc377C|_!5 z9YE*k3fj#Ky$E{I2hK&AgVOHGxecpIfW$YS^5PcnOqL8!biFRJB*9q zU-F|3Hw*&}!{Fs_#pR%;e3J=gNE*r3$ndTWHDtUtTFI8Ki<^Xz{$G8C-W%R}-e0`G z`9=vFrQu3}xkFVO9c?XhQ8!8n2`k8tg?WJ-G-T*T?{wt-`Ql8|H z33U9uaQlB#Q#47ZL0Al;BbnDsb#^WLkX^zW*x7Jc#4^p8iOdKliaA3Mf-3Aj*2;Qh z1LCVzNu3}&qz+<^a0hRufjC@xBUe}NYcq*9)DPOgrm>0KJ^mlQh`Y-kVy@6C)e9qV zCi*t5L4`@tT7lU(RJkdOQjj!WtcLpjNzo|nmZr<=6$YfrQ{+i11^?8b_tPoxO~lg< zoY>|-AAJer#(Q`wt@TpS{0>p>%QpF=7$%(db@q9DlY~xEdF71CgL7h}+p+C1Tc(+Y znKzpMFt;&hnp&Br8qXOv@gDXBMrja;jMJ$yWIOP{KEOe=McFGqml{Zvq=?<&6>BB$ zz|J{EU8@~}7HD1(sf{VTG`4?!W&OKYZfR1V0^Ih~!s&R`nTEvbJ% z|LBVI(=wt1;eh^lF0#M}fm+grJV$n-B4{7|mD$MNgqz94l)?L01O;6WCUv);cwxa>P_}|JWsqweBJ#|{R}AfWrS3r12mvzk$)Xb)~3eLe=#ohKKGdG!KrxT zVQf=o3O#_bk#BVpYWNuN@%m^9V0;W#7pb4rDHykphqQ|78h zZ8*F{W5FPP3ANx^Vkw9@bE(%J|&`^i#e?sLS0e{I5@)fCoJRDDTqc*`g<)Fhs%gh6T@-x+%>ImIkzBW$1 zE?1WtiJye;Laf*XBfhS9N4zf;$Rr#^`=Fu91lO)1>4&%B4n2cOVdB~GIM;0FPIF_p zOY9tmpdXN*pbqT}4qRt&Zw~4M@w-rvr1pU;cLu(I0dR>ReHf&di{QBM`a$^U>|k#( z@HO?*7icG;l=92xwt z=(UEw#RZPyM5;3NFVxCi$t-ZDjzX=t2=u*u$`Uz1I)c8+MSr%xflykw@89MB=HDf} z6wAu1k=!1m*CLvbJ*ZA_+8kw0GaZ(w2k8`kPV+ zciTB>v9uXG@E24^W=NOiBIT$G{-L@<9fLWLK&+=6jEAYkR$@7JKl4AW>~PSW%+PZ$ zfSQlSny?E``HH+*UM=sF-JmDVf(mezRt;}>4UrFisTuw@mRXF{&Fbt%R$|+8UATNU zojFaP!}}`-rG6WIkakJ!t}av+b%yqbc1C5?XP~EcK;LIO#(q7hOK1F#`UL-09Q~f| z$2?>TnRn2jC1Tt?B-ax0x(4NQu`*lXm6sp~MJeBvKjA)lpf!UkUC_h8U|S0gB0!C)T1R4f+-$E2A2__L zm{#lqp~y;Mt;(Q;o$vOnMxGny;p`; zs1ewKdvB7jpjg%61guXipnhd;(S_7&dLi=*6M-2$ zf|4C2bwa^LMh*H#7LRAh+y-~w?iZ|a~9Sx%U zMfC${w=JnkNTR=qnS27=#3uSnaN|P3KL!t(5tAc#4P_JF;8hgxXDBCRxZ+;kn`wl?2HGYg}zG8p&l?j zxmoN6dJ+=oN3dn-(PS6uF})d{tohU|Dh1ApC!nMW>OrkMw96kbr#}$`$uzjt1?=@B zsFiSuMpJOfX<6X#od?xdgBG}h>=QG^vS3+Fm)=R&rESuAXt$2RvG)SX*_&9)&*}Z( zr0GP}BUY;)l-imTj;;ro*VXlJAd|fYX><;mPSs`yaOun<>Nm1K`3uO5`*5>KqK;vY znFdGpdZHl}!DKP{xKqDHGQt~L!YEt~Ur#0GB6E#6D6RAj^(_}4s$cZynj+s5*NH3T z5~x<&V3ioJorMmtr&bOgs7&R$G9UE%c(ns&ezw|G`v@Om9eARDryDT!=?`RIY9uoo 
zDviPLox5==-bICwYjDP*)z$iMlo1Zp4e)qUw27_G_uyNx>DZZX6DP>0Oh0}ryOtQI ze8%2BN>lO_Ry%_B)1N^#pP+4nO8yP8mjF*!I^^H(PgauI`PTckWJ8G- z?>p(C)KKFk+a6O5Dqeo*uP;{8{(#bXB3PR5wfAaGP_g?d_q96kn+zGzd!driDPYo!l1{E|E$C(y|21E>Sf^-n@;tQ_R>6U4PjRuku<&k??!B(Iq=Lzd(jM2K!swE=VmWdh zJ(#E|_w--#RCAwqKk}`V+Mr@k*I&WcS2XJXFttqMZA}6O1iW-S1#jh4+@!dPQJaIe z+7=n+(>XZl>*A;4-dXEYoXO#-i?hlXB|~{LxoC90-Sv{VzCt)Fe_yqWl0&7l-a zy^ulSwL+W$MTRm&OW}+!5b6{+Z=o&nNbs0eiuEckgkZBrJNxjrR2G45x2QsbPuQfu2UQIldjM0Bv6 z&`&DeSjp*(oK>oE;C2JtOP-jVIw^mosJTsq(uAU1E*_m0lGrgr_vZ7Cz~w=wZ8x~O zR23%I^tYo+@YUd3ft+nQy~{ti-+NN$xiInR=plkLcg%P?meibpUTB4W$S@zo-H zMp0QSg)+k(mqO{lXoFKNAR=#(l<(%v*hfvIe;vv-E$hcS>!KIUCtD zv~*BaM;&WrOGrS&z)7~vR5gG9q6=B&l6EE3OzD_gvG88r-R!Kaky*~1Kiw@2|3q;W zPQ|?sj^ifCq4H!l#5p76fqezjSzhaIkn2c3_QqODlbb0e>D%7!1eQ{NU{kR~dLo>@81Qk!M9@GfHR zS#ty41#GZJ@pni+_1WmPhd4u>IRVYhGYF3_-uIU}fHiUd694mR7Jx4hN-X352&?EGx3{2K@pJi_ zeOl={vBSeoTeBFFFwy;Og<(^|axDAxf&N&RE$?G${p62XvEFBDGkJEgIdfmy&AhQv zfT6qnen7bWr|A}Rmv~6^FzhgoGMmjSP1l+8!so&s#UO2P_4vhlbFokgA{4OY&rq#x z&q8;G)^P5$!ysnr21*oc?`G+D-RC+#E_g@3Vf%@D`p` zHn8^%mrPZyS1k$LJ$PtHY62f=zGt0aD{tJYes+;1Glb^kaQdiLLHa7o#C)o?zDp}+ zw+B28{tScJzf0QJyTpQJhIOq+>xts z&cM|0*P*8ZDjBi}xAv2{WbFVY|2S&4x=~DUUCb3SEZLUgJn=60jxF5jc{>Z2ieu@6 z+%9$^o4`rjBE|+E^E@Jp{=sL1#`&0BD&6oM5#qIKaP2%+lB8&TFB{G8q5{De?PIEK zA7ZOvUSX)pwLzlJUPGuUk=?4%=(c%;Z0}*0*EQbPN~|NU^C$S)3l)_}YOi6Qb%=eD zZKk;o{}zPyg=~r8sc|VJ6r|p<-2Wqf|kuug1X9C{bIPc+|*Q zdMs0dCl+nUG)y#%;ewbixU*fsUH1|)4;m0QtuDNrNQHq*thBz4s6o}C|E8BR)O)|;7C&6?q?QHAHdy&rw##e9Oa7<6@`72OpoVL;R0SuLHu47a2b$qK zP}OUSCY=+9i07q-IM)q@|8o{-@&Uvzq=O#Fp5~)X?Jc`3ZsTCC60317jK3Ry=a(}t z$z$+ewN#*Ol4F#&@;8z37kPGgu6n2V+l$@h{y1xHLWW8L*_>U$#~A)J(56pj+WO3* znWh<9afQraIthsk&5*w_9XXDR{6qXV{jEVp`tDopAMNkqvv|*VMtR-73&J+>H*q;S zD1*hb3Ptu}_OMgfuc!@AXDXv|Hqfxew8gUC+SFQLu4sB<$TQ40T}6K)*6@|-L_GjE zgU}9uD*HpumXd@{{$u`4VslxQrz`DM+@tlD%c45QTa=LCVoR_C)&^YyZHx+ z&y|7bHjV+sxgL>BL{O8Ncibbs3csFvioWno!v$k)QzKIc(_qsY<0yVQ^Mi~hYUqbh ztD7vh6N7|k;k?jOG$BFeAxJ?y5rzeaLIv@>dBT7zx06t|#j+9uT_-$w??bn&g= 
z6dL$z`r3I3Z$+=&d)Cw4v(IzM8|$Ae1c|-H6flAp;dVSx^CC61k+Kj>7aiT0K%@`l z>jTN(z=}V|i0pQ5J~y5#;3o2ucnkk8r*qBt)qDc~k#ESOU(TLo1Gz?=g8%Me$FfhD zmvlK~b^K08!s~Dcy!7>2Z6q>1QvQ~^$$3(H=`C)WWo1ICgs)mGhblU_I1j)nuBhb7 zgXFQ&EHPL3SNJKE2zAB3#amJWv{rMKNTi@GSEhkw@I$SGy5m0OBr0PQKonT_KNllX zWUf$mneW^u{s=#srws<Euw zc@%Y3Bks)g@B}x*N%~P;soqnoA+7sa}V!y{*(%OcbsP zi-lf7i15O{#XsLa!~c)}Z()h#0O!56a#>y@Z$#a=P~L##wEJKVtyZrhA;f^1${X;* zhS1lTpX_697vJBIXc%fVnUYO=&Gjt%EX9^ct8N))Sz~T*K4;E07n;5pI&&V>^LArA zRM!*0dugHeRkG#2NH*^-nsRH>7h}#cQtLh7n@Hr(* zehmuqSK*paT3jx!6&r{Tg{{JCVV@W#zX1KDjy?ieWg56XVelwNq3U;yIE;!%HRJ>J zrw1@ASsypcFwOYEc*i*17;Ic&DB(}>27}vh!r06x@S%Kp{^0-qg#U%>f_c*f6q*<< zM`;Ek&Pb@DXk=vtin{PaxGCfdx5a1DQ2AHX)QhC&;NQKMcFKR@jI{>N;tqIP2F#xX zH52rs)u`gi@+7&AJVyS4wf|S89Wu>ED!E`OOhXlYAE*{B2okkRx6Xp;<6vKMQ}|B2 zk1OKl!Xc{~H<+hcYFSwG3uBgHwxO4yhoOSuZ$6bf#`b2Mw2g9tyL1$Z8k<3?UM?|W zj(?K>vHz}+D7KX%r83eTDM*>2mPeiX0Juy&LGWmZ9?4PF2f|g6x*av+wrVSN9CC>2 zsoRyW=n|$OA?k~iFQrR4VEB!eji_<8Qg$fU)$Mu>bYW+M{}Hl;ScFV9m|7FT7PzF>0JkFb ze<@HP(sTv2=^e-d-Kmb$Z1e$s!*{(2M41c_0eg@paE|^Z-V$9=UD-vKh9mhDnKl zDG^@Ea(Mrzs6y%i>f;}fdzFljUWzQ3i{N?vg^}D;AA(NVRQ$AoNN6d8T!ZbzNqDyx zlFcZF?h1lm7kUichc<%fHHex8K9!7K(iOcBjDTZ!XM^#vHJ~Xg0C%epgtOY{i?%{t zGz(SLOWJ?h9sL8*9bT}HNFCXSZd^W;2s1$!%fb_DON>L^dbrjco%e?5f#o1+GNJ&Fq?~?XNUrp?XC%zXt5qF4CbX`Ay4!0WK>zC+?m0)%5 zMXpD8Edd>r52yvM1&`?rVMovHFd0k@Lv`*4rK2x0k)B0&rk&`A3`Anwdg?zUHROUD z7erJ>(y642$o0C7jE=cr4Y8^l2~?4&2{uM9(L1G&5`)CDK$(}%%Hx$~NT^y2oy|G< zkY~3uFry@p9_v(O_G8S+C{u1rjsKn z19sPKR2wK3ld0OYg3G`s(ZQrJM zfguya{K|ZwD^M+oj(ER$=sw0l%d$nCq#jX@%FCoFP^+L0C>@f=%59`}NF3ZE^^@nx z6XiK_d%2=~P5xWytcGAkYy`&0BqVESAbkaZ6Lx?qqHi*5net3GhGeI+H0xlF(E-Sc zyGo~1kEp*f6MH}#WJc}SLcPRzH=#>L(E#X|5F2B6xwhJK6e#6fg22wxwe zAiID!ry#>#V$H!y*^5xY*l73vIY3p&RT$OpLH1gXdE1?O4aKGnx(k0IQD_=|Z6XeWUU^L4fcu^S zEG`<^cq8;l>P2+G2B{O!y~$Dj!>aua`7Dz`(dr<7fe)cjZHskqGrH8Dbrp27Mj$~D z(A4fl?vn`}g2nVL<}q$3Cy;!$2K=onNL>9+$5HR~x7uYeK9F696qXGA6aH=s^n3nR zo+;C{kH|4ukKDIKdKYR0eS}(%wSOJ5fa)Nlr~$H7QuY1VCpzHoJlDsl4%Mgi0$&Vl 
z4Y`^;NI8LQ${WNytUr^m{&2|amJVhEwt$*w9kguCbPQeVHjyxrfsGtu-r_xL&&~KP}^e&>m)=7P(zR~}t4l^C; zq53W)SI*GGF@}PXe{%v~>y@5>&fg@hx?WSCry0}?kk}aEi!cLyjLN!4IVfuW2=TZ) z47rTek*L-Hr@KP1US2CjAh(S~X3%7MGBbcG(g%~r*+cN`RfX2*Ec*vnntj6@Vmq-D z=--f(_yN6^5NbKT>TP5Q#eh1fsI8=%!VS4Dd5XOZ65X$AsBqJ_LJFfk8Lk_bGS{_8 zsN#PmW6)!pj^{g=IY*BqqTtv(D>on_p*m_qMyis~%XiuzrtU=7eu72_J-yHT&*Zsa zupCg!>ap5gp^tls=ZxGPZ?0|k+W>7H5r-S2n2f8IZP?_p-G z^*rmj&+9tzV!DUC68T-408K43ff_wUjs*f)F8hey5X@XL-vrB1l!>;@e+(&h>fy74io}iY}$ui{!Ri-?F z-a^v&Ib;iZn0!xHAujN{d~tAca!%TftQMQI4o_cqg}19PkN8Y~$Lp|yV}SEBt0M|% znkqqS0V_BVau?^&bI@(o8b42k$}D6bXk+V(I`D(ETKQC&MxBLxYlU~I@D(qGtU>{! zcCWDSx97Y2aVx;o(}4NydhXQtdI}%lX_$mABo@mS$QRLPC^-mR6zcD4UR$P{th$QV z2cPIEY##X+pO0*W<_(9ZjkAkW@>F5hLI>swVHP{uz0^9^QrlI^-y&L4M)Vd}0QT^a z>|Eg|nna&ftd`Fv=fFuw2GO5p700v#0``NZ_Y83h=%i2NGQ|{S6*>!7g!`Vw|?AHe7*_4e?;SJvI;e zW)`An`2Ox9YahpHrUIJ2W(h65lWke1=N81XQy2i}2}(}mUFm-4S>0^43Zg+}8! za0YZ6?*k_iW2i&Q>iWI`9blf+klI8Il(kn(R;DV})BUhJg2<)teWAy6Ba8c20$=!_ zf4={i_k*XYzmgD+^`z=44yfKJ(_~({wyI6QPQyL$1l&~JQ^}R(bSzl~tKhZ1m7W52 z2eyg$DLipzRdg>KXlm~22g!;xz7%_mWsLQzLvrnKUvO)@dze76JM1E(5ts0TuO-Ig zdgU&?Q*Txdhi@(r`$3vzUhpN-PU84XV$^?v5wA8$ra$6!Zk!f2`|}@+=4BFN%Ugshw_3&jnVzjBLlB(z(jB>aVIysMQW}gqhwc zZaX*KJH@K0xKQ!O^38GI{m(anTMV518%ahUQZkx$n(68X+LeK|gFYJKf)gtpk6=T) z>WvCgHkCdmD^Sf2I38G2JsFE<8~Dz8CwMS-4R^77mpjbW+|k^+rNUdhspwAWXA9?T zEal+m2njDDZqZeg=hQDWw=@+RgT9{OrXgClP&NmR<$w8Fc%FH@-UIHN)_7xtakWXT zaF)o5&z4>?J+UA2CWwCGCH;n4PhzxN@d#2hs{$8$Y)_Jzm)0dEp~r%4{@i#H8s`acFndYS;m=5Eu9<#JU)LuG=_D?T2S49bi5^> zEL$YIqnNGUqFJYIsKjM^@MN)^84P)fSjRK-RpS*?PfIn+HdBrfgY4Kgo6?E-OuP-b zk3A>m%Lr91^%T{6)jjR~z=+_)U@quM@VbzMkXga!ft)W=)gV#iGLV6h{0u+kspouY zn`%90t!LY99b;K+DX~3tt@5e3b3Pyv|bZAnv~47V85mGDzK2TU&cb_cU;L zkSXw}et~{P;LM;o!Sh25q3we(O?|2gu%&TGJE1A7^t(KJUHc)w^3%+h)1}i(ETwgg zzpSWxs{bY$xVE@wc@pTi@lC#ADb`P@dMNodIH^o zI!`(%T#*D*@GSLoWj|RVQ3uw!LUxkBh3A2Nm3fhIwDFvAl<}SMh?%hUbG&dBdIz%a zg*oVJl9valD}z;Orr|-TKRhxbGK@96)%Vhk*In0_2BhhSs`Kcjz&2};tL!!RaQj?q 
zKWl5-V0*M(Yn@<9G=`d-=1R8vjvTk%ui<~7on#+W=hSM|dPRz&lWMejpK2JK<<6xf zVjkWadSL4^eO!W(hMIEG}a-?vB>y6$NGA z$`+S7OFk6;EU8h^)e`9#>sI(`F(1T*PC^o1| z@X*kgAtM7ODO2eJx(D>0z81Pb;nP@GeaAi9JxhOMk1~Jhl8Qg(>9%k7=Z?m%Ctkg9 zfLNr6RsU0e(#Z5(3^#*!2R{oe(jHR9Ddf=o^#yA!p7ZUqcQzN9!psE~vr3!8)7WPU zwgSkJnMYU<&M3M8IEp1CA@=~=I4qcb9~VViejlesY7xt+ zXH*-yp3>|}l5aJU;W$)W?ZP?Hrc)$cT&wGsMZL5Y=SMl=gw5|$iR5mpG9+0()20@|qu z$ry4ac@94+8vKWxjO{r*PbS;inJ<*zDjiaGrQ$CbnvPk69FIKz_&Y>f#SB%dDp#GP zYZAC4C@J`!VTC>)o&}_OgQ5x51g*yp^rbqh*;<)%fi(qGs>SCXIXYR zioB1x1nDK}#;1_gVYQv2PYRhHRsySVVx|5OQIT^ZwpE%KvNOP?UZ+e}9HuSkf1Jyk z;a=iu4F<9W#+PNEN^6%1We>_XRItVmmZeUD*(Dt&-qAsdd{uqj)WA7GmxG2FdIbE? zd9?L4#}zd75=|8n8K0|??XpQ-VJoo|W)&EVu9o#N2AOlM?c8eGVg(2z04fB%GVzFZ7M{{d51eXPOrpO%rf4Szeh!))(js(aNROZffj{&;b=A}dWu1v*2*Ypk z|8$+V9X2f~D<~2RwiWIxPAdIVCKyX??L561Ew7ToFdqL66QQq)YMO%@R((KwC6EY7 z3t1cD4tXBZI4DWqS0gH?$WZ*O(20HKpW++pIqW=X8)->2pEGGqqm3!BUx_pQv`%q1 zVH-&Y@Cy31s)OEUm=e-5bX3Typk{`rfyn_QG?nEah~H=viQ_{20(1kMHN7h9Qz8_{ z!!>ZVTxYs!i}ciEo?FT$>*>I^ju9Iz2cMP}_ zbS|V(=>Cw8!QF$MhEe(+s^#=y9GdER2Q%L{*qvk_Y5rNUutIO#ZR}vmFk|*8x7SyN zyDYAO&(sU@>8n&@GznT>Lu$*lZ*(O(yY8p{en9(xwmO|AK-EOPjJP27XF7N=XCs^0 zJl3?t=&$H!tYKoH7pJu~z;18`c^WePh3$wFC*^N6^#dyfcMnYo`xV+ZxU%6ufKoSJ zd7WgCA3`RZ<2~%0W;_G1pY4CklwG1)-2R*42%xm7hGXD5-?iVU%f_l4C^H{W9E3qIb_zx z<_uGqsj6{GMOj4?bExgSW1!pZ`^AMwp;#wslJc!q8DI@~8MwkQz#tFI(_hpd)GyHa zHR&2jdtRs1nv`X-3seU}k8=DTe_eMkdzK}@vfdJ9on#$gO|^#GosQ|Qk?v`pD43}a z=Z7F6WVpPB%Ah?P&?dMngbq;#orV3zCxw<8i$3Fj!VYSsy}osjd9N{~LRbE?%w68Y zlxZRDr<|X>i@CS3zt|%SQFqtH1}qCaV#qa^0=onx>M{L1onE^~Evf?4$*S`TrFQAbds?ExGvSZX~YA#IW@*uBRj)6ZD}^j(a71y zHOc+k)5U+DBcufME>=h^qBG@c)nv_eZLC(OIjT0O6{!H&cZwK#g&ja^r_au08`02dl%yV9Kes!*M*Y`DN{+Iq- zD8`}l@Fip^HH5ZPlc|SfI`II%2&eE>U}Ap=&fmX5^ILm9mb=E}`}%up!rReqSGaqo zr@3!Gvz@2FwBy1Ms;b{VAbVl`u)3->G z0809oaEo8Yo?|3t9aG?2>rM5fyZgDeIF+t!SGudh)yMP4H;j#g{KrE0_Pkgv@cq2Q zp-&Nu!TaEs;Ve24KL7;pWULuB6>JI{CX*5wLc?2`{Jh*R3y?+Alc?$B zGkgfTLJWt5VJKUfN%XJsJ@xkYI=pFq6T6CUDsDvD;lIgrnw5>0Pmy<(SCcoDpO8P6 
zm&nX?Je^DZ2cDw|xQNZCDntpOa2P)HqR^fRCw?2?apzJ^Q`pL@C12Yc}9AF zcv;_fM#hg5A0eNxhD1JbfV@dvgvLw@eUv^)vBZ4n-0ul_g3aJ^Zw=Gi<={@ji8!Jq zF@V5{HuyAb6J#?eIL+?@?(>O|=9V89atcke{bMFbv9k|j7{;1GbdIs5$0`PC7 z5OL7&+>rPMA=+QDzNnka;%F7$pAXYj@_efx`z zf%H)|tSg4Xxp^P-Jvsx<`W|4X!4y#g9Rla%{lu00J@z&e#5DGI_F24IUrm@Ucl38= z%uFrz44cL^7mkQmAgPgw%tlXP(U6ilOO}u^z;yY@LF9a*CjJ4=lum;=r$2BML4usW z%YI{~F!h-dpd@B6lfi~9adTjD@Im0jJ4kn|EpEY$a7NXOszH%dTdI(HPal>Q%YMo> z%R*%5;7u)$ng^Dx-(W5q33u8*akx+bhPv*+eB}dI-UC>hHr`n85${M}bAJP77C4y$ zgj6A2ypGJk8sfw7+xQjYG`PBc0F%;>d`|QsLWx<>YVi|3=WwyBumGGbO*oWm5BJl0 zNOSvy2`~lSC+Z-Hu?1<0{s8M&704ZSC0Ys~Ap}931s8w@n~17mF1ZEhm)?+) zUXRp8G~lZ`3Fo#tVFb(^abY(9hWiJzhYaX;XeP`RRzlKd4%l-fRE6PK5jcUKqu0R| zQ5{Z3x5I=+jxL6l@JeDne}$XHK4C17Aq?V9v3=PiaL3Q*26KJkb0!EM#6YAJrp8^d zEPOB-Pvudk=|Es~UecrJfv~QwriT45QwZ6C7?k%1Ta66@j{gOiLS$e$-zt_u=E)`;7f!X(?eIz6@~zu@nnEsnL+l4u@ov9DawT;C9#r(}nuroC^kj++yf-?*rMyTJZGg z1_qsN(j>@nJcN8*7nnnEkhO~y-|&aHr@-{IfEDICB&t3@nr$v$1AH*&q>jLAjDWuI zXjBffUmphVdPF!`L5w06;{lLE*MYMr1^%MNK$T7ASx(6dV9z?jo#Y;J99J9i{=>na z)e;gRe;}E+6%t%3NS?*QH?tD57ULn+XC^*E?s6{N)1&ZstRdPGiIG}EdU7Nr(6b&=e^@}{QnO}H)Ygs*Bt-E)*-hX_5Av_{<$|r;3};`H{71oOm%vW~&)6?% zII6-g67{Gn^fy_)+^V>&tg33Ny084Mcqs2DGf`*BUPKBOhHi$roWu=aL!cSZ=9}Y_ z`966sc+Yzq`$GK$r0`dALxgJ5S>zXb1v`b?i8@pk<)Y-Y0#>Bi)NisEtVn>h44-wvKs-Ne7*0hlePAbPNgZ9@gfB6Y>z;CB2kBqY~?TcI=l0BDr!=v$bl z?iRCPYTSk2%tgTJ){UFajpjyjCpZ~@iXRLFN-eOH>A{R-zyk@A{72pcYAZzcmrkOO zQk|fEpg#4A{76iPl+JSGFyvXkL-wpce*q@=IxZF316H&5SUBGWb}0z%`K#bWnI*cw z0<{7djJlA>+J)C4P7*(f_rxk_53u4F;V&%)qJ}}{!b~($3WYS$U;YMsTRNCtMhR7f zT5#n~h3R}Rn5eUmAU=rTa5oMFw&oQ&7|X&`cqs0LtF9fk4t5r!fO)6|>{tWou{cL` zfqUsYeCug^E;Q6O23OxT_(b7Ac`O&7K_0A;G#Kvaa&Xt7NGwdXlOZJ;0wy#cSneJ} z@^pr@4mgDQz+G9OX+8nymHJ>5{E6fMu~-FWsq=vEF@jU13Vfc=Xm4yS)*h39vN;8e z$8dZtz7Y5o9Pf_(j{yKzkW>?Fa4*C>NFdi1>%eCSgB^4+urO+50i;sf0UL7Pw)=@{(M0J$)u~-weK)IPl>V z!LPDb_{oolL^2Rw+-q(;zk#2}pN2gCHu1Q$1Z;=L(0Abaya*HK7f|1N8Zu-NSPgKG z>;-GzO}K*EA_dZa@E6XNssPb3S*#F73!nJ5{0puPca#ldo3gLiLtJOLx<&}|#noU( 
z;gOqY1mpy*z__e{h7v1G@>{}$elPsCk5CRch%M+ibT&Ex{7&P*!D^9)0y|dVkia&9({B?bh);kWGXkE3|Io_txdI_~TLN=E9*u>4Q!8{okT@@a&gzXEfh&rK zH`33r`Y6DwXaPEH8sxUmi8tYQk>DCN3KxI{Q-Cp|H&94JfH~R+>)|tWKY9i}6$RHB z7#x8&+lUQ?^iYV1IcSxP&cWdYlMX%zH$G9)z5A1*{RpU|{JF^i3zoeS5 zqmht2xWAeKfBs7v0Lf!F*pA+TxA7(HOTHo-!Md>mXebqQj@`j};SGs2;tY`i49+Pm z11Oy~@E)}X=%BCg-}nw0*Dl~!9@pj<0QEGVTLq0o1K}O9LI@H!!@h1N@&Tx-yVyef4*nm0 z4d}e1;3o1DGs&Ce7xF8d6sN$>^ED9ATcN!o8J=!(xLW=RKfw}4!_)2~{O%)wd0Y+8 zv^|nh^1)hF8`vTfFjD7$<$>l1*q!Y{(~&92DQUdeU+BooID$RKT!($)S7r=z%U|YS z#TIh`{4O|gxB`A4i{OB7Z8h*gm(lIeJaG%2-Q6Ks-kPXOmXblx`_Ka3aiVY!wiR}x zW_Ugn!S^%~a;xuP&-$B>74|?bITJjPm*ML2qKWWNazqm{fm}gmkax*AawFl!Z{RzC zgt`w687)vReD+dceR@iSs0O!cTLA%9Xeo5Q>tRn@z}lE(#s@7KfBgd?q0t05(|PbT zasb7B9o~~4Ku66^LP;b7zZQ%gfB@Qwn1v&P44}TVV?G&0S=VwYD`kHtjI|w(N8a^Mo)3f}RLa^wH$$Ho>V{-+;@y zWUUW4omjdmK2Vy@fn?qD$Nkj#!xnD+W^L=3<@)LV=^5ia=RM)8!oCwmqw|Ru)KJ+U z`48nUI3XFX+(aKD+(0)Tgp_!U)J8lA_ud3aiw?plJPl4=^w<|%0~skhIhp7I`?=k) z&p9pY2w9Eg*jQ+Nd_C+-(*Gi^TGQU~sy=Ud8A(oaMU%_c+8rC1O#3&@)diiL`= z@?G*va<#l0eVv$%eizQN#nASZ=D)%8g*LAp&>)!zL`N6cqxL8EQhnv)mFcSc>Nw3O zbu{G9x~ftY-RV}?GoJLff-}p0)=uVJ!l2MU5h2u{6od-)DOHO1;he@$4=>T$&1;pc=gZEbbD>TGvI3kvlLC$dqL(!d20# zwu%tC6#*wBmW97V?{vecQn zL~zoy){Ipbs~oCdYK^9ovM+dujxv3~DxcsR=SSIYkf+Gz`-oq_h<8mi|1Sj~W~1}S zIUV^u@_2MX4sZq>@*8zgQIz58C zfYp?aaIK-;ei-*rNP!(ZC9L9Nm{HzP_j>1lcHEk7{%ZZ;3S!>yz~7*0utPY5eWVsC z|EZnorRp@*eMJdf9rnnR!K-u$86b>-rnY&UMqC0-H^ovh5SMANo-Kwwa}f~5yT$wH zbYeC+g?Nts6ayi*Fb4YZ2;fdb!0>qqJR2t9ga=_bc-X33tSK^`3S!ZhO}gO zlUoj^Yl9fiKZ30LKz=A^VMPBxXaFnmcJ#f0x2t{bX0B%L=U%~I%+7|>w7tNR?n8#- z1ie~b26?eURfh73yuQ4F^0xYzX14m8!bbJNC&CUR5#F3*aSNgqCAO`9sJA8bMUC`) z_rCY1vln?1S_(P{#oTZ1k>Kw5Yx(8800&Ct> zJ{8^>y}TaQ$vM(hejr<)dE-y_2QnyQ_Vx3Ic^0_U?maGr%jv9eb@!T?+5!Q4Bm&+< z0`VH;P1zPzXRSx~B!D&?2t1`Hbf17~Vzq_ZyP7eIKwQON^|xhmxCmgmW&8$T8<)?K z=$_)=%-4h_q^jaXK9WBvluC5aOG_ER6nN#K7 z$fMcya9!b8XzbYH+A_EnMxrS6jsG&@MPJ2O>dDkb;RR$-C$dk3 ztzu{HkNcYSpLvZP_08vB3VqpV_e@(8+iq87mK9e>hq!ESYfrrYm2e-sNq(gIQIBAU 
zQiu8~FM)o+XjvyJf$Bw<$V~E`@-iByCJ}qF8OUp)4?EL)&sE^EdaU05p2yBxj{43= z&LOV5?kH#>%XDA%{AQx0JJ?~cz{~)T*mvcMNj(^;@}r!bE8l zwvU`k1(9coSCmrGOx;@71bY7Es=<(S`z7-z8Y`O8L-EN{Hvd{Uh>n6JM|Biz_Sh3> zQM&Q}M0OJ-TIk8vVov)H_(pg?d)9gK+_l^-+ybNs13k;#|J=#mNWYo+!d(+?NjI^s zR0eb)Hj%GZ)PNOwiE@NuuYA2M2Tu1Rfv1nfWk^TSDy{;@;zQQMtmTIAZ6G}<=bJ-M zQxD;pH~_l`hR_8-W50yw(^W|JEW@rL8T?Y9;J@)F#cRkKSXp;tbHRNUsyL+-6$y$n zkWIvuC*@ORH|SebKCuL=10Gw9EAhYaP4!-JrPu>)H>`T=O5nSOffcEAT@Fow zb}nCNgtes7=q!4jY?nf-awvB|qvby340%(EAiCpAFat73Xv0r{^w}pSiV5ac3j}zE z=S%&uWib6Yhq=JZcaPkP-vS!@fs`U7O2xo5vq*&a2wGHbiX?IttH8C`I_a-85)XrW z=a0-yKcRM!MtEOQKu_dod?5Z6Js{TM4l-BSjo?`7!_@F5da^tN;AE(hpYp_+w5Z+8LKnmolbQ|7H5Xl1$-D6;lM6u1# zQ?nLtO`WAbkQ)2~rY4pU*>FzT6?-l&fcC`C+!9EzJb?6XU#^gO%$^qKfI&6@QoR|F zle~p@hxWBdXx3Xp^^oPrtn_Uvm*k0Z?65SEf5*}s!zJ-exh~9B-*WF%;30qbRqO=l zE}P4q;nUzWu`8_Bo6xH8w%HfY$5&z)JR?WLnf*aH#T*T+ z`fQ-F4}b-kf&QN-(k93|c7rS8I5cuSf|DH$xKAHoG#N(Upchk1@D#}0BoN86-SUxi zHn^16qUZ4_>H<}rsE_D{@9_Dq@k8MJpp;elk9+31wVpb@w#+Ly9~|N{dlmjitX@11 z27W28iDgKuX z@9*woJ^MYAybJvsxFT^ndI;W0JL7f9R)gr{!{Gg0hm_3s%Xa zc&u!yGC--84JFcH&A5#_Ak&*i)xa0P2~mG!GW^ySv6%OPbt>Q6!>?pJLBC^y@3iNd zdzQ!I)3W2)t4sr?3UimK%+EspKp$N^CP!g@42EJAd;`zPGtk{r1gFrw5D#QGO1VU4 zivI}HlFNfhLOOU}fBN6E_xU}-Cty{};0(wQX#@qlL-jty~qZ1KQNTj=x_85 zusPobZWLGb)$~_O@~zZ6!bI+tH&kC!zn8zl8$lL_LdL4<4@v07;8vduxvwtLYc7M`3EtvY(7H=vJW>cQ_to4g z@gvd~A5Sfzu7GWPiO_&}tgNHkr+cMJrh=h?^{4V@z*~b`mnH9rGe|bx8q(D5s3b|} zTWQ~6jdvV(KXW&B>@eRbA6MGDbZfcRtg`*Fy|5Nr9@(b2%b769X;A;4BTGlWkrR|N zwW9W;#-pC7PF2ys&Ft63>AtEd&oUkLh({u)TQ<)yPD>N~r zn%0Bu_HXvQX9^?(<`urW&l|DgAw^TlMw@Sg{p^~l#Aq|tG?~oFjy+zMv2xu-9M0l5 zVH@z#)JJ(W)jU;Y7W0H{fM39SVZ^ ze0Qu!3g!0u;+aO`O?bu>L;hs|KbsY~7to0|2=Z@Bv0$<;N#UEMMO?mrI#+?C>VCTJ zs<*hEP4|G272Trl8u&~*k6J6#Vcsx}g?O;pgIimCz?=i)WG}~Q>jujL>t1^^F#UbB z$<6bCEV=GlAgZZcx+R%`?1q!CG-{{rY0%ujsj5!Y7-A~*7CQ9js7mEK$Wl1v>jgc@ zZ}G$Ee^MiHE#J8vU({U#ikPrfmu7GoGCAo`Bb|CRvMN- z3rRQCMAY$tsrX1ca9(tNp zJpDT(Cngs!&9S%hs(mN?5BMg?Ybl$Xhr^VcYHCNLHr;# zhkSJ*HeUWz8y9d&TOvP4G{KM4{dGG+)`V2i{v@*Dd@d6VKytp^&w7V?Zo3b76-<)f 
z?@R{X5;q}Mw-vXIakTdCGy|7)wFv}@su;xgkj$2-qv&qZ6EvWbO8Gc@y2*;P~+Ta53nP{2pg< ziq1Jq0Wy>9Pj*N4`jeb1Z3nD2lUT-;4l=uZH>r(=7ZLL!kA+nPJvZz&(1y^!!TKqh z+sezzL#haQgs8S975&KjliN2hK7T>csPfCE5KE{n#K}9e?e~E*9pcB(4B0B(#n8!> z|9}VXb685H((u0F&%%y|&Iws#NY;*&{Xr0>iM_0((Vw;7MtnW~t7-8JM_<^1m!plP zmx519A(PaSp=CsV)R0<}>TRqyH1<~zk6w4QFCSKHFBA(-6x1x7UQ}9quVi9LcyW)S z-X+z{r~J=k=FoMqLmE78l9!l}_@`0p+P5PQY0EJKEAxJIjk6{czxvfB>-*=&pPzkp ze9`~7Rj|g!@Ii`-;A&OpM;)y)EV6xM_3&juR^4jY@B6~+!9Kl7wU$id7-yQfY5A(6 zJNX^*!wd6DZk1oGxLy9YOmAH6crBDFKZgFTcC5NLCNgG1_2TL=Q9CL{7&ZkI1*!r= zRpZ5XX68@Um$396X*bi(rOo|Ff32H$%h=CXS$Y9)$*1JaLl)Jjmhhmtsby-?s8-dI zrZ=flb9mq%eprR__lQr*x6@zjeBSrvnD_X%x5eE&WylwKOrR;k5ffj#THR^&2Gy-o z>q~TOmH$HPsLrsJjoS*Y{CoY6%^UcqU!JXKvFV?4FS|r)DwZ(cnR>`~SySy>y`(qm zcjyo6#ekZ@3oEUP)I}H#!<3VWhA@4kr93tT-j;Hh6!rpB>WZp(n7jU4dgjKj`@i@8 z^)=U3Qs`WQyi=A3?g?E}=}ttyYAH2F$1RTwuceKB9P_>E(BQGuG>^CR_wS~e*FG3u zyIzm}aPZ6cT-^4C(1avM9jtkxt~q`~!((-w)$3PLhIUe;h{p5XQlnIpJ1M(f_Qzk_ z{-pewldmvGh$GZjf_)MDqNZ1W5K^9oXYFjM>;4E{>8Hve z^i}A{IY>8^zmq2_NwrbkPcuyup`wWu-bLkxzpZ{Y&AR{bOj^um_Se9&y*{~289cbE zyr#bX(Z-jXZD~BEZg$MTD$gRM(A(N2=s)ML(uui)vYxyf`TX|t3-3xZv+}1o2|6S! 
zt@@ezbfT(t^_F|;FRQXEpqad<_|R@D-kH1l`^QfW($-`szn{*HEpU{TxnGkj4c#M8 zMIWryx9;S)H?bL&`vlKZ*Tcc(YSUHpEjpb?9RqHgQDhRVXYnLSC9sRZT+n-Ng)7N}@{3EzztM|LSQ^fMxZ4#=snAf^->!VGl z*ZvzZB``))A@fRoUD0Jr|9<~jCu>XkzW0AW-2JS|$tpSOt)%>2`AOaQrqx>=Ye%;+ zHgZ-wri;YJx#Ehf-;H1J&!0Zl_!yqmHIJ`QIyd`XAYqznl?>HR#f+@+t>)S4-6O*E zgscRKU}rhzm0is%%oaag&Cq;bQ<}`(R{DcJL<-fv#~rBmy!Q5}w_zg!x~bbL+sdv= za~&g#3UkV{S7lYt*p(Xd(VV@z_?dUB9F5o$7m|p#ThukYyV$u$i-|Qn0n4y)-uXs% z-kq$zX}doxORY#h^yyueGRIy(nZ|iGkcUGW#_AiSG)YXV-g;!y^cw9$E-5hau{G>3 z@^wf?L29+sq)(B*cU3gbaI{N4F@-!-o#M!HE#cNA@kqIzv)G3 ztI|?4-hc00^u{wq)iZiTlh^Gobq(v`O8(paSfk&OC+HULCq-Yf15;C;*M2NMj(R!o z!>KQwa{`NB*vhzFa=Oys+G&lkmN~62w_nuZQp+iIT7@K1Q+>}%etez%uG@>n&;C8L zylL{4F8}?G^p)-{;yHmp;)ynw5Ux*67St@5c>o z+_A-lmT0p_b(dDDsoRd3nFijzjs<1=a{7OY`oO+ZzJL2(O0W98R!JgrM4Mf`L(}SQ z&$qwTc2`TT(dyXMfxm^)iWz^tf35jBK0V>XwA8kr|NMBCA8B0W?T$q$w+2KHxXB3%5_4OwN!p$` zy#DAabya${mvw1*|DyDqZW-2>`lpYcCcPZ^F7i{KU#c=0vq*g>YIu{+9oi-T?ONL2 z&_Y%JOhhEH-xQM*|E1^0inM#_mA*XwJ~X#=q2HM0yG3meXczXS%Gg>f6CNf;HCa`^ zqW0nH)hZ2;H)IM;q4~|Syl*?b>iOZ^XUDh9Khur-d9QLw$d76Xb%!;5-285ni49_E z6-1g<)wvm#4JBN`#r(N>$Nt(2W|pihx0_RZ%Vi@YrqrbpA17UJQ7vI)?b0fb1NtDJ zjkkZre);)%Zl*l5+UK1a>p$my%lf^qXqojF_fZ*Jc|(Kr*1eM1o>;fJt?tHmi>+Dd zt?ay`@o)79)8l{ljy-7kyw3+m*81GklIHeIv69vsTB}k@ct%Xm_-oA`CCsQ5TeT?c zwEh`U$NSj$yg>HL@afTqh>Tv@lXDjRZCEjqjo0+9`Y&!?y!&s_QCM%Lw@lk@ugdsjTjRv9G?b7I2cGZWvp7}j`D9Z$6$A(!MaOi
ERVJxl3%jf> z@?tYSJ#X^3?aQJxOSZi5lr`1cky*vnAtnY4io9Cc8kt+IwAS(ZOr5DQe3h-?Cv`2b z_pZ?uBMO@Te4o)ieNy(df?1}@t^?c>YFJ?R$e`*Q;$}2F(!%_IE?p z(bS668lRW`=$kt;cV2F%{7%OEo~J~|;D*%)HC&juuGQN1v)Xk^EU0-gU@%7-`MlEa zvp+vhOUUT={Y1ey%X7Aj`lgQ7o>rVzlm$!*9~#-YQp1qakVWBQ#LCcxdarUR^_UxK z^OnslD=9(p|NVIQRrRCKA9?aWL{=^3Xp-~O=WHY=^+-9^99FEz9anjWUBb|bc34SV#0hzsfr|K(CKFZ54? zU&p=<`?BHN$Gq?5(;Wu?A~7F(Oc~X4!fM9WslTdGW&)BhF1~YZq{?eOi_A5z$l08M zez^X&-P^foAHE&=H``46CBmfNQ0aQ5-+sh%3X*zi(glfA*NadRBw zihKQ{vyNtJzy0}sE@#lc;l>^wyVOL{KFD5qO-xFSJ2eN@`d4#(Y++PN#FfAs^d`2S z#hu^m``s^-KTBU8e;xKSFJEtY#`jj#GH4>2M|F!{R=I7+KYcrGh|-K3_#Hr!UGgsR zd~(0>|0iApnnXg&$anI_vTsT6Y_ll~d((dAstb#<%ulHiMX^z8#nz(k=Xbq%X28yk@A^VAf4j zG{vV2_1VV0>#lFMi{>B32gWsExK+5SGOy5lSsV2?ZEWDo;8(%d46pT5H9HlvsTbHe z=@>tQsp-As!kn4*uJ-A+;nowDFiV+buzj&>hHp4;LMPBCl;1Rox|!Nm>KMfyN{hde ztk7Q7)L-hgd6)W0wl!FP0)*FmA1;wqup8K`aBg}WYe)A~252s6+iG{Jj?u5NOH!Da z#&_bru+tPl3fXTkb=rtT!HIfZU<#`O zV?c;Kq^hVLorbQ0{KYnCu^-GRyk2KRN4h=2X>xaF&WSH@FTGVcSF=o;s&T42tH-GB zDxCCf;xaY|^+`PhH{=2efn^`htYogS{{hi7iMu2q=vzFBY(=)EHqljNPCAiZ3#PMb z_$_G3vp^HwR_-a=0|=OvY-KKt-^KsO_YvkoQ@t0AM1z4GM5G767>y9~g)iWO=qDTj z7uZ0&Ink2*Md@X!(9NZS#P|%ln0g9{n|;v7UJt!4=5go!L0*IBtmlw-itm#@mxc7d zbQ0`>XYiWfow*J?)=MCrmPu>H-_Rmf!ViPC{pws@7H2;CtFx#u2swu2Z~+ZLlAvKr!|#Fr#&Yf*e@4tfqF~moZKn1AN^KO1NUTCHJ9F1 z&0WK*^Y>#0v1(ovvVlby2Bx4kz%eUD)v)w>!pfJ1miR^;ry1f_z)GqKdNV0fXhyt3dzLW??t9p1E z(VFT+(=t*fQP;?1g2oMyIk7<^r3C2qy4Zc7-iD&}ky*g3RN*p#{jdV_T7y{$`4|r$ zEOwAiA(PN$#lPy`NK};qWB)dYB5D> zh|a(rMF7VPStZH=w}AbvPjSg+kr%y{l6Y3ajs-X>H`Pm$M)j;Ad$0^Yz(HSc7zs~ z)}xq!9tD1Dj?@aMoX_G*@EfJ@THuA&apl|!;TUx8bw`8IICMFfYGM!q`U%HL+0qLj zxXWOQvJ>1qGl(;!7g}CF0p&`-|Jx6ypfhAMVmlBJL#3s{9B_I5^1gDvb9r3r+=wU6 zI};`oN5xBEJUm4113tM8^mwYN`2-8Tis?uV=_@ePE7_3@>yHK_&_Q+|hYMOTY#hUD zl3U?SbEIrO{f=x)q+>sjOJX6IymqiJnLkVcivpLKD~y7vw*Wq;aQrUxWDNp#us3!W zrc?WX$DSt41aj;eG&m`xL|{m>f$1y}?@L8UT`U%wYi;noZUu_92HqGujXVR6FrNR* zkw6e0J1E>82F80vBK_aWhdty_Bs0Qxh~JQhkZLSTEEgO%%`Y&4Glt4wJ%O z+-fcwI(4s!QOHPiESz7a;tQZ__&E82TtdbXnP_F{7f*4~&@2B8QZ_w3wY`ra;r)Vd 
zEu|x=C;{|XF|fl*A{r0JEXa4M7tpV>pv|m>*n$5H`6LxIEJpI%1xz{u9*?znb@C_W zhrHEiswvqYETE<2vUe3)CrK!U6QuM1@0J>oFY z2~No8r4l4s=4iBv2Krn}l0Q=H; zXz!~morUh)*1&!2lBOZkV1hLSUra0jdc2UVLE7=@*g~ZF{}X9XxI%6)-$v*n@?sUK zhxAz*i`bBKG#|Vp;e;I+{Wma=oCNpJKXC&8nMw9l@=SEKbY?rIJFPBHCU}_r%1Y$Ze78w8pULNFfp0P>r7kq#zTqxU{llTpg$aoHoq`kqC z(+~P5&ylZ)YIqnLBF%ui%*d88@ysh|3Vj2ujJ4P=&?-4kNRVD5o3Y-Ijr&fjC>q+e zze2vV7j_rf3mu}PrOHSW+7lD7x_E88DLeyf05g~ep2`MzKD0vNkar%K_4;DH4F}0;k&qXg(YQ-I)boCEE6X9GwMt6xp_g z>r_>D6heR?2@u>JLU4BQ=)M&N3s8aM5uPP3oTdWOhc6X(QBlWsQAQ zE|L}5tEq>l&rj4rGuSg=+s}}@QYfdfxWMsCJVHHi85XA%xn{txJv2~wGW?RD*5RxTI9Jnvx+0hG0~A;+h~7- z9dpq*Zu(P)^)lbNHzz+#>g-x)E>=&n+gMporwoD-on9;GUpURq;8&^g`9zATvaWqj z+|aISx0HNF`sAtcjT01CgeAoi?G9YRKAr<93tY)Y15wCv(%HoEQdwq}lXv7gE$6L9Gc8O|sa_c&+pq`(T zk;f+)=(dMnpwEB$C^1HUHKUE$-aDSE-jl@Q#yId9Mky2v^P!in-H+0agV3!~fiVp4n_7K4Q!2t0|d8_Y{|v<1kv+p_kcB#@T&h=yT@kS?qDvee1jJq89JxFdd(Csg@OKl#_NI zxMn4VuUN{KMg`8*Q8Zk)%T%aWJ5aV{>p*Qy^g5;WWq&V+)}L?tH|Jt7y4mL(>zF`J zvtOF8y(7Kh#tO4Bv0#jMMe@dY@9*3RU)^`rpulHA`vOj=ZH%3s=2oOu*7vjjSO22E zbF}HQsdu=mN=hl$KSrdw*{8X$=~$&T5h?tPRkAYbkNeDIPa(I{-5+hUv+jGIhKAFc zY_DQ6mqQN}4P`K@kon;k7P3>ze=S?CP;WX1`?c|#gfHuMSlb`SrRS_}o}#XEt|RW% z?mX@{u3+}0idab~5}fqQk?LKlXMw%}TYQSDMa^(`oU4to*?z1V`i(yh9qu|QRn4u= zY-u7w@}*Z&YtHAgw>PbKoVi6tvn{=ojfp_n#C1Pz^RFfGsa%#>Q%&n=rZ*D{ap61! 
zqcW?Std;Z07dS6XY#N`yo_ckw5Xw#!UE_?&Y$G2LbIh2OZm<9=#8-{anpDhKt49Tn z2~G^|k-DDm1NFX=Pt8FbkZLVa%ieEHPsx*TF0OpS&E&(zKy8bkBVd_tq;sI-rZ&OO zXH@n+Gzwe08AKUkvmiV zOB|GZ$jGI&bw=m~lr`p7ciqIWUweLxiF=YV(~MAy`9-D98|3njcLq8iJ6|~Vi~4vk zw!^ETj$DHe|EijubonQsM&nr)~2}{92#`Px3U=O zZH8(g-ceS(_pK+rXGijv_~^Klgca^yVv7IZv|TfA$PyM>FXVRc@zhz-6wn;omD1i{ z@%v)md|LIP`R6u2ceqx1_b0cEpOYACjPse8wq>R=Av=BNxl8@>`Pnxvm}ry>O^3#E zD_TCce=Pp&{PZryKk0;+9dsx(TlO5;*Jg{(S}#kJj9XJ5)-PGPtPSF~!-9?1T8Xsg zr_}fr{{7SMv98Og>V1<(bYHmZX>^Zr6}RzW{;vWa231M_JKeEVFC1(UnoX0%kGr3r zeH`<-*_X00yMA_XN9czFQ>UAsd4AS$S?Xqbm)09_LZ2l+yGJLc|F!tb$2a+2Hhz2a z%cHpZ-Y9E>_lswq+Bb?_wVx9d1&IF$ce(N_(DdssEm-=(TXWgT2 zJsJGI=Z`O*!U6wety{2rvDU@v6gG2b$yC$#l{X}@+wW9A3VrSO@$>tBpTc53{8$iI zGjX@;fykL|L;jW()>QvpZdaZcsh=6mekFg7dK3Sm+M6>md0gj|v|@&LXwqMa&g5mT z1S_|H$4t3%eat8FEzY?(>`72jIrw|0cctGZe_5JzNm=AOE%0rCOON&}V{SDwdesN- ztHm!be9rA^;>hZ^$oEpf&a}HS+)Mv4?UW4V!#-wO7BtrJ(_EgoGN#bGo3DJ|UyNOo zGS>NDrqB7um%LS4FL|$E!)&KhO*I$)4F9y|?di7#--mn{@WK23-MjMd*F`%LYx`cv zel_A|wY1fCm%I>qR9yYl_S215Ii5sCt$Z5s=|tRG*QAuq2{+;Jtalfc^L^bJ8{~^D zey7-^d}iq1feXEXU*3Cel?<>CSh)qmU zoP7hHITvW7{2OJgnC)d~&5-Dj+?j`l9P;y8eOzr5)Ng4&^m?~F=Do|%wN$;*H4Iys zGi#p699uHx^p7x({fK|3Kd<%d)QdsS7e4HL@7lxI52xLegO}#}Ql@tKCdEf&Kbby% zkkh}CvNs{$*O{O8eqH*b*Z0Stw!N(Lbkys5KQAdggF1#D&vUuhrcxL}ByTBWd^Y%{ zb+*+9`hQB-HG@o5Ub&VqIeNhJ5mEV`-H!PrF9tu)mbE~&QXk7Dmif0}!?2U7R@)7K zoqhA*alqr0$Lgb^4<0@!@ND|02`L}bgcSQw>v_El70Ty%nXak-fBI1?IC;SDSvU^0 z@#Igc^<}`*LwAclaKBVz`?#<9^$JUyzg?mDJYiY#ra7WK`}OyyrZN6j`SfGMKZVXt z)kGPZ{36zPz3Fk`XB|HI#b-D6>U+{0$~-<}KtKbwyblB<1)m9emoLM_8{z5 z%li%AZ*;dwKcx7AT9xW{ExjqkPgY89m%KEkg8Ltt$$w}19_a@;@5H5gRqXbp>wOzgX6UrhJ+#6OzpqsjY%N`I zo8YklHB(K@{65$994|A4XLyr&a)u6$pYgY1+kA`oTKDy)sEkqfUYoxRaX#c@{?#RZ z<*gp1SYM5y&YVFz(p*ooHhomacbNqr3DN+5Nm(M&=94>z^wu zSJv=zX)4J_iPsVrBrc0}KW}|^(Y@iX{`>V*e-XMT|EE0f(*LC&69WQ=WV)QCY1oF) zuq<(*GlSR2Mqhh87I!@lLqC>C=wL2o_ppm~!<)x-F=>_StWq*nWSCv>S)>s;B;UJK zSzLQ!bA9giA^hFb_mg9T5(AR-UvWQrB{#q&wUV`4FO+dwzK!{cg=P$Dkorn$#reav 
zHLiU^vjpYaybpOkdA@!B8TP&V&(}t)px&V=nR=$%8N4x=wo$uf^|2qY8Gp)C_S=rP z`(CLZoIg{{(f*@SNBa&!OEogh`mB|6-_O}DTbi(_jJea)5_NwCzN`H5{<~t|UL@Q~ z-jx^^mp?vt(oKZ%y>z3$7pBKjT zNi1r&a#rxm7O)|30{S6y0@T3o0T}|?_;y#vdS@jTh>J}4$MaU4#VPtRt|0yOUH*w_ zW#*$<>V*~x*-Cr-%h}DG7xyN1?)UikMk(=0?|)DIvG?Z`G<$=7PKe$6ZFs^*Ym9GD znhWWUq)nH)lz%4F2sitebru%Ytp=V-iR-`3`Y`KN@0VR)N4+}}{rT4b&kprJzcE2& zLsEt2&5{y!J*-fc&SCE|UJjn%pGCWGv`Yy}S?ld7|FU8{OWfz-cDJ)W*wxkUY&|x0 z+;HH^AXZXwm)D;;-}qef>FgWkzan65z;gc%eh+=zjwgC8Jx+Mdmae%86@Tac<@wbq zE-L<8Vqw>O<2LwSgnh>-Z~U;XDp$lwwXEa1&ja79zRl2Gyspnzn~Ce9uXa&mN5~xK z-kEeh?%dC|KhOLQORSjU@C-$XVympFv{UmsD*4{^pB-qWdJ>c|ctp@j|C5g4AXYg< zLHWZQ{s;B;=EoDGi#gty?s?~K@7-%&lA$1yf$BTm^j#JB3^mFVz7?H=9YfLSxT0^< zB1Ijmm#38LXY$pg4oSb0M|nc6fp&cnhDy*f`q3~D>LMuFA5+8WeL83A_i6K-Ud!_bJIh3d);lpE4}FS+Ww zI(uH?){_+^W2)BN*~cf*x!PIN*+HKxQnLjZ0>+y}pIZrZYJ_@S%uw??KC+L~!+(6h zmB8|FFt_;Cb<9(yS#8W0pk-~07T%k#LrLKYK5@BzwfVXI*T3;Kl1ilHaToK>GJ?!6 z=2DqML~DH=nZc8$Xm_=)aIpQ^l3obrw?i-QTm%<*6K^R3EriA*itW_N=!JAtJ+_zK z=xC#h@!j}tCYZJHd^m13RBCHa9b&^uvG4 zT6QzBK>H2Ci3gV$Bqy1+H_?;dd)fT1eAI*3NetH8r7@D08#hoLW(Ur$AsIF~)VyJ(IVL!IAa`^qxtFSqrCdhRicxoaia zJJACfsvTw9-xpl5l^7t7DaYtP$9Xq;@6(TkvMnBC{bz3y{`xIPX6FcSztetm{B{2c zzK@*KoEv-!`NjBU^!p3f(`I^CJW+G2+m+|Cx$JNEQT+M4-Rx!6W1DIL`W;`)&DJ(6 z*-VAQ$V(KHvN2Z*@OnLIy_Y?CJgGdDyj9JtY{qt1S2&uY9=$K1Rp6e0?tU|zyYPZG z)N)L17AgU@G$(tLT(QZUlCCFtk_VvadC-&5yels%6LExC$PSrHse-P+LN-*}Dr0bi zy)4<@Ra%3AEkHwkjeg!y+UazTbgxG+-$cLmD!99QE#J7zJM+F zMr?N1rjPAMH+xA>=Ztom&O<0h++i#B02{t_+3wZV=Heqe@H^Q;+rn;nF?9Sp3x{$= zzCi0Cm$`_0UdeqBZumqj+t$Ov}$cZs+Bo8{PKc!Tc4XwgsYs%6!~ z^$BbkFJLFWgff@y?ygK0PkAFeUEOE!7VGb-<}U5Yh9^#C^rvT;N!C}K7wV(eU)DNf zbYrvQqC3f*-dll_pnwlsXk>ppAA-dx28g3az!~uWeE*s#j1r$jzR`Np@tzP-iGeluSfnM$xOId_0g7 z(pD)VXB#a&Mcgs2d#?Acy6y*VkGqh^&)d%UU`~~}mDOms^uy(6iJn{Ut7meocI0q` z!lnL&R?Ri`O71f?IwQub{#s8h6J8#dK?rj?R_f{W;+mh@3pI_a$^$!$XSvAyVIDE_ z;0V{2Z@I!4!aINGso`ydM^R>PS#L-0HE&iXp#9M=S!ql*Us?Y66u8iq`o+yk$IeeB zF$`URfx;J8r}jL#E9!7C=q(~YZbUJE5Pdw9mMZ=@oHWD7;j(&I^V8>nQ*Xpg?g`En 
z_tB8}D7S&ZDpoe8m#LXy-Zv*()8ueGdG6pI;)mkVJoGW{Q?b3#Digt$=`}#ij-aaL zk0wbS^`m%=T0?|71}%hkBEOQ&9)M=XSLw1-u_dI5f=Za3PjebU55f5 z{#7ncO%=3a)__<~HqpwVFhKN$?WOPfau_s?v zY!hF^O)*e}6JPVAG!VjEXeYavN6?6vhtvEJVpSrut~X*X{yl!mA(SGVOxxb@Ip;Eo z{4J)Td7z1PxW(+`DJ7x%f}fZ8&dhKjP9(RiDVBpN#{<+OhVZjH+ja3tQSlvLfuhG! zP~IVE=uESZq03YOziGv&>RpG9TPII`PfbsXr?9cf+>NTr^^~p3zie~W!L_74v)FZZBOs8tQr#-e6WTstQQ+4ZbK=3b+o;WYeE-rCI^rG~lL)X}ubV)eH2$RDU?EW`J% z392fZ{h#b1Q_CgX;jQLmvku7$ugOIA>)S zL1j8ZqNk$C1i+2FRXa_XMTebNOp(t$(1bXJT{Q-h*z%-?Fro*E%IXl||7zDy@veKW7!Q`i@E?rdAU``QIt2w1xT+5d6FF zYwF9*XkaZz!z~JDqMYgkT;!aJ!=5i!T8~ipcwjG4&*LpO!7gKcK^LaKS<0Tyj-3mA zxGcnm<|t1E*jL!stj3OJxb{FvkbAAk)?aA8WFfCN6BC5NWH*di*&20>=p^Tu1FSAe zA7)-t)qToS-rf~i7C)*a>dk>fhAMUfN;BK2fj%gel}_@WF~h*KSS_KIQ(r5Z4M^z? zHV)!A*;RV+YM8D?s;%S;W44)A{_%^o*vagVUr8M^(5i&vRj_@+YRBgbhC$I#Y~w`t z;Vl16zKuGrctSDeH>7 zpfu->zZ5~7mxAIYF1HWV53pN;aRW@nd^`sCvIxAT1}g>FJTK-g>Y3&7V@7@5P9w_U zzt)j6^_NVmyjADxPOYeY&)jF##}O?r>R1!)9@bCei!sxxsq|q_xT4r+?kZ+_62plj7*36^8L3$lXQ6ebIxgKUF;{PKjDtH7fHlRPTe; zJdQ=q;f{`KXS*g<{CqXDV;gF~r?p4wB-GsglbhsE+~|UpX7apQ3-8ixW+^2FPnxdk zG#d2?Ht#Wa1eqkxvgd--jIGo??@er}>DC;#!*_rw~jmB)H&pE1r{S}RbR zb!f1}QrSkKy3|}`vd^3646m^lf4)rKmoCj+&nSrZ(s9QmM~GfY%!SKT))*iws8RYn z#}G$jM=pK1lHZ(c^tD}DX2)J4+&Mn&SiH6#X(e!4i&y5z7cyG;EdrEDW)IHA7V{y~ z&m-OpXgVCH{^~_tKF!L)JbpHqLS=28ViB|B>59jA{5{h>sf>+QFQp5*M*BI_ZBYc& z^lf@)&8_6HH^~)rDt)PG@}Z-&SlOgL);_5j;P7-6vqhxxM>8;^JVt##U%SqPaR3qJ z3zO&UVg<9aaIF_vd06 zlX#mo!~TZnQ?zKS7Sd8_{l$H%pN%-St+aFCD6GQhs!jaX)tXEnSK9OK7X0rgblx1C z>kis6s-rKdCpjeF4Ln2oStgArdir?|WB$tG#DxUO;@5ZgYYeXJw-<9ScjVHWkBm)SU;@ zE7nr3+tsNlTcM=+H{JU)IhT`G)!btCl`F+ocq}EIZ}fc3Ca)@cQC%q~Zd1tx!w88G zH{f8Dgx6|O#b-j3G#mc*O{wp?qWG5=&W=rmSAfhKgNoQW8EH*1ZhHQ5@5Ar9lDil> zx--q%_{P?RrBL3UW&5+a{a)EGLbXu(<)3P0(bb-c7tc16tc+vVRkULh$lbNYQ)Qv> zLC0q{-y@@zUqs3A_}(3ZnUM$o(f+7X4MP8}k#dqraYvCBMcA>ZoTVjtW>Ytyoq3t_ z`w;iPc>4R{X!1lUqtubqf`zp*YF~5{N282(6@9qhVzBsT-?Ek)67}D`-r?pRd7X${ zj;{Ne{g9fi5zc?l(LVW&D%g8^pgziU`2p7FY^yqmLkOq7ko_5T$VzCP6<2KW31`Bz 
zYIkLU?1=Ac7kQRHmJ1FWN&RyyMMBQivsBJnJ=~l~+fL zIxr_dgPlEn2l#FB`{EnsvsBNnE>K){F-}w|b)9-vtgyG3Up@QWd*FzbSMqBgP;$$r zev-S;{3~p(WTxE|-_7}=1lhZc9EQ@>UR0~@%R0&g@Rf7gBi+r zty|1d*J;tg+s;?IS3jZ^px|nP%Xjn;&+-N>M;ria+cuusKfp1tZ2vM7xqTn4yyj<50rzAy%jS9mWr(`NamMEsdhR`){hha+ z;XdQ>vronqu7%%j-x@yq9fkB2s)ssuf;?^g$uAi}l8&3Fy=UAN-D$jwtX|?8{qA=W z$0@jAtg;-!(zoza7V1CjJjN_n%al#7mfrkmqji?c$QT~{t+RU-vw@tT6jjG*>-D~l z?2hwVMRM;&o_BsF5KrU2)Z~Y}$6fo9KO{MmzbC(Nb@HAx6RbF^4*cQ0)(SSRgPk9J zTl=^6@8NgXr@u4avCjDdpZ3eXlYO!|daLtzgI93F{i+0s^vYtH2Oaqt)G&jU-fVTZ zRZ4-b)wb&s3%U|@pAuECig5jv+q7H;46W^tE+5!2l6qh{0=EANc4u4c(WDN&w4 zTrwO~|GkhCZb4OCQdx~7=>fc3w}b!m!asDgJy;H)W}4*A>~7#G#aa379q;avk`|nH zt+B>Vt?B6NeuTG}POs+3>66dzwtudGbAGve+Uaf7ZQ=sTmJ-L>sqC9)wo3BMOGzD# z;m_zBPNRq2qgZw=dzfq@OHw~9RtMnET2G|4i&wU9+!ym}{sv+8*b~>{rV#$+1{jVq^zDn=7RjSOJc zmtnMRAup#x*|;)2#zrd@xac}-iv3M| zM&U6V+yB$;H`X@8f7uo<_I&l9j}gtce_S3w|;oOx_5g^%WgLm>6dN_ghGC9dwXx;+dI;9*|`)fP68C&Hw^axLa zH8&R_kST!R*=1LWnq8mV8v({TiVIq3bXHrbwu=a{82!T zQ>Q3H>;QOYc^o0&g!3GIwF0nvuTjD3_zE1MA_7HkzVY-hQPaD;x+y;pm zXAZR@$iCB2%71NMc5h7i?h@v8We>=BDdjfxcLU;R8$B;7WUuT5E4@5ypAi9QX4GM; zySH`HJZpA_yHy0f}*Z=X0NJK35#a(L=gxo2Y(NcBIm-*(A_!}Q)mwg$C*@rf)2E<#J|DH)^lD4~DuLUtlWEu@ zC9_zJpYj}7#~y2koTSvHdOrtdzfVq(m+VHOw0c$irTnH(OpS{1O!SD|Oo&#>B>NS5 zx2xqVGufW03RcBFq+|;V7eNlRdpd0$B z+_K-}>U|qNdx}JBf@=RizG*wS#wC<*Xe%B7>8+@oL9zFS(q4QMcumk7u9MSbLFPeY z#4ak_$zYiK;M+7;v!G?SSWjj?FhMWwh;@WBk#XqN)XU0iD*is89<|w!C}+Piix_>4 z#a30Oq2Z#gY-HkD2GuU#w?t1)Z?WI%aX> z6xvKbty#)jF$zZMc{+`BnhLA1fU|(}v7-Q*E*13#Y+HCyz-`3M8;34`D^A^7FpLjO z@#+$-I$O=SaSP=b%gOFhIB)HfwoUJ#uVy+}6GW>epM5AwrKyzKb|1XPufX!g*Jv(drs0giVNRP;GGS1j|RT4KBM=YMuk%`{P}aesPR`>Z;4L-3uZ z;y9|+5A3q)E2kSQ0vCfNM7x~Sz}4AqS;Ox*FSDY7Je`e; z&R|WZy-5M*Lod4t-UP?B7+A$&`U_C@y`W(S#d75^2%06&u@hI*DsGlEws`;bF86ML zQ@p}!dUZ71H+aXgA9E0$?@2NTx3sTPhzV6jl!uS2!*O5etk=iIp`U(DTf|u&$lRkd zh+Ac*S-s$b=7JIY)J{S5Xe>A27M)#lB`e)rEgU2(*itUFN8;bziWra`|MyoY{T2f= zHLM`=M|W{T9jaF6eg=!GWa%vW3hfkg-&?3x`-}Z_L}jg;bdVjbebUD+Cspv_qTqzL 
z!17PPS1ts{?iUmBM)rE*%TsENJ#rd7+G9F|Kzo+-qe9Kh`}WFr@G~ExZAg@oQ)Rro zf+ul(svKW2kbdV5P73Wf!(G*hstZ)0H{HQV6}F>R0LS$Ec)hpKDrnQdFK?oj9*c_e zA>}9P!+Di~swtS%c{b9jgC!NTvzdGN$@QBSQ9 zqj_hKl}%*ki%LKAx+bIRJ(+1+ccw5~K$#mU=}{GHr!+(D`5CBp7v%#U5&m|lbn|^T z(Q6+kf9OocvoeuOi=)JCVoMwF&$UFHEFkK;wMyC`b&0r6ZvBcbdP6IpRo?n-HZ(Of z!;?_2Z-rX?U2_$i1=G-P-zfXz67es-DxpjgB8fcJWGq>zouY`}{O&8%R(YA`Wycj_ zmfC@iG61#Z1Qg-c5XS^a`c6?%g!0b8B{;D`oX;C{;5%eLnHivpv=nVR@II?nZ(-fV&wJ_oh&t<;0tc~c`q8C6yBM5iWf$#WVHPS#e` z2eD{nU#5Ry%1InM08jR^naxZ#Ld|?gL?s&j=4f-Qbxoe9)=IY9phkNX9OkB86a~PG zdaVAEO5%pkW;_TI(7?W<9n{97^>vEPgyS$JrlIxr!R&7Y7>A4t=3RD7JF#7O)*J?V zzqGv>gyS}~%w9VGGi<=bgRX!JP>GL334DR*Coa<=^=HboPVJCRwTK^EZ#7f0#z1u<%#QdP`gs&k!5c?8}SFU4oh-7@~u z&FxGD+n_hyc_iL3-*}egtUmaOAGW4(N({3WT=B1##;Hk74ZV+>`V9op!A+UR1gVU8 zr{qzp*t_6~*WfozP~IxviEp#fGOvK5{5=%>Mo@)^gC>+wE>In;MSuS(72XzRg!?#S z_n8L$*&0yUHY-YIyntp{IsR&h1N|MeBMUG!sKmEV0%0wSzTX;re7bTIoA7+s;^1;&B?fowB`g{MGJcqNPS^W@jfb&SZXX2<^DHT1*R^Y@C)jP?@1YI zN)tzxrXUNAt$}FpSEQZ=x5wk<3Y|eDh~5m|B6AI*cr?FnsK_TuiS44CI#x}i9v5## zVbJ<|M2mr-w!Oi!7J-_WWY_s%9c!t!3gau22xf7XIYkMv1s(QTc**a?-$r4w|40-Q z-EbGsd1607_wTS(d4S!M6p;Eqx%;P`MhPI&9APW!Gg$Z?I3!#7>BH?^%pXp(_3@i} zHXS_qt@u1tM(h0fgdI`JkF?6*8x|Mi$2RKzu5in0t9`cUQB zpE)GXLajs`|K!i~t~c+sx1EzJwl|q^2@|2xR&A>_bJbyR@Sl@=PRpk<6V3_u`GlYG zx!+5*n#_NfpLhG`6J@5F?aNuXh$~D49yl$O&D`P|+|F|LVIo2u@>X@ePXH6Q7M$aC z=n=06`<{s|vQzs{`^xP54O8~=>@YlI5;BQQmW?^|J@BYY;uyXHuZSM=(4&8?T;fyX z_r!LkpYn^TMjP8Y8cDAk7bTjiMutG=T)CSn~~F!Tf~C~mS>Wb#2x$_+$y{5 zVa!iL=9D$rMmZDLvpXsPq(-cvky(T0g)`vr|nhu+qqbWTWQn-KCHt zOH#RQR(zQ4H-o>Coryp!2v{`rK(M`2mL(sYmeuWkY#903SE&oX@!w9TLJY=#V*zh* z5;aLLd6dnnlW-?Kvjy9RPj`Y(l97o?UryF5K4}r$CaO_QP8O$erZ~b(au)Y_f#@JY zaLf3rI`~^BzBn|YCkvu&B5;Hs6VI+R)N_}SI#T9!5vdTV|#JqiZBaq%qe_D#M()W8pV{e z7oRG`K1n?6Ejti*cgb#?f+RZqHn@_cAp#XbTk|OM>vUv-u}o3la4QZ9W`x{|k9HE( z#t?36Hy8uEtwbvnEa0?kjjzQWoGW69ONGgHzlb0wcoylIdp9TdP3OG~W7c!s9#2Lb z!1v5ZH0i^%Yy6*iG+c@uM6-i%kb2YQP6Wqo#J!H>4*uZ_{KL%gFmJyVd^rzO&8p;} z-qItdaaW>H7lrXit_mk7_2+#q7EPHa*5yntqW0;_Ps+i>t0CXHt#lEaH75DP%!%e4 
zrn9}x6XrkM!wO6RUNK+Z2g=l38x3FTB)iL#sYL5jr^Snz>PztT6z1Als6QHmf)0Tv zwu`CoXf#kyvxTkVz|oz!)`N5Z9qfBO-dCU5ZLY<6x@5g&LY9r`R}?e*vdRxCx!H7# zX{o*bWUTkx`_uU76yxS5!2U?!R@P#2*^J3$F8E1#`Kz<+LapS%x8WT#P%1q85gUm; zsC;@7nZMxjF`Kuhv)g!1y-6Q+iMo2RTAMuL75ll-3HXJKBmPz=MsK0+`-`2GlZzzq@4K9OT#8{D$H@i3=jFnt{x6nLYW`%nwqb zpg)Rua#5C)<%qeR=x?6jc#)Uq-QMnDmtv}%!EVVM{VW^VBiX(E1xw=qb!$z2`a&|z zY`*tRaNlntQthbbhuPH`-q&gIK}hayZe|xX$z4am{~r)@worYYV886Xb;LT&`HQl; z5(u!Qe%k~7~DS2D{wg?CF=&Pyx18%&XDOe>Pf23|5~UZR$h zGuoFprt$ZJ-OK$-Z6d}2@ko3pD@&0~K5fIS{wZ7bb@2=8W#{A^_MjUGhgqOf|NIc| zxg8GifNj|tWbk*~)=Jdrg}GDR$Q-xHOTL`{!szv;Qs$67_sDhRkHYLpwua4gS>B>L zxx{w+8aagL)0pqOlo)zW_LR+-|JNZlX6DbPXD$=Wd3w&2|1%q*lcX(2!OA$o4gbjP z^5e|>R)+Jb)2P0<=Xj|fD&c4siYL!UW*2YC$_>c}_nB+e7ju=FJgv`c-~Yl9Z34MM z6{kQ>E7C{)VGs3FJ}aHX6LDTtXL8e;9;^%}d@QFYSt-Z!_z6q*8*_w?eAlYvfaUaG zCwT{1nBAnacQZYFfg@n7)s`MAocB|Pc}D~Cokj;xl*(f>9x+*n#V7F4xs6g`LA4!h zuu1A6H4)xod8(uP{KRa`sEUx~OYnEs$zxtTqI|$zv(aU}p-Nb9H(=V52WHp zfa%jQ^6w$u!85x(@!w6RXv04H5OPayCQX0NS0Cjc5vo>KQ>kaz6s|=adjmJ5D(uPn z_^r)>&(>DWscu5KpfbJI-&9!Js2V$yf4Wkq&E)oUp<68r#^p;~n@?r2f!|x3rzFXl z73E_zhGQ+k6KhV!3&ZPeCDVLAK5uEJXf5e+{Xs9rh+a%5@g?F$w6k8a!95o2!ZeFh zf$nBMcA|U)SD+E~#2w~iMZo-Q>eb%xJdX(%)7mhtkG7FL+?85)_DQ4B7@4Op*7NH< z*)6sCGhan9+>9w8TE9`8xOO#g(=w z=XQV*Rf3*s5?$8}HM{eWPkV>IVtB(m&%G;UU3H*u)>1LVQ;|OaiD>U#Z>(c;Fb{d79vtJ;c2OB^Ew<{LvEDEv z!OTw9TBrD_RrPD^t5$>o+5_$HFLpDcZ6J{@#kycslZ)(K_@Fu+fv~5xs%4Z1+|p zL}!qNn_891W``XB$L$uU;4PSLU#gn(%33fZp$*can4HGIGcU$;YYXmD{r_K~oF&#@ zV#56iY-BW5VJ$l5*2;2v)J0Y%ep{f`0Ob^gxL4G^WtXCl^DqM%!Njo&u62uT8$ELi z%+8PMa2_~YUh1-cxs!!Og6OVRR#%W~oQkhq6@Rk>^pPd3GiG1YK_@NXJbfp|l%Ynf zN_6{E{WJjU2;*nl$|LHB6R_%%!ObRvX7rWCdYJ0aS!Y81UjOKQjoY7jC#W=SdatY9wZVOhLTM- zg1q;n8@;C9BhLQK6zVHAK?QoyP!tZLWgEUjPdS zs)q11-^1%ZXJ$cpYOi$$_tb5&7s&cZ;zUOx$ZqaOOS14$c%fm`FgtlRU+4+v(yJbT z1>B7~CILN`qv}$2C4w~YO-_6+_ydu22?|*EFtY4Ou@jEyc%1(dm1R8RWiSy#d2T!4 zxfZ7~DM2jm#d%o3WNb0KwG32fC;1dBm0q|m)`r#MOKwZ8%;4VkQuix|t&zrZZdzJfxab6kd znC&b4WBiu*rg3Ict6Pzte_gZPHH=eoj5^(M&soFqMVV(@bX9Z>^AxmlicETWN2p^b 
z9+G}~Lv@@TVR_B_a;6xiZO|I2ZJ5xdSXFsTzI0~4wf)Y1zN38iqRO;HSt1Y66UUfa zjmaM2E`mcwW7MRM;yT(x>*4V8IpVY1X=rDadsca4ru(0i$I0<2Wjv+Ka5Vh?adz_Q z>yyFfuCuVSojyV2v-dK|n_;l~;++m(`h3aHGWrlgt zx?wj_59vvc=Qw{ff?F{|WTx^cE>i3_7pi+W@YFfV$`py_bO9gm*;0vW@e_!qnm7BbD*4TJ8C zvKu9)c>RF0pU)QOV11q_gB!&s?(87?hY!U1r}BYvN3Em3g~4$SMTmZCJ|(s6&89&~ zBi`t1`TUuNsY~<;j$Dox+CHLWZghlT^waCDhlQTNrssK_Cr98~`bVqaD|K)~B2PVW z2nO~Gdcz5_rCGuI!~NXd%M;~^^rj26kN;tr@DEl|ZARg?Iu3hSC|dxI3izxxA781l%F6Om#O6@ zkZrev&kUfdy~~czE@OgG)I@Vap5^&QQRhVwhi=iYgb;6E$upp*FZqUlFz=Z#9!jB5 zlZSe@IyKQg`uv5c)QjF*kLrC-=i$xq+0h>TI5*fxjURha7KA)W0CJd|Us(PUfOz6<(W zRKySm|4cWof>#Zvcf7(=xo(#yA1=ogBM9E#M{3AM_zDEkZ4Mw)$IG8x$cWMa7N#++ccOqMEG`yZTf8oggKIL|Bkl3Jhy_rdJzz!ZDJyXpr< zvK{nmJ19UgrcfGH{ZZRZUEc&W{urM}Bg@9~Q>uX3Orz>+M9ka>c8FIc`E&yLxI8G~ zDt@}#uELqvg{zgv9F8=QTs)Qc{VmC@++ z9bsOO59ag>`m%>G`CQ-=v*{H#(@9*SbF9leQ>q}9EOVI&K@@YZ*6hY)1f@>PKF16&ACuhBm>RG!J?%9R zpd_lOA|QByYB%;7|Azm&lRLAR9MQ+S}9UBi_qoJDhEViA=v9fUnKt29|~e(i%Ot z&!DOG;qU}7{jCW{<0ae%U%Kf9%5P;34BztNFlYN9xZo$dH8q)+m|*i93$giCkSh2B zpZujXWF^kzHnM7jt?)e*dc!i*x9Px(7BFx4z%zAFt)?((&%(d|oi{%d?WG-5!OiK{ zsBXmGKTL8eq48{{eC7A#rm`%>UO`=IhA4jablz%q`0we!Ar>+Z`p8qr%jZY{MTwv{ zD@|uG3y%72KF@QeF;qfwAUivC@K#>QnN;lyAUQ4hbFu&LR$ri`)t&P{m`ZsP{dpNq zP(I?AC!SPsxH_VsS$|39X<$ zW=Hdrx!>x+w{C8|?Br(+`|M%JN!Z3-S-iujppyV9>^16g=a|22#W_EJuv0MNg=&Y+tkQOr8Ve7-!>ytL|b@Y!-oIy(c-v=_{Bh%J;gOVLwa zuvh*+27aXHoCALt@q&QXYN@mTq z!1&k;VanT*%IY9|hJJJuEtO5gmA!U9&~EhHP|&Oct2TtQ8;3q*DfrZ1LF|_^SK4m} zGi_3p`t~03)-lfRT%y_xI=AEORE}V7W!WV`ySgdc!TjgIljzKr$8E?b6DueyK-EfuN_eO+imE0k*By}4{UB_8h()tO!*(*c zpJt7w(i%jSf6FdJRG0}Co4`#TMQ57RZYR6IwbMban}J!Zrb{WqB)J~Q@^(4f9z=I| z8tk(I5o?_t!VKUgr?3{)O--W2BlsT~gQChv^|2aHe+hY({e8_Sh47x>K1Q1lb+sc4oQ#=rfCgRQ-~1;4jTV zC5yoS_zIgUtGLVbYcqW&wH+B%@UE^>52DdZlp4leIl#}I#yMzCosfl&xB_12^LT2T zc+!3UKUY3N{)n-k(%E!k`>GD{)`#lNgL+v{wHRmlGWhiw?#UeLoDxi_-Q3U?V9eFX z-Mi>zw{yFW5*OOiowf#1{>_YQEcFo6H0prQC>tK;olj$$G}LmNGp*sou7=bXQOqP> zh|O&NY|&N6M*Tb+aASEsxz*w#o-B7CX2CGd_Z#?B*V*wxaHD+}>2bXo6*w~7?@C&M+o8WNZ 
zW=9LD3Hs*j`08#1myRY|Xk@oebn*3IlJ(@%v|zel&RWIJRRMa*L~7d#R6e)F zSM>t^&n2{nqDx#}l8Q zz)EieM){6-n3|0M8w4eQs%H?Ma0%$X7PS7xbK3>qtpUo5jjS7JY&wY%;lwDN$?PR~ z4*9hka82$~3EX9BaFTeH8y3oBu#T(5%%N6&(4?hu6U=}{u<`}-`;qvWkK_p~VWzvA z+PaE*lRQ}$x8bqWug|EdYBM*I(*#raRNXpDX|2BY}(;wsXip zqRLA71H7%SHJ6GQpFcY%li>6qZE0cubQ1saBp-=3>L>EnG_}3Ri9X#yqPe8ADZ%#q zZWK%%{N_r0<~!83_xTAYnD?aRGEa$X;+Xo|2o<1whq}Ezd~i zdpGThJf~KmI<=T}RUl4uVm3HR31Uu}M+{QGYb*3bEt47zj@roDjHCN;vSm{$f_^-0 zH@NFr*bo1Z{gyHL?9cY)LfCw@>;W>H6=@cu&ujrIut=7%gc-}uddvTJP(z72PNorw zV1#kvEzKN-WeUkMq6KIK;t$=ohl9Uwj`m&Ivql4B=M zlf{&1Sn}_@W35Qx)HCUBCB&@ij&T2N`fGuHs{_9J9dW!6uZV`jMMM37PcHv2xLHswawXpl$o9Hr!?bSlchrJAJQ&sG)i-_>1MW%GyxM2aFw;rO0dz{am);wX&W4rB|8jXU+Gwp~H|9^JgbmGJwdnh}V8@y?}^S!&gRlLQ# zBfR^(GhjMSAdiew#){o=io1g>b|j9KB5E!qXZ^v6+HwopSpC`jIL^Nj=k0)R@CDOr z-H^@f4$2{%*OR%2W3>@_4o4P8ar6LQYu7k=pNL)GNs$^PwL4 zwkeV2=nEA_CV$|SE(cxiT$`oHrC7OleF2e)pCpFn6xo$0Gs>!I|5lDrDTIK@=j2f^9_1-4s1h@4A%Frox4Z6GQ zV)g&kXXF1rDKdHDIjz3tY4eM`B8qC+)Q)xnIMNLHkEo(8L^FLWY8Y3&1Z@mb^!h?PYv%s75(f1WqeaCd^5bUMA+F8zdxL!f+s*IP_L2Ujg z*49G9rHgSGJ>1=}*FS(zYwRX%q~lrx`d>&~qwW|+haRTn9xe(J2-9^vDz5Nyt&xPEP;}O!-|qWlxxgu5B|TyxWtU|oQP)!QnGQ-j(&M6wP{Ct zH|nH4VA5XOjCtEAGMaw>{}KPx25DXo~GAOaKkO>cDX-z%D`pc$llcu@sUmI z2|TH;_A*svdG;P|GUFa-RX0a?ySNLZ*fP=m-5V!+YSOXSxz_pC zv08fva!`}{aR(EsGuCC}zUQdBr+b9EI{xrKEJfKXKC1V?-IYwLJgoH7u(9W{u{m5GQF3b$4wdb&5UsKD*ji&2uujYE%mIgU zCx(My{2+d22dAG3b1$Q)24`ZZy@6QWlB#^D(n;;2uW-E3i>g26f97zrxSRuv{teuV z6VwT5IFB8`S{6{3{$@u*a(dekHH$Gj$w9>yC$bWcirQyE+(yxfJ%HnX0vx}snpfOo z7O}*tV9xeVal29gX*@;D2!6&4>e~8r_)VE&O+gzjg4%X9Jem)*b-BgIeqCkzcly6KB|ao^&s zRUq?Q+><@EjFVK$m6<(gFeviC*my$+UlVRs6kTbwRod)h8Zv1 z`TeidVoaH*Y0K1YN^v>PoNIn39;QceVIUlX(xArA?0iJ_hPqqd?U?UOa1M4x>!IoZ z*^B$#$@I6|zzaUkQ~C`q`3Hf>1;^|j4$>ZY3%<6EgS8iz+*9sahTF;pCe}^KDN7n3 z-HqKL-hV9gjbIfF6E*FL%+hta9&YwPI?M#>Teir|B0sPhJzhm2E(FCNdc*&Z#fl_2j$IV`=IHciYz{cNnU*E`j?Wwlpo@w5Zb(drL7 zV4=7K8h45Nk^x59GV*l^_%Lo505PC`EtN3hM`Df|7P%bEtxtq;>}D*Vz=h#{SPMrMqm!hD3iO+}diFRCmL$&WLKu 
zM0ORskrk@({dQ27y;6sWC-y|@we4)ItVdlbyFOa|X2;O|E@cKVQ*~2qbRnm&X9qE- zG8)vZwf$Hznf)H;4%D#P8PyHCV0)bCNR>a0C)if}E%$n_;sLVRs4CHdKs#x;deF{g z4dHaJZ9r8+uqsEH^DEp|0kdAS~EK{8L7SUzlyFjzJ{w0&&-)K zGj~a>seNg!DMD&%N$sK$iXtdQYArQ>NNVh7K{3DZN`fjjesxZYdqOtwBUvW&A&jn0ux@eB8XZ5kbn53Q|y zpcJ8>sI8TK(rC9QOSV0Gi4%(%>K$MN)1f~24ZYVDX*w;1_oOlT*W2T)wENoESUY#G zuthq6o}fmP^kdRc!9^1M0Db~uJ zBNmW9;bp4@Cs4?(0&Uq#Cl{QItONVd5#2*b^ms;ADnF<>)Gyv)r_CAW-_}lc$63kK z+}oZ29r}9cL*EsqiQkcnbd+*gE)u5$v;9k8QaZMi?U?aD!_B+|yU1Pmm9IEmcysTB zSVjIH{Y~khG?QnDeZB91bTnmt)r_XA<3R7UPEmm?Pi0~>Z0Io1+_r|bmbgzTZe=6iLr`amh8Y@(Ow z6Drcyq(HdAlkHezl%8Of_h!<=zOMdUG}u=bM)3x?adUV@Y+mir=u#csU44w>k#2gt zRpLyBcBLB-y!ycOS_x@V3{IJOq$MWr9Na^j>?|uE&%c=8#!RpQ4CPxuT~7h|JOo8U zI=Koj>|5Zn|As>djs@hbh*0N5pjH3fOT=b+3M&1j(mgT`v%?N~mzd)Qc|~a12Lg?M z;IL&V7yvud_JES+>V*R{hQfGN2c_D1!ImmP@Z;x@-c&8NspWB-d@7lFUw9AXn4v_S}+NgKL{7Rs5V ziZn`KK#1mm^=kQ2kB)o#U$%s8cA~w>QXJijHl1{-pO^tAZw+>#3z*8!3;%InuxA#r zTG_qaJd&tALo-%0wFx~YHH0FoH+*`Fq(_+BstQGzhPs0bWq$XsD-&M>DsItSLX zcbzMIIg&XFrCIVUdWN2$tLYH>Ep4t`QvB*G$}HmG%~`_|zzpwlGo`9(Vjwo^l|Zo^ zcGl@L!VmSDjw}{SS40=nYK#yi#!9_NC%Gbc{+F)y+SoIzV4_cu`brz27hC{bf3G-L z95Kz&7+)ckKfr@v}yk$fL+=a~j*p(>)t}Ss9TC;1hvC zwGt*tugJZS?I+;V6orbsq5ZD)%r5JeL;6gVd`~z5bS5MmC4<$A{^DRpRB~{$rjUqp z+-MgrD7_q7ps%yuaH?S68BK<%jRV($Uj+V9I}*mnn{&hE!nR(IUG>uCx|-j&Ox-A3 z9yUpHsPT@`)2L^x)LZG-^$yk`-c~Z{d?X%xF00}Y_X+wm;-POY0NyqSEDhs}gd}M- z_`of2Qe{BJzYTo?CKWTO`az4#_q00ZBnZ<~;SFzgyJX(KVy;eF2(6b>PtCO&DMElX#${+MU(pD)UX{Y-f6L?!AO&_BVF@CYCx~<3@wY_hWcANey_IL{k{!1$kZj52LwJ zWP0qTTV7<+&+>Mt0e6!;qy+YH$HF_elr^yoy@~#T-olt|ZQ|!7OPTD;^Zl&#QG3&a z66Oae@K@l@8V}t2Hjt)~Jd1boc8Q13%(O?^;dzb>MS46`ZRf-R(ge6%w}DkW0SD?* zG7!kq9(gL!#NlqD^NZcW-fg=$>Au1#mg;=UF59L(h1Zu%CF1+TU&H@ctwrwxyP4;0 z2AWv`sQh;xb=H}ej4UW}`l2V~BruAJ$Sm6e#-c6Wx2~j)Qddo(m*9A=#}e!mc7>-2 zyD$~5hToA$)4jKx(^lAQXy-dG^`$MHC)QoFEpjD)WV?9`^06L*vAWLtI#Zo1ZbNcI zo#7wj+b!31cbZ-F-}T$(au(}8#T>T{YWlhE68?|dlpN6p1(o23YKgGU@$-{nqU!pG z1vY5!$Zen_>do`rba5}T8EcbeLa{U2YH!>&`m%S0UGh-n7`=|UI^O%}rJ|O9;Hltj 
zh66uUK3_>XO%lF>I^h>HX{^fzv##GtSeoBq+XzbFB_}j2@?sq@7gg zRdYT!s~WLZFL#T)!gndqDe!}KN7jWA?w4*`afO_wY^R%~ZEhCZXRl*mB)}m}!1;I# z`F$=BkJZpSJ_Q;#fVY9m;Y0XUvw0QQFWi#a)00ZR@~d23Ov3zMXg0}f`!zS#Vg#o(mfhaGV-rjxs5obsMl-~UUXX0VsPztT+nFXo$m_AzIg7ms~41&rt- zp%B^9b;(UZ=a*Soq=9yU8@R}x;AFV-gz;#=Ohm`e3aLHv{IQio?OT+!m%df0(1pL9DNkK{GW=&5&b-mcT2@^Zu^wRYxjAWo&s-NKvW_ zheKE3Zc9Dg8G$U@pV8QLK&nHBsNZU$cAD;yQoIJvXI6}98RyMu_G~8`$vOwc6T$~j zVwH0;c_m?$^f7L`5UDSX7w=1t=q@d+#VI4j&zxARzWJNgfNvG9O4qPmY)1oO2Q;be z(r1~;P7NVd>;wh!Lho&tv84dW9yjFzC8q?ULTI{u|d@#+pI zb|vhZMLJ=j^uNh2@=cL> z6(O4Nuf5Z_V8q&=J5~5&HWaFHj~(&eCb2Y*PL>JKv2^(v5Qh5VF=46rH5vskNgd_A z%0TV1x?DEh9CK=TRp>$Fn03oZ2LczsMC*|MDr?li%2l!mDGVP&30lV61dMW|myUFj zZe*l))NW`#Hrue9@NQ8gJ09ZA#L@Jkw%dOoFfw>B=maMF967;Dv;(GXK43F2cPw?+ zB027~*}_t4<*=)@< z{?U7xb=fKRQ)wIYkC}9vR9oyJW#K#Unr|bXb3B~atw|rTD%jz1&_P_|nN~ghdgOq) zmPe%1YA|ptFw3`-ZpCRo5NPP<$TJ=!KPBIYsX&u5u+8Y0^xje$$fBFC$Co~QF|PC# zeS&?#jV6i8D7BWFqjXhL;PP(|hrkT^i9Dao7Dn)!b_;8`^@N>;+NBv!XK6@*{m5?U z)*+5E8uPg3JB+OD;Zh#l#)t4dehBqLkY}5}g^NOVWTsUSdEg(j>UILq{t>LiEtT%l z32H#QtGn7x?>1hEv{>uID37D?%{Lo2H7 z)jBwBYf1CaV0REY*#fv7EdCuEW>P&tm#y!eOs^gAtkzyFXOSJ_Jn=$gv62Nx^_%KU z+{KynFe&s-v0Q6`-PP#~&Se7YWv{jOIk7?`Xz2zD30y_~PKi(;k5MC9Ilt|js8v(y zkvKtecR4edWG^wV>*FI*#Ek4PZ#&Dl^mK-HMaZZ}e z^^JOlx!uO8ij!mo5s=%{NE|1;fF`OoQqal(ZCQ%nw_aX?eQKugKJu2%Sob3zmd2G7 zlzysrwBO^yg*@^J@=!)=QQA#3A2yLS>7I9nkxt?DcOe|*)|d;)(mX+T>lyX9$~Y7Q?c zTYUI=v&dxjjdxG@!$UeX8)Mxwdsttw5yE06I#4lq+INViO4-nooQJyaqId-R=|ix1 z9s!do-I3m-htxf4oSf-yx3Z1OR%c#I+DPl-d*4K{2L4OLZ8x~bam0FVQBnvgeYv;y_Eaf2DJN*(pt(@z0vj}qo@9RWEEN~_OM;t@+y#hw71ek?ki;S ze*77f88@6VY_)kj(hhlxU8hvEsf*xh7z{<9Q%X!2G0{MDZ=#PB6(dgso=Knho3Y7X7?I5-TOWw~* z1p4G)TI|m6u^RS1Gu;eZ1K+9kr9@lJk<)+4bn};(O4qb9@KC7xy4354W>*noWgcKFJ!TAJ%)ZlhSu(^M=_JnSgIW1L-l(J&bY8RSY+RUio711p96O!y8r>?8^lI$5P^6p= zWCo|G!}-IKA&-+EXP0CQ=jsF|TeQ~jP-SZov;ZB;85v&uup$6mvvR~Qa z+vLyn8On#!RsJno&AtY9T2*{XfA;qXF7xHc%P`?Cbr!Ns){98P61BL;a~9s~l;Crj zQ#KTR)pu8ZfXsvYq9C884U{~&hnV4vHQz9jZTQ%TgKV%oXt6fJ(|G{eh#Pze5URUQ 
zQ+C;U%bcxeMW#k(hBt*m;or@r?oRqF7*nBt<@VJ#S3g>5X!KWPh&??#qU79*`=zsu z9=x}dj8Ctjv&A^0Cs`}`uVOpu(+>KE_|_dQo zG8GAKYvuY1*qkLpw*bnR{DKaHe19Scjudkv6 zmy|hQwolnv(SP|D(G%$M%SKaSz+A7d4OMzE`B}*m^=YNz<0aAhPj*dV09~zB_x004 zYNGF2U}W^!GAU(Jqf-4@N{ZCQ4IuyjCa}1lp}F}$howmd;1#tFi z z16SKY@f=XRv+fmVC3fCL=5XVEqaO0wny?uD22%M4K&NbZZ6q`?`AAR+EKohEn^(Y} zx;KQsfGSGjI&Tsf&aYUZQxi#o`N*dp3sXG zXseHLHvDC2eCfO4SaY9)taiDU)+X>g7zwP_K9=`DCH|xI5dQYjBK5xKZ@T{n&83j9 literal 0 HcmV?d00001