From 1e1402795f9e66f5576114025b83ff952413e0ab Mon Sep 17 00:00:00 2001 From: Ryan Date: Wed, 3 Sep 2025 19:24:26 +1000 Subject: [PATCH] First commit --- .gitignore | 35 +- LICENSE | 21 + README.md | 173 ++- __init__.py | 63 + example_workflows/VibeVoice_example.json | 277 ++++ example_workflows/VibeVoice_example.png | Bin 0 -> 140911 bytes js/vibevoice_wrapper_ui.js | 107 ++ pyproject.toml | 23 + requirements.txt | 17 + vibevoice/__init__.py | 0 vibevoice/configs/qwen2.5_1.5b_64k.json | 112 ++ vibevoice/configs/qwen2.5_7b_32k.json | 113 ++ vibevoice/modular/__init__.py | 0 vibevoice/modular/configuration_vibevoice.py | 248 ++++ vibevoice/modular/modeling_vibevoice.py | 488 +++++++ .../modular/modeling_vibevoice_inference.py | 731 ++++++++++ .../modular_vibevoice_diffusion_head.py | 287 ++++ .../modular_vibevoice_text_tokenizer.py | 214 +++ .../modular/modular_vibevoice_tokenizer.py | 1195 +++++++++++++++++ vibevoice/modular/streamer.py | 264 ++++ vibevoice/processor/__init__.py | 0 vibevoice/processor/vibevoice_processor.py | 677 ++++++++++ .../vibevoice_tokenizer_processor.py | 483 +++++++ vibevoice/schedule/__init__.py | 0 vibevoice/schedule/dpm_solver.py | 1065 +++++++++++++++ vibevoice/schedule/timestep_sampler.py | 19 + vibevoice/scripts/__init__.py | 0 ...ert_nnscaler_checkpoint_to_transformers.py | 166 +++ vibevoice_node_chunked_wrapper.py | 220 +++ vibevoice_nodes.py | 617 +++++++++ 30 files changed, 7582 insertions(+), 33 deletions(-) create mode 100644 LICENSE create mode 100644 __init__.py create mode 100644 example_workflows/VibeVoice_example.json create mode 100644 example_workflows/VibeVoice_example.png create mode 100644 js/vibevoice_wrapper_ui.js create mode 100644 pyproject.toml create mode 100644 requirements.txt create mode 100644 vibevoice/__init__.py create mode 100644 vibevoice/configs/qwen2.5_1.5b_64k.json create mode 100644 vibevoice/configs/qwen2.5_7b_32k.json create mode 100644 vibevoice/modular/__init__.py create mode 100644 vibevoice/modular/configuration_vibevoice.py create mode 100644 vibevoice/modular/modeling_vibevoice.py create mode 100644 vibevoice/modular/modeling_vibevoice_inference.py create mode 100644 vibevoice/modular/modular_vibevoice_diffusion_head.py create mode 100644 vibevoice/modular/modular_vibevoice_text_tokenizer.py create mode 100644 vibevoice/modular/modular_vibevoice_tokenizer.py create mode 100644 vibevoice/modular/streamer.py create mode 100644 vibevoice/processor/__init__.py create mode 100644 vibevoice/processor/vibevoice_processor.py create mode 100644 vibevoice/processor/vibevoice_tokenizer_processor.py create mode 100644 vibevoice/schedule/__init__.py create mode 100644 vibevoice/schedule/dpm_solver.py create mode 100644 vibevoice/schedule/timestep_sampler.py create mode 100644 vibevoice/scripts/__init__.py create mode 100644 vibevoice/scripts/convert_nnscaler_checkpoint_to_transformers.py create mode 100644 vibevoice_node_chunked_wrapper.py create mode 100644 vibevoice_nodes.py diff --git a/.gitignore b/.gitignore index cac109b..78da158 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -# ---> Python # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -8,7 +7,10 @@ __pycache__/ *.so # Distribution / packaging +.github +.idea .Python +__pycache__ build/ develop-eggs/ dist/ @@ -95,12 +97,6 @@ ipython_config.py # install all needed dependencies. #Pipfile.lock -# UV -# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 
-# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -#uv.lock - # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more @@ -113,10 +109,8 @@ ipython_config.py #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. -# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +# https://pdm.fming.dev/#use-with-ide .pdm.toml -.pdm-python -.pdm-build/ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ @@ -167,24 +161,3 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ - -# Ruff stuff: -.ruff_cache/ - -# PyPI configuration file -.pypirc - -# ---> JupyterNotebooks -# gitignore template for Jupyter Notebooks -# website: http://jupyter.org/ - -.ipynb_checkpoints -*/.ipynb_checkpoints/* - -# IPython -profile_default/ -ipython_config.py - -# Remove previous ipynb_checkpoints -# git rm -r .ipynb_checkpoints/ - diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d9d2f32 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 WildAi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index dbfc477..a1be512 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,172 @@ -# VibeVoice-Modifications + + -VibeVoice-Modifications \ No newline at end of file +
+# ComfyUI-VibeVoice Nodes
+
+A custom node for ComfyUI that integrates Microsoft's VibeVoice, a frontier model for generating expressive, long-form, multi-speaker conversational audio.
+
+[Report Bug][issues-url] · [Request Feature][issues-url]
+
+[![Stargazers][stars-shield]][stars-url]
+[![Issues][issues-shield]][issues-url]
+[![Contributors][contributors-shield]][contributors-url]
+[![Forks][forks-shield]][forks-url]
+
+ + + +## About The Project + +This project brings the power of **VibeVoice** into the modular workflow of ComfyUI. VibeVoice is a novel framework by Microsoft for generating expressive, long-form, multi-speaker conversational audio. It excels at creating natural-sounding dialogue, podcasts, and more, with consistent voices for up to 4 speakers. + +The custom node handles everything from model downloading and memory management to audio processing, allowing you to generate high-quality speech directly from a text script and reference audio files. + +**Key Features:** +* **Multi-Speaker TTS:** Generate conversations with up to 4 distinct voices in a single audio output. +* **Zero-Shot Voice Cloning:** Use any audio file (`.wav`, `.mp3`) as a reference for a speaker's voice. +* **Automatic Model Management:** Models are downloaded automatically from Hugging Face and managed efficiently by ComfyUI to save VRAM. +* **Fine-Grained Control:** Adjust parameters like CFG scale, temperature, and sampling methods to tune the performance and style of the generated speech. +* **4-Bit Quantization:** Run the large language model component in 4-bit mode to significantly reduce VRAM usage and improve speed on memory-constrained GPUs, especially for the 7B model. +* **Transformers 4.56+ Compatibility:** Fully backwards compatible with both older and newer versions of the Transformers library. +* **Force Offload Option:** Toggle to force model offloading from VRAM after generation to save memory between runs - now with improved ComfyUI compatibility. + +

(back to top)

+ + +## Getting Started + +Follow these steps to get the ComfyUI-VibeVoice node running in your environment. + +### Installation +The node can be installed via **ComfyUI Manager:** Find `ComfyUI-VibeVoice` and click "Install". Or, install it manually: + +1. **Clone the Repository:** + Navigate to your `ComfyUI/custom_nodes/` directory and clone this repository: + ```sh + git clone https://github.com/wildminder/ComfyUI-VibeVoice.git + ``` + +2. **Install Dependencies:** + Open a terminal or command prompt, navigate into the cloned directory, and install the required Python packages. **For quantization support, ensure you install `bitsandbytes`**. + ```sh + cd ComfyUI-VibeVoice + pip install -r requirements.txt + ``` + +3. **Start/Restart ComfyUI:** + Launch ComfyUI. The "VibeVoice TTS" node will appear under the `audio/tts` category. The first time you use the node, it will automatically download the selected model to your `ComfyUI/models/tts/VibeVoice/` folder. + +## Models +| Model | Context Length | Generation Length | Weight | +|-------|----------------|----------|----------| +| VibeVoice-1.5B | 64K | ~90 min | [HF link](https://huggingface.co/microsoft/VibeVoice-1.5B) | +| VibeVoice-Large| 32K | ~45 min | [HF link](https://huggingface.co/microsoft/VibeVoice-Large) | + +
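+
+If you prefer to fetch a model ahead of time rather than letting the node download it on first use, a minimal sketch using `huggingface_hub` is shown below. The ComfyUI root path and the per-model subfolder name are assumptions for the example; the files only need to end up under `ComfyUI/models/tts/VibeVoice/` as described above.
+
+```python
+# Sketch: pre-download a VibeVoice checkpoint into ComfyUI's model folder.
+# Assumes a default ComfyUI install under the home directory; adjust as needed.
+from pathlib import Path
+from huggingface_hub import snapshot_download
+
+comfyui_root = Path.home() / "ComfyUI"  # assumed install location
+target_dir = comfyui_root / "models" / "tts" / "VibeVoice" / "VibeVoice-1.5B"
+
+snapshot_download(
+    repo_id="microsoft/VibeVoice-1.5B",  # or microsoft/VibeVoice-Large
+    local_dir=target_dir,                # download straight into ComfyUI's model path
+)
+print(f"Model files placed in {target_dir}")
+```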

(back to top)

+
+
+## Usage
+
+The node is designed to be intuitive within the ComfyUI workflow.
+
+1. **Add Nodes:** Add the `VibeVoice TTS` node to your graph. Use ComfyUI's built-in `Load Audio` node to load your reference voice files.
+2. **Connect Voices:** Connect the `AUDIO` output from each `Load Audio` node to the corresponding `speaker_*_voice` input on the VibeVoice TTS node.
+3. **Write Script:** In the `text` input, write your dialogue. Assign lines to speakers using the format `Speaker 1: ...`, `Speaker 2: ...`, etc., on separate lines.
+4. **Generate:** Queue the prompt. The node will process the script and generate a single audio file containing the full conversation.
+
+_For a complete workflow, you can drag the example image from the `example_workflows` folder onto your ComfyUI canvas._
+
+### Node Inputs
+
+* **`model_name`**: Select the VibeVoice model to use.
+* **`quantize_llm`**: (New!) Enable to run the LLM component in 4-bit (NF4) mode. This dramatically reduces VRAM and can significantly speed up inference on the 7B model.
+* **`text`**: The conversational script. Lines must be prefixed with `Speaker N:` (e.g., `Speaker 1:`).
+* **`cfg_scale`**: Controls how strongly the model adheres to the reference voice's timbre.
+* **`inference_steps`**: Number of diffusion steps for the audio decoder.
+* **`seed`**: A seed for reproducibility.
+* **`do_sample`, `temperature`, `top_p`, `top_k`**: Standard sampling parameters for controlling the creativity and determinism of the speech generation.
+* **`force_offload`**: (New!) Forces the model to be completely offloaded from VRAM after generation. Useful for memory management but may slow down subsequent runs.
+* **`speaker_*_voice` (Optional)**: Connect an `AUDIO` output from a `Load Audio` node to provide a voice reference.
+
+### Performance & Quantization
+
+A key feature of this node is the optional **4-bit quantization** for the language model component. This is highly recommended for users with memory-constrained GPUs (e.g., <= 16GB VRAM) who wish to run the larger `VibeVoice-Large` model.
+
+**Benefits of `quantize_llm = Enabled`:**
+
+| Model | Performance Impact | VRAM Savings |
+|---|---|---|
+| **VibeVoice-Large (7B)** | **~8.5x faster** inference | Saves **>4.4 GB** (over 36%) |
+| **VibeVoice-1.5B** | ~1.5x slower inference | Saves **~5.5 GB** (over 63%) |
+
+As shown, quantization provides a massive speedup and VRAM reduction for the 7B model, making it accessible on a wider range of hardware. While it slightly slows down the 1.5B model, the significant VRAM savings may still be beneficial for complex workflows.
+
+### Transformers Library Compatibility
+
+This version includes automatic detection and compatibility for both older and newer versions of the Transformers library:
+
+* **Transformers 4.56+**: Automatically uses the new method signature for `_prepare_cache_for_generation`
+* **Older Versions**: Maintains compatibility with pre-4.56 versions using the legacy method signature
+* **Fallback Mechanism**: If detection fails, the node will automatically try both signatures to ensure maximum compatibility
+
+This ensures the node works seamlessly regardless of your Transformers version without requiring manual updates.
+
+### Tips from the Original Authors
+
+* **Punctuation:** For Chinese text, using English punctuation (commas and periods) can improve stability.
+* **Model Choice:** The 7B model variant (`VibeVoice-Large`) is generally more stable.
+* **Spontaneous Sounds/Music:** The model may spontaneously generate background music, especially if the reference audio contains it or if the text includes introductory phrases like "Welcome to...". This is an emergent capability and cannot be directly controlled.
+* **Singing:** The model was not trained on singing data, but it may attempt to sing as an emergent behavior. Results may vary.
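+
+For readers curious what the `quantize_llm` option corresponds to under the hood, the sketch below shows a standard 4-bit NF4 configuration with `bitsandbytes` through the Transformers API. It is illustrative only, not the node's actual loading code; the checkpoint name and compute dtype are assumptions.
+
+```python
+# Illustrative 4-bit (NF4) quantization setup of the kind quantize_llm enables.
+# Not the node's code: the checkpoint and dtype below are placeholder assumptions.
+import torch
+from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,                      # store weights in 4 bits
+    bnb_4bit_quant_type="nf4",              # NF4 data type, as named above
+    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bf16 for speed/quality
+    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
+)
+
+llm = AutoModelForCausalLM.from_pretrained(
+    "Qwen/Qwen2.5-1.5B",                    # placeholder LLM backbone for the sketch
+    quantization_config=bnb_config,
+    device_map="auto",
+)
+```
+
+As described above, only the language-model component is loaded this way; the rest of the VibeVoice pipeline is unaffected.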

(back to top)

+
+
+## Recent Bug Fixes
+
+### Force Offload Compatibility Fix
+* **Fixed:** Resolved the `AttributeError: module 'comfy.model_management' has no attribute 'unload_model_clones'` error when using the force offload option.
+* **Details:** Updated the force offload implementation to use ComfyUI's standard `unload_all_models()` API instead of the deprecated `unload_model_clones()` function.
+* **Impact:** Force offload functionality now works correctly with all versions of ComfyUI.
+
+### Multi-Speaker DynamicCache Fix
+* **Fixed:** Resolved the `'DynamicCache' object has no attribute 'key_cache'` error when using multiple speakers.
+* **Details:** Updated cache access in `modeling_vibevoice_inference.py` to use the proper DynamicCache API: layers are accessed via indexing instead of the deprecated `.key_cache` and `.value_cache` attributes.
+* **Impact:** Multi-speaker functionality now works correctly with newer versions of the Transformers library.
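+
+Both fixes come down to calling the currently supported APIs. A minimal sketch of the two patterns is shown below; the helper function names are made up for illustration, and the `soft_empty_cache()` call is an assumption rather than part of the fix itself.
+
+```python
+# Sketch of the two compatibility patterns described above (not the node's exact code).
+import comfy.model_management as mm
+from transformers.cache_utils import DynamicCache
+
+
+def force_offload_models() -> None:
+    # Replaces the removed comfy.model_management.unload_model_clones() call.
+    mm.unload_all_models()
+    mm.soft_empty_cache()  # assumption: also release cached VRAM afterwards
+
+
+def layer_key_values(cache: DynamicCache, layer_idx: int):
+    # Newer Transformers releases drop cache.key_cache / cache.value_cache;
+    # indexing the cache by layer returns the (key, value) tensors instead.
+    key_states, value_states = cache[layer_idx]
+    return key_states, value_states
+```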

(back to top)

+
+
+## License
+
+This project is distributed under the MIT License. See `LICENSE` for more information. The VibeVoice model and its components are subject to the licenses provided by Microsoft. Please use responsibly.
+

(back to top)

+ + +## Acknowledgments + +* **Microsoft** for creating and open-sourcing the [VibeVoice](https://github.com/microsoft/VibeVoice) project. +* **The ComfyUI team** for their incredible and extensible platform. +* **othneildrew** for the [Best-README-Template](https://github.com/othneildrew/Best-README-Template). + +

(back to top)

+ + + +[contributors-shield]: https://img.shields.io/github/contributors/wildminder/ComfyUI-VibeVoice.svg?style=for-the-badge +[contributors-url]: https://github.com/wildminder/ComfyUI-VibeVoice/graphs/contributors +[forks-shield]: https://img.shields.io/github/forks/wildminder/ComfyUI-VibeVoice.svg?style=for-the-badge +[forks-url]: https://github.com/wildminder/ComfyUI-VibeVoice/network/members +[stars-shield]: https://img.shields.io/github/stars/wildminder/ComfyUI-VibeVoice.svg?style=for-the-badge +[stars-url]: https://github.com/wildminder/ComfyUI-VibeVoice/stargazers +[issues-shield]: https://img.shields.io/github/issues/wildminder/ComfyUI-VibeVoice.svg?style=for-the-badge +[issues-url]: https://github.com/wildminder/ComfyUI-VibeVoice/issues diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..636f83b --- /dev/null +++ b/__init__.py @@ -0,0 +1,63 @@ +import os +import sys +import logging + +# allowing absolute imports like 'from vibevoice.modular...' to work. +current_dir = os.path.dirname(os.path.abspath(__file__)) +if current_dir not in sys.path: + sys.path.append(current_dir) + +import folder_paths + +from .vibevoice_nodes import NODE_CLASS_MAPPINGS as BASE_NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as BASE_NODE_DISPLAY_NAME_MAPPINGS + +# Configure a logger for the entire custom node package +logger = logging.getLogger(__name__) +logger.setLevel(logging.WARNING) +logger.propagate = False + +if not logger.hasHandlers(): + handler = logging.StreamHandler() + formatter = logging.Formatter(f"[ComfyUI-VibeVoice] %(message)s") + handler.setFormatter(formatter) + logger.addHandler(handler) + + +VIBEVOICE_MODEL_SUBDIR = os.path.join("tts", "VibeVoice") + +vibevoice_models_full_path = os.path.join(folder_paths.models_dir, VIBEVOICE_MODEL_SUBDIR) +os.makedirs(vibevoice_models_full_path, exist_ok=True) + +# Register the tts/VibeVoice path with ComfyUI +tts_path = os.path.join(folder_paths.models_dir, "tts") +if "tts" not in folder_paths.folder_names_and_paths: + supported_exts = folder_paths.supported_pt_extensions.union({".safetensors", ".json"}) + folder_paths.folder_names_and_paths["tts"] = ([tts_path], supported_exts) +else: + if tts_path not in folder_paths.folder_names_and_paths["tts"][0]: + folder_paths.folder_names_and_paths["tts"][0].append(tts_path) + +try: + from .vibevoice_node_chunked_wrapper import ( + NODE_CLASS_MAPPINGS as WRAP_NODE_CLASS_MAPPINGS, + NODE_DISPLAY_NAME_MAPPINGS as WRAP_NODE_DISPLAY_NAME_MAPPINGS, + ) +except Exception as e: + logger.warning(f"[ComfyUI-VibeVoice] Wrapper failed to load: {e}") + WRAP_NODE_CLASS_MAPPINGS = {} + WRAP_NODE_DISPLAY_NAME_MAPPINGS = {} + +# Merge and export +NODE_CLASS_MAPPINGS = { + **BASE_NODE_CLASS_MAPPINGS, + **WRAP_NODE_CLASS_MAPPINGS, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + **BASE_NODE_DISPLAY_NAME_MAPPINGS, + **WRAP_NODE_DISPLAY_NAME_MAPPINGS, +} + +WEB_DIRECTORY = "./js" + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', 'WEB_DIRECTORY'] \ No newline at end of file diff --git a/example_workflows/VibeVoice_example.json b/example_workflows/VibeVoice_example.json new file mode 100644 index 0000000..202648c --- /dev/null +++ b/example_workflows/VibeVoice_example.json @@ -0,0 +1,277 @@ +{ + "id": "b91265e5-1b03-4b63-8dc3-4abd9a030e08", + "revision": 0, + "last_node_id": 11, + "last_link_id": 29, + "nodes": [ + { + "id": 4, + "type": "LoadAudio", + "pos": [ + -1900, + -1130 + ], + "size": [ + 274.080078125, + 136 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { 
+ "name": "AUDIO", + "type": "AUDIO", + "links": [ + 28 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.52", + "Node name for S&R": "LoadAudio", + "ue_properties": { + "widget_ue_connectable": { + "audio": true, + "audioUI": true, + "upload": true + }, + "version": "7.0.1" + } + }, + "widgets_values": [ + "male_rickmorty.mp3", + null, + null + ] + }, + { + "id": 11, + "type": "VibeVoiceTTS", + "pos": [ + -1570, + -1130 + ], + "size": [ + 460, + 510 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "speaker_1_voice", + "shape": 7, + "type": "AUDIO", + "link": 28 + }, + { + "name": "speaker_2_voice", + "shape": 7, + "type": "AUDIO", + "link": 29 + }, + { + "name": "speaker_3_voice", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "speaker_4_voice", + "shape": 7, + "type": "AUDIO", + "link": null + } + ], + "outputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "links": [ + 27 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-VibeVoice", + "ver": "37803a884fb8f9b43c38286f6d654c7f97181a73", + "Node name for S&R": "VibeVoiceTTS" + }, + "widgets_values": [ + "VibeVoice-1.5B", + "Speaker 1: I can't believe you did it again. I waited for two hours. Two hours! Not a single call, not a text. Do you have any idea how embarrassing that was, just sitting there alone?\nSpeaker 2: Look, I know, I'm sorry, alright? Work was a complete nightmare. My boss dropped a critical deadline on me at the last minute. I didn't even have a second to breathe, let alone check my phone.\nSpeaker 1: A nightmare? That's the same excuse you used last time. I'm starting to think you just don't care. It's easier to say 'work was crazy' than to just admit that I'm not a priority for you anymore.", + false, + "sdpa", + 1.3, + 10, + 56109085141530, + "randomize", + true, + 0.95, + 0.95, + 0 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 8, + "type": "LoadAudio", + "pos": [ + -1900, + -940 + ], + "size": [ + 274.080078125, + 136 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "links": [ + 29 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.52", + "Node name for S&R": "LoadAudio", + "ue_properties": { + "widget_ue_connectable": { + "audio": true, + "audioUI": true, + "upload": true + }, + "version": "7.0.1" + } + }, + "widgets_values": [ + "male_stewie.mp3", + null, + null + ] + }, + { + "id": 10, + "type": "MarkdownNote", + "pos": [ + -1030, + -960 + ], + "size": [ + 420, + 210 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Notes", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.0.1" + } + }, + "widgets_values": [ + "## Models\n\nWill be downloaded on the first run, or download them manually and place them into the directory: /models/tts/VibeVoice\n\n| Model | Context Length | Generation Length | Weight |\n|-------|----------------|----------|----------|\n| VibeVoice-0.5B-Streaming | - | - | On the way |\n| VibeVoice-1.5B | 64K | ~90 min | [HF link](https://huggingface.co/microsoft/VibeVoice-1.5B) |\n| VibeVoice-Large| 32K | ~45 min | [HF link](https://huggingface.co/microsoft/VibeVoice-Large) |" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 3, + "type": "SaveAudio", + "pos": [ + -1040, + -1130 + ], + "size": [ + 270, + 112 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "audio", + "type": "AUDIO", + "link": 27 + } + ], + 
"outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.52", + "Node name for S&R": "SaveAudio", + "ue_properties": { + "widget_ue_connectable": { + "filename_prefix": true, + "audioUI": true + }, + "version": "7.0.1" + } + }, + "widgets_values": [ + "audio/VibeVoice" + ] + } + ], + "links": [ + [ + 27, + 11, + 0, + 3, + 0, + "AUDIO" + ], + [ + 28, + 4, + 0, + 11, + 0, + "AUDIO" + ], + [ + 29, + 8, + 0, + 11, + 1, + "AUDIO" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ue_links": [], + "links_added_by_ue": [], + "ds": { + "scale": 1.2100000000000004, + "offset": [ + 2024.7933884297524, + 1252.3140495867776 + ] + }, + "frontendVersion": "1.25.11", + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/example_workflows/VibeVoice_example.png b/example_workflows/VibeVoice_example.png new file mode 100644 index 0000000000000000000000000000000000000000..92d3a6c1374282fdbdfdcc92f1d4078c3d3484f1 GIT binary patch literal 140911 zcmeFZ2{hF0`!F6UBwLEgHue_#I(AWpWJ@K5F&OJGmYJ~*sZd#?DA`J)gzRLiY=!L0 z*hBU$>tHPJXNIL*+c%7YCq6I<5Kf@&mQJh)VZrDH>8Wb6$0tD$BWz6l3R}3Tvkk6S_&>D zBxWuuAtY%oEg=N9v;e-r%q?YMq7tHTQ82e4w+kF&>uQU182-vJ{j$c^%Ff&=g_AYqng(3ZAHU{NO|afFbVtf;7< zkeHZ+sEHuAtF0$6?7Fy&q_8MhR8$5mCN3o?CLs-USs`H7zyvR>AUD#*67E96#Q}gr z`eN(oghuT^L?A6-2w)vcs3XjQ2o$yp@GpdctLg&ixY=4-!%+YL?%&1%;D|fp zmUBcS5LiJk?!RCDvL>*^uMyupHP21{|fZ7-UZz=Xa8<@yTgA6YXZ;~jrwOwk~Z=a}I#o!~AgTcSXCaa%aLkfem$0_kAoA!LDcfdf-8M1FFM3QGt}i35*109!#sYJ#kg zE+7N`i+>RvG#vW-syk$b0__$^M@P5?3TBP~SnsvVdpVQ~8ZNl=0ML5(ahC+7Cx8wB z4m&c=Eh8){EXIum+IH4;g<@a`G)bVj9bgDJ)Wz1q-T~=?@(^}#lHe93iU#S22~j}4 zpNd_oT(LEWUqRYhz%N}g0JipB>ZN4x&jvEm$US1(=I%9K+j(KyUqR_`|qG^9j)Ll za7PO`)D;DHa{bBCKRb58YU=zg(7%rVchIhI_%F6(_vt?aAr1QXl`WA_SD1s-uk`!1 zX&1(S5SO2${~Z_#?f}>t7z$0YD!Z!h_r`x?*sr7i3xG%`sMAkIp>`kt6$okEf498- z@5>V(|1)EVNZ9|yp)mRX*0Rg|C(EuzwCtBPg??B*0OK74FChaKm4Jc4l2+zmD_L_% z2@46ZI9S?B+EQ9d(n7{cRz?gg29p6y>fgEmKNcjKQh<1WTIU}lgv5lURDop-cI+WY zOb(<8vVb`Xpg`tuge@Ec2YDdTAWK_IkSz)XvxeC^3IikDV74f@CCTdmd}WXg67AwD z47&9FD;G!y35*80+B#YTUKpSi5P~2_QWH^`3WLrfNz2;6fc0UH9w1vwI1E_D4Fq>E zhq<`GT!}M5C>t0G0OcwOvO~L~fYnf_oo3+lgJ1}xBV5_o@jK??a-a)Hq`e@3oxLN{ z4frkK0CGjTxOfNxV_j^mZBWV}L%^>mt_a`;_*KARLjnF1vC9GG0v85pdw|T5uC5?U zz!h@>_6Lk|u|?Se_=5lhEdlES2O%9nfWHMGhO&W!h(12Z!PXIt0%mIx$wZ_aKs86u z&fY+-aKO{DBs!wzE^rtyM-T)+-N7DYVFS0Y2RV3voNR#22>;v+z=ktF5mN?T0w^Ki zN&*Qe2{;JuZh;1TTq0|L-sk%CBI4&Y{zMv{nVi6mlZLE5e+ zaZNbP)fVnT9O(-400|Hy9}Kq8PtmzPIeg6Qy?TmNW~kdgpod&i9g|2XRZ7&ysF{*U7( z@&D47{C{!NcNOzXRQUf#+$8&-wCn%PaTDOyxY@%0F=zr#&8}tBhPl{VBHbJTbxZWS zz6VV}LPAhTR{9r-mlPKj6#s{q3CK*4oJC^N;zxeqH$wjG$C7qp`w2#A2c z_Y=DuKn^fRG+=o=0EJ`;azel?fP4V4-PVz$7J=n#U4Rf6>Ea;=5+TJAt|BOutH=-I zNCfS@g9gYOq=s}P8ZXcVxT7`7255l59f>{^;B5bF0~x}J<^<#o%oN)B_x<0gvQ5Zdoby#e4h{53l!D)~pAKpaRNh>3{< z0RGGq0I9Y=W&WR%R)Dzwnz8~OcM-G46jXUF&b{bMNl z*D1hp`j?QGWSmGTwO_YDYWQ^%|D&uMK+&J+FW}+#9Mk`H=IU3r{?`hH`>!PVyBrok zrvFaskf`{LkR8Ru=&%MKY6G0$-CnyOtka~V;l@$cm#kKT!}f^>*At7l2}X}$Pp8>azFMgDlCaj-2}7l*~73$^X!>R?qt)O z)UhU)AJ5S%%S-XTtfzh~pg~2=73?P}tEI&S_h!s1_+c6v6*O)WhEJDXFAdn|mV(;Sfaat-vB8S5)lxZjxOcH#56OzKYZ z=Dcx&{d*~(tCA1vt6j)^C*#}zlqQ3$lblOI!PHVh_=dlI+yJ=hGgV0y*C(9B^^ZTP^Z(G)X`6A)Zc1^ zunVCXKFK@sB_g>&P_>3D613R@_Fd`e6Pku zEAm&S6lYcb_$01S+uY^`kpmwoh)U>EO!4#M;Z%2S?0%MFZ`d)AG5+P_9(aRoV(yLJ z>wGF3$(_l6`I)Dy!gCmy=V!_Vf_P?{UE%p{K=@e|&)*mIJ1+nXOonnT>r%Y(yRm+P zh|ym^dISndRmM~ca+l|w^HrTq=YHK!Vn3Bl$!{~7*nxp=fibnj?Whbe>ec<${e68R zuCGObp@z79`fh)vQKB@J&EcQ(^yDdALYMOMrL~v>V*hJio8HIVx3-X464Bz!!Hs)X 
zx}Rr9zOF|&5Uc@FGR9`eFny9!@EN!LC=Ax|RRfVJ5g&s?zT}O`PpPWi| zhib9B#ov#Vi#;OYG}iUNs$ z0{3Bk;PU^CM4plh8*Jhwx9&fQ7`&FhrsA=buyeybAjn!+ zKq`}TI@Zi}d9t`cIew#Nw&<7ScI2HM>pa`+z_*_3!y^l^IkT012(Htq*R=5&?b(aX zVPcn!zTDs-?F=QWd-m+2d#_eL-#WwgQ0>x0JHejW4%cjgHh#}v)uWq zvI(9b+)he~goY84!Ljn~YhYH`B^@arZp`y6^9YHhq_SCZGne`Zl9(oaQ#4Hs z31Rj8MLB9^ybP~$sabxDWAX>uCn*K@QD&5R`^^&WvK@ja*|g5ru+n+rWFzPKwFkaj zGy~@ZsWpeTn|hQyCrHT2#PjDp3@YdXCX~6qwp=@O_-G~uNj1%O2n=mj5Cc;H(8Jv^ zYckWk&NUkdg7w7~Qr(myEfn>ljQ!M>09m@o#5BVwT_&Aqm3oA36SHlEv26y+Tsn$i zU+4#vLdS0LUS{!5LMeE{GLWkrTFg3q28h2Bd?OPee$22Mf}9P4Per7!%&wcQ_SIfA*fYBMg^4IV;^D< zX*lRPut=4VOccR&n`Mt4e?wv{rZz#JuDp%wuE6$XI5X=}xUeK4J#bD&HLL3(*-%i} z^aEzDZN$bbRb=U}6jk-sWPw9});yS&z*WrQ?smbET+00R+SoE!csACA3|{iJ4|UA6 z&(v9c-;Q>2CPX;kS)XT-%n_Ag_ZKaNC+5x~DDy_oQUFIi@*|$T!4W{S&;O_P1mCp> z;$BG->sQ8Os&c*~aZ%N6A&lZ|6a-3@4BxI4KphsEp{!i_?2+l zj|J;g6PqQsc=q*zC9(DV{wCy~Ck?uIV#;d1Gmw4T+NV5f$wYxP$LV-(-|&>y<;{g7 z>+vcECd;CGd4JyR6N>F8S3+nHz8PDZCfVGKmsv>rXtMrpa8`@#8Xi9R(|;e)zcFqn zn-&Y=!~NQw35(YWYf_ZQO64oiz$Wf6RvJ^yAOx1BgfbP!R!>>aW#zf1xX26J5uH-SXND?lTNMGj!wm?fACgIc1@dQMxA@=w@S%l7I zSu*m#VXlpa^Lhk9v)XE-4IE6e@nbxjqS^g>8k2x1#d7N@o|ij)S-I~}do!ihq9xMs zXX8J}mVFM>baiNX^E0XU=+~;3lCx3PHTRIklD&V2xxs}NPwPF{yT#f^ z;v`4g=~2fU4Gn?kWMoupn+6;)8L;Z{97K?G0aaeC=ABg~hM8+*17Pi3<<3OZMEwrC znMZ>b;@SHCO7)bBc&-~%!%uhMK2?l%8o<)TZR3^M`lZ(Ae@_onS~1Zb$@_$zPqoY} z&vhg#RNtIjMxsS|_9Vv~XSHUMb!9uEYB&@z{=a6x+gQ2aeDn+CvR+et#hM;Wywzbn zS5QTrhI#BkSL{~vRkThCp5Vz%NA+5#giG8CUI$y6CiBio*~a*~LKoB1wZ zZQ*dI{aEF-H66trFM@B3E;Waua6&@{FlHUUZuXK^N=Nflna*FBCg)ncOmW&YQcaH{ z)lStsOHzd}KgOYJo{;t_4sx1Ux&!qM{ciR@K(ez+eoJGi9KaA!rE9p9E2y4(B&h1e_#0{y4A4ZkVPJ&7z z$SSxvYA4)J0m?B!tjE@-KI%$u~(3d%LgHo#kGp{tWZPw9@v9Syht8@6^m~Y{k8% z7Qesp=^pV!7*WdF)HY0;W4R-c0uy?uB*z}hbXZvPWe zlC73Rcf5dxX0pZu1TMN|K`MVflch&)> zW=1&Ym*It2&3h2rfpQIx55RcL^W7QMq*t$PpSt0&dYjV!W$FPj#{rQI>}YisexfsA z9m~8j-4e+PG}bwzBw?QerOBv9f~$iXz69OA(!hzAu@!z2_A&DE9NXB^cV6jB?7g)V zT|q*J2YNamG@BL#eBEb1R|^AM4#$W ziUjVJ?oO1@Lc3J`7Ih%9k)!q6fvQ(onwpjL;f~NlDYJB+Xs4nFqslpJp`41#$c7G* z=`4x9s`@6x;2TVO5YnXujT^rWC}t93r!SWz&i{oE*J?sb%l&D9BWk%g#6F zNJ=nScl9@WQfys|Xf({6tM8i;T>Cr~EPRGPZE|rv`9`_EJKooHEmqO;nQ-FL_+nG? z9?yKFQo(w_w(Pwq-SPH(SNKTx@*T&RX+Q5~c~g#d{M8tS(X7C_!S%ZN#|~rl1DtKC zLMD&&#-C>NWOTWtI{rHgI1R5mTjk!vxhEX0nR5K}yt`3j7=BTcIE`=A} zV{m!!->sCBEM1&L$j!r4&H5Qpzbx=j-~%O+aXRs6CReJ(*NBSIbaj8-!NW4!?^+DlA84YoWNi`M&EsDUXx5g9z* zYOO!xsR1o;P0l}U^6Po+^TXb$Oh4>8&IFZ8U6v{nxlZfRJ9QTsU zqRK!c{LuxHiz)&KL@00<-BxDK1RjG0u(fT@LhRLKcoZygiSDBj9~Zt-H8WnKf^*t2 zyWol@JpZP`$dBF7skztS2DQsh)#eUflN9W;oOQAM` zA|-D$Hl>Q%WXL${VLdSkg2q8`YpoDk^r%Xq%J*r~IaGjUuM*KC+Eqk&Q?Nfq&;MXL zG6F2>eViFGGsv-|lAkot4Rpy5$(n9D5Tz7`GOV8Vd6LqgO~os^O&0Erp3J3_IIOxk zU7FD0ks7lrVQT)8E`eKW3$LbWdgfwWaL~urLWxnGrXTueh*5SBgt+#(=2$H~=!z7= zB)rAe(f^1q#sdSRqFM3+GfE_*47k$YhI4!sK%J z=eh`vPLUoeXC9YjVHp)d=niICG4svAeo3w}ZtND7TZU8+~A- z(B*RO+1^^)vmeOp@JqC-f-b zh_-?zs|K5TrIVw6TV1cwR{Z(DQ%O!TvGI7N|NJXhRwFmg;ZA{*hh6`xxjj;?lBNh& zDBKxI`;YMPr=T$WWS)GJmAz zn^k}x+VT6WD%(%VRo?{9T%z>B>K`=yq7vLx$yGHDCSAl@$1ox#Prhk*jPQm^I4P@? z#!*_5z0#fghey;G_5t0gil0XENLnFl25oz9rinBmZIlek6fR04UTnSGLfqhxx6Ap) zxNs#cT=Xq9!@XhgmlE#$H-1?lA*d-}eY9>PnwKjg@pw_k$;m zQWgJj)nqSuXj(IfHAov;$_r1-tSEJd`*~`>OJr>$E4M(~AM?VQ8k=CR2h&xbtd(iES*pi|B+|E66aJmIvQq-IJ&0$u1Sz4EiGWtpW- z_uj-ujY(n*l&Qi~+3WW?#KDjtrW!rYso$@m9`Rr`>(#yyecP-B5ET9|uM8+k;*@g) zkwk|jcu#1QHF4RqzeDoFXv(-`;E5{P)AfN7*?XRNXQWBl;4pVq@(Up&H@mwe`YL3? z8c_myj>|-3w8-Fms^D9S8@1c99fTHFA($s-!Jb329wnaTaRmO0eRso_D#dFjq!}Fvu zOfpLH+XK)5uC*s$|L6S&s*Kv3NvClLI(xaLSJ36o`blK&Uy2C+CmKm^ZL$N?T(|R)(1H7e5Lq}}d}1{n zdG?)_0Ye~#$xo|$2%T}q()q%=V&U7dUQHmRXR;G5scuJaveQV>P@>}pTkjsh+W40? 
zd8#TdNm-KD4_cT$qE{TA&2R`6Rib@GWg_yL0~1cTb4J6{0)Ra{PDW{RQ)>9z0~}>w z33P9+pH}2iEU@>+SPVCt?Nrkk)=slShhMuu+(TtvO7so?iSUArKI#qgN^=-42;6}N z_P6lvF{>Wo3~r^0flX&AlztzD6Bs{kt|Mk$nsu;4V)9Rc3`<10^R6p%L9^kd+)koT ztIQZvFzKwS!U7p4yk5CRmEZl7DqEyib+Jfx_fNvKjl#xQd$>_}|H`^;EJD(79G%+k zcGR7Bg-M)4-nxavG)XwdZD(cJGU2+2$~oY&?7l}Vq>X;7&CKnV9wp=z{R;*)2RKxE;t&7IysiLQ!a1Bt-SsSGpKpuY#Mm6 zUrStt%x8G!&Ak;#`^V(-pEqR38qxFhM#UihG>SMMq1-6%1U`W&OTDeJ*2`7~H@?o5Y zV&GHjc~p9f)w%Mij5mUX-HrXyMD@54@xevwJ>n)}eR9zomyt+ML2*XyKQwcalLFWX ztO2?epZ9H4^7aEShn3VnLr9XiFV~~K6UL4A@7=SO*XcJnMl@ugNCbUVer;cF2AwkW zmKIm?>X!Fww|1ZWW=!6eHHp#{=60NHmJ|++bb5Zz^4uMd#g4qX^PJ9tqxW^2kGsv0 z!r-=Ay56bd#|ifz&oM5Z717JX)eeKP1OIue;COnSzlhjHknhP=Uw85dmA7e18+QYD%tv#wlXMkGw!z_z~x7 z#K>mrK+1)PzRgpU#BYF)mcJU3=pl0fHElG`hv$!3Z0kc9N4Z%M2W#qZ@sC{ODSEK# zV=^N2i*^|r?5P~Iokbf??6 z7ZjhxrQFRD^GNEv*u(uOQ^<7m>4}@-ULVeXs?u}BW-*|a8RBMovlF$1okoe{x0b&a zA1eHmns82a6h^XGv!-@Z5%(o--x_uajXTZ$Q_hYD)2X=2#MMHChh2VVfc4O(u(qcv zwdG9+p}6!x;{M9e#l)r6Pn~&0{U4IvWm59YvFC`envaqBP%T76aLN6H2m_d-?w~gSD{Uj#sjy`u0Qko>h;BVSpUAAA`>)BwmSco zUz}kABV9!55rIJr_JxJ3zLU`f&^lUjzh0S@9}ToWYhzEnKe{6G*v3%e+sd-3R(CIL z#B9laTY7cve|oPQcjLY!v8?8apHwC#l#r}7eP}4rAh6t@s$$*r(Oz_xb*dhYShUjR?UYH{{*^|`Z=TmHF?NfN%@6YvfqyqT&y-I>HMPi)< z&I%+r1ctqx#hJUY{iXVTQ2g1MrgrK>q;`Jgw7pNEip|u{+#+5P|hX|t)R))Z} z+Eb0Igo~bH2?0Mafp9dEx=CD?8fw~J~grWZ)VcL`{lrEp>9;Ze1!|H zRg*Z=%Hvg{fk3*#Dy?I%Qgb;R^EbF00rT=P4UdaaknXkXm)R9 zHZxVQ%uH&V%%Y2K^o`4wk2>mfZa!2$u!)9BpqQ2!=)t0>1J==N;k4oLMs}Z32~*VT zQ)chIw1W-b`M*QTO;N^Ay)5nlQK=DtVu<|6;yH57|9GvkZ{hhr`e2+HMdKml`k{kiC@Y*c5W ziIsX`%6gn{_N26QGL=)>Yh=EwPC209*?|GjIGiQ}_P+p`{mR`HV3Oan64GOsf8RLM z3Wm7Qij1!NPFs1 zSHD$UfAkn_9I*KBETFy5kWxkKPqHRyQwfv_S_lg!m#o{>EVQ=6w*B z?Hm2#U`yx(IE^||sxxw_`x)hnTSsEHpo9>*)pgW3h_?~U2;;l5=9?f*`-{MX<0jd8 zVdT7(27OX3VW8H~6`+u&>1c3%eX$?KQdA2wi5P(O|(_s(Nw zTowO)a6?fVPe$Z)3bLD9+nHN+Kzx!9?l36H1&Y{2fgaqlexMfsx)zGbd+j4#LA45I zs`#2eQ6wS8TB0o%%UaP$mD$Afl`#5g$R1eG9j6p|RmT&(GPA*(;W$&wxIjCr2tHTs zuMtU(5w$=B@8bwOac#KO@xr{jX+r!II#2LNG{n!KrjS*WK=Q=h5t#A=RBLAGuo|=} z{d8ZQz?@pbfj`Jk+J26pg|C;=?&bxk3apb#V{>>8+J?m#EOg%_U=yB26MY%6kJme-?011h1nVw zKRgO7$nwhSe&~n4ftw*Bhv^eglR$8YB`pBtuuw^709buJPshxEOD%!iz>@K_Di)N= zG-!v`E59vkQzWA|;#R_kFz?F1WjzWfZ!GaC9!4tw&{lS?FW3+MD(`WuQoX&3=l+)3 zm!ss5X!~Ot%9~`IR80YEy+(DxpN*F{wC#4vkZ_=?^$RpC16`^9? 
ziF`T!1N6tM9hA!n&Phrk;h}hWXq2X`2kB3Sm;bY6BNDV2oSK@14T@J-HoXZ6d}nPw-AYm0AqFN^gPWEz$v$& z65UTV+w!YP^}7#%F`Kj|v+ejwZ}Q1Ez6F^xC9K$bpZxaCnd_HVdDsaGTcFS_iu=YN z1kFV#V1LMzzCSq8#;1zsrP*r1>;m@AA|zHTE{6u~d;OrxfiqCjIO^ zS(p(GgnWRRyR*tw9zA{L4YMerDMVpYweYr-43LYRyZeh8QVA3TY1Rc2$d zp+WiRW~OxFkLzSAn?R!2HQF-gyF4I_zKW*zDVGB9LO8j$5iDg%9t7jFYr}5Z9$$%i z&@NMHQ0CEKW^y;E_0J9_+aaXEd@*T!en`vt7ee0;Ww68PoZ-tFJ{Y< z3tpSS8x~wYTl{l#Qp9mnQXt!FDbM)}QMEs8Neid$WfDOFOY%JIdq3)Ro4+|*hb`Ec z>ZiMyZ8N7CX_k;a2hb+XI?g%eqO$8$MiWXb5oJq>g-?MDwPLs$d_w*JX90vS@jNdC z9zn82j)&!C*^PsV?MR5pNw}ts1;tGRDiWwi6V=V{-)tA=w2^gIf<;e+MBa{Da%$Ui zAAsNa@wI#8H~s}ugV$YjS-1EwH}R<}SU$$I0$BYHqmkCot#Q*L_X_%uvwnV33(ahxM1__)+A?%C6|dT|Jwh_kAob*sQRU?B%guP^;C50KViu*-By_mm;lgmt*wcbp8UvE5i{SlS4xDtk2FIM(DI)2L2`*?)AO<{|g>mRt$CA^1 za9j!EsHdmayDPZ}*>(X2D<1cYmc;c_7cQKD?c|4xKK)5tuNKuS=m_;&ShV~-C5&-l zLWBHu;t1?-IOgEhWG94`u>9}6gZwGmL|chiQZ=zxYQtaBVrQF#hhX}(*l^g(6w7O} zfielu0BdkPQGyrEAurG_uj#>#oP1(^v#S1O0@^>)C(DDHTrQl=FolaHJ^b2&=n#CH zI8fEB+ROb=TW+Zmbc3^~Fn{)cX+eM2E%0ms?9O5g;Zm1Q>CC)^?Egkd?sGT7XRzE@4dwf(m-jCC z1TA1+sY-h@Z}&3=Qst*9+K0hc7rVUpf-i?jgo|W}ScC+&X4hqn6p`X%+Sa1gPkt6- ztQ}Lg2z{Z$x?!5PA!1Q5u2IBHa&Ay=n4J~UL6bjWP?9uMi5k?1uY{k7j%r#%Ldre`A%pmd=!u@QI=7CJ zff(-%MO0Bus=@9av}Vs@zhPevo3AicTn$27*3<3An+zb8Nx@_v9Br4?U*W!L3} zE3<9h104O8f-~REa3MA5uD>odrq5|aMF(9f)b2TTIfh~8R8jJ6!m~tsOGftU2SB`J~%cBDQ$~msxj*A5zp-^X27uP$!xdDy6$q(*^f@=`wy4 zLw7P?NA6fmnq7VJMra-Gc;TCuV7Cl@rdghkHZ~{l%%>si@>;>J&;`mrd|CsUHwVBG zIqmr1ub=X%%##4uh7UsZ2aO>_P}S}xKo?K`h$e;?v5VO1y3yXa+nv~Pntsf^@3$ag zgmFFH4rm$=UbTsLOX#++iQ~dWiq0NQXGa8FR*z>}-d|fhrS7I7?GPFc{YL)5FTLVAcB?y|*g` zRugm^5IoWA;T~vjW$r_C=p`p;F!cz91(bnskS|q~Oza45KWu@YP3^Zt+>Jno)GBWP z6dlD|!SuJnUWm5{62vg4t+(sd&VC6Wf?>~v7G6Rc+|3{WLt?XGHwHtEBMz*zDY#ZU zT4??AXv?oaOt@S}Dqq?;A8BO_Tl-zm#Nan$&dSck^s@wo%~?&_?;O!Ns!IRhI3`v- zDJ8!M#K4jmozVi?K~+YJL?W)#CF@?qA);V2CLr2A!~f`l4aAExgLAQwWo#!>A*NrA zvwkxaBT_$Skx@X+BxQP=X^nLau|u6iU)y{j5`lFaeB-nLnJ@3F;M+-d`y*yr{_O?d z!Lz_6Q?C(gE7mYRj7ri`Y&T{8wF<;fraLFrNdHmg!oJ^&%=aumg3=nnQdPw?#;F^V z%^>Nd-0-~)pj<>sUnrf3*YA)LOP%oj!;%?YW~2XYU+7MTsD*zb!8ETv8Qo};7=hxB zWeh`Tw4SK9{W}YA6r%w}yA;R0v$RgPb#ugQsE^H=bP9Udi=>{I%NxuoC-`b5%^?FU zx58H-{&pEPT0^kI@QY^z#HBi!gygc6=T@p8Ree%q!;~(|$@dE?3adeUvmNvtuLLpq zs9BvryHwx&Sf7<* zFJPvE0{bOmZX45^$D0{*_v7tYz+SA}>-}AEbgG_Ga@-|Nj?LYvf!?u<=z>LjT@dq- ze0?x={VWnwrXj1bPC8fso*eOiXKKX%0U^Oi=EH-ccJM5;K6gmStW+QpPsunNUf<^D zuow%ClN@Q^*wCrdqURj};9J(yv!$Ik0+3doFxLKc?9iJ(^!oYeN0$(~9(+9a-#Mz5 z(CwarD)x?s-156#)&;;Hn5Q~L=c>))Z~yT{@$;IRh|bEk-4#Rk)>J-W=ephdv*C5} zKFS_Gr+G!czzN`Q)MwZ9FSacQVioNHNih!jPu0&0qed=Y(Dvg2y|U*rm-vL^lbUAo z1H04QHja9<eCqyiw%2Q$?FxqnA$t8~kA4i}7~0G$>yI9MEFNGad4mqp zl&-M5aA{qwoNho%gKIfTbJFfd+^t=F|5x~7>Z3~*OvSTT#Nb;e2R`aox9Sa86&xis z5Wb1W+=;EVwWW8NU}`sbcY#NQrC$r?a3{-Q_k;Ca+hAHe!t zv-soMA|AZs$Nh`E_rA!?bkTErE~?xPM|cxX~-VDaV@wJ`mHTWbG#+e+}c;pa$p!f@d7j{QKFe zXcxyDtTC`|@?UFJuU>Yn7_p&vIYfZO=lr~V*oMkSL*J}<8ol>Q&tc!KVxyZfZRB2n z`0Ypd(bY$I3?%Y<{c$Xt=PmR-+zHh??P9Cuo%O6$S>OhLUtb~5Z$^jLkvji-V!*Nz z*AE>E1+_LjZSg6A{o5Xt@yv8zcuj|kiqJ76L7C^Sv>cH}`{J;GFt`OO_|-+`yawBW z0Yff!N}f1M;=EoTt%vDtDX)_6*9#va-e&}GBm#DdJ+-)euQmqO`}$&LWEp|?_Hgb8 zI(IusV$a-K(S(7_U%7{JjV?Nx!>}pGF2&~|t5(f0^bDpIM&kvF4`jHM%3099l^hcv z-#a{yG#?qTmbB<5%URHF<=X*IHGjp^+)^&ihX3a*5vC*rMsTLkO_s99`|A_il4eo5 zrrg&rV!LZ^J_a2RxR{~g*H6FaDpN4WT_lr4Hq7=v=^SCmb(ySZ01_eCH{Bqfp`c)^ zCF;dX0O27#JsUuBCvIhL7-+IKQD9~jPaDmS)XtyTBZmj{A%l!0o+G_w)?KxS7DiOt z8YmRz+%nX7vBsk}@#ow;pt4n{(#Y`GD*9}3`9*`-F3*R`ZwyAmrhaSB75biQ4hZE> zm?!hm3_q=Xy73!vZMS`f`;5zRLdE2jesyD^4?B7Gwe?BHKdVJl(D0F3@1N zJTD{cQ}_PkN5Sa7AB{y9=;sUgxma?q6S(JXu>IJ+)o34J)*T&qqa7e)bpQ&x1Jd0z 
zUAcM$G)%!!v>`R|LE&MLak`XCwuK+aIb&iNM?VSl`tX-i$L7XN0$1ByKe8f4pj_dP zW*7*Pal)1}oaziKjNrAW!Fp|sb^bQuXWqc+vj$jWk-!gNegL?tkH4;#X-oI`2&zkU zz%~Mg0oCWzccEH?{5kSGeWTUr-eg74supVlislUhS6ZWE`}1A9tQb!eXM3tF`R@Ls zHc6PY&t4F0_t-PQ07P4e&wtLDam>VrU<&)M+#bvu3D!zpseI2LuRu=0sN7Ef1%JWw z!|g@ZF=Z@`O1{+C8~+uf$uf{iI9w$R8Mu#7iHAWV3BSU})seNaG-UyuDFV3rNQ-N! zH@^&?ztgrTlH}J&@~}@h(-)>A>7gbrQse5A^Y~*%1qATNx8Z#yXXiwN&CAXpuo%hNWQ}?yS94f*){pCBLe%$ zppggpP6*FW7*9EY6wMwOV2+>;%y(xXv&Vo$&I-+3a%wo;^8&B?`3;%+eM*Q&0sErB zjg-H!{k(#PK~mOwKJR4?X)c2BM?Lv$i%=!9>tx9o*o5d0zDcmpFO;fF9VjU}gO)f& z8!)^qCS)FPUsaQELLHN_hF^ibIAGHIQ8Z?={F0Rv-HCY_0N3Ze7_B4}ixA#;aSO>7 zA(;S1&R-N00v*fy)iE40i?MuEc9HvZo2|s|3H8CG2XA+UoRhaD;7O}^G73wrIy+GA z!PBGVk+h(iL@Rh%i{xw7!+DS zf@@ScS>o2m{|Fi5F!H|`kPlbQBszc5dk*=Y8e+f~vL-iq7(ZDP-ag}0F@p2XX(a?T z2qSQI-=$ZQU_7z`gL)S?oYl~MImLG&$3Q>g>FH8ljIfwy?b7KkA^DfGR|VR~M86!41gDv%ERoLY?*KB>Vu!+guBLxO*Lza-v)- z{ii3}eR1S;c%Mfr|7pKnWvZ*%`t0hiP;%X` zb1jsJZE1^_jq6tjX3Y-G^yEU`o=6^e)E~Wc+i$VtMw8z;hQwlw<)`TwzHF-f^*`Ia zfD{tijQ%_Nsw1?Kn3@X*a1)-EO<({0B43MuI>1nDkn#HtJP*{5kN(q*Iq5(N`X=TI z?F+tGqI^WB3>P~54hXY=!2wU;6r}~yGmjs^^S$fp;b_z^oq=zZM~px3JcLM4l0~5- zOY)12)=I8WA~)*KiWJ=fj?wf7OJEE}$4(thL)`P?`dsNP*wvKfe~c8A-_w7heW`0| za&b-RqUvgd*QU_A3;%~0?y=e@#f!^qJP%m4&dN3~*z6EDr}$i{+gL1P>;$P1CirO| zJ<(yizCUD5dAJzRJ%3Y8vI=6RNUaXQIxw?63$H#20IM^VAlMWXhe2o#UYx#t0&3Fs z^jwrT%lJ+K2G!g=>25Rx!}Y5`C&Pvi!pOoLE#sbE9-da*mA(|NbEGR^H{2nxOY5%DO2Wfm7QWiasZZZo1pD=vY^p(s&NK zg%kuR%#qR*>>O!e9HZycos+ocSV>add-kWCGPn`mwe%W&Sx(9r0*q?ZM<8ENo>kM*>f~ccq}%v8(w7`%adf zMP5R6u~Khodu7o?tL`gLXul3#Y8aS63&=mlo|PVauk(AlLAUR8X}nTKb`gIBb?rrM z3+4gYWefLJ@?%h-V3>%P{PkNwzW1-DHWhkAuQahRbny?~l z4tY!5rqaW6cq_^T$0_K>_vN{RE^as3KWZgx`w=c=Qj!244y*}tc-ij3HDCILkZ{xs z2n(mg^Za`a--r$BzVP=zuOzMxGRoW>Cm=+9*mitq`MmIY*iMz}Y;2k5@+ep;9UVVh zy0V83_pb-NAtzmG0+zHM?OTK%5RKnk`0qfV93D~t6rngpx5Jj4cF_~11c*i_EN>oB z%5O<)covl{YDrPxpvifgv*%FtC3#56DKxRHobI2w$J|R{+%*S!ic7Rc1K$;om_2|E zqlcKd{rXY$Y44c(CjK|;#z$7FDHciEz(pWWvyiu|KUIL1zJCd;i; z0AuD*(a`?$%AT~Xi`%->kg@SG!5GNP7>m8VF&Di%yMFK5+Jt&0dAO${P%%ZTt_gwIUi$#$qeh`FRF{Tjk#`^50t}vIZTI)A|J&`~ zv;Hd;t6w0<se6ea zE@|T^1Lz1Mf~VEoUD6|0w$ks4BNDT-k1wZYfDgMM6N7?v_vl1tgSi zflW6^O9&!Lr=WCgq*Vlz?hcjMG)OmZ{lqz*d+vSXy~7@k;Zgs+{uOi0`OP`MZx_g1 z&2NR)iT^<$CX{D2l?i+jD3_}3yu+|(Q(VTMev|jn-LDs4_EZf-R`JMn;Vh~!r+4p5 zT}skG7X(SE&^>BrIYLmz99fe&+jV$4?53bRVzDVE&G& z*F*9?aV2fI^qFRk8Dsd>3YBP_$ib}gf%=X0^LJ?r((GZloi++{~Mn2(* zC%89gcunnLC8XCBq8EU$Uq9Slg*dJE+&TPna*|>X5VmrW?H1SS0L{JJu5Z4aOZfuR zPJ}^>-!Yr2j1#vfxi3$psU24f+MHCk4b6;1mtdOb3w^gGz*q!iDeF8|_3C`iFJ7&h zjJ65(=#Sz6t{ypBP5X|J7lZp&B~y8s$-VAmE!v^Tzl-@c9H^b>Kw&OfFP-+WWT{FR`U>$m1Fn$r zh43+a;idq8QK53hDNJvFX|H8x4?J48ekQq9pGI^$-K{Wv*v!J0-%;Mft9t(@0=_ zTAkv?w0i(Dq_>gOFY$0fBqV<3JNyx%0mLk#8JrYiFW`j3L?eTt1Xl8Ux=%>5h}lsW zL~-ct;eqYO9R=lD$u6Q<;S{_xh$9q+#&_*+Hy#@990>?#>_hT%jv1jbC6cL2Xx2J?PA?P#;v+9lUNnS$HHk-JPpdwkZR#=%4+2S`4(Q6RUj^k@7)L;R~zSg{UZs zrS*RQs;@b!T&#=>7e-CiISQAMs~T_5Aaq*9bcjgo6;G%AEH*iPuE{R?tC9dv@rIfC z;`rfBSI~4g<#}B8nl_QZ3>zo7PmamBRag!qr{i_F=00}>K+|~Y3{g;`@tqA%6#W$z zq=;q6FnRn}lEVn}tk*#+`Z(R52GIDhwrL5_w`ej}74*)t)D~peWnqLBF~ZMG*dH&N zi~$kbx6^)jbR11S!2fX}En?RF(t0VC!{FyHwT(1xBh164=BV!o&(yyb?PN?HE%+*q zXPaPBoK_)yZMFQd&&xkZ7=p1nQzPqvG=lf^r;2uOQ0b)sG?8PT(fCL*d)5bEcGz~r zMZKP!N^=w`3LoDa7+9+3LoMqPKX)k)o1rU5u&_#IJ`%Ekj?g14v=dRVK2fjbgKcYA zja?eyb;-5Jt{Fza*?D$4kEh7Q1itQG!`t8V=BuYKL&9-E+L~?oWAVCULQzc(2(TiK zX=;);uSu;jU$}Ki{^li3q{VER<&$}mB?(Hp&T_oP?NX{-$J#N5To>okQTO>#e~-p6 zs!&Vqj?4&sVKKaGl0ZLV%5MPE<@AU{v*JC1zCzW$eD3*j2LlGl->j-~;We`0C9+uI z1`omC5+~@`+8KJxI*@Td5;T20KK)0h4S zEnR@Y1u2SgvQ58*d-43|ih?{{pscleU2YquHwS^r2WMJQ2LABdd?-NCMyAl8h8ojhj@3`IdJq 
zBqDW|s-~1`Irurqrf%L$djHqb!sG%#oji+0N-AG)Fk)qwol z_==I1wGniZl-GU(YUOsOumls(TU@|vITXS8@7V5mBxa#r*gS^7QN2CYsWuLt52A(w znq44}=r(?P2thCKbdo^0wega*r(XEj7K_|`M~W{W%1!gncU?h0W0*OaLs|l0bb<<4 zRFBVr6M>#GqKbwK5lH2+@RI~-^;X!vVsx{Am}Qm0<=Ymy;*5m${}^1XFl=K~0;Ik| zKMny!3{mu+^Dy7r)E0YGMM>i|e$yoEk4*Y?ovWdi>rE-^-Vc`t|EkhE-a-BnqHJee z_#jb%4#pK~QZV@ef?!F#FJO@jfQYiXB|+deGk%;^>3etkt^C;Dg&{8EP0Gq8$Ns^Q zLAzYQ^bfT=cZ>WFQDA5p)#BMxberZt>+IRr5;S_!0158-=+}j8&r00_* zJcQqJTKXtz-|2Os0u&uoqwUkkM00(n@Iqh*7}Xbh^B?kh&KavN#*XXgTg;0S^Gmwy zdytA`+6r>aQVKDEtL$MQ?(V-zT3uD0(D(p{a3mZ{&ahK(Jr!j-`z~EZmq8I7&Hh-I zwL|v!eC@MIyxr7dh}AW#1}@v@NClHS|A1%npuWEfsdHU%Y)U4EK!OXU@@hrO0mS&j z5b`nQN2*f*khk`NI0N)9s1&mdqSMzo0wV9l8%v`C3T@22bk@I0g8Jsos}}-@JWO15 zsxucIzFYCGCcH!rhVvFt$1HAZ0Q` z=QlIeol=FbC5ERRgl}Sno>hCi6P^P^}rFH3DJ)F) zSU~c;08HJDu@-kgeNdGhd~;L3ifut=17b_B4ebH-WhAXgKP=K+H`#+*m}W{WG#~pv z#TJ+1U7@OZ$@aMDS*{O?#&fK}ovW?Zgjg47I~2bT<2pUjq_FG92k?*U4tM}Tz!2T~ z-SHORgaSQ_l#c*it6bEAhz>GFz6)&{?{P7MYK*+}1MFIqN)nd8+;15_x;eqFvuO2`(PaIf_-I?J4So+wv1P)|wA8+c# zfqu7r{fRUU^K_A#dHrk)t#WB5>H47uAn^q%tCJagG2!*4j zjv7ga3`5A12=-`Gt0abTH-9+Nyoi2Ny)sN{(fL)X5AYiji8weAa{ z4nE~b&`Rx^&<~WFbDtu^-`=rFh04G#I)}lKct8sz4uHz(z)}&@qliPu7ia@ z4=V;xE|5<;%$%Ft0b5(>C2prXU@9Uva2eR0f>2k8_a6Y@^(yti12COTvS}dqaD4+w zPD29B8Iiw$AdcrE5c4Qpc0&p1&>*XOKZd}YM3 zVEmF$hJ1Icdix*q2CVIwFX;U)T$FrMMbd>FH~2BQB$Jo$y*^6+ViQH^x-OP#_)t+H z-7yFmJA-IzL2Ff!R$X!(9}tb(EFfs(dwPD9b|ykM5dZU7rK7b3KM?(ty*zuiX^vNn zhZbgbMw12ZMNKGEhrIQT5g`CszQ2N1$nv4l7387kJSzP1Av?P|XWpGflY@P>X+Q&gw#JL&w73iEC2i1fAN3Ui(3F()>c zm8M$#aLVIz{xb{g!=Gtq-(IkI7kI05KgSwDOtdC<)5ULwB-K~>KP`U5W}MgZDD7u) zNc<}X@ETdmyR1ufJ;IMjI<}MVLXL~uoyZqwmcvUBXD09rMTDjv$IC1B-?2QpcM*ii zXy7SN*eE6bXKv;6%@t|THdKzig2#Xqb&O1U%rO3jB$wH)F7!XomX?+ocKzR9KpYCJ zO)6b>Bc``tJ5RY;@Ac+j!!B6{j<+167l%;^H00e+_^%@iyqa7B-fZeQ_zGK!&@2w}`-1ZE#&WB7J=8-Hp-yJi{*SgQ z$QgB{&)EoD3~=UP_Xg;PP}(KB_>xFE#1>!eIumY@>^XZ3;KJVsZ|T zV#z2$^V%p=wHSnhlklcFi#I&eA`R1h>yTn@=hI`BsS&n&g7C>vfF#M_K zMCk1W0A!YJjeCwagi*0V8C0nj@B6ykkcLAo2?Xi7?o7p4?yOGn0<{(hg|F?|I7SqH7@&# z{QlA1jd|l`&y^anL=KP)h{`^K4CrmRllUayT;n0B9LReo6HYxfox1?CC`t;XAZv*VD=Aqol`xIW)k9oHHw)NItINfo3dG_+yw zc6CP|Fh~}IOwpb4g4gBRpbH8rvqX-$m|x*BiAZ?%1w&egt@VS8H~RigyC)Jr;02|jvqO0@gcVfe==ew5xX59g++R!WHu^u_{Deb7l>~%ndW7A zyidM7_bJ4H^E2?z`mpAuUFEQ>0<|y}eK{KQCStv@6fls8t6hv6-+x!V+QhmsRJ!cG z?7I2G7jFpkURXl;H!9G-z{tF|x3#=XdNY`@r!n*jiZpIJHRqv9&nYwJ*~Vf~hEGPH zx)I(*qL8J-mhd4W@1i6W!k?ndbmwh-rxb6-`Y?xwdY)F1+Ax~xy5-=iA@W(L_{50q zzAsOJpo}a^W#i?faf1Ep3v@-4?qLvxSgGakYgJ`gK)3r+82q!IAnDx(`mRE6Tp!_$ zDl~T|xW{%K%@9Z`I#z<(WA{lOf}TkNF-3-3_kQZ!Toa2Ge7?%E^!DkIi1VuyU$)dD zcd@W=6RJ7eFg!_3kWK$8{(#?kKx)PLlNXw_j;x&$g@`GxfMjYCK%mVf(^UZz@otZV z$n_bO*LX1u)g36n64eRR;cA_Vp}ZTL7P7@jEWe%QcjI)NwGMGg+k~v-J4fJbeog!a zxm7p;UYej^@a)k2bu~9YK`IyJXEmf*=|k_k?eT2!wH+iaT6zcm>&1!ZU0eVSs-fjj zN#PFGwAO3M_MXxK5m~UspCfXda#aJDq1G+H>8Vr+bi8rC*u+^Pzel3X)v~@rYFWzK z?i;-|N>I@4yncQdBFPk5{YLLkZJ0uB`rQof0_+A<|4Rk36i9$`LY2~sAX`@kG1@^n za2hF`CwF&8?AkZ5D2SNu7Ptf1qEhczWpsnoyr3E;OxL-d_ZNc_Bp>uYxY-2M06A$2 z6{+T)lEQW&BdjcP&_kO}SO0rn*>g8)D{2>_cc*dfvQYJj$ouf&*N-yxw?IEqG5$_x z(%MTSppioCgJs_l$734V=qF4Rq~!77P4|Lam<~V>XP=^PfW$hgx=nsDY2go?>~u!& z;-Bn5(dz&|P)&J%1XE6C#{Us9frVl#*`ylQns>+zwM(}j?i0r2s!mn&V*~wPecLfW zsTEX|^s!gWT;=e?pZR=ItMD;!iW**4anx-4NGQ>Ls46Pzipo-WeWMUhc>7c2oc+?s zAn3!GZ|?L>*98&9{%aDxD<(#MRXijXw2gER#{m;&Elv~AA9o|Z^-R>F7@Ytqlk#Fe zOO(Vej4kYjo+$MtpSUVFaRGdkP4#h8!u~)*yK$m(L*P3jPb-9JrEaYUuTcwGnxd@x zT-DBW9I>&VAPLqlpbAkee=A7*nb_rOkj_m?*DaX^=3Uo~_I#xz64dzqBzeYIo0owv z?hbUOJv`ScHl;RCf*}I54Y_|Ae-%>T-{)gJnE%A|d|T6qTawQiS3NMihNprbh|!-QCBz0C7&WQ8Zaf3sOKng~ z0Tn;uTX_OT`W1Z*8MI&X)Bnmo2@ihvKwe(n=A<+5IItt#Km|nN 
zKGb!>+!M7MZNMC~zyT+9YtE24I!l9T;h`t6d zYGQz1Vq$Q}YwAe#+^IE>-5kLa)t{nzNsGtxC7V{&4K2yoswSmq_3El2(3%#*wia|( z$f5?4kwVgZ%g(Qv3&Gsc!?R2M3XZf(Z7J5h7SoNu^^6y?n$HpJK{D@&QpP^0Wj|JX z@ETB;Hv4Q#ol#P9G5gse3gv3r8U&sP8RFR;IN#^K~PApTvxRc*5iMLk#4G2Yr@(vK_t=z}f3sdz$01OvDALamd#AjM zh*Z2H(h7AYYXQ&8exalD%jVo5eY828@F3o|0+no*IPP~Qg z*fj9Y&pp30qYae9-=KB}xz?GUOiH13k)wUPxLf8V^^x|HUY4o@96~x&T@?47fdjB6 zRkhCj@HwCSz%Re#1?@II*iY==clpr`oR`O{*=Byj?j!Hg-A{CH)G$bi=c#BR@2Vfb zu)OS=las?UO|lUfY@CEZci-2yc5Wu_P~NXAYcWkY&_(yArhrubY!n0R{#ow*cYvcw zoBTmM84k8H2U?*0)2GpInR~cpl!J}Kg!VQ{#tvMyq?$3!2a7V4qhxlzT)@h;&+bzn ztXWmy8jU;>OO<#>pB`1(vaG9w*a0~ycYW3fK)3%jkivNW_3bgIx`7W9fIzMQ(#m5} zPK>`*M?DyiUyTha&QSNBjBhX}I(?}nCK76*b>Nc^mhck#ME8AAa|$pmuW>LNhT8@% z=mpKJK;nY9pjZa%<;k0<$x8{_FN>fs&tqY52v{RXFg?HF4KBEfXj_BKm^Bmle5liT zw7!VG(m%hNKpGP1FKRtnQOfwTuY5stzx%ayRbcpFvGFj}9JYte1m)&`k{))_>>E~I z*_1WL+rE02G)EKLJM111AG8D9XYmlEeRg_&5c~t_e=b^g74(${d9*OHKFnJuhy*xz z)L55fJZ3$kFcoe0=ylY0o0yb7R)Gha;Wa`z#^Z_7v4VjJgptS1?$pWmQ#+S}aNmYS zgk=+HdjKbV*HSyLYE|A9kiRVg{lRAGP$2oYSo4B3atsuhHSqDKZe)3adJl@P0+Y@q z!p;yeYneCoMiz%g=((@#u1*sBk#!-$fN0V!(nI03H4rB}tWU%5fjAS9YCq9$QqI-S z7bIZ0P}1d97g7S6Lt!7wA#tHeeGwPx{xoc==R6y~1xi{W`Z_#^35OZS0&k@m8vAdV z@raQzP-V9OX(J6t&Ua8Bq);*RG)ljJ&dgm9XO0zUa~8AV1c}lg2a)IvC785`}W8o5}Q-sZzv9!ohya-loRdR ztd9s*Bg23eHui}Y_5!1XhwD{AyOzjb1-~>TdT zt5d3}K;%J*jz6j>=@5xOn~-KUNZMIuLhMp5(1IoHOu#zKKyXX_isw$(#6=*DEQVRV zoI3y5W{Y($9b>U8=H%;lhNbXaBL&g3jx$k!Ekg(f6s5B294*&xqoM8<#(H*EIq|z4 zHLOjbX80ZsY?a$h|Yp|DU*&W2twImtLNZk}zK)2V$5H#qBd71f90;qp-8NoUww+$Xp}xKF4ruey;xcX`J?`XnErGxza( zfucpX;^hQfCPF%p#Vs-KZ-a`ipn^EvbJ>fp*{23%b{OEXXt24|zih%;*ah^k&qI3H z6ST(YHmH%gZ|isYL+=DghQIa$5_W-ik3jR+ix@%+5J+u;-k4tAK?5QXJgMA8u;1mm zG-rov@$sksp(Z7uehwFhY?eDH5aR_ws%_Pk@67*vFEHuYusMja2)t*{MS!D~isiG< z8=)gm@qDO0h8|OH)oHmk6aFYv{N8;LRpirp%s9|>4a78Ec@3M0C*siuHW!EEj=_Z% zN|Rw^?r)?SQt#K0f%5uPbBuA`qXUkUL+VTr;q$L|P4E9_Om__=37X66jNKz^c-GU;X9$!MIl(EjRfani5 z{`2L4buht1&%y&Q$0Gz~kE(B~L20ftGYqC&)RI8O{XhTRRdAJsd);%8i}{WIopNAa zkQHgR;hD|VNbx=EO-jfc=y2+R9OpltboLv7_j!Tl=ebLuYNU<2x*YC!tdYuA<|VZb zRR@R%%K*vD;$NV$J!1M+WJ(X)jBEAXLI{FMcE0*)4D)cC|d5G zbh~%?z@!$cIIm7&_8ZC<`}8*aMmyopw{~6RyCdp`=!mX+bm2u7)J9zPMUsN3e`Lwf zP8R7L+TUMG3-5yzHs;}4RBFLQKjO9gneyp6{(@(EJ5h&KOpDcNc~`rIh)+mdc-Y`Q zSN?OkPu4NHh9JxHYITk4z@uSbbiA*3X0e?C@Ur^cnTaZljzcnC0=b9KUo6}IZ-L}H zZ!ecK7%LfrE+WM!PQ{wS?je^!xnq*PbkTDsMSg{N<2JhXYJ)C1k-X*a>vx^|-YNf) zl4Q9E#Jf)4@@!(k2KB-}2i`GRr2!^}t3e(3+YgImX#IP)Wy2)7H2-9puT>6f_Lf$- z@!aEoMHt8b+C&~bi{sRaH9`@GzE}TX75Ca#r)jcptENec#qWtShng<4T7vAnu%ZqN z>f?3QkO}^>?BKJl1jb;35AVtPYrT&j491x324Q6W`V-%u?Keft&{aP>6xZUk_TISC z)$W2jGYfQlUZpJAbK4i05=QQVEa%^s+8@sb4Ou6EtG5x9s^Oy8WC3juTI9BFKdb!u zYV=*rNApT|0@v>9%p-H%sSPUHiwA07C_VMKtC;zZ$FyRhZK(gpEC80zZ-lcTRZ;fX zXYjaA;rksD9|e2+TV_GOoAx%$v=(D~42ST>^R*Is7l`ZskHgpT1D(h9(n&o+df@*e$f13F zU9<2e{|ED?>&3VU`~#^wAr^zb3D*Rjj$=A0yY3-D>pjDY&t~Hk0|068xhG1%%A%fU zqG54AcY00Mo*r~#c`!e7V`+T&x~4i=_PzA)X*Xn%O6M!`-u)wob9@AGyuF70K-5LG z=|J8oFM{o*cA!mJ(@w5dJYV*LimA}n@LT#LUcg$!yZezT|ldpRD-T7CrHd z{Lx>%k>#kP<|T;<#~Gj5lOs%xfoO zPKV@eD<HrBX{Z1=8rcA*fX?A(pMP%nHMtne$~Tb90WuoRe93#dQ8h=Xy~B{#nFmZiTyM*E z;Gs-q*1MWgcN9;naE|Ajhv%ez&+u+)tw>kbf4>nC)dt^e(Ctv_M3ZfXVI)MWW^iqe znj%mbHz6gO*pKTCxVbbz3M=NB^>m=x@s%i!!0P;*`RDzN4=X2--BbLuWfh@E7oSPh zslXaCeH-WmTs>L6<2?D=X@#Cj$X^yh|Iz(-=soyzi45)0?5bN+um8CmCaLIIT7T?D zP#yvYTc_0^xmS@6oJLVDSkqb~ zp7+T=J*Hr@Cy)Kt+X)n`5P3fhTyGeZa6av?J^RwL4NC0mh4^cqc#{E$P~U6j?_|rF zi}vrsArE!Dj>_7ewcjBM^}j|eYWml8A#Qh~MGg5$2*!iJxlTGqc5+or_eE;Y59LM1?q((ri#X;Y^R(%zw zmY`wb$zFx_YtJm@BQE!#(%A8)UA6VKaj_ecB?vZ!-@Qox&{G%c^1m#1EG29%0^bCI zKC|a$nHMeg;;#+!FV3}7AF=Q9KlZ86Klf>FF2Z!Q-rs@DjJJPb|l%yY~A 
zihx7T$yi_grtA>B(T%HN%oQc%`m>yL-w}UYN`f^&Q!V?$kVkdyJ&}`iiMZdh zLs;C#6HUM{u7w9`L;ZPVrJ>&L^L%pf9_fyGAD*<8JFd*+T26dyQ_YdnJ zS(L$oaP2VDMM{vML4x3HPq^E0KP?O@ck-9dlP2XDfw9l2_^<2bFvG1#Q<_TGz(nFDMl_PpkOIs9fROD#*q?ZU4^ZUiHt znyO)dRNEGZjK{-Vgcp;7b|>OXS@SXSS+@WdaU z@0ao=<0K+v7E$tP3JE+^HQC#3@~p@52Y;X41Bje)zSgw#3>Qj1#D`1(2&l|UGq7Gp z(_LNjI*BxTiFQL zFs{9?)t&nR4tM+oEiUS^uu7XufGUyFAKa+*OT#-?RniuJC>t7P6e^1GB&`$3Q2wOX zGW9)-lo0^0nnVRMmeS)gFUVeU4@L7s0SyH;THeNtP{7`WemlBEzdhQm_-&z!l_T^Y zm*IPpDFM%`I&du`!;>3@@Tt0HB%9@;NQdyY=m2G|3h5Pe)47Q7h<6?jXdBW{e(83- zPxcDu40{_KAJQSy>EXdrqPu@j$8QLbrRMHE-BZ$mqLr@~EtcZT=NI@So1ylTaKY8U z@NXbZmNpg4lun0E7WflSWWS@o95gUmf@*|j!mpC%${;}$L$Fv5ihP`Y?bIF}0LDUr zeyhqwUJ!x{zpKJ8rMfC!=!d&O>$?(K1#tYKd2z|Tul#Q7fRd{$v|8AG_Yr8BtO{3s zcejwrVSp}Qs$jS2Va-zj^Y3a_03AKB>)tuL0)B{cf>P>N>N$^|KsizOagd}>U>&;k zpz|KsD$-%+wm$deYVEmx5m>I$2iY_khq!A?g(FyjH&8mS9FlZmUOhW^S{LA3b?M?# z_(<8uBgAZ%-wh7@$r}qco{|O4?%kw)ZqJm4M$sOyxEA177WYZ`yNQi?veDejKe1Utq zot8HUCu|ZRNzk=k10^)$Q2rhoZlf3Zhn+GsJNk4q_;)08C1whynt_O#5E@=55V{?R z5C)6|10nOG&p&Fxc+6ce?uvgTb+A z!9QLi45fnGRuH$@(oE`ju`S_5G~@Tit2Vd?B8J?`{JSB|z$)osv&&@+&>AYWUd;Xq{-TL8+H$Cc%EU}q*D+@p4>RDWU)M1};Sr9|= zi-81n9+vuJ$8%^e@m(T{o=@U^rSVRVc6uJ+L=Yih^4lZO0DnT>h|{lLA{Wa zpL|W1l7wm70}|8M9#8P8FF1X9{_;p(?Y)JyZvIWj%l#R41i)K9{doVTSJ{3aPV<13 zTrua>@>=@5qZna(9ZmN$F;>tZjqgo>|4iypZ!-zg8_$b23{m=#8J&J65LUAoM zLsD0>55?UH1V4*!%%Jn`eCp0E8{@ZVf3~~mD`_I}qIV4531}`ypLo-%yNm-BB&%j( zb?4$HBCOvUA24v<#Jrw}r9IL;=+zwr8?tUn?xM&-IyPgV7kbYvG}yPC?M-#l<=;G1 z@7F0OHN2fYZQA)@qjK&1L-MpEBJ>BC4$Cq0HGD4gSxUqE_75M%h)@+z=y`Ocnjq{R zmM3A%#N8gJ^VD7n%zLb3-w>@RKY?NlD2g_87bABr(e0jW@poBAXIA zilJB10lc{zH1lQ1YnG-VINd}!pZysd9v583{0^@k4EBnifF~uo%v~$9jjwu1#@aG? z-Jh16$1zT`c%ODI}A}7he1kHPV(K;G3AeZ;0Ri1Znx?LZv6uj*I;HDbqw{P;tLzb)SYc z+OX~ElKf&xmMEBv9}BI`2lnOk6}Haxvp@Mm4yxY|kfJ7L!z&C==Hx2p7@Q|5iPC&3 zam7wNQcdeNS^(GSKKaJ)opxTva`#%d%!nfF4zbZNVHl}4;w9F(XJG^gInD zj~nf_lMs~5LR5u2mIfbRgPG=G@{U01mM8vh#-sW4vi;@d84J~V`@(3~wv?as@q)^C zP<-DMOdwboE7ZUA-Ad{XU~rb=9j;sv z7oy3r@!`@PSh{eu{VU_$H}qhl=S&}cIjrG<14M7s=6aO*#q4#4zI)WX%4OE?MS+|S zrNq(&4#FE&G-5m-^K_~!R_^!xa!UvS$AQ$_w!OAPeO0I((4?LcjzvI5hjtk{?8hH6A+!r!GVLnVlA>_%p?EI(P&p={bURxu*fdTt zc?yu#@~Ue=w+oH>J}8oKoV9qr3Tkf}K1!=piN^tYns!0Jak#Ppig`ck3GjD)qxX{R zpcWapMB}D61DffHCSof>aE@xKlw|_&x|%g?4(jc@t(J*sdO}G=(xcj)1Mejwf6xfY z?0yS5G+{-p1D&K_N#ibb#qHXfMaU}rng&knpPgwUW zdh9xJ{*%17P5zop$*bS}8I69rS|+Z$Td$brHK92;{pe9*utW$mt*8Y-J0nn32!E!@ zkz6Z3eYCEOdMFWzlcQ(sN~&)1dI->*3jOLGD_xC4dUk>33lyy;(6bjpTTORJ5z|s? 
zq`bA)Gl;BobmR-_k`jf_#d=XNAMe5lH5g{P@kZMR-IFuPOz;hF+S0Y%L2EPAkbdk} zy@27ZtJ#Da2{YYuvojCx6%`frNEZ)Ps(Z`_X?K@Nos-mif!?!chhQK>k8pH!Y+QQV zhKX41c(vC0n!UNUaHLhOMPmvfPkOf6V9m)~!FPrW+4VlshuLyK!-z&-SRS_)sBj$k z5TZ_=eR$n8`eb;tFjI_eFTUlaU zV@sKV1yxZlr&aNaJ_B)fWq}dvLL=tz>}7}HD2ampjt0hpeCs54#5VmE z3!0M4+3ExJF**w{@(>a>+r5B!7F>$+CqJbW=R=N+3ghh9%NGbMpY{5Yv<6zNrL;DT z^nB#b);uu<5tgvI1h8gw5p7B2>Rc*nOn`L1i!2J4miOwl-f;8`D_Z^R_aGsny`_;`B6* zGh0k%4U2784lA`)4)#1`an!NYri&OG8ZwK15zIQZCpi0y@7+%y_2c=^fQ#pAX=>hx zY35sNHwt@&dShUUpc}%+v6N4f(6XFwbG+iIR9kGEiDiP{6JZ}*ukuV`LUaNeV)Br_ zSGhl5KzZiSnnq!*tq2k4vdfywf4@z|l0Pc7qL>>sf2!`U$&{~|&0~5aD4YL2I)9&` zSMUdq&kmF*oJ$65ke1Q=_Oi$SPmcjUWVgs`+(jAJ&)Yt&oM`>`oPizhoOZwN zYIW6oUGN(8x4*Jb?KoE48pk^f$j}Y+TSQxKP4isjxfpASZE=vFYt^<>w|w+tnsz#s zQ8k9lPPMu<#_X?WLyxxsJDCQ>8JmBd_4su<^x)feyWC6(PvUM{Ie5Q-xB(vF^?)L3 zaALi4@bz$i`96M89>jN@)m>FYlGYdA!+7#%Oian5E=1#Q_S(=?#=bn4eK4&UYdqhV zS1jbZ$wLp@D1VQUwFZv&hP)SFP`T-0`i|yXo&4K!i=ffu#P*w*f#KZVShW7smi_(s zKE#BTvlwS8^N;c#m_`-Co)PCgcyPLWAC$bxO(&9ghh~YC%U)+?%pFdtKz~F5V4mZb zv6lvm%U?-n7Iv~Gi|8$uaptZSn`GICT|Gs%Q}V8$d!d`S5OnShE|!g36&tnMHt?Cg zk0%bg7J|2bIf+zmZnY#gx7Dg<$EXspxXcx~o4rEsiS%*{$MYVxs%+J9p1Q16YSz2W zlboeKjXNEYn*LRFI;+}sYsl0xlhKUv zM=D$d-A6C}5@}9hM9eD0Gc28)(V+Id`osXTpmREM2I$2HRO}keGxJv3J7iVm&pMjx$b+5mx!kg45j90iy2B*`MvDgUDmy9=SE9VAHzg+ zKGeH`=DfuCF8yw8Mn=7Dc|7-##dIL|KkSofE+9w}e*B|-I;x3qfp*5*Mn}%`^99lY zp{)^B&OD7<9vi(6hu3Gj7yYkT6+pGLOKux;y;d16!$E38HI#-~b@@ET7aVPL-q(UC zH@{#g!S>abOVcl!iGASYag>DGtw&?)zS;}$+`ja12}H>}T}w54R*u?LMe*xhF4fj^ zSC>5x@{guGYF1{~JNLfac3*Sm4~FhZLT|tZFal|D8^^mIfNmFas5n&t-0^GfMTPta zK-an$Dm0wBod3YMp1+VN@jf<*lbhdO()aau8@Fi74ax7ETG8gc|5qy- zrmsOo5NPq}$B2I()^wakJTIaRZ)D&neL!g1Lf{?j1HyMU9kdh4lR7u=?ONMYWlQMU zHX8^O9EnO9!*8S8>cu}K#xo=&t^uX2i;xsD;IseMm#=FL%E_O#CjvU5RzN(jSG(Jr zzmO55V~c`YJW^B=P4E&ngcukqfo89^m;1L2#kXAbfZ3lLm7kVI-#4Bj5fH(jhV!bi zojzRfSJ%q2l7G$J1pZ!GW=}xK?dRfO)O6@M_dat76lOf;OQc)>9aFoX@l*eMB%@V! 
zto#O{MG_E!724=g&BJz28 z#D+Ey6P;lJ@lb`UkOX?*IHFq-3CN%ZEYp<)7I}ti+$)=h%k44`eq0hM04--j0J1Wq zaa-|L@B{?XBjUt~a?wMaLxR&J(_}B!{EhE46MZicxetvps%d5K?ZvY< zC})EfFMh`Mp&&yQroNGE=mXf;Tq3uDqrJUNmI*uxgA3yIRw^@WCLuL2-r&a=IRZug zwq`mL>`iVdlxVK|>+9C@n11l>$9*ZtBf!*xrqfN+QRomc4z3zLa{eG zgo9(UV-8yrg@)b8$?P2*?ti_Icjxo%##`onWUQCmk`s8|LdX9({tvz{y6unzGgOm2 zKR;hQ4m7^=SZH=_7TNu4Rw!B9CdhArMkJ`v8zRoQa{moh32BT?(5?M1kL%)nZKxlr zfoT1Ar7$t)mY7X{Ns2isOW6b(qAjHLLT|vT?3Mc; zuO7CDZwf-cgI+B1g)3}^j|jeFK6~*6{$Xo*yxlZn$5!Vt3GiO>lU zvxnkQBzf$bmMJzLY>h9M!0R;667z82q~hUT_>S?Khuz)O zy<<<-36?`^yvKrc)TnYQ96p~w>zvU$3IB$HobaU=+p67(t1Yzl+#9x=1#!K@Ib-1d+0cYw_kmv~`gh`0mkYdyD6Jj$zvyyNO z(J#E*II155GfW_}sV%PWPUWKE9$ae)XyQxwQV+&5k ztQXbOyVdhBbVdQMofQxgKaT!Yg3hsafCi)AKF{%%&{NkuFk!RfJ8h1SsXc#76pUzN zyZ_8LaN@;H7y=QW^+MF4?$L(sW0Ds|@DVJGTl{j|?eSa(#QIAg?x{|ypRFrcU|L4+ zln#p~LPaK;;w3`POs+5qbxLB?$fb6S4@82F{_GS@_5QF~c59YangH7slck|`JXn?J z9B3lu)RC{Z7b2_0FCyFkUwZq62sgEyfXscP^n=XSeJ+8n)%q!?!G2JTY23T|a6t=o zRpoB7z_?|3P<;J`is&FwQVU{j%>o#dg^Jj-+o(qh*$+6}>E7Ux z>ac^d(4>lY_y#D(oGQxb8Ynyak!ciTT?%^n>-gJ{5WL{gc6rdEwv#uDYx)}9Xzk&A zZAH@ZH~K7{s)~3#lB1>N!#2xH5{1Ox9h;_YxJ70N0``DV(*BmFLMOiD&EP^j3r6HN`*noL)RS2-B5*5M@(g##VmX?&!SK?C4(8 z?BIdy4#S}=_Vz||RHE4NWmZq$(@#CBAbwDGFNfb)jR}o5?C~DOwp7qMXt^DI^nI~g z14oI!;kqeobkb%r7J*GQOO{;--x_ILG%=#8IHl^B0O@rl*op2m!(N@shi|^e)uY#Vw#*B)7 zk#`c%#u?vpyIWU(q3#+dvE&vi0FMf}0(lFGzGfhwfR{i4*@CTSO?K$^>nQHh(e~5_vK%J(6W)T zg+|U3q^7W`Tzp#hgE(i&(4KU;>+SVE)V1Fzpord73Aekm?pJ}~;t5*p^ORsi65p8* z!bNoa>p$4fiT)U_<7)`c231F*j;3U4&ACo`m-0IM!=;fjrHI=~f#O@I`0ryqm&(&KThB(g^Da6Owb(7v>m*}+WcxT?+aD_ zZyU~o4y&6!)eFlC1@Y!x-(?z`Kd_)e1=a0VGwXYlEAo$8Z;vCg^WOWe^GFLVtOaq* zB~z7T-eJ+5vj(-Jc}Wm`gEq;=TF6^#(jYpA?Ug62o1ZbS`JienGF-On?V->B@BVrf z_6z!pmxM=X&*5?GN>$|W!!^&42ih>Fe;^M6_lL)8$NBLbhnmz_LvIS*7rB0hFZD8T zn)ltfeS2|c^ht|a?~3-4F>G4fx`z7SF_Q*uvM>)X+A;1>o>nj7`AQ%knTIdiSeWC1 z@%7@@w28&gr)e{Q`UVD;J+Dx|lB?M06^aHJogHx*v{~Oba;H&6ePj&x2ey3|BH8c8rGLx6L zk`B9KYMjsINXuPw=$IGtkT_RWSz}4!ApUVDCd`X8DL1A#u?u%gp0s;X9t71=) z-gL`Yd8O$SNdM9bC@#8}#FWNj;qovh8C zGK01Foak_yHkqY4w~H~H2l>YP(s1FkP`|!92hN2UCGr=-Hj4ZQsa9QvS2_FO1KjQp zkJyJ1CQ+8l<5oMWtGv#K25-qsuupz^5ZEONebdwN?R&yyilcEB#_dY_N)+?XoJe6t zC=fpTd|hg_Yrs~d+WyL;Ilu)Ky#Ur|oWAzD2}-q{d_;?h$mS1OBZT|Ds`km~ z2~xZm>0^5-#artBp;gE|FTr8gbSa((HtW#hZ%f}@n~#pqU?dRzD_2E!52Pn_ozR%j zMnPMBr<+Qw`T8Rp{+-8BVJT_vx)4ZbI;u2wT@m1laPaCYQP=qzhlP@mNozDYrBX?uN{58fC2QM#se=AL}r9tAhJ> z6=*rl{(+Zt2GgD*YB12B@YnmCCjfD-julCEn<-W!U5NjeU+khDzvA`yr@Q=prr~XT z%&i(P6A>*tE5dwDCr(}yp-Fq`aJ-zI#o^>mk>16p8NX9+CW9Z&$eWz`mWgq8Z(N&{ z?l%1ibY-!^TB&67v$brhZtl1pZhsFt$B$;ya-lu|Hp6Zrd`g$V7wz^NbCp2g{^y&_9pmF7)Aqb(egOT0bGMZn8#J6@0hS{DZEu|Xr*Nki zOi=g!v8C6SmhA1?d`4heyq0g{Lf%AC*bA#wN+r{wXT3bT1LYy;o={IM1n)w7IMtQl z*LYM3<0#Z}vRFVKk$D zHSv;q(_h~r)7^sx(TZ{Ac?VpVtBm7rTLKUk`Tc7$d!tvwyPDg#E*5cLL>~65-$w2K z-102gFz>G{H~HSf$eU*0Ah#AxcYVCai0D=KM8v_g1~HAq#{!D# zvdsQe6PE3%^FJgSzbWnCCexsMB>bwa94iNJs?sh@;<6NTSUQe&U-OY!06=uf@bz@} zfR9<{^iJkZ5z_LE!G-Z*&}SWSAKSWRMj|zU&(HwP16d0 zt8a^UdpEpl__a+Qv*AGSQ@oZ%jjWC42w%m=^{$c|voj$8tkIK%KLDV4a_~vc_JZ|n zJFQKBZ{KCuY;JC@`uD7lPzswpQ6D|#F}tx$80Agak9)-B^r-sj^Qcahf_S^6|3liF zfK$1?ZR4d?*b*ffA|z2lMCK_WL&i)Q3z_FBbBjtDi)31cLdZNV^OTTrndd2Dndf;K zzWZ5Ad$)h@`@P@$KmG@Yqirv1J9t z%nB4fIi2KW(M#pQ@%*-*>gntLy}+6 zvvU@?s(3kw{6$jZ?+i+h;5IL(G+Sgr_2bVU-ON4?GGJkNB^0dtE(n@A856|zB0rtW zexo)Es5ES__Cyx@lesL^0*~LmziU+HyqKpe7%~J(@eiT8LXha0Z}HU6gzf-AkBKt( zMj%Trw^v@4v0Q|(uDeS1eSKh$B)l{3o3;X3v^t}NxBEfxWocz)mDZ%LDnC;2(}##X z^^5i{bRfdNo~O)Agy7I6;PIAc{R5I4o@MRXEr-$*fZGk`iAY7)4A;Ssg)FHRb`-tGg--X+VY(D&$wbDgG|pPeOS zy9p{~V7C_g3lH`U1whL@CWzJ);ZjGg(GT^62E2~-mX!04;_q5TkJvY}LDp4+4|zw7 
z($zEV_sl(?EE$}u;Un3^H1;Z5s#~k4>IfXK22yw~L&D=|1QZSkocPRQk4c7-1Z&L6 z(@*9$q|&sm-aZF=4nKo}si%7$?xO48SC^0^U<@xxTEE)kR#`@P=kMB?i6Z^Rf$HH&RO0R-J4E6sbi zS$~*wsTptz&0L1<{F)z^IcH+hAI71s0SCH1r1gk7I9ZWi2X3L3KrynUxyaGN)N+Fo zEpxa_)N?rMWw<2U`K3AHU&ZGn@FeMOlu&;J`yhe;6F_G`dLMZWRk#;n7>+>4xk5$qgLSZ zq^||&HWfhGk9<;lrVO}lj5yG=f;??{)KGj^7BsktC?Qd~u@0Q^2ck^k%wQFq`;a$B zmVyYSm|V3NfTmx6)7Whr!^!3CJbRxtIK zZEnFDC8C?n9Yhv>?LMH7>zP5QZUIC7f~6(vZ3dRULat+gd6P!Su|2ZvrrC$z2?aQJ zqL`HYXjSPe&e{0eT!7!s;1iR2cXr)O0C^FamwK9L$FOjPEq->Hb{mhZ?9wFvIPn#P z!%LFsct!y)$pkFI{C4c=D}yuQ#DVMCvT7oRYK+>&r0wF3hh;$CE0l7Y@wqT)r>C9& zEJ)5B_~4D`)06~MbL{t=)c$UJZKqbBX=j{dQ<>u=29SNa4_@I}53Kna<|==*7}FcP zk8$PXoW%wWKW?5x+PRLy@pW?cfp7I~^~9p0q7ZGDD2qsk+ijE7okM*V<<|t+70rUg z#WEq35c~b(5}a^gPQlrYO!ECuDEtLw4X2M$veXJ=BTHBkgMO*HuS8nSY1W4v73VsX zq)OKiKf{bxYVSG9P_IdN^rQzP-6yGj2AN1*r1kbPLci6l5{?4$M0e*$<_zMF^~y{S zb*|Ldbq@HPHmu|CONn#4=@%BLHf6c)bBVP`_@wwckYx5)5!LqI(dnuWqlvyD17R^- z65{~ZzJsiK${G=Wuwb%u%1k|>&jBBQy__X2PCFI(N0jGq{)a8S@MT^u|6u&@1i?>W z%>;3F9}2r2c}jQuZ7M?G6DB87ek=lqYEj5$o$O}kO!%{=1bKj;k~hCErIvDl0{53BDU3@?8YbGS_vh>Nx+Jx**wbD$GvA zzw6$CObVu^IIWj&TxPle;GO2mEl&2P$nPxq@*PlNTr!%+Q^Jw){IdCfKeT9-%ffOa2` z4fQwb0w^w{Y>zaArbJ%e;-j3BiFSRq7dTr#%+q=E0Cjoa<3(L-N0RFiLL;v|o+R({ zx{l-Vq=c_tOW5^rwCgoL(orEu(s=CJC@Y;2`dWw^gdh~g_PTA+5Apq2WT$=F6!s^D z8a0C+H?oje1Q+&_V^S*JvLcYL3rKV?(vl=O=(WvosI!lyBod|-4LGh4{`6t@cN860 zvdjtioNA8Jt=GTEgpn{|KD5(oGo{16^6h7qRJyz0yg{O25!-F9nxascp zZpN-%YHL(>E6`*GO1we?h18QFj+S#PPhUS^b6ddrb)SV-y=-Hxd>psf^1ek^#Oj*# zw8mNesTK@dgra~5*o3K~S^Ryb*IB;W#LmWpb5;uv3iTvS;4U(2)6%9RHatN?Thy_A7VA2DL6Y6RY|=%O~cab#-iG#~prU@0%;X z@7TNVi;J2H5(1?xvRPn&bKZV7VxJig1fw|~k@V@-|My>0&>l*emxU}L^Do7d>j+(2 z5jTigRPMiCSG5o(CcuF|bvogZR6?{6H`E3oh8a|$6xFp@*(Law1Mm=Gua!wQ$>-DN zMGq%M1BK>(KA>v4R21Rt@UT=K7X9W$X*4GubfOaZW320R*D3h=!A5Di@1&6XaU(se zv}uRFJ4Iujk}KqUPvMFMk(G^?jgGqQV{rSI?@w!atxi&!I8W&7#3`xGf2J$yM& z>yF95!a%-i$qEF`D!kD;-{#Dbaiu}CqV47i)tzVkP{g2~7N%!8bHnA^t8=&nh*#Du1kUdT9D>kJLx0~J|+&}*LsRB5>_BO1HVqaaZ+y(S>aL!0gXkA zG7nh=erbwXDoDZlE@mSDF^yZ`WTB`#Y-@YDh}O@gzbQcdi(XWmF!kJfTvS&WJ8sq@NR7Yx?mi%RT)WqVW0U=$foH+lG396nL6cp6=e9hDdP^s_X|b;o zxLZ%WUVU@JAtK(QMW*GUZiO!6W|}?j|KNV|Tmu9a;-5{0xR0jBGfW@pD?*9P9sW2o zgl|>XMDDygre_K3p0z`_F*e`f= zFdW%6nBo1n3IcNIoV`s2-JJ)k$HmB>-7TTKgZCB3T!N}Gv*5XnIu_~B0xNd#+}<#@ z$1WpBD#|j~O-)oI_tG#KAZ-VUdnFd@V769JiN@aQG1zf%o(0y4!C*2&J2qr@MQj2c znG1R3HeYWr-LDmX&+es@^78_qn9X>79}6hZxu@x8+SwZ*(iojP`cA~p)F4L64@dff zMpG%O8W8CK+-=fQXg4kVeTqEg6Kf^AOB@re=lh=1CwcBcGbK9u#q@gEq|Dhj_M!n7 zQ#7du6~6>z?_si8N71~y=kC=zzPa9cCN=Cg7GR+>qZr!b zBliXcC}oNerOe_Vu!4&-b&aw;vU5AU{zZLtBlf0oWU`MRFEvluw>@s&uS9Q7t$7Lv zcS3ln*v zT6=s4=YsWAD1XI$nG2k8tDLdJff2oxEv=F*GE}kFJ!~MRl)9RL#6fHyn81wF;y%Vs zesIB~$V>)1x00XXx7^JbGFYxykFy#r7yz*WmRWP=%Bj68y>~Ls*H`z7+5n`3&g%$Y zyUMzPc!{ncSv7*%LG>eAZ}X2@6g7lp26=3EZ{50}6c0wc{>2Gz1@C7=QApRxXBgW_ zKLDry!8)1>YJMdGHY4X6EE=jprVFR?wFE#R8jBV&^5arQpE{)m9iN zpsCUx3G1Hw!uT%0-gz&n%c`sX;p11MjxPGxSq$G}uAbWn@*XrL8oU&_C7*qSNxp;| zJZ{pHJZiA7VqgonDfWQ~ccu08)$z^Cvx8x7OnXh?tc#$_lTcb1pkd|>*;I`@YmIhN z(X;z@nnK3_BuXJ8KpG$wHBl}llv1@JNaSgD|Ij=iKqx;5gn)fEWGPL;DnAI~OJj%x zqzhclYS+HnVKd%(3f8WubOj4RD@{(_4L1a8IjH{hv zNd3)9!-|IgmFH0o_&z*fEc#rTs8DVUBTqH0f5qbJGwS{->V8p7q1wNornf1V2*#*}V|*38W8} zmzRrVr8@raTgQKH7csrpi{z1~FeHy|N!j|+gZy05PX2n$)X_fb2(*!Q{SL9EAelj~ z@rm>f!Pj^^Am1t{64G~@P2QRa;1Hm-Rwp@~#QzldIu=1(tGkx{<9Iy_5zU)1HkD?8 ze^SC4VhxzY!1OPkpRJPckQ8w})q5&jV-V^e1=_$td8&Q*T%AeUdD3_Rr>_>wp5|6o zg`h$-1k%feJ!#sEoRkAVoQegVnH;SKitQ5TPc0@rtZY$L6gCD#v}Y5S_>&jAuxHk_ z^W(eEf!uD8)@7JeUiL+7&kV?L*$;jvs(}uKJbT}UztQ*t=3KO|-#K7z!Sqn&3LQNf z)FmS=CR;FXnpqVjG(2>dg#If|)_-L?X&802ZWasj?c3|%FuwHHZJ3Gvz{7icG@s4X 
zy*30JJ6DK0UYnYRIcA>mQ<1A>g4m11ojgki*eL2d zo4vvhC$I~V!j;Jkc--9GF{uZE-6tK09nRIeBI4sluUN#kQ%8^^dOJIxmguxmiDJG- zxm=3SK)lY{DIIo&ggol*cwe#6&iw=~YzdH^UVX{r{lvz3#ehcjNUD(?Bgia=0Q_ z5sBgu&bt|>wp=Nnyr{9_r(e+R`!icdkZ>tXkt(ofS%)_v4I;}DKd8iE+RHw+Kg~O`bw{FcPait;%@tmte*#M3U)m)*=U6@Hahhfz4~W$HetC9ut(KzR0+NTG=CPvQ;yayPVbR*%9gs;-2* z>YYEZH99R|%-^uLw>P}Sg>aWMXNEf9e{K;Sfat4(PMzQ?H&> zPhQt~DtBx24%`AcG@;2VcLyaO&p8rVX5MpY7Z*H3^XcUI2N0j!u{`M#T=~4x6EMS8 zu|Zg`OlG~;ua7Gn4-*(`dSnL55$hm2FN7#DP%vRdHu&G?@~VoewbHW^mV42?37|`lgRN*5hRCv_y6%RpZypLhgMP z*TPQla<+`J zPS1*&W~xJ$5W1GWOviJ^m zwkI%DfS?q$H?2dDnnnr9p8xt+yEyKU59yN`k|qPA`=j;)|CX2FLlvJ>j%qi6yy_C{ zkskp<-oWL^8dxxX1XN1Qua>`Vh>)s?cfQlIWUfVWW&Fj#gM9y_&{e`&J_w4W;Q|cR zA~P)vX3@1YJQiEGFnQa}f9Fi3aKL-d3aIXfX!@3X>6dV0CqQ2*(lNIx zJABK}>&Nk&O1MH`Dk92{Z1B7hTQ-@Mp2-U91lU1lsIpzMM?ABxPy@FPBy?PQ>sU&s zbs3+*P~d(T5Bmmlqj?R}cat}-ktrRYXft!U4ianW1`IF>nVKcv*bj#Sf~o~DK7B=O zT!o6qp*giXXx120XWdrvD^G6Wn;OO7_zmAJRNqiN!vXSA_ulKR*1E{H9o^b!NJU3<>Q_4HAweQ>=sS(A{PEkKOVbIl!h%8kX$oJYc_d(8rN!cYFE$APx}(}#A8FuzA*BE@ z4EOnkYXaL6V?k0-|3V(8mbrASmNEHtCqnkm0)8M96pw~(_4g%HL$mF_eCH{O3FBb1 zR8H_xi?f^H6(KLyF-w4vCTi`y1=AEiOtlO?l2EtP+e zU@%pc=yeUnU|%K0yuc?>2aB;cl~nKMZ5cb)f4_6f<+h2ip!B!FZl{iC%uk&&|?6tBvW;~xgvl%sX_$p8#ZMExU~5X4a}a?{NkS)7;udVobPry z6HG>SMCOYQUDIP64p)}w77lophsF=ZF?-$FLxH_i(FuQ!#@vVMmjYH1I5t~20r23K zl6)bWBAU2(cBtGgpU4PU@jZWZ_(=130HtKd0oC7{eK5}~fDKXJZguCbivHguBC2Z% z>~FdlM6IC zLDx*`oWgrm5g@}u4Sah=q)SyfPN%P#D?0d=u9I^QPS^B2k?q5FU!8Q5>T5%@J*Y=( zUFC`d?Wn(}{Zdy4IcCkZAip5)>Bs|AlwhPp=LZvG0f(MpWQm!7o%ykx%`hwIj{a@- zY(8S$Ri~2m)!uN62t~y%k%Hg!i({4CqMhRYu->fZZv+9CDjG#qFZ2%duF6Q;QV@Cp z9WNgVJD>e0?82?SjYo9pRt~`JF^!5|Pc?MxUP0ZPw?J27L1OWrz-M6qcv?)X0j{;c ztse@95Z9~VkO;m6q%f#iI`;)m%vMp=ViPH46^Jh^fe%Jj9Mn`Cvd3lATwE8~89x=_ zGSkk0Sq<@C&{6TTk;oP00xz?0-(GTBOk7OY$VL`g^o33Z<|QvWP)X>gu7ifx%T^QL z-%4ocA1LNvfcb#%jo4CPZZF&?GFJ+kI_og(H4~cMPFfZT+VdO=zywr2t_Ou-AG<4a z5hdo>vT7+c>CB?2`l)Pta1evMpC0cg%x&+?w>p(zLikPm&W5Dczqe2oGC(>h6SX24 z=zL{>FHQoa8{xXg6>Gl9VG9U>gPJMJnHcl4ws>AiZ}E%*8P~wz;GoVW>8wUZ&0iNb z0TntSjgXZ7`n$ua2>o-DVhK_77>EQ7C6`1>-ba`*4%mQ}Sq12>F$6rCbL`jAD3=<0 zyutat>>^Ov69l54AUA@?RUrFSI099`;&R{|baCx)r$Mw-D0qFk)j{03>AYqN@lcVC z%Wjh0JzbHI`KPRXBH6ouo@xN7{Yw(Cc&QTz`n|H{O2J#SmtfxkEv}-#)&nRPTJd{k z@6cUrXNMIyfIdUPbhjQPR{fFgMy0QLx4J@JJPa z8wjN5VH3OUn{cMyHG_pAHzlRv5K!bH6i&TgFp{3Uj^GSPPj9#VrL1>dn%<8K)2vwtW6|D2V@?hgr1qa#mMgyJ*y#U3ks z?D1k;tYg{0=Q*Oa$x`IvFQ2N~!jJibaUwM+bjap= zprpm06v{drtsMPYZp+hF zp++t;SBAcmdOUN&9PQ8@)mkln^Yelg5atO!X{11!eo~0@hIM%Fb;VP|z4mjj9)j|& z0~3K>zP`ZY2&HKvxSu z53iQ;DqkGv7&Y8NFZwm46Mogep6LmVgdW9KW9>TJN(7&!m4Cgt-?0i!5>94_fy1cr zDo92w6Ql#Cj(EE@<%JLVD=A#f^nsa;3Dn=?PMh31t1g-CQ_Ll#AwN(t1TE^d=SL&B z*K$A~=Kf4|=8zL)W}{a;HSQFO z>7z4M3HTr~KADWQTJczCUH1Og)Gq0}OqKr8)45~5ST2)kh%R|J*BXzvU{*z)-n0`if55L31&hlDB-LP8v-5)kwa7E`U zg$E-CjrVSV`N}0FHgX=m%9Mi^%RbO(fIku6VDTT`+(F70vcAjW#BSJE+b_a`&(Rdl zrpRaa_r0r@%01=tu#SV0$<|@K`sDV60G|B9YQU9P@1HE`43I_tLb$?}DOa!wWLTS= zW^zwc{TM!=!%zSW^TtZ!{Qn-y+xJAmUR<8gV#QA;??aK z1W?BD!a{DSKJIg=T&FFXhz(zY4Z2qDmh$C!Hi3p;tlJFqeUJPUJrdFTK(@nTIdu<{ zO5|Sn^^rXznwd5+vmjPz zu+drjgYW5GnMH{^)YuGP1Z4P9s_{+1!D1IBx4gZJQOKp4yH@L50v~VU0?~K!2DYZ4 zZ(G;FR#o3}0U_D-v-vFkp|lT_9iWtrCif-;3do7MoliXtU-BrLx;{NTSAXs36Fk^u z)U{#P5X={s0nkZ7+o^5q^zZVp)acq&GsxfvBn|*fz=L#W2uTn~7aO3U^)4XRhdB{E zw_^ivuBieZta-QjBP0P^Vv5dae_YIq@mY>R4Yb|KdKL}hUvRwWz#rna`c69vAgSLNl9gSSkef!0~$UQ%?)fxNe4g2ABYdsF02d%*|wLWV* zpS%LM+S!+hj*<+g)|v`Ym{i-0$Z&vOuzK`<&bM5y2Ixi2EC|W#@7|?!utK<+{E>s= z^HN?BbZ*(YvXC!&*r$gXn|B$o8QUG!+Z$otny9i#FRfVVRj)PwsV&kq#<6tzy7TNn zj}O&0UivGWU3u@25{k(FWD|{+g`=Y?0A|m`ZXJHi`c(2I!mPj`Ly7rKo6dK{BkLum 
z5sHcO&`;@*8&g0C^QB`#=LyZ(#aUU*k7eiVN5F;rjZaZtQ3LanHtkvLK-#N>-X^DV4Ql!YLT@uzU9lv3X?M8v*wt0VN2tD*X}F- zGG}@xZu^piNj6jEMGacHK3+rMbL~yDA4DAwT-JQfM;e&zKsHro^OEsKt(rCu;w89q z!r2equMtC|_hn!iKTtLZFwU9%uL3?@tK=TnA;hmca!{DkES~<04Od6^rbW!$l?wN73uwgVPFhKC?tQf&au7jX$UE&INt`B_geycLu54 zJzUBn;2O+%kS+-Anj>t=zPS9i-sc2sm#D!0dtxNXpuZaH!h{@$v-l>&6~0Auwi`dl z5ERMbd$hn*#Vt3sY+t7gLUnwIIC4y-jOd)alfq{VS5JwB&%ODanjDe1qy z8BLUu&19|3AtnJeoVHM#A9vrW{P2B)QRmI&yoY{w2eu9uKZ-KhgrOBbzJ-HoGV~?( zut99d%&J#3b4+9zGb|#s|H2f5A5n553Zj#8R>Q06Oxu+?%@)5kO%(aj9OsCI5OW zA2Ha3^Xgw6UejVLez|kDqkRiuK7ne_PJJ$70wIz2g(**|I z3i5qd`SRbN=sa6L?#i=)sp89Gd0+b*3kZq3Gs)999{6e|l>3+8j=9uti<+}9u;lvx z-3d|vc>1r|Vv*hdjnxVkW`dxm@p`e$>6e+_qjS>;(Q*vFyLT(bn*FhWQyM34kh2#n z``B?23EIs)fe##Y>BPJ}x^s4$q+SX<^#}U*xV0wld530P(z)jW%4TJbZhFRl9%@8W zTqi$a?Gf(NhrznjxUdc_%<*P)hqDqCKlnU$Y^jn%kI}3IBrF(|N$ISGMCfq9l==|$ z6xNajdc2TdOEM=t2x%$B)X8zPb5p_qh#BtMObBUX;w`LDal)Hjw+nG-CP<)fGiU1N z_d-@KEo!a!62drg2{h;6dywUe=NMffundQ0yg$rdxlJM7vD9j?xKZ<@rexnU```4J z#B0_153KgOuE&VlRdSjHb;kN1XJy#={pdZgJJOSkx5w&9!G$@=0Q=M}$3>Y6u+ue# zL*4Xx%M}6hyh7Kmjy!G7%)%lcO8Ft+=+RW_Jg6eu86TmK_T$s$AF2)Ty@eWq6peY6 zf#xu#h{p+#*zF!+#^b^!$g>4B-Wa+U@TmJI8Zz_%#u4h>Jpy4S>ky$zt+okh+_%gF zA|7>LQ-ET(-wSL5tSd#7JQHrAP=ELv#F_Uz5lb(Ct~oug^I#=5BC~vFB47*j`R&g& zY?v#rW9CsLgvMIweKkbM2C~RRC&!nf>#PDaPp*qQ`piJqv;kHVy zK{A!vRdktn@M$7Rw?xd*PrAdJ)DRE-k3uaG$zwaU9J7(AL>hF`6Vnw50k0d<0#2a{ zB7KzTJI`8jEV3&=UxavTfF!YJ$fn$)4$RvL12>k$SiOBIy`dsE8bLk|%2OX*C#kb( z^q>DcUHiiwkVi+hue%DlYw{>Sby7){n$%NluH6!}MiU`i9t1P%57q}k(>A&&?SbH6 zG#n)n@?K$t5?sNK-7AZgSM&lX?!fh=n~{IRCQu=9lrYs3H_f#`w(q9Cl(cuOuy|kn z>tmy{!4ce_8c3f|oNcWBeq4m(7ZzN%M_d*^`CZ8i5!w3z^e{q{Z5l?c#RB$(Zuw!g& znU8e;<`$HaQ;Adojm-eWDA@UC3#_7+PWh*)rj*)m*Z|^aTG97Utq)D+atOYYMuHyC zKw^CmgGn%l8n-Okcn@6xkm9!I7C`#|TS503?CT8yhiQ4t$g-D7(Xf5M;z(JaFtf}N zKoeVPk|9H$`fX-HSgQ@d8?}M%V7S-sWr07%7%J8fiLHP97lx_ zI@Tpl8QAkx9bu}5Ao>0N%FhA)!!?Q_Acu8dQa%E;Vx23-*%0cH@kJ*ykY1q15deal zG{144KL{^ZZ#%=T2kNSQm4VfRrl8om7r{y=GLBIg5oZKzC5$sf#9w0x6SIvm;5`ZK zzMOz%tt-X->+P6B%yQTQo(RmPBh2>KkMk7Yv77{>&WW>hu&hQyV;=EsfJRYH5qOYM zGK)!irF~*Jp&0W6)ad!LjUe8_VBf%iRx0Q=TVvIf>%!5Xl1M{GNt$zAN$Lb0G2PAf zHb8NXluJC=@I8Dd7DHfex5R^(%vKR3Xz7wJxZS{)a<<+zx8o2J^RIrPSxgE6TzB(a zc{G^5a2TjnFM#LgXf9IlxWcUvv*Hh{EaG#U#Dtq%LdhN-C@6V=KI&2@VmBbsb>+?k zk;cnvp+k@vj+I)7ys)sb;T17u{M1heUxAu1o*pqHnQH$&03qt3gr#Jb@u1V{0eFOt zX5FZKi>2X`rP5w~g3n~~4SXT#*+oTtkC_Yp_Blv6dy)z3D#!+vCibstH=5qZmIA6= zFV!8_>gFO8wizuZ>5b(5{(WLYF~X(+%$G1J0-4NR!Tsi80tG9ZhFj1X#tQCnx!Z#d zjG}D>?K!Uqoq^t{G4>@xe1WEE+F=9gQKnuKH0NHXB zM9}ll4L=zJ(BfaSh#YeSJuiBcK$DCHDh{k^t}b<@`t9vIAw=UfmVSKXfGv50T$v=S zU43wWqf7BJ>&O9qT!|YV98kL|Y+tm+F3JFzm5jM+TNgW;8DW zk@DYFOSu%Z+P_jLCH7Zh0`M2IuWPX+CiV4O{=oeeqCj?Y#4a`T2mAgkx8OX4;)MSa zcbTz?MnVR#xpn`5QihV8Z>M|)l4mU)CNPwf*IClIU+7Y2a5#OFwM9xjVPe+)nlts5 zq$$E&E?bLMmdXZfd8vz_<@BSw3#EKH!nX`(eCQtc%hq8gAB>x`q)!~#EgICB+A*j{ z=c@&dN%<|B%4(6%3zH1K*bQH(qlT;%^QPoXoE zu%hm?4^o&NAj>%1NLk8;mwy!4cjmUl_~bqwLHDiwxD&>f2h=8W(Lzpnz|%IQ`7Z?v zGqYR(;RU*~w3X-o6BJ%PT+cy&z{aH#BmV^o5TOuyY4mnYJc_VrXvGG=*gx#cW zieLglDDlQ@pVB?gT5X=QQ*U=G;GF|o0&Exf*&g3m(yGE+NTo+m>Y&TYXsWP6Gg+=8oFXq;_n+b zRaUI6^ZZPk%g}e^Tk)ZomuI>OFH}JdKK)IRe=$67(W@z^41xNBU7m$~kztP1PV0fG z5p2|-b{XgbZs}vr;&e!k8WiBSePie~y^Q&g&-9lSJojV#X~i)8vd9~Hwl8i>P-sD4eK-Ltg$xr610q^ucrT-9kfv$BrYWj+UNt|{ zCI^(%en~v>C5ZDw-6 z`e`l6K{|D2#89rF3%5$Of%_cygIa&u7k8vhtQIr9jiS8NeXDjS6%# zV{}_9kqX&$es_*z*?0aatt;j3P7U3*Uuk=&A8>K*gzP_N307+8St|&itA>V;Lh>KuARB*AM-w1m-Y-70D}fuXjs-NY5YqB#N(; zH>(ic>j=>)TbXRd)Z$lqly3WGn^U30xB}Ummi85yL{6I7CXptT;u~EPOJ!} zzg4g1jx$*n_{i?aU5n{kau}aRr}^HJgD#1AGbvi_!knu#hIPEGRfNxY8HqNvpb04i 
zFF5Jg#3|hxCD>3ov+E=G8@7R9lIq9{Ec#J+vr_{s(%2M)-VpZu=S*-VEz)wAYZ5hm zr?%?&frrQP?&EOlPR6T1=--I`8{yO z(FWniOei<3y{pre*@EzNNT8sX?b-CWteXxCkVxTW+}C&QkR9sCuZ>KPeh=UKL+r3A z58h~QjMnv-Vc~$MdrEi9bs3FCT5|Dn+F~-MaUs9ahZ7E@>m4Oye0?H*iz5j;F3U-Wp<(H{QD7_mH7CoK5P0kSh!6eTx`vc00W zqL-2~gfnNy=296yI<%QRmq=aD;O>ZJ^Cvby4LZ^w3vc`Tp20un{6P{nIyPn#9;mL- z1Wae^9OSX|&7W~bdKz5T79C>{ygCxHvmEr_D*%wx^>MR;7?;5>OAnF4RU>ODzpU2w z8~;#C%u+^jn}o(VU@?=0!*=Jb1%R2Dw^-GQoH=vmRy;}&Dy?US{ALU3lJ_;>#8PE9*#t_@7Lowt{efMtWbcF+#IeO(fzA$o=Y0RbkD+(lTo z`u_i?_CTGcDu&IG$q88GuyYt}EPsAOn~VfVwkZKL)*z-Fii42W&;Mb|@OyYocQ}>C zP;0JM>%)lJ;VzFmS#E#KMNq(kZ`7CDm;>9Hxajn zvsvgrgY`NnTgy+2sNu?`P)2J4>qfkWGOOC*n>;vY)`Vynp#0|m!`Xhow;9T@_N~}% zb4R*%Jp-9X%)c}ef0`rA_ktD-yv(f#zWiv_k;q*YNnG9r{cJxA&HUG886vouf0og9 zE=kqu=dV|gjscGfQZYQ)#Y%P|T%zNGX)IskGgK@(aNYh8D`*fj?4+`8j2VV`Q0=BL%|%}C2@mbEXPJPclhP5Ly@mVCZhm4i#Ni4NmZyp|*2*%g16if(7Rz+{i` z=CM0Gvf_Q;N}az3#Qx48 zAH8fvmoKXZWLDL02!_3n4z>WPBi0C{C_8WVm1lryO(BKe^BSNssGGVCp(P8)K*>3| zY)A1Z4Lw(;Ub?B7Q}z5#pEo@xKq1N8Y{Sp1qgVET#O%~mJm6X91rofAcR1uF(}g_8 zf6x@Vp!8yf?iL~L&w`;byf4;+L%QFHal-G>S5#Gv-K2VT^S6UEBsH>COzJxLF~~qnI>0;OWi)vKoP4DBGJNS|T;Dux1qqcP}YgX}R z10dCtme}x5Sob+iv;ai{L8jPrqxikswnG}v=7`9 z7V$-QYGkI`_U0XePVCB18DD5u`67>W^5}d549wRgy72$mtd?1S^MD*?H!%2yjB=Co zNgV%i#kvB**}o-BSZd`f`y0nB$Di1455OvenjP}S-cHJO($A$qdA;nW{oHY|Gw1c~ zb3cw#pQ>NF;~t8Qc*C~5#_wbnq6c;9e*!9+S-_Mu<4=uIhhD{>nRUA?-lZt zns_J-JtWBCitXuuv@hC)ZC@?a^}AGjx&T|jU(Vz--{a1T5ui3;56Od-dzOk$;Cp|l z(`;Q_Z&wZ8SVxJDF~>WW1D?B8Ycx-9N5>Ob_xXL0U;Y|jD5^Th6wK1h{YIO1Dt@&1 zx-X;Er|e%H9GxDQ_x)WuWu0uR{dfnLG=3$bX@Jk&4{166IR@HzQ;#c)dcZphV|yaA z$U#GALD4rPyeX$nu9Cy1MYrah;Y(07{MbB&;Owi9;`p1bU*Z^qSywA@e~cIn+~!R} zF;Mf#YRqC7MM$hi$NhHa?HfpmjgtdR;;^Hdh&k`g(P>Zkg!Z$HHpkwzX4wjmk1bPLfTDAK^&UbyA1T6pmEF)r2K%0HqF`~PZ#`%xp%^2 zt~*UZk;-eOngnW-G#J29IWwnKgd9D0tp}W&4&5=?{FJ`M0g^VG@B@%x0sb=()VA(8o?^=Kv>{8jX2Cc1xg)Ed%{u-0lt9c+6aZD*Zz9rfCuq#FX*3St^q0q0p{mDGnb zS9^3lj93zBO@bQwRDH=1O{PJBSOp|GL}IDWZ@7wkMA`G zSYTxfdCh+<3M>|M=_tkZ?CyxmRhbQeO-F;kJkhX@PsDj?n5xVkQCiW8;P3L;4s;EwfBA=DMY~OY6%J_i>q3ARO7A||_bcTwOOg;sABxk@;7y=z? 
zHi6sjcn5;tLB}9q|5{>;=+U|;WS@=@-kV8WdQbuwJm-L=0#wwUdr~(6JQIvyFH*;f z@HtB-keyyWQoo*L@J+qp2;T5BIQ(!!wfF;=9o;8S;?*jO9u#N`x6PTh;D0u|Qlp1g z2K(xI?p^&hH)|Uqx5Gn&lI-jdRTISeCpwCZtm~KkmJ)b1r4vZ3X z$NiAu* zRSD7K8&Fey`pkdo#osOFmp_40AkiU44IoN8ANsJ5eG(M_1pe*?j6bTtf1k><4p# z;&`Ni0DPOz6Vy#)!mg7umpBZz<)1T57GiBakab1If2yE#D#K&0B>llYJO38((@!au zgFFQ-@#39`8(pE$xoLchS-r~R>IgQjQh_YZm~J;hzcbi?{dywwTMNUORjOiJ?%4A@ zIlJ=#S8&yY;(qPV+RgaZb+(?G&BoKVK=h z0lYd2_)5HsHQ4M-mJ1TDAGi+pY&=jW$`Wg&@R7_A+HQnduS{n1yXhlRczkBOFqiZK zW!=His9{@siO|&a&f5ZM4w_dlWCh^jAGl8-tIFC zE_d-ka~RKD)*fr8%aRCUGxWzO>Jzq8M5O4q5Q=10W%oII>`{1Ac_f`RZYGZuF)ZbD z32&y$cT~IfPvUuX9MnvgZqEeZVrF&19WN(z85=LB3GA`Q<7h@qWEHS{L3-@k7m>^A z>&Nc`m%1T@kdLfokIxtgab8JixdV7wN4vlB6&IwtNv|0&p{=qk`4+Hy2rT8wUI>dX z=e0fMB53knj~R2cN=79wGW<9K(bE8+oGx*11Xy$$4nvM41~ddY$$x35uk^8wLV`za z&?#}Cmz3Zci5*hYHkQZNB#n0Rp?}7ezD~QA3b#!=&*i)92@YhFNK~ zAIw)Brn{Fgy9(j2qMLUa-L|;4#jUcuxANnuSf_6It>UR3uD$l`81SPNwUnrB+5|vI z8O7%dIa?wU!5*4BF5cTN)+Ek@ilXbQ7@k#ssBq4qs0=YCiKud@@j^gi* z_J8!R8eSFm2JW}~XX$qC>FdzV!n)0vjs1sSA&bX8YzwRrEF8^9i`rWy>h9F>H7MrB z0SnWVTb|!}(cSYybG~~SdCVc?bLEj2iTg%H4%YpUz`S^tgDk}KhN{AIh|{BZ6bBSZ zDE5<^xEe0BD1$*GAH4=%>4!KdGL;>&+RY8G2Ia@}dA|C~LwUoH!)_sOdRxFEINp(f z@lf}{eB)Xn7JCXNqc?rHvI~W%t1r*&%8tjY9eEVL&9y%SI#V7zw53CB7>WI`rq;;Id-h+HjdK=O0D;RcozLiEF4(J7^(x74^Kf>QBixt>YN`P zMVv{=WCs_bHZVSWQ3$%B460=-kD3@>c6o7O!_j3r#JWHwlu&w zK=M9i30haa4sYI`1@Z3*SZCnyDtQutqF*b=+6JxN-0#alZ(32Esx| z6q(#0BZKPR)y3zP>Oc;(32NNhdYB_&D^R%J7Qyr#t)DkRgh}a@sNZ4E)G6!~a2E_J z`HEnFD6q~tjUFma+@^>?cdfud*eVUIY)-eLmAi!mo=Ym*dv^@{=e2EM{Jf6dP@UR4 zWT871`1u$542nu{HDulXj7bJ$Jq+xc;xsr7dHh35syl!UPC256>iEi&Fy|daulK+8 zE^qmQC`2L!)Jic-*)X%?{ilwY-8oM=>RfOC!1p(Km(tuNEo! zyMELUla_G zi%bsU_$J`Z&s1F_HU@uTUqRKQM$-k&lGnENd>ke#a-B+-bT2aX4Ot5G-v^V#@qhhm zP&<18#g5o4KA#krDKz+rB5St9wR7!X>FyU%To!lQVvk_6cP|d`N(^WFtrnofDM@)hEnBGe&jm(9_7tri z+dVGe{T`FPQ^Y>i|Blua16fZzbgu2u#Xb1vc#9>@F?Y;@TjPcF|LCYhcdCWIMR+y8 zQ#1MNl|SAMt_09CztGTQqRJsbV4ftKasbhXTL9Mj9~?Y#dnGcF^hMQez;zVz@JK}T z{@Fe%PcGq{+ZQo6+F=+EP7Q9bK-gj$`$TRiBAWI#x-&*0&aG z)-dzTeq!Hy-+f*4^Yf}lBZB^m5+-hIf;btrR%lYcokv;)Cnn(}DBOo&+StV)L4#Wj zhIuXNUHPo<-~ME3085M;e(M)qy*tCwT7L$-c|Vbl}Pq;Yg$zq!GMYOD-<%_r|{ zrKv4}$j14co_V_6#m~{4-3md@?i$;7+SnATl^MzCwpJc&71Cj=2s}QIoB}c09``wj zyXr-J+?PBYxln&A-}LM{x(uXRJ9Ugb*7zP3Ql#&Yt}B32OGcCY!KVp(M|KQZJR-@g zEp`rz{3VP{>XiwMz&bLhE~Jyv0CJ?cM8t$!aTD*ku!d5Sqv5&`Jmfy=dRW{mi{J2F znIwTkn9(UW_<#bVi7-{rsrRk4mP{`Y4q&T`cDm z1Z@P4Cm*2RmzY2MQ4T?(j-e+8rk=$7)x{AcD2L?eH8QWycR3f#EJN|IP?wU7u*O}JPZmt8C!xEO_0Jh-M~B5~*=5SUGCH^9wyk#3)E3zMn{8RMDB zdkmx{wq|F@zzN#-hf8a#3x@O#Lau{_$Atc}8+ET`xQP3s)UzS>r)IXCQhGp8@)S_U zwB_0)OBVtc7HHfCgo@`1O+=0^3ZiKR9c^wwe^bSjuF#>v zopU44p1JBV|_YS zw7PlM$l4hXUZY5-Bj1R?31&bFVwrL=YIcEBPw zzW)*v7pcv|?>d8oA8dQGTOo=RnR|bmW*+$%*qQ3ha}OFuc0E=&>Yn|8p9F5HZu3@( zJpd`YKoYuj@3h3R{(af>fM(17V`f5`A)7(D_aQDjRHb~IX#uY8P7tFCXNBE;8KMq0Km*X zM&lpFy}mC)EhY2$@Mmy~QRgjjJt30BV5Dl-CPNOq)}-k+(6hG0Mu!f2JSMwHUEiPQ zt5ExeqR=?`tPm!Z=~C!uyOkBt;%YW0ZP|fTyzCRGty4A-oOES(&;U@_$^nnPT(w!x zu&BCmVlJC(AEiM6uGXYu3Lk$223JyXlfX4jCH;=KI2N(X< zzm@gztD5w)SgZkXKCpjr5)dU|e*X+&J;+Khq>4#OhI;4L*XhLqgm@rZj!-%1vh2## z`G#yoTb?aTy_Yhj5AXk&QKfSsYrf)%i4sHv2GTm7_J?h#}-9fL>vJL*TGrU-R=Amq*fjvnZ*^e*`lK zYDD^&uD%YrlA3a1l9st@g;CW#7{6 z=WSJDx>_Hk^W{#y-`+9&E$eh`!Rl>@ZT1%8e|84#dt3PGq!NBy{SC>+C6R>gdW+|FAA;?Eg zS6Cd~tc9)Tif^lGoSwv2bUpuviY|jh04AVrV^sW&***-xx@=$eNdW+;`BXU#f%1Yr zrzL8Q-8q2OJHb2ns0yiZGoeU>g2wr?SLcNEM65Jc%QBh2GsRR^oGW9oz9^aNTa*I>JfN_>lex(C^y ztd;^1w967^i$a+SP@=aG6>0=TOFnM>5Nmvc)WJzaEGIj!7-?59uik#NQu9qbhef%M z9pOBBo*j9#>4w`@@}rTT`S61V@5%xTmVK?!RQmd^}#&$Xfi)@a|Nk 
z%uW#(3xd>)dd#byyUQ49`}ckgh6nk$>;HJ@&*11lC#7qS%aNZ}1WwCR!t%U~^MsI9 z-ngrJFTs%$zr*U371d-C?dHOG{L(b6VgcLIwyES3);K_SiaR_+)9aq8@)fWJd@JIP z!!lbga4{v+Fvu1TdySba91`r$O>kxm>1j6?6}(~)mVpDJFYkYZ9iLF6;C3TdCCcyg rn>KA)Tl3u3c^ux5AP(K;O>H4bjtQT>0zCYtO_CR6&nKPLy8HhCAdwb} literal 0 HcmV?d00001 diff --git a/js/vibevoice_wrapper_ui.js b/js/vibevoice_wrapper_ui.js new file mode 100644 index 0000000..f6deb86 --- /dev/null +++ b/js/vibevoice_wrapper_ui.js @@ -0,0 +1,107 @@ +// custom_nodes/YourPkg/js/vibevoice_wrapper_ui.js +import { app } from "../../scripts/app.js"; + +app.registerExtension({ + name: "vibevoice.wrapper.ui", + + async beforeRegisterNodeDef(nodeType, nodeData) { + const isWrapper = + nodeType?.comfyClass === "VibeVoiceTTS_Wrapper" || + nodeData?.name === "VibeVoice TTS (Chunked Wrapper)"; + if (!isWrapper) return; + + const origOnCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + origOnCreated?.apply(this, arguments); + // only set up handlers here; do NOT mutate slots yet + wireUpHandlers(this); + }; + + function wireUpHandlers(node) { + const findW = (n) => node.widgets?.find((w) => w.name === n); + const wNum = findW("num_speakers"); + const wChunk = findW("chunk_lines"); + const wLines = findW("lines_per_chunk"); + + function ensureSpeakerInputs(count) { + // add missing inputs + for (let i = 1; i <= count; i++) { + const name = `speaker_${i}_voice`; + if (node.findInputSlot(name) === -1) node.addInput(name, "AUDIO"); + } + // remove extras + for (let i = count + 1; i <= 4; i++) { + const name = `speaker_${i}_voice`; + const idx = node.findInputSlot(name); + if (idx !== -1) node.removeInput(idx); + } + } + + // guard: only mutate once node.graph exists (prevents NullGraphError) + function safeMutate(fn) { + const doIt = () => { + if (!node.graph) { + // defer until the node is actually attached to a graph + setTimeout(doIt, 0); + return; + } + fn(); + app.graph.setDirtyCanvas(true, true); + }; + doIt(); + } + + function refresh() { + const n = Math.max(1, Math.min(4, Number(wNum?.value ?? 
1))); + safeMutate(() => ensureSpeakerInputs(n)); + + if (wLines) wLines.hidden = !(wChunk?.value); + } + + // robust wiring (some frontends only call one of these) + if (wNum) { wNum.callback = refresh; wNum.onChange = refresh; } + if (wChunk) { wChunk.callback = refresh; wChunk.onChange = refresh; } + + // don’t call refresh yet; node may not be in graph during configure + node.__vv_refresh = refresh; + } + }, + + // Called for brand-new nodes added from the menu (node has a graph here) + async nodeCreated(node) { + if ( + node?.comfyClass === "VibeVoiceTTS_Wrapper" || + node?.title === "VibeVoice TTS (Chunked Wrapper)" + ) { + // next tick to ensure widgets fully exist + setTimeout(() => node.__vv_refresh?.(), 0); + } + }, + + // Called when nodes are created as part of loading a workflow + loadedGraphNode(node) { + if ( + node?.comfyClass === "VibeVoiceTTS_Wrapper" || + node?.title === "VibeVoice TTS (Chunked Wrapper)" + ) { + setTimeout(() => node.__vv_refresh?.(), 0); + } + }, + + // After the graph finishes configuring (safe point to mutate slots) + async afterConfigureGraph() { + // final pass in case anything was deferred + for (const node of app.graph._nodes) { + if ( + node?.comfyClass === "VibeVoiceTTS_Wrapper" || + node?.title === "VibeVoice TTS (Chunked Wrapper)" + ) { + node.__vv_refresh?.(); + } + } + }, + + async setup() { + console.log("[vibevoice.wrapper.ui] setup complete"); + }, +}); diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..cd691aa --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,23 @@ +[project] +name = "ComfyUI-VibeVoice" +description = "VibeVoice TTS. Expressive, long-form, multi-speaker conversational audio" +version = "1.2.0" +license = {file = "LICENSE"} +dependencies = ["torch", "torchaudio", "librosa", "numpy", "huggingface_hub", "einops", "scipy", "tokenizers", "soundfile", "s3tokenizer", "tqdm", "conformer", "safetensors", "transformers", "diffusers", "bitsandbytes"] + +[project.urls] +Repository = "https://github.com/wildminder/ComfyUI-VibeVoice" +# Used by Comfy Registry https://comfyregistry.org + +[tool.comfy] +PublisherId = "wildai" +DisplayName = "ComfyUI-VibeVoice" +Icon = "" + + + + + + + + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..5038365 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,17 @@ +torch +accelerate +torchaudio +librosa +numpy +huggingface_hub +einops +scipy +tokenizers +soundfile +s3tokenizer +conformer +safetensors +transformers +diffusers +tqdm +bitsandbytes diff --git a/vibevoice/__init__.py b/vibevoice/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibevoice/configs/qwen2.5_1.5b_64k.json b/vibevoice/configs/qwen2.5_1.5b_64k.json new file mode 100644 index 0000000..febd05c --- /dev/null +++ b/vibevoice/configs/qwen2.5_1.5b_64k.json @@ -0,0 +1,112 @@ +{ + "_attn_implementation_autoset": true, + "acoustic_vae_dim": 64, + "acoustic_tokenizer_config": { + "causal": true, + "channels": 1, + "conv_bias": true, + "conv_norm": "none", + "corpus_normalize": 0.0, + "decoder_depths": null, + "decoder_n_filters": 32, + "decoder_ratios": [ + 8, + 5, + 5, + 4, + 2, + 2 + ], + "disable_last_norm": true, + "encoder_depths": "3-3-3-3-3-3-8", + "encoder_n_filters": 32, + "encoder_ratios": [ + 8, + 5, + 5, + 4, + 2, + 2 + ], + "fix_std": 0.5, + "layer_scale_init_value": 1e-06, + "layernorm": "RMSNorm", + "layernorm_elementwise_affine": true, + "layernorm_eps": 1e-05, + "mixer_layer": "depthwise_conv", + "model_type": "vibepod_acoustic_tokenizer", + 
"pad_mode": "constant", + "std_dist_type": "gaussian", + "vae_dim": 64, + "weight_init_value": 0.01 + }, + "decoder_config": { + "attention_dropout": 0.0, + "hidden_act": "silu", + "hidden_size": 1536, + "initializer_range": 0.02, + "intermediate_size": 8960, + "max_position_embeddings": 65536, + "max_window_layers": 28, + "model_type": "qwen2", + "num_attention_heads": 12, + "num_hidden_layers": 28, + "num_key_value_heads": 2, + "rms_norm_eps": 1e-06, + "rope_scaling": null, + "rope_theta": 1000000.0, + "sliding_window": null, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_cache": true, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "diffusion_head_config": { + "ddpm_batch_mul": 4, + "ddpm_beta_schedule": "cosine", + "ddpm_num_inference_steps": 20, + "ddpm_num_steps": 1000, + "diffusion_type": "ddpm", + "head_ffn_ratio": 3.0, + "head_layers": 4, + "hidden_size": 1536, + "latent_size": 64, + "model_type": "vibepod_diffusion_head", + "prediction_type": "v_prediction", + "rms_norm_eps": 1e-05, + "speech_vae_dim": 64 + }, + "model_type": "vibepod", + "semantic_tokenizer_config": { + "causal": true, + "channels": 1, + "conv_bias": true, + "conv_norm": "none", + "corpus_normalize": 0.0, + "disable_last_norm": true, + "encoder_depths": "3-3-3-3-3-3-8", + "encoder_n_filters": 32, + "encoder_ratios": [ + 8, + 5, + 5, + 4, + 2, + 2 + ], + "fix_std": 0, + "layer_scale_init_value": 1e-06, + "layernorm": "RMSNorm", + "layernorm_elementwise_affine": true, + "layernorm_eps": 1e-05, + "mixer_layer": "depthwise_conv", + "model_type": "vibepod_semantic_tokenizer", + "pad_mode": "constant", + "std_dist_type": "none", + "vae_dim": 128, + "weight_init_value": 0.01 + }, + "semantic_vae_dim": 128, + "torch_dtype": "bfloat16" +} diff --git a/vibevoice/configs/qwen2.5_7b_32k.json b/vibevoice/configs/qwen2.5_7b_32k.json new file mode 100644 index 0000000..d39952c --- /dev/null +++ b/vibevoice/configs/qwen2.5_7b_32k.json @@ -0,0 +1,113 @@ +{ + "_attn_implementation_autoset": true, + "acoustic_vae_dim": 64, + "acoustic_tokenizer_config": { + "causal": true, + "channels": 1, + "conv_bias": true, + "conv_norm": "none", + "corpus_normalize": 0.0, + "decoder_depths": null, + "decoder_n_filters": 32, + "decoder_ratios": [ + 8, + 5, + 5, + 4, + 2, + 2 + ], + "disable_last_norm": true, + "encoder_depths": "3-3-3-3-3-3-8", + "encoder_n_filters": 32, + "encoder_ratios": [ + 8, + 5, + 5, + 4, + 2, + 2 + ], + "fix_std": 0.5, + "layer_scale_init_value": 1e-06, + "layernorm": "RMSNorm", + "layernorm_elementwise_affine": true, + "layernorm_eps": 1e-05, + "mixer_layer": "depthwise_conv", + "model_type": "vibepod_acoustic_tokenizer", + "pad_mode": "constant", + "std_dist_type": "gaussian", + "vae_dim": 64, + "weight_init_value": 0.01 + }, + "decoder_config": { + "attention_dropout": 0.0, + "hidden_act": "silu", + "hidden_size": 3584, + "initializer_range": 0.02, + "intermediate_size": 18944, + "max_position_embeddings": 32768, + "max_window_layers": 28, + "model_type": "qwen2", + "num_attention_heads": 28, + "num_hidden_layers": 28, + "num_key_value_heads": 4, + "rms_norm_eps": 1e-06, + "rope_theta": 1000000.0, + "sliding_window": null, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "use_cache": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 152064 + }, + "diffusion_head_config": { + "ddpm_batch_mul": 4, + "ddpm_beta_schedule": "cosine", + "ddpm_num_inference_steps": 20, + "ddpm_num_steps": 1000, + "diffusion_type": 
"ddpm", + "head_ffn_ratio": 3.0, + "head_layers": 4, + "hidden_size": 3584, + "latent_size": 64, + "model_type": "vibepod_diffusion_head", + "prediction_type": "v_prediction", + "rms_norm_eps": 1e-05, + "speech_vae_dim": 64 + }, + "model_type": "vibepod", + "semantic_tokenizer_config": { + "causal": true, + "channels": 1, + "conv_bias": true, + "conv_norm": "none", + "corpus_normalize": 0.0, + "disable_last_norm": true, + "encoder_depths": "3-3-3-3-3-3-8", + "encoder_n_filters": 32, + "encoder_ratios": [ + 8, + 5, + 5, + 4, + 2, + 2 + ], + "fix_std": 0, + "layer_scale_init_value": 1e-06, + "layernorm": "RMSNorm", + "layernorm_elementwise_affine": true, + "layernorm_eps": 1e-05, + "mixer_layer": "depthwise_conv", + "model_type": "vibepod_semantic_tokenizer", + "pad_mode": "constant", + "std_dist_type": "none", + "vae_dim": 128, + "weight_init_value": 0.01 + }, + "semantic_vae_dim": 128, + "torch_dtype": "bfloat16" +} diff --git a/vibevoice/modular/__init__.py b/vibevoice/modular/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibevoice/modular/configuration_vibevoice.py b/vibevoice/modular/configuration_vibevoice.py new file mode 100644 index 0000000..fcffcb9 --- /dev/null +++ b/vibevoice/modular/configuration_vibevoice.py @@ -0,0 +1,248 @@ +""" VibeVoice_AcousticTokenizer model configuration""" + +from typing import Dict, List, Optional, Tuple + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +from transformers.models.qwen2.configuration_qwen2 import Qwen2Config + +logger = logging.get_logger(__name__) + + +class VibeVoiceAcousticTokenizerConfig(PretrainedConfig): + model_type = "vibevoice_acoustic_tokenizer" + + def __init__( + self, + channels: int = 1, + corpus_normalize: float = 0.0, + causal: bool = True, + vae_dim: int = 64, + fix_std: float = 0.5, + std_dist_type: str = 'gaussian', + # common + mixer_layer: str = 'depthwise_conv', + conv_norm: str = 'none', + pad_mode: str = 'constant', + disable_last_norm: bool = True, + layernorm: str = 'RMSNorm', + layernorm_eps: float = 1e-5, + layernorm_elementwise_affine: bool = True, + conv_bias: bool = True, + layer_scale_init_value: float = 1e-6, + weight_init_value: float = 1e-2, + # encoder specific + encoder_n_filters: int = 32, + encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2], + encoder_depths: str = "3-3-3-3-3-3-8", + # decoder specific + decoder_n_filters: int = 32, + decoder_ratios: Optional[List[int]] = None, # if None, same as encoder + decoder_depths: Optional[str] = None, + **kwargs + ): + super().__init__(**kwargs) + self.channels = channels + self.corpus_normalize = corpus_normalize + self.causal = causal + self.vae_dim = vae_dim + self.fix_std = fix_std + self.std_dist_type = std_dist_type + + # common parameters + self.conv_norm = conv_norm + self.pad_mode = pad_mode + self.layernorm_eps = layernorm_eps + self.disable_last_norm = disable_last_norm + self.layernorm = layernorm + self.layernorm_elementwise_affine = layernorm_elementwise_affine + self.conv_bias = conv_bias + self.layer_scale_init_value = layer_scale_init_value + self.weight_init_value = weight_init_value + self.mixer_layer = mixer_layer + + # encoder specific parameters + self.encoder_n_filters = encoder_n_filters + self.encoder_ratios = encoder_ratios + self.encoder_depths = encoder_depths + + # decoder specific parameters + self.decoder_ratios = decoder_ratios if decoder_ratios is not None else encoder_ratios + self.decoder_n_filters = decoder_n_filters + self.decoder_depths = 
decoder_depths + + +class VibeVoiceSemanticTokenizerConfig(PretrainedConfig): + model_type = "vibevoice_semantic_tokenizer" + + def __init__( + self, + channels: int = 1, + corpus_normalize: float = 0.0, + causal: bool = True, + vae_dim: int = 64, + fix_std: float = 0, + std_dist_type: str = 'none', + # common + mixer_layer: str = 'depthwise_conv', + conv_norm: str = 'none', + pad_mode: str = 'constant', + disable_last_norm: bool = True, + layernorm: str = 'RMSNorm', + layernorm_eps: float = 1e-5, + layernorm_elementwise_affine: bool = True, + conv_bias: bool = True, + layer_scale_init_value: float = 1e-6, + weight_init_value: float = 1e-2, + # encoder specific + encoder_n_filters: int = 32, + encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2], + encoder_depths: str = "3-3-3-3-3-3-8", + **kwargs + ): + super().__init__(**kwargs) + self.channels = channels + self.corpus_normalize = corpus_normalize + self.causal = causal + self.vae_dim = vae_dim + self.fix_std = fix_std + self.std_dist_type = std_dist_type + + # common parameters + self.conv_norm = conv_norm + self.pad_mode = pad_mode + self.layernorm_eps = layernorm_eps + self.disable_last_norm = disable_last_norm + self.layernorm = layernorm + self.layernorm_elementwise_affine = layernorm_elementwise_affine + self.conv_bias = conv_bias + self.layer_scale_init_value = layer_scale_init_value + self.weight_init_value = weight_init_value + self.mixer_layer = mixer_layer + + # encoder specific parameters + self.encoder_n_filters = encoder_n_filters + self.encoder_ratios = encoder_ratios + self.encoder_depths = encoder_depths + + +class VibeVoiceDiffusionHeadConfig(PretrainedConfig): + model_type = "vibevoice_diffusion_head" + + def __init__( + self, + hidden_size=768, + head_layers=4, + head_ffn_ratio=3.0, + rms_norm_eps=1e-5, + latent_size=64, + speech_vae_dim=None, + prediction_type="v_prediction", + diffusion_type="ddpm", + ddpm_num_steps=1000, + ddpm_num_inference_steps=20, + ddpm_beta_schedule="cosine", + ddpm_batch_mul=4, + **kwargs + ): + self.hidden_size = hidden_size + self.head_layers = head_layers + self.head_ffn_ratio = head_ffn_ratio + self.rms_norm_eps = rms_norm_eps + self.latent_size = latent_size + self.speech_vae_dim = speech_vae_dim + self.prediction_type = prediction_type + self.diffusion_type = diffusion_type + self.ddpm_num_steps = ddpm_num_steps + self.ddpm_num_inference_steps = ddpm_num_inference_steps + self.ddpm_beta_schedule = ddpm_beta_schedule + self.ddpm_batch_mul = ddpm_batch_mul + + super().__init__(**kwargs) + +class VibeVoiceConfig(PretrainedConfig): + model_type = "vibevoice" + is_composition = True + sub_configs = { + "acoustic_tokenizer_config": VibeVoiceAcousticTokenizerConfig, + "semantic_tokenizer_config": VibeVoiceSemanticTokenizerConfig, + "decoder_config": Qwen2Config, + "diffusion_head_config": VibeVoiceDiffusionHeadConfig, + } + # keys_to_ignore_at_inference = ["past_key_values"] + # Default tensor parallel plan for base model `Qwen2` + base_model_tp_plan = { + "layers.*.self_attn.q_proj": "colwise", + "layers.*.self_attn.k_proj": "colwise", + "layers.*.self_attn.v_proj": "colwise", + "layers.*.self_attn.o_proj": "rowwise", + "layers.*.mlp.gate_proj": "colwise", + "layers.*.mlp.up_proj": "colwise", + "layers.*.mlp.down_proj": "rowwise", + } + + def __init__( + self, + acoustic_tokenizer_config=None, + semantic_tokenizer_config=None, + decoder_config=None, + diffusion_head_config=None, + **kwargs + ): + + # kwargs["_attn_implementation"] = "flash_attention_2" + kwargs["_attn_implementation_autoset"] 
= False + + if acoustic_tokenizer_config is None: + self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"]() + elif isinstance(acoustic_tokenizer_config, dict): + acoustic_tokenizer_config["model_type"] = "vibevoice_acoustic_tokenizer" + self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"](**acoustic_tokenizer_config) + elif isinstance(acoustic_tokenizer_config, VibeVoiceAcousticTokenizerConfig): + # If an instance of the config class is provided + self.acoustic_tokenizer_config = acoustic_tokenizer_config + + if semantic_tokenizer_config is None: + self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"]() + elif isinstance(semantic_tokenizer_config, dict): + semantic_tokenizer_config["model_type"] = "vibevoice_semantic_tokenizer" + self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"](**semantic_tokenizer_config) + elif isinstance(semantic_tokenizer_config, VibeVoiceSemanticTokenizerConfig): + # If an instance of the config class is provided + self.semantic_tokenizer_config = semantic_tokenizer_config + + if decoder_config is None: + self.decoder_config = self.sub_configs["decoder_config"]() + elif isinstance(decoder_config, dict): + # If a dictionary is provided, instantiate the config class with it + # self.decoder_config = self.sub_configs["decoder_config"](**decoder_config) + if decoder_config.get("model_type", '') == "qwen2": + self.decoder_config = Qwen2Config(**decoder_config) + else: + raise ValueError(f"Unsupported decoder model type: {decoder_config.get('model_type', '')}") + elif isinstance(decoder_config, (Qwen2Config,)): + # If an instance of the config class is provided + self.decoder_config = decoder_config + + if diffusion_head_config is None: + self.diffusion_head_config = self.sub_configs["diffusion_head_config"]() + elif isinstance(diffusion_head_config, dict): + diffusion_head_config["model_type"] = "vibevoice_diffusion_head" + self.diffusion_head_config = self.sub_configs["diffusion_head_config"](**diffusion_head_config) + elif isinstance(diffusion_head_config, VibeVoiceDiffusionHeadConfig): + # If an instance of the config class is provided + self.diffusion_head_config = diffusion_head_config + + # other parameters + self.acoustic_vae_dim = getattr(self.acoustic_tokenizer_config, 'vae_dim', 64) + self.semantic_vae_dim = getattr(self.semantic_tokenizer_config, 'vae_dim', 128) + + super().__init__(**kwargs) + +__all__ = [ + "VibeVoiceAcousticTokenizerConfig", + "VibeVoiceSemanticTokenizerConfig", + "VibeVoiceDiffusionHeadConfig", + "VibeVoiceConfig" +] \ No newline at end of file diff --git a/vibevoice/modular/modeling_vibevoice.py b/vibevoice/modular/modeling_vibevoice.py new file mode 100644 index 0000000..016a389 --- /dev/null +++ b/vibevoice/modular/modeling_vibevoice.py @@ -0,0 +1,488 @@ +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union, Callable +from tqdm import tqdm +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.distributed as dist + +from transformers.models.auto import AutoModel, AutoModelForCausalLM + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import CausalLMOutput, BaseModelOutputWithPast, ModelOutput +from transformers.models.llama.modeling_llama import LlamaRMSNorm +from transformers import modeling_utils +from transformers.modeling_utils import PreTrainedModel +from transformers.modeling_flash_attention_utils import FlashAttentionKwargs +from 
transformers.utils import logging + + +from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel +from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead +from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler + +from .configuration_vibevoice import VibeVoiceConfig + + +logger = logging.get_logger(__name__) + +if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None: + modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"] + +@dataclass +class VibeVoiceCausalLMOutputWithPast(ModelOutput): + loss: Optional[torch.FloatTensor] = None + diffusion_loss: Optional[torch.FloatTensor] = None + speech_token_num: Optional[int] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class VibeVoiceGenerationOutput(ModelOutput): + """ + Output type for VibeVoice generation. + + Args: + sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The generated sequences. + speech_outputs (`List[torch.FloatTensor]`, *optional*): + List of generated speech waveforms or latents for each speech segment. + """ + sequences: torch.LongTensor = None + speech_outputs: Optional[List[torch.FloatTensor]] = None + + +class SpeechConnector(nn.Module): + def __init__(self, input_dim, output_dim): + super().__init__() + self.fc1 = nn.Linear(input_dim, output_dim) + self.norm = LlamaRMSNorm(output_dim, eps=1e-6) + self.fc2 = nn.Linear(output_dim, output_dim) + + def forward(self, features, **kwargs): + x = self.fc1(features) + x = self.norm(x) + x = self.fc2(x) + return x + + +# @auto_docstring +class VibeVoicePreTrainedModel(PreTrainedModel): + config_class = VibeVoiceConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _skip_keys_device_placement = "past_key_values" + _supports_cache_class = True + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_quantized_cache = True + _supports_static_cache = True + _supports_attention_backend = True + + def _init_weights(self, module): + if isinstance(module, VibeVoiceDiffusionHead): + module.initialize_weights() + return + + # Use the language model's initializer_range if available + if hasattr(self.config, 'language_model_config') and hasattr(self.config.language_model_config, 'initializer_range'): + std = self.config.language_model_config.initializer_range + elif hasattr(self.config, 'decoder_config') and hasattr(self.config.decoder_config, 'initializer_range'): + std = self.config.decoder_config.initializer_range + else: + std = 0.02 # Default value + + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.weight.data.fill_(1.0) + module.bias.data.zero_() + +# @auto_docstring +class VibeVoiceModel(VibeVoicePreTrainedModel): + def __init__(self, config): + super().__init__(config) + + if hasattr(config, 'torch_dtype') and config.torch_dtype is not None: + if isinstance(config.torch_dtype, str): + dtype = getattr(torch, config.torch_dtype) + else: + dtype = config.torch_dtype + else: + dtype = torch.float32 + + # Initialize Qwen2 model for language modeling + lm_config = config.decoder_config + self.language_model = 
AutoModel.from_config(lm_config) + + # Initialize speech components if needed + self.acoustic_tokenizer = AutoModel.from_config(config.acoustic_tokenizer_config).to(dtype) + self.semantic_tokenizer = AutoModel.from_config(config.semantic_tokenizer_config).to(dtype) + + self.acoustic_connector = SpeechConnector(config.acoustic_vae_dim, lm_config.hidden_size).to(dtype) + self.semantic_connector = SpeechConnector(config.semantic_vae_dim, lm_config.hidden_size).to(dtype) + + # Register scaling factors as buffers - use 1D tensors for FSDP compatibility + self.register_buffer('speech_scaling_factor', torch.tensor(float('nan'))) + self.register_buffer('speech_bias_factor', torch.tensor(float('nan'))) + + # Initialize prediction head for speech generation + self.prediction_head = AutoModel.from_config(config.diffusion_head_config).to(dtype) + + # Initialize noise scheduler + self.noise_scheduler = DPMSolverMultistepScheduler( + num_train_timesteps=config.diffusion_head_config.ddpm_num_steps, + beta_schedule=config.diffusion_head_config.ddpm_beta_schedule, + prediction_type=config.diffusion_head_config.prediction_type + ) + + def get_input_embeddings(self): + if hasattr(self.language_model, 'embed_tokens'): + # If the language model has an embed_tokens attribute, return it + return self.language_model.embed_tokens + + for name, attr in self.language_model.fullmap.items(): # parallel by nnscaler, the name is changed + if attr.orig_name == 'embed_tokens.weight': + return getattr(self.language_model, name) + assert False, 'should not arrive here' + + def set_input_embeddings(self, value): + self.language_model.embed_tokens = value + + def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None): + """Set the speech tokenizers used for encoding and decoding speech.""" + self.acoustic_tokenizer = acoustic_tokenizer + self.semantic_tokenizer = semantic_tokenizer + + # Reset the encoder to evaluation mode + if self.acoustic_tokenizer is not None: + self.acoustic_tokenizer.eval() + + if self.semantic_tokenizer is not None: + self.semantic_tokenizer.eval() + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Union[Tuple, BaseModelOutputWithPast]: + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # Forward through language model + outputs = self.language_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + **kwargs, + ) + + if not return_dict: + return outputs + + return BaseModelOutputWithPast( + last_hidden_state=outputs.last_hidden_state, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class VibeVoiceForConditionalGeneration(VibeVoicePreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + _tp_plan = {"lm_head": "colwise_rep"} + + 
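+    # Training-time wrapper: VibeVoiceModel plus a text LM head; forward() below adds the
+    # diffusion (speech) loss, while the masked CE loss is computed in the training script.
+    # Illustrative construction (sketch, defaults only; real weights come via from_pretrained
+    # on a converted checkpoint):
+    #     model = VibeVoiceForConditionalGeneration(VibeVoiceConfig())
+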
def __init__(self, config): + super().__init__(config) + self.model = VibeVoiceModel(config) + self.vocab_size = config.decoder_config.vocab_size + self.lm_head = nn.Linear(config.decoder_config.hidden_size, self.vocab_size, bias=False) + + self.post_init() + + def get_input_embeddings(self): + return self.model.get_input_embeddings() + + def set_input_embeddings(self, value): + self.model.set_input_embeddings(value) + + def get_output_embeddings(self): + return self.lm_head + + def set_decoder(self, decoder): + self.model.language_model = decoder + + def get_decoder(self): + return self.model.language_model + + def tie_weights(self): + """ + Tie the weights between the input embeddings and the output embeddings. + """ + if getattr(self.config.decoder_config, 'tie_word_embeddings', False): + # The standard PreTrainedModel method will handle the tying. + # It typically does a simple parameter object assignment, which is + # CORRECT to do BEFORE FSDP wraps the model. + output_embeddings = self.get_output_embeddings() + input_embeddings = self.get_input_embeddings() + if hasattr(input_embeddings, 'weight'): + output_embeddings.weight = input_embeddings.weight + else: + # maybe returned input_embeddings a tensor directly + output_embeddings.weight = input_embeddings + + if getattr(output_embeddings, "bias", None) is not None: + output_embeddings.bias.data = nn.functional.pad( + output_embeddings.bias.data, + (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), + "constant", + 0, + ) + print("✅ Tied input and output embeddings using standard assignment.") + else: + print("ℹ️ tie_word_embeddings is False, not tying weights.") + + # Also, ensure set_output_embeddings is safe, though your implementation looks okay. + # The key is to avoid calling it after accelerator.prepare(). + def set_output_embeddings(self, new_embeddings): + # Your current implementation using data.copy_ is good practice, + # but the best way is to not call this after prepare(). + self.lm_head = new_embeddings + + def forward_speech_features( + self, + speech_tensors=None, + speech_masks=None, + speech_type="audio", + return_unmask=False + ): + if speech_tensors is None: + # Use config to get vae_dim instead of non-existent self.args + vae_dim = self.config.acoustic_tokenizer_config.vae_dim + audio_features = torch.zeros(1, 1, vae_dim).to(self.get_input_embeddings().weight) + connect_features = self.model.acoustic_connector(audio_features) + return audio_features, connect_features + else: + with torch.no_grad(): + if speech_type == "audio": + with torch.no_grad(): + frames = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1))[0][0] + audio_tokens = frames.sample(self.model.acoustic_tokenizer.std_dist_type)[0] + + elif speech_type == "vae": + # Use config to get vae_dim instead of non-existent self.args + vae_dim = self.config.acoustic_tokenizer_config.vae_dim + speech_mode = speech_tensors.reshape(speech_tensors.size(0), -1, vae_dim) + + # gaussian sample from the speech_mode + batch_size = speech_mode.size(0) + value = self.model.acoustic_tokenizer.fix_std / 0.8 + std = torch.randn(batch_size, dtype=speech_mode.dtype, device=speech_mode.device) * value + std = std.view(-1, *[1] * (speech_mode.dim() - 1)) + audio_tokens = speech_mode + std * torch.randn(speech_mode.shape).to(speech_mode) + else: + raise NotImplementedError(f"Speech type {speech_type} not implemented") + + if torch.isnan(self.model.speech_scaling_factor) or torch.isnan(self.model.speech_bias_factor): + scaling_factor = 1. 
/ audio_tokens[speech_masks].flatten().std() + bias_factor = -audio_tokens[speech_masks].flatten().mean() + + # Only use distributed operations if the process group is initialized + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(scaling_factor, op=dist.ReduceOp.SUM) + dist.all_reduce(bias_factor, op=dist.ReduceOp.SUM) + world_size = dist.get_world_size() + self.model.speech_scaling_factor.copy_(scaling_factor / world_size) + self.model.speech_bias_factor.copy_(bias_factor / world_size) + print(f"Speech scaling factor (distributed): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True) + else: + # Single process case + self.model.speech_scaling_factor.copy_(scaling_factor) + self.model.speech_bias_factor.copy_(bias_factor) + print(f"Speech scaling factor (single process): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True) + + audio_features = (audio_tokens + self.model.speech_bias_factor) * self.model.speech_scaling_factor + + connect_features = self.model.acoustic_connector(audio_features) + if return_unmask: + return audio_features, connect_features + return audio_features[speech_masks], connect_features[speech_masks] + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + # New arguments for speech processing and loss calculation + speech_tensors: Optional[torch.FloatTensor] = None, + speech_masks: Optional[torch.BoolTensor] = None, + speeches_loss_input: Optional[torch.FloatTensor] = None, + speech_semantic_tensors: Optional[torch.FloatTensor] = None, + acoustic_input_mask: Optional[torch.BoolTensor] = None, + acoustic_loss_mask: Optional[torch.BoolTensor] = None, + ddpm_batch_mul: int = 1, + **kwargs: Optional[Dict[str, Union[torch.Tensor, str]]], + ) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]: + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + x = self.get_input_embeddings()(input_ids) + + semantic_speech_all_connect_features = self.model.semantic_connector(speech_semantic_tensors) + if speeches_loss_input is not None: + # only part audio need diffuse + speech_all_features, speech_all_connect_features = self.forward_speech_features( + speech_tensors=speech_tensors.type_as(x) if speech_tensors is not None else None, + speech_masks=speech_masks, + speech_type=kwargs.get("speech_type", "audio"), + return_unmask=True + ) + if speech_tensors is not None: + if semantic_speech_all_connect_features is not None: + x[acoustic_input_mask] = speech_all_connect_features[speech_masks] + semantic_speech_all_connect_features[speech_masks] + else: + x[acoustic_input_mask] = speech_all_connect_features[speech_masks] + speech_features = speech_all_features[speeches_loss_input.unsqueeze(-1) & speech_masks] # only part audio need diffuse + speech_connect_features = speech_all_connect_features[speeches_loss_input.unsqueeze(-1) & speech_masks] + else: + speech_features, speech_connect_features = self.forward_speech_features( + speech_tensors=speech_tensors.type_as(x) if 
speech_tensors is not None else None, + speech_masks=speech_masks, + speech_type=kwargs.get("speech_type", "audio"), + ) + if speech_tensors is not None: + x[acoustic_input_mask] = speech_connect_features + + outputs = self.model( + input_ids=None, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=x, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=False, + return_dict=return_dict, + cache_position=cache_position, + ) + + hidden_states = outputs.last_hidden_state + logits = self.lm_head(hidden_states) + # logits = logits.float() + + loss = None + if labels is not None: + # The custom CE loss with masking is calculated in the training script. + # We leave the standard loss calculation here as None. + pass + + # --- Diffusion Loss Calculation --- + diffusion_loss = None + # This block is executed only if we are in a context that involves speech. + if speech_tensors is not None and acoustic_loss_mask.sum().item() > 0: + condition_features = hidden_states[acoustic_loss_mask] + + speech_len, latent_size = speech_features.shape + + noise = torch.randn( + (speech_len * ddpm_batch_mul, latent_size), + device=hidden_states.device, + dtype=hidden_states.dtype + ) + + timesteps = torch.multinomial( + torch.ones(self.config.diffusion_head_config.ddpm_num_steps), + speech_len * ddpm_batch_mul, + replacement=True, + ).to(hidden_states.device) + + speech_features_repeated = speech_features.repeat_interleave(ddpm_batch_mul, dim=0) + condition_features_repeated = condition_features.repeat_interleave(ddpm_batch_mul, dim=0) + + noisy_speech_features = self.model.noise_scheduler.add_noise( + speech_features_repeated, noise, timesteps + ) + + model_output = self.model.prediction_head( + noisy_speech_features, + timesteps.type_as(x), + condition_features_repeated + ) + + prediction_type = self.config.diffusion_head_config.prediction_type + if prediction_type == "epsilon": + target_for_loss = noise + elif prediction_type == "v_prediction": + target_for_loss = self.model.noise_scheduler.get_velocity( + speech_features_repeated, noise, timesteps + ) + else: + raise NotImplementedError(f"Prediction type {prediction_type} not implemented") + + diffusion_loss = F.mse_loss(model_output.float(), target_for_loss.float(), reduction='sum') + if latent_size > 0 and ddpm_batch_mul > 0: + diffusion_loss = diffusion_loss / latent_size / ddpm_batch_mul + else: + diffusion_loss = torch.tensor(0.0, device=diffusion_loss.device) + + else: + # Dummy loss for DDP to work when there are no speech samples in a batch, + # but we are in a speech context. 
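+            # (summing every parameter and multiplying by 0.0 keeps the graph connected to all
+            #  prediction-head/connector weights, so DDP gradient synchronization does not stall
+            #  on ranks that received no speech samples)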
+ diffusion_loss = sum(p.sum() for p in self.model.prediction_head.parameters()) * 0.0 + diffusion_loss += sum(p.sum() for p in self.model.acoustic_connector.parameters()) * 0.0 + diffusion_loss += sum(p.sum() for p in self.model.semantic_connector.parameters()) * 0.0 + # --- End Diffusion Loss Calculation --- + + if not return_dict: + output = (logits, speech_len) + outputs.to_tuple()[1:] + return (loss, diffusion_loss) + output + + return VibeVoiceCausalLMOutputWithPast( + loss=loss, + diffusion_loss=diffusion_loss, + speech_token_num=speech_len if speech_tensors is not None else 0, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + +AutoModel.register(VibeVoiceConfig, VibeVoiceModel) +AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGeneration) + +__all__ = [ + "VibeVoiceModel", + "VibeVoicePreTrainedModel", + "VibeVoiceForConditionalGeneration", + "VibeVoiceCausalLMOutputWithPast", + "VibeVoiceGenerationOutput", +] \ No newline at end of file diff --git a/vibevoice/modular/modeling_vibevoice_inference.py b/vibevoice/modular/modeling_vibevoice_inference.py new file mode 100644 index 0000000..8dc1615 --- /dev/null +++ b/vibevoice/modular/modeling_vibevoice_inference.py @@ -0,0 +1,731 @@ +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union, Callable +from tqdm import tqdm +import torch +import torch.nn as nn + +from transformers.models.auto import AutoModel, AutoModelForCausalLM + +from transformers.generation import GenerationMixin, GenerationConfig, LogitsProcessor, LogitsProcessorList, StoppingCriteriaList +from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput +from transformers import modeling_utils +from transformers.modeling_utils import PreTrainedModel +from transformers.modeling_flash_attention_utils import FlashAttentionKwargs +from transformers.utils import logging + + +# from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel +from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceTokenizerEncoderOutput +from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead +from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler + +from .configuration_vibevoice import VibeVoiceConfig + +from .modular_vibevoice_text_tokenizer import VibeVoiceTextTokenizer, VibeVoiceTextTokenizerFast + +from .modeling_vibevoice import VibeVoiceModel, VibeVoicePreTrainedModel +from .streamer import AudioStreamer, AsyncAudioStreamer + +logger = logging.get_logger(__name__) + +if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None: + modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"] + +@dataclass +class VibeVoiceCausalLMOutputWithPast(BaseModelOutputWithPast): + logits: Optional[torch.FloatTensor] = None + +@dataclass +class VibeVoiceGenerationOutput(ModelOutput): + """ + Output type for VibeVoice generation. + + Args: + sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The generated sequences. + speech_outputs (`List[torch.FloatTensor]`, *optional*): + List of generated speech waveforms or latents for each speech segment. 
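+        reach_max_step_sample (`torch.BoolTensor`, *optional*):
+            Per-sample flags set to True for sequences that stopped because they hit the
+            maximum generation length rather than emitting an EOS token.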
+ """ + sequences: torch.LongTensor = None + speech_outputs: Optional[List[torch.FloatTensor]] = None + reach_max_step_sample: Optional[torch.BoolTensor] = None + +class VibeVoiceTokenConstraintProcessor(LogitsProcessor): + """Constrains token generation to only valid tokens during speech generation.""" + + def __init__(self, valid_token_ids: List[int], device: torch.device = None): + self.valid_token_ids = torch.tensor(valid_token_ids, dtype=torch.long, device=device) + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # Create a mask for valid tokens + mask = torch.full_like(scores, float('-inf')) + mask[:, self.valid_token_ids] = 0 + + # Apply mask to scores + scores = scores + mask + return scores + +class VibeVoiceForConditionalGenerationInference(VibeVoicePreTrainedModel, GenerationMixin): + _tied_weights_keys = ["lm_head.weight"] + _tp_plan = {"lm_head": "colwise_rep"} + + def __init__(self, config): + super().__init__(config) + + # Initialize the base model + self.model = VibeVoiceModel(config) + + # LM head for text generation + self.lm_head = nn.Linear(config.decoder_config.hidden_size, config.decoder_config.vocab_size, bias=False) + + # inference configuration + self.ddpm_inference_steps = config.diffusion_head_config.ddpm_num_inference_steps + + # Initialize weights and apply final processing + self.post_init() + + @property + def noise_scheduler(self): + return self.model.noise_scheduler + + @property + def prediction_head(self): + return self.model.prediction_head + + @property + def speech_scaling_factor(self): + return self.model.speech_scaling_factor + + @property + def speech_bias_factor(self): + return self.model.speech_bias_factor + + @property + def acoustic_tokenizer(self): + return self.model.acoustic_tokenizer + + @property + def semantic_tokenizer(self): + return self.model.semantic_tokenizer + + @property + def acoustic_connector(self): + return self.model.acoustic_connector + + @property + def semantic_connector(self): + return self.model.semantic_connector + + def tie_weights(self): + """ + Tie the weights between the input embeddings and the output embeddings. 
+ """ + # Tie lm_head.weight to language_model.embed_tokens.weight + if not getattr(self.config, 'tie_word_embeddings', False): + return + + if hasattr(self, 'lm_head') and hasattr(self.model.language_model, 'embed_tokens'): + self.lm_head.weight = self.model.language_model.embed_tokens.weight + + def get_input_embeddings(self): + return self.model.get_input_embeddings() + + def set_input_embeddings(self, value): + self.model.set_input_embeddings(value) + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None): + """Set the speech tokenizers used for encoding and decoding speech.""" + self.model.set_speech_tokenizers(acoustic_tokenizer, semantic_tokenizer) + + def set_ddpm_inference_steps(self, num_steps=None): + self.ddpm_inference_steps = num_steps or self.config.diffusion_head_config.ddpm_num_inference_steps + + def _process_speech_inputs(self, speech_tensors, speech_masks, speech_type="audio"): + """Process speech inputs through tokenizers and connectors.""" + with torch.no_grad(): + if speech_type == "audio": + # Encode audio to acoustic latents + encoder_output = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1)) + acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0] + + # Apply scaling and bias + acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device) + + # Connect to language model space + acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()] + + return acoustic_features, acoustic_connected + elif speech_type == "pt": + encoder_output = VibeVoiceTokenizerEncoderOutput(mean=speech_tensors, std=self.acoustic_tokenizer.config.fix_std) + acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0] + + # Apply scaling and bias + acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device) + + # Connect to language model space + acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()] + + return acoustic_features, acoustic_connected + else: + raise NotImplementedError(f"Speech type {speech_type} not implemented") + + # @can_return_tuple + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + speech_tensors: Optional[torch.FloatTensor] = None, + speech_masks: Optional[torch.BoolTensor] = None, + speech_input_mask: Optional[torch.BoolTensor] = None, + logits_to_keep: Union[int, slice] = 0, + **kwargs, + ) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]: + """ + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. 
Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + speech_tensors (`torch.FloatTensor`, *optional*): + Input speech waveforms for voice cloning or speech understanding. + speech_masks (`torch.BoolTensor`, *optional*): + Masks indicating valid speech frames. + speech_input_mask (`torch.BoolTensor`, *optional*): + Positions in the input sequence where speech embeddings should be inserted. + + Returns: + `VibeVoiceCausalLMOutputWithPast` or tuple + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # Get embeddings + if inputs_embeds is None: + inputs_embeds = self.model.get_input_embeddings()(input_ids) + + # Process speech inputs if provided + if speech_tensors is not None and speech_masks is not None: + acoustic_features, speech_embeds = self._process_speech_inputs(speech_tensors.to(self.dtype), speech_masks) + if speech_input_mask is not None: + inputs_embeds[speech_input_mask] = speech_embeds + + outputs = self.model( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = outputs[0] if not return_dict else outputs.last_hidden_state + # Only compute necessary logits, and do not upcast them to float if we are not computing the loss + slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep + logits = self.lm_head(hidden_states[:, slice_indices, :]) + + if labels is not None: + raise NotImplementedError("Loss computation is not implemented in this version.") + + return VibeVoiceCausalLMOutputWithPast( + logits=logits, + past_key_values=outputs.past_key_values, + last_hidden_state=hidden_states, + attentions=outputs.attentions, + ) + + def _build_generate_config_model_kwargs(self, generation_config, inputs, tokenizer, return_processors=False, **kwargs): + if generation_config is None: + generation_config = GenerationConfig( + bos_token_id=tokenizer.bos_token_id, + eos_token_id=tokenizer.eos_token_id, + pad_token_id = tokenizer.pad_token_id + ) + else: + generation_config = GenerationConfig( + **generation_config, + bos_token_id=tokenizer.bos_token_id, + eos_token_id=tokenizer.eos_token_id, + pad_token_id = tokenizer.pad_token_id + ) + + generation_config, model_kwargs = self._prepare_generation_config( + generation_config, + True, + speech_start_id=tokenizer.speech_start_id, + speech_end_id=tokenizer.speech_end_id, + speech_diffusion_id=tokenizer.speech_diffusion_id, + **kwargs + ) + generation_config.speech_start_id = tokenizer.speech_start_id + generation_config.speech_end_id = tokenizer.speech_end_id + generation_config.speech_diffusion_id = tokenizer.speech_diffusion_id + + inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs) + batch_size = inputs_tensor.shape[0] + device = self.device + + self._prepare_special_tokens(generation_config, True, device=device) + generation_config.use_cache = True + model_kwargs["use_cache"] = generation_config.use_cache + input_ids = inputs_tensor.to(self.device) + + input_ids_length = input_ids.shape[1] + has_default_max_length = 
kwargs.get("max_length") is None and generation_config.max_length is not None + has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None + generation_config = self._prepare_generated_length( + generation_config=generation_config, + has_default_max_length=has_default_max_length, + has_default_min_length=has_default_min_length, + model_input_name=model_input_name, + inputs_tensor=inputs_tensor, + input_ids_length=input_ids_length, + ) + + max_cache_length = generation_config.max_length - 1 + # Backwards compatible fix for _prepare_cache_for_generation method signature + # New transformers version expects 5 args, old version expects 6 + import inspect + try: + sig = inspect.signature(self._prepare_cache_for_generation) + if len(sig.parameters) == 5: + # New transformers version (4.56+) + self._prepare_cache_for_generation(generation_config, model_kwargs, batch_size, max_cache_length, device) + else: + # Old transformers version (pre-4.56) + self._prepare_cache_for_generation(generation_config, model_kwargs, None, batch_size, max_cache_length, device) + except Exception as e: + # Fallback to try both versions + try: + self._prepare_cache_for_generation(generation_config, model_kwargs, batch_size, max_cache_length, device) + except TypeError: + self._prepare_cache_for_generation(generation_config, model_kwargs, None, batch_size, max_cache_length, device) + model_kwargs['cache_position'] = torch.arange(input_ids_length, device=device, dtype=torch.long) + for k, v in model_kwargs.items(): + if isinstance(v, torch.Tensor): + model_kwargs[k] = v.to(device=device) + + if return_processors: + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_length, + encoder_input_ids=inputs_tensor, + prefix_allowed_tokens_fn=None, + logits_processor=LogitsProcessorList(), + device=inputs_tensor.device, + model_kwargs=model_kwargs, + ) + + stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=StoppingCriteriaList()) + + return generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria + else: + return generation_config, model_kwargs, input_ids + + @torch.no_grad() + def generate( + self, + inputs: Optional[torch.Tensor] = None, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, + synced_gpus: Optional[bool] = None, + assistant_model: Optional["PreTrainedModel"] = None, + audio_streamer: Optional[Union[AudioStreamer, AsyncAudioStreamer]] = None, + negative_prompt_ids: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + speech_tensors: Optional[torch.FloatTensor] = None, + speech_masks: Optional[torch.BoolTensor] = None, + speech_input_mask: Optional[torch.BoolTensor] = None, + return_speech: bool = True, + cfg_scale: float = 1.0, + stop_check_fn: Optional[Callable[[], bool]] = None, + **kwargs, + ) -> Union[torch.LongTensor, VibeVoiceGenerationOutput]: + """ + Generates sequences of token ids and optionally speech outputs. 
+ + Args: + All standard generation arguments from GenerationMixin + negative_prompt_ids: Negative prompt for CFG in speech generation + negative_prompt_attention_mask: Attention mask for negative prompt + speech_tensors: Input speech for voice cloning + speech_masks: Masks for speech tensors + speech_input_mask: Positions to insert speech embeddings + return_speech: Whether to decode and return speech outputs + cfg_scale: CFG scale for speech generation + stop_check_fn: Optional callable that returns True if generation should stop + + Returns: + Generated token sequences and optionally speech outputs + """ + # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call + tokenizer = kwargs.pop("tokenizer", None) # Pull this out first, we only use it for stopping criteria + parsed_scripts = kwargs.pop("parsed_scripts", None) + all_speakers_list = kwargs.pop("all_speakers_list", None) + max_length_times = kwargs.pop("max_length_times", 2) + + if kwargs.get('max_new_tokens', None) is None: + kwargs['max_new_tokens'] = self.config.decoder_config.max_position_embeddings - kwargs['input_ids'].shape[-1] + + generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria = self._build_generate_config_model_kwargs( + generation_config, inputs, tokenizer, return_processors=True, **kwargs + ) + + negative_kwargs = { + 'input_ids': torch.full((kwargs['input_ids'].shape[0], 1), tokenizer.speech_start_id, dtype=torch.long, device=kwargs['input_ids'].device), + 'attention_mask': torch.ones((kwargs['input_ids'].shape[0], 1), dtype=torch.long, device=kwargs['input_ids'].device), + 'max_new_tokens': kwargs.get('max_new_tokens', 100) + } + negative_generation_config, negative_model_kwargs, negative_input_ids = self._build_generate_config_model_kwargs( + None, None, tokenizer, return_processors=False, **negative_kwargs + ) + + acoustic_cache = VibeVoiceTokenizerStreamingCache() + semantic_cache = VibeVoiceTokenizerStreamingCache() + + batch_size = input_ids.shape[0] + device = input_ids.device + finished_tags = torch.zeros(batch_size, dtype=torch.bool, device=device) + correct_cnt = torch.zeros(batch_size, dtype=torch.long, device=device) + is_prefill = True + inputs_embeds = None + verbose = kwargs.get("verbose", False) + + # Initialize audio chunks storage for each sample + audio_chunks = [[] for _ in range(batch_size)] + + initial_length = input_ids.shape[-1] + initial_length_per_sample = model_kwargs['attention_mask'].sum(dim=-1) + + # Define all valid tokens that can be generated + valid_tokens = [ + generation_config.speech_start_id, + generation_config.speech_end_id, + generation_config.speech_diffusion_id, + generation_config.eos_token_id + ] + # Add bos_token_id if it exists + if hasattr(generation_config, 'bos_token_id') and generation_config.bos_token_id is not None: + valid_tokens.append(generation_config.bos_token_id) + + # Add custom processor to constrain token generation + token_constraint_processor = VibeVoiceTokenConstraintProcessor(valid_tokens, device=device) + if logits_processor is None: + logits_processor = LogitsProcessorList() + logits_processor.append(token_constraint_processor) + + max_steps = min(generation_config.max_length - initial_length, int(max_length_times * initial_length)) + max_step_per_sample = torch.min(generation_config.max_length - initial_length_per_sample, (max_length_times * initial_length_per_sample).long()) + reach_max_step_sample = torch.zeros(batch_size, dtype=torch.bool, device=device) + + # Create 
progress iterator if verbose + if kwargs.get("show_progress_bar", True): + progress_bar = tqdm(range(max_steps), desc="Generating", leave=False) + else: + progress_bar = range(max_steps) + + for step in progress_bar: + # Check for external stop signal + if stop_check_fn is not None and stop_check_fn(): + if verbose: + print(f"Generation stopped externally at step {step + 1}") + # End the audio streamer if it exists + if audio_streamer is not None: + audio_streamer.end() + break + + # Check if audio_streamer has been ended (stopped externally) + if audio_streamer is not None and hasattr(audio_streamer, 'finished_flags'): + if any(audio_streamer.finished_flags): + if verbose: + print(f"Audio generation stopped externally at step {step + 1}") + break + + if finished_tags.all(): + if hasattr(progress_bar, 'set_description'): + progress_bar.set_description("Generation complete") + break + + if input_ids.shape[-1] >= generation_config.max_length: + print(f"Reached maximum generation length {generation_config.max_length}, stopped it.") + reached_samples = torch.arange(batch_size, device=device)[~finished_tags] + if reached_samples.numel() > 0: + reach_max_step_sample[reached_samples] = True + break + + # Update progress bar description with active samples + if hasattr(progress_bar, 'set_description'): + active_samples = (~finished_tags).sum().item() + progress_bar.set_description(f"Generating (active: {active_samples}/{batch_size})") + + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + if is_prefill: + # we process the speech inputs only during the first generation step + prefill_inputs = { + "speech_tensors": speech_tensors.to(device=device), + "speech_masks": speech_masks.to(device), + "speech_input_mask": speech_input_mask.to(device), + } + is_prefill = False + else: + _ = model_inputs.pop('inputs_embeds', None) + prefill_inputs = {'inputs_embeds': inputs_embeds} + + # Forward pass through the model + outputs = self( + **model_inputs, **prefill_inputs, logits_to_keep=1, return_dict=True, output_attentions=False, output_hidden_states=False, + ) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=False, + ) + + # Get logits and apply logits processor + next_token_logits = outputs.logits[:, -1, :].to(copy=True, dtype=torch.float32, device=input_ids.device) + # next_token_logits = outputs.logits[:, -1, :].to(copy=True, device=input_ids.device) + next_token_scores = logits_processor(input_ids, next_token_logits) + + # token selection + if generation_config.do_sample: + probs = nn.functional.softmax(next_token_scores, dim=-1) + # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + else: + next_tokens = torch.argmax(next_token_scores, dim=-1) + + next_tokens[finished_tags] = generation_config.eos_token_id + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + + if not kwargs.get('refresh_negative', True): + negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs) + # Forward negative pass through the model + if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None: + negative_model_inputs['inputs_embeds'] = inputs_embeds + negative_model_inputs['input_ids'] = None + + negative_outputs = self( + **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False, + ) + 
negative_model_kwargs = self._update_model_kwargs_for_generation( + negative_outputs, negative_model_kwargs, is_encoder_decoder=False, + ) + negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1) + + # reached end of generation + if (next_tokens == generation_config.eos_token_id).any(): + eos_indices = (next_tokens == generation_config.eos_token_id).nonzero(as_tuple=False).squeeze(1) + # Only print for samples that are newly finished (not already marked as finished) + new_eos_indices = eos_indices[~finished_tags[eos_indices]] + if new_eos_indices.numel() > 0: + finished_tags[new_eos_indices] = True + if verbose: + print(f"Samples {new_eos_indices.tolist()} reached EOS token at step {step + 1}.", flush=True) + if audio_streamer is not None: + audio_streamer.end(new_eos_indices) + + # Check if any sample reached its maximum generation length + max_length_reached = step >= max_step_per_sample + new_max_length_indices = torch.nonzero(max_length_reached & ~finished_tags, as_tuple=False).squeeze(1) + if new_max_length_indices.numel() > 0: + finished_tags[new_max_length_indices] = True + reach_max_step_sample[new_max_length_indices] = True + if verbose: + print(f"Samples {new_max_length_indices.tolist()} reached max generation length at step {step + 1}.", flush=True) + if audio_streamer is not None: + audio_streamer.end(new_max_length_indices) + + # speech_end + diffusion_end_indices = (next_tokens == generation_config.speech_end_id).nonzero(as_tuple=False).squeeze(1) + if diffusion_end_indices.numel() > 0: + # Clear tokenizer caches for samples that reached speech end + acoustic_cache.set_to_zero(diffusion_end_indices) + semantic_cache.set_to_zero(diffusion_end_indices) + + # speech_begin + diffusion_start_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_start_id)] + if diffusion_start_indices.numel() > 0 and kwargs.get('refresh_negative', True): + # update attention mask + for i, sample_idx in enumerate(diffusion_start_indices.tolist()): + negative_model_kwargs['attention_mask'][sample_idx, :] = 0 + negative_model_kwargs['attention_mask'][sample_idx, -1] = 1 + # update past key values + for layer_idx in range(len(negative_model_kwargs['past_key_values'])): + k_cache, v_cache = negative_model_kwargs['past_key_values'][layer_idx] + # Process each non-diffusion sample + for sample_idx in diffusion_start_indices.tolist(): + # Shift cache for this sample + k_cache[sample_idx, :, -1, :] = k_cache[sample_idx, :, 0, :].clone() + v_cache[sample_idx, :, -1, :] = v_cache[sample_idx, :, 0, :].clone() + # update negative_input_ids + for sample_idx in diffusion_start_indices.tolist(): + negative_input_ids[sample_idx, -1] = generation_config.speech_start_id + + # Prepare inputs_embeds for next iteration + # Initialize with default embeddings for all tokens + next_inputs_embeds = self.model.get_input_embeddings()(next_tokens).unsqueeze(1) # [batch_size, 1, hidden_size] + + # forward diffusion + # Diffusion indices are those that are not finished and not special tokens + diffusion_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_diffusion_id)] + + if diffusion_indices.numel() > 0: + if kwargs.get('refresh_negative', True): + negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs) + # Forward negative pass through the model + if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None: + 
negative_model_inputs['inputs_embeds'] = inputs_embeds + negative_model_inputs['input_ids'] = None + + negative_outputs = self( + **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False, + ) + negative_model_kwargs = self._update_model_kwargs_for_generation( + negative_outputs, negative_model_kwargs, is_encoder_decoder=False, + ) + negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1) + # correct the non-diffusion indices + # we forward all samples' negative outputs even if + # they are not in diffusion mode to keep the cache consistent + # So we need to correct the kv cache of non-diffusion samples + non_diffusion_mask = ~finished_tags & (next_tokens != generation_config.speech_diffusion_id) + if non_diffusion_mask.any(): + non_diffusion_indices = torch.arange(batch_size, device=device)[non_diffusion_mask] + start_indices = correct_cnt[non_diffusion_indices] + + # 1. Update attention_mask - need to handle each sample separately + seq_len = negative_model_kwargs['attention_mask'].shape[1] + for i, (sample_idx, start_idx) in enumerate(zip(non_diffusion_indices.tolist(), start_indices.tolist())): + # Shift the attention mask for this sample + if start_idx + 1 < seq_len - 1: + negative_model_kwargs['attention_mask'][sample_idx, start_idx+1:] = \ + negative_model_kwargs['attention_mask'][sample_idx, start_idx:-1].clone() + negative_model_kwargs['attention_mask'][sample_idx, start_idx] = 0 + + # 2. Update past_key_values + for layer_idx in range(len(negative_model_kwargs['past_key_values'])): + k_cache, v_cache = negative_model_kwargs['past_key_values'][layer_idx] + # Process each non-diffusion sample + for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()): + if start_idx + 1 < k_cache.shape[2] - 1: + # Shift cache for this sample + k_cache[sample_idx, :, start_idx+1:, :] = k_cache[sample_idx, :, start_idx:-1, :].clone() + v_cache[sample_idx, :, start_idx+1:, :] = v_cache[sample_idx, :, start_idx:-1, :].clone() + + # 3. 
Update negative_input_ids + for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()): + if start_idx + 1 < negative_input_ids.shape[1] - 1: + negative_input_ids[sample_idx, start_idx+1:] = \ + negative_input_ids[sample_idx, start_idx:-1].clone() + + correct_cnt[non_diffusion_indices] += 1 + + positive_condition = outputs.last_hidden_state[diffusion_indices, -1, :] + negative_condition = negative_outputs.last_hidden_state[diffusion_indices, -1, :] + + speech_latent = self.sample_speech_tokens( + positive_condition, + negative_condition, + cfg_scale=cfg_scale, + ).unsqueeze(1) + + # Decode acoustic latent to audio using acoustic streaming cache + scaled_latent = speech_latent / self.model.speech_scaling_factor.to(speech_latent.device) - self.model.speech_bias_factor.to(speech_latent.device) + audio_chunk = self.model.acoustic_tokenizer.decode( + scaled_latent.to(self.model.acoustic_tokenizer.device), + cache=acoustic_cache, # Use acoustic-specific cache + sample_indices=diffusion_indices.to(self.model.acoustic_tokenizer.device), + use_cache=True, + debug=False + ) + + # Store audio chunks for each sample + for i, sample_idx in enumerate(diffusion_indices): + idx = sample_idx.item() + # Only append audio chunk if the sample is not finished + if not finished_tags[idx]: + audio_chunks[idx].append(audio_chunk[i]) + + # Add streaming support here + if audio_streamer is not None: + # Stream the audio chunks immediately + audio_streamer.put(audio_chunk, diffusion_indices) + + # Encode audio to semantic features using semantic streaming cache + semantic_features = self.model.semantic_tokenizer.encode( + audio_chunk, + cache=semantic_cache, # Use semantic-specific cache + sample_indices=diffusion_indices, + use_cache=True, + debug=False + ).mean # semantic tokenizer has no VAE. 
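A minimal sketch (illustrative only, assuming made-up dimensions and `nn.Linear` stand-ins for `model.acoustic_connector` / `model.semantic_connector`) of the additive fusion performed in the lines that follow: the acoustic latent and the semantic features are each projected to the LLM hidden size and summed, and the sum becomes the next-step input embedding at diffusion token positions.

import torch
import torch.nn as nn

hidden, acoustic_dim, semantic_dim = 1024, 64, 128             # assumed sizes, not taken from the config
acoustic_connector = nn.Linear(acoustic_dim, hidden)           # stand-in for model.acoustic_connector
semantic_connector = nn.Linear(semantic_dim, hidden)           # stand-in for model.semantic_connector

speech_latent = torch.randn(2, 1, acoustic_dim)                # CFG-sampled acoustic latent, one frame per step
semantic_features = torch.randn(2, 1, semantic_dim)            # mean of the semantic tokenizer's encoding

# The summed projection replaces the ordinary token embedding for diffusion positions.
diffusion_embeds = acoustic_connector(speech_latent) + semantic_connector(semantic_features)
print(diffusion_embeds.shape)                                  # torch.Size([2, 1, 1024])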
+ + # Combine acoustic and semantic features for next input + acoustic_embed = self.model.acoustic_connector(speech_latent) + semantic_embed = self.model.semantic_connector(semantic_features) + diffusion_embeds = acoustic_embed + semantic_embed + + # Update embeddings for diffusion indices + next_inputs_embeds[diffusion_indices] = diffusion_embeds + + # Set inputs_embeds for next iteration + inputs_embeds = next_inputs_embeds + + if audio_streamer is not None: + audio_streamer.end() + + # Concatenate audio chunks for each sample + final_audio_outputs = [] + for sample_chunks in audio_chunks: + if sample_chunks: + # Concatenate all chunks along the time dimension (assumed to be the last dimension) + concatenated_audio = torch.cat(sample_chunks, dim=-1) + final_audio_outputs.append(concatenated_audio) + else: + # If no audio was generated for this sample, append None + final_audio_outputs.append(None) + + return VibeVoiceGenerationOutput( + sequences=input_ids, + speech_outputs=final_audio_outputs if return_speech else None, + reach_max_step_sample=reach_max_step_sample, + ) + + @torch.no_grad() + def sample_speech_tokens(self, condition, neg_condition, cfg_scale=3.0): + self.model.noise_scheduler.set_timesteps(self.ddpm_inference_steps) + condition = torch.cat([condition, neg_condition], dim=0).to(self.model.prediction_head.device) + speech = torch.randn(condition.shape[0], self.config.acoustic_vae_dim).to(condition) + for t in self.model.noise_scheduler.timesteps: + half = speech[: len(speech) // 2] + combined = torch.cat([half, half], dim=0) + eps = self.model.prediction_head(combined, t.repeat(combined.shape[0]).to(combined), condition=condition) + cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0) + half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps) + eps = torch.cat([half_eps, half_eps], dim=0) + speech = self.model.noise_scheduler.step(eps, t, speech).prev_sample + return speech[: len(speech) // 2] + + +AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGenerationInference) + +__all__ = [ + "VibeVoiceForConditionalGenerationInference", +] diff --git a/vibevoice/modular/modular_vibevoice_diffusion_head.py b/vibevoice/modular/modular_vibevoice_diffusion_head.py new file mode 100644 index 0000000..59de50f --- /dev/null +++ b/vibevoice/modular/modular_vibevoice_diffusion_head.py @@ -0,0 +1,287 @@ +import math +from typing import Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from transformers.models.auto import AutoModel +from transformers.modeling_utils import PreTrainedModel +# from transformers.modeling_layers import GradientCheckpointingLayer +from transformers.activations import ACT2FN +from transformers.utils import logging + +from .configuration_vibevoice import VibeVoiceDiffusionHeadConfig + + +logger = logging.get_logger(__name__) + + +class RMSNorm(nn.Module): + def __init__(self, dim: int, eps: float = 1e-6, elementwise_affine=True, memory_efficient=False): + super().__init__() + self.dim = dim + self.eps = eps + self.elementwise_affine = elementwise_affine + if self.elementwise_affine: + self.weight = nn.Parameter(torch.ones(dim)) + else: + self.register_parameter('weight', None) + + def _norm(self, x): + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + output = self._norm(x.float()).type_as(x) + if self.weight is not None: + output = output * self.weight + return output + + def extra_repr(self) -> str: + return f'dim={self.dim}, 
eps={self.eps}, elementwise_affine={self.elementwise_affine}' + +def modulate(x, shift, scale): + """Apply modulation to input tensor.""" + return x * (1 + scale) + shift + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + + Args: + hidden_size (`int`): Size of the output embedding + frequency_embedding_size (`int`, optional): Size of the intermediate frequency embedding + """ + def __init__(self, hidden_size, frequency_embedding_size=256): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=False), + # nn.SiLU(), + ACT2FN['silu'], + nn.Linear(hidden_size, hidden_size, bias=False), + ) + self.frequency_embedding_size = frequency_embedding_size + + @staticmethod + def timestep_embedding(t, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + + Args: + t (`torch.Tensor`): A 1-D Tensor of N indices, one per batch element. + These may be fractional. + dim (`int`): The dimension of the output. + max_period (`int`, optional): Controls the minimum frequency of the embeddings. + + Returns: + `torch.Tensor`: An [N, D] Tensor of positional embeddings. + """ + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(t.device) + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding.to(t.dtype) + + def forward(self, t): + t_freq = self.timestep_embedding(t, self.frequency_embedding_size) + t_emb = self.mlp(t_freq) + return t_emb + + +class FeedForwardNetwork(nn.Module): + """ + Standard feed-forward network with SwiGLU activation. + + Args: + embed_dim (`int`): Input dimension + ffn_dim (`int`): Hidden dimension + """ + def __init__( + self, + embed_dim, + ffn_dim, + ): + super().__init__() + self.embed_dim = embed_dim + self.gate_proj = nn.Linear(self.embed_dim, ffn_dim, bias=False) + self.up_proj = nn.Linear(self.embed_dim, ffn_dim, bias=False) + self.down_proj = nn.Linear(ffn_dim, self.embed_dim, bias=False) + self.act_fn = ACT2FN['silu'] # Using SiLU as the activation function + + def forward(self, x): + gate = self.gate_proj(x) + up = self.up_proj(x) + + # SwiGLU activation + # gate = F.silu(gate) + gate = self.act_fn(gate) + return self.down_proj(gate * up) + + +class HeadLayer(nn.Module): + """ + A layer in the diffusion head. + + Args: + embed_dim (`int`): Input dimension + ffn_dim (`int`): Hidden dimension + cond_dim (`int`): Condition embedding dimension + norm_eps (`float`, optional): Epsilon for normalization + """ + def __init__( + self, + embed_dim, + ffn_dim, + cond_dim, + norm_eps=1e-5, + ): + super().__init__() + self.embed_dim = embed_dim + self.cond_dim = cond_dim + self.ffn_dim = ffn_dim + self.ffn = FeedForwardNetwork( + self.embed_dim, + self.ffn_dim, + ) + self.norm = RMSNorm(self.embed_dim, eps=norm_eps) + self.adaLN_modulation = nn.Sequential( + # nn.SiLU(), + ACT2FN['silu'], + nn.Linear(cond_dim, 3 * self.embed_dim, bias=False) + ) + + def forward(self, x, c): + shift_ffn, scale_ffn, gate_ffn = self.adaLN_modulation(c).chunk(3, dim=-1) + x = x + gate_ffn * self.ffn(modulate(self.norm(x), shift_ffn, scale_ffn)) + return x + + +class FinalLayer(nn.Module): + """ + Final layer in the diffusion head. 
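A small runnable sketch (illustrative only, with toy sizes) of the adaLN modulation used by `HeadLayer` and `FinalLayer`: because the modulation weights are zero-initialised in `initialize_weights()` further down, `modulate()` starts out as the identity and each block initially contributes nothing.

import torch
import torch.nn as nn

def modulate(x, shift, scale):                                 # same formula as above
    return x * (1 + scale) + shift

hidden, cond_dim = 8, 8                                        # toy sizes
adaLN = nn.Sequential(nn.SiLU(), nn.Linear(cond_dim, 2 * hidden, bias=False))
nn.init.constant_(adaLN[-1].weight, 0)                         # mirrors the zero-init in initialize_weights()

x, c = torch.randn(2, hidden), torch.randn(2, cond_dim)
shift, scale = adaLN(c).chunk(2, dim=-1)
assert torch.allclose(modulate(x, shift, scale), x)            # identity until training moves the weights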
+ + Args: + hidden_size (`int`): Input dimension + output_size (`int`): Output dimension + cond_size (`int`): Condition embedding dimension + norm_eps (`float`, optional): Epsilon for normalization + """ + def __init__(self, hidden_size, output_size, cond_size, norm_eps=1e-5): + super().__init__() + self.norm_final = RMSNorm(hidden_size, eps=norm_eps, elementwise_affine=False) + self.linear = nn.Linear(hidden_size, output_size, bias=False) + self.adaLN_modulation = nn.Sequential( + # nn.SiLU(), + ACT2FN['silu'], + nn.Linear(cond_size, 2 * hidden_size, bias=False) + ) + + def forward(self, x, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1) + x = modulate(self.norm_final(x), shift, scale) + x = self.linear(x) + return x + + +class VibeVoiceDiffusionHead(PreTrainedModel): + """ + Diffusion head model for vibevoice. + + Args: + config (`VibeVoiceDiffusionHeadConfig`): Model configuration + latent_size (`int`, optional): Size of the latent space. If not provided, uses `config.latent_size`. + """ + config_class = VibeVoiceDiffusionHeadConfig + supports_gradient_checkpointing = True + _supports_flash_attn_2 = True + _supports_sdpa = True + + def __init__( + self, + config, + ): + super().__init__(config) + self.config = config + self.cond_dim = config.hidden_size + latent_size = config.latent_size + + self.noisy_images_proj = nn.Linear(latent_size, config.hidden_size, bias=False) + self.cond_proj = nn.Linear(config.hidden_size, self.cond_dim, bias=False) + self.t_embedder = TimestepEmbedder(self.cond_dim) + + ffn_dim = int(config.hidden_size * config.head_ffn_ratio) + + # Create the intermediate layers + self.layers = nn.ModuleList([ + HeadLayer( + embed_dim=config.hidden_size, + ffn_dim=ffn_dim, + cond_dim=self.cond_dim, + norm_eps=config.rms_norm_eps + ) + for _ in range(config.head_layers) + ]) + + # Final layer for output + self.final_layer = FinalLayer( + hidden_size=config.hidden_size, + output_size=latent_size, + cond_size=self.cond_dim, + norm_eps=config.rms_norm_eps + ) + + self.initialize_weights() + + def initialize_weights(self): + """Initialize the weights of the model.""" + # Initialize timestep embedder + nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers + for layer in self.layers: + nn.init.constant_(layer.adaLN_modulation[-1].weight, 0) + + # Zero-out output layers + nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.final_layer.linear.weight, 0) + + def forward( + self, + noisy_images, + timesteps, + condition, + ): + """ + Forward pass of the prediction head. 
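A simplified, self-contained sketch (illustrative only; `toy_head` is a stand-in and the batching is reduced relative to the patch) of how `sample_speech_tokens()` in modeling_vibevoice_inference.py drives this head with a doubled batch for classifier-free guidance:

import torch

def guided_eps(prediction_head, speech, t, cond, neg_cond, cfg_scale=3.0):
    condition = torch.cat([cond, neg_cond], dim=0)             # positive conditions, then negative
    combined = torch.cat([speech, speech], dim=0)              # same latent fed to both halves
    eps = prediction_head(combined, t.repeat(combined.shape[0]), condition)
    cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
    return uncond_eps + cfg_scale * (cond_eps - uncond_eps)    # guidance formula from sample_speech_tokens

toy_head = lambda x, t, condition: torch.randn_like(x)         # stand-in for the diffusion head
eps = guided_eps(toy_head, torch.randn(2, 64), torch.tensor(10), torch.randn(2, 8), torch.randn(2, 8))
print(eps.shape)                                               # torch.Size([2, 64])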
+ + Args: + noisy_images (`torch.Tensor`): Noisy images/latents to denoise + timesteps (`torch.Tensor`): Timesteps for diffusion + condition (`torch.Tensor`): Conditioning information + + Returns: + `torch.Tensor`: The predicted noise/velocity + """ + x = self.noisy_images_proj(noisy_images) + t = self.t_embedder(timesteps) + condition = self.cond_proj(condition) + c = condition + t + + for layer in self.layers: + x = layer(x, c) + + x = self.final_layer(x, c) + return x + + +AutoModel.register(VibeVoiceDiffusionHeadConfig, VibeVoiceDiffusionHead) + +__all__ = [ + "VibeVoiceDiffusionHead", +] \ No newline at end of file diff --git a/vibevoice/modular/modular_vibevoice_text_tokenizer.py b/vibevoice/modular/modular_vibevoice_text_tokenizer.py new file mode 100644 index 0000000..bfa7bdd --- /dev/null +++ b/vibevoice/modular/modular_vibevoice_text_tokenizer.py @@ -0,0 +1,214 @@ +"""Tokenization classes for vibevoice.""" + +from typing import List, Optional, Union + +from transformers.utils import logging +from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer +from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast + +logger = logging.get_logger(__name__) + + +class VibeVoiceTextTokenizer(Qwen2Tokenizer): + """ + Construct a VibeVoice tokenizer. Based on the Qwen2 tokenizer with additional special tokens for speech. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. + bos_token (`str`, *optional*): + The beginning of sequence token. Not used for vibevoice. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The token used for padding. + add_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to add special tokens when encoding. 
+ """ + + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + unk_token="<|endoftext|>", + bos_token=None, + eos_token="<|endoftext|>", + pad_token="<|endoftext|>", + add_prefix_space=False, + add_special_tokens=True, + **kwargs, + ): + super().__init__( + vocab_file=vocab_file, + merges_file=merges_file, + errors=errors, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + add_prefix_space=add_prefix_space, + add_special_tokens=add_special_tokens, + **kwargs, + ) + + # Add VibeVoice-specific special tokens + self._add_vibevoice_special_tokens() + + def _add_vibevoice_special_tokens(self): + """Add VibeVoice-specific special tokens.""" + special_tokens = { + "additional_special_tokens": [ + "<|vision_start|>", # Speech start (reusing vision tokens) + "<|vision_end|>", # Speech end + "<|vision_pad|>", # Speech diffusion pad + ] + } + num_added = self.add_special_tokens(special_tokens) + + # Cache special token IDs + self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>") + self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>") + self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>") + + self._eos_id = self.convert_tokens_to_ids('<|endoftext|>') + + return num_added + + @property + def eos_id(self) -> int: + """Id of the end of sequence token.""" + return self._eos_id + + @property + def speech_start_id(self) -> int: + """Id of the speech start token.""" + return self._speech_start_id + + @property + def speech_end_id(self) -> int: + """Id of the speech end token.""" + return self._speech_end_id + + @property + def speech_diffusion_id(self) -> int: + """Id of the speech diffusion token.""" + return self._speech_diffusion_id + + @property + def pad_id(self) -> int: + """Id used for padding (returns -100 for loss masking).""" + return -100 + + +class VibeVoiceTextTokenizerFast(Qwen2TokenizerFast): + """ + Construct a "fast" VibeVoice tokenizer (backed by HuggingFace's *tokenizers* library). + Based on the Qwen2 tokenizer with additional special tokens for speech. + + Args: + vocab_file (`str`, *optional*): + Path to the vocabulary file. + merges_file (`str`, *optional*): + Path to the merges file. + tokenizer_file (`str`, *optional*): + Path to [tokenizers](https://github.com/huggingface/tokenizers) file. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. + bos_token (`str`, *optional*): + The beginning of sequence token. Not used for vibevoice. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The token used for padding. 
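A hedged usage sketch (illustrative only): the checkpoint name below is an assumption — any Qwen2-compatible tokenizer files should work — and only the cached special-token properties come from this class.

tok = VibeVoiceTextTokenizerFast.from_pretrained("Qwen/Qwen2.5-1.5B")   # hypothetical checkpoint choice
enc = tok("Speaker 1: Hello there.", return_tensors="pt")
print(tok.speech_start_id, tok.speech_end_id, tok.speech_diffusion_id)  # ids of the reused vision tokens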
+ """ + + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + unk_token="<|endoftext|>", + bos_token=None, + eos_token="<|endoftext|>", + pad_token="<|endoftext|>", + add_prefix_space=False, + **kwargs, + ): + super().__init__( + vocab_file=vocab_file, + merges_file=merges_file, + tokenizer_file=tokenizer_file, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + add_prefix_space=add_prefix_space, + **kwargs, + ) + + # Add VibeVoice-specific special tokens + self._add_vibevoice_special_tokens() + + def _add_vibevoice_special_tokens(self): + """Add VibeVoice-specific special tokens.""" + special_tokens = { + "additional_special_tokens": [ + "<|vision_start|>", # Speech start (reusing vision tokens) + "<|vision_end|>", # Speech end + "<|vision_pad|>", # Speech diffusion pad + ] + } + num_added = self.add_special_tokens(special_tokens) + + # Cache special token IDs + self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>") + self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>") + self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>") + + # self._eos_id = self.convert_tokens_to_ids('<|endoftext|>') + self._eos_id = self.eos_token_id # qwen2 / qwen3 + self._pad_id = self.convert_tokens_to_ids('<|image_pad|>') + + return num_added + + @property + def eos_id(self) -> int: + """Id of the end of sequence token.""" + return self._eos_id + + @property + def speech_start_id(self) -> int: + """Id of the speech start token.""" + return self._speech_start_id + + @property + def speech_end_id(self) -> int: + """Id of the speech end token.""" + return self._speech_end_id + + @property + def speech_diffusion_id(self) -> int: + """Id of the speech diffusion token.""" + return self._speech_diffusion_id + + @property + def pad_id(self) -> int: + """Id used for padding (returns -100 for loss masking).""" + return self._pad_id + + +__all__ = [ + "VibeVoiceTextTokenizer", + "VibeVoiceTextTokenizerFast", +] \ No newline at end of file diff --git a/vibevoice/modular/modular_vibevoice_tokenizer.py b/vibevoice/modular/modular_vibevoice_tokenizer.py new file mode 100644 index 0000000..fbd5182 --- /dev/null +++ b/vibevoice/modular/modular_vibevoice_tokenizer.py @@ -0,0 +1,1195 @@ +import math +import typing as tp +from functools import partial +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple, Union +import copy + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from transformers.models.auto import AutoModel + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging +from transformers.modeling_utils import PreTrainedModel +from transformers.activations import ACT2FN + +from .configuration_vibevoice import VibeVoiceAcousticTokenizerConfig, VibeVoiceSemanticTokenizerConfig + +logger = logging.get_logger(__name__) + +import os +# Try to import APEX FusedRMSNorm +try: + from apex.normalization.fused_layer_norm import fused_rms_norm_affine + APEX_AVAILABLE = True + logger.info("APEX FusedRMSNorm is available and will be used for optimization") + if int(os.getenv("OPTIMIZE_FOR_SPEED", "0")) == 0: + APEX_AVAILABLE = False + logger.warning("APEX FusedRMSNorm is disabled by environment variable OPTIMIZE_FOR_SPEED=0") +except ImportError: + APEX_AVAILABLE = False + logger.warning("APEX FusedRMSNorm not available, using native 
implementation") +# APEX_AVAILABLE=False + +# Normalization modules +class ConvLayerNorm(nn.LayerNorm): + """ + Convolution-friendly LayerNorm that moves channels to last dimensions + before running the normalization and moves them back to original position right after. + """ + def __init__(self, normalized_shape: tp.Union[int, tp.List[int], torch.Size], **kwargs): + super().__init__(normalized_shape, **kwargs) + + def forward(self, x): + x = x.transpose(1, 2) # b ... t -> b t ... + x = nn.functional.layer_norm(x.float(), self.normalized_shape, self.weight.float(), self.bias.float(), self.eps).type_as(x) + x = x.transpose(1, 2) # b t ... -> b ... t + return x + +class RMSNorm(nn.Module): + def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None): + super().__init__() + self.dim = dim + self.eps = eps + self.elementwise_affine = elementwise_affine + if self.elementwise_affine: + weight_shape = (dim,) if weight_shape is None else weight_shape + self.weight = nn.Parameter(torch.ones(weight_shape)) + else: + self.register_parameter('weight', None) + + def _norm(self, x): + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + output = self._norm(x.float()).type_as(x) + if self.weight is not None: + output = output * self.weight + return output + + def extra_repr(self) -> str: + return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}' + +class ConvRMSNorm(RMSNorm): + def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None): + super().__init__(dim, eps, elementwise_affine, weight_shape) + + def forward(self, x): + x = x.transpose(1, 2) # b ... t -> b t ... + if (not APEX_AVAILABLE) or (not self.elementwise_affine): + # Fallback to native implementation + output = self._norm(x.float()).type_as(x) + if self.weight is not None: + output = output * self.weight + else: + output = fused_rms_norm_affine(x, self.weight, self.weight.shape, self.eps) + output = output.transpose(1, 2) # b t ... -> b ... t + return output + +# Convolutional layers and utilities +CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm', + 'time_layer_norm', 'layer_norm', 'time_group_norm']) + + +def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module: + assert norm in CONV_NORMALIZATIONS + if norm == 'weight_norm': + return nn.utils.weight_norm(module) + elif norm == 'spectral_norm': + return nn.utils.spectral_norm(module) + else: + # We already check was in CONV_NORMALIZATION, so any other choice + # doesn't need reparametrization. + return module + + +def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module: + """Return the proper normalization module. If causal is True, this will ensure the returned + module is causal, or return an error if the normalization doesn't support causal evaluation. 
+ """ + assert norm in CONV_NORMALIZATIONS + if norm == 'layer_norm': + assert isinstance(module, nn.modules.conv._ConvNd) + return ConvLayerNorm(module.out_channels, **norm_kwargs) + elif norm == 'time_group_norm': + if causal: + raise ValueError("GroupNorm doesn't support causal evaluation.") + assert isinstance(module, nn.modules.conv._ConvNd) + return nn.GroupNorm(1, module.out_channels, **norm_kwargs) + else: + return nn.Identity() + + +def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, + padding_total: int = 0) -> int: + """Calculate extra padding needed for convolution to have the same output length""" + length = x.shape[-1] + n_frames = (length - kernel_size + padding_total) / stride + 1 + ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) + return ideal_length - length + + +def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.): + """Pad 1D input with handling for small inputs in reflect mode""" + length = x.shape[-1] + padding_left, padding_right = paddings + assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) + if mode == 'reflect': + max_pad = max(padding_left, padding_right) + extra_pad = 0 + if length <= max_pad: + extra_pad = max_pad - length + 1 + x = F.pad(x, (0, extra_pad)) + padded = F.pad(x, paddings, mode, value) + end = padded.shape[-1] - extra_pad + return padded[..., :end] + else: + return F.pad(x, paddings, mode, value) + + +def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]): + """Remove padding from x, handling properly zero padding. Only for 1d!""" + padding_left, padding_right = paddings + assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) + assert (padding_left + padding_right) <= x.shape[-1] + end = x.shape[-1] - padding_right + return x[..., padding_left: end] + + +class NormConv1d(nn.Module): + """Wrapper around Conv1d and normalization applied to this conv""" + def __init__(self, *args, causal: bool = False, norm: str = 'none', + norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): + super().__init__() + self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm) + self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class NormConvTranspose1d(nn.Module): + """Wrapper around ConvTranspose1d and normalization applied to this conv""" + def __init__(self, *args, causal: bool = False, norm: str = 'none', + norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): + super().__init__() + self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm) + self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.convtr(x) + x = self.norm(x) + return x + + +class VibeVoiceTokenizerStreamingCache: + """Cache for streaming convolution, similar to KV cache in attention""" + def __init__(self): + self.cache = {} # Dict mapping (layer_id, sample_idx) to state tensor + + def get(self, layer_id: str, sample_indices: torch.Tensor) -> Optional[torch.Tensor]: + """Get cached states for given layer and sample indices""" + states = [] + max_length = 0 + + # First pass: collect states and find max length + for idx in sample_indices.tolist(): + key = (layer_id, idx) + if key not in self.cache: + return None # If any sample is missing, return None + state = self.cache[key] + states.append(state) + max_length 
= max(max_length, state.shape[-1]) + + # Second pass: pad states to max length if needed + if len(states) > 0 and states[0].dim() >= 2: + padded_states = [] + for state in states: + if state.shape[-1] < max_length: + # Pad on the time dimension (last dimension) + pad_size = max_length - state.shape[-1] + # Pad with zeros on the LEFT to align the most recent samples + padded_state = F.pad(state, (pad_size, 0), mode='constant', value=0) + padded_states.append(padded_state) + else: + padded_states.append(state) + return torch.stack(padded_states, dim=0) + else: + return torch.stack(states, dim=0) + + def set(self, layer_id: str, sample_indices: torch.Tensor, states: torch.Tensor): + """Set cached states for given layer and sample indices""" + for i, idx in enumerate(sample_indices.tolist()): + key = (layer_id, idx) + self.cache[key] = states[i].detach() + + def set_to_zero(self, sample_indices: torch.Tensor): + """Set all cached states to zero for given sample indices""" + for key in list(self.cache.keys()): + layer_id, sample_idx = key + if sample_idx in sample_indices.tolist(): + # Create zero tensor with same shape and dtype as cached tensor + cached_tensor = self.cache[key] + self.cache[key] = torch.zeros_like(cached_tensor) + + def clear(self, layer_id: Optional[str] = None, sample_indices: Optional[torch.Tensor] = None): + """Clear cache for specific layer/samples or everything""" + if layer_id is None and sample_indices is None: + self.cache.clear() + elif layer_id is not None and sample_indices is None: + # Clear all samples for a specific layer + keys_to_remove = [k for k in self.cache.keys() if k[0] == layer_id] + for k in keys_to_remove: + del self.cache[k] + elif layer_id is not None and sample_indices is not None: + # Clear specific samples for a specific layer + for idx in sample_indices.tolist(): + key = (layer_id, idx) + self.cache.pop(key, None) + +class SConv1d(nn.Module): + """Conv1d with built-in handling of asymmetric or causal padding and normalization.""" + def __init__(self, in_channels: int, out_channels: int, + kernel_size: int, stride: int = 1, dilation: int = 1, + groups: int = 1, bias: bool = True, causal: bool = False, + norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, + pad_mode: str = 'reflect'): + super().__init__() + self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride, + dilation=dilation, groups=groups, bias=bias, causal=causal, + norm=norm, norm_kwargs=norm_kwargs) + self.causal = causal + self.pad_mode = pad_mode + + # Store configuration + self.kernel_size = kernel_size + self.dilation = dilation + self.stride = stride + self.in_channels = in_channels + self.out_channels = out_channels + + # For causal convolution, we need to maintain kernel_size - 1 samples as context + # need to check use which context_size is more suitable + # self.context_size = (kernel_size - 1) * dilation + self.context_size = (kernel_size - 1) * dilation - (stride - 1) + + # For non-streaming mode, calculate padding + self.padding_total = (kernel_size - 1) * dilation - (stride - 1) + + # Create a unique layer ID for cache management + self._layer_id = None + + @property + def layer_id(self): + if self._layer_id is None: + self._layer_id = f"sconv1d_{id(self)}" + return self._layer_id + + def forward(self, x: torch.Tensor, + cache: Optional[VibeVoiceTokenizerStreamingCache] = None, + sample_indices: Optional[torch.Tensor] = None, + use_cache: bool = False, + debug: bool = False) -> torch.Tensor: + """ + Forward pass with optional streaming support via 
cache. + + Args: + x: Input tensor [batch_size, channels, time] + cache: VibeVoiceTokenizerStreamingCache object for maintaining states + sample_indices: Indices identifying each sample for cache management + use_cache: Whether to use cached states for streaming + debug: Whether to print debug information + + Returns: + Output tensor + """ + B, C, T = x.shape + + # Non-streaming mode + if not use_cache or cache is None: + return self._forward_non_streaming(x, debug=debug) + + # Streaming mode + assert self.causal, "Streaming mode is only supported for causal convolutions" + assert sample_indices is not None, "sample_indices must be provided for streaming mode" + assert len(sample_indices) == B, "sample_indices must match batch size" + + return self._forward_streaming(x, cache, sample_indices, debug) + + def _forward_streaming(self, x: torch.Tensor, + cache: VibeVoiceTokenizerStreamingCache, + sample_indices: torch.Tensor, + debug: bool = False) -> torch.Tensor: + """Streaming forward pass with cache operations kept separate from compiled code""" + B, C, T = x.shape + + # Cache operations (not compiled) + cached_states = cache.get(self.layer_id, sample_indices) + + if cached_states is None: + # First chunk - initialize with zeros for context + if self.context_size > 0: + cached_states = torch.zeros(B, C, self.context_size, device=x.device, dtype=x.dtype) + if debug: + print(f"[DEBUG] Initialized cache with shape: {cached_states.shape}, context_size={self.context_size}") + else: + cached_states = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype) + if debug: + print(f"[DEBUG] No context needed (kernel_size=stride)") + + # Concatenate cached states with input + if cached_states.shape[2] > 0: + input_with_context = torch.cat([cached_states, x], dim=2) + else: + input_with_context = x + + if debug: + print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_states.shape}, Combined: {input_with_context.shape}") + + # Apply convolution directly - no extra padding in streaming mode + # The conv layer will handle its own padding internally + output = self.conv(input_with_context) + + if debug: + print(f"[DEBUG] Output shape: {output.shape}") + + # Update cache for next chunk + if self.context_size > 0: + # Calculate how many samples to keep + total_input_length = input_with_context.shape[2] + + # Keep the last context_size samples + if total_input_length >= self.context_size: + new_cache_start = total_input_length - self.context_size + new_cache = input_with_context[:, :, new_cache_start:] + else: + # If we have less than context_size samples, keep everything + new_cache = input_with_context + + if debug: + print(f"[DEBUG] New cache shape: {new_cache.shape}") + + cache.set(self.layer_id, sample_indices, new_cache) + + return output + + def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor: + """Standard forward pass without streaming""" + B, C, T = x.shape + kernel_size = self.kernel_size + stride = self.stride + dilation = self.dilation + padding_total = self.padding_total + + # Compute extra padding for stride alignment + extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total) + + if debug: + print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}, padding_total={padding_total}, extra_padding={extra_padding}") + + if self.causal: + # Left padding for causal + if self.pad_mode == 'constant': + x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode, value=0) + else: + x = pad1d(x, (padding_total, extra_padding), 
mode=self.pad_mode) + else: + # Symmetric padding for non-causal + padding_right = padding_total // 2 + padding_left = padding_total - padding_right + x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode) + + if debug: + print(f"[DEBUG NON-STREAMING] After padding: {x.shape}") + + output = self.conv(x) + + if debug: + print(f"[DEBUG NON-STREAMING] Output shape: {output.shape}") + + return output + + +class SConvTranspose1d(nn.Module): + """ConvTranspose1d with built-in handling of asymmetric or causal padding and normalization.""" + def __init__(self, in_channels: int, out_channels: int, + kernel_size: int, stride: int = 1, causal: bool = False, + norm: str = 'none', trim_right_ratio: float = 1., + norm_kwargs: tp.Dict[str, tp.Any] = {}, bias: bool = True): + super().__init__() + self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride, + causal=causal, norm=norm, norm_kwargs=norm_kwargs, bias=bias) + self.causal = causal + self.trim_right_ratio = trim_right_ratio + assert self.causal or self.trim_right_ratio == 1., \ + "`trim_right_ratio` != 1.0 only makes sense for causal convolutions" + assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1. + + # Store configuration + self.kernel_size = kernel_size + self.stride = stride + self.in_channels = in_channels + self.out_channels = out_channels + + # For transposed convolution, padding calculation is different + self.padding_total = kernel_size - stride + + # For streaming, we need to keep track of input history + # Transposed conv needs to see multiple input samples to produce correct output + self.context_size = kernel_size - 1 + + # Create a unique layer ID for cache management + self._layer_id = None + + @property + def layer_id(self): + if self._layer_id is None: + self._layer_id = f"sconvtr1d_{id(self)}" + return self._layer_id + + def forward(self, x: torch.Tensor, + cache: Optional[VibeVoiceTokenizerStreamingCache] = None, + sample_indices: Optional[torch.Tensor] = None, + use_cache: bool = False, + debug: bool = False) -> torch.Tensor: + """ + Forward pass with optional streaming support via cache. 
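A minimal sketch (illustrative only; layer sizes are arbitrary) of the streaming call pattern for this layer, feeding fixed-size chunks through the `VibeVoiceTokenizerStreamingCache` defined earlier in this file:

import torch

layer = SConvTranspose1d(in_channels=8, out_channels=8, kernel_size=4, stride=2, causal=True)
cache = VibeVoiceTokenizerStreamingCache()
indices = torch.arange(2)                                  # one cache slot per batch sample

chunks = torch.randn(2, 8, 20).split(5, dim=-1)            # four chunks of 5 latent frames each
stream_out = [layer(c, cache=cache, sample_indices=indices, use_cache=True) for c in chunks]
print(torch.cat(stream_out, dim=-1).shape)                 # torch.Size([2, 8, 40]): stride-2 upsampling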
+ """ + B, C, T = x.shape + + # Non-streaming mode + if not use_cache or cache is None: + return self._forward_non_streaming(x, debug=debug) + + # Streaming mode + assert sample_indices is not None, "sample_indices must be provided for streaming mode" + assert len(sample_indices) == B, "sample_indices must match batch size" + + return self._forward_streaming(x, cache, sample_indices, debug) + + def _forward_streaming(self, x: torch.Tensor, + cache: VibeVoiceTokenizerStreamingCache, + sample_indices: torch.Tensor, + debug: bool = False) -> torch.Tensor: + """Streaming forward pass with cache operations kept separate from compiled code""" + B, C, T = x.shape + + # Cache operations (not compiled) + cached_input = cache.get(self.layer_id, sample_indices) + + if cached_input is None: + # First chunk - no history yet + cached_input = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype) + if debug: + print(f"[DEBUG] Initialized empty cache for transposed conv") + + # Concatenate cached input with new input + full_input = torch.cat([cached_input, x], dim=2) + + if debug: + print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_input.shape}, Combined: {full_input.shape}") + + # First chunk or debug mode - use uncompiled version + full_output = self.convtr(full_input) + + if debug: + print(f"[DEBUG] Full transposed conv output shape: {full_output.shape}") + + # Calculate padding to remove + if self.causal: + padding_right = math.ceil(self.padding_total * self.trim_right_ratio) + padding_left = self.padding_total - padding_right + else: + padding_right = self.padding_total // 2 + padding_left = self.padding_total - padding_right + + # Remove padding + if padding_left + padding_right > 0: + full_output = unpad1d(full_output, (padding_left, padding_right)) + + if debug: + print(f"[DEBUG] After unpadding: {full_output.shape}") + + # Determine which part of the output corresponds to the new input + if cached_input.shape[2] == 0: + # First chunk - return all output + output = full_output + else: + # Subsequent chunks - return only the new output + expected_new_output = T * self.stride + + # Take the last expected_new_output samples + if full_output.shape[2] >= expected_new_output: + output = full_output[:, :, -expected_new_output:] + else: + output = full_output + + if debug: + print(f"[DEBUG] Final streaming output shape: {output.shape}") + + # Update cache + if full_input.shape[2] > self.context_size: + new_cache = full_input[:, :, -self.context_size:] + else: + new_cache = full_input + + if debug: + print(f"[DEBUG] New cache shape: {new_cache.shape}") + + cache.set(self.layer_id, sample_indices, new_cache) + + return output + + def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor: + """Standard forward pass without streaming""" + if debug: + print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}") + + # Apply transposed convolution + y = self.convtr(x) + + if debug: + print(f"[DEBUG NON-STREAMING] After transposed conv: {y.shape}") + + # Calculate and remove padding + if self.causal: + padding_right = math.ceil(self.padding_total * self.trim_right_ratio) + padding_left = self.padding_total - padding_right + else: + padding_right = self.padding_total // 2 + padding_left = self.padding_total - padding_right + + if padding_left + padding_right > 0: + y = unpad1d(y, (padding_left, padding_right)) + + if debug: + print(f"[DEBUG NON-STREAMING] Final output shape: {y.shape}") + + return y + +# FFN +class FFN(nn.Module): + def __init__( + self, + embed_dim, + ffn_dim, + 
bias=False, + ): + super().__init__() + self.embed_dim = embed_dim + self.linear1 = nn.Linear(self.embed_dim, ffn_dim, bias=bias) + self.gelu = ACT2FN["gelu"] + self.linear2 = nn.Linear(ffn_dim, self.embed_dim, bias=bias) + + def forward(self, x): + x = self.linear1(x) + x = self.gelu(x) + x = self.linear2(x) + return x + + +class Convlayer(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + dilation=1, + groups=1, + bias=True, + pad_mode='zeros', + norm='weight_norm', + causal=True, + ): + super().__init__() + self.conv = SConv1d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, + groups=groups, bias=bias, pad_mode=pad_mode, norm=norm, causal=causal) + + def forward(self, x): + return self.conv(x) + +class Block1D(nn.Module): + def __init__(self, dim, kernel_size=7, drop_path=0., mixer_layer='conv', + layer_scale_init_value=1e-6, **kwargs): + super().__init__() + + if kwargs.get('layernorm', 'LN') == 'LN': + self.norm = ConvLayerNorm(dim, eps=kwargs.get('eps', 1e-6)) + self.ffn_norm = ConvLayerNorm(dim, eps=kwargs.get('eps', 1e-6)) + elif kwargs.get('layernorm', 'RMSNorm') == 'RMSNorm': + self.norm = ConvRMSNorm(dim, eps=kwargs.get('eps', 1e-6)) + self.ffn_norm = ConvRMSNorm(dim, eps=kwargs.get('eps', 1e-6)) + + if mixer_layer == 'conv': + self.mixer = Convlayer(dim, dim, groups=kwargs.get('groups', 1), + kernel_size=kernel_size, + pad_mode=kwargs.get('pad_mode', 'reflect'), + norm=kwargs.get('norm', 'none'), + causal=kwargs.get('causal', True), + bias=kwargs.get('bias', True), + ) + elif mixer_layer == 'depthwise_conv': + self.mixer = Convlayer(dim, dim, groups=dim, + kernel_size=kernel_size, + pad_mode=kwargs.get('pad_mode', 'reflect'), + norm=kwargs.get('norm', 'none'), + causal=kwargs.get('causal', True), + bias=kwargs.get('bias', True), + ) + else: + raise ValueError(f"Unsupported mixer layer: {mixer_layer}") + + self.ffn = FFN( + dim, + kwargs.get('ffn_expansion', 4) * dim, + bias=kwargs.get('bias', False), + ) + self.drop_path = nn.Identity() if drop_path <= 0. else nn.modules.DropPath(drop_path) + + if layer_scale_init_value > 0: + self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True) + self.ffn_gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True) + else: + self.gamma = None + self.ffn_gamma = None + + def forward(self, x): + # mixer + residual = x + x = self.norm(x) + x = self.mixer(x) + if self.gamma is not None: + x = x * self.gamma.unsqueeze(-1) + x = residual + self.drop_path(x) + + # ffn + residual = x + x = self.ffn_norm(x) + x = x.permute(0, 2, 1) + x = self.ffn(x) + x = x.permute(0, 2, 1) + if self.ffn_gamma is not None: + x = x * self.ffn_gamma.unsqueeze(-1) + x = residual + self.drop_path(x) + + return x + + +class TokenizerEncoder(nn.Module): + """ + Encoder component for the VibeVoice tokenizer that converts audio to latent representations. 
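A tiny numeric sketch (illustrative only; the ratio values and the 24 kHz rate are assumptions, the real values come from the tokenizer config) of what the `hop_length = np.prod(self.ratios)` computed in `__init__` below means:

import numpy as np

ratios = [8, 5, 5, 4]                      # hypothetical stride ratios
hop_length = int(np.prod(ratios))          # 800 input samples collapse into one latent frame
print(24000 / hop_length)                  # latent frames per second at an assumed 24 kHz input -> 30.0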
+ + Args: + config: Configuration object with model parameters + """ + def __init__(self, config): + super().__init__() + + # Extract parameters from config + self.channels = config.channels + self.dimension = config.dimension + self.n_filters = config.n_filters + self.ratios = list(reversed(config.ratios)) + self.depths = config.depths + self.n_residual_layers = getattr(config, "n_residual_layers", 1) + self.hop_length = np.prod(self.ratios) + self.causal = config.causal + + # Additional config parameters with defaults + kernel_size = getattr(config, "kernel_size", 7) + last_kernel_size = getattr(config, "last_kernel_size", 7) + norm = getattr(config, "norm", "none") + norm_params = getattr(config, "norm_params", {}) + pad_mode = getattr(config, "pad_mode", "reflect") + bias = getattr(config, "bias", True) + layernorm = getattr(config, "layernorm", "LN") + layernorm_eps = getattr(config, "layernorm_eps", 1e-6) + layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True) + drop_path_rate = getattr(config, "drop_path_rate", 0.0) + mixer_layer = getattr(config, "mixer_layer", "conv") + layer_scale_init_value = getattr(config, "layer_scale_init_value", 0) + disable_last_norm = getattr(config, "disable_last_norm", False) + + # determine the norm type based on layernorm + if layernorm == 'LN': + norm_type = ConvLayerNorm + elif layernorm == 'RMSNorm': + norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine) + else: + raise ValueError(f"Unsupported norm type: {layernorm}") + + # stem and intermediate downsampling conv layers + stem = nn.Sequential( + SConv1d(self.channels, self.n_filters, kernel_size, norm=norm, norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias), + ) + + self.downsample_layers = nn.ModuleList() + self.downsample_layers.append(stem) + for i in range(len(self.ratios)): + in_ch = self.n_filters * (2 ** i) + out_ch = self.n_filters * (2 ** (i + 1)) + downsample_layer = nn.Sequential( + SConv1d(in_ch, out_ch, kernel_size=self.ratios[i] * 2, stride=self.ratios[i], causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias) + ) + self.downsample_layers.append(downsample_layer) + + # configure the transformer blocks + layer_type = partial( + Block1D, + mixer_layer=mixer_layer, + layernorm=layernorm, + eps=layernorm_eps, + causal=self.causal, + pad_mode=pad_mode, + norm=norm, + bias=bias, + layer_scale_init_value=layer_scale_init_value, + ) + + self.stages = nn.ModuleList() + dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))] + cur = 0 + + for i in range(len(self.depths)): + in_ch = self.n_filters * (2 ** i) + stage = nn.Sequential( + *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])] + ) + self.stages.append(stage) + cur += self.depths[i] + + if not disable_last_norm: + self.norm = norm_type(in_ch, eps=layernorm_eps) + else: + self.norm = nn.Identity() + self.head = SConv1d(in_ch, self.dimension, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias) + + def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False): + for i in range(len(self.depths)): + # Apply downsampling + for layer in self.downsample_layers[i]: + if isinstance(layer, SConv1d): + x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + else: + x = layer(x) + + # Apply stage (Block1D contains Convlayer which contains SConv1d) + for block in self.stages[i]: + if hasattr(block, 
'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d): + # Block1D forward with cache support + residual = x + x = block.norm(x) + x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + if block.gamma is not None: + x = x * block.gamma.unsqueeze(-1) + x = residual + x + + # FFN part + residual = x + x = block.ffn_norm(x) + x = x.permute(0, 2, 1) + x = block.ffn(x) + x = x.permute(0, 2, 1) + if block.ffn_gamma is not None: + x = x * block.ffn_gamma.unsqueeze(-1) + x = residual + x + else: + x = block(x) + + return self.norm(x) + + def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False): + x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + return x + + +class TokenizerDecoder(nn.Module): + """ + Decoder component for the VibeVoice tokenizer that converts latent representations back to audio. + + Args: + config: Configuration object with model parameters + """ + def __init__(self, config): + super().__init__() + + # Extract parameters from config + self.dimension = config.dimension + self.channels = config.channels + self.n_filters = config.n_filters + self.ratios = config.ratios + + # IMPORTANT CHANGE: Don't reverse depths again since they're already reversed in VibeVoiceAcousticTokenizerModel + self.depths = config.depths # Changed from list(reversed(config.depths)) + + self.n_residual_layers = getattr(config, "n_residual_layers", 1) + self.hop_length = np.prod(self.ratios) + self.causal = config.causal + + # Additional config parameters with defaults + kernel_size = getattr(config, "kernel_size", 7) + last_kernel_size = getattr(config, "last_kernel_size", 7) + norm = getattr(config, "norm", "none") + norm_params = getattr(config, "norm_params", {}) + pad_mode = getattr(config, "pad_mode", "reflect") + bias = getattr(config, "bias", True) + layernorm = getattr(config, "layernorm", "LN") + layernorm_eps = getattr(config, "layernorm_eps", 1e-6) + trim_right_ratio = getattr(config, "trim_right_ratio", 1.0) + layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True) + drop_path_rate = getattr(config, "drop_path_rate", 0.0) + mixer_layer = getattr(config, "mixer_layer", "conv") + layer_scale_init_value = getattr(config, "layer_scale_init_value", 0) + disable_last_norm = getattr(config, "disable_last_norm", False) + + # determine the norm type based on layernorm + if layernorm == 'LN': + norm_type = ConvLayerNorm + elif layernorm == 'RMSNorm': + norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine) + else: + raise ValueError(f"Unsupported norm type: {layernorm}") + + # stem and upsampling layers + stem = nn.Sequential( + SConv1d(self.dimension, self.n_filters * 2 ** (len(self.depths) - 1), kernel_size, norm=norm, + norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias), + ) + + self.upsample_layers = nn.ModuleList() + self.upsample_layers.append(stem) + for i in range(len(self.ratios)): + in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i)) + out_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i - 1)) + upsample_layer = nn.Sequential( + SConvTranspose1d(in_ch, out_ch, + kernel_size=self.ratios[i] * 2, stride=self.ratios[i], + norm=norm, norm_kwargs=norm_params, bias=bias, + causal=self.causal, trim_right_ratio=trim_right_ratio), + ) + 
self.upsample_layers.append(upsample_layer) + + # configure transformer blocks + layer_type = partial( + Block1D, + mixer_layer=mixer_layer, + layernorm=layernorm, + eps=layernorm_eps, + causal=self.causal, + pad_mode=pad_mode, + norm=norm, + bias=bias, + layer_scale_init_value=layer_scale_init_value, + ) + + self.stages = nn.ModuleList() + dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))] + cur = 0 + + # Create stages in the same order as the original model + for i in range(len(self.depths)): + in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i)) + stage = nn.Sequential( + *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])] + ) + self.stages.append(stage) + cur += self.depths[i] + + if not disable_last_norm: + self.norm = norm_type(in_ch, eps=layernorm_eps) + else: + self.norm = nn.Identity() + self.head = SConv1d(in_ch, self.channels, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias) + + def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False): + for i in range(len(self.depths)): + # Apply upsampling + for layer in self.upsample_layers[i]: + if isinstance(layer, (SConv1d, SConvTranspose1d)): + x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + else: + x = layer(x) + + # Apply stage (Block1D contains Convlayer which contains SConv1d) + for block in self.stages[i]: + if hasattr(block, 'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d): + # Block1D forward with cache support + residual = x + x = block.norm(x) + x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + if block.gamma is not None: + x = x * block.gamma.unsqueeze(-1) + x = residual + x + + # FFN part + residual = x + x = block.ffn_norm(x) + x = x.permute(0, 2, 1) + x = block.ffn(x) + x = x.permute(0, 2, 1) + if block.ffn_gamma is not None: + x = x * block.ffn_gamma.unsqueeze(-1) + x = residual + x + else: + x = block(x) + + return self.norm(x) + + def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False): + x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + return x + + +@dataclass +class VibeVoiceTokenizerEncoderOutput: + """ + Output of VibeVoice tokenizer encoder, representing a Gaussian distribution with fixed variance. + + Args: + mean (`torch.FloatTensor`): The mean parameters of the distribution. + std (`float` or `torch.FloatTensor`): Fixed standard deviation value. + """ + mean: torch.Tensor + std: Optional[Union[float, torch.Tensor]] = None + + def sample(self, dist_type='fix'): + """ + Sample from the distribution. + + Args: + dist_type (`str`): Sampling method, either 'fix' or 'gaussian'. + + Returns: + `torch.FloatTensor`: Sampled values. + `torch.FloatTensor` (optional): Standard deviation used (only when dist_type='gaussian'). 
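As an illustrative sketch (not part of the patch), the fixed-variance output defined here can be exercised as follows; the shape and std value are arbitrary, and the import path assumes the package layout added in this diff:

import torch
from vibevoice.modular.modular_vibevoice_tokenizer import VibeVoiceTokenizerEncoderOutput

out = VibeVoiceTokenizerEncoderOutput(mean=torch.zeros(2, 10, 64), std=0.5)
latents, std = out.sample(dist_type='fix')       # mean + 0.5 * N(0, I); the fixed std is returned unchanged
latents, std = out.sample(dist_type='gaussian')  # one std per batch element, drawn ~ N(0, (0.5 / 0.8)**2)
mode = out.mode()                                # the mean itself
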
+ """ + if dist_type == 'fix': + x = self.mean + self.std * torch.randn_like(self.mean) + return x, self.std + elif dist_type == 'gaussian': + batch_size = self.mean.size(0) + value = self.std / 0.8 + std = torch.randn(batch_size, device=self.mean.device, dtype=self.mean.dtype) * value + + while std.dim() < self.mean.dim(): + std = std.unsqueeze(-1) + + x = self.mean + std * torch.randn_like(self.mean) + return x, std + else: + return self.mean, self.std + + def kl(self): + """Compute KL divergence between this distribution and a standard normal.""" + target = torch.zeros_like(self.mean) + return F.mse_loss(self.mean, target, reduction='none') + + def mode(self): + """Return the distribution mode (which is the mean for Gaussian).""" + return self.mean + +class VibeVoiceAcousticTokenizerModel(PreTrainedModel): + """VibeVoice speech tokenizer model combining encoder and decoder for acoustic tokens""" + + config_class = VibeVoiceAcousticTokenizerConfig + base_model_prefix = "vibevoice_acoustic_tokenizer" + _supports_flash_attn_2 = True + _supports_sdpa = True + _no_split_modules = ["TokenizerEncoder", "TokenizerDecoder"] + + def __init__(self, config): + super().__init__(config) + + self.register_buffer('fix_std', torch.tensor(config.fix_std), persistent=False) + self.std_dist_type = getattr(config, "std_dist_type", "fix") + + # Parse encoder depths + if isinstance(config.encoder_depths, str): + encoder_depths = [int(d) for d in config.encoder_depths.split('-')] + else: + encoder_depths = config.encoder_depths + + # Parse decoder depths if provided + if config.decoder_depths is not None and isinstance(config.decoder_depths, str): + decoder_depths = [int(d) for d in config.decoder_depths.split('-')] + else: + # Default: use reversed encoder depths if decoder_depths is None + decoder_depths = list(reversed(encoder_depths)) + + # Create encoder config + encoder_config = copy.deepcopy(config) + encoder_config.dimension = config.vae_dim + encoder_config.n_filters = config.encoder_n_filters + encoder_config.ratios = config.encoder_ratios + encoder_config.depths = encoder_depths + encoder_config.norm = config.conv_norm + encoder_config.pad_mode = config.pad_mode + encoder_config.bias = config.conv_bias + encoder_config.layernorm_eps = config.layernorm_eps + encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine + encoder_config.mixer_layer = config.mixer_layer + encoder_config.layer_scale_init_value = config.layer_scale_init_value + encoder_config.disable_last_norm = config.disable_last_norm + + # Create decoder config + decoder_config = copy.deepcopy(config) + decoder_config.dimension = config.vae_dim + decoder_config.n_filters = config.decoder_n_filters + decoder_config.ratios = config.decoder_ratios + decoder_config.depths = decoder_depths + decoder_config.norm = config.conv_norm + decoder_config.pad_mode = config.pad_mode + decoder_config.bias = config.conv_bias + decoder_config.layernorm_eps = config.layernorm_eps + decoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine + decoder_config.mixer_layer = config.mixer_layer + decoder_config.layer_scale_init_value = config.layer_scale_init_value + decoder_config.disable_last_norm = config.disable_last_norm + + # Initialize encoder and decoder + self.encoder = TokenizerEncoder(encoder_config) + self.decoder = TokenizerDecoder(decoder_config) + + # Initialize weights + self.apply(self._init_weights) + + def _init_weights(self, module): + """Initialize weights for the model""" + if isinstance(module, 
nn.Linear): + nn.init.normal_(module.weight, std=self.config.weight_init_value) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.LayerNorm): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv1d): + nn.init.normal_(module.weight, std=self.config.weight_init_value) + if module.bias is not None: + nn.init.zeros_(module.bias) + + @torch.no_grad() + def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False): + """Convert audio to latent representations""" + latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1), std=self.fix_std) + + @torch.no_grad() + def sampling(self, encoder_output, dist_type=None): + """Sample from the encoder output distribution""" + dist_type = dist_type or self.std_dist_type + + if dist_type == 'fix': + return encoder_output.sample(dist_type='fix') + elif dist_type == 'gaussian': + return encoder_output.sample(dist_type='gaussian') + else: + raise ValueError(f"Unsupported dist_type: {dist_type}, expected 'fix' or 'gaussian'") + + @torch.no_grad() + def decode(self, latents, cache=None, sample_indices=None, use_cache=False, debug=False): + """Convert latent representations back to audio""" + if latents.shape[1] == self.config.vae_dim: + pass + else: + latents = latents.permute(0, 2, 1) + + audio = self.decoder(latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + return audio + + def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False): + """Full forward pass: encode audio to latents, then decode back to audio""" + encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + sampled_latents, _ = self.sampling(encoder_output) + reconstructed = self.decode(sampled_latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + return reconstructed, sampled_latents + + +class VibeVoiceSemanticTokenizerModel(PreTrainedModel): + """VibeVoice speech tokenizer model with only encoder for semantic tokens""" + + config_class = VibeVoiceSemanticTokenizerConfig + base_model_prefix = "vibevoice_semantic_tokenizer" + _supports_flash_attn_2 = True + _supports_sdpa = True + _no_split_modules = ["TokenizerEncoder"] + + def __init__(self, config): + super().__init__(config) + + # Parse encoder depths + if isinstance(config.encoder_depths, str): + encoder_depths = [int(d) for d in config.encoder_depths.split('-')] + else: + encoder_depths = config.encoder_depths + + # Create encoder config + encoder_config = copy.deepcopy(config) + encoder_config.dimension = config.vae_dim + encoder_config.n_filters = config.encoder_n_filters + encoder_config.ratios = config.encoder_ratios + encoder_config.depths = encoder_depths + encoder_config.norm = config.conv_norm + encoder_config.pad_mode = config.pad_mode + encoder_config.bias = config.conv_bias + encoder_config.layernorm_eps = config.layernorm_eps + encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine + encoder_config.mixer_layer = config.mixer_layer + encoder_config.layer_scale_init_value = config.layer_scale_init_value + encoder_config.disable_last_norm = config.disable_last_norm + + # Initialize encoder and decoder + self.encoder = TokenizerEncoder(encoder_config) + + # Initialize weights + self.apply(self._init_weights) + + def 
_init_weights(self, module): + """Initialize weights for the model""" + if isinstance(module, nn.Linear): + nn.init.normal_(module.weight, std=self.config.weight_init_value) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.LayerNorm): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv1d): + nn.init.normal_(module.weight, std=self.config.weight_init_value) + if module.bias is not None: + nn.init.zeros_(module.bias) + + @torch.no_grad() + def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False): + """Convert audio to latent representations""" + latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1)) + + @torch.no_grad() + def sampling(self, encoder_output, dist_type=None): + """Sample from the encoder output distribution""" + return encoder_output.sample(dist_type='none') + + def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False): + """Full forward pass: encode audio to latents, then decode back to audio""" + encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug) + sampled_latents, _ = self.sampling(encoder_output, dist_type='none') + return None, sampled_latents + +AutoModel.register(VibeVoiceAcousticTokenizerConfig, VibeVoiceAcousticTokenizerModel) +AutoModel.register(VibeVoiceSemanticTokenizerConfig, VibeVoiceSemanticTokenizerModel) + +__all__ = [ + "VibeVoiceTokenizerStreamingCache", + "VibeVoiceAcousticTokenizerModel", + "VibeVoiceSemanticTokenizerModel", +] \ No newline at end of file diff --git a/vibevoice/modular/streamer.py b/vibevoice/modular/streamer.py new file mode 100644 index 0000000..7a76cb0 --- /dev/null +++ b/vibevoice/modular/streamer.py @@ -0,0 +1,264 @@ +from __future__ import annotations + +import torch + +import asyncio +from queue import Queue +from typing import TYPE_CHECKING, Optional + + +from transformers.generation import BaseStreamer + + +class AudioStreamer(BaseStreamer): + """ + Audio streamer that stores audio chunks in queues for each sample in the batch. + This allows streaming audio generation for multiple samples simultaneously. + + Parameters: + batch_size (`int`): + The batch size for generation + stop_signal (`any`, *optional*): + The signal to put in the queue when generation ends. Defaults to None. + timeout (`float`, *optional*): + The timeout for the audio queue. If `None`, the queue will block indefinitely. + """ + + def __init__( + self, + batch_size: int, + stop_signal: Optional[any] = None, + timeout: Optional[float] = None, + ): + self.batch_size = batch_size + self.stop_signal = stop_signal + self.timeout = timeout + + # Create a queue for each sample in the batch + self.audio_queues = [Queue() for _ in range(batch_size)] + self.finished_flags = [False for _ in range(batch_size)] + self.sample_indices_map = {} # Maps from sample index to queue index + + def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor): + """ + Receives audio chunks and puts them in the appropriate queues. + + Args: + audio_chunks: Tensor of shape (num_samples, ...) 
containing audio chunks + sample_indices: Tensor indicating which samples these chunks belong to + """ + for i, sample_idx in enumerate(sample_indices): + idx = sample_idx.item() + if idx < self.batch_size and not self.finished_flags[idx]: + # Convert to numpy or keep as tensor based on preference + audio_chunk = audio_chunks[i].detach().cpu() + self.audio_queues[idx].put(audio_chunk, timeout=self.timeout) + + def end(self, sample_indices: Optional[torch.Tensor] = None): + """ + Signals the end of generation for specified samples or all samples. + + Args: + sample_indices: Optional tensor of sample indices to end. If None, ends all. + """ + if sample_indices is None: + # End all samples + for idx in range(self.batch_size): + if not self.finished_flags[idx]: + self.audio_queues[idx].put(self.stop_signal, timeout=self.timeout) + self.finished_flags[idx] = True + else: + # End specific samples + for sample_idx in sample_indices: + idx = sample_idx.item() if torch.is_tensor(sample_idx) else sample_idx + if idx < self.batch_size and not self.finished_flags[idx]: + self.audio_queues[idx].put(self.stop_signal, timeout=self.timeout) + self.finished_flags[idx] = True + + def __iter__(self): + """Returns an iterator over the batch of audio streams.""" + return AudioBatchIterator(self) + + def get_stream(self, sample_idx: int): + """Get the audio stream for a specific sample.""" + if sample_idx >= self.batch_size: + raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}") + return AudioSampleIterator(self, sample_idx) + + +class AudioSampleIterator: + """Iterator for a single audio stream from the batch.""" + + def __init__(self, streamer: AudioStreamer, sample_idx: int): + self.streamer = streamer + self.sample_idx = sample_idx + + def __iter__(self): + return self + + def __next__(self): + value = self.streamer.audio_queues[self.sample_idx].get(timeout=self.streamer.timeout) + if value == self.streamer.stop_signal: + raise StopIteration() + return value + + +class AudioBatchIterator: + """Iterator that yields audio chunks for all samples in the batch.""" + + def __init__(self, streamer: AudioStreamer): + self.streamer = streamer + self.active_samples = set(range(streamer.batch_size)) + + def __iter__(self): + return self + + def __next__(self): + if not self.active_samples: + raise StopIteration() + + batch_chunks = {} + samples_to_remove = set() + + # Try to get chunks from all active samples + for idx in self.active_samples: + try: + value = self.streamer.audio_queues[idx].get(block=False) + if value == self.streamer.stop_signal: + samples_to_remove.add(idx) + else: + batch_chunks[idx] = value + except: + # Queue is empty for this sample, skip it this iteration + pass + + # Remove finished samples + self.active_samples -= samples_to_remove + + if batch_chunks: + return batch_chunks + elif self.active_samples: + # If no chunks were ready but we still have active samples, + # wait a bit and try again + import time + time.sleep(0.01) + return self.__next__() + else: + raise StopIteration() + + +class AsyncAudioStreamer(AudioStreamer): + """ + Async version of AudioStreamer for use in async contexts. 
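A minimal usage sketch for the async streamer defined here (not part of the patch): the instance must be created inside a running event loop because __init__ captures asyncio.get_running_loop(), and the chunk sizes below are arbitrary.

import asyncio
import torch
from vibevoice.modular.streamer import AsyncAudioStreamer

async def main():
    streamer = AsyncAudioStreamer(batch_size=1)      # must be built inside a running loop

    async def produce():
        for _ in range(3):
            streamer.put(torch.randn(1, 2400), torch.tensor([0]))  # one fake chunk for sample 0
            await asyncio.sleep(0)                                  # let the queued callback run
        streamer.end()                                              # enqueue the stop signal

    producer = asyncio.create_task(produce())
    async for chunk in streamer.get_stream(0):       # yields tensors until the stop signal is seen
        print(chunk.shape)                           # torch.Size([2400])
    await producer

asyncio.run(main())
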
+ """ + + def __init__( + self, + batch_size: int, + stop_signal: Optional[any] = None, + timeout: Optional[float] = None, + ): + super().__init__(batch_size, stop_signal, timeout) + # Replace regular queues with async queues + self.audio_queues = [asyncio.Queue() for _ in range(batch_size)] + self.loop = asyncio.get_running_loop() + + def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor): + """Put audio chunks in the appropriate async queues.""" + for i, sample_idx in enumerate(sample_indices): + idx = sample_idx.item() + if idx < self.batch_size and not self.finished_flags[idx]: + audio_chunk = audio_chunks[i].detach().cpu() + self.loop.call_soon_threadsafe( + self.audio_queues[idx].put_nowait, audio_chunk + ) + + def end(self, sample_indices: Optional[torch.Tensor] = None): + """Signal the end of generation for specified samples.""" + if sample_indices is None: + indices_to_end = range(self.batch_size) + else: + indices_to_end = [s.item() if torch.is_tensor(s) else s for s in sample_indices] + + for idx in indices_to_end: + if idx < self.batch_size and not self.finished_flags[idx]: + self.loop.call_soon_threadsafe( + self.audio_queues[idx].put_nowait, self.stop_signal + ) + self.finished_flags[idx] = True + + async def get_stream(self, sample_idx: int): + """Get async iterator for a specific sample's audio stream.""" + if sample_idx >= self.batch_size: + raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}") + + while True: + value = await self.audio_queues[sample_idx].get() + if value == self.stop_signal: + break + yield value + + def __aiter__(self): + """Returns an async iterator over all audio streams.""" + return AsyncAudioBatchIterator(self) + + +class AsyncAudioBatchIterator: + """Async iterator for batch audio streaming.""" + + def __init__(self, streamer: AsyncAudioStreamer): + self.streamer = streamer + self.active_samples = set(range(streamer.batch_size)) + + def __aiter__(self): + return self + + async def __anext__(self): + if not self.active_samples: + raise StopAsyncIteration() + + batch_chunks = {} + samples_to_remove = set() + + # Create tasks for all active samples + tasks = { + idx: asyncio.create_task(self._get_chunk(idx)) + for idx in self.active_samples + } + + # Wait for at least one chunk to be ready + done, pending = await asyncio.wait( + tasks.values(), + return_when=asyncio.FIRST_COMPLETED, + timeout=self.streamer.timeout + ) + + # Cancel pending tasks + for task in pending: + task.cancel() + + # Process completed tasks + for idx, task in tasks.items(): + if task in done: + try: + value = await task + if value == self.streamer.stop_signal: + samples_to_remove.add(idx) + else: + batch_chunks[idx] = value + except asyncio.CancelledError: + pass + + self.active_samples -= samples_to_remove + + if batch_chunks: + return batch_chunks + elif self.active_samples: + # Try again if we still have active samples + return await self.__anext__() + else: + raise StopAsyncIteration() + + async def _get_chunk(self, idx): + """Helper to get a chunk from a specific queue.""" + return await self.streamer.audio_queues[idx].get() \ No newline at end of file diff --git a/vibevoice/processor/__init__.py b/vibevoice/processor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibevoice/processor/vibevoice_processor.py b/vibevoice/processor/vibevoice_processor.py new file mode 100644 index 0000000..66d0a9d --- /dev/null +++ b/vibevoice/processor/vibevoice_processor.py @@ -0,0 +1,677 @@ +import math +import warnings +from 
typing import List, Optional, Union, Dict, Any, Tuple +import os +import re + +import numpy as np +import torch + +from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy +from transformers.utils import TensorType, logging +from .vibevoice_tokenizer_processor import AudioNormalizer + +logger = logging.get_logger(__name__) + + +class VibeVoiceProcessor: + r""" + Constructs a VibeVoice processor which wraps a VibeVoice tokenizer and audio processor into a single processor. + + [`VibeVoiceProcessor`] offers all the functionalities of [`VibeVoiceTokenizer`] and [`VibeVoiceTokenizerProcessor`]. + See the [`~VibeVoiceProcessor.__call__`] and [`~VibeVoiceProcessor.decode`] for more information. + + Args: + tokenizer (`VibeVoiceTextTokenizer` or `VibeVoiceTextTokenizerFast`): + The tokenizer for text processing. + audio_processor (`VibeVoiceTokenizerProcessor`): + The audio processor for speech processing. + speech_tok_compress_ratio (`int`, *optional*, defaults to 3200): + The compression ratio for speech tokenization. + db_normalize (`bool`, *optional*, defaults to True): + Whether to apply decibel normalization to audio inputs. + """ + + def __init__(self, tokenizer=None, audio_processor=None, speech_tok_compress_ratio=3200, db_normalize=True, **kwargs): + self.tokenizer = tokenizer + self.audio_processor = audio_processor + self.speech_tok_compress_ratio = speech_tok_compress_ratio + self.db_normalize = db_normalize + self.audio_normalizer = AudioNormalizer() if db_normalize else None + self.system_prompt = " Transform the text provided by various speakers into speech output, utilizing the distinct voice of each respective speaker.\n" + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + """ + Instantiate a VibeVoiceProcessor from a pretrained VibeVoice processor. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + - a string, the *model id* of a pretrained model + - a path to a *directory* containing processor config + + Returns: + [`VibeVoiceProcessor`]: The processor object instantiated from pretrained model. + """ + import os + import json + from .vibevoice_tokenizer_processor import VibeVoiceTokenizerProcessor + from vibevoice.modular.modular_vibevoice_text_tokenizer import ( + VibeVoiceTextTokenizer, + VibeVoiceTextTokenizerFast + ) + + # Load processor configuration + config_path = os.path.join(pretrained_model_name_or_path, "preprocessor_config.json") + if os.path.exists(config_path): + with open(config_path, 'r') as f: + config = json.load(f) + else: + logger.warning(f"No preprocessor_config.json found at {pretrained_model_name_or_path}, using defaults") + config = { + "speech_tok_compress_ratio": 3200, + "db_normalize": True, + } + + # Extract main processor parameters + speech_tok_compress_ratio = config.get("speech_tok_compress_ratio", 3200) + db_normalize = config.get("db_normalize", True) + + # Load tokenizer - try from model path first, then fallback to Qwen + language_model_pretrained_name = config.get("language_model_pretrained_name", None) or kwargs.pop("language_model_pretrained_name", "Qwen/Qwen2.5-1.5B") + logger.info(f"Loading tokenizer from {language_model_pretrained_name}") + if 'qwen' in language_model_pretrained_name.lower(): + tokenizer = VibeVoiceTextTokenizerFast.from_pretrained( + language_model_pretrained_name, + **kwargs + ) + else: + raise ValueError(f"Unsupported tokenizer type for {language_model_pretrained_name}. 
Supported types: Qwen, Llama, Gemma.") + + # Load audio processor + if "audio_processor" in config: + # Create audio processor from config + audio_config = config["audio_processor"] + audio_processor = VibeVoiceTokenizerProcessor( + sampling_rate=audio_config.get("sampling_rate", 24000), + normalize_audio=audio_config.get("normalize_audio", True), + target_dB_FS=audio_config.get("target_dB_FS", -25), + eps=audio_config.get("eps", 1e-6), + ) + else: + # Create default audio processor + audio_processor = VibeVoiceTokenizerProcessor() + + # Create and return the processor + return cls( + tokenizer=tokenizer, + audio_processor=audio_processor, + speech_tok_compress_ratio=speech_tok_compress_ratio, + db_normalize=db_normalize, + ) + + def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs): + """ + Save a processor to a directory, so that it can be re-loaded using the + [`~VibeVoiceProcessor.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the processor will be saved. + """ + import os + import json + + os.makedirs(save_directory, exist_ok=True) + + # Save processor configuration + processor_config = { + "processor_class": "VibeVoiceProcessor", + "speech_tok_compress_ratio": self.speech_tok_compress_ratio, + "db_normalize": self.db_normalize, + "audio_processor": { + "feature_extractor_type": "VibeVoiceTokenizerProcessor", + "sampling_rate": getattr(self.audio_processor, 'sampling_rate', 24000), + "normalize_audio": getattr(self.audio_processor, 'normalize_audio', True), + "target_dB_FS": getattr(self.audio_processor, 'target_dB_FS', -25), + "eps": getattr(self.audio_processor, 'eps', 1e-6), + } + } + + config_path = os.path.join(save_directory, "preprocessor_config.json") + with open(config_path, 'w') as f: + json.dump(processor_config, f, indent=2) + + logger.info(f"Processor configuration saved in {config_path}") + + def __call__( + self, + text: Optional[Union[str, List[str], TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, + voice_samples: Optional[Union[List[Union[str, np.ndarray]], List[List[Union[str, np.ndarray]]]]] = None, + padding: Union[bool, str, PaddingStrategy] = True, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_attention_mask: bool = True, + **kwargs, + ) -> BatchEncoding: + """ + Main method to process one or more podcast scripts with optional voice samples. + + Args: + text (`str`, `List[str]`): + The input text(s) to process. Can be: + - A single script string + - A list of script strings for batch processing + - A path to a .json or .txt file + - A list of paths + voice_samples (`List[Union[str, np.ndarray]]`, `List[List[Union[str, np.ndarray]]]`, *optional*): + Voice samples for each script. 
Can be: + - A list of samples for a single script + - A list of lists for batch processing + padding (`bool`, `str` or `PaddingStrategy`, defaults to `True`): + Whether to pad sequences to the same length + truncation (`bool`, `str` or `TruncationStrategy`, defaults to `False`): + Whether to truncate sequences + max_length (`int`, *optional*): + Maximum length of the returned sequences + return_tensors (`str` or `TensorType`, *optional*): + If set, will return tensors of a particular framework + return_attention_mask (`bool`, defaults to `True`): + Whether to return the attention mask + + Returns: + `BatchEncoding`: A BatchEncoding with the following fields: + - **input_ids** -- List of token id sequences or tensor + - **attention_mask** -- List of attention masks or tensor + - **speech_tensors** -- Padded speech inputs (if voice_samples provided) + - **speech_masks** -- Speech masks (if voice_samples provided) + - **speech_input_mask** -- Boolean masks indicating speech token positions + """ + # Handle single vs batch input + if isinstance(text, str) or (isinstance(text, list) and len(text) > 0 and not isinstance(text[0], str)): + # Single input + texts = [text] + is_batched = False + else: + # Batch input + texts = text + is_batched = True + + # Handle voice samples + if voice_samples is not None: + if not is_batched or (isinstance(voice_samples[0], (str, np.ndarray))): + # Single set of voice samples + voice_samples_list = [voice_samples] + else: + # Batch of voice samples + voice_samples_list = voice_samples + else: + voice_samples_list = [None] * len(texts) + + # Process each input + all_encodings = [] + for text_input, voice_input in zip(texts, voice_samples_list): + encoding = self._process_single(text_input, voice_input) + all_encodings.append(encoding) + + # Combine batch + batch_encoding = self._batch_encode( + all_encodings, + padding=padding, + truncation=truncation, + max_length=max_length, + return_tensors=return_tensors, + return_attention_mask=return_attention_mask, + ) + + return batch_encoding + + def _process_single( + self, + text: Union[str, TextInput], + voice_samples: Optional[List[Union[str, np.ndarray]]] = None, + ) -> Dict[str, Any]: + """Process a single podcast script.""" + # Determine if text is a file path or direct script + script = None + if isinstance(text, str): + # Check if it's a file path + if text.endswith('.json') and os.path.exists(text): + script = self._convert_json_to_script(text) + elif text.endswith('.txt') and os.path.exists(text): + script = self._convert_text_to_script(text) + else: + # Assume it's the script content directly + script = text + + if script is None: + raise ValueError(f"Could not process input text: {text}") + + # Parse the script + parsed_lines = self._parse_script(script) + all_speakers = list(set(speaker_id for speaker_id, _ in parsed_lines)) + + # Create system prompt + # system_tokens = self.tokenizer.encode(self.system_prompt, add_special_tokens=False) + system_tokens = self.tokenizer.encode(self.system_prompt) + + # Process voice samples if provided + if voice_samples: + voice_tokens, voice_speech_inputs, voice_speech_masks = self._create_voice_prompt(voice_samples[:len(all_speakers)]) + else: + voice_tokens, voice_speech_inputs, voice_speech_masks = [], [], [] + + # Build full token sequence + full_tokens = system_tokens + voice_tokens + speech_input_mask = [False] * len(system_tokens) + voice_speech_masks + + # Add text input section + full_tokens += self.tokenizer.encode(' Text input:\n', add_special_tokens=False) + 
speech_input_mask += [False] * len(self.tokenizer.encode(' Text input:\n', add_special_tokens=False)) + + for speaker_id, speaker_text in parsed_lines: + speaker_text_tokens = self.tokenizer.encode(f" Speaker {speaker_id}:{speaker_text}\n", add_special_tokens=False) + full_tokens += speaker_text_tokens + speech_input_mask += [False] * len(speaker_text_tokens) + + # Add speech output section + full_tokens += self.tokenizer.encode(' Speech output:\n', add_special_tokens=False) + [self.tokenizer.speech_start_id] + speech_input_mask += [False] * (len(self.tokenizer.encode(' Speech output:\n', add_special_tokens=False)) + 1) + + return { + "input_ids": full_tokens, + "speech_inputs": voice_speech_inputs if voice_speech_inputs else None, + "speech_input_mask": speech_input_mask, + "parsed_script": parsed_lines, + "all_speakers": all_speakers, + } + + def _batch_encode( + self, + encodings: List[Dict[str, Any]], + padding: Union[bool, str, PaddingStrategy] = True, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_attention_mask: bool = True, + ) -> BatchEncoding: + """Combine multiple encodings into a batch with padding.""" + # Extract input_ids and create attention_mask + input_ids_list = [enc["input_ids"] for enc in encodings] + speech_input_masks_list = [enc["speech_input_mask"] for enc in encodings] + + # Determine padding strategy + if isinstance(padding, bool): + padding_strategy = PaddingStrategy.LONGEST if padding else PaddingStrategy.DO_NOT_PAD + elif isinstance(padding, str): + padding_strategy = PaddingStrategy(padding) + else: + padding_strategy = padding + + # Apply padding to input_ids + if padding_strategy != PaddingStrategy.DO_NOT_PAD: + if padding_strategy == PaddingStrategy.LONGEST: + max_len = max(len(ids) for ids in input_ids_list) + elif padding_strategy == PaddingStrategy.MAX_LENGTH and max_length is not None: + max_len = max_length + else: + max_len = max(len(ids) for ids in input_ids_list) + + # Pad sequences + padded_input_ids = [] + attention_masks = [] + padded_speech_input_masks = [] + + for input_ids, speech_mask in zip(input_ids_list, speech_input_masks_list): + # Truncate if needed + if truncation and len(input_ids) > max_len: + input_ids = input_ids[:max_len] + speech_mask = speech_mask[:max_len] + + # Pad + padding_length = max_len - len(input_ids) + # padded_ids = [self.tokenizer.pad_token_id] * padding_length + input_ids + padded_ids = [self.tokenizer.pad_id] * padding_length + input_ids + attention_mask = [0] * padding_length + [1] * len(input_ids) + padded_speech_mask = [False] * padding_length + speech_mask + + padded_input_ids.append(padded_ids) + attention_masks.append(attention_mask) + padded_speech_input_masks.append(padded_speech_mask) + + input_ids_list = padded_input_ids + speech_input_masks_list = padded_speech_input_masks + else: + # No padding, just create attention masks + attention_masks = [[1] * len(ids) for ids in input_ids_list] if return_attention_mask else None + + # Process speech inputs + all_speech_inputs = [] + has_speech = False + for enc in encodings: + if enc["speech_inputs"] is not None: + all_speech_inputs.extend(enc["speech_inputs"]) + has_speech = True + + # Prepare batch encoding + batch_encoding = BatchEncoding() + + # Handle tensor conversion + if return_tensors is not None: + batch_encoding["input_ids"] = torch.tensor(input_ids_list, dtype=torch.long) + if return_attention_mask and attention_masks is not None: + 
batch_encoding["attention_mask"] = torch.tensor(attention_masks, dtype=torch.long) + batch_encoding["speech_input_mask"] = torch.tensor(speech_input_masks_list, dtype=torch.bool) + else: + batch_encoding["input_ids"] = input_ids_list + if return_attention_mask and attention_masks is not None: + batch_encoding["attention_mask"] = attention_masks + batch_encoding["speech_input_mask"] = speech_input_masks_list + + # Process speech tensors if present + if has_speech: + speech_dict = self.prepare_speech_inputs( + all_speech_inputs, + return_tensors=return_tensors, + ) + batch_encoding["speech_tensors"] = speech_dict["padded_speeches"] + batch_encoding["speech_masks"] = speech_dict["speech_masks"] + else: + batch_encoding["speech_tensors"] = None + batch_encoding["speech_masks"] = None + + # Add metadata + batch_encoding["parsed_scripts"] = [enc["parsed_script"] for enc in encodings] + batch_encoding["all_speakers_list"] = [enc["all_speakers"] for enc in encodings] + + return batch_encoding + + def _create_voice_prompt( + self, + speaker_samples: List[Union[str, np.ndarray]] + ) -> Tuple[List[int], List[np.ndarray], List[bool]]: + """ + Create voice prompt tokens and process audio samples. + + Returns: + tuple: (voice_tokens, voice_speech_inputs, voice_speech_masks) + """ + vae_token_id = self.tokenizer.speech_diffusion_id + + voice_full_tokens = self.tokenizer.encode(' Voice input:\n', add_special_tokens=False) + voice_speech_inputs = [] + voice_speech_masks = [False] * len(voice_full_tokens) + + for speaker_id, speaker_audio in enumerate(speaker_samples): + prefix_tokens = self.tokenizer.encode(f" Speaker {speaker_id}:", add_special_tokens=False) + + # Process audio + if isinstance(speaker_audio, str): + # Load audio from file + wav = self.audio_processor._load_audio_from_path(speaker_audio) + else: + wav = np.array(speaker_audio, dtype=np.float32) + + # Apply normalization if needed + if self.db_normalize and self.audio_normalizer: + wav = self.audio_normalizer(wav) + + # Calculate token length based on compression ratio + # if speaker_audio.endswith('.pt') or speaker_audio.endswith('.npy'): + # vae_tok_len = wav.shape[0] + # else: + vae_tok_len = math.ceil(wav.shape[0] / self.speech_tok_compress_ratio) + + # Build tokens and masks + speaker_tokens = (prefix_tokens + + [self.tokenizer.speech_start_id] + + [vae_token_id] * vae_tok_len + + [self.tokenizer.speech_end_id] + + self.tokenizer.encode('\n', add_special_tokens=False)) + + vae_input_mask = ([False] * len(prefix_tokens) + + [False] + + [True] * vae_tok_len + + [False] + + [False]) + + voice_full_tokens.extend(speaker_tokens) + voice_speech_masks.extend(vae_input_mask) + voice_speech_inputs.append(wav) + + return voice_full_tokens, voice_speech_inputs, voice_speech_masks + + def prepare_speech_inputs( + self, + speech_inputs: List[np.ndarray], + return_tensors: Optional[Union[str, TensorType]] = None, + device: Optional[Union[str, torch.device]] = None, + dtype: Optional[torch.dtype] = None, + ) -> Dict[str, Any]: + """ + Prepare speech inputs for model consumption. 
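For a concrete sense of the shapes this method returns (illustrative only; assumes the default compress ratio of 3200 and 24 kHz mono input, with the package importable as laid out in this diff):

import numpy as np
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor

processor = VibeVoiceProcessor()              # tokenizer/audio_processor are not needed by this method
clips = [np.zeros(48000, dtype=np.float32),   # 2.0 s at 24 kHz -> ceil(48000 / 3200) = 15 token slots
         np.zeros(84000, dtype=np.float32)]   # 3.5 s at 24 kHz -> ceil(84000 / 3200) = 27 token slots
out = processor.prepare_speech_inputs(clips, return_tensors="pt")
print(out["padded_speeches"].shape)           # torch.Size([2, 84000]) - padded to the longest waveform
print(out["speech_masks"].shape)              # torch.Size([2, 27])    - True over each clip's valid tokens
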
+ + Args: + speech_inputs: List of speech arrays + return_tensors: Output tensor type + device: Device to place tensors on + dtype: Data type for tensors + + Returns: + Dictionary with padded_speeches and speech_masks + """ + if not speech_inputs: + return {"padded_speeches": None, "speech_masks": None} + + # Calculate sequence lengths + vae_tok_seqlens = [math.ceil(s.shape[0] / self.speech_tok_compress_ratio) for s in speech_inputs] + # vae_tok_seqlens = [math.ceil(s.shape[0] / self.speech_tok_compress_ratio) if s.ndim == 1 else s.shape[0] for s in speech_inputs] + max_speech_length = max(s.shape[0] for s in speech_inputs) + + # Pad speeches + if speech_inputs[0].ndim == 1: + padded_speeches = np.full((len(speech_inputs), max_speech_length), fill_value=0, dtype=np.float32) + else: + padded_speeches = np.full((len(speech_inputs), max_speech_length, speech_inputs[0].shape[-1]), fill_value=0, dtype=np.float32) + speech_masks = np.zeros((len(speech_inputs), max(vae_tok_seqlens)), dtype=np.bool_) + + for i, (speech, vae_tok_length) in enumerate(zip(speech_inputs, vae_tok_seqlens)): + padded_speeches[i, :len(speech)] = speech + speech_masks[i, :vae_tok_length] = True + + result = { + "padded_speeches": padded_speeches, + "speech_masks": speech_masks, + } + + # Convert to tensors if requested + if return_tensors == "pt": + result["padded_speeches"] = torch.tensor(padded_speeches, device=device, dtype=dtype or torch.float32) + result["speech_masks"] = torch.tensor(speech_masks, device=device, dtype=torch.bool) + + return result + + def _convert_json_to_script(self, json_file: str) -> str: + """ + Convert JSON format to script format. + Expected JSON format: + [ + {"speaker": "1", "text": "Hello everyone..."}, + {"speaker": "2", "text": "Great to be here..."} + ] + """ + import json + + with open(json_file, 'r', encoding='utf-8') as f: + data = json.load(f) + + if not isinstance(data, list): + raise ValueError("JSON file must contain a list of speaker entries") + + script_lines = [] + for item in data: + if not isinstance(item, dict): + logger.warning(f"Skipping non-dict entry: {item}") + continue + + speaker = item.get('speaker') + text = item.get('text') + + if speaker is None or text is None: + logger.warning(f"Skipping entry missing speaker or text: {item}") + continue + + # Ensure speaker ID is valid + try: + speaker_id = int(speaker) + except (ValueError, TypeError): + logger.warning(f"Invalid speaker ID: {speaker}, skipping entry") + continue + + # Clean up text + text = text.strip() + if text: + script_lines.append(f"Speaker {speaker_id}: {text}") + + if not script_lines: + raise ValueError("No valid entries found in JSON file") + + return "\n".join(script_lines) + + def _convert_text_to_script(self, text_file: str) -> str: + """ + Convert text file to script format. + Handles multiple formats: + 1. Already formatted as "Speaker X: text" + 2. Plain text (assigns to Speaker 1) + + Handles edge cases like multiple colons in a line. 
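For illustration, the pattern used by this method copes with extra colons inside the spoken text (the input lines here are hypothetical):

import re

line = "Speaker 2: Welcome back: part two"               # note the extra colon in the text
m = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line, re.IGNORECASE)
print(m.group(1), '|', m.group(2))                       # 2 | Welcome back: part two

# A plain line such as "And now the weather." has no "Speaker N:" prefix,
# so it is kept as-is and attributed to Speaker 1.
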
+ """ + with open(text_file, 'r', encoding='utf-8') as f: + lines = f.readlines() + + script_lines = [] + current_speaker = 1 + + for line in lines: + line = line.strip() + if not line: + continue + + # Try to parse as "Speaker X: text" format + # Use regex to be more robust + speaker_match = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line, re.IGNORECASE) + + if speaker_match: + speaker_id = int(speaker_match.group(1)) + text = speaker_match.group(2).strip() + if text: + script_lines.append(f"Speaker {speaker_id}: {text}") + else: + # Treat as plain text - assign to current speaker + script_lines.append(f"Speaker {current_speaker}: {line}") + + if not script_lines: + raise ValueError("No valid content found in text file") + + return "\n".join(script_lines) + + def _parse_script(self, script: str) -> List[Tuple[int, str]]: + """Parse script into list of (speaker_id, text) tuples.""" + lines = script.strip().split("\n") + parsed_lines = [] + speaker_ids = [] + + # First pass: parse all lines and collect speaker IDs + for line in lines: + if not line.strip(): + continue + + # Use regex to handle edge cases like multiple colons + match = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line.strip(), re.IGNORECASE) + + if match: + speaker_id = int(match.group(1)) + text = ' ' + match.group(2).strip() + parsed_lines.append((speaker_id, text)) + speaker_ids.append(speaker_id) + else: + logger.warning(f"Could not parse line: '{line}'") + + if not parsed_lines: + raise ValueError("No valid speaker lines found in script") + + # Check if we need to normalize speaker IDs (only if all are > 0) + min_speaker_id = min(speaker_ids) + if min_speaker_id > 0: + # Normalize to start from 0 + normalized_lines = [] + for speaker_id, text in parsed_lines: + normalized_lines.append((speaker_id - 1, text)) + return normalized_lines + else: + # Keep original IDs + return parsed_lines + + def _merge_inputs(self, text_inputs: BatchEncoding, audio_inputs: Dict) -> BatchEncoding: + """Merge text and audio inputs into a single BatchEncoding.""" + # Start with text inputs + merged = BatchEncoding(text_inputs) + + # Add audio-specific fields + if "audio" in audio_inputs: + merged["speech_inputs"] = audio_inputs["audio"] + if "streaming" in audio_inputs: + merged["streaming"] = audio_inputs["streaming"] + + return merged + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to VibeVoiceTextTokenizer's [`~PreTrainedTokenizer.batch_decode`]. + Please refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to VibeVoiceTextTokenizer's [`~PreTrainedTokenizer.decode`]. + Please refer to the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + @property + def model_input_names(self): + """ + Return the list of inputs accepted by the model. + """ + tokenizer_input_names = self.tokenizer.model_input_names + audio_processor_input_names = self.audio_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + audio_processor_input_names + ["speech_inputs", "speech_input_mask"])) + + def save_audio(self, + audio: Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]], + output_path: str = "output.wav", + sampling_rate: Optional[int] = None, + normalize: bool = False, + batch_prefix: str = "audio_", + ) -> str: + """ + Save audio data to a file. 
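A hedged end-to-end sketch of the processor API in this file; the checkpoint directory and the reference .wav paths are placeholders, and waveform generation itself happens in the model code, not here:

from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor

processor = VibeVoiceProcessor.from_pretrained("path/to/VibeVoice-checkpoint")   # placeholder path
batch = processor(
    "Speaker 1: Hello and welcome.\nSpeaker 2: Thanks for having me.",
    voice_samples=["speaker1_ref.wav", "speaker2_ref.wav"],                      # placeholder files
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["speech_tensors"].shape)

# Once a model has produced a waveform `audio` (tensor or ndarray):
# processor.save_audio(audio, output_path="podcast.wav", sampling_rate=24000)
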
+ Args: + audio (Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]]): + The audio data to save. Can be a single tensor/array or a list of them. + output_path (str, optional): Path to save the audio file. Defaults to "output.wav". + sampling_rate (int, optional): Sampling rate for the audio. If None, uses the processor's default. + normalize (bool, optional): Whether to normalize the audio before saving. Defaults to False. + batch_prefix (str, optional): Prefix for batch audio files. Defaults to "audio_". + Returns: + str: The path to the saved audio file. + """ + return self.audio_processor.save_audio(audio, output_path=output_path, sampling_rate=sampling_rate, normalize=normalize, batch_prefix=batch_prefix) + +__all__ = [ + "VibeVoiceProcessor", +] \ No newline at end of file diff --git a/vibevoice/processor/vibevoice_tokenizer_processor.py b/vibevoice/processor/vibevoice_tokenizer_processor.py new file mode 100644 index 0000000..0d854b7 --- /dev/null +++ b/vibevoice/processor/vibevoice_tokenizer_processor.py @@ -0,0 +1,483 @@ +""" +Processor class for VibeVoice models. +""" + +import os +import json +import warnings +from typing import List, Optional, Union, Dict, Any + +import numpy as np +import torch + +from transformers.feature_extraction_utils import FeatureExtractionMixin +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class AudioNormalizer: + """ + Audio normalization class for VibeVoice tokenizer. + + This class provides audio normalization to ensure consistent input levels + for the VibeVoice tokenizer while maintaining audio quality. + """ + + def __init__(self, target_dB_FS: float = -25, eps: float = 1e-6): + """ + Initialize the audio normalizer. + + Args: + target_dB_FS (float): Target dB FS level for the audio. Default: -25 + eps (float): Small value to avoid division by zero. Default: 1e-6 + """ + self.target_dB_FS = target_dB_FS + self.eps = eps + + def tailor_dB_FS(self, audio: np.ndarray) -> tuple: + """ + Adjust the audio to the target dB FS level. + + Args: + audio (np.ndarray): Input audio signal + + Returns: + tuple: (normalized_audio, rms, scalar) + """ + rms = np.sqrt(np.mean(audio**2)) + scalar = 10 ** (self.target_dB_FS / 20) / (rms + self.eps) + normalized_audio = audio * scalar + return normalized_audio, rms, scalar + + def avoid_clipping(self, audio: np.ndarray, scalar: Optional[float] = None) -> tuple: + """ + Avoid clipping by scaling down if necessary. + + Args: + audio (np.ndarray): Input audio signal + scalar (float, optional): Explicit scaling factor + + Returns: + tuple: (normalized_audio, scalar) + """ + if scalar is None: + max_val = np.max(np.abs(audio)) + if max_val > 1.0: + scalar = max_val + self.eps + else: + scalar = 1.0 + + return audio / scalar, scalar + + def __call__(self, audio: np.ndarray) -> np.ndarray: + """ + Normalize the audio by adjusting to target dB FS and avoiding clipping. + + Args: + audio (np.ndarray): Input audio signal + + Returns: + np.ndarray: Normalized audio signal + """ + # First adjust to target dB FS + audio, _, _ = self.tailor_dB_FS(audio) + # Then avoid clipping + audio, _ = self.avoid_clipping(audio) + return audio + + +# Change from ProcessorMixin to FeatureExtractionMixin which is designed for single components +class VibeVoiceTokenizerProcessor(FeatureExtractionMixin): + """ + Processor for VibeVoice acoustic tokenizer models. 
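A small numeric check of the normalizer defined above (illustrative only; the noise signal and seed are arbitrary): the output RMS should land near 10**(-25/20) ≈ 0.056 whenever the clipping guard does not engage.

import numpy as np
from vibevoice.processor.vibevoice_tokenizer_processor import AudioNormalizer

rng = np.random.default_rng(0)
audio = 0.3 * rng.standard_normal(24000).astype(np.float32)   # 1 s of noise, RMS ~= 0.3

norm = AudioNormalizer(target_dB_FS=-25)
out = norm(audio)
print(np.sqrt(np.mean(out ** 2)))                             # ~= 0.056 for this input
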
+ + This processor handles audio preprocessing for VibeVoice models, including: + - Audio format conversion (stereo to mono) + - Optional audio normalization + - Streaming support for infinite-length audio + + Args: + sampling_rate (int, optional): Expected sampling rate. Defaults to 24000. + normalize_audio (bool, optional): Whether to normalize audio. Defaults to True. + target_dB_FS (float, optional): Target dB FS for normalization. Defaults to -25. + eps (float, optional): Small value for numerical stability. Defaults to 1e-6. + """ + model_input_names = ["input_features"] + + def __init__( + self, + sampling_rate: int = 24000, + normalize_audio: bool = True, + target_dB_FS: float = -25, + eps: float = 1e-6, + **kwargs, + ): + super().__init__(**kwargs) + + self.sampling_rate = sampling_rate + self.normalize_audio = normalize_audio + + # Initialize audio normalizer if needed + if self.normalize_audio: + self.normalizer = AudioNormalizer(target_dB_FS=target_dB_FS, eps=eps) + else: + self.normalizer = None + + # Save config + self.feature_extractor_dict = { + "sampling_rate": sampling_rate, + "normalize_audio": normalize_audio, + "target_dB_FS": target_dB_FS, + "eps": eps, + } + + def _ensure_mono(self, audio: np.ndarray) -> np.ndarray: + """ + Convert stereo audio to mono if needed. + + Args: + audio (np.ndarray): Input audio array + + Returns: + np.ndarray: Mono audio array + """ + if len(audio.shape) == 1: + return audio + elif len(audio.shape) == 2: + if audio.shape[0] == 2: # (2, time) + return np.mean(audio, axis=0) + elif audio.shape[1] == 2: # (time, 2) + return np.mean(audio, axis=1) + else: + # If one dimension is 1, squeeze it + if audio.shape[0] == 1: + return audio.squeeze(0) + elif audio.shape[1] == 1: + return audio.squeeze(1) + else: + raise ValueError(f"Unexpected audio shape: {audio.shape}") + else: + raise ValueError(f"Audio should be 1D or 2D, got shape: {audio.shape}") + + def _process_single_audio(self, audio: Union[np.ndarray, List[float]]) -> np.ndarray: + """ + Process a single audio array. + + Args: + audio: Single audio input + + Returns: + np.ndarray: Processed audio + """ + # Convert to numpy array + if not isinstance(audio, np.ndarray): + audio = np.array(audio, dtype=np.float32) + else: + audio = audio.astype(np.float32) + + # Ensure mono + audio = self._ensure_mono(audio) + + # Normalize if requested + if self.normalize_audio and self.normalizer is not None: + audio = self.normalizer(audio) + + return audio + + def __call__( + self, + audio: Union[str, np.ndarray, List[float], List[np.ndarray], List[List[float]], List[str]] = None, + sampling_rate: Optional[int] = None, + return_tensors: Optional[str] = None, + **kwargs, + ): + """ + Process audio for VibeVoice models. + + Args: + audio: Audio input(s) to process. Can be: + - str: Path to audio file + - np.ndarray: Audio array + - List[float]: Audio as list of floats + - List[np.ndarray]: Batch of audio arrays + - List[str]: Batch of audio file paths + sampling_rate (int, optional): Sampling rate of the input audio + return_tensors (str, optional): Return format ('pt' for PyTorch, 'np' for NumPy) + + Returns: + dict: Processed audio inputs with keys: + - input_features: Audio tensor(s) ready for the model + """ + if audio is None: + raise ValueError("Audio input is required") + + # Validate sampling rate + if sampling_rate is not None and sampling_rate != self.sampling_rate: + logger.warning( + f"Input sampling rate ({sampling_rate}) differs from expected " + f"sampling rate ({self.sampling_rate}). 
Please resample your audio." + ) + + # Handle different input types + if isinstance(audio, str): + # Single audio file path + audio = self._load_audio_from_path(audio) + is_batched = False + elif isinstance(audio, list): + if len(audio) == 0: + raise ValueError("Empty audio list provided") + + # Check if it's a list of file paths + if all(isinstance(item, str) for item in audio): + # Batch of audio file paths + audio = [self._load_audio_from_path(path) for path in audio] + is_batched = True + else: + # Check if it's batched audio arrays + is_batched = isinstance(audio[0], (np.ndarray, list)) + else: + # Single audio array or list + is_batched = False + + # Process audio + if is_batched: + processed_audio = [self._process_single_audio(a) for a in audio] + else: + processed_audio = [self._process_single_audio(audio)] + + # Convert to tensors if requested + if return_tensors == "pt": + if len(processed_audio) == 1: + # Create a proper batch dimension (B, T) + input_features = torch.from_numpy(processed_audio[0]).unsqueeze(0).unsqueeze(1) + else: + # For batched input with different lengths, create a batch properly + input_features = torch.stack([torch.from_numpy(a) for a in processed_audio]).unsqueeze(1) + elif return_tensors == "np": + if len(processed_audio) == 1: + input_features = processed_audio[0][np.newaxis, np.newaxis, :] + else: + input_features = np.stack(processed_audio)[:, np.newaxis, :] + else: + input_features = processed_audio[0] if len(processed_audio) == 1 else processed_audio + + outputs = { + "audio": input_features, # Use "audio" instead of "input_features" + } + + return outputs + + def _load_audio_from_path(self, audio_path: str) -> np.ndarray: + """ + Load audio from file path. + + Args: + audio_path (str): Path to audio file + + Returns: + np.ndarray: Loaded audio array + """ + # Get file extension to determine loading method + file_ext = os.path.splitext(audio_path)[1].lower() + + if file_ext in ['.wav', '.mp3', '.flac', '.m4a', '.ogg']: + # Audio file - use librosa + import librosa + audio_array, sr = librosa.load( + audio_path, + sr=self.sampling_rate, + mono=True + ) + return audio_array + elif file_ext == '.pt': + # PyTorch tensor file + audio_tensor = torch.load(audio_path, map_location='cpu').squeeze() + if isinstance(audio_tensor, torch.Tensor): + audio_array = audio_tensor.numpy() + else: + audio_array = np.array(audio_tensor) + return audio_array.astype(np.float32) + elif file_ext == '.npy': + # NumPy file + audio_array = np.load(audio_path) + return audio_array.astype(np.float32) + else: + raise ValueError( + f"Unsupported file format: {file_ext}. " + f"Supported formats: .wav, .mp3, .flac, .m4a, .ogg, .pt, .npy, .npz" + ) + + def preprocess_audio( + self, + audio_path_or_array: Union[str, np.ndarray], + normalize: Optional[bool] = None, + ) -> np.ndarray: + """ + Convenience method to preprocess audio from file path or array. + This method is kept for backward compatibility but __call__ is recommended. 
+ + Args: + audio_path_or_array: Path to audio file or numpy array + normalize: Whether to normalize (overrides default setting) + + Returns: + np.ndarray: Preprocessed audio array + """ + if isinstance(audio_path_or_array, str): + audio_array = self._load_audio_from_path(audio_path_or_array) + else: + audio_array = np.array(audio_path_or_array, dtype=np.float32) + + # Override normalization setting if specified + original_normalize = self.normalize_audio + if normalize is not None: + self.normalize_audio = normalize + + try: + processed = self._process_single_audio(audio_array) + finally: + # Restore original setting + self.normalize_audio = original_normalize + + return processed + + # Override to_dict method for configuration saving + def to_dict(self) -> Dict[str, Any]: + """ + Convert the object to a dict containing all attributes needed for serialization. + """ + return self.feature_extractor_dict + + def save_audio( + self, + audio: Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]], + output_path: str = "output.wav", + sampling_rate: Optional[int] = None, + normalize: bool = False, + batch_prefix: str = "audio_", + ): + """ + Save audio data to WAV file(s). + + Args: + audio: Audio data to save. Can be: + - torch.Tensor: PyTorch tensor with shape (B, C, T) or (B, T) or (T) + - np.ndarray: NumPy array with shape (B, C, T) or (B, T) or (T) + - List of tensors or arrays + output_path: Path where to save the audio. If saving multiple files, + this is treated as a directory and individual files will be saved inside. + sampling_rate: Sampling rate for the saved audio. Defaults to the processor's rate. + normalize: Whether to normalize audio before saving. + batch_prefix: Prefix for batch files when saving multiple audios. + + Returns: + List[str]: Paths to the saved audio files. + """ + if sampling_rate is None: + sampling_rate = self.sampling_rate + + try: + import soundfile as sf + except ImportError: + raise ImportError( + "soundfile is required to save audio files. 
" + "Install it with: pip install soundfile" + ) + + # Ensure audio is in the right format + if isinstance(audio, torch.Tensor): + # Convert PyTorch tensor to numpy + audio_np = audio.float().detach().cpu().numpy() + elif isinstance(audio, np.ndarray): + audio_np = audio + elif isinstance(audio, list): + # Handle list of tensors or arrays + if all(isinstance(a, torch.Tensor) for a in audio): + audio_np = [a.float().detach().cpu().numpy() for a in audio] + else: + audio_np = audio + else: + raise ValueError(f"Unsupported audio type: {type(audio)}") + + saved_paths = [] + + # Handle based on shape or type + if isinstance(audio_np, list): + # Multiple separate audios to save + output_dir = output_path + + # Ensure output directory exists + os.makedirs(output_dir, exist_ok=True) + + # Save each audio + for i, audio_item in enumerate(audio_np): + audio_item = self._prepare_audio_for_save(audio_item, normalize) + file_path = os.path.join(output_dir, f"{batch_prefix}{i}.wav") + sf.write(file_path, audio_item, sampling_rate) + saved_paths.append(file_path) + + else: + # Handle different dimensions + if len(audio_np.shape) >= 3: # (B, C, T) or similar + # Get batch size + batch_size = audio_np.shape[0] + + if batch_size > 1: + # Multiple audios in a batch + output_dir = output_path + + # Ensure output directory exists + os.makedirs(output_dir, exist_ok=True) + + # Save each audio in the batch + for i in range(batch_size): + # Extract single audio and remove channel dim if present + single_audio = audio_np[i] + if len(single_audio.shape) > 1: + if single_audio.shape[0] == 1: # (1, T) + single_audio = single_audio.squeeze(0) + + single_audio = self._prepare_audio_for_save(single_audio, normalize) + file_path = os.path.join(output_dir, f"{batch_prefix}{i}.wav") + sf.write(file_path, single_audio, sampling_rate) + saved_paths.append(file_path) + else: + # Single audio with batch and channel dims + audio_item = audio_np.squeeze() # Remove batch and channel dimensions + audio_item = self._prepare_audio_for_save(audio_item, normalize) + sf.write(output_path, audio_item, sampling_rate) + saved_paths.append(output_path) + else: + # Single audio without batch dimension + audio_item = self._prepare_audio_for_save(audio_np, normalize) + sf.write(output_path, audio_item, sampling_rate) + saved_paths.append(output_path) + + return saved_paths + + def _prepare_audio_for_save(self, audio: np.ndarray, normalize: bool) -> np.ndarray: + """ + Prepare audio for saving by ensuring it's the right shape and optionally normalizing. + + Args: + audio: Audio data as numpy array + normalize: Whether to normalize audio + + Returns: + np.ndarray: Processed audio ready for saving + """ + # Ensure right dimensionality + if len(audio.shape) > 1 and audio.shape[0] == 1: # (1, T) + audio = audio.squeeze(0) + + # Normalize if requested + if normalize: + max_val = np.abs(audio).max() + if max_val > 0: + audio = audio / max_val + + return audio + + +__all__ = ["VibeVoiceTokenizerProcessor", "AudioNormalizer"] \ No newline at end of file diff --git a/vibevoice/schedule/__init__.py b/vibevoice/schedule/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibevoice/schedule/dpm_solver.py b/vibevoice/schedule/dpm_solver.py new file mode 100644 index 0000000..806241f --- /dev/null +++ b/vibevoice/schedule/dpm_solver.py @@ -0,0 +1,1065 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.utils import deprecate +from diffusers.utils.torch_utils import randn_tensor +from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + # return math.cos(t * math.pi / 2 * 0.95) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + elif alpha_transform_type == "cauchy": + # µ + γ tan (π (0.5 - x)) γ = 1, µ = 3 + # alpha^2 = 1-1/(exp(λ)+1) + def alpha_bar_fn(t, gamma=1, mu=3): + snr = mu + gamma * math.tan(math.pi * (0.5 - t) * 0.9) + return 1 - 1 / (math.exp(snr) + 1.1) + + elif alpha_transform_type == "laplace": + # µ − bsgn(0.5 − t) log(1 − 2|t − 0.5|) µ = 0, b = 1 + def alpha_bar_fn(t, mu=0, b=1): + snr = mu - b * math.copysign(1, 0.5 - t) * math.log(1 - 2 * abs(t - 0.5) * 0.98) + return 1 - 1 / (math.exp(snr) + 1.02) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. 
+ + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + +class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `DPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The + `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) + paper, and the `dpmsolver++` type implements the algorithms in the + [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or + `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. 
It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + use_lu_lambdas (`bool`, *optional*, defaults to `False`): + Whether to use the uniform-logSNR for step sizes proposed by Lu's DPM-Solver in the noise schedule during + the sampling process. If `True`, the sigmas and time steps are determined according to a sequence of + `lambda(t)`. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
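+
+    Example (illustrative sketch; `model` below stands for any denoiser callable that returns
+    an epsilon prediction for the given sample and timestep, and is not part of this module):
+
+        import torch
+
+        scheduler = DPMSolverMultistepScheduler(
+            num_train_timesteps=1000, algorithm_type="dpmsolver++", solver_order=2
+        )
+        scheduler.set_timesteps(num_inference_steps=10, device="cpu")
+
+        sample = torch.randn(1, 64, 64) * scheduler.init_noise_sigma
+        for t in scheduler.timesteps:
+            noise_pred = model(sample, t)  # hypothetical denoiser call
+            sample = scheduler.step(noise_pred, t, sample).prev_sample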
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + use_karras_sigmas: Optional[bool] = False, + use_lu_lambdas: Optional[bool] = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" + deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message) + + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2" or beta_schedule == "cosine": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine") + elif beta_schedule == "cauchy": + self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cauchy") + elif beta_schedule == "laplace": + self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="laplace") + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + if 
algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": + raise ValueError( + f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead." + ) + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps( + self, + num_inference_steps: int = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary timesteps schedule. If `None`, timesteps will be generated + based on the `timestep_spacing` attribute. If `timesteps` is passed, `num_inference_steps` and `sigmas` + must be `None`, and `timestep_spacing` attribute will be ignored. + """ + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps`.") + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + if timesteps is not None and self.config.use_karras_sigmas: + raise ValueError("Cannot use `timesteps` with `config.use_karras_sigmas = True`") + if timesteps is not None and self.config.use_lu_lambdas: + raise ValueError("Cannot use `timesteps` with `config.use_lu_lambdas = True`") + + if timesteps is not None: + timesteps = np.array(timesteps).astype(np.int64) + else: + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. + clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) + last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, last_timestep - 1, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = last_timestep // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + ) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + if self.config.use_karras_sigmas: + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + elif self.config.use_lu_lambdas: + lambdas = np.flip(log_sigmas.copy()) + lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps) + sigmas = np.exp(lambdas) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def _convert_to_lu(self, in_lambdas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Lu et al. 
(2022).""" + + lambda_min: float = in_lambdas[-1].item() + lambda_max: float = in_lambdas[0].item() + + rho = 1.0 # 1.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = lambda_min ** (1 / rho) + max_inv_rho = lambda_max ** (1 / rho) + lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return lambdas + + def convert_model_output( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. 
+ if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + def dpm_solver_first_order_update( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + x_t = ( + (alpha_t / alpha_s) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + noise: 
Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + if self.config.solver_type 
== "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * (torch.exp(h) - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + def multistep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the third-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + 
schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + generator=None, + variance_noise: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.Tensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`LEdits++`]. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
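+
+        Note:
+            `set_timesteps` must be called once after creating the scheduler and before the
+            first call to `step`; otherwise `num_inference_steps` is still `None` and a
+            `ValueError` is raised.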
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final + or (self.config.lower_order_final and len(self.timesteps) < 15) + or self.config.final_sigmas_type == "zero" + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=torch.float32 + ) + elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: + noise = variance_noise.to(device=model_output.device, dtype=torch.float32) + else: + noise = None + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + else: + prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # Cast sample back to expected dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + # alpha_t = self.alpha_t.to(device=original_samples.device, dtype=original_samples.dtype) + # sigma_t = self.sigma_t.to(device=original_samples.device, dtype=original_samples.dtype) + alpha_t = self.alpha_t.to(original_samples.device).to(original_samples.dtype) + sigma_t = self.sigma_t.to(original_samples.device).to(original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + alpha_t = alpha_t[timesteps].flatten() + while len(alpha_t.shape) < len(original_samples.shape): + alpha_t = alpha_t.unsqueeze(-1) + + sigma_t = sigma_t[timesteps].flatten() + while len(sigma_t.shape) < len(original_samples.shape): + sigma_t = sigma_t.unsqueeze(-1) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def get_velocity(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # alpha_t = self.alpha_t.to(device=original_samples.device, dtype=original_samples.dtype) + # sigma_t = self.sigma_t.to(device=original_samples.device, dtype=original_samples.dtype) + alpha_t = 
self.alpha_t.to(original_samples.device).to(original_samples.dtype) + sigma_t = self.sigma_t.to(original_samples.device).to(original_samples.dtype) + + timesteps = timesteps.to(original_samples.device) + alpha_t = alpha_t[timesteps].flatten() + while len(alpha_t.shape) < len(original_samples.shape): + alpha_t = alpha_t.unsqueeze(-1) + + sigma_t = sigma_t[timesteps].flatten() + while len(sigma_t.shape) < len(original_samples.shape): + sigma_t = sigma_t.unsqueeze(-1) + + velocity = alpha_t * noise - sigma_t * original_samples + return velocity + + def __len__(self): + return self.config.num_train_timesteps \ No newline at end of file diff --git a/vibevoice/schedule/timestep_sampler.py b/vibevoice/schedule/timestep_sampler.py new file mode 100644 index 0000000..177b66f --- /dev/null +++ b/vibevoice/schedule/timestep_sampler.py @@ -0,0 +1,19 @@ +import math +import torch + + +class UniformSampler: + def __init__(self, timesteps = 1000): + self.timesteps = timesteps + def sample(self, batch_size, device): + return torch.randint(0, self.timesteps, (batch_size,), device=device) + +class LogitNormalSampler: + def __init__(self, timesteps = 1000, m = 0, s = 1): + self.timesteps = timesteps + timesteps = torch.linspace(0, 1, timesteps) + logit = torch.log(timesteps / (1 - timesteps)) + self.prob = torch.exp(-0.5 * (logit - m) ** 2 / s ** 2) / (s * math.sqrt(2 * math.pi)) + def sample(self, batch_size, device): + return torch.multinomial(self.prob, batch_size, replacement=True).to(device) + \ No newline at end of file diff --git a/vibevoice/scripts/__init__.py b/vibevoice/scripts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibevoice/scripts/convert_nnscaler_checkpoint_to_transformers.py b/vibevoice/scripts/convert_nnscaler_checkpoint_to_transformers.py new file mode 100644 index 0000000..bb814cf --- /dev/null +++ b/vibevoice/scripts/convert_nnscaler_checkpoint_to_transformers.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +# coding=utf-8 + +import argparse +import json +import os +from pathlib import Path +import re +import torch +from typing import Dict, List, Tuple + +from vibevoice.modular.configuration_vibevoice import ( + VibeVoiceConfig +) +from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +def convert_vibevoice_nnscaler_checkpoint_to_hf( + checkpoint_path: str, + pytorch_dump_folder_path: str, + config_path: str = None, +): + """ + Convert a nnscaler VibeVoice checkpoint to HuggingFace format. + Supports both regular checkpoints and tensor parallel checkpoints. + """ + + # Load regular checkpoint + logger.info(f"Loading regular checkpoint from {checkpoint_path}") + checkpoint = torch.load(checkpoint_path, map_location="cpu") # ['model', 'optimizer', 'lr_scheduler', 'train_status', 'train_args', 'rng_states', 'nnscaler', 'dataloader'] + + # config = checkpoint['train_args'] + init_config_name = checkpoint['train_args']['vars']['model_args']['config_path']['relative_path'] + pretrained_name = checkpoint['train_args']['vars']['data_args']['tokenizer_path'] + + init_config_path = Path(__file__).parent.parent / 'configs' / init_config_name.split('/')[-1] + if init_config_path.exists(): + logger.info(f"Loading initial config from {init_config_path}") + with open(init_config_path, 'r') as f: + init_config = json.load(f) + else: + raise FileNotFoundError(f"Initial config file {init_config_path} not found. 
Please provide a valid path.") + + tie_word_embeddings = init_config['decoder_config'].get('tie_word_embeddings', True) + logger.info(f"Tie word embeddings: {tie_word_embeddings}") + + init_config['decoder_config']['use_cache'] = True + config = VibeVoiceConfig(**init_config, tie_word_embeddings=tie_word_embeddings) + + # # Extract the model state dict + model_state_dict = {k.replace('model.model.', 'model.'): v for k, v in checkpoint["model"].items() if k.startswith('model.model.')} + if not tie_word_embeddings and 'model.lm_head.weight' in checkpoint["model"].keys(): + # If not tying weights, we need to add the lm_head weight separately + model_state_dict['lm_head.weight'] = checkpoint["model"]['model.lm_head.weight'] + + # Override with provided config if available + if config_path: + logger.info(f"Loading config from {config_path}") + with open(config_path, 'r') as f: + config_dict = json.load(f) + config = VibeVoiceConfig.from_dict(config_dict) + + # Set the default dtype to bfloat16 before creating the model + original_dtype = torch.get_default_dtype() + torch.set_default_dtype(torch.bfloat16) + + # Create the HuggingFace model + logger.info("Creating HuggingFace VibeVoiceForConditionalGeneration model") + model = VibeVoiceForConditionalGeneration(config) + + # Restore original dtype + torch.set_default_dtype(original_dtype) + + # Load the state dict + logger.info("Loading weights into model") + missing_keys, unexpected_keys = model.load_state_dict(model_state_dict, strict=False) + + if missing_keys: + logger.warning(f"Missing keys: {missing_keys}") + if unexpected_keys: + logger.warning(f"Unexpected keys: {unexpected_keys}") + + # Create output directory + os.makedirs(pytorch_dump_folder_path, exist_ok=True) + + # Save the model and config + logger.info(f"Saving model to {pytorch_dump_folder_path}") + + # Save config + config.save_pretrained(pytorch_dump_folder_path) + + # Save VibeVoiceProcessor configuration + logger.info("Saving VibeVoiceProcessor configuration") + processor_config = { + "processor_class": "VibeVoiceProcessor", + "speech_tok_compress_ratio": 3200, + "db_normalize": True, + # Audio processor configuration + "audio_processor": { + "feature_extractor_type": "VibeVoiceTokenizerProcessor", + "sampling_rate": 24000, + "normalize_audio": True, + "target_dB_FS": -25, + "eps": 1e-6, + }, + "language_model_pretrained_name": pretrained_name, + } + + processor_config_path = os.path.join(pytorch_dump_folder_path, "preprocessor_config.json") + with open(processor_config_path, 'w') as f: + json.dump(processor_config, f, indent=2) + logger.info(f"Saved processor config to {processor_config_path}") + + # Save model with sharding + # save_pretrained handles tied weights automatically + logger.info("Saving model weights with sharding...") + model.save_pretrained( + pytorch_dump_folder_path, + max_shard_size="2GB", # Set maximum size for each shard + safe_serialization=True # Ensure saving in .safetensors format + ) + logger.info(f"Model weights saved to {pytorch_dump_folder_path}") + + logger.info("Conversion complete!") + + # Verify the saved model can be loaded + logger.info("Verifying saved model...") + loaded_model = VibeVoiceForConditionalGeneration.from_pretrained(pytorch_dump_folder_path) + logger.info("Model successfully loaded from saved checkpoint!") + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--nnscaler_checkpoint_path", + type=str, + required=True, + help="Path to the fairseq checkpoint (.pt file). 
For tensor parallel checkpoints, " + "provide any one of the part files (e.g., checkpoint_1_5000-model_part-0.pt), " + "and the script will automatically detect and merge all parts.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", + type=str, + required=True, + help="Path to the output PyTorch model directory", + ) + parser.add_argument( + "--config_path", + type=str, + default=None, + help="Optional path to a config JSON file to override extracted config", + ) + + args = parser.parse_args() + + convert_vibevoice_nnscaler_checkpoint_to_hf( + args.nnscaler_checkpoint_path, + args.pytorch_dump_folder_path, + args.config_path, + ) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/vibevoice_node_chunked_wrapper.py b/vibevoice_node_chunked_wrapper.py new file mode 100644 index 0000000..76f4418 --- /dev/null +++ b/vibevoice_node_chunked_wrapper.py @@ -0,0 +1,220 @@ +# comfyui_vibevoice_chunked_wrapper.py + +import math +import torch +from comfy.utils import ProgressBar + +from .vibevoice_nodes import VibeVoiceTTSNode + +# We assume the base node class from your snippet is in the same module/file. +# If it's in another module, import it instead: +# from your_module import VibeVoiceTTSNode + +class VibeVoiceTTS_WrapperNode: + """ + Wraps VibeVoiceTTSNode, adds: + - Number of Speakers (1-4) that gates which speaker_*_voice inputs are used + - Chunking controls for multiline script ("Speaker N: ...") + - Iterates per chunk, concatenates outputs into one AUDIO dict + + Returns: ("AUDIO",) — waveform [B, C, T], sample_rate per ComfyUI audio spec. + """ + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + # Pass-through of model/decoding params to the underlying node: + "model_name": (list(VibeVoiceTTSNode.INPUT_TYPES()["required"]["model_name"][0]), { + "tooltip": "Forwarded to VibeVoiceTTSNode" + }), + "text": ("STRING", { + "multiline": True, + "default": "Speaker 1: Hello there!\nSpeaker 2: And hello from me.", + "tooltip": "Multiline script: 'Speaker 1: ...' one line per utterance" + }), + "num_speakers": ("INT", { + "default": 2, "min": 1, "max": 4, "step": 1, + "tooltip": "How many speaker reference audios to use (1–4). Extra inputs are ignored." + }), + "chunk_lines": ("BOOLEAN", { + "default": False, "label_on": "Chunk", "label_off": "No chunking", + "tooltip": "When enabled, splits the script into groups of N lines and runs VibeVoice per chunk." + }), + "lines_per_chunk": ("INT", { + "default": 20, "min": 1, "max": 999, "step": 1, + "tooltip": "Only used when 'Chunk' is enabled." + }), + + # Forwarded generation knobs: + "quantize_llm_4bit": ("BOOLEAN", { + "default": False, "label_on": "Q4 (LLM only)", "label_off": "Full precision" + }), + "attention_mode": (["eager", "sdpa", "flash_attention_2"], {"default": "sdpa"}), + "cfg_scale": ("FLOAT", {"default": 1.3, "min": 1.0, "max": 2.0, "step": 0.05}), + "inference_steps": ("INT", {"default": 10, "min": 1, "max": 50}), + "seed": ("INT", {"default": 42, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "control_after_generate": True}), + "do_sample": ("BOOLEAN", {"default": True, "label_on": "Sampling", "label_off": "Greedy"}), + "temperature": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 2.0, "step": 0.01}), + "top_p": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01}), + "top_k": ("INT", {"default": 0, "min": 0, "max": 500, "step": 1}), + }, + "optional": { + # Provide up to 4 optional speaker audios; we enforce num_speakers in code. 
+ "speaker_1_voice": ("AUDIO", {"tooltip": "Reference audio for Speaker 1"}), + "speaker_2_voice": ("AUDIO", {"tooltip": "Reference audio for Speaker 2"}), + "speaker_3_voice": ("AUDIO", {"tooltip": "Reference audio for Speaker 3"}), + "speaker_4_voice": ("AUDIO", {"tooltip": "Reference audio for Speaker 4"}), + }, + # If you REALLY want these hidden until toggled via JS, you can also list them under "hidden" + # and add a tiny JS extension to flip them visible. Pure-Python dynamic show/hide isn’t native. # see docs + } + + RETURN_TYPES = ("AUDIO",) + FUNCTION = "run" + CATEGORY = "audio/tts" + + # --------- helpers --------- + @staticmethod + def _split_into_chunks(lines, n): + """ + Split list of lines into chunks of size n. + If the last chunk would be < 40% of n, merge it into the previous chunk. + """ + if n <= 0: + return [lines] if lines else [] + + chunks = [lines[i:i+n] for i in range(0, len(lines), n)] + if len(chunks) >= 2: + tail = chunks[-1] + if len(tail) < math.ceil(0.4 * n): + chunks[-2].extend(tail) + chunks.pop() + return chunks + + @staticmethod + def _concat_audio_dicts(audio_dicts): + """ + Concatenate a list of ComfyUI AUDIO dicts along time dim T. + Each dict: {"waveform": tensor[B,C,T], "sample_rate": int} + Returns a single AUDIO dict of the same shape convention. + """ + if not audio_dicts: + # Return 1-sample silence if nothing to concat + return {"waveform": torch.zeros((1, 1, 1), dtype=torch.float32), "sample_rate": 24000} + + srs = {ad["sample_rate"] for ad in audio_dicts if ad and "sample_rate" in ad} + if len(srs) != 1: + raise ValueError(f"Sample rates differ across chunks: {srs}") + sr = srs.pop() + + waves = [] + for ad in audio_dicts: + wf = ad["waveform"] + # Expect [B, C, T] + if wf.ndim == 1: + wf = wf.unsqueeze(0).unsqueeze(0) # -> [1,1,T] + elif wf.ndim == 2: + wf = wf.unsqueeze(0) # -> [1,C,T] + waves.append(wf) + + # Concatenate on time axis T (-1). Assumes batch (B) and channels (C) match. + out = torch.cat(waves, dim=-1) + return {"waveform": out.cpu(), "sample_rate": sr} + + @staticmethod + def _filter_speaker_inputs(kwargs, num_speakers): + """ + Pulls up to num_speakers optional AUDIO inputs from kwargs. + """ + voices = [] + for i in range(1, num_speakers + 1): + voices.append(kwargs.get(f"speaker_{i}_voice")) + # Fill the rest with None to align with underlying signature but ignored there + while len(voices) < 4: + voices.append(None) + return { + "speaker_1_voice": voices[0], + "speaker_2_voice": voices[1], + "speaker_3_voice": voices[2], + "speaker_4_voice": voices[3], + } + + # --------- main --------- + def run( + self, + model_name, + text, + num_speakers, + chunk_lines, + lines_per_chunk, + quantize_llm_4bit, + attention_mode, + cfg_scale, + inference_steps, + seed, + do_sample, + temperature, + top_p, + top_k, + **kwargs, + ): + """ + Orchestrates chunking and calls VibeVoiceTTSNode.generate_audio per chunk. + Then concatenates to a single AUDIO dict. 
+ """ + + text = (text or "").strip() + if not text: + # return 1 second of silence at 24kHz, shape [1,1,24000] + return ({"waveform": torch.zeros((1, 1, 24000), dtype=torch.float32), "sample_rate": 24000},) + + # Prepare speaker refs according to chosen number of speakers + speaker_kwargs = self._filter_speaker_inputs(kwargs, max(1, min(4, int(num_speakers)))) + + # Prepare chunks (list of multiline strings) + if chunk_lines: + raw_lines = [ln for ln in text.splitlines() if ln.strip() != ""] + groups = self._split_into_chunks(raw_lines, lines_per_chunk) + chunk_texts = ["\n".join(g) for g in groups] if groups else [text] + else: + chunk_texts = [text] + + # Progress bar over chunks + pbar = ProgressBar(total=len(chunk_texts)) + + # Call the underlying node per chunk + base = VibeVoiceTTSNode() + audio_parts = [] + for idx, chunk in enumerate(chunk_texts, 1): + out_audio = base.generate_audio( + model_name=model_name, + text=chunk, + attention_mode=attention_mode, + cfg_scale=cfg_scale, + inference_steps=inference_steps, + seed=seed, + do_sample=do_sample, + temperature=temperature, + top_p=top_p, + top_k=top_k, + quantize_llm_4bit=quantize_llm_4bit, + force_offload=False, + **speaker_kwargs, + )[0] # underlying returns (AUDIO,) + audio_parts.append(out_audio) + pbar.update(1) + + # Concatenate into one AUDIO + merged = self._concat_audio_dicts(audio_parts) + return (merged,) + + +# Register +NODE_CLASS_MAPPINGS = { + "VibeVoiceTTS_Wrapper": VibeVoiceTTS_WrapperNode + # Keep the base node mapping from your original file: +} +NODE_DISPLAY_NAME_MAPPINGS = { + "VibeVoiceTTS_Wrapper": "VibeVoice TTS (Chunked Wrapper)" +} diff --git a/vibevoice_nodes.py b/vibevoice_nodes.py new file mode 100644 index 0000000..5b1855b --- /dev/null +++ b/vibevoice_nodes.py @@ -0,0 +1,617 @@ +import os +import re +import torch +import numpy as np +import random +from huggingface_hub import hf_hub_download, snapshot_download +import logging + +import gc + +import folder_paths +import comfy.model_management as model_management +import comfy.model_patcher +from comfy.utils import ProgressBar +from comfy.model_management import throw_exception_if_processing_interrupted + +from transformers import set_seed, AutoTokenizer, BitsAndBytesConfig +from .vibevoice.modular.modeling_vibevoice_inference import VibeVoiceForConditionalGenerationInference +from .vibevoice.processor.vibevoice_processor import VibeVoiceProcessor +from .vibevoice.processor.vibevoice_tokenizer_processor import VibeVoiceTokenizerProcessor +from .vibevoice.modular.modular_vibevoice_text_tokenizer import VibeVoiceTextTokenizerFast + +try: + import librosa +except ImportError: + print("VibeVoice Node: `librosa` is not installed. 
Resampling of reference audio will not be available.") + librosa = None + +logger = logging.getLogger(__name__) + +LOADED_MODELS = {} +VIBEVOICE_PATCHER_CACHE = {} + +MODEL_CONFIGS = { + "VibeVoice-1.5B": { + "repo_id": "microsoft/VibeVoice-1.5B", + "size_gb": 3.0, + "tokenizer_repo": "Qwen/Qwen2.5-1.5B" + }, + "VibeVoice-Large": { + "repo_id": "microsoft/VibeVoice-Large", + "size_gb": 17.4, + "tokenizer_repo": "Qwen/Qwen2.5-7B" + } +} + +ATTENTION_MODES = ["eager", "sdpa", "flash_attention_2"] + +def cleanup_old_models(keep_cache_key=None): + """Clean up old models, optionally keeping one specific model loaded""" + global LOADED_MODELS, VIBEVOICE_PATCHER_CACHE + + keys_to_remove = [] + + # Clear LOADED_MODELS + for key in list(LOADED_MODELS.keys()): + if key != keep_cache_key: + keys_to_remove.append(key) + del LOADED_MODELS[key] + + # Clear VIBEVOICE_PATCHER_CACHE - but more carefully + for key in list(VIBEVOICE_PATCHER_CACHE.keys()): + if key != keep_cache_key: + # Set the model/processor to None but don't delete the patcher itself + # This lets ComfyUI's model management handle the patcher cleanup + try: + patcher = VIBEVOICE_PATCHER_CACHE[key] + if hasattr(patcher, 'model') and patcher.model: + patcher.model.model = None + patcher.model.processor = None + # Remove from our cache but let ComfyUI handle the rest + del VIBEVOICE_PATCHER_CACHE[key] + except Exception as e: + logger.warning(f"Error cleaning up patcher {key}: {e}") + + if keys_to_remove: + logger.info(f"Cleaned up cached models: {keys_to_remove}") + gc.collect() + model_management.soft_empty_cache() + +class VibeVoiceModelHandler(torch.nn.Module): + """A torch.nn.Module wrapper to hold the VibeVoice model and processor.""" + def __init__(self, model_pack_name, attention_mode="eager", use_llm_4bit=False): + super().__init__() + self.model_pack_name = model_pack_name + self.attention_mode = attention_mode + self.use_llm_4bit = use_llm_4bit + self.cache_key = f"{model_pack_name}_attn_{attention_mode}" + self.model = None + self.processor = None + self.size = int(MODEL_CONFIGS[model_pack_name].get("size_gb", 4.0) * (1024**3)) + + def load_model(self, device, attention_mode="eager"): + self.model, self.processor = VibeVoiceLoader.load_model(self.model_pack_name, device, attention_mode, use_llm_4bit=self.use_llm_4bit) + self.model.to(device) + +class VibeVoicePatcher(comfy.model_patcher.ModelPatcher): + """Custom ModelPatcher for managing VibeVoice models in ComfyUI.""" + def __init__(self, model, attention_mode="eager", *args, **kwargs): + super().__init__(model, *args, **kwargs) + self.attention_mode = attention_mode + self.cache_key = model.cache_key + + @property + def is_loaded(self): + """Check if the model is currently loaded in memory.""" + return hasattr(self, 'model') and self.model is not None and hasattr(self.model, 'model') and self.model.model is not None + + def patch_model(self, device_to=None, *args, **kwargs): + target_device = self.load_device + if self.model.model is None: + logger.info(f"Loading VibeVoice models for '{self.model.model_pack_name}' to {target_device}...") + mode_names = { + "eager": "Eager (Most Compatible)", + "sdpa": "SDPA (Balanced Speed/Compatibility)", + "flash_attention_2": "Flash Attention 2 (Fastest)" + } + logger.info(f"Attention Mode: {mode_names.get(self.attention_mode, self.attention_mode)}") + self.model.load_model(target_device, self.attention_mode) + self.model.model.to(target_device) + return super().patch_model(device_to=target_device, *args, **kwargs) + + def 
unpatch_model(self, device_to=None, unpatch_weights=True, *args, **kwargs): + if unpatch_weights: + logger.info(f"Offloading VibeVoice models for '{self.model.model_pack_name}' ({self.attention_mode}) to {device_to}...") + self.model.model = None + self.model.processor = None + + # Clear using the correct cache key + if self.cache_key in LOADED_MODELS: + del LOADED_MODELS[self.cache_key] + logger.info(f"Cleared LOADED_MODELS cache for: {self.cache_key}") + + # DON'T delete from VIBEVOICE_PATCHER_CACHE here - let ComfyUI handle it + # This prevents the IndexError in ComfyUI's model management + + # Force garbage collection + gc.collect() + model_management.soft_empty_cache() + + return super().unpatch_model(device_to, unpatch_weights, *args, **kwargs) + +class VibeVoiceLoader: + @staticmethod + def get_model_path(model_name: str): + if model_name not in MODEL_CONFIGS: + raise ValueError(f"Unknown VibeVoice model: {model_name}") + + vibevoice_path = os.path.join(folder_paths.get_folder_paths("tts")[0], "VibeVoice") + model_path = os.path.join(vibevoice_path, model_name) + + index_file = os.path.join(model_path, "model.safetensors.index.json") + if not os.path.exists(index_file): + print(f"Downloading VibeVoice model: {model_name}...") + repo_id = MODEL_CONFIGS[model_name]["repo_id"] + snapshot_download(repo_id=repo_id, local_dir=model_path) + return model_path + + @staticmethod + def _check_attention_compatibility(attention_mode: str, torch_dtype, device_name: str = ""): + """Check if the requested attention mode is compatible with current setup.""" + + # Check for SDPA availability (PyTorch 2.0+) + if attention_mode == "sdpa": + if not hasattr(torch.nn.functional, 'scaled_dot_product_attention'): + logger.warning("SDPA not available (requires PyTorch 2.0+), falling back to eager") + return "eager" + + # Check for Flash Attention availability + elif attention_mode == "flash_attention_2": + if not hasattr(torch.nn.functional, 'scaled_dot_product_attention'): + logger.warning("Flash Attention not available, falling back to eager") + return "eager" + elif torch_dtype == torch.float32: + logger.warning("Flash Attention not recommended with float32, falling back to SDPA") + return "sdpa" if hasattr(torch.nn.functional, 'scaled_dot_product_attention') else "eager" + + # Just informational messages, no forced fallbacks + if device_name and torch.cuda.is_available(): + if "RTX 50" in device_name or "Blackwell" in device_name: + if attention_mode == "flash_attention_2": + logger.info(f"Using Flash Attention on {device_name}") + elif attention_mode == "sdpa": + logger.info(f"Using SDPA on {device_name}") + + return attention_mode + + @staticmethod + def load_model(model_name: str, device, attention_mode: str = "eager", use_llm_4bit: bool = False): + # Validate attention mode + if attention_mode not in ATTENTION_MODES: + logger.warning(f"Unknown attention mode '{attention_mode}', falling back to eager") + attention_mode = "eager" + if use_llm_4bit and attention_mode == "flash_attention_2": + attention_mode = "sdpa" + + # Create cache key that includes attention mode + cache_key = f"{model_name}_attn_{attention_mode}" + + if cache_key in LOADED_MODELS: + logger.info(f"Using cached model with {attention_mode} attention") + return LOADED_MODELS[cache_key] + + model_path = VibeVoiceLoader.get_model_path(model_name) + + logger.info(f"Loading VibeVoice model components from: {model_path}") + + + tokenizer_repo = MODEL_CONFIGS[model_name].get("tokenizer_repo") + try: + tokenizer_file_path = 
hf_hub_download(repo_id=tokenizer_repo, filename="tokenizer.json") + except Exception as e: + raise RuntimeError(f"Could not download tokenizer.json for {tokenizer_repo}. Error: {e}") + + vibevoice_tokenizer = VibeVoiceTextTokenizerFast(tokenizer_file=tokenizer_file_path) + audio_processor = VibeVoiceTokenizerProcessor() + processor = VibeVoiceProcessor(tokenizer=vibevoice_tokenizer, audio_processor=audio_processor) + torch_dtype = model_management.text_encoder_dtype(device) + device_name = torch.cuda.get_device_name() if torch.cuda.is_available() else "" + + # Check compatibility and potentially fall back to safer mode + final_attention_mode = VibeVoiceLoader._check_attention_compatibility( + attention_mode, torch_dtype, device_name + ) + + # Build optional 4-bit config (LLM only) + quant_config = None + if use_llm_4bit: + quant_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=torch.bfloat16, + ) + + logger.info(f"Requested attention mode: {attention_mode}") + if final_attention_mode != attention_mode: + logger.info(f"Using attention mode: {final_attention_mode} (automatic fallback)") + # Update cache key to reflect actual mode used + cache_key = f"{model_name}_attn_{final_attention_mode}" + if cache_key in LOADED_MODELS: + return LOADED_MODELS[cache_key] + else: + logger.info(f"Using attention mode: {final_attention_mode}") + + logger.info(f"Final attention implementation: {final_attention_mode}") + + # Modify config for non-flash attention modes + if final_attention_mode in ["eager", "sdpa"]: + import json + config_path = os.path.join(model_path, "config.json") + if os.path.exists(config_path): + try: + with open(config_path, 'r') as f: + config = json.load(f) + + # Remove flash attention settings + removed_keys = [] + for key in ['_attn_implementation', 'attn_implementation', 'use_flash_attention_2']: + if key in config: + config.pop(key) + removed_keys.append(key) + + if removed_keys: + with open(config_path, 'w') as f: + json.dump(config, f, indent=2) + logger.info(f"Removed FlashAttention settings from config.json: {removed_keys}") + except Exception as e: + logger.warning(f"Could not modify config.json: {e}") + + try: + model = VibeVoiceForConditionalGenerationInference.from_pretrained( + model_path, + torch_dtype=torch.bfloat16 if quant_config else torch_dtype, + attn_implementation=final_attention_mode, + device_map="auto" if quant_config else device, + quantization_config=quant_config, # <- forwarded if supported + ) + model.eval() + setattr(model, "_llm_4bit", bool(quant_config)) + + # Store with the actual attention mode used (not the requested one) + LOADED_MODELS[cache_key] = (model, processor) + logger.info(f"Successfully loaded model with {final_attention_mode} attention") + return model, processor + + except Exception as e: + logger.error(f"Failed to load model with {final_attention_mode} attention: {e}") + + # Progressive fallback: flash -> sdpa -> eager + if final_attention_mode == "flash_attention_2": + logger.info("Attempting fallback to SDPA...") + return VibeVoiceLoader.load_model(model_name, device, "sdpa") + elif final_attention_mode == "sdpa": + logger.info("Attempting fallback to eager...") + return VibeVoiceLoader.load_model(model_name, device, "eager") + else: + # If eager fails, something is seriously wrong + raise RuntimeError(f"Failed to load model even with eager attention: {e}") + + +def set_vibevoice_seed(seed: int): + """Sets the seed for torch, numpy, and random, 
handling large seeds for numpy.""" + if seed == 0: + seed = random.randint(1, 0xffffffffffffffff) + + MAX_NUMPY_SEED = 2**32 - 1 + numpy_seed = seed % MAX_NUMPY_SEED + + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + np.random.seed(numpy_seed) + random.seed(seed) + +def parse_script_1_based(script: str) -> tuple[list[tuple[int, str]], list[int]]: + """ + Parses a 1-based speaker script into a list of (speaker_id, text) tuples + and a list of unique speaker IDs in the order of their first appearance. + Internally, it converts speaker IDs to 0-based for the model. + """ + parsed_lines = [] + speaker_ids_in_script = [] # This will store the 1-based IDs from the script + for line in script.strip().split("\n"): + if not (line := line.strip()): continue + match = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line, re.IGNORECASE) + if match: + speaker_id = int(match.group(1)) + if speaker_id < 1: + logger.warning(f"Speaker ID must be 1 or greater. Skipping line: '{line}'") + continue + text = ' ' + match.group(2).strip() + # Internally, the model expects 0-based indexing for speakers + internal_speaker_id = speaker_id - 1 + parsed_lines.append((internal_speaker_id, text)) + if speaker_id not in speaker_ids_in_script: + speaker_ids_in_script.append(speaker_id) + else: + logger.warning(f"Could not parse line, skipping: '{line}'") + return parsed_lines, sorted(list(set(speaker_ids_in_script))) + +def preprocess_comfy_audio(audio_dict: dict, target_sr: int = 24000) -> np.ndarray: + """ + Converts a ComfyUI AUDIO dict to a mono NumPy array, resampling if necessary. + """ + if not audio_dict: return None + waveform_tensor = audio_dict.get('waveform') + if waveform_tensor is None or waveform_tensor.numel() == 0: return None + + waveform = waveform_tensor[0].cpu().numpy() + original_sr = audio_dict['sample_rate'] + + if waveform.ndim > 1: + waveform = np.mean(waveform, axis=0) + + # Check for invalid values + if np.any(np.isnan(waveform)) or np.any(np.isinf(waveform)): + logger.error("Audio contains NaN or Inf values, replacing with zeros") + waveform = np.nan_to_num(waveform, nan=0.0, posinf=0.0, neginf=0.0) + + # Ensure audio is not completely silent or has extreme values + if np.all(waveform == 0): + logger.warning("Audio waveform is completely silent") + + # Normalize extreme values + max_val = np.abs(waveform).max() + if max_val > 10.0: + logger.warning(f"Audio values are very large (max: {max_val}), normalizing") + waveform = waveform / max_val + + if original_sr != target_sr: + if librosa is None: + raise ImportError("`librosa` package is required for audio resampling. Please install it with `pip install librosa`.") + logger.warning(f"Resampling reference audio from {original_sr}Hz to {target_sr}Hz.") + waveform = librosa.resample(y=waveform, orig_sr=original_sr, target_sr=target_sr) + + # Final check after resampling + if np.any(np.isnan(waveform)) or np.any(np.isinf(waveform)): + logger.error("Audio contains NaN or Inf after resampling, replacing with zeros") + waveform = np.nan_to_num(waveform, nan=0.0, posinf=0.0, neginf=0.0) + + return waveform.astype(np.float32) + +def check_for_interrupt(): + try: + throw_exception_if_processing_interrupted() + return False + except: + return True + +class VibeVoiceTTSNode: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "model_name": (list(MODEL_CONFIGS.keys()), { + "tooltip": "Select the VibeVoice model to use. Models will be downloaded automatically if not present." 
+ }), + "text": ("STRING", { + "multiline": True, + "default": "Speaker 1: Hello from ComfyUI!\nSpeaker 2: VibeVoice sounds amazing.", + "tooltip": "The script for the conversation. Use 'Speaker 1:', 'Speaker 2:', etc. to assign lines to different voices. Each speaker line should be on a new line." + }), + "quantize_llm_4bit": ("BOOLEAN", { + "default": False, "label_on": "Q4 (LLM only)", "label_off": "Full precision", + "tooltip": "Quantize the Qwen2.5 LLM to 4-bit NF4 via bitsandbytes. Diffusion head stays BF16/FP32." + }), + "attention_mode": (["eager", "sdpa", "flash_attention_2"], { + "default": "sdpa", + "tooltip": "Attention implementation: Eager (safest), SDPA (balanced), Flash Attention 2 (fastest but may cause issues on some GPUs like RTX 5090)" + }), + "cfg_scale": ("FLOAT", { + "default": 1.3, "min": 1.0, "max": 2.0, "step": 0.05, + "tooltip": "Classifier-Free Guidance scale. Higher values increase adherence to the voice prompt but may reduce naturalness. Recommended: 1.3" + }), + "inference_steps": ("INT", { + "default": 10, "min": 1, "max": 50, + "tooltip": "Number of diffusion steps for audio generation. More steps can improve quality but take longer. Recommended: 10" + }), + "seed": ("INT", { + "default": 42, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "control_after_generate": True, + "tooltip": "Seed for reproducibility. Set to 0 for a random seed on each run." + }), + "do_sample": ("BOOLEAN", { + "default": True, "label_on": "Enabled (Sampling)", "label_off": "Disabled (Greedy)", + "tooltip": "Enable to use sampling methods (like temperature and top_p) for more varied output. Disable for deterministic (greedy) decoding." + }), + "temperature": ("FLOAT", { + "default": 0.95, "min": 0.0, "max": 2.0, "step": 0.01, + "tooltip": "Controls randomness. Higher values make the output more random and creative, while lower values make it more focused and deterministic. Active only if 'do_sample' is enabled." + }), + "top_p": ("FLOAT", { + "default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01, + "tooltip": "Nucleus sampling (Top-P). The model samples from the smallest set of tokens whose cumulative probability exceeds this value. Active only if 'do_sample' is enabled." + }), + "top_k": ("INT", { + "default": 0, "min": 0, "max": 500, "step": 1, + "tooltip": "Top-K sampling. Restricts sampling to the K most likely next tokens. Set to 0 to disable. Active only if 'do_sample' is enabled." + }), + "force_offload": ("BOOLEAN", { + "default": False, "label_on": "Force Offload", "label_off": "Keep in VRAM", + "tooltip": "Force model to be offloaded from VRAM after generation. Useful to free up memory between generations but may slow down subsequent runs." 
+ }), + }, + "optional": { + "speaker_1_voice": ("AUDIO", {"tooltip": "Reference audio for 'Speaker 1' in the script."}), + "speaker_2_voice": ("AUDIO", {"tooltip": "Reference audio for 'Speaker 2' in the script."}), + "speaker_3_voice": ("AUDIO", {"tooltip": "Reference audio for 'Speaker 3' in the script."}), + "speaker_4_voice": ("AUDIO", {"tooltip": "Reference audio for 'Speaker 4' in the script."}), + } + } + + RETURN_TYPES = ("AUDIO",) + FUNCTION = "generate_audio" + CATEGORY = "audio/tts" + + def generate_audio(self, model_name, text, attention_mode, cfg_scale, inference_steps, seed, do_sample, temperature, top_p, top_k, quantize_llm_4bit, force_offload, **kwargs): + if not text.strip(): + logger.warning("VibeVoiceTTS: Empty text provided, returning silent audio.") + return ({"waveform": torch.zeros((1, 1, 24000), dtype=torch.float32), "sample_rate": 24000},) + + # Create cache key that includes attention mode + cache_key = f"{model_name}_attn_{attention_mode}_q4_{int(quantize_llm_4bit)}" + + # Clean up old models when switching to a different model + if cache_key not in VIBEVOICE_PATCHER_CACHE: + # Only keep models that are currently being requested + cleanup_old_models(keep_cache_key=cache_key) + + model_handler = VibeVoiceModelHandler(model_name, attention_mode, use_llm_4bit=quantize_llm_4bit) + patcher = VibeVoicePatcher( + model_handler, + attention_mode=attention_mode, + load_device=model_management.get_torch_device(), + offload_device=model_management.unet_offload_device(), + size=model_handler.size + ) + VIBEVOICE_PATCHER_CACHE[cache_key] = patcher + + patcher = VIBEVOICE_PATCHER_CACHE[cache_key] + model_management.load_model_gpu(patcher) + model = patcher.model.model + processor = patcher.model.processor + + if model is None or processor is None: + raise RuntimeError("VibeVoice model and processor could not be loaded. Check logs for errors.") + + parsed_lines_0_based, speaker_ids_1_based = parse_script_1_based(text) + if not parsed_lines_0_based: + raise ValueError("Script is empty or invalid. Use 'Speaker 1:', 'Speaker 2:', etc. 
format.") + + full_script = "\n".join([f"Speaker {spk}: {txt}" for spk, txt in parsed_lines_0_based]) + + speaker_inputs = {i: kwargs.get(f"speaker_{i}_voice") for i in range(1, 5)} + voice_samples_np = [preprocess_comfy_audio(speaker_inputs[sid]) for sid in speaker_ids_1_based] + + if any(v is None for v in voice_samples_np): + missing_ids = [sid for sid, v in zip(speaker_ids_1_based, voice_samples_np) if v is None] + raise ValueError(f"Script requires voices for Speakers {missing_ids}, but they were not provided.") + + set_vibevoice_seed(seed) + + try: + inputs = processor( + text=[full_script], voice_samples=[voice_samples_np], padding=True, + return_tensors="pt", return_attention_mask=True + ) + + # Validate inputs before moving to GPU + for key, value in inputs.items(): + if isinstance(value, torch.Tensor): + if torch.any(torch.isnan(value)) or torch.any(torch.isinf(value)): + logger.error(f"Input tensor '{key}' contains NaN or Inf values") + raise ValueError(f"Invalid values in input tensor: {key}") + + inputs = {k: v.to(model.device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()} + + model.set_ddpm_inference_steps(num_steps=inference_steps) + + generation_config = {'do_sample': do_sample} + if do_sample: + generation_config['temperature'] = temperature + generation_config['top_p'] = top_p + if top_k > 0: + generation_config['top_k'] = top_k + + # Hardware-specific optimizations - only for eager mode + if attention_mode == "eager": + # Apply RTX 5090 / Blackwell compatibility fixes only for eager + torch.backends.cuda.matmul.allow_tf32 = False + torch.backends.cudnn.allow_tf32 = False + torch.cuda.empty_cache() + + # Apply additional tensor fixes for eager mode + model = model.float() + processed_inputs = {} + for k, v in inputs.items(): + if isinstance(v, torch.Tensor): + # Keep integer/boolean tensors as-is (token IDs, attention masks, etc.) 
+ if v.dtype in [torch.int, torch.long, torch.int32, torch.int64, torch.bool, torch.uint8]: + processed_inputs[k] = v + # Keep tensors with "mask" in their name as boolean + elif "mask" in k.lower(): + processed_inputs[k] = v.bool() if v.dtype != torch.bool else v + else: + # Convert float/bfloat16 tensors to float32 + processed_inputs[k] = v.float() + else: + processed_inputs[k] = v + inputs = processed_inputs + + with torch.no_grad(): + # Create progress bar for inference steps + pbar = ProgressBar(inference_steps) + + def progress_callback(step, total_steps): + pbar.update(1) + # Check for interruption from ComfyUI + if model_management.interrupt_current_processing: + raise comfy.model_management.InterruptProcessingException() + + # Custom generation loop with interruption support + try: + outputs = model.generate( + **inputs, max_new_tokens=None, cfg_scale=cfg_scale, + tokenizer=processor.tokenizer, generation_config=generation_config, + verbose=False, stop_check_fn=check_for_interrupt + ) + # Note: The model.generate method doesn't support progress callbacks in the current VibeVoice implementation + # But we check for interruption at the start and end of generation + pbar.update(inference_steps - pbar.current) + + except RuntimeError as e: + error_msg = str(e).lower() + if "assertion" in error_msg or "cuda" in error_msg: + logger.error(f"CUDA assertion failed with {attention_mode} attention: {e}") + logger.error("This might be due to invalid input data, GPU memory issues, or incompatible attention mode.") + logger.error("Try restarting ComfyUI, using different audio files, or switching to 'eager' attention mode.") + raise e + except comfy.model_management.InterruptProcessingException: + logger.info("VibeVoice generation interrupted by user") + raise + finally: + pbar.update_absolute(inference_steps) + + except comfy.model_management.InterruptProcessingException: + logger.info("VibeVoice TTS generation was cancelled") + # Return silent audio on cancellation + return ({"waveform": torch.zeros((1, 1, 24000), dtype=torch.float32), "sample_rate": 24000},) + + except Exception as e: + logger.error(f"Error during VibeVoice generation with {attention_mode} attention: {e}") + if "interrupt" in str(e).lower() or "cancel" in str(e).lower(): + logger.info("Generation was interrupted") + return ({"waveform": torch.zeros((1, 1, 24000), dtype=torch.float32), "sample_rate": 24000},) + raise + + output_waveform = outputs.speech_outputs[0] + if output_waveform.ndim == 1: output_waveform = output_waveform.unsqueeze(0) + if output_waveform.ndim == 2: output_waveform = output_waveform.unsqueeze(0) + + # Force offload model if requested + if force_offload: + logger.info(f"Force offloading VibeVoice model '{model_name}' from VRAM...") + # Force offload by unpatching the model and freeing memory + if patcher.is_loaded: + patcher.unpatch_model(unpatch_weights=True) + # Force unload all models to free memory + model_management.unload_all_models() + gc.collect() + model_management.soft_empty_cache() + logger.info("Model force offload completed") + + return ({"waveform": output_waveform.detach().cpu(), "sample_rate": 24000},) + +NODE_CLASS_MAPPINGS = {"VibeVoiceTTS": VibeVoiceTTSNode} +NODE_DISPLAY_NAME_MAPPINGS = {"VibeVoiceTTS": "VibeVoice TTS"}
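
Note (illustration only, not part of the patch): a minimal standalone sketch of the two text-handling rules introduced above — the "Speaker N:" line format accepted by parse_script_1_based and the chunking rule used by VibeVoiceTTS_WrapperNode._split_into_chunks, where a trailing chunk shorter than 40% of lines_per_chunk is merged into the previous one. The helper names below (split_into_chunks, parse_line) are local stand-ins for the sketch, not the node's API.

    import math
    import re

    def split_into_chunks(lines, n):
        # Group lines into blocks of n; merge a short tail (< 40% of n) into the previous block.
        if n <= 0:
            return [lines] if lines else []
        chunks = [lines[i:i + n] for i in range(0, len(lines), n)]
        if len(chunks) >= 2 and len(chunks[-1]) < math.ceil(0.4 * n):
            tail = chunks.pop()
            chunks[-1].extend(tail)
        return chunks

    def parse_line(line):
        # Same pattern as parse_script_1_based: "Speaker <N>: <text>", 1-based speaker IDs.
        m = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line.strip(), re.IGNORECASE)
        return (int(m.group(1)), m.group(2).strip()) if m else None

    script = "\n".join(f"Speaker {1 + i % 2}: line {i}" for i in range(9))
    lines = [ln for ln in script.splitlines() if ln.strip()]
    print([parse_line(ln) for ln in lines[:2]])            # [(1, 'line 0'), (2, 'line 1')]
    print([len(c) for c in split_into_chunks(lines, 4)])   # [4, 5]: tail of 1 < ceil(0.4 * 4) = 2, so it is merged

With 9 parsed lines and lines_per_chunk = 4, the naive grouping would be [4, 4, 1]; because the 1-line tail is below the 40% threshold it is folded into the previous group, giving [4, 5], which is what the wrapper then feeds to VibeVoiceTTSNode.generate_audio chunk by chunk.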