Upload folder using huggingface_hub
- .gitattributes +7 -0
- .idea/.gitignore +3 -0
- .idea/DemoFusion-main.iml +12 -0
- .idea/inspectionProfiles/Project_Default.xml +48 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +4 -0
- .idea/modules.xml +8 -0
- .idea/workspace.xml +71 -0
- README.md +154 -8
- __pycache__/pipeline_demofusion_sdxl.cpython-311.pyc +0 -0
- demo.ipynb +3 -0
- demo_lowvram.py +34 -0
- figures/gradio_demo.png +3 -0
- figures/gradio_demo_controlnet.png +3 -0
- figures/gradio_demo_controlnet_img2img.png +3 -0
- figures/gradio_demo_img2img.png +3 -0
- figures/illustration.jpg +0 -0
- figures/progressive_process.jpg +3 -0
- gradio_demo.py +46 -0
- gradio_demo_controlnet.py +93 -0
- gradio_demo_controlnet_img2img.py +93 -0
- gradio_demo_img2img.py +81 -0
- output_example.png +3 -0
- pipeline_demofusion_sdxl.py +1446 -0
- pipeline_demofusion_sdxl_controlnet.py +1795 -0
- requirements.txt +11 -0
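
Per the commit title, the folder was pushed with `huggingface_hub`. For reference, a minimal sketch of such an upload; the repo id, local folder name, and repo type below are illustrative assumptions, not values recorded in this commit:

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes a write token is already stored, e.g. via `huggingface-cli login`

# Push a local checkout as a single commit, producing a file list like the one above.
api.upload_folder(
    folder_path="DemoFusion-main",    # local directory to upload (placeholder)
    repo_id="your-username/dzai",     # target repo (placeholder)
    repo_type="space",                # the README front matter below targets a Gradio Space
    commit_message="Upload folder using huggingface_hub",
)
```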
.gitattributes
CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+demo.ipynb filter=lfs diff=lfs merge=lfs -text
+figures/gradio_demo.png filter=lfs diff=lfs merge=lfs -text
+figures/gradio_demo_controlnet.png filter=lfs diff=lfs merge=lfs -text
+figures/gradio_demo_controlnet_img2img.png filter=lfs diff=lfs merge=lfs -text
+figures/gradio_demo_img2img.png filter=lfs diff=lfs merge=lfs -text
+figures/progressive_process.jpg filter=lfs diff=lfs merge=lfs -text
+output_example.png filter=lfs diff=lfs merge=lfs -text
.idea/.gitignore
ADDED
@@ -0,0 +1,3 @@
+# 默认忽略的文件
+/shelf/
+/workspace.xml
.idea/DemoFusion-main.iml
ADDED
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="Ai_mode (4)" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="GOOGLE" />
+    <option name="myDocStringFormat" value="Google" />
+  </component>
+</module>
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,48 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredPackages">
+        <value>
+          <list size="28">
+            <item index="0" class="java.lang.String" itemvalue="httpx" />
+            <item index="1" class="java.lang.String" itemvalue="gradio" />
+            <item index="2" class="java.lang.String" itemvalue="open-clip-torch" />
+            <item index="3" class="java.lang.String" itemvalue="PyYAML" />
+            <item index="4" class="java.lang.String" itemvalue="xformers" />
+            <item index="5" class="java.lang.String" itemvalue="numpy" />
+            <item index="6" class="java.lang.String" itemvalue="requests" />
+            <item index="7" class="java.lang.String" itemvalue="fsspec" />
+            <item index="8" class="java.lang.String" itemvalue="kornia" />
+            <item index="9" class="java.lang.String" itemvalue="gradio_client" />
+            <item index="10" class="java.lang.String" itemvalue="openai-clip" />
+            <item index="11" class="java.lang.String" itemvalue="sentencepiece" />
+            <item index="12" class="java.lang.String" itemvalue="wandb" />
+            <item index="13" class="java.lang.String" itemvalue="accelerate" />
+            <item index="14" class="java.lang.String" itemvalue="uvicorn" />
+            <item index="15" class="java.lang.String" itemvalue="urllib3" />
+            <item index="16" class="java.lang.String" itemvalue="triton" />
+            <item index="17" class="java.lang.String" itemvalue="timm" />
+            <item index="18" class="java.lang.String" itemvalue="opencv-python" />
+            <item index="19" class="java.lang.String" itemvalue="pandas" />
+            <item index="20" class="java.lang.String" itemvalue="tqdm" />
+            <item index="21" class="java.lang.String" itemvalue="pytorch-lightning" />
+            <item index="22" class="java.lang.String" itemvalue="fastapi" />
+            <item index="23" class="java.lang.String" itemvalue="einops-exts" />
+            <item index="24" class="java.lang.String" itemvalue="ninja" />
+            <item index="25" class="java.lang.String" itemvalue="matplotlib" />
+            <item index="26" class="java.lang.String" itemvalue="webdataset" />
+            <item index="27" class="java.lang.String" itemvalue="Pillow" />
+          </list>
+        </value>
+      </option>
+    </inspection_tool>
+    <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredIdentifiers">
+        <list>
+          <option value="JSX2511" />
+        </list>
+      </option>
+    </inspection_tool>
+  </profile>
+</component>
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/misc.xml
ADDED
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Ai_mode (4)" project-jdk-type="Python SDK" />
+</project>
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/DemoFusion-main.iml" filepath="$PROJECT_DIR$/.idea/DemoFusion-main.iml" />
+    </modules>
+  </component>
+</project>
.idea/workspace.xml
ADDED
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="AutoImportSettings">
+    <option name="autoReloadType" value="SELECTIVE" />
+  </component>
+  <component name="ChangeListManager">
+    <list default="true" id="cf13c2e0-fea8-4b36-9b87-d1a7a68e5205" name="更改" comment="" />
+    <option name="SHOW_DIALOG" value="false" />
+    <option name="HIGHLIGHT_CONFLICTS" value="true" />
+    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+    <option name="LAST_RESOLUTION" value="IGNORE" />
+  </component>
+  <component name="MarkdownSettingsMigration">
+    <option name="stateVersion" value="1" />
+  </component>
+  <component name="ProjectColorInfo">{
+  "associatedIndex": 2
+}</component>
+  <component name="ProjectId" id="2btF3w7MTy2zdOhszEbQETG3Qqf" />
+  <component name="ProjectViewState">
+    <option name="hideEmptyMiddlePackages" value="true" />
+    <option name="showLibraryContents" value="true" />
+  </component>
+  <component name="PropertiesComponent">{
+  "keyToString": {
+    "RunOnceActivity.OpenProjectViewOnStart": "true",
+    "RunOnceActivity.ShowReadmeOnStart": "true",
+    "last_opened_file_path": "E:/AI/reduce_noise/DemoFusion-main",
+    "settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable"
+  }
+}</component>
+  <component name="RunManager">
+    <configuration name="gradio_demo_img2img" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
+      <module name="DemoFusion-main" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/gradio_demo_img2img.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <recent_temporary>
+      <list>
+        <item itemvalue="Python.gradio_demo_img2img" />
+      </list>
+    </recent_temporary>
+  </component>
+  <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
+  <component name="TaskManager">
+    <task active="true" id="Default" summary="默认任务">
+      <changelist id="cf13c2e0-fea8-4b36-9b87-d1a7a68e5205" name="更改" comment="" />
+      <created>1707026272531</created>
+      <option name="number" value="Default" />
+      <option name="presentableId" value="Default" />
+      <updated>1707026272531</updated>
+    </task>
+    <servers />
+  </component>
+</project>
README.md
CHANGED
@@ -1,12 +1,158 @@
 ---
-title:
-
-colorFrom: green
-colorTo: gray
+title: dzai
+app_file: gradio_demo_img2img.py
 sdk: gradio
-sdk_version: 4.
-app_file: app.py
-pinned: false
+sdk_version: 4.8.0
 ---
+# DemoFusion
+[](https://ruoyidu.github.io/demofusion/demofusion.html)
+[](https://arxiv.org/pdf/2311.16973.pdf)
+[](https://replicate.com/lucataco/demofusion)
+[](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_colab.ipynb)
+[](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL)
+[](https://badges.toozhao.com/stats/01HFMAPCVTA1T32KN2PASNYGYK "Get your own page views count badge on badges.toozhao.com")
 
-
+Code release for "DemoFusion: Democratising High-Resolution Image Generation With No 💰" (arXiv 2023)
+
+<img src="figures/illustration.jpg" width="800"/>
+
+**Abstract**: High-resolution image generation with Generative Artificial Intelligence (GenAI) has immense potential but, due to the enormous capital investment required for training, it is increasingly centralised to a few large corporations, and hidden behind paywalls. This paper aims to democratise high-resolution GenAI by advancing the frontier of high-resolution generation while remaining accessible to a broad audience. We demonstrate that existing Latent Diffusion Models (LDMs) possess untapped potential for higher-resolution image generation. Our novel DemoFusion framework seamlessly extends open-source GenAI models, employing Progressive Upscaling, Skip Residual, and Dilated Sampling mechanisms to achieve higher-resolution image generation. The progressive nature of DemoFusion requires more passes, but the intermediate results can serve as "previews", facilitating rapid prompt iteration.
+
+# News
+- **2023.12.12**: ✨ DemoFusion with ControlNet is available now! Check it out at `pipeline_demofusion_sdxl_controlnet`! The local [Gradio Demo](https://github.com/PRIS-CV/DemoFusion#DemoFusionControlNet-with-local-Gradio-demo) is also available.
+- **2023.12.10**: ✨ Image2Image is supported by `pipeline_demofusion_sdxl` now! The local [Gradio Demo](https://github.com/PRIS-CV/DemoFusion#Image2Image-with-local-Gradio-demo) is also available.
+- **2023.12.08**: 🚀 A HuggingFace Demo for Img2Img is now available! [](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL) Thanks to [Radamés](https://github.com/radames) for the implementation and [](https://huggingface.co/docs/diffusers/index) for the support!
+- **2023.12.07**: 🚀 Added a Colab demo [](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_colab.ipynb). Check it out! Thanks to [camenduru](https://github.com/camenduru) for the implementation!
+- **2023.12.06**: ✨ The local [Gradio Demo](https://github.com/PRIS-CV/DemoFusion#Text2Image-with-local-Gradio-demo) is now available! Better interaction and presentation!
+- **2023.12.04**: ✨ A [low-VRAM version](https://github.com/PRIS-CV/DemoFusion#Text2Image-on-Windows-with-8-GB-of-VRAM) of DemoFusion is available! Thanks to [klimaleksus](https://github.com/klimaleksus) for the implementation!
+- **2023.12.01**: 🚀 Integrated into [Replicate](https://replicate.com/explore). Check out the online demo: [](https://replicate.com/lucataco/demofusion) Thanks to [Luis C.](https://github.com/lucataco) for the implementation!
+- **2023.11.29**: 💰 `pipeline_demofusion_sdxl` is released.
+
+# Usage
+## A quick try with integrated demos
+- HuggingFace Space: Try Text2Image generation at [](https://huggingface.co/spaces/fffiloni/DemoFusion) and Image2Image enhancement at [](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL).
+- Colab: Try Text2Image generation at [](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_colab.ipynb) and Image2Image enhancement at [](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_img2img_colab.ipynb).
+- Replicate: Try Text2Image generation at [](https://replicate.com/lucataco/demofusion) and Image2Image enhancement at [](https://replicate.com/lucataco/demofusion-enhance).
+
+## Starting with our code
+### Hyper-parameters
+- `view_batch_size` (`int`, defaults to 16):
+  The batch size for multiple denoising paths. Typically, a larger batch size can result in higher efficiency but comes with increased GPU memory requirements.
+- `stride` (`int`, defaults to 64):
+  The stride of moving local patches. A smaller stride is better for alleviating seam issues, but it also introduces additional computational overhead and inference time.
+- `cosine_scale_1` (`float`, defaults to 3):
+  Controls the decreasing rate of the skip-residual. A smaller value results in better consistency with the low-resolution results, but it may lead to more pronounced upsampling noise. Please refer to Appendix C of the DemoFusion paper.
+- `cosine_scale_2` (`float`, defaults to 1):
+  Controls the decreasing rate of dilated sampling. A smaller value can better address the repetition issue, but it may lead to grainy images. For specific impacts, please refer to Appendix C of the DemoFusion paper.
+- `cosine_scale_3` (`float`, defaults to 1):
+  Controls the decreasing rate of the Gaussian filter. A smaller value results in less grainy images, but it may lead to over-smoothed images. Please refer to Appendix C of the DemoFusion paper.
+- `sigma` (`float`, defaults to 1):
+  The standard deviation of the Gaussian filter. A larger sigma promotes the global guidance of dilated sampling, but it has the potential of over-smoothing.
+- `multi_decoder` (`bool`, defaults to True):
+  Determines whether to use a tiled decoder. Generally, a tiled decoder becomes necessary when the resolution exceeds 3072×3072 on an RTX 3090 GPU.
+- `show_image` (`bool`, defaults to False):
+  Determines whether to show intermediate results during generation.
+
+### Text2Image (will take about 17 GB of VRAM)
+- Set up the dependencies as:
+```
+conda create -n demofusion python=3.9
+conda activate demofusion
+pip install -r requirements.txt
+```
+- Download `pipeline_demofusion_sdxl.py` and run it as follows. A use case can be found in `demo.ipynb`.
+```
+from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+import torch
+
+model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+prompt = "Envision a portrait of an elderly woman, her face a canvas of time, framed by a headscarf with muted tones of rust and cream. Her eyes, blue like faded denim. Her attire, simple yet dignified."
+negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
+
+images = pipe(prompt, negative_prompt=negative_prompt,
+              height=3072, width=3072, view_batch_size=16, stride=64,
+              num_inference_steps=50, guidance_scale=7.5,
+              cosine_scale_1=3, cosine_scale_2=1, cosine_scale_3=1, sigma=0.8,
+              multi_decoder=True, show_image=True
+              )
+
+for i, image in enumerate(images):
+    image.save('image_' + str(i) + '.png')
+```
+- ⚠️ When you have enough VRAM (e.g., generating 2048×2048 images on hardware with more than 18 GB of VRAM), you can set `multi_decoder=False`, which makes the decoding process faster.
+- Please feel free to try different prompts and resolutions.
+- Default hyper-parameters are recommended, but they may not be optimal for all cases. For the specific impact of each hyper-parameter, please refer to Appendix C of the DemoFusion paper.
+- The code was cleaned before the release. If you encounter any issues, please contact us.
+
+### Text2Image on Windows with 8 GB of VRAM
+
+- Set up the environment as:
+
+```
+cmd
+git clone "https://github.com/PRIS-CV/DemoFusion"
+cd DemoFusion
+python -m venv venv
+venv\Scripts\activate
+pip install -U "xformers==0.0.22.post7+cu118" --index-url https://download.pytorch.org/whl/cu118
+pip install "diffusers==0.21.4" "matplotlib==3.8.2" "transformers==4.35.2" "accelerate==0.25.0"
+```
+
+- Launch DemoFusion as follows. The use case can be found in `demo_lowvram.py`.
+
+```
+python
+from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+
+import torch
+from diffusers.models import AutoencoderKL
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+
+model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16, vae=vae)
+pipe = pipe.to("cuda")
+
+prompt = "Envision a portrait of an elderly woman, her face a canvas of time, framed by a headscarf with muted tones of rust and cream. Her eyes, blue like faded denim. Her attire, simple yet dignified."
+negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
+
+images = pipe(prompt, negative_prompt=negative_prompt,
+              height=2048, width=2048, view_batch_size=4, stride=64,
+              num_inference_steps=40, guidance_scale=7.5,
+              cosine_scale_1=3, cosine_scale_2=1, cosine_scale_3=1, sigma=0.8,
+              multi_decoder=True, show_image=False, lowvram=True
+              )
+
+for i, image in enumerate(images):
+    image.save('image_' + str(i) + '.png')
+```
+### Text2Image with local Gradio demo
+- Make sure you have installed `gradio` and `gradio_imageslider`.
+- Launch the DemoFusion Gradio demo with `python gradio_demo.py` -- better interaction and presentation!
+<img src="figures/gradio_demo.png" width="600"/>
+
+### Image2Image with local Gradio demo
+- Make sure you have installed `gradio` and `gradio_imageslider`.
+- Launch DemoFusion Image2Image with `python gradio_demo_img2img.py`.
+<img src="figures/gradio_demo_img2img.png" width="600"/>
+- ⚠️ Please note that, as a tuning-free framework, DemoFusion's Image2Image capability is strongly correlated with SDXL's training data distribution and will show a significant bias. An accurate prompt describing the content and style of the input also significantly improves performance. Have fun and regard it as a side application of text+image based generation.
+
+### DemoFusion+ControlNet with local Gradio demo
+- Make sure you have installed `gradio` and `gradio_imageslider`.
+- Launch DemoFusion+ControlNet Text2Image with `python gradio_demo_controlnet.py`.
+- <img src="figures/gradio_demo_controlnet.png" width="600"/>
+- Launch DemoFusion+ControlNet Image2Image with `python gradio_demo_controlnet_img2img.py`.
+- <img src="figures/gradio_demo_controlnet_img2img.png" width="600"/>
+
+## Citation
+If you find this paper useful in your research, please consider citing:
+```
+@article{du2023demofusion,
+  title={DemoFusion: Democratising High-Resolution Image Generation With No $$$},
+  author={Du, Ruoyi and Chang, Dongliang and Hospedales, Timothy and Song, Yi-Zhe and Ma, Zhanyu},
+  journal={arXiv preprint arXiv:2311.16973},
+  year={2023}
+}
+```
__pycache__/pipeline_demofusion_sdxl.cpython-311.pyc
ADDED
Binary file (77.2 kB).
demo.ipynb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6bbe553656b3d9c863a261a053722930b3b538d5b6b05eac66ff9ae83eaf976
+size 17016845
demo_lowvram.py
ADDED
@@ -0,0 +1,34 @@
+
+'''
+Installation on Windows for GPU with 8 Gb of VRAM and xformers:
+
+git clone "https://github.com/PRIS-CV/DemoFusion"
+cd DemoFusion
+python -m venv venv
+venv\Scripts\activate
+pip install -U "xformers==0.0.22.post7+cu118" --index-url https://download.pytorch.org/whl/cu118
+pip install "diffusers==0.21.4" "matplotlib==3.8.2" "transformers==4.35.2" "accelerate==0.25.0"
+'''
+
+from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+
+import torch
+from diffusers.models import AutoencoderKL
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+
+model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16, vae=vae)
+pipe = pipe.to("cuda")
+
+prompt = "Envision a portrait of an elderly woman, her face a canvas of time, framed by a headscarf with muted tones of rust and cream. Her eyes, blue like faded denim. Her attire, simple yet dignified."
+negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
+
+images = pipe(prompt, negative_prompt=negative_prompt,
+              height=2048, width=2048, view_batch_size=4, stride=64,
+              num_inference_steps=40, guidance_scale=7.5,
+              cosine_scale_1=3, cosine_scale_2=1, cosine_scale_3=1, sigma=0.8,
+              multi_decoder=True, show_image=False, lowvram=True
+              )
+
+for i, image in enumerate(images):
+    image.save('image_'+str(i)+'.png')
figures/gradio_demo.png ADDED (Git LFS)
figures/gradio_demo_controlnet.png ADDED (Git LFS)
figures/gradio_demo_controlnet_img2img.png ADDED (Git LFS)
figures/gradio_demo_img2img.png ADDED (Git LFS)
figures/illustration.jpg ADDED
figures/progressive_process.jpg ADDED (Git LFS)
gradio_demo.py
ADDED
@@ -0,0 +1,46 @@
+import gradio as gr
+from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+from gradio_imageslider import ImageSlider
+import torch
+
+def generate_images(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed):
+    model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+    pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
+    pipe = pipe.to("cuda")
+
+    generator = torch.Generator(device='cuda')
+    generator = generator.manual_seed(int(seed))
+
+    images = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
+                  height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                  num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                  cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                  multi_decoder=True, show_image=False
+                  )
+
+    return (images[0], images[-1])
+
+iface = gr.Interface(
+    fn=generate_images,
+    inputs=[
+        gr.Textbox(label="Prompt"),
+        gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic"),
+        gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height"),
+        gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width"),
+        gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Num Inference Steps"),
+        gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale"),
+        gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1"),
+        gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2"),
+        gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3"),
+        gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma"),
+        gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size"),
+        gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride"),
+        gr.Number(label="Seed", value=2013)
+    ],
+    # outputs=gr.Gallery(label="Generated Images"),
+    outputs=ImageSlider(label="Comparison of SDXL and DemoFusion"),
+    title="DemoFusion Gradio Demo",
+    description="Generate images with the DemoFusion SDXL Pipeline."
+)
+
+iface.launch()
gradio_demo_controlnet.py
ADDED
@@ -0,0 +1,93 @@
+import gradio as gr
+from diffusers import ControlNetModel, AutoencoderKL
+from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+from pipeline_demofusion_sdxl_controlnet import DemoFusionSDXLControlNetPipeline
+from gradio_imageslider import ImageSlider
+import torch, gc
+from torchvision import transforms
+from PIL import Image
+import numpy as np
+import cv2
+
+def load_and_process_image(pil_image):
+    transform = transforms.Compose(
+        [
+            transforms.Resize((1024, 1024)),
+            transforms.ToTensor(),
+            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        ]
+    )
+    image = transform(pil_image)
+    image = image.unsqueeze(0).half()
+    return image
+
+
+def pad_image(image):
+    w, h = image.size
+    if w == h:
+        return image
+    elif w > h:
+        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+        pad_w = 0
+        pad_h = (w - h) // 2
+        new_image.paste(image, (0, pad_h))
+        return new_image
+    else:
+        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+        pad_w = (h - w) // 2
+        pad_h = 0
+        new_image.paste(image, (pad_w, 0))
+        return new_image
+
+def generate_images(prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, input_image):
+    padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
+    image_lr = load_and_process_image(padded_image).to('cuda')
+    controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
+    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+    pipe = DemoFusionSDXLControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+    pipe = pipe.to("cuda")
+    generator = torch.Generator(device='cuda')
+    generator = generator.manual_seed(int(seed))
+    # get canny image
+    canny_image = np.array(padded_image)
+    canny_image = cv2.Canny(canny_image, 100, 200)
+    canny_image = canny_image[:, :, None]
+    canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
+    canny_image = Image.fromarray(canny_image)
+    images = pipe(prompt, negative_prompt=negative_prompt, controlnet_conditioning_scale=controlnet_conditioning_scale,
+                  condition_image=canny_image, generator=generator,
+                  height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                  num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                  cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                  multi_decoder=True, show_image=False, lowvram=False
+                  )
+    for i, image in enumerate(images):
+        image.save('image_'+str(i)+'.png')
+    pipe = None
+    gc.collect()
+    torch.cuda.empty_cache()
+    return (canny_image, images[-1])
+
+with gr.Blocks(title=f"DemoFusion") as demo:
+    with gr.Column():
+        with gr.Row():
+            with gr.Group():
+                image_input = gr.Image(type="pil", label="Input Image")
+                prompt = gr.Textbox(label="Prompt", value="")
+                negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic")
+                controlnet_conditioning_scale = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="ControlNet Conditioning Scale")
+                width = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width")
+                height = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height")
+                num_inference_steps = gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Num Inference Steps")
+                guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale")
+                cosine_scale_1 = gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1")
+                cosine_scale_2 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2")
+                cosine_scale_3 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3")
+                sigma = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma")
+                view_batch_size = gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size")
+                stride = gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride")
+                seed = gr.Number(label="Seed", value=2013)
+                button = gr.Button()
+            output_images = ImageSlider(show_label=False)
+    button.click(fn=generate_images, inputs=[prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, image_input], outputs=[output_images], show_progress=True)
+demo.queue().launch(inline=False, share=True, debug=True)
gradio_demo_controlnet_img2img.py
ADDED
@@ -0,0 +1,93 @@
+import gradio as gr
+from diffusers import ControlNetModel, AutoencoderKL
+from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+from pipeline_demofusion_sdxl_controlnet import DemoFusionSDXLControlNetPipeline
+from gradio_imageslider import ImageSlider
+import torch, gc
+from torchvision import transforms
+from PIL import Image
+import numpy as np
+import cv2
+
+def load_and_process_image(pil_image):
+    transform = transforms.Compose(
+        [
+            transforms.Resize((1024, 1024)),
+            transforms.ToTensor(),
+            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        ]
+    )
+    image = transform(pil_image)
+    image = image.unsqueeze(0).half()
+    return image
+
+
+def pad_image(image):
+    w, h = image.size
+    if w == h:
+        return image
+    elif w > h:
+        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+        pad_w = 0
+        pad_h = (w - h) // 2
+        new_image.paste(image, (0, pad_h))
+        return new_image
+    else:
+        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+        pad_w = (h - w) // 2
+        pad_h = 0
+        new_image.paste(image, (pad_w, 0))
+        return new_image
+
+def generate_images(prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, input_image):
+    padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
+    image_lr = load_and_process_image(padded_image).to('cuda')
+    controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
+    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+    pipe = DemoFusionSDXLControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+    pipe = pipe.to("cuda")
+    generator = torch.Generator(device='cuda')
+    generator = generator.manual_seed(int(seed))
+    # get canny image
+    canny_image = np.array(padded_image)
+    canny_image = cv2.Canny(canny_image, 100, 200)
+    canny_image = canny_image[:, :, None]
+    canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
+    canny_image = Image.fromarray(canny_image)
+    images = pipe(prompt, negative_prompt=negative_prompt, controlnet_conditioning_scale=controlnet_conditioning_scale,
+                  image_lr=image_lr, condition_image=canny_image, generator=generator,
+                  height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                  num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                  cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                  multi_decoder=True, show_image=False, lowvram=False
+                  )
+    for i, image in enumerate(images):
+        image.save('image_'+str(i)+'.png')
+    pipe = None
+    gc.collect()
+    torch.cuda.empty_cache()
+    return (images[0], images[-1])
+
+with gr.Blocks(title=f"DemoFusion") as demo:
+    with gr.Column():
+        with gr.Row():
+            with gr.Group():
+                image_input = gr.Image(type="pil", label="Input Image")
+                prompt = gr.Textbox(label="Prompt (Note: an accurate prompt to describe the content and style of the input will significantly improve performance.)", value="8k high definition, high details")
+                negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic")
+                controlnet_conditioning_scale = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="ControlNet Conditioning Scale")
+                width = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width")
+                height = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height")
+                num_inference_steps = gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Num Inference Steps")
+                guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale")
+                cosine_scale_1 = gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1")
+                cosine_scale_2 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2")
+                cosine_scale_3 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3")
+                sigma = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma")
+                view_batch_size = gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size")
+                stride = gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride")
+                seed = gr.Number(label="Seed", value=2013)
+                button = gr.Button()
+            output_images = ImageSlider(show_label=False)
+    button.click(fn=generate_images, inputs=[prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, image_input], outputs=[output_images], show_progress=True)
+demo.queue().launch(inline=False, share=True, debug=True)
gradio_demo_img2img.py
ADDED
@@ -0,0 +1,81 @@
+import gradio as gr
+from diffusers import AutoencoderKL
+from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+from gradio_imageslider import ImageSlider
+import torch, gc
+from torchvision import transforms
+from PIL import Image
+
+def load_and_process_image(pil_image):
+    transform = transforms.Compose(
+        [
+            transforms.Resize((1024, 1024)),
+            transforms.ToTensor(),
+            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        ]
+    )
+    image = transform(pil_image)
+    image = image.unsqueeze(0).half()
+    return image
+
+
+def pad_image(image):
+    w, h = image.size
+    if w == h:
+        return image
+    elif w > h:
+        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+        pad_w = 0
+        pad_h = (w - h) // 2
+        new_image.paste(image, (0, pad_h))
+        return new_image
+    else:
+        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+        pad_w = (h - w) // 2
+        pad_h = 0
+        new_image.paste(image, (pad_w, 0))
+        return new_image
+
+def generate_images(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, input_image):
+    padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
+    image_lr = load_and_process_image(padded_image).to('cuda')
+    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+    pipe = DemoFusionSDXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16)
+    pipe = pipe.to("cuda")
+    generator = torch.Generator(device='cuda')
+    generator = generator.manual_seed(int(seed))
+    images = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
+                  height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                  num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                  cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                  multi_decoder=True, show_image=False, lowvram=False, image_lr=image_lr
+                  )
+    for i, image in enumerate(images):
+        image.save('image_'+str(i)+'.png')
+    pipe = None
+    gc.collect()
+    torch.cuda.empty_cache()
+    return (images[0], images[-1])
+
+with gr.Blocks(title=f"DemoFusion") as demo:
+    with gr.Column():
+        with gr.Row():
+            with gr.Group():
+                image_input = gr.Image(type="pil", label="Input Image")
+                prompt = gr.Textbox(label="Prompt (Note: an accurate prompt to describe the content and style of the input will significantly improve performance.)", value="8k high definition, high details")
+                negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic")
+                width = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width")
+                height = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height")
+                num_inference_steps = gr.Slider(minimum=5, maximum=100, step=1, value=50, label="Num Inference Steps")
+                guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale")
+                cosine_scale_1 = gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1")
+                cosine_scale_2 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2")
+                cosine_scale_3 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3")
+                sigma = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma")
+                view_batch_size = gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size")
+                stride = gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride")
+                seed = gr.Number(label="Seed", value=2013)
+                button = gr.Button()
+            output_images = ImageSlider(show_label=False)
+    button.click(fn=generate_images, inputs=[prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, image_input], outputs=[output_images], show_progress=True)
+demo.queue().launch(inline=False, share=True, debug=True)
output_example.png ADDED (Git LFS)
pipeline_demofusion_sdxl.py
ADDED
@@ -0,0 +1,1446 @@
| 1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
import os
|
| 17 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 18 |
+
import matplotlib.pyplot as plt
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn.functional as F
|
| 22 |
+
import numpy as np
|
| 23 |
+
import random
|
| 24 |
+
import warnings
|
| 25 |
+
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
| 26 |
+
|
| 27 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 28 |
+
from diffusers.loaders import (
|
| 29 |
+
FromSingleFileMixin,
|
| 30 |
+
LoraLoaderMixin,
|
| 31 |
+
TextualInversionLoaderMixin,
|
| 32 |
+
)
|
| 33 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 34 |
+
from diffusers.models.attention_processor import (
|
| 35 |
+
AttnProcessor2_0,
|
| 36 |
+
LoRAAttnProcessor2_0,
|
| 37 |
+
LoRAXFormersAttnProcessor,
|
| 38 |
+
XFormersAttnProcessor,
|
| 39 |
+
)
|
| 40 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 41 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 42 |
+
from diffusers.utils import (
|
| 43 |
+
is_accelerate_available,
|
| 44 |
+
is_accelerate_version,
|
| 45 |
+
is_invisible_watermark_available,
|
| 46 |
+
logging,
|
| 47 |
+
replace_example_docstring,
|
| 48 |
+
)
|
| 49 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 50 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 51 |
+
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
if is_invisible_watermark_available():
|
| 55 |
+
from .watermark import StableDiffusionXLWatermarker
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 59 |
+
|
| 60 |
+
EXAMPLE_DOC_STRING = """
|
| 61 |
+
Examples:
|
| 62 |
+
```py
|
| 63 |
+
>>> import torch
|
| 64 |
+
>>> from diffusers import StableDiffusionXLPipeline
|
| 65 |
+
|
| 66 |
+
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 67 |
+
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
| 68 |
+
... )
|
| 69 |
+
>>> pipe = pipe.to("cuda")
|
| 70 |
+
|
| 71 |
+
>>> prompt = "a photo of an astronaut riding a horse on mars"
|
| 72 |
+
>>> image = pipe(prompt).images[0]
|
| 73 |
+
```
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
|
| 77 |
+
x_coord = torch.arange(kernel_size)
|
| 78 |
+
gaussian_1d = torch.exp(-(x_coord - (kernel_size - 1) / 2) ** 2 / (2 * sigma ** 2))
|
| 79 |
+
gaussian_1d = gaussian_1d / gaussian_1d.sum()
|
| 80 |
+
gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
|
| 81 |
+
kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)
|
| 82 |
+
|
| 83 |
+
return kernel
|
| 84 |
+
|
| 85 |
+
def gaussian_filter(latents, kernel_size=3, sigma=1.0):
|
| 86 |
+
channels = latents.shape[1]
|
| 87 |
+
kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
|
| 88 |
+
blurred_latents = F.conv2d(latents, kernel, padding=kernel_size//2, groups=channels)
|
| 89 |
+
|
| 90 |
+
return blurred_latents
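# Note (illustrative): `gaussian_filter` smooths the latents used for dilated sampling in the
# upscaling phases; its strength is scheduled through the `cosine_scale_3` and `sigma`
# arguments of `__call__` below. A minimal sanity check, assuming only PyTorch:
#   x = torch.randn(1, 4, 128, 128)
#   y = gaussian_filter(x, kernel_size=3, sigma=1.0)
#   assert y.shape == x.shape  # padding=kernel_size//2 preserves the spatial size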
|
| 91 |
+
|
| 92 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
|
| 93 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 94 |
+
"""
|
| 95 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 96 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 97 |
+
"""
|
| 98 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 99 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 100 |
+
# rescale the results from guidance (fixes overexposure)
|
| 101 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 102 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 103 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 104 |
+
return noise_cfg
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class DemoFusionSDXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin):
|
| 108 |
+
"""
|
| 109 |
+
Pipeline for text-to-image generation using Stable Diffusion XL.
|
| 110 |
+
|
| 111 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 112 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 113 |
+
|
| 114 |
+
In addition the pipeline inherits the following loading methods:
|
| 115 |
+
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
|
| 116 |
+
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
|
| 117 |
+
|
| 118 |
+
as well as the following saving methods:
|
| 119 |
+
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
|
| 120 |
+
|
| 121 |
+
Args:
|
| 122 |
+
vae ([`AutoencoderKL`]):
|
| 123 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 124 |
+
text_encoder ([`CLIPTextModel`]):
|
| 125 |
+
Frozen text-encoder. Stable Diffusion XL uses the text portion of
|
| 126 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 127 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 128 |
+
text_encoder_2 ([`CLIPTextModelWithProjection`]):
|
| 129 |
+
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
|
| 130 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
|
| 131 |
+
specifically the
|
| 132 |
+
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
|
| 133 |
+
variant.
|
| 134 |
+
tokenizer (`CLIPTokenizer`):
|
| 135 |
+
Tokenizer of class
|
| 136 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 137 |
+
tokenizer_2 (`CLIPTokenizer`):
|
| 138 |
+
Second Tokenizer of class
|
| 139 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 140 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 141 |
+
scheduler ([`SchedulerMixin`]):
|
| 142 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 143 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 144 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
|
| 145 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
| 146 |
+
`stabilityai/stable-diffusion-xl-base-1-0`.
|
| 147 |
+
add_watermarker (`bool`, *optional*):
|
| 148 |
+
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
|
| 149 |
+
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
|
| 150 |
+
watermarker will be used.
|
| 151 |
+
"""
|
| 152 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
|
| 153 |
+
|
| 154 |
+
def __init__(
|
| 155 |
+
self,
|
| 156 |
+
vae: AutoencoderKL,
|
| 157 |
+
text_encoder: CLIPTextModel,
|
| 158 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
| 159 |
+
tokenizer: CLIPTokenizer,
|
| 160 |
+
tokenizer_2: CLIPTokenizer,
|
| 161 |
+
unet: UNet2DConditionModel,
|
| 162 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 163 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 164 |
+
add_watermarker: Optional[bool] = None,
|
| 165 |
+
):
|
| 166 |
+
super().__init__()
|
| 167 |
+
|
| 168 |
+
self.register_modules(
|
| 169 |
+
vae=vae,
|
| 170 |
+
text_encoder=text_encoder,
|
| 171 |
+
text_encoder_2=text_encoder_2,
|
| 172 |
+
tokenizer=tokenizer,
|
| 173 |
+
tokenizer_2=tokenizer_2,
|
| 174 |
+
unet=unet,
|
| 175 |
+
scheduler=scheduler,
|
| 176 |
+
)
|
| 177 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 178 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
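# For the standard SDXL VAE (four down blocks) this evaluates to 8, so a 1024x1024 image
# corresponds to a 128x128 latent grid.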
|
| 179 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 180 |
+
self.default_sample_size = self.unet.config.sample_size
|
| 181 |
+
|
| 182 |
+
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
| 183 |
+
|
| 184 |
+
if add_watermarker:
|
| 185 |
+
self.watermark = StableDiffusionXLWatermarker()
|
| 186 |
+
else:
|
| 187 |
+
self.watermark = None
|
| 188 |
+
|
| 189 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
|
| 190 |
+
def enable_vae_slicing(self):
|
| 191 |
+
r"""
|
| 192 |
+
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
| 193 |
+
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
| 194 |
+
"""
|
| 195 |
+
self.vae.enable_slicing()
|
| 196 |
+
|
| 197 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
|
| 198 |
+
def disable_vae_slicing(self):
|
| 199 |
+
r"""
|
| 200 |
+
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
|
| 201 |
+
computing decoding in one step.
|
| 202 |
+
"""
|
| 203 |
+
self.vae.disable_slicing()
|
| 204 |
+
|
| 205 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
|
| 206 |
+
def enable_vae_tiling(self):
|
| 207 |
+
r"""
|
| 208 |
+
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
| 209 |
+
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
| 210 |
+
processing larger images.
|
| 211 |
+
"""
|
| 212 |
+
self.vae.enable_tiling()
|
| 213 |
+
|
| 214 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
|
| 215 |
+
def disable_vae_tiling(self):
|
| 216 |
+
r"""
|
| 217 |
+
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
|
| 218 |
+
computing decoding in one step.
|
| 219 |
+
"""
|
| 220 |
+
self.vae.disable_tiling()
|
| 221 |
+
|
| 222 |
+
def encode_prompt(
|
| 223 |
+
self,
|
| 224 |
+
prompt: str,
|
| 225 |
+
prompt_2: Optional[str] = None,
|
| 226 |
+
device: Optional[torch.device] = None,
|
| 227 |
+
num_images_per_prompt: int = 1,
|
| 228 |
+
do_classifier_free_guidance: bool = True,
|
| 229 |
+
negative_prompt: Optional[str] = None,
|
| 230 |
+
negative_prompt_2: Optional[str] = None,
|
| 231 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 232 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 233 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 234 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 235 |
+
lora_scale: Optional[float] = None,
|
| 236 |
+
):
|
| 237 |
+
r"""
|
| 238 |
+
Encodes the prompt into text encoder hidden states.
|
| 239 |
+
|
| 240 |
+
Args:
|
| 241 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 242 |
+
prompt to be encoded
|
| 243 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 244 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 245 |
+
used in both text-encoders
|
| 246 |
+
device: (`torch.device`):
|
| 247 |
+
torch device
|
| 248 |
+
num_images_per_prompt (`int`):
|
| 249 |
+
number of images that should be generated per prompt
|
| 250 |
+
do_classifier_free_guidance (`bool`):
|
| 251 |
+
whether to use classifier free guidance or not
|
| 252 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 253 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 254 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 255 |
+
less than `1`).
|
| 256 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 257 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 258 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 259 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 260 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 261 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 262 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 263 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 264 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 265 |
+
argument.
|
| 266 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 267 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 268 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 269 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 270 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 271 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 272 |
+
input argument.
|
| 273 |
+
lora_scale (`float`, *optional*):
|
| 274 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 275 |
+
"""
|
| 276 |
+
device = device or self._execution_device
|
| 277 |
+
|
| 278 |
+
# set lora scale so that monkey patched LoRA
|
| 279 |
+
# function of text encoder can correctly access it
|
| 280 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 281 |
+
self._lora_scale = lora_scale
|
| 282 |
+
|
| 283 |
+
# dynamically adjust the LoRA scale
|
| 284 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 285 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 286 |
+
|
| 287 |
+
if prompt is not None and isinstance(prompt, str):
|
| 288 |
+
batch_size = 1
|
| 289 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 290 |
+
batch_size = len(prompt)
|
| 291 |
+
else:
|
| 292 |
+
batch_size = prompt_embeds.shape[0]
|
| 293 |
+
|
| 294 |
+
# Define tokenizers and text encoders
|
| 295 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 296 |
+
text_encoders = (
|
| 297 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 298 |
+
)
|
| 299 |
+
|
| 300 |
+
if prompt_embeds is None:
|
| 301 |
+
prompt_2 = prompt_2 or prompt
|
| 302 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 303 |
+
prompt_embeds_list = []
|
| 304 |
+
prompts = [prompt, prompt_2]
|
| 305 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 306 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 307 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 308 |
+
|
| 309 |
+
text_inputs = tokenizer(
|
| 310 |
+
prompt,
|
| 311 |
+
padding="max_length",
|
| 312 |
+
max_length=tokenizer.model_max_length,
|
| 313 |
+
truncation=True,
|
| 314 |
+
return_tensors="pt",
|
| 315 |
+
)
|
| 316 |
+
|
| 317 |
+
text_input_ids = text_inputs.input_ids
|
| 318 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 319 |
+
|
| 320 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 321 |
+
text_input_ids, untruncated_ids
|
| 322 |
+
):
|
| 323 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 324 |
+
logger.warning(
|
| 325 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 326 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 327 |
+
)
|
| 328 |
+
|
| 329 |
+
prompt_embeds = text_encoder(
|
| 330 |
+
text_input_ids.to(device),
|
| 331 |
+
output_hidden_states=True,
|
| 332 |
+
)
|
| 333 |
+
|
| 334 |
+
# We are always only interested in the pooled output of the final text encoder
|
| 335 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 336 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 337 |
+
|
| 338 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 339 |
+
|
| 340 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 341 |
+
|
| 342 |
+
# get unconditional embeddings for classifier free guidance
|
| 343 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 344 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 345 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 346 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 347 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 348 |
+
negative_prompt = negative_prompt or ""
|
| 349 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 350 |
+
|
| 351 |
+
uncond_tokens: List[str]
|
| 352 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 353 |
+
raise TypeError(
|
| 354 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 355 |
+
f" {type(prompt)}."
|
| 356 |
+
)
|
| 357 |
+
elif isinstance(negative_prompt, str):
|
| 358 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 359 |
+
elif batch_size != len(negative_prompt):
|
| 360 |
+
raise ValueError(
|
| 361 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 362 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 363 |
+
" the batch size of `prompt`."
|
| 364 |
+
)
|
| 365 |
+
else:
|
| 366 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 367 |
+
|
| 368 |
+
negative_prompt_embeds_list = []
|
| 369 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 370 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 371 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 372 |
+
|
| 373 |
+
max_length = prompt_embeds.shape[1]
|
| 374 |
+
uncond_input = tokenizer(
|
| 375 |
+
negative_prompt,
|
| 376 |
+
padding="max_length",
|
| 377 |
+
max_length=max_length,
|
| 378 |
+
truncation=True,
|
| 379 |
+
return_tensors="pt",
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
negative_prompt_embeds = text_encoder(
|
| 383 |
+
uncond_input.input_ids.to(device),
|
| 384 |
+
output_hidden_states=True,
|
| 385 |
+
)
|
| 386 |
+
# We are always only interested in the pooled output of the final text encoder
|
| 387 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 388 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 389 |
+
|
| 390 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 391 |
+
|
| 392 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 393 |
+
|
| 394 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 395 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 396 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 397 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 398 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 399 |
+
|
| 400 |
+
if do_classifier_free_guidance:
|
| 401 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 402 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 403 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 404 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 405 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 406 |
+
|
| 407 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 408 |
+
bs_embed * num_images_per_prompt, -1
|
| 409 |
+
)
|
| 410 |
+
if do_classifier_free_guidance:
|
| 411 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 412 |
+
bs_embed * num_images_per_prompt, -1
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 416 |
+
|
| 417 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 418 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 419 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 420 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 421 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 422 |
+
# and should be between [0, 1]
|
| 423 |
+
|
| 424 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 425 |
+
extra_step_kwargs = {}
|
| 426 |
+
if accepts_eta:
|
| 427 |
+
extra_step_kwargs["eta"] = eta
|
| 428 |
+
|
| 429 |
+
# check if the scheduler accepts generator
|
| 430 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 431 |
+
if accepts_generator:
|
| 432 |
+
extra_step_kwargs["generator"] = generator
|
| 433 |
+
return extra_step_kwargs
|
| 434 |
+
|
| 435 |
+
def check_inputs(
|
| 436 |
+
self,
|
| 437 |
+
prompt,
|
| 438 |
+
prompt_2,
|
| 439 |
+
height,
|
| 440 |
+
width,
|
| 441 |
+
callback_steps,
|
| 442 |
+
negative_prompt=None,
|
| 443 |
+
negative_prompt_2=None,
|
| 444 |
+
prompt_embeds=None,
|
| 445 |
+
negative_prompt_embeds=None,
|
| 446 |
+
pooled_prompt_embeds=None,
|
| 447 |
+
negative_pooled_prompt_embeds=None,
|
| 448 |
+
num_images_per_prompt=None,
|
| 449 |
+
):
|
| 450 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 451 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 452 |
+
|
| 453 |
+
if (callback_steps is None) or (
|
| 454 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 455 |
+
):
|
| 456 |
+
raise ValueError(
|
| 457 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 458 |
+
f" {type(callback_steps)}."
|
| 459 |
+
)
|
| 460 |
+
|
| 461 |
+
if prompt is not None and prompt_embeds is not None:
|
| 462 |
+
raise ValueError(
|
| 463 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 464 |
+
" only forward one of the two."
|
| 465 |
+
)
|
| 466 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
| 467 |
+
raise ValueError(
|
| 468 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 469 |
+
" only forward one of the two."
|
| 470 |
+
)
|
| 471 |
+
elif prompt is None and prompt_embeds is None:
|
| 472 |
+
raise ValueError(
|
| 473 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 474 |
+
)
|
| 475 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 476 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 477 |
+
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
| 478 |
+
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
| 479 |
+
|
| 480 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 481 |
+
raise ValueError(
|
| 482 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 483 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 484 |
+
)
|
| 485 |
+
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
| 486 |
+
raise ValueError(
|
| 487 |
+
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
| 488 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 489 |
+
)
|
| 490 |
+
|
| 491 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 492 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 493 |
+
raise ValueError(
|
| 494 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 495 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 496 |
+
f" {negative_prompt_embeds.shape}."
|
| 497 |
+
)
|
| 498 |
+
|
| 499 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 500 |
+
raise ValueError(
|
| 501 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 502 |
+
)
|
| 503 |
+
|
| 504 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 505 |
+
raise ValueError(
|
| 506 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 507 |
+
)
|
| 508 |
+
|
| 509 |
+
# DemoFusion specific checks
|
| 510 |
+
if max(height, width) % 1024 != 0:
|
| 511 |
+
raise ValueError(f"the larger one of `height` and `width` has to be divisible by 1024 but are {height} and {width}.")
|
| 512 |
+
|
| 513 |
+
if num_images_per_prompt != 1:
|
| 514 |
+
warnings.warn("num_images_per_prompt != 1 is not supported by DemoFusion and will be ignored.")
|
| 515 |
+
num_images_per_prompt = 1
|
| 516 |
+
|
| 517 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 518 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 519 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 520 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 521 |
+
raise ValueError(
|
| 522 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 523 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 524 |
+
)
|
| 525 |
+
|
| 526 |
+
if latents is None:
|
| 527 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 528 |
+
else:
|
| 529 |
+
latents = latents.to(device)
|
| 530 |
+
|
| 531 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 532 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 533 |
+
return latents
|
| 534 |
+
|
| 535 |
+
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
|
| 536 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 537 |
+
|
| 538 |
+
passed_add_embed_dim = (
|
| 539 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
|
| 540 |
+
)
|
| 541 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 542 |
+
|
| 543 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
| 544 |
+
raise ValueError(
|
| 545 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 546 |
+
)
|
| 547 |
+
|
| 548 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 549 |
+
return add_time_ids
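# Worked example for the standard SDXL base configuration: the six time ids
# (original_size + crops_coords_top_left + target_size) are each embedded with 256
# dimensions (6 * 256 = 1536) and concatenated with the 1280-dim pooled text embedding,
# giving the 2816 features expected by `unet.add_embedding.linear_1`.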
|
| 550 |
+
|
| 551 |
+
def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
|
| 552 |
+
# Here, we define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113)
|
| 553 |
+
# if the panorama's height/width is smaller than window_size, the number of blocks in that dimension should be 1
|
| 554 |
+
height //= self.vae_scale_factor
|
| 555 |
+
width //= self.vae_scale_factor
|
| 556 |
+
num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
|
| 557 |
+
num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
|
| 558 |
+
total_num_blocks = int(num_blocks_height * num_blocks_width)
|
| 559 |
+
views = []
|
| 560 |
+
for i in range(total_num_blocks):
|
| 561 |
+
h_start = int((i // num_blocks_width) * stride)
|
| 562 |
+
h_end = h_start + window_size
|
| 563 |
+
w_start = int((i % num_blocks_width) * stride)
|
| 564 |
+
w_end = w_start + window_size
|
| 565 |
+
|
| 566 |
+
if h_end > height:
|
| 567 |
+
h_start = int(h_start + height - h_end)
|
| 568 |
+
h_end = int(height)
|
| 569 |
+
if w_end > width:
|
| 570 |
+
w_start = int(w_start + width - w_end)
|
| 571 |
+
w_end = int(width)
|
| 572 |
+
if h_start < 0:
|
| 573 |
+
h_end = int(h_end - h_start)
|
| 574 |
+
h_start = 0
|
| 575 |
+
if w_start < 0:
|
| 576 |
+
w_end = int(w_end - w_start)
|
| 577 |
+
w_start = 0
|
| 578 |
+
|
| 579 |
+
if random_jitter:
|
| 580 |
+
jitter_range = (window_size - stride) // 4
|
| 581 |
+
w_jitter = 0
|
| 582 |
+
h_jitter = 0
|
| 583 |
+
if (w_start != 0) and (w_end != width):
|
| 584 |
+
w_jitter = random.randint(-jitter_range, jitter_range)
|
| 585 |
+
elif (w_start == 0) and (w_end != width):
|
| 586 |
+
w_jitter = random.randint(-jitter_range, 0)
|
| 587 |
+
elif (w_start != 0) and (w_end == width):
|
| 588 |
+
w_jitter = random.randint(0, jitter_range)
|
| 589 |
+
if (h_start != 0) and (h_end != height):
|
| 590 |
+
h_jitter = random.randint(-jitter_range, jitter_range)
|
| 591 |
+
elif (h_start == 0) and (h_end != height):
|
| 592 |
+
h_jitter = random.randint(-jitter_range, 0)
|
| 593 |
+
elif (h_start != 0) and (h_end == height):
|
| 594 |
+
h_jitter = random.randint(0, jitter_range)
|
| 595 |
+
h_start += (h_jitter + jitter_range)
|
| 596 |
+
h_end += (h_jitter + jitter_range)
|
| 597 |
+
w_start += (w_jitter + jitter_range)
|
| 598 |
+
w_end += (w_jitter + jitter_range)
|
| 599 |
+
|
| 600 |
+
views.append((h_start, h_end, w_start, w_end))
|
| 601 |
+
return views
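# Worked example (illustrative): for a 2048x2048 output, get_views divides by
# vae_scale_factor=8 to obtain a 256x256 latent grid; with window_size=128 and stride=64
# this yields a 3x3 grid of overlapping views such as (0, 128, 0, 128), (0, 128, 64, 192),
# ..., (128, 256, 128, 256). With random_jitter=True each view is additionally shifted by
# up to (window_size - stride) // 4 = 16 latent pixels (and offset to account for padding).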
|
| 602 |
+
|
| 603 |
+
def tiled_decode(self, latents, current_height, current_width):
|
| 604 |
+
sample_size = self.unet.config.sample_size
|
| 605 |
+
core_size = self.unet.config.sample_size // 4
|
| 606 |
+
core_stride = core_size
|
| 607 |
+
pad_size = self.unet.config.sample_size // 8 * 3
|
| 608 |
+
decoder_view_batch_size = 1
|
| 609 |
+
|
| 610 |
+
if self.lowvram:
|
| 611 |
+
core_stride = core_size // 2
|
| 612 |
+
pad_size = core_size
|
| 613 |
+
|
| 614 |
+
views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
|
| 615 |
+
views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
|
| 616 |
+
latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), 'constant', 0)
|
| 617 |
+
image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
|
| 618 |
+
count = torch.zeros_like(image).to(latents.device)
|
| 619 |
+
# get the latents corresponding to the current view coordinates
|
| 620 |
+
with self.progress_bar(total=len(views_batch)) as progress_bar:
|
| 621 |
+
for j, batch_view in enumerate(views_batch):
|
| 622 |
+
vb_size = len(batch_view)
|
| 623 |
+
latents_for_view = torch.cat(
|
| 624 |
+
[
|
| 625 |
+
latents_[:, :, h_start:h_end+pad_size*2, w_start:w_end+pad_size*2]
|
| 626 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 627 |
+
]
|
| 628 |
+
).to(self.vae.device)
|
| 629 |
+
image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 630 |
+
h_start, h_end, w_start, w_end = views[j]
|
| 631 |
+
h_start, h_end, w_start, w_end = h_start * self.vae_scale_factor, h_end * self.vae_scale_factor, w_start * self.vae_scale_factor, w_end * self.vae_scale_factor
|
| 632 |
+
p_h_start, p_h_end, p_w_start, p_w_end = pad_size * self.vae_scale_factor, image_patch.size(2) - pad_size * self.vae_scale_factor, pad_size * self.vae_scale_factor, image_patch.size(3) - pad_size * self.vae_scale_factor
|
| 633 |
+
image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end].to(latents.device)
|
| 634 |
+
count[:, :, h_start:h_end, w_start:w_end] += 1
|
| 635 |
+
progress_bar.update()
|
| 636 |
+
image = image / count
|
| 637 |
+
|
| 638 |
+
return image
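# Each tile is decoded together with a `pad_size` halo of surrounding latents; only the
# central region of the decoded patch is written back into `image`, and overlapping
# contributions are averaged via `count` (tiles only overlap in `lowvram` mode, where the
# stride is halved), which suppresses visible seams between decoder tiles.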
|
| 639 |
+
|
| 640 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
| 641 |
+
def upcast_vae(self):
|
| 642 |
+
dtype = self.vae.dtype
|
| 643 |
+
self.vae.to(dtype=torch.float32)
|
| 644 |
+
use_torch_2_0_or_xformers = isinstance(
|
| 645 |
+
self.vae.decoder.mid_block.attentions[0].processor,
|
| 646 |
+
(
|
| 647 |
+
AttnProcessor2_0,
|
| 648 |
+
XFormersAttnProcessor,
|
| 649 |
+
LoRAXFormersAttnProcessor,
|
| 650 |
+
LoRAAttnProcessor2_0,
|
| 651 |
+
),
|
| 652 |
+
)
|
| 653 |
+
# if xformers or torch_2_0 is used attention block does not need
|
| 654 |
+
# to be in float32 which can save lots of memory
|
| 655 |
+
if use_torch_2_0_or_xformers:
|
| 656 |
+
self.vae.post_quant_conv.to(dtype)
|
| 657 |
+
self.vae.decoder.conv_in.to(dtype)
|
| 658 |
+
self.vae.decoder.mid_block.to(dtype)
|
| 659 |
+
|
| 660 |
+
@torch.no_grad()
|
| 661 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 662 |
+
def __call__(
|
| 663 |
+
self,
|
| 664 |
+
prompt: Union[str, List[str]] = None,
|
| 665 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 666 |
+
height: Optional[int] = None,
|
| 667 |
+
width: Optional[int] = None,
|
| 668 |
+
num_inference_steps: int = 50,
|
| 669 |
+
denoising_end: Optional[float] = None,
|
| 670 |
+
guidance_scale: float = 5.0,
|
| 671 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 672 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 673 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 674 |
+
eta: float = 0.0,
|
| 675 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 676 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 677 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 678 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 679 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 680 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 681 |
+
output_type: Optional[str] = "pil",
|
| 682 |
+
return_dict: bool = False,
|
| 683 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 684 |
+
callback_steps: int = 1,
|
| 685 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 686 |
+
guidance_rescale: float = 0.0,
|
| 687 |
+
original_size: Optional[Tuple[int, int]] = None,
|
| 688 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 689 |
+
target_size: Optional[Tuple[int, int]] = None,
|
| 690 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 691 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 692 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 693 |
+
################### DemoFusion specific parameters ####################
|
| 694 |
+
image_lr: Optional[torch.FloatTensor] = None,
|
| 695 |
+
view_batch_size: int = 16,
|
| 696 |
+
multi_decoder: bool = True,
|
| 697 |
+
stride: Optional[int] = 64,
|
| 698 |
+
cosine_scale_1: Optional[float] = 3.,
|
| 699 |
+
cosine_scale_2: Optional[float] = 1.,
|
| 700 |
+
cosine_scale_3: Optional[float] = 1.,
|
| 701 |
+
sigma: Optional[float] = 1.0,
|
| 702 |
+
show_image: bool = False,
|
| 703 |
+
lowvram: bool = False,
|
| 704 |
+
):
|
| 705 |
+
r"""
|
| 706 |
+
Function invoked when calling the pipeline for generation.
|
| 707 |
+
|
| 708 |
+
Args:
|
| 709 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 710 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
| 711 |
+
instead.
|
| 712 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 713 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 714 |
+
used in both text-encoders
|
| 715 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 716 |
+
The height in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 717 |
+
Anything below 512 pixels won't work well for
|
| 718 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 719 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 720 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 721 |
+
The width in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 722 |
+
Anything below 512 pixels won't work well for
|
| 723 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 724 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 725 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 726 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 727 |
+
expense of slower inference.
|
| 728 |
+
denoising_end (`float`, *optional*):
|
| 729 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 730 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 731 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
| 732 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
| 733 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 734 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
|
| 735 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 736 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 737 |
+
`guidance_scale` is defined as `w` of equation 2 of [Imagen
|
| 738 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 739 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 740 |
+
usually at the expense of lower image quality.
|
| 741 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 742 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 743 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 744 |
+
less than `1`).
|
| 745 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 746 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 747 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 748 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 749 |
+
The number of images to generate per prompt.
|
| 750 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 751 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
| 752 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 753 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 754 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 755 |
+
to make generation deterministic.
|
| 756 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 757 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 758 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 759 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 760 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 761 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 762 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 763 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 764 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 765 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 766 |
+
argument.
|
| 767 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 768 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 769 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 770 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 771 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 772 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 773 |
+
input argument.
|
| 774 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 775 |
+
The output format of the generated image. Choose between
|
| 776 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 777 |
+
return_dict (`bool`, *optional*, defaults to `False`):
|
| 778 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
| 779 |
+
of a plain tuple.
|
| 780 |
+
callback (`Callable`, *optional*):
|
| 781 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 782 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 783 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 784 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 785 |
+
called at every step.
|
| 786 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 787 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 788 |
+
`self.processor` in
|
| 789 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 790 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 791 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 792 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
|
| 793 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
| 794 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 795 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 796 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 797 |
+
`original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
|
| 798 |
+
explained in section 2.2 of
|
| 799 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 800 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 801 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 802 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 803 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 804 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 805 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 806 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 807 |
+
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
|
| 808 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 809 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 810 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 811 |
+
micro-conditioning as explained in section 2.2 of
|
| 812 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 813 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 814 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 815 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 816 |
+
micro-conditioning as explained in section 2.2 of
|
| 817 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 818 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 819 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 820 |
+
To negatively condition the generation process based on a target image resolution. It should be the same
|
| 821 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 822 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 823 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 824 |
+
################### DemoFusion specific parameters ####################
|
| 825 |
+
image_lr (`torch.FloatTensor`, *optional*, defaults to None):
|
| 826 |
+
Low-resolution image input for upscaling. If provided, DemoFusion will encode it as the initial latent representation.
|
| 827 |
+
view_batch_size (`int`, defaults to 16):
|
| 828 |
+
The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
|
| 829 |
+
efficiency but comes with increased GPU memory requirements.
|
| 830 |
+
multi_decoder (`bool`, defaults to True):
|
| 831 |
+
Determines whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
|
| 832 |
+
a tiled decoder becomes necessary.
|
| 833 |
+
stride (`int`, defaults to 64):
|
| 834 |
+
The stride of moving local patches. A smaller stride is better for alleviating seam issues,
|
| 835 |
+
but it also introduces additional computational overhead and inference time.
|
| 836 |
+
cosine_scale_1 (`float`, defaults to 3):
|
| 837 |
+
Controls the strength of the skip residual. For its specific impact, please refer to Appendix C
|
| 838 |
+
in the DemoFusion paper.
|
| 839 |
+
cosine_scale_2 (`float`, defaults to 1):
|
| 840 |
+
Controls the strength of dilated sampling. For its specific impact, please refer to Appendix C
|
| 841 |
+
in the DemoFusion paper.
|
| 842 |
+
cosine_scale_3 (`float`, defaults to 1):
|
| 843 |
+
Controls the strength of the Gaussian filter. For its specific impact, please refer to Appendix C
|
| 844 |
+
in the DemoFusion paper.
|
| 845 |
+
sigma (`float`, defaults to 1):
|
| 846 |
+
The standard deviation of the Gaussian filter.
|
| 847 |
+
show_image (`bool`, defaults to False):
|
| 848 |
+
Determines whether to show intermediate results during generation.
|
| 849 |
+
lowvram (`bool`, defaults to False):
|
| 850 |
+
Try to fit the pipeline into 8 GB of VRAM; requires xformers to be installed.
|
| 851 |
+
|
| 852 |
+
Examples:
|
| 853 |
+
|
| 854 |
+
Returns:
|
| 855 |
+
A `list` with the generated image from each phase.
|
| 856 |
+
"""
|
| 857 |
+
|
| 858 |
+
# 0. Default height and width to unet
|
| 859 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 860 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 861 |
+
|
| 862 |
+
x1_size = self.default_sample_size * self.vae_scale_factor
|
| 863 |
+
|
| 864 |
+
height_scale = height / x1_size
|
| 865 |
+
width_scale = width / x1_size
|
| 866 |
+
scale_num = int(max(height_scale, width_scale))
|
| 867 |
+
aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
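# Example: height=3072, width=3072 with the 1024-px SDXL base size gives height_scale=3.0,
# width_scale=3.0, scale_num=3 and aspect_ratio=1.0; height=2048, width=4096 gives
# scale_num=4 and aspect_ratio=0.5.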
|
| 868 |
+
|
| 869 |
+
original_size = original_size or (height, width)
|
| 870 |
+
target_size = target_size or (height, width)
|
| 871 |
+
|
| 872 |
+
# 1. Check inputs. Raise error if not correct
|
| 873 |
+
self.check_inputs(
|
| 874 |
+
prompt,
|
| 875 |
+
prompt_2,
|
| 876 |
+
height,
|
| 877 |
+
width,
|
| 878 |
+
callback_steps,
|
| 879 |
+
negative_prompt,
|
| 880 |
+
negative_prompt_2,
|
| 881 |
+
prompt_embeds,
|
| 882 |
+
negative_prompt_embeds,
|
| 883 |
+
pooled_prompt_embeds,
|
| 884 |
+
negative_pooled_prompt_embeds,
|
| 885 |
+
num_images_per_prompt,
|
| 886 |
+
)
|
| 887 |
+
|
| 888 |
+
# 2. Define call parameters
|
| 889 |
+
if prompt is not None and isinstance(prompt, str):
|
| 890 |
+
batch_size = 1
|
| 891 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 892 |
+
batch_size = len(prompt)
|
| 893 |
+
else:
|
| 894 |
+
batch_size = prompt_embeds.shape[0]
|
| 895 |
+
|
| 896 |
+
device = self._execution_device
|
| 897 |
+
self.lowvram = lowvram
|
| 898 |
+
if self.lowvram:
|
| 899 |
+
self.vae.cpu()
|
| 900 |
+
self.unet.cpu()
|
| 901 |
+
self.text_encoder.to(device)
|
| 902 |
+
self.text_encoder_2.to(device)
|
| 903 |
+
image_lr = image_lr.cpu() if image_lr is not None else None
|
| 904 |
+
|
| 905 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 906 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 907 |
+
# corresponds to doing no classifier free guidance.
|
| 908 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 909 |
+
|
| 910 |
+
# 3. Encode input prompt
|
| 911 |
+
text_encoder_lora_scale = (
|
| 912 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 913 |
+
)
|
| 914 |
+
(
|
| 915 |
+
prompt_embeds,
|
| 916 |
+
negative_prompt_embeds,
|
| 917 |
+
pooled_prompt_embeds,
|
| 918 |
+
negative_pooled_prompt_embeds,
|
| 919 |
+
) = self.encode_prompt(
|
| 920 |
+
prompt=prompt,
|
| 921 |
+
prompt_2=prompt_2,
|
| 922 |
+
device=device,
|
| 923 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 924 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 925 |
+
negative_prompt=negative_prompt,
|
| 926 |
+
negative_prompt_2=negative_prompt_2,
|
| 927 |
+
prompt_embeds=prompt_embeds,
|
| 928 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 929 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 930 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 931 |
+
lora_scale=text_encoder_lora_scale,
|
| 932 |
+
)
|
| 933 |
+
|
| 934 |
+
# 4. Prepare timesteps
|
| 935 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 936 |
+
|
| 937 |
+
timesteps = self.scheduler.timesteps
|
| 938 |
+
|
| 939 |
+
# 5. Prepare latent variables
|
| 940 |
+
num_channels_latents = self.unet.config.in_channels
|
| 941 |
+
latents = self.prepare_latents(
|
| 942 |
+
batch_size * num_images_per_prompt,
|
| 943 |
+
num_channels_latents,
|
| 944 |
+
height // scale_num,
|
| 945 |
+
width // scale_num,
|
| 946 |
+
prompt_embeds.dtype,
|
| 947 |
+
device,
|
| 948 |
+
generator,
|
| 949 |
+
latents,
|
| 950 |
+
)
|
| 951 |
+
|
| 952 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 953 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 954 |
+
|
| 955 |
+
# 7. Prepare added time ids & embeddings
|
| 956 |
+
add_text_embeds = pooled_prompt_embeds
|
| 957 |
+
add_time_ids = self._get_add_time_ids(
|
| 958 |
+
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
|
| 959 |
+
)
|
| 960 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 961 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 962 |
+
negative_original_size,
|
| 963 |
+
negative_crops_coords_top_left,
|
| 964 |
+
negative_target_size,
|
| 965 |
+
dtype=prompt_embeds.dtype,
|
| 966 |
+
)
|
| 967 |
+
else:
|
| 968 |
+
negative_add_time_ids = add_time_ids
|
| 969 |
+
|
| 970 |
+
if do_classifier_free_guidance:
|
| 971 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 972 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 973 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 974 |
+
del negative_prompt_embeds, negative_pooled_prompt_embeds, negative_add_time_ids
|
| 975 |
+
|
| 976 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 977 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 978 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 979 |
+
|
| 980 |
+
# 8. Denoising loop
|
| 981 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 982 |
+
|
| 983 |
+
# 7.1 Apply denoising_end
|
| 984 |
+
if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
|
| 985 |
+
discrete_timestep_cutoff = int(
|
| 986 |
+
round(
|
| 987 |
+
self.scheduler.config.num_train_timesteps
|
| 988 |
+
- (denoising_end * self.scheduler.config.num_train_timesteps)
|
| 989 |
+
)
|
| 990 |
+
)
|
| 991 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 992 |
+
timesteps = timesteps[:num_inference_steps]
|
| 993 |
+
|
| 994 |
+
output_images = []
|
| 995 |
+
|
| 996 |
+
###################################################### Phase Initialization ########################################################
|
| 997 |
+
|
| 998 |
+
if self.lowvram:
|
| 999 |
+
self.text_encoder.cpu()
|
| 1000 |
+
self.text_encoder_2.cpu()
|
| 1001 |
+
|
| 1002 |
+
if image_lr is None:
|
| 1003 |
+
print("### Phase 1 Denoising ###")
|
| 1004 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1005 |
+
for i, t in enumerate(timesteps):
|
| 1006 |
+
|
| 1007 |
+
if self.lowvram:
|
| 1008 |
+
self.vae.cpu()
|
| 1009 |
+
self.unet.to(device)
|
| 1010 |
+
|
| 1011 |
+
latents_for_view = latents
|
| 1012 |
+
|
| 1013 |
+
# expand the latents if we are doing classifier free guidance
|
| 1014 |
+
latent_model_input = (
|
| 1015 |
+
latents.repeat_interleave(2, dim=0)
|
| 1016 |
+
if do_classifier_free_guidance
|
| 1017 |
+
else latents
|
| 1018 |
+
)
|
| 1019 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1020 |
+
|
| 1021 |
+
# predict the noise residual
|
| 1022 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 1023 |
+
noise_pred = self.unet(
|
| 1024 |
+
latent_model_input,
|
| 1025 |
+
t,
|
| 1026 |
+
encoder_hidden_states=prompt_embeds,
|
| 1027 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1028 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1029 |
+
return_dict=False,
|
| 1030 |
+
)[0]
|
| 1031 |
+
|
| 1032 |
+
# perform guidance
|
| 1033 |
+
if do_classifier_free_guidance:
|
| 1034 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1035 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1036 |
+
|
| 1037 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1038 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1039 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
|
| 1040 |
+
|
| 1041 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1042 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1043 |
+
|
| 1044 |
+
# call the callback, if provided
|
| 1045 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1046 |
+
progress_bar.update()
|
| 1047 |
+
if callback is not None and i % callback_steps == 0:
|
| 1048 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1049 |
+
callback(step_idx, t, latents)
|
| 1050 |
+
del latents_for_view, latent_model_input, noise_pred, noise_pred_text, noise_pred_uncond
|
| 1051 |
+
else:
|
| 1052 |
+
print("### Encoding Real Image ###")
|
| 1053 |
+
latents = self.vae.encode(image_lr)
|
| 1054 |
+
latents = latents.latent_dist.sample() * self.vae.config.scaling_factor
|
| 1055 |
+
|
| 1056 |
+
anchor_mean = latents.mean()
|
| 1057 |
+
anchor_std = latents.std()
|
| 1058 |
+
if self.lowvram:
|
| 1059 |
+
latents = latents.cpu()
|
| 1060 |
+
torch.cuda.empty_cache()
|
| 1061 |
+
if not output_type == "latent":
|
| 1062 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1063 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1064 |
+
|
| 1065 |
+
if self.lowvram:
|
| 1066 |
+
needs_upcasting = False # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
|
| 1067 |
+
self.unet.cpu()
|
| 1068 |
+
self.vae.to(device)
|
| 1069 |
+
|
| 1070 |
+
if needs_upcasting:
|
| 1071 |
+
self.upcast_vae()
|
| 1072 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1073 |
+
if self.lowvram and multi_decoder:
|
| 1074 |
+
current_width_height = self.unet.config.sample_size * self.vae_scale_factor
|
| 1075 |
+
image = self.tiled_decode(latents, current_width_height, current_width_height)
|
| 1076 |
+
else:
|
| 1077 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1078 |
+
# cast back to fp16 if needed
|
| 1079 |
+
if needs_upcasting:
|
| 1080 |
+
self.vae.to(dtype=torch.float16)
|
| 1081 |
+
|
| 1082 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1083 |
+
if show_image:
|
| 1084 |
+
plt.figure(figsize=(10, 10))
|
| 1085 |
+
plt.imshow(image[0])
|
| 1086 |
+
plt.axis('off') # Turn off axis numbers and ticks
|
| 1087 |
+
plt.show()
|
| 1088 |
+
output_images.append(image[0])
|
| 1089 |
+
|
| 1090 |
+
####################################################### Phase Upscaling #####################################################
|
| 1091 |
+
if image_lr is None:
|
| 1092 |
+
starting_scale = 2
|
| 1093 |
+
else:
|
| 1094 |
+
starting_scale = 1
|
| 1095 |
+
for current_scale_num in range(starting_scale, scale_num + 1):
|
| 1096 |
+
if self.lowvram:
|
| 1097 |
+
latents = latents.to(device)
|
| 1098 |
+
self.unet.to(device)
|
| 1099 |
+
torch.cuda.empty_cache()
|
| 1100 |
+
print("### Phase {} Denoising ###".format(current_scale_num))
|
| 1101 |
+
current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1102 |
+
current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1103 |
+
if height > width:
|
| 1104 |
+
current_width = int(current_width * aspect_ratio)
|
| 1105 |
+
else:
|
| 1106 |
+
current_height = int(current_height * aspect_ratio)
|
| 1107 |
+
|
| 1108 |
+
latents = F.interpolate(latents.to(device), size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)), mode='bicubic')
|
| 1109 |
+
|
| 1110 |
+
noise_latents = []
|
| 1111 |
+
noise = torch.randn_like(latents)
|
| 1112 |
+
for timestep in timesteps:
|
| 1113 |
+
noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
|
| 1114 |
+
noise_latents.append(noise_latent)
|
| 1115 |
+
latents = noise_latents[0]
|
| 1116 |
+
|
| 1117 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1118 |
+
for i, t in enumerate(timesteps):
|
| 1119 |
+
count = torch.zeros_like(latents)
|
| 1120 |
+
value = torch.zeros_like(latents)
|
| 1121 |
+
cosine_factor = 0.5 * (1 + torch.cos(torch.pi * (self.scheduler.config.num_train_timesteps - t) / self.scheduler.config.num_train_timesteps)).cpu()
|
| 1122 |
+
|
| 1123 |
+
c1 = cosine_factor ** cosine_scale_1
|
| 1124 |
+
latents = latents * (1 - c1) + noise_latents[i] * c1
|
| 1125 |
+
|
| 1126 |
+
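# Skip residual: cosine_factor runs from ~1 at the first (noisiest) timestep to ~0 at the
# last, so c1 = cosine_factor ** cosine_scale_1 lets the re-noised, upsampled result of
# the previous phase dominate early steps and fade out as denoising progresses; a larger
# cosine_scale_1 makes that influence decay faster.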
############################################# MultiDiffusion #############################################
|
| 1127 |
+
|
| 1128 |
+
views = self.get_views(current_height, current_width, stride=stride, window_size=self.unet.config.sample_size, random_jitter=True)
|
| 1129 |
+
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
|
| 1130 |
+
|
| 1131 |
+
jitter_range = (self.unet.config.sample_size - stride) // 4
|
| 1132 |
+
latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), 'constant', 0)
|
| 1133 |
+
|
| 1134 |
+
count_local = torch.zeros_like(latents_)
|
| 1135 |
+
value_local = torch.zeros_like(latents_)
|
| 1136 |
+
|
| 1137 |
+
for j, batch_view in enumerate(views_batch):
|
| 1138 |
+
vb_size = len(batch_view)
|
| 1139 |
+
|
| 1140 |
+
# get the latents corresponding to the current view coordinates
|
| 1141 |
+
latents_for_view = torch.cat(
|
| 1142 |
+
[
|
| 1143 |
+
latents_[:, :, h_start:h_end, w_start:w_end]
|
| 1144 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 1145 |
+
]
|
| 1146 |
+
)
|
| 1147 |
+
|
| 1148 |
+
# expand the latents if we are doing classifier free guidance
|
| 1149 |
+
latent_model_input = latents_for_view
|
| 1150 |
+
latent_model_input = (
|
| 1151 |
+
latent_model_input.repeat_interleave(2, dim=0)
|
| 1152 |
+
if do_classifier_free_guidance
|
| 1153 |
+
else latent_model_input
|
| 1154 |
+
)
|
| 1155 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1156 |
+
|
| 1157 |
+
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
|
| 1158 |
+
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
|
| 1159 |
+
add_time_ids_input = []
|
| 1160 |
+
for h_start, h_end, w_start, w_end in batch_view:
|
| 1161 |
+
add_time_ids_ = add_time_ids.clone()
|
| 1162 |
+
add_time_ids_[:, 2] = h_start * self.vae_scale_factor
|
| 1163 |
+
add_time_ids_[:, 3] = w_start * self.vae_scale_factor
|
| 1164 |
+
add_time_ids_input.append(add_time_ids_)
|
| 1165 |
+
add_time_ids_input = torch.cat(add_time_ids_input)
|
| 1166 |
+
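# SDXL micro-conditioning: elements 2 and 3 of add_time_ids are the crop top-left
# coordinates, so each window is conditioned on its own offset within the full image,
# which helps keep the overlapping views globally consistent.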
|
| 1167 |
+
# predict the noise residual
|
| 1168 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
|
| 1169 |
+
noise_pred = self.unet(
|
| 1170 |
+
latent_model_input,
|
| 1171 |
+
t,
|
| 1172 |
+
encoder_hidden_states=prompt_embeds_input,
|
| 1173 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1174 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1175 |
+
return_dict=False,
|
| 1176 |
+
)[0]
|
| 1177 |
+
|
| 1178 |
+
if do_classifier_free_guidance:
|
| 1179 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1180 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1181 |
+
|
| 1182 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1183 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1184 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
|
| 1185 |
+
|
| 1186 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1187 |
+
self.scheduler._init_step_index(t)
|
| 1188 |
+
latents_denoised_batch = self.scheduler.step(
|
| 1189 |
+
noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]
|
| 1190 |
+
|
| 1191 |
+
# extract value from batch
|
| 1192 |
+
for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
|
| 1193 |
+
latents_denoised_batch.chunk(vb_size), batch_view
|
| 1194 |
+
):
|
| 1195 |
+
value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
|
| 1196 |
+
count_local[:, :, h_start:h_end, w_start:w_end] += 1
|
| 1197 |
+
|
| 1198 |
+
value_local = value_local[: ,:, jitter_range: jitter_range + current_height // self.vae_scale_factor, jitter_range: jitter_range + current_width // self.vae_scale_factor]
|
| 1199 |
+
count_local = count_local[: ,:, jitter_range: jitter_range + current_height // self.vae_scale_factor, jitter_range: jitter_range + current_width // self.vae_scale_factor]
|
| 1200 |
+
|
| 1201 |
+
c2 = cosine_factor ** cosine_scale_2
|
| 1202 |
+
|
| 1203 |
+
value += value_local / count_local * (1 - c2)
|
| 1204 |
+
count += torch.ones_like(value_local) * (1 - c2)
|
| 1205 |
+
|
| 1206 |
+
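# MultiDiffusion path: the overlapping, randomly jittered windows were denoised
# independently and are averaged wherever they overlap (value_local / count_local).
# Because c2 decays from ~1 to ~0 over the denoising schedule, the (1 - c2) weight lets
# these local patches take over from the global dilated-sampling path as the content
# firms up.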
############################################# Dilated Sampling #############################################
|
| 1207 |
+
|
| 1208 |
+
views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)]
|
| 1209 |
+
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
|
| 1210 |
+
|
| 1211 |
+
h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
|
| 1212 |
+
w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
|
| 1213 |
+
latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), 'constant', 0)
|
| 1214 |
+
|
| 1215 |
+
count_global = torch.zeros_like(latents_)
|
| 1216 |
+
value_global = torch.zeros_like(latents_)
|
| 1217 |
+
|
| 1218 |
+
c3 = 0.99 * cosine_factor ** cosine_scale_3 + 1e-2
|
| 1219 |
+
std_, mean_ = latents_.std(), latents_.mean()
|
| 1220 |
+
latents_gaussian = gaussian_filter(latents_, kernel_size=(2*current_scale_num-1), sigma=sigma*c3)
|
| 1221 |
+
latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_
|
| 1222 |
+
|
| 1223 |
+
for j, batch_view in enumerate(views_batch):
|
| 1224 |
+
latents_for_view = torch.cat(
|
| 1225 |
+
[
|
| 1226 |
+
latents_[:, :, h::current_scale_num, w::current_scale_num]
|
| 1227 |
+
for h, w in batch_view
|
| 1228 |
+
]
|
| 1229 |
+
)
|
| 1230 |
+
latents_for_view_gaussian = torch.cat(
|
| 1231 |
+
[
|
| 1232 |
+
latents_gaussian[:, :, h::current_scale_num, w::current_scale_num]
|
| 1233 |
+
for h, w in batch_view
|
| 1234 |
+
]
|
| 1235 |
+
)
|
| 1236 |
+
|
| 1237 |
+
vb_size = latents_for_view.size(0)
|
| 1238 |
+
|
| 1239 |
+
# expand the latents if we are doing classifier free guidance
|
| 1240 |
+
latent_model_input = latents_for_view_gaussian
|
| 1241 |
+
latent_model_input = (
|
| 1242 |
+
latent_model_input.repeat_interleave(2, dim=0)
|
| 1243 |
+
if do_classifier_free_guidance
|
| 1244 |
+
else latent_model_input
|
| 1245 |
+
)
|
| 1246 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1247 |
+
|
| 1248 |
+
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
|
| 1249 |
+
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
|
| 1250 |
+
add_time_ids_input = torch.cat([add_time_ids] * vb_size)
|
| 1251 |
+
|
| 1252 |
+
# predict the noise residual
|
| 1253 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
|
| 1254 |
+
noise_pred = self.unet(
|
| 1255 |
+
latent_model_input,
|
| 1256 |
+
t,
|
| 1257 |
+
encoder_hidden_states=prompt_embeds_input,
|
| 1258 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1259 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1260 |
+
return_dict=False,
|
| 1261 |
+
)[0]
|
| 1262 |
+
|
| 1263 |
+
if do_classifier_free_guidance:
|
| 1264 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1265 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1266 |
+
|
| 1267 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1268 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1269 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
|
| 1270 |
+
|
| 1271 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1272 |
+
self.scheduler._init_step_index(t)
|
| 1273 |
+
latents_denoised_batch = self.scheduler.step(
|
| 1274 |
+
noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]
|
| 1275 |
+
|
| 1276 |
+
# extract value from batch
|
| 1277 |
+
for latents_view_denoised, (h, w) in zip(
|
| 1278 |
+
latents_denoised_batch.chunk(vb_size), batch_view
|
| 1279 |
+
):
|
| 1280 |
+
value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
|
| 1281 |
+
count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
|
| 1282 |
+
|
| 1283 |
+
c2 = cosine_factor ** cosine_scale_2
|
| 1284 |
+
|
| 1285 |
+
value_global = value_global[: ,:, h_pad:, w_pad:]
|
| 1286 |
+
|
| 1287 |
+
value += value_global * c2
|
| 1288 |
+
count += torch.ones_like(value_global) * c2
|
| 1289 |
+
|
| 1290 |
+
###########################################################
|
| 1291 |
+
|
| 1292 |
+
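# Dilated-sampling path: every current_scale_num-th latent pixel (shifted by h, w) forms a
# low-resolution "global" view; the noise for each view is predicted from a Gaussian-blurred
# copy of the latents, which keeps the overall structure coherent. Its contribution is
# weighted by c2, so it dominates the early, noisy steps, and the two paths are then fused
# below as a weighted average.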
latents = torch.where(count > 0, value / count, value)
|
| 1293 |
+
|
| 1294 |
+
# call the callback, if provided
|
| 1295 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1296 |
+
progress_bar.update()
|
| 1297 |
+
if callback is not None and i % callback_steps == 0:
|
| 1298 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1299 |
+
callback(step_idx, t, latents)
|
| 1300 |
+
|
| 1301 |
+
#########################################################################################################################################
|
| 1302 |
+
|
| 1303 |
+
latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
|
| 1304 |
+
if self.lowvram:
|
| 1305 |
+
latents = latents.cpu()
|
| 1306 |
+
torch.cuda.empty_cache()
|
| 1307 |
+
if not output_type == "latent":
|
| 1308 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1309 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1310 |
+
|
| 1311 |
+
if self.lowvram:
|
| 1312 |
+
needs_upcasting = False # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
|
| 1313 |
+
self.unet.cpu()
|
| 1314 |
+
self.vae.to(device)
|
| 1315 |
+
|
| 1316 |
+
if needs_upcasting:
|
| 1317 |
+
self.upcast_vae()
|
| 1318 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1319 |
+
|
| 1320 |
+
print("### Phase {} Decoding ###".format(current_scale_num))
|
| 1321 |
+
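# multi_decoder switches to tiled decoding (tiled_decode) instead of a single VAE pass,
# trading a little speed for a much smaller decoding memory peak at high resolutions.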
if multi_decoder:
|
| 1322 |
+
image = self.tiled_decode(latents, current_height, current_width)
|
| 1323 |
+
else:
|
| 1324 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1325 |
+
|
| 1326 |
+
# cast back to fp16 if needed
|
| 1327 |
+
if needs_upcasting:
|
| 1328 |
+
self.vae.to(dtype=torch.float16)
|
| 1329 |
+
else:
|
| 1330 |
+
image = latents
|
| 1331 |
+
|
| 1332 |
+
if not output_type == "latent":
|
| 1333 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1334 |
+
if show_image:
|
| 1335 |
+
plt.figure(figsize=(10, 10))
|
| 1336 |
+
plt.imshow(image[0])
|
| 1337 |
+
plt.axis('off') # Turn off axis numbers and ticks
|
| 1338 |
+
plt.show()
|
| 1339 |
+
output_images.append(image[0])
|
| 1340 |
+
|
| 1341 |
+
# Offload all models
|
| 1342 |
+
self.maybe_free_model_hooks()
|
| 1343 |
+
|
| 1344 |
+
return output_images
|
| 1345 |
+
|
| 1346 |
+
# Override to properly handle the loading and unloading of the additional text encoder.
|
| 1347 |
+
def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
|
| 1348 |
+
# We could have accessed the unet config from `lora_state_dict()` too. We pass
|
| 1349 |
+
# it here explicitly to be able to tell that it's coming from an SDXL
|
| 1350 |
+
# pipeline.
|
| 1351 |
+
|
| 1352 |
+
# Remove any existing hooks.
|
| 1353 |
+
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
| 1354 |
+
from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
|
| 1355 |
+
else:
|
| 1356 |
+
raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
|
| 1357 |
+
|
| 1358 |
+
is_model_cpu_offload = False
|
| 1359 |
+
is_sequential_cpu_offload = False
|
| 1360 |
+
recursive = False
|
| 1361 |
+
for _, component in self.components.items():
|
| 1362 |
+
if isinstance(component, torch.nn.Module):
|
| 1363 |
+
if hasattr(component, "_hf_hook"):
|
| 1364 |
+
is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
|
| 1365 |
+
is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
|
| 1366 |
+
logger.info(
|
| 1367 |
+
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
|
| 1368 |
+
)
|
| 1369 |
+
recursive = is_sequential_cpu_offload
|
| 1370 |
+
remove_hook_from_module(component, recurse=recursive)
|
| 1371 |
+
state_dict, network_alphas = self.lora_state_dict(
|
| 1372 |
+
pretrained_model_name_or_path_or_dict,
|
| 1373 |
+
unet_config=self.unet.config,
|
| 1374 |
+
**kwargs,
|
| 1375 |
+
)
|
| 1376 |
+
self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
|
| 1377 |
+
|
| 1378 |
+
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
|
| 1379 |
+
if len(text_encoder_state_dict) > 0:
|
| 1380 |
+
self.load_lora_into_text_encoder(
|
| 1381 |
+
text_encoder_state_dict,
|
| 1382 |
+
network_alphas=network_alphas,
|
| 1383 |
+
text_encoder=self.text_encoder,
|
| 1384 |
+
prefix="text_encoder",
|
| 1385 |
+
lora_scale=self.lora_scale,
|
| 1386 |
+
)
|
| 1387 |
+
|
| 1388 |
+
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
|
| 1389 |
+
if len(text_encoder_2_state_dict) > 0:
|
| 1390 |
+
self.load_lora_into_text_encoder(
|
| 1391 |
+
text_encoder_2_state_dict,
|
| 1392 |
+
network_alphas=network_alphas,
|
| 1393 |
+
text_encoder=self.text_encoder_2,
|
| 1394 |
+
prefix="text_encoder_2",
|
| 1395 |
+
lora_scale=self.lora_scale,
|
| 1396 |
+
)
|
| 1397 |
+
|
| 1398 |
+
# Offload back.
|
| 1399 |
+
if is_model_cpu_offload:
|
| 1400 |
+
self.enable_model_cpu_offload()
|
| 1401 |
+
elif is_sequential_cpu_offload:
|
| 1402 |
+
self.enable_sequential_cpu_offload()
|
| 1403 |
+
|
| 1404 |
+
@classmethod
|
| 1405 |
+
def save_lora_weights(
|
| 1406 |
+
self,
|
| 1407 |
+
save_directory: Union[str, os.PathLike],
|
| 1408 |
+
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1409 |
+
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1410 |
+
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1411 |
+
is_main_process: bool = True,
|
| 1412 |
+
weight_name: str = None,
|
| 1413 |
+
save_function: Callable = None,
|
| 1414 |
+
safe_serialization: bool = True,
|
| 1415 |
+
):
|
| 1416 |
+
state_dict = {}
|
| 1417 |
+
|
| 1418 |
+
def pack_weights(layers, prefix):
|
| 1419 |
+
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
|
| 1420 |
+
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
|
| 1421 |
+
return layers_state_dict
|
| 1422 |
+
|
| 1423 |
+
if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
|
| 1424 |
+
raise ValueError(
|
| 1425 |
+
"You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
|
| 1426 |
+
)
|
| 1427 |
+
|
| 1428 |
+
if unet_lora_layers:
|
| 1429 |
+
state_dict.update(pack_weights(unet_lora_layers, "unet"))
|
| 1430 |
+
|
| 1431 |
+
if text_encoder_lora_layers and text_encoder_2_lora_layers:
|
| 1432 |
+
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
|
| 1433 |
+
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
|
| 1434 |
+
|
| 1435 |
+
self.write_lora_layers(
|
| 1436 |
+
state_dict=state_dict,
|
| 1437 |
+
save_directory=save_directory,
|
| 1438 |
+
is_main_process=is_main_process,
|
| 1439 |
+
weight_name=weight_name,
|
| 1440 |
+
save_function=save_function,
|
| 1441 |
+
safe_serialization=safe_serialization,
|
| 1442 |
+
)
|
| 1443 |
+
|
| 1444 |
+
def _remove_text_encoder_monkey_patch(self):
|
| 1445 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
|
| 1446 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
|
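For orientation, here is a minimal sketch of how this pipeline is typically driven end to end. The class name `DemoFusionSDXLPipeline` and the concrete argument values are assumptions based on this repository's demo scripts rather than something shown in this diff; the keyword arguments mirror the variables used in `__call__` above.

import torch
from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline  # assumed class name

pipe = DemoFusionSDXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# The call returns one image per phase; the last entry is the final high-resolution result.
images = pipe(
    "a photo of a cat sitting on a windowsill, detailed fur, soft light",
    height=2048,
    width=2048,
    num_inference_steps=50,
    guidance_scale=7.5,
    cosine_scale_1=3,
    cosine_scale_2=1,
    cosine_scale_3=1,
    sigma=0.8,
    view_batch_size=4,
    stride=64,
    multi_decoder=True,
    show_image=False,
)
images[-1].save("demofusion_2048.png")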
pipeline_demofusion_sdxl_controlnet.py
ADDED
|
@@ -0,0 +1,1795 @@
| 1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
import inspect
|
| 17 |
+
import os
|
| 18 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 19 |
+
import matplotlib.pyplot as plt
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import PIL.Image
|
| 23 |
+
import torch
|
| 24 |
+
import torch.nn.functional as F
|
| 25 |
+
import random
|
| 26 |
+
import warnings
|
| 27 |
+
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
| 28 |
+
|
| 29 |
+
from diffusers.utils.import_utils import is_invisible_watermark_available
|
| 30 |
+
|
| 31 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 32 |
+
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 33 |
+
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
|
| 34 |
+
from diffusers.models.attention_processor import (
|
| 35 |
+
AttnProcessor2_0,
|
| 36 |
+
LoRAAttnProcessor2_0,
|
| 37 |
+
LoRAXFormersAttnProcessor,
|
| 38 |
+
XFormersAttnProcessor,
|
| 39 |
+
)
|
| 40 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 41 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 42 |
+
from diffusers.utils import (
|
| 43 |
+
is_accelerate_available,
|
| 44 |
+
is_accelerate_version,
|
| 45 |
+
logging,
|
| 46 |
+
replace_example_docstring,
|
| 47 |
+
)
|
| 48 |
+
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
|
| 49 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 50 |
+
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
if is_invisible_watermark_available():
|
| 54 |
+
from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
|
| 55 |
+
|
| 56 |
+
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
EXAMPLE_DOC_STRING = """
|
| 63 |
+
Examples:
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
|
| 67 |
+
x_coord = torch.arange(kernel_size)
|
| 68 |
+
gaussian_1d = torch.exp(-(x_coord - (kernel_size - 1) / 2) ** 2 / (2 * sigma ** 2))
|
| 69 |
+
gaussian_1d = gaussian_1d / gaussian_1d.sum()
|
| 70 |
+
gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
|
| 71 |
+
kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)
|
| 72 |
+
|
| 73 |
+
return kernel
|
| 74 |
+
|
| 75 |
+
def gaussian_filter(latents, kernel_size=3, sigma=1.0):
|
| 76 |
+
channels = latents.shape[1]
|
| 77 |
+
kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
|
| 78 |
+
blurred_latents = F.conv2d(latents, kernel, padding=kernel_size//2, groups=channels)
|
| 79 |
+
|
| 80 |
+
return blurred_latents
|
| 81 |
+
|
| 82 |
+
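# Illustrative use of the helpers above: gaussian_kernel builds one Gaussian per channel
# and gaussian_filter applies it depthwise (groups=channels), so each latent channel is
# blurred independently and, for odd kernel sizes, the spatial size is preserved:
#
#     latents = torch.randn(1, 4, 128, 128)
#     blurred = gaussian_filter(latents, kernel_size=3, sigma=1.0)
#     assert blurred.shape == latents.shape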
class DemoFusionSDXLControlNetPipeline(
|
| 83 |
+
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
|
| 84 |
+
):
|
| 85 |
+
r"""
|
| 86 |
+
Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance.
|
| 87 |
+
|
| 88 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 89 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 90 |
+
|
| 91 |
+
The pipeline also inherits the following loading methods:
|
| 92 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 93 |
+
- [`loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 94 |
+
- [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 95 |
+
|
| 96 |
+
Args:
|
| 97 |
+
vae ([`AutoencoderKL`]):
|
| 98 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 99 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 100 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 101 |
+
text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
|
| 102 |
+
Second frozen text-encoder
|
| 103 |
+
([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
|
| 104 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 105 |
+
A `CLIPTokenizer` to tokenize text.
|
| 106 |
+
tokenizer_2 ([`~transformers.CLIPTokenizer`]):
|
| 107 |
+
A `CLIPTokenizer` to tokenize text.
|
| 108 |
+
unet ([`UNet2DConditionModel`]):
|
| 109 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 110 |
+
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
|
| 111 |
+
Provides additional conditioning to the `unet` during the denoising process. If you set multiple
|
| 112 |
+
ControlNets as a list, the outputs from each ControlNet are added together to create one combined
|
| 113 |
+
additional conditioning.
|
| 114 |
+
scheduler ([`SchedulerMixin`]):
|
| 115 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 116 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 117 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
| 118 |
+
Whether the negative prompt embeddings should always be set to 0. Also see the config of
|
| 119 |
+
`stabilityai/stable-diffusion-xl-base-1-0`.
|
| 120 |
+
add_watermarker (`bool`, *optional*):
|
| 121 |
+
Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to
|
| 122 |
+
watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no
|
| 123 |
+
watermarker is used.
|
| 124 |
+
"""
|
| 125 |
+
model_cpu_offload_seq = (
|
| 126 |
+
"text_encoder->text_encoder_2->unet->vae" # leave controlnet out on purpose because it iterates with unet
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
def __init__(
|
| 130 |
+
self,
|
| 131 |
+
vae: AutoencoderKL,
|
| 132 |
+
text_encoder: CLIPTextModel,
|
| 133 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
| 134 |
+
tokenizer: CLIPTokenizer,
|
| 135 |
+
tokenizer_2: CLIPTokenizer,
|
| 136 |
+
unet: UNet2DConditionModel,
|
| 137 |
+
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
|
| 138 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 139 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 140 |
+
add_watermarker: Optional[bool] = None,
|
| 141 |
+
):
|
| 142 |
+
super().__init__()
|
| 143 |
+
|
| 144 |
+
if isinstance(controlnet, (list, tuple)):
|
| 145 |
+
controlnet = MultiControlNetModel(controlnet)
|
| 146 |
+
|
| 147 |
+
self.register_modules(
|
| 148 |
+
vae=vae,
|
| 149 |
+
text_encoder=text_encoder,
|
| 150 |
+
text_encoder_2=text_encoder_2,
|
| 151 |
+
tokenizer=tokenizer,
|
| 152 |
+
tokenizer_2=tokenizer_2,
|
| 153 |
+
unet=unet,
|
| 154 |
+
controlnet=controlnet,
|
| 155 |
+
scheduler=scheduler,
|
| 156 |
+
)
|
| 157 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 158 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 159 |
+
self.control_image_processor = VaeImageProcessor(
|
| 160 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 161 |
+
)
|
| 162 |
+
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
| 163 |
+
|
| 164 |
+
if add_watermarker:
|
| 165 |
+
self.watermark = StableDiffusionXLWatermarker()
|
| 166 |
+
else:
|
| 167 |
+
self.watermark = None
|
| 168 |
+
|
| 169 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 170 |
+
|
| 171 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
|
| 172 |
+
def enable_vae_slicing(self):
|
| 173 |
+
r"""
|
| 174 |
+
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
| 175 |
+
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
| 176 |
+
"""
|
| 177 |
+
self.vae.enable_slicing()
|
| 178 |
+
|
| 179 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
|
| 180 |
+
def disable_vae_slicing(self):
|
| 181 |
+
r"""
|
| 182 |
+
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
|
| 183 |
+
computing decoding in one step.
|
| 184 |
+
"""
|
| 185 |
+
self.vae.disable_slicing()
|
| 186 |
+
|
| 187 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
|
| 188 |
+
def enable_vae_tiling(self):
|
| 189 |
+
r"""
|
| 190 |
+
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
| 191 |
+
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
| 192 |
+
processing larger images.
|
| 193 |
+
"""
|
| 194 |
+
self.vae.enable_tiling()
|
| 195 |
+
|
| 196 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
|
| 197 |
+
def disable_vae_tiling(self):
|
| 198 |
+
r"""
|
| 199 |
+
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
|
| 200 |
+
computing decoding in one step.
|
| 201 |
+
"""
|
| 202 |
+
self.vae.disable_tiling()
|
| 203 |
+
|
| 204 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
|
| 205 |
+
def encode_prompt(
|
| 206 |
+
self,
|
| 207 |
+
prompt: str,
|
| 208 |
+
prompt_2: Optional[str] = None,
|
| 209 |
+
device: Optional[torch.device] = None,
|
| 210 |
+
num_images_per_prompt: int = 1,
|
| 211 |
+
do_classifier_free_guidance: bool = True,
|
| 212 |
+
negative_prompt: Optional[str] = None,
|
| 213 |
+
negative_prompt_2: Optional[str] = None,
|
| 214 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 215 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 216 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 217 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 218 |
+
lora_scale: Optional[float] = None,
|
| 219 |
+
):
|
| 220 |
+
r"""
|
| 221 |
+
Encodes the prompt into text encoder hidden states.
|
| 222 |
+
|
| 223 |
+
Args:
|
| 224 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 225 |
+
prompt to be encoded
|
| 226 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 227 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 228 |
+
used in both text-encoders
|
| 229 |
+
device: (`torch.device`):
|
| 230 |
+
torch device
|
| 231 |
+
num_images_per_prompt (`int`):
|
| 232 |
+
number of images that should be generated per prompt
|
| 233 |
+
do_classifier_free_guidance (`bool`):
|
| 234 |
+
whether to use classifier free guidance or not
|
| 235 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 236 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 237 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 238 |
+
less than `1`).
|
| 239 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 240 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 241 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 242 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 243 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 244 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 245 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 246 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 247 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 248 |
+
argument.
|
| 249 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 250 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 251 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 252 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 253 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 254 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 255 |
+
input argument.
|
| 256 |
+
lora_scale (`float`, *optional*):
|
| 257 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 258 |
+
"""
|
| 259 |
+
device = device or self._execution_device
|
| 260 |
+
|
| 261 |
+
# set lora scale so that monkey patched LoRA
|
| 262 |
+
# function of text encoder can correctly access it
|
| 263 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 264 |
+
self._lora_scale = lora_scale
|
| 265 |
+
|
| 266 |
+
# dynamically adjust the LoRA scale
|
| 267 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 268 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 269 |
+
|
| 270 |
+
if prompt is not None and isinstance(prompt, str):
|
| 271 |
+
batch_size = 1
|
| 272 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 273 |
+
batch_size = len(prompt)
|
| 274 |
+
else:
|
| 275 |
+
batch_size = prompt_embeds.shape[0]
|
| 276 |
+
|
| 277 |
+
# Define tokenizers and text encoders
|
| 278 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 279 |
+
text_encoders = (
|
| 280 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 281 |
+
)
|
| 282 |
+
|
| 283 |
+
if prompt_embeds is None:
|
| 284 |
+
prompt_2 = prompt_2 or prompt
|
| 285 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 286 |
+
prompt_embeds_list = []
|
| 287 |
+
prompts = [prompt, prompt_2]
|
| 288 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 289 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 290 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 291 |
+
|
| 292 |
+
text_inputs = tokenizer(
|
| 293 |
+
prompt,
|
| 294 |
+
padding="max_length",
|
| 295 |
+
max_length=tokenizer.model_max_length,
|
| 296 |
+
truncation=True,
|
| 297 |
+
return_tensors="pt",
|
| 298 |
+
)
|
| 299 |
+
|
| 300 |
+
text_input_ids = text_inputs.input_ids
|
| 301 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 302 |
+
|
| 303 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 304 |
+
text_input_ids, untruncated_ids
|
| 305 |
+
):
|
| 306 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 307 |
+
logger.warning(
|
| 308 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 309 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
prompt_embeds = text_encoder(
|
| 313 |
+
text_input_ids.to(device),
|
| 314 |
+
output_hidden_states=True,
|
| 315 |
+
)
|
| 316 |
+
|
| 317 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 318 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 319 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 320 |
+
|
| 321 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 322 |
+
|
| 323 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 324 |
+
|
| 325 |
+
# get unconditional embeddings for classifier free guidance
|
| 326 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 327 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 328 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 329 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 330 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 331 |
+
negative_prompt = negative_prompt or ""
|
| 332 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 333 |
+
|
| 334 |
+
uncond_tokens: List[str]
|
| 335 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 336 |
+
raise TypeError(
|
| 337 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 338 |
+
f" {type(prompt)}."
|
| 339 |
+
)
|
| 340 |
+
elif isinstance(negative_prompt, str):
|
| 341 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 342 |
+
elif batch_size != len(negative_prompt):
|
| 343 |
+
raise ValueError(
|
| 344 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 345 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 346 |
+
" the batch size of `prompt`."
|
| 347 |
+
)
|
| 348 |
+
else:
|
| 349 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 350 |
+
|
| 351 |
+
negative_prompt_embeds_list = []
|
| 352 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 353 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 354 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 355 |
+
|
| 356 |
+
max_length = prompt_embeds.shape[1]
|
| 357 |
+
uncond_input = tokenizer(
|
| 358 |
+
negative_prompt,
|
| 359 |
+
padding="max_length",
|
| 360 |
+
max_length=max_length,
|
| 361 |
+
truncation=True,
|
| 362 |
+
return_tensors="pt",
|
| 363 |
+
)
|
| 364 |
+
|
| 365 |
+
negative_prompt_embeds = text_encoder(
|
| 366 |
+
uncond_input.input_ids.to(device),
|
| 367 |
+
output_hidden_states=True,
|
| 368 |
+
)
|
| 369 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 370 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 371 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 372 |
+
|
| 373 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 374 |
+
|
| 375 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 376 |
+
|
| 377 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 378 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 379 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 380 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 381 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 382 |
+
|
| 383 |
+
if do_classifier_free_guidance:
|
| 384 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 385 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 386 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 387 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 388 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 389 |
+
|
| 390 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 391 |
+
bs_embed * num_images_per_prompt, -1
|
| 392 |
+
)
|
| 393 |
+
if do_classifier_free_guidance:
|
| 394 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 395 |
+
bs_embed * num_images_per_prompt, -1
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 399 |
+
|
| 400 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 401 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 402 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 403 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 404 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 405 |
+
# and should be between [0, 1]
|
| 406 |
+
|
| 407 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 408 |
+
extra_step_kwargs = {}
|
| 409 |
+
if accepts_eta:
|
| 410 |
+
extra_step_kwargs["eta"] = eta
|
| 411 |
+
|
| 412 |
+
# check if the scheduler accepts generator
|
| 413 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 414 |
+
if accepts_generator:
|
| 415 |
+
extra_step_kwargs["generator"] = generator
|
| 416 |
+
return extra_step_kwargs
|
| 417 |
+
|
| 418 |
+
def check_inputs(
|
| 419 |
+
self,
|
| 420 |
+
prompt,
|
| 421 |
+
prompt_2,
|
| 422 |
+
image,
|
| 423 |
+
callback_steps,
|
| 424 |
+
negative_prompt=None,
|
| 425 |
+
negative_prompt_2=None,
|
| 426 |
+
prompt_embeds=None,
|
| 427 |
+
negative_prompt_embeds=None,
|
| 428 |
+
pooled_prompt_embeds=None,
|
| 429 |
+
negative_pooled_prompt_embeds=None,
|
| 430 |
+
controlnet_conditioning_scale=1.0,
|
| 431 |
+
control_guidance_start=0.0,
|
| 432 |
+
control_guidance_end=1.0,
|
| 433 |
+
):
|
| 434 |
+
if (callback_steps is None) or (
|
| 435 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 436 |
+
):
|
| 437 |
+
raise ValueError(
|
| 438 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 439 |
+
f" {type(callback_steps)}."
|
| 440 |
+
)
|
| 441 |
+
|
| 442 |
+
if prompt is not None and prompt_embeds is not None:
|
| 443 |
+
raise ValueError(
|
| 444 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 445 |
+
" only forward one of the two."
|
| 446 |
+
)
|
| 447 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
| 448 |
+
raise ValueError(
|
| 449 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 450 |
+
" only forward one of the two."
|
| 451 |
+
)
|
| 452 |
+
elif prompt is None and prompt_embeds is None:
|
| 453 |
+
raise ValueError(
|
| 454 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 455 |
+
)
|
| 456 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 457 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 458 |
+
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
| 459 |
+
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
| 460 |
+
|
| 461 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 462 |
+
raise ValueError(
|
| 463 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 464 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 465 |
+
)
|
| 466 |
+
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
| 467 |
+
raise ValueError(
|
| 468 |
+
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
| 469 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 470 |
+
)
|
| 471 |
+
|
| 472 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 473 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 474 |
+
raise ValueError(
|
| 475 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 476 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 477 |
+
f" {negative_prompt_embeds.shape}."
|
| 478 |
+
)
|
| 479 |
+
|
| 480 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 481 |
+
raise ValueError(
|
| 482 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 483 |
+
)
|
| 484 |
+
|
| 485 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 486 |
+
raise ValueError(
|
| 487 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 488 |
+
)
|
| 489 |
+
|
| 490 |
+
# `prompt` needs more sophisticated handling when there are multiple
|
| 491 |
+
# conditionings.
|
| 492 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 493 |
+
if isinstance(prompt, list):
|
| 494 |
+
logger.warning(
|
| 495 |
+
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
|
| 496 |
+
" prompts. The conditionings will be fixed across the prompts."
|
| 497 |
+
)
|
| 498 |
+
|
| 499 |
+
# Check `image`
|
| 500 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 501 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 502 |
+
)
|
| 503 |
+
if (
|
| 504 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 505 |
+
or is_compiled
|
| 506 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 507 |
+
):
|
| 508 |
+
self.check_image(image, prompt, prompt_embeds)
|
| 509 |
+
elif (
|
| 510 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 511 |
+
or is_compiled
|
| 512 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 513 |
+
):
|
| 514 |
+
if not isinstance(image, list):
|
| 515 |
+
raise TypeError("For multiple controlnets: `image` must be type `list`")
|
| 516 |
+
|
| 517 |
+
# When `image` is a nested list:
|
| 518 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 519 |
+
elif any(isinstance(i, list) for i in image):
|
| 520 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 521 |
+
elif len(image) != len(self.controlnet.nets):
|
| 522 |
+
raise ValueError(
|
| 523 |
+
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
|
| 524 |
+
)
|
| 525 |
+
|
| 526 |
+
for image_ in image:
|
| 527 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 528 |
+
else:
|
| 529 |
+
assert False
|
| 530 |
+
|
| 531 |
+
# Check `controlnet_conditioning_scale`
|
| 532 |
+
if (
|
| 533 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 534 |
+
or is_compiled
|
| 535 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 536 |
+
):
|
| 537 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 538 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 539 |
+
elif (
|
| 540 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 541 |
+
or is_compiled
|
| 542 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 543 |
+
):
|
| 544 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 545 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 546 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 547 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 548 |
+
self.controlnet.nets
|
| 549 |
+
):
|
| 550 |
+
raise ValueError(
|
| 551 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 552 |
+
" the same length as the number of controlnets"
|
| 553 |
+
)
|
| 554 |
+
else:
|
| 555 |
+
assert False
|
| 556 |
+
|
| 557 |
+
if not isinstance(control_guidance_start, (tuple, list)):
|
| 558 |
+
control_guidance_start = [control_guidance_start]
|
| 559 |
+
|
| 560 |
+
if not isinstance(control_guidance_end, (tuple, list)):
|
| 561 |
+
control_guidance_end = [control_guidance_end]
|
| 562 |
+
|
| 563 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 564 |
+
raise ValueError(
|
| 565 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 566 |
+
)
|
| 567 |
+
|
| 568 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 569 |
+
if len(control_guidance_start) != len(self.controlnet.nets):
|
| 570 |
+
raise ValueError(
|
| 571 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
| 572 |
+
)
|
| 573 |
+
|
| 574 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 575 |
+
if start >= end:
|
| 576 |
+
raise ValueError(
|
| 577 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 578 |
+
)
|
| 579 |
+
if start < 0.0:
|
| 580 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 581 |
+
if end > 1.0:
|
| 582 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 583 |
+
|
| 584 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
|
| 585 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 586 |
+
image_is_pil = isinstance(image, PIL.Image.Image)
|
| 587 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 588 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 589 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
| 590 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 591 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 592 |
+
|
| 593 |
+
if (
|
| 594 |
+
not image_is_pil
|
| 595 |
+
and not image_is_tensor
|
| 596 |
+
and not image_is_np
|
| 597 |
+
and not image_is_pil_list
|
| 598 |
+
and not image_is_tensor_list
|
| 599 |
+
and not image_is_np_list
|
| 600 |
+
):
|
| 601 |
+
raise TypeError(
|
| 602 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 603 |
+
)
|
| 604 |
+
|
| 605 |
+
if image_is_pil:
|
| 606 |
+
image_batch_size = 1
|
| 607 |
+
else:
|
| 608 |
+
image_batch_size = len(image)
|
| 609 |
+
|
| 610 |
+
if prompt is not None and isinstance(prompt, str):
|
| 611 |
+
prompt_batch_size = 1
|
| 612 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 613 |
+
prompt_batch_size = len(prompt)
|
| 614 |
+
elif prompt_embeds is not None:
|
| 615 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 616 |
+
|
| 617 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 618 |
+
raise ValueError(
|
| 619 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
|
| 623 |
+
def prepare_image(
|
| 624 |
+
self,
|
| 625 |
+
image,
|
| 626 |
+
width,
|
| 627 |
+
height,
|
| 628 |
+
batch_size,
|
| 629 |
+
num_images_per_prompt,
|
| 630 |
+
device,
|
| 631 |
+
dtype,
|
| 632 |
+
do_classifier_free_guidance=False,
|
| 633 |
+
guess_mode=False,
|
| 634 |
+
):
|
| 635 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 636 |
+
image_batch_size = image.shape[0]
|
| 637 |
+
|
| 638 |
+
if image_batch_size == 1:
|
| 639 |
+
repeat_by = batch_size
|
| 640 |
+
else:
|
| 641 |
+
# image batch size is the same as prompt batch size
|
| 642 |
+
repeat_by = num_images_per_prompt
|
| 643 |
+
|
| 644 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 645 |
+
|
| 646 |
+
image = image.to(device=device, dtype=dtype)
|
| 647 |
+
|
| 648 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 649 |
+
image = torch.cat([image] * 2)
|
| 650 |
+
|
| 651 |
+
return image
|
| 652 |
+
|
| 653 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 654 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 655 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 656 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 657 |
+
raise ValueError(
|
| 658 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 659 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 660 |
+
)
|
| 661 |
+
|
| 662 |
+
if latents is None:
|
| 663 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 664 |
+
else:
|
| 665 |
+
latents = latents.to(device)
|
| 666 |
+
|
| 667 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 668 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 669 |
+
return latents
|
| 670 |
+
|
| 671 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
|
| 672 |
+
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
|
| 673 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 674 |
+
|
| 675 |
+
passed_add_embed_dim = (
|
| 676 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
|
| 677 |
+
)
|
| 678 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 679 |
+
|
| 680 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
| 681 |
+
raise ValueError(
|
| 682 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 683 |
+
)
|
| 684 |
+
|
| 685 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 686 |
+
return add_time_ids
|
| 687 |
+
|
| 688 |
+
def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
|
| 689 |
+
# Here, we define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113)
|
| 690 |
+
# if panorama's height/width < window_size, num_blocks of height/width should return 1
|
| 691 |
+
height //= self.vae_scale_factor
|
| 692 |
+
width //= self.vae_scale_factor
|
| 693 |
+
num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
|
| 694 |
+
num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
|
| 695 |
+
total_num_blocks = int(num_blocks_height * num_blocks_width)
|
| 696 |
+
views = []
|
| 697 |
+
for i in range(total_num_blocks):
|
| 698 |
+
h_start = int((i // num_blocks_width) * stride)
|
| 699 |
+
h_end = h_start + window_size
|
| 700 |
+
w_start = int((i % num_blocks_width) * stride)
|
| 701 |
+
w_end = w_start + window_size
|
| 702 |
+
|
| 703 |
+
if h_end > height:
|
| 704 |
+
h_start = int(h_start + height - h_end)
|
| 705 |
+
h_end = int(height)
|
| 706 |
+
if w_end > width:
|
| 707 |
+
w_start = int(w_start + width - w_end)
|
| 708 |
+
w_end = int(width)
|
| 709 |
+
if h_start < 0:
|
| 710 |
+
h_end = int(h_end - h_start)
|
| 711 |
+
h_start = 0
|
| 712 |
+
if w_start < 0:
|
| 713 |
+
w_end = int(w_end - w_start)
|
| 714 |
+
w_start = 0
|
| 715 |
+
|
| 716 |
+
if random_jitter:
|
| 717 |
+
jitter_range = (window_size - stride) // 4
|
| 718 |
+
w_jitter = 0
|
| 719 |
+
h_jitter = 0
|
| 720 |
+
if (w_start != 0) and (w_end != width):
|
| 721 |
+
w_jitter = random.randint(-jitter_range, jitter_range)
|
| 722 |
+
elif (w_start == 0) and (w_end != width):
|
| 723 |
+
w_jitter = random.randint(-jitter_range, 0)
|
| 724 |
+
elif (w_start != 0) and (w_end == width):
|
| 725 |
+
w_jitter = random.randint(0, jitter_range)
|
| 726 |
+
if (h_start != 0) and (h_end != height):
|
| 727 |
+
h_jitter = random.randint(-jitter_range, jitter_range)
|
| 728 |
+
elif (h_start == 0) and (h_end != height):
|
| 729 |
+
h_jitter = random.randint(-jitter_range, 0)
|
| 730 |
+
elif (h_start != 0) and (h_end == height):
|
| 731 |
+
h_jitter = random.randint(0, jitter_range)
|
| 732 |
+
h_start += (h_jitter + jitter_range)
|
| 733 |
+
h_end += (h_jitter + jitter_range)
|
| 734 |
+
w_start += (w_jitter + jitter_range)
|
| 735 |
+
w_end += (w_jitter + jitter_range)
|
| 736 |
+
|
| 737 |
+
views.append((h_start, h_end, w_start, w_end))
|
| 738 |
+
return views
|
| 739 |
+
|
| 740 |
+
def tiled_decode(self, latents, current_height, current_width):
|
| 741 |
+
sample_size = self.unet.config.sample_size
|
| 742 |
+
core_size = self.unet.config.sample_size // 4
|
| 743 |
+
core_stride = core_size
|
| 744 |
+
pad_size = self.unet.config.sample_size // 8 * 3
|
| 745 |
+
decoder_view_batch_size = 1
|
| 746 |
+
|
| 747 |
+
if self.lowvram:
|
| 748 |
+
core_stride = core_size // 2
|
| 749 |
+
pad_size = core_size
|
| 750 |
+
|
| 751 |
+
views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
|
| 752 |
+
views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
|
| 753 |
+
latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), 'constant', 0)
|
| 754 |
+
image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
|
| 755 |
+
count = torch.zeros_like(image).to(latents.device)
|
| 756 |
+
# get the latents corresponding to the current view coordinates
|
| 757 |
+
with self.progress_bar(total=len(views_batch)) as progress_bar:
|
| 758 |
+
for j, batch_view in enumerate(views_batch):
|
| 759 |
+
vb_size = len(batch_view)
|
| 760 |
+
latents_for_view = torch.cat(
|
| 761 |
+
[
|
| 762 |
+
latents_[:, :, h_start:h_end+pad_size*2, w_start:w_end+pad_size*2]
|
| 763 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 764 |
+
]
|
| 765 |
+
).to(self.vae.device)
|
| 766 |
+
image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 767 |
+
h_start, h_end, w_start, w_end = views[j]
|
| 768 |
+
h_start, h_end, w_start, w_end = h_start * self.vae_scale_factor, h_end * self.vae_scale_factor, w_start * self.vae_scale_factor, w_end * self.vae_scale_factor
|
| 769 |
+
p_h_start, p_h_end, p_w_start, p_w_end = pad_size * self.vae_scale_factor, image_patch.size(2) - pad_size * self.vae_scale_factor, pad_size * self.vae_scale_factor, image_patch.size(3) - pad_size * self.vae_scale_factor
|
| 770 |
+
image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end].to(latents.device)
|
| 771 |
+
count[:, :, h_start:h_end, w_start:w_end] += 1
|
| 772 |
+
progress_bar.update()
|
| 773 |
+
image = image / count
|
| 774 |
+
|
| 775 |
+
return image
|
| 776 |
+
|
| 777 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
| 778 |
+
def upcast_vae(self):
|
| 779 |
+
dtype = self.vae.dtype
|
| 780 |
+
self.vae.to(dtype=torch.float32)
|
| 781 |
+
use_torch_2_0_or_xformers = isinstance(
|
| 782 |
+
self.vae.decoder.mid_block.attentions[0].processor,
|
| 783 |
+
(
|
| 784 |
+
AttnProcessor2_0,
|
| 785 |
+
XFormersAttnProcessor,
|
| 786 |
+
LoRAXFormersAttnProcessor,
|
| 787 |
+
LoRAAttnProcessor2_0,
|
| 788 |
+
),
|
| 789 |
+
)
|
| 790 |
+
# if xformers or torch_2_0 is used attention block does not need
|
| 791 |
+
# to be in float32 which can save lots of memory
|
| 792 |
+
if use_torch_2_0_or_xformers:
|
| 793 |
+
self.vae.post_quant_conv.to(dtype)
|
| 794 |
+
self.vae.decoder.conv_in.to(dtype)
|
| 795 |
+
self.vae.decoder.mid_block.to(dtype)
|
| 796 |
+
|
| 797 |
+
@torch.no_grad()
|
| 798 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 799 |
+
def __call__(
|
| 800 |
+
self,
|
| 801 |
+
prompt: Union[str, List[str]] = None,
|
| 802 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 803 |
+
condition_image: PipelineImageInput = None,
|
| 804 |
+
height: Optional[int] = None,
|
| 805 |
+
width: Optional[int] = None,
|
| 806 |
+
num_inference_steps: int = 50,
|
| 807 |
+
guidance_scale: float = 5.0,
|
| 808 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 809 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 810 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 811 |
+
eta: float = 0.0,
|
| 812 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 813 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 814 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 815 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 816 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 817 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 818 |
+
output_type: Optional[str] = "pil",
|
| 819 |
+
return_dict: bool = True,
|
| 820 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 821 |
+
callback_steps: int = 1,
|
| 822 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 823 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
| 824 |
+
guess_mode: bool = False,
|
| 825 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 826 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 827 |
+
original_size: Tuple[int, int] = None,
|
| 828 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 829 |
+
target_size: Tuple[int, int] = None,
|
| 830 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 831 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 832 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 833 |
+
################### DemoFusion specific parameters ####################
|
| 834 |
+
image_lr: Optional[torch.FloatTensor] = None,
|
| 835 |
+
view_batch_size: int = 16,
|
| 836 |
+
multi_decoder: bool = True,
|
| 837 |
+
stride: Optional[int] = 64,
|
| 838 |
+
cosine_scale_1: Optional[float] = 3.,
|
| 839 |
+
cosine_scale_2: Optional[float] = 1.,
|
| 840 |
+
cosine_scale_3: Optional[float] = 1.,
|
| 841 |
+
sigma: Optional[float] = 1.0,
|
| 842 |
+
show_image: bool = False,
|
| 843 |
+
lowvram: bool = False,
|
| 844 |
+
):
|
| 845 |
+
r"""
|
| 846 |
+
The call function to the pipeline for generation.
|
| 847 |
+
|
| 848 |
+
Args:
|
| 849 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 850 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 851 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 852 |
+
The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 853 |
+
used in both text-encoders.
|
| 854 |
+
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 855 |
+
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 856 |
+
The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
|
| 857 |
+
specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
|
| 858 |
+
accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
|
| 859 |
+
and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
|
| 860 |
+
`init`, images must be passed as a list such that each element of the list can be correctly batched for
|
| 861 |
+
input to a single ControlNet.
|
| 862 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 863 |
+
The height in pixels of the generated image. Anything below 512 pixels won't work well for
|
| 864 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 865 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 866 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 867 |
+
The width in pixels of the generated image. Anything below 512 pixels won't work well for
|
| 868 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 869 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 870 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 871 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 872 |
+
expense of slower inference.
|
| 873 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 874 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 875 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 876 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 877 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 878 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 879 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 880 |
+
The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
|
| 881 |
+
and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
|
| 882 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 883 |
+
The number of images to generate per prompt.
|
| 884 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 885 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 886 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 887 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 888 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 889 |
+
generation deterministic.
|
| 890 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 891 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 892 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 893 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 894 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 895 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 896 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 897 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 898 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 899 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 900 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 901 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 902 |
+
not provided, pooled text embeddings are generated from `prompt` input argument.
|
| 903 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 904 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
|
| 905 |
+
weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
|
| 906 |
+
argument.
|
| 907 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 908 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 909 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 910 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 911 |
+
plain tuple.
|
| 912 |
+
callback (`Callable`, *optional*):
|
| 913 |
+
A function that calls every `callback_steps` steps during inference. The function is called with the
|
| 914 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 915 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 916 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 917 |
+
every step.
|
| 918 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 919 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 920 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 921 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 922 |
+
The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 923 |
+
to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
|
| 924 |
+
the corresponding scale as a list.
|
| 925 |
+
guess_mode (`bool`, *optional*, defaults to `False`):
|
| 926 |
+
The ControlNet encoder tries to recognize the content of the input image even if you remove all
|
| 927 |
+
prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
|
| 928 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 929 |
+
The percentage of total steps at which the ControlNet starts applying.
|
| 930 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 931 |
+
The percentage of total steps at which the ControlNet stops applying.
|
| 932 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 933 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 934 |
+
`original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
|
| 935 |
+
explained in section 2.2 of
|
| 936 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 937 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 938 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 939 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 940 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 941 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 942 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 943 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 944 |
+
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
|
| 945 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 946 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 947 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 948 |
+
micro-conditioning as explained in section 2.2 of
|
| 949 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 950 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 951 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 952 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 953 |
+
micro-conditioning as explained in section 2.2 of
|
| 954 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 955 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 956 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 957 |
+
To negatively condition the generation process based on a target image resolution. It should be as same
|
| 958 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 959 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 960 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 961 |
+
################### DemoFusion specific parameters ####################
|
| 962 |
+
image_lr (`torch.FloatTensor`, *optional*, , defaults to None):
|
| 963 |
+
Low-resolution image input for upscaling. If provided, DemoFusion will encode it as the initial latent representation.
|
| 964 |
+
view_batch_size (`int`, defaults to 16):
|
| 965 |
+
The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
|
| 966 |
+
efficiency but comes with increased GPU memory requirements.
|
| 967 |
+
multi_decoder (`bool`, defaults to True):
|
| 968 |
+
Determine whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
|
| 969 |
+
a tiled decoder becomes necessary.
|
| 970 |
+
stride (`int`, defaults to 64):
|
| 971 |
+
The stride of moving local patches. A smaller stride is better for alleviating seam issues,
|
| 972 |
+
but it also introduces additional computational overhead and inference time.
|
| 973 |
+
cosine_scale_1 (`float`, defaults to 3):
|
| 974 |
+
Control the strength of skip-residual. For specific impacts, please refer to Appendix C
|
| 975 |
+
in the DemoFusion paper.
|
| 976 |
+
cosine_scale_2 (`float`, defaults to 1):
|
| 977 |
+
Control the strength of dilated sampling. For specific impacts, please refer to Appendix C
|
| 978 |
+
in the DemoFusion paper.
|
| 979 |
+
cosine_scale_3 (`float`, defaults to 1):
|
| 980 |
+
Control the strength of the gaussion filter. For specific impacts, please refer to Appendix C
|
| 981 |
+
in the DemoFusion paper.
|
| 982 |
+
sigma (`float`, defaults to 1):
|
| 983 |
+
The standard value of the gaussian filter.
|
| 984 |
+
show_image (`bool`, defaults to False):
|
| 985 |
+
Determine whether to show intermediate results during generation.
|
| 986 |
+
lowvram (`bool`, defaults to False):
|
| 987 |
+
Try to fit in 8 Gb of VRAM, with xformers installed.
|
| 988 |
+
Examples:
|
| 989 |
+
|
| 990 |
+
Returns:
|
| 991 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 992 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 993 |
+
otherwise a `tuple` is returned containing the output images.
|
| 994 |
+
"""
|
| 995 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 996 |
+
|
| 997 |
+
# align format for control guidance
|
| 998 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 999 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 1000 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 1001 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 1002 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 1003 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 1004 |
+
control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
|
| 1005 |
+
control_guidance_end
|
| 1006 |
+
]
|
| 1007 |
+
|
| 1008 |
+
# 0. Default height and width to unet
|
| 1009 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 1010 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 1011 |
+
|
| 1012 |
+
x1_size = self.unet.config.sample_size * self.vae_scale_factor
|
| 1013 |
+
|
| 1014 |
+
height_scale = height / x1_size
|
| 1015 |
+
width_scale = width / x1_size
|
| 1016 |
+
scale_num = int(max(height_scale, width_scale))
|
| 1017 |
+
aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
|
| 1018 |
+
|
| 1019 |
+
original_size = original_size or (height, width)
|
| 1020 |
+
target_size = target_size or (height, width)
|
| 1021 |
+
|
| 1022 |
+
# 1. Check inputs. Raise error if not correct
|
| 1023 |
+
self.check_inputs(
|
| 1024 |
+
prompt,
|
| 1025 |
+
prompt_2,
|
| 1026 |
+
condition_image,
|
| 1027 |
+
callback_steps,
|
| 1028 |
+
negative_prompt,
|
| 1029 |
+
negative_prompt_2,
|
| 1030 |
+
prompt_embeds,
|
| 1031 |
+
negative_prompt_embeds,
|
| 1032 |
+
pooled_prompt_embeds,
|
| 1033 |
+
negative_pooled_prompt_embeds,
|
| 1034 |
+
controlnet_conditioning_scale,
|
| 1035 |
+
control_guidance_start,
|
| 1036 |
+
control_guidance_end,
|
| 1037 |
+
)
|
| 1038 |
+
|
| 1039 |
+
# 2. Define call parameters
|
| 1040 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1041 |
+
batch_size = 1
|
| 1042 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1043 |
+
batch_size = len(prompt)
|
| 1044 |
+
else:
|
| 1045 |
+
batch_size = prompt_embeds.shape[0]
|
| 1046 |
+
|
| 1047 |
+
device = self._execution_device
|
| 1048 |
+
self.lowvram = lowvram
|
| 1049 |
+
if self.lowvram:
|
| 1050 |
+
self.vae.cpu()
|
| 1051 |
+
self.unet.cpu()
|
| 1052 |
+
self.text_encoder.to(device)
|
| 1053 |
+
self.text_encoder_2.to(device)
|
| 1054 |
+
image_lr.cpu()
|
| 1055 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 1056 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 1057 |
+
# corresponds to doing no classifier free guidance.
|
| 1058 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 1059 |
+
|
| 1060 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 1061 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 1062 |
+
|
| 1063 |
+
global_pool_conditions = (
|
| 1064 |
+
controlnet.config.global_pool_conditions
|
| 1065 |
+
if isinstance(controlnet, ControlNetModel)
|
| 1066 |
+
else controlnet.nets[0].config.global_pool_conditions
|
| 1067 |
+
)
|
| 1068 |
+
guess_mode = guess_mode or global_pool_conditions
|
| 1069 |
+
|
| 1070 |
+
# 3. Encode input prompt
|
| 1071 |
+
text_encoder_lora_scale = (
|
| 1072 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 1073 |
+
)
|
| 1074 |
+
(
|
| 1075 |
+
prompt_embeds,
|
| 1076 |
+
negative_prompt_embeds,
|
| 1077 |
+
pooled_prompt_embeds,
|
| 1078 |
+
negative_pooled_prompt_embeds,
|
| 1079 |
+
) = self.encode_prompt(
|
| 1080 |
+
prompt,
|
| 1081 |
+
prompt_2,
|
| 1082 |
+
device,
|
| 1083 |
+
num_images_per_prompt,
|
| 1084 |
+
do_classifier_free_guidance,
|
| 1085 |
+
negative_prompt,
|
| 1086 |
+
negative_prompt_2,
|
| 1087 |
+
prompt_embeds=prompt_embeds,
|
| 1088 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1089 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1090 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1091 |
+
lora_scale=text_encoder_lora_scale,
|
| 1092 |
+
)
|
| 1093 |
+
|
| 1094 |
+
# 4. Prepare image
|
| 1095 |
+
if isinstance(controlnet, ControlNetModel):
|
| 1096 |
+
condition_image = self.prepare_image(
|
| 1097 |
+
image=condition_image,
|
| 1098 |
+
width=width // scale_num,
|
| 1099 |
+
height=height // scale_num,
|
| 1100 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1101 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1102 |
+
device=device,
|
| 1103 |
+
dtype=controlnet.dtype,
|
| 1104 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1105 |
+
guess_mode=guess_mode,
|
| 1106 |
+
)
|
| 1107 |
+
# height, width = condition_image.shape[-2:]
|
| 1108 |
+
# condition_image.shape ([2, 3, 1024, 1024])
|
| 1109 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 1110 |
+
condition_images = []
|
| 1111 |
+
|
| 1112 |
+
for image_ in condition_image:
|
| 1113 |
+
image_ = self.prepare_image(
|
| 1114 |
+
image=image_,
|
| 1115 |
+
width=width // scale_num,
|
| 1116 |
+
height=height // scale_num,
|
| 1117 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1118 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1119 |
+
device=device,
|
| 1120 |
+
dtype=controlnet.dtype,
|
| 1121 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1122 |
+
guess_mode=guess_mode,
|
| 1123 |
+
)
|
| 1124 |
+
|
| 1125 |
+
condition_images.append(image_)
|
| 1126 |
+
|
| 1127 |
+
condition_image = condition_images
|
| 1128 |
+
# height, width = condition_image[0].shape[-2:]
|
| 1129 |
+
else:
|
| 1130 |
+
assert False
|
| 1131 |
+
|
| 1132 |
+
# 5. Prepare timesteps
|
| 1133 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1134 |
+
timesteps = self.scheduler.timesteps
|
| 1135 |
+
|
| 1136 |
+
# 6. Prepare latent variables
|
| 1137 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1138 |
+
latents = self.prepare_latents(
|
| 1139 |
+
batch_size * num_images_per_prompt,
|
| 1140 |
+
num_channels_latents,
|
| 1141 |
+
height // scale_num,
|
| 1142 |
+
width // scale_num,
|
| 1143 |
+
prompt_embeds.dtype,
|
| 1144 |
+
device,
|
| 1145 |
+
generator,
|
| 1146 |
+
latents,
|
| 1147 |
+
)
|
| 1148 |
+
|
| 1149 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1150 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1151 |
+
|
| 1152 |
+
# 7.1 Create tensor stating which controlnets to keep
|
| 1153 |
+
controlnet_keep = []
|
| 1154 |
+
for i in range(len(timesteps)):
|
| 1155 |
+
keeps = [
|
| 1156 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 1157 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 1158 |
+
]
|
| 1159 |
+
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
|
| 1160 |
+
|
| 1161 |
+
# 7.2 Prepare added time ids & embeddings
|
| 1162 |
+
if isinstance(condition_image, list):
|
| 1163 |
+
original_size = original_size or condition_image[0].shape[-2:]
|
| 1164 |
+
else:
|
| 1165 |
+
original_size = original_size or condition_image.shape[-2:]
|
| 1166 |
+
target_size = target_size or (height, width)
|
| 1167 |
+
|
| 1168 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1169 |
+
add_time_ids = self._get_add_time_ids(
|
| 1170 |
+
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
|
| 1171 |
+
)
|
| 1172 |
+
|
| 1173 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 1174 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 1175 |
+
negative_original_size,
|
| 1176 |
+
negative_crops_coords_top_left,
|
| 1177 |
+
negative_target_size,
|
| 1178 |
+
dtype=prompt_embeds.dtype,
|
| 1179 |
+
)
|
| 1180 |
+
else:
|
| 1181 |
+
negative_add_time_ids = add_time_ids
|
| 1182 |
+
|
| 1183 |
+
if do_classifier_free_guidance:
|
| 1184 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1185 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1186 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 1187 |
+
|
| 1188 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1189 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1190 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1191 |
+
|
| 1192 |
+
|
| 1193 |
+
|
| 1194 |
+
# 8. Denoising loop
|
| 1195 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1196 |
+
|
| 1197 |
+
output_images = []
|
| 1198 |
+
|
| 1199 |
+
###################################################### Phase Initialization ########################################################
|
| 1200 |
+
|
| 1201 |
+
if self.lowvram:
|
| 1202 |
+
self.text_encoder.cpu()
|
| 1203 |
+
self.text_encoder_2.cpu()
|
| 1204 |
+
|
| 1205 |
+
if image_lr == None:
|
| 1206 |
+
print("### Phase 1 Denoising ###")
|
| 1207 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1208 |
+
for i, t in enumerate(timesteps):
|
| 1209 |
+
|
| 1210 |
+
if self.lowvram:
|
| 1211 |
+
self.vae.cpu()
|
| 1212 |
+
self.unet.to(device)
|
| 1213 |
+
|
| 1214 |
+
latents_for_view = latents
|
| 1215 |
+
|
| 1216 |
+
# expand the latents if we are doing classifier free guidance
|
| 1217 |
+
latent_model_input = (
|
| 1218 |
+
latents.repeat_interleave(2, dim=0)
|
| 1219 |
+
if do_classifier_free_guidance
|
| 1220 |
+
else latents
|
| 1221 |
+
)
|
| 1222 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1223 |
+
|
| 1224 |
+
|
| 1225 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 1226 |
+
|
| 1227 |
+
# controlnet(s) inference
|
| 1228 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1229 |
+
# Infer ControlNet only for the conditional batch.
|
| 1230 |
+
control_model_input = latents
|
| 1231 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1232 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1233 |
+
controlnet_added_cond_kwargs = {
|
| 1234 |
+
"text_embeds": add_text_embeds.chunk(2)[1],
|
| 1235 |
+
"time_ids": add_time_ids.chunk(2)[1],
|
| 1236 |
+
}
|
| 1237 |
+
else:
|
| 1238 |
+
control_model_input = latent_model_input
|
| 1239 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1240 |
+
controlnet_added_cond_kwargs = added_cond_kwargs
|
| 1241 |
+
|
| 1242 |
+
if isinstance(controlnet_keep[i], list):
|
| 1243 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1244 |
+
else:
|
| 1245 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1246 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1247 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1248 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1249 |
+
|
| 1250 |
+
# print(condition_image.shape, control_model_input.shape, controlnet_prompt_embeds.shape, t, cond_scale, guess_mode)
|
| 1251 |
+
# print(controlnet_added_cond_kwargs["text_embeds"].shape, controlnet_added_cond_kwargs["time_ids"].shape)
|
| 1252 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1253 |
+
control_model_input,
|
| 1254 |
+
t,
|
| 1255 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1256 |
+
controlnet_cond=condition_image,
|
| 1257 |
+
conditioning_scale=cond_scale,
|
| 1258 |
+
guess_mode=guess_mode,
|
| 1259 |
+
added_cond_kwargs=controlnet_added_cond_kwargs,
|
| 1260 |
+
return_dict=False,
|
| 1261 |
+
)
|
| 1262 |
+
|
| 1263 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1264 |
+
# Infered ControlNet only for the conditional batch.
|
| 1265 |
+
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
| 1266 |
+
# add 0 to the unconditional batch to keep it unchanged.
|
| 1267 |
+
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
| 1268 |
+
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
|
| 1269 |
+
|
| 1270 |
+
# predict the noise residual
|
| 1271 |
+
noise_pred = self.unet(
|
| 1272 |
+
latent_model_input,
|
| 1273 |
+
t,
|
| 1274 |
+
encoder_hidden_states=prompt_embeds,
|
| 1275 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1276 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1277 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1278 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1279 |
+
return_dict=False,
|
| 1280 |
+
)[0]
|
| 1281 |
+
|
| 1282 |
+
# perform guidance
|
| 1283 |
+
if do_classifier_free_guidance:
|
| 1284 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1285 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1286 |
+
|
| 1287 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1288 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1289 |
+
|
| 1290 |
+
# call the callback, if provided
|
| 1291 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1292 |
+
progress_bar.update()
|
| 1293 |
+
if callback is not None and i % callback_steps == 0:
|
| 1294 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1295 |
+
callback(step_idx, t, latents)
|
| 1296 |
+
else:
|
| 1297 |
+
print("### Encoding Real Image ###")
|
| 1298 |
+
latents = self.vae.encode(image_lr)
|
| 1299 |
+
latents = latents.latent_dist.sample() * self.vae.config.scaling_factor
|
| 1300 |
+
|
| 1301 |
+
anchor_mean = latents.mean()
|
| 1302 |
+
anchor_std = latents.std()
|
| 1303 |
+
if self.lowvram:
|
| 1304 |
+
latents = latents.cpu()
|
| 1305 |
+
torch.cuda.empty_cache()
|
| 1306 |
+
if not output_type == "latent":
|
| 1307 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1308 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1309 |
+
|
| 1310 |
+
if self.lowvram:
|
| 1311 |
+
needs_upcasting = False # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
|
| 1312 |
+
self.unet.cpu()
|
| 1313 |
+
self.vae.to(device)
|
| 1314 |
+
|
| 1315 |
+
if needs_upcasting:
|
| 1316 |
+
self.upcast_vae()
|
| 1317 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1318 |
+
if self.lowvram and multi_decoder:
|
| 1319 |
+
current_width_height = self.unet.config.sample_size * self.vae_scale_factor
|
| 1320 |
+
image = self.tiled_decode(latents, current_width_height, current_width_height)
|
| 1321 |
+
else:
|
| 1322 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1323 |
+
# cast back to fp16 if needed
|
| 1324 |
+
if needs_upcasting:
|
| 1325 |
+
self.vae.to(dtype=torch.float16)
|
| 1326 |
+
|
| 1327 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1328 |
+
if show_image:
|
| 1329 |
+
plt.figure(figsize=(10, 10))
|
| 1330 |
+
plt.imshow(image[0])
|
| 1331 |
+
plt.axis('off') # Turn off axis numbers and ticks
|
| 1332 |
+
plt.show()
|
| 1333 |
+
output_images.append(image[0])
|
| 1334 |
+
|
| 1335 |
+
####################################################### Phase Upscaling #####################################################
|
| 1336 |
+
if image_lr == None:
|
| 1337 |
+
starting_scale = 2
|
| 1338 |
+
else:
|
| 1339 |
+
starting_scale = 1
|
| 1340 |
+
for current_scale_num in range(starting_scale, scale_num + 1):
|
| 1341 |
+
if self.lowvram:
|
| 1342 |
+
latents = latents.to(device)
|
| 1343 |
+
self.unet.to(device)
|
| 1344 |
+
torch.cuda.empty_cache()
|
| 1345 |
+
print("### Phase {} Denoising ###".format(current_scale_num))
|
| 1346 |
+
current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1347 |
+
current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1348 |
+
if height > width:
|
| 1349 |
+
current_width = int(current_width * aspect_ratio)
|
| 1350 |
+
else:
|
| 1351 |
+
current_height = int(current_height * aspect_ratio)
|
| 1352 |
+
|
| 1353 |
+
latents = F.interpolate(latents, size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)), mode='bicubic')
|
| 1354 |
+
condition_image = F.interpolate(condition_image, size=(current_height, current_width), mode='bicubic')
|
| 1355 |
+
|
| 1356 |
+
noise_latents = []
|
| 1357 |
+
noise = torch.randn_like(latents)
|
| 1358 |
+
for timestep in timesteps:
|
| 1359 |
+
noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
|
| 1360 |
+
noise_latents.append(noise_latent)
|
| 1361 |
+
latents = noise_latents[0]
|
| 1362 |
+
|
| 1363 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1364 |
+
for i, t in enumerate(timesteps):
|
| 1365 |
+
count = torch.zeros_like(latents)
|
| 1366 |
+
value = torch.zeros_like(latents)
|
| 1367 |
+
cosine_factor = 0.5 * (1 + torch.cos(torch.pi * (self.scheduler.config.num_train_timesteps - t) / self.scheduler.config.num_train_timesteps)).cpu()
|
| 1368 |
+
|
| 1369 |
+
c1 = cosine_factor ** cosine_scale_1
|
| 1370 |
+
latents = latents * (1 - c1) + noise_latents[i] * c1
|
| 1371 |
+
|
| 1372 |
+
############################################# MultiDiffusion #############################################
|
| 1373 |
+
|
| 1374 |
+
views = self.get_views(current_height, current_width, stride=stride, window_size=self.unet.config.sample_size, random_jitter=True)
|
| 1375 |
+
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
|
| 1376 |
+
|
| 1377 |
+
jitter_range = (self.unet.config.sample_size - stride) // 4
|
| 1378 |
+
latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), 'constant', 0)
|
| 1379 |
+
condition_image_ = F.pad(condition_image, (jitter_range * self.vae_scale_factor, jitter_range * self.vae_scale_factor, jitter_range * self.vae_scale_factor, jitter_range * self.vae_scale_factor), 'constant', 0)
|
| 1380 |
+
|
| 1381 |
+
count_local = torch.zeros_like(latents_)
|
| 1382 |
+
value_local = torch.zeros_like(latents_)
|
| 1383 |
+
|
| 1384 |
+
for j, batch_view in enumerate(views_batch):
|
| 1385 |
+
vb_size = len(batch_view)
|
| 1386 |
+
|
| 1387 |
+
# get the latents corresponding to the current view coordinates
|
| 1388 |
+
latents_for_view = torch.cat(
|
| 1389 |
+
[
|
| 1390 |
+
latents_[:, :, h_start:h_end, w_start:w_end]
|
| 1391 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 1392 |
+
]
|
| 1393 |
+
)
|
| 1394 |
+
condition_image_for_view = torch.cat(
|
| 1395 |
+
[
|
| 1396 |
+
condition_image_[0:1, :, h_start * self.vae_scale_factor:h_end * self.vae_scale_factor, w_start * self.vae_scale_factor:w_end * self.vae_scale_factor]
|
| 1397 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 1398 |
+
]
|
| 1399 |
+
)
|
| 1400 |
+
|
| 1401 |
+
# expand the latents if we are doing classifier free guidance
|
| 1402 |
+
latent_model_input = latents_for_view
|
| 1403 |
+
latent_model_input = (
|
| 1404 |
+
latent_model_input.repeat_interleave(2, dim=0)
|
| 1405 |
+
if do_classifier_free_guidance
|
| 1406 |
+
else latent_model_input
|
| 1407 |
+
)
|
| 1408 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1409 |
+
|
| 1410 |
+
condition_image_input = condition_image_for_view
|
| 1411 |
+
condition_image_input = (
|
| 1412 |
+
condition_image_input.repeat_interleave(2, dim=0)
|
| 1413 |
+
if do_classifier_free_guidance
|
| 1414 |
+
else condition_image_input
|
| 1415 |
+
)
|
| 1416 |
+
|
| 1417 |
+
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
|
| 1418 |
+
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
|
| 1419 |
+
add_time_ids_input = []
|
| 1420 |
+
for h_start, h_end, w_start, w_end in batch_view:
|
| 1421 |
+
add_time_ids_ = add_time_ids.clone()
|
| 1422 |
+
add_time_ids_[:, 2] = h_start * self.vae_scale_factor
|
| 1423 |
+
add_time_ids_[:, 3] = w_start * self.vae_scale_factor
|
| 1424 |
+
add_time_ids_input.append(add_time_ids_)
|
| 1425 |
+
add_time_ids_input = torch.cat(add_time_ids_input)
|
| 1426 |
+
|
| 1427 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
|
| 1428 |
+
|
| 1429 |
+
# controlnet(s) inference
|
| 1430 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1431 |
+
# Infer ControlNet only for the conditional batch.
|
| 1432 |
+
control_model_input = latent_model_input
|
| 1433 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1434 |
+
controlnet_prompt_embeds = prompt_embeds_input.chunk(2)[1]
|
| 1435 |
+
controlnet_added_cond_kwargs = {
|
| 1436 |
+
"text_embeds": add_text_embeds_input.chunk(2)[1],
|
| 1437 |
+
"time_ids": add_time_ids_input.chunk(2)[1],
|
| 1438 |
+
}
|
| 1439 |
+
else:
|
| 1440 |
+
control_model_input = latent_model_input
|
| 1441 |
+
controlnet_prompt_embeds = prompt_embeds_input
|
| 1442 |
+
controlnet_added_cond_kwargs = added_cond_kwargs
|
| 1443 |
+
|
| 1444 |
+
if isinstance(controlnet_keep[i], list):
|
| 1445 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1446 |
+
else:
|
| 1447 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1448 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1449 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1450 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1451 |
+
|
| 1452 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1453 |
+
control_model_input,
|
| 1454 |
+
t,
|
| 1455 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1456 |
+
controlnet_cond=condition_image_input,
|
| 1457 |
+
conditioning_scale=cond_scale,
|
| 1458 |
+
guess_mode=guess_mode,
|
| 1459 |
+
added_cond_kwargs=controlnet_added_cond_kwargs,
|
| 1460 |
+
return_dict=False,
|
| 1461 |
+
)
|
| 1462 |
+
|
| 1463 |
+
if guess_mode and do_classifier_free_guidance:
|
| 1464 |
+
# Infered ControlNet only for the conditional batch.
|
| 1465 |
+
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
| 1466 |
+
# add 0 to the unconditional batch to keep it unchanged.
|
| 1467 |
+
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
| 1468 |
+
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
|
| 1469 |
+
|
| 1470 |
+
# predict the noise residual
|
| 1471 |
+
noise_pred = self.unet(
|
| 1472 |
+
latent_model_input,
|
| 1473 |
+
t,
|
| 1474 |
+
encoder_hidden_states=prompt_embeds_input,
|
| 1475 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1476 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1477 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1478 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1479 |
+
return_dict=False,
|
| 1480 |
+
)[0]
|
| 1481 |
+
|
| 1482 |
+
if do_classifier_free_guidance:
|
| 1483 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1484 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) * 1
|
| 1485 |
+
|
| 1486 |
+
                        # compute the previous noisy sample x_t -> x_t-1
                        self.scheduler._init_step_index(t)
                        latents_denoised_batch = self.scheduler.step(
                            noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]

                        # extract value from batch
                        for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
                            latents_denoised_batch.chunk(vb_size), batch_view
                        ):
                            value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
                            count_local[:, :, h_start:h_end, w_start:w_end] += 1

                    value_local = value_local[:, :, jitter_range: jitter_range + current_height // self.vae_scale_factor, jitter_range: jitter_range + current_width // self.vae_scale_factor]
                    count_local = count_local[:, :, jitter_range: jitter_range + current_height // self.vae_scale_factor, jitter_range: jitter_range + current_width // self.vae_scale_factor]

                    c2 = cosine_factor ** cosine_scale_2

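                    # Note: cosine_factor (computed earlier in the loop) follows a cosine schedule over the
                    # denoising steps, so c2 decays towards 0; the (1 - c2) weight below therefore shifts emphasis
                    # from the global dilated path early on to these local MultiDiffusion views in the later steps.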
                    value += value_local / count_local * (1 - c2)
                    count += torch.ones_like(value_local) * (1 - c2)

                    ############################################# Dilated Sampling #############################################

                    h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
                    w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
                    latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), 'constant', 0)

                    count_global = torch.zeros_like(latents_)
                    value_global = torch.zeros_like(latents_)

                    c3 = 0.99 * cosine_factor ** cosine_scale_3 + 1e-2
                    std_, mean_ = latents_.std(), latents_.mean()
                    latents_gaussian = gaussian_filter(latents_, kernel_size=(2 * current_scale_num - 1), sigma=sigma * c3)
                    latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_

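                    # Note: dilated sampling splits the padded latent into current_scale_num ** 2 shifted sub-grids
                    # via strided slicing below; e.g. with current_scale_num = 2, a (1, 4, H, W) latent yields four
                    # (1, 4, H/2, W/2) views at offsets (0,0), (0,1), (1,0), (1,1). The Gaussian-smoothed copy is
                    # what the UNet sees; the resulting noise predictions are re-interleaved further down before a
                    # single scheduler step on the padded latent.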
                    latents_for_view = []
                    for h in range(current_scale_num):
                        for w in range(current_scale_num):
                            latents_for_view.append(latents_[:, :, h::current_scale_num, w::current_scale_num])
                    latents_for_view = torch.cat(latents_for_view)

                    latents_for_view_gaussian = []
                    for h in range(current_scale_num):
                        for w in range(current_scale_num):
                            latents_for_view_gaussian.append(latents_gaussian[:, :, h::current_scale_num, w::current_scale_num])
                    latents_for_view_gaussian = torch.cat(latents_for_view_gaussian)

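                    # Note: the ControlNet condition image lives in pixel space, so each latent sub-grid gets its
                    # own control view below, built by padding the condition image and slicing with offsets scaled
                    # by self.vae_scale_factor (8 for SDXL), intended to keep the control input roughly aligned
                    # with the corresponding latent view.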
                    condition_image_for_view = []
                    for h in range(current_scale_num):
                        for w in range(current_scale_num):
                            condition_image_ = F.pad(condition_image, (w_pad * self.vae_scale_factor, w * self.vae_scale_factor, h_pad * self.vae_scale_factor, h * self.vae_scale_factor), 'constant', 0)
                            condition_image_for_view.append(condition_image_[0:1, :, h * self.vae_scale_factor::current_scale_num, w * self.vae_scale_factor::current_scale_num])
                    condition_image_for_view = torch.cat(condition_image_for_view)

                    vb_size = latents_for_view.size(0)

                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = latents_for_view_gaussian
                    latent_model_input = (
                        latent_model_input.repeat_interleave(2, dim=0)
                        if do_classifier_free_guidance
                        else latent_model_input
                    )
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                    condition_image_input = condition_image_for_view
                    condition_image_input = (
                        condition_image_input.repeat_interleave(2, dim=0)
                        if do_classifier_free_guidance
                        else condition_image_input
                    )

                    prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
                    add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
                    add_time_ids_input = torch.cat([add_time_ids] * vb_size)

                    added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}

                    # controlnet(s) inference
                    if guess_mode and do_classifier_free_guidance:
                        # Infer ControlNet only for the conditional batch.
                        control_model_input = latent_model_input
                        control_model_input = self.scheduler.scale_model_input(control_model_input, t)
                        controlnet_prompt_embeds = prompt_embeds_input.chunk(2)[1]
                        controlnet_added_cond_kwargs = {
                            "text_embeds": add_text_embeds_input.chunk(2)[1],
                            "time_ids": add_time_ids_input.chunk(2)[1],
                        }
                    else:
                        control_model_input = latent_model_input
                        controlnet_prompt_embeds = prompt_embeds_input
                        controlnet_added_cond_kwargs = added_cond_kwargs

                    if isinstance(controlnet_keep[i], list):
                        cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
                    else:
                        controlnet_cond_scale = controlnet_conditioning_scale
                        if isinstance(controlnet_cond_scale, list):
                            controlnet_cond_scale = controlnet_cond_scale[0]
                        cond_scale = controlnet_cond_scale * controlnet_keep[i]

                    down_block_res_samples, mid_block_res_sample = self.controlnet(
                        control_model_input,
                        t,
                        encoder_hidden_states=controlnet_prompt_embeds,
                        controlnet_cond=condition_image_input,
                        conditioning_scale=cond_scale,
                        guess_mode=guess_mode,
                        added_cond_kwargs=controlnet_added_cond_kwargs,
                        return_dict=False,
                    )

                    if guess_mode and do_classifier_free_guidance:
                        # Inferred ControlNet only for the conditional batch.
                        # To apply the output of ControlNet to both the unconditional and conditional batches,
                        # add 0 to the unconditional batch to keep it unchanged.
                        down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
                        mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])

                    # predict the noise residual
                    noise_pred = self.unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=prompt_embeds_input,
                        cross_attention_kwargs=cross_attention_kwargs,
                        down_block_additional_residuals=down_block_res_samples,
                        mid_block_additional_residual=mid_block_res_sample,
                        added_cond_kwargs=added_cond_kwargs,
                        return_dict=False,
                    )[0]

                    if do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
                        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                    # extract value from batch
                    for h in range(current_scale_num):
                        for w in range(current_scale_num):
                            noise_pred_ = noise_pred.chunk(vb_size)[h * current_scale_num + w]
                            value_global[:, :, h::current_scale_num, w::current_scale_num] += noise_pred_
                            count_global[:, :, h::current_scale_num, w::current_scale_num] += 1

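                    # Note: value_global now holds a noise prediction for the whole padded latent, re-interleaved
                    # from the current_scale_num ** 2 dilated sub-grids, so the single scheduler step below advances
                    # the global view in one shot.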
                    # compute the previous noisy sample x_t -> x_t-1
                    self.scheduler._init_step_index(t)
                    value_global = self.scheduler.step(
                        value_global, t, latents_, **extra_step_kwargs, return_dict=False)[0]

                    c2 = cosine_factor ** cosine_scale_2

                    value_global = value_global[:, :, h_pad:, w_pad:]

                    value += value_global * c2
                    count += torch.ones_like(value_global) * c2

                    ###########################################################

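                    # Note: the line below fuses the two paths; value/count hold the local MultiDiffusion views
                    # weighted by (1 - c2) plus the global dilated result weighted by c2, so their ratio is the
                    # cosine-weighted average of both predictions wherever at least one view contributed.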
                    latents = torch.where(count > 0, value / count, value)

                    # call the callback, if provided
                    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                        progress_bar.update()
                        if callback is not None and i % callback_steps == 0:
                            step_idx = i // getattr(self.scheduler, "order", 1)
                            callback(step_idx, t, latents)

            #########################################################################################################################################

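            # Note: after denoising, the latent statistics are matched back to anchor_mean/anchor_std (captured
            # earlier, presumably from the initial low-resolution phase), which counteracts distribution drift
            # introduced by the repeated view averaging above.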
            latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
            if self.lowvram:
                latents = latents.cpu()
                torch.cuda.empty_cache()
            if not output_type == "latent":
                # make sure the VAE is in float32 mode, as it overflows in float16
                needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

                if self.lowvram:
                    needs_upcasting = False  # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
                    self.unet.cpu()
                    self.vae.to(device)

                if needs_upcasting:
                    self.upcast_vae()
                    latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

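                # Note: at the later phases the latent can correspond to an image several thousand pixels on a side,
                # so multi_decoder routes decoding through the pipeline's tiled_decode helper, which decodes the
                # latent in smaller windows instead of one full-size VAE pass that could exhaust GPU memory.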
print("### Phase {} Decoding ###".format(current_scale_num))
|
| 1670 |
+
if multi_decoder:
|
| 1671 |
+
image = self.tiled_decode(latents, current_height, current_width)
|
| 1672 |
+
else:
|
| 1673 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1674 |
+
|
| 1675 |
+
# cast back to fp16 if needed
|
| 1676 |
+
if needs_upcasting:
|
| 1677 |
+
self.vae.to(dtype=torch.float16)
|
| 1678 |
+
else:
|
| 1679 |
+
image = latents
|
| 1680 |
+
|
| 1681 |
+
if not output_type == "latent":
|
| 1682 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1683 |
+
if show_image:
|
| 1684 |
+
plt.figure(figsize=(10, 10))
|
| 1685 |
+
plt.imshow(image[0])
|
| 1686 |
+
plt.axis('off') # Turn off axis numbers and ticks
|
| 1687 |
+
plt.show()
|
| 1688 |
+
output_images.append(image[0])
|
| 1689 |
+
|
| 1690 |
+
# Offload all models
|
| 1691 |
+
self.maybe_free_model_hooks()
|
| 1692 |
+
|
| 1693 |
+
return output_images
|
| 1694 |
+
|
| 1695 |
+
# Overrride to properly handle the loading and unloading of the additional text encoder.
|
| 1696 |
+
def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
|
| 1697 |
+
# We could have accessed the unet config from `lora_state_dict()` too. We pass
|
| 1698 |
+
# it here explicitly to be able to tell that it's coming from an SDXL
|
| 1699 |
+
# pipeline.
|
| 1700 |
+
|
| 1701 |
+
# Remove any existing hooks.
|
| 1702 |
+
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
| 1703 |
+
from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
|
| 1704 |
+
else:
|
| 1705 |
+
raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
|
| 1706 |
+
|
| 1707 |
+
is_model_cpu_offload = False
|
| 1708 |
+
is_sequential_cpu_offload = False
|
| 1709 |
+
recursive = False
|
| 1710 |
+
for _, component in self.components.items():
|
| 1711 |
+
if isinstance(component, torch.nn.Module):
|
| 1712 |
+
if hasattr(component, "_hf_hook"):
|
| 1713 |
+
is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
|
| 1714 |
+
is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
|
| 1715 |
+
logger.info(
|
| 1716 |
+
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
|
| 1717 |
+
)
|
| 1718 |
+
recursive = is_sequential_cpu_offload
|
| 1719 |
+
remove_hook_from_module(component, recurse=recursive)
|
| 1720 |
+
state_dict, network_alphas = self.lora_state_dict(
|
| 1721 |
+
pretrained_model_name_or_path_or_dict,
|
| 1722 |
+
unet_config=self.unet.config,
|
| 1723 |
+
**kwargs,
|
| 1724 |
+
)
|
| 1725 |
+
self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
|
| 1726 |
+
|
| 1727 |
+
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
|
| 1728 |
+
if len(text_encoder_state_dict) > 0:
|
| 1729 |
+
self.load_lora_into_text_encoder(
|
| 1730 |
+
text_encoder_state_dict,
|
| 1731 |
+
network_alphas=network_alphas,
|
| 1732 |
+
text_encoder=self.text_encoder,
|
| 1733 |
+
prefix="text_encoder",
|
| 1734 |
+
lora_scale=self.lora_scale,
|
| 1735 |
+
)
|
| 1736 |
+
|
| 1737 |
+
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
|
| 1738 |
+
if len(text_encoder_2_state_dict) > 0:
|
| 1739 |
+
self.load_lora_into_text_encoder(
|
| 1740 |
+
text_encoder_2_state_dict,
|
| 1741 |
+
network_alphas=network_alphas,
|
| 1742 |
+
text_encoder=self.text_encoder_2,
|
| 1743 |
+
prefix="text_encoder_2",
|
| 1744 |
+
lora_scale=self.lora_scale,
|
| 1745 |
+
)
|
| 1746 |
+
|
| 1747 |
+
# Offload back.
|
| 1748 |
+
if is_model_cpu_offload:
|
| 1749 |
+
self.enable_model_cpu_offload()
|
| 1750 |
+
elif is_sequential_cpu_offload:
|
| 1751 |
+
self.enable_sequential_cpu_offload()
|
| 1752 |
+
|
| 1753 |
+
@classmethod
|
| 1754 |
+
def save_lora_weights(
|
| 1755 |
+
self,
|
| 1756 |
+
save_directory: Union[str, os.PathLike],
|
| 1757 |
+
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1758 |
+
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1759 |
+
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1760 |
+
is_main_process: bool = True,
|
| 1761 |
+
weight_name: str = None,
|
| 1762 |
+
save_function: Callable = None,
|
| 1763 |
+
safe_serialization: bool = True,
|
| 1764 |
+
):
|
| 1765 |
+
state_dict = {}
|
| 1766 |
+
|
| 1767 |
+
def pack_weights(layers, prefix):
|
| 1768 |
+
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
|
| 1769 |
+
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
|
| 1770 |
+
return layers_state_dict
|
| 1771 |
+
|
| 1772 |
+
if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
|
| 1773 |
+
raise ValueError(
|
| 1774 |
+
"You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
|
| 1775 |
+
)
|
| 1776 |
+
|
| 1777 |
+
if unet_lora_layers:
|
| 1778 |
+
state_dict.update(pack_weights(unet_lora_layers, "unet"))
|
| 1779 |
+
|
| 1780 |
+
if text_encoder_lora_layers and text_encoder_2_lora_layers:
|
| 1781 |
+
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
|
| 1782 |
+
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
|
| 1783 |
+
|
| 1784 |
+
self.write_lora_layers(
|
| 1785 |
+
state_dict=state_dict,
|
| 1786 |
+
save_directory=save_directory,
|
| 1787 |
+
is_main_process=is_main_process,
|
| 1788 |
+
weight_name=weight_name,
|
| 1789 |
+
save_function=save_function,
|
| 1790 |
+
safe_serialization=safe_serialization,
|
| 1791 |
+
)
|
| 1792 |
+
|
| 1793 |
+
def _remove_text_encoder_monkey_patch(self):
|
| 1794 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
|
| 1795 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
|
requirements.txt
ADDED
@@ -0,0 +1,11 @@
diffusers~=0.21.4
torch~=2.1.0
scipy~=1.11.3
omegaconf~=2.3.0
accelerate~=0.23.0
transformers~=4.34.0
tqdm
einops
matplotlib
gradio
gradio_imageslider
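
For reference, a minimal usage sketch under stated assumptions: the class name DemoFusionSDXLControlNetPipeline is assumed here (see pipeline_demofusion_sdxl_controlnet.py and gradio_demo_controlnet.py in this commit for the actual name and call signature), and the model ids below are the standard SDXL base and SDXL Canny ControlNet checkpoints.

import torch
from diffusers import ControlNetModel
from pipeline_demofusion_sdxl_controlnet import DemoFusionSDXLControlNetPipeline  # class name assumed

# Load an SDXL ControlNet and attach it to the DemoFusion ControlNet pipeline (fp16 to save memory).
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
)
pipe = DemoFusionSDXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# The prompt, control image, target resolution and the cosine_scale_* / sigma knobs are then passed to
# pipe(...); gradio_demo_controlnet.py shows the exact keyword arguments expected by __call__.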