JoelWester committed
Commit 53bfa33 · verified · 1 Parent(s): 24c8843

Upload 200 files

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +8 -0
  2. LICENSE +661 -0
  3. README.md +272 -12
  4. demo.ipynb +0 -0
  5. environment.yml +18 -0
  6. examples/referencenet/infer_referencenet.py +277 -0
  7. examples/referencenet/train_referencenet.py +1304 -0
  8. my_dataset/test/00482.png +3 -0
  9. my_dataset/test/14795.png +3 -0
  10. my_dataset/test/friends.jpg +3 -0
  11. my_dataset/train/celeb/fake/18147_06771-01758_01758.png +3 -0
  12. my_dataset/train/celeb/real/01758_01758.png +3 -0
  13. my_dataset/train/celeb/real/01758_09704.png +3 -0
  14. my_dataset/train/celeb/real/18147_06771.png +3 -0
  15. my_dataset/train/train.jsonl +2 -0
  16. my_dataset/train_dataset_loading_script.py +143 -0
  17. src/diffusers/__init__.py +758 -0
  18. src/diffusers/commands/__init__.py +27 -0
  19. src/diffusers/commands/diffusers_cli.py +43 -0
  20. src/diffusers/commands/env.py +84 -0
  21. src/diffusers/commands/fp16_safetensors.py +132 -0
  22. src/diffusers/configuration_utils.py +699 -0
  23. src/diffusers/dependency_versions_check.py +34 -0
  24. src/diffusers/dependency_versions_table.py +46 -0
  25. src/diffusers/experimental/README.md +5 -0
  26. src/diffusers/experimental/__init__.py +1 -0
  27. src/diffusers/experimental/rl/__init__.py +1 -0
  28. src/diffusers/experimental/rl/value_guided_sampling.py +154 -0
  29. src/diffusers/image_processor.py +888 -0
  30. src/diffusers/loaders/__init__.py +82 -0
  31. src/diffusers/loaders/ip_adapter.py +159 -0
  32. src/diffusers/loaders/lora.py +1553 -0
  33. src/diffusers/loaders/lora_conversion_utils.py +284 -0
  34. src/diffusers/loaders/single_file.py +637 -0
  35. src/diffusers/loaders/textual_inversion.py +455 -0
  36. src/diffusers/loaders/unet.py +828 -0
  37. src/diffusers/loaders/utils.py +59 -0
  38. src/diffusers/models/README.md +3 -0
  39. src/diffusers/models/__init__.py +94 -0
  40. src/diffusers/models/activations.py +123 -0
  41. src/diffusers/models/adapter.py +584 -0
  42. src/diffusers/models/attention.py +668 -0
  43. src/diffusers/models/attention_flax.py +494 -0
  44. src/diffusers/models/attention_processor.py +0 -0
  45. src/diffusers/models/autoencoders/__init__.py +5 -0
  46. src/diffusers/models/autoencoders/autoencoder_asym_kl.py +186 -0
  47. src/diffusers/models/autoencoders/autoencoder_kl.py +489 -0
  48. src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +402 -0
  49. src/diffusers/models/autoencoders/autoencoder_tiny.py +345 -0
  50. src/diffusers/models/autoencoders/consistency_decoder_vae.py +437 -0
.gitattributes CHANGED
@@ -41,3 +41,11 @@ face_anon_simple-main/my_dataset/train/celeb/real/01758_01758.png filter=lfs diff=lfs merge=lfs -text
face_anon_simple-main/my_dataset/train/celeb/real/01758_09704.png filter=lfs diff=lfs merge=lfs -text
face_anon_simple-main/my_dataset/train/celeb/real/18147_06771.png filter=lfs diff=lfs merge=lfs -text
face_anon_simple-main/teaser.jpg filter=lfs diff=lfs merge=lfs -text
+ my_dataset/test/00482.png filter=lfs diff=lfs merge=lfs -text
+ my_dataset/test/14795.png filter=lfs diff=lfs merge=lfs -text
+ my_dataset/test/friends.jpg filter=lfs diff=lfs merge=lfs -text
+ my_dataset/train/celeb/fake/18147_06771-01758_01758.png filter=lfs diff=lfs merge=lfs -text
+ my_dataset/train/celeb/real/01758_01758.png filter=lfs diff=lfs merge=lfs -text
+ my_dataset/train/celeb/real/01758_09704.png filter=lfs diff=lfs merge=lfs -text
+ my_dataset/train/celeb/real/18147_06771.png filter=lfs diff=lfs merge=lfs -text
+ teaser.jpg filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+ software and other kinds of works, specifically designed to ensure
+ cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ our General Public Licenses are intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+ with two steps: (1) assert copyright on the software, and (2) offer
+ you this License which gives you legal permission to copy, distribute
+ and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+ improvements made in alternate versions of the program, if they
+ receive widespread use, become available for other developers to
+ incorporate. Many developers of free software are heartened and
+ encouraged by the resulting cooperation. However, in the case of
+ software used on network servers, this result may fail to come about.
+ The GNU General Public License permits making a modified version and
+ letting the public access it on a server without ever releasing its
+ source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ ensure that, in such cases, the modified source code becomes available
+ to the community. It requires the operator of a network server to
+ provide the source code of the modified version running there to the
+ users of that server. Therefore, public use of a modified version, on
+ a publicly accessible server, gives the public access to the source
+ code of the modified version.
+
+ An older license, called the Affero General Public License and
+ published by Affero, was designed to accomplish similar goals. This is
+ a different license, not a version of the Affero GPL, but Affero has
+ released a new version of the Affero GPL which permits relicensing under
+ this license.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+ Program, your modified version must prominently offer all users
+ interacting with it remotely through a computer network (if your version
+ supports such interaction) an opportunity to receive the Corresponding
+ Source of your version by providing access to the Corresponding Source
+ from a network server at no charge, through some standard or customary
+ means of facilitating copying of software. This Corresponding Source
+ shall include the Corresponding Source for any work covered by version 3
+ of the GNU General Public License that is incorporated pursuant to the
+ following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the work with which it is combined will remain governed by version
+ 3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU Affero General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU Affero General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU Affero General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU Affero General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+ network, you should also make sure that it provides a way for users to
+ get its source. For example, if your program is a web application, its
+ interface could display a "Source" link that leads users to an archive
+ of the code. There are many ways you could offer source, and different
+ solutions will be better for different programs; see section 13 for the
+ specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU AGPL, see
+ <https://www.gnu.org/licenses/>.
README.md CHANGED
@@ -1,12 +1,272 @@
- ---
- title: Anonymizeface
- emoji: 🐠
- colorFrom: green
- colorTo: pink
- sdk: gradio
- sdk_version: 5.49.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Face Anonymization Made Simple (WACV 2025)
+
+ [arXiv](http://arxiv.org/abs/2411.00762)
+
+ ![teaser](teaser.jpg)
+
+ Our face anonymization technique preserves the original facial expressions, head positioning, eye direction, and background elements, effectively masking identity while retaining other crucial details. The anonymized face blends seamlessly into its original photograph, making it ideal for diverse real-world applications.
+
+ ## Setup
+
+ 1. Clone the repository.
+
+ ```bash
+ git clone https://github.com/hanweikung/face_anon_simple.git
+ ```
+
+ 2. Create a Python environment from the `environment.yml` file.
+
+ ```bash
+ conda env create -f environment.yml
+ ```
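+
+ 3. Activate the environment (the name comes from the `name:` field in `environment.yml`).
+
+ ```bash
+ conda activate face-anon-simple
+ ```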
+
+ ## Usage
+
+ 1. Import the required modules.
+
+ ```python
+ import torch
+ from transformers import CLIPImageProcessor, CLIPVisionModel
+
+ from diffusers import AutoencoderKL, DDPMScheduler
+ from diffusers.utils import load_image
+ from src.diffusers.models.referencenet.referencenet_unet_2d_condition import (
+     ReferenceNetModel,
+ )
+ from src.diffusers.models.referencenet.unet_2d_condition import UNet2DConditionModel
+ from src.diffusers.pipelines.referencenet.pipeline_referencenet import (
+     StableDiffusionReferenceNetPipeline,
+ )
+ ```
+
+ 2. Create and load the models.
+
+ ```python
+ face_model_id = "hkung/face-anon-simple"
+ clip_model_id = "openai/clip-vit-large-patch14"
+ sd_model_id = "stabilityai/stable-diffusion-2-1"
+
+ unet = UNet2DConditionModel.from_pretrained(
+     face_model_id, subfolder="unet", use_safetensors=True
+ )
+ referencenet = ReferenceNetModel.from_pretrained(
+     face_model_id, subfolder="referencenet", use_safetensors=True
+ )
+ conditioning_referencenet = ReferenceNetModel.from_pretrained(
+     face_model_id, subfolder="conditioning_referencenet", use_safetensors=True
+ )
+ vae = AutoencoderKL.from_pretrained(sd_model_id, subfolder="vae", use_safetensors=True)
+ scheduler = DDPMScheduler.from_pretrained(
+     sd_model_id, subfolder="scheduler", use_safetensors=True
+ )
+ feature_extractor = CLIPImageProcessor.from_pretrained(
+     clip_model_id, use_safetensors=True
+ )
+ image_encoder = CLIPVisionModel.from_pretrained(clip_model_id, use_safetensors=True)
+
+ pipe = StableDiffusionReferenceNetPipeline(
+     unet=unet,
+     referencenet=referencenet,
+     conditioning_referencenet=conditioning_referencenet,
+     vae=vae,
+     feature_extractor=feature_extractor,
+     image_encoder=image_encoder,
+     scheduler=scheduler,
+ )
+ pipe = pipe.to("cuda")
+
+ generator = torch.manual_seed(1)
+ ```
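+
+ If GPU memory is tight, you can likely load the models in half precision, following the usual Diffusers convention (a sketch, assuming the custom ReferenceNet classes inherit the standard `from_pretrained` behavior from Diffusers):
+
+ ```python
+ # Half-precision variant of the loading step above (assumption: torch_dtype is
+ # accepted here just as in stock Diffusers models).
+ unet = UNet2DConditionModel.from_pretrained(
+     face_model_id, subfolder="unet", use_safetensors=True, torch_dtype=torch.float16
+ )
+ referencenet = ReferenceNetModel.from_pretrained(
+     face_model_id, subfolder="referencenet", use_safetensors=True, torch_dtype=torch.float16
+ )
+ # ...and likewise for the remaining components before building the pipeline.
+ ```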
+
+ ### Anonymize images with a single aligned face
+
+ Create an anonymized version of an image when it contains a single face that has already been aligned similarly to the faces in the [FFHQ](https://github.com/NVlabs/ffhq-dataset) or [CelebA-HQ](https://github.com/tkarras/progressive_growing_of_gans) datasets.
+
+ ```python
+ # get an input image for anonymization
+ original_image = load_image("my_dataset/test/14795.png")
+
+ # generate an image that anonymizes faces
+ anon_image = pipe(
+     source_image=original_image,
+     conditioning_image=original_image,
+     num_inference_steps=200,
+     guidance_scale=4.0,
+     generator=generator,
+     anonymization_degree=1.25,
+     width=512,
+     height=512,
+ ).images[0]
+ anon_image.save("anon.png")
+ ```
+
+ ### Anonymize images with one or multiple unaligned faces
+
+ Create an anonymized version of an image that contains one or more unaligned faces.
+
+ ```python
+ import face_alignment
+ from utils.anonymize_faces_in_image import anonymize_faces_in_image
+
+ # get an input image for anonymization
+ original_image = load_image("my_dataset/test/friends.jpg")
+
+ # SFD (likely best results, but slower)
+ fa = face_alignment.FaceAlignment(
+     face_alignment.LandmarksType.TWO_D, face_detector="sfd"
+ )
+
+ # generate an image that anonymizes faces
+ anon_image = anonymize_faces_in_image(
+     image=original_image,
+     face_alignment=fa,
+     pipe=pipe,
+     generator=generator,
+     face_image_size=512,
+     num_inference_steps=25,
+     guidance_scale=4.0,
+     anonymization_degree=1.25,
+ )
+ anon_image.save("anon.png")
+ ```
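+
+ The `face_detector` choice trades speed for accuracy. The face-alignment library also ships lighter detectors, so a faster variant is possible; this is a sketch based on that library's options rather than something this repository documents:
+
+ ```python
+ # BlazeFace: faster face detection, typically less robust than SFD
+ fa = face_alignment.FaceAlignment(
+     face_alignment.LandmarksType.TWO_D, face_detector="blazeface"
+ )
+ ```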
+
+ ### Swap faces between two images
+
+ Create an image that swaps faces between a source image and a conditioning (driving) image.
+
+ ```python
+ # get source and conditioning (driving) images for face swap
+ source_image = load_image("my_dataset/test/00482.png")
+ conditioning_image = load_image("my_dataset/test/14795.png")
+
+ # generate an image that swaps faces
+ swap_image = pipe(
+     source_image=source_image,
+     conditioning_image=conditioning_image,
+     num_inference_steps=200,
+     guidance_scale=4.0,
+     generator=generator,
+     anonymization_degree=0.0,
+     width=512,
+     height=512,
+ ).images[0]
+ swap_image.save("swap.png")
+ ```
+
+ We also provide the [demo.ipynb](https://github.com/hanweikung/face_anon_simple/blob/main/demo.ipynb) notebook, which walks you through the steps above.
+
+ ### Note on image resolution
+
+ Our model was trained on 512x512 images. To ensure correct results, always set `width=512` and `height=512` when calling `pipe`, and `face_image_size=512` in the `anonymize_faces_in_image` function. This ensures that input images are resized correctly for the diffusion pipeline. If you're using a model trained at a different size, such as 768x768, adjust these parameters accordingly.
+
+ ## Training
+
+ Our model learns face swapping for anonymization. You can train it using your own face-swapped images.
+
+ ### Training data structure
+
+ Sample training data is available in the `my_dataset/train` directory. Real images are stored in the `real` subdirectory, while face-swapped images are stored in the `fake` subdirectory.
+
+ ```bash
+ my_dataset/
+ ├── train
+ │   ├── celeb
+ │   │   ├── fake
+ │   │   │   └── 18147_06771-01758_01758.png
+ │   │   └── real
+ │   │       ├── 01758_01758.png
+ │   │       ├── 01758_09704.png
+ │   │       └── 18147_06771.png
+ │   └── train.jsonl
+ └── train_dataset_loading_script.py
+ ```
+
+ ### Data loading and configuration
+
+ Training data is loaded using a JSON lines file (`my_dataset/train/train.jsonl`) and a dataset loading script (`my_dataset/train_dataset_loading_script.py`), both provided as examples.
+
+ The JSON lines file includes two sample entries specifying the source image, the conditioning (driving) image, and the ground-truth image, with file paths based on the sample training data. Adjust these paths to match your own data:
+
+ ```json
+ {"source_image": "celeb/real/18147_06771.png", "conditioning_image": "celeb/real/01758_01758.png", "ground_truth": "celeb/fake/18147_06771-01758_01758.png"}
+ {"source_image": "celeb/real/01758_09704.png", "conditioning_image": "celeb/fake/18147_06771-01758_01758.png", "ground_truth": "celeb/real/01758_01758.png"}
+ ```
+
+ To simulate face-swapping behavior, the source and conditioning images should have different identities. The source and ground truth should share the same identity, while the conditioning and ground truth should share the same pose and expression. When no actual ground truth is available (e.g., the first entry), the face-swapped image serves as the ground truth. When a ground-truth image is available (e.g., the second entry), the swapped version of the ground truth is used as the conditioning image.
+
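+ Before training on your own data, it can help to check these constraints mechanically. A minimal sketch using only the Python standard library (the paths assume the sample layout above):
+
+ ```python
+ import json
+ from pathlib import Path
+
+ images_dir = Path("my_dataset/train")  # dataset root that holds train.jsonl
+ with open(images_dir / "train.jsonl") as f:
+     for i, line in enumerate(f, start=1):
+         entry = json.loads(line)
+         # every entry must name all three images, and each file must exist
+         for key in ("source_image", "conditioning_image", "ground_truth"):
+             assert key in entry, f"entry {i}: missing {key}"
+             assert (images_dir / entry[key]).is_file(), f"entry {i}: {entry[key]} not found"
+ ```
+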
+ Our dataset loading script follows [Hugging Face's documentation](https://huggingface.co/docs/datasets/en/dataset_script). Please update the `metadata_path` and `images_dir` file paths in the script to match your dataset:
+
+ ```python
+ _URLS = {
+     "metadata_path": "/path/to/face_anon_simple/my_dataset/train/train.jsonl",
+     "images_dir": "/path/to/face_anon_simple/my_dataset/train/",
+ }
+ ```
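+
+ To verify that the script and paths resolve, you can load the dataset directly (a sketch; the `train` split name and script-based loading are assumptions based on the sample setup and the pinned `datasets==3.1.0`):
+
+ ```python
+ from datasets import load_dataset
+
+ # Load through the custom loading script and inspect one example.
+ ds = load_dataset("./my_dataset/train_dataset_loading_script.py", split="train")
+ print(ds[0])
+ ```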
+
+ ### Training script setup
+
+ A bash script, `train_referencenet.sh`, containing the training command is provided. Update the file paths and adjust the parameters as needed:
+
+ ```bash
+ export MODEL_DIR="/path/to/stable-diffusion-2-1/"
+ export CLIP_MODEL_DIR="/path/to/clip-vit-large-patch14/"
+ export OUTPUT_DIR="./runs/celeb/"
+ export NCCL_P2P_DISABLE=1
+ export DATASET_LOADING_SCRIPT_PATH="./my_dataset/train_dataset_loading_script.py"
+ export TORCH_DISTRIBUTED_DEBUG="INFO"
+ export WANDB__SERVICE_WAIT="300"
+
+ accelerate launch --main_process_port=29500 --mixed_precision="fp16" --multi_gpu -m examples.referencenet.train_referencenet \
+     --pretrained_model_name_or_path=$MODEL_DIR \
+     --pretrained_clip_model_name_or_path=$CLIP_MODEL_DIR \
+     --output_dir=$OUTPUT_DIR \
+     --dataset_loading_script_path=$DATASET_LOADING_SCRIPT_PATH \
+     --resolution=512 \
+     --learning_rate=1e-5 \
+     --validation_source_image "./my_dataset/test/00482.png" \
+     --validation_conditioning_image "./my_dataset/test/14795.png" \
+     --train_batch_size=1 \
+     --tracker_project_name="celeb" \
+     --checkpointing_steps=10000 \
+     --num_validation_images=1 \
+     --validation_steps=1000 \
+     --mixed_precision="fp16" \
+     --gradient_checkpointing \
+     --use_8bit_adam \
+     --enable_xformers_memory_efficient_attention \
+     --gradient_accumulation_steps=8 \
+     --resume_from_checkpoint="latest" \
+     --set_grads_to_none \
+     --max_train_steps=60000 \
+     --conditioning_dropout_prob=0.1 \
+     --seed=0 \
+     --report_to="wandb" \
+     --random_flip \
+     --dataloader_num_workers=8
+ ```
+
+ To train your model, run:
+
+ ```bash
+ bash train_referencenet.sh
+ ```
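+
+ The provided command assumes multi-GPU training. On a single GPU, drop the `--multi_gpu` flag; a trimmed sketch with only the core arguments (an adaptation, not part of the provided script):
+
+ ```bash
+ accelerate launch --mixed_precision="fp16" -m examples.referencenet.train_referencenet \
+     --pretrained_model_name_or_path=$MODEL_DIR \
+     --pretrained_clip_model_name_or_path=$CLIP_MODEL_DIR \
+     --output_dir=$OUTPUT_DIR \
+     --dataset_loading_script_path=$DATASET_LOADING_SCRIPT_PATH \
+     --resolution=512 \
+     --train_batch_size=1 \
+     --mixed_precision="fp16"
+ ```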
+
+ ## Test images
+
+ In our paper, we selected 1,000 images each from [CelebA-HQ](https://github.com/tkarras/progressive_growing_of_gans) and [FFHQ](https://github.com/NVlabs/ffhq-dataset) for quantitative analysis. The list of test images can be found in our [Hugging Face Hub repository](https://huggingface.co/datasets/hkung/face-anon-simple-dataset).
+
+ ## Citation
+
+ ```bibtex
+ @InProceedings{Kung_2025_WACV,
+     author    = {Kung, Han-Wei and Varanka, Tuomas and Saha, Sanjay and Sim, Terence and Sebe, Nicu},
+     title     = {Face Anonymization Made Simple},
+     booktitle = {Proceedings of the Winter Conference on Applications of Computer Vision (WACV)},
+     month     = {February},
+     year      = {2025},
+     pages     = {1040-1050}
+ }
+ ```
+
+ ## Acknowledgements
+
+ This work is built upon the [Diffusers](https://github.com/huggingface/diffusers) project. The [face extractor](https://github.com/hanweikung/face_anon_simple/blob/main/utils/extractor.py) is adapted from [DeepFaceLab](https://github.com/iperov/DeepFaceLab/blob/master/mainscripts/Extractor.py).
demo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
environment.yml ADDED
@@ -0,0 +1,18 @@
+ name: face-anon-simple
+ dependencies:
+   - python=3.8.18
+   - pip
+   - pip:
+       - torch==2.1
+       - torchvision==0.16.0
+       - huggingface_hub<0.26.0
+       - diffusers==0.25.1
+       - transformers==4.46.1
+       - accelerate==1.0.1
+       - datasets==3.1.0
+       - xformers==0.0.22.post7
+       - bitsandbytes==0.44.1
+       - ipykernel==6.29.5
+       - face-alignment==1.4.1
+       - albumentations==1.4.1
+       - wandb==0.16.3
examples/referencenet/infer_referencenet.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ from pathlib import Path
4
+
5
+ import numpy as np
6
+ import torch
7
+ import torchvision.transforms.v2 as transforms_v2
8
+ from PIL import Image
9
+ from transformers import CLIPImageProcessor, CLIPVisionModel
10
+ from tqdm import tqdm
11
+ from accelerate import Accelerator
12
+
13
+ from datasets import load_dataset
14
+ from diffusers import AutoencoderKL, DDPMScheduler
15
+ from src.diffusers.models.referencenet.unet_2d_condition import UNet2DConditionModel
16
+ from src.diffusers.models.referencenet.referencenet_unet_2d_condition import ReferenceNetModel
17
+ from src.diffusers.pipelines.referencenet.pipeline_referencenet import StableDiffusionReferenceNetPipeline
18
+
19
+
20
+ def parse_args():
21
+ parser = argparse.ArgumentParser(description="Inference")
22
+ parser.add_argument(
23
+ "--pretrained_model_name_or_path",
24
+ type=str,
25
+ default="stabilityai/stable-diffusion-2-1",
26
+ required=False,
27
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
28
+ )
29
+ parser.add_argument(
30
+ "--pretrained_clip_model_name_or_path",
31
+ type=str,
32
+ default="openai/clip-vit-large-patch14",
33
+ required=False,
34
+ help="Path to pretrained CLIP model or model identifier from huggingface.co/models.",
35
+ )
36
+ parser.add_argument(
37
+ "--model_path",
38
+ type=str,
39
+ default=None,
40
+ required=True,
41
+ help="Path to the model trained by yourself",
42
+ )
43
+ parser.add_argument(
44
+ "--dataset_loading_script_path",
45
+ type=str,
46
+ default=None,
47
+ required=True,
48
+ help="Path to the dataset loading script file",
49
+ )
50
+ parser.add_argument(
51
+ "--output_dir",
52
+ type=str,
53
+ default="./test-infer/",
54
+ help="The output directory where predictions are saved",
55
+ )
56
+ parser.add_argument(
57
+ "--resolution",
58
+ type=int,
59
+ default=512,
60
+ help="The resolution for input images, all the images in the test dataset will be resized to this resolution",
61
+ )
62
+ parser.add_argument("--guidance_scale", type=float, default=2.5)
63
+ parser.add_argument("--num_inference_steps", type=int, default=50)
64
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible inference.")
65
+ parser.add_argument(
66
+ "--anonymization_degree_start",
67
+ type=float,
68
+ default=0.0,
69
+ help="Increasing the anonymization scale value encourages the model to produce images that diverge significantly from the conditioning image.",
70
+ )
71
+ parser.add_argument("--anonymization_degree_end", type=float, default=0.0)
72
+ parser.add_argument("--num_anonymization_degrees", type=int, default=1)
73
+ parser.add_argument(
74
+ "--center_crop",
75
+ default=False,
76
+ action="store_true",
77
+ help=(
78
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
79
+ " cropped. The images will be resized to the resolution first before cropping."
80
+ ),
81
+ )
82
+ parser.add_argument(
83
+ "--max_test_samples",
84
+ type=int,
85
+ default=None,
86
+ help="Truncate the number of test examples to this value if set.",
87
+ )
88
+ parser.add_argument(
89
+ "--vis_input",
90
+ action="store_true",
91
+ help="If set, save the input and generated images together as a single output image for easy visualization",
92
+ )
93
+ parser.add_argument(
94
+ "--test_batch_size",
95
+ type=int,
96
+ default=1,
97
+ help=(
98
+ "The batch size for the test dataloader per device should be set to 1."
99
+ "This setting does not affect performance, no matter how large the batch size is."
100
+ ),
101
+ )
102
+ parser.add_argument(
103
+ "--dataloader_num_workers",
104
+ type=int,
105
+ default=0,
106
+ help=(
107
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
108
+ ),
109
+ )
110
+
111
+ args = parser.parse_args()
112
+ return args
113
+
114
+
115
+ def combine_images(images):
116
+ # Get the total width and maximum height of all images
117
+ total_width = sum(img.width for img in images)
118
+ max_height = max(img.height for img in images)
119
+
120
+ # Create a new image with the combined width and maximum height
121
+ new_image = Image.new("RGB", (total_width, max_height))
122
+
123
+ # Paste each image onto the new image horizontally
124
+ x_offset = 0
125
+ for img in images:
126
+ new_image.paste(img, (x_offset, 0))
127
+ x_offset += img.width
128
+
129
+ return new_image
130
+
131
+
132
+ def make_test_dataset(args):
133
+ ds = load_dataset(path=args.dataset_loading_script_path, split="test", trust_remote_code=True)
134
+
135
+ # Preprocessing the datasets.
136
+ image_transforms = transforms_v2.Compose(
137
+ [
138
+ transforms_v2.Resize(args.resolution, interpolation=transforms_v2.InterpolationMode.BILINEAR),
139
+ transforms_v2.CenterCrop(args.resolution)
140
+ if args.center_crop
141
+ else transforms_v2.RandomCrop(args.resolution),
142
+ ]
143
+ )
144
+
145
+ def preprocess_test(examples):
146
+ images = [image.convert("RGB") for image in examples["source_image"]]
147
+ images = [image_transforms(image) for image in images]
148
+
149
+ conditioning_images = [image.convert("RGB") for image in examples["conditioning_image"]]
150
+ conditioning_images = [image_transforms(image) for image in conditioning_images]
151
+
152
+ examples["source_image"] = images
153
+ examples["conditioning_image"] = conditioning_images
154
+
155
+ return examples
156
+
157
+ if args.max_test_samples is not None:
158
+ max_test_samples = min(args.max_test_samples, len(ds))
159
+ ds = ds.select(range(max_test_samples))
160
+
161
+ test_dataset = ds.with_transform(preprocess_test)
162
+ return test_dataset
163
+
164
+
165
+ def collate_fn(examples):
166
+ source_images = [example["source_image"] for example in examples]
167
+ conditioning_images = [example["conditioning_image"] for example in examples]
168
+ source_image_paths = [example["source_image_path"] for example in examples]
169
+ conditioning_image_paths = [example["conditioning_image_path"] for example in examples]
170
+
171
+ return {
172
+ "source_images": source_images,
173
+ "conditioning_images": conditioning_images,
174
+ "source_image_paths": source_image_paths,
175
+ "conditioning_image_paths": conditioning_image_paths,
176
+ }
177
+
178
+
179
+ if __name__ == "__main__":
180
+ args = parse_args()
181
+
182
+ accelerator = Accelerator()
183
+ device = accelerator.device
184
+
185
+ os.makedirs(args.output_dir, exist_ok=True)
186
+
187
+ # Define the vis directory unconditionally so the `save_vis_to` path below is always valid;
+ # only create the directory on disk when --vis_input is set.
+ output_vis_dir = Path(args.output_dir, "vis")
188
+ if args.vis_input:
189
+ output_vis_dir.mkdir(parents=True, exist_ok=True)
190
+
191
+ generator = None
192
+
193
+ # create & load model
194
+ unet = UNet2DConditionModel.from_pretrained(args.model_path, subfolder="unet")
195
+ referencenet = ReferenceNetModel.from_pretrained(args.model_path, subfolder="referencenet")
196
+ conditioning_referencenet = ReferenceNetModel.from_pretrained(
197
+ args.model_path, subfolder="conditioning_referencenet"
198
+ )
199
+ vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
200
+ feature_extractor = CLIPImageProcessor.from_pretrained(args.pretrained_clip_model_name_or_path)
201
+ image_encoder = CLIPVisionModel.from_pretrained(args.pretrained_clip_model_name_or_path)
202
+ scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
203
+
204
+ pipe = StableDiffusionReferenceNetPipeline(
205
+ unet=unet,
206
+ referencenet=referencenet,
207
+ conditioning_referencenet=conditioning_referencenet,
208
+ vae=vae,
209
+ feature_extractor=feature_extractor,
210
+ image_encoder=image_encoder,
211
+ scheduler=scheduler,
212
+ )
213
+ pipe = pipe.to(device)
214
+
215
+ test_dataset = make_test_dataset(args)
216
+ test_dataloader = torch.utils.data.DataLoader(
217
+ test_dataset,
218
+ shuffle=False,
219
+ collate_fn=collate_fn,
220
+ batch_size=args.test_batch_size,
221
+ num_workers=args.dataloader_num_workers,
222
+ )
223
+ test_dataloader = accelerator.prepare(test_dataloader)
224
+
225
+ # Generate the list of evenly spaced numbers
226
+ anonymization_degrees = np.linspace(
227
+ args.anonymization_degree_start, args.anonymization_degree_end, args.num_anonymization_degrees
228
+ )
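+ # e.g., start=0.0, end=1.0, num=3 yields [0.0, 0.5, 1.0]; one output image is saved
+ # per (source, conditioning, degree) combination.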
229
+
230
+ for step, batch in enumerate(tqdm(test_dataloader)):
231
+ # Group corresponding items from each key together
232
+ grouped_items = list(
233
+ zip(
234
+ batch["source_images"],
235
+ batch["conditioning_images"],
236
+ batch["source_image_paths"],
237
+ batch["conditioning_image_paths"],
238
+ )
239
+ )
240
+
241
+ for source_image, conditioning_image, source_image_path, conditioning_image_path in grouped_items:
242
+ source_image_name = Path(source_image_path).stem
243
+ conditioning_image_name = Path(conditioning_image_path).stem
244
+
245
+ for index, anonymization_degree in enumerate(anonymization_degrees):
246
+ filename = f"{source_image_name}-{conditioning_image_name}_{index:03}.png"
247
+ save_to = Path(args.output_dir, filename)
248
+
249
+ if save_to.is_file():
250
+ continue
251
+
252
+ if args.seed is not None:
253
+ # create a generator for reproducibility; notice you don't place it on the GPU!
254
+ generator = torch.manual_seed(args.seed)
255
+
256
+ image = pipe(
257
+ source_image=source_image,
258
+ conditioning_image=conditioning_image,
259
+ height=args.resolution,
260
+ width=args.resolution,
261
+ num_inference_steps=args.num_inference_steps,
262
+ guidance_scale=args.guidance_scale,
263
+ generator=generator,
264
+ anonymization_degree=anonymization_degree,
265
+ ).images[0]
266
+
267
+ image.save(save_to)
268
+ save_vis_to = Path(output_vis_dir, filename)
269
+
270
+ if args.vis_input and not save_vis_to.is_file():
271
+ if anonymization_degree > 0.0:
272
+ # face anonymization
273
+ combined_image = combine_images([conditioning_image, image])
274
+ else:
275
+ # face swapping
276
+ combined_image = combine_images([source_image, conditioning_image, image])
277
+ combined_image.save(save_vis_to)
examples/referencenet/train_referencenet.py ADDED
@@ -0,0 +1,1304 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import logging
18
+ import math
19
+ import os
20
+ import shutil
21
+ from pathlib import Path
22
+
23
+ import accelerate
24
+ import datasets
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ import transformers
30
+ from accelerate import Accelerator
31
+ from accelerate.logging import get_logger
32
+ from accelerate.state import AcceleratorState
33
+ from accelerate.utils import ProjectConfiguration, set_seed
34
+ from datasets import load_dataset
35
+ from huggingface_hub import create_repo
36
+ from packaging import version
37
+ from torchvision import transforms
38
+ from tqdm.auto import tqdm
39
+ from transformers import CLIPImageProcessor, CLIPVisionModel
40
+ from transformers.utils import ContextManagers
41
+
42
+ import diffusers
43
+ from diffusers import AutoencoderKL, DDPMScheduler
44
+ from diffusers.optimization import get_scheduler
45
+ from diffusers.training_utils import EMAModel, compute_snr
46
+ from diffusers.utils import check_min_version, deprecate, is_wandb_available
47
+ from diffusers.utils.import_utils import is_xformers_available
48
+
49
+ from src.diffusers.models.referencenet.unet_2d_condition import UNet2DConditionModel
50
+ from src.diffusers.models.referencenet.referencenet_unet_2d_condition import ReferenceNetModel
51
+ from src.diffusers.pipelines.referencenet.pipeline_referencenet import (
52
+ StableDiffusionReferenceNetPipeline,
53
+ cat_referencenet_states,
54
+ )
55
+ from examples.referencenet.infer_referencenet import combine_images
56
+ from PIL import Image
57
+ from PIL import ImageFile
58
+
59
+ import torchvision.transforms.v2 as transforms_v2
60
+
61
+ import albumentations as A
62
+
63
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
64
+
65
+ if is_wandb_available():
66
+ import wandb
67
+
68
+
69
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
70
+ check_min_version("0.25.0.dev0")
71
+
72
+ logger = get_logger(__name__, log_level="INFO")
73
+
74
+
75
+ def torch_dfs(model: torch.nn.Module):
76
+ result = [model]
77
+ for child in model.children():
78
+ result += torch_dfs(child)
79
+ return result
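+ # Note: this returns essentially the same modules as `list(model.modules())`, which also
+ # walks the tree depth-first (and additionally de-duplicates shared submodules).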
80
+
81
+
82
+ def set_parts_of_model_for_gradient_computation(module):
83
+ # Include attention blocks in gradient computation
84
+ for attn_processor_name, attn_processor in module.attn_processors.items():
85
+ attn_module = module
86
+ for n in attn_processor_name.split(".")[:-1]:
87
+ attn_module = getattr(attn_module, n)
88
+ attn_module.requires_grad_(True)
89
+
90
+ # Include transformer blocks in gradient computation
91
+ tb_type = type(module.down_blocks[0].attentions[0].transformer_blocks[0])
92
+ attn_modules = [m for m in torch_dfs(module) if isinstance(m, tb_type)]
93
+ attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])
94
+ for attn_module in attn_modules:
+ attn_module.requires_grad_(True)
95
+
96
+ return module
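+ # Usage (mirrors the calls further below): freeze everything first, then re-enable
+ # gradients only on the attention/transformer blocks:
+ #   referencenet.requires_grad_(False)
+ #   referencenet = set_parts_of_model_for_gradient_computation(referencenet)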
97
+
98
+
99
+ def recursive_multiply(element, tensor):
100
+ if isinstance(element, (tuple, list)):
101
+ for item in element:
102
+ recursive_multiply(item, tensor)
103
+ elif torch.is_tensor(element):
104
+ # In-place multiplication
105
+ element.mul_(tensor)
106
+ else:
107
+ raise ValueError("Invalid type encountered in the element")
108
+
109
+
110
+ def log_validation(
111
+ vae, unet, feature_extractor, image_encoder, referencenet, conditioning_referencenet, args, accelerator, step
112
+ ):
113
+ logger.info("Running validation... ")
114
+ scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
115
+ pipeline = StableDiffusionReferenceNetPipeline(
116
+ unet=accelerator.unwrap_model(unet),
117
+ referencenet=accelerator.unwrap_model(referencenet),
118
+ conditioning_referencenet=accelerator.unwrap_model(conditioning_referencenet),
119
+ vae=vae,
120
+ feature_extractor=feature_extractor,
121
+ image_encoder=image_encoder,
122
+ scheduler=scheduler,
123
+ )
124
+
125
+ pipeline.set_progress_bar_config(disable=True)
126
+
127
+ if args.enable_xformers_memory_efficient_attention:
128
+ pipeline.enable_xformers_memory_efficient_attention()
129
+
130
+ if args.seed is None:
131
+ generator = None
132
+ else:
133
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
134
+
135
+ if len(args.validation_conditioning_image) == len(args.validation_source_image):
136
+ validation_conditioning_images = args.validation_conditioning_image
137
+ validation_source_images = args.validation_source_image
138
+ elif len(args.validation_conditioning_image) == 1:
139
+ validation_conditioning_images = args.validation_conditioning_image * len(args.validation_source_image)
140
+ validation_source_images = args.validation_source_image
141
+ elif len(args.validation_source_image) == 1:
142
+ validation_conditioning_images = args.validation_conditioning_image
143
+ validation_source_images = args.validation_source_image * len(args.validation_conditioning_image)
144
+ else:
145
+ raise ValueError(
146
+ "number of `args.validation_conditioning_image` and `args.validation_source_image` should be checked in `parse_args`"
147
+ )
148
+ image_logs = []
149
+
150
+ image_path = os.path.join(args.output_dir, "outputs")
151
+ if not os.path.exists(image_path):
152
+ os.makedirs(image_path)
153
+
154
+ for i, (validation_source_image, validation_conditioning_image) in enumerate(
155
+ zip(validation_source_images, validation_conditioning_images)
156
+ ):
157
+ source_image_filename_without_ext = Path(validation_source_image).stem
158
+ conditioning_image_filename_without_ext = Path(validation_conditioning_image).stem
159
+
160
+ # Source images
161
+ validation_source_image = Image.open(validation_source_image).convert("RGB")
162
+ validation_source_image = transforms_v2.Resize(size=(args.resolution, args.resolution), antialias=True)(
163
+ validation_source_image
164
+ )
165
+
166
+ # Driving images
167
+ validation_conditioning_image = Image.open(validation_conditioning_image).convert("RGB")
168
+ validation_conditioning_image = transforms_v2.Resize(size=(args.resolution, args.resolution), antialias=True)(
169
+ validation_conditioning_image
170
+ )
171
+
172
+ images = []
173
+ for n in range(args.num_validation_images):
174
+ with torch.autocast("cuda"):
175
+ image = pipeline(
176
+ source_image=validation_source_image,
177
+ conditioning_image=validation_conditioning_image,
178
+ height=args.resolution,
179
+ width=args.resolution,
180
+ num_inference_steps=200,
181
+ guidance_scale=4.0,
182
+ generator=generator,
183
+ ).images[0]
184
+
185
+ images.append(image)
186
+
187
+ combined_images = combine_images([validation_source_image, validation_conditioning_image, image])
188
+ save_to = Path(
189
+ image_path,
190
+ f"{i:03}_src_{source_image_filename_without_ext}_drv_{conditioning_image_filename_without_ext}_{n:03}.png",
191
+ )
192
+ combined_images.save(save_to, format="PNG")
193
+
194
+ image_logs.append(
195
+ {
196
+ "validation_source_image": validation_source_image,
197
+ "validation_conditioning_image": validation_conditioning_image,
198
+ "images": images,
199
+ }
200
+ )
201
+
202
+ for tracker in accelerator.trackers:
203
+ if tracker.name == "tensorboard":
204
+ for i, log in enumerate(image_logs):
205
+ images = log["images"]
206
+ validation_source_image = log["validation_source_image"]
207
+ validation_conditioning_image = log["validation_conditioning_image"]
208
+
209
+ formatted_images = []
210
+
211
+ formatted_images.append(np.asarray(validation_source_image))
212
+ formatted_images.append(np.asarray(validation_conditioning_image))
213
+
214
+ for image in images:
215
+ formatted_images.append(np.asarray(image))
216
+
217
+ formatted_images = np.stack(formatted_images)
218
+
219
+ tracker.writer.add_images(f"{i:05}", formatted_images, step, dataformats="NHWC")
220
+ elif tracker.name == "wandb":
221
+ formatted_images = []
222
+
223
+ for log in image_logs:
224
+ images = log["images"]
225
+ validation_source_image = log["validation_source_image"]
226
+ validation_conditioning_image = log["validation_conditioning_image"]
227
+
228
+ formatted_images.append(wandb.Image(validation_source_image, caption="Source"))
229
+ formatted_images.append(wandb.Image(validation_conditioning_image, caption="Conditioning"))
230
+
231
+ for image in images:
232
+ image = wandb.Image(image, caption="Generated")
233
+ formatted_images.append(image)
234
+
235
+ tracker.log({"validation": formatted_images})
236
+ else:
237
+ logger.warn(f"image logging not implemented for {tracker.name}")
238
+
239
+ return image_logs
240
+
241
+
242
+ def parse_args():
243
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
244
+ parser.add_argument(
245
+ "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1."
246
+ )
247
+ parser.add_argument(
248
+ "--pretrained_model_name_or_path",
249
+ type=str,
250
+ default=None,
251
+ required=True,
252
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
253
+ )
254
+ parser.add_argument(
255
+ "--pretrained_clip_model_name_or_path",
256
+ type=str,
257
+ default="openai/clip-vit-large-patch14",
258
+ required=False,
259
+ help="Path to pretrained CLIP model or model identifier from huggingface.co/models.",
260
+ )
261
+ parser.add_argument(
262
+ "--revision",
263
+ type=str,
264
+ default=None,
265
+ required=False,
266
+ help="Revision of pretrained model identifier from huggingface.co/models.",
267
+ )
268
+ parser.add_argument(
269
+ "--variant",
270
+ type=str,
271
+ default=None,
272
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
273
+ )
274
+
275
+ parser.add_argument(
276
+ "--dataset_config_name",
277
+ type=str,
278
+ default=None,
279
+ help="The config of the Dataset, leave as None if there's only one config.",
280
+ )
281
+ parser.add_argument(
282
+ "--dataset_loading_script_path",
283
+ type=str,
284
+ default=None,
285
+ required=True,
286
+ help="Path to the dataset loading script file",
287
+ )
288
+ parser.add_argument(
289
+ "--source_image_column",
290
+ type=str,
291
+ default="source_image",
292
+ help="The column of the dataset containing the referencenet source image.",
293
+ )
294
+ parser.add_argument(
295
+ "--conditioning_image_column",
296
+ type=str,
297
+ default="conditioning_image",
298
+ help="The column of the dataset containing the referencenet conditioning image.",
299
+ )
300
+ parser.add_argument(
301
+ "--ground_truth_column",
302
+ type=str,
303
+ default="ground_truth",
304
+ help="The column of the dataset containing the ground truth image.",
305
+ )
306
+ parser.add_argument(
307
+ "--output_dir",
308
+ type=str,
309
+ default="sd-model-finetuned",
310
+ help="The output directory where the model predictions and checkpoints will be written.",
311
+ )
312
+ parser.add_argument(
313
+ "--cache_dir",
314
+ type=str,
315
+ default=None,
316
+ help="The directory where the downloaded models and datasets will be stored.",
317
+ )
318
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
319
+ parser.add_argument(
320
+ "--resolution",
321
+ type=int,
322
+ default=512,
323
+ help=(
324
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
325
+ " resolution"
326
+ ),
327
+ )
328
+ parser.add_argument(
329
+ "--center_crop",
330
+ default=False,
331
+ action="store_true",
332
+ help=(
333
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
334
+ " cropped. The images will be resized to the resolution first before cropping."
335
+ ),
336
+ )
337
+ parser.add_argument(
338
+ "--random_flip",
339
+ action="store_true",
340
+ help="whether to randomly flip images horizontally",
341
+ )
342
+ parser.add_argument(
343
+ "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
344
+ )
345
+ parser.add_argument("--num_train_epochs", type=int, default=100)
346
+ parser.add_argument(
347
+ "--max_train_steps",
348
+ type=int,
349
+ default=None,
350
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
351
+ )
352
+ parser.add_argument(
353
+ "--max_train_samples",
354
+ type=int,
355
+ default=None,
356
+ help=(
357
+ "For debugging purposes or quicker training, truncate the number of training examples to this "
358
+ "value if set."
359
+ ),
360
+ )
361
+ parser.add_argument(
362
+ "--gradient_accumulation_steps",
363
+ type=int,
364
+ default=1,
365
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
366
+ )
367
+ parser.add_argument(
368
+ "--gradient_checkpointing",
369
+ action="store_true",
370
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
371
+ )
372
+ parser.add_argument(
373
+ "--learning_rate",
374
+ type=float,
375
+ default=1e-4,
376
+ help="Initial learning rate (after the potential warmup period) to use.",
377
+ )
378
+ parser.add_argument(
379
+ "--scale_lr",
380
+ action="store_true",
381
+ default=False,
382
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
383
+ )
384
+ parser.add_argument(
385
+ "--lr_scheduler",
386
+ type=str,
387
+ default="constant",
388
+ help=(
389
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
390
+ ' "constant", "constant_with_warmup"]'
391
+ ),
392
+ )
393
+ parser.add_argument(
394
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
395
+ )
396
+ parser.add_argument(
397
+ "--snr_gamma",
398
+ type=float,
399
+ default=None,
400
+ help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
401
+ "More details here: https://arxiv.org/abs/2303.09556.",
402
+ )
403
+ parser.add_argument(
404
+ "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
405
+ )
406
+ parser.add_argument(
407
+ "--allow_tf32",
408
+ action="store_true",
409
+ help=(
410
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
411
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
412
+ ),
413
+ )
414
+ parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
415
+ parser.add_argument(
416
+ "--non_ema_revision",
417
+ type=str,
418
+ default=None,
419
+ required=False,
420
+ help=(
421
+ "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
422
+ " remote repository specified with --pretrained_model_name_or_path."
423
+ ),
424
+ )
425
+ parser.add_argument(
426
+ "--dataloader_num_workers",
427
+ type=int,
428
+ default=0,
429
+ help=(
430
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
431
+ ),
432
+ )
433
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
434
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
435
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
436
+ parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
437
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
438
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
439
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
440
+ parser.add_argument(
441
+ "--prediction_type",
442
+ type=str,
443
+ default=None,
444
+ help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.",
445
+ )
446
+ parser.add_argument(
447
+ "--hub_model_id",
448
+ type=str,
449
+ default=None,
450
+ help="The name of the repository to keep in sync with the local `output_dir`.",
451
+ )
452
+ parser.add_argument(
453
+ "--logging_dir",
454
+ type=str,
455
+ default="logs",
456
+ help=(
457
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
458
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
459
+ ),
460
+ )
461
+ parser.add_argument(
462
+ "--mixed_precision",
463
+ type=str,
464
+ default=None,
465
+ choices=["no", "fp16", "bf16"],
466
+ help=(
467
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
468
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
469
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
470
+ ),
471
+ )
472
+ parser.add_argument(
473
+ "--report_to",
474
+ type=str,
475
+ default="tensorboard",
476
+ help=(
477
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
478
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
479
+ ),
480
+ )
481
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
482
+ parser.add_argument(
483
+ "--checkpointing_steps",
484
+ type=int,
485
+ default=500,
486
+ help=(
487
+ "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
488
+ " training using `--resume_from_checkpoint`."
489
+ ),
490
+ )
491
+ parser.add_argument(
492
+ "--checkpoints_total_limit",
493
+ type=int,
494
+ default=None,
495
+ help=("Max number of checkpoints to store."),
496
+ )
497
+ parser.add_argument(
498
+ "--resume_from_checkpoint",
499
+ type=str,
500
+ default=None,
501
+ help=(
502
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
503
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
504
+ ),
505
+ )
506
+ parser.add_argument(
507
+ "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
508
+ )
509
+ parser.add_argument(
510
+ "--set_grads_to_none",
511
+ action="store_true",
512
+ help=(
513
+ "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
514
+ " behaviors, so disable this argument if it causes any problems. More info:"
515
+ " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
516
+ ),
517
+ )
518
+ parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
519
+ parser.add_argument(
520
+ "--validation_conditioning_image",
521
+ type=str,
522
+ default=None,
523
+ nargs="+",
524
+ help=(
525
+ "A set of paths to the referencenet conditioning image be evaluated every `--validation_steps`"
526
+ " and logged to `--report_to`. Provide either a matching number of `--validation_source_image`s, a"
527
+ " a single `--validation_source_image` to be used with all `--validation_conditioning_image`s, or a single"
528
+ " `--validation_conditioning_image` that will be used with all `--validation_source_image`s."
529
+ ),
530
+ )
531
+ parser.add_argument(
532
+ "--validation_source_image",
533
+ type=str,
534
+ default=None,
535
+ nargs="+",
536
+ help=(
537
+ "A set of source images evaluated every `--validation_steps` and logged to `--report_to`."
538
+ " Provide either a matching number of `--validation_conditioning_image`s, a single `--validation_conditioning_image`"
539
+ " to be used with all source images, or a single source image that will be used with all `--validation_conditioning_image`s."
540
+ ),
541
+ )
542
+ parser.add_argument(
543
+ "--num_validation_images",
544
+ type=int,
545
+ default=4,
546
+ help="Number of images to be generated for each `--validation_conditioning_image`, `--validation_source_image` pair",
547
+ )
548
+ parser.add_argument(
549
+ "--validation_epochs",
550
+ type=int,
551
+ default=5,
552
+ help="Run validation every X epochs.",
553
+ )
554
+ parser.add_argument(
555
+ "--validation_steps",
556
+ type=int,
557
+ default=100,
558
+ help=(
559
+ "Run validation every X steps. Validation consists of running the prompt"
560
+ " `args.validation_source_image` multiple times: `args.num_validation_images`"
561
+ " and logging the images."
562
+ ),
563
+ )
564
+ parser.add_argument(
565
+ "--tracker_project_name",
566
+ type=str,
567
+ default="text2image-fine-tune",
568
+ help=(
569
+ "The `project_name` argument passed to Accelerator.init_trackers for"
570
+ " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
571
+ ),
572
+ )
573
+ parser.add_argument(
574
+ "--conditioning_dropout_prob",
575
+ type=float,
576
+ default=None,
577
+ help="Conditioning dropout probability. Drops out the conditioning image used during training",
578
+ )
579
+
580
+ args = parser.parse_args()
581
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
582
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
583
+ args.local_rank = env_local_rank
584
+
585
+ # Sanity checks
586
+ if args.dataset_loading_script_path is None:
587
+ raise ValueError("Need a script to load training dataset.")
588
+
589
+ # default to using the same revision for the non-ema model if not specified
590
+ if args.non_ema_revision is None:
591
+ args.non_ema_revision = args.revision
592
+
593
+ return args
594
+
595
+
596
+ def make_train_dataset(args, feature_extractor, accelerator):
597
+ # In distributed training, the load_dataset function guarantees that only one local process can concurrently
598
+ # download the dataset.
599
+ dataset = load_dataset(path=args.dataset_loading_script_path, split="train", trust_remote_code=True)
600
+
601
+ # Preprocessing the datasets.
602
+ # We need to preprocess the input and target images.
603
+ column_names = dataset.column_names
604
+
605
+ # Get the column names for input/target.
606
+ if args.source_image_column is None:
607
+ source_image_column = column_names[0]
608
+ logger.info(f"image column defaulting to {source_image_column}")
609
+ else:
610
+ source_image_column = args.source_image_column
611
+ if source_image_column not in column_names:
612
+ raise ValueError(
613
+ f"`--source_image_column` value '{args.source_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
614
+ )
615
+
616
+ if args.conditioning_image_column is None:
617
+ conditioning_image_column = column_names[1]
618
+ logger.info(f"conditioning image column defaulting to {conditioning_image_column}")
619
+ else:
620
+ conditioning_image_column = args.conditioning_image_column
621
+ if conditioning_image_column not in column_names:
622
+ raise ValueError(
623
+ f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
624
+ )
625
+
626
+ if args.ground_truth_column is None:
627
+ ground_truth_column = column_names[2]
628
+ logger.info(f"ground truth column defaulting to {ground_truth_column}")
629
+ else:
630
+ ground_truth_column = args.ground_truth_column
631
+ if ground_truth_column not in column_names:
632
+ raise ValueError(
633
+ f"`--ground_truth_column` value '{args.ground_truth_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
634
+ )
635
+
636
+ def extract_features(images):
637
+ features = []
638
+ for image in images:
639
+ feature = feature_extractor(images=image, do_rescale=False, return_tensors="pt").pixel_values[0]
640
+ features.append(feature)
641
+ return features
642
+
643
+ # The albumentations pipeline takes three named inputs (image, image0, image1) and outputs numpy arrays.
644
+ albumentations_transform = A.Compose(
645
+ [
646
+ A.HorizontalFlip(p=0.5 if args.random_flip else 0.0),
647
+ ],
648
+ additional_targets={"image0": "image", "image1": "image"},
649
+ )
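+ # `additional_targets` makes albumentations apply the *same* sampled flip to all three
+ # images, keeping source, conditioning, and ground-truth geometrically aligned.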
650
+
651
+ torchvision_transforms = transforms_v2.Compose(
652
+ [
653
+ transforms_v2.ToImage(),
654
+ transforms_v2.Resize(args.resolution, interpolation=transforms_v2.InterpolationMode.BILINEAR, antialias=True),
655
+ transforms_v2.CenterCrop(args.resolution),
656
+ transforms_v2.ToDtype(torch.float32, scale=True),
657
+ ]
658
+ )
659
+
660
+ # Create a Normalize transform
661
+ normalize_transforms = transforms_v2.Compose([transforms_v2.Normalize(mean=[0.5], std=[0.5])])
662
+
663
+ def preprocess_train(examples):
664
+ source_images = [image.convert("RGB") for image in examples[source_image_column]]
665
+ conditioning_images = [image.convert("RGB") for image in examples[conditioning_image_column]]
666
+ ground_truth = [image.convert("RGB") for image in examples[ground_truth_column]]
667
+
668
+ for i, (src, cond, gt) in enumerate(zip(source_images, conditioning_images, ground_truth)):
669
+ # Convert PIL image to numpy array
670
+ src = np.array(src)
671
+ cond = np.array(cond)
672
+ gt = np.array(gt)
673
+
674
+ # Apply the same augmentation with the same parameters to multiple images
675
+ augmented = albumentations_transform(image=src, image0=cond, image1=gt)
676
+
677
+ # Convert numpy array to PIL image
678
+ source_images[i] = Image.fromarray(augmented["image"])
679
+ conditioning_images[i] = Image.fromarray(augmented["image0"])
680
+ ground_truth[i] = Image.fromarray(augmented["image1"])
681
+
682
+ source_images = [torchvision_transforms(image) for image in source_images]
683
+ conditioning_images = [torchvision_transforms(image) for image in conditioning_images]
684
+ ground_truth = [torchvision_transforms(image) for image in ground_truth]
685
+
686
+ examples["source_images"] = [normalize_transforms(image) for image in source_images]
687
+ examples["conditioning_images"] = [normalize_transforms(image) for image in conditioning_images]
688
+ examples["ground_truth"] = [normalize_transforms(image) for image in ground_truth]
689
+
690
+ examples["clip_source_images"] = extract_features(source_images)
691
+ examples["clip_conditioning_images"] = extract_features(conditioning_images)
692
+ examples["clip_ground_truth"] = extract_features(ground_truth)
693
+
694
+ return examples
695
+
696
+ with accelerator.main_process_first():
697
+ if args.max_train_samples is not None:
698
+ dataset = dataset.shuffle(seed=args.seed).select(range(args.max_train_samples))
699
+ # Set the training transforms
700
+ train_dataset = dataset.with_transform(preprocess_train)
701
+
702
+ return train_dataset
703
+
704
+
705
+ def collate_fn(examples):
706
+ ground_truth = [example["ground_truth"] for example in examples]
707
+ source_images = [example["source_images"] for example in examples]
708
+ conditioning_images = [example["conditioning_images"] for example in examples]
709
+ clip_ground_truth = [example["clip_ground_truth"] for example in examples]
710
+ clip_source_images = [example["clip_source_images"] for example in examples]
711
+ clip_conditioning_images = [example["clip_conditioning_images"] for example in examples]
712
+
713
+ ground_truth = torch.stack(ground_truth)
714
+ ground_truth = ground_truth.to(memory_format=torch.contiguous_format).float()
715
+
716
+ source_images = torch.stack(source_images)
717
+ source_images = source_images.to(memory_format=torch.contiguous_format).float()
718
+
719
+ conditioning_images = torch.stack(conditioning_images)
720
+ conditioning_images = conditioning_images.to(memory_format=torch.contiguous_format).float()
721
+
722
+ clip_ground_truth = torch.stack(clip_ground_truth)
723
+ clip_ground_truth = clip_ground_truth.to(memory_format=torch.contiguous_format).float()
724
+
725
+ clip_source_images = torch.stack(clip_source_images)
726
+ clip_source_images = clip_source_images.to(memory_format=torch.contiguous_format).float()
727
+
728
+ clip_conditioning_images = torch.stack(clip_conditioning_images)
729
+ clip_conditioning_images = clip_conditioning_images.to(memory_format=torch.contiguous_format).float()
730
+
731
+ return {
732
+ "ground_truth": ground_truth,
733
+ "source_images": source_images,
734
+ "conditioning_images": conditioning_images,
735
+ "clip_ground_truth": clip_ground_truth,
736
+ "clip_source_images": clip_source_images,
737
+ "clip_conditioning_images": clip_conditioning_images,
738
+ }
739
+
740
+
741
+ def main():
742
+ args = parse_args()
743
+
744
+ if args.non_ema_revision is not None:
745
+ deprecate(
746
+ "non_ema_revision!=None",
747
+ "0.15.0",
748
+ message=(
749
+ "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
750
+ " use `--variant=non_ema` instead."
751
+ ),
752
+ )
753
+ logging_dir = os.path.join(args.output_dir, args.logging_dir)
754
+
755
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
756
+ # ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
757
+ accelerator = Accelerator(
758
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
759
+ mixed_precision=args.mixed_precision,
760
+ log_with=args.report_to,
761
+ project_config=accelerator_project_config,
762
+ # kwargs_handlers=[ddp_kwargs]
763
+ )
764
+
765
+ generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
766
+
767
+ # Make one log on every process with the configuration for debugging.
768
+ logging.basicConfig(
769
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
770
+ datefmt="%m/%d/%Y %H:%M:%S",
771
+ level=logging.INFO,
772
+ )
773
+ logger.info(accelerator.state, main_process_only=False)
774
+ if accelerator.is_local_main_process:
775
+ datasets.utils.logging.set_verbosity_warning()
776
+ transformers.utils.logging.set_verbosity_warning()
777
+ diffusers.utils.logging.set_verbosity_info()
778
+ else:
779
+ datasets.utils.logging.set_verbosity_error()
780
+ transformers.utils.logging.set_verbosity_error()
781
+ diffusers.utils.logging.set_verbosity_error()
782
+
783
+ # If passed along, set the training seed now.
784
+ if args.seed is not None:
785
+ set_seed(args.seed)
786
+
787
+ # Handle the repository creation
788
+ if accelerator.is_main_process:
789
+ if args.output_dir is not None:
790
+ os.makedirs(args.output_dir, exist_ok=True)
791
+
792
+ if args.push_to_hub:
793
+ repo_id = create_repo(
794
+ repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
795
+ ).repo_id
796
+
797
+ # Load scheduler and models.
798
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
799
+ feature_extractor = CLIPImageProcessor.from_pretrained(args.pretrained_clip_model_name_or_path)
800
+
801
+ def deepspeed_zero_init_disabled_context_manager():
802
+ """
803
+ returns either a context list that includes one that will disable zero.Init or an empty context list
804
+ """
805
+ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None
806
+ if deepspeed_plugin is None:
807
+ return []
808
+
809
+ return [deepspeed_plugin.zero3_init_context_manager(enable=False)]
810
+
811
+ # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3.
812
+ # For this to work properly all models must be run through `accelerate.prepare`. But accelerate
813
+ # will try to assign the same optimizer with the same weights to all models during
814
+ # `deepspeed.initialize`, which of course doesn't work.
815
+ #
816
+ # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2
817
+ # frozen models from being partitioned during `zero.Init` which gets called during
818
+ # `from_pretrained`. So CLIPVisionModel and AutoencoderKL will not enjoy the parameter sharding
819
+ # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded.
820
+ with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
821
+ image_encoder = CLIPVisionModel.from_pretrained(args.pretrained_clip_model_name_or_path)
822
+ vae = AutoencoderKL.from_pretrained(
823
+ args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
824
+ )
825
+
826
+ unet = UNet2DConditionModel.from_pretrained(
827
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision
828
+ )
829
+ referencenet = ReferenceNetModel.from_pretrained(
830
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision
831
+ )
832
+ conditioning_referencenet = ReferenceNetModel.from_pretrained(
833
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision
834
+ )
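+ # Note: both ReferenceNets are initialized from the pretrained SD UNet weights
+ # (subfolder="unet"); the ReferenceNet is architecturally compatible with the UNet
+ # checkpoint, so the weights load directly and the two copies diverge during fine-tuning.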
835
+
836
+ # Freeze vae and image_encoder
837
+ vae.requires_grad_(False)
838
+ image_encoder.requires_grad_(False)
839
+ unet.train()
840
+ # Alternative (disabled): freeze the UNet and fine-tune only its attention/transformer blocks:
+ # unet.requires_grad_(False)
+ # unet = set_parts_of_model_for_gradient_computation(unet)
849
+
850
+ # referencenet.train()
851
+ referencenet.requires_grad_(False)
852
+ referencenet = set_parts_of_model_for_gradient_computation(referencenet)
853
+
854
+ # conditioning_referencenet.train()
855
+ conditioning_referencenet.requires_grad_(False)
856
+ conditioning_referencenet = set_parts_of_model_for_gradient_computation(conditioning_referencenet)
857
+
858
+ # Create EMA for the unet.
859
+ if args.use_ema:
860
+ ema_unet = UNet2DConditionModel.from_pretrained(
861
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
862
+ )
863
+ ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config)
864
+
865
+ if args.enable_xformers_memory_efficient_attention:
866
+ if is_xformers_available():
867
+ import xformers
868
+
869
+ xformers_version = version.parse(xformers.__version__)
870
+ if xformers_version == version.parse("0.0.16"):
871
+ logger.warning(
872
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
873
+ )
874
+ unet.enable_xformers_memory_efficient_attention()
875
+ else:
876
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
877
+
878
+ # Custom save/load hooks for `accelerator.save_state(...)` (supported in `accelerate` >= 0.16.0).
+ # Deliberately disabled here; kept for reference.
879
+ if False:
880
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
881
+ def save_model_hook(models, weights, output_dir):
882
+ if accelerator.is_main_process:
883
+ if args.use_ema:
884
+ ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema"))
885
+
886
+ for model in models:
887
+ sub_dir = "unet" if isinstance(model, type(accelerator.unwrap_model(unet))) else "referencenet"
888
+ model.save_pretrained(os.path.join(output_dir, sub_dir))
889
+
890
+ # make sure to pop weight so that corresponding model is not saved again
891
+ weights.pop()
892
+
893
+ def load_model_hook(models, input_dir):
894
+ if args.use_ema:
895
+ load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel)
896
+ ema_unet.load_state_dict(load_model.state_dict())
897
+ ema_unet.to(accelerator.device)
898
+ del load_model
899
+
900
+ while len(models) > 0:
901
+ # pop models so that they are not loaded again
902
+ model = models.pop()
903
+
904
+ if isinstance(model, type(accelerator.unwrap_model(referencenet))):
905
+ # load transformers style into model
906
+ load_model = ReferenceNetModel.from_pretrained(input_dir, subfolder="referencenet")
907
+ else:
908
+ # load diffusers style into model
909
+ load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
910
+ model.register_to_config(**load_model.config)
911
+
912
+ model.load_state_dict(load_model.state_dict())
913
+ del load_model
914
+
915
+ accelerator.register_save_state_pre_hook(save_model_hook)
916
+ accelerator.register_load_state_pre_hook(load_model_hook)
917
+
918
+ if args.gradient_checkpointing:
919
+ unet.enable_gradient_checkpointing()
920
+
921
+ # Enable TF32 for faster training on Ampere GPUs,
922
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
923
+ if args.allow_tf32:
924
+ torch.backends.cuda.matmul.allow_tf32 = True
925
+
926
+ if args.scale_lr:
927
+ args.learning_rate = (
928
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
929
+ )
930
+
931
+ # Initialize the optimizer
932
+ if args.use_8bit_adam:
933
+ try:
934
+ import bitsandbytes as bnb
935
+ except ImportError:
936
+ raise ImportError(
937
+ "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
938
+ )
939
+
940
+ optimizer_cls = bnb.optim.AdamW8bit
941
+ else:
942
+ optimizer_cls = torch.optim.AdamW
943
+
944
+ trainable_params = list(filter(lambda p: p.requires_grad, unet.parameters()))
945
+ trainable_params += list(filter(lambda p: p.requires_grad, referencenet.parameters()))
946
+ trainable_params += list(filter(lambda p: p.requires_grad, conditioning_referencenet.parameters()))
947
+
948
+ optimizer = optimizer_cls(
949
+ trainable_params,
950
+ lr=args.learning_rate,
951
+ betas=(args.adam_beta1, args.adam_beta2),
952
+ weight_decay=args.adam_weight_decay,
953
+ eps=args.adam_epsilon,
954
+ )
955
+
956
+ # Dataset and DataLoaders creation:
957
+ train_dataset = make_train_dataset(args, feature_extractor, accelerator)
958
+
959
+ train_dataloader = torch.utils.data.DataLoader(
960
+ train_dataset,
961
+ shuffle=True,
962
+ collate_fn=collate_fn,
963
+ batch_size=args.train_batch_size,
964
+ num_workers=args.dataloader_num_workers,
965
+ )
966
+
967
+ # Scheduler and math around the number of training steps.
968
+ overrode_max_train_steps = False
969
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
970
+ if args.max_train_steps is None:
971
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
972
+ overrode_max_train_steps = True
973
+
974
+ lr_scheduler = get_scheduler(
975
+ args.lr_scheduler,
976
+ optimizer=optimizer,
977
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
978
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
979
+ )
980
+
981
+ # Prepare everything with our `accelerator`.
982
+ unet, referencenet, conditioning_referencenet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
983
+ unet, referencenet, conditioning_referencenet, optimizer, train_dataloader, lr_scheduler
984
+ )
985
+
986
+ if args.use_ema:
987
+ ema_unet.to(accelerator.device)
988
+
989
+ # For mixed precision training we cast all non-trainable weights (vae, image_encoder) to half-precision,
990
+ # as these weights are only used for inference, keeping weights in full precision is not required.
991
+ weight_dtype = torch.float32
992
+ if accelerator.mixed_precision == "fp16":
993
+ weight_dtype = torch.float16
994
+ args.mixed_precision = accelerator.mixed_precision
995
+ elif accelerator.mixed_precision == "bf16":
996
+ weight_dtype = torch.bfloat16
997
+ args.mixed_precision = accelerator.mixed_precision
998
+
999
+ # Move image_encoder and vae to gpu and cast to weight_dtype
1000
+ image_encoder.to(accelerator.device, dtype=weight_dtype)
1001
+ vae.to(accelerator.device, dtype=weight_dtype)
1002
+
1003
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1004
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1005
+ if overrode_max_train_steps:
1006
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1007
+ # Afterwards we recalculate our number of training epochs
1008
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1009
+
1010
+ # We need to initialize the trackers we use, and also store our configuration.
1011
+ # The trackers initialize automatically on the main process.
1012
+ if accelerator.is_main_process:
1013
+ tracker_config = dict(vars(args))
1014
+ tracker_config.pop("validation_conditioning_image")
1015
+ tracker_config.pop("validation_source_image")
1016
+ accelerator.init_trackers(args.tracker_project_name, tracker_config)
1017
+
1018
+ # Train!
1019
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1020
+
1021
+ logger.info("***** Running training *****")
1022
+ logger.info(f" Num examples = {len(train_dataset)}")
1023
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
1024
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1025
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1026
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1027
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1028
+ global_step = 0
1029
+ first_epoch = 0
1030
+
1031
+ # Potentially load in the weights and states from a previous save
1032
+ if args.resume_from_checkpoint:
1033
+ if args.resume_from_checkpoint != "latest":
1034
+ path = os.path.basename(args.resume_from_checkpoint)
1035
+ else:
1036
+ # Get the most recent checkpoint
1037
+ dirs = os.listdir(args.output_dir)
1038
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1039
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1040
+ path = dirs[-1] if len(dirs) > 0 else None
1041
+
1042
+ if path is None:
1043
+ accelerator.print(
1044
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1045
+ )
1046
+ args.resume_from_checkpoint = None
1047
+ initial_global_step = 0
1048
+ else:
1049
+ accelerator.print(f"Resuming from checkpoint {path}")
1050
+ accelerator.load_state(os.path.join(args.output_dir, path))
1051
+ global_step = int(path.split("-")[1])
1052
+
1053
+ initial_global_step = global_step
1054
+ first_epoch = global_step // num_update_steps_per_epoch
1055
+
1056
+ else:
1057
+ initial_global_step = 0
1058
+
1059
+ progress_bar = tqdm(
1060
+ range(0, args.max_train_steps),
1061
+ initial=initial_global_step,
1062
+ desc="Steps",
1063
+ # Only show the progress bar once on each machine.
1064
+ disable=not accelerator.is_local_main_process,
1065
+ )
1066
+
1067
+ last_step = noise_scheduler.config.num_train_timesteps - 1
1068
+     for epoch in range(first_epoch, args.num_train_epochs):
+         train_loss = 0.0
+         for step, batch in enumerate(train_dataloader):
+             with accelerator.accumulate(unet, referencenet, conditioning_referencenet):
+                 # Ground truth
+                 ground_truth = batch["ground_truth"].to(weight_dtype)
+                 clip_ground_truth = batch["clip_ground_truth"].to(weight_dtype)
+
+                 # Source images
+                 source_images = batch["source_images"].to(weight_dtype)
+                 clip_source_images = batch["clip_source_images"].to(weight_dtype)
+
+                 # Driving images
+                 conditioning_images = batch["conditioning_images"].to(weight_dtype)
+                 clip_conditioning_images = batch["clip_conditioning_images"].to(weight_dtype)
+
+                 # Convert images to latent space
+                 latents = vae.encode(ground_truth).latent_dist.sample()
+                 latents *= vae.config.scaling_factor
+                 source_latents = vae.encode(source_images).latent_dist.sample()
+                 source_latents *= vae.config.scaling_factor
+                 conditioning_latents = vae.encode(conditioning_images).latent_dist.sample()
+                 conditioning_latents *= vae.config.scaling_factor
+
+                 # Sample noise that we'll add to the latents
+                 noise = torch.randn_like(latents)
+                 if args.noise_offset:
+                     # https://www.crosslabs.org//blog/diffusion-with-offset-noise
+                     noise += args.noise_offset * torch.randn(
+                         (latents.shape[0], latents.shape[1], 1, 1), device=latents.device
+                     )
+                 if args.input_perturbation:
+                     new_noise = noise + args.input_perturbation * torch.randn_like(noise)
+                 bsz = latents.shape[0]
+                 # Sample a random timestep for each image
+                 timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
+                 timesteps = timesteps.long()
+                 ref_timesteps = torch.zeros_like(timesteps)
+
+                 # Add noise to the latents according to the noise magnitude at each timestep
+                 # (this is the forward diffusion process)
+                 if args.input_perturbation:
+                     noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps)
+                 else:
+                     noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
+
+                 # Fix zero SNR: at the final timestep the model input is pure noise
+                 noisy_latents[timesteps == last_step] = noise[timesteps == last_step]
+
+                 # Get the source image embedding
+                 source_image_embeds = image_encoder(clip_source_images).pooler_output.unsqueeze(1)
+
+                 # Get the conditioning image embedding
+                 conditioning_image_embeds = image_encoder(clip_conditioning_images).pooler_output.unsqueeze(1)
+
+                 # Conditioning dropout to support classifier-free guidance during inference.
+                 random_p = torch.rand(bsz, device=accelerator.device, generator=generator)
+                 if args.conditioning_dropout_prob is not None:
+                     # Sample masks for the source images.
+                     image_mask = 1 - (random_p < args.conditioning_dropout_prob).to(source_images.dtype)
+                     # Final image conditioning.
+                     image_mask = image_mask.reshape(bsz, 1, 1, 1)
+                     source_latents = image_mask * source_latents
+
+                     image_mask = image_mask.reshape(bsz, 1, 1)
+                     source_image_embeds = image_mask * source_image_embeds
+
+                 # Get the target for loss depending on the prediction type
+                 if args.prediction_type is not None:
+                     # set prediction_type of scheduler if defined
+                     noise_scheduler.register_to_config(prediction_type=args.prediction_type)
+
+                 if noise_scheduler.config.prediction_type == "epsilon":
+                     target = noise
+                 elif noise_scheduler.config.prediction_type == "v_prediction":
+                     target = noise_scheduler.get_velocity(latents, noise, timesteps)
+                 else:
+                     raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
+
+                 # Referencenet pass
+                 referencenet_sample, referencenet_states = referencenet(
+                     sample=source_latents,
+                     timestep=ref_timesteps,
+                     encoder_hidden_states=source_image_embeds,
+                     return_dict=False,
+                 )
+
+                 # NOTE: disabled branch, kept for reference -- dropout applied to the
+                 # referencenet states themselves (rather than to the source latents and
+                 # embeddings above) is not used during training.
+                 if False:
+                     # Sample masks for the referencenet states.
+                     referencenet_states_mask = 1 - (random_p < args.conditioning_dropout_prob).to(referencenet.dtype)
+                     # Final referencenet states conditioning.
+                     referencenet_states_mask = referencenet_states_mask.reshape(bsz, 1, 1)
+                     recursive_multiply(referencenet_states, referencenet_states_mask)
+
+                 conditioning_referencenet_sample, conditioning_referencenet_states = conditioning_referencenet(
+                     sample=conditioning_latents,
+                     timestep=ref_timesteps,
+                     encoder_hidden_states=conditioning_image_embeds,
+                     return_dict=False,
+                 )
+
+                 concatenated_embeds = torch.cat([source_image_embeds, conditioning_image_embeds], dim=1)
+                 concatenated_referencenet_states = cat_referencenet_states(
+                     referencenet_states,
+                     conditioning_referencenet_states,
+                     dim=1,
+                 )
+
+                 # Predict the noise residual and compute loss
+                 model_pred = unet(
+                     sample=noisy_latents,
+                     timestep=timesteps,
+                     encoder_hidden_states=concatenated_embeds,
+                     referencenet_states=concatenated_referencenet_states,
+                 ).sample
+                 # Add the (otherwise unused) referencenet outputs with zero weight so that
+                 # every parameter participates in the graph; this avoids unused-parameter
+                 # errors under distributed training.
+                 model_pred += 0 * (referencenet_sample + conditioning_referencenet_sample)
+
+                 if args.snr_gamma is None:
+                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
+                 else:
+                     # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
+                     # Since we predict the noise instead of x_0, the original formulation is slightly changed.
+                     # This is discussed in Section 4.2 of the same paper.
+                     snr = compute_snr(noise_scheduler, timesteps)
+                     if noise_scheduler.config.prediction_type == "v_prediction":
+                         # Velocity objective requires that we add one to SNR values before we divide by them.
+                         snr = snr + 1
+                     mse_loss_weights = (
+                         torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr
+                     )
+
+                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
+                     loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
+                     loss = loss.mean()
+
+                 # Gather the losses across all processes for logging (if we use distributed training).
+                 avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
+                 train_loss += avg_loss.item() / args.gradient_accumulation_steps
+
+                 # Backpropagate
+                 accelerator.backward(loss)
+                 if accelerator.sync_gradients:
+                     accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
+                 optimizer.step()
+                 lr_scheduler.step()
+                 optimizer.zero_grad(set_to_none=args.set_grads_to_none)
+
+             # Checks if the accelerator has performed an optimization step behind the scenes
+             if accelerator.sync_gradients:
+                 if args.use_ema:
+                     ema_unet.step(unet.parameters())
+                 progress_bar.update(1)
+                 global_step += 1
+                 accelerator.log({"train_loss": train_loss}, step=global_step)
+                 train_loss = 0.0
+
+                 if accelerator.is_main_process:
+                     if global_step % args.checkpointing_steps == 0:
+                         # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
+                         if args.checkpoints_total_limit is not None:
+                             checkpoints = os.listdir(args.output_dir)
+                             checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
+                             checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
+
+                             # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
+                             if len(checkpoints) >= args.checkpoints_total_limit:
+                                 num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
+                                 removing_checkpoints = checkpoints[0:num_to_remove]
+
+                                 logger.info(
+                                     f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
+                                 )
+                                 logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
+
+                                 for removing_checkpoint in removing_checkpoints:
+                                     removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
+                                     shutil.rmtree(removing_checkpoint)
+
+                         save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
+                         accelerator.save_state(save_path)
+                         logger.info(f"Saved state to {save_path}")
+
+                     if args.validation_conditioning_image is not None and global_step % args.validation_steps == 0:
+                         if args.use_ema:
+                             # Store the UNet parameters temporarily and load the EMA parameters to perform inference.
+                             ema_unet.store(unet.parameters())
+                             ema_unet.copy_to(unet.parameters())
+                         log_validation(
+                             vae=vae,
+                             unet=unet,
+                             feature_extractor=feature_extractor,
+                             image_encoder=image_encoder,
+                             referencenet=referencenet,
+                             conditioning_referencenet=conditioning_referencenet,
+                             args=args,
+                             accelerator=accelerator,
+                             step=global_step,
+                         )
+                         if args.use_ema:
+                             # Switch back to the original UNet parameters.
+                             ema_unet.restore(unet.parameters())
+
+             logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
+             progress_bar.set_postfix(**logs)
+
+             if global_step >= args.max_train_steps:
+                 break
+
+     # Create the pipeline using the trained modules and save it.
+     accelerator.wait_for_everyone()
+     if accelerator.is_main_process:
+         unet = accelerator.unwrap_model(unet)
+         unet.save_pretrained(Path(args.output_dir, "unet"))
+         referencenet = accelerator.unwrap_model(referencenet)
+         referencenet.save_pretrained(Path(args.output_dir, "referencenet"))
+         conditioning_referencenet = accelerator.unwrap_model(conditioning_referencenet)
+         conditioning_referencenet.save_pretrained(Path(args.output_dir, "conditioning_referencenet"))
+
+         # Run a final round of inference.
+         log_validation(
+             vae=vae,
+             unet=unet,
+             feature_extractor=feature_extractor,
+             image_encoder=image_encoder,
+             referencenet=referencenet,
+             conditioning_referencenet=conditioning_referencenet,
+             args=args,
+             accelerator=accelerator,
+             step=global_step,
+         )
+
+     accelerator.end_training()
+
+
+ if __name__ == "__main__":
+     main()
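
A note on the `snr_gamma` branch in the loss computation above: the weighting follows the min-SNR-gamma scheme of https://arxiv.org/abs/2303.09556, with `compute_snr` being the standard helper from diffusers' training utilities. As a minimal, self-contained sketch of the same weighting (not the script's actual helper), the per-timestep weight for epsilon prediction can be derived directly from the scheduler's cumulative alphas:

import torch

def min_snr_weights(alphas_cumprod, timesteps, snr_gamma):
    # SNR(t) = alpha_bar_t / (1 - alpha_bar_t) for a variance-preserving schedule.
    alpha_bar = alphas_cumprod[timesteps]
    snr = alpha_bar / (1.0 - alpha_bar)
    # Epsilon-prediction form: weight = min(SNR, gamma) / SNR.
    return torch.minimum(snr, torch.full_like(snr, snr_gamma)) / snr

# Example: a linear beta schedule with 1000 steps and gamma = 5.0 (values assumed for illustration).
betas = torch.linspace(1e-4, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
print(min_snr_weights(alphas_cumprod, torch.randint(0, 1000, (4,)), snr_gamma=5.0))

For v-prediction the training loop adds one to the SNR before dividing, matching the comment in the script.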
my_dataset/test/00482.png ADDED

Git LFS Details

  • SHA256: fb7d75c520ab23b26342b97b0f30c9c760100421d5ebd01ce8048839bcea5ac0
  • Pointer size: 131 Bytes
  • Size of remote file: 343 kB
my_dataset/test/14795.png ADDED

Git LFS Details

  • SHA256: 5afa53e296b53103768d62e9e738f3b1a9e393dd2d7e854dd60f927be98c8b13
  • Pointer size: 131 Bytes
  • Size of remote file: 444 kB
my_dataset/test/friends.jpg ADDED

Git LFS Details

  • SHA256: 3c05d018585e30d9ba7be5b6bc976b587085c9d95ce26c73220d7ed360d2df2c
  • Pointer size: 132 Bytes
  • Size of remote file: 1.57 MB
my_dataset/train/celeb/fake/18147_06771-01758_01758.png ADDED

Git LFS Details

  • SHA256: dc94e08055e5b7a96f925257e18dfc419c74c8ad733701826c7afa314af25c8a
  • Pointer size: 131 Bytes
  • Size of remote file: 426 kB
my_dataset/train/celeb/real/01758_01758.png ADDED

Git LFS Details

  • SHA256: b8194a334390689884bd9814054f10e266e27422c319c9121f652eb8b0ec06e8
  • Pointer size: 131 Bytes
  • Size of remote file: 408 kB
my_dataset/train/celeb/real/01758_09704.png ADDED

Git LFS Details

  • SHA256: 9ede51425b33e77e325dbd5be2857decfbbc7acf486ad1ff2a192d00446c3af2
  • Pointer size: 131 Bytes
  • Size of remote file: 371 kB
my_dataset/train/celeb/real/18147_06771.png ADDED

Git LFS Details

  • SHA256: c23a46834d6732d7c4610cdd47d5a53fa362c83008b2d187b0cf79cbe1383bd0
  • Pointer size: 131 Bytes
  • Size of remote file: 414 kB
my_dataset/train/train.jsonl ADDED
@@ -0,0 +1,2 @@
+ {"source_image": "celeb/real/18147_06771.png", "conditioning_image": "celeb/real/01758_01758.png", "ground_truth": "celeb/fake/18147_06771-01758_01758.png"}
+ {"source_image": "celeb/real/01758_09704.png", "conditioning_image": "celeb/fake/18147_06771-01758_01758.png", "ground_truth": "celeb/real/01758_01758.png"}
my_dataset/train_dataset_loading_script.py ADDED
@@ -0,0 +1,143 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+
+ import os
+
+ import datasets
+ import pandas as pd
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ _URLS = {
+     "metadata_path": "/path/to/face_anon_simple/my_dataset/train/train.jsonl",
+     "images_dir": "/path/to/face_anon_simple/my_dataset/train/",
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class NewDataset(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         features = datasets.Features(
+             {
+                 "source_image": datasets.Image(),
+                 "conditioning_image": datasets.Image(),
+                 "ground_truth": datasets.Image(),
+                 "source_image_path": datasets.Value("string"),
+                 "conditioning_image_path": datasets.Value("string"),
+                 "ground_truth_path": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+         metadata_path = _URLS["metadata_path"]
+         images_dir = _URLS["images_dir"]
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "metadata_path": metadata_path,
+                     "images_dir": images_dir,
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, metadata_path, images_dir):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         metadata = pd.read_json(metadata_path, lines=True)
+
+         for _, row in metadata.iterrows():
+             source_image_path = row["source_image"]
+             source_image_path = os.path.join(images_dir, source_image_path)
+             source_image = open(source_image_path, "rb").read()
+
+             conditioning_image_path = row["conditioning_image"]
+             conditioning_image_path = os.path.join(images_dir, conditioning_image_path)
+             conditioning_image = open(conditioning_image_path, "rb").read()
+
+             ground_truth_path = row["ground_truth"]
+             ground_truth_path = os.path.join(images_dir, ground_truth_path)
+             ground_truth = open(ground_truth_path, "rb").read()
+
+             yield (
+                 "-".join([source_image_path, conditioning_image_path]),
+                 {
+                     "source_image": {
+                         "path": source_image_path,
+                         "bytes": source_image,
+                     },
+                     "conditioning_image": {
+                         "path": conditioning_image_path,
+                         "bytes": conditioning_image,
+                     },
+                     "ground_truth": {
+                         "path": ground_truth_path,
+                         "bytes": ground_truth,
+                     },
+                     "source_image_path": source_image_path,
+                     "conditioning_image_path": conditioning_image_path,
+                     "ground_truth_path": ground_truth_path,
+                 },
+             )
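
For reference, a loading script like this is normally consumed through `datasets.load_dataset`. A minimal sketch, assuming the hard-coded `/path/to/...` entries in `_URLS` have been edited to point at the real files (the `trust_remote_code` flag is only required on newer `datasets` releases):

from datasets import load_dataset

train_dataset = load_dataset(
    "my_dataset/train_dataset_loading_script.py",
    split="train",
    trust_remote_code=True,
)
print(train_dataset[0]["source_image_path"])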
src/diffusers/__init__.py ADDED
@@ -0,0 +1,758 @@
+ __version__ = "0.25.1"
+
+ from typing import TYPE_CHECKING
+
+ from .utils import (
+     DIFFUSERS_SLOW_IMPORT,
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_k_diffusion_available,
+     is_librosa_available,
+     is_note_seq_available,
+     is_onnx_available,
+     is_scipy_available,
+     is_torch_available,
+     is_torchsde_available,
+     is_transformers_available,
+ )
+
+
+ # Lazy Import based on
+ # https://github.com/huggingface/transformers/blob/main/src/transformers/__init__.py
+
+ # When adding a new object to this init, please add it to `_import_structure`. The `_import_structure` is a dictionary submodule to list of object names,
+ # and is used to defer the actual importing for when the objects are requested.
+ # This way `import diffusers` provides the names in the namespace without actually importing anything (and especially none of the backends).
+
+ _import_structure = {
+     "configuration_utils": ["ConfigMixin"],
+     "models": [],
+     "pipelines": [],
+     "schedulers": [],
+     "utils": [
+         "OptionalDependencyNotAvailable",
+         "is_flax_available",
+         "is_inflect_available",
+         "is_invisible_watermark_available",
+         "is_k_diffusion_available",
+         "is_k_diffusion_version",
+         "is_librosa_available",
+         "is_note_seq_available",
+         "is_onnx_available",
+         "is_scipy_available",
+         "is_torch_available",
+         "is_torchsde_available",
+         "is_transformers_available",
+         "is_transformers_version",
+         "is_unidecode_available",
+         "logging",
+     ],
+ }
+
+ try:
+     if not is_onnx_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_onnx_objects  # noqa F403
+
+     _import_structure["utils.dummy_onnx_objects"] = [
+         name for name in dir(dummy_onnx_objects) if not name.startswith("_")
+     ]
+
+ else:
+     _import_structure["pipelines"].extend(["OnnxRuntimeModel"])
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_pt_objects  # noqa F403
+
+     _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
+
+ else:
+     _import_structure["models"].extend(
+         [
+             "AsymmetricAutoencoderKL",
+             "AutoencoderKL",
+             "AutoencoderKLTemporalDecoder",
+             "AutoencoderTiny",
+             "ConsistencyDecoderVAE",
+             "ControlNetModel",
+             "Kandinsky3UNet",
+             "ModelMixin",
+             "MotionAdapter",
+             "MultiAdapter",
+             "PriorTransformer",
+             "T2IAdapter",
+             "T5FilmDecoder",
+             "Transformer2DModel",
+             "UNet1DModel",
+             "UNet2DConditionModel",
+             "UNet2DModel",
+             "UNet3DConditionModel",
+             "UNetMotionModel",
+             "UNetSpatioTemporalConditionModel",
+             "UVit2DModel",
+             "VQModel",
+         ]
+     )
+
+     _import_structure["optimization"] = [
+         "get_constant_schedule",
+         "get_constant_schedule_with_warmup",
+         "get_cosine_schedule_with_warmup",
+         "get_cosine_with_hard_restarts_schedule_with_warmup",
+         "get_linear_schedule_with_warmup",
+         "get_polynomial_decay_schedule_with_warmup",
+         "get_scheduler",
+     ]
+     _import_structure["pipelines"].extend(
+         [
+             "AudioPipelineOutput",
+             "AutoPipelineForImage2Image",
+             "AutoPipelineForInpainting",
+             "AutoPipelineForText2Image",
+             "ConsistencyModelPipeline",
+             "DanceDiffusionPipeline",
+             "DDIMPipeline",
+             "DDPMPipeline",
+             "DiffusionPipeline",
+             "DiTPipeline",
+             "ImagePipelineOutput",
+             "KarrasVePipeline",
+             "LDMPipeline",
+             "LDMSuperResolutionPipeline",
+             "PNDMPipeline",
+             "RePaintPipeline",
+             "ScoreSdeVePipeline",
+         ]
+     )
+     _import_structure["schedulers"].extend(
+         [
+             "AmusedScheduler",
+             "CMStochasticIterativeScheduler",
+             "DDIMInverseScheduler",
+             "DDIMParallelScheduler",
+             "DDIMScheduler",
+             "DDPMParallelScheduler",
+             "DDPMScheduler",
+             "DDPMWuerstchenScheduler",
+             "DEISMultistepScheduler",
+             "DPMSolverMultistepInverseScheduler",
+             "DPMSolverMultistepScheduler",
+             "DPMSolverSinglestepScheduler",
+             "EulerAncestralDiscreteScheduler",
+             "EulerDiscreteScheduler",
+             "HeunDiscreteScheduler",
+             "IPNDMScheduler",
+             "KarrasVeScheduler",
+             "KDPM2AncestralDiscreteScheduler",
+             "KDPM2DiscreteScheduler",
+             "LCMScheduler",
+             "PNDMScheduler",
+             "RePaintScheduler",
+             "SchedulerMixin",
+             "ScoreSdeVeScheduler",
+             "UnCLIPScheduler",
+             "UniPCMultistepScheduler",
+             "VQDiffusionScheduler",
+         ]
+     )
+     _import_structure["training_utils"] = ["EMAModel"]
+
+ try:
+     if not (is_torch_available() and is_scipy_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_torch_and_scipy_objects  # noqa F403
+
+     _import_structure["utils.dummy_torch_and_scipy_objects"] = [
+         name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith("_")
+     ]
+
+ else:
+     _import_structure["schedulers"].extend(["LMSDiscreteScheduler"])
+
+ try:
+     if not (is_torch_available() and is_torchsde_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_torch_and_torchsde_objects  # noqa F403
+
+     _import_structure["utils.dummy_torch_and_torchsde_objects"] = [
+         name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith("_")
+     ]
+
+ else:
+     _import_structure["schedulers"].extend(["DPMSolverSDEScheduler"])
+
+ try:
+     if not (is_torch_available() and is_transformers_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_torch_and_transformers_objects  # noqa F403
+
+     _import_structure["utils.dummy_torch_and_transformers_objects"] = [
+         name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith("_")
+     ]
+
+ else:
+     _import_structure["pipelines"].extend(
+         [
+             "AltDiffusionImg2ImgPipeline",
+             "AltDiffusionPipeline",
+             "AmusedImg2ImgPipeline",
+             "AmusedInpaintPipeline",
+             "AmusedPipeline",
+             "AnimateDiffPipeline",
+             "AudioLDM2Pipeline",
+             "AudioLDM2ProjectionModel",
+             "AudioLDM2UNet2DConditionModel",
+             "AudioLDMPipeline",
+             "BlipDiffusionControlNetPipeline",
+             "BlipDiffusionPipeline",
+             "CLIPImageProjection",
+             "CycleDiffusionPipeline",
+             "IFImg2ImgPipeline",
+             "IFImg2ImgSuperResolutionPipeline",
+             "IFInpaintingPipeline",
+             "IFInpaintingSuperResolutionPipeline",
+             "IFPipeline",
+             "IFSuperResolutionPipeline",
+             "ImageTextPipelineOutput",
+             "Kandinsky3Img2ImgPipeline",
+             "Kandinsky3Pipeline",
+             "KandinskyCombinedPipeline",
+             "KandinskyImg2ImgCombinedPipeline",
+             "KandinskyImg2ImgPipeline",
+             "KandinskyInpaintCombinedPipeline",
+             "KandinskyInpaintPipeline",
+             "KandinskyPipeline",
+             "KandinskyPriorPipeline",
+             "KandinskyV22CombinedPipeline",
+             "KandinskyV22ControlnetImg2ImgPipeline",
+             "KandinskyV22ControlnetPipeline",
+             "KandinskyV22Img2ImgCombinedPipeline",
+             "KandinskyV22Img2ImgPipeline",
+             "KandinskyV22InpaintCombinedPipeline",
+             "KandinskyV22InpaintPipeline",
+             "KandinskyV22Pipeline",
+             "KandinskyV22PriorEmb2EmbPipeline",
+             "KandinskyV22PriorPipeline",
+             "LatentConsistencyModelImg2ImgPipeline",
+             "LatentConsistencyModelPipeline",
+             "LDMTextToImagePipeline",
+             "MusicLDMPipeline",
+             "PaintByExamplePipeline",
+             "PixArtAlphaPipeline",
+             "SemanticStableDiffusionPipeline",
+             "ShapEImg2ImgPipeline",
+             "ShapEPipeline",
+             "StableDiffusionAdapterPipeline",
+             "StableDiffusionAttendAndExcitePipeline",
+             "StableDiffusionControlNetImg2ImgPipeline",
+             "StableDiffusionControlNetInpaintPipeline",
+             "StableDiffusionControlNetPipeline",
+             "StableDiffusionDepth2ImgPipeline",
+             "StableDiffusionDiffEditPipeline",
+             "StableDiffusionGLIGENPipeline",
+             "StableDiffusionGLIGENTextImagePipeline",
+             "StableDiffusionImageVariationPipeline",
+             "StableDiffusionImg2ImgPipeline",
+             "StableDiffusionInpaintPipeline",
+             "StableDiffusionInpaintPipelineLegacy",
+             "StableDiffusionInstructPix2PixPipeline",
+             "StableDiffusionLatentUpscalePipeline",
+             "StableDiffusionLDM3DPipeline",
+             "StableDiffusionModelEditingPipeline",
+             "StableDiffusionPanoramaPipeline",
+             "StableDiffusionParadigmsPipeline",
+             "StableDiffusionPipeline",
+             "StableDiffusionPipelineSafe",
+             "StableDiffusionPix2PixZeroPipeline",
+             "StableDiffusionSAGPipeline",
+             "StableDiffusionUpscalePipeline",
+             "StableDiffusionXLAdapterPipeline",
+             "StableDiffusionXLControlNetImg2ImgPipeline",
+             "StableDiffusionXLControlNetInpaintPipeline",
+             "StableDiffusionXLControlNetPipeline",
+             "StableDiffusionXLImg2ImgPipeline",
+             "StableDiffusionXLInpaintPipeline",
+             "StableDiffusionXLInstructPix2PixPipeline",
+             "StableDiffusionXLPipeline",
+             "StableUnCLIPImg2ImgPipeline",
+             "StableUnCLIPPipeline",
+             "StableVideoDiffusionPipeline",
+             "TextToVideoSDPipeline",
+             "TextToVideoZeroPipeline",
+             "TextToVideoZeroSDXLPipeline",
+             "UnCLIPImageVariationPipeline",
+             "UnCLIPPipeline",
+             "UniDiffuserModel",
+             "UniDiffuserPipeline",
+             "UniDiffuserTextDecoder",
+             "VersatileDiffusionDualGuidedPipeline",
+             "VersatileDiffusionImageVariationPipeline",
+             "VersatileDiffusionPipeline",
+             "VersatileDiffusionTextToImagePipeline",
+             "VideoToVideoSDPipeline",
+             "VQDiffusionPipeline",
+             "WuerstchenCombinedPipeline",
+             "WuerstchenDecoderPipeline",
+             "WuerstchenPriorPipeline",
+         ]
+     )
+
+ try:
+     if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_torch_and_transformers_and_k_diffusion_objects  # noqa F403
+
+     _import_structure["utils.dummy_torch_and_transformers_and_k_diffusion_objects"] = [
+         name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith("_")
+     ]
+
+ else:
+     _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline"])
+
+ try:
+     if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_torch_and_transformers_and_onnx_objects  # noqa F403
+
+     _import_structure["utils.dummy_torch_and_transformers_and_onnx_objects"] = [
+         name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith("_")
+     ]
+
+ else:
+     _import_structure["pipelines"].extend(
+         [
+             "OnnxStableDiffusionImg2ImgPipeline",
+             "OnnxStableDiffusionInpaintPipeline",
+             "OnnxStableDiffusionInpaintPipelineLegacy",
+             "OnnxStableDiffusionPipeline",
+             "OnnxStableDiffusionUpscalePipeline",
+             "StableDiffusionOnnxPipeline",
+         ]
+     )
+
+ try:
+     if not (is_torch_available() and is_librosa_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_torch_and_librosa_objects  # noqa F403
+
+     _import_structure["utils.dummy_torch_and_librosa_objects"] = [
+         name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith("_")
+     ]
+
+ else:
+     _import_structure["pipelines"].extend(["AudioDiffusionPipeline", "Mel"])
+
+ try:
+     if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_transformers_and_torch_and_note_seq_objects  # noqa F403
+
+     _import_structure["utils.dummy_transformers_and_torch_and_note_seq_objects"] = [
+         name for name in dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith("_")
+     ]
+
+
+ else:
+     _import_structure["pipelines"].extend(["SpectrogramDiffusionPipeline"])
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_flax_objects  # noqa F403
+
+     _import_structure["utils.dummy_flax_objects"] = [
+         name for name in dir(dummy_flax_objects) if not name.startswith("_")
+     ]
+
+
+ else:
+     _import_structure["models.controlnet_flax"] = ["FlaxControlNetModel"]
+     _import_structure["models.modeling_flax_utils"] = ["FlaxModelMixin"]
+     _import_structure["models.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
+     _import_structure["models.vae_flax"] = ["FlaxAutoencoderKL"]
+     _import_structure["pipelines"].extend(["FlaxDiffusionPipeline"])
+     _import_structure["schedulers"].extend(
+         [
+             "FlaxDDIMScheduler",
+             "FlaxDDPMScheduler",
+             "FlaxDPMSolverMultistepScheduler",
+             "FlaxEulerDiscreteScheduler",
+             "FlaxKarrasVeScheduler",
+             "FlaxLMSDiscreteScheduler",
+             "FlaxPNDMScheduler",
+             "FlaxSchedulerMixin",
+             "FlaxScoreSdeVeScheduler",
+         ]
+     )
+
+
+ try:
+     if not (is_flax_available() and is_transformers_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_flax_and_transformers_objects  # noqa F403
+
+     _import_structure["utils.dummy_flax_and_transformers_objects"] = [
+         name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith("_")
+     ]
+
+
+ else:
+     _import_structure["pipelines"].extend(
+         [
+             "FlaxStableDiffusionControlNetPipeline",
+             "FlaxStableDiffusionImg2ImgPipeline",
+             "FlaxStableDiffusionInpaintPipeline",
+             "FlaxStableDiffusionPipeline",
+             "FlaxStableDiffusionXLPipeline",
+         ]
+     )
+
+ try:
+     if not (is_note_seq_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from .utils import dummy_note_seq_objects  # noqa F403
+
+     _import_structure["utils.dummy_note_seq_objects"] = [
+         name for name in dir(dummy_note_seq_objects) if not name.startswith("_")
+     ]
+
+
+ else:
+     _import_structure["pipelines"].extend(["MidiProcessor"])
+
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+     from .configuration_utils import ConfigMixin
+
+     try:
+         if not is_onnx_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_onnx_objects import *  # noqa F403
+     else:
+         from .pipelines import OnnxRuntimeModel
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_pt_objects import *  # noqa F403
+     else:
+         from .models import (
+             AsymmetricAutoencoderKL,
+             AutoencoderKL,
+             AutoencoderKLTemporalDecoder,
+             AutoencoderTiny,
+             ConsistencyDecoderVAE,
+             ControlNetModel,
+             Kandinsky3UNet,
+             ModelMixin,
+             MotionAdapter,
+             MultiAdapter,
+             PriorTransformer,
+             T2IAdapter,
+             T5FilmDecoder,
+             Transformer2DModel,
+             UNet1DModel,
+             UNet2DConditionModel,
+             UNet2DModel,
+             UNet3DConditionModel,
+             UNetMotionModel,
+             UNetSpatioTemporalConditionModel,
+             UVit2DModel,
+             VQModel,
+         )
+         from .optimization import (
+             get_constant_schedule,
+             get_constant_schedule_with_warmup,
+             get_cosine_schedule_with_warmup,
+             get_cosine_with_hard_restarts_schedule_with_warmup,
+             get_linear_schedule_with_warmup,
+             get_polynomial_decay_schedule_with_warmup,
+             get_scheduler,
+         )
+         from .pipelines import (
+             AudioPipelineOutput,
+             AutoPipelineForImage2Image,
+             AutoPipelineForInpainting,
+             AutoPipelineForText2Image,
+             BlipDiffusionControlNetPipeline,
+             BlipDiffusionPipeline,
+             CLIPImageProjection,
+             ConsistencyModelPipeline,
+             DanceDiffusionPipeline,
+             DDIMPipeline,
+             DDPMPipeline,
+             DiffusionPipeline,
+             DiTPipeline,
+             ImagePipelineOutput,
+             KarrasVePipeline,
+             LDMPipeline,
+             LDMSuperResolutionPipeline,
+             PNDMPipeline,
+             RePaintPipeline,
+             ScoreSdeVePipeline,
+         )
+         from .schedulers import (
+             AmusedScheduler,
+             CMStochasticIterativeScheduler,
+             DDIMInverseScheduler,
+             DDIMParallelScheduler,
+             DDIMScheduler,
+             DDPMParallelScheduler,
+             DDPMScheduler,
+             DDPMWuerstchenScheduler,
+             DEISMultistepScheduler,
+             DPMSolverMultistepInverseScheduler,
+             DPMSolverMultistepScheduler,
+             DPMSolverSinglestepScheduler,
+             EulerAncestralDiscreteScheduler,
+             EulerDiscreteScheduler,
+             HeunDiscreteScheduler,
+             IPNDMScheduler,
+             KarrasVeScheduler,
+             KDPM2AncestralDiscreteScheduler,
+             KDPM2DiscreteScheduler,
+             LCMScheduler,
+             PNDMScheduler,
+             RePaintScheduler,
+             SchedulerMixin,
+             ScoreSdeVeScheduler,
+             UnCLIPScheduler,
+             UniPCMultistepScheduler,
+             VQDiffusionScheduler,
+         )
+         from .training_utils import EMAModel
+
+     try:
+         if not (is_torch_available() and is_scipy_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_torch_and_scipy_objects import *  # noqa F403
+     else:
+         from .schedulers import LMSDiscreteScheduler
+
+     try:
+         if not (is_torch_available() and is_torchsde_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_torch_and_torchsde_objects import *  # noqa F403
+     else:
+         from .schedulers import DPMSolverSDEScheduler
+
+     try:
+         if not (is_torch_available() and is_transformers_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_torch_and_transformers_objects import *  # noqa F403
+     else:
+         from .pipelines import (
+             AltDiffusionImg2ImgPipeline,
+             AltDiffusionPipeline,
+             AmusedImg2ImgPipeline,
+             AmusedInpaintPipeline,
+             AmusedPipeline,
+             AnimateDiffPipeline,
+             AudioLDM2Pipeline,
+             AudioLDM2ProjectionModel,
+             AudioLDM2UNet2DConditionModel,
+             AudioLDMPipeline,
+             CLIPImageProjection,
+             CycleDiffusionPipeline,
+             IFImg2ImgPipeline,
+             IFImg2ImgSuperResolutionPipeline,
+             IFInpaintingPipeline,
+             IFInpaintingSuperResolutionPipeline,
+             IFPipeline,
+             IFSuperResolutionPipeline,
+             ImageTextPipelineOutput,
+             Kandinsky3Img2ImgPipeline,
+             Kandinsky3Pipeline,
+             KandinskyCombinedPipeline,
+             KandinskyImg2ImgCombinedPipeline,
+             KandinskyImg2ImgPipeline,
+             KandinskyInpaintCombinedPipeline,
+             KandinskyInpaintPipeline,
+             KandinskyPipeline,
+             KandinskyPriorPipeline,
+             KandinskyV22CombinedPipeline,
+             KandinskyV22ControlnetImg2ImgPipeline,
+             KandinskyV22ControlnetPipeline,
+             KandinskyV22Img2ImgCombinedPipeline,
+             KandinskyV22Img2ImgPipeline,
+             KandinskyV22InpaintCombinedPipeline,
+             KandinskyV22InpaintPipeline,
+             KandinskyV22Pipeline,
+             KandinskyV22PriorEmb2EmbPipeline,
+             KandinskyV22PriorPipeline,
+             LatentConsistencyModelImg2ImgPipeline,
+             LatentConsistencyModelPipeline,
+             LDMTextToImagePipeline,
+             MusicLDMPipeline,
+             PaintByExamplePipeline,
+             PixArtAlphaPipeline,
+             SemanticStableDiffusionPipeline,
+             ShapEImg2ImgPipeline,
+             ShapEPipeline,
+             StableDiffusionAdapterPipeline,
+             StableDiffusionAttendAndExcitePipeline,
+             StableDiffusionControlNetImg2ImgPipeline,
+             StableDiffusionControlNetInpaintPipeline,
+             StableDiffusionControlNetPipeline,
+             StableDiffusionDepth2ImgPipeline,
+             StableDiffusionDiffEditPipeline,
+             StableDiffusionGLIGENPipeline,
+             StableDiffusionGLIGENTextImagePipeline,
+             StableDiffusionImageVariationPipeline,
+             StableDiffusionImg2ImgPipeline,
+             StableDiffusionInpaintPipeline,
+             StableDiffusionInpaintPipelineLegacy,
+             StableDiffusionInstructPix2PixPipeline,
+             StableDiffusionLatentUpscalePipeline,
+             StableDiffusionLDM3DPipeline,
+             StableDiffusionModelEditingPipeline,
+             StableDiffusionPanoramaPipeline,
+             StableDiffusionParadigmsPipeline,
+             StableDiffusionPipeline,
+             StableDiffusionPipelineSafe,
+             StableDiffusionPix2PixZeroPipeline,
+             StableDiffusionSAGPipeline,
+             StableDiffusionUpscalePipeline,
+             StableDiffusionXLAdapterPipeline,
+             StableDiffusionXLControlNetImg2ImgPipeline,
+             StableDiffusionXLControlNetInpaintPipeline,
+             StableDiffusionXLControlNetPipeline,
+             StableDiffusionXLImg2ImgPipeline,
+             StableDiffusionXLInpaintPipeline,
+             StableDiffusionXLInstructPix2PixPipeline,
+             StableDiffusionXLPipeline,
+             StableUnCLIPImg2ImgPipeline,
+             StableUnCLIPPipeline,
+             StableVideoDiffusionPipeline,
+             TextToVideoSDPipeline,
+             TextToVideoZeroPipeline,
+             TextToVideoZeroSDXLPipeline,
+             UnCLIPImageVariationPipeline,
+             UnCLIPPipeline,
+             UniDiffuserModel,
+             UniDiffuserPipeline,
+             UniDiffuserTextDecoder,
+             VersatileDiffusionDualGuidedPipeline,
+             VersatileDiffusionImageVariationPipeline,
+             VersatileDiffusionPipeline,
+             VersatileDiffusionTextToImagePipeline,
+             VideoToVideoSDPipeline,
+             VQDiffusionPipeline,
+             WuerstchenCombinedPipeline,
+             WuerstchenDecoderPipeline,
+             WuerstchenPriorPipeline,
+         )
+
+     try:
+         if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
+     else:
+         from .pipelines import StableDiffusionKDiffusionPipeline
+
+     try:
+         if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_torch_and_transformers_and_onnx_objects import *  # noqa F403
+     else:
+         from .pipelines import (
+             OnnxStableDiffusionImg2ImgPipeline,
+             OnnxStableDiffusionInpaintPipeline,
+             OnnxStableDiffusionInpaintPipelineLegacy,
+             OnnxStableDiffusionPipeline,
+             OnnxStableDiffusionUpscalePipeline,
+             StableDiffusionOnnxPipeline,
+         )
+
+     try:
+         if not (is_torch_available() and is_librosa_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_torch_and_librosa_objects import *  # noqa F403
+     else:
+         from .pipelines import AudioDiffusionPipeline, Mel
+
+     try:
+         if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
+     else:
+         from .pipelines import SpectrogramDiffusionPipeline
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_flax_objects import *  # noqa F403
+     else:
+         from .models.controlnet_flax import FlaxControlNetModel
+         from .models.modeling_flax_utils import FlaxModelMixin
+         from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
+         from .models.vae_flax import FlaxAutoencoderKL
+         from .pipelines import FlaxDiffusionPipeline
+         from .schedulers import (
+             FlaxDDIMScheduler,
+             FlaxDDPMScheduler,
+             FlaxDPMSolverMultistepScheduler,
+             FlaxEulerDiscreteScheduler,
+             FlaxKarrasVeScheduler,
+             FlaxLMSDiscreteScheduler,
+             FlaxPNDMScheduler,
+             FlaxSchedulerMixin,
+             FlaxScoreSdeVeScheduler,
+         )
+
+     try:
+         if not (is_flax_available() and is_transformers_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_flax_and_transformers_objects import *  # noqa F403
+     else:
+         from .pipelines import (
+             FlaxStableDiffusionControlNetPipeline,
+             FlaxStableDiffusionImg2ImgPipeline,
+             FlaxStableDiffusionInpaintPipeline,
+             FlaxStableDiffusionPipeline,
+             FlaxStableDiffusionXLPipeline,
+         )
+
+     try:
+         if not (is_note_seq_available()):
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         from .utils.dummy_note_seq_objects import *  # noqa F403
+     else:
+         from .pipelines import MidiProcessor
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(
+         __name__,
+         globals()["__file__"],
+         _import_structure,
+         module_spec=__spec__,
+         extra_objects={"__version__": __version__},
+     )
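
The comment block near the top of this file describes the lazy-import scheme: `_import_structure` maps each submodule to the names it exports, and `_LazyModule` defers the real imports until an attribute is first accessed. A minimal sketch of the same idea using a PEP 562 module-level `__getattr__` (an illustration only, not diffusers' actual `_LazyModule`; the stdlib `json` module stands in for a heavy submodule):

# lazy_pkg/__init__.py
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):
    # Called only for names that normal attribute lookup cannot find (PEP 562).
    if name in _name_to_module:
        module = importlib.import_module(_name_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

With this, `import lazy_pkg` is nearly free, and the real import only happens the first time something like `lazy_pkg.dumps` is touched.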
src/diffusers/commands/__init__.py ADDED
@@ -0,0 +1,27 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from abc import ABC, abstractmethod
+ from argparse import ArgumentParser
+
+
+ class BaseDiffusersCLICommand(ABC):
+     @staticmethod
+     @abstractmethod
+     def register_subcommand(parser: ArgumentParser):
+         raise NotImplementedError()
+
+     @abstractmethod
+     def run(self):
+         raise NotImplementedError()
src/diffusers/commands/diffusers_cli.py ADDED
@@ -0,0 +1,43 @@
+ #!/usr/bin/env python
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from argparse import ArgumentParser
+
+ from .env import EnvironmentCommand
+ from .fp16_safetensors import FP16SafetensorsCommand
+
+
+ def main():
+     parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
+     commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
+
+     # Register commands
+     EnvironmentCommand.register_subcommand(commands_parser)
+     FP16SafetensorsCommand.register_subcommand(commands_parser)
+
+     # Let's go
+     args = parser.parse_args()
+
+     if not hasattr(args, "func"):
+         parser.print_help()
+         exit(1)
+
+     # Run
+     service = args.func(args)
+     service.run()
+
+
+ if __name__ == "__main__":
+     main()
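
New subcommands plug into the dispatcher above through the `BaseDiffusersCLICommand` contract from `commands/__init__.py`: a static `register_subcommand` that wires an argparse sub-parser to a factory, and a `run` method. A hypothetical example (not part of diffusers) of what a third command could look like:

from argparse import ArgumentParser, Namespace

from . import BaseDiffusersCLICommand


def hello_command_factory(args: Namespace):
    return HelloCommand(args.name)


class HelloCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=hello_command_factory)

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"Hello, {self.name}!")

Registering it would then be one extra line in `main()` above: `HelloCommand.register_subcommand(commands_parser)`.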
src/diffusers/commands/env.py ADDED
@@ -0,0 +1,84 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import platform
+ from argparse import ArgumentParser
+
+ import huggingface_hub
+
+ from .. import __version__ as version
+ from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
+ from . import BaseDiffusersCLICommand
+
+
+ def info_command_factory(_):
+     return EnvironmentCommand()
+
+
+ class EnvironmentCommand(BaseDiffusersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         download_parser = parser.add_parser("env")
+         download_parser.set_defaults(func=info_command_factory)
+
+     def run(self):
+         hub_version = huggingface_hub.__version__
+
+         pt_version = "not installed"
+         pt_cuda_available = "NA"
+         if is_torch_available():
+             import torch
+
+             pt_version = torch.__version__
+             pt_cuda_available = torch.cuda.is_available()
+
+         transformers_version = "not installed"
+         if is_transformers_available():
+             import transformers
+
+             transformers_version = transformers.__version__
+
+         accelerate_version = "not installed"
+         if is_accelerate_available():
+             import accelerate
+
+             accelerate_version = accelerate.__version__
+
+         xformers_version = "not installed"
+         if is_xformers_available():
+             import xformers
+
+             xformers_version = xformers.__version__
+
+         info = {
+             "`diffusers` version": version,
+             "Platform": platform.platform(),
+             "Python version": platform.python_version(),
+             "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
+             "Huggingface_hub version": hub_version,
+             "Transformers version": transformers_version,
+             "Accelerate version": accelerate_version,
+             "xFormers version": xformers_version,
+             "Using GPU in script?": "<fill in>",
+             "Using distributed or parallel set-up in script?": "<fill in>",
+         }
+
+         print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
+         print(self.format_dict(info))
+
+         return info
+
+     @staticmethod
+     def format_dict(d):
+         return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
src/diffusers/commands/fp16_safetensors.py ADDED
@@ -0,0 +1,132 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Usage example:
+     diffusers-cli fp16_safetensors --ckpt_id=openai/shap-e --fp16 --use_safetensors
+ """
+
+ import glob
+ import json
+ import warnings
+ from argparse import ArgumentParser, Namespace
+ from importlib import import_module
+
+ import huggingface_hub
+ import torch
+ from huggingface_hub import hf_hub_download
+ from packaging import version
+
+ from ..utils import logging
+ from . import BaseDiffusersCLICommand
+
+
+ def conversion_command_factory(args: Namespace):
+     if args.use_auth_token:
+         warnings.warn(
+             "The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now"
+             " handled automatically if user is logged in."
+         )
+     return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors)
+
+
+ class FP16SafetensorsCommand(BaseDiffusersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         conversion_parser = parser.add_parser("fp16_safetensors")
+         conversion_parser.add_argument(
+             "--ckpt_id",
+             type=str,
+             help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.",
+         )
+         conversion_parser.add_argument(
+             "--fp16", action="store_true", help="If serializing the variables in FP16 precision."
+         )
+         conversion_parser.add_argument(
+             "--use_safetensors", action="store_true", help="If serializing in the safetensors format."
+         )
+         conversion_parser.add_argument(
+             "--use_auth_token",
+             action="store_true",
+             help="When working with checkpoints having private visibility. When used `huggingface-cli login` needs to be run beforehand.",
+         )
+         conversion_parser.set_defaults(func=conversion_command_factory)
+
+     def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool):
+         self.logger = logging.get_logger("diffusers-cli/fp16_safetensors")
+         self.ckpt_id = ckpt_id
+         self.local_ckpt_dir = f"/tmp/{ckpt_id}"
+         self.fp16 = fp16
+
+         self.use_safetensors = use_safetensors
+
+         if not self.use_safetensors and not self.fp16:
+             raise NotImplementedError(
+                 "When `use_safetensors` and `fp16` both are False, then this command is of no use."
+             )
+
+     def run(self):
+         if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
+             raise ImportError(
+                 "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub"
+                 " installation."
+             )
+         else:
+             from huggingface_hub import create_commit
+             from huggingface_hub._commit_api import CommitOperationAdd
+
+         model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json")
+         with open(model_index, "r") as f:
+             pipeline_class_name = json.load(f)["_class_name"]
+         pipeline_class = getattr(import_module("diffusers"), pipeline_class_name)
+         self.logger.info(f"Pipeline class imported: {pipeline_class_name}.")
+
+         # Load the appropriate pipeline. We could have used `DiffusionPipeline`
+         # here, but just to avoid any rough edge cases.
+         pipeline = pipeline_class.from_pretrained(
+             self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32
+         )
+         pipeline.save_pretrained(
+             self.local_ckpt_dir,
+             safe_serialization=True if self.use_safetensors else False,
+             variant="fp16" if self.fp16 else None,
+         )
+         self.logger.info(f"Pipeline locally saved to {self.local_ckpt_dir}.")
+
+         # Fetch all the paths.
+         if self.fp16:
+             modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.fp16.*")
+         elif self.use_safetensors:
+             modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.safetensors")
+
+         # Prepare for the PR.
+         commit_message = f"Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}."
+         operations = []
+         for path in modified_paths:
+             operations.append(CommitOperationAdd(path_in_repo="/".join(path.split("/")[4:]), path_or_fileobj=path))
+
+         # Open the PR.
+         commit_description = (
+             "Variables converted by the [`diffusers`' `fp16_safetensors`"
+             " CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)."
+         )
+         hub_pr_url = create_commit(
+             repo_id=self.ckpt_id,
+             operations=operations,
+             commit_message=commit_message,
+             commit_description=commit_description,
+             repo_type="model",
+             create_pr=True,
+         ).pr_url
+         self.logger.info(f"PR created here: {hub_pr_url}.")
src/diffusers/configuration_utils.py ADDED
@@ -0,0 +1,699 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ ConfigMixin base class and utilities."""
17
+ import dataclasses
18
+ import functools
19
+ import importlib
20
+ import inspect
21
+ import json
22
+ import os
23
+ import re
24
+ from collections import OrderedDict
25
+ from pathlib import PosixPath
26
+ from typing import Any, Dict, Tuple, Union
27
+
28
+ import numpy as np
29
+ from huggingface_hub import create_repo, hf_hub_download
30
+ from huggingface_hub.utils import (
31
+ EntryNotFoundError,
32
+ RepositoryNotFoundError,
33
+ RevisionNotFoundError,
34
+ validate_hf_hub_args,
35
+ )
36
+ from requests import HTTPError
37
+
38
+ from . import __version__
39
+ from .utils import (
40
+ HUGGINGFACE_CO_RESOLVE_ENDPOINT,
41
+ DummyObject,
42
+ deprecate,
43
+ extract_commit_hash,
44
+ http_user_agent,
45
+ logging,
46
+ )
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _re_configuration_file = re.compile(r"config\.(.*)\.json")
52
+
53
+
54
+ class FrozenDict(OrderedDict):
55
+ def __init__(self, *args, **kwargs):
56
+ super().__init__(*args, **kwargs)
57
+
58
+ for key, value in self.items():
59
+ setattr(self, key, value)
60
+
61
+ self.__frozen = True
62
+
63
+ def __delitem__(self, *args, **kwargs):
64
+ raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
65
+
66
+ def setdefault(self, *args, **kwargs):
67
+ raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
68
+
69
+ def pop(self, *args, **kwargs):
70
+ raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
71
+
72
+ def update(self, *args, **kwargs):
73
+ raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
74
+
75
+ def __setattr__(self, name, value):
76
+ if hasattr(self, "__frozen") and self.__frozen:
77
+ raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.")
78
+ super().__setattr__(name, value)
79
+
80
+ def __setitem__(self, name, value):
81
+ if hasattr(self, "__frozen") and self.__frozen:
82
+ raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.")
83
+ super().__setitem__(name, value)
84
+
85
+
86
+ class ConfigMixin:
87
+ r"""
88
+ Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also
89
+ provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and
90
+ saving classes that inherit from [`ConfigMixin`].
91
+
92
+ Class attributes:
93
+ - **config_name** (`str`) -- A filename under which the config should be stored when calling
94
+ [`~ConfigMixin.save_config`] (should be overridden by parent class).
95
+ - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be
96
+ overridden by subclass).
97
+ - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).
98
+ - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function
99
+ should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by
100
+ subclass).
101
+ """
102
+
103
+ config_name = None
104
+ ignore_for_config = []
105
+ has_compatibles = False
106
+
107
+ _deprecated_kwargs = []
108
+
109
+ def register_to_config(self, **kwargs):
110
+ if self.config_name is None:
111
+ raise NotImplementedError(f"Make sure that {self.__class__} has defined a class name `config_name`")
112
+ # Special case for `kwargs` used in deprecation warning added to schedulers
113
+ # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,
114
+ # or solve in a more general way.
115
+ kwargs.pop("kwargs", None)
116
+
117
+ if not hasattr(self, "_internal_dict"):
118
+ internal_dict = kwargs
119
+ else:
120
+ previous_dict = dict(self._internal_dict)
121
+ internal_dict = {**self._internal_dict, **kwargs}
122
+ logger.debug(f"Updating config from {previous_dict} to {internal_dict}")
123
+
124
+ self._internal_dict = FrozenDict(internal_dict)
125
+
126
+ def __getattr__(self, name: str) -> Any:
127
+ """The only reason we overwrite `getattr` here is to gracefully deprecate accessing
128
+ config attributes directly. See https://github.com/huggingface/diffusers/pull/3129
129
+
130
+ This function is mostly copied from PyTorch's __getattr__ overwrite:
131
+ https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
132
+ """
133
+
134
+ is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name)
135
+ is_attribute = name in self.__dict__
136
+
137
+ if is_in_config and not is_attribute:
138
+ deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'."
139
+ deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False)
140
+ return self._internal_dict[name]
141
+
142
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
143
+
144
+ def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
145
+ """
146
+ Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the
147
+ [`~ConfigMixin.from_config`] class method.
148
+
149
+ Args:
150
+ save_directory (`str` or `os.PathLike`):
151
+ Directory where the configuration JSON file is saved (will be created if it does not exist).
152
+ push_to_hub (`bool`, *optional*, defaults to `False`):
153
+ Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
154
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
155
+ namespace).
156
+ kwargs (`Dict[str, Any]`, *optional*):
157
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
158
+ """
159
+ if os.path.isfile(save_directory):
160
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
161
+
162
+ os.makedirs(save_directory, exist_ok=True)
163
+
164
+ # If we save using the predefined names, we can load using `from_config`
165
+ output_config_file = os.path.join(save_directory, self.config_name)
166
+
167
+ self.to_json_file(output_config_file)
168
+ logger.info(f"Configuration saved in {output_config_file}")
169
+
170
+ if push_to_hub:
171
+ commit_message = kwargs.pop("commit_message", None)
172
+ private = kwargs.pop("private", False)
173
+ create_pr = kwargs.pop("create_pr", False)
174
+ token = kwargs.pop("token", None)
175
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
176
+ repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
177
+
178
+ self._upload_folder(
179
+ save_directory,
180
+ repo_id,
181
+ token=token,
182
+ commit_message=commit_message,
183
+ create_pr=create_pr,
184
+ )
185
+
186
+ @classmethod
187
+ def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):
188
+ r"""
189
+ Instantiate a Python class from a config dictionary.
190
+
191
+ Parameters:
192
+ config (`Dict[str, Any]`):
193
+ A config dictionary from which the Python class is instantiated. Make sure to only load configuration
194
+ files of compatible classes.
195
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
196
+ Whether kwargs that are not consumed by the Python class should be returned or not.
197
+ kwargs (remaining dictionary of keyword arguments, *optional*):
198
+ Can be used to update the configuration object (after it is loaded) and initiate the Python class.
199
+ `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually
200
+ overwrite the same named arguments in `config`.
201
+
202
+ Returns:
203
+ [`ModelMixin`] or [`SchedulerMixin`]:
204
+ A model or scheduler object instantiated from a config dictionary.
205
+
206
+ Examples:
207
+
208
+ ```python
209
+ >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler
210
+
211
+ >>> # Download scheduler from huggingface.co and cache.
212
+ >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32")
213
+
214
+ >>> # Instantiate DDIM scheduler class with same config as DDPM
215
+ >>> scheduler = DDIMScheduler.from_config(scheduler.config)
216
+
217
+ >>> # Instantiate PNDM scheduler class with same config as DDPM
218
+ >>> scheduler = PNDMScheduler.from_config(scheduler.config)
219
+ ```
220
+ """
221
+ # <===== TO BE REMOVED WITH DEPRECATION
222
+ # TODO(Patrick) - make sure to remove the following lines when config=="model_path" is deprecated
223
+ if "pretrained_model_name_or_path" in kwargs:
224
+ config = kwargs.pop("pretrained_model_name_or_path")
225
+
226
+ if config is None:
227
+ raise ValueError("Please make sure to provide a config as the first positional argument.")
228
+ # ======>
229
+
230
+ if not isinstance(config, dict):
231
+ deprecation_message = "It is deprecated to pass a pretrained model name or path to `from_config`."
232
+ if "Scheduler" in cls.__name__:
233
+ deprecation_message += (
234
+ f"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead."
235
+ " Otherwise, please make sure to pass a configuration dictionary instead. This functionality will"
236
+ " be removed in v1.0.0."
237
+ )
238
+ elif "Model" in cls.__name__:
239
+ deprecation_message += (
240
+ f"If you were trying to load a model, please use {cls}.load_config(...) followed by"
241
+ f" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary"
242
+ " instead. This functionality will be removed in v1.0.0."
243
+ )
244
+ deprecate("config-passed-as-path", "1.0.0", deprecation_message, standard_warn=False)
245
+ config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)
246
+
247
+ init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)
248
+
249
+ # Allow dtype to be specified on initialization
250
+ if "dtype" in unused_kwargs:
251
+ init_dict["dtype"] = unused_kwargs.pop("dtype")
252
+
253
+ # add possible deprecated kwargs
254
+ for deprecated_kwarg in cls._deprecated_kwargs:
255
+ if deprecated_kwarg in unused_kwargs:
256
+ init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)
257
+
258
+ # Return model and optionally state and/or unused_kwargs
259
+ model = cls(**init_dict)
260
+
261
+ # make sure to also save config parameters that might be used for compatible classes
262
+ model.register_to_config(**hidden_dict)
263
+
264
+ # add hidden kwargs of compatible classes to unused_kwargs
265
+ unused_kwargs = {**unused_kwargs, **hidden_dict}
266
+
267
+ if return_unused_kwargs:
268
+ return (model, unused_kwargs)
269
+ else:
270
+ return model
271
+
272
+ @classmethod
273
+ def get_config_dict(cls, *args, **kwargs):
274
+ deprecation_message = (
275
+ f" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be"
276
+ " removed in version v1.0.0"
277
+ )
278
+ deprecate("get_config_dict", "1.0.0", deprecation_message, standard_warn=False)
279
+ return cls.load_config(*args, **kwargs)
280
+
281
+ @classmethod
282
+ @validate_hf_hub_args
283
+ def load_config(
284
+ cls,
285
+ pretrained_model_name_or_path: Union[str, os.PathLike],
286
+ return_unused_kwargs=False,
287
+ return_commit_hash=False,
288
+ **kwargs,
289
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
290
+ r"""
291
+ Load a model or scheduler configuration.
292
+
293
+ Parameters:
294
+ pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
295
+ Can be either:
296
+
297
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
298
+ the Hub.
299
+ - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with
300
+ [`~ConfigMixin.save_config`].
301
+
302
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
303
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
304
+ is not used.
305
+ force_download (`bool`, *optional*, defaults to `False`):
306
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
307
+ cached versions if they exist.
308
+ resume_download (`bool`, *optional*, defaults to `False`):
309
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
310
+ incompletely downloaded files are deleted.
311
+ proxies (`Dict[str, str]`, *optional*):
312
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
313
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
314
+ output_loading_info(`bool`, *optional*, defaults to `False`):
315
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
316
+ local_files_only (`bool`, *optional*, defaults to `False`):
317
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
318
+ won't be downloaded from the Hub.
319
+ token (`str` or *bool*, *optional*):
320
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
321
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
322
+ revision (`str`, *optional*, defaults to `"main"`):
323
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
324
+ allowed by Git.
325
+ subfolder (`str`, *optional*, defaults to `""`):
326
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
327
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
328
+ Whether unused keyword arguments of the config are returned.
329
+ return_commit_hash (`bool`, *optional*, defaults to `False`):
330
+ Whether the `commit_hash` of the loaded configuration is returned.
331
+
332
+ Returns:
333
+ `dict`:
334
+ A dictionary of all the parameters stored in a JSON configuration file.
335
+
336
+ """
337
+ cache_dir = kwargs.pop("cache_dir", None)
338
+ force_download = kwargs.pop("force_download", False)
339
+ resume_download = kwargs.pop("resume_download", False)
340
+ proxies = kwargs.pop("proxies", None)
341
+ token = kwargs.pop("token", None)
342
+ local_files_only = kwargs.pop("local_files_only", False)
343
+ revision = kwargs.pop("revision", None)
344
+ _ = kwargs.pop("mirror", None)
345
+ subfolder = kwargs.pop("subfolder", None)
346
+ user_agent = kwargs.pop("user_agent", {})
347
+
348
+ user_agent = {**user_agent, "file_type": "config"}
349
+ user_agent = http_user_agent(user_agent)
350
+
351
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
352
+
353
+ if cls.config_name is None:
354
+ raise ValueError(
355
+ "`self.config_name` is not defined. Note that one should not load a config from "
356
+ "`ConfigMixin`. Please make sure to define `config_name` in a class inheriting from `ConfigMixin`"
357
+ )
358
+
359
+ if os.path.isfile(pretrained_model_name_or_path):
360
+ config_file = pretrained_model_name_or_path
361
+ elif os.path.isdir(pretrained_model_name_or_path):
362
+ if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):
363
+ # Load from a PyTorch checkpoint
364
+ config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)
365
+ elif subfolder is not None and os.path.isfile(
366
+ os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
367
+ ):
368
+ config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
369
+ else:
370
+ raise EnvironmentError(
371
+ f"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}."
372
+ )
373
+ else:
374
+ try:
375
+ # Load from URL or cache if already cached
376
+ config_file = hf_hub_download(
377
+ pretrained_model_name_or_path,
378
+ filename=cls.config_name,
379
+ cache_dir=cache_dir,
380
+ force_download=force_download,
381
+ proxies=proxies,
382
+ resume_download=resume_download,
383
+ local_files_only=local_files_only,
384
+ token=token,
385
+ user_agent=user_agent,
386
+ subfolder=subfolder,
387
+ revision=revision,
388
+ )
389
+ except RepositoryNotFoundError:
390
+ raise EnvironmentError(
391
+ f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
392
+ " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
393
+ " token having permission to this repo with `token` or log in with `huggingface-cli login`."
394
+ )
395
+ except RevisionNotFoundError:
396
+ raise EnvironmentError(
397
+ f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for"
398
+ " this model name. Check the model page at"
399
+ f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
400
+ )
401
+ except EntryNotFoundError:
402
+ raise EnvironmentError(
403
+ f"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}."
404
+ )
405
+ except HTTPError as err:
406
+ raise EnvironmentError(
407
+ "There was a specific connection error when trying to load"
408
+ f" {pretrained_model_name_or_path}:\n{err}"
409
+ )
410
+ except ValueError:
411
+ raise EnvironmentError(
412
+ f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
413
+ f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
414
+ f" directory containing a {cls.config_name} file.\nCheckout your internet connection or see how to"
415
+ " run the library in offline mode at"
416
+ " 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
417
+ )
418
+ except EnvironmentError:
419
+ raise EnvironmentError(
420
+ f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
421
+ "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
422
+ f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
423
+ f"containing a {cls.config_name} file"
424
+ )
425
+
426
+ try:
427
+ # Load config dict
428
+ config_dict = cls._dict_from_json_file(config_file)
429
+
430
+ commit_hash = extract_commit_hash(config_file)
431
+ except (json.JSONDecodeError, UnicodeDecodeError):
432
+ raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.")
433
+
434
+ if not (return_unused_kwargs or return_commit_hash):
435
+ return config_dict
436
+
437
+ outputs = (config_dict,)
438
+
439
+ if return_unused_kwargs:
440
+ outputs += (kwargs,)
441
+
442
+ if return_commit_hash:
443
+ outputs += (commit_hash,)
444
+
445
+ return outputs
446
+
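A hedged sketch of `load_config` in practice, reusing the checkpoint id from the `from_config` docstring above (the `scheduler` subfolder layout is an assumption about that repo):

```python
from diffusers import DDPMScheduler

# Download just the JSON config, without instantiating any weights.
config = DDPMScheduler.load_config("google/ddpm-cifar10-32", subfolder="scheduler")
print(config["_class_name"])

# The returned dict can be fed straight back into from_config.
scheduler = DDPMScheduler.from_config(config)
```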
447
+ @staticmethod
448
+ def _get_init_keys(cls):
449
+ return set(dict(inspect.signature(cls.__init__).parameters).keys())
450
+
451
+ @classmethod
452
+ def extract_init_dict(cls, config_dict, **kwargs):
453
+ # Skip keys that were not present in the original config, so default __init__ values were used
454
+ used_defaults = config_dict.get("_use_default_values", [])
455
+ config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != "_use_default_values"}
456
+
457
+ # 0. Copy origin config dict
458
+ original_dict = dict(config_dict.items())
459
+
460
+ # 1. Retrieve expected config attributes from __init__ signature
461
+ expected_keys = cls._get_init_keys(cls)
462
+ expected_keys.remove("self")
463
+ # remove general kwargs if present in dict
464
+ if "kwargs" in expected_keys:
465
+ expected_keys.remove("kwargs")
466
+ # remove flax internal keys
467
+ if hasattr(cls, "_flax_internal_args"):
468
+ for arg in cls._flax_internal_args:
469
+ expected_keys.remove(arg)
470
+
471
+ # 2. Remove attributes that cannot be expected from expected config attributes
472
+ # remove keys to be ignored
473
+ if len(cls.ignore_for_config) > 0:
474
+ expected_keys = expected_keys - set(cls.ignore_for_config)
475
+
476
+ # load diffusers library to import compatible and original scheduler
477
+ diffusers_library = importlib.import_module(__name__.split(".")[0])
478
+
479
+ if cls.has_compatibles:
480
+ compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]
481
+ else:
482
+ compatible_classes = []
483
+
484
+ expected_keys_comp_cls = set()
485
+ for c in compatible_classes:
486
+ expected_keys_c = cls._get_init_keys(c)
487
+ expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)
488
+ expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)
489
+ config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}
490
+
491
+ # remove attributes from orig class that cannot be expected
492
+ orig_cls_name = config_dict.pop("_class_name", cls.__name__)
493
+ if (
494
+ isinstance(orig_cls_name, str)
495
+ and orig_cls_name != cls.__name__
496
+ and hasattr(diffusers_library, orig_cls_name)
497
+ ):
498
+ orig_cls = getattr(diffusers_library, orig_cls_name)
499
+ unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys
500
+ config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}
501
+ elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):
502
+ raise ValueError(
503
+ "Make sure that the `_class_name` is of type string or list of string (for custom pipelines)."
504
+ )
505
+
506
+ # remove private attributes
507
+ config_dict = {k: v for k, v in config_dict.items() if not k.startswith("_")}
508
+
509
+ # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments
510
+ init_dict = {}
511
+ for key in expected_keys:
512
+ # if config param is passed to kwarg and is present in config dict
513
+ # it should overwrite existing config dict key
514
+ if key in kwargs and key in config_dict:
515
+ config_dict[key] = kwargs.pop(key)
516
+
517
+ if key in kwargs:
518
+ # overwrite key
519
+ init_dict[key] = kwargs.pop(key)
520
+ elif key in config_dict:
521
+ # use value from config dict
522
+ init_dict[key] = config_dict.pop(key)
523
+
524
+ # 4. Give nice warning if unexpected values have been passed
525
+ if len(config_dict) > 0:
526
+ logger.warning(
527
+ f"The config attributes {config_dict} were passed to {cls.__name__}, "
528
+ "but are not expected and will be ignored. Please verify your "
529
+ f"{cls.config_name} configuration file."
530
+ )
531
+
532
+ # 5. Give nice info if config attributes are initialized to default because they have not been passed
533
+ passed_keys = set(init_dict.keys())
534
+ if len(expected_keys - passed_keys) > 0:
535
+ logger.info(
536
+ f"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values."
537
+ )
538
+
539
+ # 6. Define unused keyword arguments
540
+ unused_kwargs = {**config_dict, **kwargs}
541
+
542
+ # 7. Define "hidden" config parameters that were saved for compatible classes
543
+ hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}
544
+
545
+ return init_dict, unused_kwargs, hidden_config_dict
546
+
547
+ @classmethod
548
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
549
+ with open(json_file, "r", encoding="utf-8") as reader:
550
+ text = reader.read()
551
+ return json.loads(text)
552
+
553
+ def __repr__(self):
554
+ return f"{self.__class__.__name__} {self.to_json_string()}"
555
+
556
+ @property
557
+ def config(self) -> Dict[str, Any]:
558
+ """
559
+ Returns the config of the class as a frozen dictionary
560
+
561
+ Returns:
562
+ `Dict[str, Any]`: Config of the class.
563
+ """
564
+ return self._internal_dict
565
+
566
+ def to_json_string(self) -> str:
567
+ """
568
+ Serializes the configuration instance to a JSON string.
569
+
570
+ Returns:
571
+ `str`:
572
+ String containing all the attributes that make up the configuration instance in JSON format.
573
+ """
574
+ config_dict = self._internal_dict if hasattr(self, "_internal_dict") else {}
575
+ config_dict["_class_name"] = self.__class__.__name__
576
+ config_dict["_diffusers_version"] = __version__
577
+
578
+ def to_json_saveable(value):
579
+ if isinstance(value, np.ndarray):
580
+ value = value.tolist()
581
+ elif isinstance(value, PosixPath):
582
+ value = str(value)
583
+ return value
584
+
585
+ config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}
586
+ # Don't save "_ignore_files" or "_use_default_values"
587
+ config_dict.pop("_ignore_files", None)
588
+ config_dict.pop("_use_default_values", None)
589
+
590
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
591
+
592
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
593
+ """
594
+ Save the configuration instance's parameters to a JSON file.
595
+
596
+ Args:
597
+ json_file_path (`str` or `os.PathLike`):
598
+ Path to the JSON file to save a configuration instance's parameters.
599
+ """
600
+ with open(json_file_path, "w", encoding="utf-8") as writer:
601
+ writer.write(self.to_json_string())
602
+
603
+
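A hedged round-trip sketch of the serialization helpers above, using a scheduler as a concrete `ConfigMixin` subclass (the `/tmp` path is illustrative):

```python
import json

from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)

as_dict = json.loads(scheduler.to_json_string())
assert as_dict["_class_name"] == "DDPMScheduler"  # injected by to_json_string

scheduler.save_config("/tmp/ddpm-config")  # writes scheduler_config.json
reloaded = DDPMScheduler.from_config(DDPMScheduler.load_config("/tmp/ddpm-config"))
```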
604
+ def register_to_config(init):
605
+ r"""
606
+ Decorator to apply on the init of classes inheriting from [`ConfigMixin`] so that all the arguments are
607
+ automatically sent to `self.register_for_config`. To ignore a specific argument accepted by the init but that
608
+ shouldn't be registered in the config, use the `ignore_for_config` class variable
609
+
610
+ Warning: Once decorated, all private arguments (beginning with an underscore) are trashed and not sent to the init!
611
+ """
612
+
613
+ @functools.wraps(init)
614
+ def inner_init(self, *args, **kwargs):
615
+ # Ignore private kwargs in the init.
616
+ init_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
617
+ config_init_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")}
618
+ if not isinstance(self, ConfigMixin):
619
+ raise RuntimeError(
620
+ f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does "
621
+ "not inherit from `ConfigMixin`."
622
+ )
623
+
624
+ ignore = getattr(self, "ignore_for_config", [])
625
+ # Get positional arguments aligned with kwargs
626
+ new_kwargs = {}
627
+ signature = inspect.signature(init)
628
+ parameters = {
629
+ name: p.default for i, (name, p) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore
630
+ }
631
+ for arg, name in zip(args, parameters.keys()):
632
+ new_kwargs[name] = arg
633
+
634
+ # Then add all kwargs
635
+ new_kwargs.update(
636
+ {
637
+ k: init_kwargs.get(k, default)
638
+ for k, default in parameters.items()
639
+ if k not in ignore and k not in new_kwargs
640
+ }
641
+ )
642
+
643
+ # Take note of the parameters that were not present in the loaded config
644
+ if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:
645
+ new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs))
646
+
647
+ new_kwargs = {**config_init_kwargs, **new_kwargs}
648
+ getattr(self, "register_to_config")(**new_kwargs)
649
+ init(self, *args, **init_kwargs)
650
+
651
+ return inner_init
652
+
653
+
654
+ def flax_register_to_config(cls):
655
+ original_init = cls.__init__
656
+
657
+ @functools.wraps(original_init)
658
+ def init(self, *args, **kwargs):
659
+ if not isinstance(self, ConfigMixin):
660
+ raise RuntimeError(
661
+ f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does "
662
+ "not inherit from `ConfigMixin`."
663
+ )
664
+
665
+ # Ignore private kwargs in the init. Retrieve all passed attributes
666
+ init_kwargs = dict(kwargs.items())
667
+
668
+ # Retrieve default values
669
+ fields = dataclasses.fields(self)
670
+ default_kwargs = {}
671
+ for field in fields:
672
+ # ignore flax specific attributes
673
+ if field.name in self._flax_internal_args:
674
+ continue
675
+ if type(field.default) == dataclasses._MISSING_TYPE:
676
+ default_kwargs[field.name] = None
677
+ else:
678
+ default_kwargs[field.name] = getattr(self, field.name)
679
+
680
+ # Make sure init_kwargs override default kwargs
681
+ new_kwargs = {**default_kwargs, **init_kwargs}
682
+ # dtype should be part of `init_kwargs`, but not `new_kwargs`
683
+ if "dtype" in new_kwargs:
684
+ new_kwargs.pop("dtype")
685
+
686
+ # Get positional arguments aligned with kwargs
687
+ for i, arg in enumerate(args):
688
+ name = fields[i].name
689
+ new_kwargs[name] = arg
690
+
691
+ # Take note of the parameters that were not present in the loaded config
692
+ if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:
693
+ new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs))
694
+
695
+ getattr(self, "register_to_config")(**new_kwargs)
696
+ original_init(self, *args, **kwargs)
697
+
698
+ cls.__init__ = init
699
+ return cls
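To illustrate the `@register_to_config` decorator defined in this file, here is a minimal sketch with a hypothetical `ConfigMixin` subclass (`MyBlock` and its arguments are invented for illustration):

```python
from diffusers.configuration_utils import ConfigMixin, register_to_config


class MyBlock(ConfigMixin):
    config_name = "my_block_config.json"  # required by ConfigMixin

    @register_to_config
    def __init__(self, hidden_size: int = 64, num_layers: int = 2):
        # By the time this body runs, all arguments have already been
        # captured into self.config by the decorator.
        self.hidden_size = hidden_size
        self.num_layers = num_layers


block = MyBlock(hidden_size=128)
print(block.config.hidden_size, block.config.num_layers)  # 128 2
# num_layers was not passed explicitly, so it is tracked as a default
# (list order is not guaranteed):
print(block.config._use_default_values)  # ["num_layers"]
```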
src/diffusers/dependency_versions_check.py ADDED
@@ -0,0 +1,34 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .dependency_versions_table import deps
16
+ from .utils.versions import require_version, require_version_core
17
+
18
+
19
+ # define which module versions we always want to check at run time
20
+ # (usually the ones defined in `install_requires` in setup.py)
21
+ #
22
+ # order specific notes:
23
+ # - tqdm must be checked before tokenizers
24
+
25
+ pkgs_to_check_at_runtime = "python requests filelock numpy".split()
26
+ for pkg in pkgs_to_check_at_runtime:
27
+ if pkg in deps:
28
+ require_version_core(deps[pkg])
29
+ else:
30
+ raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
31
+
32
+
33
+ def dep_version_check(pkg, hint=None):
34
+ require_version(deps[pkg], hint)
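A hedged sketch of calling the check manually for a package pinned in `dependency_versions_table.py` (shown next):

```python
from diffusers.dependency_versions_check import dep_version_check

# Raises if the installed package violates the pinned requirement,
# e.g. "safetensors>=0.3.1" from the deps table below.
dep_version_check("safetensors", hint="pip install -U safetensors")
```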
src/diffusers/dependency_versions_table.py ADDED
@@ -0,0 +1,46 @@
1
+ # THIS FILE HAS BEEN AUTOGENERATED. To update:
2
+ # 1. modify the `_deps` dict in setup.py
3
+ # 2. run `make deps_table_update`
4
+ deps = {
5
+ "Pillow": "Pillow",
6
+ "accelerate": "accelerate>=0.11.0",
7
+ "compel": "compel==0.1.8",
8
+ "datasets": "datasets",
9
+ "filelock": "filelock",
10
+ "flax": "flax>=0.4.1",
11
+ "hf-doc-builder": "hf-doc-builder>=0.3.0",
12
+ "huggingface-hub": "huggingface-hub>=0.20.2",
13
+ "requests-mock": "requests-mock==1.10.0",
14
+ "importlib_metadata": "importlib_metadata",
15
+ "invisible-watermark": "invisible-watermark>=0.2.0",
16
+ "isort": "isort>=5.5.4",
17
+ "jax": "jax>=0.4.1",
18
+ "jaxlib": "jaxlib>=0.4.1",
19
+ "Jinja2": "Jinja2",
20
+ "k-diffusion": "k-diffusion>=0.0.12",
21
+ "torchsde": "torchsde",
22
+ "note_seq": "note_seq",
23
+ "librosa": "librosa",
24
+ "numpy": "numpy",
25
+ "omegaconf": "omegaconf",
26
+ "parameterized": "parameterized",
27
+ "peft": "peft>=0.6.0",
28
+ "protobuf": "protobuf>=3.20.3,<4",
29
+ "pytest": "pytest",
30
+ "pytest-timeout": "pytest-timeout",
31
+ "pytest-xdist": "pytest-xdist",
32
+ "python": "python>=3.8.0",
33
+ "ruff": "ruff==0.1.5",
34
+ "safetensors": "safetensors>=0.3.1",
35
+ "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
36
+ "GitPython": "GitPython<3.1.19",
37
+ "scipy": "scipy",
38
+ "onnx": "onnx",
39
+ "regex": "regex!=2019.12.17",
40
+ "requests": "requests",
41
+ "tensorboard": "tensorboard",
42
+ "torch": "torch>=1.4",
43
+ "torchvision": "torchvision",
44
+ "transformers": "transformers>=4.25.1",
45
+ "urllib3": "urllib3<=2.0.0",
46
+ }
src/diffusers/experimental/README.md ADDED
@@ -0,0 +1,5 @@
1
+ # 🧨 Diffusers Experimental
2
+
3
+ We are adding experimental code to support novel applications and usages of the Diffusers library.
4
+ Currently, the following experiments are supported:
5
+ * Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model.
src/diffusers/experimental/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .rl import ValueGuidedRLPipeline
src/diffusers/experimental/rl/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .value_guided_sampling import ValueGuidedRLPipeline
src/diffusers/experimental/rl/value_guided_sampling.py ADDED
@@ -0,0 +1,154 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import numpy as np
16
+ import torch
17
+ import tqdm
18
+
19
+ from ...models.unet_1d import UNet1DModel
20
+ from ...pipelines import DiffusionPipeline
21
+ from ...utils.dummy_pt_objects import DDPMScheduler
22
+ from ...utils.torch_utils import randn_tensor
23
+
24
+
25
+ class ValueGuidedRLPipeline(DiffusionPipeline):
26
+ r"""
27
+ Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.
28
+
29
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
30
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
31
+
32
+ Parameters:
33
+ value_function ([`UNet1DModel`]):
34
+ A specialized UNet for fine-tuning trajectories base on reward.
35
+ unet ([`UNet1DModel`]):
36
+ UNet architecture to denoise the encoded trajectories.
37
+ scheduler ([`SchedulerMixin`]):
38
+ A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
39
+ application is [`DDPMScheduler`].
40
+ env ():
41
+ An environment following the OpenAI gym API to act in. For now, only Hopper has pretrained models.
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ value_function: UNet1DModel,
47
+ unet: UNet1DModel,
48
+ scheduler: DDPMScheduler,
49
+ env,
50
+ ):
51
+ super().__init__()
52
+ self.value_function = value_function
53
+ self.unet = unet
54
+ self.scheduler = scheduler
55
+ self.env = env
56
+ self.data = env.get_dataset()
57
+ self.means = {}
58
+ for key in self.data.keys():
59
+ try:
60
+ self.means[key] = self.data[key].mean()
61
+ except: # noqa: E722
62
+ pass
63
+ self.stds = {}
64
+ for key in self.data.keys():
65
+ try:
66
+ self.stds[key] = self.data[key].std()
67
+ except: # noqa: E722
68
+ pass
69
+ self.state_dim = env.observation_space.shape[0]
70
+ self.action_dim = env.action_space.shape[0]
71
+
72
+ def normalize(self, x_in, key):
73
+ return (x_in - self.means[key]) / self.stds[key]
74
+
75
+ def de_normalize(self, x_in, key):
76
+ return x_in * self.stds[key] + self.means[key]
77
+
78
+ def to_torch(self, x_in):
79
+ if isinstance(x_in, dict):
80
+ return {k: self.to_torch(v) for k, v in x_in.items()}
81
+ elif torch.is_tensor(x_in):
82
+ return x_in.to(self.unet.device)
83
+ return torch.tensor(x_in, device=self.unet.device)
84
+
85
+ def reset_x0(self, x_in, cond, act_dim):
86
+ for key, val in cond.items():
87
+ x_in[:, key, act_dim:] = val.clone()
88
+ return x_in
89
+
90
+ def run_diffusion(self, x, conditions, n_guide_steps, scale):
91
+ batch_size = x.shape[0]
92
+ y = None
93
+ for i in tqdm.tqdm(self.scheduler.timesteps):
94
+ # create batch of timesteps to pass into model
95
+ timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
96
+ for _ in range(n_guide_steps):
97
+ with torch.enable_grad():
98
+ x.requires_grad_()
99
+
100
+ # permute to match dimension for pre-trained models
101
+ y = self.value_function(x.permute(0, 2, 1), timesteps).sample
102
+ grad = torch.autograd.grad([y.sum()], [x])[0]
103
+
104
+ posterior_variance = self.scheduler._get_variance(i)
105
+ model_std = torch.exp(0.5 * posterior_variance)
106
+ grad = model_std * grad
107
+
108
+ grad[timesteps < 2] = 0
109
+ x = x.detach()
110
+ x = x + scale * grad
111
+ x = self.reset_x0(x, conditions, self.action_dim)
112
+
113
+ prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
114
+
115
+ # TODO: verify deprecation of this kwarg
116
+ x = self.scheduler.step(prev_x, i, x)["prev_sample"]
117
+
118
+ # apply conditions to the trajectory (set the initial state)
119
+ x = self.reset_x0(x, conditions, self.action_dim)
120
+ x = self.to_torch(x)
121
+ return x, y
122
+
123
+ def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
124
+ # normalize the observations and create batch dimension
125
+ obs = self.normalize(obs, "observations")
126
+ obs = obs[None].repeat(batch_size, axis=0)
127
+
128
+ conditions = {0: self.to_torch(obs)}
129
+ shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
130
+
131
+ # generate initial noise and apply our conditions (to make the trajectories start at current state)
132
+ x1 = randn_tensor(shape, device=self.unet.device)
133
+ x = self.reset_x0(x1, conditions, self.action_dim)
134
+ x = self.to_torch(x)
135
+
136
+ # run the diffusion process
137
+ x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
138
+
139
+ # sort output trajectories by value
140
+ sorted_idx = y.argsort(0, descending=True).squeeze()
141
+ sorted_values = x[sorted_idx]
142
+ actions = sorted_values[:, :, : self.action_dim]
143
+ actions = actions.detach().cpu().numpy()
144
+ denorm_actions = self.de_normalize(actions, key="actions")
145
+
146
+ # select the action with the highest value
147
+ if y is not None:
148
+ selected_index = 0
149
+ else:
150
+ # if we didn't run value guiding, select a random action
151
+ selected_index = np.random.randint(0, batch_size)
152
+
153
+ denorm_actions = denorm_actions[selected_index, 0]
154
+ return denorm_actions
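A hedged end-to-end sketch of the pipeline above. It assumes the legacy `gym` + `d4rl` stack that `env.get_dataset()` implies, and a community Hopper checkpoint (repo id taken from the diffusers RL example; treat it as an assumption):

```python
import gym  # requires gym + d4rl for get_dataset()

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")  # a D4RL environment

pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-gd",  # assumed checkpoint id
    env=env,
)

obs = env.reset()
action = pipeline(obs, planning_horizon=32)  # first action of the best trajectory
obs, reward, done, info = env.step(action)  # legacy 4-tuple gym API
```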
src/diffusers/image_processor.py ADDED
@@ -0,0 +1,888 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ from PIL import Image, ImageFilter, ImageOps
22
+
23
+ from .configuration_utils import ConfigMixin, register_to_config
24
+ from .utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate
25
+
26
+
27
+ PipelineImageInput = Union[
28
+ PIL.Image.Image,
29
+ np.ndarray,
30
+ torch.FloatTensor,
31
+ List[PIL.Image.Image],
32
+ List[np.ndarray],
33
+ List[torch.FloatTensor],
34
+ ]
35
+
36
+ PipelineDepthInput = Union[
37
+ PIL.Image.Image,
38
+ np.ndarray,
39
+ torch.FloatTensor,
40
+ List[PIL.Image.Image],
41
+ List[np.ndarray],
42
+ List[torch.FloatTensor],
43
+ ]
44
+
45
+
46
+ class VaeImageProcessor(ConfigMixin):
47
+ """
48
+ Image processor for VAE.
49
+
50
+ Args:
51
+ do_resize (`bool`, *optional*, defaults to `True`):
52
+ Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept
53
+ `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method.
54
+ vae_scale_factor (`int`, *optional*, defaults to `8`):
55
+ VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
56
+ resample (`str`, *optional*, defaults to `lanczos`):
57
+ Resampling filter to use when resizing the image.
58
+ do_normalize (`bool`, *optional*, defaults to `True`):
59
+ Whether to normalize the image to [-1,1].
60
+ do_binarize (`bool`, *optional*, defaults to `False`):
61
+ Whether to binarize the image to 0/1.
62
+ do_convert_rgb (`bool`, *optional*, defaults to `False`):
63
+ Whether to convert the images to RGB format.
64
+ do_convert_grayscale (`bool`, *optional*, defaults to `False`):
65
+ Whether to convert the images to grayscale format.
66
+ """
67
+
68
+ config_name = CONFIG_NAME
69
+
70
+ @register_to_config
71
+ def __init__(
72
+ self,
73
+ do_resize: bool = True,
74
+ vae_scale_factor: int = 8,
75
+ resample: str = "lanczos",
76
+ do_normalize: bool = True,
77
+ do_binarize: bool = False,
78
+ do_convert_rgb: bool = False,
79
+ do_convert_grayscale: bool = False,
80
+ ):
81
+ super().__init__()
82
+ if do_convert_rgb and do_convert_grayscale:
83
+ raise ValueError(
84
+ "`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`,"
85
+ " if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.",
86
+ " if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`",
87
+ )
88
+ self.config.do_convert_rgb = False
89
+
90
+ @staticmethod
91
+ def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
92
+ """
93
+ Convert a numpy image or a batch of images to a PIL image.
94
+ """
95
+ if images.ndim == 3:
96
+ images = images[None, ...]
97
+ images = (images * 255).round().astype("uint8")
98
+ if images.shape[-1] == 1:
99
+ # special case for grayscale (single channel) images
100
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
101
+ else:
102
+ pil_images = [Image.fromarray(image) for image in images]
103
+
104
+ return pil_images
105
+
106
+ @staticmethod
107
+ def pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
108
+ """
109
+ Convert a PIL image or a list of PIL images to NumPy arrays.
110
+ """
111
+ if not isinstance(images, list):
112
+ images = [images]
113
+ images = [np.array(image).astype(np.float32) / 255.0 for image in images]
114
+ images = np.stack(images, axis=0)
115
+
116
+ return images
117
+
118
+ @staticmethod
119
+ def numpy_to_pt(images: np.ndarray) -> torch.FloatTensor:
120
+ """
121
+ Convert a NumPy image to a PyTorch tensor.
122
+ """
123
+ if images.ndim == 3:
124
+ images = images[..., None]
125
+
126
+ images = torch.from_numpy(images.transpose(0, 3, 1, 2))
127
+ return images
128
+
129
+ @staticmethod
130
+ def pt_to_numpy(images: torch.FloatTensor) -> np.ndarray:
131
+ """
132
+ Convert a PyTorch tensor to a NumPy image.
133
+ """
134
+ images = images.cpu().permute(0, 2, 3, 1).float().numpy()
135
+ return images
136
+
137
+ @staticmethod
138
+ def normalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
139
+ """
140
+ Normalize an image array to [-1,1].
141
+ """
142
+ return 2.0 * images - 1.0
143
+
144
+ @staticmethod
145
+ def denormalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
146
+ """
147
+ Denormalize an image array to [0,1].
148
+ """
149
+ return (images / 2 + 0.5).clamp(0, 1)
150
+
151
+ @staticmethod
152
+ def convert_to_rgb(image: PIL.Image.Image) -> PIL.Image.Image:
153
+ """
154
+ Converts a PIL image to RGB format.
155
+ """
156
+ image = image.convert("RGB")
157
+
158
+ return image
159
+
160
+ @staticmethod
161
+ def convert_to_grayscale(image: PIL.Image.Image) -> PIL.Image.Image:
162
+ """
163
+ Converts a PIL image to grayscale format.
164
+ """
165
+ image = image.convert("L")
166
+
167
+ return image
168
+
169
+ @staticmethod
170
+ def blur(image: PIL.Image.Image, blur_factor: int = 4) -> PIL.Image.Image:
171
+ """
172
+ Blurs an image.
173
+ """
174
+ image = image.filter(ImageFilter.GaussianBlur(blur_factor))
175
+
176
+ return image
177
+
178
+ @staticmethod
179
+ def get_crop_region(mask_image: PIL.Image.Image, width: int, height: int, pad=0):
180
+ """
181
+ Finds a rectangular region that contains all masked areas in an image, and expands the region to match the aspect ratio of the original image;
182
+ for example, if the user drew a mask in a 128x32 region and the dimensions for processing are 512x512, the region will be expanded to 128x128.
183
+
184
+ Args:
185
+ mask_image (PIL.Image.Image): Mask image.
186
+ width (int): Width of the image to be processed.
187
+ height (int): Height of the image to be processed.
188
+ pad (int, optional): Padding to be added to the crop region. Defaults to 0.
189
+
190
+ Returns:
191
+ tuple: (x1, y1, x2, y2) representing a rectangular region that contains all masked areas in an image and matches the original aspect ratio.
192
+ """
193
+
194
+ mask_image = mask_image.convert("L")
195
+ mask = np.array(mask_image)
196
+
197
+ # 1. find a rectangular region that contains all masked areas in an image
198
+ h, w = mask.shape
199
+ crop_left = 0
200
+ for i in range(w):
201
+ if not (mask[:, i] == 0).all():
202
+ break
203
+ crop_left += 1
204
+
205
+ crop_right = 0
206
+ for i in reversed(range(w)):
207
+ if not (mask[:, i] == 0).all():
208
+ break
209
+ crop_right += 1
210
+
211
+ crop_top = 0
212
+ for i in range(h):
213
+ if not (mask[i] == 0).all():
214
+ break
215
+ crop_top += 1
216
+
217
+ crop_bottom = 0
218
+ for i in reversed(range(h)):
219
+ if not (mask[i] == 0).all():
220
+ break
221
+ crop_bottom += 1
222
+
223
+ # 2. add padding to the crop region
224
+ x1, y1, x2, y2 = (
225
+ int(max(crop_left - pad, 0)),
226
+ int(max(crop_top - pad, 0)),
227
+ int(min(w - crop_right + pad, w)),
228
+ int(min(h - crop_bottom + pad, h)),
229
+ )
230
+
231
+ # 3. expands crop region to match the aspect ratio of the image to be processed
232
+ ratio_crop_region = (x2 - x1) / (y2 - y1)
233
+ ratio_processing = width / height
234
+
235
+ if ratio_crop_region > ratio_processing:
236
+ desired_height = (x2 - x1) / ratio_processing
237
+ desired_height_diff = int(desired_height - (y2 - y1))
238
+ y1 -= desired_height_diff // 2
239
+ y2 += desired_height_diff - desired_height_diff // 2
240
+ if y2 >= mask_image.height:
241
+ diff = y2 - mask_image.height
242
+ y2 -= diff
243
+ y1 -= diff
244
+ if y1 < 0:
245
+ y2 -= y1
246
+ y1 -= y1
247
+ if y2 >= mask_image.height:
248
+ y2 = mask_image.height
249
+ else:
250
+ desired_width = (y2 - y1) * ratio_processing
251
+ desired_width_diff = int(desired_width - (x2 - x1))
252
+ x1 -= desired_width_diff // 2
253
+ x2 += desired_width_diff - desired_width_diff // 2
254
+ if x2 >= mask_image.width:
255
+ diff = x2 - mask_image.width
256
+ x2 -= diff
257
+ x1 -= diff
258
+ if x1 < 0:
259
+ x2 -= x1
260
+ x1 -= x1
261
+ if x2 >= mask_image.width:
262
+ x2 = mask_image.width
263
+
264
+ return x1, y1, x2, y2
265
+
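A sketch of `get_crop_region` on a synthetic mask, mirroring the 128x32 example from its docstring (values chosen for illustration):

```python
import numpy as np
import PIL.Image

from diffusers.image_processor import VaeImageProcessor

mask = np.zeros((256, 256), dtype=np.uint8)
mask[100:132, 60:188] = 255  # a 128x32 masked region
mask_image = PIL.Image.fromarray(mask)

x1, y1, x2, y2 = VaeImageProcessor.get_crop_region(mask_image, width=512, height=512)
print((x2 - x1) / (y2 - y1))  # 1.0 — expanded to match the 512/512 target ratio
```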
266
+ def _resize_and_fill(
267
+ self,
268
+ image: PIL.Image.Image,
269
+ width: int,
270
+ height: int,
271
+ ) -> PIL.Image.Image:
272
+ """
273
+ Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling the empty space with data from the image.
274
+
275
+ Args:
276
+ image: The image to resize.
277
+ width: The width to resize the image to.
278
+ height: The height to resize the image to.
279
+ """
280
+
281
+ ratio = width / height
282
+ src_ratio = image.width / image.height
283
+
284
+ src_w = width if ratio < src_ratio else image.width * height // image.height
285
+ src_h = height if ratio >= src_ratio else image.height * width // image.width
286
+
287
+ resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
288
+ res = Image.new("RGB", (width, height))
289
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
290
+
291
+ if ratio < src_ratio:
292
+ fill_height = height // 2 - src_h // 2
293
+ if fill_height > 0:
294
+ res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
295
+ res.paste(
296
+ resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)),
297
+ box=(0, fill_height + src_h),
298
+ )
299
+ elif ratio > src_ratio:
300
+ fill_width = width // 2 - src_w // 2
301
+ if fill_width > 0:
302
+ res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
303
+ res.paste(
304
+ resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)),
305
+ box=(fill_width + src_w, 0),
306
+ )
307
+
308
+ return res
309
+
310
+ def _resize_and_crop(
311
+ self,
312
+ image: PIL.Image.Image,
313
+ width: int,
314
+ height: int,
315
+ ) -> PIL.Image.Image:
316
+ """
317
+ Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
318
+
319
+ Args:
320
+ image: The image to resize.
321
+ width: The width to resize the image to.
322
+ height: The height to resize the image to.
323
+ """
324
+ ratio = width / height
325
+ src_ratio = image.width / image.height
326
+
327
+ src_w = width if ratio > src_ratio else image.width * height // image.height
328
+ src_h = height if ratio <= src_ratio else image.height * width // image.width
329
+
330
+ resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
331
+ res = Image.new("RGB", (width, height))
332
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
333
+ return res
334
+
335
+ def resize(
336
+ self,
337
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
338
+ height: int,
339
+ width: int,
340
+ resize_mode: str = "default", # "defalt", "fill", "crop"
341
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]:
342
+ """
343
+ Resize image.
344
+
345
+ Args:
346
+ image (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
347
+ The image input, can be a PIL image, numpy array or pytorch tensor.
348
+ height (`int`):
349
+ The height to resize to.
350
+ width (`int`):
351
+ The width to resize to.
352
+ resize_mode (`str`, *optional*, defaults to `default`):
353
+ The resize mode to use, can be one of `default` or `fill`. If `default`, will resize the image to fit
354
+ within the specified width and height, and it may not maintaining the original aspect ratio.
355
+ If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
356
+ within the dimensions, filling empty with data from image.
357
+ If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
358
+ within the dimensions, cropping the excess.
359
+ Note that resize_mode `fill` and `crop` are only supported for PIL image input.
360
+
361
+ Returns:
362
+ `PIL.Image.Image`, `np.ndarray` or `torch.Tensor`:
363
+ The resized image.
364
+ """
365
+ if resize_mode != "default" and not isinstance(image, PIL.Image.Image):
366
+ raise ValueError(f"Only PIL image input is supported for resize_mode {resize_mode}")
367
+ if isinstance(image, PIL.Image.Image):
368
+ if resize_mode == "default":
369
+ image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample])
370
+ elif resize_mode == "fill":
371
+ image = self._resize_and_fill(image, width, height)
372
+ elif resize_mode == "crop":
373
+ image = self._resize_and_crop(image, width, height)
374
+ else:
375
+ raise ValueError(f"resize_mode {resize_mode} is not supported")
376
+
377
+ elif isinstance(image, torch.Tensor):
378
+ image = torch.nn.functional.interpolate(
379
+ image,
380
+ size=(height, width),
381
+ )
382
+ elif isinstance(image, np.ndarray):
383
+ image = self.numpy_to_pt(image)
384
+ image = torch.nn.functional.interpolate(
385
+ image,
386
+ size=(height, width),
387
+ )
388
+ image = self.pt_to_numpy(image)
389
+ return image
390
+
391
+ def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image:
392
+ """
393
+ Create a mask.
394
+
395
+ Args:
396
+ image (`PIL.Image.Image`):
397
+ The image input, should be a PIL image.
398
+
399
+ Returns:
400
+ `PIL.Image.Image`:
401
+ The binarized image. Values less than 0.5 are set to 0, values greater than or equal to 0.5 are set to 1.
402
+ """
403
+ image[image < 0.5] = 0
404
+ image[image >= 0.5] = 1
405
+ return image
406
+
407
+ def get_default_height_width(
408
+ self,
409
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
410
+ height: Optional[int] = None,
411
+ width: Optional[int] = None,
412
+ ) -> Tuple[int, int]:
413
+ """
414
+ This function returns the height and width that are rounded down to the nearest integer multiple of
415
+ `vae_scale_factor`.
416
+
417
+ Args:
418
+ image(`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
419
+ The image input, can be a PIL image, numpy array or pytorch tensor. If it is a numpy array, it should have
420
+ shape `[batch, height, width]` or `[batch, height, width, channel]`; if it is a pytorch tensor, it should
421
+ have shape `[batch, channel, height, width]`.
422
+ height (`int`, *optional*, defaults to `None`):
423
+ The height of the preprocessed image. If `None`, will use the height of the `image` input.
424
+ width (`int`, *optional*, defaults to `None`):
425
+ The width of the preprocessed image. If `None`, will use the width of the `image` input.
426
+ """
427
+
428
+ if height is None:
429
+ if isinstance(image, PIL.Image.Image):
430
+ height = image.height
431
+ elif isinstance(image, torch.Tensor):
432
+ height = image.shape[2]
433
+ else:
434
+ height = image.shape[1]
435
+
436
+ if width is None:
437
+ if isinstance(image, PIL.Image.Image):
438
+ width = image.width
439
+ elif isinstance(image, torch.Tensor):
440
+ width = image.shape[3]
441
+ else:
442
+ width = image.shape[2]
443
+
444
+ width, height = (
445
+ x - x % self.config.vae_scale_factor for x in (width, height)
446
+ ) # resize to integer multiple of vae_scale_factor
447
+
448
+ return height, width
449
+
450
+ def preprocess(
451
+ self,
452
+ image: PipelineImageInput,
453
+ height: Optional[int] = None,
454
+ width: Optional[int] = None,
455
+ resize_mode: str = "default", # "defalt", "fill", "crop"
456
+ crops_coords: Optional[Tuple[int, int, int, int]] = None,
457
+ ) -> torch.Tensor:
458
+ """
459
+ Preprocess the image input.
460
+
461
+ Args:
462
+ image (`PipelineImageInput`):
463
+ The image input; accepted formats are PIL images, NumPy arrays, and PyTorch tensors, as well as lists of these formats.
464
+ height (`int`, *optional*, defaults to `None`):
465
+ The height of the preprocessed image. If `None`, will use `get_default_height_width()` to get the default height.
466
+ width (`int`, *optional*, defaults to `None`):
467
+ The width of the preprocessed image. If `None`, will use `get_default_height_width()` to get the default width.
468
+ resize_mode (`str`, *optional*, defaults to `default`):
469
+ The resize mode; can be one of `default`, `fill`, or `crop`. If `default`, will resize the image to fit
470
+ within the specified width and height, and it may not maintain the original aspect ratio.
471
+ If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
472
+ within the dimensions, filling the empty space with data from the image.
473
+ If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
474
+ within the dimensions, cropping the excess.
475
+ Note that resize_mode `fill` and `crop` are only supported for PIL image input.
476
+ crops_coords (`Tuple[int, int, int, int]`, *optional*, defaults to `None`):
477
+ The crop coordinates applied to each image in the batch. If `None`, will not crop the image.
478
+ """
479
+ supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
480
+
481
+ # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
482
+ if self.config.do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and image.ndim == 3:
483
+ if isinstance(image, torch.Tensor):
484
+ # if image is a pytorch tensor could have 2 possible shapes:
485
+ # 1. batch x height x width: we should insert the channel dimension at position 1
486
+ # 2. channel x height x width: we should insert the batch dimension at position 0;
487
+ # however, since both the channel and batch dimensions have size 1, inserting at position 1 is equivalent;
488
+ # for simplicity, we insert a dimension of size 1 at position 1 for both cases
489
+ image = image.unsqueeze(1)
490
+ else:
491
+ # if it is a numpy array, it could have 2 possible shapes:
492
+ # 1. batch x height x width: insert channel dimension on last position
493
+ # 2. height x width x channel: insert batch dimension on first position
494
+ if image.shape[-1] == 1:
495
+ image = np.expand_dims(image, axis=0)
496
+ else:
497
+ image = np.expand_dims(image, axis=-1)
498
+
499
+ if isinstance(image, supported_formats):
500
+ image = [image]
501
+ elif not (isinstance(image, list) and all(isinstance(i, supported_formats) for i in image)):
502
+ raise ValueError(
503
+ f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support {', '.join(supported_formats)}"
504
+ )
505
+
506
+ if isinstance(image[0], PIL.Image.Image):
507
+ if crops_coords is not None:
508
+ image = [i.crop(crops_coords) for i in image]
509
+ if self.config.do_resize:
510
+ height, width = self.get_default_height_width(image[0], height, width)
511
+ image = [self.resize(i, height, width, resize_mode=resize_mode) for i in image]
512
+ if self.config.do_convert_rgb:
513
+ image = [self.convert_to_rgb(i) for i in image]
514
+ elif self.config.do_convert_grayscale:
515
+ image = [self.convert_to_grayscale(i) for i in image]
516
+ image = self.pil_to_numpy(image) # to np
517
+ image = self.numpy_to_pt(image) # to pt
518
+
519
+ elif isinstance(image[0], np.ndarray):
520
+ image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
521
+
522
+ image = self.numpy_to_pt(image)
523
+
524
+ height, width = self.get_default_height_width(image, height, width)
525
+ if self.config.do_resize:
526
+ image = self.resize(image, height, width)
527
+
528
+ elif isinstance(image[0], torch.Tensor):
529
+ image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
530
+
531
+ if self.config.do_convert_grayscale and image.ndim == 3:
532
+ image = image.unsqueeze(1)
533
+
534
+ channel = image.shape[1]
535
+ # no preprocessing is needed if the image already contains latents (4 channels)
536
+ if channel == 4:
537
+ return image
538
+
539
+ height, width = self.get_default_height_width(image, height, width)
540
+ if self.config.do_resize:
541
+ image = self.resize(image, height, width)
542
+
543
+ # expected range [0,1], normalize to [-1,1]
544
+ do_normalize = self.config.do_normalize
545
+ if do_normalize and image.min() < 0:
546
+ warnings.warn(
547
+ "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
548
+ f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]",
549
+ FutureWarning,
550
+ )
551
+ do_normalize = False
552
+
553
+ if do_normalize:
554
+ image = self.normalize(image)
555
+
556
+ if self.config.do_binarize:
557
+ image = self.binarize(image)
558
+
559
+ return image
560
+
561
+ def postprocess(
562
+ self,
563
+ image: torch.FloatTensor,
564
+ output_type: str = "pil",
565
+ do_denormalize: Optional[List[bool]] = None,
566
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
567
+ """
568
+ Postprocess the image output from tensor to `output_type`.
569
+
570
+ Args:
571
+ image (`torch.FloatTensor`):
572
+ The image input, should be a pytorch tensor with shape `B x C x H x W`.
573
+ output_type (`str`, *optional*, defaults to `pil`):
574
+ The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
575
+ do_denormalize (`List[bool]`, *optional*, defaults to `None`):
576
+ Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
577
+ `VaeImageProcessor` config.
578
+
579
+ Returns:
580
+ `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
581
+ The postprocessed image.
582
+ """
583
+ if not isinstance(image, torch.Tensor):
584
+ raise ValueError(
585
+ f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
586
+ )
587
+ if output_type not in ["latent", "pt", "np", "pil"]:
588
+ deprecation_message = (
589
+ f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
590
+ "`pil`, `np`, `pt`, `latent`"
591
+ )
592
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
593
+ output_type = "np"
594
+
595
+ if output_type == "latent":
596
+ return image
597
+
598
+ if do_denormalize is None:
599
+ do_denormalize = [self.config.do_normalize] * image.shape[0]
600
+
601
+ image = torch.stack(
602
+ [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
603
+ )
604
+
605
+ if output_type == "pt":
606
+ return image
607
+
608
+ image = self.pt_to_numpy(image)
609
+
610
+ if output_type == "np":
611
+ return image
612
+
613
+ if output_type == "pil":
614
+ return self.numpy_to_pil(image)
615
+
616
+ def apply_overlay(
617
+ self,
618
+ mask: PIL.Image.Image,
619
+ init_image: PIL.Image.Image,
620
+ image: PIL.Image.Image,
621
+ crop_coords: Optional[Tuple[int, int, int, int]] = None,
622
+ ) -> PIL.Image.Image:
623
+ """
624
+ Overlay the inpainting output over the original image.
625
+ """
626
+
627
+ width, height = image.width, image.height
628
+
629
+ init_image = self.resize(init_image, width=width, height=height)
630
+ mask = self.resize(mask, width=width, height=height)
631
+
632
+ init_image_masked = PIL.Image.new("RGBa", (width, height))
633
+ init_image_masked.paste(init_image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert("L")))
634
+ init_image_masked = init_image_masked.convert("RGBA")
635
+
636
+ if crop_coords is not None:
637
+ x, y, w, h = crop_coords
638
+ base_image = PIL.Image.new("RGBA", (width, height))
639
+ image = self.resize(image, height=h, width=w, resize_mode="crop")
640
+ base_image.paste(image, (x, y))
641
+ image = base_image.convert("RGB")
642
+
643
+ image = image.convert("RGBA")
644
+ image.alpha_composite(init_image_masked)
645
+ image = image.convert("RGB")
646
+
647
+ return image
648
+
649
+
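
Before the LDM3D variant below, a minimal usage sketch of the `VaeImageProcessor` round trip defined above (preprocess to a normalized tensor, postprocess back to PIL). The sizes are illustrative, and this assumes only `diffusers`, `torch`, and `PIL` are installed:

```python
import PIL.Image
import torch

from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor(vae_scale_factor=8)

# Preprocess a PIL image: resized/cropped to the requested size (a multiple
# of 8), converted to a [1, 3, H, W] tensor, and normalized from [0, 1] to [-1, 1].
pil_image = PIL.Image.new("RGB", (515, 389))
tensor = processor.preprocess(pil_image, height=384, width=512, resize_mode="crop")
assert tensor.shape == (1, 3, 384, 512)

# Postprocess back to PIL: denormalized to [0, 1] and converted per image.
images = processor.postprocess(tensor, output_type="pil")
assert images[0].size == (512, 384)
```
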
650
+ class VaeImageProcessorLDM3D(VaeImageProcessor):
651
+ """
652
+ Image processor for VAE LDM3D.
653
+
654
+ Args:
655
+ do_resize (`bool`, *optional*, defaults to `True`):
656
+ Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`.
657
+ vae_scale_factor (`int`, *optional*, defaults to `8`):
658
+ VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
659
+ resample (`str`, *optional*, defaults to `lanczos`):
660
+ Resampling filter to use when resizing the image.
661
+ do_normalize (`bool`, *optional*, defaults to `True`):
662
+ Whether to normalize the image to [-1,1].
663
+ """
664
+
665
+ config_name = CONFIG_NAME
666
+
667
+ @register_to_config
668
+ def __init__(
669
+ self,
670
+ do_resize: bool = True,
671
+ vae_scale_factor: int = 8,
672
+ resample: str = "lanczos",
673
+ do_normalize: bool = True,
674
+ ):
675
+ super().__init__()
676
+
677
+ @staticmethod
678
+ def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
679
+ """
680
+ Convert a NumPy image or a batch of images to a PIL image.
681
+ """
682
+ if images.ndim == 3:
683
+ images = images[None, ...]
684
+ images = (images * 255).round().astype("uint8")
685
+ if images.shape[-1] == 1:
686
+ # special case for grayscale (single channel) images
687
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
688
+ else:
689
+ pil_images = [Image.fromarray(image[:, :, :3]) for image in images]
690
+
691
+ return pil_images
692
+
693
+ @staticmethod
694
+ def depth_pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
695
+ """
696
+ Convert a PIL image or a list of PIL images to NumPy arrays.
697
+ """
698
+ if not isinstance(images, list):
699
+ images = [images]
700
+
701
+ images = [np.array(image).astype(np.float32) / (2**16 - 1) for image in images]
702
+ images = np.stack(images, axis=0)
703
+ return images
704
+
705
+ @staticmethod
706
+ def rgblike_to_depthmap(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
707
+ """
708
+ Args:
709
+ image: RGB-like depth image
710
+
711
+ Returns: depth map
712
+
713
+ """
714
+ return image[:, :, 1] * 2**8 + image[:, :, 2]
715
+
716
+ def numpy_to_depth(self, images: np.ndarray) -> List[PIL.Image.Image]:
717
+ """
718
+ Convert a NumPy depth image or a batch of images to a PIL image.
719
+ """
720
+ if images.ndim == 3:
721
+ images = images[None, ...]
722
+ images_depth = images[:, :, :, 3:]
723
+ if images.shape[-1] == 6:
724
+ images_depth = (images_depth * 255).round().astype("uint8")
725
+ pil_images = [
726
+ Image.fromarray(self.rgblike_to_depthmap(image_depth), mode="I;16") for image_depth in images_depth
727
+ ]
728
+ elif images.shape[-1] == 4:
729
+ images_depth = (images_depth * 65535.0).astype(np.uint16)
730
+ pil_images = [Image.fromarray(image_depth, mode="I;16") for image_depth in images_depth]
731
+ else:
732
+ raise Exception("Not supported")
733
+
734
+ return pil_images
735
+
736
+ def postprocess(
737
+ self,
738
+ image: torch.FloatTensor,
739
+ output_type: str = "pil",
740
+ do_denormalize: Optional[List[bool]] = None,
741
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
742
+ """
743
+ Postprocess the image output from tensor to `output_type`.
744
+
745
+ Args:
746
+ image (`torch.FloatTensor`):
747
+ The image input, should be a pytorch tensor with shape `B x C x H x W`.
748
+ output_type (`str`, *optional*, defaults to `pil`):
749
+ The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
750
+ do_denormalize (`List[bool]`, *optional*, defaults to `None`):
751
+ Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
752
+ `VaeImageProcessor` config.
753
+
754
+ Returns:
755
+ `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
756
+ The postprocessed image.
757
+ """
758
+ if not isinstance(image, torch.Tensor):
759
+ raise ValueError(
760
+ f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
761
+ )
762
+ if output_type not in ["latent", "pt", "np", "pil"]:
763
+ deprecation_message = (
764
+ f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
765
+ "`pil`, `np`, `pt`, `latent`"
766
+ )
767
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
768
+ output_type = "np"
769
+
770
+ if do_denormalize is None:
771
+ do_denormalize = [self.config.do_normalize] * image.shape[0]
772
+
773
+ image = torch.stack(
774
+ [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
775
+ )
776
+
777
+ image = self.pt_to_numpy(image)
778
+
779
+ if output_type == "np":
780
+ if image.shape[-1] == 6:
781
+ image_depth = np.stack([self.rgblike_to_depthmap(im[:, :, 3:]) for im in image], axis=0)
782
+ else:
783
+ image_depth = image[:, :, :, 3:]
784
+ return image[:, :, :, :3], image_depth
785
+
786
+ if output_type == "pil":
787
+ return self.numpy_to_pil(image), self.numpy_to_depth(image)
788
+ else:
789
+ raise Exception(f"This type {output_type} is not supported")
790
+
791
+ def preprocess(
792
+ self,
793
+ rgb: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
794
+ depth: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
795
+ height: Optional[int] = None,
796
+ width: Optional[int] = None,
797
+ target_res: Optional[int] = None,
798
+ ) -> torch.Tensor:
799
+ """
800
+ Preprocess the image input. Accepted formats are PIL images, NumPy arrays or PyTorch tensors.
801
+ """
802
+ supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
803
+
804
+ # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
805
+ if self.config.do_convert_grayscale and isinstance(rgb, (torch.Tensor, np.ndarray)) and rgb.ndim == 3:
806
+ raise Exception("This is not yet supported")
807
+
808
+ if isinstance(rgb, supported_formats):
809
+ rgb = [rgb]
810
+ depth = [depth]
811
+ elif not (isinstance(rgb, list) and all(isinstance(i, supported_formats) for i in rgb)):
812
+ raise ValueError(
813
+ f"Input is in incorrect format: {[type(i) for i in rgb]}. Currently, we only support {', '.join(supported_formats)}"
814
+ )
815
+
816
+ if isinstance(rgb[0], PIL.Image.Image):
817
+ if self.config.do_convert_rgb:
818
+ raise Exception("This is not yet supported")
819
+ # rgb = [self.convert_to_rgb(i) for i in rgb]
820
+ # depth = [self.convert_to_depth(i) for i in depth] #TODO define convert_to_depth
821
+ if self.config.do_resize or target_res:
822
+ height, width = self.get_default_height_width(rgb[0], height, width) if not target_res else target_res
823
+ rgb = [self.resize(i, height, width) for i in rgb]
824
+ depth = [self.resize(i, height, width) for i in depth]
825
+ rgb = self.pil_to_numpy(rgb) # to np
826
+ rgb = self.numpy_to_pt(rgb) # to pt
827
+
828
+ depth = self.depth_pil_to_numpy(depth) # to np
829
+ depth = self.numpy_to_pt(depth) # to pt
830
+
831
+ elif isinstance(rgb[0], np.ndarray):
832
+ rgb = np.concatenate(rgb, axis=0) if rgb[0].ndim == 4 else np.stack(rgb, axis=0)
833
+ rgb = self.numpy_to_pt(rgb)
834
+ height, width = self.get_default_height_width(rgb, height, width)
835
+ if self.config.do_resize:
836
+ rgb = self.resize(rgb, height, width)
837
+
838
+ depth = np.concatenate(depth, axis=0) if depth[0].ndim == 4 else np.stack(depth, axis=0)  # check depth, not the already-converted rgb
839
+ depth = self.numpy_to_pt(depth)
840
+ height, width = self.get_default_height_width(depth, height, width)
841
+ if self.config.do_resize:
842
+ depth = self.resize(depth, height, width)
843
+
844
+ elif isinstance(rgb[0], torch.Tensor):
845
+ raise Exception("This is not yet supported")
846
+ # rgb = torch.cat(rgb, axis=0) if rgb[0].ndim == 4 else torch.stack(rgb, axis=0)
847
+
848
+ # if self.config.do_convert_grayscale and rgb.ndim == 3:
849
+ # rgb = rgb.unsqueeze(1)
850
+
851
+ # channel = rgb.shape[1]
852
+
853
+ # height, width = self.get_default_height_width(rgb, height, width)
854
+ # if self.config.do_resize:
855
+ # rgb = self.resize(rgb, height, width)
856
+
857
+ # depth = torch.cat(depth, axis=0) if depth[0].ndim == 4 else torch.stack(depth, axis=0)
858
+
859
+ # if self.config.do_convert_grayscale and depth.ndim == 3:
860
+ # depth = depth.unsqueeze(1)
861
+
862
+ # channel = depth.shape[1]
863
+ # # don't need any preprocess if the image is latents
864
+ # if depth == 4:
865
+ # return rgb, depth
866
+
867
+ # height, width = self.get_default_height_width(depth, height, width)
868
+ # if self.config.do_resize:
869
+ # depth = self.resize(depth, height, width)
870
+ # expected range [0,1], normalize to [-1,1]
871
+ do_normalize = self.config.do_normalize
872
+ if rgb.min() < 0 and do_normalize:
873
+ warnings.warn(
874
+ "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
875
+ f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{rgb.min()},{rgb.max()}]",
876
+ FutureWarning,
877
+ )
878
+ do_normalize = False
879
+
880
+ if do_normalize:
881
+ rgb = self.normalize(rgb)
882
+ depth = self.normalize(depth)
883
+
884
+ if self.config.do_binarize:
885
+ rgb = self.binarize(rgb)
886
+ depth = self.binarize(depth)
887
+
888
+ return rgb, depth
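
As a sanity check on the depth encoding used by `rgblike_to_depthmap` above: the 16-bit depth value is recovered from the green (high byte) and blue (low byte) channels as `G * 2**8 + B`. A minimal sketch, with made-up byte values:

```python
import numpy as np

from diffusers.image_processor import VaeImageProcessorLDM3D

# An H x W x 3 "RGB-like" depth image: channel 1 holds the high byte,
# channel 2 the low byte of a 16-bit depth value.
rgb_like = np.zeros((2, 2, 3), dtype=np.uint16)
rgb_like[..., 1] = 5   # high byte
rgb_like[..., 2] = 12  # low byte

depth = VaeImageProcessorLDM3D.rgblike_to_depthmap(rgb_like)
assert int(depth[0, 0]) == 5 * 2**8 + 12  # 1292
```
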
src/diffusers/loaders/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
4
+ from ..utils.import_utils import is_torch_available, is_transformers_available
5
+
6
+
7
+ def text_encoder_lora_state_dict(text_encoder):
8
+ deprecate(
9
+ "text_encoder_load_state_dict in `models`",
10
+ "0.27.0",
11
+ "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
12
+ )
13
+ state_dict = {}
14
+
15
+ for name, module in text_encoder_attn_modules(text_encoder):
16
+ for k, v in module.q_proj.lora_linear_layer.state_dict().items():
17
+ state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
18
+
19
+ for k, v in module.k_proj.lora_linear_layer.state_dict().items():
20
+ state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
21
+
22
+ for k, v in module.v_proj.lora_linear_layer.state_dict().items():
23
+ state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
24
+
25
+ for k, v in module.out_proj.lora_linear_layer.state_dict().items():
26
+ state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
27
+
28
+ return state_dict
29
+
30
+
31
+ if is_transformers_available():
32
+
33
+ def text_encoder_attn_modules(text_encoder):
34
+ deprecate(
35
+ "text_encoder_attn_modules in `models`",
36
+ "0.27.0",
37
+ "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
38
+ )
39
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection
40
+
41
+ attn_modules = []
42
+
43
+ if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
44
+ for i, layer in enumerate(text_encoder.text_model.encoder.layers):
45
+ name = f"text_model.encoder.layers.{i}.self_attn"
46
+ mod = layer.self_attn
47
+ attn_modules.append((name, mod))
48
+ else:
49
+ raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
50
+
51
+ return attn_modules
52
+
53
+
54
+ _import_structure = {}
55
+
56
+ if is_torch_available():
57
+ _import_structure["single_file"] = ["FromOriginalControlnetMixin", "FromOriginalVAEMixin"]
58
+ _import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
59
+ _import_structure["utils"] = ["AttnProcsLayers"]
60
+
61
+ if is_transformers_available():
62
+ _import_structure["single_file"].extend(["FromSingleFileMixin"])
63
+ _import_structure["lora"] = ["LoraLoaderMixin", "StableDiffusionXLLoraLoaderMixin"]
64
+ _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
65
+ _import_structure["ip_adapter"] = ["IPAdapterMixin"]
66
+
67
+
68
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
69
+ if is_torch_available():
70
+ from .single_file import FromOriginalControlnetMixin, FromOriginalVAEMixin
71
+ from .unet import UNet2DConditionLoadersMixin
72
+ from .utils import AttnProcsLayers
73
+
74
+ if is_transformers_available():
75
+ from .ip_adapter import IPAdapterMixin
76
+ from .lora import LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin
77
+ from .single_file import FromSingleFileMixin
78
+ from .textual_inversion import TextualInversionLoaderMixin
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
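
The `_LazyModule` registration above keeps importing `diffusers.loaders` cheap: submodules such as `lora` and `ip_adapter` are only imported when one of their names is first resolved. A small sketch of the observable behavior (assuming torch and transformers are available):

```python
# Importing the package only records the import structure; the heavy
# torch/transformers-backed submodules are not imported yet.
from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin

# The real submodules are loaded lazily, at the moment these names
# are first resolved by the lazy module.
print(LoraLoaderMixin.__module__)  # "diffusers.loaders.lora"
print(IPAdapterMixin.__module__)   # "diffusers.loaders.ip_adapter"
```
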
src/diffusers/loaders/ip_adapter.py ADDED
@@ -0,0 +1,159 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from typing import Dict, Union
16
+
17
+ import torch
18
+ from huggingface_hub.utils import validate_hf_hub_args
19
+ from safetensors import safe_open
20
+
21
+ from ..utils import (
22
+ _get_model_file,
23
+ is_transformers_available,
24
+ logging,
25
+ )
26
+
27
+
28
+ if is_transformers_available():
29
+ from transformers import (
30
+ CLIPImageProcessor,
31
+ CLIPVisionModelWithProjection,
32
+ )
33
+
34
+ from ..models.attention_processor import (
35
+ IPAdapterAttnProcessor,
36
+ IPAdapterAttnProcessor2_0,
37
+ )
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ class IPAdapterMixin:
43
+ """Mixin for handling IP Adapters."""
44
+
45
+ @validate_hf_hub_args
46
+ def load_ip_adapter(
47
+ self,
48
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
49
+ subfolder: str,
50
+ weight_name: str,
51
+ **kwargs,
52
+ ):
53
+ """
54
+ Parameters:
55
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
56
+ Can be either:
57
+
58
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
59
+ the Hub.
60
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
61
+ with [`ModelMixin.save_pretrained`].
62
+ - A [torch state
63
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
64
+
65
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
66
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
67
+ is not used.
68
+ force_download (`bool`, *optional*, defaults to `False`):
69
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
70
+ cached versions if they exist.
71
+ resume_download (`bool`, *optional*, defaults to `False`):
72
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
73
+ incompletely downloaded files are deleted.
74
+ proxies (`Dict[str, str]`, *optional*):
75
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
76
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
77
+ local_files_only (`bool`, *optional*, defaults to `False`):
78
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
79
+ won't be downloaded from the Hub.
80
+ token (`str` or *bool*, *optional*):
81
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
82
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
83
+ revision (`str`, *optional*, defaults to `"main"`):
84
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
85
+ allowed by Git.
86
+ subfolder (`str`, *optional*, defaults to `""`):
87
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
88
+ """
89
+
90
+ # Load the main state dict first.
91
+ cache_dir = kwargs.pop("cache_dir", None)
92
+ force_download = kwargs.pop("force_download", False)
93
+ resume_download = kwargs.pop("resume_download", False)
94
+ proxies = kwargs.pop("proxies", None)
95
+ local_files_only = kwargs.pop("local_files_only", None)
96
+ token = kwargs.pop("token", None)
97
+ revision = kwargs.pop("revision", None)
98
+
99
+ user_agent = {
100
+ "file_type": "attn_procs_weights",
101
+ "framework": "pytorch",
102
+ }
103
+
104
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
105
+ model_file = _get_model_file(
106
+ pretrained_model_name_or_path_or_dict,
107
+ weights_name=weight_name,
108
+ cache_dir=cache_dir,
109
+ force_download=force_download,
110
+ resume_download=resume_download,
111
+ proxies=proxies,
112
+ local_files_only=local_files_only,
113
+ token=token,
114
+ revision=revision,
115
+ subfolder=subfolder,
116
+ user_agent=user_agent,
117
+ )
118
+ if weight_name.endswith(".safetensors"):
119
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
120
+ with safe_open(model_file, framework="pt", device="cpu") as f:
121
+ for key in f.keys():
122
+ if key.startswith("image_proj."):
123
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
124
+ elif key.startswith("ip_adapter."):
125
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
126
+ else:
127
+ state_dict = torch.load(model_file, map_location="cpu")
128
+ else:
129
+ state_dict = pretrained_model_name_or_path_or_dict
130
+
131
+ keys = list(state_dict.keys())
132
+ if keys != ["image_proj", "ip_adapter"]:
133
+ raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
134
+
135
+ # load the CLIP image encoder here if it has not been registered to the pipeline yet
136
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
137
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
138
+ logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
139
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
140
+ pretrained_model_name_or_path_or_dict,
141
+ subfolder=os.path.join(subfolder, "image_encoder"),
142
+ ).to(self.device, dtype=self.dtype)
143
+ self.image_encoder = image_encoder
144
+ else:
145
+ raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
146
+
147
+ # create feature extractor if it has not been registered to the pipeline yet
148
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
149
+ self.feature_extractor = CLIPImageProcessor()
150
+
151
+ # load ip-adapter into unet
152
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
153
+ unet._load_ip_adapter_weights(state_dict)
154
+
155
+ def set_ip_adapter_scale(self, scale):
156
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
157
+ for attn_processor in unet.attn_processors.values():
158
+ if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
159
+ attn_processor.scale = scale
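
A hedged usage sketch of `IPAdapterMixin` as defined above; the repo id, subfolder, and weight name follow the common `h94/IP-Adapter` layout but are assumptions for illustration, not part of this file:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# load_ip_adapter() fetches the weights, registers an image encoder and
# feature extractor if missing, and patches the UNet attention processors.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

# Scale the contribution of the image prompt relative to the text prompt.
pipe.set_ip_adapter_scale(0.6)
```
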
src/diffusers/loaders/lora.py ADDED
@@ -0,0 +1,1553 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import os
16
+ from contextlib import nullcontext
17
+ from typing import Callable, Dict, List, Optional, Union
18
+
19
+ import safetensors
20
+ import torch
21
+ from huggingface_hub import model_info
22
+ from huggingface_hub.constants import HF_HUB_OFFLINE
23
+ from huggingface_hub.utils import validate_hf_hub_args
24
+ from packaging import version
25
+ from torch import nn
26
+
27
+ from .. import __version__
28
+ from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
29
+ from ..utils import (
30
+ USE_PEFT_BACKEND,
31
+ _get_model_file,
32
+ convert_state_dict_to_diffusers,
33
+ convert_state_dict_to_peft,
34
+ convert_unet_state_dict_to_peft,
35
+ delete_adapter_layers,
36
+ deprecate,
37
+ get_adapter_name,
38
+ get_peft_kwargs,
39
+ is_accelerate_available,
40
+ is_transformers_available,
41
+ logging,
42
+ recurse_remove_peft_layers,
43
+ scale_lora_layers,
44
+ set_adapter_layers,
45
+ set_weights_and_activate_adapters,
46
+ )
47
+ from .lora_conversion_utils import _convert_kohya_lora_to_diffusers, _maybe_map_sgm_blocks_to_diffusers
48
+
49
+
50
+ if is_transformers_available():
51
+ from transformers import PreTrainedModel
52
+
53
+ from ..models.lora import PatchedLoraProjection, text_encoder_attn_modules, text_encoder_mlp_modules
54
+
55
+ if is_accelerate_available():
56
+ from accelerate import init_empty_weights
57
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
58
+
59
+ logger = logging.get_logger(__name__)
60
+
61
+ TEXT_ENCODER_NAME = "text_encoder"
62
+ UNET_NAME = "unet"
63
+ TRANSFORMER_NAME = "transformer"
64
+
65
+ LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
66
+ LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
67
+
68
+ LORA_DEPRECATION_MESSAGE = "You are using an old version of the LoRA backend. This will be deprecated in the next releases in favor of PEFT. Make sure to install the latest PEFT and transformers packages."
69
+
70
+
71
+ class LoraLoaderMixin:
72
+ r"""
73
+ Load LoRA layers into [`UNet2DConditionModel`] and
74
+ [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
75
+ """
76
+
77
+ text_encoder_name = TEXT_ENCODER_NAME
78
+ unet_name = UNET_NAME
79
+ transformer_name = TRANSFORMER_NAME
80
+ num_fused_loras = 0
81
+
82
+ def load_lora_weights(
83
+ self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs
84
+ ):
85
+ """
86
+ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
87
+ `self.text_encoder`.
88
+
89
+ All kwargs are forwarded to `self.lora_state_dict`.
90
+
91
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
92
+
93
+ See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
94
+ `self.unet`.
95
+
96
+ See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
97
+ into `self.text_encoder`.
98
+
99
+ Parameters:
100
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
101
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
102
+ kwargs (`dict`, *optional*):
103
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
104
+ adapter_name (`str`, *optional*):
105
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
106
+ `default_{i}` where i is the total number of adapters being loaded.
107
+ """
108
+ # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
109
+ state_dict, network_alphas = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)
110
+
111
+ is_correct_format = all("lora" in key for key in state_dict.keys())
112
+ if not is_correct_format:
113
+ raise ValueError("Invalid LoRA checkpoint.")
114
+
115
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
116
+
117
+ self.load_lora_into_unet(
118
+ state_dict,
119
+ network_alphas=network_alphas,
120
+ unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet,
121
+ low_cpu_mem_usage=low_cpu_mem_usage,
122
+ adapter_name=adapter_name,
123
+ _pipeline=self,
124
+ )
125
+ self.load_lora_into_text_encoder(
126
+ state_dict,
127
+ network_alphas=network_alphas,
128
+ text_encoder=getattr(self, self.text_encoder_name)
129
+ if not hasattr(self, "text_encoder")
130
+ else self.text_encoder,
131
+ lora_scale=self.lora_scale,
132
+ low_cpu_mem_usage=low_cpu_mem_usage,
133
+ adapter_name=adapter_name,
134
+ _pipeline=self,
135
+ )
136
+
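
A short usage sketch for `load_lora_weights` as defined above; the LoRA repo id is hypothetical:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Loads LoRA layers into both pipe.unet and pipe.text_encoder, and
# (with the PEFT backend) registers them under an adapter name.
pipe.load_lora_weights("some-user/some-lora-repo", adapter_name="style")  # hypothetical repo id
```
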
137
+ @classmethod
138
+ @validate_hf_hub_args
139
+ def lora_state_dict(
140
+ cls,
141
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
142
+ **kwargs,
143
+ ):
144
+ r"""
145
+ Return state dict for lora weights and the network alphas.
146
+
147
+ <Tip warning={true}>
148
+
149
+ We support loading A1111 formatted LoRA checkpoints in a limited capacity.
150
+
151
+ This function is experimental and might change in the future.
152
+
153
+ </Tip>
154
+
155
+ Parameters:
156
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
157
+ Can be either:
158
+
159
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
160
+ the Hub.
161
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
162
+ with [`ModelMixin.save_pretrained`].
163
+ - A [torch state
164
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
165
+
166
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
167
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
168
+ is not used.
169
+ force_download (`bool`, *optional*, defaults to `False`):
170
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
171
+ cached versions if they exist.
172
+ resume_download (`bool`, *optional*, defaults to `False`):
173
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
174
+ incompletely downloaded files are deleted.
175
+ proxies (`Dict[str, str]`, *optional*):
176
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
177
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
178
+ local_files_only (`bool`, *optional*, defaults to `False`):
179
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
180
+ won't be downloaded from the Hub.
181
+ token (`str` or *bool*, *optional*):
182
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
183
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
184
+ revision (`str`, *optional*, defaults to `"main"`):
185
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
186
+ allowed by Git.
187
+ subfolder (`str`, *optional*, defaults to `""`):
188
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
189
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
190
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
191
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
192
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
193
+ argument to `True` will raise an error.
194
+ mirror (`str`, *optional*):
195
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
196
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
197
+ information.
198
+
199
+ """
200
+ # Load the main state dict first which has the LoRA layers for either of
201
+ # UNet and text encoder or both.
202
+ cache_dir = kwargs.pop("cache_dir", None)
203
+ force_download = kwargs.pop("force_download", False)
204
+ resume_download = kwargs.pop("resume_download", False)
205
+ proxies = kwargs.pop("proxies", None)
206
+ local_files_only = kwargs.pop("local_files_only", None)
207
+ token = kwargs.pop("token", None)
208
+ revision = kwargs.pop("revision", None)
209
+ subfolder = kwargs.pop("subfolder", None)
210
+ weight_name = kwargs.pop("weight_name", None)
211
+ unet_config = kwargs.pop("unet_config", None)
212
+ use_safetensors = kwargs.pop("use_safetensors", None)
213
+
214
+ allow_pickle = False
215
+ if use_safetensors is None:
216
+ use_safetensors = True
217
+ allow_pickle = True
218
+
219
+ user_agent = {
220
+ "file_type": "attn_procs_weights",
221
+ "framework": "pytorch",
222
+ }
223
+
224
+ model_file = None
225
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
226
+ # Let's first try to load .safetensors weights
227
+ if (use_safetensors and weight_name is None) or (
228
+ weight_name is not None and weight_name.endswith(".safetensors")
229
+ ):
230
+ try:
231
+ # Here we're relaxing the loading check to enable more Inference API
232
+ # friendliness where sometimes, it's not at all possible to automatically
233
+ # determine `weight_name`.
234
+ if weight_name is None:
235
+ weight_name = cls._best_guess_weight_name(
236
+ pretrained_model_name_or_path_or_dict,
237
+ file_extension=".safetensors",
238
+ local_files_only=local_files_only,
239
+ )
240
+ model_file = _get_model_file(
241
+ pretrained_model_name_or_path_or_dict,
242
+ weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
243
+ cache_dir=cache_dir,
244
+ force_download=force_download,
245
+ resume_download=resume_download,
246
+ proxies=proxies,
247
+ local_files_only=local_files_only,
248
+ token=token,
249
+ revision=revision,
250
+ subfolder=subfolder,
251
+ user_agent=user_agent,
252
+ )
253
+ state_dict = safetensors.torch.load_file(model_file, device="cpu")
254
+ except (IOError, safetensors.SafetensorError) as e:
255
+ if not allow_pickle:
256
+ raise e
257
+ # try loading non-safetensors weights
258
+ model_file = None
259
+ pass
260
+
261
+ if model_file is None:
262
+ if weight_name is None:
263
+ weight_name = cls._best_guess_weight_name(
264
+ pretrained_model_name_or_path_or_dict, file_extension=".bin", local_files_only=local_files_only
265
+ )
266
+ model_file = _get_model_file(
267
+ pretrained_model_name_or_path_or_dict,
268
+ weights_name=weight_name or LORA_WEIGHT_NAME,
269
+ cache_dir=cache_dir,
270
+ force_download=force_download,
271
+ resume_download=resume_download,
272
+ proxies=proxies,
273
+ local_files_only=local_files_only,
274
+ token=token,
275
+ revision=revision,
276
+ subfolder=subfolder,
277
+ user_agent=user_agent,
278
+ )
279
+ state_dict = torch.load(model_file, map_location="cpu")
280
+ else:
281
+ state_dict = pretrained_model_name_or_path_or_dict
282
+
283
+ network_alphas = None
284
+ # TODO: replace it with a method from `state_dict_utils`
285
+ if all(
286
+ (
287
+ k.startswith("lora_te_")
288
+ or k.startswith("lora_unet_")
289
+ or k.startswith("lora_te1_")
290
+ or k.startswith("lora_te2_")
291
+ )
292
+ for k in state_dict.keys()
293
+ ):
294
+ # Map SDXL blocks correctly.
295
+ if unet_config is not None:
296
+ # use unet config to remap block numbers
297
+ state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config)
298
+ state_dict, network_alphas = _convert_kohya_lora_to_diffusers(state_dict)
299
+
300
+ return state_dict, network_alphas
301
+
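
`lora_state_dict` is a classmethod, so it can also be called on its own to inspect a checkpoint before loading it; a sketch with a placeholder path:

```python
from diffusers.loaders import LoraLoaderMixin

# Returns the raw LoRA tensors (keyed with "unet." / "text_encoder."
# prefixes in the diffusers format) and, for kohya-style checkpoints,
# the network alphas.
state_dict, network_alphas = LoraLoaderMixin.lora_state_dict("path/to/lora")  # placeholder path
print(len(state_dict), "tensors; has alphas:", network_alphas is not None)
```
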
302
+ @classmethod
303
+ def _best_guess_weight_name(
304
+ cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False
305
+ ):
306
+ if local_files_only or HF_HUB_OFFLINE:
307
+ raise ValueError("When using the offline mode, you must specify a `weight_name`.")
308
+
309
+ targeted_files = []
310
+
311
+ if os.path.isfile(pretrained_model_name_or_path_or_dict):
312
+ return
313
+ elif os.path.isdir(pretrained_model_name_or_path_or_dict):
314
+ targeted_files = [
315
+ f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)
316
+ ]
317
+ else:
318
+ files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings
319
+ targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)]
320
+ if len(targeted_files) == 0:
321
+ return
322
+
323
+ # "scheduler" does not correspond to a LoRA checkpoint.
324
+ # "optimizer" does not correspond to a LoRA checkpoint
325
+ # only top-level checkpoints are considered and not the other ones, hence "checkpoint".
326
+ unallowed_substrings = {"scheduler", "optimizer", "checkpoint"}
327
+ targeted_files = list(
328
+ filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files)
329
+ )
330
+
331
+ if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files):
332
+ targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files))
333
+ elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files):
334
+ targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files))
335
+
336
+ if len(targeted_files) > 1:
337
+ raise ValueError(
338
+ f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}."
339
+ )
340
+ weight_name = targeted_files[0]
341
+ return weight_name
342
+
343
+ @classmethod
344
+ def _optionally_disable_offloading(cls, _pipeline):
345
+ """
346
+ Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.
347
+
348
+ Args:
349
+ _pipeline (`DiffusionPipeline`):
350
+ The pipeline to disable offloading for.
351
+
352
+ Returns:
353
+ tuple:
354
+ A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
355
+ """
356
+ is_model_cpu_offload = False
357
+ is_sequential_cpu_offload = False
358
+
359
+ if _pipeline is not None:
360
+ for _, component in _pipeline.components.items():
361
+ if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
362
+ if not is_model_cpu_offload:
363
+ is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload)
364
+ if not is_sequential_cpu_offload:
365
+ is_sequential_cpu_offload = isinstance(component._hf_hook, AlignDevicesHook)
366
+
367
+ logger.info(
368
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
369
+ )
370
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
371
+
372
+ return (is_model_cpu_offload, is_sequential_cpu_offload)
373
+
374
+ @classmethod
375
+ def load_lora_into_unet(
376
+ cls, state_dict, network_alphas, unet, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None
377
+ ):
378
+ """
379
+ This will load the LoRA layers specified in `state_dict` into `unet`.
380
+
381
+ Parameters:
382
+ state_dict (`dict`):
383
+ A standard state dict containing the lora layer parameters. The keys can either be indexed directly
384
+ into the unet or prefixed with an additional `unet` which can be used to distinguish between text
385
+ encoder lora layers.
386
+ network_alphas (`Dict[str, float]`):
387
+ See `LoRALinearLayer` for more details.
388
+ unet (`UNet2DConditionModel`):
389
+ The UNet model to load the LoRA layers into.
390
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
391
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
392
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
393
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
394
+ argument to `True` will raise an error.
395
+ adapter_name (`str`, *optional*):
396
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
397
+ `default_{i}` where i is the total number of adapters being loaded.
398
+ """
399
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
400
+ # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
401
+ # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as
402
+ # their prefixes.
403
+ keys = list(state_dict.keys())
404
+
405
+ if all(key.startswith("unet.unet") for key in keys):
406
+ deprecation_message = "Keys starting with 'unet.unet' are deprecated."
407
+ deprecate("unet.unet keys", "0.27", deprecation_message)
408
+
409
+ if all(key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in keys):
410
+ # Load the layers corresponding to UNet.
411
+ logger.info(f"Loading {cls.unet_name}.")
412
+
413
+ unet_keys = [k for k in keys if k.startswith(cls.unet_name)]
414
+ state_dict = {k.replace(f"{cls.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}
415
+
416
+ if network_alphas is not None:
417
+ alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.unet_name)]
418
+ network_alphas = {
419
+ k.replace(f"{cls.unet_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
420
+ }
421
+
422
+ else:
423
+ # Otherwise, we're dealing with the old format. This means the `state_dict` should only
424
+ # contain the module names of the `unet` as its keys WITHOUT any prefix.
425
+ if not USE_PEFT_BACKEND:
426
+ warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`."
427
+ logger.warn(warn_message)
428
+
429
+ if USE_PEFT_BACKEND and len(state_dict.keys()) > 0:
430
+ from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
431
+
432
+ if adapter_name in getattr(unet, "peft_config", {}):
433
+ raise ValueError(
434
+ f"Adapter name {adapter_name} already in use in the Unet - please select a new adapter name."
435
+ )
436
+
437
+ state_dict = convert_unet_state_dict_to_peft(state_dict)
438
+
439
+ if network_alphas is not None:
440
+ # The alphas state dict have the same structure as Unet, thus we convert it to peft format using
441
+ # `convert_unet_state_dict_to_peft` method.
442
+ network_alphas = convert_unet_state_dict_to_peft(network_alphas)
443
+
444
+ rank = {}
445
+ for key, val in state_dict.items():
446
+ if "lora_B" in key:
447
+ rank[key] = val.shape[1]
448
+
449
+ lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True)
450
+ lora_config = LoraConfig(**lora_config_kwargs)
451
+
452
+ # adapter_name
453
+ if adapter_name is None:
454
+ adapter_name = get_adapter_name(unet)
455
+
456
+ # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks
457
+ # otherwise loading LoRA weights will lead to an error
458
+ is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
459
+
460
+ inject_adapter_in_model(lora_config, unet, adapter_name=adapter_name)
461
+ incompatible_keys = set_peft_model_state_dict(unet, state_dict, adapter_name)
462
+
463
+ if incompatible_keys is not None:
464
+ # check only for unexpected keys
465
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
466
+ if unexpected_keys:
467
+ logger.warning(
468
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
469
+ f" {unexpected_keys}. "
470
+ )
471
+
472
+ # Offload back.
473
+ if is_model_cpu_offload:
474
+ _pipeline.enable_model_cpu_offload()
475
+ elif is_sequential_cpu_offload:
476
+ _pipeline.enable_sequential_cpu_offload()
477
+ # Unsafe code />
478
+
479
+ unet.load_attn_procs(
480
+ state_dict, network_alphas=network_alphas, low_cpu_mem_usage=low_cpu_mem_usage, _pipeline=_pipeline
481
+ )
482
+
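
When only the UNet should be adapted, the classmethods above can be combined directly, skipping the text encoder; a sketch with a placeholder LoRA path:

```python
from diffusers import UNet2DConditionModel
from diffusers.loaders import LoraLoaderMixin

unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)

state_dict, network_alphas = LoraLoaderMixin.lora_state_dict("path/to/lora")  # placeholder path
# Only the "unet."-prefixed keys are applied; text-encoder keys are ignored here.
LoraLoaderMixin.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=unet)
```
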
483
+ @classmethod
484
+ def load_lora_into_text_encoder(
485
+ cls,
486
+ state_dict,
487
+ network_alphas,
488
+ text_encoder,
489
+ prefix=None,
490
+ lora_scale=1.0,
491
+ low_cpu_mem_usage=None,
492
+ adapter_name=None,
493
+ _pipeline=None,
494
+ ):
495
+ """
496
+ This will load the LoRA layers specified in `state_dict` into `text_encoder`
497
+
498
+ Parameters:
499
+ state_dict (`dict`):
500
+ A standard state dict containing the lora layer parameters. The key should be prefixed with an
501
+ additional `text_encoder` to distinguish between unet lora layers.
502
+ network_alphas (`Dict[str, float]`):
503
+ See `LoRALinearLayer` for more details.
504
+ text_encoder (`CLIPTextModel`):
505
+ The text encoder model to load the LoRA layers into.
506
+ prefix (`str`):
507
+ Expected prefix of the `text_encoder` in the `state_dict`.
508
+ lora_scale (`float`):
509
+ How much to scale the output of the lora linear layer before it is added with the output of the regular
510
+ lora layer.
511
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
512
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
513
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
514
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
515
+ argument to `True` will raise an error.
516
+ adapter_name (`str`, *optional*):
517
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
518
+ `default_{i}` where i is the total number of adapters being loaded.
519
+ """
520
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
521
+
522
+ # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
523
+ # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
524
+ # their prefixes.
525
+ keys = list(state_dict.keys())
526
+ prefix = cls.text_encoder_name if prefix is None else prefix
527
+
528
+ # Safe prefix to check with.
529
+ if any(cls.text_encoder_name in key for key in keys):
530
+ # Load the layers corresponding to text encoder and make necessary adjustments.
531
+ text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix]
532
+ text_encoder_lora_state_dict = {
533
+ k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys
534
+ }
535
+
536
+ if len(text_encoder_lora_state_dict) > 0:
537
+ logger.info(f"Loading {prefix}.")
538
+ rank = {}
539
+ text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict)
540
+
541
+ if USE_PEFT_BACKEND:
542
+ # convert state dict
543
+ text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict)
544
+
545
+ for name, _ in text_encoder_attn_modules(text_encoder):
546
+ rank_key = f"{name}.out_proj.lora_B.weight"
547
+ rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
548
+
549
+ patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
550
+ if patch_mlp:
551
+ for name, _ in text_encoder_mlp_modules(text_encoder):
552
+ rank_key_fc1 = f"{name}.fc1.lora_B.weight"
553
+ rank_key_fc2 = f"{name}.fc2.lora_B.weight"
554
+
555
+ rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
556
+ rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
557
+ else:
558
+ for name, _ in text_encoder_attn_modules(text_encoder):
559
+ rank_key = f"{name}.out_proj.lora_linear_layer.up.weight"
560
+ rank.update({rank_key: text_encoder_lora_state_dict[rank_key].shape[1]})
561
+
562
+ patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
563
+ if patch_mlp:
564
+ for name, _ in text_encoder_mlp_modules(text_encoder):
565
+ rank_key_fc1 = f"{name}.fc1.lora_linear_layer.up.weight"
566
+ rank_key_fc2 = f"{name}.fc2.lora_linear_layer.up.weight"
567
+ rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
568
+ rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
569
+
570
+ if network_alphas is not None:
571
+ alpha_keys = [
572
+ k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix
573
+ ]
574
+ network_alphas = {
575
+ k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
576
+ }
577
+
578
+ if USE_PEFT_BACKEND:
579
+ from peft import LoraConfig
580
+
581
+ lora_config_kwargs = get_peft_kwargs(
582
+ rank, network_alphas, text_encoder_lora_state_dict, is_unet=False
583
+ )
584
+
585
+ lora_config = LoraConfig(**lora_config_kwargs)
586
+
587
+ # adapter_name
588
+ if adapter_name is None:
589
+ adapter_name = get_adapter_name(text_encoder)
590
+
591
+ is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
592
+
593
+ # inject LoRA layers and load the state dict
594
+ # in transformers we automatically check whether the adapter name is already in use or not
595
+ text_encoder.load_adapter(
596
+ adapter_name=adapter_name,
597
+ adapter_state_dict=text_encoder_lora_state_dict,
598
+ peft_config=lora_config,
599
+ )
600
+
601
+ # scale LoRA layers with `lora_scale`
602
+ scale_lora_layers(text_encoder, weight=lora_scale)
603
+ else:
604
+ cls._modify_text_encoder(
605
+ text_encoder,
606
+ lora_scale,
607
+ network_alphas,
608
+ rank=rank,
609
+ patch_mlp=patch_mlp,
610
+ low_cpu_mem_usage=low_cpu_mem_usage,
611
+ )
612
+
613
+ is_pipeline_offloaded = _pipeline is not None and any(
614
+ isinstance(c, torch.nn.Module) and hasattr(c, "_hf_hook")
615
+ for c in _pipeline.components.values()
616
+ )
617
+ if is_pipeline_offloaded and low_cpu_mem_usage:
618
+ low_cpu_mem_usage = True
619
+ logger.info(
620
+ f"Pipeline {_pipeline.__class__} is offloaded. Therefore low cpu mem usage loading is forced."
621
+ )
622
+
623
+ if low_cpu_mem_usage:
624
+ device = next(iter(text_encoder_lora_state_dict.values())).device
625
+ dtype = next(iter(text_encoder_lora_state_dict.values())).dtype
626
+ unexpected_keys = load_model_dict_into_meta(
627
+ text_encoder, text_encoder_lora_state_dict, device=device, dtype=dtype
628
+ )
629
+ else:
630
+ load_state_dict_results = text_encoder.load_state_dict(
631
+ text_encoder_lora_state_dict, strict=False
632
+ )
633
+ unexpected_keys = load_state_dict_results.unexpected_keys
634
+
635
+ if len(unexpected_keys) != 0:
636
+ raise ValueError(
637
+ f"failed to load text encoder state dict, unexpected keys: {load_state_dict_results.unexpected_keys}"
638
+ )
639
+
640
+ # <Unsafe code
641
+ # We can be sure that the following works as all we do is change the dtype and device of the text encoder
642
+ # Now we remove any existing hooks to
643
+ is_model_cpu_offload = False
644
+ is_sequential_cpu_offload = False
645
+ if _pipeline is not None:
646
+ for _, component in _pipeline.components.items():
647
+ if isinstance(component, torch.nn.Module):
648
+ if hasattr(component, "_hf_hook"):
649
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
650
+ is_sequential_cpu_offload = isinstance(
651
+ getattr(component, "_hf_hook"), AlignDevicesHook
652
+ )
653
+ logger.info(
654
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
655
+ )
656
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
657
+
658
+ text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype)
659
+
660
+ # Offload back.
661
+ if is_model_cpu_offload:
662
+ _pipeline.enable_model_cpu_offload()
663
+ elif is_sequential_cpu_offload:
664
+ _pipeline.enable_sequential_cpu_offload()
665
+ # Unsafe code />
666
+
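For clarity, the prefix filtering performed at the top of this method keeps only keys whose first dotted component is the text encoder prefix and then strips it. A minimal sketch with hypothetical keys:

```python
state_dict = {
    "text_encoder.text_model.encoder.layers.0.self_attn.q_proj.lora_A.weight": "te-A",
    "unet.down_blocks.0.attentions.0.proj_in.lora_A.weight": "unet-A",
}
prefix = "text_encoder"

# Keep keys belonging to the text encoder, then drop the prefix before loading.
keys = [k for k in state_dict if k.startswith(prefix) and k.split(".")[0] == prefix]
text_encoder_lora_state_dict = {k.replace(f"{prefix}.", ""): state_dict[k] for k in keys}
print(text_encoder_lora_state_dict)
# {'text_model.encoder.layers.0.self_attn.q_proj.lora_A.weight': 'te-A'}
```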
667
+ @classmethod
668
+ def load_lora_into_transformer(
669
+ cls, state_dict, network_alphas, transformer, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None
670
+ ):
671
+ """
672
+ This will load the LoRA layers specified in `state_dict` into `transformer`.
673
+
674
+ Parameters:
675
+ state_dict (`dict`):
676
+ A standard state dict containing the lora layer parameters. The keys can either be indexed directly
677
+ into the transformer or prefixed with an additional `transformer`, which can be used to distinguish between
678
+ text encoder lora layers.
679
+ network_alphas (`Dict[str, float]`):
680
+ See `LoRALinearLayer` for more details.
681
+ transformer (`Transformer2DModel`):
682
+ The transformer model to load the LoRA layers into.
683
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
684
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
685
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
686
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
687
+ argument to `True` will raise an error.
688
+ adapter_name (`str`, *optional*):
689
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
690
+ `default_{i}` where i is the total number of adapters being loaded.
691
+ """
692
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
693
+
694
+ keys = list(state_dict.keys())
695
+
696
+ transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)]
697
+ state_dict = {
698
+ k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys
699
+ }
700
+
701
+ if network_alphas is not None:
702
+ alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.transformer_name)]
703
+ network_alphas = {
704
+ k.replace(f"{cls.transformer_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
705
+ }
706
+
707
+ if len(state_dict.keys()) > 0:
708
+ from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
709
+
710
+ if adapter_name in getattr(transformer, "peft_config", {}):
711
+ raise ValueError(
712
+ f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name."
713
+ )
714
+
715
+ rank = {}
716
+ for key, val in state_dict.items():
717
+ if "lora_B" in key:
718
+ rank[key] = val.shape[1]
719
+
720
+ lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict)
721
+ lora_config = LoraConfig(**lora_config_kwargs)
722
+
723
+ # adapter_name
724
+ if adapter_name is None:
725
+ adapter_name = get_adapter_name(transformer)
726
+
727
+ # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks
728
+ # otherwise loading LoRA weights will lead to an error
729
+ is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
730
+
731
+ inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name)
732
+ incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name)
733
+
734
+ if incompatible_keys is not None:
735
+ # check only for unexpected keys
736
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
737
+ if unexpected_keys:
738
+ logger.warning(
739
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
740
+ f" {unexpected_keys}. "
741
+ )
742
+
743
+ # Offload back.
744
+ if is_model_cpu_offload:
745
+ _pipeline.enable_model_cpu_offload()
746
+ elif is_sequential_cpu_offload:
747
+ _pipeline.enable_sequential_cpu_offload()
748
+ # Unsafe code />
749
+
750
+ @property
751
+ def lora_scale(self) -> float:
752
+ # property function that returns the lora scale which can be set at run time by the pipeline.
753
+ # if _lora_scale has not been set, return 1
754
+ return self._lora_scale if hasattr(self, "_lora_scale") else 1.0
755
+
756
+ def _remove_text_encoder_monkey_patch(self):
757
+ if USE_PEFT_BACKEND:
758
+ remove_method = recurse_remove_peft_layers
759
+ else:
760
+ remove_method = self._remove_text_encoder_monkey_patch_classmethod
761
+
762
+ if hasattr(self, "text_encoder"):
763
+ remove_method(self.text_encoder)
764
+
765
+ # In case the text encoder has no LoRA attached
766
+ if USE_PEFT_BACKEND and getattr(self.text_encoder, "peft_config", None) is not None:
767
+ del self.text_encoder.peft_config
768
+ self.text_encoder._hf_peft_config_loaded = None
769
+ if hasattr(self, "text_encoder_2"):
770
+ remove_method(self.text_encoder_2)
771
+ if USE_PEFT_BACKEND:
772
+ del self.text_encoder_2.peft_config
773
+ self.text_encoder_2._hf_peft_config_loaded = None
774
+
775
+ @classmethod
776
+ def _remove_text_encoder_monkey_patch_classmethod(cls, text_encoder):
777
+ deprecate("_remove_text_encoder_monkey_patch_classmethod", "0.27", LORA_DEPRECATION_MESSAGE)
778
+
779
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
780
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
781
+ attn_module.q_proj.lora_linear_layer = None
782
+ attn_module.k_proj.lora_linear_layer = None
783
+ attn_module.v_proj.lora_linear_layer = None
784
+ attn_module.out_proj.lora_linear_layer = None
785
+
786
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
787
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
788
+ mlp_module.fc1.lora_linear_layer = None
789
+ mlp_module.fc2.lora_linear_layer = None
790
+
791
+ @classmethod
792
+ def _modify_text_encoder(
793
+ cls,
794
+ text_encoder,
795
+ lora_scale=1,
796
+ network_alphas=None,
797
+ rank: Union[Dict[str, int], int] = 4,
798
+ dtype=None,
799
+ patch_mlp=False,
800
+ low_cpu_mem_usage=False,
801
+ ):
802
+ r"""
803
+ Monkey-patches the forward passes of attention modules of the text encoder.
804
+ """
805
+ deprecate("_modify_text_encoder", "0.27", LORA_DEPRECATION_MESSAGE)
806
+
807
+ def create_patched_linear_lora(model, network_alpha, rank, dtype, lora_parameters):
808
+ linear_layer = model.regular_linear_layer if isinstance(model, PatchedLoraProjection) else model
809
+ ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
810
+ with ctx():
811
+ model = PatchedLoraProjection(linear_layer, lora_scale, network_alpha, rank, dtype=dtype)
812
+
813
+ lora_parameters.extend(model.lora_linear_layer.parameters())
814
+ return model
815
+
816
+ # First, remove any monkey-patch that might have been applied before
817
+ cls._remove_text_encoder_monkey_patch_classmethod(text_encoder)
818
+
819
+ lora_parameters = []
820
+ network_alphas = {} if network_alphas is None else network_alphas
821
+ is_network_alphas_populated = len(network_alphas) > 0
822
+
823
+ for name, attn_module in text_encoder_attn_modules(text_encoder):
824
+ query_alpha = network_alphas.pop(name + ".to_q_lora.down.weight.alpha", None)
825
+ key_alpha = network_alphas.pop(name + ".to_k_lora.down.weight.alpha", None)
826
+ value_alpha = network_alphas.pop(name + ".to_v_lora.down.weight.alpha", None)
827
+ out_alpha = network_alphas.pop(name + ".to_out_lora.down.weight.alpha", None)
828
+
829
+ if isinstance(rank, dict):
830
+ current_rank = rank.pop(f"{name}.out_proj.lora_linear_layer.up.weight")
831
+ else:
832
+ current_rank = rank
833
+
834
+ attn_module.q_proj = create_patched_linear_lora(
835
+ attn_module.q_proj, query_alpha, current_rank, dtype, lora_parameters
836
+ )
837
+ attn_module.k_proj = create_patched_linear_lora(
838
+ attn_module.k_proj, key_alpha, current_rank, dtype, lora_parameters
839
+ )
840
+ attn_module.v_proj = create_patched_linear_lora(
841
+ attn_module.v_proj, value_alpha, current_rank, dtype, lora_parameters
842
+ )
843
+ attn_module.out_proj = create_patched_linear_lora(
844
+ attn_module.out_proj, out_alpha, current_rank, dtype, lora_parameters
845
+ )
846
+
847
+ if patch_mlp:
848
+ for name, mlp_module in text_encoder_mlp_modules(text_encoder):
849
+ fc1_alpha = network_alphas.pop(name + ".fc1.lora_linear_layer.down.weight.alpha", None)
850
+ fc2_alpha = network_alphas.pop(name + ".fc2.lora_linear_layer.down.weight.alpha", None)
851
+
852
+ current_rank_fc1 = rank.pop(f"{name}.fc1.lora_linear_layer.up.weight")
853
+ current_rank_fc2 = rank.pop(f"{name}.fc2.lora_linear_layer.up.weight")
854
+
855
+ mlp_module.fc1 = create_patched_linear_lora(
856
+ mlp_module.fc1, fc1_alpha, current_rank_fc1, dtype, lora_parameters
857
+ )
858
+ mlp_module.fc2 = create_patched_linear_lora(
859
+ mlp_module.fc2, fc2_alpha, current_rank_fc2, dtype, lora_parameters
860
+ )
861
+
862
+ if is_network_alphas_populated and len(network_alphas) > 0:
863
+ raise ValueError(
864
+ f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
865
+ )
866
+
867
+ return lora_parameters
868
+
869
+ @classmethod
870
+ def save_lora_weights(
871
+ cls,
872
+ save_directory: Union[str, os.PathLike],
873
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
874
+ text_encoder_lora_layers: Dict[str, torch.nn.Module] = None,
875
+ transformer_lora_layers: Dict[str, torch.nn.Module] = None,
876
+ is_main_process: bool = True,
877
+ weight_name: str = None,
878
+ save_function: Callable = None,
879
+ safe_serialization: bool = True,
880
+ ):
881
+ r"""
882
+ Save the LoRA parameters corresponding to the UNet and text encoder.
883
+
884
+ Arguments:
885
+ save_directory (`str` or `os.PathLike`):
886
+ Directory to save LoRA parameters to. Will be created if it doesn't exist.
887
+ unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
888
+ State dict of the LoRA layers corresponding to the `unet`.
889
+ text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
890
+ State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
891
+ encoder LoRA state dict because it comes from 🤗 Transformers.
892
+ is_main_process (`bool`, *optional*, defaults to `True`):
893
+ Whether the process calling this is the main process or not. Useful during distributed training when you
894
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
895
+ process to avoid race conditions.
896
+ save_function (`Callable`):
897
+ The function to use to save the state dictionary. Useful during distributed training when you need to
898
+ replace `torch.save` with another method. Can be configured with the environment variable
899
+ `DIFFUSERS_SAVE_MODE`.
900
+ safe_serialization (`bool`, *optional*, defaults to `True`):
901
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
902
+ """
903
+ state_dict = {}
904
+
905
+ def pack_weights(layers, prefix):
906
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
907
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
908
+ return layers_state_dict
909
+
910
+ if not (unet_lora_layers or text_encoder_lora_layers or transformer_lora_layers):
911
+ raise ValueError(
912
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers`, or `transformer_lora_layers`."
913
+ )
914
+
915
+ if unet_lora_layers:
916
+ state_dict.update(pack_weights(unet_lora_layers, cls.unet_name))
917
+
918
+ if text_encoder_lora_layers:
919
+ state_dict.update(pack_weights(text_encoder_lora_layers, cls.text_encoder_name))
920
+
921
+ if transformer_lora_layers:
922
+ state_dict.update(pack_weights(transformer_lora_layers, "transformer"))
923
+
924
+ # Save the model
925
+ cls.write_lora_layers(
926
+ state_dict=state_dict,
927
+ save_directory=save_directory,
928
+ is_main_process=is_main_process,
929
+ weight_name=weight_name,
930
+ save_function=save_function,
931
+ safe_serialization=safe_serialization,
932
+ )
933
+
934
+ @staticmethod
935
+ def write_lora_layers(
936
+ state_dict: Dict[str, torch.Tensor],
937
+ save_directory: str,
938
+ is_main_process: bool,
939
+ weight_name: str,
940
+ save_function: Callable,
941
+ safe_serialization: bool,
942
+ ):
943
+ if os.path.isfile(save_directory):
944
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
945
+ return
946
+
947
+ if save_function is None:
948
+ if safe_serialization:
949
+
950
+ def save_function(weights, filename):
951
+ return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})
952
+
953
+ else:
954
+ save_function = torch.save
955
+
956
+ os.makedirs(save_directory, exist_ok=True)
957
+
958
+ if weight_name is None:
959
+ if safe_serialization:
960
+ weight_name = LORA_WEIGHT_NAME_SAFE
961
+ else:
962
+ weight_name = LORA_WEIGHT_NAME
963
+
964
+ save_function(state_dict, os.path.join(save_directory, weight_name))
965
+ logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")
966
+
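The `pack_weights` helper above only namespaces each entry before serialization. A sketch of the resulting keys (toy tensors, hypothetical layer names):

```python
import torch

def pack_weights(layers, prefix):
    # Mirrors the helper above for plain dicts of tensors.
    return {f"{prefix}.{name}": param for name, param in layers.items()}

unet_lora_layers = {"down_blocks.0.attn.to_q.lora_A.weight": torch.zeros(4, 320)}
text_encoder_lora_layers = {"layers.0.q_proj.lora_A.weight": torch.zeros(4, 768)}

state_dict = {
    **pack_weights(unet_lora_layers, "unet"),
    **pack_weights(text_encoder_lora_layers, "text_encoder"),
}
print(list(state_dict))
# ['unet.down_blocks.0.attn.to_q.lora_A.weight', 'text_encoder.layers.0.q_proj.lora_A.weight']
```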
967
+ def unload_lora_weights(self):
968
+ """
969
+ Unloads the LoRA parameters.
970
+
971
+ Examples:
972
+
973
+ ```python
974
+ >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
975
+ >>> pipeline.unload_lora_weights()
976
+ >>> ...
977
+ ```
978
+ """
979
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
980
+
981
+ if not USE_PEFT_BACKEND:
982
+ if version.parse(__version__) > version.parse("0.23"):
983
+ logger.warn(
984
+ "You are using `unload_lora_weights` to disable and unload LoRA weights. If you want to iteratively enable and disable adapter weights, "
985
+ "you can use `pipe.enable_lora()` or `pipe.disable_lora()` after installing the latest version of PEFT."
986
+ )
987
+
988
+ for _, module in unet.named_modules():
989
+ if hasattr(module, "set_lora_layer"):
990
+ module.set_lora_layer(None)
991
+ else:
992
+ recurse_remove_peft_layers(unet)
993
+ if hasattr(unet, "peft_config"):
994
+ del unet.peft_config
995
+
996
+ # Safe to call the following regardless of LoRA.
997
+ self._remove_text_encoder_monkey_patch()
998
+
999
+ def fuse_lora(
1000
+ self,
1001
+ fuse_unet: bool = True,
1002
+ fuse_text_encoder: bool = True,
1003
+ lora_scale: float = 1.0,
1004
+ safe_fusing: bool = False,
1005
+ adapter_names: Optional[List[str]] = None,
1006
+ ):
1007
+ r"""
1008
+ Fuses the LoRA parameters into the original parameters of the corresponding blocks.
1009
+
1010
+ <Tip warning={true}>
1011
+
1012
+ This is an experimental API.
1013
+
1014
+ </Tip>
1015
+
1016
+ Args:
1017
+ fuse_unet (`bool`, defaults to `True`): Whether to fuse the UNet LoRA parameters.
1018
+ fuse_text_encoder (`bool`, defaults to `True`):
1019
+ Whether to fuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
1020
+ LoRA parameters then it won't have any effect.
1021
+ lora_scale (`float`, defaults to 1.0):
1022
+ Controls how much to influence the outputs with the LoRA parameters.
1023
+ safe_fusing (`bool`, defaults to `False`):
1024
+ Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
1025
+ adapter_names (`List[str]`, *optional*):
1026
+ Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
1027
+
1028
+ Example:
1029
+
1030
+ ```py
1031
+ from diffusers import DiffusionPipeline
1032
+ import torch
1033
+
1034
+ pipeline = DiffusionPipeline.from_pretrained(
1035
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
1036
+ ).to("cuda")
1037
+ pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
1038
+ pipeline.fuse_lora(lora_scale=0.7)
1039
+ ```
1040
+ """
1041
+ if fuse_unet or fuse_text_encoder:
1042
+ self.num_fused_loras += 1
1043
+ if self.num_fused_loras > 1:
1044
+ logger.warn(
1045
+ "The current API supports operating with a single LoRA file. You are trying to load and fuse more than one LoRA, which is not well supported.",
1046
+ )
1047
+
1048
+ if fuse_unet:
1049
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1050
+ unet.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names)
1051
+
1052
+ if USE_PEFT_BACKEND:
1053
+ from peft.tuners.tuners_utils import BaseTunerLayer
1054
+
1055
+ def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None):
1056
+ merge_kwargs = {"safe_merge": safe_fusing}
1057
+
1058
+ for module in text_encoder.modules():
1059
+ if isinstance(module, BaseTunerLayer):
1060
+ if lora_scale != 1.0:
1061
+ module.scale_layer(lora_scale)
1062
+
1063
+ # For BC with previous PEFT versions, we need to check the signature
1064
+ # of the `merge` method to see if it supports the `adapter_names` argument.
1065
+ supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
1066
+ if "adapter_names" in supported_merge_kwargs:
1067
+ merge_kwargs["adapter_names"] = adapter_names
1068
+ elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
1069
+ raise ValueError(
1070
+ "The `adapter_names` argument is not supported with your PEFT version. "
1071
+ "Please upgrade to the latest version of PEFT. `pip install -U peft`"
1072
+ )
1073
+
1074
+ module.merge(**merge_kwargs)
1075
+
1076
+ else:
1077
+ deprecate("fuse_text_encoder_lora", "0.27", LORA_DEPRECATION_MESSAGE)
1078
+
1079
+ def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, **kwargs):
1080
+ if "adapter_names" in kwargs and kwargs["adapter_names"] is not None:
1081
+ raise ValueError(
1082
+ "The `adapter_names` argument is not supported in your environment. Please switch to PEFT "
1083
+ "backend to use this argument by installing latest PEFT and transformers."
1084
+ " `pip install -U peft transformers`"
1085
+ )
1086
+
1087
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
1088
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
1089
+ attn_module.q_proj._fuse_lora(lora_scale, safe_fusing)
1090
+ attn_module.k_proj._fuse_lora(lora_scale, safe_fusing)
1091
+ attn_module.v_proj._fuse_lora(lora_scale, safe_fusing)
1092
+ attn_module.out_proj._fuse_lora(lora_scale, safe_fusing)
1093
+
1094
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
1095
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
1096
+ mlp_module.fc1._fuse_lora(lora_scale, safe_fusing)
1097
+ mlp_module.fc2._fuse_lora(lora_scale, safe_fusing)
1098
+
1099
+ if fuse_text_encoder:
1100
+ if hasattr(self, "text_encoder"):
1101
+ fuse_text_encoder_lora(self.text_encoder, lora_scale, safe_fusing, adapter_names=adapter_names)
1102
+ if hasattr(self, "text_encoder_2"):
1103
+ fuse_text_encoder_lora(self.text_encoder_2, lora_scale, safe_fusing, adapter_names=adapter_names)
1104
+
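The signature check inside `fuse_text_encoder_lora` only forwards `adapter_names` when the installed PEFT layer's `merge()` accepts it. The same pattern in isolation (the `merge` stub below stands in for an older PEFT layer and is not a real PEFT API):

```python
import inspect

def merge(safe_merge=False):  # stand-in for an older PEFT layer's merge() signature
    pass

merge_kwargs = {"safe_merge": False}
adapter_names = None  # would trigger the error below if set while merge() lacks the parameter

if "adapter_names" in inspect.signature(merge).parameters:
    merge_kwargs["adapter_names"] = adapter_names
elif adapter_names is not None:
    raise ValueError("`adapter_names` is not supported with this PEFT version.")

merge(**merge_kwargs)
```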
1105
+ def unfuse_lora(self, unfuse_unet: bool = True, unfuse_text_encoder: bool = True):
1106
+ r"""
1107
+ Reverses the effect of
1108
+ [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora).
1109
+
1110
+ <Tip warning={true}>
1111
+
1112
+ This is an experimental API.
1113
+
1114
+ </Tip>
1115
+
1116
+ Args:
1117
+ unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
1118
+ unfuse_text_encoder (`bool`, defaults to `True`):
1119
+ Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
1120
+ LoRA parameters then it won't have any effect.
1121
+ """
1122
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1123
+ if unfuse_unet:
1124
+ if not USE_PEFT_BACKEND:
1125
+ unet.unfuse_lora()
1126
+ else:
1127
+ from peft.tuners.tuners_utils import BaseTunerLayer
1128
+
1129
+ for module in unet.modules():
1130
+ if isinstance(module, BaseTunerLayer):
1131
+ module.unmerge()
1132
+
1133
+ if USE_PEFT_BACKEND:
1134
+ from peft.tuners.tuners_utils import BaseTunerLayer
1135
+
1136
+ def unfuse_text_encoder_lora(text_encoder):
1137
+ for module in text_encoder.modules():
1138
+ if isinstance(module, BaseTunerLayer):
1139
+ module.unmerge()
1140
+
1141
+ else:
1142
+ deprecate("unfuse_text_encoder_lora", "0.27", LORA_DEPRECATION_MESSAGE)
1143
+
1144
+ def unfuse_text_encoder_lora(text_encoder):
1145
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
1146
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
1147
+ attn_module.q_proj._unfuse_lora()
1148
+ attn_module.k_proj._unfuse_lora()
1149
+ attn_module.v_proj._unfuse_lora()
1150
+ attn_module.out_proj._unfuse_lora()
1151
+
1152
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
1153
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
1154
+ mlp_module.fc1._unfuse_lora()
1155
+ mlp_module.fc2._unfuse_lora()
1156
+
1157
+ if unfuse_text_encoder:
1158
+ if hasattr(self, "text_encoder"):
1159
+ unfuse_text_encoder_lora(self.text_encoder)
1160
+ if hasattr(self, "text_encoder_2"):
1161
+ unfuse_text_encoder_lora(self.text_encoder_2)
1162
+
1163
+ self.num_fused_loras -= 1
1164
+
1165
+ def set_adapters_for_text_encoder(
1166
+ self,
1167
+ adapter_names: Union[List[str], str],
1168
+ text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821
1169
+ text_encoder_weights: List[float] = None,
1170
+ ):
1171
+ """
1172
+ Sets the adapter layers for the text encoder.
1173
+
1174
+ Args:
1175
+ adapter_names (`List[str]` or `str`):
1176
+ The names of the adapters to use.
1177
+ text_encoder (`torch.nn.Module`, *optional*):
1178
+ The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
1179
+ attribute.
1180
+ text_encoder_weights (`List[float]`, *optional*):
1181
+ The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
1182
+ """
1183
+ if not USE_PEFT_BACKEND:
1184
+ raise ValueError("PEFT backend is required for this method.")
1185
+
1186
+ def process_weights(adapter_names, weights):
1187
+ if weights is None:
1188
+ weights = [1.0] * len(adapter_names)
1189
+ elif isinstance(weights, float):
1190
+ weights = [weights]
1191
+
1192
+ if len(adapter_names) != len(weights):
1193
+ raise ValueError(
1194
+ f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}"
1195
+ )
1196
+ return weights
1197
+
1198
+ adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
1199
+ text_encoder_weights = process_weights(adapter_names, text_encoder_weights)
1200
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1201
+ if text_encoder is None:
1202
+ raise ValueError(
1203
+ "The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead."
1204
+ )
1205
+ set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)
1206
+
1207
+ def disable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
1208
+ """
1209
+ Disables the LoRA layers for the text encoder.
1210
+
1211
+ Args:
1212
+ text_encoder (`torch.nn.Module`, *optional*):
1213
+ The text encoder module to disable the LoRA layers for. If `None`, it will try to get the
1214
+ `text_encoder` attribute.
1215
+ """
1216
+ if not USE_PEFT_BACKEND:
1217
+ raise ValueError("PEFT backend is required for this method.")
1218
+
1219
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1220
+ if text_encoder is None:
1221
+ raise ValueError("Text Encoder not found.")
1222
+ set_adapter_layers(text_encoder, enabled=False)
1223
+
1224
+ def enable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
1225
+ """
1226
+ Enables the LoRA layers for the text encoder.
1227
+
1228
+ Args:
1229
+ text_encoder (`torch.nn.Module`, *optional*):
1230
+ The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
1231
+ attribute.
1232
+ """
1233
+ if not USE_PEFT_BACKEND:
1234
+ raise ValueError("PEFT backend is required for this method.")
1235
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1236
+ if text_encoder is None:
1237
+ raise ValueError("Text Encoder not found.")
1238
+ set_adapter_layers(text_encoder, enabled=True)
1239
+
1240
+ def set_adapters(
1241
+ self,
1242
+ adapter_names: Union[List[str], str],
1243
+ adapter_weights: Optional[List[float]] = None,
1244
+ ):
1245
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1246
+ # Handle the UNET
1247
+ unet.set_adapters(adapter_names, adapter_weights)
1248
+
1249
+ # Handle the Text Encoder
1250
+ if hasattr(self, "text_encoder"):
1251
+ self.set_adapters_for_text_encoder(adapter_names, self.text_encoder, adapter_weights)
1252
+ if hasattr(self, "text_encoder_2"):
1253
+ self.set_adapters_for_text_encoder(adapter_names, self.text_encoder_2, adapter_weights)
1254
+
1255
+ def disable_lora(self):
1256
+ if not USE_PEFT_BACKEND:
1257
+ raise ValueError("PEFT backend is required for this method.")
1258
+
1259
+ # Disable unet adapters
1260
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1261
+ unet.disable_lora()
1262
+
1263
+ # Disable text encoder adapters
1264
+ if hasattr(self, "text_encoder"):
1265
+ self.disable_lora_for_text_encoder(self.text_encoder)
1266
+ if hasattr(self, "text_encoder_2"):
1267
+ self.disable_lora_for_text_encoder(self.text_encoder_2)
1268
+
1269
+ def enable_lora(self):
1270
+ if not USE_PEFT_BACKEND:
1271
+ raise ValueError("PEFT backend is required for this method.")
1272
+
1273
+ # Enable unet adapters
1274
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1275
+ unet.enable_lora()
1276
+
1277
+ # Enable text encoder adapters
1278
+ if hasattr(self, "text_encoder"):
1279
+ self.enable_lora_for_text_encoder(self.text_encoder)
1280
+ if hasattr(self, "text_encoder_2"):
1281
+ self.enable_lora_for_text_encoder(self.text_encoder_2)
1282
+
1283
+ def delete_adapters(self, adapter_names: Union[List[str], str]):
1284
+ """
1285
+ Deletes the LoRA layers of `adapter_names` for the unet and text-encoder(s).
1286
+ 
+ Args:
1287
+ adapter_names (`Union[List[str], str]`):
1288
+ The names of the adapter(s) to delete. Can be a single string or a list of strings.
1289
+ """
1290
+ if not USE_PEFT_BACKEND:
1291
+ raise ValueError("PEFT backend is required for this method.")
1292
+
1293
+ if isinstance(adapter_names, str):
1294
+ adapter_names = [adapter_names]
1295
+
1296
+ # Delete unet adapters
1297
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1298
+ unet.delete_adapters(adapter_names)
1299
+
1300
+ for adapter_name in adapter_names:
1301
+ # Delete text encoder adapters
1302
+ if hasattr(self, "text_encoder"):
1303
+ delete_adapter_layers(self.text_encoder, adapter_name)
1304
+ if hasattr(self, "text_encoder_2"):
1305
+ delete_adapter_layers(self.text_encoder_2, adapter_name)
1306
+
1307
+ def get_active_adapters(self) -> List[str]:
1308
+ """
1309
+ Gets the list of the current active adapters.
1310
+
1311
+ Example:
1312
+
1313
+ ```python
1314
+ from diffusers import DiffusionPipeline
1315
+
1316
+ pipeline = DiffusionPipeline.from_pretrained(
1317
+ "stabilityai/stable-diffusion-xl-base-1.0",
1318
+ ).to("cuda")
1319
+ pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
1320
+ pipeline.get_active_adapters()
1321
+ ```
1322
+ """
1323
+ if not USE_PEFT_BACKEND:
1324
+ raise ValueError(
1325
+ "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
1326
+ )
1327
+
1328
+ from peft.tuners.tuners_utils import BaseTunerLayer
1329
+
1330
+ active_adapters = []
1331
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1332
+ for module in unet.modules():
1333
+ if isinstance(module, BaseTunerLayer):
1334
+ active_adapters = module.active_adapters
1335
+ break
1336
+
1337
+ return active_adapters
1338
+
1339
+ def get_list_adapters(self) -> Dict[str, List[str]]:
1340
+ """
1341
+ Gets the current list of all available adapters in the pipeline.
1342
+ """
1343
+ if not USE_PEFT_BACKEND:
1344
+ raise ValueError(
1345
+ "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
1346
+ )
1347
+
1348
+ set_adapters = {}
1349
+
1350
+ if hasattr(self, "text_encoder") and hasattr(self.text_encoder, "peft_config"):
1351
+ set_adapters["text_encoder"] = list(self.text_encoder.peft_config.keys())
1352
+
1353
+ if hasattr(self, "text_encoder_2") and hasattr(self.text_encoder_2, "peft_config"):
1354
+ set_adapters["text_encoder_2"] = list(self.text_encoder_2.peft_config.keys())
1355
+
1356
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1357
+ if hasattr(self, self.unet_name) and hasattr(unet, "peft_config"):
1358
+ set_adapters[self.unet_name] = list(unet.peft_config.keys())
1359
+
1360
+ return set_adapters
1361
+
1362
+ def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None:
1363
+ """
1364
+ Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case
1365
+ you want to load multiple adapters and free some GPU memory.
1366
+
1367
+ Args:
1368
+ adapter_names (`List[str]`):
1369
+ List of adapters to send device to.
1370
+ device (`Union[torch.device, str, int]`):
1371
+ Device to send the adapters to. Can be either a torch device, a str or an integer.
1372
+ """
1373
+ if not USE_PEFT_BACKEND:
1374
+ raise ValueError("PEFT backend is required for this method.")
1375
+
1376
+ from peft.tuners.tuners_utils import BaseTunerLayer
1377
+
1378
+ # Handle the UNET
1379
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1380
+ for unet_module in unet.modules():
1381
+ if isinstance(unet_module, BaseTunerLayer):
1382
+ for adapter_name in adapter_names:
1383
+ unet_module.lora_A[adapter_name].to(device)
1384
+ unet_module.lora_B[adapter_name].to(device)
1385
+
1386
+ # Handle the text encoder
1387
+ modules_to_process = []
1388
+ if hasattr(self, "text_encoder"):
1389
+ modules_to_process.append(self.text_encoder)
1390
+
1391
+ if hasattr(self, "text_encoder_2"):
1392
+ modules_to_process.append(self.text_encoder_2)
1393
+
1394
+ for text_encoder in modules_to_process:
1395
+ # loop over submodules
1396
+ for text_encoder_module in text_encoder.modules():
1397
+ if isinstance(text_encoder_module, BaseTunerLayer):
1398
+ for adapter_name in adapter_names:
1399
+ text_encoder_module.lora_A[adapter_name].to(device)
1400
+ text_encoder_module.lora_B[adapter_name].to(device)
1401
+
1402
+
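Taken together, the adapter-management helpers above support a workflow along the following lines. This is a hedged sketch: the checkpoints are the ones already referenced in the docstrings above, and a CUDA device is assumed.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")

pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.7, 0.3])  # weight both adapters
print(pipe.get_active_adapters())  # e.g. ['pixel', 'toy']
print(pipe.get_list_adapters())    # adapters registered per pipeline component

pipe.set_lora_device(["toy"], "cpu")  # park one adapter on the CPU to free VRAM
pipe.delete_adapters("toy")           # remove it entirely
```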
1403
+ class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
1404
+ """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""
1405
+
1406
+ # Override to properly handle the loading and unloading of the additional text encoder.
1407
+ def load_lora_weights(
1408
+ self,
1409
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
1410
+ adapter_name: Optional[str] = None,
1411
+ **kwargs,
1412
+ ):
1413
+ """
1414
+ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
1415
+ `self.text_encoder`.
1416
+
1417
+ All kwargs are forwarded to `self.lora_state_dict`.
1418
+
1419
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
1420
+
1421
+ See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
1422
+ `self.unet`.
1423
+
1424
+ See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
1425
+ into `self.text_encoder`.
1426
+
1427
+ Parameters:
1428
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
1429
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1430
+ adapter_name (`str`, *optional*):
1431
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
1432
+ `default_{i}` where i is the total number of adapters being loaded.
1433
+ kwargs (`dict`, *optional*):
1434
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1435
+ """
1436
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
1437
+ # it here explicitly to be able to tell that it's coming from an SDXL
1438
+ # pipeline.
1439
+
1440
+ # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
1441
+ state_dict, network_alphas = self.lora_state_dict(
1442
+ pretrained_model_name_or_path_or_dict,
1443
+ unet_config=self.unet.config,
1444
+ **kwargs,
1445
+ )
1446
+ is_correct_format = all("lora" in key for key in state_dict.keys())
1447
+ if not is_correct_format:
1448
+ raise ValueError("Invalid LoRA checkpoint.")
1449
+
1450
+ self.load_lora_into_unet(
1451
+ state_dict, network_alphas=network_alphas, unet=self.unet, adapter_name=adapter_name, _pipeline=self
1452
+ )
1453
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1454
+ if len(text_encoder_state_dict) > 0:
1455
+ self.load_lora_into_text_encoder(
1456
+ text_encoder_state_dict,
1457
+ network_alphas=network_alphas,
1458
+ text_encoder=self.text_encoder,
1459
+ prefix="text_encoder",
1460
+ lora_scale=self.lora_scale,
1461
+ adapter_name=adapter_name,
1462
+ _pipeline=self,
1463
+ )
1464
+
1465
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1466
+ if len(text_encoder_2_state_dict) > 0:
1467
+ self.load_lora_into_text_encoder(
1468
+ text_encoder_2_state_dict,
1469
+ network_alphas=network_alphas,
1470
+ text_encoder=self.text_encoder_2,
1471
+ prefix="text_encoder_2",
1472
+ lora_scale=self.lora_scale,
1473
+ adapter_name=adapter_name,
1474
+ _pipeline=self,
1475
+ )
1476
+
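Note that the two filters above do not overlap: a key such as `text_encoder_2.layers...` does not contain the substring `text_encoder.`, so every LoRA entry is routed to exactly one text encoder. In isolation (hypothetical keys):

```python
state_dict = {
    "text_encoder.text_model.encoder.layers.0.self_attn.q_proj.lora_A.weight": 1,
    "text_encoder_2.text_model.encoder.layers.0.self_attn.q_proj.lora_A.weight": 2,
}
te_1 = {k: v for k, v in state_dict.items() if "text_encoder." in k}
te_2 = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
assert len(te_1) == 1 and len(te_2) == 1  # each key lands in exactly one bucket
```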
1477
+ @classmethod
1478
+ def save_lora_weights(
1479
+ cls,
1480
+ save_directory: Union[str, os.PathLike],
1481
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1482
+ text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1483
+ text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1484
+ is_main_process: bool = True,
1485
+ weight_name: str = None,
1486
+ save_function: Callable = None,
1487
+ safe_serialization: bool = True,
1488
+ ):
1489
+ r"""
1490
+ Save the LoRA parameters corresponding to the UNet and text encoder.
1491
+
1492
+ Arguments:
1493
+ save_directory (`str` or `os.PathLike`):
1494
+ Directory to save LoRA parameters to. Will be created if it doesn't exist.
1495
+ unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
1496
+ State dict of the LoRA layers corresponding to the `unet`.
1497
+ text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
1498
+ State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
1499
+ encoder LoRA state dict because it comes from 🤗 Transformers.
1500
+ is_main_process (`bool`, *optional*, defaults to `True`):
1501
+ Whether the process calling this is the main process or not. Useful during distributed training when you
1502
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
1503
+ process to avoid race conditions.
1504
+ save_function (`Callable`):
1505
+ The function to use to save the state dictionary. Useful during distributed training when you need to
1506
+ replace `torch.save` with another method. Can be configured with the environment variable
1507
+ `DIFFUSERS_SAVE_MODE`.
1508
+ safe_serialization (`bool`, *optional*, defaults to `True`):
1509
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
1510
+ """
1511
+ state_dict = {}
1512
+
1513
+ def pack_weights(layers, prefix):
1514
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
1515
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1516
+ return layers_state_dict
1517
+
1518
+ if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
1519
+ raise ValueError(
1520
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
1521
+ )
1522
+
1523
+ if unet_lora_layers:
1524
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
1525
+
1526
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
1527
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1528
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1529
+
1530
+ cls.write_lora_layers(
1531
+ state_dict=state_dict,
1532
+ save_directory=save_directory,
1533
+ is_main_process=is_main_process,
1534
+ weight_name=weight_name,
1535
+ save_function=save_function,
1536
+ safe_serialization=safe_serialization,
1537
+ )
1538
+
1539
+ def _remove_text_encoder_monkey_patch(self):
1540
+ if USE_PEFT_BACKEND:
1541
+ recurse_remove_peft_layers(self.text_encoder)
1542
+ # TODO: @younesbelkada handle this in transformers side
1543
+ if getattr(self.text_encoder, "peft_config", None) is not None:
1544
+ del self.text_encoder.peft_config
1545
+ self.text_encoder._hf_peft_config_loaded = None
1546
+
1547
+ recurse_remove_peft_layers(self.text_encoder_2)
1548
+ if getattr(self.text_encoder_2, "peft_config", None) is not None:
1549
+ del self.text_encoder_2.peft_config
1550
+ self.text_encoder_2._hf_peft_config_loaded = None
1551
+ else:
1552
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1553
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
src/diffusers/loaders/lora_conversion_utils.py ADDED
@@ -0,0 +1,284 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+
17
+ from ..utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+
23
+ def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5):
24
+ # 1. get all state_dict_keys
25
+ all_keys = list(state_dict.keys())
26
+ sgm_patterns = ["input_blocks", "middle_block", "output_blocks"]
27
+
28
+ # 2. check if needs remapping, if not return original dict
29
+ is_in_sgm_format = False
30
+ for key in all_keys:
31
+ if any(p in key for p in sgm_patterns):
32
+ is_in_sgm_format = True
33
+ break
34
+
35
+ if not is_in_sgm_format:
36
+ return state_dict
37
+
38
+ # 3. Else remap from SGM patterns
39
+ new_state_dict = {}
40
+ inner_block_map = ["resnets", "attentions", "upsamplers"]
41
+
42
+ # Retrieves # of down, mid and up blocks
43
+ input_block_ids, middle_block_ids, output_block_ids = set(), set(), set()
44
+
45
+ for layer in all_keys:
46
+ if "text" in layer:
47
+ new_state_dict[layer] = state_dict.pop(layer)
48
+ else:
49
+ layer_id = int(layer.split(delimiter)[:block_slice_pos][-1])
50
+ if sgm_patterns[0] in layer:
51
+ input_block_ids.add(layer_id)
52
+ elif sgm_patterns[1] in layer:
53
+ middle_block_ids.add(layer_id)
54
+ elif sgm_patterns[2] in layer:
55
+ output_block_ids.add(layer_id)
56
+ else:
57
+ raise ValueError(f"Checkpoint not supported because layer {layer} not supported.")
58
+
59
+ input_blocks = {
60
+ layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key]
61
+ for layer_id in input_block_ids
62
+ }
63
+ middle_blocks = {
64
+ layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key]
65
+ for layer_id in middle_block_ids
66
+ }
67
+ output_blocks = {
68
+ layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key]
69
+ for layer_id in output_block_ids
70
+ }
71
+
72
+ # Rename keys accordingly
73
+ for i in input_block_ids:
74
+ block_id = (i - 1) // (unet_config.layers_per_block + 1)
75
+ layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1)
76
+
77
+ for key in input_blocks[i]:
78
+ inner_block_id = int(key.split(delimiter)[block_slice_pos])
79
+ inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers"
80
+ inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0"
81
+ new_key = delimiter.join(
82
+ key.split(delimiter)[: block_slice_pos - 1]
83
+ + [str(block_id), inner_block_key, inner_layers_in_block]
84
+ + key.split(delimiter)[block_slice_pos + 1 :]
85
+ )
86
+ new_state_dict[new_key] = state_dict.pop(key)
87
+
88
+ for i in middle_block_ids:
89
+ key_part = None
90
+ if i == 0:
91
+ key_part = [inner_block_map[0], "0"]
92
+ elif i == 1:
93
+ key_part = [inner_block_map[1], "0"]
94
+ elif i == 2:
95
+ key_part = [inner_block_map[0], "1"]
96
+ else:
97
+ raise ValueError(f"Invalid middle block id {i}.")
98
+
99
+ for key in middle_blocks[i]:
100
+ new_key = delimiter.join(
101
+ key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]
102
+ )
103
+ new_state_dict[new_key] = state_dict.pop(key)
104
+
105
+ for i in output_block_ids:
106
+ block_id = i // (unet_config.layers_per_block + 1)
107
+ layer_in_block_id = i % (unet_config.layers_per_block + 1)
108
+
109
+ for key in output_blocks[i]:
110
+ inner_block_id = int(key.split(delimiter)[block_slice_pos])
111
+ inner_block_key = inner_block_map[inner_block_id]
112
+ inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0"
113
+ new_key = delimiter.join(
114
+ key.split(delimiter)[: block_slice_pos - 1]
115
+ + [str(block_id), inner_block_key, inner_layers_in_block]
116
+ + key.split(delimiter)[block_slice_pos + 1 :]
117
+ )
118
+ new_state_dict[new_key] = state_dict.pop(key)
119
+
120
+ if len(state_dict) > 0:
121
+ raise ValueError("At this point all state dict entries have to be converted.")
122
+
123
+ return new_state_dict
124
+
125
+
126
+ def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"):
127
+ unet_state_dict = {}
128
+ te_state_dict = {}
129
+ te2_state_dict = {}
130
+ network_alphas = {}
131
+
132
+ # every down weight has a corresponding up weight and potentially an alpha weight
133
+ lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")]
134
+ for key in lora_keys:
135
+ lora_name = key.split(".")[0]
136
+ lora_name_up = lora_name + ".lora_up.weight"
137
+ lora_name_alpha = lora_name + ".alpha"
138
+
139
+ if lora_name.startswith("lora_unet_"):
140
+ diffusers_name = key.replace("lora_unet_", "").replace("_", ".")
141
+
142
+ if "input.blocks" in diffusers_name:
143
+ diffusers_name = diffusers_name.replace("input.blocks", "down_blocks")
144
+ else:
145
+ diffusers_name = diffusers_name.replace("down.blocks", "down_blocks")
146
+
147
+ if "middle.block" in diffusers_name:
148
+ diffusers_name = diffusers_name.replace("middle.block", "mid_block")
149
+ else:
150
+ diffusers_name = diffusers_name.replace("mid.block", "mid_block")
151
+ if "output.blocks" in diffusers_name:
152
+ diffusers_name = diffusers_name.replace("output.blocks", "up_blocks")
153
+ else:
154
+ diffusers_name = diffusers_name.replace("up.blocks", "up_blocks")
155
+
156
+ diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks")
157
+ diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora")
158
+ diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora")
159
+ diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora")
160
+ diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora")
161
+ diffusers_name = diffusers_name.replace("proj.in", "proj_in")
162
+ diffusers_name = diffusers_name.replace("proj.out", "proj_out")
163
+ diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj")
164
+
165
+ # SDXL specificity.
166
+ if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name:
167
+ pattern = r"\.\d+(?=\D*$)"
168
+ diffusers_name = re.sub(pattern, "", diffusers_name, count=1)
169
+ if ".in." in diffusers_name:
170
+ diffusers_name = diffusers_name.replace("in.layers.2", "conv1")
171
+ if ".out." in diffusers_name:
172
+ diffusers_name = diffusers_name.replace("out.layers.3", "conv2")
173
+ if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name:
174
+ diffusers_name = diffusers_name.replace("op", "conv")
175
+ if "skip" in diffusers_name:
176
+ diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut")
177
+
178
+ # LyCORIS specificity.
179
+ if "time.emb.proj" in diffusers_name:
180
+ diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj")
181
+ if "conv.shortcut" in diffusers_name:
182
+ diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut")
183
+
184
+ # General coverage.
185
+ if "transformer_blocks" in diffusers_name:
186
+ if "attn1" in diffusers_name or "attn2" in diffusers_name:
187
+ diffusers_name = diffusers_name.replace("attn1", "attn1.processor")
188
+ diffusers_name = diffusers_name.replace("attn2", "attn2.processor")
189
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
190
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
191
+ elif "ff" in diffusers_name:
192
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
193
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
194
+ elif any(key in diffusers_name for key in ("proj_in", "proj_out")):
195
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
196
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
197
+ else:
198
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
199
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
200
+
201
+ elif lora_name.startswith("lora_te_"):
202
+ diffusers_name = key.replace("lora_te_", "").replace("_", ".")
203
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
204
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
205
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
206
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
207
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
208
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
209
+ if "self_attn" in diffusers_name:
210
+ te_state_dict[diffusers_name] = state_dict.pop(key)
211
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
212
+ elif "mlp" in diffusers_name:
213
+ # Be aware that this is the new diffusers convention and the rest of the code might
214
+ # not utilize it yet.
215
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
216
+ te_state_dict[diffusers_name] = state_dict.pop(key)
217
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
218
+
219
+ # (sayakpaul): Duplicate code. Needs to be cleaned.
220
+ elif lora_name.startswith("lora_te1_"):
221
+ diffusers_name = key.replace("lora_te1_", "").replace("_", ".")
222
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
223
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
224
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
225
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
226
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
227
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
228
+ if "self_attn" in diffusers_name:
229
+ te_state_dict[diffusers_name] = state_dict.pop(key)
230
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
231
+ elif "mlp" in diffusers_name:
232
+ # Be aware that this is the new diffusers convention and the rest of the code might
233
+ # not utilize it yet.
234
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
235
+ te_state_dict[diffusers_name] = state_dict.pop(key)
236
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
237
+
238
+ # (sayakpaul): Duplicate code. Needs to be cleaned.
239
+ elif lora_name.startswith("lora_te2_"):
240
+ diffusers_name = key.replace("lora_te2_", "").replace("_", ".")
241
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
242
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
243
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
244
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
245
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
246
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
247
+ if "self_attn" in diffusers_name:
248
+ te2_state_dict[diffusers_name] = state_dict.pop(key)
249
+ te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
250
+ elif "mlp" in diffusers_name:
251
+ # Be aware that this is the new diffusers convention and the rest of the code might
252
+ # not utilize it yet.
253
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
254
+ te2_state_dict[diffusers_name] = state_dict.pop(key)
255
+ te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
256
+
257
+ # Rename the alphas so that they can be mapped appropriately.
258
+ if lora_name_alpha in state_dict:
259
+ alpha = state_dict.pop(lora_name_alpha).item()
260
+ if lora_name_alpha.startswith("lora_unet_"):
261
+ prefix = "unet."
262
+ elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")):
263
+ prefix = "text_encoder."
264
+ else:
265
+ prefix = "text_encoder_2."
266
+ new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha"
267
+ network_alphas.update({new_name: alpha})
268
+
269
+ if len(state_dict) > 0:
270
+ raise ValueError(f"The following keys have not been correctly renamed: \n\n {', '.join(state_dict.keys())}")
271
+
272
+ logger.info("Kohya-style checkpoint detected.")
273
+ unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()}
274
+ te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()}
275
+ te2_state_dict = (
276
+ {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()}
277
+ if len(te2_state_dict) > 0
278
+ else None
279
+ )
280
+ if te2_state_dict is not None:
281
+ te_state_dict.update(te2_state_dict)
282
+
283
+ new_state_dict = {**unet_state_dict, **te_state_dict}
284
+ return new_state_dict, network_alphas
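Editorial note: to make the renaming chain above concrete, here is a minimal sketch (not part of the diff) that applies the same replacements by hand; the sample key is hypothetical but follows the Kohya `lora_te1_*` naming handled above.

```py
# A minimal sketch of the Kohya -> diffusers key renaming for a text-encoder attention weight.
# The sample key is hypothetical.
key = "lora_te1_text_model_encoder_layers_0_self_attn_q_proj.lora_down.weight"
name = key.replace("lora_te1_", "").replace("_", ".")
name = name.replace("text.model", "text_model")
name = name.replace("self.attn", "self_attn")
name = name.replace("q.proj.lora", "to_q_lora")
print(name)  # text_model.encoder.layers.0.self_attn.to_q_lora.down.weight
```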
src/diffusers/loaders/single_file.py ADDED
@@ -0,0 +1,637 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from contextlib import nullcontext
15
+ from io import BytesIO
16
+ from pathlib import Path
17
+
18
+ import requests
19
+ import torch
20
+ from huggingface_hub import hf_hub_download
21
+ from huggingface_hub.utils import validate_hf_hub_args
22
+
23
+ from ..utils import (
24
+ deprecate,
25
+ is_accelerate_available,
26
+ is_omegaconf_available,
27
+ is_transformers_available,
28
+ logging,
29
+ )
30
+ from ..utils.import_utils import BACKENDS_MAPPING
31
+
32
+
33
+ if is_transformers_available():
34
+ pass
35
+
36
+ if is_accelerate_available():
37
+ from accelerate import init_empty_weights
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ class FromSingleFileMixin:
43
+ """
44
+ Load model weights saved in the `.ckpt` format into a [`DiffusionPipeline`].
45
+ """
46
+
47
+ @classmethod
48
+ def from_ckpt(cls, *args, **kwargs):
49
+ deprecation_message = "The function `from_ckpt` is deprecated in favor of `from_single_file` and will be removed in diffusers v0.21. Please make sure to use `StableDiffusionPipeline.from_single_file(...)` instead."
50
+ deprecate("from_ckpt", "0.21.0", deprecation_message, standard_warn=False)
51
+ return cls.from_single_file(*args, **kwargs)
52
+
53
+ @classmethod
54
+ @validate_hf_hub_args
55
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
56
+ r"""
57
+ Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors`
58
+ format. The pipeline is set in evaluation mode (`model.eval()`) by default.
59
+
60
+ Parameters:
61
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
62
+ Can be either:
63
+ - A link to the `.ckpt` file (for example
64
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
65
+ - A path to a *file* containing all pipeline weights.
66
+ torch_dtype (`str` or `torch.dtype`, *optional*):
67
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
68
+ dtype is automatically derived from the model's weights.
69
+ force_download (`bool`, *optional*, defaults to `False`):
70
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
71
+ cached versions if they exist.
72
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
73
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
74
+ is not used.
75
+ resume_download (`bool`, *optional*, defaults to `False`):
76
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
77
+ incompletely downloaded files are deleted.
78
+ proxies (`Dict[str, str]`, *optional*):
79
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
80
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
81
+ local_files_only (`bool`, *optional*, defaults to `False`):
82
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
83
+ won't be downloaded from the Hub.
84
+ token (`str` or *bool*, *optional*):
85
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
86
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
87
+ revision (`str`, *optional*, defaults to `"main"`):
88
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
89
+ allowed by Git.
90
+ use_safetensors (`bool`, *optional*, defaults to `None`):
91
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
92
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
93
+ weights. If set to `False`, safetensors weights are not loaded.
94
+ extract_ema (`bool`, *optional*, defaults to `False`):
95
+ Whether to extract the EMA weights or not. Pass `True` to extract the EMA weights which usually yield
96
+ higher quality images for inference. Non-EMA weights are usually better for continuing finetuning.
97
+ upcast_attention (`bool`, *optional*, defaults to `None`):
98
+ Whether the attention computation should always be upcasted.
99
+ image_size (`int`, *optional*, defaults to 512):
100
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
101
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
102
+ prediction_type (`str`, *optional*):
103
+ The prediction type the model was trained on. Use `'epsilon'` for all Stable Diffusion v1 models and
104
+ the Stable Diffusion v2 base model. Use `'v_prediction'` for Stable Diffusion v2.
105
+ num_in_channels (`int`, *optional*, defaults to `None`):
106
+ The number of input channels. If `None`, it is automatically inferred.
107
+ scheduler_type (`str`, *optional*, defaults to `"pndm"`):
108
+ Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
109
+ "ddim"]`.
110
+ load_safety_checker (`bool`, *optional*, defaults to `True`):
111
+ Whether to load the safety checker or not.
112
+ text_encoder ([`~transformers.CLIPTextModel`], *optional*, defaults to `None`):
113
+ An instance of `CLIPTextModel` to use, specifically the
114
+ [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. If this
115
+ parameter is `None`, the function loads a new instance of `CLIPTextModel` by itself if needed.
116
+ vae (`AutoencoderKL`, *optional*, defaults to `None`):
117
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
118
+ this parameter is `None`, the function loads a new instance of `AutoencoderKL` by itself if needed.
119
+ tokenizer ([`~transformers.CLIPTokenizer`], *optional*, defaults to `None`):
120
+ An instance of `CLIPTokenizer` to use. If this parameter is `None`, the function loads a new instance
121
+ of `CLIPTokenizer` by itself if needed.
122
+ original_config_file (`str`):
123
+ Path to `.yaml` config file corresponding to the original architecture. If `None`, will be
124
+ automatically inferred by looking for a key that only exists in SD2.0 models.
125
+ kwargs (remaining dictionary of keyword arguments, *optional*):
126
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
127
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
128
+ method. See example below for more information.
129
+
130
+ Examples:
131
+
132
+ ```py
133
+ >>> from diffusers import StableDiffusionPipeline
134
+
135
+ >>> # Download pipeline from huggingface.co and cache.
136
+ >>> pipeline = StableDiffusionPipeline.from_single_file(
137
+ ... "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
138
+ ... )
139
+
140
+ >>> # Load pipeline from a local file
141
+ >>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt
142
+ >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly.ckpt")
143
+
144
+ >>> # Enable float16 and move to GPU
145
+ >>> pipeline = StableDiffusionPipeline.from_single_file(
146
+ ... "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
147
+ ... torch_dtype=torch.float16,
148
+ ... )
149
+ >>> pipeline.to("cuda")
150
+ ```
151
+ """
152
+ # import here to avoid circular dependency
153
+ from ..pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
154
+
155
+ original_config_file = kwargs.pop("original_config_file", None)
156
+ config_files = kwargs.pop("config_files", None)
157
+ cache_dir = kwargs.pop("cache_dir", None)
158
+ resume_download = kwargs.pop("resume_download", False)
159
+ force_download = kwargs.pop("force_download", False)
160
+ proxies = kwargs.pop("proxies", None)
161
+ local_files_only = kwargs.pop("local_files_only", None)
162
+ token = kwargs.pop("token", None)
163
+ revision = kwargs.pop("revision", None)
164
+ extract_ema = kwargs.pop("extract_ema", False)
165
+ image_size = kwargs.pop("image_size", None)
166
+ scheduler_type = kwargs.pop("scheduler_type", "pndm")
167
+ num_in_channels = kwargs.pop("num_in_channels", None)
168
+ upcast_attention = kwargs.pop("upcast_attention", None)
169
+ load_safety_checker = kwargs.pop("load_safety_checker", True)
170
+ prediction_type = kwargs.pop("prediction_type", None)
171
+ text_encoder = kwargs.pop("text_encoder", None)
172
+ text_encoder_2 = kwargs.pop("text_encoder_2", None)
173
+ vae = kwargs.pop("vae", None)
174
+ controlnet = kwargs.pop("controlnet", None)
175
+ adapter = kwargs.pop("adapter", None)
176
+ tokenizer = kwargs.pop("tokenizer", None)
177
+ tokenizer_2 = kwargs.pop("tokenizer_2", None)
178
+
179
+ torch_dtype = kwargs.pop("torch_dtype", None)
180
+
181
+ use_safetensors = kwargs.pop("use_safetensors", None)
182
+
183
+ pipeline_name = cls.__name__
184
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
185
+ from_safetensors = file_extension == "safetensors"
186
+
187
+ if from_safetensors and use_safetensors is False:
188
+ raise ValueError("You passed a `.safetensors` checkpoint but set `use_safetensors=False`. Remove `use_safetensors=False` or make sure `safetensors` is installed with `pip install safetensors`.")
189
+
190
+ # TODO: For now we only support stable diffusion
191
+ stable_unclip = None
192
+ model_type = None
193
+
194
+ if pipeline_name in [
195
+ "StableDiffusionControlNetPipeline",
196
+ "StableDiffusionControlNetImg2ImgPipeline",
197
+ "StableDiffusionControlNetInpaintPipeline",
198
+ ]:
199
+ from ..models.controlnet import ControlNetModel
200
+ from ..pipelines.controlnet.multicontrolnet import MultiControlNetModel
201
+
202
+ # list/tuple or a single instance of ControlNetModel or MultiControlNetModel
203
+ if not (
204
+ isinstance(controlnet, (ControlNetModel, MultiControlNetModel))
205
+ or isinstance(controlnet, (list, tuple))
206
+ and isinstance(controlnet[0], ControlNetModel)
207
+ ):
208
+ raise ValueError("ControlNet needs to be passed if loading from ControlNet pipeline.")
209
+ elif "StableDiffusion" in pipeline_name:
210
+ # Model type will be inferred from the checkpoint.
211
+ pass
212
+ elif pipeline_name == "StableUnCLIPPipeline":
213
+ model_type = "FrozenOpenCLIPEmbedder"
214
+ stable_unclip = "txt2img"
215
+ elif pipeline_name == "StableUnCLIPImg2ImgPipeline":
216
+ model_type = "FrozenOpenCLIPEmbedder"
217
+ stable_unclip = "img2img"
218
+ elif pipeline_name == "PaintByExamplePipeline":
219
+ model_type = "PaintByExample"
220
+ elif pipeline_name == "LDMTextToImagePipeline":
221
+ model_type = "LDMTextToImage"
222
+ else:
223
+ raise ValueError(f"Unhandled pipeline class: {pipeline_name}")
224
+
225
+ # remove huggingface url
226
+ has_valid_url_prefix = False
227
+ valid_url_prefixes = ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]
228
+ for prefix in valid_url_prefixes:
229
+ if pretrained_model_link_or_path.startswith(prefix):
230
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
231
+ has_valid_url_prefix = True
232
+
233
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
234
+ ckpt_path = Path(pretrained_model_link_or_path)
235
+ if not ckpt_path.is_file():
236
+ if not has_valid_url_prefix:
237
+ raise ValueError(
238
+ f"The provided path is not a local file and is not a valid Hugging Face URL. Valid URLs begin with {', '.join(valid_url_prefixes)}"
239
+ )
240
+
241
+ # get repo_id and (potentially nested) file path of ckpt in repo
242
+ repo_id = "/".join(ckpt_path.parts[:2])
243
+ file_path = "/".join(ckpt_path.parts[2:])
244
+
245
+ if file_path.startswith("blob/"):
246
+ file_path = file_path[len("blob/") :]
247
+
248
+ if file_path.startswith("main/"):
249
+ file_path = file_path[len("main/") :]
250
+
251
+ pretrained_model_link_or_path = hf_hub_download(
252
+ repo_id,
253
+ filename=file_path,
254
+ cache_dir=cache_dir,
255
+ resume_download=resume_download,
256
+ proxies=proxies,
257
+ local_files_only=local_files_only,
258
+ token=token,
259
+ revision=revision,
260
+ force_download=force_download,
261
+ )
262
+
263
+ pipe = download_from_original_stable_diffusion_ckpt(
264
+ pretrained_model_link_or_path,
265
+ pipeline_class=cls,
266
+ model_type=model_type,
267
+ stable_unclip=stable_unclip,
268
+ controlnet=controlnet,
269
+ adapter=adapter,
270
+ from_safetensors=from_safetensors,
271
+ extract_ema=extract_ema,
272
+ image_size=image_size,
273
+ scheduler_type=scheduler_type,
274
+ num_in_channels=num_in_channels,
275
+ upcast_attention=upcast_attention,
276
+ load_safety_checker=load_safety_checker,
277
+ prediction_type=prediction_type,
278
+ text_encoder=text_encoder,
279
+ text_encoder_2=text_encoder_2,
280
+ vae=vae,
281
+ tokenizer=tokenizer,
282
+ tokenizer_2=tokenizer_2,
283
+ original_config_file=original_config_file,
284
+ config_files=config_files,
285
+ local_files_only=local_files_only,
286
+ )
287
+
288
+ if torch_dtype is not None:
289
+ pipe.to(dtype=torch_dtype)
290
+
291
+ return pipe
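Editorial note: the Hub-link handling above (prefix stripping, then splitting into `repo_id` and a possibly nested `file_path`) can be sketched outside the class; this is a minimal example, and the URL is only illustrative.

```py
# A minimal sketch of the URL parsing performed by from_single_file (illustrative URL).
from pathlib import Path

link = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt"
for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
    if link.startswith(prefix):
        link = link[len(prefix) :]
parts = Path(link).parts
repo_id = "/".join(parts[:2])    # "runwayml/stable-diffusion-v1-5"
file_path = "/".join(parts[2:])  # "blob/main/v1-5-pruned-emaonly.ckpt"
for marker in ("blob/", "main/"):
    if file_path.startswith(marker):
        file_path = file_path[len(marker) :]
print(repo_id, file_path)  # runwayml/stable-diffusion-v1-5 v1-5-pruned-emaonly.ckpt
```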
292
+
293
+
294
+ class FromOriginalVAEMixin:
295
+ """
296
+ Load pretrained VAE weights saved in the `.ckpt` or `.safetensors` format into an [`AutoencoderKL`].
297
+ """
298
+
299
+ @classmethod
300
+ @validate_hf_hub_args
301
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
302
+ r"""
303
+ Instantiate an [`AutoencoderKL`] from pretrained VAE weights saved in the original `.ckpt` or
304
+ `.safetensors` format. The model is set in evaluation mode (`model.eval()`) by default.
305
+
306
+ Parameters:
307
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
308
+ Can be either:
309
+ - A link to the `.ckpt` file (for example
310
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
311
+ - A path to a *file* containing all pipeline weights.
312
+ torch_dtype (`str` or `torch.dtype`, *optional*):
313
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
314
+ dtype is automatically derived from the model's weights.
315
+ force_download (`bool`, *optional*, defaults to `False`):
316
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
317
+ cached versions if they exist.
318
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
319
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
320
+ is not used.
321
+ resume_download (`bool`, *optional*, defaults to `False`):
322
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
323
+ incompletely downloaded files are deleted.
324
+ proxies (`Dict[str, str]`, *optional*):
325
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
326
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
327
+ local_files_only (`bool`, *optional*, defaults to `False`):
328
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
329
+ won't be downloaded from the Hub.
330
+ token (`str` or *bool*, *optional*):
331
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
332
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
333
+ revision (`str`, *optional*, defaults to `"main"`):
334
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
335
+ allowed by Git.
336
+ image_size (`int`, *optional*, defaults to 512):
337
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
338
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
339
+ use_safetensors (`bool`, *optional*, defaults to `None`):
340
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
341
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
342
+ weights. If set to `False`, safetensors weights are not loaded.
343
+ upcast_attention (`bool`, *optional*, defaults to `None`):
344
+ Whether the attention computation should always be upcasted.
345
+ scaling_factor (`float`, *optional*, defaults to 0.18215):
346
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
347
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
348
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
349
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z
350
+ = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
351
+ Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
352
+ kwargs (remaining dictionary of keyword arguments, *optional*):
353
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
354
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
355
+ method. See example below for more information.
356
+
357
+ <Tip warning={true}>
358
+
359
+ Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you're loading
360
+ a VAE from SDXL or a Stable Diffusion v2 model or higher.
361
+
362
+ </Tip>
363
+
364
+ Examples:
365
+
366
+ ```py
367
+ from diffusers import AutoencoderKL
368
+
369
+ url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be local file
370
+ model = AutoencoderKL.from_single_file(url)
371
+ ```
372
+ """
373
+ if not is_omegaconf_available():
374
+ raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
375
+
376
+ from omegaconf import OmegaConf
377
+
378
+ from ..models import AutoencoderKL
379
+
380
+ # import here to avoid circular dependency
381
+ from ..pipelines.stable_diffusion.convert_from_ckpt import (
382
+ convert_ldm_vae_checkpoint,
383
+ create_vae_diffusers_config,
384
+ )
385
+
386
+ config_file = kwargs.pop("config_file", None)
387
+ cache_dir = kwargs.pop("cache_dir", None)
388
+ resume_download = kwargs.pop("resume_download", False)
389
+ force_download = kwargs.pop("force_download", False)
390
+ proxies = kwargs.pop("proxies", None)
391
+ local_files_only = kwargs.pop("local_files_only", None)
392
+ token = kwargs.pop("token", None)
393
+ revision = kwargs.pop("revision", None)
394
+ image_size = kwargs.pop("image_size", None)
395
+ scaling_factor = kwargs.pop("scaling_factor", None)
396
+ kwargs.pop("upcast_attention", None)
397
+
398
+ torch_dtype = kwargs.pop("torch_dtype", None)
399
+
400
+ use_safetensors = kwargs.pop("use_safetensors", None)
401
+
402
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
403
+ from_safetensors = file_extension == "safetensors"
404
+
405
+ if from_safetensors and use_safetensors is False:
406
+ raise ValueError("You passed a `.safetensors` checkpoint but set `use_safetensors=False`. Remove `use_safetensors=False` or make sure `safetensors` is installed with `pip install safetensors`.")
407
+
408
+ # remove huggingface url
409
+ for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
410
+ if pretrained_model_link_or_path.startswith(prefix):
411
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
412
+
413
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
414
+ ckpt_path = Path(pretrained_model_link_or_path)
415
+ if not ckpt_path.is_file():
416
+ # get repo_id and (potentially nested) file path of ckpt in repo
417
+ repo_id = "/".join(ckpt_path.parts[:2])
418
+ file_path = "/".join(ckpt_path.parts[2:])
419
+
420
+ if file_path.startswith("blob/"):
421
+ file_path = file_path[len("blob/") :]
422
+
423
+ if file_path.startswith("main/"):
424
+ file_path = file_path[len("main/") :]
425
+
426
+ pretrained_model_link_or_path = hf_hub_download(
427
+ repo_id,
428
+ filename=file_path,
429
+ cache_dir=cache_dir,
430
+ resume_download=resume_download,
431
+ proxies=proxies,
432
+ local_files_only=local_files_only,
433
+ token=token,
434
+ revision=revision,
435
+ force_download=force_download,
436
+ )
437
+
438
+ if from_safetensors:
439
+ from safetensors import safe_open
440
+
441
+ checkpoint = {}
442
+ with safe_open(pretrained_model_link_or_path, framework="pt", device="cpu") as f:
443
+ for key in f.keys():
444
+ checkpoint[key] = f.get_tensor(key)
445
+ else:
446
+ checkpoint = torch.load(pretrained_model_link_or_path, map_location="cpu")
447
+
448
+ if "state_dict" in checkpoint:
449
+ checkpoint = checkpoint["state_dict"]
450
+
451
+ if config_file is None:
452
+ config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
453
+ config_file = BytesIO(requests.get(config_url).content)
454
+
455
+ original_config = OmegaConf.load(config_file)
456
+
457
+ # default to sd-v1-5
458
+ image_size = image_size or 512
459
+
460
+ vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
461
+ converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
462
+
463
+ if scaling_factor is None:
464
+ if (
465
+ "model" in original_config
466
+ and "params" in original_config.model
467
+ and "scale_factor" in original_config.model.params
468
+ ):
469
+ vae_scaling_factor = original_config.model.params.scale_factor
470
+ else:
471
+ vae_scaling_factor = 0.18215 # default SD scaling factor
472
+
473
+ vae_config["scaling_factor"] = vae_scaling_factor
474
+
475
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
476
+ with ctx():
477
+ vae = AutoencoderKL(**vae_config)
478
+
479
+ if is_accelerate_available():
480
+ from ..models.modeling_utils import load_model_dict_into_meta
481
+
482
+ load_model_dict_into_meta(vae, converted_vae_checkpoint, device="cpu")
483
+ else:
484
+ vae.load_state_dict(converted_vae_checkpoint)
485
+
486
+ if torch_dtype is not None:
487
+ vae.to(dtype=torch_dtype)
488
+
489
+ return vae
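Editorial note: per the Tip in the docstring above, `image_size` and `scaling_factor` should be passed explicitly for SD v2 / SDXL VAEs. A hedged usage sketch follows; the checkpoint file name is an assumption, while 0.13025 is the published SDXL latent scaling factor.

```py
# Editorial usage sketch; the checkpoint file name below is an assumption.
import torch
from diffusers import AutoencoderKL

url = "https://huggingface.co/stabilityai/sdxl-vae/blob/main/sdxl_vae.safetensors"
vae = AutoencoderKL.from_single_file(
    url,
    image_size=1024,         # SDXL VAEs are trained at 1024x1024
    scaling_factor=0.13025,  # SDXL latent scaling factor
    torch_dtype=torch.float16,
)
```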
490
+
491
+
492
+ class FromOriginalControlnetMixin:
493
+ """
494
+ Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into a [`ControlNetModel`].
495
+ """
496
+
497
+ @classmethod
498
+ @validate_hf_hub_args
499
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
500
+ r"""
501
+ Instantiate a [`ControlNetModel`] from pretrained ControlNet weights saved in the original `.ckpt` or
502
+ `.safetensors` format. The model is set in evaluation mode (`model.eval()`) by default.
503
+
504
+ Parameters:
505
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
506
+ Can be either:
507
+ - A link to the `.ckpt` file (for example
508
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
509
+ - A path to a *file* containing all pipeline weights.
510
+ torch_dtype (`str` or `torch.dtype`, *optional*):
511
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
512
+ dtype is automatically derived from the model's weights.
513
+ force_download (`bool`, *optional*, defaults to `False`):
514
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
515
+ cached versions if they exist.
516
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
517
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
518
+ is not used.
519
+ resume_download (`bool`, *optional*, defaults to `False`):
520
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
521
+ incompletely downloaded files are deleted.
522
+ proxies (`Dict[str, str]`, *optional*):
523
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
524
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
525
+ local_files_only (`bool`, *optional*, defaults to `False`):
526
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
527
+ won't be downloaded from the Hub.
528
+ token (`str` or *bool*, *optional*):
529
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
530
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
531
+ revision (`str`, *optional*, defaults to `"main"`):
532
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
533
+ allowed by Git.
534
+ use_safetensors (`bool`, *optional*, defaults to `None`):
535
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
536
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
537
+ weights. If set to `False`, safetensors weights are not loaded.
538
+ image_size (`int`, *optional*, defaults to 512):
539
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
540
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
541
+ upcast_attention (`bool`, *optional*, defaults to `None`):
542
+ Whether the attention computation should always be upcasted.
543
+ kwargs (remaining dictionary of keyword arguments, *optional*):
544
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
545
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
546
+ method. See example below for more information.
547
+
548
+ Examples:
549
+
550
+ ```py
551
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
552
+
553
+ url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path
554
+ controlnet = ControlNetModel.from_single_file(url)
555
+
556
+ url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path
557
+ pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
558
+ ```
559
+ """
560
+ # import here to avoid circular dependency
561
+ from ..pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
562
+
563
+ config_file = kwargs.pop("config_file", None)
564
+ cache_dir = kwargs.pop("cache_dir", None)
565
+ resume_download = kwargs.pop("resume_download", False)
566
+ force_download = kwargs.pop("force_download", False)
567
+ proxies = kwargs.pop("proxies", None)
568
+ local_files_only = kwargs.pop("local_files_only", None)
569
+ token = kwargs.pop("token", None)
570
+ num_in_channels = kwargs.pop("num_in_channels", None)
571
+ use_linear_projection = kwargs.pop("use_linear_projection", None)
572
+ revision = kwargs.pop("revision", None)
573
+ extract_ema = kwargs.pop("extract_ema", False)
574
+ image_size = kwargs.pop("image_size", None)
575
+ upcast_attention = kwargs.pop("upcast_attention", None)
576
+
577
+ torch_dtype = kwargs.pop("torch_dtype", None)
578
+
579
+ use_safetensors = kwargs.pop("use_safetensors", None)
580
+
581
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
582
+ from_safetensors = file_extension == "safetensors"
583
+
584
+ if from_safetensors and use_safetensors is False:
585
+ raise ValueError("You passed a `.safetensors` checkpoint but set `use_safetensors=False`. Remove `use_safetensors=False` or make sure `safetensors` is installed with `pip install safetensors`.")
586
+
587
+ # remove huggingface url
588
+ for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
589
+ if pretrained_model_link_or_path.startswith(prefix):
590
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
591
+
592
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
593
+ ckpt_path = Path(pretrained_model_link_or_path)
594
+ if not ckpt_path.is_file():
595
+ # get repo_id and (potentially nested) file path of ckpt in repo
596
+ repo_id = "/".join(ckpt_path.parts[:2])
597
+ file_path = "/".join(ckpt_path.parts[2:])
598
+
599
+ if file_path.startswith("blob/"):
600
+ file_path = file_path[len("blob/") :]
601
+
602
+ if file_path.startswith("main/"):
603
+ file_path = file_path[len("main/") :]
604
+
605
+ pretrained_model_link_or_path = hf_hub_download(
606
+ repo_id,
607
+ filename=file_path,
608
+ cache_dir=cache_dir,
609
+ resume_download=resume_download,
610
+ proxies=proxies,
611
+ local_files_only=local_files_only,
612
+ token=token,
613
+ revision=revision,
614
+ force_download=force_download,
615
+ )
616
+
617
+ if config_file is None:
618
+ config_url = "https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml"
619
+ config_file = BytesIO(requests.get(config_url).content)
620
+
621
+ image_size = image_size or 512
622
+
623
+ controlnet = download_controlnet_from_original_ckpt(
624
+ pretrained_model_link_or_path,
625
+ original_config_file=config_file,
626
+ image_size=image_size,
627
+ extract_ema=extract_ema,
628
+ num_in_channels=num_in_channels,
629
+ upcast_attention=upcast_attention,
630
+ from_safetensors=from_safetensors,
631
+ use_linear_projection=use_linear_projection,
632
+ )
633
+
634
+ if torch_dtype is not None:
635
+ controlnet.to(dtype=torch_dtype)
636
+
637
+ return controlnet
src/diffusers/loaders/textual_inversion.py ADDED
@@ -0,0 +1,455 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, List, Optional, Union
15
+
16
+ import safetensors
17
+ import torch
18
+ from huggingface_hub.utils import validate_hf_hub_args
19
+ from torch import nn
20
+
21
+ from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging
22
+
23
+
24
+ if is_transformers_available():
25
+ from transformers import PreTrainedModel, PreTrainedTokenizer
26
+
27
+ if is_accelerate_available():
28
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+ TEXT_INVERSION_NAME = "learned_embeds.bin"
33
+ TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors"
34
+
35
+
36
+ @validate_hf_hub_args
37
+ def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs):
38
+ cache_dir = kwargs.pop("cache_dir", None)
39
+ force_download = kwargs.pop("force_download", False)
40
+ resume_download = kwargs.pop("resume_download", False)
41
+ proxies = kwargs.pop("proxies", None)
42
+ local_files_only = kwargs.pop("local_files_only", None)
43
+ token = kwargs.pop("token", None)
44
+ revision = kwargs.pop("revision", None)
45
+ subfolder = kwargs.pop("subfolder", None)
46
+ weight_name = kwargs.pop("weight_name", None)
47
+ use_safetensors = kwargs.pop("use_safetensors", None)
48
+
49
+ allow_pickle = False
50
+ if use_safetensors is None:
51
+ use_safetensors = True
52
+ allow_pickle = True
53
+
54
+ user_agent = {
55
+ "file_type": "text_inversion",
56
+ "framework": "pytorch",
57
+ }
58
+ state_dicts = []
59
+ for pretrained_model_name_or_path in pretrained_model_name_or_paths:
60
+ if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)):
61
+ # 3.1. Load textual inversion file
62
+ model_file = None
63
+
64
+ # Let's first try to load .safetensors weights
65
+ if (use_safetensors and weight_name is None) or (
66
+ weight_name is not None and weight_name.endswith(".safetensors")
67
+ ):
68
+ try:
69
+ model_file = _get_model_file(
70
+ pretrained_model_name_or_path,
71
+ weights_name=weight_name or TEXT_INVERSION_NAME_SAFE,
72
+ cache_dir=cache_dir,
73
+ force_download=force_download,
74
+ resume_download=resume_download,
75
+ proxies=proxies,
76
+ local_files_only=local_files_only,
77
+ token=token,
78
+ revision=revision,
79
+ subfolder=subfolder,
80
+ user_agent=user_agent,
81
+ )
82
+ state_dict = safetensors.torch.load_file(model_file, device="cpu")
83
+ except Exception as e:
84
+ if not allow_pickle:
85
+ raise e
86
+
87
+ model_file = None
88
+
89
+ if model_file is None:
90
+ model_file = _get_model_file(
91
+ pretrained_model_name_or_path,
92
+ weights_name=weight_name or TEXT_INVERSION_NAME,
93
+ cache_dir=cache_dir,
94
+ force_download=force_download,
95
+ resume_download=resume_download,
96
+ proxies=proxies,
97
+ local_files_only=local_files_only,
98
+ token=token,
99
+ revision=revision,
100
+ subfolder=subfolder,
101
+ user_agent=user_agent,
102
+ )
103
+ state_dict = torch.load(model_file, map_location="cpu")
104
+ else:
105
+ state_dict = pretrained_model_name_or_path
106
+
107
+ state_dicts.append(state_dict)
108
+
109
+ return state_dicts
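Editorial note: a hedged sketch of calling this helper directly (network access assumed; the repo id is the same one used in the `load_textual_inversion` docstring below).

```py
# Editorial sketch: diffusers-format embeddings load as a dict with a single key, the trigger token.
state_dicts = load_textual_inversion_state_dicts(["sd-concepts-library/cat-toy"])
print(list(state_dicts[0].keys()))  # e.g. ["<cat-toy>"]
```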
110
+
111
+
112
+ class TextualInversionLoaderMixin:
113
+ r"""
114
+ Load Textual Inversion tokens and embeddings to the tokenizer and text encoder.
115
+ """
116
+
117
+ def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"): # noqa: F821
118
+ r"""
119
+ Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
120
+ be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
121
+ inversion token or if the textual inversion token is a single vector, the input prompt is returned.
122
+
123
+ Parameters:
124
+ prompt (`str` or list of `str`):
125
+ The prompt or prompts to guide the image generation.
126
+ tokenizer (`PreTrainedTokenizer`):
127
+ The tokenizer responsible for encoding the prompt into input tokens.
128
+
129
+ Returns:
130
+ `str` or list of `str`: The converted prompt
131
+ """
132
+ if not isinstance(prompt, List):
133
+ prompts = [prompt]
134
+ else:
135
+ prompts = prompt
136
+
137
+ prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]
138
+
139
+ if not isinstance(prompt, List):
140
+ return prompts[0]
141
+
142
+ return prompts
143
+
144
+ def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"): # noqa: F821
145
+ r"""
146
+ Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
147
+ to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
148
+ is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
149
+ inversion token or a textual inversion token that is a single vector, the input prompt is simply returned.
150
+
151
+ Parameters:
152
+ prompt (`str`):
153
+ The prompt to guide the image generation.
154
+ tokenizer (`PreTrainedTokenizer`):
155
+ The tokenizer responsible for encoding the prompt into input tokens.
156
+
157
+ Returns:
158
+ `str`: The converted prompt
159
+ """
160
+ tokens = tokenizer.tokenize(prompt)
161
+ unique_tokens = set(tokens)
162
+ for token in unique_tokens:
163
+ if token in tokenizer.added_tokens_encoder:
164
+ replacement = token
165
+ i = 1
166
+ while f"{token}_{i}" in tokenizer.added_tokens_encoder:
167
+ replacement += f" {token}_{i}"
168
+ i += 1
169
+
170
+ prompt = prompt.replace(token, replacement)
171
+
172
+ return prompt
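Editorial note: a minimal sketch of the expansion performed above, using a hypothetical `added_tokens_encoder` table for a 3-vector embedding.

```py
# Editorial sketch of the multi-vector prompt rewrite (hypothetical token table).
prompt = "A <cat-toy> backpack"
added_tokens_encoder = {"<cat-toy>": 49408, "<cat-toy>_1": 49409, "<cat-toy>_2": 49410}

token = "<cat-toy>"
replacement, i = token, 1
while f"{token}_{i}" in added_tokens_encoder:
    replacement += f" {token}_{i}"
    i += 1
print(prompt.replace(token, replacement))  # A <cat-toy> <cat-toy>_1 <cat-toy>_2 backpack
```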
173
+
174
+ def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens):
175
+ if tokenizer is None:
176
+ raise ValueError(
177
+ f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling"
178
+ f" `{self.load_textual_inversion.__name__}`"
179
+ )
180
+
181
+ if text_encoder is None:
182
+ raise ValueError(
183
+ f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling"
184
+ f" `{self.load_textual_inversion.__name__}`"
185
+ )
186
+
187
+ if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens):
188
+ raise ValueError(
189
+ f"You have passed a list of models of length {len(pretrained_model_name_or_paths)} and a list of tokens of length {len(tokens)}. "
190
+ f"Make sure both lists have the same length."
191
+ )
192
+
193
+ valid_tokens = [t for t in tokens if t is not None]
194
+ if len(set(valid_tokens)) < len(valid_tokens):
195
+ raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}")
196
+
197
+ @staticmethod
198
+ def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
199
+ all_tokens = []
200
+ all_embeddings = []
201
+ for state_dict, token in zip(state_dicts, tokens):
202
+ if isinstance(state_dict, torch.Tensor):
203
+ if token is None:
204
+ raise ValueError(
205
+ "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`."
206
+ )
207
+ loaded_token = token
208
+ embedding = state_dict
209
+ elif len(state_dict) == 1:
210
+ # diffusers
211
+ loaded_token, embedding = next(iter(state_dict.items()))
212
+ elif "string_to_param" in state_dict:
213
+ # A1111
214
+ loaded_token = state_dict["name"]
215
+ embedding = state_dict["string_to_param"]["*"]
216
+ else:
217
+ raise ValueError(
218
+ f"Loaded state dictionary is incorrect: {state_dict}. \n\n"
219
+ "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
220
+ " input key."
221
+ )
222
+
223
+ if token is not None and loaded_token != token:
224
+ logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.")
225
+ else:
226
+ token = loaded_token
227
+
228
+ if token in tokenizer.get_vocab():
229
+ raise ValueError(
230
+ f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
231
+ )
232
+
233
+ all_tokens.append(token)
234
+ all_embeddings.append(embedding)
235
+
236
+ return all_tokens, all_embeddings
237
+
238
+ @staticmethod
239
+ def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer):
240
+ all_tokens = []
241
+ all_embeddings = []
242
+
243
+ for embedding, token in zip(embeddings, tokens):
244
+ if f"{token}_1" in tokenizer.get_vocab():
245
+ multi_vector_tokens = [token]
246
+ i = 1
247
+ while f"{token}_{i}" in tokenizer.added_tokens_encoder:
248
+ multi_vector_tokens.append(f"{token}_{i}")
249
+ i += 1
250
+
251
+ raise ValueError(
252
+ f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder."
253
+ )
254
+
255
+ is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1
256
+ if is_multi_vector:
257
+ all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
258
+ all_embeddings += [e for e in embedding] # noqa: C416
259
+ else:
260
+ all_tokens += [token]
261
+ all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding]
262
+
263
+ return all_tokens, all_embeddings
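Editorial note: the splitting rule above can be illustrated with a stand-alone sketch; the token name and embedding are hypothetical.

```py
# Editorial sketch: a multi-vector embedding of shape (3, 768) becomes three tokens.
import torch

token = "<style>"
embedding = torch.zeros(3, 768)  # hypothetical 3-vector textual inversion embedding
tokens = [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
embeddings = [e for e in embedding]
print(tokens)  # ['<style>', '<style>_1', '<style>_2']
```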
264
+
265
+ @validate_hf_hub_args
266
+ def load_textual_inversion(
267
+ self,
268
+ pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
269
+ token: Optional[Union[str, List[str]]] = None,
270
+ tokenizer: Optional["PreTrainedTokenizer"] = None, # noqa: F821
271
+ text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821
272
+ **kwargs,
273
+ ):
274
+ r"""
275
+ Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
276
+ Automatic1111 formats are supported).
277
+
278
+ Parameters:
279
+ pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
280
+ Can be either one of the following or a list of them:
281
+
282
+ - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
283
+ pretrained model hosted on the Hub.
284
+ - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
285
+ inversion weights.
286
+ - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
287
+ - A [torch state
288
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
289
+
290
+ token (`str` or `List[str]`, *optional*):
291
+ Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
292
+ list, then `token` must also be a list of equal length.
293
+ text_encoder ([`~transformers.CLIPTextModel`], *optional*):
294
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
295
+ If not specified, the function uses `self.text_encoder`.
296
+ tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
297
+ A `CLIPTokenizer` to tokenize text. If not specified, the function uses `self.tokenizer`.
298
+ weight_name (`str`, *optional*):
299
+ Name of a custom weight file. This should be used when:
300
+
301
+ - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
302
+ name such as `text_inv.bin`.
303
+ - The saved textual inversion file is in the Automatic1111 format.
304
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
305
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
306
+ is not used.
307
+ force_download (`bool`, *optional*, defaults to `False`):
308
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
309
+ cached versions if they exist.
310
+ resume_download (`bool`, *optional*, defaults to `False`):
311
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
312
+ incompletely downloaded files are deleted.
313
+ proxies (`Dict[str, str]`, *optional*):
314
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
315
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
316
+ local_files_only (`bool`, *optional*, defaults to `False`):
317
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
318
+ won't be downloaded from the Hub.
319
+ token (`str` or *bool*, *optional*):
320
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
321
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
322
+ revision (`str`, *optional*, defaults to `"main"`):
323
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
324
+ allowed by Git.
325
+ subfolder (`str`, *optional*, defaults to `""`):
326
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
327
+ mirror (`str`, *optional*):
328
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
329
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
330
+ information.
331
+
332
+ Example:
333
+
334
+ To load a Textual Inversion embedding vector in 🤗 Diffusers format:
335
+
336
+ ```py
337
+ from diffusers import StableDiffusionPipeline
338
+ import torch
339
+
340
+ model_id = "runwayml/stable-diffusion-v1-5"
341
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
342
+
343
+ pipe.load_textual_inversion("sd-concepts-library/cat-toy")
344
+
345
+ prompt = "A <cat-toy> backpack"
346
+
347
+ image = pipe(prompt, num_inference_steps=50).images[0]
348
+ image.save("cat-backpack.png")
349
+ ```
350
+
351
+ To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first
352
+ (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector
353
+ locally:
354
+
355
+ ```py
356
+ from diffusers import StableDiffusionPipeline
357
+ import torch
358
+
359
+ model_id = "runwayml/stable-diffusion-v1-5"
360
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
361
+
362
+ pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
363
+
364
+ prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."
365
+
366
+ image = pipe(prompt, num_inference_steps=50).images[0]
367
+ image.save("character.png")
368
+ ```
369
+
370
+ """
371
+ # 1. Set correct tokenizer and text encoder
372
+ tokenizer = tokenizer or getattr(self, "tokenizer", None)
373
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
374
+
375
+ # 2. Normalize inputs
376
+ pretrained_model_name_or_paths = (
377
+ [pretrained_model_name_or_path]
378
+ if not isinstance(pretrained_model_name_or_path, list)
379
+ else pretrained_model_name_or_path
380
+ )
381
+ tokens = [token] if not isinstance(token, list) else token
382
+ if tokens[0] is None:
383
+ tokens = tokens * len(pretrained_model_name_or_paths)
384
+
385
+ # 3. Check inputs
386
+ self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens)
387
+
388
+ # 4. Load state dicts of textual embeddings
389
+ state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs)
390
+
391
+ # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens
392
+ if len(tokens) > 1 and len(state_dicts) == 1:
393
+ if isinstance(state_dicts[0], torch.Tensor):
394
+ state_dicts = list(state_dicts[0])
395
+ if len(tokens) != len(state_dicts):
396
+ raise ValueError(
397
+ f"You have passed a state_dict that contains {len(state_dicts)} embeddings and a list of tokens of length {len(tokens)}. "
398
+ f"Make sure both have the same length."
399
+ )
400
+
401
+ # 4.2 Retrieve tokens and embeddings
402
+ tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer)
403
+
404
+ # 5. Extend tokens and embeddings for multi vector
405
+ tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer)
406
+
407
+ # 6. Make sure all embeddings have the correct size
408
+ expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1]
409
+ if any(expected_emb_dim != emb.shape[-1] for emb in embeddings):
410
+ raise ValueError(
411
+ f"to be of shape {expected_emb_dim}, but found embeddings of shape {[emb.shape[-1] for emb in embeddings]}."
412
+ "to be of shape {input_embeddings.shape[-1]}, but are {embeddings.shape[-1]} "
413
+ )
414
+
415
+ # 7. Now we can be sure that loading the embedding matrix works
416
+ # < Unsafe code:
417
+
418
+ # 7.1 Offload all hooks in case the pipeline was CPU-offloaded before, to make sure we can offload and onload it again afterwards
419
+ is_model_cpu_offload = False
420
+ is_sequential_cpu_offload = False
421
+ for _, component in self.components.items():
422
+ if isinstance(component, nn.Module):
423
+ if hasattr(component, "_hf_hook"):
424
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
425
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
426
+ logger.info(
427
+ "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again."
428
+ )
429
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
430
+
431
+ # 7.2 save expected device and dtype
432
+ device = text_encoder.device
433
+ dtype = text_encoder.dtype
434
+
435
+ # 7.3 Increase token embedding matrix
436
+ text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens))
437
+ input_embeddings = text_encoder.get_input_embeddings().weight
438
+
439
+ # 7.4 Load token and embedding
440
+ for token, embedding in zip(tokens, embeddings):
441
+ # add tokens and get ids
442
+ tokenizer.add_tokens(token)
443
+ token_id = tokenizer.convert_tokens_to_ids(token)
444
+ input_embeddings.data[token_id] = embedding
445
+ logger.info(f"Loaded textual inversion embedding for {token}.")
446
+
447
+ input_embeddings.to(dtype=dtype, device=device)
448
+
449
+ # 7.5 Offload the model again
450
+ if is_model_cpu_offload:
451
+ self.enable_model_cpu_offload()
452
+ elif is_sequential_cpu_offload:
453
+ self.enable_sequential_cpu_offload()
454
+
455
+ # / Unsafe Code >
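Editorial note: as a quick post-load sanity check, a hedged sketch (assuming the `pipe` and `<cat-toy>` token from the docstring example above).

```py
# Editorial sketch: verify the token id and embedding after load_textual_inversion.
token_id = pipe.tokenizer.convert_tokens_to_ids("<cat-toy>")
embedding = pipe.text_encoder.get_input_embeddings().weight[token_id]
print(token_id, embedding.shape)  # e.g. 49408 torch.Size([768]) for SD v1.x text encoders
```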
src/diffusers/loaders/unet.py ADDED
@@ -0,0 +1,828 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import os
16
+ from collections import defaultdict
17
+ from contextlib import nullcontext
18
+ from functools import partial
19
+ from typing import Callable, Dict, List, Optional, Union
20
+
21
+ import safetensors
22
+ import torch
23
+ import torch.nn.functional as F
24
+ from huggingface_hub.utils import validate_hf_hub_args
25
+ from torch import nn
26
+
27
+ from ..models.embeddings import ImageProjection, MLPProjection, Resampler
28
+ from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
29
+ from ..utils import (
30
+ USE_PEFT_BACKEND,
31
+ _get_model_file,
32
+ delete_adapter_layers,
33
+ is_accelerate_available,
34
+ logging,
35
+ set_adapter_layers,
36
+ set_weights_and_activate_adapters,
37
+ )
38
+ from .utils import AttnProcsLayers
39
+
40
+
41
+ if is_accelerate_available():
42
+ from accelerate import init_empty_weights
43
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ TEXT_ENCODER_NAME = "text_encoder"
49
+ UNET_NAME = "unet"
50
+
51
+ LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
52
+ LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
53
+
54
+ CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin"
55
+ CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors"
56
+
57
+
58
+ class UNet2DConditionLoadersMixin:
59
+ """
60
 + Load LoRA layers into a [`UNet2DConditionModel`].
61
+ """
62
+
63
+ text_encoder_name = TEXT_ENCODER_NAME
64
+ unet_name = UNET_NAME
65
+
66
+ @validate_hf_hub_args
67
+ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
68
+ r"""
69
+ Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be
70
+ defined in
71
+ [`attention_processor.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py)
72
+ and be a `torch.nn.Module` class.
73
+
74
+ Parameters:
75
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
76
+ Can be either:
77
+
78
+ - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
79
+ the Hub.
80
+ - A path to a directory (for example `./my_model_directory`) containing the model weights saved
81
+ with [`ModelMixin.save_pretrained`].
82
+ - A [torch state
83
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
84
+
85
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
86
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
87
+ is not used.
88
+ force_download (`bool`, *optional*, defaults to `False`):
89
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
90
+ cached versions if they exist.
91
+ resume_download (`bool`, *optional*, defaults to `False`):
92
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
93
+ incompletely downloaded files are deleted.
94
+ proxies (`Dict[str, str]`, *optional*):
95
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
96
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
97
+ local_files_only (`bool`, *optional*, defaults to `False`):
98
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
99
+ won't be downloaded from the Hub.
100
+ token (`str` or *bool*, *optional*):
101
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
102
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
103
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
104
 + Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
105
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
106
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
107
+ argument to `True` will raise an error.
108
+ revision (`str`, *optional*, defaults to `"main"`):
109
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
110
+ allowed by Git.
111
+ subfolder (`str`, *optional*, defaults to `""`):
112
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
113
+ mirror (`str`, *optional*):
114
+ Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
115
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
116
+ information.
117
+
118
+ Example:
119
+
120
+ ```py
121
+ from diffusers import AutoPipelineForText2Image
122
+ import torch
123
+
124
+ pipeline = AutoPipelineForText2Image.from_pretrained(
125
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
126
+ ).to("cuda")
127
+ pipeline.unet.load_attn_procs(
128
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
129
+ )
130
+ ```
131
+ """
132
+ from ..models.attention_processor import CustomDiffusionAttnProcessor
133
+ from ..models.lora import LoRACompatibleConv, LoRACompatibleLinear, LoRAConv2dLayer, LoRALinearLayer
134
+
135
+ cache_dir = kwargs.pop("cache_dir", None)
136
+ force_download = kwargs.pop("force_download", False)
137
+ resume_download = kwargs.pop("resume_download", False)
138
+ proxies = kwargs.pop("proxies", None)
139
+ local_files_only = kwargs.pop("local_files_only", None)
140
+ token = kwargs.pop("token", None)
141
+ revision = kwargs.pop("revision", None)
142
+ subfolder = kwargs.pop("subfolder", None)
143
+ weight_name = kwargs.pop("weight_name", None)
144
+ use_safetensors = kwargs.pop("use_safetensors", None)
145
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
146
+ # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
147
+ # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
148
+ network_alphas = kwargs.pop("network_alphas", None)
149
+
150
+ _pipeline = kwargs.pop("_pipeline", None)
151
+
152
+ is_network_alphas_none = network_alphas is None
153
+
154
+ allow_pickle = False
155
+
156
+ if use_safetensors is None:
157
+ use_safetensors = True
158
+ allow_pickle = True
159
+
160
+ user_agent = {
161
+ "file_type": "attn_procs_weights",
162
+ "framework": "pytorch",
163
+ }
164
+
165
+ if low_cpu_mem_usage and not is_accelerate_available():
166
+ low_cpu_mem_usage = False
167
+ logger.warning(
168
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
169
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
170
+ " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
171
+ " install accelerate\n```\n."
172
+ )
173
+
174
+ model_file = None
175
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
176
+ # Let's first try to load .safetensors weights
177
+ if (use_safetensors and weight_name is None) or (
178
+ weight_name is not None and weight_name.endswith(".safetensors")
179
+ ):
180
+ try:
181
+ model_file = _get_model_file(
182
+ pretrained_model_name_or_path_or_dict,
183
+ weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
184
+ cache_dir=cache_dir,
185
+ force_download=force_download,
186
+ resume_download=resume_download,
187
+ proxies=proxies,
188
+ local_files_only=local_files_only,
189
+ token=token,
190
+ revision=revision,
191
+ subfolder=subfolder,
192
+ user_agent=user_agent,
193
+ )
194
+ state_dict = safetensors.torch.load_file(model_file, device="cpu")
195
+ except IOError as e:
196
+ if not allow_pickle:
197
+ raise e
198
+ # try loading non-safetensors weights
199
+ pass
200
+ if model_file is None:
201
+ model_file = _get_model_file(
202
+ pretrained_model_name_or_path_or_dict,
203
+ weights_name=weight_name or LORA_WEIGHT_NAME,
204
+ cache_dir=cache_dir,
205
+ force_download=force_download,
206
+ resume_download=resume_download,
207
+ proxies=proxies,
208
+ local_files_only=local_files_only,
209
+ token=token,
210
+ revision=revision,
211
+ subfolder=subfolder,
212
+ user_agent=user_agent,
213
+ )
214
+ state_dict = torch.load(model_file, map_location="cpu")
215
+ else:
216
+ state_dict = pretrained_model_name_or_path_or_dict
217
+
218
+ # fill attn processors
219
+ lora_layers_list = []
220
+
221
+ is_lora = all(("lora" in k or k.endswith(".alpha")) for k in state_dict.keys()) and not USE_PEFT_BACKEND
222
+ is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys())
223
+
224
+ if is_lora:
225
+ # correct keys
226
+ state_dict, network_alphas = self.convert_state_dict_legacy_attn_format(state_dict, network_alphas)
227
+
228
+ if network_alphas is not None:
229
+ network_alphas_keys = list(network_alphas.keys())
230
+ used_network_alphas_keys = set()
231
+
232
+ lora_grouped_dict = defaultdict(dict)
233
+ mapped_network_alphas = {}
234
+
235
+ all_keys = list(state_dict.keys())
236
+ for key in all_keys:
237
+ value = state_dict.pop(key)
238
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
239
+ lora_grouped_dict[attn_processor_key][sub_key] = value
240
+
241
+ # Create another `mapped_network_alphas` dictionary so that we can properly map them.
242
+ if network_alphas is not None:
243
+ for k in network_alphas_keys:
244
+ if k.replace(".alpha", "") in key:
245
+ mapped_network_alphas.update({attn_processor_key: network_alphas.get(k)})
246
+ used_network_alphas_keys.add(k)
247
+
248
+ if not is_network_alphas_none:
249
+ if len(set(network_alphas_keys) - used_network_alphas_keys) > 0:
250
+ raise ValueError(
251
+ f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
252
+ )
253
+
254
+ if len(state_dict) > 0:
255
+ raise ValueError(
256
+ f"The `state_dict` has to be empty at this point but has the following keys \n\n {', '.join(state_dict.keys())}"
257
+ )
258
+
259
+ for key, value_dict in lora_grouped_dict.items():
260
+ attn_processor = self
261
+ for sub_key in key.split("."):
262
+ attn_processor = getattr(attn_processor, sub_key)
263
+
264
+ # Process non-attention layers, which don't have to_{k,v,q,out_proj}_lora layers
265
+ # or add_{k,v,q,out_proj}_proj_lora layers.
266
+ rank = value_dict["lora.down.weight"].shape[0]
267
+
268
+ if isinstance(attn_processor, LoRACompatibleConv):
269
+ in_features = attn_processor.in_channels
270
+ out_features = attn_processor.out_channels
271
+ kernel_size = attn_processor.kernel_size
272
+
273
+ ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
274
+ with ctx():
275
+ lora = LoRAConv2dLayer(
276
+ in_features=in_features,
277
+ out_features=out_features,
278
+ rank=rank,
279
+ kernel_size=kernel_size,
280
+ stride=attn_processor.stride,
281
+ padding=attn_processor.padding,
282
+ network_alpha=mapped_network_alphas.get(key),
283
+ )
284
+ elif isinstance(attn_processor, LoRACompatibleLinear):
285
+ ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
286
+ with ctx():
287
+ lora = LoRALinearLayer(
288
+ attn_processor.in_features,
289
+ attn_processor.out_features,
290
+ rank,
291
+ mapped_network_alphas.get(key),
292
+ )
293
+ else:
294
+ raise ValueError(f"Module {key} is not a LoRACompatibleConv or LoRACompatibleLinear module.")
295
+
296
+ value_dict = {k.replace("lora.", ""): v for k, v in value_dict.items()}
297
+ lora_layers_list.append((attn_processor, lora))
298
+
299
+ if low_cpu_mem_usage:
300
+ device = next(iter(value_dict.values())).device
301
+ dtype = next(iter(value_dict.values())).dtype
302
+ load_model_dict_into_meta(lora, value_dict, device=device, dtype=dtype)
303
+ else:
304
+ lora.load_state_dict(value_dict)
305
+
306
+ elif is_custom_diffusion:
307
+ attn_processors = {}
308
+ custom_diffusion_grouped_dict = defaultdict(dict)
309
+ for key, value in state_dict.items():
310
+ if len(value) == 0:
311
+ custom_diffusion_grouped_dict[key] = {}
312
+ else:
313
+ if "to_out" in key:
314
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
315
+ else:
316
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:])
317
+ custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value
318
+
319
+ for key, value_dict in custom_diffusion_grouped_dict.items():
320
+ if len(value_dict) == 0:
321
+ attn_processors[key] = CustomDiffusionAttnProcessor(
322
+ train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None
323
+ )
324
+ else:
325
+ cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1]
326
+ hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0]
327
 + train_q_out = "to_q_custom_diffusion.weight" in value_dict
328
+ attn_processors[key] = CustomDiffusionAttnProcessor(
329
+ train_kv=True,
330
+ train_q_out=train_q_out,
331
+ hidden_size=hidden_size,
332
+ cross_attention_dim=cross_attention_dim,
333
+ )
334
+ attn_processors[key].load_state_dict(value_dict)
335
+ elif USE_PEFT_BACKEND:
336
+ # In that case we have nothing to do as loading the adapter weights is already handled above by `set_peft_model_state_dict`
337
+ # on the Unet
338
+ pass
339
+ else:
340
+ raise ValueError(
341
+ f"{model_file} does not seem to be in the correct format expected by LoRA or Custom Diffusion training."
342
+ )
343
+
344
+ # <Unsafe code
345
+ # We can be sure that the following works as it just sets attention processors, lora layers and puts all in the same dtype
346
 + # Now we remove any existing hooks from the pipeline components.
347
+ is_model_cpu_offload = False
348
+ is_sequential_cpu_offload = False
349
+
350
 + # For the PEFT backend the UNet is already offloaded at this stage as it is handled inside `load_lora_into_unet`
351
+ if not USE_PEFT_BACKEND:
352
+ if _pipeline is not None:
353
+ for _, component in _pipeline.components.items():
354
+ if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
355
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
356
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
357
+
358
+ logger.info(
359
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
360
+ )
361
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
362
+
363
+ # only custom diffusion needs to set attn processors
364
+ if is_custom_diffusion:
365
+ self.set_attn_processor(attn_processors)
366
+
367
+ # set lora layers
368
+ for target_module, lora_layer in lora_layers_list:
369
+ target_module.set_lora_layer(lora_layer)
370
+
371
+ self.to(dtype=self.dtype, device=self.device)
372
+
373
+ # Offload back.
374
+ if is_model_cpu_offload:
375
+ _pipeline.enable_model_cpu_offload()
376
+ elif is_sequential_cpu_offload:
377
+ _pipeline.enable_sequential_cpu_offload()
378
+ # Unsafe code />
379
+
380
+ def convert_state_dict_legacy_attn_format(self, state_dict, network_alphas):
381
+ is_new_lora_format = all(
382
+ key.startswith(self.unet_name) or key.startswith(self.text_encoder_name) for key in state_dict.keys()
383
+ )
384
+ if is_new_lora_format:
385
+ # Strip the `"unet"` prefix.
386
+ is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys())
387
+ if is_text_encoder_present:
388
+ warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)."
389
 + logger.warning(warn_message)
390
+ unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)]
391
+ state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}
392
+
393
+ # change processor format to 'pure' LoRACompatibleLinear format
394
+ if any("processor" in k.split(".") for k in state_dict.keys()):
395
+
396
+ def format_to_lora_compatible(key):
397
+ if "processor" not in key.split("."):
398
+ return key
399
+ return key.replace(".processor", "").replace("to_out_lora", "to_out.0.lora").replace("_lora", ".lora")
400
+
401
+ state_dict = {format_to_lora_compatible(k): v for k, v in state_dict.items()}
402
+
403
+ if network_alphas is not None:
404
+ network_alphas = {format_to_lora_compatible(k): v for k, v in network_alphas.items()}
405
+ return state_dict, network_alphas
406
+
407
+ def save_attn_procs(
408
+ self,
409
+ save_directory: Union[str, os.PathLike],
410
+ is_main_process: bool = True,
411
+ weight_name: str = None,
412
+ save_function: Callable = None,
413
+ safe_serialization: bool = True,
414
+ **kwargs,
415
+ ):
416
+ r"""
417
+ Save attention processor layers to a directory so that it can be reloaded with the
418
+ [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method.
419
+
420
+ Arguments:
421
+ save_directory (`str` or `os.PathLike`):
422
+ Directory to save an attention processor to (will be created if it doesn't exist).
423
+ is_main_process (`bool`, *optional*, defaults to `True`):
424
 + Whether the process calling this is the main process or not. Useful during distributed training when you
425
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
426
+ process to avoid race conditions.
427
+ save_function (`Callable`):
428
+ The function to use to save the state dictionary. Useful during distributed training when you need to
429
+ replace `torch.save` with another method. Can be configured with the environment variable
430
+ `DIFFUSERS_SAVE_MODE`.
431
+ safe_serialization (`bool`, *optional*, defaults to `True`):
432
+ Whether to save the model using `safetensors` or with `pickle`.
433
+
434
+ Example:
435
+
436
+ ```py
437
+ import torch
438
+ from diffusers import DiffusionPipeline
439
+
440
+ pipeline = DiffusionPipeline.from_pretrained(
441
+ "CompVis/stable-diffusion-v1-4",
442
+ torch_dtype=torch.float16,
443
+ ).to("cuda")
444
+ pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
445
+ pipeline.unet.save_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
446
+ ```
447
+ """
448
+ from ..models.attention_processor import (
449
+ CustomDiffusionAttnProcessor,
450
+ CustomDiffusionAttnProcessor2_0,
451
+ CustomDiffusionXFormersAttnProcessor,
452
+ )
453
+
454
+ if os.path.isfile(save_directory):
455
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
456
+ return
457
+
458
+ if save_function is None:
459
+ if safe_serialization:
460
+
461
+ def save_function(weights, filename):
462
+ return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})
463
+
464
+ else:
465
+ save_function = torch.save
466
+
467
+ os.makedirs(save_directory, exist_ok=True)
468
+
469
+ is_custom_diffusion = any(
470
+ isinstance(
471
+ x,
472
+ (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor),
473
+ )
474
+ for (_, x) in self.attn_processors.items()
475
+ )
476
+ if is_custom_diffusion:
477
+ model_to_save = AttnProcsLayers(
478
+ {
479
+ y: x
480
+ for (y, x) in self.attn_processors.items()
481
+ if isinstance(
482
+ x,
483
+ (
484
+ CustomDiffusionAttnProcessor,
485
+ CustomDiffusionAttnProcessor2_0,
486
+ CustomDiffusionXFormersAttnProcessor,
487
+ ),
488
+ )
489
+ }
490
+ )
491
+ state_dict = model_to_save.state_dict()
492
+ for name, attn in self.attn_processors.items():
493
+ if len(attn.state_dict()) == 0:
494
+ state_dict[name] = {}
495
+ else:
496
+ model_to_save = AttnProcsLayers(self.attn_processors)
497
+ state_dict = model_to_save.state_dict()
498
+
499
+ if weight_name is None:
500
+ if safe_serialization:
501
+ weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE
502
+ else:
503
+ weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME
504
+
505
+ # Save the model
506
+ save_function(state_dict, os.path.join(save_directory, weight_name))
507
+ logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")
508
+
509
+ def fuse_lora(self, lora_scale=1.0, safe_fusing=False, adapter_names=None):
510
+ self.lora_scale = lora_scale
511
+ self._safe_fusing = safe_fusing
512
+ self.apply(partial(self._fuse_lora_apply, adapter_names=adapter_names))
513
+
514
+ def _fuse_lora_apply(self, module, adapter_names=None):
515
+ if not USE_PEFT_BACKEND:
516
+ if hasattr(module, "_fuse_lora"):
517
+ module._fuse_lora(self.lora_scale, self._safe_fusing)
518
+
519
+ if adapter_names is not None:
520
+ raise ValueError(
521
+ "The `adapter_names` argument is not supported in your environment. Please switch"
522
+ " to PEFT backend to use this argument by installing latest PEFT and transformers."
523
+ " `pip install -U peft transformers`"
524
+ )
525
+ else:
526
+ from peft.tuners.tuners_utils import BaseTunerLayer
527
+
528
+ merge_kwargs = {"safe_merge": self._safe_fusing}
529
+
530
+ if isinstance(module, BaseTunerLayer):
531
+ if self.lora_scale != 1.0:
532
+ module.scale_layer(self.lora_scale)
533
+
534
 + # For BC with previous PEFT versions, we need to check the signature
535
+ # of the `merge` method to see if it supports the `adapter_names` argument.
536
+ supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
537
+ if "adapter_names" in supported_merge_kwargs:
538
+ merge_kwargs["adapter_names"] = adapter_names
539
+ elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
540
+ raise ValueError(
541
+ "The `adapter_names` argument is not supported with your PEFT version. Please upgrade"
542
+ " to the latest version of PEFT. `pip install -U peft`"
543
+ )
544
+
545
+ module.merge(**merge_kwargs)
546
+
547
+ def unfuse_lora(self):
548
+ self.apply(self._unfuse_lora_apply)
549
+
550
+ def _unfuse_lora_apply(self, module):
551
+ if not USE_PEFT_BACKEND:
552
+ if hasattr(module, "_unfuse_lora"):
553
+ module._unfuse_lora()
554
+ else:
555
+ from peft.tuners.tuners_utils import BaseTunerLayer
556
+
557
+ if isinstance(module, BaseTunerLayer):
558
+ module.unmerge()
559
+
560
+ def set_adapters(
561
+ self,
562
+ adapter_names: Union[List[str], str],
563
+ weights: Optional[Union[List[float], float]] = None,
564
+ ):
565
+ """
566
+ Set the currently active adapters for use in the UNet.
567
+
568
+ Args:
569
+ adapter_names (`List[str]` or `str`):
570
+ The names of the adapters to use.
571
+ adapter_weights (`Union[List[float], float]`, *optional*):
572
+ The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the
573
+ adapters.
574
+
575
+ Example:
576
+
577
+ ```py
578
+ from diffusers import AutoPipelineForText2Image
579
+ import torch
580
+
581
+ pipeline = AutoPipelineForText2Image.from_pretrained(
582
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
583
+ ).to("cuda")
584
+ pipeline.load_lora_weights(
585
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
586
+ )
587
+ pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
588
+ pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5])
589
+ ```
590
+ """
591
+ if not USE_PEFT_BACKEND:
592
+ raise ValueError("PEFT backend is required for `set_adapters()`.")
593
+
594
+ adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
595
+
596
+ if weights is None:
597
+ weights = [1.0] * len(adapter_names)
598
+ elif isinstance(weights, float):
599
+ weights = [weights] * len(adapter_names)
600
+
601
+ if len(adapter_names) != len(weights):
602
+ raise ValueError(
603
+ f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}."
604
+ )
605
+
606
+ set_weights_and_activate_adapters(self, adapter_names, weights)
607
+
608
+ def disable_lora(self):
609
+ """
610
+ Disable the UNet's active LoRA layers.
611
+
612
+ Example:
613
+
614
+ ```py
615
+ from diffusers import AutoPipelineForText2Image
616
+ import torch
617
+
618
+ pipeline = AutoPipelineForText2Image.from_pretrained(
619
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
620
+ ).to("cuda")
621
+ pipeline.load_lora_weights(
622
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
623
+ )
624
+ pipeline.disable_lora()
625
+ ```
626
+ """
627
+ if not USE_PEFT_BACKEND:
628
+ raise ValueError("PEFT backend is required for this method.")
629
+ set_adapter_layers(self, enabled=False)
630
+
631
+ def enable_lora(self):
632
+ """
633
+ Enable the UNet's active LoRA layers.
634
+
635
+ Example:
636
+
637
+ ```py
638
+ from diffusers import AutoPipelineForText2Image
639
+ import torch
640
+
641
+ pipeline = AutoPipelineForText2Image.from_pretrained(
642
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
643
+ ).to("cuda")
644
+ pipeline.load_lora_weights(
645
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
646
+ )
647
+ pipeline.enable_lora()
648
+ ```
649
+ """
650
+ if not USE_PEFT_BACKEND:
651
+ raise ValueError("PEFT backend is required for this method.")
652
+ set_adapter_layers(self, enabled=True)
653
+
654
+ def delete_adapters(self, adapter_names: Union[List[str], str]):
655
+ """
656
+ Delete an adapter's LoRA layers from the UNet.
657
+
658
+ Args:
659
+ adapter_names (`Union[List[str], str]`):
660
+ The names (single string or list of strings) of the adapter to delete.
661
+
662
+ Example:
663
+
664
+ ```py
665
+ from diffusers import AutoPipelineForText2Image
666
+ import torch
667
+
668
+ pipeline = AutoPipelineForText2Image.from_pretrained(
669
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
670
+ ).to("cuda")
671
+ pipeline.load_lora_weights(
672
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic"
673
+ )
674
+ pipeline.delete_adapters("cinematic")
675
+ ```
676
+ """
677
+ if not USE_PEFT_BACKEND:
678
+ raise ValueError("PEFT backend is required for this method.")
679
+
680
+ if isinstance(adapter_names, str):
681
+ adapter_names = [adapter_names]
682
+
683
+ for adapter_name in adapter_names:
684
+ delete_adapter_layers(self, adapter_name)
685
+
686
+ # Pop also the corresponding adapter from the config
687
+ if hasattr(self, "peft_config"):
688
+ self.peft_config.pop(adapter_name, None)
689
+
690
+ def _convert_ip_adapter_image_proj_to_diffusers(self, state_dict):
691
+ updated_state_dict = {}
692
+ image_projection = None
693
+
694
+ if "proj.weight" in state_dict:
695
+ # IP-Adapter
696
+ num_image_text_embeds = 4
697
+ clip_embeddings_dim = state_dict["proj.weight"].shape[-1]
698
+ cross_attention_dim = state_dict["proj.weight"].shape[0] // 4
699
+
700
+ image_projection = ImageProjection(
701
+ cross_attention_dim=cross_attention_dim,
702
+ image_embed_dim=clip_embeddings_dim,
703
+ num_image_text_embeds=num_image_text_embeds,
704
+ )
705
+
706
+ for key, value in state_dict.items():
707
+ diffusers_name = key.replace("proj", "image_embeds")
708
+ updated_state_dict[diffusers_name] = value
709
+
710
+ elif "proj.3.weight" in state_dict:
711
+ # IP-Adapter Full
712
+ clip_embeddings_dim = state_dict["proj.0.weight"].shape[0]
713
+ cross_attention_dim = state_dict["proj.3.weight"].shape[0]
714
+
715
+ image_projection = MLPProjection(
716
+ cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim
717
+ )
718
+
719
+ for key, value in state_dict.items():
720
+ diffusers_name = key.replace("proj.0", "ff.net.0.proj")
721
+ diffusers_name = diffusers_name.replace("proj.2", "ff.net.2")
722
+ diffusers_name = diffusers_name.replace("proj.3", "norm")
723
+ updated_state_dict[diffusers_name] = value
724
+
725
+ else:
726
+ # IP-Adapter Plus
727
+ num_image_text_embeds = state_dict["latents"].shape[1]
728
+ embed_dims = state_dict["proj_in.weight"].shape[1]
729
+ output_dims = state_dict["proj_out.weight"].shape[0]
730
+ hidden_dims = state_dict["latents"].shape[2]
731
+ heads = state_dict["layers.0.0.to_q.weight"].shape[0] // 64
732
+
733
+ image_projection = Resampler(
734
+ embed_dims=embed_dims,
735
+ output_dims=output_dims,
736
+ hidden_dims=hidden_dims,
737
+ heads=heads,
738
+ num_queries=num_image_text_embeds,
739
+ )
740
+
741
+ for key, value in state_dict.items():
742
+ diffusers_name = key.replace("0.to", "2.to")
743
+ diffusers_name = diffusers_name.replace("1.0.weight", "3.0.weight")
744
+ diffusers_name = diffusers_name.replace("1.0.bias", "3.0.bias")
745
+ diffusers_name = diffusers_name.replace("1.1.weight", "3.1.net.0.proj.weight")
746
+ diffusers_name = diffusers_name.replace("1.3.weight", "3.1.net.2.weight")
747
+
748
+ if "norm1" in diffusers_name:
749
+ updated_state_dict[diffusers_name.replace("0.norm1", "0")] = value
750
+ elif "norm2" in diffusers_name:
751
+ updated_state_dict[diffusers_name.replace("0.norm2", "1")] = value
752
+ elif "to_kv" in diffusers_name:
753
+ v_chunk = value.chunk(2, dim=0)
754
+ updated_state_dict[diffusers_name.replace("to_kv", "to_k")] = v_chunk[0]
755
+ updated_state_dict[diffusers_name.replace("to_kv", "to_v")] = v_chunk[1]
756
+ elif "to_out" in diffusers_name:
757
+ updated_state_dict[diffusers_name.replace("to_out", "to_out.0")] = value
758
+ else:
759
+ updated_state_dict[diffusers_name] = value
760
+
761
+ image_projection.load_state_dict(updated_state_dict)
762
+ return image_projection
763
+
764
+ def _load_ip_adapter_weights(self, state_dict):
765
+ from ..models.attention_processor import (
766
+ AttnProcessor,
767
+ AttnProcessor2_0,
768
+ IPAdapterAttnProcessor,
769
+ IPAdapterAttnProcessor2_0,
770
+ )
771
+
772
+ if "proj.weight" in state_dict["image_proj"]:
773
+ # IP-Adapter
774
+ num_image_text_embeds = 4
775
+ elif "proj.3.weight" in state_dict["image_proj"]:
776
+ # IP-Adapter Full Face
777
+ num_image_text_embeds = 257 # 256 CLIP tokens + 1 CLS token
778
+ else:
779
+ # IP-Adapter Plus
780
+ num_image_text_embeds = state_dict["image_proj"]["latents"].shape[1]
781
+
782
+ # Set encoder_hid_proj after loading ip_adapter weights,
783
+ # because `Resampler` also has `attn_processors`.
784
+ self.encoder_hid_proj = None
785
+
786
+ # set ip-adapter cross-attention processors & load state_dict
787
+ attn_procs = {}
788
+ key_id = 1
789
+ for name in self.attn_processors.keys():
790
+ cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim
791
+ if name.startswith("mid_block"):
792
+ hidden_size = self.config.block_out_channels[-1]
793
+ elif name.startswith("up_blocks"):
794
+ block_id = int(name[len("up_blocks.")])
795
+ hidden_size = list(reversed(self.config.block_out_channels))[block_id]
796
+ elif name.startswith("down_blocks"):
797
+ block_id = int(name[len("down_blocks.")])
798
+ hidden_size = self.config.block_out_channels[block_id]
799
+ if cross_attention_dim is None or "motion_modules" in name:
800
+ attn_processor_class = (
801
+ AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
802
+ )
803
+ attn_procs[name] = attn_processor_class()
804
+ else:
805
+ attn_processor_class = (
806
+ IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor
807
+ )
808
+ attn_procs[name] = attn_processor_class(
809
+ hidden_size=hidden_size,
810
+ cross_attention_dim=cross_attention_dim,
811
+ scale=1.0,
812
+ num_tokens=num_image_text_embeds,
813
+ ).to(dtype=self.dtype, device=self.device)
814
+
815
+ value_dict = {}
816
+ for k, w in attn_procs[name].state_dict().items():
817
+ value_dict.update({f"{k}": state_dict["ip_adapter"][f"{key_id}.{k}"]})
818
+
819
+ attn_procs[name].load_state_dict(value_dict)
820
+ key_id += 2
821
+
822
+ self.set_attn_processor(attn_procs)
823
+
824
+ # convert IP-Adapter Image Projection layers to diffusers
825
+ image_projection = self._convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"])
826
+
827
+ self.encoder_hid_proj = image_projection.to(device=self.device, dtype=self.dtype)
828
+ self.config.encoder_hid_dim_type = "ip_image_proj"
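The snippet below is an editorial usage sketch, not part of the uploaded file: it shows one plausible way to reach the mixin above through a pipeline's `unet`, loading LoRA attention processors with `load_attn_procs` and merging them into the base weights with `fuse_lora`. The checkpoint and LoRA repository ids reuse the ones already cited in the docstrings above and should be treated as placeholders.

```py
# Hedged usage sketch (not part of this file): load LoRA attention processors
# into the UNet and fuse them for inference. Repository ids are placeholders.
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# `load_attn_procs` is the UNet2DConditionLoadersMixin method defined above.
pipeline.unet.load_attn_procs(
    "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors"
)

# `fuse_lora` merges the LoRA deltas into the base weights so each forward pass
# pays no extra per-layer cost; `unfuse_lora` restores the original weights.
pipeline.unet.fuse_lora(lora_scale=0.8)
image = pipeline("cinematic photo of a lighthouse at dusk").images[0]
pipeline.unet.unfuse_lora()
```

Fusing is most useful when the same `lora_scale` is reused across many generations; for one-off experiments the unfused path is usually enough.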
src/diffusers/loaders/utils.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict
16
+
17
+ import torch
18
+
19
+
20
+ class AttnProcsLayers(torch.nn.Module):
21
+ def __init__(self, state_dict: Dict[str, torch.Tensor]):
22
+ super().__init__()
23
+ self.layers = torch.nn.ModuleList(state_dict.values())
24
+ self.mapping = dict(enumerate(state_dict.keys()))
25
+ self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
26
+
27
+ # .processor for unet, .self_attn for text encoder
28
+ self.split_keys = [".processor", ".self_attn"]
29
+
30
+ # we add a hook to state_dict() and load_state_dict() so that the
31
+ # naming fits with `unet.attn_processors`
32
+ def map_to(module, state_dict, *args, **kwargs):
33
+ new_state_dict = {}
34
+ for key, value in state_dict.items():
35
+ num = int(key.split(".")[1]) # 0 is always "layers"
36
+ new_key = key.replace(f"layers.{num}", module.mapping[num])
37
+ new_state_dict[new_key] = value
38
+
39
+ return new_state_dict
40
+
41
+ def remap_key(key, state_dict):
42
+ for k in self.split_keys:
43
+ if k in key:
44
+ return key.split(k)[0] + k
45
+
46
+ raise ValueError(
47
+ f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
48
+ )
49
+
50
+ def map_from(module, state_dict, *args, **kwargs):
51
+ all_keys = list(state_dict.keys())
52
+ for key in all_keys:
53
+ replace_key = remap_key(key, state_dict)
54
+ new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
55
+ state_dict[new_key] = state_dict[key]
56
+ del state_dict[key]
57
+
58
+ self._register_state_dict_hook(map_to)
59
+ self._register_load_state_dict_pre_hook(map_from, with_module=True)
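An editorial toy example follows (not part of the uploaded file); it only illustrates the key remapping performed by the hooks registered above. The processor keys and the `nn.Linear` stand-ins are hypothetical; in practice the dict comes from `unet.attn_processors` once module-based processors (LoRA, Custom Diffusion) have been set.

```py
# Hedged toy example (not part of this file): show how AttnProcsLayers remaps
# keys between its internal ModuleList and processor-style key names.
import torch
from diffusers.loaders.utils import AttnProcsLayers

# Hypothetical processor modules keyed the way `unet.attn_processors` keys them;
# plain nn.Linear layers stand in for real (LoRA / Custom Diffusion) processors.
procs = {
    "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor": torch.nn.Linear(4, 4),
    "mid_block.attentions.0.transformer_blocks.0.attn2.processor": torch.nn.Linear(4, 4),
}

layers = AttnProcsLayers(procs)

# The `map_to` hook rewrites "layers.<i>.<param>" back to "<processor key>.<param>",
# so the saved keys line up with what `unet.load_attn_procs` expects.
print(sorted(layers.state_dict().keys()))

# Loading goes through `map_from`, which reverses the renaming.
layers.load_state_dict(layers.state_dict())
```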
src/diffusers/models/README.md ADDED
@@ -0,0 +1,3 @@
1
+ # Models
2
+
3
+ For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models/overview).
src/diffusers/models/__init__.py ADDED
@@ -0,0 +1,94 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import (
18
+ DIFFUSERS_SLOW_IMPORT,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {}
26
+
27
+ if is_torch_available():
28
+ _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"]
29
+ _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"]
30
+ _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"]
31
+ _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"]
32
+ _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"]
33
+ _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"]
34
+ _import_structure["controlnet"] = ["ControlNetModel"]
35
+ _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"]
36
+ _import_structure["embeddings"] = ["ImageProjection"]
37
+ _import_structure["modeling_utils"] = ["ModelMixin"]
38
+ _import_structure["prior_transformer"] = ["PriorTransformer"]
39
+ _import_structure["t5_film_transformer"] = ["T5FilmDecoder"]
40
+ _import_structure["transformer_2d"] = ["Transformer2DModel"]
41
+ _import_structure["transformer_temporal"] = ["TransformerTemporalModel"]
42
+ _import_structure["unet_1d"] = ["UNet1DModel"]
43
+ _import_structure["unet_2d"] = ["UNet2DModel"]
44
+ _import_structure["unet_2d_condition"] = ["UNet2DConditionModel"]
45
+ _import_structure["unet_3d_condition"] = ["UNet3DConditionModel"]
46
+ _import_structure["unet_kandinsky3"] = ["Kandinsky3UNet"]
47
+ _import_structure["unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"]
48
+ _import_structure["unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"]
49
+ _import_structure["uvit_2d"] = ["UVit2DModel"]
50
+ _import_structure["vq_model"] = ["VQModel"]
51
+
52
+ if is_flax_available():
53
+ _import_structure["controlnet_flax"] = ["FlaxControlNetModel"]
54
+ _import_structure["unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
55
+ _import_structure["vae_flax"] = ["FlaxAutoencoderKL"]
56
+
57
+
58
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
59
+ if is_torch_available():
60
+ from .adapter import MultiAdapter, T2IAdapter
61
+ from .autoencoders import (
62
+ AsymmetricAutoencoderKL,
63
+ AutoencoderKL,
64
+ AutoencoderKLTemporalDecoder,
65
+ AutoencoderTiny,
66
+ ConsistencyDecoderVAE,
67
+ )
68
+ from .controlnet import ControlNetModel
69
+ from .dual_transformer_2d import DualTransformer2DModel
70
+ from .embeddings import ImageProjection
71
+ from .modeling_utils import ModelMixin
72
+ from .prior_transformer import PriorTransformer
73
+ from .t5_film_transformer import T5FilmDecoder
74
+ from .transformer_2d import Transformer2DModel
75
+ from .transformer_temporal import TransformerTemporalModel
76
+ from .unet_1d import UNet1DModel
77
+ from .unet_2d import UNet2DModel
78
+ from .unet_2d_condition import UNet2DConditionModel
79
+ from .unet_3d_condition import UNet3DConditionModel
80
+ from .unet_kandinsky3 import Kandinsky3UNet
81
+ from .unet_motion_model import MotionAdapter, UNetMotionModel
82
+ from .unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel
83
+ from .uvit_2d import UVit2DModel
84
+ from .vq_model import VQModel
85
+
86
+ if is_flax_available():
87
+ from .controlnet_flax import FlaxControlNetModel
88
+ from .unet_2d_condition_flax import FlaxUNet2DConditionModel
89
+ from .vae_flax import FlaxAutoencoderKL
90
+
91
+ else:
92
+ import sys
93
+
94
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
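As a small editorial illustration (not part of the uploaded file), the `_LazyModule` registration above keeps `import diffusers.models` cheap; a concrete submodule is only imported on first attribute access:

```py
# Hedged illustration (not part of this file) of the lazy-import behavior.
import diffusers.models as models

# The module object registered in sys.modules above is a _LazyModule instance
# (unless DIFFUSERS_SLOW_IMPORT forces eager imports).
print(type(models).__name__)

# First attribute access triggers the import of `.unet_2d_condition`.
unet_cls = models.UNet2DConditionModel
print(unet_cls.__name__)  # "UNet2DConditionModel"
```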
src/diffusers/models/activations.py ADDED
@@ -0,0 +1,123 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from ..utils import USE_PEFT_BACKEND
21
+ from .lora import LoRACompatibleLinear
22
+
23
+
24
+ ACTIVATION_FUNCTIONS = {
25
+ "swish": nn.SiLU(),
26
+ "silu": nn.SiLU(),
27
+ "mish": nn.Mish(),
28
+ "gelu": nn.GELU(),
29
+ "relu": nn.ReLU(),
30
+ }
31
+
32
+
33
+ def get_activation(act_fn: str) -> nn.Module:
34
+ """Helper function to get activation function from string.
35
+
36
+ Args:
37
+ act_fn (str): Name of activation function.
38
+
39
+ Returns:
40
+ nn.Module: Activation function.
41
+ """
42
+
43
+ act_fn = act_fn.lower()
44
+ if act_fn in ACTIVATION_FUNCTIONS:
45
+ return ACTIVATION_FUNCTIONS[act_fn]
46
+ else:
47
+ raise ValueError(f"Unsupported activation function: {act_fn}")
48
+
49
+
50
+ class GELU(nn.Module):
51
+ r"""
52
+ GELU activation function with tanh approximation support with `approximate="tanh"`.
53
+
54
+ Parameters:
55
+ dim_in (`int`): The number of channels in the input.
56
+ dim_out (`int`): The number of channels in the output.
57
+ approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
58
+ bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
59
+ """
60
+
61
+ def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True):
62
+ super().__init__()
63
+ self.proj = nn.Linear(dim_in, dim_out, bias=bias)
64
+ self.approximate = approximate
65
+
66
+ def gelu(self, gate: torch.Tensor) -> torch.Tensor:
67
+ if gate.device.type != "mps":
68
+ return F.gelu(gate, approximate=self.approximate)
69
+ # mps: gelu is not implemented for float16
70
+ return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
71
+
72
+ def forward(self, hidden_states):
73
+ hidden_states = self.proj(hidden_states)
74
+ hidden_states = self.gelu(hidden_states)
75
+ return hidden_states
76
+
77
+
78
+ class GEGLU(nn.Module):
79
+ r"""
80
+ A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.
81
+
82
+ Parameters:
83
+ dim_in (`int`): The number of channels in the input.
84
+ dim_out (`int`): The number of channels in the output.
85
+ bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
86
+ """
87
+
88
+ def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
89
+ super().__init__()
90
+ linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
91
+
92
+ self.proj = linear_cls(dim_in, dim_out * 2, bias=bias)
93
+
94
+ def gelu(self, gate: torch.Tensor) -> torch.Tensor:
95
+ if gate.device.type != "mps":
96
+ return F.gelu(gate)
97
+ # mps: gelu is not implemented for float16
98
+ return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
99
+
100
+ def forward(self, hidden_states, scale: float = 1.0):
101
+ args = () if USE_PEFT_BACKEND else (scale,)
102
+ hidden_states, gate = self.proj(hidden_states, *args).chunk(2, dim=-1)
103
+ return hidden_states * self.gelu(gate)
104
+
105
+
106
+ class ApproximateGELU(nn.Module):
107
+ r"""
108
+ The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this
109
+ [paper](https://arxiv.org/abs/1606.08415).
110
+
111
+ Parameters:
112
+ dim_in (`int`): The number of channels in the input.
113
+ dim_out (`int`): The number of channels in the output.
114
+ bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
115
+ """
116
+
117
+ def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
118
+ super().__init__()
119
+ self.proj = nn.Linear(dim_in, dim_out, bias=bias)
120
+
121
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
122
+ x = self.proj(x)
123
+ return x * torch.sigmoid(1.702 * x)
src/diffusers/models/adapter.py ADDED
@@ -0,0 +1,584 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from typing import Callable, List, Optional, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ from ..configuration_utils import ConfigMixin, register_to_config
21
+ from ..utils import logging
22
+ from .modeling_utils import ModelMixin
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class MultiAdapter(ModelMixin):
29
+ r"""
30
+ MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
31
+ user-assigned weighting.
32
+
33
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
34
 + implements for all models (such as downloading or saving).
35
+
36
+ Parameters:
37
 + adapters (`List[T2IAdapter]`):
38
+ A list of `T2IAdapter` model instances.
39
+ """
40
+
41
+ def __init__(self, adapters: List["T2IAdapter"]):
42
+ super(MultiAdapter, self).__init__()
43
+
44
+ self.num_adapter = len(adapters)
45
+ self.adapters = nn.ModuleList(adapters)
46
+
47
+ if len(adapters) == 0:
48
+ raise ValueError("Expecting at least one adapter")
49
+
50
+ if len(adapters) == 1:
51
+ raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`")
52
+
53
+ # The outputs from each adapter are added together with a weight.
54
+ # This means that the change in dimensions from downsampling must
55
+ # be the same for all adapters. Inductively, it also means the
56
+ # downscale_factor and total_downscale_factor must be the same for all
57
+ # adapters.
58
+ first_adapter_total_downscale_factor = adapters[0].total_downscale_factor
59
+ first_adapter_downscale_factor = adapters[0].downscale_factor
60
+ for idx in range(1, len(adapters)):
61
+ if (
62
+ adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor
63
+ or adapters[idx].downscale_factor != first_adapter_downscale_factor
64
+ ):
65
+ raise ValueError(
66
+ f"Expecting all adapters to have the same downscaling behavior, but got:\n"
67
+ f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n"
68
+ f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n"
69
+ f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n"
70
+ f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}"
71
+ )
72
+
73
+ self.total_downscale_factor = first_adapter_total_downscale_factor
74
+ self.downscale_factor = first_adapter_downscale_factor
75
+
76
+ def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]:
77
+ r"""
78
+ Args:
79
+ xs (`torch.Tensor`):
80
 + (batch, channel, height, width) input images for multiple adapter models, concatenated along dimension 1;
81
 + `channel` should equal `num_adapter` * (number of channels per image).
82
+ adapter_weights (`List[float]`, *optional*, defaults to None):
83
 + List of floats representing the weight by which each adapter's output is multiplied before adding
84
+ them together.
85
+ """
86
+ if adapter_weights is None:
87
+ adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter)
88
+ else:
89
+ adapter_weights = torch.tensor(adapter_weights)
90
+
91
+ accume_state = None
92
+ for x, w, adapter in zip(xs, adapter_weights, self.adapters):
93
+ features = adapter(x)
94
+ if accume_state is None:
95
+ accume_state = features
96
+ for i in range(len(accume_state)):
97
+ accume_state[i] = w * accume_state[i]
98
+ else:
99
+ for i in range(len(features)):
100
+ accume_state[i] += w * features[i]
101
+ return accume_state
102
+
103
+ def save_pretrained(
104
+ self,
105
+ save_directory: Union[str, os.PathLike],
106
+ is_main_process: bool = True,
107
+ save_function: Callable = None,
108
+ safe_serialization: bool = True,
109
+ variant: Optional[str] = None,
110
+ ):
111
+ """
112
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
113
 + [`~models.adapter.MultiAdapter.from_pretrained`] class method.
114
+
115
+ Arguments:
116
+ save_directory (`str` or `os.PathLike`):
117
+ Directory to which to save. Will be created if it doesn't exist.
118
+ is_main_process (`bool`, *optional*, defaults to `True`):
119
 + Whether the process calling this is the main process or not. Useful during distributed training on
120
 + TPUs when you need to call this function on all processes. In this case, set `is_main_process=True` only on
121
+ the main process to avoid race conditions.
122
+ save_function (`Callable`):
123
 + The function to use to save the state dictionary. Useful during distributed training on TPUs when you
124
 + need to replace `torch.save` with another method. Can be configured with the environment variable
125
+ `DIFFUSERS_SAVE_MODE`.
126
+ safe_serialization (`bool`, *optional*, defaults to `True`):
127
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
128
+ variant (`str`, *optional*):
129
+ If specified, weights are saved in the format pytorch_model.<variant>.bin.
130
+ """
131
+ idx = 0
132
+ model_path_to_save = save_directory
133
+ for adapter in self.adapters:
134
+ adapter.save_pretrained(
135
+ model_path_to_save,
136
+ is_main_process=is_main_process,
137
+ save_function=save_function,
138
+ safe_serialization=safe_serialization,
139
+ variant=variant,
140
+ )
141
+
142
+ idx += 1
143
+ model_path_to_save = model_path_to_save + f"_{idx}"
144
+
145
+ @classmethod
146
+ def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
147
+ r"""
148
+ Instantiate a pretrained MultiAdapter model from multiple pre-trained adapter models.
149
+
150
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
151
+ the model, you should first set it back in training mode with `model.train()`.
152
+
153
+ The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
154
+ pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
155
+ task.
156
+
157
+ The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
158
+ weights are discarded.
159
+
160
+ Parameters:
161
+ pretrained_model_path (`os.PathLike`):
162
+ A path to a *directory* containing model weights saved using
163
+ [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`.
164
+ torch_dtype (`str` or `torch.dtype`, *optional*):
165
+ Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
166
+ will be automatically derived from the model's weights.
167
+ output_loading_info(`bool`, *optional*, defaults to `False`):
168
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
169
+ device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
170
+ A map that specifies where each submodule should go. It doesn't need to be refined to each
171
+ parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
172
+ same device.
173
+
174
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
175
+ more information about each option see [designing a device
176
+ map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
177
+ max_memory (`Dict`, *optional*):
178
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
179
+ GPU and the available CPU RAM if unset.
180
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
181
+ Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
182
+ also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
183
+ model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
184
+ setting this argument to `True` will raise an error.
185
+ variant (`str`, *optional*):
186
+ If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
187
+ ignored when using `from_flax`.
188
+ use_safetensors (`bool`, *optional*, defaults to `None`):
189
+ If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
190
+ `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
191
+ `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
192
+ """
193
+ idx = 0
194
+ adapters = []
195
+
196
+ # load adapter and append to list until no adapter directory exists anymore
197
+ # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained`
198
+ # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ...
199
+ model_path_to_load = pretrained_model_path
200
+ while os.path.isdir(model_path_to_load):
201
+ adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs)
202
+ adapters.append(adapter)
203
+
204
+ idx += 1
205
+ model_path_to_load = pretrained_model_path + f"_{idx}"
206
+
207
+ logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.")
208
+
209
+ if len(adapters) == 0:
210
+ raise ValueError(
211
+ f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
212
+ )
213
+
214
+ return cls(adapters)
215
+
216
+
217
+ class T2IAdapter(ModelMixin, ConfigMixin):
218
+ r"""
219
+ A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model
220
+ generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's
221
+ architecture follows the original implementation of
222
+ [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97)
223
+ and
224
+ [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).
225
+
226
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
227
 + implements for all models (such as downloading or saving).
228
+
229
+ Parameters:
230
+ in_channels (`int`, *optional*, defaults to 3):
231
 + Number of channels of the Adapter's input (*control image*). Set this parameter to 1 if you're using a
232
 + grayscale image as the *control image*.
233
+ channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
234
 + The number of channels of each downsample block's output hidden state. `len(channels)` will
235
+ also determine the number of downsample blocks in the Adapter.
236
+ num_res_blocks (`int`, *optional*, defaults to 2):
237
+ Number of ResNet blocks in each downsample block.
238
+ downscale_factor (`int`, *optional*, defaults to 8):
239
+ A factor that determines the total downscale factor of the Adapter.
240
+ adapter_type (`str`, *optional*, defaults to `full_adapter`):
241
+ The type of Adapter to use. Choose either `full_adapter` or `full_adapter_xl` or `light_adapter`.
242
+ """
243
+
244
+ @register_to_config
245
+ def __init__(
246
+ self,
247
+ in_channels: int = 3,
248
+ channels: List[int] = [320, 640, 1280, 1280],
249
+ num_res_blocks: int = 2,
250
+ downscale_factor: int = 8,
251
+ adapter_type: str = "full_adapter",
252
+ ):
253
+ super().__init__()
254
+
255
+ if adapter_type == "full_adapter":
256
+ self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor)
257
+ elif adapter_type == "full_adapter_xl":
258
+ self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor)
259
+ elif adapter_type == "light_adapter":
260
+ self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor)
261
+ else:
262
+ raise ValueError(
263
+ f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or "
264
+ "'full_adapter_xl' or 'light_adapter'."
265
+ )
266
+
267
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
268
+ r"""
269
+ This function processes the input tensor `x` through the adapter model and returns a list of feature tensors,
270
+ each representing information extracted at a different scale from the input. The length of the list is
271
+ determined by the number of downsample blocks in the Adapter, as specified by the `channels` parameter
272
+ during initialization.
273
+ """
274
+ return self.adapter(x)
275
+
276
+ @property
277
+ def total_downscale_factor(self):
278
+ return self.adapter.total_downscale_factor
279
+
280
+ @property
281
+ def downscale_factor(self):
282
+ """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are
283
+ not evenly divisible by the downscale_factor then an exception will be raised.
284
+ """
285
+ return self.adapter.unshuffle.downscale_factor
286
+
287
+
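A short usage sketch with the default configuration; the input is random placeholder data standing in for a keypose or depth map:

import torch

from diffusers import T2IAdapter

adapter = T2IAdapter()  # in_channels=3, channels=[320, 640, 1280, 1280], downscale_factor=8
control = torch.randn(1, 3, 512, 512)  # placeholder control image
features = adapter(control)
print([tuple(f.shape) for f in features])
# [(1, 320, 64, 64), (1, 640, 32, 32), (1, 1280, 16, 16), (1, 1280, 8, 8)]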
288
+ # full adapter
289
+
290
+
291
+ class FullAdapter(nn.Module):
292
+ r"""
293
+ See [`T2IAdapter`] for more information.
294
+ """
295
+
296
+ def __init__(
297
+ self,
298
+ in_channels: int = 3,
299
+ channels: List[int] = [320, 640, 1280, 1280],
300
+ num_res_blocks: int = 2,
301
+ downscale_factor: int = 8,
302
+ ):
303
+ super().__init__()
304
+
305
+ in_channels = in_channels * downscale_factor**2
306
+
307
+ self.unshuffle = nn.PixelUnshuffle(downscale_factor)
308
+ self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)
309
+
310
+ self.body = nn.ModuleList(
311
+ [
312
+ AdapterBlock(channels[0], channels[0], num_res_blocks),
313
+ *[
314
+ AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)
315
+ for i in range(1, len(channels))
316
+ ],
317
+ ]
318
+ )
319
+
320
+ self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1)
321
+
322
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
323
+ r"""
324
+ This method processes the input tensor `x` through the FullAdapter model and performs operations including
325
+ pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each
326
+ capturing information at a different stage of processing within the FullAdapter model. The number of feature
327
+ tensors in the list is determined by the number of downsample blocks specified during initialization.
328
+ """
329
+ x = self.unshuffle(x)
330
+ x = self.conv_in(x)
331
+
332
+ features = []
333
+
334
+ for block in self.body:
335
+ x = block(x)
336
+ features.append(x)
337
+
338
+ return features
339
+
340
+
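A worked example of the `total_downscale_factor` bookkeeping above, under the default configuration:

# PixelUnshuffle contributes a factor of downscale_factor (8 by default); each of
# the len(channels) - 1 downsampling AdapterBlocks halves the resolution again.
channels = [320, 640, 1280, 1280]
total = 8 * 2 ** (len(channels) - 1)
assert total == 64  # a 512x512 control image ends at 512 / 64 = 8x8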
341
+ class FullAdapterXL(nn.Module):
342
+ r"""
343
+ See [`T2IAdapter`] for more information.
344
+ """
345
+
346
+ def __init__(
347
+ self,
348
+ in_channels: int = 3,
349
+ channels: List[int] = [320, 640, 1280, 1280],
350
+ num_res_blocks: int = 2,
351
+ downscale_factor: int = 16,
352
+ ):
353
+ super().__init__()
354
+
355
+ in_channels = in_channels * downscale_factor**2
356
+
357
+ self.unshuffle = nn.PixelUnshuffle(downscale_factor)
358
+ self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)
359
+
360
+ self.body = []
361
+ # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32]
362
+ for i in range(len(channels)):
363
+ if i == 1:
364
+ self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks))
365
+ elif i == 2:
366
+ self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True))
367
+ else:
368
+ self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks))
369
+
370
+ self.body = nn.ModuleList(self.body)
371
+ # XL has only one downsampling AdapterBlock.
372
+ self.total_downscale_factor = downscale_factor * 2
373
+
374
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
375
+ r"""
376
+ This method takes the tensor `x` as input and processes it through the FullAdapterXL model: it unshuffles
377
+ pixels, applies an input convolution, and appends each block's output to a list of feature tensors.
378
+ """
379
+ x = self.unshuffle(x)
380
+ x = self.conv_in(x)
381
+
382
+ features = []
383
+
384
+ for block in self.body:
385
+ x = block(x)
386
+ features.append(x)
387
+
388
+ return features
389
+
390
+
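A sketch checking the feature dimensions listed in the comment above, using this module's class directly and assuming a 1024x1024 control image with the XL defaults:

import torch

xl_adapter = FullAdapterXL()  # downscale_factor=16; only the third stage downsamples
feats = xl_adapter(torch.randn(1, 3, 1024, 1024))
print([tuple(f.shape) for f in feats])
# [(1, 320, 64, 64), (1, 640, 64, 64), (1, 1280, 32, 32), (1, 1280, 32, 32)]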
391
+ class AdapterBlock(nn.Module):
392
+ r"""
393
+ An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and
394
+ `FullAdapterXL` models.
395
+
396
+ Parameters:
397
+ in_channels (`int`):
398
+ Number of channels of AdapterBlock's input.
399
+ out_channels (`int`):
400
+ Number of channels of AdapterBlock's output.
401
+ num_res_blocks (`int`):
402
+ Number of ResNet blocks in the AdapterBlock.
403
+ down (`bool`, *optional*, defaults to `False`):
404
+ Whether to perform downsampling on AdapterBlock's input.
405
+ """
406
+
407
+ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
408
+ super().__init__()
409
+
410
+ self.downsample = None
411
+ if down:
412
+ self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)
413
+
414
+ self.in_conv = None
415
+ if in_channels != out_channels:
416
+ self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
417
+
418
+ self.resnets = nn.Sequential(
419
+ *[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)],
420
+ )
421
+
422
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
423
+ r"""
424
+ This method takes tensor `x` as input and applies downsampling and an input convolution if the
425
+ `self.downsample` and `self.in_conv` attributes of the AdapterBlock are set. It then applies a series of
426
+ residual blocks to the input tensor.
427
+ """
428
+ if self.downsample is not None:
429
+ x = self.downsample(x)
430
+
431
+ if self.in_conv is not None:
432
+ x = self.in_conv(x)
433
+
434
+ x = self.resnets(x)
435
+
436
+ return x
437
+
438
+
439
+ class AdapterResnetBlock(nn.Module):
440
+ r"""
441
+ An `AdapterResnetBlock` is a helper model that implements a ResNet-like block.
442
+
443
+ Parameters:
444
+ channels (`int`):
445
+ Number of channels of AdapterResnetBlock's input and output.
446
+ """
447
+
448
+ def __init__(self, channels: int):
449
+ super().__init__()
450
+ self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
451
+ self.act = nn.ReLU()
452
+ self.block2 = nn.Conv2d(channels, channels, kernel_size=1)
453
+
454
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
455
+ r"""
456
+ This method takes input tensor `x` and applies a convolutional layer, a ReLU activation, and another
457
+ convolutional layer, then returns the result added to the input tensor (a residual connection).
458
+ """
459
+
460
+ h = self.act(self.block1(x))
461
+ h = self.block2(h)
462
+
463
+ return h + x
464
+
465
+
466
+ # light adapter
467
+
468
+
469
+ class LightAdapter(nn.Module):
470
+ r"""
471
+ See [`T2IAdapter`] for more information.
472
+ """
473
+
474
+ def __init__(
475
+ self,
476
+ in_channels: int = 3,
477
+ channels: List[int] = [320, 640, 1280],
478
+ num_res_blocks: int = 4,
479
+ downscale_factor: int = 8,
480
+ ):
481
+ super().__init__()
482
+
483
+ in_channels = in_channels * downscale_factor**2
484
+
485
+ self.unshuffle = nn.PixelUnshuffle(downscale_factor)
486
+
487
+ self.body = nn.ModuleList(
488
+ [
489
+ LightAdapterBlock(in_channels, channels[0], num_res_blocks),
490
+ *[
491
+ LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True)
492
+ for i in range(len(channels) - 1)
493
+ ],
494
+ LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True),
495
+ ]
496
+ )
497
+
498
+ self.total_downscale_factor = downscale_factor * (2 ** len(channels))
499
+
500
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
501
+ r"""
502
+ This method takes the input tensor `x`, downscales it, and appends each block's output to a list of feature tensors. Each
503
+ feature tensor corresponds to a different level of processing within the LightAdapter.
504
+ """
505
+ x = self.unshuffle(x)
506
+
507
+ features = []
508
+
509
+ for block in self.body:
510
+ x = block(x)
511
+ features.append(x)
512
+
513
+ return features
514
+
515
+
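The light variant's bookkeeping, sketched for its defaults; unlike `FullAdapter`, the extra final block also downsamples, giving `len(channels)` halvings rather than `len(channels) - 1`:

channels = [320, 640, 1280]
total = 8 * (2 ** len(channels))  # downscale_factor=8 by default
assert total == 64  # the same overall stride as FullAdapter's defaults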
516
+ class LightAdapterBlock(nn.Module):
517
+ r"""
518
+ A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the
519
+ `LightAdapter` model.
520
+
521
+ Parameters:
522
+ in_channels (`int`):
523
+ Number of channels of LightAdapterBlock's input.
524
+ out_channels (`int`):
525
+ Number of channels of LightAdapterBlock's output.
526
+ num_res_blocks (`int`):
527
+ Number of LightAdapterResnetBlocks in the LightAdapterBlock.
528
+ down (`bool`, *optional*, defaults to `False`):
529
+ Whether to perform downsampling on LightAdapterBlock's input.
530
+ """
531
+
532
+ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
533
+ super().__init__()
534
+ mid_channels = out_channels // 4
535
+
536
+ self.downsample = None
537
+ if down:
538
+ self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)
539
+
540
+ self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
541
+ self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)])
542
+ self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1)
543
+
544
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
545
+ r"""
546
+ This method takes tensor `x` as input and performs downsampling if required. It then applies an input
547
+ convolution, a sequence of residual blocks, and an output convolution.
548
+ """
549
+ if self.downsample is not None:
550
+ x = self.downsample(x)
551
+
552
+ x = self.in_conv(x)
553
+ x = self.resnets(x)
554
+ x = self.out_conv(x)
555
+
556
+ return x
557
+
558
+
559
+ class LightAdapterResnetBlock(nn.Module):
560
+ """
561
+ A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different
562
+ architecture than `AdapterResnetBlock`.
563
+
564
+ Parameters:
565
+ channels (`int`):
566
+ Number of channels of LightAdapterResnetBlock's input and output.
567
+ """
568
+
569
+ def __init__(self, channels: int):
570
+ super().__init__()
571
+ self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
572
+ self.act = nn.ReLU()
573
+ self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
574
+
575
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
576
+ r"""
577
+ This function takes input tensor `x`, processes it through one convolutional layer, a ReLU activation, and
578
+ another convolutional layer, and adds the result to the input tensor.
579
+ """
580
+
581
+ h = self.act(self.block1(x))
582
+ h = self.block2(h)
583
+
584
+ return h + x
src/diffusers/models/attention.py ADDED
@@ -0,0 +1,668 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, Optional
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from ..utils import USE_PEFT_BACKEND
21
+ from ..utils.torch_utils import maybe_allow_in_graph
22
+ from .activations import GEGLU, GELU, ApproximateGELU
23
+ from .attention_processor import Attention
24
+ from .embeddings import SinusoidalPositionalEmbedding
25
+ from .lora import LoRACompatibleLinear
26
+ from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm
27
+
28
+
29
+ def _chunked_feed_forward(
30
+ ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None
31
+ ):
32
+ # "feed_forward_chunk_size" can be used to save memory
33
+ if hidden_states.shape[chunk_dim] % chunk_size != 0:
34
+ raise ValueError(
35
+ f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
36
+ )
37
+
38
+ num_chunks = hidden_states.shape[chunk_dim] // chunk_size
39
+ if lora_scale is None:
40
+ ff_output = torch.cat(
41
+ [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
42
+ dim=chunk_dim,
43
+ )
44
+ else:
45
+ # TODO(Patrick): LoRA scale can be removed once PEFT refactor is complete
46
+ ff_output = torch.cat(
47
+ [ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
48
+ dim=chunk_dim,
49
+ )
50
+
51
+ return ff_output
52
+
53
+
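A minimal sketch of the chunking trade-off (tensor sizes are illustrative): splitting the token dimension bounds the peak activation memory of the feed-forward, and the concatenated result matches the unchunked one. `FeedForward` is defined later in this file.

import torch

ff = FeedForward(dim=320, activation_fn="geglu")
hidden = torch.randn(2, 4096, 320)
full = ff(hidden)
chunked = _chunked_feed_forward(ff, hidden, chunk_dim=1, chunk_size=1024)
assert torch.allclose(full, chunked, atol=1e-5)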
54
+ @maybe_allow_in_graph
55
+ class GatedSelfAttentionDense(nn.Module):
56
+ r"""
57
+ A gated self-attention dense layer that combines visual features and object features.
58
+
59
+ Parameters:
60
+ query_dim (`int`): The number of channels in the query.
61
+ context_dim (`int`): The number of channels in the context.
62
+ n_heads (`int`): The number of heads to use for attention.
63
+ d_head (`int`): The number of channels in each head.
64
+ """
65
+
66
+ def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
67
+ super().__init__()
68
+
69
+ # we need a linear projection since we need cat visual feature and obj feature
70
+ self.linear = nn.Linear(context_dim, query_dim)
71
+
72
+ self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
73
+ self.ff = FeedForward(query_dim, activation_fn="geglu")
74
+
75
+ self.norm1 = nn.LayerNorm(query_dim)
76
+ self.norm2 = nn.LayerNorm(query_dim)
77
+
78
+ self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
79
+ self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))
80
+
81
+ self.enabled = True
82
+
83
+ def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
84
+ if not self.enabled:
85
+ return x
86
+
87
+ n_visual = x.shape[1]
88
+ objs = self.linear(objs)
89
+
90
+ x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
91
+ x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))
92
+
93
+ return x
94
+
95
+
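An illustrative sketch (all dimensions are made up): visual tokens attend jointly to themselves and to the projected object embeddings. Because `alpha_attn` and `alpha_dense` are initialized to 0 and `tanh(0) == 0`, an untrained layer is exactly the identity mapping.

import torch

fuser = GatedSelfAttentionDense(query_dim=320, context_dim=768, n_heads=8, d_head=40)
x = torch.randn(2, 256, 320)    # visual features
objs = torch.randn(2, 30, 768)  # grounding / object features
out = fuser(x, objs)            # same shape as x; equal to x at initialization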
96
+ @maybe_allow_in_graph
97
+ class BasicTransformerBlock(nn.Module):
98
+ r"""
99
+ A basic Transformer block.
100
+
101
+ Parameters:
102
+ dim (`int`): The number of channels in the input and output.
103
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
104
+ attention_head_dim (`int`): The number of channels in each head.
105
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
106
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
107
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
108
+ num_embeds_ada_norm (:
109
+ obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
110
+ attention_bias (:
111
+ obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
112
+ only_cross_attention (`bool`, *optional*):
113
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
114
+ double_self_attention (`bool`, *optional*):
115
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
116
+ upcast_attention (`bool`, *optional*):
117
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
118
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
119
+ Whether to use learnable elementwise affine parameters for normalization.
120
+ norm_type (`str`, *optional*, defaults to `"layer_norm"`):
121
+ The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
122
+ final_dropout (`bool` *optional*, defaults to False):
123
+ Whether to apply a final dropout after the last feed-forward layer.
124
+ attention_type (`str`, *optional*, defaults to `"default"`):
125
+ The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
126
+ positional_embeddings (`str`, *optional*, defaults to `None`):
127
+ The type of positional embeddings to apply to.
128
+ num_positional_embeddings (`int`, *optional*, defaults to `None`):
129
+ The maximum number of positional embeddings to apply.
130
+ """
131
+
132
+ def __init__(
133
+ self,
134
+ dim: int,
135
+ num_attention_heads: int,
136
+ attention_head_dim: int,
137
+ dropout=0.0,
138
+ cross_attention_dim: Optional[int] = None,
139
+ activation_fn: str = "geglu",
140
+ num_embeds_ada_norm: Optional[int] = None,
141
+ attention_bias: bool = False,
142
+ only_cross_attention: bool = False,
143
+ double_self_attention: bool = False,
144
+ upcast_attention: bool = False,
145
+ norm_elementwise_affine: bool = True,
146
+ norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'
147
+ norm_eps: float = 1e-5,
148
+ final_dropout: bool = False,
149
+ attention_type: str = "default",
150
+ positional_embeddings: Optional[str] = None,
151
+ num_positional_embeddings: Optional[int] = None,
152
+ ada_norm_continous_conditioning_embedding_dim: Optional[int] = None,
153
+ ada_norm_bias: Optional[int] = None,
154
+ ff_inner_dim: Optional[int] = None,
155
+ ff_bias: bool = True,
156
+ attention_out_bias: bool = True,
157
+ ):
158
+ super().__init__()
159
+ self.only_cross_attention = only_cross_attention
160
+
161
+ self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
162
+ self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
163
+ self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
164
+ self.use_layer_norm = norm_type == "layer_norm"
165
+ self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"
166
+
167
+ if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
168
+ raise ValueError(
169
+ f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
170
+ f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
171
+ )
172
+
173
+ if positional_embeddings and (num_positional_embeddings is None):
174
+ raise ValueError(
175
+ "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined."
176
+ )
177
+
178
+ if positional_embeddings == "sinusoidal":
179
+ self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
180
+ else:
181
+ self.pos_embed = None
182
+
183
+ # Define 3 blocks. Each block has its own normalization layer.
184
+ # 1. Self-Attn
185
+ if self.use_ada_layer_norm:
186
+ self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
187
+ elif self.use_ada_layer_norm_zero:
188
+ self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
189
+ elif self.use_ada_layer_norm_continuous:
190
+ self.norm1 = AdaLayerNormContinuous(
191
+ dim,
192
+ ada_norm_continous_conditioning_embedding_dim,
193
+ norm_elementwise_affine,
194
+ norm_eps,
195
+ ada_norm_bias,
196
+ "rms_norm",
197
+ )
198
+ else:
199
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
200
+
201
+ self.attn1 = Attention(
202
+ query_dim=dim,
203
+ heads=num_attention_heads,
204
+ dim_head=attention_head_dim,
205
+ dropout=dropout,
206
+ bias=attention_bias,
207
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
208
+ upcast_attention=upcast_attention,
209
+ out_bias=attention_out_bias,
210
+ )
211
+
212
+ # 2. Cross-Attn
213
+ if cross_attention_dim is not None or double_self_attention:
214
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
215
+ # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
216
+ # the second cross attention block.
217
+ if self.use_ada_layer_norm:
218
+ self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm)
219
+ elif self.use_ada_layer_norm_continuous:
220
+ self.norm2 = AdaLayerNormContinuous(
221
+ dim,
222
+ ada_norm_continous_conditioning_embedding_dim,
223
+ norm_elementwise_affine,
224
+ norm_eps,
225
+ ada_norm_bias,
226
+ "rms_norm",
227
+ )
228
+ else:
229
+ self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
230
+
231
+ self.attn2 = Attention(
232
+ query_dim=dim,
233
+ cross_attention_dim=cross_attention_dim if not double_self_attention else None,
234
+ heads=num_attention_heads,
235
+ dim_head=attention_head_dim,
236
+ dropout=dropout,
237
+ bias=attention_bias,
238
+ upcast_attention=upcast_attention,
239
+ out_bias=attention_out_bias,
240
+ ) # is self-attn if encoder_hidden_states is none
241
+ else:
242
+ self.norm2 = None
243
+ self.attn2 = None
244
+
245
+ # 3. Feed-forward
246
+ if self.use_ada_layer_norm_continuous:
247
+ self.norm3 = AdaLayerNormContinuous(
248
+ dim,
249
+ ada_norm_continous_conditioning_embedding_dim,
250
+ norm_elementwise_affine,
251
+ norm_eps,
252
+ ada_norm_bias,
253
+ "layer_norm",
254
+ )
255
+ elif not self.use_ada_layer_norm_single:
256
+ self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
257
+
258
+ self.ff = FeedForward(
259
+ dim,
260
+ dropout=dropout,
261
+ activation_fn=activation_fn,
262
+ final_dropout=final_dropout,
263
+ inner_dim=ff_inner_dim,
264
+ bias=ff_bias,
265
+ )
266
+
267
+ # 4. Fuser
268
+ if attention_type == "gated" or attention_type == "gated-text-image":
269
+ self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)
270
+
271
+ # 5. Scale-shift for PixArt-Alpha.
272
+ if self.use_ada_layer_norm_single:
273
+ self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)
274
+
275
+ # let chunk size default to None
276
+ self._chunk_size = None
277
+ self._chunk_dim = 0
278
+
279
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
280
+ # Sets chunk feed-forward
281
+ self._chunk_size = chunk_size
282
+ self._chunk_dim = dim
283
+
284
+ def forward(
285
+ self,
286
+ hidden_states: torch.FloatTensor,
287
+ attention_mask: Optional[torch.FloatTensor] = None,
288
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
289
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
290
+ timestep: Optional[torch.LongTensor] = None,
291
+ cross_attention_kwargs: Dict[str, Any] = None,
292
+ class_labels: Optional[torch.LongTensor] = None,
293
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
294
+ ) -> torch.FloatTensor:
295
+ # Notice that normalization is always applied before the real computation in the following blocks.
296
+ # 0. Self-Attention
297
+ batch_size = hidden_states.shape[0]
298
+
299
+ if self.use_ada_layer_norm:
300
+ norm_hidden_states = self.norm1(hidden_states, timestep)
301
+ elif self.use_ada_layer_norm_zero:
302
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
303
+ hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
304
+ )
305
+ elif self.use_layer_norm:
306
+ norm_hidden_states = self.norm1(hidden_states)
307
+ elif self.use_ada_layer_norm_continuous:
308
+ norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
309
+ elif self.use_ada_layer_norm_single:
310
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
311
+ self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
312
+ ).chunk(6, dim=1)
313
+ norm_hidden_states = self.norm1(hidden_states)
314
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
315
+ norm_hidden_states = norm_hidden_states.squeeze(1)
316
+ else:
317
+ raise ValueError("Incorrect norm used")
318
+
319
+ if self.pos_embed is not None:
320
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
321
+
322
+ # 1. Retrieve lora scale.
323
+ lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
324
+
325
+ # 2. Prepare GLIGEN inputs
326
+ cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
327
+ gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
328
+
329
+ attn_output = self.attn1(
330
+ norm_hidden_states,
331
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
332
+ attention_mask=attention_mask,
333
+ **cross_attention_kwargs,
334
+ )
335
+ if self.use_ada_layer_norm_zero:
336
+ attn_output = gate_msa.unsqueeze(1) * attn_output
337
+ elif self.use_ada_layer_norm_single:
338
+ attn_output = gate_msa * attn_output
339
+
340
+ hidden_states = attn_output + hidden_states
341
+ if hidden_states.ndim == 4:
342
+ hidden_states = hidden_states.squeeze(1)
343
+
344
+ # 2.5 GLIGEN Control
345
+ if gligen_kwargs is not None:
346
+ hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
347
+
348
+ # 3. Cross-Attention
349
+ if self.attn2 is not None:
350
+ if self.use_ada_layer_norm:
351
+ norm_hidden_states = self.norm2(hidden_states, timestep)
352
+ elif self.use_ada_layer_norm_zero or self.use_layer_norm:
353
+ norm_hidden_states = self.norm2(hidden_states)
354
+ elif self.use_ada_layer_norm_single:
355
+ # For PixArt norm2 isn't applied here:
356
+ # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
357
+ norm_hidden_states = hidden_states
358
+ elif self.use_ada_layer_norm_continuous:
359
+ norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
360
+ else:
361
+ raise ValueError("Incorrect norm")
362
+
363
+ if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
364
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
365
+
366
+ attn_output = self.attn2(
367
+ norm_hidden_states,
368
+ encoder_hidden_states=encoder_hidden_states,
369
+ attention_mask=encoder_attention_mask,
370
+ **cross_attention_kwargs,
371
+ )
372
+ hidden_states = attn_output + hidden_states
373
+
374
+ # 4. Feed-forward
375
+ if self.use_ada_layer_norm_continuous:
376
+ norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
377
+ elif not self.use_ada_layer_norm_single:
378
+ norm_hidden_states = self.norm3(hidden_states)
379
+
380
+ if self.use_ada_layer_norm_zero:
381
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
382
+
383
+ if self.use_ada_layer_norm_single:
384
+ norm_hidden_states = self.norm2(hidden_states)
385
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
386
+
387
+ if self._chunk_size is not None:
388
+ # "feed_forward_chunk_size" can be used to save memory
389
+ ff_output = _chunked_feed_forward(
390
+ self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale
391
+ )
392
+ else:
393
+ ff_output = self.ff(norm_hidden_states, scale=lora_scale)
394
+
395
+ if self.use_ada_layer_norm_zero:
396
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
397
+ elif self.use_ada_layer_norm_single:
398
+ ff_output = gate_mlp * ff_output
399
+
400
+ hidden_states = ff_output + hidden_states
401
+ if hidden_states.ndim == 4:
402
+ hidden_states = hidden_states.squeeze(1)
403
+
404
+ return hidden_states
405
+
406
+
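A minimal sketch of the default (`layer_norm`) configuration with illustrative dimensions; `encoder_hidden_states` stands in for, e.g., CLIP text embeddings:

import torch

block = BasicTransformerBlock(
    dim=320, num_attention_heads=8, attention_head_dim=40, cross_attention_dim=768
)
hidden = torch.randn(2, 1024, 320)  # flattened image tokens
text = torch.randn(2, 77, 768)      # placeholder text conditioning
out = block(hidden, encoder_hidden_states=text)
print(out.shape)  # torch.Size([2, 1024, 320])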
407
+ @maybe_allow_in_graph
408
+ class TemporalBasicTransformerBlock(nn.Module):
409
+ r"""
410
+ A basic Transformer block for video like data.
411
+
412
+ Parameters:
413
+ dim (`int`): The number of channels in the input and output.
414
+ time_mix_inner_dim (`int`): The number of channels for temporal attention.
415
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
416
+ attention_head_dim (`int`): The number of channels in each head.
417
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
418
+ """
419
+
420
+ def __init__(
421
+ self,
422
+ dim: int,
423
+ time_mix_inner_dim: int,
424
+ num_attention_heads: int,
425
+ attention_head_dim: int,
426
+ cross_attention_dim: Optional[int] = None,
427
+ ):
428
+ super().__init__()
429
+ self.is_res = dim == time_mix_inner_dim
430
+
431
+ self.norm_in = nn.LayerNorm(dim)
432
+
433
+ # Define 3 blocks. Each block has its own normalization layer.
434
+ # 1. Self-Attn
435
+ self.norm_in = nn.LayerNorm(dim)
436
+ self.ff_in = FeedForward(
437
+ dim,
438
+ dim_out=time_mix_inner_dim,
439
+ activation_fn="geglu",
440
+ )
441
+
442
+ self.norm1 = nn.LayerNorm(time_mix_inner_dim)
443
+ self.attn1 = Attention(
444
+ query_dim=time_mix_inner_dim,
445
+ heads=num_attention_heads,
446
+ dim_head=attention_head_dim,
447
+ cross_attention_dim=None,
448
+ )
449
+
450
+ # 2. Cross-Attn
451
+ if cross_attention_dim is not None:
452
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
453
+ # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
454
+ # the second cross attention block.
455
+ self.norm2 = nn.LayerNorm(time_mix_inner_dim)
456
+ self.attn2 = Attention(
457
+ query_dim=time_mix_inner_dim,
458
+ cross_attention_dim=cross_attention_dim,
459
+ heads=num_attention_heads,
460
+ dim_head=attention_head_dim,
461
+ ) # is self-attn if encoder_hidden_states is none
462
+ else:
463
+ self.norm2 = None
464
+ self.attn2 = None
465
+
466
+ # 3. Feed-forward
467
+ self.norm3 = nn.LayerNorm(time_mix_inner_dim)
468
+ self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")
469
+
470
+ # let chunk size default to None
471
+ self._chunk_size = None
472
+ self._chunk_dim = None
473
+
474
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
475
+ # Sets chunk feed-forward
476
+ self._chunk_size = chunk_size
477
+ # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
478
+ self._chunk_dim = 1
479
+
480
+ def forward(
481
+ self,
482
+ hidden_states: torch.FloatTensor,
483
+ num_frames: int,
484
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
485
+ ) -> torch.FloatTensor:
486
+ # Notice that normalization is always applied before the real computation in the following blocks.
487
+ # 0. Self-Attention
488
+ batch_size = hidden_states.shape[0]
489
+
490
+ batch_frames, seq_length, channels = hidden_states.shape
491
+ batch_size = batch_frames // num_frames
492
+
493
+ hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels)
494
+ hidden_states = hidden_states.permute(0, 2, 1, 3)
495
+ hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels)
496
+
497
+ residual = hidden_states
498
+ hidden_states = self.norm_in(hidden_states)
499
+
500
+ if self._chunk_size is not None:
501
+ hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
502
+ else:
503
+ hidden_states = self.ff_in(hidden_states)
504
+
505
+ if self.is_res:
506
+ hidden_states = hidden_states + residual
507
+
508
+ norm_hidden_states = self.norm1(hidden_states)
509
+ attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
510
+ hidden_states = attn_output + hidden_states
511
+
512
+ # 3. Cross-Attention
513
+ if self.attn2 is not None:
514
+ norm_hidden_states = self.norm2(hidden_states)
515
+ attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
516
+ hidden_states = attn_output + hidden_states
517
+
518
+ # 4. Feed-forward
519
+ norm_hidden_states = self.norm3(hidden_states)
520
+
521
+ if self._chunk_size is not None:
522
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
523
+ else:
524
+ ff_output = self.ff(norm_hidden_states)
525
+
526
+ if self.is_res:
527
+ hidden_states = ff_output + hidden_states
528
+ else:
529
+ hidden_states = ff_output
530
+
531
+ hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels)
532
+ hidden_states = hidden_states.permute(0, 2, 1, 3)
533
+ hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels)
534
+
535
+ return hidden_states
536
+
537
+
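A sketch of the expected input layout (sizes are illustrative): frames are folded into the batch dimension as `(batch * num_frames, seq_len, dim)`, and the block internally regroups tokens so self-attention runs across frames at each spatial position.

import torch

temporal = TemporalBasicTransformerBlock(
    dim=320, time_mix_inner_dim=320, num_attention_heads=8, attention_head_dim=40
)
frames = torch.randn(2 * 14, 1024, 320)  # batch=2, num_frames=14
out = temporal(frames, num_frames=14)
print(out.shape)  # torch.Size([28, 1024, 320])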
538
+ class SkipFFTransformerBlock(nn.Module):
539
+ def __init__(
540
+ self,
541
+ dim: int,
542
+ num_attention_heads: int,
543
+ attention_head_dim: int,
544
+ kv_input_dim: int,
545
+ kv_input_dim_proj_use_bias: bool,
546
+ dropout=0.0,
547
+ cross_attention_dim: Optional[int] = None,
548
+ attention_bias: bool = False,
549
+ attention_out_bias: bool = True,
550
+ ):
551
+ super().__init__()
552
+ if kv_input_dim != dim:
553
+ self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias)
554
+ else:
555
+ self.kv_mapper = None
556
+
557
+ self.norm1 = RMSNorm(dim, 1e-06)
558
+
559
+ self.attn1 = Attention(
560
+ query_dim=dim,
561
+ heads=num_attention_heads,
562
+ dim_head=attention_head_dim,
563
+ dropout=dropout,
564
+ bias=attention_bias,
565
+ cross_attention_dim=cross_attention_dim,
566
+ out_bias=attention_out_bias,
567
+ )
568
+
569
+ self.norm2 = RMSNorm(dim, 1e-06)
570
+
571
+ self.attn2 = Attention(
572
+ query_dim=dim,
573
+ cross_attention_dim=cross_attention_dim,
574
+ heads=num_attention_heads,
575
+ dim_head=attention_head_dim,
576
+ dropout=dropout,
577
+ bias=attention_bias,
578
+ out_bias=attention_out_bias,
579
+ )
580
+
581
+ def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs):
582
+ cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
583
+
584
+ if self.kv_mapper is not None:
585
+ encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states))
586
+
587
+ norm_hidden_states = self.norm1(hidden_states)
588
+
589
+ attn_output = self.attn1(
590
+ norm_hidden_states,
591
+ encoder_hidden_states=encoder_hidden_states,
592
+ **cross_attention_kwargs,
593
+ )
594
+
595
+ hidden_states = attn_output + hidden_states
596
+
597
+ norm_hidden_states = self.norm2(hidden_states)
598
+
599
+ attn_output = self.attn2(
600
+ norm_hidden_states,
601
+ encoder_hidden_states=encoder_hidden_states,
602
+ **cross_attention_kwargs,
603
+ )
604
+
605
+ hidden_states = attn_output + hidden_states
606
+
607
+ return hidden_states
608
+
609
+
610
+ class FeedForward(nn.Module):
611
+ r"""
612
+ A feed-forward layer.
613
+
614
+ Parameters:
615
+ dim (`int`): The number of channels in the input.
616
+ dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
617
+ mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
618
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
619
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
620
+ final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
621
+ bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
622
+ """
623
+
624
+ def __init__(
625
+ self,
626
+ dim: int,
627
+ dim_out: Optional[int] = None,
628
+ mult: int = 4,
629
+ dropout: float = 0.0,
630
+ activation_fn: str = "geglu",
631
+ final_dropout: bool = False,
632
+ inner_dim=None,
633
+ bias: bool = True,
634
+ ):
635
+ super().__init__()
636
+ if inner_dim is None:
637
+ inner_dim = int(dim * mult)
638
+ dim_out = dim_out if dim_out is not None else dim
639
+ linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
640
+
641
+ if activation_fn == "gelu":
642
+ act_fn = GELU(dim, inner_dim, bias=bias)
643
+ if activation_fn == "gelu-approximate":
644
+ act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
645
+ elif activation_fn == "geglu":
646
+ act_fn = GEGLU(dim, inner_dim, bias=bias)
647
+ elif activation_fn == "geglu-approximate":
648
+ act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
649
+
650
+ self.net = nn.ModuleList([])
651
+ # project in
652
+ self.net.append(act_fn)
653
+ # project dropout
654
+ self.net.append(nn.Dropout(dropout))
655
+ # project out
656
+ self.net.append(linear_cls(inner_dim, dim_out, bias=bias))
657
+ # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
658
+ if final_dropout:
659
+ self.net.append(nn.Dropout(dropout))
660
+
661
+ def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
662
+ compatible_cls = (GEGLU,) if USE_PEFT_BACKEND else (GEGLU, LoRACompatibleLinear)
663
+ for module in self.net:
664
+ if isinstance(module, compatible_cls):
665
+ hidden_states = module(hidden_states, scale)
666
+ else:
667
+ hidden_states = module(hidden_states)
668
+ return hidden_states
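A short sketch of the default configuration: GEGLU projects `dim` to `2 * (4 * dim)`, gates one half with the GELU of the other, and the final linear layer maps back to `dim`.

import torch

ff = FeedForward(dim=320)  # activation_fn="geglu", mult=4 -> inner_dim=1280
tokens = torch.randn(2, 77, 320)
print(ff(tokens).shape)  # torch.Size([2, 77, 320])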
src/diffusers/models/attention_flax.py ADDED
@@ -0,0 +1,494 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ import math
17
+
18
+ import flax.linen as nn
19
+ import jax
20
+ import jax.numpy as jnp
21
+
22
+
23
+ def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096):
24
+ """Multi-head dot product attention with a limited number of queries."""
25
+ num_kv, num_heads, k_features = key.shape[-3:]
26
+ v_features = value.shape[-1]
27
+ key_chunk_size = min(key_chunk_size, num_kv)
28
+ query = query / jnp.sqrt(k_features)
29
+
30
+ @functools.partial(jax.checkpoint, prevent_cse=False)
31
+ def summarize_chunk(query, key, value):
32
+ attn_weights = jnp.einsum("...qhd,...khd->...qhk", query, key, precision=precision)
33
+
34
+ max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
35
+ max_score = jax.lax.stop_gradient(max_score)
36
+ exp_weights = jnp.exp(attn_weights - max_score)
37
+
38
+ exp_values = jnp.einsum("...vhf,...qhv->...qhf", value, exp_weights, precision=precision)
39
+ max_score = jnp.einsum("...qhk->...qh", max_score)
40
+
41
+ return (exp_values, exp_weights.sum(axis=-1), max_score)
42
+
43
+ def chunk_scanner(chunk_idx):
44
+ # julienne key array
45
+ key_chunk = jax.lax.dynamic_slice(
46
+ operand=key,
47
+ start_indices=[0] * (key.ndim - 3) + [chunk_idx, 0, 0], # [...,k,h,d]
48
+ slice_sizes=list(key.shape[:-3]) + [key_chunk_size, num_heads, k_features], # [...,k,h,d]
49
+ )
50
+
51
+ # julienne value array
52
+ value_chunk = jax.lax.dynamic_slice(
53
+ operand=value,
54
+ start_indices=[0] * (value.ndim - 3) + [chunk_idx, 0, 0], # [...,v,h,d]
55
+ slice_sizes=list(value.shape[:-3]) + [key_chunk_size, num_heads, v_features], # [...,v,h,d]
56
+ )
57
+
58
+ return summarize_chunk(query, key_chunk, value_chunk)
59
+
60
+ chunk_values, chunk_weights, chunk_max = jax.lax.map(f=chunk_scanner, xs=jnp.arange(0, num_kv, key_chunk_size))
61
+
62
+ global_max = jnp.max(chunk_max, axis=0, keepdims=True)
63
+ max_diffs = jnp.exp(chunk_max - global_max)
64
+
65
+ chunk_values *= jnp.expand_dims(max_diffs, axis=-1)
66
+ chunk_weights *= max_diffs
67
+
68
+ all_values = chunk_values.sum(axis=0)
69
+ all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis=0)
70
+
71
+ return all_values / all_weights
72
+
73
+
74
+ def jax_memory_efficient_attention(
75
+ query, key, value, precision=jax.lax.Precision.HIGHEST, query_chunk_size: int = 1024, key_chunk_size: int = 4096
76
+ ):
77
+ r"""
78
+ Flax Memory-efficient multi-head dot product attention. https://arxiv.org/abs/2112.05682v2
79
+ https://github.com/AminRezaei0x443/memory-efficient-attention
80
+
81
+ Args:
82
+ query (`jnp.ndarray`): (batch..., query_length, head, query_key_depth_per_head)
83
+ key (`jnp.ndarray`): (batch..., key_value_length, head, query_key_depth_per_head)
84
+ value (`jnp.ndarray`): (batch..., key_value_length, head, value_depth_per_head)
85
+ precision (`jax.lax.Precision`, *optional*, defaults to `jax.lax.Precision.HIGHEST`):
86
+ numerical precision for computation
87
+ query_chunk_size (`int`, *optional*, defaults to 1024):
88
+ chunk size to divide the query array; the value must divide query_length equally without remainder
89
+ key_chunk_size (`int`, *optional*, defaults to 4096):
90
+ chunk size to divide the key and value arrays; the value must divide key_value_length equally without remainder
91
+
92
+ Returns:
93
+ (`jnp.ndarray`) with shape of (batch..., query_length, head, value_depth_per_head)
94
+ """
95
+ num_q, num_heads, q_features = query.shape[-3:]
96
+
97
+ def chunk_scanner(chunk_idx, _):
98
+ # julienne query array
99
+ query_chunk = jax.lax.dynamic_slice(
100
+ operand=query,
101
+ start_indices=([0] * (query.ndim - 3)) + [chunk_idx, 0, 0], # [...,q,h,d]
102
+ slice_sizes=list(query.shape[:-3]) + [min(query_chunk_size, num_q), num_heads, q_features], # [...,q,h,d]
103
+ )
104
+
105
+ return (
106
+ chunk_idx + query_chunk_size,  # carry value; unused, ignore it
107
+ _query_chunk_attention(
108
+ query=query_chunk, key=key, value=value, precision=precision, key_chunk_size=key_chunk_size
109
+ ),
110
+ )
111
+
112
+ _, res = jax.lax.scan(
113
+ f=chunk_scanner,
114
+ init=0,
115
+ xs=None,
116
+ length=math.ceil(num_q / query_chunk_size),  # number of query chunks
117
+ )
118
+
119
+ return jnp.concatenate(res, axis=-3) # fuse the chunked result back
120
+
121
+
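An illustrative call (shapes are made up); note the `(batch, length, heads, head_dim)` layout and that the chunk sizes must divide the respective lengths without remainder:

import jax
import jax.numpy as jnp

rng = jax.random.PRNGKey(0)
q = jax.random.normal(rng, (1, 1024, 8, 64))
k = jax.random.normal(rng, (1, 1024, 8, 64))
v = jax.random.normal(rng, (1, 1024, 8, 64))
out = jax_memory_efficient_attention(q, k, v, query_chunk_size=256, key_chunk_size=1024)
print(out.shape)  # (1, 1024, 8, 64)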
122
+ class FlaxAttention(nn.Module):
123
+ r"""
124
+ A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762
125
+
126
+ Parameters:
127
+ query_dim (:obj:`int`):
128
+ Input hidden states dimension
129
+ heads (:obj:`int`, *optional*, defaults to 8):
130
+ Number of heads
131
+ dim_head (:obj:`int`, *optional*, defaults to 64):
132
+ Hidden states dimension inside each head
133
+ dropout (:obj:`float`, *optional*, defaults to 0.0):
134
+ Dropout rate
135
+ use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
136
+ enable memory efficient attention https://arxiv.org/abs/2112.05682
137
+ split_head_dim (`bool`, *optional*, defaults to `False`):
138
+ Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
139
+ enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
140
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
141
+ Parameters `dtype`
142
+
143
+ """
144
+
145
+ query_dim: int
146
+ heads: int = 8
147
+ dim_head: int = 64
148
+ dropout: float = 0.0
149
+ use_memory_efficient_attention: bool = False
150
+ split_head_dim: bool = False
151
+ dtype: jnp.dtype = jnp.float32
152
+
153
+ def setup(self):
154
+ inner_dim = self.dim_head * self.heads
155
+ self.scale = self.dim_head**-0.5
156
+
157
+ # Weights were exported with old names {to_q, to_k, to_v, to_out}
158
+ self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q")
159
+ self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k")
160
+ self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v")
161
+
162
+ self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0")
163
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
164
+
165
+ def reshape_heads_to_batch_dim(self, tensor):
166
+ batch_size, seq_len, dim = tensor.shape
167
+ head_size = self.heads
168
+ tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
169
+ tensor = jnp.transpose(tensor, (0, 2, 1, 3))
170
+ tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
171
+ return tensor
172
+
173
+ def reshape_batch_dim_to_heads(self, tensor):
174
+ batch_size, seq_len, dim = tensor.shape
175
+ head_size = self.heads
176
+ tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
177
+ tensor = jnp.transpose(tensor, (0, 2, 1, 3))
178
+ tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size)
179
+ return tensor
180
+
181
+ def __call__(self, hidden_states, context=None, deterministic=True):
182
+ context = hidden_states if context is None else context
183
+
184
+ query_proj = self.query(hidden_states)
185
+ key_proj = self.key(context)
186
+ value_proj = self.value(context)
187
+
188
+ if self.split_head_dim:
189
+ b = hidden_states.shape[0]
190
+ query_states = jnp.reshape(query_proj, (b, -1, self.heads, self.dim_head))
191
+ key_states = jnp.reshape(key_proj, (b, -1, self.heads, self.dim_head))
192
+ value_states = jnp.reshape(value_proj, (b, -1, self.heads, self.dim_head))
193
+ else:
194
+ query_states = self.reshape_heads_to_batch_dim(query_proj)
195
+ key_states = self.reshape_heads_to_batch_dim(key_proj)
196
+ value_states = self.reshape_heads_to_batch_dim(value_proj)
197
+
198
+ if self.use_memory_efficient_attention:
199
+ query_states = query_states.transpose(1, 0, 2)
200
+ key_states = key_states.transpose(1, 0, 2)
201
+ value_states = value_states.transpose(1, 0, 2)
202
+
203
+ # this if statement creates a chunk size for each layer of the unet
204
+ # the chunk size is equal to the query_length dimension of the deepest layer of the unet
205
+
206
+ flatten_latent_dim = query_states.shape[-3]
207
+ if flatten_latent_dim % 64 == 0:
208
+ query_chunk_size = int(flatten_latent_dim / 64)
209
+ elif flatten_latent_dim % 16 == 0:
210
+ query_chunk_size = int(flatten_latent_dim / 16)
211
+ elif flatten_latent_dim % 4 == 0:
212
+ query_chunk_size = int(flatten_latent_dim / 4)
213
+ else:
214
+ query_chunk_size = int(flatten_latent_dim)
215
+
216
+ hidden_states = jax_memory_efficient_attention(
217
+ query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4
218
+ )
219
+
220
+ hidden_states = hidden_states.transpose(1, 0, 2)
221
+ else:
222
+ # compute attentions
223
+ if self.split_head_dim:
224
+ attention_scores = jnp.einsum("b t n h, b f n h -> b n f t", key_states, query_states)
225
+ else:
226
+ attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states)
227
+
228
+ attention_scores = attention_scores * self.scale
229
+ attention_probs = nn.softmax(attention_scores, axis=-1 if self.split_head_dim else 2)
230
+
231
+ # attend to values
232
+ if self.split_head_dim:
233
+ hidden_states = jnp.einsum("b n f t, b t n h -> b f n h", attention_probs, value_states)
234
+ b = hidden_states.shape[0]
235
+ hidden_states = jnp.reshape(hidden_states, (b, -1, self.heads * self.dim_head))
236
+ else:
237
+ hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states)
238
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
239
+
240
+ hidden_states = self.proj_attn(hidden_states)
241
+ return self.dropout_layer(hidden_states, deterministic=deterministic)
242
+
243
+
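A sketch of the usual Flax init/apply pattern (dimensions are illustrative); passing a `context` argument switches the module to cross-attention:

import jax
import jax.numpy as jnp

attn = FlaxAttention(query_dim=320, heads=8, dim_head=40)
hidden = jnp.ones((1, 64, 320))
variables = attn.init(jax.random.PRNGKey(0), hidden)
out = attn.apply(variables, hidden)  # self-attention
print(out.shape)  # (1, 64, 320)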
244
+ class FlaxBasicTransformerBlock(nn.Module):
245
+ r"""
246
+ A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:
247
+ https://arxiv.org/abs/1706.03762
248
+
249
+
250
+ Parameters:
251
+ dim (:obj:`int`):
252
+ Inner hidden states dimension
253
+ n_heads (:obj:`int`):
254
+ Number of heads
255
+ d_head (:obj:`int`):
256
+ Hidden states dimension inside each head
257
+ dropout (:obj:`float`, *optional*, defaults to 0.0):
258
+ Dropout rate
259
+ only_cross_attention (`bool`, defaults to `False`):
260
+ Whether to only apply cross attention.
261
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
262
+ Parameters `dtype`
263
+ use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
264
+ enable memory efficient attention https://arxiv.org/abs/2112.05682
265
+ split_head_dim (`bool`, *optional*, defaults to `False`):
266
+ Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
267
+ enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
268
+ """
269
+
270
+ dim: int
271
+ n_heads: int
272
+ d_head: int
273
+ dropout: float = 0.0
274
+ only_cross_attention: bool = False
275
+ dtype: jnp.dtype = jnp.float32
276
+ use_memory_efficient_attention: bool = False
277
+ split_head_dim: bool = False
278
+
279
+ def setup(self):
280
+ # self attention (or cross_attention if only_cross_attention is True)
281
+ self.attn1 = FlaxAttention(
282
+ self.dim,
283
+ self.n_heads,
284
+ self.d_head,
285
+ self.dropout,
286
+ self.use_memory_efficient_attention,
287
+ self.split_head_dim,
288
+ dtype=self.dtype,
289
+ )
290
+ # cross attention
291
+ self.attn2 = FlaxAttention(
292
+ self.dim,
293
+ self.n_heads,
294
+ self.d_head,
295
+ self.dropout,
296
+ self.use_memory_efficient_attention,
297
+ self.split_head_dim,
298
+ dtype=self.dtype,
299
+ )
300
+ self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype)
301
+ self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
302
+ self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
303
+ self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
304
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
305
+
306
+ def __call__(self, hidden_states, context, deterministic=True):
307
+ # self attention
308
+ residual = hidden_states
309
+ if self.only_cross_attention:
310
+ hidden_states = self.attn1(self.norm1(hidden_states), context, deterministic=deterministic)
311
+ else:
312
+ hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic)
313
+ hidden_states = hidden_states + residual
314
+
315
+ # cross attention
316
+ residual = hidden_states
317
+ hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic)
318
+ hidden_states = hidden_states + residual
319
+
320
+ # feed forward
321
+ residual = hidden_states
322
+ hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic)
323
+ hidden_states = hidden_states + residual
324
+
325
+ return self.dropout_layer(hidden_states, deterministic=deterministic)
326
+
327
+
328
+ class FlaxTransformer2DModel(nn.Module):
329
+ r"""
330
+ A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in:
331
+ https://arxiv.org/pdf/1506.02025.pdf
332
+
333
+
334
+ Parameters:
335
+ in_channels (:obj:`int`):
336
+ Input number of channels
337
+ n_heads (:obj:`int`):
338
+ Number of heads
339
+ d_head (:obj:`int`):
340
+ Hidden states dimension inside each head
341
+ depth (:obj:`int`, *optional*, defaults to 1):
342
+ Number of transformers block
343
+ dropout (:obj:`float`, *optional*, defaults to 0.0):
344
+ Dropout rate
345
+ use_linear_projection (`bool`, defaults to `False`): whether to use a linear (`nn.Dense`) projection instead of a 1x1 convolution for the input/output projections
346
+ only_cross_attention (`bool`, defaults to `False`): whether the transformer blocks should only apply cross attention
347
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
348
+ Parameters `dtype`
349
+ use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
350
+ enable memory efficient attention https://arxiv.org/abs/2112.05682
351
+ split_head_dim (`bool`, *optional*, defaults to `False`):
352
+ Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
353
+ enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
354
+ """
355
+
356
+ in_channels: int
357
+ n_heads: int
358
+ d_head: int
359
+ depth: int = 1
360
+ dropout: float = 0.0
361
+ use_linear_projection: bool = False
362
+ only_cross_attention: bool = False
363
+ dtype: jnp.dtype = jnp.float32
364
+ use_memory_efficient_attention: bool = False
365
+ split_head_dim: bool = False
366
+
367
+ def setup(self):
368
+ self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5)
369
+
370
+ inner_dim = self.n_heads * self.d_head
371
+ if self.use_linear_projection:
372
+ self.proj_in = nn.Dense(inner_dim, dtype=self.dtype)
373
+ else:
374
+ self.proj_in = nn.Conv(
375
+ inner_dim,
376
+ kernel_size=(1, 1),
377
+ strides=(1, 1),
378
+ padding="VALID",
379
+ dtype=self.dtype,
380
+ )
381
+
382
+ self.transformer_blocks = [
383
+ FlaxBasicTransformerBlock(
384
+ inner_dim,
385
+ self.n_heads,
386
+ self.d_head,
387
+ dropout=self.dropout,
388
+ only_cross_attention=self.only_cross_attention,
389
+ dtype=self.dtype,
390
+ use_memory_efficient_attention=self.use_memory_efficient_attention,
391
+ split_head_dim=self.split_head_dim,
392
+ )
393
+ for _ in range(self.depth)
394
+ ]
395
+
396
+ if self.use_linear_projection:
397
+ self.proj_out = nn.Dense(inner_dim, dtype=self.dtype)
398
+ else:
399
+ self.proj_out = nn.Conv(
400
+ inner_dim,
401
+ kernel_size=(1, 1),
402
+ strides=(1, 1),
403
+ padding="VALID",
404
+ dtype=self.dtype,
405
+ )
406
+
407
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
408
+
409
+ def __call__(self, hidden_states, context, deterministic=True):
410
+ batch, height, width, channels = hidden_states.shape
411
+ residual = hidden_states
412
+ hidden_states = self.norm(hidden_states)
413
+ if self.use_linear_projection:
414
+ hidden_states = hidden_states.reshape(batch, height * width, channels)
415
+ hidden_states = self.proj_in(hidden_states)
416
+ else:
417
+ hidden_states = self.proj_in(hidden_states)
418
+ hidden_states = hidden_states.reshape(batch, height * width, channels)
419
+
420
+ for transformer_block in self.transformer_blocks:
421
+ hidden_states = transformer_block(hidden_states, context, deterministic=deterministic)
422
+
423
+ if self.use_linear_projection:
424
+ hidden_states = self.proj_out(hidden_states)
425
+ hidden_states = hidden_states.reshape(batch, height, width, channels)
426
+ else:
427
+ hidden_states = hidden_states.reshape(batch, height, width, channels)
428
+ hidden_states = self.proj_out(hidden_states)
429
+
430
+ hidden_states = hidden_states + residual
431
+ return self.dropout_layer(hidden_states, deterministic=deterministic)
432
+
433
+
434
+ class FlaxFeedForward(nn.Module):
435
+ r"""
436
+ Flax module that encapsulates two Linear layers separated by a non-linearity. It is the counterpart of PyTorch's
437
+ [`FeedForward`] class, with the following simplifications:
438
+ - The activation function is currently hardcoded to a gated linear unit from:
439
+ https://arxiv.org/abs/2002.05202
440
+ - `dim_out` is equal to `dim`.
441
+ - The number of hidden dimensions is hardcoded to `dim * 4` in [`FlaxGELU`].
442
+
443
+ Parameters:
444
+ dim (:obj:`int`):
445
+ Inner hidden states dimension
446
+ dropout (:obj:`float`, *optional*, defaults to 0.0):
447
+ Dropout rate
448
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
449
+ Parameters `dtype`
450
+ """
451
+
452
+ dim: int
453
+ dropout: float = 0.0
454
+ dtype: jnp.dtype = jnp.float32
455
+
456
+ def setup(self):
457
+ # The second linear layer needs to be called
458
+ # net_2 for now to match the index of the Sequential layer
459
+ self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype)
460
+ self.net_2 = nn.Dense(self.dim, dtype=self.dtype)
461
+
462
+ def __call__(self, hidden_states, deterministic=True):
463
+ hidden_states = self.net_0(hidden_states, deterministic=deterministic)
464
+ hidden_states = self.net_2(hidden_states)
465
+ return hidden_states
466
+
467
+
468
+ class FlaxGEGLU(nn.Module):
469
+ r"""
470
+ Flax implementation of a Linear layer followed by the variant of the gated linear unit activation function from
471
+ https://arxiv.org/abs/2002.05202.
472
+
473
+ Parameters:
474
+ dim (:obj:`int`):
475
+ Input hidden states dimension
476
+ dropout (:obj:`float`, *optional*, defaults to 0.0):
477
+ Dropout rate
478
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
479
+ Parameters `dtype`
480
+ """
481
+
482
+ dim: int
483
+ dropout: float = 0.0
484
+ dtype: jnp.dtype = jnp.float32
485
+
486
+ def setup(self):
487
+ inner_dim = self.dim * 4
488
+ self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype)
489
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
490
+
491
+ def __call__(self, hidden_states, deterministic=True):
492
+ hidden_states = self.proj(hidden_states)
493
+ hidden_linear, hidden_gelu = jnp.split(hidden_states, 2, axis=2)
494
+ return self.dropout_layer(hidden_linear * nn.gelu(hidden_gelu), deterministic=deterministic)
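
A minimal usage sketch for the Flax blocks above (illustrative only, not part of the upstream file; shapes are made up and `jax`/`flax` are assumed to be installed):

    import jax
    import jax.numpy as jnp

    # FlaxFeedForward: GEGLU expands to 4 * dim, then nn.Dense projects back to dim.
    ff = FlaxFeedForward(dim=64)
    x = jnp.ones((1, 16, 64))  # (batch, sequence, dim)
    params = ff.init(jax.random.PRNGKey(0), x, deterministic=True)
    y = ff.apply(params, x, deterministic=True)
    assert y.shape == x.shape  # (1, 16, 64)
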
src/diffusers/models/attention_processor.py ADDED
The diff for this file is too large to render. See raw diff
 
src/diffusers/models/autoencoders/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from .autoencoder_asym_kl import AsymmetricAutoencoderKL
+ from .autoencoder_kl import AutoencoderKL
+ from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
+ from .autoencoder_tiny import AutoencoderTiny
+ from .consistency_decoder_vae import ConsistencyDecoderVAE
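
These re-exports make the autoencoder classes importable from the subpackage directly. A quick sketch (illustrative; it assumes this vendored `src/diffusers` tree is importable as `diffusers`):

    from diffusers.models.autoencoders import AutoencoderKL

    vae = AutoencoderKL()  # randomly initialized default config
    print(vae.config.latent_channels)  # 4
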
src/diffusers/models/autoencoders/autoencoder_asym_kl.py ADDED
@@ -0,0 +1,186 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+
+ from ...configuration_utils import ConfigMixin, register_to_config
+ from ...utils.accelerate_utils import apply_forward_hook
+ from ..modeling_outputs import AutoencoderKLOutput
+ from ..modeling_utils import ModelMixin
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder, MaskConditionDecoder
+
+
+ class AsymmetricAutoencoderKL(ModelMixin, ConfigMixin):
+     r"""
+     Designing a Better Asymmetric VQGAN for Stable Diffusion (https://arxiv.org/abs/2306.04632). A VAE model with
+     KL loss for encoding images into latents and decoding latent representations into images.
+
+     This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
+     for all models (such as downloading or saving).
+
+     Parameters:
+         in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
+         out_channels (int, *optional*, defaults to 3): Number of channels in the output.
+         down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
+             Tuple of downsample block types.
+         down_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
+             Tuple of down block output channels.
+         layers_per_down_block (`int`, *optional*, defaults to `1`):
+             Number of layers per down block.
+         up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
+             Tuple of upsample block types.
+         up_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
+             Tuple of up block output channels.
+         layers_per_up_block (`int`, *optional*, defaults to `1`):
+             Number of layers per up block.
+         act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
+         latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
+         sample_size (`int`, *optional*, defaults to `32`): Sample input size.
+         norm_num_groups (`int`, *optional*, defaults to `32`):
+             Number of groups to use for the first normalization layer in ResNet blocks.
+         scaling_factor (`float`, *optional*, defaults to 0.18215):
+             The component-wise standard deviation of the trained latent space computed using the first batch of the
+             training set. This is used to scale the latent space to have unit variance when training the diffusion
+             model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
+             diffusion model. When decoding, the latents are scaled back to the original scale with the formula:
+             `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
+             Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
+     """
+
+     @register_to_config
+     def __init__(
+         self,
+         in_channels: int = 3,
+         out_channels: int = 3,
+         down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
+         down_block_out_channels: Tuple[int, ...] = (64,),
+         layers_per_down_block: int = 1,
+         up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
+         up_block_out_channels: Tuple[int, ...] = (64,),
+         layers_per_up_block: int = 1,
+         act_fn: str = "silu",
+         latent_channels: int = 4,
+         norm_num_groups: int = 32,
+         sample_size: int = 32,
+         scaling_factor: float = 0.18215,
+     ) -> None:
+         super().__init__()
+
+         # pass init params to Encoder
+         self.encoder = Encoder(
+             in_channels=in_channels,
+             out_channels=latent_channels,
+             down_block_types=down_block_types,
+             block_out_channels=down_block_out_channels,
+             layers_per_block=layers_per_down_block,
+             act_fn=act_fn,
+             norm_num_groups=norm_num_groups,
+             double_z=True,
+         )
+
+         # pass init params to Decoder
+         self.decoder = MaskConditionDecoder(
+             in_channels=latent_channels,
+             out_channels=out_channels,
+             up_block_types=up_block_types,
+             block_out_channels=up_block_out_channels,
+             layers_per_block=layers_per_up_block,
+             act_fn=act_fn,
+             norm_num_groups=norm_num_groups,
+         )
+
+         self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
+         self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
+
+         self.use_slicing = False
+         self.use_tiling = False
+
+         self.register_to_config(block_out_channels=up_block_out_channels)
+         self.register_to_config(force_upcast=False)
+
+     @apply_forward_hook
+     def encode(
+         self, x: torch.FloatTensor, return_dict: bool = True
+     ) -> Union[AutoencoderKLOutput, Tuple[torch.FloatTensor]]:
+         h = self.encoder(x)
+         moments = self.quant_conv(h)
+         posterior = DiagonalGaussianDistribution(moments)
+
+         if not return_dict:
+             return (posterior,)
+
+         return AutoencoderKLOutput(latent_dist=posterior)
+
+     def _decode(
+         self,
+         z: torch.FloatTensor,
+         image: Optional[torch.FloatTensor] = None,
+         mask: Optional[torch.FloatTensor] = None,
+         return_dict: bool = True,
+     ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
+         z = self.post_quant_conv(z)
+         dec = self.decoder(z, image, mask)
+
+         if not return_dict:
+             return (dec,)
+
+         return DecoderOutput(sample=dec)
+
+     @apply_forward_hook
+     def decode(
+         self,
+         z: torch.FloatTensor,
+         generator: Optional[torch.Generator] = None,
+         image: Optional[torch.FloatTensor] = None,
+         mask: Optional[torch.FloatTensor] = None,
+         return_dict: bool = True,
+     ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
+         decoded = self._decode(z, image, mask).sample
+
+         if not return_dict:
+             return (decoded,)
+
+         return DecoderOutput(sample=decoded)
+
+     def forward(
+         self,
+         sample: torch.FloatTensor,
+         mask: Optional[torch.FloatTensor] = None,
+         sample_posterior: bool = False,
+         return_dict: bool = True,
+         generator: Optional[torch.Generator] = None,
+     ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
+         r"""
+         Args:
+             sample (`torch.FloatTensor`): Input sample.
+             mask (`torch.FloatTensor`, *optional*, defaults to `None`): Optional inpainting mask.
+             sample_posterior (`bool`, *optional*, defaults to `False`):
+                 Whether to sample from the posterior.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
+         """
+         x = sample
+         posterior = self.encode(x).latent_dist
+         if sample_posterior:
+             z = posterior.sample(generator=generator)
+         else:
+             z = posterior.mode()
+         # pass the generator explicitly so `sample` and `mask` bind to `image` and `mask`
+         dec = self.decode(z, generator, sample, mask).sample
+
+         if not return_dict:
+             return (dec,)
+
+         return DecoderOutput(sample=dec)
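
A usage sketch for the mask-conditioned round trip above (illustrative only, with a randomly initialized default config; a trained checkpoint would normally be loaded with `from_pretrained` instead, and the mask convention here is an assumption):

    import torch

    vae = AsymmetricAutoencoderKL()
    image = torch.randn(1, 3, 64, 64)
    mask = torch.ones(1, 1, 64, 64)  # inpainting mask; all-ones here just for shape

    z = vae.encode(image).latent_dist.mode()
    reconstruction = vae.decode(z, image=image, mask=mask).sample
    print(reconstruction.shape)  # torch.Size([1, 3, 64, 64])
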
src/diffusers/models/autoencoders/autoencoder_kl.py ADDED
@@ -0,0 +1,489 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import Dict, Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+
+ from ...configuration_utils import ConfigMixin, register_to_config
+ from ...loaders import FromOriginalVAEMixin
+ from ...utils.accelerate_utils import apply_forward_hook
+ from ..attention_processor import (
+     ADDED_KV_ATTENTION_PROCESSORS,
+     CROSS_ATTENTION_PROCESSORS,
+     Attention,
+     AttentionProcessor,
+     AttnAddedKVProcessor,
+     AttnProcessor,
+ )
+ from ..modeling_outputs import AutoencoderKLOutput
+ from ..modeling_utils import ModelMixin
+ from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
+
+
+ class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
+     r"""
+     A VAE model with KL loss for encoding images into latents and decoding latent representations into images.
+
+     This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
+     for all models (such as downloading or saving).
+
+     Parameters:
+         in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
+         out_channels (int, *optional*, defaults to 3): Number of channels in the output.
+         down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
+             Tuple of downsample block types.
+         up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
+             Tuple of upsample block types.
+         block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
+             Tuple of block output channels.
+         act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
+         latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
+         sample_size (`int`, *optional*, defaults to `32`): Sample input size.
+         scaling_factor (`float`, *optional*, defaults to 0.18215):
+             The component-wise standard deviation of the trained latent space computed using the first batch of the
+             training set. This is used to scale the latent space to have unit variance when training the diffusion
+             model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
+             diffusion model. When decoding, the latents are scaled back to the original scale with the formula:
+             `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
+             Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
+         force_upcast (`bool`, *optional*, defaults to `True`):
+             If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL.
+             VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case
+             `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
+     """
+
+     _supports_gradient_checkpointing = True
+
+     @register_to_config
+     def __init__(
+         self,
+         in_channels: int = 3,
+         out_channels: int = 3,
+         down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
+         up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
+         block_out_channels: Tuple[int] = (64,),
+         layers_per_block: int = 1,
+         act_fn: str = "silu",
+         latent_channels: int = 4,
+         norm_num_groups: int = 32,
+         sample_size: int = 32,
+         scaling_factor: float = 0.18215,
+         force_upcast: bool = True,
+     ):
+         super().__init__()
+
+         # pass init params to Encoder
+         self.encoder = Encoder(
+             in_channels=in_channels,
+             out_channels=latent_channels,
+             down_block_types=down_block_types,
+             block_out_channels=block_out_channels,
+             layers_per_block=layers_per_block,
+             act_fn=act_fn,
+             norm_num_groups=norm_num_groups,
+             double_z=True,
+         )
+
+         # pass init params to Decoder
+         self.decoder = Decoder(
+             in_channels=latent_channels,
+             out_channels=out_channels,
+             up_block_types=up_block_types,
+             block_out_channels=block_out_channels,
+             layers_per_block=layers_per_block,
+             norm_num_groups=norm_num_groups,
+             act_fn=act_fn,
+         )
+
+         self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
+         self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
+
+         self.use_slicing = False
+         self.use_tiling = False
+
+         # only relevant if vae tiling is enabled
+         self.tile_sample_min_size = self.config.sample_size
+         sample_size = (
+             self.config.sample_size[0]
+             if isinstance(self.config.sample_size, (list, tuple))
+             else self.config.sample_size
+         )
+         self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
+         self.tile_overlap_factor = 0.25
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, (Encoder, Decoder)):
+             module.gradient_checkpointing = value
+
+     def enable_tiling(self, use_tiling: bool = True):
+         r"""
+         Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+         compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
+         allow processing larger images.
+         """
+         self.use_tiling = use_tiling
+
+     def disable_tiling(self):
+         r"""
+         Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
+         decoding in one step.
+         """
+         self.enable_tiling(False)
+
+     def enable_slicing(self):
+         r"""
+         Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+         compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+         """
+         self.use_slicing = True
+
+     def disable_slicing(self):
+         r"""
+         Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to
+         computing decoding in one step.
+         """
+         self.use_slicing = False
+
+     @property
+     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
+     def attn_processors(self) -> Dict[str, AttentionProcessor]:
+         r"""
+         Returns:
+             `dict` of attention processors: A dictionary containing all attention processors used in the model,
+             indexed by their weight name.
+         """
+         # set recursively
+         processors = {}
+
+         def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+             if hasattr(module, "get_processor"):
+                 processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+             return processors
+
+         for name, module in self.named_children():
+             fn_recursive_add_processors(name, module, processors)
+
+         return processors
+
+     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+     def set_attn_processor(
+         self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+     ):
+         r"""
+         Sets the attention processor to use to compute attention.
+
+         Parameters:
+             processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+                 The instantiated processor class or a dictionary of processor classes that will be set as the
+                 processor for **all** `Attention` layers.
+
+                 If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+                 processor. This is strongly recommended when setting trainable attention processors.
+
+         """
+         count = len(self.attn_processors.keys())
+
+         if isinstance(processor, dict) and len(processor) != count:
+             raise ValueError(
+                 f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+                 f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+             )
+
+         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+             if hasattr(module, "set_processor"):
+                 if not isinstance(processor, dict):
+                     module.set_processor(processor, _remove_lora=_remove_lora)
+                 else:
+                     module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+         for name, module in self.named_children():
+             fn_recursive_attn_processor(name, module, processor)
+
+     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
+     def set_default_attn_processor(self):
+         """
+         Disables custom attention processors and sets the default attention implementation.
+         """
+         if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
+             processor = AttnAddedKVProcessor()
+         elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
+             processor = AttnProcessor()
+         else:
+             raise ValueError(
+                 f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
+             )
+
+         self.set_attn_processor(processor, _remove_lora=True)
+
+     @apply_forward_hook
+     def encode(
+         self, x: torch.FloatTensor, return_dict: bool = True
+     ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
+         """
+         Encode a batch of images into latents.
+
+         Args:
+             x (`torch.FloatTensor`): Input batch of images.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
+
+         Returns:
+             The latent representations of the encoded images. If `return_dict` is True, a
+             [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
+         """
+         if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
+             return self.tiled_encode(x, return_dict=return_dict)
+
+         if self.use_slicing and x.shape[0] > 1:
+             encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
+             h = torch.cat(encoded_slices)
+         else:
+             h = self.encoder(x)
+
+         moments = self.quant_conv(h)
+         posterior = DiagonalGaussianDistribution(moments)
+
+         if not return_dict:
+             return (posterior,)
+
+         return AutoencoderKLOutput(latent_dist=posterior)
+
+     def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
+         if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
+             return self.tiled_decode(z, return_dict=return_dict)
+
+         z = self.post_quant_conv(z)
+         dec = self.decoder(z)
+
+         if not return_dict:
+             return (dec,)
+
+         return DecoderOutput(sample=dec)
+
+     @apply_forward_hook
+     def decode(
+         self, z: torch.FloatTensor, return_dict: bool = True, generator=None
+     ) -> Union[DecoderOutput, torch.FloatTensor]:
+         """
+         Decode a batch of images.
+
+         Args:
+             z (`torch.FloatTensor`): Input batch of latent vectors.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
+
+         Returns:
+             [`~models.vae.DecoderOutput`] or `tuple`:
+                 If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
+                 returned.
+
+         """
+         if self.use_slicing and z.shape[0] > 1:
+             decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
+             decoded = torch.cat(decoded_slices)
+         else:
+             decoded = self._decode(z).sample
+
+         if not return_dict:
+             return (decoded,)
+
+         return DecoderOutput(sample=decoded)
+
+     def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
+         blend_extent = min(a.shape[2], b.shape[2], blend_extent)
+         for y in range(blend_extent):
+             b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
+         return b
+
+     def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
+         blend_extent = min(a.shape[3], b.shape[3], blend_extent)
+         for x in range(blend_extent):
+             b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
+         return b
+
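
`blend_v` and `blend_h` above implement a linear crossfade over the first `blend_extent` rows/columns of tile `b`, ramping from tile `a` into tile `b`. A quick illustration of the horizontal case (not part of the upstream file):

    import torch

    a = torch.zeros(1, 1, 2, 4)  # left tile
    b = torch.ones(1, 1, 2, 4)   # right tile
    blend_extent = 4
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    print(b[0, 0, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500])
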
+     def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
+         r"""Encode a batch of images using a tiled encoder.
+
+         When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
+         steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding
+         is different from non-tiled encoding because the encoder processes each tile independently. To avoid tiling
+         artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized
+         changes in the output, but they should be much less noticeable.
+
+         Args:
+             x (`torch.FloatTensor`): Input batch of images.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
+
+         Returns:
+             [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
+                 If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a
+                 plain `tuple` is returned.
+         """
+         overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
+         blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
+         row_limit = self.tile_latent_min_size - blend_extent
+
+         # Split the image into 512x512 tiles and encode them separately.
+         rows = []
+         for i in range(0, x.shape[2], overlap_size):
+             row = []
+             for j in range(0, x.shape[3], overlap_size):
+                 tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
+                 tile = self.encoder(tile)
+                 tile = self.quant_conv(tile)
+                 row.append(tile)
+             rows.append(row)
+         result_rows = []
+         for i, row in enumerate(rows):
+             result_row = []
+             for j, tile in enumerate(row):
+                 # blend the above tile and the left tile
+                 # to the current tile and add the current tile to the result row
+                 if i > 0:
+                     tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
+                 if j > 0:
+                     tile = self.blend_h(row[j - 1], tile, blend_extent)
+                 result_row.append(tile[:, :, :row_limit, :row_limit])
+             result_rows.append(torch.cat(result_row, dim=3))
+
+         moments = torch.cat(result_rows, dim=2)
+         posterior = DiagonalGaussianDistribution(moments)
+
+         if not return_dict:
+             return (posterior,)
+
+         return AutoencoderKLOutput(latent_dist=posterior)
+
+     def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
+         r"""
+         Decode a batch of images using a tiled decoder.
+
+         Args:
+             z (`torch.FloatTensor`): Input batch of latent vectors.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
+
+         Returns:
+             [`~models.vae.DecoderOutput`] or `tuple`:
+                 If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
+                 returned.
+         """
+         overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
+         blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
+         row_limit = self.tile_sample_min_size - blend_extent
+
+         # Split z into overlapping 64x64 tiles and decode them separately.
+         # The tiles have an overlap to avoid seams between tiles.
+         rows = []
+         for i in range(0, z.shape[2], overlap_size):
+             row = []
+             for j in range(0, z.shape[3], overlap_size):
+                 tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
+                 tile = self.post_quant_conv(tile)
+                 decoded = self.decoder(tile)
+                 row.append(decoded)
+             rows.append(row)
+         result_rows = []
+         for i, row in enumerate(rows):
+             result_row = []
+             for j, tile in enumerate(row):
+                 # blend the above tile and the left tile
+                 # to the current tile and add the current tile to the result row
+                 if i > 0:
+                     tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
+                 if j > 0:
+                     tile = self.blend_h(row[j - 1], tile, blend_extent)
+                 result_row.append(tile[:, :, :row_limit, :row_limit])
+             result_rows.append(torch.cat(result_row, dim=3))
+
+         dec = torch.cat(result_rows, dim=2)
+         if not return_dict:
+             return (dec,)
+
+         return DecoderOutput(sample=dec)
+
+     def forward(
+         self,
+         sample: torch.FloatTensor,
+         sample_posterior: bool = False,
+         return_dict: bool = True,
+         generator: Optional[torch.Generator] = None,
+     ) -> Union[DecoderOutput, torch.FloatTensor]:
+         r"""
+         Args:
+             sample (`torch.FloatTensor`): Input sample.
+             sample_posterior (`bool`, *optional*, defaults to `False`):
+                 Whether to sample from the posterior.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
+         """
+         x = sample
+         posterior = self.encode(x).latent_dist
+         if sample_posterior:
+             z = posterior.sample(generator=generator)
+         else:
+             z = posterior.mode()
+         dec = self.decode(z).sample
+
+         if not return_dict:
+             return (dec,)
+
+         return DecoderOutput(sample=dec)
+
+     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
+     def fuse_qkv_projections(self):
+         """
+         Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
+         are fused. For cross-attention modules, key and value projection matrices are fused.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+         """
+         self.original_attn_processors = None
+
+         for _, attn_processor in self.attn_processors.items():
+             if "Added" in str(attn_processor.__class__.__name__):
+                 raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+         self.original_attn_processors = self.attn_processors
+
+         for module in self.modules():
+             if isinstance(module, Attention):
+                 module.fuse_projections(fuse=True)
+
+     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+     def unfuse_qkv_projections(self):
+         """Disables the fused QKV projection if enabled.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+
+         """
+         if self.original_attn_processors is not None:
+             self.set_attn_processor(self.original_attn_processors)
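
A usage sketch for `AutoencoderKL` above (illustrative only, with a randomly initialized default config; trained weights would come from `AutoencoderKL.from_pretrained(...)`). It also exercises the `z = z * scaling_factor` convention from the docstring:

    import torch

    vae = AutoencoderKL()
    vae.enable_tiling()   # tiled paths trigger once inputs exceed tile_sample_min_size
    vae.enable_slicing()  # decode batch elements one at a time

    image = torch.randn(2, 3, 64, 64)
    z = vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
    decoded = vae.decode(z / vae.config.scaling_factor).sample  # undo scaling before decoding
    print(decoded.shape)  # torch.Size([2, 3, 64, 64])
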
src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py ADDED
@@ -0,0 +1,402 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import Dict, Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+
+ from ...configuration_utils import ConfigMixin, register_to_config
+ from ...loaders import FromOriginalVAEMixin
+ from ...utils import is_torch_version
+ from ...utils.accelerate_utils import apply_forward_hook
+ from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor
+ from ..modeling_outputs import AutoencoderKLOutput
+ from ..modeling_utils import ModelMixin
+ from ..unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
+
+
+ class TemporalDecoder(nn.Module):
+     def __init__(
+         self,
+         in_channels: int = 4,
+         out_channels: int = 3,
+         block_out_channels: Tuple[int] = (128, 256, 512, 512),
+         layers_per_block: int = 2,
+     ):
+         super().__init__()
+         self.layers_per_block = layers_per_block
+
+         self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
+         self.mid_block = MidBlockTemporalDecoder(
+             num_layers=self.layers_per_block,
+             in_channels=block_out_channels[-1],
+             out_channels=block_out_channels[-1],
+             attention_head_dim=block_out_channels[-1],
+         )
+
+         # up
+         self.up_blocks = nn.ModuleList([])
+         reversed_block_out_channels = list(reversed(block_out_channels))
+         output_channel = reversed_block_out_channels[0]
+         for i in range(len(block_out_channels)):
+             prev_output_channel = output_channel
+             output_channel = reversed_block_out_channels[i]
+
+             is_final_block = i == len(block_out_channels) - 1
+             up_block = UpBlockTemporalDecoder(
+                 num_layers=self.layers_per_block + 1,
+                 in_channels=prev_output_channel,
+                 out_channels=output_channel,
+                 add_upsample=not is_final_block,
+             )
+             self.up_blocks.append(up_block)
+             prev_output_channel = output_channel
+
+         self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6)
+
+         self.conv_act = nn.SiLU()
+         self.conv_out = torch.nn.Conv2d(
+             in_channels=block_out_channels[0],
+             out_channels=out_channels,
+             kernel_size=3,
+             padding=1,
+         )
+
+         conv_out_kernel_size = (3, 1, 1)
+         padding = [int(k // 2) for k in conv_out_kernel_size]
+         self.time_conv_out = torch.nn.Conv3d(
+             in_channels=out_channels,
+             out_channels=out_channels,
+             kernel_size=conv_out_kernel_size,
+             padding=padding,
+         )
+
+         self.gradient_checkpointing = False
+
+     def forward(
+         self,
+         sample: torch.FloatTensor,
+         image_only_indicator: torch.FloatTensor,
+         num_frames: int = 1,
+     ) -> torch.FloatTensor:
+         r"""The forward method of the `TemporalDecoder` class."""
+
+         sample = self.conv_in(sample)
+
+         upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
+         if self.training and self.gradient_checkpointing:
+
+             def create_custom_forward(module):
+                 def custom_forward(*inputs):
+                     return module(*inputs)
+
+                 return custom_forward
+
+             if is_torch_version(">=", "1.11.0"):
+                 # middle
+                 sample = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(self.mid_block),
+                     sample,
+                     image_only_indicator,
+                     use_reentrant=False,
+                 )
+                 sample = sample.to(upscale_dtype)
+
+                 # up
+                 for up_block in self.up_blocks:
+                     sample = torch.utils.checkpoint.checkpoint(
+                         create_custom_forward(up_block),
+                         sample,
+                         image_only_indicator,
+                         use_reentrant=False,
+                     )
+             else:
+                 # middle
+                 sample = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(self.mid_block),
+                     sample,
+                     image_only_indicator,
+                 )
+                 sample = sample.to(upscale_dtype)
+
+                 # up
+                 for up_block in self.up_blocks:
+                     sample = torch.utils.checkpoint.checkpoint(
+                         create_custom_forward(up_block),
+                         sample,
+                         image_only_indicator,
+                     )
+         else:
+             # middle
+             sample = self.mid_block(sample, image_only_indicator=image_only_indicator)
+             sample = sample.to(upscale_dtype)
+
+             # up
+             for up_block in self.up_blocks:
+                 sample = up_block(sample, image_only_indicator=image_only_indicator)
+
+         # post-process
+         sample = self.conv_norm_out(sample)
+         sample = self.conv_act(sample)
+         sample = self.conv_out(sample)
+
+         batch_frames, channels, height, width = sample.shape
+         batch_size = batch_frames // num_frames
+         sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
+         sample = self.time_conv_out(sample)
+
+         sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width)
+
+         return sample
+
+
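
The reshape around `time_conv_out` above flattens frames into the batch for the 2D stages and restores a (batch, channels, frames, height, width) layout for the 3D convolution. A quick illustration of the round trip (not part of the upstream file):

    import torch

    batch_size, num_frames, channels, height, width = 2, 4, 3, 8, 8
    sample = torch.randn(batch_size * num_frames, channels, height, width)

    x = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
    print(x.shape)  # torch.Size([2, 3, 4, 8, 8]) -- Conv3d layout
    x = x.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
    print(x.shape)  # torch.Size([8, 3, 8, 8])
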
+ class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
+     r"""
+     A VAE model with KL loss for encoding images into latents and decoding latent representations into images.
+
+     This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
+     for all models (such as downloading or saving).
+
+     Parameters:
+         in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
+         out_channels (int, *optional*, defaults to 3): Number of channels in the output.
+         down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
+             Tuple of downsample block types.
+         block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
+             Tuple of block output channels.
+         layers_per_block (`int`, *optional*, defaults to 1): Number of layers per block.
+         latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
+         sample_size (`int`, *optional*, defaults to `32`): Sample input size.
+         scaling_factor (`float`, *optional*, defaults to 0.18215):
+             The component-wise standard deviation of the trained latent space computed using the first batch of the
+             training set. This is used to scale the latent space to have unit variance when training the diffusion
+             model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
+             diffusion model. When decoding, the latents are scaled back to the original scale with the formula:
+             `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
+             Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
+         force_upcast (`bool`, *optional*, defaults to `True`):
+             If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL.
+             VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case
+             `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
+     """
+
+     _supports_gradient_checkpointing = True
+
+     @register_to_config
+     def __init__(
+         self,
+         in_channels: int = 3,
+         out_channels: int = 3,
+         down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
+         block_out_channels: Tuple[int] = (64,),
+         layers_per_block: int = 1,
+         latent_channels: int = 4,
+         sample_size: int = 32,
+         scaling_factor: float = 0.18215,
+         force_upcast: bool = True,
+     ):
+         super().__init__()
+
+         # pass init params to Encoder
+         self.encoder = Encoder(
+             in_channels=in_channels,
+             out_channels=latent_channels,
+             down_block_types=down_block_types,
+             block_out_channels=block_out_channels,
+             layers_per_block=layers_per_block,
+             double_z=True,
+         )
+
+         # pass init params to Decoder
+         self.decoder = TemporalDecoder(
+             in_channels=latent_channels,
+             out_channels=out_channels,
+             block_out_channels=block_out_channels,
+             layers_per_block=layers_per_block,
+         )
+
+         self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
+
+         sample_size = (
+             self.config.sample_size[0]
+             if isinstance(self.config.sample_size, (list, tuple))
+             else self.config.sample_size
+         )
+         self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
+         self.tile_overlap_factor = 0.25
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, (Encoder, TemporalDecoder)):
+             module.gradient_checkpointing = value
+
+     @property
+     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
+     def attn_processors(self) -> Dict[str, AttentionProcessor]:
+         r"""
+         Returns:
+             `dict` of attention processors: A dictionary containing all attention processors used in the model,
+             indexed by their weight name.
+         """
+         # set recursively
+         processors = {}
+
+         def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+             if hasattr(module, "get_processor"):
+                 processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+             return processors
+
+         for name, module in self.named_children():
+             fn_recursive_add_processors(name, module, processors)
+
+         return processors
+
+     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+     def set_attn_processor(
+         self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+     ):
+         r"""
+         Sets the attention processor to use to compute attention.
+
+         Parameters:
+             processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+                 The instantiated processor class or a dictionary of processor classes that will be set as the
+                 processor for **all** `Attention` layers.
+
+                 If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+                 processor. This is strongly recommended when setting trainable attention processors.
+
+         """
+         count = len(self.attn_processors.keys())
+
+         if isinstance(processor, dict) and len(processor) != count:
+             raise ValueError(
+                 f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+                 f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+             )
+
+         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+             if hasattr(module, "set_processor"):
+                 if not isinstance(processor, dict):
+                     module.set_processor(processor, _remove_lora=_remove_lora)
+                 else:
+                     module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+         for name, module in self.named_children():
+             fn_recursive_attn_processor(name, module, processor)
+
+     def set_default_attn_processor(self):
+         """
+         Disables custom attention processors and sets the default attention implementation.
+         """
+         if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
+             processor = AttnProcessor()
+         else:
+             raise ValueError(
+                 f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
+             )
+
+         self.set_attn_processor(processor, _remove_lora=True)
+
+     @apply_forward_hook
+     def encode(
+         self, x: torch.FloatTensor, return_dict: bool = True
+     ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
+         """
+         Encode a batch of images into latents.
+
+         Args:
+             x (`torch.FloatTensor`): Input batch of images.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
+
+         Returns:
+             The latent representations of the encoded images. If `return_dict` is True, a
+             [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
+         """
+         h = self.encoder(x)
+         moments = self.quant_conv(h)
+         posterior = DiagonalGaussianDistribution(moments)
+
+         if not return_dict:
+             return (posterior,)
+
+         return AutoencoderKLOutput(latent_dist=posterior)
+
+     @apply_forward_hook
+     def decode(
+         self,
+         z: torch.FloatTensor,
+         num_frames: int,
+         return_dict: bool = True,
+     ) -> Union[DecoderOutput, torch.FloatTensor]:
+         """
+         Decode a batch of images.
+
+         Args:
+             z (`torch.FloatTensor`): Input batch of latent vectors.
+             num_frames (`int`): Number of frames represented in the latent batch.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
+
+         Returns:
+             [`~models.vae.DecoderOutput`] or `tuple`:
+                 If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
+                 returned.
+
+         """
+         batch_size = z.shape[0] // num_frames
+         image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device)
+         decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator)
+
+         if not return_dict:
+             return (decoded,)
+
+         return DecoderOutput(sample=decoded)
+
+     def forward(
+         self,
+         sample: torch.FloatTensor,
+         sample_posterior: bool = False,
+         return_dict: bool = True,
+         generator: Optional[torch.Generator] = None,
+         num_frames: int = 1,
+     ) -> Union[DecoderOutput, torch.FloatTensor]:
+         r"""
+         Args:
+             sample (`torch.FloatTensor`): Input sample.
+             sample_posterior (`bool`, *optional*, defaults to `False`):
+                 Whether to sample from the posterior.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
+         """
+         x = sample
+         posterior = self.encode(x).latent_dist
+         if sample_posterior:
+             z = posterior.sample(generator=generator)
+         else:
+             z = posterior.mode()
+
+         dec = self.decode(z, num_frames=num_frames).sample
+
+         if not return_dict:
+             return (dec,)
+
+         return DecoderOutput(sample=dec)
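
A usage sketch for the temporal VAE above (illustrative only, randomly initialized; Stable Video Diffusion checkpoints would normally be loaded with `from_pretrained`). Frames are flattened into the batch dimension, and `num_frames` must be passed at decode time:

    import torch

    vae = AutoencoderKLTemporalDecoder()
    num_frames = 4
    frames = torch.randn(1 * num_frames, 3, 64, 64)  # (batch * frames, C, H, W)

    z = vae.encode(frames).latent_dist.mode()
    video = vae.decode(z, num_frames=num_frames).sample
    print(video.shape)  # torch.Size([4, 3, 64, 64])
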
src/diffusers/models/autoencoders/autoencoder_tiny.py ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from dataclasses import dataclass
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...utils import BaseOutput
23
+ from ...utils.accelerate_utils import apply_forward_hook
24
+ from ..modeling_utils import ModelMixin
25
+ from .vae import DecoderOutput, DecoderTiny, EncoderTiny
26
+
27
+
28
+ @dataclass
29
+ class AutoencoderTinyOutput(BaseOutput):
30
+ """
31
+ Output of AutoencoderTiny encoding method.
32
+
33
+ Args:
34
+ latents (`torch.Tensor`): Encoded outputs of the `Encoder`.
35
+
36
+ """
37
+
38
+ latents: torch.Tensor
39
+
40
+
41
+ class AutoencoderTiny(ModelMixin, ConfigMixin):
42
+ r"""
43
+ A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.
44
+
45
+ [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.
46
+
47
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for
48
+ all models (such as downloading or saving).
49
+
50
+ Parameters:
51
+ in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
52
+ out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
53
+ encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
54
+ Tuple of integers representing the number of output channels for each encoder block. The length of the
55
+ tuple should be equal to the number of encoder blocks.
56
+ decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
57
+ Tuple of integers representing the number of output channels for each decoder block. The length of the
58
+ tuple should be equal to the number of decoder blocks.
59
+ act_fn (`str`, *optional*, defaults to `"relu"`):
60
+ Activation function to be used throughout the model.
61
+ latent_channels (`int`, *optional*, defaults to 4):
62
+ Number of channels in the latent representation. The latent space acts as a compressed representation of
63
+ the input image.
64
+ upsampling_scaling_factor (`int`, *optional*, defaults to 2):
65
+ Scaling factor for upsampling in the decoder. It determines the size of the output image during the
66
+ upsampling process.
67
+ num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
68
+ Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
69
+ length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
70
+ number of encoder blocks.
71
+ num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
72
+ Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
73
+ length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
74
+ number of decoder blocks.
75
+ latent_magnitude (`float`, *optional*, defaults to 3.0):
76
+ Magnitude of the latent representation. This parameter scales the latent representation values to control
77
+ the extent of information preservation.
78
+ latent_shift (float, *optional*, defaults to 0.5):
79
+ Shift applied to the latent representation. This parameter controls the center of the latent space.
80
+ scaling_factor (`float`, *optional*, defaults to 1.0):
81
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
82
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
83
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
84
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
85
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
86
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. For this Autoencoder,
87
+ however, no such scaling factor was used, hence the value of 1.0 as the default.
88
+ force_upcast (`bool`, *optional*, default to `False`):
89
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
90
+ can be fine-tuned / trained to a lower range without losing too much precision, in which case
91
+ `force_upcast` can be set to `False` (see this fp16-friendly
92
+ [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
93
+ """
94
+
95
+ _supports_gradient_checkpointing = True
96
+
97
+ @register_to_config
98
+ def __init__(
99
+ self,
100
+ in_channels: int = 3,
101
+ out_channels: int = 3,
102
+ encoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
103
+ decoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
104
+ act_fn: str = "relu",
105
+ latent_channels: int = 4,
106
+ upsampling_scaling_factor: int = 2,
107
+ num_encoder_blocks: Tuple[int, ...] = (1, 3, 3, 3),
108
+ num_decoder_blocks: Tuple[int, ...] = (3, 3, 3, 1),
109
+ latent_magnitude: int = 3,
110
+ latent_shift: float = 0.5,
111
+ force_upcast: bool = False,
112
+ scaling_factor: float = 1.0,
113
+ ):
114
+ super().__init__()
115
+
116
+ if len(encoder_block_out_channels) != len(num_encoder_blocks):
117
+ raise ValueError("`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.")
118
+ if len(decoder_block_out_channels) != len(num_decoder_blocks):
119
+ raise ValueError("`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.")
120
+
121
+ self.encoder = EncoderTiny(
122
+ in_channels=in_channels,
123
+ out_channels=latent_channels,
124
+ num_blocks=num_encoder_blocks,
125
+ block_out_channels=encoder_block_out_channels,
126
+ act_fn=act_fn,
127
+ )
128
+
129
+ self.decoder = DecoderTiny(
130
+ in_channels=latent_channels,
131
+ out_channels=out_channels,
132
+ num_blocks=num_decoder_blocks,
133
+ block_out_channels=decoder_block_out_channels,
134
+ upsampling_scaling_factor=upsampling_scaling_factor,
135
+ act_fn=act_fn,
136
+ )
137
+
138
+ self.latent_magnitude = latent_magnitude
139
+ self.latent_shift = latent_shift
140
+ self.scaling_factor = scaling_factor
141
+
142
+ self.use_slicing = False
143
+ self.use_tiling = False
144
+
145
+ # only relevant if vae tiling is enabled
146
+ self.spatial_scale_factor = 2**out_channels  # evaluates to 8 with the default out_channels=3, i.e. the encoder's overall spatial downsampling
147
+ self.tile_overlap_factor = 0.125
148
+ self.tile_sample_min_size = 512
149
+ self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor
150
+
151
+ self.register_to_config(block_out_channels=decoder_block_out_channels)
152
+ self.register_to_config(force_upcast=False)
153
+
154
+ def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
155
+ if isinstance(module, (EncoderTiny, DecoderTiny)):
156
+ module.gradient_checkpointing = value
157
+
158
+ def scale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor:
159
+ """raw latents -> [0, 1]"""
160
+ return x.div(2 * self.latent_magnitude).add(self.latent_shift).clamp(0, 1)
161
+
162
+ def unscale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor:
163
+ """[0, 1] -> raw latents"""
164
+ return x.sub(self.latent_shift).mul(2 * self.latent_magnitude)
165
+
166
+ def enable_slicing(self) -> None:
167
+ r"""
168
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
169
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
170
+ """
171
+ self.use_slicing = True
172
+
173
+ def disable_slicing(self) -> None:
174
+ r"""
175
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
176
+ decoding in one step.
177
+ """
178
+ self.use_slicing = False
179
+
180
+ def enable_tiling(self, use_tiling: bool = True) -> None:
181
+ r"""
182
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
183
+ compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow
184
+ processing larger images.
185
+ """
186
+ self.use_tiling = use_tiling
187
+
188
+ def disable_tiling(self) -> None:
189
+ r"""
190
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
191
+ decoding in one step.
192
+ """
193
+ self.enable_tiling(False)
194
+
195
+ def _tiled_encode(self, x: torch.FloatTensor) -> torch.FloatTensor:
196
+ r"""Encode a batch of images using a tiled encoder.
197
+
198
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
199
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
200
+ tiles overlap and are blended together to form a smooth output.
201
+
202
+ Args:
203
+ x (`torch.FloatTensor`): Input batch of images.
204
+
205
+ Returns:
206
+ `torch.FloatTensor`: Encoded batch of images.
207
+ """
208
+ # scale of encoder output relative to input
209
+ sf = self.spatial_scale_factor
210
+ tile_size = self.tile_sample_min_size
211
+
212
+ # number of pixels to blend and to traverse between tiles
213
+ blend_size = int(tile_size * self.tile_overlap_factor)
214
+ traverse_size = tile_size - blend_size
215
+
216
+ # tiles index (up/left)
217
+ ti = range(0, x.shape[-2], traverse_size)
218
+ tj = range(0, x.shape[-1], traverse_size)
219
+
220
+ # mask for blending
221
+ blend_masks = torch.stack(
222
+ torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij")
223
+ )
224
+ blend_masks = blend_masks.clamp(0, 1).to(x.device)
225
+
226
+ # output array
227
+ out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device)
228
+ for i in ti:
229
+ for j in tj:
230
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
231
+ # tile result
232
+ tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf]
233
+ tile = self.encoder(tile_in)
234
+ h, w = tile.shape[-2], tile.shape[-1]
235
+ # blend tile result into output
236
+ blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
237
+ blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
238
+ blend_mask = blend_mask_i * blend_mask_j
239
+ tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w]
240
+ tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
241
+ return out
242
+
243
+ def _tiled_decode(self, x: torch.FloatTensor) -> torch.FloatTensor:
244
+ r"""Decode a batch of latents using a tiled decoder.
245
+
246
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding in several
247
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
248
+ tiles overlap and are blended together to form a smooth output.
249
+
250
+ Args:
251
+ x (`torch.FloatTensor`): Input batch of latents.
252
+
253
+ Returns:
254
+ `torch.FloatTensor`: Decoded batch of images.
255
+ """
256
+ # scale of decoder output relative to input
257
+ sf = self.spatial_scale_factor
258
+ tile_size = self.tile_latent_min_size
259
+
260
+ # number of pixels to blend and to traverse between tiles
261
+ blend_size = int(tile_size * self.tile_overlap_factor)
262
+ traverse_size = tile_size - blend_size
263
+
264
+ # tiles index (up/left)
265
+ ti = range(0, x.shape[-2], traverse_size)
266
+ tj = range(0, x.shape[-1], traverse_size)
267
+
268
+ # mask for blending
269
+ blend_masks = torch.stack(
270
+ torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij")
271
+ )
272
+ blend_masks = blend_masks.clamp(0, 1).to(x.device)
273
+
274
+ # output array
275
+ out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device)
276
+ for i in ti:
277
+ for j in tj:
278
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
279
+ # tile result
280
+ tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf]
281
+ tile = self.decoder(tile_in)
282
+ h, w = tile.shape[-2], tile.shape[-1]
283
+ # blend tile result into output
284
+ blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
285
+ blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
286
+ blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w]
287
+ tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
288
+ return out
289
+
290
+ @apply_forward_hook
291
+ def encode(
292
+ self, x: torch.FloatTensor, return_dict: bool = True
293
+ ) -> Union[AutoencoderTinyOutput, Tuple[torch.FloatTensor]]:
294
+ if self.use_slicing and x.shape[0] > 1:
295
+ output = [self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x_slice) for x_slice in x.split(1)]
296
+ output = torch.cat(output)
297
+ else:
298
+ output = self._tiled_encode(x) if self.use_tiling else self.encoder(x)
299
+
300
+ if not return_dict:
301
+ return (output,)
302
+
303
+ return AutoencoderTinyOutput(latents=output)
304
+
305
+ @apply_forward_hook
306
+ def decode(
307
+ self, x: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True
308
+ ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
309
+ if self.use_slicing and x.shape[0] > 1:
310
+ output = [self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x_slice) for x_slice in x.split(1)]
311
+ output = torch.cat(output)
312
+ else:
313
+ output = self._tiled_decode(x) if self.use_tiling else self.decoder(x)
314
+
315
+ if not return_dict:
316
+ return (output,)
317
+
318
+ return DecoderOutput(sample=output)
319
+
320
+ def forward(
321
+ self,
322
+ sample: torch.FloatTensor,
323
+ return_dict: bool = True,
324
+ ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
325
+ r"""
326
+ Args:
327
+ sample (`torch.FloatTensor`): Input sample.
328
+ return_dict (`bool`, *optional*, defaults to `True`):
329
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
330
+ """
331
+ enc = self.encode(sample).latents
332
+
333
+ # scale latents to be in [0, 1], then quantize latents to a byte tensor,
334
+ # as if we were storing the latents in an RGBA uint8 image.
335
+ scaled_enc = self.scale_latents(enc).mul_(255).round_().byte()
336
+
337
+ # unquantize latents back into [0, 1], then unscale latents back to their original range,
338
+ # as if we were loading the latents from an RGBA uint8 image.
339
+ unscaled_enc = self.unscale_latents(scaled_enc / 255.0)
340
+
341
+ dec = self.decode(unscaled_enc).sample
342
+
343
+ if not return_dict:
344
+ return (dec,)
345
+ return DecoderOutput(sample=dec)
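
Below is a minimal sketch (not part of the diff) of exercising the `AutoencoderTiny` defined above: it assumes the `madebyollin/taesd` checkpoint on the Hub (not part of this commit) and mirrors the uint8 latent roundtrip that `forward` performs internally, with tiling enabled to hit the `_tiled_encode` / `_tiled_decode` paths.

```py
import torch
from diffusers import AutoencoderTiny

# checkpoint name is an assumption; any AutoencoderTiny weights would do
vae = AutoencoderTiny.from_pretrained("madebyollin/taesd")
vae.enable_tiling()  # route encode/decode through the tiled code paths above

image = torch.rand(1, 3, 512, 512)  # dummy image batch; values are illustrative

with torch.no_grad():
    latents = vae.encode(image).latents  # (1, 4, 64, 64) with the default 8x spatial factor

    # quantize latents to uint8 and back, as `forward` does when simulating storage
    stored = vae.scale_latents(latents).mul_(255).round_().byte()
    restored = vae.unscale_latents(stored / 255.0)

    recon = vae.decode(restored).sample  # (1, 3, 512, 512)
```

The roundtrip through `scale_latents` / `unscale_latents` is lossy only up to the uint8 quantization step, which is what the `latent_magnitude` and `latent_shift` configuration values documented above control.
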
src/diffusers/models/autoencoders/consistency_decoder_vae.py ADDED
@@ -0,0 +1,437 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Dict, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn.functional as F
19
+ from torch import nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...schedulers import ConsistencyDecoderScheduler
23
+ from ...utils import BaseOutput
24
+ from ...utils.accelerate_utils import apply_forward_hook
25
+ from ...utils.torch_utils import randn_tensor
26
+ from ..attention_processor import (
27
+ ADDED_KV_ATTENTION_PROCESSORS,
28
+ CROSS_ATTENTION_PROCESSORS,
29
+ AttentionProcessor,
30
+ AttnAddedKVProcessor,
31
+ AttnProcessor,
32
+ )
33
+ from ..modeling_utils import ModelMixin
34
+ from ..unet_2d import UNet2DModel
35
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
36
+
37
+
38
+ @dataclass
39
+ class ConsistencyDecoderVAEOutput(BaseOutput):
40
+ """
41
+ Output of encoding method.
42
+
43
+ Args:
44
+ latent_dist (`DiagonalGaussianDistribution`):
45
+ Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
46
+ `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
47
+ """
48
+
49
+ latent_dist: "DiagonalGaussianDistribution"
50
+
51
+
52
+ class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
53
+ r"""
54
+ The consistency decoder used with DALL-E 3.
55
+
56
+ Examples:
57
+ ```py
58
+ >>> import torch
59
+ >>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE
60
+
61
+ >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
62
+ >>> pipe = StableDiffusionPipeline.from_pretrained(
63
+ ... "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
64
+ ... ).to("cuda")
65
+
66
+ >>> pipe("horse", generator=torch.manual_seed(0)).images
67
+ ```
68
+ """
69
+
70
+ @register_to_config
71
+ def __init__(
72
+ self,
73
+ scaling_factor: float = 0.18215,
74
+ latent_channels: int = 4,
75
+ encoder_act_fn: str = "silu",
76
+ encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
77
+ encoder_double_z: bool = True,
78
+ encoder_down_block_types: Tuple[str, ...] = (
79
+ "DownEncoderBlock2D",
80
+ "DownEncoderBlock2D",
81
+ "DownEncoderBlock2D",
82
+ "DownEncoderBlock2D",
83
+ ),
84
+ encoder_in_channels: int = 3,
85
+ encoder_layers_per_block: int = 2,
86
+ encoder_norm_num_groups: int = 32,
87
+ encoder_out_channels: int = 4,
88
+ decoder_add_attention: bool = False,
89
+ decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024),
90
+ decoder_down_block_types: Tuple[str, ...] = (
91
+ "ResnetDownsampleBlock2D",
92
+ "ResnetDownsampleBlock2D",
93
+ "ResnetDownsampleBlock2D",
94
+ "ResnetDownsampleBlock2D",
95
+ ),
96
+ decoder_downsample_padding: int = 1,
97
+ decoder_in_channels: int = 7,
98
+ decoder_layers_per_block: int = 3,
99
+ decoder_norm_eps: float = 1e-05,
100
+ decoder_norm_num_groups: int = 32,
101
+ decoder_num_train_timesteps: int = 1024,
102
+ decoder_out_channels: int = 6,
103
+ decoder_resnet_time_scale_shift: str = "scale_shift",
104
+ decoder_time_embedding_type: str = "learned",
105
+ decoder_up_block_types: Tuple[str, ...] = (
106
+ "ResnetUpsampleBlock2D",
107
+ "ResnetUpsampleBlock2D",
108
+ "ResnetUpsampleBlock2D",
109
+ "ResnetUpsampleBlock2D",
110
+ ),
111
+ ):
112
+ super().__init__()
113
+ self.encoder = Encoder(
114
+ act_fn=encoder_act_fn,
115
+ block_out_channels=encoder_block_out_channels,
116
+ double_z=encoder_double_z,
117
+ down_block_types=encoder_down_block_types,
118
+ in_channels=encoder_in_channels,
119
+ layers_per_block=encoder_layers_per_block,
120
+ norm_num_groups=encoder_norm_num_groups,
121
+ out_channels=encoder_out_channels,
122
+ )
123
+
124
+ self.decoder_unet = UNet2DModel(
125
+ add_attention=decoder_add_attention,
126
+ block_out_channels=decoder_block_out_channels,
127
+ down_block_types=decoder_down_block_types,
128
+ downsample_padding=decoder_downsample_padding,
129
+ in_channels=decoder_in_channels,
130
+ layers_per_block=decoder_layers_per_block,
131
+ norm_eps=decoder_norm_eps,
132
+ norm_num_groups=decoder_norm_num_groups,
133
+ num_train_timesteps=decoder_num_train_timesteps,
134
+ out_channels=decoder_out_channels,
135
+ resnet_time_scale_shift=decoder_resnet_time_scale_shift,
136
+ time_embedding_type=decoder_time_embedding_type,
137
+ up_block_types=decoder_up_block_types,
138
+ )
139
+ self.decoder_scheduler = ConsistencyDecoderScheduler()
140
+ self.register_to_config(block_out_channels=encoder_block_out_channels)
141
+ self.register_to_config(force_upcast=False)
142
+ self.register_buffer(
143
+ "means",
144
+ torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None],
145
+ persistent=False,
146
+ )
147
+ self.register_buffer(
148
+ "stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False
149
+ )
150
+
151
+ self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
152
+
153
+ self.use_slicing = False
154
+ self.use_tiling = False
155
+
156
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling
157
+ def enable_tiling(self, use_tiling: bool = True):
158
+ r"""
159
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
160
+ compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow
161
+ processing larger images.
162
+ """
163
+ self.use_tiling = use_tiling
164
+
165
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling
166
+ def disable_tiling(self):
167
+ r"""
168
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
169
+ decoding in one step.
170
+ """
171
+ self.enable_tiling(False)
172
+
173
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing
174
+ def enable_slicing(self):
175
+ r"""
176
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
177
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
178
+ """
179
+ self.use_slicing = True
180
+
181
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing
182
+ def disable_slicing(self):
183
+ r"""
184
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
185
+ decoding in one step.
186
+ """
187
+ self.use_slicing = False
188
+
189
+ @property
190
+ # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
191
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
192
+ r"""
193
+ Returns:
194
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
195
+ indexed by their weight names.
196
+ """
197
+ # set recursively
198
+ processors = {}
199
+
200
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
201
+ if hasattr(module, "get_processor"):
202
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
203
+
204
+ for sub_name, child in module.named_children():
205
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
206
+
207
+ return processors
208
+
209
+ for name, module in self.named_children():
210
+ fn_recursive_add_processors(name, module, processors)
211
+
212
+ return processors
213
+
214
+ # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
215
+ def set_attn_processor(
216
+ self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
217
+ ):
218
+ r"""
219
+ Sets the attention processor to use to compute attention.
220
+
221
+ Parameters:
222
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
223
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
224
+ for **all** `Attention` layers.
225
+
226
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
227
+ processor. This is strongly recommended when setting trainable attention processors.
228
+
229
+ """
230
+ count = len(self.attn_processors.keys())
231
+
232
+ if isinstance(processor, dict) and len(processor) != count:
233
+ raise ValueError(
234
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
235
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
236
+ )
237
+
238
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
239
+ if hasattr(module, "set_processor"):
240
+ if not isinstance(processor, dict):
241
+ module.set_processor(processor, _remove_lora=_remove_lora)
242
+ else:
243
+ module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
244
+
245
+ for sub_name, child in module.named_children():
246
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
247
+
248
+ for name, module in self.named_children():
249
+ fn_recursive_attn_processor(name, module, processor)
250
+
251
+ # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
252
+ def set_default_attn_processor(self):
253
+ """
254
+ Disables custom attention processors and sets the default attention implementation.
255
+ """
256
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
257
+ processor = AttnAddedKVProcessor()
258
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
259
+ processor = AttnProcessor()
260
+ else:
261
+ raise ValueError(
262
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
263
+ )
264
+
265
+ self.set_attn_processor(processor, _remove_lora=True)
266
+
267
+ @apply_forward_hook
268
+ def encode(
269
+ self, x: torch.FloatTensor, return_dict: bool = True
270
+ ) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]:
271
+ """
272
+ Encode a batch of images into latents.
273
+
274
+ Args:
275
+ x (`torch.FloatTensor`): Input batch of images.
276
+ return_dict (`bool`, *optional*, defaults to `True`):
277
+ Whether to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a plain
278
+ tuple.
279
+
280
+ Returns:
281
+ The latent representations of the encoded images. If `return_dict` is True, a
282
+ [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a plain `tuple`
283
+ is returned.
284
+ """
285
+ if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
286
+ return self.tiled_encode(x, return_dict=return_dict)
287
+
288
+ if self.use_slicing and x.shape[0] > 1:
289
+ encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
290
+ h = torch.cat(encoded_slices)
291
+ else:
292
+ h = self.encoder(x)
293
+
294
+ moments = self.quant_conv(h)
295
+ posterior = DiagonalGaussianDistribution(moments)
296
+
297
+ if not return_dict:
298
+ return (posterior,)
299
+
300
+ return ConsistencyDecoderVAEOutput(latent_dist=posterior)
301
+
302
+ @apply_forward_hook
303
+ def decode(
304
+ self,
305
+ z: torch.FloatTensor,
306
+ generator: Optional[torch.Generator] = None,
307
+ return_dict: bool = True,
308
+ num_inference_steps: int = 2,
309
+ ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
310
+ z = (z * self.config.scaling_factor - self.means) / self.stds
311
+
312
+ scale_factor = 2 ** (len(self.config.block_out_channels) - 1)
313
+ z = F.interpolate(z, mode="nearest", scale_factor=scale_factor)
314
+
315
+ batch_size, _, height, width = z.shape
316
+
317
+ self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device)
318
+
319
+ x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor(
320
+ (batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device
321
+ )
322
+
323
+ for t in self.decoder_scheduler.timesteps:
324
+ model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1)
325
+ model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :]
326
+ prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample
327
+ x_t = prev_sample
328
+
329
+ x_0 = x_t
330
+
331
+ if not return_dict:
332
+ return (x_0,)
333
+
334
+ return DecoderOutput(sample=x_0)
335
+
336
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v
337
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
338
+ blend_extent = min(a.shape[2], b.shape[2], blend_extent)
339
+ for y in range(blend_extent):
340
+ b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
341
+ return b
342
+
343
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h
344
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
345
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
346
+ for x in range(blend_extent):
347
+ b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
348
+ return b
349
+
350
+ def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> ConsistencyDecoderVAEOutput:
351
+ r"""Encode a batch of images using a tiled encoder.
352
+
353
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
354
+ steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
355
+ different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
356
+ tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
357
+ output, but they should be much less noticeable.
358
+
359
+ Args:
360
+ x (`torch.FloatTensor`): Input batch of images.
361
+ return_dict (`bool`, *optional*, defaults to `True`):
362
+ Whether or not to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a
363
+ plain tuple.
364
+
365
+ Returns:
366
+ [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
367
+ If return_dict is True, a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned,
368
+ otherwise a plain `tuple` is returned.
369
+ """
370
+ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
371
+ blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
372
+ row_limit = self.tile_latent_min_size - blend_extent
373
+
374
+ # Split the image into 512x512 tiles and encode them separately.
375
+ rows = []
376
+ for i in range(0, x.shape[2], overlap_size):
377
+ row = []
378
+ for j in range(0, x.shape[3], overlap_size):
379
+ tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
380
+ tile = self.encoder(tile)
381
+ tile = self.quant_conv(tile)
382
+ row.append(tile)
383
+ rows.append(row)
384
+ result_rows = []
385
+ for i, row in enumerate(rows):
386
+ result_row = []
387
+ for j, tile in enumerate(row):
388
+ # blend the above tile and the left tile
389
+ # to the current tile and add the current tile to the result row
390
+ if i > 0:
391
+ tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
392
+ if j > 0:
393
+ tile = self.blend_h(row[j - 1], tile, blend_extent)
394
+ result_row.append(tile[:, :, :row_limit, :row_limit])
395
+ result_rows.append(torch.cat(result_row, dim=3))
396
+
397
+ moments = torch.cat(result_rows, dim=2)
398
+ posterior = DiagonalGaussianDistribution(moments)
399
+
400
+ if not return_dict:
401
+ return (posterior,)
402
+
403
+ return ConsistencyDecoderVAEOutput(latent_dist=posterior)
404
+
405
+ def forward(
406
+ self,
407
+ sample: torch.FloatTensor,
408
+ sample_posterior: bool = False,
409
+ return_dict: bool = True,
410
+ generator: Optional[torch.Generator] = None,
411
+ ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
412
+ r"""
413
+ Args:
414
+ sample (`torch.FloatTensor`): Input sample.
415
+ sample_posterior (`bool`, *optional*, defaults to `False`):
416
+ Whether to sample from the posterior.
417
+ return_dict (`bool`, *optional*, defaults to `True`):
418
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
419
+ generator (`torch.Generator`, *optional*, defaults to `None`):
420
+ Generator to use for sampling.
421
+
422
+ Returns:
423
+ [`DecoderOutput`] or `tuple`:
424
+ If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned.
425
+ """
426
+ x = sample
427
+ posterior = self.encode(x).latent_dist
428
+ if sample_posterior:
429
+ z = posterior.sample(generator=generator)
430
+ else:
431
+ z = posterior.mode()
432
+ dec = self.decode(z, generator=generator).sample
433
+
434
+ if not return_dict:
435
+ return (dec,)
436
+
437
+ return DecoderOutput(sample=dec)
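
To complement the class docstring example above (which routes the consistency decoder through `StableDiffusionPipeline`), here is a minimal sketch of driving `ConsistencyDecoderVAE` directly; the `openai/consistency-decoder` checkpoint comes from that docstring, while shapes and the seed are illustrative assumptions.

```py
import torch
from diffusers import ConsistencyDecoderVAE

# checkpoint referenced in the class docstring above
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder")
vae.eval()

image = torch.rand(1, 3, 256, 256) * 2 - 1  # dummy batch in [-1, 1]; purely illustrative

with torch.no_grad():
    posterior = vae.encode(image).latent_dist  # DiagonalGaussianDistribution
    z = posterior.sample(generator=torch.manual_seed(0))

    # decode() runs the consistency UNet for `num_inference_steps` steps (2 by default),
    # starting from noise and conditioning on the upsampled latents
    recon = vae.decode(z, generator=torch.manual_seed(0), num_inference_steps=2).sample

print(recon.shape)  # torch.Size([1, 3, 256, 256])
```

Note that `decode` normalizes the latents with the registered `means` / `stds` buffers and upsamples them by `2 ** (len(block_out_channels) - 1)` before concatenating them with the noisy sample as the UNet input.
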