PennyJX committed
Commit 2fa003a · verified · 1 Parent(s): 2a04aaa

Upload 3 files

modules/processing.py ADDED
@@ -0,0 +1,1559 @@
+ from __future__ import annotations
+ import json
+ import logging
+ import math
+ import os
+ import sys
+ import hashlib
+ from dataclasses import dataclass, field
+
+ import torch
+ import numpy as np
+ from PIL import Image, ImageOps
+ import random
+ import cv2
+ from skimage import exposure
+ from typing import Any
+
+ import modules.sd_hijack
+ from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
+ from modules.rng import slerp  # noqa: F401
+ from modules.sd_hijack import model_hijack
+ from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
+ from modules.shared import opts, cmd_opts, state
+ import modules.shared as shared
+ import modules.paths as paths
+ import modules.face_restoration
+ import modules.images as images
+ import modules.styles
+ import modules.sd_models as sd_models
+ import modules.sd_vae as sd_vae
+ from ldm.data.util import AddMiDaS
+ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion
+
+ from einops import repeat, rearrange
+ from blendmodes.blend import blendLayers, BlendType
+
+
+ # some of those options should not be changed at all because they would break the model, so I removed them from options.
+ opt_C = 4
+ opt_f = 8
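+ # opt_C is the latent channel count and opt_f the VAE downscale factor: a 512x512 RGB
+ # image corresponds to a 4x64x64 latent, matching the (opt_C, height // opt_f, width // opt_f)
+ # shape passed to rng.ImageRNG further down in this file.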
+
+
+ def setup_color_correction(image):
+     logging.info("Calibrating color correction.")
+     correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
+     return correction_target
+
+
+ def apply_color_correction(correction, original_image):
+     logging.info("Applying color correction.")
+     image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
+         cv2.cvtColor(
+             np.asarray(original_image),
+             cv2.COLOR_RGB2LAB
+         ),
+         correction,
+         channel_axis=2
+     ), cv2.COLOR_LAB2RGB).astype("uint8"))
+
+     image = blendLayers(image, original_image, BlendType.LUMINOSITY)
+
+     return image.convert('RGB')
+
+
+ def apply_overlay(image, paste_loc, index, overlays):
+     if overlays is None or index >= len(overlays):
+         return image
+
+     overlay = overlays[index]
+
+     if paste_loc is not None:
+         x, y, w, h = paste_loc
+         base_image = Image.new('RGBA', (overlay.width, overlay.height))
+         image = images.resize_image(1, image, w, h)
+         base_image.paste(image, (x, y))
+         image = base_image
+
+     image = image.convert('RGBA')
+     image.alpha_composite(overlay)
+     image = image.convert('RGB')
+
+     return image
+
+ def create_binary_mask(image):
+     if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255):
+         image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
+     else:
+         image = image.convert('L')
+     return image
+
+ def txt2img_image_conditioning(sd_model, x, width, height):
+     if sd_model.model.conditioning_key in {'hybrid', 'concat'}:  # Inpainting models
+
+         # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+         image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+         image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
+
+         # Add the fake full 1s mask to the first dimension.
+         image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+         image_conditioning = image_conditioning.to(x.dtype)
+
+         return image_conditioning
+
+     elif sd_model.model.conditioning_key == "crossattn-adm":  # UnCLIP models
+
+         return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
+
+     else:
+         sd = sd_model.model.state_dict()
+         diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+         if diffusion_model_input is not None:
+             if diffusion_model_input.shape[1] == 9:
+                 # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+                 image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+                 image_conditioning = images_tensor_to_samples(image_conditioning,
+                                                               approximation_indexes.get(opts.sd_vae_encode_method))
+
+                 # Add the fake full 1s mask to the first dimension.
+                 image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+                 image_conditioning = image_conditioning.to(x.dtype)
+
+                 return image_conditioning
+
+         # Dummy zero conditioning if we're not using inpainting or unclip models.
+         # Still takes up a bit of memory, but no encoder call.
+         # Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
+         return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
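+
+ # Note on the shape checks above: the conditioning latent has 4 channels plus the 1-channel
+ # mask padded on, and an inpainting UNet concatenates that onto its own 4-channel input,
+ # which is why a first-conv weight with 9 input channels identifies an inpainting model.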
+
+
+ @dataclass(repr=False)
+ class StableDiffusionProcessing:
+     sd_model: object = None
+     outpath_samples: str = None
+     outpath_grids: str = None
+     prompt: str = ""
+     prompt_for_display: str = None
+     negative_prompt: str = ""
+     styles: list[str] = None
+     seed: int = -1
+     subseed: int = -1
+     subseed_strength: float = 0
+     seed_resize_from_h: int = -1
+     seed_resize_from_w: int = -1
+     seed_enable_extras: bool = True
+     sampler_name: str = None
+     batch_size: int = 1
+     n_iter: int = 1
+     steps: int = 50
+     cfg_scale: float = 7.0
+     width: int = 512
+     height: int = 512
+     restore_faces: bool = None
+     tiling: bool = None
+     do_not_save_samples: bool = False
+     do_not_save_grid: bool = False
+     extra_generation_params: dict[str, Any] = None
+     overlay_images: list = None
+     eta: float = None
+     do_not_reload_embeddings: bool = False
+     denoising_strength: float = None
+     ddim_discretize: str = None
+     s_min_uncond: float = None
+     s_churn: float = None
+     s_tmax: float = None
+     s_tmin: float = None
+     s_noise: float = None
+     override_settings: dict[str, Any] = None
+     override_settings_restore_afterwards: bool = True
+     sampler_index: int = None
+     refiner_checkpoint: str = None
+     refiner_switch_at: float = None
+     token_merging_ratio = 0
+     token_merging_ratio_hr = 0
+     disable_extra_networks: bool = False
+
+     scripts_value: scripts.ScriptRunner = field(default=None, init=False)
+     script_args_value: list = field(default=None, init=False)
+     scripts_setup_complete: bool = field(default=False, init=False)
+
+     cached_uc = [None, None]
+     cached_c = [None, None]
+
+     comments: dict = None
+     sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
+     is_using_inpainting_conditioning: bool = field(default=False, init=False)
+     paste_to: tuple | None = field(default=None, init=False)
+
+     is_hr_pass: bool = field(default=False, init=False)
+
+     c: tuple = field(default=None, init=False)
+     uc: tuple = field(default=None, init=False)
+
+     rng: rng.ImageRNG | None = field(default=None, init=False)
+     step_multiplier: int = field(default=1, init=False)
+     color_corrections: list = field(default=None, init=False)
+
+     all_prompts: list = field(default=None, init=False)
+     all_negative_prompts: list = field(default=None, init=False)
+     all_seeds: list = field(default=None, init=False)
+     all_subseeds: list = field(default=None, init=False)
+     iteration: int = field(default=0, init=False)
+     main_prompt: str = field(default=None, init=False)
+     main_negative_prompt: str = field(default=None, init=False)
+
+     prompts: list = field(default=None, init=False)
+     negative_prompts: list = field(default=None, init=False)
+     seeds: list = field(default=None, init=False)
+     subseeds: list = field(default=None, init=False)
+     extra_network_data: dict = field(default=None, init=False)
+
+     user: str = field(default=None, init=False)
+
+     sd_model_name: str = field(default=None, init=False)
+     sd_model_hash: str = field(default=None, init=False)
+     sd_vae_name: str = field(default=None, init=False)
+     sd_vae_hash: str = field(default=None, init=False)
+
+     is_api: bool = field(default=False, init=False)
+
+     def __post_init__(self):
+         if self.sampler_index is not None:
+             print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
+
+         self.comments = {}
+
+         if self.styles is None:
+             self.styles = []
+
+         self.sampler_noise_scheduler_override = None
+         self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
+         self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
+         self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
+         self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
+         self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise
+
+         self.extra_generation_params = self.extra_generation_params or {}
+         self.override_settings = self.override_settings or {}
+         self.script_args = self.script_args or {}
+
+         self.refiner_checkpoint_info = None
+
+         if not self.seed_enable_extras:
+             self.subseed = -1
+             self.subseed_strength = 0
+             self.seed_resize_from_h = 0
+             self.seed_resize_from_w = 0
+
+         self.cached_uc = StableDiffusionProcessing.cached_uc
+         self.cached_c = StableDiffusionProcessing.cached_c
+
+     @property
+     def sd_model(self):
+         return shared.sd_model
+
+     @sd_model.setter
+     def sd_model(self, value):
+         pass
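+         # The setter deliberately swallows assignments: the model is always read from
+         # shared.sd_model, so the dataclass-generated __init__ assigning sd_model has no effect.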
+
+     @property
+     def scripts(self):
+         return self.scripts_value
+
+     @scripts.setter
+     def scripts(self, value):
+         self.scripts_value = value
+
+         if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+             self.setup_scripts()
+
+     @property
+     def script_args(self):
+         return self.script_args_value
+
+     @script_args.setter
+     def script_args(self, value):
+         self.script_args_value = value
+
+         if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+             self.setup_scripts()
+
+     def setup_scripts(self):
+         self.scripts_setup_complete = True
+
+         self.scripts.setup_scrips(self, is_ui=not self.is_api)
+
+     def comment(self, text):
+         self.comments[text] = 1
+
+     def txt2img_image_conditioning(self, x, width=None, height=None):
+         self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}
+
+         return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)
+
+     def depth2img_image_conditioning(self, source_image):
+         # Use the AddMiDaS helper to format our source image to suit the MiDaS model
+         transformer = AddMiDaS(model_type="dpt_hybrid")
+         transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")})
+         midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
+         midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
+
+         conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
+         conditioning = torch.nn.functional.interpolate(
+             self.sd_model.depth_model(midas_in),
+             size=conditioning_image.shape[2:],
+             mode="bicubic",
+             align_corners=False,
+         )
+
+         (depth_min, depth_max) = torch.aminmax(conditioning)
+         conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
+         return conditioning
+
+     def edit_image_conditioning(self, source_image):
+         conditioning_image = shared.sd_model.encode_first_stage(source_image).mode()
+
+         return conditioning_image
+
+     def unclip_image_conditioning(self, source_image):
+         c_adm = self.sd_model.embedder(source_image)
+         if self.sd_model.noise_augmentor is not None:
+             noise_level = 0  # TODO: Allow other noise levels?
+             c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
+             c_adm = torch.cat((c_adm, noise_level_emb), 1)
+         return c_adm
+
+     def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
+         self.is_using_inpainting_conditioning = True
+
+         # Handle the different mask inputs
+         if image_mask is not None:
+             if torch.is_tensor(image_mask):
+                 conditioning_mask = image_mask
+             else:
+                 conditioning_mask = np.array(image_mask.convert("L"))
+                 conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
+                 conditioning_mask = torch.from_numpy(conditioning_mask[None, None])
+
+                 # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
+                 conditioning_mask = torch.round(conditioning_mask)
+         else:
+             conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])
+
+         # Create another latent image, this time with a masked version of the original input.
+         # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
+         conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
+         conditioning_image = torch.lerp(
+             source_image,
+             source_image * (1.0 - conditioning_mask),
+             getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
+         )
+
+         # Encode the new masked image using first stage of network.
+         conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
+
+         # Create the concatenated conditioning tensor to be fed to `c_concat`
+         conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
+         conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
+         image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
+         image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)
+
+         return image_conditioning
+
+     def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
+         source_image = devices.cond_cast_float(source_image)
+
+         # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
+         # identify itself with a field common to all models. The conditioning_key is also hybrid.
+         if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
+             return self.depth2img_image_conditioning(source_image)
+
+         if self.sd_model.cond_stage_key == "edit":
+             return self.edit_image_conditioning(source_image)
+
+         if self.sampler.conditioning_key in {'hybrid', 'concat'}:
+             return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+
+         if self.sampler.conditioning_key == "crossattn-adm":
+             return self.unclip_image_conditioning(source_image)
+
+         sd = self.sampler.model_wrap.inner_model.model.state_dict()
+         diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+         if diffusion_model_input is not None:
+             if diffusion_model_input.shape[1] == 9:
+                 return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+
+         # Dummy zero conditioning if we're not using inpainting or depth model.
+         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
+
+     def init(self, all_prompts, all_seeds, all_subseeds):
+         pass
+
+     def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
+         raise NotImplementedError()
+
+     def close(self):
+         self.sampler = None
+         self.c = None
+         self.uc = None
+         if not opts.persistent_cond_cache:
+             StableDiffusionProcessing.cached_c = [None, None]
+             StableDiffusionProcessing.cached_uc = [None, None]
+
+     def get_token_merging_ratio(self, for_hr=False):
+         if for_hr:
+             return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio
+
+         return self.token_merging_ratio or opts.token_merging_ratio
+
+     def setup_prompts(self):
+         if isinstance(self.prompt, list):
+             self.all_prompts = self.prompt
+         elif isinstance(self.negative_prompt, list):
+             self.all_prompts = [self.prompt] * len(self.negative_prompt)
+         else:
+             self.all_prompts = self.batch_size * self.n_iter * [self.prompt]
+
+         if isinstance(self.negative_prompt, list):
+             self.all_negative_prompts = self.negative_prompt
+         else:
+             self.all_negative_prompts = [self.negative_prompt] * len(self.all_prompts)
+
+         if len(self.all_prompts) != len(self.all_negative_prompts):
+             raise RuntimeError(f"Received a different number of prompts ({len(self.all_prompts)}) and negative prompts ({len(self.all_negative_prompts)})")
+
+         self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
+         self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]
+
+         self.main_prompt = self.all_prompts[0]
+         self.main_negative_prompt = self.all_negative_prompts[0]
+
+     def cached_params(self, required_prompts, steps, extra_network_data, hires_steps=None, use_old_scheduling=False):
+         """Returns parameters that invalidate the cond cache if changed"""
+
+         return (
+             required_prompts,
+             steps,
+             hires_steps,
+             use_old_scheduling,
+             opts.CLIP_stop_at_last_layers,
+             shared.sd_model.sd_checkpoint_info,
+             extra_network_data,
+             opts.sdxl_crop_left,
+             opts.sdxl_crop_top,
+             self.width,
+             self.height,
+         )
+
+     def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data, hires_steps=None):
+         """
+         Returns the result of calling function(shared.sd_model, required_prompts, steps)
+         using a cache to store the result if the same arguments have been used before.
+
+         cache is an array containing two elements. The first element is a tuple
+         representing the previously used arguments, or None if no arguments
+         have been used before. The second element is where the previously
+         computed result is stored.
+
+         caches is a list with items described above.
+         """
+
+         if shared.opts.use_old_scheduling:
+             old_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, False)
+             new_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, True)
+             if old_schedules != new_schedules:
+                 self.extra_generation_params["Old prompt editing timelines"] = True
+
+         cached_params = self.cached_params(required_prompts, steps, extra_network_data, hires_steps, shared.opts.use_old_scheduling)
+
+         for cache in caches:
+             if cache[0] is not None and cached_params == cache[0]:
+                 return cache[1]
+
+         cache = caches[0]
+
+         with devices.autocast():
+             cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
+
+         cache[0] = cached_params
+         return cache[1]
+
+     def setup_conds(self):
+         prompts = prompt_parser.SdConditioning(self.prompts, width=self.width, height=self.height)
+         negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True)
+
+         sampler_config = sd_samplers.find_sampler_config(self.sampler_name)
+         total_steps = sampler_config.total_steps(self.steps) if sampler_config else self.steps
+         self.step_multiplier = total_steps // self.steps
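+         # e.g. a second-order sampler that reports total_steps == 2 * steps yields a
+         # step_multiplier of 2 (it is simply the ratio of reported to requested steps)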
+         self.firstpass_steps = total_steps
+
+         self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
+         self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
+
+     def get_conds(self):
+         return self.c, self.uc
+
+     def parse_extra_network_prompts(self):
+         self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts)
+
+     def save_samples(self) -> bool:
+         """Returns whether generated images need to be written to disk"""
+         return opts.samples_save and not self.do_not_save_samples and (opts.save_incomplete_images or not state.interrupted and not state.skipped)
+
+
+ class Processed:
+     def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
+         self.images = images_list
+         self.prompt = p.prompt
+         self.negative_prompt = p.negative_prompt
+         self.seed = seed
+         self.subseed = subseed
+         self.subseed_strength = p.subseed_strength
+         self.info = info
+         self.comments = "".join(f"{comment}\n" for comment in p.comments)
+         self.width = p.width
+         self.height = p.height
+         self.sampler_name = p.sampler_name
+         self.cfg_scale = p.cfg_scale
+         self.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
+         self.steps = p.steps
+         self.batch_size = p.batch_size
+         self.restore_faces = p.restore_faces
+         self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
+         self.sd_model_name = p.sd_model_name
+         self.sd_model_hash = p.sd_model_hash
+         self.sd_vae_name = p.sd_vae_name
+         self.sd_vae_hash = p.sd_vae_hash
+         self.seed_resize_from_w = p.seed_resize_from_w
+         self.seed_resize_from_h = p.seed_resize_from_h
+         self.denoising_strength = getattr(p, 'denoising_strength', None)
+         self.extra_generation_params = p.extra_generation_params
+         self.index_of_first_image = index_of_first_image
+         self.styles = p.styles
+         self.job_timestamp = state.job_timestamp
+         self.clip_skip = opts.CLIP_stop_at_last_layers
+         self.token_merging_ratio = p.token_merging_ratio
+         self.token_merging_ratio_hr = p.token_merging_ratio_hr
+
+         self.eta = p.eta
+         self.ddim_discretize = p.ddim_discretize
+         self.s_churn = p.s_churn
+         self.s_tmin = p.s_tmin
+         self.s_tmax = p.s_tmax
+         self.s_noise = p.s_noise
+         self.s_min_uncond = p.s_min_uncond
+         self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
+         self.prompt = self.prompt if not isinstance(self.prompt, list) else self.prompt[0]
+         self.negative_prompt = self.negative_prompt if not isinstance(self.negative_prompt, list) else self.negative_prompt[0]
+         self.seed = int(self.seed if not isinstance(self.seed, list) else self.seed[0]) if self.seed is not None else -1
+         self.subseed = int(self.subseed if not isinstance(self.subseed, list) else self.subseed[0]) if self.subseed is not None else -1
+         self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning
+
+         self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
+         self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
+         self.all_seeds = all_seeds or p.all_seeds or [self.seed]
+         self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
+         self.infotexts = infotexts or [info]
+         self.version = program_version()
+
+     def js(self):
+         obj = {
+             "prompt": self.all_prompts[0],
+             "all_prompts": self.all_prompts,
+             "negative_prompt": self.all_negative_prompts[0],
+             "all_negative_prompts": self.all_negative_prompts,
+             "seed": self.seed,
+             "all_seeds": self.all_seeds,
+             "subseed": self.subseed,
+             "all_subseeds": self.all_subseeds,
+             "subseed_strength": self.subseed_strength,
+             "width": self.width,
+             "height": self.height,
+             "sampler_name": self.sampler_name,
+             "cfg_scale": self.cfg_scale,
+             "steps": self.steps,
+             "batch_size": self.batch_size,
+             "restore_faces": self.restore_faces,
+             "face_restoration_model": self.face_restoration_model,
+             "sd_model_name": self.sd_model_name,
+             "sd_model_hash": self.sd_model_hash,
+             "sd_vae_name": self.sd_vae_name,
+             "sd_vae_hash": self.sd_vae_hash,
+             "seed_resize_from_w": self.seed_resize_from_w,
+             "seed_resize_from_h": self.seed_resize_from_h,
+             "denoising_strength": self.denoising_strength,
+             "extra_generation_params": self.extra_generation_params,
+             "index_of_first_image": self.index_of_first_image,
+             "infotexts": self.infotexts,
+             "styles": self.styles,
+             "job_timestamp": self.job_timestamp,
+             "clip_skip": self.clip_skip,
+             "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
+             "version": self.version,
+         }
+
+         return json.dumps(obj)
+
+     def infotext(self, p: StableDiffusionProcessing, index):
+         return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
+
+     def get_token_merging_ratio(self, for_hr=False):
+         return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio
+
+
+ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
+     g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w)
+     return g.next()
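+ # create_random_tensors is a thin wrapper around rng.ImageRNG: callers get one batch of
+ # noise for the given shape and seeds without constructing the RNG object themselves.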
+
+
+ class DecodedSamples(list):
+     already_decoded = True
+
+
+ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
+     samples = DecodedSamples()
+
+     for i in range(batch.shape[0]):
+         sample = decode_first_stage(model, batch[i:i + 1])[0]
+
+         if check_for_nans:
+             try:
+                 devices.test_for_nans(sample, "vae")
+             except devices.NansException as e:
+                 if devices.dtype_vae == torch.float32 or not shared.opts.auto_vae_precision:
+                     raise e
+
+                 errors.print_error_explanation(
+                     "A tensor with all NaNs was produced in VAE.\n"
+                     "Web UI will now convert VAE into 32-bit float and retry.\n"
+                     "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n"
+                     "To always start with 32-bit VAE, use --no-half-vae commandline flag."
+                 )
+
+                 devices.dtype_vae = torch.float32
+                 model.first_stage_model.to(devices.dtype_vae)
+                 batch = batch.to(devices.dtype_vae)
+
+                 sample = decode_first_stage(model, batch[i:i + 1])[0]
+
+         if target_device is not None:
+             sample = sample.to(target_device)
+
+         samples.append(sample)
+
+     return samples
+
+
+ def get_fixed_seed(seed):
+     if seed == '' or seed is None:
+         seed = -1
+     elif isinstance(seed, str):
+         try:
+             seed = int(seed)
+         except Exception:
+             seed = -1
+
+     if seed == -1:
+         return int(random.randrange(4294967294))
+
+     return seed
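+ # e.g. get_fixed_seed("42") -> 42, while get_fixed_seed(-1), get_fixed_seed('') and
+ # get_fixed_seed(None) each return a fresh random seed in [0, 4294967294)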
+
+
+ def fix_seed(p):
+     p.seed = get_fixed_seed(p.seed)
+     p.subseed = get_fixed_seed(p.subseed)
+
+
+ def program_version():
+     import launch
+
+     res = launch.git_tag()
+     if res == "<none>":
+         res = None
+
+     return res
+
+
+ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+     if index is None:
+         index = position_in_batch + iteration * p.batch_size
+
+     if all_negative_prompts is None:
+         all_negative_prompts = p.all_negative_prompts
+
+     clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
+     enable_hr = getattr(p, 'enable_hr', False)
+     token_merging_ratio = p.get_token_merging_ratio()
+     token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True)
+
+     uses_ensd = opts.eta_noise_seed_delta != 0
+     if uses_ensd:
+         uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)
+
+     generation_params = {
+         "Steps": p.steps,
+         "Sampler": p.sampler_name,
+         "CFG scale": p.cfg_scale,
+         "Image CFG scale": getattr(p, 'image_cfg_scale', None),
+         "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
+         "Face restoration": opts.face_restoration_model if p.restore_faces else None,
+         "Size": f"{p.width}x{p.height}",
+         "Model hash": p.sd_model_hash if opts.add_model_hash_to_info else None,
+         "Model": p.sd_model_name if opts.add_model_name_to_info else None,
+         "VAE hash": p.sd_vae_hash if opts.add_vae_hash_to_info else None,
+         "VAE": p.sd_vae_name if opts.add_vae_name_to_info else None,
+         "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
+         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
+         "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
+         "Denoising strength": getattr(p, 'denoising_strength', None),
+         "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
+         "Clip skip": None if clip_skip <= 1 else clip_skip,
+         "ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
+         "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio,
+         "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
+         "Init image hash": getattr(p, 'init_img_hash', None),
+         "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
+         "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
+         "Tiling": "True" if p.tiling else None,
+         **p.extra_generation_params,
+         "Version": program_version() if opts.add_version_to_infotext else None,
+         "User": p.user if opts.add_user_name_to_info else None,
+     }
+
+     generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
+
+     prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
+     negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else ""
+
+     return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
+
+
+ def process_images(p: StableDiffusionProcessing) -> Processed:
+     if p.scripts is not None:
+         p.scripts.before_process(p)
+
+     stored_opts = {k: opts.data[k] if k in opts.data else opts.get_default(k) for k in p.override_settings.keys() if k in opts.data}
+
+     try:
+         # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint
+         # and if after running refiner, the refiner model is not unloaded - webui swaps back to main model here, if a model override is present it will be reloaded afterwards
+         if sd_models.checkpoint_aliases.get(p.override_settings.get('sd_model_checkpoint')) is None:
+             p.override_settings.pop('sd_model_checkpoint', None)
+             sd_models.reload_model_weights()
+
+         for k, v in p.override_settings.items():
+             opts.set(k, v, is_api=True, run_callbacks=False)
+
+             if k == 'sd_model_checkpoint':
+                 sd_models.reload_model_weights()
+
+             if k == 'sd_vae':
+                 sd_vae.reload_vae_weights()
+
+         sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())
+
+         res = process_images_inner(p)
+
+     finally:
+         sd_models.apply_token_merging(p.sd_model, 0)
+
+         # restore opts to original state
+         if p.override_settings_restore_afterwards:
+             for k, v in stored_opts.items():
+                 setattr(opts, k, v)
+
+                 if k == 'sd_vae':
+                     sd_vae.reload_vae_weights()
+
+     return res
+
+
+ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
+     """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
+
+     if isinstance(p.prompt, list):
+         assert(len(p.prompt) > 0)
+     else:
+         assert p.prompt is not None
+
+     devices.torch_gc()
+
+     seed = get_fixed_seed(p.seed)
+     subseed = get_fixed_seed(p.subseed)
+
+     if p.restore_faces is None:
+         p.restore_faces = opts.face_restoration
+
+     if p.tiling is None:
+         p.tiling = opts.tiling
+
+     if p.refiner_checkpoint not in (None, "", "None", "none"):
+         p.refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(p.refiner_checkpoint)
+         if p.refiner_checkpoint_info is None:
+             raise Exception(f'Could not find checkpoint with name {p.refiner_checkpoint}')
+
+     p.sd_model_name = shared.sd_model.sd_checkpoint_info.name_for_extra
+     p.sd_model_hash = shared.sd_model.sd_model_hash
+     p.sd_vae_name = sd_vae.get_loaded_vae_name()
+     p.sd_vae_hash = sd_vae.get_loaded_vae_hash()
+
+     modules.sd_hijack.model_hijack.apply_circular(p.tiling)
+     modules.sd_hijack.model_hijack.clear_comments()
+
+     p.setup_prompts()
+
+     if isinstance(seed, list):
+         p.all_seeds = seed
+     else:
+         p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]
+
+     if isinstance(subseed, list):
+         p.all_subseeds = subseed
+     else:
+         p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
+
+     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
+         model_hijack.embedding_db.load_textual_inversion_embeddings()
+
+     if p.scripts is not None:
+         p.scripts.process(p)
+
+     infotexts = []
+     output_images = []
+     with torch.no_grad(), p.sd_model.ema_scope():
+         with devices.autocast():
+             p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
+
+             # for OSX, loading the model during sampling changes the generated picture, so it is loaded here
+             if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
+                 sd_vae_approx.model()
+
+             sd_unet.apply_unet()
+
+         if state.job_count == -1:
+             state.job_count = p.n_iter
+
+         for n in range(p.n_iter):
+             p.iteration = n
+
+             if state.skipped:
+                 state.skipped = False
+
+             if state.interrupted:
+                 break
+
+             sd_models.reload_model_weights()  # model can be changed for example by refiner
+
+             p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+             p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+             p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
+             p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+
+             p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)
+
+             if p.scripts is not None:
+                 p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
+
+             if len(p.prompts) == 0:
+                 break
+
+             p.parse_extra_network_prompts()
+
+             if not p.disable_extra_networks:
+                 with devices.autocast():
+                     extra_networks.activate(p, p.extra_network_data)
+
+             if p.scripts is not None:
+                 p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
+
+             # params.txt should be saved after scripts.process_batch, since the
+             # infotext could be modified by that callback
+             # Example: a wildcard processed by process_batch sets an extra model
+             # strength, which is saved as "Model Strength: 1.0" in the infotext
+             if n == 0:
+                 with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
+                     processed = Processed(p, [])
+                     file.write(processed.infotext(p, 0))
+
+             p.setup_conds()
+
+             for comment in model_hijack.comments:
+                 p.comment(comment)
+
+             p.extra_generation_params.update(model_hijack.extra_generation_params)
+
+             if p.n_iter > 1:
+                 shared.state.job = f"Batch {n+1} out of {p.n_iter}"
+
+             with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
+                 samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
+
+             if getattr(samples_ddim, 'already_decoded', False):
+                 x_samples_ddim = samples_ddim
+             else:
+                 if opts.sd_vae_decode_method != 'Full':
+                     p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
+                 x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+
+             x_samples_ddim = torch.stack(x_samples_ddim).float()
+             x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+
+             del samples_ddim
+
+             if lowvram.is_enabled(shared.sd_model):
+                 lowvram.send_everything_to_cpu()
+
+             devices.torch_gc()
+
+             state.nextjob()
+
+             if p.scripts is not None:
+                 p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)
+
+                 p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+                 p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+
+                 batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
+                 p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
+                 x_samples_ddim = batch_params.images
+
+             def infotext(index=0, use_main_prompt=False):
+                 return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)
+
+             save_samples = p.save_samples()
+
+             for i, x_sample in enumerate(x_samples_ddim):
+                 p.batch_index = i
+
+                 x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+                 x_sample = x_sample.astype(np.uint8)
+
+                 if p.restore_faces:
+                     if save_samples and opts.save_images_before_face_restoration:
+                         images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")
+
+                     devices.torch_gc()
+
+                     x_sample = modules.face_restoration.restore_faces(x_sample)
+                     devices.torch_gc()
+
+                 image = Image.fromarray(x_sample)
+
+                 if p.scripts is not None:
+                     pp = scripts.PostprocessImageArgs(image)
+                     p.scripts.postprocess_image(p, pp)
+                     image = pp.image
+                 if p.color_corrections is not None and i < len(p.color_corrections):
+                     if save_samples and opts.save_images_before_color_correction:
+                         image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
+                         images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
+                     image = apply_color_correction(p.color_corrections[i], image)
+
+                 image = apply_overlay(image, p.paste_to, i, p.overlay_images)
+
+                 if save_samples:
+                     images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)
+
+                 text = infotext(i)
+                 infotexts.append(text)
+                 if opts.enable_pnginfo:
+                     image.info["parameters"] = text
+                 output_images.append(image)
+                 if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay:
+                     if opts.return_mask or opts.save_mask:
+                         image_mask = p.mask_for_overlay.convert('RGB')
+                         if save_samples and opts.save_mask:
+                             images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
+                         if opts.return_mask:
+                             output_images.append(image_mask)
+
+                     if opts.return_mask_composite or opts.save_mask_composite:
+                         image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
+                         if save_samples and opts.save_mask_composite:
+                             images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
+                         if opts.return_mask_composite:
+                             output_images.append(image_mask_composite)
+
+             del x_samples_ddim
+
+             devices.torch_gc()
+
+         if not infotexts:
+             infotexts.append(Processed(p, []).infotext(p, 0))
+
+         p.color_corrections = None
+
+         index_of_first_image = 0
+         unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
+         if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
+             grid = images.image_grid(output_images, p.batch_size)
+
+             if opts.return_grid:
+                 text = infotext(use_main_prompt=True)
+                 infotexts.insert(0, text)
+                 if opts.enable_pnginfo:
+                     grid.info["parameters"] = text
+                 output_images.insert(0, grid)
+                 index_of_first_image = 1
+             if opts.grid_save:
+                 images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True)
+
+     if not p.disable_extra_networks and p.extra_network_data:
+         extra_networks.deactivate(p, p.extra_network_data)
+
+     devices.torch_gc()
+
+     res = Processed(
+         p,
+         images_list=output_images,
+         seed=p.all_seeds[0],
+         info=infotexts[0],
+         subseed=p.all_subseeds[0],
+         index_of_first_image=index_of_first_image,
+         infotexts=infotexts,
+     )
+
+     if p.scripts is not None:
+         p.scripts.postprocess(p, res)
+
+     return res
+
+
+ def old_hires_fix_first_pass_dimensions(width, height):
+     """old algorithm for auto-calculating first pass size"""
+
+     desired_pixel_count = 512 * 512
+     actual_pixel_count = width * height
+     scale = math.sqrt(desired_pixel_count / actual_pixel_count)
+     width = math.ceil(scale * width / 64) * 64
+     height = math.ceil(scale * height / 64) * 64
+
+     return width, height
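+
+ # e.g. 768x1024: scale = sqrt(512*512 / (768*1024)) ≈ 0.577, so the first pass becomes
+ # 448x640 — about 512x512 worth of pixels, rounded up to multiples of 64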
+
+
+ @dataclass(repr=False)
+ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
+     enable_hr: bool = False
+     denoising_strength: float = 0.75
+     firstphase_width: int = 0
+     firstphase_height: int = 0
+     hr_scale: float = 2.0
+     hr_upscaler: str = None
+     hr_second_pass_steps: int = 0
+     hr_resize_x: int = 0
+     hr_resize_y: int = 0
+     hr_checkpoint_name: str = None
+     hr_sampler_name: str = None
+     hr_prompt: str = ''
+     hr_negative_prompt: str = ''
+
+     cached_hr_uc = [None, None]
+     cached_hr_c = [None, None]
+
+     hr_checkpoint_info: dict = field(default=None, init=False)
+     hr_upscale_to_x: int = field(default=0, init=False)
+     hr_upscale_to_y: int = field(default=0, init=False)
+     truncate_x: int = field(default=0, init=False)
+     truncate_y: int = field(default=0, init=False)
+     applied_old_hires_behavior_to: tuple = field(default=None, init=False)
+     latent_scale_mode: dict = field(default=None, init=False)
+     hr_c: tuple | None = field(default=None, init=False)
+     hr_uc: tuple | None = field(default=None, init=False)
+     all_hr_prompts: list = field(default=None, init=False)
+     all_hr_negative_prompts: list = field(default=None, init=False)
+     hr_prompts: list = field(default=None, init=False)
+     hr_negative_prompts: list = field(default=None, init=False)
+     hr_extra_network_data: list = field(default=None, init=False)
+
+     def __post_init__(self):
+         super().__post_init__()
+
+         if self.firstphase_width != 0 or self.firstphase_height != 0:
+             self.hr_upscale_to_x = self.width
+             self.hr_upscale_to_y = self.height
+             self.width = self.firstphase_width
+             self.height = self.firstphase_height
+
+         self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc
+         self.cached_hr_c = StableDiffusionProcessingTxt2Img.cached_hr_c
+
+     def calculate_target_resolution(self):
+         if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
+             self.hr_resize_x = self.width
+             self.hr_resize_y = self.height
+             self.hr_upscale_to_x = self.width
+             self.hr_upscale_to_y = self.height
+
+             self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
+             self.applied_old_hires_behavior_to = (self.width, self.height)
+
+         if self.hr_resize_x == 0 and self.hr_resize_y == 0:
+             self.extra_generation_params["Hires upscale"] = self.hr_scale
+             self.hr_upscale_to_x = int(self.width * self.hr_scale)
+             self.hr_upscale_to_y = int(self.height * self.hr_scale)
+         else:
+             self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"
+
+             if self.hr_resize_y == 0:
+                 self.hr_upscale_to_x = self.hr_resize_x
+                 self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+             elif self.hr_resize_x == 0:
+                 self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+                 self.hr_upscale_to_y = self.hr_resize_y
+             else:
+                 target_w = self.hr_resize_x
+                 target_h = self.hr_resize_y
+                 src_ratio = self.width / self.height
+                 dst_ratio = self.hr_resize_x / self.hr_resize_y
+
+                 if src_ratio < dst_ratio:
+                     self.hr_upscale_to_x = self.hr_resize_x
+                     self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+                 else:
+                     self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+                     self.hr_upscale_to_y = self.hr_resize_y
+
+                 self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
+                 self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
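+                 # e.g. a 512x512 first pass resized to 1024x768: the upscale keeps the
+                 # source aspect ratio (1024x1024 here), and truncate_y records the
+                 # (1024 - 768) // 8 = 32 latent rows to crop off after upscaling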
1118
+
1119
+ def init(self, all_prompts, all_seeds, all_subseeds):
1120
+ if self.enable_hr:
1121
+ if self.hr_checkpoint_name:
1122
+ self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
1123
+
1124
+ if self.hr_checkpoint_info is None:
1125
+ raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}')
1126
+
1127
+ self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
1128
+
1129
+ if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
1130
+ self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
1131
+
1132
+ if tuple(self.hr_prompt) != tuple(self.prompt):
1133
+ self.extra_generation_params["Hires prompt"] = self.hr_prompt
1134
+
1135
+ if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
1136
+ self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
1137
+
1138
+ self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
1139
+ if self.enable_hr and self.latent_scale_mode is None:
1140
+ if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
1141
+ raise Exception(f"could not find upscaler named {self.hr_upscaler}")
1142
+
1143
+ self.calculate_target_resolution()
1144
+
1145
+ if not state.processing_has_refined_job_count:
1146
+ if state.job_count == -1:
1147
+ state.job_count = self.n_iter
1148
+
1149
+ shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
1150
+ state.job_count = state.job_count * 2
1151
+ state.processing_has_refined_job_count = True
1152
+
1153
+ if self.hr_second_pass_steps:
1154
+ self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps
1155
+
1156
+ if self.hr_upscaler is not None:
1157
+ self.extra_generation_params["Hires upscaler"] = self.hr_upscaler
1158
+
1159
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
1160
+ self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
1161
+
1162
+ x = self.rng.next()
1163
+ samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
1164
+ del x
1165
+
1166
+ if not self.enable_hr:
1167
+ return samples
1168
+ devices.torch_gc()
1169
+
1170
+ if self.latent_scale_mode is None:
1171
+ decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)
1172
+ else:
1173
+ decoded_samples = None
1174
+
1175
+ with sd_models.SkipWritingToConfig():
1176
+ sd_models.reload_model_weights(info=self.hr_checkpoint_info)
1177
+
1178
+ return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
1179
+
1180
+    def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
+        if shared.state.interrupted:
+            return samples
+
+        self.is_hr_pass = True
+        target_width = self.hr_upscale_to_x
+        target_height = self.hr_upscale_to_y
+
+        def save_intermediate(image, index):
+            """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
+
+            if not self.save_samples() or not opts.save_images_before_highres_fix:
+                return
+
+            if not isinstance(image, Image.Image):
+                image = sd_samplers.sample_to_image(image, index, approximation=0)
+
+            info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
+            images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
+
+        img2img_sampler_name = self.hr_sampler_name or self.sampler_name
+
+        self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
+
+        if self.latent_scale_mode is not None:
+            for i in range(samples.shape[0]):
+                save_intermediate(samples, i)
+
+            samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=self.latent_scale_mode["mode"], antialias=self.latent_scale_mode["antialias"])
+
+            # Avoid making the inpainting conditioning unless necessary as
+            # this does need some extra compute to decode / encode the image again.
+            if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
+                image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
+            else:
+                image_conditioning = self.txt2img_image_conditioning(samples)
+        else:
+            lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
+
+            batch_images = []
+            for i, x_sample in enumerate(lowres_samples):
+                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+                x_sample = x_sample.astype(np.uint8)
+                image = Image.fromarray(x_sample)
+
+                save_intermediate(image, i)
+
+                image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
+                image = np.array(image).astype(np.float32) / 255.0
+                image = np.moveaxis(image, 2, 0)
+                batch_images.append(image)
+
+            decoded_samples = torch.from_numpy(np.array(batch_images))
+            decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae)
+
+            if opts.sd_vae_encode_method != 'Full':
+                self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+            samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))
+
+            image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
+
+        shared.state.nextjob()
+
+        samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
+
+        self.rng = rng.ImageRNG(samples.shape[1:], self.seeds, subseeds=self.subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w)
+        noise = self.rng.next()
+
+        # GC now before running the next img2img to prevent running out of memory
+        devices.torch_gc()
+
+        if not self.disable_extra_networks:
+            with devices.autocast():
+                extra_networks.activate(self, self.hr_extra_network_data)
+
+        with devices.autocast():
+            self.calculate_hr_conds()
+
+        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True))
+
+        if self.scripts is not None:
+            self.scripts.before_hr(self)
+
+        samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
+
+        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())
+
+        self.sampler = None
+        devices.torch_gc()
+
+        decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)
+
+        self.is_hr_pass = False
+        return decoded_samples
+
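+    # [Editor's note: illustrative sketch, not part of the original commit.]
+    # sample_hr_pass above takes one of two upscale routes: with a latent-space
+    # upscaler the latent tensor itself is interpolated, otherwise the decoded
+    # image is upscaled in pixel space and re-encoded through the VAE. A minimal
+    # standalone sketch of the latent route, assuming a 4-channel SD latent and
+    # the usual latent downscale factor opt_f == 8:
+    #
+    #     import torch
+    #     latent = torch.randn(2, 4, 64, 64)      # two 512x512 images in latent space
+    #     hires = torch.nn.functional.interpolate(
+    #         latent, size=(768 // 8, 768 // 8), mode="bilinear", antialias=False)
+    #     assert hires.shape == (2, 4, 96, 96)    # now a 768x768 latent
+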
+    def close(self):
+        super().close()
+        self.hr_c = None
+        self.hr_uc = None
+        if not opts.persistent_cond_cache:
+            StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None]
+            StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None]
+
+    def setup_prompts(self):
+        super().setup_prompts()
+
+        if not self.enable_hr:
+            return
+
+        if self.hr_prompt == '':
+            self.hr_prompt = self.prompt
+
+        if self.hr_negative_prompt == '':
+            self.hr_negative_prompt = self.negative_prompt
+
+        if isinstance(self.hr_prompt, list):
+            self.all_hr_prompts = self.hr_prompt
+        else:
+            self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]
+
+        if isinstance(self.hr_negative_prompt, list):
+            self.all_hr_negative_prompts = self.hr_negative_prompt
+        else:
+            self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]
+
+        self.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_hr_prompts]
+        self.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_hr_negative_prompts]
+
+    def calculate_hr_conds(self):
+        if self.hr_c is not None:
+            return
+
+        hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y)
+        hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True)
+
+        sampler_config = sd_samplers.find_sampler_config(self.hr_sampler_name or self.sampler_name)
+        steps = self.hr_second_pass_steps or self.steps
+        total_steps = sampler_config.total_steps(steps) if sampler_config else steps
+
+        self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.firstpass_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data, total_steps)
+        self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.firstpass_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data, total_steps)
+
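+    # [Editor's note: illustrative sketch, not part of the original commit.]
+    # calculate_hr_conds above wraps the hires prompts in prompt_parser.SdConditioning,
+    # a list subclass that additionally carries the target resolution so that
+    # size-aware embedders (e.g. SDXL's) can condition on it. Roughly:
+    #
+    #     prompts = prompt_parser.SdConditioning(["a photo"], width=1024, height=1024)
+    #     prompts[0]                      # -> "a photo"; still behaves like a list
+    #     prompts.width, prompts.height   # -> (1024, 1024)
+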
+    def setup_conds(self):
+        if self.is_hr_pass:
+            # if we are in hr pass right now, the call is being made from the refiner, and we don't need to set up firstpass conds or switch model
+            self.hr_c = None
+            self.calculate_hr_conds()
+            return
+
+        super().setup_conds()
+
+        self.hr_uc = None
+        self.hr_c = None
+
+        if self.enable_hr and self.hr_checkpoint_info is None:
+            if shared.opts.hires_fix_use_firstpass_conds:
+                self.calculate_hr_conds()
+
+            elif lowvram.is_enabled(shared.sd_model) and shared.sd_model.sd_checkpoint_info == sd_models.select_checkpoint():  # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
+                with devices.autocast():
+                    extra_networks.activate(self, self.hr_extra_network_data)
+
+                self.calculate_hr_conds()
+
+        with devices.autocast():
+            extra_networks.activate(self, self.extra_network_data)
+
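+    # [Editor's note: explanatory comment, not part of the original commit.]
+    # The lowvram branch in setup_conds above matters because in lowvram mode the
+    # text encoder is offloaded once sampling begins; computing the hires conds
+    # eagerly, while the cond model is still resident, avoids an extra
+    # unload/reload cycle between the first pass and the hires pass.
+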
+    def get_conds(self):
+        if self.is_hr_pass:
+            return self.hr_c, self.hr_uc
+
+        return super().get_conds()
+
+    def parse_extra_network_prompts(self):
+        res = super().parse_extra_network_prompts()
+
+        if self.enable_hr:
+            self.hr_prompts = self.all_hr_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]
+            self.hr_negative_prompts = self.all_hr_negative_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]
+
+            self.hr_prompts, self.hr_extra_network_data = extra_networks.parse_prompts(self.hr_prompts)
+
+        return res
+
+
+@dataclass(repr=False)
+class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
+    init_images: list = None
+    resize_mode: int = 0
+    denoising_strength: float = 0.75
+    image_cfg_scale: float = None
+    mask: Any = None
+    mask_blur_x: int = 4
+    mask_blur_y: int = 4
+    mask_blur: int = None
+    inpainting_fill: int = 0
+    inpaint_full_res: bool = True
+    inpaint_full_res_padding: int = 0
+    inpainting_mask_invert: int = 0
+    initial_noise_multiplier: float = None
+    latent_mask: Image = None
+
+    image_mask: Any = field(default=None, init=False)
+
+    nmask: torch.Tensor = field(default=None, init=False)
+    image_conditioning: torch.Tensor = field(default=None, init=False)
+    init_img_hash: str = field(default=None, init=False)
+    mask_for_overlay: Image = field(default=None, init=False)
+    init_latent: torch.Tensor = field(default=None, init=False)
+
+    def __post_init__(self):
+        super().__post_init__()
+
+        self.image_mask = self.mask
+        self.mask = None
+        self.initial_noise_multiplier = opts.initial_noise_multiplier if self.initial_noise_multiplier is None else self.initial_noise_multiplier
+
+    @property
+    def mask_blur(self):
+        if self.mask_blur_x == self.mask_blur_y:
+            return self.mask_blur_x
+        return None
+
+    @mask_blur.setter
+    def mask_blur(self, value):
+        if isinstance(value, int):
+            self.mask_blur_x = value
+            self.mask_blur_y = value
+
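+    # [Editor's note: illustrative sketch, not part of the original commit.]
+    # The mask_blur property is a facade over the two per-axis values:
+    #
+    #     p.mask_blur = 8      # setter writes both mask_blur_x and mask_blur_y
+    #     p.mask_blur          # -> 8 while the two axes agree
+    #     p.mask_blur_y = 16
+    #     p.mask_blur          # -> None once they differ
+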
+    def init(self, all_prompts, all_seeds, all_subseeds):
+        self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
+
+        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
+        crop_region = None
+
+        image_mask = self.image_mask
+
+        if image_mask is not None:
+            # image_mask is passed in as RGBA by Gradio to support alpha masks,
+            # but we still want to support binary masks.
+            image_mask = create_binary_mask(image_mask)
+
+            if self.inpainting_mask_invert:
+                image_mask = ImageOps.invert(image_mask)
+
+            if self.mask_blur_x > 0:
+                np_mask = np.array(image_mask)
+                kernel_size = 2 * int(2.5 * self.mask_blur_x + 0.5) + 1
+                np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x)
+                image_mask = Image.fromarray(np_mask)
+
+            if self.mask_blur_y > 0:
+                np_mask = np.array(image_mask)
+                kernel_size = 2 * int(2.5 * self.mask_blur_y + 0.5) + 1
+                np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y)
+                image_mask = Image.fromarray(np_mask)
+
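+            # [Editor's note: explanatory comment, not part of the original commit.]
+            # The two GaussianBlur calls above form a separable blur: a (k, 1)
+            # kernel blurs horizontally, a (1, k) kernel vertically, which is what
+            # lets the x and y blur radii differ. kernel_size = 2*int(2.5*sigma + 0.5) + 1
+            # picks the smallest odd kernel covering roughly +-2.5 standard deviations.
+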
+            if self.inpaint_full_res:
+                self.mask_for_overlay = image_mask
+                mask = image_mask.convert('L')
+                crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
+                crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
+                x1, y1, x2, y2 = crop_region
+
+                mask = mask.crop(crop_region)
+                image_mask = images.resize_image(2, mask, self.width, self.height)
+                self.paste_to = (x1, y1, x2-x1, y2-y1)
+            else:
+                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
+                np_mask = np.array(image_mask)
+                np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
+                self.mask_for_overlay = Image.fromarray(np_mask)
+
+            self.overlay_images = []
+
+        latent_mask = self.latent_mask if self.latent_mask is not None else image_mask
+
+        add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
+        if add_color_corrections:
+            self.color_corrections = []
+        imgs = []
+        for img in self.init_images:
+
+            # Save init image
+            if opts.save_init_img:
+                self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
+                images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)
+
+            image = images.flatten(img, opts.img2img_background_color)
+
+            if crop_region is None and self.resize_mode != 3:
+                image = images.resize_image(self.resize_mode, image, self.width, self.height)
+
+            if image_mask is not None:
+                image_masked = Image.new('RGBa', (image.width, image.height))
+                image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
+
+                self.overlay_images.append(image_masked.convert('RGBA'))
+
+            # crop_region is not None if we are doing inpaint full res
+            if crop_region is not None:
+                image = image.crop(crop_region)
+                image = images.resize_image(2, image, self.width, self.height)
+
+            if image_mask is not None:
+                if self.inpainting_fill != 1:
+                    image = masking.fill(image, latent_mask)
+
+                if add_color_corrections:
+                    self.color_corrections.append(setup_color_correction(image))
+
+            image = np.array(image).astype(np.float32) / 255.0
+            image = np.moveaxis(image, 2, 0)
+
+            imgs.append(image)
+
+        if len(imgs) == 1:
+            batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
+            if self.overlay_images is not None:
+                self.overlay_images = self.overlay_images * self.batch_size
+
+            if self.color_corrections is not None and len(self.color_corrections) == 1:
+                self.color_corrections = self.color_corrections * self.batch_size
+
+        elif len(imgs) <= self.batch_size:
+            self.batch_size = len(imgs)
+            batch_images = np.array(imgs)
+        else:
+            raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
+
+        image = torch.from_numpy(batch_images)
+        image = image.to(shared.device, dtype=devices.dtype_vae)
+
+        if opts.sd_vae_encode_method != 'Full':
+            self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+
+        self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
+        devices.torch_gc()
+
+        if self.resize_mode == 3:
+            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+
+        if image_mask is not None:
+            init_mask = latent_mask
+            latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
+            latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
+            latmask = latmask[0]
+            latmask = np.around(latmask)
+            latmask = np.tile(latmask[None], (4, 1, 1))
+
+            self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
+            self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
+
+            # this needs to be fixed to be done in sample() using actual seeds for batches
+            if self.inpainting_fill == 2:
+                self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
+            elif self.inpainting_fill == 3:
+                self.init_latent = self.init_latent * self.mask
+
+        self.image_conditioning = self.img2img_image_conditioning(image * 2 - 1, self.init_latent, image_mask)
+
+    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
+        x = self.rng.next()
+
+        if self.initial_noise_multiplier != 1.0:
+            self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
+            x *= self.initial_noise_multiplier
+
+        samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
+
+        if self.mask is not None:
+            samples = samples * self.nmask + self.init_latent * self.mask
+
+        del x
+        devices.torch_gc()
+
+        return samples
+
+    def get_token_merging_ratio(self, for_hr=False):
+        return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio
modules/sd_models_config.py ADDED
@@ -0,0 +1,131 @@
+import os
+
+import torch
+
+from modules import shared, paths, sd_disable_initialization, devices
+
+sd_configs_path = shared.sd_configs_path
+sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
+sd_xl_repo_configs_path = os.path.join(paths.paths['Stable Diffusion XL'], "configs", "inference")
+
+
+config_default = shared.sd_default_config
+config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
+config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
+config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
+config_sdxl = os.path.join(sd_xl_repo_configs_path, "sd_xl_base.yaml")
+config_sdxl_refiner = os.path.join(sd_xl_repo_configs_path, "sd_xl_refiner.yaml")
+config_sdxl_inpainting = os.path.join(sd_configs_path, "sd_xl_inpaint.yaml")
+config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
+config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
+config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
+config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
+config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
+config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
+config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml")
+
+def is_using_v_parameterization_for_sd2(state_dict):
+    """
+    Detects whether unet in state_dict is using v-parameterization. Returns True if it is. You're welcome.
+    """
+
+    import ldm.modules.diffusionmodules.openaimodel
+
+    device = devices.cpu
+
+    with sd_disable_initialization.DisableInitialization():
+        unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
+            use_checkpoint=True,
+            use_fp16=False,
+            image_size=32,
+            in_channels=4,
+            out_channels=4,
+            model_channels=320,
+            attention_resolutions=[4, 2, 1],
+            num_res_blocks=2,
+            channel_mult=[1, 2, 4, 4],
+            num_head_channels=64,
+            use_spatial_transformer=True,
+            use_linear_in_transformer=True,
+            transformer_depth=1,
+            context_dim=1024,
+            legacy=False
+        )
+        unet.eval()
+
+    with torch.no_grad():
+        unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
+        unet.load_state_dict(unet_sd, strict=True)
+        unet.to(device=device, dtype=torch.float)
+
+        test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
+        x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5
+
+        out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().item()
+
+    return out < -1
+
+
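+# [Editor's note: explanatory comment, not part of the original commit.]
+# The `out < -1` check above is a heuristic: the weights are loaded into a
+# throwaway SD2 UNet and run once on a constant input at t=999. An
+# eps-parameterized model predicts noise, so mean(output - x) stays near zero;
+# a v-parameterized model predicts roughly -x at high t, driving the mean
+# strongly negative. A threshold of -1 separates the two regimes in practice.
+
+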
+def guess_model_config_from_state_dict(sd, filename):
+    sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
+    diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
+    sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
+
+    if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None:
+        if diffusion_model_input.shape[1] == 9:
+            return config_sdxl_inpainting
+        else:
+            return config_sdxl
+    if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:
+        return config_sdxl_refiner
+    elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
+        return config_depth_model
+    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
+        return config_unclip
+    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
+        return config_unopenclip
+
+    if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
+        if diffusion_model_input.shape[1] == 9:
+            return config_sd2_inpainting
+        elif is_using_v_parameterization_for_sd2(sd):
+            return config_sd2v
+        else:
+            return config_sd2
+
+    if diffusion_model_input is not None:
+        if diffusion_model_input.shape[1] == 9:
+            return config_inpainting
+        if diffusion_model_input.shape[1] == 8:
+            return config_instruct_pix2pix
+
+    if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
+        if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024:
+            return config_alt_diffusion_m18
+        return config_alt_diffusion
+
+    return config_default
+
+
+def find_checkpoint_config(state_dict, info):
+    if info is None:
+        return guess_model_config_from_state_dict(state_dict, "")
+
+    config = find_checkpoint_config_near_filename(info)
+    if config is not None:
+        return config
+
+    return guess_model_config_from_state_dict(state_dict, info.filename)
+
+
+def find_checkpoint_config_near_filename(info):
+    if info is None:
+        return None
+
+    config = f"{os.path.splitext(info.filename)[0]}.yaml"
+    if os.path.exists(config):
+        return config
+
+    return None
+
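+# [Editor's note: illustrative sketch, not part of the original commit.]
+# A typical call path, assuming the checkpoint has already been read into a
+# state dict (e.g. with safetensors):
+#
+#     from safetensors.torch import load_file
+#     state_dict = load_file("/path/to/model.safetensors")
+#     config = find_checkpoint_config(state_dict, info=None)  # guesses from the keys
+#
+# When a CheckpointInfo is passed instead, a sidecar "model.yaml" next to the
+# checkpoint file takes precedence over any guessing.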
modules/sd_models_xl.py ADDED
@@ -0,0 +1,114 @@
+from __future__ import annotations
+
+import torch
+
+import sgm.models.diffusion
+import sgm.modules.diffusionmodules.denoiser_scaling
+import sgm.modules.diffusionmodules.discretizer
+from modules import devices, shared, prompt_parser
+
+
+def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch: prompt_parser.SdConditioning | list[str]):
+    for embedder in self.conditioner.embedders:
+        embedder.ucg_rate = 0.0
+
+    width = getattr(batch, 'width', 1024)
+    height = getattr(batch, 'height', 1024)
+    is_negative_prompt = getattr(batch, 'is_negative_prompt', False)
+    aesthetic_score = shared.opts.sdxl_refiner_low_aesthetic_score if is_negative_prompt else shared.opts.sdxl_refiner_high_aesthetic_score
+
+    devices_args = dict(device=devices.device, dtype=devices.dtype)
+
+    sdxl_conds = {
+        "txt": batch,
+        "original_size_as_tuple": torch.tensor([height, width], **devices_args).repeat(len(batch), 1),
+        "crop_coords_top_left": torch.tensor([shared.opts.sdxl_crop_top, shared.opts.sdxl_crop_left], **devices_args).repeat(len(batch), 1),
+        "target_size_as_tuple": torch.tensor([height, width], **devices_args).repeat(len(batch), 1),
+        "aesthetic_score": torch.tensor([aesthetic_score], **devices_args).repeat(len(batch), 1),
+    }
+
+    force_zero_negative_prompt = is_negative_prompt and all(x == '' for x in batch)
+    c = self.conditioner(sdxl_conds, force_zero_embeddings=['txt'] if force_zero_negative_prompt else [])
+
+    return c
+
+
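+# [Editor's note: illustrative sketch, not part of the original commit.]
+# For a batch of n prompts, each micro-conditioning entry built above is an
+# (n, 2) or (n, 1) tensor that GeneralConditioner embeds alongside the text:
+#
+#     batch = ["a cat", "a dog"]                        # len(batch) == 2
+#     torch.tensor([1024, 1024]).repeat(len(batch), 1)  # shape (2, 2)
+#     torch.tensor([6.0]).repeat(len(batch), 1)         # shape (2, 1)
+
+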
+def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
+    sd = self.model.state_dict()
+    diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+    if diffusion_model_input is not None:
+        if diffusion_model_input.shape[1] == 9:
+            x = torch.cat([x] + cond['c_concat'], dim=1)
+
+    return self.model(x, t, cond)
+
+
+def get_first_stage_encoding(self, x):  # SDXL's encode_first_stage does everything so get_first_stage_encoding is just there for compatibility
+    return x
+
+
+sgm.models.diffusion.DiffusionEngine.get_learned_conditioning = get_learned_conditioning
+sgm.models.diffusion.DiffusionEngine.apply_model = apply_model
+sgm.models.diffusion.DiffusionEngine.get_first_stage_encoding = get_first_stage_encoding
+
+
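+# [Editor's note: explanatory comment, not part of the original commit.]
+# Assigning plain functions onto the class this way makes them ordinary bound
+# methods on every DiffusionEngine instance, grafting the webui's SD1.5-style
+# interface onto sgm without forking the library.
+
+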
+def encode_embedding_init_text(self: sgm.modules.GeneralConditioner, init_text, nvpt):
+    res = []
+
+    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'encode_embedding_init_text')]:
+        encoded = embedder.encode_embedding_init_text(init_text, nvpt)
+        res.append(encoded)
+
+    return torch.cat(res, dim=1)
+
+
+def tokenize(self: sgm.modules.GeneralConditioner, texts):
+    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'tokenize')]:
+        return embedder.tokenize(texts)
+
+    raise AssertionError('no tokenizer available')
+
+
+def process_texts(self, texts):
+    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'process_texts')]:
+        return embedder.process_texts(texts)
+
+
+def get_target_prompt_token_count(self, token_count):
+    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'get_target_prompt_token_count')]:
+        return embedder.get_target_prompt_token_count(token_count)
+
+
+# those additions to GeneralConditioner make it possible to use it as model.cond_stage_model from SD1.5 in existing code
+sgm.modules.GeneralConditioner.encode_embedding_init_text = encode_embedding_init_text
+sgm.modules.GeneralConditioner.tokenize = tokenize
+sgm.modules.GeneralConditioner.process_texts = process_texts
+sgm.modules.GeneralConditioner.get_target_prompt_token_count = get_target_prompt_token_count
+
+
+def extend_sdxl(model):
+    """this adds a bunch of parameters to make SDXL model look a bit more like SD1.5 to the rest of the codebase."""
+
+    dtype = next(model.model.diffusion_model.parameters()).dtype
+    model.model.diffusion_model.dtype = dtype
+    model.model.conditioning_key = 'crossattn'
+    model.cond_stage_key = 'txt'
+    # model.cond_stage_model will be set in sd_hijack
+
+    model.parameterization = "v" if isinstance(model.denoiser.scaling, sgm.modules.diffusionmodules.denoiser_scaling.VScaling) else "eps"
+
+    discretization = sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization()
+    model.alphas_cumprod = torch.asarray(discretization.alphas_cumprod, device=devices.device, dtype=dtype)
+
+    model.conditioner.wrapped = torch.nn.Module()
+
+
+sgm.modules.attention.print = shared.ldm_print
+sgm.modules.diffusionmodules.model.print = shared.ldm_print
+sgm.modules.diffusionmodules.openaimodel.print = shared.ldm_print
+sgm.modules.encoders.modules.print = shared.ldm_print
+
+# this gets the code to load the vanilla attention that we override
+sgm.modules.attention.SDP_IS_AVAILABLE = True
+sgm.modules.attention.XFORMERS_IS_AVAILABLE = False
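+
+# [Editor's note: explanatory comment, not part of the original commit.]
+# extend_sdxl above infers the prediction type from the denoiser's scaling
+# class: sgm wraps v-prediction models in VScaling (epsilon models use a
+# different scaling class), so an isinstance check recovers the "v"/"eps"
+# label that SD1/SD2-era code expects to find on model.parameterization.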