diff --git a/examples/sketch_inpaint.py b/examples/sketch_inpaint.py
index 0a83dfe..65d7c3b 100644
--- a/examples/sketch_inpaint.py
+++ b/examples/sketch_inpaint.py
@@ -16,13 +16,15 @@ def main():
     scheduler = "EulerAncestralDiscreteScheduler" # gives good results
 
     pipe = MaskedIm2ImPipe(model_dir+model_id)
-    pipe.setup(original_image=img, image_painted=img_paint, strength=0.75,
+    pipe.setup(original_image=img, image_painted=img_paint, strength=0.85,
                scheduler=scheduler, guidance_scale=7, clip_skip=3, blur=blur)
 
     prompt = "a man wearing a mask"
-    gs = GenSession("./masked_im2im", pipe, Cfgen(prompt, "", seeds=range(10)))
-    gs.gen_sess(add_count=10)
+    init = 84958344
+    count = 10
+    gs = GenSession("./masked_im2im", pipe, Cfgen(prompt, "", seeds=range(init, init + count)))
+    gs.gen_sess(add_count=count)
 
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/examples/sketch_inpaint_xl.py b/examples/sketch_inpaint_xl.py
new file mode 100644
index 0000000..6513596
--- /dev/null
+++ b/examples/sketch_inpaint_xl.py
@@ -0,0 +1,27 @@
+from multigen import GenSession, Cfgen
+from multigen.pipes import MaskedIm2ImPipe
+import PIL.Image
+import numpy
+
+
+def main():
+    model_id = 'frankjoshua/juggernautXL_v8Rundiffusion'
+    # model_id = '/home/imgen/models/SDXL/juggernautXL_v8Rundiffusion.safetensors'  # or point to a local checkpoint
+    img = PIL.Image.open("./mech_beard_sigm.png")
+    # read the image with the mask painted over it
+    img_paint = numpy.array(PIL.Image.open("./mech_beard_sigm_mask.png"))
+
+    scheduler = "EulerAncestralDiscreteScheduler" # gives good results
+
+    pipe = MaskedIm2ImPipe(model_id)
+    blur = 48
+    pipe.setup(original_image=img, image_painted=img_paint, strength=0.96,
+               scheduler=scheduler, guidance_scale=7, clip_skip=0, blur=blur, blur_compose=3, steps=50, sample_mode='random')
+
+    prompt = "a man wearing a mask"
+    gs = GenSession("./masked_im2im_xl", pipe, Cfgen(prompt, "", seeds=range(0, 10)))
+    gs.gen_sess(add_count=10)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/multigen/pipelines/masked_stable_diffusion_img2img.py b/multigen/pipelines/masked_stable_diffusion_img2img.py
index b8182b0..d6376ae 100644
--- a/multigen/pipelines/masked_stable_diffusion_img2img.py
+++ b/multigen/pipelines/masked_stable_diffusion_img2img.py
@@ -5,26 +5,32 @@ import torch
 from diffusers import StableDiffusionImg2ImgPipeline
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import retrieve_latents
+from diffusers.utils import deprecate  # used by the deprecated callback/callback_steps path below
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.models.attention_processor import (
+    AttnProcessor2_0,
+    LoRAAttnProcessor2_0,
+    LoRAXFormersAttnProcessor,
+    XFormersAttnProcessor,
+)
 
 
 class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
-    debug_save = False
+    debug_save = 0
 
+    @torch.no_grad()
     def __call__(
         self,
         prompt: Union[str, List[str]] = None,
-        image: Union[
-            torch.FloatTensor,
-            PIL.Image.Image,
-            np.ndarray,
-            List[torch.FloatTensor],
-            List[PIL.Image.Image],
-            List[np.ndarray],
-        ] = None,
+        image: PipelineImageInput = None,
+        original_image: PipelineImageInput = None,
         strength: float = 0.8,
         num_inference_steps: Optional[int] = 50,
+        timesteps: List[int] = None,
         guidance_scale: Optional[float] = 7.5,
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
@@ -32,11 +37,14 @@ def __call__(
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
+        ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-        callback_steps: int = 1,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        clip_skip: int = None,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
         mask: Union[
             torch.FloatTensor,
             PIL.Image.Image,
             np.ndarray,
             List[torch.FloatTensor],
@@ -45,6 +53,8 @@
             List[PIL.Image.Image],
             List[np.ndarray],
         ] = None,
+        sample_mode='sample',
+        **kwargs
     ):
         r"""
        The call function to the pipeline for generation.
 
@@ -53,8 +63,13 @@
             prompt (`str` or `List[str]`, *optional*):
                 The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
             image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
-                `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
+                `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
+                numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a list
+                of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
+                list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image
                 latents as `image`, but if passing latents directly it is not encoded again.
+            original_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+                Original image without the painted mask. If provided, it is used to compute the latents for the non-inpaint area.
             strength (`float`, *optional*, defaults to 0.8):
                 Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
                 starting point and more noise is added the higher the `strength`. The number of denoising steps depends
@@ -64,6 +79,10 @@
             num_inference_steps (`int`, *optional*, defaults to 50):
                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                 expense of slower inference. This parameter is modulated by `strength`.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
             guidance_scale (`float`, *optional*, defaults to 7.5):
                 A higher guidance scale value encourages the model to generate images closely linked to the text
                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
@@ -84,22 +103,36 @@
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
+            ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter. It should be a list whose length equals the number of
+                IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
+                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
+                provided, embeddings are computed from the `ip_adapter_image` input argument.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                 plain tuple.
-            callback (`Callable`, *optional*):
-                A function that calls every `callback_steps` steps during inference. The function is called with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-            callback_steps (`int`, *optional*, defaults to 1):
-                The frequency at which the `callback` function is called. If not specified, the callback is called at
-                every step.
             cross_attention_kwargs (`dict`, *optional*):
                 A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                 [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
             mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
                 A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.
+            sample_mode (`str`, *optional*):
+                Controls how the latent code for the masked area is initialised; one of `sample`, `argmax`, `random`.
 
         Examples:
 
         Returns:
             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                 If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                 otherwise a `tuple` is returned where the first element is a list with the generated images and the
@@ -109,8 +142,22 @@
                 second element is a list of `bool`s indicating whether the corresponding generated image contains
                 "not-safe-for-work" (nsfw) content.
         """
-        # code adapted from parent class StableDiffusionImg2ImgPipeline
+        callback = kwargs.pop("callback", None)
+        callback_steps = kwargs.pop("callback_steps", None)
+        if callback is not None:
+            deprecate(
+                "callback",
+                "1.0.0",
+                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+        if callback_steps is not None:
+            deprecate(
+                "callback_steps",
+                "1.0.0",
+                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+
         # 0. Check inputs. Raise error if not correct
         self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
@@ -126,6 +173,8 @@
         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
         # corresponds to doing no classifier free guidance.
         do_classifier_free_guidance = guidance_scale > 1.0
+        if original_image is None:
+            original_image = image
 
         # 2. Encode input prompt
         text_encoder_lora_scale = (
@@ -144,7 +193,7 @@
 
         # 3. Preprocess image
         image = self.image_processor.preprocess(image)
-
+        original_image = self.image_processor.preprocess(original_image)
         # 4. set timesteps
         self.scheduler.set_timesteps(num_inference_steps, device=device)
         timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
@@ -153,14 +202,20 @@
 
         # 5. Prepare latent variables
         # it is sampled from the latent distribution of the VAE
         latents = self.prepare_latents(
-            image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
+            image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, sample_mode=sample_mode
         )
         # mean of the latent distribution
-        init_latents = [
-            self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean for i in range(batch_size)
-        ]
-        init_latents = torch.cat(init_latents, dim=0)
+        non_inpaint_latents = self.prepare_latents(
+            original_image,
+            latent_timestep,
+            batch_size,
+            num_images_per_prompt,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            add_noise=False,
+            sample_mode="argmax")
 
         # 6. create latent mask
         latent_mask = self._make_latent_mask(latents, mask)
@@ -172,6 +227,22 @@
         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
         with self.progress_bar(total=num_inference_steps) as progress_bar:
             for i, t in enumerate(timesteps):
+                shape = non_inpaint_latents.shape
+                noise = randn_tensor(shape, generator=generator, device=device, dtype=latents.dtype)
+                # noisy latent code of the input image at the current step
+                orig_latents_t = self.scheduler.add_noise(non_inpaint_latents, noise, t.unsqueeze(0))
+
+                # orig_latents_t * (1 - latent_mask) + latents * latent_mask
+                latents = torch.lerp(orig_latents_t, latents, latent_mask)
+
+                if self.debug_save:
+                    img1 = self.latents_to_img(latents)
+                    # zero-pad the timestep for sortable file names; do not reuse the
+                    # outer loop variable `i` here, or the step index gets clobbered
+                    t_str = str(t.int().item()).zfill(3)
+                    img1[0].save(f'step{t_str}.png')
+
+
                 # expand the latents if we are doing classifier free guidance
                 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
@@ -190,13 +261,15 @@
                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
 
-                if latent_mask is not None:
-                    latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask)
-                    noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask)
-
                 # compute the previous noisy sample x_t -> x_t-1
+                latents_dtype = latents.dtype
                 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+                if latents.dtype != latents_dtype:
+                    if torch.backends.mps.is_available():
+                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                        latents = latents.to(latents_dtype)
+
                 # call the callback, if provided
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
@@ -204,33 +277,43 @@
                         callback(i, t, latents)
 
         if not output_type == "latent":
-            scaled = latents / self.vae.config.scaling_factor
-            if latent_mask is not None:
-                # scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask)
-                scaled = torch.lerp(init_latents, scaled, latent_mask)
-            image = self.vae.decode(scaled, return_dict=False)[0]
+            # make sure the VAE is in float32 mode, as it overflows in float16
+            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+
+            if needs_upcasting:
+                self.upcast_vae()
+            elif latents.dtype != self.vae.dtype:
+                if torch.backends.mps.is_available():
+                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                    self.vae = self.vae.to(latents.dtype)
+
             if self.debug_save:
-                image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
-                image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True])
+                image_gen = self.latents_to_img(latents)
                 image_gen[0].save("from_latent.png")
+
+            if latent_mask is not None:
+                # interpolate with latent mask
+                latents = torch.lerp(non_inpaint_latents, latents, latent_mask)
+
+
+            latents = self.denormalize(latents)
+            image = self.vae.decode(latents, return_dict=False)[0]
+            # cast back to fp16 if needed
+            if needs_upcasting:
+                self.vae.to(dtype=torch.float16)
             image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
+
         else:
             image = latents
             has_nsfw_concept = None
 
-        if has_nsfw_concept is None:
-            do_denormalize = [True] * image.shape[0]
-        else:
-            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+        image = self.image_processor.postprocess(image, output_type=output_type)
 
-        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
-
-        # Offload last model to CPU
-        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
-            self.final_offload_hook.offload()
+        # Offload all models
+        self.maybe_free_model_hooks()
 
         if not return_dict:
             return (image, has_nsfw_concept)
 
         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
 
@@ -258,3 +341,106 @@
             latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents)
             latent_mask = latent_mask / latent_mask.max()
         return latent_mask
+
+    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True, sample_mode: str = "sample"):
+        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
+            raise ValueError(
+                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
+            )
+
+        image = image.to(device=device, dtype=dtype)
+
+        batch_size = batch_size * num_images_per_prompt
+
+        if image.shape[1] == 4:
+            init_latents = image
+
+        else:
+            if isinstance(generator, list) and len(generator) != batch_size:
+                raise ValueError(
+                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode=sample_mode) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode=sample_mode) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + def denormalize(self, latents): + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + return latents + + def latents_to_img(self, latents): + l1 = self.denormalize(latents) + img1 = self.vae.decode(l1, return_dict=False)[0] + img1 = self.image_processor.postprocess(img1, output_type='pil', do_denormalize=[True]) + return img1 + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) diff --git a/multigen/pipelines/masked_stable_diffusion_xl_img2img.py 
b/multigen/pipelines/masked_stable_diffusion_xl_img2img.py new file mode 100644 index 0000000..76beac5 --- /dev/null +++ b/multigen/pipelines/masked_stable_diffusion_xl_img2img.py @@ -0,0 +1,649 @@ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch + +import numpy as np + +from diffusers.utils import ( + deprecate, + is_torch_xla_available, + logging, +) +from diffusers.image_processor import PipelineImageInput +from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipeline, retrieve_latents, retrieve_timesteps, rescale_noise_cfg +from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + +from diffusers.utils.torch_utils import randn_tensor + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class MaskedStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline): + debug_save = 0 + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + original_image: PipelineImageInput = None, + strength: float = 0.3, + num_inference_steps: Optional[int] = 50, + timesteps: List[int] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: Optional[float] = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + mask: Union[ + torch.FloatTensor, + PIL.Image.Image, + np.ndarray, + List[torch.FloatTensor], + List[PIL.Image.Image], + List[np.ndarray], + ] = None, + sample_mode='sample', + **kwargs + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
+            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+                `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
+                latents as `image`, but if passing latents directly it is not encoded again.
+            strength (`float`, *optional*, defaults to 0.3):
+                Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+                essentially ignores `image`.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference. This parameter is modulated by `strength`.
+            guidance_scale (`float`, *optional*, defaults to 5.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
+                of a plain tuple.
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step with the arguments
+                `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs passed to `callback_on_step_end` via `callback_kwargs`. Only variables
+                listed in the `._callback_tensor_inputs` attribute of the pipeline class can be included.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
+                A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.
+            sample_mode (`str`, *optional*):
+                Controls how the latent code for the inpaint area is initialised; one of `sample`, `argmax`, `random`.
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] is
+                returned, otherwise a `tuple` is returned where the first element is a list with the generated
+                images.
+        """
+        # code adapted from parent class StableDiffusionXLImg2ImgPipeline
+        callback = kwargs.pop("callback", None)
+        callback_steps = kwargs.pop("callback_steps", None)
+
+        if callback is not None:
+            deprecate(
+                "callback",
+                "1.0.0",
+                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+        if callback_steps is not None:
+            deprecate(
+                "callback_steps",
+                "1.0.0",
+                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+
+
+        # 0. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            strength,
+            num_inference_steps,
+            callback_steps,
+            negative_prompt,
+            negative_prompt_2,
+            prompt_embeds,
+            negative_prompt_embeds,
+            ip_adapter_image,
+            ip_adapter_image_embeds,
+            callback_on_step_end_tensor_inputs,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._guidance_rescale = guidance_rescale
+        self._clip_skip = clip_skip
+        self._cross_attention_kwargs = cross_attention_kwargs
+        self._denoising_end = denoising_end
+        self._denoising_start = denoising_start
+        self._interrupt = False
+        if original_image is None:
+            original_image = image
+
+        # 1. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+
+        # 2. Encode input prompt
+        text_encoder_lora_scale = (
+            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+        )
+        (
+            prompt_embeds,
+            negative_prompt_embeds,
+            pooled_prompt_embeds,
+            negative_pooled_prompt_embeds,
+        ) = self.encode_prompt(
+            prompt=prompt,
+            prompt_2=prompt_2,
+            device=device,
+            num_images_per_prompt=num_images_per_prompt,
+            do_classifier_free_guidance=self.do_classifier_free_guidance,
+            negative_prompt=negative_prompt,
+            negative_prompt_2=negative_prompt_2,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            lora_scale=text_encoder_lora_scale,
+            clip_skip=self.clip_skip,
+        )
+
+        # 3. Preprocess image
+        image = self.image_processor.preprocess(image)
+        original_image = self.image_processor.preprocess(original_image)
+
+        # 4.
set timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + add_noise = True if self.denoising_start is None else False + + # 5. Prepare latent variables + # It is sampled from the latent distribution of the VAE + # that's what we repaint + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + add_noise, + sample_mode=sample_mode + ) + + # mean of the latent distribution + # it is multiplied by self.vae.config.scaling_factor + non_paint_latents = self.prepare_latents( + original_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + add_noise=False, + sample_mode="argmax") + + if self.debug_save: + init_img_from_latents = self.latents_to_img(non_paint_latents) + init_img_from_latents[0].save('non_paint_latents.png') + # 6. create latent mask + latent_mask = self._make_latent_mask(latents, mask) + + # 7. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 8. Prepare added time ids & embeddings + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + + # 10. 
Denoising loop
+        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+
+        # 10.1 Apply denoising_end
+        if (
+            self.denoising_end is not None
+            and self.denoising_start is not None
+            and denoising_value_valid(self.denoising_end)
+            and denoising_value_valid(self.denoising_start)
+            and self.denoising_start >= self.denoising_end
+        ):
+            raise ValueError(
+                f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
+                + f" {self.denoising_end} when using type float."
+            )
+        elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
+            discrete_timestep_cutoff = int(
+                round(
+                    self.scheduler.config.num_train_timesteps
+                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
+                )
+            )
+            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
+            timesteps = timesteps[:num_inference_steps]
+
+        # 10.2 Optionally get Guidance Scale Embedding
+        timestep_cond = None
+        if self.unet.config.time_cond_proj_dim is not None:
+            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+            timestep_cond = self.get_guidance_scale_embedding(
+                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+            ).to(device=device, dtype=latents.dtype)
+
+        self._num_timesteps = len(timesteps)
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+
+                shape = non_paint_latents.shape
+                noise = randn_tensor(shape, generator=generator, device=device, dtype=latents.dtype)
+                # noisy latent code of the input image at the current step
+                orig_latents_t = self.scheduler.add_noise(non_paint_latents, noise, t.unsqueeze(0))
+
+                # orig_latents_t * (1 - latent_mask) + latents * latent_mask
+                latents = torch.lerp(orig_latents_t, latents, latent_mask)
+
+                if self.debug_save:
+                    img1 = self.latents_to_img(latents)
+                    # zero-pad the timestep for sortable file names; do not reuse the
+                    # outer loop variable `i` here, or the step index gets clobbered
+                    t_str = str(t.int().item()).zfill(3)
+                    img1[0].save(f'step{t_str}.png')
+
+
+                # expand the latents if we are doing classifier free guidance
+                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                # predict the noise residual
+                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+                if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+                    added_cond_kwargs["image_embeds"] = image_embeds
+
+                noise_pred = self.unet(
+                    latent_model_input,
+                    t,
+                    encoder_hidden_states=prompt_embeds,
+                    timestep_cond=timestep_cond,
+                    cross_attention_kwargs=self.cross_attention_kwargs,
+                    added_cond_kwargs=added_cond_kwargs,
+                    return_dict=False,
+                )[0]
+
+                # perform guidance
+                if self.do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
+                    # Based on 3.4.
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + if self.debug_save: + image_gen = self.latents_to_img(latents) + image_gen[0].save("from_latent.png") + + if latent_mask is not None: + # interpolate with latent mask + latents = torch.lerp(non_paint_latents, latents, latent_mask) + + latents = self.denormalize(latents) + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) + + def _make_latent_mask(self, latents, mask): + if mask is not None: + latent_mask = [] + if not isinstance(mask, list): + tmp_mask = [mask] + else: + tmp_mask = mask + _, l_channels, l_height, l_width = latents.shape + for m in tmp_mask: + if not isinstance(m, PIL.Image.Image): + if len(m.shape) == 2: + m = m[..., np.newaxis] + if m.max() > 1: + m = m / 255.0 + m = self.image_processor.numpy_to_pil(m)[0] + if m.mode != "L": + m = m.convert("L") + resized = self.image_processor.resize(m, l_height, l_width) + if self.debug_save: + resized.save("latent_mask.png") + latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0)) + latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents) + latent_mask = latent_mask / max(latent_mask.max(), 1) + return latent_mask + + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, + generator=None, + add_noise=True, + sample_mode: str = "sample" + ): + + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + elif sample_mode == 'random': + height, width = image.shape[-2:] + num_channels_latents = self.unet.config.in_channels + latents = self.random_latents( + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + ) + return self.vae.config.scaling_factor * latents + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode=sample_mode) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode=sample_mode) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def random_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def denormalize(self, latents): + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + return latents + + def latents_to_img(self, latents): + l1 = self.denormalize(latents) + img1 = self.vae.decode(l1, return_dict=False)[0] + img1 = self.image_processor.postprocess(img1, output_type='pil', do_denormalize=[True]) + return img1 diff --git a/multigen/pipes.py b/multigen/pipes.py index 9edfefa..9db8e21 100755 --- a/multigen/pipes.py +++ b/multigen/pipes.py @@ -17,6 +17,7 @@ from diffusers.schedulers import KarrasDiffusionSchedulers from .pipelines.masked_stable_diffusion_img2img import MaskedStableDiffusionImg2ImgPipeline +from .pipelines.masked_stable_diffusion_xl_img2img import MaskedStableDiffusionXLImg2ImgPipeline from transformers import CLIPProcessor, CLIPTextModel #from xformers.ops import MemoryEfficientAttentionFlashAttentionOp # from diffusers import StableDiffusionKDiffusionPipeline @@ -260,6 +261,7 @@ def gen(self, inputs): class MaskedIm2ImPipe(Im2ImPipe): _class = MaskedStableDiffusionImg2ImgPipeline + _classxl = MaskedStableDiffusionXLImg2ImgPipeline def __init__(self, *args, pipe: Optional[StableDiffusionImg2ImgPipeline] = None, **kwargs): super().__init__(*args, pipe=pipe, **kwargs) @@ -268,7 +270,7 @@ def __init__(self, *args, pipe: Optional[StableDiffusionImg2ImgPipeline] = None, self._original_image = None self._mask_blur = None - def setup(self, original_image=None, image_painted=None, mask=None, blur=4, **kwargs): + def setup(self, original_image=None, image_painted=None, mask=None, blur=4, blur_compose=4, sample_mode='sample', **kwargs): self._original_image = Image.open(original_image) if isinstance(original_image, str) else original_image self._image_painted = Image.open(image_painted) if isinstance(image_painted, str) else image_painted # there are two options: @@ -282,23 +284,35 @@ def setup(self, original_image=None, image_painted=None, mask=None, blur=4, **kw self._mask = mask input_image = self._image_painted if self._image_painted is not None else self._original_image input_image = np.array(input_image) + super().setup(fimage=None, image=input_image / input_image.max(), **kwargs) pil_mask = mask if not isinstance(self._mask, Image.Image): pil_mask = Image.fromarray(mask) if pil_mask.mode != "L": pil_mask = pil_mask.convert("L") + self._mask_blur = self.blur_mask(pil_mask, blur) + self._mask_compose = self.blur_mask(pil_mask, blur_compose) + self._sample_mode = sample_mode + + def blur_mask(self, pil_mask, blur): mask_blur = pil_mask.filter(ImageFilter.GaussianBlur(radius=blur)) mask_blur = 
np.array(mask_blur) - self._mask_blur = np.tile(mask_blur / mask_blur.max(), (3, 1, 1)).transpose(1,2,0) + return np.tile(mask_blur / mask_blur.max(), (3, 1, 1)).transpose(1,2,0) def gen(self, inputs): inputs = inputs.copy() inputs.update(mask=self._mask) + if 'sample_mode' not in inputs: + inputs['sample_mode'] = self._sample_mode + + original_image = self._original_image + original_image = np.array(original_image) + inputs['original_image'] = original_image / original_image.max() img_gen = super().gen(inputs) # compose with original using mask - img_compose = self._mask_blur * img_gen + (1 - self._mask_blur) * self._original_image + img_compose = self._mask_compose * img_gen + (1 - self._mask_compose) * self._original_image # convert to PIL image img_compose = Image.fromarray(img_compose.astype(np.uint8)) return img_compose
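
A note on the blending step that both denoising loops rely on: `torch.lerp(orig_latents_t, latents, latent_mask)` computes `orig_latents_t * (1 - latent_mask) + latents * latent_mask`, so the re-noised latents of the original image survive where the mask is 0 and the freshly denoised latents survive where it is 1. A minimal self-contained sketch with dummy tensors (the shapes and the box-shaped mask are illustrative only, not taken from the patch):

import torch

# stand-ins for the re-noised original-image latents and the latents being denoised
orig_latents_t = torch.randn(1, 4, 64, 64)
latents = torch.randn(1, 4, 64, 64)
# latent-resolution mask: 1 inside the region to inpaint, 0 elsewhere
latent_mask = torch.zeros(1, 4, 64, 64)
latent_mask[..., 16:48, 16:48] = 1.0

blended = torch.lerp(orig_latents_t, latents, latent_mask)
expected = orig_latents_t * (1 - latent_mask) + latents * latent_mask
assert torch.allclose(blended, expected)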
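
The examples above drive everything through `MaskedIm2ImPipe`, but the masked pipeline class can also be called directly. A rough usage sketch, assuming an SD 1.5 checkpoint and PNG files on disk (the model id and file names are placeholders, not part of the patch):

import numpy as np
import PIL.Image
import torch

from multigen.pipelines.masked_stable_diffusion_img2img import MaskedStableDiffusionImg2ImgPipeline

pipe = MaskedStableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

original = PIL.Image.open("original.png")   # untouched input
painted = PIL.Image.open("painted.png")     # same image with the region to change painted over
# non-zero wherever the two images differ, i.e. the area to inpaint
mask = (np.array(original) != np.array(painted)).any(axis=-1).astype(np.uint8) * 255

result = pipe(
    prompt="a man wearing a mask",
    image=painted,               # starting point for img2img
    original_image=original,     # source of the non-inpaint latents
    mask=mask,
    strength=0.85,
    sample_mode="sample",        # masked-area init: "sample" or "argmax" (the XL variant also accepts "random")
).images[0]
result.save("out.png")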
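
On the `MaskedIm2ImPipe` side, `blur` now controls only the mask handed to the diffusion pipeline, while the new `blur_compose` controls the mask used when pasting the generated region back over the original, so the two radii can be tuned independently. The final compositing in `gen` reduces to an alpha blend; a sketch of that step with placeholder file names:

import numpy as np
from PIL import Image, ImageFilter

def blur_mask(pil_mask, blur):
    # Gaussian-blur a single-channel mask and expand it to an HxWx3 array in [0, 1]
    blurred = np.array(pil_mask.filter(ImageFilter.GaussianBlur(radius=blur)))
    return np.tile(blurred / blurred.max(), (3, 1, 1)).transpose(1, 2, 0)

mask = Image.open("mask.png").convert("L")          # placeholder paths
original = np.array(Image.open("original.png"))
generated = np.array(Image.open("generated.png"))   # pipeline output of the same size

m = blur_mask(mask, blur=3)   # soft edges hide the seam between the two images
composed = m * generated + (1 - m) * original
Image.fromarray(composed.astype(np.uint8)).save("composed.png")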