diff --git a/examples/community/checkpoint_merger.py b/examples/community/checkpoint_merger.py
index 24f187b41c07..3e29ae50078b 100644
--- a/examples/community/checkpoint_merger.py
+++ b/examples/community/checkpoint_merger.py
@@ -199,24 +199,20 @@ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]
             if not attr.startswith("_"):
                 checkpoint_path_1 = os.path.join(cached_folders[1], attr)
                 if os.path.exists(checkpoint_path_1):
-                    files = list(
-                        (
-                            *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
-                            *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
-                        )
-                    )
+                    files = [
+                        *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
+                        *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
+                    ]
                     checkpoint_path_1 = files[0] if len(files) > 0 else None
                 if len(cached_folders) < 3:
                     checkpoint_path_2 = None
                 else:
                     checkpoint_path_2 = os.path.join(cached_folders[2], attr)
                 if os.path.exists(checkpoint_path_2):
-                    files = list(
-                        (
-                            *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
-                            *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
-                        )
-                    )
+                    files = [
+                        *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
+                        *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
+                    ]
                     checkpoint_path_2 = files[0] if len(files) > 0 else None
             # For an attr if both checkpoint_path_1 and 2 are None, ignore.
             # If atleast one is present, deal with it according to interp method, of course only if the state_dict keys match.
diff --git a/examples/community/imagic_stable_diffusion.py b/examples/community/imagic_stable_diffusion.py
index 03917b187af7..dc8ce5f259dc 100644
--- a/examples/community/imagic_stable_diffusion.py
+++ b/examples/community/imagic_stable_diffusion.py
@@ -48,7 +48,7 @@
 
 def preprocess(image):
     w, h = image.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = np.array(image).astype(np.float32) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
diff --git a/examples/community/lpw_stable_diffusion.py b/examples/community/lpw_stable_diffusion.py
index 80b7b90c8bbd..072f7cde17ad 100644
--- a/examples/community/lpw_stable_diffusion.py
+++ b/examples/community/lpw_stable_diffusion.py
@@ -376,7 +376,7 @@ def get_weighted_text_embeddings(
 
 def preprocess_image(image):
     w, h = image.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = np.array(image).astype(np.float32) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
@@ -387,7 +387,7 @@ def preprocess_image(image):
 def preprocess_mask(mask, scale_factor=8):
     mask = mask.convert("L")
     w, h = mask.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
     mask = np.array(mask).astype(np.float32) / 255.0
     mask = np.tile(mask, (4, 1, 1))
diff --git a/examples/community/lpw_stable_diffusion_onnx.py b/examples/community/lpw_stable_diffusion_onnx.py
index 817bae262e94..1e0764de5d4d 100644
--- a/examples/community/lpw_stable_diffusion_onnx.py
+++ b/examples/community/lpw_stable_diffusion_onnx.py
@@ -403,7 +403,7 @@ def get_weighted_text_embeddings(
 
 def preprocess_image(image):
     w, h = image.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = np.array(image).astype(np.float32) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
@@ -413,7 +413,7 @@ def preprocess_image(image):
 def preprocess_mask(mask, scale_factor=8):
     mask = mask.convert("L")
     w, h = mask.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
     mask = np.array(mask).astype(np.float32) / 255.0
     mask = np.tile(mask, (4, 1, 1))
diff --git a/examples/community/stable_unclip.py b/examples/community/stable_unclip.py
index 8ff9c44d19fd..1b438c8fcb3e 100644
--- a/examples/community/stable_unclip.py
+++ b/examples/community/stable_unclip.py
@@ -46,7 +46,7 @@ def __init__(
     ):
         super().__init__()
 
-        decoder_pipe_kwargs = dict(image_encoder=None) if decoder_pipe_kwargs is None else decoder_pipe_kwargs
+        decoder_pipe_kwargs = {"image_encoder": None} if decoder_pipe_kwargs is None else decoder_pipe_kwargs
 
         decoder_pipe_kwargs["torch_dtype"] = decoder_pipe_kwargs.get("torch_dtype", None) or prior.dtype
diff --git a/examples/instruct_pix2pix/train_instruct_pix2pix.py b/examples/instruct_pix2pix/train_instruct_pix2pix.py
index 57430b7f150a..6e51e86a9f16 100644
--- a/examples/instruct_pix2pix/train_instruct_pix2pix.py
+++ b/examples/instruct_pix2pix/train_instruct_pix2pix.py
@@ -673,7 +673,7 @@ def preprocess_train(examples):
         examples["edited_pixel_values"] = edited_images
 
         # Preprocess the captions.
-        captions = [caption for caption in examples[edit_prompt_column]]
+        captions = list(examples[edit_prompt_column])
         examples["input_ids"] = tokenize_captions(captions)
         return examples
diff --git a/examples/rl/run_diffuser_locomotion.py b/examples/rl/run_diffuser_locomotion.py
index e64a20500bea..adf6d1443d1c 100644
--- a/examples/rl/run_diffuser_locomotion.py
+++ b/examples/rl/run_diffuser_locomotion.py
@@ -4,17 +4,17 @@
 from diffusers.experimental import ValueGuidedRLPipeline
 
 
-config = dict(
-    n_samples=64,
-    horizon=32,
-    num_inference_steps=20,
-    n_guide_steps=2,  # can set to 0 for faster sampling, does not use value network
-    scale_grad_by_std=True,
-    scale=0.1,
-    eta=0.0,
-    t_grad_cutoff=2,
-    device="cpu",
-)
+config = {
+    "n_samples": 64,
+    "horizon": 32,
+    "num_inference_steps": 20,
+    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
+    "scale_grad_by_std": True,
+    "scale": 0.1,
+    "eta": 0.0,
+    "t_grad_cutoff": 2,
+    "device": "cpu",
+}
 
 
 if __name__ == "__main__":
diff --git a/pyproject.toml b/pyproject.toml
index 5ec7ae51be15..a5fe70af9ca7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,8 +4,8 @@ target-version = ['py37']
 
 [tool.ruff]
 # Never enforce `E501` (line length violations).
-ignore = ["E501", "E741", "W605"]
-select = ["E", "F", "I", "W"]
+ignore = ["C901", "E501", "E741", "W605"]
+select = ["C", "E", "F", "I", "W"]
 line-length = 119
 
 # Ignore import violations in all `__init__.py` files.
diff --git a/scripts/convert_ddpm_original_checkpoint_to_diffusers.py b/scripts/convert_ddpm_original_checkpoint_to_diffusers.py
index 4222327c23de..46595784b0ba 100644
--- a/scripts/convert_ddpm_original_checkpoint_to_diffusers.py
+++ b/scripts/convert_ddpm_original_checkpoint_to_diffusers.py
@@ -404,7 +404,7 @@ def convert_vq_autoenc_checkpoint(checkpoint, config):
         config = json.loads(f.read())
 
     # unet case
-    key_prefix_set = set(key.split(".")[0] for key in checkpoint.keys())
+    key_prefix_set = {key.split(".")[0] for key in checkpoint.keys()}
     if "encoder" in key_prefix_set and "decoder" in key_prefix_set:
         converted_checkpoint = convert_vq_autoenc_checkpoint(checkpoint, config)
     else:
diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py
index 9475f7da93fb..cc5321e33fe0 100644
--- a/scripts/convert_models_diffuser_to_diffusers.py
+++ b/scripts/convert_models_diffuser_to_diffusers.py
@@ -24,29 +24,29 @@ def unet(hor):
         up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
     model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
     state_dict = model.state_dict()
-    config = dict(
-        down_block_types=down_block_types,
-        block_out_channels=block_out_channels,
-        up_block_types=up_block_types,
-        layers_per_block=1,
-        use_timestep_embedding=True,
-        out_block_type="OutConv1DBlock",
-        norm_num_groups=8,
-        downsample_each_block=False,
-        in_channels=14,
-        out_channels=14,
-        extra_in_channels=0,
-        time_embedding_type="positional",
-        flip_sin_to_cos=False,
-        freq_shift=1,
-        sample_size=65536,
-        mid_block_type="MidResTemporalBlock1D",
-        act_fn="mish",
-    )
+    config = {
+        "down_block_types": down_block_types,
+        "block_out_channels": block_out_channels,
+        "up_block_types": up_block_types,
+        "layers_per_block": 1,
+        "use_timestep_embedding": True,
+        "out_block_type": "OutConv1DBlock",
+        "norm_num_groups": 8,
+        "downsample_each_block": False,
+        "in_channels": 14,
+        "out_channels": 14,
+        "extra_in_channels": 0,
+        "time_embedding_type": "positional",
+        "flip_sin_to_cos": False,
+        "freq_shift": 1,
+        "sample_size": 65536,
+        "mid_block_type": "MidResTemporalBlock1D",
+        "act_fn": "mish",
+    }
     hf_value_function = UNet1DModel(**config)
     print(f"length of state dict: {len(state_dict.keys())}")
     print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
-    mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
+    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
     for k, v in mapping.items():
         state_dict[v] = state_dict.pop(k)
     hf_value_function.load_state_dict(state_dict)
@@ -57,25 +57,25 @@ def unet(hor):
 
 def value_function():
-    config = dict(
-        in_channels=14,
-        down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
-        up_block_types=(),
-        out_block_type="ValueFunction",
-        mid_block_type="ValueFunctionMidBlock1D",
-        block_out_channels=(32, 64, 128, 256),
-        layers_per_block=1,
-        downsample_each_block=True,
-        sample_size=65536,
-        out_channels=14,
-        extra_in_channels=0,
-        time_embedding_type="positional",
-        use_timestep_embedding=True,
-        flip_sin_to_cos=False,
-        freq_shift=1,
-        norm_num_groups=8,
-        act_fn="mish",
-    )
+    config = {
+        "in_channels": 14,
+        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
+        "up_block_types": (),
+        "out_block_type": "ValueFunction",
+        "mid_block_type": "ValueFunctionMidBlock1D",
+        "block_out_channels": (32, 64, 128, 256),
+        "layers_per_block": 1,
+        "downsample_each_block": True,
+        "sample_size": 65536,
+        "out_channels": 14,
+        "extra_in_channels": 0,
+        "time_embedding_type": "positional",
+        "use_timestep_embedding": True,
+        "flip_sin_to_cos": False,
+        "freq_shift": 1,
+        "norm_num_groups": 8,
+        "act_fn": "mish",
+    }
 
     model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
     state_dict = model
@@ -83,7 +83,7 @@ def value_function():
     print(f"length of state dict: {len(state_dict.keys())}")
     print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
 
-    mapping = dict((k, hfk) for k, hfk in zip(state_dict.keys(), hf_value_function.state_dict().keys()))
+    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
     for k, v in mapping.items():
         state_dict[v] = state_dict.pop(k)
diff --git a/scripts/convert_original_audioldm_to_diffusers.py b/scripts/convert_original_audioldm_to_diffusers.py
index bd671e3a7b70..189b165c0a01 100644
--- a/scripts/convert_original_audioldm_to_diffusers.py
+++ b/scripts/convert_original_audioldm_to_diffusers.py
@@ -246,19 +246,19 @@ def create_unet_diffusers_config(original_config, image_size: int):
     )
     class_embeddings_concat = unet_params.extra_film_use_concat if "extra_film_use_concat" in unet_params else None
 
-    config = dict(
-        sample_size=image_size // vae_scale_factor,
-        in_channels=unet_params.in_channels,
-        out_channels=unet_params.out_channels,
-        down_block_types=tuple(down_block_types),
-        up_block_types=tuple(up_block_types),
-        block_out_channels=tuple(block_out_channels),
-        layers_per_block=unet_params.num_res_blocks,
-        cross_attention_dim=cross_attention_dim,
-        class_embed_type=class_embed_type,
-        projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
-        class_embeddings_concat=class_embeddings_concat,
-    )
+    config = {
+        "sample_size": image_size // vae_scale_factor,
+        "in_channels": unet_params.in_channels,
+        "out_channels": unet_params.out_channels,
+        "down_block_types": tuple(down_block_types),
+        "up_block_types": tuple(up_block_types),
+        "block_out_channels": tuple(block_out_channels),
+        "layers_per_block": unet_params.num_res_blocks,
+        "cross_attention_dim": cross_attention_dim,
+        "class_embed_type": class_embed_type,
+        "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
+        "class_embeddings_concat": class_embeddings_concat,
+    }
 
     return config
@@ -278,17 +278,17 @@ def create_vae_diffusers_config(original_config, checkpoint, image_size: int):
 
     scaling_factor = checkpoint["scale_factor"] if "scale_by_std" in original_config.model.params else 0.18215
 
-    config = dict(
-        sample_size=image_size,
-        in_channels=vae_params.in_channels,
-        out_channels=vae_params.out_ch,
-        down_block_types=tuple(down_block_types),
-        up_block_types=tuple(up_block_types),
-        block_out_channels=tuple(block_out_channels),
-        latent_channels=vae_params.z_channels,
-        layers_per_block=vae_params.num_res_blocks,
-        scaling_factor=float(scaling_factor),
-    )
+    config = {
+        "sample_size": image_size,
+        "in_channels": vae_params.in_channels,
+        "out_channels": vae_params.out_ch,
+        "down_block_types": tuple(down_block_types),
+        "up_block_types": tuple(up_block_types),
+        "block_out_channels": tuple(block_out_channels),
+        "latent_channels": vae_params.z_channels,
+        "layers_per_block": vae_params.num_res_blocks,
+        "scaling_factor": float(scaling_factor),
+    }
 
     return config
@@ -670,18 +670,18 @@ def create_transformers_vocoder_config(original_config):
     """
     vocoder_params = original_config.model.params.vocoder_config.params
 
-    config = dict(
-        model_in_dim=vocoder_params.num_mels,
-        sampling_rate=vocoder_params.sampling_rate,
-        upsample_initial_channel=vocoder_params.upsample_initial_channel,
-        upsample_rates=list(vocoder_params.upsample_rates),
-        upsample_kernel_sizes=list(vocoder_params.upsample_kernel_sizes),
-        resblock_kernel_sizes=list(vocoder_params.resblock_kernel_sizes),
-        resblock_dilation_sizes=[
+    config = {
+        "model_in_dim": vocoder_params.num_mels,
+        "sampling_rate": vocoder_params.sampling_rate,
+        "upsample_initial_channel": vocoder_params.upsample_initial_channel,
+        "upsample_rates": list(vocoder_params.upsample_rates),
+        "upsample_kernel_sizes": list(vocoder_params.upsample_kernel_sizes),
+        "resblock_kernel_sizes": list(vocoder_params.resblock_kernel_sizes),
+        "resblock_dilation_sizes": [
             list(resblock_dilation) for resblock_dilation in vocoder_params.resblock_dilation_sizes
         ],
-        normalize_before=False,
-    )
+        "normalize_before": False,
+    }
 
     return config
diff --git a/scripts/convert_versatile_diffusion_to_diffusers.py b/scripts/convert_versatile_diffusion_to_diffusers.py
index 06b0cec03448..b895e08e9de9 100644
--- a/scripts/convert_versatile_diffusion_to_diffusers.py
+++ b/scripts/convert_versatile_diffusion_to_diffusers.py
@@ -280,17 +280,17 @@ def create_image_unet_diffusers_config(unet_params):
     if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks):
         raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.")
 
-    config = dict(
-        sample_size=None,
-        in_channels=unet_params.input_channels,
-        out_channels=unet_params.output_channels,
-        down_block_types=tuple(down_block_types),
-        up_block_types=tuple(up_block_types),
-        block_out_channels=tuple(block_out_channels),
-        layers_per_block=unet_params.num_noattn_blocks[0],
-        cross_attention_dim=unet_params.context_dim,
-        attention_head_dim=unet_params.num_heads,
-    )
+    config = {
+        "sample_size": None,
+        "in_channels": unet_params.input_channels,
+        "out_channels": unet_params.output_channels,
+        "down_block_types": tuple(down_block_types),
+        "up_block_types": tuple(up_block_types),
+        "block_out_channels": tuple(block_out_channels),
+        "layers_per_block": unet_params.num_noattn_blocks[0],
+        "cross_attention_dim": unet_params.context_dim,
+        "attention_head_dim": unet_params.num_heads,
+    }
 
     return config
@@ -319,17 +319,17 @@ def create_text_unet_diffusers_config(unet_params):
     if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks):
         raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.")
 
-    config = dict(
-        sample_size=None,
-        in_channels=(unet_params.input_channels, 1, 1),
-        out_channels=(unet_params.output_channels, 1, 1),
-        down_block_types=tuple(down_block_types),
-        up_block_types=tuple(up_block_types),
-        block_out_channels=tuple(block_out_channels),
-        layers_per_block=unet_params.num_noattn_blocks[0],
-        cross_attention_dim=unet_params.context_dim,
-        attention_head_dim=unet_params.num_heads,
-    )
+    config = {
+        "sample_size": None,
+        "in_channels": (unet_params.input_channels, 1, 1),
+        "out_channels": (unet_params.output_channels, 1, 1),
+        "down_block_types": tuple(down_block_types),
+        "up_block_types": tuple(up_block_types),
+        "block_out_channels": tuple(block_out_channels),
+        "layers_per_block": unet_params.num_noattn_blocks[0],
+        "cross_attention_dim": unet_params.context_dim,
+        "attention_head_dim": unet_params.num_heads,
+    }
 
     return config
@@ -343,16 +343,16 @@ def create_vae_diffusers_config(vae_params):
     down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
     up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)
 
-    config = dict(
-        sample_size=vae_params.resolution,
-        in_channels=vae_params.in_channels,
-        out_channels=vae_params.out_ch,
-        down_block_types=tuple(down_block_types),
-        up_block_types=tuple(up_block_types),
-        block_out_channels=tuple(block_out_channels),
-        latent_channels=vae_params.z_channels,
-        layers_per_block=vae_params.num_res_blocks,
-    )
+    config = {
+        "sample_size": vae_params.resolution,
+        "in_channels": vae_params.in_channels,
+        "out_channels": vae_params.out_ch,
+        "down_block_types": tuple(down_block_types),
+        "up_block_types": tuple(up_block_types),
+        "block_out_channels": tuple(block_out_channels),
+        "latent_channels": vae_params.z_channels,
+        "layers_per_block": vae_params.num_res_blocks,
+    }
 
     return config
diff --git a/src/diffusers/configuration_utils.py b/src/diffusers/configuration_utils.py
index 20b7b273d5af..ce6e77b03f57 100644
--- a/src/diffusers/configuration_utils.py
+++ b/src/diffusers/configuration_utils.py
@@ -420,7 +420,7 @@ def _get_init_keys(cls):
     @classmethod
     def extract_init_dict(cls, config_dict, **kwargs):
         # 0. Copy origin config dict
-        original_dict = {k: v for k, v in config_dict.items()}
+        original_dict = dict(config_dict.items())
 
         # 1. Retrieve expected config attributes from __init__ signature
         expected_keys = cls._get_init_keys(cls)
@@ -610,7 +610,7 @@ def init(self, *args, **kwargs):
             )
 
             # Ignore private kwargs in the init. Retrieve all passed attributes
-            init_kwargs = {k: v for k, v in kwargs.items()}
+            init_kwargs = dict(kwargs.items())
 
             # Retrieve default values
             fields = dataclasses.fields(self)
diff --git a/src/diffusers/experimental/rl/value_guided_sampling.py b/src/diffusers/experimental/rl/value_guided_sampling.py
index 7de33a795c77..e4af4986faad 100644
--- a/src/diffusers/experimental/rl/value_guided_sampling.py
+++ b/src/diffusers/experimental/rl/value_guided_sampling.py
@@ -52,13 +52,13 @@ def __init__(
         self.scheduler = scheduler
         self.env = env
         self.data = env.get_dataset()
-        self.means = dict()
+        self.means = {}
         for key in self.data.keys():
             try:
                 self.means[key] = self.data[key].mean()
             except:  # noqa: E722
                 pass
-        self.stds = dict()
+        self.stds = {}
         for key in self.data.keys():
             try:
                 self.stds[key] = self.data[key].std()
diff --git a/src/diffusers/image_processor.py b/src/diffusers/image_processor.py
index de6543800b2d..80e3412991cf 100644
--- a/src/diffusers/image_processor.py
+++ b/src/diffusers/image_processor.py
@@ -99,7 +99,7 @@ def resize(self, images: PIL.Image.Image) -> PIL.Image.Image:
         Resize a PIL image.
         Both height and width will be downscaled to the next integer multiple of `vae_scale_factor`
         """
         w, h = images.size
-        w, h = map(lambda x: x - x % self.vae_scale_factor, (w, h))  # resize to integer multiple of vae_scale_factor
+        w, h = (x - x % self.vae_scale_factor for x in (w, h))  # resize to integer multiple of vae_scale_factor
         images = images.resize((w, h), resample=PIL_INTERPOLATION[self.resample])
         return images
diff --git a/src/diffusers/loaders.py b/src/diffusers/loaders.py
index 31fdc46d9e1b..d6bb6fde6ac1 100644
--- a/src/diffusers/loaders.py
+++ b/src/diffusers/loaders.py
@@ -37,7 +37,7 @@ class AttnProcsLayers(torch.nn.Module):
     def __init__(self, state_dict: Dict[str, torch.Tensor]):
         super().__init__()
         self.layers = torch.nn.ModuleList(state_dict.values())
-        self.mapping = {k: v for k, v in enumerate(state_dict.keys())}
+        self.mapping = dict(enumerate(state_dict.keys()))
         self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
 
         # we add a hook to state_dict() and load_state_dict() so that the
diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py
index e51b40ce4509..aa4e2b0ea487 100644
--- a/src/diffusers/models/modeling_utils.py
+++ b/src/diffusers/models/modeling_utils.py
@@ -647,7 +647,7 @@ def _load_pretrained_model(
     ):
         # Retrieve missing & unexpected_keys
         model_state_dict = model.state_dict()
-        loaded_keys = [k for k in state_dict.keys()]
+        loaded_keys = list(state_dict.keys())
 
         expected_keys = list(model_state_dict.keys())
diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
index ab80072fa78f..23f4886f06c1 100644
--- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
+++ b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
@@ -74,7 +74,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py
index 8f0925ac4aaa..1b88270cbbe6 100644
--- a/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py
+++ b/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py
@@ -201,12 +201,12 @@ def __call__(
         images = images.cpu().permute(0, 2, 3, 1).numpy()
         images = (images * 255).round().astype("uint8")
         images = list(
-            map(lambda _: Image.fromarray(_[:, :, 0]), images)
+            (Image.fromarray(_[:, :, 0]) for _ in images)
             if images.shape[3] == 1
-            else map(lambda _: Image.fromarray(_, mode="RGB").convert("L"), images)
+            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
         )
 
-        audios = list(map(lambda _: self.mel.image_to_audio(_), images))
+        audios = [self.mel.image_to_audio(_) for _ in images]
 
         if not return_dict:
             return images, (self.mel.get_sample_rate(), audios)
diff --git a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py
index 2ecf5f24a4a7..6887068f3443 100644
--- a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py
+++ b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py
@@ -21,7 +21,7 @@
 
 def preprocess(image):
     w, h = image.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = np.array(image).astype(np.float32) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
diff --git a/src/diffusers/pipelines/pipeline_flax_utils.py b/src/diffusers/pipelines/pipeline_flax_utils.py
index d3fc415ab4d7..9d91ff757799 100644
--- a/src/diffusers/pipelines/pipeline_flax_utils.py
+++ b/src/diffusers/pipelines/pipeline_flax_utils.py
@@ -491,7 +491,7 @@ def _get_signature_keys(obj):
         parameters = inspect.signature(obj.__init__).parameters
         required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
         optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
-        expected_modules = set(required_parameters.keys()) - set(["self"])
+        expected_modules = set(required_parameters.keys()) - {"self"}
         return expected_modules, optional_parameters
 
     @property
diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py
index 8f33b506827a..d3578745b8b3 100644
--- a/src/diffusers/pipelines/pipeline_utils.py
+++ b/src/diffusers/pipelines/pipeline_utils.py
@@ -204,11 +204,11 @@ def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLi
     non_variant_file_regex = re.compile(f"{'|'.join(weight_names)}")
 
     if variant is not None:
-        variant_filenames = set(f for f in filenames if variant_file_regex.match(f.split("/")[-1]) is not None)
+        variant_filenames = {f for f in filenames if variant_file_regex.match(f.split("/")[-1]) is not None}
     else:
         variant_filenames = set()
 
-    non_variant_filenames = set(f for f in filenames if non_variant_file_regex.match(f.split("/")[-1]) is not None)
+    non_variant_filenames = {f for f in filenames if non_variant_file_regex.match(f.split("/")[-1]) is not None}
 
     usable_filenames = set(variant_filenames)
     for f in non_variant_filenames:
@@ -225,7 +225,7 @@ def warn_deprecated_model_variant(pretrained_model_name_or_path, use_auth_token,
         use_auth_token=use_auth_token,
         revision=None,
     )
-    filenames = set(sibling.rfilename for sibling in info.siblings)
+    filenames = {sibling.rfilename for sibling in info.siblings}
     comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision)
     comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames]
 
@@ -1115,7 +1115,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
         # retrieve all folder_names that contain relevant files
         folder_names = [k for k, v in config_dict.items() if isinstance(v, list)]
 
-        filenames = set(sibling.rfilename for sibling in info.siblings)
+        filenames = {sibling.rfilename for sibling in info.siblings}
         model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
 
         # if the whole pipeline is cached we don't have to ping the Hub
@@ -1126,7 +1126,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
                 pretrained_model_name, use_auth_token, variant, revision, model_filenames
             )
 
-        model_folder_names = set([os.path.split(f)[0] for f in model_filenames])
+        model_folder_names = {os.path.split(f)[0] for f in model_filenames}
 
         # all filenames compatible with variant will be added
         allow_patterns = list(model_filenames)
@@ -1157,8 +1157,8 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
         elif use_safetensors and is_safetensors_compatible(model_filenames, variant=variant):
             ignore_patterns = ["*.bin", "*.msgpack"]
 
-            safetensors_variant_filenames = set([f for f in variant_filenames if f.endswith(".safetensors")])
-            safetensors_model_filenames = set([f for f in model_filenames if f.endswith(".safetensors")])
+            safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")}
+            safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")}
             if (
                 len(safetensors_variant_filenames) > 0
                 and safetensors_model_filenames != safetensors_variant_filenames
@@ -1169,8 +1169,8 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
         else:
             ignore_patterns = ["*.safetensors", "*.msgpack"]
 
-            bin_variant_filenames = set([f for f in variant_filenames if f.endswith(".bin")])
-            bin_model_filenames = set([f for f in model_filenames if f.endswith(".bin")])
+            bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")}
+            bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")}
             if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames:
                 logger.warn(
                     f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure."
@@ -1215,7 +1215,7 @@ def _get_signature_keys(obj):
         parameters = inspect.signature(obj.__init__).parameters
         required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
         optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
-        expected_modules = set(required_parameters.keys()) - set(["self"])
+        expected_modules = set(required_parameters.keys()) - {"self"}
         return expected_modules, optional_parameters
 
     @property
diff --git a/src/diffusers/pipelines/repaint/pipeline_repaint.py b/src/diffusers/pipelines/repaint/pipeline_repaint.py
index fabcd2610f43..f4914c46db51 100644
--- a/src/diffusers/pipelines/repaint/pipeline_repaint.py
+++ b/src/diffusers/pipelines/repaint/pipeline_repaint.py
@@ -37,7 +37,7 @@ def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
@@ -58,7 +58,7 @@ def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
 
     if isinstance(mask[0], PIL.Image.Image):
         w, h = mask[0].size
-        w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
         mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
         mask = np.concatenate(mask, axis=0)
         mask = mask.astype(np.float32) / 255.0
diff --git a/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py b/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py
index 00277adc7fbe..08d0878db588 100644
--- a/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py
+++ b/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py
@@ -166,7 +166,7 @@ def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges:
         self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps)
         self._event_ranges = [self._shift_range] + event_ranges
         # Ensure all event types have unique names.
-        assert len(self._event_ranges) == len(set([er.type for er in self._event_ranges]))
+        assert len(self._event_ranges) == len({er.type for er in self._event_ranges})
 
     @property
     def num_classes(self) -> int:
diff --git a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py
index ef4598433f82..7fbdbdab1fa9 100644
--- a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py
+++ b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py
@@ -274,18 +274,18 @@ def create_unet_diffusers_config(original_config, image_size: int, controlnet=Fa
     else:
         raise NotImplementedError(f"Unknown conditional unet num_classes config: {unet_params.num_classes}")
 
-    config = dict(
-        sample_size=image_size // vae_scale_factor,
-        in_channels=unet_params.in_channels,
-        down_block_types=tuple(down_block_types),
-        block_out_channels=tuple(block_out_channels),
-        layers_per_block=unet_params.num_res_blocks,
-        cross_attention_dim=unet_params.context_dim,
-        attention_head_dim=head_dim,
-        use_linear_projection=use_linear_projection,
-        class_embed_type=class_embed_type,
-        projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
-    )
+    config = {
+        "sample_size": image_size // vae_scale_factor,
+        "in_channels": unet_params.in_channels,
+        "down_block_types": tuple(down_block_types),
+        "block_out_channels": tuple(block_out_channels),
+        "layers_per_block": unet_params.num_res_blocks,
+        "cross_attention_dim": unet_params.context_dim,
+        "attention_head_dim": head_dim,
+        "use_linear_projection": use_linear_projection,
+        "class_embed_type": class_embed_type,
+        "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
+    }
 
     if not controlnet:
         config["out_channels"] = unet_params.out_channels
@@ -305,16 +305,16 @@ def create_vae_diffusers_config(original_config, image_size: int):
     down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
     up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)
 
-    config = dict(
-        sample_size=image_size,
-        in_channels=vae_params.in_channels,
-        out_channels=vae_params.out_ch,
-        down_block_types=tuple(down_block_types),
-        up_block_types=tuple(up_block_types),
-        block_out_channels=tuple(block_out_channels),
-        latent_channels=vae_params.z_channels,
-        layers_per_block=vae_params.num_res_blocks,
-    )
+    config = {
+        "sample_size": image_size,
+        "in_channels": vae_params.in_channels,
+        "out_channels": vae_params.out_ch,
+        "down_block_types": tuple(down_block_types),
+        "up_block_types": tuple(up_block_types),
+        "block_out_channels": tuple(block_out_channels),
+        "latent_channels": vae_params.z_channels,
+        "layers_per_block": vae_params.num_res_blocks,
+    }
 
     return config
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
index 67cda0cfef32..4616dc1e4849 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
@@ -44,7 +44,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_controlnet.py b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_controlnet.py
index 4dc450cebc84..5af07ec8b9c4 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_controlnet.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_controlnet.py
@@ -530,7 +530,7 @@ def unshard(x: jnp.ndarray):
 def preprocess(image, dtype):
     image = image.convert("RGB")
     w, h = image.size
-    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
+    w, h = (x - x % 64 for x in (w, h))  # resize to integer multiple of 64
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = jnp.array(image).astype(dtype) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py
index 95cab9df61e8..2063238df27a 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py
@@ -520,7 +520,7 @@ def unshard(x: jnp.ndarray):
 
 def preprocess(image, dtype):
     w, h = image.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = jnp.array(image).astype(dtype) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py
index 6e9b9ff6d00f..abb57f8b62e9 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py
@@ -563,7 +563,7 @@ def unshard(x: jnp.ndarray):
 
 def preprocess_image(image, dtype):
     w, h = image.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = jnp.array(image).astype(dtype) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
@@ -572,7 +572,7 @@ def preprocess_image(image, dtype):
 
 def preprocess_mask(mask, dtype):
     w, h = mask.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     mask = mask.resize((w, h))
     mask = jnp.array(mask.convert("L")).astype(dtype) / 255.0
     mask = jnp.expand_dims(mask, axis=(0, 1))
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py
index 910fbaacfcca..80c4a8692a05 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py
@@ -40,7 +40,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
+        w, h = (x - x % 64 for x in (w, h))  # resize to integer multiple of 64
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py
index 987a343c718b..5cb3abb4f54e 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py
@@ -19,7 +19,7 @@
 
 def preprocess(image):
     w, h = image.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
     image = image.resize((w, h), resample=PIL.Image.LANCZOS)
     image = np.array(image).astype(np.float32) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
@@ -29,7 +29,7 @@ def preprocess(image):
 def preprocess_mask(mask, scale_factor=8):
     mask = mask.convert("L")
     w, h = mask.size
-    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL.Image.NEAREST)
     mask = np.array(mask).astype(np.float32) / 255.0
     mask = np.tile(mask, (4, 1, 1))
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py
index 45b5a50467b0..b91262551b0f 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py
@@ -31,7 +31,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 32
+        w, h = (x - x % 64 for x in (w, h))  # resize to integer multiple of 64
 
         image = [np.array(i.resize((w, h)))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
index b66cfe9b437e..96282771d777 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
@@ -41,7 +41,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
@@ -442,7 +442,7 @@ def prepare_depth_map(self, image, depth_map, batch_size, do_classifier_free_gui
         if isinstance(image, PIL.Image.Image):
             image = [image]
         else:
-            image = [img for img in image]
+            image = list(image)
 
         if isinstance(image[0], PIL.Image.Image):
             width, height = image[0].size
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
index 1c94c58450ab..ba124ffecbee 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
@@ -78,7 +78,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
index 6fafe08285ee..3d1615968369 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
@@ -42,7 +42,7 @@
 
 def preprocess_image(image):
     w, h = image.size
-    w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+    w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
     image = np.array(image).astype(np.float32) / 255.0
     image = image[None].transpose(0, 3, 1, 2)
@@ -54,7 +54,7 @@ def preprocess_mask(mask, scale_factor=8):
     if not isinstance(mask, torch.FloatTensor):
         mask = mask.convert("L")
         w, h = mask.size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
         mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
         mask = np.array(mask).astype(np.float32) / 255.0
         mask = np.tile(mask, (4, 1, 1))
@@ -76,7 +76,7 @@ def preprocess_mask(mask, scale_factor=8):
         # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
         mask = mask.mean(dim=1, keepdim=True)
         h, w = mask.shape[-2:]
-        h, w = map(lambda x: x - x % 8, (h, w))  # resize to integer multiple of 8
+        h, w = (x - x % 8 for x in (h, w))  # resize to integer multiple of 8
         mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
         return mask
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
index a45937fd2045..5f4d6685185f 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
@@ -47,7 +47,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py
index 624d0e625828..822bd49ce31c 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py
@@ -38,7 +38,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
+        w, h = (x - x % 64 for x in (w, h))  # resize to integer multiple of 64
 
         image = [np.array(i.resize((w, h)))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py
index 4c2dbe6ff85d..bc072d4c73e8 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py
@@ -180,7 +180,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 8, (w, h))  # resize to integer multiple of 8
+        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
 
         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py
index 9f8f44a12bb4..b25a91d4caef 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py
@@ -37,7 +37,7 @@ def preprocess(image):
 
     if isinstance(image[0], PIL.Image.Image):
         w, h = image[0].size
-        w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
+        w, h = (x - x % 64 for x in (w, h))  # resize to integer multiple of 64
 
         image = [np.array(i.resize((w, h)))[None, :] for i in image]
         image = np.concatenate(image, axis=0)
diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py
index f9ae82568e5c..2b47184d7773 100644
--- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py
+++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py
@@ -134,7 +134,7 @@ def normalize_embeddings(encoder_output):
             return embeds
 
         if isinstance(prompt, torch.Tensor) and len(prompt.shape) == 4:
-            prompt = [p for p in prompt]
+            prompt = list(prompt)
 
         batch_size = len(prompt) if isinstance(prompt, list) else 1
diff --git a/src/diffusers/utils/outputs.py b/src/diffusers/utils/outputs.py
index f91a49b7a8a7..b6e8a219e129 100644
--- a/src/diffusers/utils/outputs.py
+++ b/src/diffusers/utils/outputs.py
@@ -84,7 +84,7 @@ def update(self, *args, **kwargs):
 
     def __getitem__(self, k):
         if isinstance(k, str):
-            inner_dict = {k: v for (k, v) in self.items()}
+            inner_dict = dict(self.items())
             return inner_dict[k]
         else:
             return self.to_tuple()[k]
diff --git a/utils/check_doc_toc.py b/utils/check_doc_toc.py
index c00feb9d8e3f..ff9285c63f16 100644
--- a/utils/check_doc_toc.py
+++ b/utils/check_doc_toc.py
@@ -43,7 +43,7 @@ def clean_doc_toc(doc_list):
 
     new_doc = []
     for duplicate_key in duplicates:
-        titles = list(set(doc["title"] for doc in doc_list if doc["local"] == duplicate_key))
+        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
         if len(titles) > 1:
             raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at " diff --git a/utils/check_repo.py b/utils/check_repo.py index 2cdb9af62de9..cfd2964f9dcc 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -219,7 +219,7 @@ def check_model_list(): # Get the models from the directory structure of `src/transformers/models/` models = [model for model in dir(diffusers.models) if not model.startswith("__")] - missing_models = sorted(list(set(_models).difference(models))) + missing_models = sorted(set(_models).difference(models)) if missing_models: raise Exception( f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." @@ -429,7 +429,7 @@ def get_all_auto_configured_models(): for attr_name in dir(diffusers.models.auto.modeling_flax_auto): if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(diffusers.models.auto.modeling_flax_auto, attr_name))) - return [cls for cls in result] + return list(result) def ignore_unautoclassed(model_name): diff --git a/utils/overwrite_expected_slice.py b/utils/overwrite_expected_slice.py index 95799f9ca625..7aa66727150a 100644 --- a/utils/overwrite_expected_slice.py +++ b/utils/overwrite_expected_slice.py @@ -67,7 +67,7 @@ def overwrite_file(file, class_name, test_name, correct_line, done_test): def main(correct, fail=None): if fail is not None: with open(fail, "r") as f: - test_failures = set([l.strip() for l in f.readlines()]) + test_failures = {l.strip() for l in f.readlines()} else: test_failures = None diff --git a/utils/stale.py b/utils/stale.py index 36631b65a3ba..12932f31c243 100644 --- a/utils/stale.py +++ b/utils/stale.py @@ -38,7 +38,7 @@ def main(): open_issues = repo.get_issues(state="open") for issue in open_issues: - comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True) + comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if ( last_comment is not None