From ac2adaa2998fa4b0b1ea0db22dbcfcafd972bf24 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Fri, 10 Jan 2025 08:25:00 -0500
Subject: [PATCH] cleanup

Signed-off-by: Vladimir Mandic
---
 installer.py                                | 1 -
 modules/instantir/ip_adapter/ip_adapter.py  | 2 --
 modules/linfusion/linfusion.py              | 4 +---
 modules/lora/lora_convert.py                | 2 --
 modules/meissonic/transformer.py            | 4 ----
 modules/omnigen/model.py                    | 1 -
 modules/pixelsmith/vae.py                   | 4 ----
 modules/pulid/eva_clip/hf_model.py          | 1 -
 modules/schedulers/scheduler_dc.py          | 1 -
 modules/schedulers/scheduler_tdd.py         | 4 +---
 modules/schedulers/scheduler_vdm.py         | 1 -
 modules/todo/todo_merge.py                  | 1 -
 12 files changed, 2 insertions(+), 24 deletions(-)

diff --git a/installer.py b/installer.py
index 96259da36..8f7ed7aca 100644
--- a/installer.py
+++ b/installer.py
@@ -1010,7 +1010,6 @@ def ensure_base_requirements():
     setuptools_version = '69.5.1'
 
     def update_setuptools():
-        # print('Install base requirements')
         global pkg_resources, setuptools, distutils # pylint: disable=global-statement
         # python may ship with incompatible setuptools
         subprocess.run(f'"{sys.executable}" -m pip install setuptools=={setuptools_version}', shell=True, check=False, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
diff --git a/modules/instantir/ip_adapter/ip_adapter.py b/modules/instantir/ip_adapter/ip_adapter.py
index 10f01d4f3..7f4bcbc45 100644
--- a/modules/instantir/ip_adapter/ip_adapter.py
+++ b/modules/instantir/ip_adapter/ip_adapter.py
@@ -169,8 +169,6 @@ def load_from_checkpoint(self, ckpt_path: str):
         if "latents" in state_dict["image_proj"] and "latents" in self.image_proj.state_dict():
             # Check if the shapes are mismatched
             if state_dict["image_proj"]["latents"].shape != self.image_proj.state_dict()["latents"].shape:
-                print(f"Shapes of 'image_proj.latents' in checkpoint {ckpt_path} and current model do not match.")
-                print("Removing 'latents' from checkpoint and loading the rest of the weights.")
                 del state_dict["image_proj"]["latents"]
                 strict_load_image_proj_model = False
 
diff --git a/modules/linfusion/linfusion.py b/modules/linfusion/linfusion.py
index 724cf2f3f..d8317e39b 100644
--- a/modules/linfusion/linfusion.py
+++ b/modules/linfusion/linfusion.py
@@ -89,9 +89,7 @@ def construct_for(
         pipe_name_path = pipe_name_path or pipeline._internal_dict._name_or_path # pylint: disable=protected-access
         pretrained_model_name_or_path = model_dict.get(pipe_name_path, None)
         if pretrained_model_name_or_path:
-            print(
-                f"Matching LinFusion '{pretrained_model_name_or_path}' for pipeline '{pipe_name_path}'."
-            )
+            pass
         else:
             raise RuntimeError(
                 f"LinFusion not found for pipeline [{pipe_name_path}], please provide the path."
diff --git a/modules/lora/lora_convert.py b/modules/lora/lora_convert.py
index 032ffa5a3..c2685aacd 100644
--- a/modules/lora/lora_convert.py
+++ b/modules/lora/lora_convert.py
@@ -205,8 +205,6 @@ def _convert_to_ai_toolkit_cat(sds_sd, ait_sd, sds_key, ait_keys, dims=None):
                     up_weight[i : i + dims[j], k * ait_rank : (k + 1) * ait_rank] == 0
                 )
             i += dims[j]
-        # if is_sparse:
-        #     print(f"weight is sparse: {sds_key}")
 
     # make ai-toolkit weight
     ait_down_keys = [k + ".lora_down.weight" for k in ait_keys]
diff --git a/modules/meissonic/transformer.py b/modules/meissonic/transformer.py
index 43e77ddc7..543c30108 100644
--- a/modules/meissonic/transformer.py
+++ b/modules/meissonic/transformer.py
@@ -670,15 +670,11 @@ def __init__(
             self.upsample = None
 
     def forward(self, x):
-        # print("before,", x.shape)
         if self.downsample is not None:
-            # print('downsample')
             x = self.downsample(x)
 
         if self.upsample is not None:
-            # print('upsample')
             x = self.upsample(x)
-        # print("after,", x.shape)
 
         return x
 
diff --git a/modules/omnigen/model.py b/modules/omnigen/model.py
index 3a42263d2..17d696b53 100644
--- a/modules/omnigen/model.py
+++ b/modules/omnigen/model.py
@@ -259,7 +259,6 @@ def cropped_pos_embed(self, height, width):
         left = (self.pos_embed_max_size - width) // 2
         spatial_pos_embed = self.pos_embed.reshape(1, self.pos_embed_max_size, self.pos_embed_max_size, -1)
         spatial_pos_embed = spatial_pos_embed[:, top : top + height, left : left + width, :]
-        # print(top, top + height, left, left + width, spatial_pos_embed.size())
         spatial_pos_embed = spatial_pos_embed.reshape(1, -1, spatial_pos_embed.shape[-1])
         return spatial_pos_embed
 
diff --git a/modules/pixelsmith/vae.py b/modules/pixelsmith/vae.py
index 93d376cfd..98e051695 100644
--- a/modules/pixelsmith/vae.py
+++ b/modules/pixelsmith/vae.py
@@ -681,10 +681,6 @@ def __init__(
             if self.unknown_index == "extra":
                 self.unknown_index = self.re_embed
                 self.re_embed = self.re_embed + 1
-            print(
-                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
-                f"Using {self.unknown_index} for unknown indices."
-            )
         else:
             self.re_embed = n_e
 
diff --git a/modules/pulid/eva_clip/hf_model.py b/modules/pulid/eva_clip/hf_model.py
index d148bbff2..0b9551993 100644
--- a/modules/pulid/eva_clip/hf_model.py
+++ b/modules/pulid/eva_clip/hf_model.py
@@ -222,7 +222,6 @@ def lock(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
 
         encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
         layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
-        print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
         embeddings = getattr(
             self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
         modules = [embeddings, *layer_list][:-unlocked_layers]
diff --git a/modules/schedulers/scheduler_dc.py b/modules/schedulers/scheduler_dc.py
index a1ccfbeba..190588855 100644
--- a/modules/schedulers/scheduler_dc.py
+++ b/modules/schedulers/scheduler_dc.py
@@ -820,7 +820,6 @@ def closure(ratio_param):
             loss.backward()
             optimizer.step()
             ratio_bound = bound_func(ratio_param)
-            print(f'iter [{iter_}]', ratio_bound.item(), loss.item())
             torch.cuda.empty_cache()
 
         return ratio_bound.data.detach().item()
diff --git a/modules/schedulers/scheduler_tdd.py b/modules/schedulers/scheduler_tdd.py
index 03c9da1a5..125ef1b3b 100644
--- a/modules/schedulers/scheduler_tdd.py
+++ b/modules/schedulers/scheduler_tdd.py
@@ -117,8 +117,7 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
             timesteps = tcd_origin_timesteps[inference_indices]
             if self.special_jump:
                 if self.tdd_train_step == 50:
-                    #timesteps = np.array([999., 879., 759., 499., 259.])
-                    print(timesteps)
+                    pass
                 elif self.tdd_train_step == 250:
                     if num_inference_steps == 5:
                         timesteps = np.array([999., 875., 751., 499., 251.])
@@ -203,7 +202,6 @@ def set_timesteps_s(self, eta: float = 0.0):
         sigmas_s = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
 
         if self.config.use_karras_sigmas:
-            print("have not write")
             pass
         else:
             sigmas_s = np.interp(timesteps_s, np.arange(0, len(sigmas_s)), sigmas_s)
diff --git a/modules/schedulers/scheduler_vdm.py b/modules/schedulers/scheduler_vdm.py
index 492c30a0c..4f48db163 100644
--- a/modules/schedulers/scheduler_vdm.py
+++ b/modules/schedulers/scheduler_vdm.py
@@ -355,7 +355,6 @@ def step(
         )
 
         # 3. Clip or threshold "predicted x_0"
-        # print({ 'timestep': timestep.item(), 'min': pred_original_sample.min().item(), 'max': pred_original_sample.max().item(), 'alpha': alpha.item(), 'sigma': sigma.item() })
         if self.config.thresholding:
             pred_original_sample = self._threshold_sample(pred_original_sample)
         elif self.config.clip_sample:
diff --git a/modules/todo/todo_merge.py b/modules/todo/todo_merge.py
index bfbff5621..77840d6ed 100644
--- a/modules/todo/todo_merge.py
+++ b/modules/todo/todo_merge.py
@@ -25,7 +25,6 @@ def init_generator(device: torch.device, fallback: torch.Generator = None):
     """
     Forks the current default random generator given device.
     """
-    print(f"init_generator device = {device}")
     if device.type == "cpu":
         return torch.Generator(device="cpu").set_state(torch.get_rng_state())
     elif device.type == "cuda":