From 28748813ff0bd9aa37f9d5e6a085f96e85748487 Mon Sep 17 00:00:00 2001
From: FengSibo
Date: Mon, 6 Jan 2025 14:52:17 +0800
Subject: [PATCH] fix(hybrid optim): fp32_grad not scaled when use offload_cpu (#399)

---
 internlm/solver/optimizer/hybrid_zero_optim.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internlm/solver/optimizer/hybrid_zero_optim.py b/internlm/solver/optimizer/hybrid_zero_optim.py
index 49f3fbcf..8d3ce3ad 100644
--- a/internlm/solver/optimizer/hybrid_zero_optim.py
+++ b/internlm/solver/optimizer/hybrid_zero_optim.py
@@ -839,9 +839,9 @@ def _step(self, closure=None, norms=None):
                 param_shape == flat_fp32_avg_grads.shape
             ), f"fp32 param and grad have different shape {param_shape} vs {flat_fp32_avg_grads.shape}"
 
-            single_grad_partition_groups.append(flat_fp32_avg_grads)
             device = self._fp32_flat_param_groups_of_current_rank[group_id].device
             self._fp32_flat_param_groups_of_current_rank[group_id].grad = flat_fp32_avg_grads.to(device)
+            single_grad_partition_groups.append(self._fp32_flat_param_groups_of_current_rank[group_id].grad)
 
         # unscale and clip grads
         # get the global norm
         global_norm_groups = {}
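
For context, here is a minimal sketch of the aliasing bug this patch fixes. It is hand-written for this note, not InternLM code: `torch.Tensor.to()` returns a new tensor when the source and target devices differ, so unscaling the pre-copy tensor previously tracked in `single_grad_partition_groups` never reached the CPU-offloaded `.grad` the optimizer actually reads. The names `loss_scale` and `fp32_param` are illustrative, and `copy=True` stands in for the CUDA-to-CPU device mismatch so the sketch also runs on a CPU-only machine.

```python
import torch

loss_scale = 128.0                  # hypothetical loss scale
fp32_param = torch.zeros(4)         # stand-in for an fp32 master param kept on CPU
flat_fp32_avg_grads = torch.full((4,), 2.0 * loss_scale)  # still multiplied by the loss scale

# Attach the grads to the master param, as _step() does. With offload_cpu the
# devices differ, so .to(device) copies; copy=True simulates that here.
fp32_param.grad = flat_fp32_avg_grads.to(fp32_param.device, copy=True)

# Before the fix: the pre-copy tensor was tracked, so in-place unscaling
# mutated a tensor the optimizer never looks at.
flat_fp32_avg_grads.div_(loss_scale)
print(fp32_param.grad[0].item())    # 256.0 -- the real grad was never unscaled

# After the fix: track fp32_param.grad itself, so unscale/clip mutate the
# same storage the optimizer steps on.
fp32_param.grad.div_(loss_scale)
print(fp32_param.grad[0].item())    # 2.0, as intended
```

With the fix, `single_grad_partition_groups` holds the exact tensor attached as `.grad`, so the subsequent unscale-and-clip step mutates the storage the optimizer consumes, on both the GPU and CPU-offload paths.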