From 307aa64783f7242c3e8ba95e9689e4df5171704e Mon Sep 17 00:00:00 2001
From: Henry
Date: Thu, 30 Mar 2023 18:40:47 +0200
Subject: [PATCH] Fix RuntimeError by keeping all tensors on the same device

If we set `gpu = 0` in the function `transform`
(https://github.com/hdnh2006/cartoonify/blob/a37167c5cda7a56362395271d30aecd5098bbbcd/cartoongan/test_from_code.py#L14),
the code fails with a RuntimeError because `torch.FloatTensor([n])` is
created on the CPU rather than on the same device as `var`, so the
operation

```
var = var.unsqueeze(2).unsqueeze(3).expand_as(x) * (
    (n - 1) / torch.FloatTensor([n])
)
```

mixes a CUDA tensor with a CPU tensor. Moving the scalar tensor to
`var.device` fixes the mismatch.
---
 cartoongan/network/Transformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cartoongan/network/Transformer.py b/cartoongan/network/Transformer.py
index af2ff88..3e3860d 100644
--- a/cartoongan/network/Transformer.py
+++ b/cartoongan/network/Transformer.py
@@ -170,7 +170,7 @@ def __call__(self, x):
         # Calculate the biased var. torch.var returns unbiased var
         var = torch.std(t, 2) ** 2
         var = var.unsqueeze(2).unsqueeze(3).expand_as(x) * (
-            (n - 1) / torch.FloatTensor([n])
+            (n - 1) / torch.FloatTensor([n]).to(var.device)
         )
         scale_broadcast = self.scale.unsqueeze(1).unsqueeze(1).unsqueeze(0)
         scale_broadcast = scale_broadcast.expand_as(x)
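
For reviewers, a minimal sketch reproducing the device mismatch in isolation. It assumes a CUDA device is available; the tensor shape and the value of `n` are illustrative stand-ins, not the ones used in `Transformer.py`:

```python
# Minimal repro sketch (assumes a CUDA device; shape and n are illustrative).
import torch

if torch.cuda.is_available():
    var = torch.rand(2, 3, device="cuda:0")  # stand-in for the layer's variance tensor
    n = 16

    try:
        # Fails: torch.FloatTensor([n]) lives on the CPU while var lives on the GPU.
        var * ((n - 1) / torch.FloatTensor([n]))
    except RuntimeError as e:
        print("Without .to(var.device):", e)

    # Works: the scalar tensor is moved to var's device before the multiply.
    out = var * ((n - 1) / torch.FloatTensor([n]).to(var.device))
    print("With .to(var.device):", out.device)  # cuda:0
```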