From 7cad68301f8539eecf4043eb172ce2c8d5c0bfb3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nicol=C3=A1s=20Della=20Penna?=
Date: Sun, 25 Feb 2018 04:53:26 +0100
Subject: [PATCH] DistributedDataParallel

This should help with multiple GPUs.
---
 src/train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/train.py b/src/train.py
index 9e1da97..fb55096 100644
--- a/src/train.py
+++ b/src/train.py
@@ -189,7 +189,7 @@ def train(train_file, validation_file, batch_size, epoch_limit, file_name, gpu_m
                      + ' AT EPOCH: ' + str(start_epoch) + "\n" + TextColor.END)
 
     if gpu_mode:
-        model = torch.nn.DataParallel(model).cuda()
+        model = torch.nn.parallel.DistributedDataParallel(model).cuda()
 
     # Train the Model
     sys.stderr.write(TextColor.PURPLE + 'Training starting\n' + TextColor.END)
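
Note: DistributedDataParallel is not a drop-in replacement for DataParallel (the class lives under torch.nn.parallel, not torch.nn, hence the corrected path above). It also requires a process group to be initialized with torch.distributed.init_process_group before the model is wrapped, and each worker process is normally pinned to one GPU. The one-line change above does not add that setup, so train.py would need something along the lines of the sketch below. This is a minimal sketch under assumed settings (single node, NCCL backend, job launched with torchrun or torch.distributed.launch so RANK, WORLD_SIZE, MASTER_ADDR/MASTER_PORT and LOCAL_RANK are set); the placeholder model and variable names are illustrative and not taken from src/train.py.

    import os
    import torch
    import torch.distributed as dist

    # Assumption: the launcher exports RANK, WORLD_SIZE, MASTER_ADDR,
    # MASTER_PORT and LOCAL_RANK, so env:// initialization can read them.
    dist.init_process_group(backend='nccl', init_method='env://')

    # Pin this worker process to one GPU.
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    torch.cuda.set_device(local_rank)

    # Placeholder model, not the model defined in src/train.py.
    model = torch.nn.Linear(10, 2).cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])

For correct sharding, the DataLoader would also typically use torch.utils.data.distributed.DistributedSampler so each rank trains on a distinct slice of the data; plain DataParallel needs none of these steps, which is why the single-line swap by itself is unlikely to run.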