diff --git a/tutorial/EpidemicLearning/config_EL.ini b/tutorial/EpidemicLearning/config_EL.ini
index ce876adabf4..39c49abd9ac 100644
--- a/tutorial/EpidemicLearning/config_EL.ini
+++ b/tutorial/EpidemicLearning/config_EL.ini
@@ -10,19 +10,23 @@ test_dir = ../../eval/data/
 sizes = 
 random_seed = 90
 partition_niid = dirichlet
-alpha = 0.1 ; alpha (dirichlet parameter)
+alpha = 0.1
+; alpha (dirichlet parameter)
 
 [OPTIMIZER_PARAMS]
 optimizer_package = torch.optim
 optimizer_class = SGD
-lr = 0.05 ; gamma
+lr = 0.05
+; gamma
 
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.Training
 training_class = Training
-rounds = 10 ; r
+rounds = 10
+; r
 full_epochs = False
-batch_size = 5 ; b
+batch_size = 5
+; b
 shuffle = True
 loss_package = torch.nn
 loss_class = CrossEntropyLoss
@@ -33,9 +37,11 @@ comm_class = TCP
 addresses_filepath = ip.json
 
 [SHARING]
-sharing_package = decentralizepy.sharing.PlainAverageSharing ; Does not use Metropolis-Hastings
+sharing_package = decentralizepy.sharing.PlainAverageSharing
+; Does not use Metropolis-Hastings
 sharing_class = PlainAverageSharing
 compress = False
 
 [NODE]
-graph_degree = 7 ; s (number of neighbors in EL-Oracle and number of random neighbors picked to send message to in EL-Local)
\ No newline at end of file
+graph_degree = 7
+; s (number of neighbors in EL-Oracle and number of random neighbors picked to send message to in EL-Local)
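
Note: a minimal sketch of why this diff moves each inline `;` comment onto its own line, assuming the tutorial's `.ini` files are read with Python's `configparser` (the standard reader for this format; the exact parser decentralizepy uses is an assumption here). By default, `configparser` only recognizes `;` as a comment at the start of a line, so an inline comment leaks into the parsed value:

```python
# Sketch, not decentralizepy code: shows default configparser behavior.
import configparser

old_style = "[TRAIN_PARAMS]\nbatch_size = 5 ; b\n"   # inline comment (before this diff)
new_style = "[TRAIN_PARAMS]\nbatch_size = 5\n; b\n"  # full-line comment (after this diff)

parser = configparser.ConfigParser()
parser.read_string(old_style)
print(repr(parser["TRAIN_PARAMS"]["batch_size"]))
# -> '5 ; b'  (the comment is part of the value; int('5 ; b') would raise ValueError)

parser = configparser.ConfigParser()
parser.read_string(new_style)
print(repr(parser["TRAIN_PARAMS"]["batch_size"]))
# -> '5'  (the own-line comment is stripped, so the value parses cleanly)
```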