All port stages tested
szmazurek committed Jan 16, 2025
1 parent cb6c184 commit f0b9507
Showing 1 changed file with 100 additions and 100 deletions.
testing/test_lightning_components.py: 200 changes (100 additions, 100 deletions)
@@ -516,105 +516,105 @@ def test_port_model_3d_rad_regression_single_device_single_node(device):


def test_port_model_2d_rad_classification_single_device_single_node(device):
-    # with TrainerTestsContextManager():
-    parameters = parseConfig(
-        TESTS_DIRPATH + "/config_classification.yaml", version_check_flag=False
-    )
-    parameters["modality"] = "rad"
-    parameters["track_memory_usage"] = True
-    parameters["patch_size"] = PATCH_SIZE["2D"]
-    parameters["model"]["dimension"] = 2
-    parameters["model"]["final_layer"] = "logits"
-    training_data, parameters["headers"] = parseTrainingCSV(
-        TEST_DATA_DIRPATH + "/train_2d_rad_classification.csv"
-    )
-    parameters["model"]["num_channels"] = 3
-    parameters["model"]["onnx_export"] = False
-    parameters["model"]["print_summary"] = False
-    parameters["save_output"] = True
-    parameters["model"]["architecture"] = "densenet121"
-    parameters = populate_header_in_parameters(parameters, parameters["headers"])
-    dataset = ImagesFromDataFrame(
-        training_data, parameters, train=True, loader_type="train"
-    )
-    dataset_val = ImagesFromDataFrame(
-        training_data, parameters, train=False, loader_type="validation"
-    )
-    dataset_test = ImagesFromDataFrame(
-        training_data, parameters, train=False, loader_type="test"
-    )
-    train_dataloader = torch.utils.data.DataLoader(
-        dataset, batch_size=parameters["batch_size"], shuffle=True
-    )
-    val_dataloader = torch.utils.data.DataLoader(
-        dataset_val, batch_size=parameters["batch_size"], shuffle=False
-    )
-    test_dataloader = torch.utils.data.DataLoader(
-        dataset_test, batch_size=parameters["batch_size"], shuffle=False
-    )
-    parameters = populate_channel_keys_in_params(train_dataloader, parameters)
-    module = GandlfLightningModule(parameters, output_dir=TEST_DATA_OUTPUT_DIRPATH)
-    trainer = pl.Trainer(
-        accelerator="auto",
-        strategy="auto",
-        fast_dev_run=False,
-        devices=1,
-        num_nodes=1,
-        max_epochs=parameters["num_epochs"],
-        sync_batchnorm=False,
-        enable_checkpointing=False,
-        logger=False,
-        num_sanity_val_steps=0,
-    )
-    trainer.fit(module, train_dataloader, val_dataloader)
-    trainer.test(module, test_dataloader)
+    with TrainerTestsContextManager():
+        parameters = parseConfig(
+            TESTS_DIRPATH + "/config_classification.yaml", version_check_flag=False
+        )
+        parameters["modality"] = "rad"
+        parameters["track_memory_usage"] = True
+        parameters["patch_size"] = PATCH_SIZE["2D"]
+        parameters["model"]["dimension"] = 2
+        parameters["model"]["final_layer"] = "logits"
+        training_data, parameters["headers"] = parseTrainingCSV(
+            TEST_DATA_DIRPATH + "/train_2d_rad_classification.csv"
+        )
+        parameters["model"]["num_channels"] = 3
+        parameters["model"]["onnx_export"] = False
+        parameters["model"]["print_summary"] = False
+        parameters["save_output"] = True
+        parameters["model"]["architecture"] = "densenet121"
+        parameters = populate_header_in_parameters(parameters, parameters["headers"])
+        dataset = ImagesFromDataFrame(
+            training_data, parameters, train=True, loader_type="train"
+        )
+        dataset_val = ImagesFromDataFrame(
+            training_data, parameters, train=False, loader_type="validation"
+        )
+        dataset_test = ImagesFromDataFrame(
+            training_data, parameters, train=False, loader_type="test"
+        )
+        train_dataloader = torch.utils.data.DataLoader(
+            dataset, batch_size=parameters["batch_size"], shuffle=True
+        )
+        val_dataloader = torch.utils.data.DataLoader(
+            dataset_val, batch_size=parameters["batch_size"], shuffle=False
+        )
+        test_dataloader = torch.utils.data.DataLoader(
+            dataset_test, batch_size=parameters["batch_size"], shuffle=False
+        )
+        parameters = populate_channel_keys_in_params(train_dataloader, parameters)
+        module = GandlfLightningModule(parameters, output_dir=TEST_DATA_OUTPUT_DIRPATH)
+        trainer = pl.Trainer(
+            accelerator="auto",
+            strategy="auto",
+            fast_dev_run=False,
+            devices=1,
+            num_nodes=1,
+            max_epochs=parameters["num_epochs"],
+            sync_batchnorm=False,
+            enable_checkpointing=False,
+            logger=False,
+            num_sanity_val_steps=0,
+        )
+        trainer.fit(module, train_dataloader, val_dataloader)
+        trainer.test(module, test_dataloader)

-    inference_data, parameters["headers"] = parseTrainingCSV(
-        TEST_DATA_DIRPATH + "/train_2d_rad_classification.csv"
-    )
-    inference_data.drop("ValueToPredict", axis=1, inplace=True)
-    inference_data.drop("Label", axis=1, inplace=True)
-    temp_infer_csv = os.path.join(TEST_DATA_OUTPUT_DIRPATH, "temp_infer_csv.csv")
-    inference_data.to_csv(temp_infer_csv, index=False)
-    parameters = parseConfig(
-        TESTS_DIRPATH + "/config_classification.yaml", version_check_flag=False
-    )
-    inference_data, parameters["headers"] = parseTrainingCSV(temp_infer_csv)
-    parameters["output_dir"] = TEST_DATA_OUTPUT_DIRPATH  # this is in inference mode
-    parameters["modality"] = "rad"
-    parameters["patch_size"] = PATCH_SIZE["2D"]
-    parameters["model"]["dimension"] = 2
-    parameters["model"]["final_layer"] = "logits"
-    parameters["model"]["num_channels"] = 3
-    parameters = populate_header_in_parameters(parameters, parameters["headers"])
-    parameters["model"]["architecture"] = "densenet121"
-    parameters["model"]["onnx_export"] = False
-    parameters["differential_privacy"] = False
-    parameters["save_output"] = True
-
-    dataset = ImagesFromDataFrame(
-        inference_data, parameters, train=False, loader_type="testing"
-    )
-
-    inference_dataloader = torch.utils.data.DataLoader(
-        dataset, batch_size=parameters["batch_size"], shuffle=True
-    )
-    parameters = populate_channel_keys_in_params(inference_dataloader, parameters)
-
-    module = GandlfLightningModule(parameters, output_dir=TEST_DATA_OUTPUT_DIRPATH)
-    trainer = pl.Trainer(
-        accelerator="auto",
-        strategy="auto",
-        fast_dev_run=False,
-        devices=1,
-        num_nodes=1,
-        max_epochs=parameters["num_epochs"],
-        sync_batchnorm=False,
-        enable_checkpointing=False,
-        logger=False,
-        num_sanity_val_steps=0,
-    )
-    trainer.predict(module, inference_dataloader)
+        inference_data, parameters["headers"] = parseTrainingCSV(
+            TEST_DATA_DIRPATH + "/train_2d_rad_classification.csv"
+        )
+        inference_data.drop("ValueToPredict", axis=1, inplace=True)
+        inference_data.drop("Label", axis=1, inplace=True)
+        temp_infer_csv = os.path.join(TEST_DATA_OUTPUT_DIRPATH, "temp_infer_csv.csv")
+        inference_data.to_csv(temp_infer_csv, index=False)
+        parameters = parseConfig(
+            TESTS_DIRPATH + "/config_classification.yaml", version_check_flag=False
+        )
+        inference_data, parameters["headers"] = parseTrainingCSV(temp_infer_csv)
+        parameters["output_dir"] = TEST_DATA_OUTPUT_DIRPATH  # this is in inference mode
+        parameters["modality"] = "rad"
+        parameters["patch_size"] = PATCH_SIZE["2D"]
+        parameters["model"]["dimension"] = 2
+        parameters["model"]["final_layer"] = "logits"
+        parameters["model"]["num_channels"] = 3
+        parameters = populate_header_in_parameters(parameters, parameters["headers"])
+        parameters["model"]["architecture"] = "densenet121"
+        parameters["model"]["onnx_export"] = False
+        parameters["differential_privacy"] = False
+        parameters["save_output"] = True
+
+        dataset = ImagesFromDataFrame(
+            inference_data, parameters, train=False, loader_type="testing"
+        )
+
+        inference_dataloader = torch.utils.data.DataLoader(
+            dataset, batch_size=parameters["batch_size"], shuffle=True
+        )
+        parameters = populate_channel_keys_in_params(inference_dataloader, parameters)
+
+        module = GandlfLightningModule(parameters, output_dir=TEST_DATA_OUTPUT_DIRPATH)
+        trainer = pl.Trainer(
+            accelerator="auto",
+            strategy="auto",
+            fast_dev_run=False,
+            devices=1,
+            num_nodes=1,
+            max_epochs=parameters["num_epochs"],
+            sync_batchnorm=False,
+            enable_checkpointing=False,
+            logger=False,
+            num_sanity_val_steps=0,
+        )
+        trainer.predict(module, inference_dataloader)


# TODO Refactor this and other tests
@@ -725,7 +725,7 @@ def test_port_model_3d_rad_classification_single_device_single_node(device):
        trainer.predict(module, inference_dataloader)


-def test_port_model_inference_classification_histology_2d(device):
+def test_port_model_classification_histology_2d_single_device_single_node(device):
    with TrainerTestsContextManager():
        output_dir_patches = os.path.join(TEST_DATA_OUTPUT_DIRPATH, "histo_patches")
        if os.path.isdir(output_dir_patches):
Expand Down Expand Up @@ -823,7 +823,7 @@ def test_port_model_inference_classification_histology_2d(device):
trainer.predict(module, inference_data.iterrows())


def test_port_model_inference_segmentation_histology_2d():
def test_port_model_segmentation_histology_2d_single_device_single_node(device):
with TrainerTestsContextManager():
output_dir_patches = os.path.join(TEST_DATA_OUTPUT_DIRPATH, "histo_patches")
if os.path.isdir(output_dir_patches):
@@ -866,7 +866,7 @@ def test_port_model_inference_segmentation_histology_2d():
        parameters["nested_training"]["testing"] = 1
        parameters["nested_training"]["validation"] = -2
        parameters["metrics"] = ["dice"]
-        parameters["model"]["onnx_export"] = True
+        parameters["model"]["onnx_export"] = False
        parameters["model"]["print_summary"] = True
        parameters["data_preprocessing"]["resize_image"] = [128, 128]

1 comment on commit f0b9507

@github-actions

@check-spelling-bot Report

🔴 Please review

See the 📜 action log or 📝 job summary for details.

Unrecognized words (6)

angi
disbled
indded
sikt
somewhow
unpredicatably

These words are not needed and should be removed: allgather, skipif, Stdnet

Some files were automatically ignored 🙈

These sample patterns would exclude them:

/classification_medmnist_notebook/medmnist/dataset/[^/]+$
[^/]\.png$
^\Q.gitattributes\E$
^\Q.spelling/.spelling/expect.txt\E$
^\Q__init__.py\E$
^\QGANDLF/data/patch_miner/__init__.py\E$
^\QGANDLF/data/patch_miner/opm/__init__.py\E$
^\QGANDLF/grad_clipping/__init__.py\E$
^\QGANDLF/models/seg_modules/__init__.py\E$
^\QGANDLF/privacy/__init__.py\E$
^\Qtesting/__init__.py\E$
^\Qtutorials/classification_medmnist_notebook/tutorial.ipynb\E$
^samples/images_opm/

You should consider excluding directory paths (e.g. (?:^|/)vendor/), filenames (e.g. (?:^|/)yarn\.lock$), or file extensions (e.g. \.gz$)

You should consider adding them to:

.github/actions/spelling/excludes.txt

File matching is via Perl regular expressions.
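
For instance, a minimal sketch of acting on this suggestion, using two of the sample patterns already listed above, would be to append those lines to .github/actions/spelling/excludes.txt exactly as given:

    [^/]\.png$
    ^samples/images_opm/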

To check these files, more of their words need to be in the dictionary than not. You can use patterns.txt to exclude portions, add items to the dictionary (e.g. by adding them to allow.txt), or fix typos.
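
As a rough illustration only (the split below is hypothetical, and deciding which route fits each word is a judgment call): misspellings such as disbled, indded, somewhow, and unpredicatably are probably best fixed in the source, whereas a term that is genuinely intended could be accepted by adding it to allow.txt, one word per line:

    angi
    sikt

Tokens that are not real words but follow a predictable shape (identifiers, hashes, locale codes) are usually better masked with a regular expression in patterns.txt than added to the dictionary.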

To accept these unrecognized words as correct, update file exclusions, and remove the previously acknowledged and now absent words, you could run the following commands in a clone of the [email protected]:szmazurek/GaNDLF.git repository, on the inference_pass branch:

curl -s -S -L 'https://raw.githubusercontent.com/check-spelling/check-spelling/main/apply.pl' |
perl - 'https://github.com/szmazurek/GaNDLF/actions/runs/12814261439/attempts/1'
Available 📚 dictionaries could cover words not in the 📘 dictionary

Dictionary                        Entries   Covers   Uniquely
cspell:java/src/java-terms.txt    920       2        2

Consider adding them in .github/workflows/spellchecker.yml, under jobs:/spelling:, in the with: block of uses: check-spelling/check-spelling@main:

      with:
        extra_dictionaries: |
          cspell:java/src/java-terms.txt

To stop checking additional dictionaries, add the following in .github/workflows/spellchecker.yml, in the with: block of uses: check-spelling/check-spelling@main:

check_extra_dictionaries: ''
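
For orientation, a sketch of how the two settings could sit together in .github/workflows/spellchecker.yml is shown below; the surrounding job layout is abbreviated and assumed, and only the with: entries come from this report:

    jobs:
      spelling:
        # ... runner, permissions and other inputs as already configured ...
        steps:
          - uses: check-spelling/check-spelling@main
            with:
              extra_dictionaries: |
                cspell:java/src/java-terms.txt
              check_extra_dictionaries: ''

This end state enables the suggested Java term dictionary while silencing further dictionary suggestions; either setting can also be used on its own.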
Warnings (3)

See the 📜 action log or 📝 job summary for details.

⚠️ Warnings      Count
binary-file      19
large-file       2
noisy-file       2

See ⚠️ Event descriptions for more information.
