From b12d52b86309d84235b16ba097e5660264a98035 Mon Sep 17 00:00:00 2001 From: WinstonHuTiger <2953620996@qq.com> Date: Thu, 22 Aug 2024 14:27:16 -0400 Subject: [PATCH 1/3] add metrics mlcube for brasyn challenge with two pre-trained nnUNet --- .../mlcube/mlcube.yaml | 27 +++++ .../mlcube/mlcube_gpu.yaml | 28 +++++ .../mlcube/workspace/parameters.yaml | 4 + .../mlcube/workspace/parameters_gpu.yaml | 4 + .../project/Dockerfile | 31 +++++ .../project/README.md | 9 ++ .../project/entrypoint.sh | 108 +++++++++++++++++ .../project/parse_gpu_require.py | 13 +++ .../project/prepare_data_input.py | 110 ++++++++++++++++++ .../project/process_tool_output.py | 91 +++++++++++++++ .../project/requirements.txt | 4 + .../project/write_metrics.py | 27 +++++ 12 files changed, 456 insertions(+) create mode 100644 examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube.yaml create mode 100644 examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube_gpu.yaml create mode 100644 examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml create mode 100644 examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/Dockerfile create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/README.md create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/parse_gpu_require.py create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/prepare_data_input.py create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/process_tool_output.py create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/requirements.txt create mode 100644 examples/BraTS2024/global_synthesis_metrics/project/write_metrics.py diff --git a/examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube.yaml b/examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube.yaml new file mode 100644 index 000000000..dc9a93daa --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube.yaml @@ -0,0 +1,27 @@ +name: BraTS2024 Missing MRI task Metrics +description: BraTS2024 Missing MRI task Metrics +authors: + - { name: MLCommons Medical Working Group } + +platform: + accelerator_count: 0 + +docker: + # Image name + image: mlcommons/brats2024-global-synthesis-metrics:0.0.0 + # Docker build context relative to $MLCUBE_ROOT. Default is `build`. + build_context: "../project" + # Docker file name within docker build context, default is `Dockerfile`. + build_file: "Dockerfile" + +tasks: + evaluate: + # Computes evaluation metrics on the given predictions and ground truths + parameters: + inputs: + { + predictions: predictions, + labels: labels, + parameters_file: parameters.yaml, + } + outputs: { output_path: { type: "file", default: "results.yaml" } } \ No newline at end of file diff --git a/examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube_gpu.yaml b/examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube_gpu.yaml new file mode 100644 index 000000000..d37a35077 --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube_gpu.yaml @@ -0,0 +1,28 @@ +name: BraTS2024 Missing MRI task Metrics +description: BraTS2024 Missing MRI task Metrics +authors: + - { name: MLCommons Medical Working Group } + +platform: + accelerator_count: 1 + +docker: + # Image name + image: mlcommons/brats2024-global-synthesis-metrics:0.0.0 + # Docker build context relative to $MLCUBE_ROOT. 
Default is `build`. + build_context: "../project" + # Docker file name within docker build context, default is `Dockerfile`. + build_file: "Dockerfile" + gpu_args: --gpus all + +tasks: + evaluate: + # Computes evaluation metrics on the given predictions and ground truths + parameters: + inputs: + { + predictions: predictions, + labels: labels, + parameters_file: parameters.yaml, + } + outputs: { output_path: { type: "file", default: "results.yaml" } } \ No newline at end of file diff --git a/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml new file mode 100644 index 000000000..423afa8ce --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml @@ -0,0 +1,4 @@ +original_data_in_labels: original_data +missing_modality_json: "missing.json" +segmentation_labels: segmentation_labels +requires_gpu: False \ No newline at end of file diff --git a/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml new file mode 100644 index 000000000..a81625ca2 --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml @@ -0,0 +1,4 @@ +original_data_in_labels: original_data +missing_modality_json: "missing.json" +segmentation_labels: segmentation_labels +requires_gpu: True \ No newline at end of file diff --git a/examples/BraTS2024/global_synthesis_metrics/project/Dockerfile b/examples/BraTS2024/global_synthesis_metrics/project/Dockerfile new file mode 100644 index 000000000..f63219712 --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/project/Dockerfile @@ -0,0 +1,31 @@ +FROM winstonhutiger/brasyn_nnunet:glioma_metasis + +# At the time of writing this, segmentation metrics and other +# metrics are in different branches. Create two separate +# virtual envs. + +# Create venv for GaNDLF segmentation metrics +RUN python3 -m venv /seg_venv && /seg_venv/bin/pip install --upgrade pip +RUN git clone https://github.com/rachitsaluja/GaNDLF.git seg_GaNDLF && \ + cd seg_GaNDLF && git checkout c2a2c1cc6fc1d307a70068160066acdf1e8cd8bc && \ + /seg_venv/bin/pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu && \ + /seg_venv/bin/pip install -e . + +# Create venv for GaNDLF inpainting metrics +RUN python3 -m venv /ssim_venv && /ssim_venv/bin/pip install --upgrade pip +RUN git clone https://github.com/FelixSteinbauer/GaNDLF.git ssim_GaNDLF && \ + cd ssim_GaNDLF && git checkout bc0d3fa6c25b75728fbd9796380d9b82c5a2583f && \ + /ssim_venv/bin/pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu && \ + /ssim_venv/bin/pip install -e . + +# Prepare config file for inpainting metrics +RUN wget -O /ssim_config.yaml https://raw.githubusercontent.com/mlcommons/medperf/c347c9bbbd6428e120b6a760a0a0996aab182eb5/examples/BraTS2023/inpainting_metrics/mlcube/workspace/parameters.yaml + +# Install main script dependencies in a separate venv to avoid unexpected conflicts +COPY ./requirements.txt /mlcube_project/requirements.txt +RUN python3 -m venv /main_venv && /main_venv/bin/pip install -r /mlcube_project/requirements.txt + +# copy project files +COPY . 
/mlcube_project
+
+ENTRYPOINT ["/bin/bash", "/mlcube_project/entrypoint.sh"]
diff --git a/examples/BraTS2024/global_synthesis_metrics/project/README.md b/examples/BraTS2024/global_synthesis_metrics/project/README.md
new file mode 100644
index 000000000..5a7568a2b
--- /dev/null
+++ b/examples/BraTS2024/global_synthesis_metrics/project/README.md
@@ -0,0 +1,9 @@
+# How this MLCube was created
+
+
+## Build the MLCube
+
+```bash
+cd ../mlcube
+mlcube configure -Pdocker.build_strategy=always
+```
\ No newline at end of file
diff --git a/examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh b/examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh
new file mode 100644
index 000000000..6f8747081
--- /dev/null
+++ b/examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+# pretrained weights
+export nnUNet_raw="/workspace/raw_data_base"
+export nnUNet_preprocessed="/workspace/datasets"
+export nnUNet_results="/workspace/weights"
+export MKL_THREADING_LAYER=GNU
+
+# Read arguments
+while [ "${1:-}" != "" ]; do
+    case "$1" in
+        "--type"*)
+        type="${1#*=}"
+        ;;
+        "--predictions"*)
+        predictions="${1#*=}"
+        ;;
+        "--labels"*)
+        labels="${1#*=}"
+        ;;
+        "--output_path"*)
+        output_path="${1#*=}"
+        ;;
+        "--parameters_file"*)
+        parameters_file="${1#*=}"
+        ;;
+        *)
+        task=$1
+        ;;
+    esac
+    shift
+done
+
+# validate arguments
+if [ -z "$type" ]
+then
+    echo "--type is required"
+    exit 1
+fi
+
+
+if [ -z "$predictions" ]
+then
+    echo "--predictions is required"
+    exit 1
+fi
+
+if [ -z "$labels" ]
+then
+    echo "--labels is required"
+    exit 1
+fi
+
+if [ -z "$output_path" ]
+then
+    echo "--output_path is required"
+    exit 1
+fi
+
+if [ -z "$parameters_file" ]
+then
+    echo "--parameters_file is required"
+    exit 1
+fi
+
+if [ "$task" != "evaluate" ]
+then
+    echo "Invalid task: task should be evaluate"
+    exit 1
+fi
+
+# Prepare input data for the nnUNet segmentation tool
+/main_venv/bin/python /mlcube_project/prepare_data_input.py \
+    --predictions $predictions \
+    --labels $labels \
+    --parameters_file $parameters_file \
+    --intermediate_folder /data_renamed \
+    --ssim_csv /ssim_data.csv
+
+mkdir /seg_output_folder
+
+# Run glioma or metastasis segmentation tool
+if [ "$type" == "glioma" ]
+then
+    nnUNetv2_predict -d Dataset137_BraTS2021 -i "/data_renamed" -o "/seg_output_folder" -f 0 1 2 3 4 -tr nnUNetTrainer -c 3d_fullres -p nnUNetPlans
+else
+    nnUNetv2_predict -d Dataset133_BraTS_metasis_2024 -i "/data_renamed" -o "/seg_output_folder" -f 0 1 2 3 4 -tr nnUNetTrainer -c 3d_fullres -p nnUNetPlans
+fi
+
+# Process nnUNet tool output
+/main_venv/bin/python /mlcube_project/process_tool_output.py \
+    --intermediate_folder /seg_output_folder \
+    --labels $labels \
+    --parameters_file $parameters_file \
+    --seg_csv /seg_data.csv
+
+
+# Run segmentation metrics
+/seg_venv/bin/gandlf_generateBraTSMetrics -c BraTS-GLI -i /seg_data.csv -o /seg_metrics.yaml
+
+# Run ssim metrics
+/ssim_venv/bin/gandlf_generateMetrics -c /ssim_config.yaml -i /ssim_data.csv -o /ssim_metrics.yaml
+
+# write final metrics file
+/main_venv/bin/python /mlcube_project/write_metrics.py \
+    --segmentation_metrics /seg_metrics.yaml \
+    --ssim_metrics /ssim_metrics.yaml \
+    --output_path $output_path
\ No newline at end of file
diff --git a/examples/BraTS2024/global_synthesis_metrics/project/parse_gpu_require.py b/examples/BraTS2024/global_synthesis_metrics/project/parse_gpu_require.py
new file mode 100644
index 000000000..3e39c671a
--- /dev/null
+++ 
b/examples/BraTS2024/global_synthesis_metrics/project/parse_gpu_require.py @@ -0,0 +1,13 @@ +import argparse +import yaml + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--parameters_file") + + args = parser.parse_args() + with open(args.parameters_file) as f: + parameters = yaml.safe_load(f) + + requires_gpu = parameters.get("requires_gpu", True) + print(int(bool(requires_gpu))) diff --git a/examples/BraTS2024/global_synthesis_metrics/project/prepare_data_input.py b/examples/BraTS2024/global_synthesis_metrics/project/prepare_data_input.py new file mode 100644 index 000000000..5b9cdd795 --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/project/prepare_data_input.py @@ -0,0 +1,110 @@ +import argparse +import os +import shutil +import re +import yaml +import json +import pandas as pd + + +def prepare_data_input( + predictions, original_data, missing_modality_json, intermediate_folder, ssim_csv +): + modalities = {"t1c": "t1ce", "t1n": "t1", "t2f": "flair", "t2w": "t2"} + os.makedirs(intermediate_folder, exist_ok=True) + + # parse predictions + predictions_dict = {} + for file in os.listdir(predictions): + subj_id = file[-len("xxxxx-xxx-mmm.nii.gz") : -len("-mmm.nii.gz")] + predictions_dict[subj_id] = os.path.join(predictions, file) + + # read original data and rename + original_missing = {} + for subj in os.listdir(original_data): + pattern = r".*(\d{5}-\d{3})$" + reg = re.match(pattern, subj) + if not reg: + continue + subj_id = reg.groups()[0] + missing_modality = missing_modality_json[subj] + + folder = os.path.join(original_data, subj) + inter_folder = os.path.join(intermediate_folder, subj) + os.makedirs(inter_folder, exist_ok=True) + + for file in os.listdir(folder): + if file.endswith(f"{missing_modality}.nii.gz"): + original_missing[subj_id] = os.path.join(folder, file) + continue + file_path = os.path.join(folder, file) + for modality in modalities: + if modality == missing_modality: + continue + suffix = f"-{modality}.nii.gz" + if file.endswith(suffix): + newfile = file.replace(suffix, f"_{modalities[modality]}.nii.gz") + newfile = os.path.join(inter_folder, newfile) + shutil.copyfile(file_path, newfile) + break + + # move the prediction + prediction = predictions_dict[subj_id] + assert prediction.endswith( + f"{missing_modality}.nii.gz" + ), "Prediction is not the missing modality" + prediction_name = os.path.basename(prediction) + suffix = f"-{missing_modality}.nii.gz" + newfile = prediction_name.replace( + suffix, f"_{modalities[missing_modality]}.nii.gz" + ) + newfile = os.path.join(inter_folder, newfile) + shutil.copyfile(prediction, newfile) + + # prepare data csv for ssim + input_data = [] + for subject_id in predictions_dict: + prediction_record = { + "SubjectID": subject_id, + "Prediction": predictions_dict[subject_id], + "Target": original_missing[subject_id], + } + input_data.append(prediction_record) + + input_data_df = pd.DataFrame(input_data) + os.makedirs(os.path.dirname(os.path.abspath(ssim_csv)), exist_ok=True) + input_data_df.to_csv(ssim_csv, index=False) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--predictions") + parser.add_argument("--labels") + parser.add_argument("--intermediate_folder") + parser.add_argument("--ssim_csv") + parser.add_argument("--parameters_file") + + args = parser.parse_args() + labels = args.labels + predictions = args.predictions + intermediate_folder = args.intermediate_folder + ssim_csv = args.ssim_csv + + with 
open(args.parameters_file) as f: + parameters = yaml.safe_load(f) + + original_data_in_labels = parameters["original_data_in_labels"] + missing_modality_json = parameters["missing_modality_json"] + segmentation_labels = parameters["segmentation_labels"] + + original_data_in_labels = os.path.join(labels, original_data_in_labels) + missing_modality_json = json.load(open(os.path.join(labels, missing_modality_json))) + segmentation_labels_folder = os.path.join(labels, segmentation_labels) + + prepare_data_input( + predictions, + original_data_in_labels, + missing_modality_json, + intermediate_folder, + ssim_csv, + ) diff --git a/examples/BraTS2024/global_synthesis_metrics/project/process_tool_output.py b/examples/BraTS2024/global_synthesis_metrics/project/process_tool_output.py new file mode 100644 index 000000000..c7d10f945 --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/project/process_tool_output.py @@ -0,0 +1,91 @@ +import argparse +import os +import re +import yaml +import json +import pandas as pd +import SimpleITK as sitk + + +def convert_to_brats_label_style(tool_outputs_dict): + output_folder = "/corrected_predictions" + os.makedirs(output_folder, exist_ok=True) + tool_outputs_dict_corrected = {} + for subj, path in tool_outputs_dict.items(): + img = sitk.ReadImage(path) + mask_array = sitk.GetArrayFromImage(img) + mask_array[mask_array == 4] = 3 # set the label from FeTS style to BraTS style + new_img = sitk.GetImageFromArray(mask_array) + new_img.CopyInformation(img) # copy all meta information from the previous mask + target_path = os.path.join(output_folder, os.path.basename(path)) + sitk.WriteImage(new_img, target_path) + tool_outputs_dict_corrected[subj] = target_path + + return tool_outputs_dict_corrected + + +def process_tool_output( + intermediate_folder, segmentation_labels_folder, seg_csv +): + # collect tool output + tool_outputs_dict = {} + pattern = r".*(\d{5}-\d{3})\.nii\.gz$" + for subj in os.listdir(intermediate_folder): + reg = re.match(pattern, subj) + if not reg: + continue + + subj_id = subj.split(".")[0] + if os.path.isdir(subj): + continue + + tool_outputs_dict[subj_id] = os.path.join(intermediate_folder, subj) + + tool_outputs_dict = convert_to_brats_label_style(tool_outputs_dict) + + # Read labels + labels_dict = {} + pattern = r".*(\d{5}-\d{3})-seg\.nii\.gz$" + for file in os.listdir(segmentation_labels_folder): + reg = re.match(pattern, file) + if not reg: + continue + subject_id = reg.groups()[0] + labels_dict[subject_id] = os.path.join(segmentation_labels_folder, file) + + # create csv + input_data = [] + for subject_id in tool_outputs_dict: + prediction_record = { + "SubjectID": subject_id, + "Prediction": tool_outputs_dict[subject_id], + "Target": labels_dict[subject_id], + } + input_data.append(prediction_record) + + input_data_df = pd.DataFrame(input_data) + os.makedirs(os.path.dirname(os.path.abspath(seg_csv)), exist_ok=True) + input_data_df.to_csv(seg_csv, index=False) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--intermediate_folder") + parser.add_argument("--labels") + parser.add_argument("--parameters_file") + parser.add_argument("--seg_csv") + + args = parser.parse_args() + intermediate_folder = args.intermediate_folder + labels = args.labels + seg_csv = args.seg_csv + + with open(args.parameters_file) as f: + parameters = yaml.safe_load(f) + + segmentation_labels = parameters["segmentation_labels"] + segmentation_labels_folder = os.path.join(labels, segmentation_labels) + + 
process_tool_output( + intermediate_folder, segmentation_labels_folder, seg_csv + ) diff --git a/examples/BraTS2024/global_synthesis_metrics/project/requirements.txt b/examples/BraTS2024/global_synthesis_metrics/project/requirements.txt new file mode 100644 index 000000000..db396922d --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/project/requirements.txt @@ -0,0 +1,4 @@ +pyYAML +numpy +SimpleITK>=2.1.0 +pandas diff --git a/examples/BraTS2024/global_synthesis_metrics/project/write_metrics.py b/examples/BraTS2024/global_synthesis_metrics/project/write_metrics.py new file mode 100644 index 000000000..4a7e4322f --- /dev/null +++ b/examples/BraTS2024/global_synthesis_metrics/project/write_metrics.py @@ -0,0 +1,27 @@ +import argparse +import os +import yaml + + +def main(segmentation_metrics, ssim_metrics, output_path): + with open(segmentation_metrics) as f: + segmentation_metrics = yaml.safe_load(f) + + with open(ssim_metrics) as f: + ssim_metrics = yaml.safe_load(f) + ssim_metrics = {key: val["ssim"] for key, val in ssim_metrics.items()} + + metrics = {"segmentation": segmentation_metrics, "ssim": ssim_metrics} + os.makedirs(os.path.dirname(output_path), exist_ok=True) + with open(output_path, "w") as f: + yaml.dump(metrics, f) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--segmentation_metrics") + parser.add_argument("--ssim_metrics") + parser.add_argument("--output_path") + + args = parser.parse_args() + main(args.segmentation_metrics, args.ssim_metrics, args.output_path) From c67f7ab5e35408186eaf9545eb0eeac26aaa2685 Mon Sep 17 00:00:00 2001 From: winstonhuTiger <2953620996@qq.com> Date: Wed, 28 Aug 2024 20:57:25 -0400 Subject: [PATCH 2/3] update dockerfile --- .../BraTS2024/global_synthesis_metrics/project/Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/BraTS2024/global_synthesis_metrics/project/Dockerfile b/examples/BraTS2024/global_synthesis_metrics/project/Dockerfile index f63219712..a5c7420ba 100644 --- a/examples/BraTS2024/global_synthesis_metrics/project/Dockerfile +++ b/examples/BraTS2024/global_synthesis_metrics/project/Dockerfile @@ -3,18 +3,19 @@ FROM winstonhutiger/brasyn_nnunet:glioma_metasis # At the time of writing this, segmentation metrics and other # metrics are in different branches. Create two separate # virtual envs. +# RUN apt-get update && apt install git-all -y # Create venv for GaNDLF segmentation metrics RUN python3 -m venv /seg_venv && /seg_venv/bin/pip install --upgrade pip RUN git clone https://github.com/rachitsaluja/GaNDLF.git seg_GaNDLF && \ - cd seg_GaNDLF && git checkout c2a2c1cc6fc1d307a70068160066acdf1e8cd8bc && \ + cd seg_GaNDLF && \ /seg_venv/bin/pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu && \ /seg_venv/bin/pip install -e . # Create venv for GaNDLF inpainting metrics RUN python3 -m venv /ssim_venv && /ssim_venv/bin/pip install --upgrade pip RUN git clone https://github.com/FelixSteinbauer/GaNDLF.git ssim_GaNDLF && \ - cd ssim_GaNDLF && git checkout bc0d3fa6c25b75728fbd9796380d9b82c5a2583f && \ + cd ssim_GaNDLF && \ /ssim_venv/bin/pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu && \ /ssim_venv/bin/pip install -e . 
From 707746d937aaad779b9ef5b56c29fc487d1fa03c Mon Sep 17 00:00:00 2001
From: winstonhuTiger <2953620996@qq.com>
Date: Fri, 13 Sep 2024 20:17:16 -0400
Subject: [PATCH 3/3] move type to yaml configuration file

---
 .../mlcube/workspace/parameters.yaml          |  4 +++-
 .../mlcube/workspace/parameters_gpu.yaml      |  4 +++-
 .../project/entrypoint.sh                     | 22 ++++++++++--------
 3 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml
index 423afa8ce..f823e4508 100644
--- a/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml
+++ b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters.yaml
@@ -1,4 +1,6 @@
 original_data_in_labels: original_data
 missing_modality_json: "missing.json"
 segmentation_labels: segmentation_labels
-requires_gpu: False
\ No newline at end of file
+requires_gpu: False
+disease_type: glioma
+# disease_type can only be glioma or metastasis
\ No newline at end of file
diff --git a/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml
index a81625ca2..4c1014960 100644
--- a/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml
+++ b/examples/BraTS2024/global_synthesis_metrics/mlcube/workspace/parameters_gpu.yaml
@@ -1,4 +1,6 @@
 original_data_in_labels: original_data
 missing_modality_json: "missing.json"
 segmentation_labels: segmentation_labels
-requires_gpu: True
\ No newline at end of file
+requires_gpu: True
+disease_type: metastasis
+# disease_type can only be glioma or metastasis
\ No newline at end of file
diff --git a/examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh b/examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh
index 6f8747081..f945fe9b6 100644
--- a/examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh
+++ b/examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh
@@ -9,9 +9,6 @@ export MKL_THREADING_LAYER=GNU
 # Read arguments
 while [ "${1:-}" != "" ]; do
     case "$1" in
-        "--type"*)
-        type="${1#*=}"
-        ;;
         "--predictions"*)
         predictions="${1#*=}"
         ;;
@@ -31,12 +28,6 @@ while [ "${1:-}" != "" ]; do
     shift
 done
 
-# validate arguments
-if [ -z "$type" ]
-then
-    echo "--type is required"
-    exit 1
-fi
 
 
 if [ -z "$predictions" ]
@@ -69,6 +60,14 @@ then
     exit 1
 fi
 
+type=$(yq -r '.disease_type' $parameters_file)
+if [[ $type =~ ^("metastasis"|"glioma")$ ]]; then
+  echo "$type is valid"
+else
+  echo "$type is not valid"
+  exit 1
+fi
+
 # Prepare input data for the nnUNet segmentation tool
 /main_venv/bin/python /mlcube_project/prepare_data_input.py \
     --predictions $predictions \
@@ -79,11 +78,14 @@ fi
 
 mkdir /seg_output_folder
 
+
 # Run glioma or metastasis segmentation tool
 if [ "$type" == "glioma" ]
 then
     nnUNetv2_predict -d Dataset137_BraTS2021 -i "/data_renamed" -o "/seg_output_folder" -f 0 1 2 3 4 -tr nnUNetTrainer -c 3d_fullres -p nnUNetPlans
-else
+fi
+
+if [ "$type" == "metastasis" ]; then
     nnUNetv2_predict -d Dataset133_BraTS_metasis_2024 -i "/data_renamed" -o "/seg_output_folder" -f 0 1 2 3 4 -tr nnUNetTrainer -c 3d_fullres -p nnUNetPlans
 fi
 
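
For anyone exercising the final state of this MLCube locally, the following is a minimal sketch (not part of the patches above). It assumes the `mlcube` CLI and Docker are available; the build step mirrors the README added in PATCH 1/3, the task and parameter names come from `mlcube.yaml`, and the `predictions`/`labels`/`output_path` values are illustrative placeholders.

```bash
# Build the metrics image and run the evaluate task (sketch; paths are placeholders).
cd examples/BraTS2024/global_synthesis_metrics/mlcube
mlcube configure -Pdocker.build_strategy=always
mlcube run --task evaluate \
    predictions=/path/to/predictions \
    labels=/path/to/labels \
    output_path=/path/to/results.yaml
```

The `evaluate` task reads `parameters.yaml` from the workspace by default, so switching between the glioma and metastasis nnUNet models only requires editing `disease_type` there.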