Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions examples/BraTS2024/global_synthesis_metrics/mlcube/mlcube.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
name: BraTS2024 Missing MRI task Metrics
description: BraTS2024 Missing MRI task Metrics
authors:
  - name: MLCommons Medical Working Group

platform:
  # CPU-only variant of this MLCube.
  accelerator_count: 0

docker:
  # Image name
  image: mlcommons/brats2024-global-synthesis-metrics:0.0.0
  # Docker build context relative to $MLCUBE_ROOT. Default is `build`.
  build_context: "../project"
  # Docker file name within docker build context, default is `Dockerfile`.
  build_file: "Dockerfile"

tasks:
  evaluate:
    # Computes evaluation metrics on the given predictions and ground truths
    parameters:
      inputs:
        predictions: predictions
        labels: labels
        parameters_file: parameters.yaml
      outputs:
        output_path:
          type: "file"
          default: "results.yaml"
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
name: BraTS2024 Missing MRI task Metrics
description: BraTS2024 Missing MRI task Metrics
authors:
  - name: MLCommons Medical Working Group

platform:
  # GPU variant of this MLCube.
  accelerator_count: 1

docker:
  # Image name
  image: mlcommons/brats2024-global-synthesis-metrics:0.0.0
  # Docker build context relative to $MLCUBE_ROOT. Default is `build`.
  build_context: "../project"
  # Docker file name within docker build context, default is `Dockerfile`.
  build_file: "Dockerfile"
  # Expose all host GPUs to the container.
  gpu_args: --gpus all

tasks:
  evaluate:
    # Computes evaluation metrics on the given predictions and ground truths
    parameters:
      inputs:
        predictions: predictions
        labels: labels
        parameters_file: parameters.yaml
      outputs:
        output_path:
          type: "file"
          default: "results.yaml"
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Sub-folder of the labels directory holding the original (complete) scans.
original_data_in_labels: original_data
# JSON file (inside labels) mapping each subject folder to its missing modality.
missing_modality_json: "missing.json"
# Sub-folder of the labels directory holding ground-truth segmentations.
segmentation_labels: segmentation_labels
# FIX: canonical lowercase boolean (yamllint `truthy`); parses identically to `False`.
requires_gpu: false
disease_type: glioma
# disease_type can only be glioma or metastasis
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Sub-folder of the labels directory holding the original (complete) scans.
original_data_in_labels: original_data
# JSON file (inside labels) mapping each subject folder to its missing modality.
missing_modality_json: "missing.json"
# Sub-folder of the labels directory holding ground-truth segmentations.
segmentation_labels: segmentation_labels
# FIX: canonical lowercase boolean (yamllint `truthy`); parses identically to `True`.
requires_gpu: true
disease_type: metastasis
# disease_type can only be glioma or metastasis
32 changes: 32 additions & 0 deletions examples/BraTS2024/global_synthesis_metrics/project/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Base image — presumably ships the pretrained nnU-Net models that
# entrypoint.sh invokes (tag spells "metasis"); TODO confirm contents.
FROM winstonhutiger/brasyn_nnunet:glioma_metasis

# At the time of writing this, segmentation metrics and other
# metrics are in different branches. Create two separate
# virtual envs.
# RUN apt-get update && apt install git-all -y

# Create venv for GaNDLF segmentation metrics
# NOTE(review): clones a fork's default branch HEAD — build is not
# reproducible; consider pinning a commit. Torch is pinned to CPU wheels.
RUN python3 -m venv /seg_venv && /seg_venv/bin/pip install --upgrade pip
RUN git clone https://github.com/rachitsaluja/GaNDLF.git seg_GaNDLF && \
    cd seg_GaNDLF && \
    /seg_venv/bin/pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu && \
    /seg_venv/bin/pip install -e .

# Create venv for GaNDLF inpainting metrics
# NOTE(review): same unpinned-HEAD caveat as above.
RUN python3 -m venv /ssim_venv && /ssim_venv/bin/pip install --upgrade pip
RUN git clone https://github.com/FelixSteinbauer/GaNDLF.git ssim_GaNDLF && \
    cd ssim_GaNDLF && \
    /ssim_venv/bin/pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu && \
    /ssim_venv/bin/pip install -e .

# Prepare config file for inpainting metrics
# (pinned to a specific medperf commit, so this fetch is reproducible)
RUN wget -O /ssim_config.yaml https://raw.githubusercontent.com/mlcommons/medperf/c347c9bbbd6428e120b6a760a0a0996aab182eb5/examples/BraTS2023/inpainting_metrics/mlcube/workspace/parameters.yaml

# Install main script dependencies in a separate venv to avoid unexpected conflicts
# (requirements.txt is copied alone first so this layer caches across code edits)
COPY ./requirements.txt /mlcube_project/requirements.txt
RUN python3 -m venv /main_venv && /main_venv/bin/pip install -r /mlcube_project/requirements.txt

# copy project files
COPY . /mlcube_project

ENTRYPOINT ["/bin/bash", "/mlcube_project/entrypoint.sh"]
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# How this MLCube was created


## Build the MLCube

```bash
cd ../mlcube
mlcube configure -Pdocker.build_strategy=always
```
110 changes: 110 additions & 0 deletions examples/BraTS2024/global_synthesis_metrics/project/entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
#!/bin/bash
# Entrypoint for the BraTS2024 global-synthesis (missing MRI) metrics MLCube.
# Expects MLCube-style "--key=value" arguments plus the task name ("evaluate").

# nnU-Net environment — pretrained weights are baked into the image.
export nnUNet_raw="/workspace/raw_data_base"
export nnUNet_preprocessed="/workspace/datasets"
export nnUNet_results="/workspace/weights"
export MKL_THREADING_LAYER=GNU

# Read arguments. Each option arrives as --name=value: the trailing `*` in the
# case pattern matches the "=value" tail, and ${1#*=} strips through the first
# "=". Any other token is taken as the task name.
while [ "${1:-}" != "" ]; do
    case "$1" in
        "--predictions"*)
            predictions="${1#*=}"
            ;;
        "--labels"*)
            labels="${1#*=}"
            ;;
        "--output_path"*)
            output_path="${1#*=}"
            ;;
        "--parameters_file"*)
            parameters_file="${1#*=}"
            ;;
        *)
            task=$1
            ;;
    esac
    shift
done

# Validate required arguments.
if [ -z "$predictions" ]; then
    echo "--predictions is required"
    exit 1
fi

if [ -z "$labels" ]; then
    echo "--labels is required"
    exit 1
fi

if [ -z "$output_path" ]; then
    echo "--output_path is required"
    exit 1
fi

if [ -z "$parameters_file" ]; then
    echo "--parameters_file is required"
    exit 1
fi

if [ "$task" != "evaluate" ]; then
    echo "Invalid task: task should be evaluate"
    exit 1
fi

# Disease type selects which nnU-Net dataset/model is run below.
type=$(yq -r '.disease_type' "$parameters_file")
if [[ $type =~ ^("metastasis"|"glioma")$ ]]; then
    echo "$type is valid"
else
    echo "$type is not valid"
    exit 1
fi

# Stage input data for the segmentation tool and build the SSIM CSV.
/main_venv/bin/python /mlcube_project/prepare_data_input.py \
    --predictions "$predictions" \
    --labels "$labels" \
    --parameters_file "$parameters_file" \
    --intermediate_folder /data_renamed \
    --ssim_csv /ssim_data.csv

# -p: don't fail if the folder already exists (e.g. container re-run).
mkdir -p /seg_output_folder

# Run glioma segmentation tool or metastasis tool
if [ "$type" == "glioma" ]; then
    nnUNetv2_predict -d Dataset137_BraTS2021 -i "/data_renamed" -o "/seg_output_folder" -f 0 1 2 3 4 -tr nnUNetTrainer -c 3d_fullres -p nnUNetPlans
fi

# FIX: the original was missing `then` on this branch — a bash syntax error
# that aborted the whole script before any metrics were computed.
if [ "$type" == "metastasis" ]; then
    nnUNetv2_predict -d Dataset133_BraTS_metasis_2024 -i "/data_renamed" -o "/seg_output_folder" -f 0 1 2 3 4 -tr nnUNetTrainer -c 3d_fullres -p nnUNetPlans
fi

# Process nnunet tool output into the segmentation metrics CSV.
# (FIX: dropped the stray trailing "\" after --seg_csv, which silently folded
# the following blank line into this command.)
/main_venv/bin/python /mlcube_project/process_tool_output.py \
    --intermediate_folder /seg_output_folder \
    --labels "$labels" \
    --parameters_file "$parameters_file" \
    --seg_csv /seg_data.csv

# Run segmentation metrics
# NOTE(review): config is BraTS-GLI even when type=metastasis — confirm intended.
/seg_venv/bin/gandlf_generateBraTSMetrics -c BraTS-GLI -i /seg_data.csv -o /seg_metrics.yaml

# Run ssim metrics
/ssim_venv/bin/gandlf_generateMetrics -c /ssim_config.yaml -i /ssim_data.csv -o /ssim_metrics.yaml

# write final metrics file
/main_venv/bin/python /mlcube_project/write_metrics.py \
    --segmentation_metrics /seg_metrics.yaml \
    --ssim_metrics /ssim_metrics.yaml \
    --output_path "$output_path"
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
"""Print "1" if the MLCube parameters file requests a GPU, else "0"."""
import argparse
import yaml

if __name__ == "__main__":
    # Single CLI option: path to the workspace parameters.yaml.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--parameters_file")
    cli_args = arg_parser.parse_args()

    with open(cli_args.parameters_file) as stream:
        params = yaml.safe_load(stream)

    # Missing key defaults to True (GPU assumed unless explicitly disabled).
    gpu_flag = params.get("requires_gpu", True)
    # Emit 1/0 so shell callers can consume the value directly.
    print(1 if gpu_flag else 0)
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
import argparse
import os
import shutil
import re
import yaml
import json
import pandas as pd


def prepare_data_input(
    predictions, original_data, missing_modality_json, intermediate_folder, ssim_csv
):
    """Stage per-subject scans for the segmentation tool and build the SSIM CSV.

    Copies each subject's available real modalities plus the predicted
    (synthesized) missing modality into ``intermediate_folder``, renaming
    BraTS-style suffixes (e.g. ``-t1c.nii.gz``) to legacy names
    (e.g. ``_t1ce.nii.gz``), and writes a CSV pairing each prediction with the
    held-out original scan for SSIM metrics.

    Args:
        predictions: folder of predicted missing-modality NIfTI files.
        original_data: folder of per-subject sub-folders with original scans.
        missing_modality_json: dict mapping subject folder name -> missing
            modality key ("t1c"/"t1n"/"t2f"/"t2w").
        intermediate_folder: destination folder for the renamed data.
        ssim_csv: output path of the SSIM input CSV (SubjectID/Prediction/Target).
    """
    # BraTS2024 modality suffix -> legacy modality name expected by the tool.
    modalities = {"t1c": "t1ce", "t1n": "t1", "t2f": "flair", "t2w": "t2"}
    os.makedirs(intermediate_folder, exist_ok=True)

    # parse predictions
    # Assumes filenames end with "<xxxxx-xxx>-<mod>.nii.gz"; slice the
    # fixed-width subject id out of the tail. TODO(review): confirm naming.
    predictions_dict = {}
    for file in os.listdir(predictions):
        subj_id = file[-len("xxxxx-xxx-mmm.nii.gz") : -len("-mmm.nii.gz")]
        predictions_dict[subj_id] = os.path.join(predictions, file)

    # read original data and rename
    original_missing = {}
    for subj in os.listdir(original_data):
        # Subject folders end in "<5 digits>-<3 digits>"; skip anything else.
        pattern = r".*(\d{5}-\d{3})$"
        reg = re.match(pattern, subj)
        if not reg:
            continue
        subj_id = reg.groups()[0]
        # KeyError here means the subject folder is absent from the JSON map.
        missing_modality = missing_modality_json[subj]

        folder = os.path.join(original_data, subj)
        inter_folder = os.path.join(intermediate_folder, subj)
        os.makedirs(inter_folder, exist_ok=True)

        for file in os.listdir(folder):
            # The held-out original scan becomes the SSIM target; don't copy it.
            if file.endswith(f"{missing_modality}.nii.gz"):
                original_missing[subj_id] = os.path.join(folder, file)
                continue
            file_path = os.path.join(folder, file)
            # Copy the remaining real modalities under their legacy names.
            for modality in modalities:
                if modality == missing_modality:
                    continue
                suffix = f"-{modality}.nii.gz"
                if file.endswith(suffix):
                    newfile = file.replace(suffix, f"_{modalities[modality]}.nii.gz")
                    newfile = os.path.join(inter_folder, newfile)
                    shutil.copyfile(file_path, newfile)
                    break

        # move the prediction
        # (KeyError here means this subject has no prediction file.)
        prediction = predictions_dict[subj_id]
        assert prediction.endswith(
            f"{missing_modality}.nii.gz"
        ), "Prediction is not the missing modality"
        prediction_name = os.path.basename(prediction)
        suffix = f"-{missing_modality}.nii.gz"
        newfile = prediction_name.replace(
            suffix, f"_{modalities[missing_modality]}.nii.gz"
        )
        newfile = os.path.join(inter_folder, newfile)
        shutil.copyfile(prediction, newfile)

    # prepare data csv for ssim
    # One row per prediction: predicted scan vs. the held-out original scan.
    input_data = []
    for subject_id in predictions_dict:
        prediction_record = {
            "SubjectID": subject_id,
            "Prediction": predictions_dict[subject_id],
            "Target": original_missing[subject_id],
        }
        input_data.append(prediction_record)

    input_data_df = pd.DataFrame(input_data)
    os.makedirs(os.path.dirname(os.path.abspath(ssim_csv)), exist_ok=True)
    input_data_df.to_csv(ssim_csv, index=False)


if __name__ == "__main__":
    # CLI surface used by entrypoint.sh.
    parser = argparse.ArgumentParser()
    parser.add_argument("--predictions")
    parser.add_argument("--labels")
    parser.add_argument("--intermediate_folder")
    parser.add_argument("--ssim_csv")
    parser.add_argument("--parameters_file")

    args = parser.parse_args()
    labels = args.labels
    predictions = args.predictions
    intermediate_folder = args.intermediate_folder
    ssim_csv = args.ssim_csv

    # Runtime configuration (see workspace parameters.yaml).
    with open(args.parameters_file) as f:
        parameters = yaml.safe_load(f)

    original_data_in_labels = parameters["original_data_in_labels"]
    missing_modality_json = parameters["missing_modality_json"]
    segmentation_labels = parameters["segmentation_labels"]

    # All configured paths are relative to the labels directory.
    original_data_in_labels = os.path.join(labels, original_data_in_labels)
    # FIX: use a context manager so the JSON file handle is closed; the
    # original used bare json.load(open(...)), leaking the handle.
    with open(os.path.join(labels, missing_modality_json)) as json_file:
        missing_modality_json = json.load(json_file)
    # NOTE(review): unused in this script; kept for parity with the config schema.
    segmentation_labels_folder = os.path.join(labels, segmentation_labels)

    prepare_data_input(
        predictions,
        original_data_in_labels,
        missing_modality_json,
        intermediate_folder,
        ssim_csv,
    )
Loading
Loading