
Dp sf pap (#544)
* use both _1 and _2 segmentations

* use both _1 and _2 segmentations

* TEMP: add dice metrics, copied from neuron

* ENH: Use dice loss and metrics

* ENH: Remove kidney label and merge body/background labels

* FIX: Fix bad number of channels

* ENH: Use only one channel of the input image

* WIP: hacking bottom of U-net

* ENH: Add mean and std for normalization

* ENH: Add neurite and voxelmorph to docker

* FIX: Use dice loss from neurite

* STYLE: Fix up WIP code on hacking bottom of U-net

* ENH: Add merged paps for segmentation tensormap

* WIP: Fix Unet concats

* FIX: Fix soft dice metrics

* ENH: Add plot_dice to compare

* ENH: Add median computation for papillary segmentation project

* FIX: Fix double plot on one graph

* ENH: Allow generator to have empty path, e.g., to test on all images

* ENH: Prune list of structures for which we do stats

* STYLE: rearranging

* WIP: Handle inference without ground truth labels

* ENH: Remove option for merged paps

* FIX: Get all b2s images, instance_2s only

* ENH: Add mri dates

* FIX: Fix normalization with correct padding

* FIX: Fix soft dice metrics again

* COMP: Add option for environment variable for jupyter notebooks

* WIP: data augmentation

* WIP: Better scatter plots for medians

* ENH: Report std too

* ENH: Improve dice plots for a single model

* ENH: Log Pearson correlation coefficients

* STYLE: Adding TODOs to fix tensor_generators

* WIP: Add temporary code to save Dice scores

* WIP: Add temporary code for plotting medians

* STYLE: Clean up code for infer_medians

* STYLE: Clean up medians code

* STYLE: Add command-line args for median computations

* ENH: Add percentiles and tsv for dice calculations

* STYLE: Add command-line args for data augmentation

* ENH: Improve log files for dice compare

* STYLE: Small edits requested in PR

* STYLE: docstring and typehints for plot_dice

* STYLE: docstring for infer_statistics_from_segmented_regions

* STYLE: Docstring and typehints for augment_using_layers

---------

Co-authored-by: Sam Freesun Friedman <[email protected]>
daniellepace and lucidtronix authored Nov 22, 2023
1 parent 5eeba94 commit f65ae7a
Showing 13 changed files with 676 additions and 49 deletions.
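
Several of the commits above are about getting the soft Dice loss and metrics right (the dice metrics copied from neuron, "Use dice loss from neurite", and the two "Fix soft dice metrics" commits). As a point of reference only, here is a minimal sketch of a per-channel soft Dice on one-hot labels and softmax predictions, assuming TensorFlow; it is written from the standard formula and is not the neurite implementation the commits actually use.

import tensorflow as tf

def soft_dice_per_channel(y_true, y_pred, eps=1e-6):
    # y_true: one-hot ground truth, y_pred: softmax probabilities, both shaped (batch, H, W, n_labels)
    spatial_axes = [1, 2]
    intersection = tf.reduce_sum(y_true * y_pred, axis=spatial_axes)
    denominator = tf.reduce_sum(y_true + y_pred, axis=spatial_axes)
    dice = (2.0 * intersection + eps) / (denominator + eps)  # shape (batch, n_labels)
    return tf.reduce_mean(dice, axis=0)  # mean Dice per label over the batch

def soft_dice_loss(y_true, y_pred):
    # 1 minus the mean Dice over all label channels, usable as a Keras loss
    return 1.0 - tf.reduce_mean(soft_dice_per_channel(y_true, y_pred))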
4 changes: 4 additions & 0 deletions docker/vm_boot_images/config/tensorflow-requirements.txt
@@ -41,3 +41,7 @@ boto3
 ml4ht==0.0.10
 google-cloud-storage
 umap-learn[plot]
+neurite
+voxelmorph
+pystrum

18 changes: 17 additions & 1 deletion ml4h/arguments.py
@@ -72,7 +72,7 @@ def parse_args():
     parser.add_argument('--dicoms', default='./dicoms/', help='Path to folder of dicoms.')
     parser.add_argument('--sample_csv', default=None, help='Path to CSV with Sample IDs to restrict tensor paths')
     parser.add_argument('--tsv_style', default='standard', choices=['standard', 'genetics'], help='Format choice for the TSV file produced in output by infer and explore modes.')
-    parser.add_argument('--app_csv', help='Path to file used to link sample IDs between UKBB applications 17488 and 7089')
+    parser.add_argument('--app_csv', help='Path to file used by the recipe')
     parser.add_argument('--tensors', help='Path to folder containing tensors, or where tensors will be written.')
     parser.add_argument('--output_folder', default='./recipes_output/', help='Path to output folder for recipes.py runs.')
     parser.add_argument('--model_file', help='Path to a saved model architecture and weights (hd5).')
@@ -189,6 +189,9 @@ def parse_args():
     parser.add_argument('--pool_z', default=1, type=int, help='Pooling size in the z-axis, if 1 no pooling will be performed.')
     parser.add_argument('--padding', default='same', help='Valid or same border padding on the convolutional layers.')
     parser.add_argument('--dense_blocks', nargs='*', default=[32, 32, 32], type=int, help='List of number of kernels in convolutional layers.')
+    parser.add_argument('--merge_dimension', default=3, type=int, help='Dimension of the merge layer.')
+    parser.add_argument('--merge_dense_blocks', nargs='*', default=[32], type=int, help='List of number of kernels in convolutional merge layer.')
+    parser.add_argument('--decoder_dense_blocks', nargs='*', default=[32, 32, 32], type=int, help='List of number of kernels in convolutional decoder layers.')
     parser.add_argument('--encoder_blocks', nargs='*', default=['conv_encode'], help='List of encoding blocks.')
     parser.add_argument('--merge_blocks', nargs='*', default=['concat'], help='List of merge blocks.')
     parser.add_argument('--decoder_blocks', nargs='*', default=['conv_decode', 'dense_decode'], help='List of decoding blocks.')
@@ -269,6 +272,11 @@
         help='If true saves the model weights from the last training epoch, otherwise the model with best validation loss is saved.',
     )

+    # 2D image data augmentation parameters
+    parser.add_argument('--rotation_factor', default=0., type=float, help='For data augmentation: a float represented as a fraction of 2 Pi, e.g., rotation_factor = 0.014 results in an output rotated by a random amount in the range [-5 degrees, 5 degrees].')
+    parser.add_argument('--zoom_factor', default=0., type=float, help='For data augmentation: a float represented as a fraction of value, e.g., zoom_factor = 0.05 results in an output zoomed by a random amount in the range [-5%, 5%].')
+    parser.add_argument('--translation_factor', default=0., type=float, help='For data augmentation: a float represented as a fraction of value, e.g., translation_factor = 0.05 results in an output shifted by a random amount in the range [-5%, 5%] in the x- and y-directions.')
+
     # Run specific and debugging arguments
     parser.add_argument('--id', default='no_id', help='Identifier for this run, user-defined string to keep experiments organized.')
     parser.add_argument('--random_seed', default=12878, type=int, help='Random seed to use throughout run. Always use np.random.')
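
The three augmentation factors added in the hunk above follow the conventions of the Keras image preprocessing layers: the rotation factor is a fraction of 2 Pi, while the zoom and translation factors are fractions of the image size. A minimal sketch of how such layers could be stacked, assuming tf.keras; the helper below is illustrative and is not the augment_using_layers function referenced in the commit messages.

import tensorflow as tf

def build_augmentation(rotation_factor=0.0, zoom_factor=0.0, translation_factor=0.0):
    # Sequential 2D augmentation; a factor of 0 disables the corresponding transform.
    layers = []
    if rotation_factor:
        layers.append(tf.keras.layers.RandomRotation(rotation_factor))
    if zoom_factor:
        layers.append(tf.keras.layers.RandomZoom(zoom_factor))
    if translation_factor:
        layers.append(tf.keras.layers.RandomTranslation(translation_factor, translation_factor))
    return tf.keras.Sequential(layers, name='augment')

# Example: roughly +/-5 degree rotations with 5% zoom and shift, active only when training=True
# augment = build_augmentation(rotation_factor=0.014, zoom_factor=0.05, translation_factor=0.05)
# augmented_batch = augment(image_batch, training=True)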
@@ -376,6 +384,14 @@
         default='3M',
     )

+    # Arguments for explorations/infer_stats_from_segmented_regions
+    parser.add_argument('--analyze_ground_truth', default=True, help='Whether to restrict analysis to images with ground truth segmentations, for comparison')
+    parser.add_argument('--structures_to_analyze', nargs='*', default=[], help='Structure names to include in the .tsv files and scatter plots')
+    parser.add_argument('--erosion_radius', default=1, type=int, help='Radius of the unit disk structuring element for erosion preprocessing')
+    parser.add_argument('--intensity_thresh', type=float, help='Threshold value for preprocessing')
+    parser.add_argument('--intensity_thresh_in_structures', nargs='*', default=[], help='Structure names whose pixels should be replaced if the image intensity is above the threshold')
+    parser.add_argument('--intensity_thresh_out_structure', help='Replacement structure name')
+
     # TensorMap prefix for convenience
     parser.add_argument('--tensormap_prefix', default="ml4h.tensormap", type=str, help="Module prefix path for TensorMaps. Defaults to \"ml4h.tensormap\"")

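Taken together, the flags added for explorations/infer_stats_from_segmented_regions describe a preprocessing pipeline: optionally reassign pixels of the named structures to a replacement structure wherever the image intensity exceeds the threshold, then erode each structure's mask with a unit-disk structuring element before computing statistics such as the median intensity. A minimal sketch of that pipeline under those assumptions, using numpy and scikit-image; the function and its signature are illustrative, not the ml4h code.

import numpy as np
from skimage.morphology import disk, binary_erosion

def stats_from_segmented_regions(image, labels, channel_map, erosion_radius=1,
                                 intensity_thresh=None, thresh_in_structures=(), thresh_out_structure=None):
    # image: 2D float array; labels: 2D int array of channel indices from channel_map
    labels = labels.copy()
    if intensity_thresh is not None and thresh_out_structure is not None:
        # Replace bright pixels inside the selected structures with the replacement structure.
        in_mask = np.isin(labels, [channel_map[s] for s in thresh_in_structures])
        labels[in_mask & (image > intensity_thresh)] = channel_map[thresh_out_structure]
    footprint = disk(erosion_radius)
    medians = {}
    for name, channel in channel_map.items():
        mask = binary_erosion(labels == channel, footprint)  # shrink the mask to avoid boundary pixels
        if mask.any():
            medians[name] = float(np.median(image[mask]))
    return labels, medians
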
6 changes: 3 additions & 3 deletions ml4h/defines.py
@@ -78,9 +78,9 @@ def __str__(self):
     'interventricular_septum': 5, 'interatrial_septum': 6, 'crista_terminalis': 7,
 }
 MRI_SAX_PAP_SEGMENTED_CHANNEL_MAP = {
-    'background': 0, 'body': 1, 'thoracic_cavity': 2, 'liver': 3, 'stomach': 4, 'spleen': 5, 'kidney': 6,
-    'interventricular_septum': 7, 'LV_free_wall': 8, 'anterolateral_pap': 9, 'posteromedial_pap': 10, 'LV_cavity': 11,
-    'RV_free_wall': 12, 'RV_cavity': 13,
+    'background': 0, 'thoracic_cavity': 1, 'liver': 2, 'stomach': 3, 'spleen': 4,
+    'interventricular_septum': 5, 'LV_free_wall': 6, 'anterolateral_pap': 7, 'posteromedial_pap': 8, 'LV_cavity': 9,
+    'RV_free_wall': 10, 'RV_cavity': 11,
 }
 MRI_SAX_SEGMENTED_CHANNEL_MAP = {
     'background': 0, 'RV_free_wall': 1, 'interventricular_septum': 2, 'LV_free_wall': 3, 'LV_cavity': 4,
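The channel map change above drops the kidney label and merges body into background, which renumbers every remaining channel. A minimal sketch of remapping label images stored under the old numbering to the new one; the helper is hypothetical, and sending kidney to background is an assumption, since the commit message only says the label was removed.

import numpy as np

# Old index -> new index, derived from the two versions of MRI_SAX_PAP_SEGMENTED_CHANNEL_MAP above.
OLD_TO_NEW = {
    0: 0, 1: 0,              # body merged into background
    2: 1, 3: 2, 4: 3, 5: 4,  # thoracic_cavity, liver, stomach, spleen
    6: 0,                    # kidney removed (assumed to fall back to background)
    7: 5, 8: 6, 9: 7, 10: 8, 11: 9, 12: 10, 13: 11,
}

def remap_labels(old_labels: np.ndarray) -> np.ndarray:
    # Vectorized lookup: build a table indexed by old channel, then index it with the label image.
    lookup = np.zeros(max(OLD_TO_NEW) + 1, dtype=old_labels.dtype)
    for old, new in OLD_TO_NEW.items():
        lookup[old] = new
    return lookup[old_labels]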

