From 4cd7fadbdb5f340bcd5c2533678dceda1b02f200 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Wed, 6 Jul 2022 19:53:27 -0400 Subject: [PATCH 01/26] Add warning when empty segmentation is passed with omit_empty_frames --- src/highdicom/seg/sop.py | 9 ++++++++ tests/test_seg.py | 46 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 2d0c1fa7..16c25563 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -6,6 +6,7 @@ from typing import ( Any, cast, Dict, List, Optional, Sequence, Union, Tuple, BinaryIO ) +import warnings import numpy as np from pydicom.dataset import Dataset @@ -535,6 +536,14 @@ def __init__( segmentation_type ) self.SegmentsOverlap = segments_overlap.value + if omit_empty_frames and pixel_array.sum() == 0: + omit_empty_frames = False + warnings.warn( + 'Encoding an empty segmentation with "omit_empty_frames" ' + 'set to True. Reverting to encoding all frames since omitting ' + 'all frames is not possible.', + UserWarning + ) if plane_positions is None: if pixel_array.shape[0] != len(source_plane_positions): diff --git a/tests/test_seg.py b/tests/test_seg.py index c5da5f87..e4923749 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -1652,6 +1652,52 @@ def test_construction_empty_source_image(self): device_serial_number=self._device_serial_number ) + def test_construction_empty_source_seg_sparse(self): + # Can encoding an empty segmentation with omit_empty_frames=True issues + # a warning and encodes the full segmentation + empty_pixel_array = np.zeros_like(self._ct_pixel_array) + with pytest.warns(UserWarning): + seg = Segmentation( + source_images=[self._ct_image], + pixel_array=empty_pixel_array, + segmentation_type=SegmentationTypeValues.FRACTIONAL.value, + segment_descriptions=( + self._segment_descriptions + ), + series_instance_uid=self._series_instance_uid, + series_number=self._series_number, + sop_instance_uid=self._sop_instance_uid, + instance_number=self._instance_number, + manufacturer=self._manufacturer, + manufacturer_model_name=self._manufacturer_model_name, + software_versions=self._software_versions, + device_serial_number=self._device_serial_number, + omit_empty_frames=True, + ) + + assert seg.pixel_array.shape == empty_pixel_array.shape + + def test_construction_empty_seg_image(self): + # Can encode an empty segmentation with omit_empty_frames=False + empty_pixel_array = np.zeros_like(self._ct_pixel_array) + Segmentation( + source_images=[self._ct_image], + pixel_array=empty_pixel_array, + segmentation_type=SegmentationTypeValues.FRACTIONAL.value, + segment_descriptions=( + self._segment_descriptions + ), + series_instance_uid=self._series_instance_uid, + series_number=self._series_number, + sop_instance_uid=self._sop_instance_uid, + instance_number=self._instance_number, + manufacturer=self._manufacturer, + manufacturer_model_name=self._manufacturer_model_name, + software_versions=self._software_versions, + device_serial_number=self._device_serial_number, + omit_empty_frames=False, + ) + def test_construction_invalid_content_label(self): with pytest.raises(ValueError): Segmentation( From 2c7a62967fdf316a9692956ef8297dc94cc73615 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Thu, 7 Jul 2022 19:52:06 -0400 Subject: [PATCH 02/26] Change UserWarning to logger.warning --- src/highdicom/seg/sop.py | 6 ++---- tests/test_seg.py | 35 +++++++++++++++++------------------ 2 files changed, 19 insertions(+), 22 deletions(-) diff --git 
a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 16c25563..b9646d57 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -6,7 +6,6 @@ from typing import ( Any, cast, Dict, List, Optional, Sequence, Union, Tuple, BinaryIO ) -import warnings import numpy as np from pydicom.dataset import Dataset @@ -538,11 +537,10 @@ def __init__( self.SegmentsOverlap = segments_overlap.value if omit_empty_frames and pixel_array.sum() == 0: omit_empty_frames = False - warnings.warn( + logger.warning( 'Encoding an empty segmentation with "omit_empty_frames" ' 'set to True. Reverting to encoding all frames since omitting ' - 'all frames is not possible.', - UserWarning + 'all frames is not possible.' ) if plane_positions is None: diff --git a/tests/test_seg.py b/tests/test_seg.py index e4923749..26729c15 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -1656,24 +1656,23 @@ def test_construction_empty_source_seg_sparse(self): # Can encoding an empty segmentation with omit_empty_frames=True issues # a warning and encodes the full segmentation empty_pixel_array = np.zeros_like(self._ct_pixel_array) - with pytest.warns(UserWarning): - seg = Segmentation( - source_images=[self._ct_image], - pixel_array=empty_pixel_array, - segmentation_type=SegmentationTypeValues.FRACTIONAL.value, - segment_descriptions=( - self._segment_descriptions - ), - series_instance_uid=self._series_instance_uid, - series_number=self._series_number, - sop_instance_uid=self._sop_instance_uid, - instance_number=self._instance_number, - manufacturer=self._manufacturer, - manufacturer_model_name=self._manufacturer_model_name, - software_versions=self._software_versions, - device_serial_number=self._device_serial_number, - omit_empty_frames=True, - ) + seg = Segmentation( + source_images=[self._ct_image], + pixel_array=empty_pixel_array, + segmentation_type=SegmentationTypeValues.FRACTIONAL.value, + segment_descriptions=( + self._segment_descriptions + ), + series_instance_uid=self._series_instance_uid, + series_number=self._series_number, + sop_instance_uid=self._sop_instance_uid, + instance_number=self._instance_number, + manufacturer=self._manufacturer, + manufacturer_model_name=self._manufacturer_model_name, + software_versions=self._software_versions, + device_serial_number=self._device_serial_number, + omit_empty_frames=True, + ) assert seg.pixel_array.shape == empty_pixel_array.shape From 1e4334a94086ff4e10090f72f71d12dcbe4ca16a Mon Sep 17 00:00:00 2001 From: hackermd Date: Fri, 5 Aug 2022 14:46:21 -0400 Subject: [PATCH 03/26] Allow reference of optical path for measurements --- src/highdicom/ann/content.py | 33 ++++++++++++++++++++-- src/highdicom/content.py | 55 ++++++++++++++++++++++++++++-------- tests/test_ann.py | 26 ++++++++++++++++- tests/test_content.py | 40 ++++++++++++++++++++++---- 4 files changed, 134 insertions(+), 20 deletions(-) diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index c7234ca4..dfcaa2eb 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -11,7 +11,10 @@ AnnotationGroupGenerationTypeValues, GraphicTypeValues, ) -from highdicom.content import AlgorithmIdentificationSequence +from highdicom.content import ( + AlgorithmIdentificationSequence, + ReferencedImageSequence, +) from highdicom.sr.coding import CodedConcept from highdicom.uid import UID from highdicom._module_utils import check_required_attributes @@ -25,7 +28,8 @@ def __init__( self, name: Union[Code, CodedConcept], values: np.ndarray, - unit: Union[Code, 
CodedConcept] + unit: Union[Code, CodedConcept], + referenced_images: Optional[ReferencedImageSequence] = None ) -> None: """ Parameters @@ -40,6 +44,9 @@ def __init__( unit: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code], optional Coded units of measurement (see :dcm:`CID 7181 ` "Abstract Multi-dimensional Image Model Component Units") + referenced_images: Union[highdicom.ReferencedImageSequence, None], optional + Referenced image to which the measurement applies. Should only be + provided for intensity measurements. """ # noqa: E501 super().__init__() @@ -61,6 +68,22 @@ def __init__( item.AnnotationIndexList = stored_indices.tobytes() self.MeasurementValuesSequence = [item] + if referenced_images is not None: + if len(referenced_images) == 0: + raise ValueError( + 'Argument "referenced_images" must contain one item.' + ) + elif len(referenced_images) > 1: + raise ValueError( + 'Argument "referenced_images" must contain only one item.' + ) + if not isinstance(referenced_images, ReferencedImageSequence): + raise TypeError( + 'Argument "referenced_images" must have type ' + 'ReferencedImageSequence.' + ) + self.ReferencedImageSequence = referenced_images + @property def name(self) -> CodedConcept: """highdicom.sr.CodedConcept: coded name""" @@ -520,6 +543,12 @@ def get_graphic_data( ) else: if coordinate_type == AnnotationCoordinateTypeValues.SCOORD: + if hasattr(self, 'CommonZCoordinateValue'): + raise ValueError( + 'The annotation group contains the ' + '"Common Z Coordinate Value" element and therefore ' + 'cannot have Annotation Coordinate Type "2D".' + ) coordinate_dimensionality = 2 else: coordinate_dimensionality = 3 diff --git a/src/highdicom/content.py b/src/highdicom/content.py index d94a0e54..f230b957 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -10,7 +10,10 @@ from pydicom.sr.coding import Code from pydicom.sr.codedict import codes from pydicom.valuerep import DS, format_number_as_ds -from pydicom._storage_sopclass_uids import SegmentationStorage +from pydicom.uid import ( + SegmentationStorage, + VLWholeSlideMicroscopyImageStorage, +) from highdicom.enum import ( CoordinateSystemNames, @@ -1406,21 +1409,20 @@ def __init__( referenced_images: Optional[Sequence[Dataset]] = None, referenced_frame_number: Union[int, Sequence[int], None] = None, referenced_segment_number: Union[int, Sequence[int], None] = None, + referenced_optical_path_identifier: Union[int, None] = None, ): """ Parameters ---------- referenced_images: Union[Sequence[pydicom.Dataset], None], optional - Images to which the VOI LUT described in this dataset applies. Note - that if unspecified, the VOI LUT applies to every image referenced - in the presentation state object that this dataset is included in. + Images that should be referenced referenced_frame_number: Union[int, Sequence[int], None], optional - Frame number(s) within a referenced multiframe image to which this - VOI LUT applies. + Frame number(s) within a referenced multiframe image referenced_segment_number: Union[int, Sequence[int], None], optional - Segment number(s) within a referenced segmentation image to which - this VOI LUT applies. 
+ Segment number(s) within a referenced segmentation image + referenced_optical_path_identifier: Union[int, None], optional + Identifier of the optical path within a referenced microscopy image """ super().__init__() @@ -1445,6 +1447,7 @@ def __init__( raise ValueError("Found duplicate instances in referenced images.") multiple_images = len(referenced_images) > 1 + sop_class_uid = referenced_images[0].SOPClassUID if referenced_frame_number is not None: if multiple_images: raise ValueError( @@ -1466,16 +1469,17 @@ def __init__( f'Frame number {f} is invalid for referenced ' 'image.' ) + if referenced_segment_number is not None: if multiple_images: raise ValueError( 'Specifying "referenced_segment_number" is not ' 'supported with multiple referenced images.' ) - if referenced_images[0].SOPClassUID != SegmentationStorage: + if sop_class_uid != SegmentationStorage: raise TypeError( '"referenced_segment_number" is only valid when the ' - 'referenced image is a segmentation image.' + 'referenced image is a Segmentation image.' ) number_of_segments = len(referenced_images[0].SegmentSequence) if isinstance(referenced_segment_number, Sequence): @@ -1485,8 +1489,7 @@ def __init__( for s in _referenced_segment_numbers: if s < 1 or s > number_of_segments: raise ValueError( - f'Segment number {s} is invalid for referenced ' - 'image.' + f'Segment number {s} is invalid for referenced image.' ) if referenced_frame_number is not None: # Check that the one of the specified segments exists @@ -1504,6 +1507,31 @@ def __init__( f'Referenced frame {f} does not contain any of ' 'the referenced segments.' ) + + if referenced_optical_path_identifier is not None: + if multiple_images: + raise ValueError( + 'Specifying "referenced_optical_path_identifier" is not ' + 'supported with multiple referenced images.' + ) + if sop_class_uid != VLWholeSlideMicroscopyImageStorage: + raise TypeError( + '"referenced_optical_path_identifier" is only valid when ' + 'referenced image is a VL Whole Slide Microscopy image.' + ) + has_optical_path = False + for ref_img in referenced_images: + for optical_path_item in ref_img.OpticalPathSequence: + has_optical_path |= ( + optical_path_item.OpticalPathIdentifier == + referenced_optical_path_identifier + ) + if not has_optical_path: + raise ValueError( + 'None of the reference images contains the specified ' + '"referenced_optical_path_identifier".' 
+ ) + for im in referenced_images: if not does_iod_have_pixel_data(im.SOPClassUID): raise ValueError( @@ -1515,6 +1543,9 @@ def __init__( ref_im.ReferencedSOPClassUID = im.SOPClassUID if referenced_segment_number is not None: ref_im.ReferencedSegmentNumber = referenced_segment_number + elif referenced_optical_path_identifier is not None: + ref_im.ReferencedOpticalPathIdentifier = \ + str(referenced_optical_path_identifier) if referenced_frame_number is not None: ref_im.ReferencedFrameNumber = referenced_frame_number self.append(ref_im) diff --git a/tests/test_ann.py b/tests/test_ann.py index 39b39c1c..ea612a73 100644 --- a/tests/test_ann.py +++ b/tests/test_ann.py @@ -8,6 +8,7 @@ from pydicom.filereader import dcmread from pydicom.sr.codedict import codes from pydicom.sr.coding import Code +from pydicom.uid import VLWholeSlideMicroscopyImageStorage from highdicom.ann.content import Measurements, AnnotationGroup from highdicom.ann.enum import ( @@ -16,7 +17,10 @@ GraphicTypeValues, ) from highdicom.ann.sop import MicroscopyBulkSimpleAnnotations -from highdicom.content import AlgorithmIdentificationSequence +from highdicom.content import ( + AlgorithmIdentificationSequence, + ReferencedImageSequence, +) from highdicom.sr.coding import CodedConcept from highdicom.uid import UID @@ -61,6 +65,26 @@ def test_construction(self): values[stored_indices] ) + def test_construction_with_referenced_image(self): + optical_path_item = Dataset() + optical_path_item.OpticalPathIdentifier = '1' + image = Dataset() + image.SOPInstanceUID = '1.2.3.4' + image.SOPClassUID = VLWholeSlideMicroscopyImageStorage + image.OpticalPathSequence = [optical_path_item] + + measurements = Measurements( + name=Code('Q4LE', 'SBSI', 'Mean intensity'), + values=np.ones((10, ), dtype=np.float32), + unit=Code('{counts}', 'UCUM', 'Counts'), + referenced_images=ReferencedImageSequence( + referenced_images=[image], + referenced_optical_path_identifier='1' + ) + ) + assert hasattr(measurements, 'ReferencedImageSequence') + assert len(measurements.ReferencedImageSequence) == 1 + def test_construction_missing_name(self): with pytest.raises(TypeError): Measurements( diff --git a/tests/test_content.py b/tests/test_content.py index f28568f9..cf64cac3 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -742,9 +742,8 @@ def setUp(self): for f in get_testdata_files('dicomdirtests/77654033/CT2/*') ] self._ct_multiframe = dcmread(get_testdata_file('eCT_Supplemental.dcm')) - self._seg = dcmread( - 'data/test_files/seg_image_ct_binary_overlap.dcm' - ) + self._sm = dcmread('data/test_files/sm_image.dcm') + self._seg = dcmread('data/test_files/seg_image_ct_binary_overlap.dcm') def test_construction_ref_ims(self): ref_ims = ReferencedImageSequence( @@ -812,9 +811,9 @@ def test_construction_segment_number(self): assert ref_ims[0].ReferencedSegmentNumber == 1 def test_construction_segment_number_non_seg(self): - with pytest.raises(ValueError): + with pytest.raises(TypeError): ReferencedImageSequence( - referenced_images=self._ct_series, + referenced_images=self._ct_series[0], referenced_segment_number=1 ) @@ -873,6 +872,37 @@ def test_construction_duplicate(self): referenced_images=self._ct_series * 2, ) + def test_construction_optical_path_identifier(self): + ref_ims = ReferencedImageSequence( + referenced_images=[self._sm], + referenced_optical_path_identifier='1' + ) + assert len(ref_ims) == 1 + assert ref_ims[0].ReferencedOpticalPathIdentifier == '1' + + def test_construction_optical_path_identifier_invalid_reference(self): + with 
pytest.raises(ValueError): + ReferencedImageSequence( + referenced_images=[self._sm], + referenced_optical_path_identifier='20' + ) + + def test_construction_optical_path_identifier_non_sm(self): + with pytest.raises(TypeError): + ReferencedImageSequence( + referenced_images=[self._seg], + referenced_optical_path_identifier='1' + ) + + def test_construction_optical_path_identifier_and_frame_numbers(self): + ref_ims = ReferencedImageSequence( + referenced_images=[self._sm], + referenced_optical_path_identifier='1', + referenced_frame_number=[1, 2], + ) + assert len(ref_ims) == 1 + assert ref_ims[0].ReferencedOpticalPathIdentifier == '1' + class TestPaletteColorLUT(TestCase): From 26094981b0430f01f85b2dc87584a650a4641a17 Mon Sep 17 00:00:00 2001 From: hackermd Date: Fri, 5 Aug 2022 15:19:50 -0400 Subject: [PATCH 04/26] Add property to access referenced images --- src/highdicom/ann/content.py | 13 ++++++++++++ src/highdicom/content.py | 39 +++++++++++++++++++++++++++++++++++- tests/test_ann.py | 7 +++++++ 3 files changed, 58 insertions(+), 1 deletion(-) diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index dfcaa2eb..2d9a9fc3 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -94,6 +94,14 @@ def unit(self) -> CodedConcept: """highdicom.sr.CodedConcept: coded unit""" return self.MeasurementUnitsCodeSequence[0] + @property + def referenced_images(self) -> ReferencedImageSequence: + """highdicom.ReferencedImageSequence: referenced images""" + if hasattr(self, 'ReferencedImageSequence'): + return self.ReferencedImageSequence + else: + return [] + def get_values(self, number_of_annotations: int) -> np.ndarray: """Get measured values for annotations. @@ -174,6 +182,11 @@ def from_dataset(cls, dataset: Dataset) -> 'Measurements': measurements.MeasurementUnitsCodeSequence[0] ) ] + if hasattr(measurements, 'ReferencedImageSequence'): + measurements.ReferencedImageSequence = \ + ReferencedImageSequence.from_sequence( + measurements.ReferencedImageSequence + ) return cast(Measurements, measurements) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index f230b957..344e26e2 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -108,7 +108,7 @@ def from_sequence( Returns ------- - highdicom.seg.content.AlgorithmIdentificationSequence + highdicom.AlgorithmIdentificationSequence Algorithm Identification Sequence """ @@ -1550,6 +1550,43 @@ def __init__( ref_im.ReferencedFrameNumber = referenced_frame_number self.append(ref_im) + @classmethod + def from_sequence( + cls, + sequence: DataElementSequence + ) -> 'ReferencedImageSequence': + """Construct instance from an existing data element sequence. + + Parameters + ---------- + sequence: pydicom.sequence.Sequence + Data element sequence representing the + Algorithm Identification Sequence + + Returns + ------- + highdicom.ReferencedImageSequence + Referenced Image Sequence + + """ + if not isinstance(sequence, DataElementSequence): + raise TypeError( + 'Sequence should be of type pydicom.sequence.Sequence.' 
+ ) + if len(sequence) != 1: + raise ValueError('Sequence should contain a single item.') + check_required_attributes( + sequence[0], + module='advanced-blending-presentation-state', + base_path=[ + 'AdvancedBlendingSequence', + 'ReferencedImageSequence', + ] + ) + ref_img_sequence = deepcopy(sequence) + ref_img_sequence.__class__ = ReferencedImageSequence + return cast(ReferencedImageSequence, ref_img_sequence) + class LUT(Dataset): diff --git a/tests/test_ann.py b/tests/test_ann.py index ea612a73..7749149e 100644 --- a/tests/test_ann.py +++ b/tests/test_ann.py @@ -124,12 +124,19 @@ def test_alternative_construction_from_dataset(self): measurement_values.FloatingPointValues = values.tobytes() measurement_values.AnnotationIndexList = index.tobytes() dataset.MeasurementValuesSequence = [measurement_values] + referenced_image = Dataset() + referenced_image.ReferencedOpticalPathIdentifier = '1' + referenced_image.ReferencedSOPInstanceUID = '1.2.3' + referenced_image.ReferencedSOPClassUID = \ + VLWholeSlideMicroscopyImageStorage + dataset.ReferencedImageSequence = [referenced_image] measurements = Measurements.from_dataset(dataset) assert measurements.name == CodedConcept.from_dataset(name) assert measurements.unit == CodedConcept.from_dataset(unit) np.testing.assert_allclose(measurements.get_values(3), values) + assert len(measurements.referenced_images) == 1 class TestAnnotationGroup(unittest.TestCase): From da2f5b134abcd92fbda7c50860506608355ce837 Mon Sep 17 00:00:00 2001 From: hackermd Date: Fri, 5 Aug 2022 15:25:45 -0400 Subject: [PATCH 05/26] Provide referenced images for measurements --- src/highdicom/ann/content.py | 20 +++++++++++++++----- tests/test_ann.py | 13 +++++++++---- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index 2d9a9fc3..45a98be7 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -95,12 +95,12 @@ def unit(self) -> CodedConcept: return self.MeasurementUnitsCodeSequence[0] @property - def referenced_images(self) -> ReferencedImageSequence: - """highdicom.ReferencedImageSequence: referenced images""" + def referenced_images(self) -> Union[ReferencedImageSequence, None]: + """Union[highdicom.ReferencedImageSequence, None]: referenced images""" if hasattr(self, 'ReferencedImageSequence'): return self.ReferencedImageSequence else: - return [] + return None def get_values(self, number_of_annotations: int) -> np.ndarray: """Get measured values for annotations. @@ -675,7 +675,10 @@ def get_measurements( self, name: Optional[Union[Code, CodedConcept]] = None ) -> Tuple[ - List[CodedConcept], np.ndarray, List[CodedConcept] + List[CodedConcept], + np.ndarray, + List[CodedConcept], + List[Union[ReferencedImageSequence, None]] ]: """Get measurements. @@ -696,6 +699,8 @@ def get_measurements( given annotation. 
units: List[highdicom.sr.CodedConcept] Units of measurements + referenced_images: List[highdicom.ReferencedImageSequence, None] + Referenced images """ # noqa: E501 number_of_annotations = self.number_of_annotations @@ -717,11 +722,16 @@ def get_measurements( item.unit for item in self.MeasurementsSequence if name is None or item.name == name ] + referenced_images = [ + item.referenced_images for item in self.MeasurementsSequence + if name is None or item.name == name + ] else: value_array = np.empty((number_of_annotations, 0), np.float32) names = [] units = [] - return (names, value_array, units) + referenced_images = [] + return (names, value_array, units, referenced_images) def _get_coordinate_index( self, diff --git a/tests/test_ann.py b/tests/test_ann.py index 7749149e..fe587934 100644 --- a/tests/test_ann.py +++ b/tests/test_ann.py @@ -233,7 +233,7 @@ def test_construction(self): graphic_data[1] ) - names, values, units = group.get_measurements() + names, values, units, ref_images = group.get_measurements() assert len(names) == 1 assert names[0] == measurement_names[0] assert len(units) == 1 @@ -241,8 +241,10 @@ def test_construction(self): assert values.dtype == np.float32 assert values.shape == (2, 1) np.testing.assert_allclose(values, measurement_values) + assert len(ref_images) == 1 + assert ref_images[0] is None - names, values, units = group.get_measurements( + names, values, units, ref_images = group.get_measurements( name=measurement_names[0] ) assert len(names) == 1 @@ -252,8 +254,10 @@ def test_construction(self): assert values.dtype == np.float32 assert values.shape == (2, 1) np.testing.assert_allclose(values, measurement_values) + assert len(ref_images) == 1 + assert ref_images[0] is None - names, values, units = group.get_measurements( + names, values, units, ref_images = group.get_measurements( name=codes.SCT.Volume ) assert names == [] @@ -261,6 +265,7 @@ def test_construction(self): assert values.size == 0 assert values.dtype == np.float32 assert values.shape == (2, 0) + assert ref_images == [] def test_alternative_construction_from_dataset(self): coordinates_data = np.array( @@ -313,7 +318,7 @@ def test_alternative_construction_from_dataset(self): np.array([[1.0, 1.0]], dtype=np.double) ) - names, values, units = group.get_measurements() + names, values, units, ref_images = group.get_measurements() assert names == [] assert units == [] assert values.size == 0 From 366c58aa473c8c6751e688d586cbd3fdb3aa58b5 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 15 Aug 2022 01:01:28 +0000 Subject: [PATCH 06/26] Remove module level imports of module data (#196) --- src/highdicom/_module_utils.py | 17 ++++++++++------- src/highdicom/base.py | 4 ++-- src/highdicom/legacy/sop.py | 4 ++-- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/highdicom/_module_utils.py b/src/highdicom/_module_utils.py index 2a33dd14..8a91a9bf 100644 --- a/src/highdicom/_module_utils.py +++ b/src/highdicom/_module_utils.py @@ -3,13 +3,6 @@ from pydicom import Dataset -from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP -from highdicom._modules import MODULE_ATTRIBUTE_MAP -from highdicom._iods import ( - IOD_MODULE_MAP, - SOP_CLASS_UID_IOD_KEY_MAP -) - # Allowed values for the type of an attribute class AttributeTypeValues(Enum): @@ -168,6 +161,7 @@ def construct_module_tree(module: str) -> Dict[str, Any]: dictionary that forms an item in the next level of the tree structure. 
""" + from highdicom._modules import MODULE_ATTRIBUTE_MAP if module not in MODULE_ATTRIBUTE_MAP: raise AttributeError(f"No such module found: '{module}'.") tree: Dict[str, Any] = {'attributes': {}} @@ -205,6 +199,10 @@ def get_module_usage( """ + from highdicom._iods import ( + IOD_MODULE_MAP, + SOP_CLASS_UID_IOD_KEY_MAP + ) try: iod_name = SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] except KeyError as e: @@ -235,6 +233,11 @@ def is_attribute_in_iod(attribute: str, sop_class_uid: str) -> bool: specified by the sop_class_uid. False otherwise. """ + from highdicom._iods import ( + IOD_MODULE_MAP, + SOP_CLASS_UID_IOD_KEY_MAP + ) + from highdicom._modules import MODULE_ATTRIBUTE_MAP try: iod_name = SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] except KeyError as e: diff --git a/src/highdicom/base.py b/src/highdicom/base.py index ca6e056b..f2aa7cad 100644 --- a/src/highdicom/base.py +++ b/src/highdicom/base.py @@ -16,8 +16,6 @@ ) from highdicom.valuerep import check_person_name from highdicom.version import __version__ -from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP -from highdicom._modules import MODULE_ATTRIBUTE_MAP from highdicom._module_utils import is_attribute_in_iod @@ -289,6 +287,8 @@ def _copy_root_attributes_of_module( DICOM Module (e.g., ``"General Series"`` or ``"Specimen"``) """ + from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP + from highdicom._modules import MODULE_ATTRIBUTE_MAP logger.info( 'copy {}-related attributes from dataset "{}"'.format( ie, dataset.SOPInstanceUID diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 3ae20849..d7c3df4f 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -17,8 +17,6 @@ from highdicom.base import SOPClass from highdicom.frame import encode_frame -from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP -from highdicom._modules import MODULE_ATTRIBUTE_MAP logger = logging.getLogger(__name__) @@ -60,6 +58,8 @@ def _convert_legacy_to_enhanced( which instances are provided via `sf_datasets`. 
""" + from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP + from highdicom._modules import MODULE_ATTRIBUTE_MAP try: ref_ds = sf_datasets[0] except IndexError: From ed5901ffb98e60eff1edf926944107095c9c66e3 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 15 Aug 2022 01:02:37 +0000 Subject: [PATCH 07/26] Minor fixes to segmentation (#195) * Minor fixes to segmentation * Fix docstring typo in type --- docs/usage.rst | 2 +- src/highdicom/seg/content.py | 2 +- src/highdicom/seg/sop.py | 13 ++----------- 3 files changed, 4 insertions(+), 13 deletions(-) diff --git a/docs/usage.rst b/docs/usage.rst index 8bd86bb5..73fe02d3 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -121,7 +121,7 @@ Derive a Segmentation image from a multi-frame Slide Microscopy (SM) image: ) # Create the Segmentation instance - seg_dataset = Segmentation( + seg_dataset = hd.seg.Segmentation( source_images=[image_dataset], pixel_array=mask, segmentation_type=hd.seg.SegmentationTypeValues.BINARY, diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py index ff4b7fe6..9617f151 100644 --- a/src/highdicom/seg/content.py +++ b/src/highdicom/seg/content.py @@ -77,7 +77,7 @@ def __init__( "Anatomic Region", :dcm:`CID 4031 ` "Common Anatomic Regions", as as well as other CIDs for domain-specific anatomic regions) - primary_anatomic_structures: Union[Sequence[Union[highdicom.sr.Code, highdicom.sr.CodedConcept]], None], optional + primary_anatomic_structures: Union[Sequence[Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept]], None], optional Anatomic structure(s) the segment represents (see CIDs for domain-specific primary anatomic structures) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 581ac851..4e9ce557 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -312,6 +312,8 @@ def __init__( if pixel_array.ndim == 2: pixel_array = pixel_array[np.newaxis, ...] + if pixel_array.ndim not in [3, 4]: + raise ValueError('Pixel array must be a 2D, 3D, or 4D array.') super().__init__( study_instance_uid=src_img.StudyInstanceUID, @@ -568,17 +570,6 @@ def __init__( self.NumberOfFrames = 0 self.PerFrameFunctionalGroupsSequence: List[Dataset] = [] - if pixel_array.ndim == 2: - pixel_array = pixel_array[np.newaxis, ...] - if pixel_array.ndim not in [3, 4]: - raise ValueError('Pixel array must be a 2D, 3D, or 4D array.') - - if pixel_array.shape[1:3] != (self.Rows, self.Columns): - raise ValueError( - 'Pixel array representing segments has the wrong number of ' - 'rows and columns.' 
- ) - # Check segment numbers described_segment_numbers = np.array([ int(item.SegmentNumber) From bfbd80770efb7c0c9a7f86c46dea81cc93f179ea Mon Sep 17 00:00:00 2001 From: Sean Doyle Date: Fri, 19 Aug 2022 14:58:48 -0400 Subject: [PATCH 08/26] Implement TID 1601 Image Library Entry (#83) * Fix recording of evidence in structured reports * Assert that evidence is provided for references * Use sets for comparison and avoid duplicates * Add test for report referencing multiple studies * Use datetime workaround for python 3.6 support * Added image_library_entries to TID 1500 Co-authored-by: Sean Doyle Co-authored-by: hackermd Co-authored-by: Christopher Bridge --- data/test_files/dx_image.dcm | Bin 0 -> 37825 bytes docs/release_notes.rst | 16 + src/highdicom/sr/content.py | 2 +- src/highdicom/sr/templates.py | 545 ++++++++++++++++++++++++++++------ tests/test_sr.py | 224 ++++++++++++-- 5 files changed, 663 insertions(+), 124 deletions(-) create mode 100644 data/test_files/dx_image.dcm diff --git a/data/test_files/dx_image.dcm b/data/test_files/dx_image.dcm new file mode 100644 index 0000000000000000000000000000000000000000..a9b526541fab98a53804197f627164c33d6d9f34 GIT binary patch literal 37825 zcmc$`cW`6rbuS2wWxugX)*Csznyqqewf+8B)s{8tJlzvIH@n%*)XV_@kplw=f+R?Q z00;s^&XEWVAix0ToO8~Z?A$#;8YNrulVn?xmG*bp6C|zQdjEK}dl_(X?*;Ikd(Qct z^PTT^KqCGByWNVt8UGrFkQVo4(iR?HBYlaKc!PMv_NCYGm=^oDq%&`wee2Ts({G(Q zefspJw=TSO=B@MiJNdyCe}ZeSy}IV?#kX+PTjwrbI(_>5xwkI8b^bE`&Yr(;_VnB5 z&s;it@yzA(=gyqIjFaciUAXY}<<;&;aevR}7-si9X=(C7VK7-Hr%g@Jg z_HE)b-adQo^yxFF-#Ul=b8lTdx#q7(DR;j@N=%7Oj?K(Hck#^Yq}NF|Q%;c*lVk71 zCR~prCnd+mUr){=lao`)#CGNaCC4`2x;EJAKv}+O=7K!Mk{35~YBqh#$m6XmdWsCR%x`#6Kh5yn8z(6?gsXUr%{JBIUu*e)~JphYUPoR9{bv zee;iO;2bRKEVO(M=hJZhos;wDE}wl1_we+&i*H@VjeP42O!DH{b7#(6zI6V~1=z>s zw=SN)d=@|F&YnH{*4w8q!d~zWU;n$5)W0R&!CSKb``fR ze(O9&_U7LcBO|sq@ROAAHPWpjF7a^~#0^A|3@K|1}Zj?SDv zeg5p(H%Qk`?q&?Gd*hAkDPJa?K7AqU4N~9V#ijl==@z|^&SkSs#q&!=^nz3A^a3&a zO?*mRA_+G3H+bcrky7s_CST8dlXMFA>whQRO8#c-ty8e~n{lVEC&isgDdLot(?#r4 z>1WQob^1-xX8nOI&=Qg#d8Pu}PJxzlGazkT8DOP5ZcgXNvQaQgBkpc;_v{Q2`| zfq|zl<1J4?yM(V@KYRA=bEi&`zD|mTe9+RFQ>4H8Z2#;j*c2gm961Kp5dKKQ5ypun z#o-9)S{&>)nO(x97r=ZtqN2i6MIt^I4p(rBMK7hF`UZj4@Zv9#zCpr0OZh4(J~1Kj z=H2AP_}gc27Wj&@?Ku1Olf6Ui!E*mY3-|`mpSb#UT=S*R_g*{M`!6{5CntL(;_ROg z$dQov4brtYPR1XbaOxYR>u;odI+CO}PAuc>CFuAYqz5O@&R!tw9eG>ZzLuBw?Dub>Sz8)y+*qA+V7+4HPY?Z{tzXvky2j! 
[... remainder of the base-85 encoded GIT binary patch data for data/test_files/dx_image.dcm elided ...] literal 0 HcmV?d00001 diff --git a/docs/release_notes.rst b/docs/release_notes.rst index 44f82aeb..cde95a98 100644 --- a/docs/release_notes.rst +++ b/docs/release_notes.rst @@ -185,3 +185,19 @@ error. Similarly, as of highdicom 0.18.0, it is no longer possible to pass datasets with a Big Endian transfer syntax to the `from_dataset` methods of any of the :class:`highdicom.SOPClass` subclasses. + +.. _update-image-library: + +Change in MeasurementReport constructor for TID 1601 enhancement +---------------------------------------------------------------- + +A breaking change was made after highdicom 0.18.4 in the creation of Image +Library TID 1601 objects.
+Previously the Image Library was constructed by explicitly +passing a `pydicom.sequence.Sequence` of `ImageLibraryEntryDescriptors` +objects to the :class:`highdicom.sr.MeasurementReport` constructor in the `image_library_groups` +argument. +Now a `pydicom.sequence.Sequence` of `pydicom.dataset.Dataset` +objects is passed in the `referenced_images` argument and the +ImageLibrary components are created internally by highdicom. +This standardizes the content of the Image Library subcomponents. diff --git a/src/highdicom/sr/content.py b/src/highdicom/sr/content.py index 3327b2df..6fe47f8b 100644 --- a/src/highdicom/sr/content.py +++ b/src/highdicom/sr/content.py @@ -1268,7 +1268,7 @@ def __init__( modifier_item = CodeContentItem( name=CodedConcept( value='106233006', - meaning='Topographical Modifier', + meaning='Topographical modifier', scheme_designator='SCT' ), value=topographical_modifier, diff --git a/src/highdicom/sr/templates.py b/src/highdicom/sr/templates.py index fc4ee697..8583b6e7 100644 --- a/src/highdicom/sr/templates.py +++ b/src/highdicom/sr/templates.py @@ -1,7 +1,8 @@ """DICOM structured reporting templates.""" +import collections import logging from copy import deepcopy -from typing import cast, Iterable, List, Optional, Sequence, Tuple, Union +from typing import cast, Dict, Iterable, List, Optional, Sequence, Tuple, Union from pydicom.dataset import Dataset from pydicom.sr.coding import Code @@ -22,6 +23,7 @@ SourceImageForSegmentation, SourceSeriesForSegmentation ) + from highdicom.sr.enum import ( GraphicTypeValues, GraphicTypeValues3D, @@ -29,7 +31,10 @@ ValueTypeValues, ) from highdicom.uid import UID -from highdicom.sr.utils import find_content_items, get_coded_name +from highdicom.sr.utils import ( + find_content_items, + get_coded_name +) from highdicom.sr.value_types import ( CodeContentItem, ContainerContentItem, @@ -42,7 +47,7 @@ UIDRefContentItem, ) - +from highdicom._module_utils import does_iod_have_pixel_data # Codes missing from pydicom DEFAULT_LANGUAGE = CodedConcept( value='en-US', @@ -489,6 +494,143 @@ def _contains_image_items( return False +def _get_coded_modality(sop_class_uid: str) -> Code: + """Get the coded modality for a SOP Class UID of an Image.
+ + Parameters + ---------- + sop_class_uid: str + SOP Class UID + + Returns + ------- + pydicom.sr.coding.Code + Coded Acquisition Modality + (see :dcm:`CID 29 Acquisition Modality `) + + Raises + ------ + ValueError + if the SOP Class UID does not identify a SOP Class + for storage of an Image information entity + + """ # noqa: E501 + sopclass_to_modality_map: Dict[str, Code] = { + '1.2.840.10008.5.1.4.1.1.1': codes.cid29.ComputedRadiography, + '1.2.840.10008.5.1.4.1.1.1.1': codes.cid29.DigitalRadiography, + '1.2.840.10008.5.1.4.1.1.1.1.1': codes.cid29.DigitalRadiography, + '1.2.840.10008.5.1.4.1.1.1.2': codes.cid29.Mammography, + '1.2.840.10008.5.1.4.1.1.1.2.1': codes.cid29.Mammography, + '1.2.840.10008.5.1.4.1.1.1.3': codes.cid29.IntraOralRadiography, + '1.2.840.10008.5.1.4.1.1.1.3.1': codes.cid29.IntraOralRadiography, + '1.2.840.10008.5.1.4.1.1.2': codes.cid29.ComputedTomography, + '1.2.840.10008.5.1.4.1.1.2.1': codes.cid29.ComputedTomography, + '1.2.840.10008.5.1.4.1.1.2.2': codes.cid29.ComputedTomography, + '1.2.840.10008.5.1.4.1.1.3.1': codes.cid29.Ultrasound, + '1.2.840.10008.5.1.4.1.1.4': codes.cid29.MagneticResonance, + '1.2.840.10008.5.1.4.1.1.4.1': codes.cid29.MagneticResonance, + '1.2.840.10008.5.1.4.1.1.4.2': codes.cid29.MagneticResonance, + '1.2.840.10008.5.1.4.1.1.4.3': codes.cid29.MagneticResonance, + '1.2.840.10008.5.1.4.1.1.4.4': codes.cid29.MagneticResonance, + '1.2.840.10008.5.1.4.1.1.6.1': codes.cid29.Ultrasound, + '1.2.840.10008.5.1.4.1.1.6.2': codes.cid29.Ultrasound, + '1.2.840.10008.5.1.4.1.1.7': codes.cid32.Other, + '1.2.840.10008.5.1.4.1.1.7.1': codes.cid32.Other, + '1.2.840.10008.5.1.4.1.1.7.2': codes.cid32.Other, + '1.2.840.10008.5.1.4.1.1.7.3': codes.cid32.Other, + '1.2.840.10008.5.1.4.1.1.7.4': codes.cid32.Other, + '1.2.840.10008.5.1.4.1.1.9.1.1': codes.cid29.Electrocardiography, + '1.2.840.10008.5.1.4.1.1.9.1.2': codes.cid29.Electrocardiography, + '1.2.840.10008.5.1.4.1.1.9.1.3': codes.cid29.Electrocardiography, + '1.2.840.10008.5.1.4.1.1.9.2.1': codes.cid29.HemodynamicWaveform, + '1.2.840.10008.5.1.4.1.1.9.3.1': codes.cid29.Electrocardiography, + '1.2.840.10008.5.1.4.1.1.9.4.1': codes.cid32.BasicVoiceAudio, + '1.2.840.10008.5.1.4.1.1.9.5.1': codes.cid29.HemodynamicWaveform, + '1.2.840.10008.5.1.4.1.1.9.6.1': codes.cid29.RespiratoryWaveform, + '1.2.840.10008.5.1.4.1.1.11.1': codes.cid32.PresentationState, + '1.2.840.10008.5.1.4.1.1.11.2': codes.cid32.PresentationState, + '1.2.840.10008.5.1.4.1.1.11.3': codes.cid32.PresentationState, + '1.2.840.10008.5.1.4.1.1.11.4': codes.cid32.PresentationState, + '1.2.840.10008.5.1.4.1.1.12.1': codes.cid29.XRayAngiography, + '1.2.840.10008.5.1.4.1.1.12.1.1': codes.cid29.XRayAngiography, + '1.2.840.10008.5.1.4.1.1.12.2': codes.cid29.Radiofluoroscopy, + '1.2.840.10008.5.1.4.1.1.12.2.1': codes.cid29.Radiofluoroscopy, + '1.2.840.10008.5.1.4.1.1.13.1.1': codes.cid29.XRayAngiography, + '1.2.840.10008.5.1.4.1.1.13.1.2': codes.cid29.DigitalRadiography, + '1.2.840.10008.5.1.4.1.1.13.1.3': codes.cid29.Mammography, + '1.2.840.10008.5.1.4.1.1.14.1': codes.cid29.IntravascularOpticalCoherenceTomography, # noqa E501 + '1.2.840.10008.5.1.4.1.1.14.2': codes.cid29.IntravascularOpticalCoherenceTomography, # noqa E501 + '1.2.840.10008.5.1.4.1.1.20': codes.cid29.NuclearMedicine, + '1.2.840.10008.5.1.4.1.1.66.1': codes.cid32.Registration, + '1.2.840.10008.5.1.4.1.1.66.2': codes.cid32.SpatialFiducials, + '1.2.840.10008.5.1.4.1.1.66.3': codes.cid32.Registration, + '1.2.840.10008.5.1.4.1.1.66.4': codes.cid32.Segmentation, + 
'1.2.840.10008.5.1.4.1.1.67': codes.cid32.RealWorldValueMap, + '1.2.840.10008.5.1.4.1.1.68.1': codes.cid29.OpticalSurfaceScanner, + '1.2.840.10008.5.1.4.1.1.68.2': codes.cid29.OpticalSurfaceScanner, + '1.2.840.10008.5.1.4.1.1.77.1.1': codes.cid29.Endoscopy, + '1.2.840.10008.5.1.4.1.1.77.1.1.1': codes.cid29.Endoscopy, + '1.2.840.10008.5.1.4.1.1.77.1.2': codes.cid29.GeneralMicroscopy, + '1.2.840.10008.5.1.4.1.1.77.1.2.1': codes.cid29.GeneralMicroscopy, + '1.2.840.10008.5.1.4.1.1.77.1.3': codes.cid29.SlideMicroscopy, + '1.2.840.10008.5.1.4.1.1.77.1.4': codes.cid29.ExternalCameraPhotography, + '1.2.840.10008.5.1.4.1.1.77.1.4.1': codes.cid29.ExternalCameraPhotography, # noqa E501 + '1.2.840.10008.5.1.4.1.1.77.1.5.1': codes.cid29.OphthalmicPhotography, + '1.2.840.10008.5.1.4.1.1.77.1.5.2': codes.cid29.OphthalmicPhotography, + '1.2.840.10008.5.1.4.1.1.77.1.5.3': codes.cid32.StereometricRelationship, # noqa E501 + '1.2.840.10008.5.1.4.1.1.77.1.5.4': codes.cid29.OphthalmicTomography, + '1.2.840.10008.5.1.4.1.1.77.1.6': codes.cid29.SlideMicroscopy, + '1.2.840.10008.5.1.4.1.1.78.1': codes.cid29.Lensometry, + '1.2.840.10008.5.1.4.1.1.78.2': codes.cid29.Autorefraction, + '1.2.840.10008.5.1.4.1.1.78.3': codes.cid29.Keratometry, + '1.2.840.10008.5.1.4.1.1.78.4': codes.cid29.SubjectiveRefraction, + '1.2.840.10008.5.1.4.1.1.78.5': codes.cid29.VisualAcuity, + '1.2.840.10008.5.1.4.1.1.78.7': codes.cid29.OphthalmicAxialMeasurements, + '1.2.840.10008.5.1.4.1.1.78.8': codes.cid32.IntraocularLensCalculation, + '1.2.840.10008.5.1.4.1.1.80.1': codes.cid29.OphthalmicVisualField, + '1.2.840.10008.5.1.4.1.1.81.1': codes.cid29.OphthalmicMapping, + '1.2.840.10008.5.1.4.1.1.82.1': codes.cid29.OphthalmicMapping, + '1.2.840.10008.5.1.4.1.1.88.11': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.22': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.33': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.34': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.35': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.50': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.59': codes.cid32.KeyObjectSelection, + '1.2.840.10008.5.1.4.1.1.88.65': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.67': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.68': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.70': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.71': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.72': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.73': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.74': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.75': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.88.76': codes.cid32.StructuredReportDocument, + '1.2.840.10008.5.1.4.1.1.90.1': codes.cid32.ContentAssessmentResult, + '1.2.840.10008.5.1.4.1.1.128': codes.cid29.PositronEmissionTomography, + '1.2.840.10008.5.1.4.1.1.130': codes.cid29.PositronEmissionTomography, + '1.2.840.10008.5.1.4.1.1.128.1': codes.cid29.PositronEmissionTomography, + '1.2.840.10008.5.1.4.1.1.200.2': codes.cid32.CTProtocol, + '1.2.840.10008.5.1.4.1.1.481.1': codes.cid29.RTImage, + '1.2.840.10008.5.1.4.1.1.481.2': codes.cid32.RTDose, + '1.2.840.10008.5.1.4.1.1.481.3': codes.cid32.RTStructureSet, + '1.2.840.10008.5.1.4.1.1.481.4': codes.cid32.RTTreatmentRecord, + 
'1.2.840.10008.5.1.4.1.1.481.5': codes.cid32.RTPlan, + '1.2.840.10008.5.1.4.1.1.481.6': codes.cid32.RTTreatmentRecord, + '1.2.840.10008.5.1.4.1.1.481.7': codes.cid32.RTTreatmentRecord, + '1.2.840.10008.5.1.4.1.1.481.8': codes.cid32.RTPlan, + '1.2.840.10008.5.1.4.1.1.481.9': codes.cid32.RTTreatmentRecord, + } + try: + return sopclass_to_modality_map[sop_class_uid] + except KeyError: + raise ValueError( + 'SOP Class UID does not identify a SOP Class ' + 'for storage of an image information entity.' + ) + + class Template(ContentSequence): """Abstract base class for a DICOM SR template.""" @@ -3644,60 +3786,49 @@ def from_sequence( class ImageLibraryEntryDescriptors(Template): - """`TID 1602 `_ - Image Library Entry Descriptors""" # noqa: E501 + """:dcm:`TID 1602 Image Library Entry Descriptors + ` + """ # noqa: E501 def __init__( self, - modality: Union[Code, CodedConcept], - frame_of_reference_uid: str, - pixel_data_rows: int, - pixel_data_columns: int, + image: Dataset, additional_descriptors: Optional[Sequence[ContentItem]] = None ) -> None: """ + Parameters ---------- - modality: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code] - Modality - frame_of_reference_uid: str - Frame of Reference UID - pixel_data_rows: int - Number of rows in pixel data frames - pixel_data_columns: int - Number of rows in pixel data frames + image: pydicom.dataset.Dataset + Metadata of a referenced image instance additional_descriptors: Union[Sequence[highdicom.sr.ContentItem], None], optional - Additional SR Content Items that should be included + Optional additional SR Content Items that should be included + for description of the referenced image """ # noqa: E501 super().__init__() + modality = _get_coded_modality(image.SOPClassUID) + if not does_iod_have_pixel_data(image.SOPClassUID): + raise ValueError( + f'Dataset with SOPInstanceUID {image.SOPInstanceUID}' + 'is not a DICOM image') + modality_item = CodeContentItem( - name=CodedConcept( - value='121139', - meaning='Modality', - scheme_designator='DCM' - ), + name=codes.DCM.Modality, value=modality, relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT ) self.append(modality_item) - frame_of_reference_uid_item = UIDRefContentItem( - name=CodedConcept( - value='112227', - meaning='Frame of Reference UID', - scheme_designator='DCM' - ), - value=frame_of_reference_uid, - relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT - ) - self.append(frame_of_reference_uid_item) + if 'FrameOfReferenceUID' in image: + frame_of_reference_uid_item = UIDRefContentItem( + name=codes.DCM.FrameOfReferenceUID, + value=image.FrameOfReferenceUID, + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT + ) + self.append(frame_of_reference_uid_item) pixel_data_rows_item = NumContentItem( - name=CodedConcept( - value='110910', - meaning='Pixel Data Rows', - scheme_designator='DCM' - ), - value=pixel_data_rows, + name=codes.DCM.PixelDataRows, + value=image.Rows, relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, unit=CodedConcept( value='{pixels}', @@ -3707,12 +3838,8 @@ def __init__( ) self.append(pixel_data_rows_item) pixel_data_cols_item = NumContentItem( - name=CodedConcept( - value='110911', - meaning='Pixel Data Columns', - scheme_designator='DCM' - ), - value=pixel_data_columns, + name=codes.DCM.PixelDataColumns, + value=image.Columns, relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, unit=CodedConcept( value='{pixels}', @@ -3721,6 +3848,16 @@ def __init__( ) ) self.append(pixel_data_cols_item) + + if self._is_cross_sectional(image): + 
modality_descriptors = \ + self._generate_cross_sectional_descriptors(image) + self.extend(modality_descriptors) + elif self._is_projection_radiography(image): + modality_descriptors = \ + self._generate_projection_radiography_descriptors(image) + self.extend(modality_descriptors) + if additional_descriptors is not None: for item in additional_descriptors: if not isinstance(item, ContentItem): @@ -3732,6 +3869,188 @@ def __init__( item.RelationshipType = relationship_type.value self.append(item) + def _generate_projection_radiography_descriptors( + self, + dataset: Dataset + ) -> Sequence[ContentItem]: + """Generate descriptors for projection radiography modalities. + :dcm:`TID 1603 ` + Image Library Entry Descriptors for Projection Radiography + + Parameters + ---------- + pydicom.Dataset + Metadata of a projection radiology image + + Returns + ------- + Sequence[highdicom.sr.ContentItem] + SR Content Items describing the image + + """ # noqa: E501 + patient_orientation = dataset.PatientOrientation + pixel_spacing = dataset.ImagerPixelSpacing + descriptors = [ + TextContentItem( + name=codes.DCM.PatientOrientationRow, + value=patient_orientation[0], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + ), + TextContentItem( + name=codes.DCM.PatientOrientationColumn, + value=patient_orientation[1], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + ), + NumContentItem( + name=codes.DCM.HorizontalPixelSpacing, + value=pixel_spacing[1], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.VerticalPixelSpacing, + value=pixel_spacing[0], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + )] + + return descriptors + + def _generate_cross_sectional_descriptors( + self, + dataset: Dataset + ) -> Sequence[ContentItem]: + """Generate descriptors for cross-sectional modalities. + + :dcm:`TID 1604 Image Library Entry Descriptors for Cross-Sectional Modalities ` + + Parameters + ---------- + dataset: pydicom.Dataset + A pydicom Dataset of a cross-sectional image. + + Returns + ------- + Sequence[highdicom.sr.ContentItem] + SR Content Items describing the image. 
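As a usage sketch of the reworked entry descriptors (the file name is hypothetical): the Modality code is looked up internally from the SOP Class UID, and the remaining items are read from the dataset itself:

.. code-block:: python

    from pydicom import dcmread
    from highdicom.sr import ImageLibraryEntryDescriptors

    ct_image = dcmread('ct_image.dcm')

    # Modality, Frame of Reference UID, Pixel Data Rows/Columns and the
    # cross-sectional descriptors (TID 1604) are derived from the dataset.
    descriptors = ImageLibraryEntryDescriptors(image=ct_image)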
+ + """ # noqa: E501 + pixel_spacing = dataset.PixelSpacing + image_orientation = dataset.ImageOrientationPatient + image_position = dataset.ImagePositionPatient + + descriptors = [ + NumContentItem( + name=codes.DCM.HorizontalPixelSpacing, + value=pixel_spacing[1], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.VerticalPixelSpacing, + value=pixel_spacing[0], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.SpacingBetweenSlices, + value=dataset.SpacingBetweenSlices, + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.SliceThickness, + value=dataset.SliceThickness, + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.ImagePositionPatientX, + value=image_position[0], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.ImagePositionPatientY, + value=image_position[1], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.ImagePositionPatientZ, + value=image_position[2], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=codes.UCUM.Millimeter + ), + NumContentItem( + name=codes.DCM.ImageOrientationPatientRowX, + value=image_orientation[0], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=CodedConcept( + value='{-1:1}', + meaning='{-1:1}', + scheme_designator='UCUM' + ) + ), + NumContentItem( + name=codes.DCM.ImageOrientationPatientRowY, + value=image_orientation[1], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=CodedConcept( + value='{-1:1}', + meaning='{-1:1}', + scheme_designator='UCUM' + ) + ), + NumContentItem( + name=codes.DCM.ImageOrientationPatientRowZ, + value=image_orientation[2], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=CodedConcept( + value='{-1:1}', + meaning='{-1:1}', + scheme_designator='UCUM' + ) + ), + NumContentItem( + name=codes.DCM.ImageOrientationPatientColumnX, + value=image_orientation[3], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=CodedConcept( + value='{-1:1}', + meaning='{-1:1}', + scheme_designator='UCUM' + ) + ), + NumContentItem( + name=codes.DCM.ImageOrientationPatientColumnY, + value=image_orientation[4], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=CodedConcept( + value='{-1:1}', + meaning='{-1:1}', + scheme_designator='UCUM' + ) + ), + NumContentItem( + name=codes.DCM.ImageOrientationPatientColumnZ, + value=image_orientation[5], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT, + unit=CodedConcept( + value='{-1:1}', + meaning='{-1:1}', + scheme_designator='UCUM' + ) + ) + ] + return descriptors + + def _is_cross_sectional(self, ds: Dataset) -> bool: + return ds.Modality in ['CT', 'MR', 'PT'] + + def _is_projection_radiography(self, ds: Dataset) -> bool: + return ds.Modality in ['CR', 'DX', 'IO', 'MG', 'PX', 'RF', 'RG', 'XA'] + class MeasurementReport(Template): @@ -3759,8 +4078,8 @@ def __init__( language_of_content_item_and_descendants: Optional[ LanguageOfContentItemAndDescendants ] = None, - image_library_groups: Optional[ - Sequence[ImageLibraryEntryDescriptors] + referenced_images: Optional[ + Sequence[Dataset] ] = None ): """ @@ -3782,8 +4101,8 @@ def __init__( 
language_of_content_item_and_descendants: Union[highdicom.sr.LanguageOfContentItemAndDescendants, None], optional specification of the language of report content items (defaults to English) - image_library_groups: Union[Sequence[highdicom.sr.ImageLibraryEntry], None], optional - Entry descriptors for each image library group + referenced_images: Union[Sequence[pydicom.Dataset], None], optional + Images that should be included in the library """ # noqa: E501 if title is None: @@ -3808,18 +4127,14 @@ def __init__( procedure_reported = [procedure_reported] for procedure in procedure_reported: procedure_item = CodeContentItem( - name=CodedConcept( - value='121058', - meaning='Procedure reported', - scheme_designator='DCM', - ), + name=codes.DCM.ProcedureReported, value=procedure, relationship_type=RelationshipTypeValues.HAS_CONCEPT_MOD ) item.ContentSequence.append(procedure_item) - - image_library_item = ImageLibrary(image_library_groups) - item.ContentSequence.extend(image_library_item) + if referenced_images: + image_library = ImageLibrary(referenced_images) + item.ContentSequence.extend(image_library) measurements: Union[ MeasurementsAndQualitativeEvaluations, @@ -3833,11 +4148,7 @@ def __init__( MeasurementsAndQualitativeEvaluations, ) container_item = ContainerContentItem( - name=CodedConcept( - value='126010', - meaning='Imaging Measurements', - scheme_designator='DCM' - ), + name=codes.DCM.ImagingMeasurements, relationship_type=RelationshipTypeValues.CONTAINS ) container_item.ContentSequence = ContentSequence() @@ -4625,51 +4936,105 @@ def get_image_measurement_groups( return sequences +class ImageLibraryEntry(Template): + + """:dcm:`TID 1601 Image Library Entry ` + """ # noqa: E501 + + def __init__( + self, + dataset: Dataset, + ) -> None: + """ + Parameters + ---------- + dataset: pydicom.dataset.Dataset + Image to include in image library + + """ + super().__init__() + + library_item_entry = ImageLibraryEntryDescriptors(dataset) + group_item = ContainerContentItem( + name=codes.DCM.ImageLibraryGroup, + relationship_type=RelationshipTypeValues.CONTAINS + ) + + group_item.ContentSequence = library_item_entry + self.append(group_item) + + class ImageLibrary(Template): - """:dcm:`TID 1600 ` Image Library""" + """:dcm:`TID 1600 Image Library ` + """ # noqa: E501 def __init__( self, - groups: Optional[Sequence[ImageLibraryEntryDescriptors]] = None + datasets: Sequence[Dataset] ) -> None: """ Parameters ---------- - groups: Union[Sequence[Sequence[highdicom.sr.ImageLibraryEntryDescriptors]], None], optional - Entry descriptors for each image library group + datasets: Sequence[pydicom.dataset.Dataset] + Image Datasets to include in image library. Non-image + objects will throw an exception. 
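A brief sketch of using the reworked Image Library template directly (file names hypothetical); images that share modality and Frame of Reference UID are collected into a single Image Library Group:

.. code-block:: python

    from pydicom import dcmread
    from highdicom.sr import ImageLibrary

    datasets = [dcmread('image_1.dcm'), dcmread('image_2.dcm')]

    library = ImageLibrary(datasets)
    library_container = library[0]                       # 'Image Library' container
    first_group = library_container.ContentSequence[0]   # 'Image Library Group'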
- """ # noqa: E501 + """ super().__init__() library_item = ContainerContentItem( - name=CodedConcept( - value='111028', - meaning='Image Library', - scheme_designator='DCM' - ), + name=codes.DCM.ImageLibrary, relationship_type=RelationshipTypeValues.CONTAINS ) - content = ContentSequence() - if groups is not None: - for descriptor_items in groups: - group_item = ContainerContentItem( + library_item.ContentSequence = ContentSequence() + if datasets is not None: + groups = collections.defaultdict(list) + for ds in datasets: + modality = _get_coded_modality(ds.SOPClassUID) + image_item = ImageContentItem( name=CodedConcept( - value='126200', - meaning='Image Library Group', - scheme_designator='DCM' + value='260753009', + meaning='Source', + scheme_designator='SCT' ), + referenced_sop_instance_uid=ds.SOPInstanceUID, + referenced_sop_class_uid=ds.SOPClassUID, relationship_type=RelationshipTypeValues.CONTAINS ) - group_item.ContentSequence = descriptor_items - # The Image Library Entry template contains the individual - # Image Library Entry Descriptors content items. - if not isinstance(descriptor_items, - ImageLibraryEntryDescriptors): - raise TypeError( - 'Image library group items must have type ' - '"ImageLibraryEntry".' + descriptors = ImageLibraryEntryDescriptors(ds) + + image_item.ContentSequence = ContentSequence() + image_item.ContentSequence.extend(descriptors) + if 'FrameOfReferenceUID' in ds: + # Only type 1 attributes + shared_descriptors = ( + modality, + ds.FrameOfReferenceUID, ) - content.append(group_item) - if len(content) > 0: - library_item.ContentSequence = content + else: + shared_descriptors = ( + modality, + ) + groups[shared_descriptors].append(image_item) + + for shared_descriptors, image_items in groups.items(): + image = image_items[0] + group_item = ContainerContentItem( + name=codes.DCM.ImageLibraryGroup, + relationship_type=RelationshipTypeValues.CONTAINS + ) + group_item.ContentSequence = ContentSequence() + + if 'FrameOfReferenceUID' in image: + group_item.ContentSequence.append( + UIDRefContentItem( + name=codes.DCM.FrameOfReferenceUID, + value=shared_descriptors[1], + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT # noqa: E501 + ) + ) + group_item.ContentSequence.extend(image_items) + if len(group_item) > 0: + library_item.ContentSequence.append(group_item) + self.append(library_item) diff --git a/tests/test_sr.py b/tests/test_sr.py index a49fcb29..956d36e4 100644 --- a/tests/test_sr.py +++ b/tests/test_sr.py @@ -3075,6 +3075,11 @@ class TestMeasurementReport(unittest.TestCase): def setUp(self): super().setUp() + file_path = Path(__file__) + data_dir = file_path.parent.parent.joinpath('data') + self._ref_dataset = dcmread( + str(data_dir.joinpath('test_files', 'ct_image.dcm')) + ) self._person_observer_name = 'Bar^Foo' self._observer_person_context = ObserverContext( observer_type=codes.cid270.Person, @@ -3182,7 +3187,8 @@ def test_construction_image(self): measurement_report = MeasurementReport( observation_context=self._observation_context, procedure_reported=self._procedure_reported, - imaging_measurements=[self._image_group] + imaging_measurements=[self._image_group], + referenced_images=[self._ref_dataset] ) item = measurement_report[0] assert len(item.ContentSequence) == 13 @@ -3281,7 +3287,7 @@ def test_construction_planar(self): imaging_measurements=[self._roi_group] ) item = measurement_report[0] - assert len(item.ContentSequence) == 13 + assert len(item.ContentSequence) == 12 template_item = item.ContentTemplateSequence[0] assert 
template_item.TemplateIdentifier == '1500' @@ -3303,10 +3309,8 @@ def test_construction_planar(self): (9, '111700'), # Procedure reported (10, '121058'), - # Image library - (11, '111028'), # Imaging measurements - (12, '126010'), + (11, '126010'), ] for index, value in content_item_expectations: content_item = item.ContentSequence[index] @@ -3359,7 +3363,7 @@ def test_construction_volumetric(self): imaging_measurements=[self._roi_group_3d] ) item = measurement_report[0] - assert len(item.ContentSequence) == 13 + assert len(item.ContentSequence) == 12 template_item = item.ContentTemplateSequence[0] assert template_item.TemplateIdentifier == '1500' @@ -3369,22 +3373,26 @@ def test_construction_volumetric(self): (0, '121049'), # Observer context - Person (1, '121005'), + # Observer Name - Person (2, '121008'), # Observer context - Device (3, '121005'), + # Observer UID - Device (4, '121012'), # Subject context - Specimen (5, '121024'), + # UID, Specimen (6, '121039'), + # Identifier, Specimen (7, '121041'), + # Type, Specimen (8, '371439000'), + # Container Identifier, Specimen (9, '111700'), # Procedure reported (10, '121058'), - # Image library - (11, '111028'), # Imaging measurements - (12, '126010'), + (11, '126010'), ] for index, value in content_item_expectations: content_item = item.ContentSequence[index] @@ -4905,12 +4913,110 @@ class TestImageLibraryEntryDescriptors(unittest.TestCase): def setUp(self): super().setUp() + file_path = Path(__file__) + data_dir = file_path.parent.parent.joinpath('data') + self._ref_ct_dataset = dcmread( + str(data_dir.joinpath('test_files', 'ct_image.dcm')) + ) + self._ref_sm_dataset = dcmread( + str(data_dir.joinpath('test_files', 'sm_image.dcm')) + ) + self._ref_dx_dataset = dcmread( + str(data_dir.joinpath('test_files', 'dx_image.dcm')) + ) - def test_construction(self): - modality = codes.cid29.SlideMicroscopy - frame_of_reference_uid = '1.2.3' - pixel_data_rows = 10 - pixel_data_columns = 20 + def test_ct_construction(self): + group = ImageLibraryEntryDescriptors( + image=self._ref_ct_dataset, + ) + assert len(group) == 17 + assert isinstance(group[0], CodeContentItem) + assert group[0].name == codes.DCM.Modality + assert group[0].value == codes.cid29.ComputedTomography + assert isinstance(group[1], UIDRefContentItem) + assert group[1].name == codes.DCM.FrameOfReferenceUID + assert group[1].value == self._ref_ct_dataset.FrameOfReferenceUID + assert isinstance(group[2], NumContentItem) + assert group[2].name == codes.DCM.PixelDataRows + assert group[2].value == self._ref_ct_dataset.Rows + assert isinstance(group[3], NumContentItem) + assert group[3].name == codes.DCM.PixelDataColumns + assert group[3].value == self._ref_ct_dataset.Columns + assert isinstance(group[4], NumContentItem) + assert group[4].name == codes.DCM.HorizontalPixelSpacing + assert group[4].value == self._ref_ct_dataset.PixelSpacing[0] + value_item = group[4].MeasuredValueSequence[0] + unit_code_item = value_item.MeasurementUnitsCodeSequence[0] + assert unit_code_item.CodeValue == 'mm' + assert unit_code_item.CodeMeaning == 'millimeter' + assert unit_code_item.CodingSchemeDesignator == 'UCUM' + assert isinstance(group[5], NumContentItem) + assert group[5].name == codes.DCM.VerticalPixelSpacing + assert group[5].value == self._ref_ct_dataset.PixelSpacing[1] + assert isinstance(group[6], NumContentItem) + assert group[6].name == codes.DCM.SpacingBetweenSlices + assert group[6].value == self._ref_ct_dataset.SpacingBetweenSlices + value_item = group[6].MeasuredValueSequence[0] + 
unit_code_item = value_item.MeasurementUnitsCodeSequence[0] + assert unit_code_item.CodeValue == 'mm' + assert unit_code_item.CodeMeaning == 'millimeter' + assert unit_code_item.CodingSchemeDesignator == 'UCUM' + assert isinstance(group[7], NumContentItem) + assert group[7].name == codes.DCM.SliceThickness + assert group[7].value == self._ref_ct_dataset.SliceThickness + assert isinstance(group[8], NumContentItem) + assert group[8].name == codes.DCM.ImagePositionPatientX + assert group[8].value == self._ref_ct_dataset.ImagePositionPatient[0] + assert isinstance(group[9], NumContentItem) + assert group[9].name == codes.DCM.ImagePositionPatientY + assert group[9].value == self._ref_ct_dataset.ImagePositionPatient[1] + assert isinstance(group[10], NumContentItem) + assert group[10].name == codes.DCM.ImagePositionPatientZ + assert group[10].value == self._ref_ct_dataset.ImagePositionPatient[2] + assert isinstance(group[11], NumContentItem) + assert group[11].name == codes.DCM.ImageOrientationPatientRowX + assert group[11].value == \ + self._ref_ct_dataset.ImageOrientationPatient[0] + value_item = group[11].MeasuredValueSequence[0] + unit_code_item = value_item.MeasurementUnitsCodeSequence[0] + assert unit_code_item.CodeValue == '{-1:1}' + assert unit_code_item.CodeMeaning == '{-1:1}' + assert unit_code_item.CodingSchemeDesignator == 'UCUM' + assert isinstance(group[12], NumContentItem) + assert group[12].name == codes.DCM.ImageOrientationPatientRowY + assert group[12].value == \ + self._ref_ct_dataset.ImageOrientationPatient[1] + assert isinstance(group[13], NumContentItem) + assert group[13].name == codes.DCM.ImageOrientationPatientRowZ + assert group[13].value == \ + self._ref_ct_dataset.ImageOrientationPatient[2] + assert isinstance(group[14], NumContentItem) + assert group[14].name == codes.DCM.ImageOrientationPatientColumnX + assert group[14].value == \ + self._ref_ct_dataset.ImageOrientationPatient[3] + assert isinstance(group[15], NumContentItem) + assert group[15].name == codes.DCM.ImageOrientationPatientColumnY + assert group[15].value == \ + self._ref_ct_dataset.ImageOrientationPatient[4] + assert isinstance(group[16], NumContentItem) + assert group[16].name == codes.DCM.ImageOrientationPatientColumnZ + assert group[16].value == \ + self._ref_ct_dataset.ImageOrientationPatient[5] + value_item = group[16].MeasuredValueSequence[0] + unit_code_item = value_item.MeasurementUnitsCodeSequence[0] + assert unit_code_item.CodeValue == '{-1:1}' + assert unit_code_item.CodeMeaning == '{-1:1}' + assert unit_code_item.CodingSchemeDesignator == 'UCUM' + + def test_bad_ct_construction(self): + # Test failure of ImageLibraryDescriptors with 'bad' image. 
+ del self._ref_ct_dataset.Rows + with pytest.raises(AttributeError): + ImageLibraryEntryDescriptors( + image=self._ref_ct_dataset, + ) + + def test_sm_construction(self): content_date = datetime.now().date() content_time = datetime.now().time() content_date_item = DateContentItem( @@ -4924,25 +5030,22 @@ def test_construction(self): relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT ) group = ImageLibraryEntryDescriptors( - modality=modality, - frame_of_reference_uid=frame_of_reference_uid, - pixel_data_rows=pixel_data_rows, - pixel_data_columns=pixel_data_columns, + image=self._ref_sm_dataset, additional_descriptors=[content_date_item, content_time_item] ) assert len(group) == 6 assert isinstance(group[0], CodeContentItem) assert group[0].name == codes.DCM.Modality - assert group[0].value == modality + assert group[0].value == codes.cid29.SlideMicroscopy assert isinstance(group[1], UIDRefContentItem) assert group[1].name == codes.DCM.FrameOfReferenceUID - assert group[1].value == frame_of_reference_uid + assert group[1].value == self._ref_sm_dataset.FrameOfReferenceUID assert isinstance(group[2], NumContentItem) assert group[2].name == codes.DCM.PixelDataRows - assert group[2].value == pixel_data_rows + assert group[2].value == self._ref_sm_dataset.Rows assert isinstance(group[3], NumContentItem) assert group[3].name == codes.DCM.PixelDataColumns - assert group[3].value == pixel_data_columns + assert group[3].value == self._ref_sm_dataset.Columns assert isinstance(group[4], DateContentItem) assert group[4].name == codes.DCM.ContentDate assert group[4].value == content_date @@ -4950,6 +5053,55 @@ def test_construction(self): assert group[5].name == codes.DCM.ContentTime assert group[5].value == content_time + def test_dx_construction(self): + content_date = datetime.now().date() + content_time = datetime.now().time() + imager_pixel_spacing = self._ref_dx_dataset.ImagerPixelSpacing + patient_orientation = self._ref_dx_dataset.PatientOrientation + + content_date_item = DateContentItem( + name=codes.DCM.ContentDate, + value=content_date, + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT + ) + content_time_item = TimeContentItem( + name=codes.DCM.ContentTime, + value=content_time, + relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT + ) + group = ImageLibraryEntryDescriptors( + image=self._ref_dx_dataset, + additional_descriptors=[content_date_item, content_time_item] + ) + assert len(group) == 9 + assert isinstance(group[0], CodeContentItem) + assert group[0].name == codes.DCM.Modality + assert group[0].value == codes.cid29.DigitalRadiography + assert isinstance(group[1], NumContentItem) + assert group[1].name == codes.DCM.PixelDataRows + assert group[1].value == self._ref_dx_dataset.Rows + assert isinstance(group[2], NumContentItem) + assert group[2].name == codes.DCM.PixelDataColumns + assert group[2].value == self._ref_dx_dataset.Columns + assert isinstance(group[3], TextContentItem) + assert group[3].name == codes.DCM.PatientOrientationRow + assert group[3].value == patient_orientation[0] + assert isinstance(group[4], TextContentItem) + assert group[4].name == codes.DCM.PatientOrientationColumn + assert group[4].value == patient_orientation[1] + assert isinstance(group[5], NumContentItem) + assert group[5].name == codes.DCM.HorizontalPixelSpacing + assert group[5].value == imager_pixel_spacing[1] + assert isinstance(group[6], NumContentItem) + assert group[6].name == codes.DCM.VerticalPixelSpacing + assert group[6].value == imager_pixel_spacing[0] + assert 
isinstance(group[7], DateContentItem) + assert group[7].name == codes.DCM.ContentDate + assert group[7].value == content_date + assert isinstance(group[8], TimeContentItem) + assert group[8].name == codes.DCM.ContentTime + assert group[8].value == content_time + class TestImageLibrary(unittest.TestCase): @@ -4957,18 +5109,24 @@ def setUp(self): super().setUp() def test_construction(self): - modality = codes.cid29.SlideMicroscopy - frame_of_reference_uid = '1.2.3' - pixel_data_rows = 10 - pixel_data_columns = 20 - descriptor_items = ImageLibraryEntryDescriptors( - modality=modality, - frame_of_reference_uid=frame_of_reference_uid, - pixel_data_rows=pixel_data_rows, - pixel_data_columns=pixel_data_columns, + file_path = Path(__file__) + data_dir = file_path.parent.parent.joinpath('data') + self._ref_sm_dataset = dcmread( + str(data_dir.joinpath('test_files', 'sm_image.dcm')) ) - library_items = ImageLibrary(groups=[descriptor_items]) + + library_items = ImageLibrary([self._ref_sm_dataset]) assert len(library_items) == 1 library_group_item = library_items[0].ContentSequence[0] - assert len(library_group_item.ContentSequence) == len(descriptor_items) + assert len(library_group_item.ContentSequence) == 1 assert library_group_item.name == codes.DCM.ImageLibraryGroup + content_item = library_group_item.ContentSequence[0] + assert isinstance(content_item, ImageContentItem) + ref_sop_instance_uid = \ + content_item.ReferencedSOPSequence[0].ReferencedSOPInstanceUID + ref_sop_class_uid = \ + content_item.ReferencedSOPSequence[0].ReferencedSOPClassUID + assert ref_sop_instance_uid == \ + self._ref_sm_dataset.SOPInstanceUID + assert ref_sop_class_uid == \ + self._ref_sm_dataset.SOPClassUID From d03f4c867013ca1bc462f15efb1b3c8dd1794f5f Mon Sep 17 00:00:00 2001 From: hackermd Date: Fri, 19 Aug 2022 18:06:20 -0400 Subject: [PATCH 09/26] Increase package version --- src/highdicom/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/version.py b/src/highdicom/version.py index 949f02c2..482e4a19 100644 --- a/src/highdicom/version.py +++ b/src/highdicom/version.py @@ -1 +1 @@ -__version__ = '0.18.4' +__version__ = '0.19.0' From 6d80f5ade2936ab3d87959bddcea7d8548696807 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 14:28:05 -0400 Subject: [PATCH 10/26] Move pylibjpeg-libjpeg to optional dependency --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 24323590..457858d3 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,9 @@ def get_version(): 'pillow>=8.3', 'pillow-jpls>=1.0', 'pylibjpeg>=1.4', - 'pylibjpeg-libjpeg>=1.3', 'pylibjpeg-openjpeg>=1.2', ], + extras_requires={ + 'libjpeg': ['pylibjpeg-libjpeg>=1.3'], + }, ) From 3a04c754b83612f3917b80a791f3ad2b0e4e440d Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 16:45:49 -0400 Subject: [PATCH 11/26] Add skips for tests that need libjpeg, restructure segmentation tests --- src/highdicom/frame.py | 13 +- tests/test_frame.py | 2 + tests/test_pm.py | 1 + tests/test_sc.py | 2 + tests/test_seg.py | 653 +++++++++++++++++++++-------------------- 5 files changed, 358 insertions(+), 313 deletions(-) diff --git a/src/highdicom/frame.py b/src/highdicom/frame.py index 6055d38d..aab3e6a0 100644 --- a/src/highdicom/frame.py +++ b/src/highdicom/frame.py @@ -258,7 +258,18 @@ def encode_frame( ) elif transfer_syntax_uid == JPEGLSLossless: - import pillow_jpls # noqa + try: + import pillow_jpls # noqa + except ImportError as e: + raise ImportError( + 
'In order to encode images using the JPEGLSLossless ' + 'transfer syntax, highdicom requires the ' + '"pylibjpeg-libjpeg" package to be installed. This package ' + 'is not installed by default as it uses a copyleft GPL v3 ' + 'license. To accept the terms of the GPL v3 license and ' + 'install highdicom with the "pylibjpeg-libjpeg" package, ' + 'use "pip install highdicom[libjpeg]".' + ) from e if samples_per_pixel == 1: if planar_configuration is not None: raise ValueError( diff --git a/tests/test_frame.py b/tests/test_frame.py index c10b1417..2e4d424d 100644 --- a/tests/test_frame.py +++ b/tests/test_frame.py @@ -215,6 +215,7 @@ def test_jpeg2000_monochrome(self): np.testing.assert_array_equal(frame, decoded_frame) def test_jpegls_rgb(self): + pytest.importorskip("libjpeg") bits_allocated = 8 frame = np.ones((16, 32, 3), dtype=np.dtype(f'uint{bits_allocated}')) frame *= 255 @@ -244,6 +245,7 @@ def test_jpegls_rgb(self): np.testing.assert_array_equal(frame, decoded_frame) def test_jpegls_monochrome(self): + pytest.importorskip("libjpeg") bits_allocated = 16 frame = np.zeros((16, 32), dtype=np.dtype(f'uint{bits_allocated}')) compressed_frame = encode_frame( diff --git a/tests/test_pm.py b/tests/test_pm.py index f4b504ef..aa297da4 100644 --- a/tests/test_pm.py +++ b/tests/test_pm.py @@ -584,6 +584,7 @@ def test_multi_frame_sm_image_ushort_encapsulated_jpeg2000(self): assert np.array_equal(pmap.pixel_array, pixel_array) def test_multi_frame_sm_image_ushort_encapsulated_jpegls(self): + pytest.importorskip("libjpeg") pixel_array = np.random.randint( low=0, high=2**8, diff --git a/tests/test_sc.py b/tests/test_sc.py index ab3eab49..72e6636e 100644 --- a/tests/test_sc.py +++ b/tests/test_sc.py @@ -428,6 +428,7 @@ def test_rgb_jpeg2000(self): ) def test_monochrome_jpegls(self): + pytest.importorskip("libjpeg") bits_allocated = 16 photometric_interpretation = 'MONOCHROME2' coordinate_system = 'PATIENT' @@ -455,6 +456,7 @@ def test_monochrome_jpegls(self): ) def test_rgb_jpegls(self): + pytest.importorskip("libjpeg") bits_allocated = 8 photometric_interpretation = 'YBR_FULL' coordinate_system = 'PATIENT' diff --git a/tests/test_seg.py b/tests/test_seg.py index c7547ba8..b00d711a 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -554,10 +554,10 @@ def test_construction_2(self): assert seq[5].FunctionalGroupPointer == 0x0048021A -class TestSegmentation(unittest.TestCase): +class TestSegmentation: + @pytest.fixture(autouse=True) def setUp(self): - super().setUp() file_path = Path(__file__) data_dir = file_path.parent.parent.joinpath('data') self._segmented_property_category = \ @@ -683,6 +683,47 @@ def setUp(self): ) self._ct_multiframe_mask_array[:, 100:200, 200:400] = True + self._tests = { + 'ct-image': ([self._ct_image], self._ct_pixel_array), + 'sm-image': ([self._sm_image], self._sm_pixel_array), + 'ct-series': (self._ct_series, self._ct_series_mask_array), + 'ct-multiframe': ( + [self._ct_multiframe], self._ct_multiframe_mask_array + ), + } + + # Fixtures to use to parametrize segmentation creation + # Using this fixture mechanism, we can parametrize class methods + @staticmethod + @pytest.fixture(params=[ExplicitVRLittleEndian, ImplicitVRLittleEndian]) + def binary_transfer_syntax_uid(request): + return request.param + + @staticmethod + @pytest.fixture( + params=[ + ExplicitVRLittleEndian, + ImplicitVRLittleEndian, + RLELossless, + JPEG2000Lossless, + JPEGLSLossless, + ] + ) + def fractional_transfer_syntax_uid(request): + return request.param + + @staticmethod + 
@pytest.fixture(params=[np.bool_, np.uint8, np.uint16, np.float_]) + def pix_type(request): + return request.param + + @staticmethod + @pytest.fixture( + params=['ct-image', 'sm-image', 'ct-series', 'ct-multiframe'], + ) + def test_data(request): + return request.param + @staticmethod def sort_frames(sources, mask): src = sources[0] @@ -1365,325 +1406,313 @@ def test_construction_7(self): assert SegmentsOverlapValues[instance.SegmentsOverlap] == \ SegmentsOverlapValues.NO - def test_pixel_types(self): - # A series of tests on different types of image - tests = [ - ([self._ct_image], self._ct_pixel_array), - ([self._sm_image], self._sm_pixel_array), - (self._ct_series, self._ct_series_mask_array), - ([self._ct_multiframe], self._ct_multiframe_mask_array), - ] + def test_pixel_types_fractional( + self, + fractional_transfer_syntax_uid, + pix_type, + test_data, + ): + if fractional_transfer_syntax_uid == JPEGLSLossless: + pytest.importorskip("libjpeg") + + sources, mask = self._tests[test_data] + + # Two segments, overlapping + multi_segment_overlap = np.stack([mask, mask], axis=-1) + if multi_segment_overlap.ndim == 3: + multi_segment_overlap = multi_segment_overlap[np.newaxis, ...] + + # Two segments non-overlapping + multi_segment_exc = np.stack([mask, 1 - mask], axis=-1) + if multi_segment_exc.ndim == 3: + multi_segment_exc = multi_segment_exc[np.newaxis, ...] + additional_mask = 1 - mask + + # Find the expected encodings for the masks + if mask.ndim > 2: + # Expected encoding of the mask + expected_encoding = self.sort_frames( + sources, + mask + ) + expected_encoding = self.remove_empty_frames( + expected_encoding + ) - for sources, mask in tests: - - # Two segments, overlapping - multi_segment_overlap = np.stack([mask, mask], axis=-1) - if multi_segment_overlap.ndim == 3: - multi_segment_overlap = multi_segment_overlap[np.newaxis, ...] - - # Two segments non-overlapping - multi_segment_exc = np.stack([mask, 1 - mask], axis=-1) - if multi_segment_exc.ndim == 3: - multi_segment_exc = multi_segment_exc[np.newaxis, ...] 
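A standalone sketch of the fixture-based parametrization pattern used in these restructured tests, independent of the surrounding method (names are illustrative):

.. code-block:: python

    import pytest


    @pytest.fixture(params=['ct-image', 'sm-image'])
    def sample_key(request):
        # Each parameter value produces one separate test invocation
        return request.param


    class TestExample:

        def test_roundtrip(self, sample_key):
            # Skip this case when the optional decoder plugin is unavailable
            pytest.importorskip('libjpeg')
            assert sample_key in ('ct-image', 'sm-image')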
- additional_mask = 1 - mask - - # Find the expected encodings for the masks - if mask.ndim > 2: - # Expected encoding of the mask - expected_encoding = self.sort_frames( - sources, - mask - ) - expected_encoding = self.remove_empty_frames( - expected_encoding - ) + # Expected encoding of the complement + expected_encoding_comp = self.sort_frames( + sources, + additional_mask + ) + expected_encoding_comp = self.remove_empty_frames( + expected_encoding_comp + ) - # Expected encoding of the complement - expected_encoding_comp = self.sort_frames( - sources, - additional_mask - ) - expected_encoding_comp = self.remove_empty_frames( - expected_encoding_comp - ) + # Expected encoding of the multi segment arrays + expected_enc_overlap = np.concatenate( + [expected_encoding, expected_encoding], + axis=0 + ) + expected_enc_exc = np.concatenate( + [expected_encoding, expected_encoding_comp], + axis=0 + ) + expected_encoding = expected_encoding.squeeze() + else: + expected_encoding = mask - # Expected encoding of the multi segment arrays - expected_enc_overlap = np.concatenate( - [expected_encoding, expected_encoding], - axis=0 - ) - expected_enc_exc = np.concatenate( - [expected_encoding, expected_encoding_comp], - axis=0 - ) - expected_encoding = expected_encoding.squeeze() - else: - expected_encoding = mask + # Expected encoding of the multi segment arrays + expected_enc_overlap = np.stack( + [expected_encoding, expected_encoding], + axis=0 + ) + expected_enc_exc = np.stack( + [expected_encoding, 1 - expected_encoding], + axis=0 + ) - # Expected encoding of the multi segment arrays - expected_enc_overlap = np.stack( - [expected_encoding, expected_encoding], - axis=0 - ) - expected_enc_exc = np.stack( - [expected_encoding, 1 - expected_encoding], - axis=0 - ) + max_fractional_value = 255 + instance = Segmentation( + sources, + mask.astype(pix_type), + SegmentationTypeValues.FRACTIONAL.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=max_fractional_value, + transfer_syntax_uid=fractional_transfer_syntax_uid + ) - # Test instance creation for different pixel types and transfer - # syntaxes - valid_transfer_syntaxes = [ - ExplicitVRLittleEndian, - ImplicitVRLittleEndian, - RLELossless, - JPEG2000Lossless, - JPEGLSLossless, - ] - - max_fractional_value = 255 - for transfer_syntax_uid in valid_transfer_syntaxes: - for pix_type in [np.bool_, np.uint8, np.uint16, np.float_]: - instance = Segmentation( - sources, - mask.astype(pix_type), - SegmentationTypeValues.FRACTIONAL.value, - self._segment_descriptions, - self._series_instance_uid, - self._series_number, - self._sop_instance_uid, - self._instance_number, - self._manufacturer, - self._manufacturer_model_name, - self._software_versions, - self._device_serial_number, - max_fractional_value=max_fractional_value, - transfer_syntax_uid=transfer_syntax_uid - ) - - # Ensure the recovered pixel array matches what is expected - if pix_type in (np.bool_, np.float_): - assert np.array_equal( - self.get_array_after_writing(instance), - expected_encoding * max_fractional_value - ), f'{sources[0].Modality} {transfer_syntax_uid}' - else: - assert np.array_equal( - self.get_array_after_writing(instance), - expected_encoding - ), f'{sources[0].Modality} {transfer_syntax_uid}' - self.check_dimension_index_vals(instance) - - # Multi-segment 
(exclusive) - instance = Segmentation( - sources, - multi_segment_exc.astype(pix_type), - SegmentationTypeValues.FRACTIONAL.value, - self._both_segment_descriptions, - self._series_instance_uid, - self._series_number, - self._sop_instance_uid, - self._instance_number, - self._manufacturer, - self._manufacturer_model_name, - self._software_versions, - self._device_serial_number, - max_fractional_value=1, - transfer_syntax_uid=transfer_syntax_uid - ) - if pix_type == np.float_: - assert ( - instance.SegmentsOverlap == - SegmentsOverlapValues.UNDEFINED.value - ) - else: - assert ( - instance.SegmentsOverlap == - SegmentsOverlapValues.NO.value - ) - - assert np.array_equal( - self.get_array_after_writing(instance), - expected_enc_exc - ), f'{sources[0].Modality} {transfer_syntax_uid}' - self.check_dimension_index_vals(instance) - - # Multi-segment (overlapping) - instance = Segmentation( - sources, - multi_segment_overlap.astype(pix_type), - SegmentationTypeValues.FRACTIONAL.value, - self._both_segment_descriptions, - self._series_instance_uid, - self._series_number, - self._sop_instance_uid, - self._instance_number, - self._manufacturer, - self._manufacturer_model_name, - self._software_versions, - self._device_serial_number, - max_fractional_value=1, - transfer_syntax_uid=transfer_syntax_uid - ) - if pix_type == np.float_: - assert ( - instance.SegmentsOverlap == - SegmentsOverlapValues.UNDEFINED.value - ) - else: - assert ( - instance.SegmentsOverlap == - SegmentsOverlapValues.YES.value - ) - - assert np.array_equal( - self.get_array_after_writing(instance), - expected_enc_overlap - ), f'{sources[0].Modality} {transfer_syntax_uid}' - self.check_dimension_index_vals(instance) - - for sources, mask in tests: - # Two segments, overlapping - multi_segment_overlap = np.stack([mask, mask], axis=-1) - if multi_segment_overlap.ndim == 3: - multi_segment_overlap = multi_segment_overlap[np.newaxis, ...] - - # Two segments non-overlapping - multi_segment_exc = np.stack([mask, 1 - mask], axis=-1) - - if multi_segment_exc.ndim == 3: - multi_segment_exc = multi_segment_exc[np.newaxis, ...] 
- additional_mask = 1 - mask - - additional_mask = (1 - mask) - # Find the expected encodings for the masks - if mask.ndim > 2: - # Expected encoding of the mask - expected_encoding = self.sort_frames( - sources, - mask - ) - expected_encoding = self.remove_empty_frames( - expected_encoding - ) + # Ensure the recovered pixel array matches what is expected + if pix_type in (np.bool_, np.float_): + assert np.array_equal( + self.get_array_after_writing(instance), + expected_encoding * max_fractional_value + ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}' + else: + assert np.array_equal( + self.get_array_after_writing(instance), + expected_encoding + ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}' + self.check_dimension_index_vals(instance) - # Expected encoding of the complement - expected_encoding_comp = self.sort_frames( - sources, - additional_mask - ) - expected_encoding_comp = self.remove_empty_frames( - expected_encoding_comp - ) + # Multi-segment (exclusive) + instance = Segmentation( + sources, + multi_segment_exc.astype(pix_type), + SegmentationTypeValues.FRACTIONAL.value, + self._both_segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=1, + transfer_syntax_uid=fractional_transfer_syntax_uid + ) + if pix_type == np.float_: + assert ( + instance.SegmentsOverlap == + SegmentsOverlapValues.UNDEFINED.value + ) + else: + assert ( + instance.SegmentsOverlap == + SegmentsOverlapValues.NO.value + ) - # Expected encoding of the multi segment arrays - expected_enc_overlap = np.concatenate( - [expected_encoding, expected_encoding], - axis=0 - ) - expected_enc_exc = np.concatenate( - [expected_encoding, expected_encoding_comp], - axis=0 - ) - expected_encoding = expected_encoding.squeeze() - else: - expected_encoding = mask + assert np.array_equal( + self.get_array_after_writing(instance), + expected_enc_exc + ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}' + self.check_dimension_index_vals(instance) - # Expected encoding of the multi segment arrays - expected_enc_overlap = np.stack( - [expected_encoding, expected_encoding], - axis=0 - ) - expected_enc_exc = np.stack( - [expected_encoding, 1 - expected_encoding], - axis=0 - ) + # Multi-segment (overlapping) + instance = Segmentation( + sources, + multi_segment_overlap.astype(pix_type), + SegmentationTypeValues.FRACTIONAL.value, + self._both_segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=1, + transfer_syntax_uid=fractional_transfer_syntax_uid + ) + if pix_type == np.float_: + assert ( + instance.SegmentsOverlap == + SegmentsOverlapValues.UNDEFINED.value + ) + else: + assert ( + instance.SegmentsOverlap == + SegmentsOverlapValues.YES.value + ) + + assert np.array_equal( + self.get_array_after_writing(instance), + expected_enc_overlap + ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}' + self.check_dimension_index_vals(instance) + + def test_pixel_types_binary( + self, + binary_transfer_syntax_uid, + pix_type, + test_data, + ): + sources, mask = self._tests[test_data] + + # Two segments, overlapping + multi_segment_overlap = np.stack([mask, mask], axis=-1) + if 
multi_segment_overlap.ndim == 3: + multi_segment_overlap = multi_segment_overlap[np.newaxis, ...] + + # Two segments non-overlapping + multi_segment_exc = np.stack([mask, 1 - mask], axis=-1) + + if multi_segment_exc.ndim == 3: + multi_segment_exc = multi_segment_exc[np.newaxis, ...] + additional_mask = 1 - mask + + additional_mask = (1 - mask) + # Find the expected encodings for the masks + if mask.ndim > 2: + # Expected encoding of the mask + expected_encoding = self.sort_frames( + sources, + mask + ) + expected_encoding = self.remove_empty_frames( + expected_encoding + ) + + # Expected encoding of the complement + expected_encoding_comp = self.sort_frames( + sources, + additional_mask + ) + expected_encoding_comp = self.remove_empty_frames( + expected_encoding_comp + ) + + # Expected encoding of the multi segment arrays + expected_enc_overlap = np.concatenate( + [expected_encoding, expected_encoding], + axis=0 + ) + expected_enc_exc = np.concatenate( + [expected_encoding, expected_encoding_comp], + axis=0 + ) + expected_encoding = expected_encoding.squeeze() + else: + expected_encoding = mask + + # Expected encoding of the multi segment arrays + expected_enc_overlap = np.stack( + [expected_encoding, expected_encoding], + axis=0 + ) + expected_enc_exc = np.stack( + [expected_encoding, 1 - expected_encoding], + axis=0 + ) + + instance = Segmentation( + sources, + mask.astype(pix_type), + SegmentationTypeValues.BINARY.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=1, + transfer_syntax_uid=binary_transfer_syntax_uid + ) + + # Ensure the recovered pixel array matches what is expected + assert np.array_equal( + self.get_array_after_writing(instance), + expected_encoding + ), f'{sources[0].Modality} {binary_transfer_syntax_uid}' + self.check_dimension_index_vals(instance) - valid_transfer_syntaxes = [ - ExplicitVRLittleEndian, - ImplicitVRLittleEndian, - ] - - for transfer_syntax_uid in valid_transfer_syntaxes: - for pix_type in [np.bool_, np.uint8, np.uint16, np.float_]: - instance = Segmentation( - sources, - mask.astype(pix_type), - SegmentationTypeValues.BINARY.value, - self._segment_descriptions, - self._series_instance_uid, - self._series_number, - self._sop_instance_uid, - self._instance_number, - self._manufacturer, - self._manufacturer_model_name, - self._software_versions, - self._device_serial_number, - max_fractional_value=1, - transfer_syntax_uid=transfer_syntax_uid - ) - - # Ensure the recovered pixel array matches what is expected - assert np.array_equal( - self.get_array_after_writing(instance), - expected_encoding - ), f'{sources[0].Modality} {transfer_syntax_uid}' - self.check_dimension_index_vals(instance) - - # Multi-segment (exclusive) - instance = Segmentation( - sources, - multi_segment_exc.astype(pix_type), - SegmentationTypeValues.BINARY.value, - self._both_segment_descriptions, - self._series_instance_uid, - self._series_number, - self._sop_instance_uid, - self._instance_number, - self._manufacturer, - self._manufacturer_model_name, - self._software_versions, - self._device_serial_number, - max_fractional_value=1, - transfer_syntax_uid=transfer_syntax_uid - ) - assert ( - instance.SegmentsOverlap == - SegmentsOverlapValues.NO.value - ) - - assert np.array_equal( - self.get_array_after_writing(instance), - expected_enc_exc - ), 
f'{sources[0].Modality} {transfer_syntax_uid}' - self.check_dimension_index_vals(instance) - - # Multi-segment (overlapping) - instance = Segmentation( - sources, - multi_segment_overlap.astype(pix_type), - SegmentationTypeValues.BINARY.value, - self._both_segment_descriptions, - self._series_instance_uid, - self._series_number, - self._sop_instance_uid, - self._instance_number, - self._manufacturer, - self._manufacturer_model_name, - self._software_versions, - self._device_serial_number, - max_fractional_value=1, - transfer_syntax_uid=transfer_syntax_uid - ) - assert ( - instance.SegmentsOverlap == - SegmentsOverlapValues.YES.value - ) - - assert np.array_equal( - self.get_array_after_writing(instance), - expected_enc_overlap - ), f'{sources[0].Modality} {transfer_syntax_uid}' - self.check_dimension_index_vals(instance) + # Multi-segment (exclusive) + instance = Segmentation( + sources, + multi_segment_exc.astype(pix_type), + SegmentationTypeValues.BINARY.value, + self._both_segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=1, + transfer_syntax_uid=binary_transfer_syntax_uid + ) + assert ( + instance.SegmentsOverlap == + SegmentsOverlapValues.NO.value + ) + + assert np.array_equal( + self.get_array_after_writing(instance), + expected_enc_exc + ), f'{sources[0].Modality} {binary_transfer_syntax_uid}' + self.check_dimension_index_vals(instance) + + # Multi-segment (overlapping) + instance = Segmentation( + sources, + multi_segment_overlap.astype(pix_type), + SegmentationTypeValues.BINARY.value, + self._both_segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=1, + transfer_syntax_uid=binary_transfer_syntax_uid + ) + assert ( + instance.SegmentsOverlap == + SegmentsOverlapValues.YES.value + ) + + assert np.array_equal( + self.get_array_after_writing(instance), + expected_enc_overlap + ), f'{sources[0].Modality} {binary_transfer_syntax_uid}' + self.check_dimension_index_vals(instance) def test_odd_number_pixels(self): # Test that an image with an odd number of pixels per frame is encoded From d1e632dcb6cdc7a1522c7bb70a6e40734543b1b3 Mon Sep 17 00:00:00 2001 From: Christopher Bridge Date: Mon, 10 Oct 2022 17:37:41 -0400 Subject: [PATCH 12/26] Remove unnecessary import error --- src/highdicom/frame.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/src/highdicom/frame.py b/src/highdicom/frame.py index aab3e6a0..6055d38d 100644 --- a/src/highdicom/frame.py +++ b/src/highdicom/frame.py @@ -258,18 +258,7 @@ def encode_frame( ) elif transfer_syntax_uid == JPEGLSLossless: - try: - import pillow_jpls # noqa - except ImportError as e: - raise ImportError( - 'In order to encode images using the JPEGLSLossless ' - 'transfer syntax, highdicom requires the ' - '"pylibjpeg-libjpeg" package to be installed. This package ' - 'is not installed by default as it uses a copyleft GPL v3 ' - 'license. To accept the terms of the GPL v3 license and ' - 'install highdicom with the "pylibjpeg-libjpeg" package, ' - 'use "pip install highdicom[libjpeg]".' 
- ) from e + import pillow_jpls # noqa if samples_per_pixel == 1: if planar_configuration is not None: raise ValueError( From 89d1343fa4b79ed16b79fa8265c3c90b153af0d6 Mon Sep 17 00:00:00 2001 From: Christopher Bridge Date: Mon, 10 Oct 2022 17:45:38 -0400 Subject: [PATCH 13/26] Update installation docs --- docs/installation.rst | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/docs/installation.rst b/docs/installation.rst index decbd4d3..fb961f57 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -22,7 +22,19 @@ Pre-build package available at PyPi: pip install highdicom -Source code available at Github: +Like the underlying ``pydicom`` package, highdicom relies on functionality +implemented in the ``pylibjpeg-libjpeg`` +`package `_ for the decoding of +DICOM images with certain transfer syntaxes. Since ``pylibjpeg-libjpeg`` is +licensed under a copyleft GPL v3 license, it is not installed by default when +you install highdicom. To install ``pylibjpeg-libjpeg`` along with highdicom, +use + +.. code-block:: none + + pip install highdicom[libjpeg] + +Install directly from source code (available on Github): .. code-block:: none From 26561622203b532cdde163214ef3330b660b50e1 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 17:49:21 -0400 Subject: [PATCH 14/26] Bump python version in installation docs to match setup.py --- docs/installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.rst b/docs/installation.rst index fb961f57..6c8086b3 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -8,7 +8,7 @@ Installation guide Requirements ------------ -* `Python `_ (version 3.5 or higher) +* `Python `_ (version 3.6 or higher) * Python package manager `pip `_ .. _installation: From 0ca578477ac112fc56b9f94867d0b733b3346548 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 17:57:30 -0400 Subject: [PATCH 15/26] Add libjpeg to CI workflow --- .github/workflows/run_unit_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml index 29a380f2..53d7e927 100644 --- a/.github/workflows/run_unit_tests.yml +++ b/.github/workflows/run_unit_tests.yml @@ -27,7 +27,7 @@ jobs: run: | python -m pip install --upgrade pip setuptools pip install -r requirements_test.txt - pip install . 
+ pip install ".[libjpeg]" - name: Lint with flake8 run: | flake8 --exclude='bin,build,.eggs,src/highdicom/_*' From d1a904eb3e6387c24a20b3ce71a6f1396bc9ffc1 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 18:03:05 -0400 Subject: [PATCH 16/26] Add workflow with and without libjpeg --- .github/workflows/run_unit_tests.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml index 53d7e927..8c533ea8 100644 --- a/.github/workflows/run_unit_tests.yml +++ b/.github/workflows/run_unit_tests.yml @@ -16,6 +16,7 @@ jobs: strategy: matrix: python-version: ["3.7", "3.8", "3.9", "3.10"] + dependencies: [".", "'.[libjpeg]'"] steps: - uses: actions/checkout@v2 @@ -27,7 +28,7 @@ jobs: run: | python -m pip install --upgrade pip setuptools pip install -r requirements_test.txt - pip install ".[libjpeg]" + pip install ${{ matrix.dependencies }} - name: Lint with flake8 run: | flake8 --exclude='bin,build,.eggs,src/highdicom/_*' From 320b4f48a56bd0a6f4fc96cc45cec7e30216edbb Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 21:34:12 -0400 Subject: [PATCH 17/26] Apply suggestions from code review Co-authored-by: Markus D. Herrmann --- setup.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 457858d3..06534738 100644 --- a/setup.py +++ b/setup.py @@ -55,10 +55,13 @@ def get_version(): 'numpy>=1.19', 'pillow>=8.3', 'pillow-jpls>=1.0', - 'pylibjpeg>=1.4', 'pylibjpeg-openjpeg>=1.2', ], extras_requires={ - 'libjpeg': ['pylibjpeg-libjpeg>=1.3'], + 'libjpeg': [ + 'pylibjpeg>=1.4', + 'pylibjpeg-libjpeg>=1.3', + 'pylibjpeg-openjpeg>=1.2' + ], }, ) From 1422a259537dde096fa95b03e35893a2a24ad130 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 21:34:27 -0400 Subject: [PATCH 18/26] Update docs/installation.rst Co-authored-by: Markus D. Herrmann --- docs/installation.rst | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/docs/installation.rst b/docs/installation.rst index 6c8086b3..4333985a 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -22,13 +22,10 @@ Pre-build package available at PyPi: pip install highdicom -Like the underlying ``pydicom`` package, highdicom relies on functionality -implemented in the ``pylibjpeg-libjpeg`` -`package `_ for the decoding of -DICOM images with certain transfer syntaxes. Since ``pylibjpeg-libjpeg`` is -licensed under a copyleft GPL v3 license, it is not installed by default when -you install highdicom. To install ``pylibjpeg-libjpeg`` along with highdicom, -use +The library relies on the underlying ``pydicom`` package for decoding of pixel data, which internally delegates the task to either the ``pillow`` or the ``pylibjpeg`` packages. +Since the ``pillow`` is a dependency of *highdicom* and will automatically be installed, some transfer syntax can thus be readily decoded and encoded (baseline JPEG, JPEG-2000, JPEG-LS). +Support for additional transfer syntaxes (e.g., lossless JPEG) requires installation of the ``pylibjpeg`` package as well as the ``pylibjpeg-libjpeg`` and ``pylibjpeg-openjpeg`` packages. +Since ``pylibjpeg-libjpeg`` is licensed under a copyleft GPL v3 license, it is not installed by default when you install *highdicom*. To install the ``pylibjpeg`` packages along with *highdicom*, use .. 
code-block:: none From 7e085178c0bbc11fdc3dc6e4d8a4262c5fdbd9f5 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Oct 2022 21:43:49 -0400 Subject: [PATCH 19/26] remove openjpeg from deps, fix installation guide --- docs/installation.rst | 14 ++++++++++---- setup.py | 1 - 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/docs/installation.rst b/docs/installation.rst index 4333985a..5fa1e1a6 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -22,10 +22,16 @@ Pre-build package available at PyPi: pip install highdicom -The library relies on the underlying ``pydicom`` package for decoding of pixel data, which internally delegates the task to either the ``pillow`` or the ``pylibjpeg`` packages. -Since the ``pillow`` is a dependency of *highdicom* and will automatically be installed, some transfer syntax can thus be readily decoded and encoded (baseline JPEG, JPEG-2000, JPEG-LS). -Support for additional transfer syntaxes (e.g., lossless JPEG) requires installation of the ``pylibjpeg`` package as well as the ``pylibjpeg-libjpeg`` and ``pylibjpeg-openjpeg`` packages. -Since ``pylibjpeg-libjpeg`` is licensed under a copyleft GPL v3 license, it is not installed by default when you install *highdicom*. To install the ``pylibjpeg`` packages along with *highdicom*, use +The library relies on the underlying ``pydicom`` package for decoding of pixel +data, which internally delegates the task to either the ``pillow`` or the +``pylibjpeg`` packages. Since ``pillow`` is a dependency of *highdicom* and +will automatically be installed, some transfer syntax can thus be readily +decoded and encoded (baseline JPEG, JPEG-2000, JPEG-LS). Support for additional +transfer syntaxes (e.g., lossless JPEG) requires installation of the +``pylibjpeg`` package as well as the ``pylibjpeg-libjpeg`` and +``pylibjpeg-openjpeg`` packages. Since ``pylibjpeg-libjpeg`` is licensed under +a copyleft GPL v3 license, it is not installed by default when you install +*highdicom*. To install the ``pylibjpeg`` packages along with *highdicom*, use .. code-block:: none diff --git a/setup.py b/setup.py index 06534738..2a5c6107 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,6 @@ def get_version(): 'numpy>=1.19', 'pillow>=8.3', 'pillow-jpls>=1.0', - 'pylibjpeg-openjpeg>=1.2', ], extras_requires={ 'libjpeg': [ From c03598d60b0aeb7b43d5f3a260ae085ef01647b4 Mon Sep 17 00:00:00 2001 From: "Markus D. Herrmann" Date: Fri, 28 Oct 2022 15:12:24 -0400 Subject: [PATCH 20/26] Add citation file (#204) --- CITATION.cff | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 CITATION.cff diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 00000000..a5ed42d8 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,42 @@ +cff-version: 1.2.0 +message: "If you use this software, please cite our paper." +authors: +- family-names: "Herrmann" + given-names: "Markus D." +- family-names: "Bridge" + given-names: "Christopher P." +- family-names: "Fedorov" + given-names: "Andriy Y." +- family-names: "Pieper" + given-names: "Steven" +- family-names: "Doyle" + given-names: "Sean W." +- family-names: "Gorman" + given-names: "Chris" +preferred-citation: + type: article + authors: + - family-names: "Bridge" + given-names: "Christopher P." + orcid: "https://orcid.org/0000-0002-2242-351X" + - family-names: "Gorman" + given-names: "Chris" + - family-names: "Pieper" + given-names: "Steven" + - family-names: "Doyle" + given-names: "Sean W." 
+ - family-names: "Lennerz" + given-names: "Jochen K." + - family-names: "Kalpathy-Cramer" + given-names: "Jayashree " + - family-names: "Clunie" + given-names: "David A." + - family-names: "Fedorov" + given-names: "Andriy Y." + - family-names: "Herrmann" + given-names: "Markus D." + orcid: "https://orcid.org/0000-0002-7257-9205" + title: "Highdicom: a Python Library for Standardized Encoding of Image Annotations and Machine Learning Model Outputs in Pathology and Radiology" + journal: "J Digit Imaging" + year: 2022 + doi: 10.1007/s10278-022-00683-y From 2152c1983e5bcf818a99f4d6d574a519160cb53a Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 28 Oct 2022 15:51:03 -0400 Subject: [PATCH 21/26] Use deepcopy for CodedConcept.from_dataset() --- src/highdicom/sr/coding.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/highdicom/sr/coding.py b/src/highdicom/sr/coding.py index d6ee9b50..6e033a46 100644 --- a/src/highdicom/sr/coding.py +++ b/src/highdicom/sr/coding.py @@ -1,3 +1,4 @@ +from copy import deepcopy import logging from typing import Any, Optional, Union @@ -64,13 +65,15 @@ def __eq__(self, other: Any) -> bool: whether `self` and `other` are considered equal """ - this = Code( - self.value, - self.scheme_designator, - self.meaning, - self.scheme_version - ) - return Code.__eq__(this, other) + if isinstance(other, (Code, CodedConcept)): + this = Code( + self.value, + self.scheme_designator, + self.meaning, + self.scheme_version + ) + return Code.__eq__(this, other) + return super().__eq__(other) def __ne__(self, other: Any) -> bool: """Compares `self` and `other` for inequality. @@ -121,12 +124,9 @@ def from_dataset(cls, dataset: Dataset) -> 'CodedConcept': 'Dataset does not contain the following attribute ' f'required for coded concepts: {kw}.' 
) - return cls( - value=dataset.CodeValue, - scheme_designator=dataset.CodingSchemeDesignator, - meaning=dataset.CodeMeaning, - scheme_version=getattr(dataset, 'CodingSchemeVersion', None) - ) + concept = deepcopy(dataset) + concept.__class__ = cls + return concept @classmethod def from_code(cls, code: Union[Code, 'CodedConcept']) -> 'CodedConcept': From d36e2a50350fd15531f549f52dafd87587a80afe Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Wed, 9 Nov 2022 13:42:14 -0500 Subject: [PATCH 22/26] Increase package version for release (#206) Co-authored-by: Chris Bridge --- src/highdicom/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/version.py b/src/highdicom/version.py index 482e4a19..2f15b8cd 100644 --- a/src/highdicom/version.py +++ b/src/highdicom/version.py @@ -1 +1 @@ -__version__ = '0.19.0' +__version__ = '0.20.0' From 91e616da21dbe8300d3eb232e5b2896cce3d1349 Mon Sep 17 00:00:00 2001 From: hackermd Date: Fri, 5 Aug 2022 14:46:21 -0400 Subject: [PATCH 23/26] Allow reference of optical path for measurements --- src/highdicom/ann/content.py | 33 ++++++++++++++++++++-- src/highdicom/content.py | 55 ++++++++++++++++++++++++++++-------- tests/test_ann.py | 26 ++++++++++++++++- tests/test_content.py | 40 ++++++++++++++++++++++---- 4 files changed, 134 insertions(+), 20 deletions(-) diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index c7234ca4..dfcaa2eb 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -11,7 +11,10 @@ AnnotationGroupGenerationTypeValues, GraphicTypeValues, ) -from highdicom.content import AlgorithmIdentificationSequence +from highdicom.content import ( + AlgorithmIdentificationSequence, + ReferencedImageSequence, +) from highdicom.sr.coding import CodedConcept from highdicom.uid import UID from highdicom._module_utils import check_required_attributes @@ -25,7 +28,8 @@ def __init__( self, name: Union[Code, CodedConcept], values: np.ndarray, - unit: Union[Code, CodedConcept] + unit: Union[Code, CodedConcept], + referenced_images: Optional[ReferencedImageSequence] = None ) -> None: """ Parameters @@ -40,6 +44,9 @@ def __init__( unit: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code], optional Coded units of measurement (see :dcm:`CID 7181 ` "Abstract Multi-dimensional Image Model Component Units") + referenced_images: Union[highdicom.ReferencedImageSequence, None], optional + Referenced image to which the measurement applies. Should only be + provided for intensity measurements. """ # noqa: E501 super().__init__() @@ -61,6 +68,22 @@ def __init__( item.AnnotationIndexList = stored_indices.tobytes() self.MeasurementValuesSequence = [item] + if referenced_images is not None: + if len(referenced_images) == 0: + raise ValueError( + 'Argument "referenced_images" must contain one item.' + ) + elif len(referenced_images) > 1: + raise ValueError( + 'Argument "referenced_images" must contain only one item.' + ) + if not isinstance(referenced_images, ReferencedImageSequence): + raise TypeError( + 'Argument "referenced_images" must have type ' + 'ReferencedImageSequence.' 
+ ) + self.ReferencedImageSequence = referenced_images + @property def name(self) -> CodedConcept: """highdicom.sr.CodedConcept: coded name""" @@ -520,6 +543,12 @@ def get_graphic_data( ) else: if coordinate_type == AnnotationCoordinateTypeValues.SCOORD: + if hasattr(self, 'CommonZCoordinateValue'): + raise ValueError( + 'The annotation group contains the ' + '"Common Z Coordinate Value" element and therefore ' + 'cannot have Annotation Coordinate Type "2D".' + ) coordinate_dimensionality = 2 else: coordinate_dimensionality = 3 diff --git a/src/highdicom/content.py b/src/highdicom/content.py index d94a0e54..f230b957 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -10,7 +10,10 @@ from pydicom.sr.coding import Code from pydicom.sr.codedict import codes from pydicom.valuerep import DS, format_number_as_ds -from pydicom._storage_sopclass_uids import SegmentationStorage +from pydicom.uid import ( + SegmentationStorage, + VLWholeSlideMicroscopyImageStorage, +) from highdicom.enum import ( CoordinateSystemNames, @@ -1406,21 +1409,20 @@ def __init__( referenced_images: Optional[Sequence[Dataset]] = None, referenced_frame_number: Union[int, Sequence[int], None] = None, referenced_segment_number: Union[int, Sequence[int], None] = None, + referenced_optical_path_identifier: Union[int, None] = None, ): """ Parameters ---------- referenced_images: Union[Sequence[pydicom.Dataset], None], optional - Images to which the VOI LUT described in this dataset applies. Note - that if unspecified, the VOI LUT applies to every image referenced - in the presentation state object that this dataset is included in. + Images that should be referenced referenced_frame_number: Union[int, Sequence[int], None], optional - Frame number(s) within a referenced multiframe image to which this - VOI LUT applies. + Frame number(s) within a referenced multiframe image referenced_segment_number: Union[int, Sequence[int], None], optional - Segment number(s) within a referenced segmentation image to which - this VOI LUT applies. + Segment number(s) within a referenced segmentation image + referenced_optical_path_identifier: Union[int, None], optional + Identifier of the optical path within a referenced microscopy image """ super().__init__() @@ -1445,6 +1447,7 @@ def __init__( raise ValueError("Found duplicate instances in referenced images.") multiple_images = len(referenced_images) > 1 + sop_class_uid = referenced_images[0].SOPClassUID if referenced_frame_number is not None: if multiple_images: raise ValueError( @@ -1466,16 +1469,17 @@ def __init__( f'Frame number {f} is invalid for referenced ' 'image.' ) + if referenced_segment_number is not None: if multiple_images: raise ValueError( 'Specifying "referenced_segment_number" is not ' 'supported with multiple referenced images.' ) - if referenced_images[0].SOPClassUID != SegmentationStorage: + if sop_class_uid != SegmentationStorage: raise TypeError( '"referenced_segment_number" is only valid when the ' - 'referenced image is a segmentation image.' + 'referenced image is a Segmentation image.' ) number_of_segments = len(referenced_images[0].SegmentSequence) if isinstance(referenced_segment_number, Sequence): @@ -1485,8 +1489,7 @@ def __init__( for s in _referenced_segment_numbers: if s < 1 or s > number_of_segments: raise ValueError( - f'Segment number {s} is invalid for referenced ' - 'image.' + f'Segment number {s} is invalid for referenced image.' 
) if referenced_frame_number is not None: # Check that the one of the specified segments exists @@ -1504,6 +1507,31 @@ def __init__( f'Referenced frame {f} does not contain any of ' 'the referenced segments.' ) + + if referenced_optical_path_identifier is not None: + if multiple_images: + raise ValueError( + 'Specifying "referenced_optical_path_identifier" is not ' + 'supported with multiple referenced images.' + ) + if sop_class_uid != VLWholeSlideMicroscopyImageStorage: + raise TypeError( + '"referenced_optical_path_identifier" is only valid when ' + 'referenced image is a VL Whole Slide Microscopy image.' + ) + has_optical_path = False + for ref_img in referenced_images: + for optical_path_item in ref_img.OpticalPathSequence: + has_optical_path |= ( + optical_path_item.OpticalPathIdentifier == + referenced_optical_path_identifier + ) + if not has_optical_path: + raise ValueError( + 'None of the reference images contains the specified ' + '"referenced_optical_path_identifier".' + ) + for im in referenced_images: if not does_iod_have_pixel_data(im.SOPClassUID): raise ValueError( @@ -1515,6 +1543,9 @@ def __init__( ref_im.ReferencedSOPClassUID = im.SOPClassUID if referenced_segment_number is not None: ref_im.ReferencedSegmentNumber = referenced_segment_number + elif referenced_optical_path_identifier is not None: + ref_im.ReferencedOpticalPathIdentifier = \ + str(referenced_optical_path_identifier) if referenced_frame_number is not None: ref_im.ReferencedFrameNumber = referenced_frame_number self.append(ref_im) diff --git a/tests/test_ann.py b/tests/test_ann.py index 39b39c1c..ea612a73 100644 --- a/tests/test_ann.py +++ b/tests/test_ann.py @@ -8,6 +8,7 @@ from pydicom.filereader import dcmread from pydicom.sr.codedict import codes from pydicom.sr.coding import Code +from pydicom.uid import VLWholeSlideMicroscopyImageStorage from highdicom.ann.content import Measurements, AnnotationGroup from highdicom.ann.enum import ( @@ -16,7 +17,10 @@ GraphicTypeValues, ) from highdicom.ann.sop import MicroscopyBulkSimpleAnnotations -from highdicom.content import AlgorithmIdentificationSequence +from highdicom.content import ( + AlgorithmIdentificationSequence, + ReferencedImageSequence, +) from highdicom.sr.coding import CodedConcept from highdicom.uid import UID @@ -61,6 +65,26 @@ def test_construction(self): values[stored_indices] ) + def test_construction_with_referenced_image(self): + optical_path_item = Dataset() + optical_path_item.OpticalPathIdentifier = '1' + image = Dataset() + image.SOPInstanceUID = '1.2.3.4' + image.SOPClassUID = VLWholeSlideMicroscopyImageStorage + image.OpticalPathSequence = [optical_path_item] + + measurements = Measurements( + name=Code('Q4LE', 'SBSI', 'Mean intensity'), + values=np.ones((10, ), dtype=np.float32), + unit=Code('{counts}', 'UCUM', 'Counts'), + referenced_images=ReferencedImageSequence( + referenced_images=[image], + referenced_optical_path_identifier='1' + ) + ) + assert hasattr(measurements, 'ReferencedImageSequence') + assert len(measurements.ReferencedImageSequence) == 1 + def test_construction_missing_name(self): with pytest.raises(TypeError): Measurements( diff --git a/tests/test_content.py b/tests/test_content.py index f28568f9..cf64cac3 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -742,9 +742,8 @@ def setUp(self): for f in get_testdata_files('dicomdirtests/77654033/CT2/*') ] self._ct_multiframe = dcmread(get_testdata_file('eCT_Supplemental.dcm')) - self._seg = dcmread( - 'data/test_files/seg_image_ct_binary_overlap.dcm' - ) + 
self._sm = dcmread('data/test_files/sm_image.dcm') + self._seg = dcmread('data/test_files/seg_image_ct_binary_overlap.dcm') def test_construction_ref_ims(self): ref_ims = ReferencedImageSequence( @@ -812,9 +811,9 @@ def test_construction_segment_number(self): assert ref_ims[0].ReferencedSegmentNumber == 1 def test_construction_segment_number_non_seg(self): - with pytest.raises(ValueError): + with pytest.raises(TypeError): ReferencedImageSequence( - referenced_images=self._ct_series, + referenced_images=self._ct_series[0], referenced_segment_number=1 ) @@ -873,6 +872,37 @@ def test_construction_duplicate(self): referenced_images=self._ct_series * 2, ) + def test_construction_optical_path_identifier(self): + ref_ims = ReferencedImageSequence( + referenced_images=[self._sm], + referenced_optical_path_identifier='1' + ) + assert len(ref_ims) == 1 + assert ref_ims[0].ReferencedOpticalPathIdentifier == '1' + + def test_construction_optical_path_identifier_invalid_reference(self): + with pytest.raises(ValueError): + ReferencedImageSequence( + referenced_images=[self._sm], + referenced_optical_path_identifier='20' + ) + + def test_construction_optical_path_identifier_non_sm(self): + with pytest.raises(TypeError): + ReferencedImageSequence( + referenced_images=[self._seg], + referenced_optical_path_identifier='1' + ) + + def test_construction_optical_path_identifier_and_frame_numbers(self): + ref_ims = ReferencedImageSequence( + referenced_images=[self._sm], + referenced_optical_path_identifier='1', + referenced_frame_number=[1, 2], + ) + assert len(ref_ims) == 1 + assert ref_ims[0].ReferencedOpticalPathIdentifier == '1' + class TestPaletteColorLUT(TestCase): From 615d38cbb2cf6a0b0f2ad56cb67a8f116df3865c Mon Sep 17 00:00:00 2001 From: hackermd Date: Fri, 5 Aug 2022 15:19:50 -0400 Subject: [PATCH 24/26] Add property to access referenced images --- src/highdicom/ann/content.py | 13 ++++++++++++ src/highdicom/content.py | 39 +++++++++++++++++++++++++++++++++++- tests/test_ann.py | 7 +++++++ 3 files changed, 58 insertions(+), 1 deletion(-) diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index dfcaa2eb..2d9a9fc3 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -94,6 +94,14 @@ def unit(self) -> CodedConcept: """highdicom.sr.CodedConcept: coded unit""" return self.MeasurementUnitsCodeSequence[0] + @property + def referenced_images(self) -> ReferencedImageSequence: + """highdicom.ReferencedImageSequence: referenced images""" + if hasattr(self, 'ReferencedImageSequence'): + return self.ReferencedImageSequence + else: + return [] + def get_values(self, number_of_annotations: int) -> np.ndarray: """Get measured values for annotations. 
@@ -174,6 +182,11 @@ def from_dataset(cls, dataset: Dataset) -> 'Measurements': measurements.MeasurementUnitsCodeSequence[0] ) ] + if hasattr(measurements, 'ReferencedImageSequence'): + measurements.ReferencedImageSequence = \ + ReferencedImageSequence.from_sequence( + measurements.ReferencedImageSequence + ) return cast(Measurements, measurements) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index f230b957..344e26e2 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -108,7 +108,7 @@ def from_sequence( Returns ------- - highdicom.seg.content.AlgorithmIdentificationSequence + highdicom.AlgorithmIdentificationSequence Algorithm Identification Sequence """ @@ -1550,6 +1550,43 @@ def __init__( ref_im.ReferencedFrameNumber = referenced_frame_number self.append(ref_im) + @classmethod + def from_sequence( + cls, + sequence: DataElementSequence + ) -> 'ReferencedImageSequence': + """Construct instance from an existing data element sequence. + + Parameters + ---------- + sequence: pydicom.sequence.Sequence + Data element sequence representing the + Algorithm Identification Sequence + + Returns + ------- + highdicom.ReferencedImageSequence + Referenced Image Sequence + + """ + if not isinstance(sequence, DataElementSequence): + raise TypeError( + 'Sequence should be of type pydicom.sequence.Sequence.' + ) + if len(sequence) != 1: + raise ValueError('Sequence should contain a single item.') + check_required_attributes( + sequence[0], + module='advanced-blending-presentation-state', + base_path=[ + 'AdvancedBlendingSequence', + 'ReferencedImageSequence', + ] + ) + ref_img_sequence = deepcopy(sequence) + ref_img_sequence.__class__ = ReferencedImageSequence + return cast(ReferencedImageSequence, ref_img_sequence) + class LUT(Dataset): diff --git a/tests/test_ann.py b/tests/test_ann.py index ea612a73..7749149e 100644 --- a/tests/test_ann.py +++ b/tests/test_ann.py @@ -124,12 +124,19 @@ def test_alternative_construction_from_dataset(self): measurement_values.FloatingPointValues = values.tobytes() measurement_values.AnnotationIndexList = index.tobytes() dataset.MeasurementValuesSequence = [measurement_values] + referenced_image = Dataset() + referenced_image.ReferencedOpticalPathIdentifier = '1' + referenced_image.ReferencedSOPInstanceUID = '1.2.3' + referenced_image.ReferencedSOPClassUID = \ + VLWholeSlideMicroscopyImageStorage + dataset.ReferencedImageSequence = [referenced_image] measurements = Measurements.from_dataset(dataset) assert measurements.name == CodedConcept.from_dataset(name) assert measurements.unit == CodedConcept.from_dataset(unit) np.testing.assert_allclose(measurements.get_values(3), values) + assert len(measurements.referenced_images) == 1 class TestAnnotationGroup(unittest.TestCase): From d32c006bd1cd83d298024b19dff1c99ae0bf5773 Mon Sep 17 00:00:00 2001 From: hackermd Date: Fri, 5 Aug 2022 15:25:45 -0400 Subject: [PATCH 25/26] Provide referenced images for measurements --- src/highdicom/ann/content.py | 20 +++++++++++++++----- tests/test_ann.py | 13 +++++++++---- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index 2d9a9fc3..45a98be7 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -95,12 +95,12 @@ def unit(self) -> CodedConcept: return self.MeasurementUnitsCodeSequence[0] @property - def referenced_images(self) -> ReferencedImageSequence: - """highdicom.ReferencedImageSequence: referenced images""" + def referenced_images(self) -> 
Union[ReferencedImageSequence, None]: + """Union[highdicom.ReferencedImageSequence, None]: referenced images""" if hasattr(self, 'ReferencedImageSequence'): return self.ReferencedImageSequence else: - return [] + return None def get_values(self, number_of_annotations: int) -> np.ndarray: """Get measured values for annotations. @@ -675,7 +675,10 @@ def get_measurements( self, name: Optional[Union[Code, CodedConcept]] = None ) -> Tuple[ - List[CodedConcept], np.ndarray, List[CodedConcept] + List[CodedConcept], + np.ndarray, + List[CodedConcept], + List[Union[ReferencedImageSequence, None]] ]: """Get measurements. @@ -696,6 +699,8 @@ def get_measurements( given annotation. units: List[highdicom.sr.CodedConcept] Units of measurements + referenced_images: List[highdicom.ReferencedImageSequence, None] + Referenced images """ # noqa: E501 number_of_annotations = self.number_of_annotations @@ -717,11 +722,16 @@ def get_measurements( item.unit for item in self.MeasurementsSequence if name is None or item.name == name ] + referenced_images = [ + item.referenced_images for item in self.MeasurementsSequence + if name is None or item.name == name + ] else: value_array = np.empty((number_of_annotations, 0), np.float32) names = [] units = [] - return (names, value_array, units) + referenced_images = [] + return (names, value_array, units, referenced_images) def _get_coordinate_index( self, diff --git a/tests/test_ann.py b/tests/test_ann.py index 7749149e..fe587934 100644 --- a/tests/test_ann.py +++ b/tests/test_ann.py @@ -233,7 +233,7 @@ def test_construction(self): graphic_data[1] ) - names, values, units = group.get_measurements() + names, values, units, ref_images = group.get_measurements() assert len(names) == 1 assert names[0] == measurement_names[0] assert len(units) == 1 @@ -241,8 +241,10 @@ def test_construction(self): assert values.dtype == np.float32 assert values.shape == (2, 1) np.testing.assert_allclose(values, measurement_values) + assert len(ref_images) == 1 + assert ref_images[0] is None - names, values, units = group.get_measurements( + names, values, units, ref_images = group.get_measurements( name=measurement_names[0] ) assert len(names) == 1 @@ -252,8 +254,10 @@ def test_construction(self): assert values.dtype == np.float32 assert values.shape == (2, 1) np.testing.assert_allclose(values, measurement_values) + assert len(ref_images) == 1 + assert ref_images[0] is None - names, values, units = group.get_measurements( + names, values, units, ref_images = group.get_measurements( name=codes.SCT.Volume ) assert names == [] @@ -261,6 +265,7 @@ def test_construction(self): assert values.size == 0 assert values.dtype == np.float32 assert values.shape == (2, 0) + assert ref_images == [] def test_alternative_construction_from_dataset(self): coordinates_data = np.array( @@ -313,7 +318,7 @@ def test_alternative_construction_from_dataset(self): np.array([[1.0, 1.0]], dtype=np.double) ) - names, values, units = group.get_measurements() + names, values, units, ref_images = group.get_measurements() assert names == [] assert units == [] assert values.size == 0 From 1ff69858ed6c55b62e28f44bcf0af705ff1bf9db Mon Sep 17 00:00:00 2001 From: "Markus D. 
Herrmann" Date: Sat, 1 Apr 2023 18:03:44 +0200 Subject: [PATCH 26/26] Update src/highdicom/ann/content.py Co-authored-by: Chris Bridge --- src/highdicom/ann/content.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index 45a98be7..d793eb6a 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -98,7 +98,7 @@ def unit(self) -> CodedConcept: def referenced_images(self) -> Union[ReferencedImageSequence, None]: """Union[highdicom.ReferencedImageSequence, None]: referenced images""" if hasattr(self, 'ReferencedImageSequence'): - return self.ReferencedImageSequence + return ReferencedImageSequence.from_sequence(self.ReferencedImageSequence) else: return None