From 16019a03470d1924129bc5d40cb5edb0df359945 Mon Sep 17 00:00:00 2001 From: Afshin Date: Thu, 25 Jun 2020 14:38:42 -0400 Subject: [PATCH 01/44] newly designed multi-frame conversion based on pixelmed --- src/highdicom/legacy/sop.py | 1349 ++++++++++++++++++++++++++++++++++- 1 file changed, 1346 insertions(+), 3 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index d0751e3f..6a5f7410 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -5,9 +5,16 @@ from typing import Any, Dict, List, Optional, Sequence, Union import numpy as np -from pydicom.datadict import tag_for_keyword +from pydicom.datadict import tag_for_keyword, dictionary_VR from pydicom.dataset import Dataset from pydicom.tag import Tag +from pydicom.dataelem import DataElement +from pydicom.sequence import Sequence as DicomSequence +from pydicom.multival import MultiValue +from datetime import date, datetime, time +from pydicom.valuerep import DT, DA, TM +from copy import deepcopy + from pydicom.uid import UID from highdicom.base import SOPClass @@ -70,6 +77,7 @@ def _convert_legacy_to_enhanced( if mf_dataset is None: mf_dataset = Dataset() + transfer_syntaxes = set() series = set() studies = set() @@ -165,7 +173,7 @@ def _convert_legacy_to_enhanced( volumetric_properties = 'VOLUME' unique_image_types = set() unassigned_dataelements: Dict[str, List[Dataset]] = defaultdict(list) - + # Per-Frame Functional Groups perframe_items = [] for i, ds in enumerate(sf_datasets): @@ -316,7 +324,8 @@ def _convert_legacy_to_enhanced( perframe_items.append(perframe_item) # All other attributes that are not assigned to functional groups. 
- for tag, da in ds.items(): + + for tag, da in ds.items(): if tag in assigned_attributes: continue elif tag in mf_attributes: @@ -619,3 +628,1337 @@ def __init__( **kwargs ) _convert_legacy_to_enhanced(legacy_datasets, self) +from abc import ABC, abstractmethod +class Abstract_MultiframeModuleAdder(ABC): + + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None ): + self.ExcludedFromPerFrameTags = excluded_from_perframe_tags + self.ExcludedFromFunctionalGroupsTags = excluded_from_functional_tags + self.PerFrameTags = perframe_tags + self.SharedTags = shared_tags + self.TargetDataset = multi_frame_output + self.SingleFrameSet = sf_datasets + self.EarliestDate = DA('00010101') + self.EarliestTime = TM('000000') + self.EarliestDateTime = DT('00010101000000') + def _is_empty_or_empty_items(self, attribute:DataElement)->bool: + if attribute.is_empty: + return True + if type(attribute.value) == Sequence: + if len(attribute.value) == 0: + return True + for item in attribute.value: + for tg, v in item.items(): + v = item[tg] + if not self._is_empty_or_empty_items(v): + return False + return True + + def _mark_tag_as_used(self, tg:Tag): + if tg in self.SharedTags: + self.SharedTags[tg] = True + elif tg in self.ExcludedFromPerFrameTags: + self.ExcludedFromPerFrameTags[tg] = True + elif tg in self.PerFrameTags: + self.PerFrameTags[tg] = True + + def _copy_attrib_if_present(self, src_ds:Dataset, dest_ds:Dataset, + src_kw_or_tg:str, dest_kw_or_tg:str=None, + check_not_to_be_perframe=True, check_not_to_be_empty=False): + if type(src_kw_or_tg) == str: + src_kw_or_tg = tag_for_keyword(src_kw_or_tg) + if dest_kw_or_tg == None: + dest_kw_or_tg = src_kw_or_tg + elif type(dest_kw_or_tg) == str: + dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) + if check_not_to_be_perframe: + if src_kw_or_tg in 
self.PerFrameTags: + return + if src_kw_or_tg in src_ds: + elem = src_ds[src_kw_or_tg] + if check_not_to_be_empty: + if _is_empty_or_empty_items(elem): + return + new_elem = deepcopy(elem) + if dest_kw_or_tg == src_kw_or_tg: + dest_ds[dest_kw_or_tg] = new_elem + else: + new_elem1 = DataElement(dest_kw_or_tg, + dictionary_VR(dest_kw_or_tg), newelem.value) + dest_ds[dest_kw_or_tg] = new_elem1 + # now mark the attrib as used/done to keep track of every one of it + self._mark_tag_as_used(src_kw_or_tg) + + + + + def _get_perframe_item(self, index:int)->Dataset: + if index > len(self.SingleFrameSet): + return None + pf_kw = 'PerFrameFunctionalGroupsSequence' + pf_tg = tag_for_keyword(pf_kw) + if not pf_kw in self.TargetDataset: + seq = [] + for i in range(0, len(self.SingleFrameSet)): + seq.append(Dataset()) + self.TargetDataset[pf_tg] = DataElement(pf_tg, 'SQ', DicomSequence(seq)) + return self.TargetDataset[pf_tg].value[index] + def _get_shared_item(self)->Dataset: + sf_kw = 'SharedFunctionalGroupsSequence' + sf_tg = tag_for_keyword(sf_kw) + if not sf_kw in self.TargetDataset: + seq = [Dataset()] + self.TargetDataset[sf_tg] = DataElement(sf_tg, 'SQ', DicomSequence(seq)) + return self.TargetDataset[sf_tg].value[0] + def _get_or_create_attribute(self, src:Dataset, kw:str, default)->DataElement: + tg = tag_for_keyword(kw) + if kw in src: + a = deepcopy(src[kw]) + else: + a = DataElement(tg, + dictionary_VR(tg), default ) + from pydicom.valuerep import DT, TM, DA + if a.VR == 'DA' and type(a.value)==str: + a.value = DA(a.value) + if a.VR == 'DT' and type(a.value)==str: + a.value = DT(a.value) + if a.VR == 'TM' and type(a.value)==str: + a.value = TM(a.value) + + self._mark_tag_as_used(tg) + return a + def _add_module(self, module_name: str, check_not_to_be_perframe=True): + # sf_sop_instance_uid = sf_datasets[0] + # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[sf_sop_instance_uid] + # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] + # modules = 
IOD_MODULE_MAP[iod_name] + from copy import deepcopy + attribs = MODULE_ATTRIBUTE_MAP[module_name] + ref_dataset = self.SingleFrameSet[0] + for a in attribs: + if len(a['path']) == 0: + self._copy_attrib_if_present(ref_dataset, self.TargetDataset, a['keyword'], + check_not_to_be_perframe=check_not_to_be_perframe) + + @abstractmethod + def AddModule(self): + pass +class PixelImageModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def AddModule(self): + self._add_module('image-pixel', False) # don't check the perframe set +class CommonCTMRPETImageDescriptionMacro(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None + , modality:str='CT'): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + self.Modality = modality + def _get_value_for_frame_type(self, attrib:DataElement): + if type(attrib) == DataElement: + return None + output = ['', '', '', ''] + v = attrib.value + l = len(v) + output[0] = 'ORIGINAL' if l == 0 else v[0] + output[1] = 'PRIMARY' + output[2] = 'VOLUME' if l<3 else v[2] + output[3] = 'NONE' + return output + + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + seq_kw = '{}{}FrameTypeSequence' + if self.Modality == 'PET': + seq_kw = seq_kw.format(self.Modality, '') + else: + seq_kw = 
seq_kw.format(self.Modality, 'Image') + seq_tg = tag_for_keyword(seq_kw) + + FrameType_a = src_fg['ImageType'] + new_val = self._get_value_for_frame_type(FrameType_a) + inner_item = Dataset() + FrameType_tg = tag_for_keyword('FrameType') + inner_item[FrameType_tg] = DataElement(FrameType_tg, + FrameType_a.VR, new_val) + dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), + [inner_item]) + + def AddModule(self): + im_type_tag = tag_for_keyword('ImageType') + fm_type_tag = tag_for_keyword('FrameType') + + if not im_type_tag in self.PerFrameTags: + im_type_a = self.SingleFrameSet[0][im_type_tag] + new_val = self._get_value_for_frame_type(im_type_a) + self.TargetDataset[im_type_tag] = DataElement(im_type_tag, + im_type_a.VR, new_val) + #---------------------------- + + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + else: + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class EnhancedCommonImageModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def AddModule(self): + # ct_mr = CommonCTMRImageDescriptionMacro(self.SingleFrameSet + # , self.ExcludedFromPerFrameTags + # , self.PerFrameTags + # , self.SharedTags + # , self.TargetDataset) + # ct_mr.AddModule() + + + # Acquisition Number + # Acquisition DateTime - should be able to find earliest amongst all frames, if present (required if ORIGINAL) + # Acquisition Duration - should be able to work this out, but type 2C, so can send empty + + # Referenced Raw Data 
Sequence - optional - ignore - too hard to merge + # Referenced Waveform Sequence - optional - ignore - too hard to merge + # Referenced Image Evidence Sequence - should add if we have references :( + # Source Image Evidence Sequence - should add if we have sources :( + # Referenced Presentation State Sequence - should merge if present in any source frame :( + + # Samples per Pixel - handled by distinguishingAttribute copy + # Photometric Interpretation - handled by distinguishingAttribute copy + # Bits Allocated - handled by distinguishingAttribute copy + # Bits Stored - handled by distinguishingAttribute copy + # High Bit - handled by distinguishingAttribute copy + ref_dataset = self.SingleFrameSet[0] + attribs_to_be_added = ['ContentQualification', + 'ImageComments', + 'BurnedInAnnotation', + 'RecognizableVisualFeatures', + 'LossyImageCompression', + 'LossyImageCompressionRatio', + 'LossyImageCompressionMethod'] + for kw in attribs_to_be_added: + self._copy_attrib_if_present(ref_dataset, self.TargetDataset, kw) + + + if not tag_for_keyword('PresentationLUTShape') in self.PerFrameTags : + # actually should really invert the pixel data if MONOCHROME1, since only MONOCHROME2 is permitted :( + # also, do not need to check if PhotometricInterpretation is per-frame, since a distinguishing attribute + phmi_kw = 'PhotometricInterpretation' + phmi_tg = tag_for_keyword(phmi_kw) + phmi_a = self._get_or_create_attribute(self.SingleFrameSet[0], phmi_kw, "MONOCHROME2") + LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1' else "IDENTITY" + LUT_shape_a = self._get_or_create_attribute(self.SingleFrameSet[0], + 'PresentationLUTShape', + LUT_shape_default) + if not LUT_shape_a.is_empty: + self.TargetDataset['PresentationLUTShape'] = LUT_shape_a + # Icon Image Sequence - always discard these +class ContrastBolusModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , 
excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def AddModule(self): + self._add_module('contrast-bolus') +class EnhancedCTImageModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def AddModule(self): + pass + #David's code doesn't hold anything for this module ... should ask him +class AcquisitionContextModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def AddModule(self): + self._copy_attrib_if_present(self.SingleFrameSet + , self.TargetDataset + , 'AcquisitionContextSequence' + , check_not_to_be_perframe=True)#check not to be in perframe +class FrameAnatomyFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , 
excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + #David's code is more complicaated than mine + #Should check it out later. + fa_seq_tg = tag_for_keyword('FrameAnatomySequence') + item = Dataset() + self._copy_attrib_if_present(item, src_fg, 'AnatomicRegionSequence' + , check_not_to_be_perframe=False) + self._copy_attrib_if_present(item, src_fg, 'FrameLaterality' + , check_not_to_be_perframe=False + , check_not_to_be_empty=True) + + if not 'FrameLaterality' in item: + self._copy_attrib_if_present(item, src_fg, 'ImageLaterality' + , 'FrameLaterality' + , check_not_to_be_perframe=False) + if not 'FrameLaterality' in item: + self._copy_attrib_if_present(item, src_fg, 'Laterality' + , 'FrameLaterality' + , check_not_to_be_perframe=False) + + FrameAnatomy_a = DataElement(fa_seq_tg, dictionary_VR(fa_seq_tg), + [item] ) + dest_fg['FrameAnatomySequence'] = FrameAnatomy_a + def _contains_right_attributes(self, tags:dict) ->bool: + laterality_tg = tag_for_keyword('Laterality') + im_laterality_tg = tag_for_keyword('ImageLaterality') + bodypart_tg = tag_for_keyword('BodyPartExamined') + anatomical_reg_tg = tag_for_keyword('AnatomicRegionSequence') + + return (laterality_tg in tags + or im_laterality_tg in tags + or bodypart_tg in tags + or anatomical_reg_tg) + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class PixelMeasuresFunctionalGroup(Abstract_MultiframeModuleAdder): + def 
__init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + PixelSpacing_tg = tag_for_keyword('PixelSpacing' ) + SliceThickness_tg = tag_for_keyword('SliceThickness') + ImagerPixelSpacing_tg = tag_for_keyword('ImagerPixelSpacing') + + return (PixelSpacing_tg in tags + or SliceThickness_tg in tags + or ImagerPixelSpacing_tg in tags) + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + self._copy_attrib_if_present(src_fg + ,item + ,'PixelSpacing' + , check_not_to_be_perframe=False) + self._copy_attrib_if_present(src_fg + ,item + ,'SliceThickness' + , check_not_to_be_perframe=False) + if not 'PixelSpacing' in item: + self._copy_attrib_if_present(src_fg + ,item + ,'ImagerPixelSpacing' + ,'PixelSpacing' + , check_not_to_be_perframe=False + , check_not_to_be_empty=True) + pixel_measures_kw = 'PixelMeasuresSequence' + pixel_measures_tg = tag_for_keyword(pixel_measures_kw) + seq = DataElement(pixel_measures_tg + , dictionary_VR(pixel_measures_tg) + , [item]) + dest_fg[pixel_measures_tg] = seq + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class 
PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient' ) + + return ImagePositionPatient_tg in tags + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + self._copy_attrib_if_present(src_fg + , item + ,'ImagePositionPatient' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + PlanePositionSequence_kw = 'PlanePositionSequence' + PlanePositionSequence_tg = tag_for_keyword(PlanePositionSequence_kw) + seq = DataElement(PlanePositionSequence_tg + , dictionary_VR(PlanePositionSequence_tg) + , [item]) + dest_fg[PlanePositionSequence_tg] = seq + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + 
super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient' ) + + return ImagePositionPatient_tg in tags + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + self._copy_attrib_if_present(src_fg + , item + ,'ImagePositionPatient' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + PlanePositionSequence_kw = 'PlanePositionSequence' + PlanePositionSequence_tg = tag_for_keyword(PlanePositionSequence_kw) + seq = DataElement(PlanePositionSequence_tg + , dictionary_VR(PlanePositionSequence_tg) + , [item]) + dest_fg[PlanePositionSequence_tg] = seq + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class PlaneOrientationFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + ImageOrientationPatient_tg = tag_for_keyword('ImageOrientationPatient' ) + + return 
ImageOrientationPatient_tg in tags + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + self._copy_attrib_if_present(src_fg + , item + ,'ImageOrientationPatient' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + kw = 'PlaneOrientationSequence' + tg = tag_for_keyword(kw) + seq = DataElement(tg, dictionary_VR(tg), [item]) + dest_fg[tg] = seq + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class FrameVOILUTFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + WindowWidth_tg = tag_for_keyword('WindowWidth') + WindowCenter_tg = tag_for_keyword('WindowCenter') + WindowCenterWidthExplanation_tg = tag_for_keyword('WindowCenterWidthExplanation') + + return (WindowWidth_tg in tags + or WindowCenter_tg in tags + or WindowCenterWidthExplanation_tg in tags) + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + self._copy_attrib_if_present(src_fg + , item + ,'WindowWidth' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + 
self._copy_attrib_if_present(src_fg + , item + ,'WindowCenter' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg + , item + ,'WindowCenterWidthExplanation' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + kw = 'FrameVOILUTSequence' + tg = tag_for_keyword(kw) + seq = DataElement(tg, dictionary_VR(tg), [item]) + dest_fg[tg] = seq + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class PixelValueTransformationFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + RescaleIntercept_tg = tag_for_keyword('RescaleIntercept') + RescaleSlope_tg = tag_for_keyword('RescaleSlope') + RescaleType_tg = tag_for_keyword('RescaleType') + + return (RescaleIntercept_tg in tags + or RescaleSlope_tg in tags + or RescaleType_tg in tags) + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + self._copy_attrib_if_present(src_fg + , item + ,'RescaleSlope' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + 
self._copy_attrib_if_present(src_fg + , item + ,'RescaleIntercept' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + haveValuesSoAddType = 'RescaleSlope' in item or 'RescaleIntercept' in item + self._copy_attrib_if_present(src_fg + , item + , 'RescaleType' + , check_not_to_be_perframe=False + , check_not_to_be_empty=True) + if not "RescaleType" in item: + value = '' + modality = '' if not 'Modality' in src_fg else src_fg["Modality"].value + if haveValuesSoAddType: + value = 'US' + if modality == 'CT': + containes_localizer = False + ImageType_v = [] if not 'ImageType' in src_fg else src_fg['ImageType'].value + for i in ImageType_v: + if i=='LOCALIZER': + containes_localizer = True + break + if not containes_localizer: + value = "HU" + elif modality == 'PT': + value = 'US' if not 'Units' in src_fg else src_fg['Units'].value + if value != '': + tg = tag_for_keyword('RescaleType') + item[tg]= DataElement(tg, dictionary_VR(tg), value) + + kw = 'PixelValueTransformationSequence' + tg = tag_for_keyword(kw) + seq = DataElement(tg, dictionary_VR(tg), [item]) + dest_fg[tg] = seq + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class ReferencedImageFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , 
excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + return tag_for_keyword('ReferencedImageSequence') in tags + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + self._copy_attrib_if_present(src_fg + , dest_fg + ,'ReferencedImageSequence' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class DerivationImageFunctionalGroup(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _contains_right_attributes(self, tags:dict) ->bool: + return tag_for_keyword('SourceImageSequence') in tags + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + self._copy_attrib_if_present(src_fg + , item + ,'DerivationDescription' + , check_not_to_be_perframe=False + , check_not_to_be_empty=True) + haveValuesSoAddType = 'RescaleSlope' in item or 'RescaleIntercept' in item + self._copy_attrib_if_present(src_fg + , item + , 'DerivationCodeSequence' + , 
check_not_to_be_perframe=False + , check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg + , item + ,'SourceImageSequence' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + kw = 'DerivationImageSequence' + tg = tag_for_keyword(kw) + seq = DataElement(tg, dictionary_VR(tg), [item]) + dest_fg[tg] = seq + + def AddModule(self): + if (not self._contains_right_attributes(self.PerFrameTags) + and (self._contains_right_attributes(self.SharedTags) + or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + ): + item = self._get_shared_item() + self._add_module_to_functional_group(self.SingleFrameSet[0],item) + elif self._contains_right_attributes(self.PerFrameTags): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) + +class UnassignedPerFrame(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + + def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + item = Dataset() + for tg, used in self.PerFrameTags.items(): + if not used not in self.ExcludedFromFunctionalGroupsTags: + self._copy_attrib_if_present(src_fg + , item + ,tg + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + + kw = 'UnassignedPerFrameConvertedAttributesSequence' + tg = tag_for_keyword(kw) + seq = DataElement(tg, dictionary_VR(tg), [item]) + dest_fg[tg] = seq + + + def AddModule(self): + for i in range(0, len(self.SingleFrameSet)): + item = self._get_perframe_item(i) + self._add_module_to_functional_group(self.SingleFrameSet[i],item) +class 
class UnassignedShared(Abstract_MultiframeModuleAdder):
    """Collects the shared tags not claimed by any functional group into
    the UnassignedSharedConvertedAttributesSequence of the shared item."""

    def __init__(self, sf_datasets: Sequence[Dataset],
                 excluded_from_perframe_tags: Sequence[Tag],
                 excluded_from_functional_tags: Sequence[Tag],
                 perframe_tags: Sequence[Tag],
                 shared_tags: Sequence[Tag],
                 multi_frame_output: Dataset = None):
        super().__init__(
            sf_datasets,
            excluded_from_perframe_tags,
            excluded_from_functional_tags,
            perframe_tags,
            shared_tags,
            multi_frame_output)

    def _add_module_to_functional_group(self, src_fg: Dataset,
                                        dest_fg: Dataset):
        """Copy all unused, non-excluded shared attributes of ``src_fg``
        into one sequence item attached to ``dest_fg``."""
        item = Dataset()
        for tg, used in self.SharedTags.items():
            if (not used
                    and tg not in self.ExcludedFromFunctionalGroupsTags):
                self._copy_attrib_if_present(
                    src_fg, item, tg,
                    check_not_to_be_perframe=False,
                    check_not_to_be_empty=False)
        kw = 'UnassignedSharedConvertedAttributesSequence'
        tg = tag_for_keyword(kw)
        dest_fg[tg] = DataElement(tg, dictionary_VR(tg), [item])

    def AddModule(self):
        """Shared attributes live in the single shared item."""
        item = self._get_shared_item()
        self._add_module_to_functional_group(self.SingleFrameSet[0], item)


class ConversionSourceFunctionalGroup(Abstract_MultiframeModuleAdder):
    """Records, per frame, which legacy SOP instance the frame was
    converted from (ConversionSourceAttributesSequence)."""

    def __init__(self, sf_datasets: Sequence[Dataset],
                 excluded_from_perframe_tags: Sequence[Tag],
                 excluded_from_functional_tags: Sequence[Tag],
                 perframe_tags: Sequence[Tag],
                 shared_tags: Sequence[Tag],
                 multi_frame_output: Dataset = None):
        super().__init__(
            sf_datasets,
            excluded_from_perframe_tags,
            excluded_from_functional_tags,
            perframe_tags,
            shared_tags,
            multi_frame_output)

    def _add_module_to_functional_group(self, src_fg: Dataset,
                                        dest_fg: Dataset):
        item = Dataset()
        # BUG FIX: the legacy single-frame dataset carries SOPClassUID /
        # SOPInstanceUID, not Referenced*; the original tried to copy
        # 'ReferencedSOPClassUID' from the source, leaving the item empty.
        # Copy with renaming instead.
        self._copy_attrib_if_present(
            src_fg, item, 'SOPClassUID', 'ReferencedSOPClassUID',
            check_not_to_be_perframe=False,
            check_not_to_be_empty=True)
        self._copy_attrib_if_present(
            src_fg, item, 'SOPInstanceUID', 'ReferencedSOPInstanceUID',
            check_not_to_be_perframe=False,
            check_not_to_be_empty=True)
        kw = 'ConversionSourceAttributesSequence'
        tg = tag_for_keyword(kw)
        dest_fg[tg] = DataElement(tg, dictionary_VR(tg), [item])

    def AddModule(self):
        """One conversion-source item per source frame."""
        for i in range(0, len(self.SingleFrameSet)):
            item = self._get_perframe_item(i)
            self._add_module_to_functional_group(self.SingleFrameSet[i], item)


class FrameContentFunctionalGroup(Abstract_MultiframeModuleAdder):
    """Builds the per-frame FrameContentSequence (acquisition number,
    acquisition datetime, duration, temporal position, comments)."""

    def __init__(self, sf_datasets: Sequence[Dataset],
                 excluded_from_perframe_tags: Sequence[Tag],
                 excluded_from_functional_tags: Sequence[Tag],
                 perframe_tags: Sequence[Tag],
                 shared_tags: Sequence[Tag],
                 multi_frame_output: Dataset = None):
        super().__init__(
            sf_datasets,
            excluded_from_perframe_tags,
            excluded_from_functional_tags,
            perframe_tags,
            shared_tags,
            multi_frame_output)
        # Sentinel "far future" value; lowered as real frame times are seen.
        self.EarliestFrameAcquisitionDateTime = DT('99991231235959')

    def _contains_right_attributes(self, tags: dict) -> bool:
        """True if any acquisition date/time attribute is in ``tags``."""
        return any(
            tag_for_keyword(kw) in tags
            for kw in ('AcquisitionDateTime',
                       'AcquisitionDate',
                       'AcquisitionTime'))

    def _add_module_to_functional_group(self, src_fg: Dataset,
                                        dest_fg: Dataset):
        item = Dataset()
        item['AcquisitionNumber'] = self._get_or_create_attribute(
            src_fg, 'AcquisitionNumber', 0)
        acq_dt_a = self._get_or_create_attribute(
            src_fg, 'AcquisitionDateTime', self.EarliestDateTime)
        acq_dt_is_perframe = self._contains_right_attributes(
            self.PerFrameTags)
        if acq_dt_a.value == self.EarliestDateTime:
            # AcquisitionDateTime missing: derive it from Date + Time.
            acq_date_a = self._get_or_create_attribute(
                src_fg, 'AcquisitionDate', self.EarliestDate)
            acq_time_a = self._get_or_create_attribute(
                src_fg, 'AcquisitionTime', self.EarliestTime)
            d = acq_date_a.value
            t = acq_time_a.value
            # BUG FIX: `original_string` is absent on DA/TM built from the
            # defaults above; strftime works for both parsed and default
            # values (same fix later applied upstream).
            acq_dt_a.value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S'))
            if acq_dt_a.value > self.EarliestDateTime:
                if acq_dt_a.value < self.EarliestFrameAcquisitionDateTime:
                    self.EarliestFrameAcquisitionDateTime = acq_dt_a.value
                if not acq_dt_is_perframe:
                    if ('TriggerTime' in src_fg
                            and 'FrameReferenceDateTime' not in src_fg):
                        trigger_a = self._get_or_create_attribute(
                            src_fg, 'TriggerTime', self.EarliestTime)
                        # NOTE(review): TriggerTime is a millisecond offset
                        # (VR DS), so appending it as a time-of-day string
                        # is suspect -- it should probably be added to the
                        # datetime as an offset.  TODO confirm upstream.
                        acq_dt_a.value = DT(
                            d.strftime('%Y%m%d') + str(trigger_a.value))
        item['AcquisitionDateTime'] = acq_dt_a
        # BUG FIX: the three copies below had (item, src_fg) -- src/dest
        # swapped -- in the original, copying from the freshly created
        # (empty) item into the source dataset.
        self._copy_attrib_if_present(
            src_fg, item, 'AcquisitionDuration', 'FrameAcquisitionDuration',
            check_not_to_be_perframe=False, check_not_to_be_empty=True)
        self._copy_attrib_if_present(
            src_fg, item, 'TemporalPositionIndex',
            check_not_to_be_perframe=False, check_not_to_be_empty=True)
        self._copy_attrib_if_present(
            src_fg, item, 'ImageComments', 'FrameComments',
            check_not_to_be_perframe=False, check_not_to_be_empty=True)
        seq_tg = tag_for_keyword('FrameContentSequence')
        dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), [item])

    def AddModule(self):
        """One frame-content item per source frame."""
        for i in range(0, len(self.SingleFrameSet)):
            item = self._get_perframe_item(i)
            self._add_module_to_functional_group(self.SingleFrameSet[i], item)
instance_number: int + Number that should be assigned to the instance + **kwargs: Any, optional + Additional keyword arguments that will be passed to the constructor + of `highdicom.base.SOPClass` + + """ + + try: + ref_ds = legacy_datasets[0] + except IndexError: + raise ValueError('No DICOM data sets of provided.') + + sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] + + super().__init__( + study_instance_uid=ref_ds.StudyInstanceUID, + series_instance_uid=series_instance_uid, + series_number=series_number, + sop_instance_uid=sop_instance_uid, + sop_class_uid=sop_class_uid, + instance_number=instance_number, + manufacturer=ref_ds.Manufacturer, + modality=ref_ds.Modality, + transfer_syntax_uid=None, # FIXME: frame encoding + patient_id=ref_ds.PatientID, + patient_name=ref_ds.PatientName, + patient_birth_date=ref_ds.PatientBirthDate, + patient_sex=ref_ds.PatientSex, + accession_number=ref_ds.AccessionNumber, + study_id=ref_ds.StudyID, + study_date=ref_ds.StudyDate, + study_time=ref_ds.StudyTime, + referring_physician_name=ref_ds.ReferringPhysicianName, + **kwargs + ) + self._legacy_datasets = legacy_datasets + self.DistinguishingAttributeKeywords = [ + 'PatientID', + 'PatientName', + 'Manufacturer', + 'InstitutionName', + 'InstitutionAddress', + 'StationName', + 'InstitutionalDepartmentName', + 'ManufacturerModelName', + 'DeviceSerialNumber', + 'SoftwareVersions', + 'GantryID', + 'PixelPaddingValue', + 'Modality', + 'ImageType', + 'BurnedInAnnotation', + 'SOPClassUID', + 'Rows', + 'Columns', + 'BitsStored', + 'BitsAllocated', + 'HighBit', + 'PixelRepresentation', + 'PhotometricInterpretation', + 'PlanarConfiguration', + 'SamplesPerPixel', + 'ProtocolName', + 'ImageOrientationPatient', + 'PixelSpacing', + 'SliceThickness', + 'AcquisitionContextSequence'] + to_be_removed_from_distinguishing_attribs = set() + for kw in self.DistinguishingAttributeKeywords: + x = [] + not_present_attribute_count = 0 + for ds in legacy_datasets: + if kw in ds: + if 
len(x) == 0: + x.append(ds[kw]) + else: + already_has_new_value = False + for x_elem in x: + if self._isequal(x_elem.value, ds[kw].value): + already_has_new_value = True + break + if not already_has_new_value: + x.append(ds[kw]) + else: + to_be_removed_from_distinguishing_attribs.add(kw) + not_present_attribute_count += 1 + if not_present_attribute_count != len(legacy_datasets) \ + and not_present_attribute_count!=0: + raise ValueError('One or more datasets lack {} distinguishing attributes'.format(kw)) + if len(x)>1: + error_msg = 'All instances must have the same value for {}.\n\tExisting values:' + for x_elem in x: + error_msg += '\n\t\t{}'.format(x_elem.value) + raise ValueError(error_msg) + for kw in to_be_removed_from_distinguishing_attribs: + self.DistinguishingAttributeKeywords.remove(kw) + + self.ExcludedFromPerFrameTags = {} + for i in self.DistinguishingAttributeKeywords: + self.ExcludedFromPerFrameTags[tag_for_keyword(i)] = False + + self.ExcludedFromPerFrameTags[tag_for_keyword('AcquisitionDateTime')] = False + self.ExcludedFromPerFrameTags[tag_for_keyword('AcquisitionDate')] = False + self.ExcludedFromPerFrameTags[tag_for_keyword('AcquisitionTime')] = False + + self.ExcludedFromFunctionalGroupsTags={tag_for_keyword('SpecificCharacterSet'): False} + #--------------------------------------------------------------------- + self.PerFrameTags = {} + self.SharedTags = {} + self._find_per_frame_and_shared_tags() + #---------------------------------------------------------------------- + self.__build_blocks = [] + def _find_per_frame_and_shared_tags(self): + rough_shared = {} + sfs = self._legacy_datasets + for ds in sfs: + for ttag, elem in ds.items(): + if (not ttag.is_private + and not self._istag_file_meta_information_group(ttag) + and not self._istag_repeating_group(ttag) + and not self._istag_group_length(ttag) + and not self._istag_excluded_from_perframe(ttag)): + elem = ds[ttag] + self.PerFrameTags[ttag] = False + if ttag in rough_shared: + 
rough_shared[ttag].append(elem.value) + else: + rough_shared[ttag] = [elem.value] + to_be_removed_from_shared = [] + for ttag, v in rough_shared.items(): + v = rough_shared[ttag] + N = len(v) + if len(v) < len(self._legacy_datasets): + to_be_removed_from_shared.append(ttag) + else: + all_values_are_equal = True + for v_i in v: + if not self._isequal(v_i,v[0]): + all_values_are_equal = False + break + if not all_values_are_equal: + to_be_removed_from_shared.append(ttag) + from pydicom.datadict import keyword_for_tag + for t, v in rough_shared.items(): + if keyword_for_tag(t)!='PatientSex': + continue + + + for t in to_be_removed_from_shared: + del rough_shared[t] + for t, v in rough_shared.items(): + self.SharedTags[t] = False + del self.PerFrameTags[t] + # for t in self.SharedTags: + # print(keyword_for_tag(t)) + # print('perframe ---------------------------') + # for t in self.PerFrameTags: + # print (keyword_for_tag(t)) + + + def _istag_excluded_from_perframe(self, t:Tag)->bool: + return t in self.ExcludedFromPerFrameTags + + + def _istag_file_meta_information_group(self, t:Tag)->bool: + return t.group == 0x0002 + def _istag_repeating_group(self, t:Tag)->bool: + g = t.group + return (g >= 0x5000 and g <= 0x501e) or\ + (g >= 0x6000 and g <= 0x601e) + def _istag_group_length(self, t:Tag)->bool: + return t.element == 0 + def _isequal(self, v1, v2): + float_tolerance = 1.0e-5 + is_equal_float = lambda x1, x2: abs(x1-x2)bool: + if type(ds1) != type(ds2): + return False + if type(ds1) != Dataset: + return False + for k1, elem1 in ds1.items(): + if not k1 in ds2: + return False + elem2 = ds2[k1] + return self._isequal(elem2.value, elem1.value) + def AddNewBuildBlock(self, element:Abstract_MultiframeModuleAdder): + if not isinstance(element, Abstract_MultiframeModuleAdder) : + raise ValueError('Build block must be an instance of Abstract_MultiframeModuleAdder') + self.__build_blocks.append(element) + def AddBuildBlocksForCT(self): + Blocks= 
[PixelImageModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,CommonCTMRPETImageDescriptionMacro(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self + , 'CT') + ,EnhancedCommonImageModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,ContrastBolusModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,EnhancedCTImageModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,AcquisitionContextModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,FrameAnatomyFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,PixelMeasuresFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,PlaneOrientationFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,PlanePositionFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,FrameVOILUTFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + 
,PixelValueTransformationFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,ReferencedImageFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,ConversionSourceFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,FrameContentFunctionalGroup(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,UnassignedPerFrame(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,UnassignedShared(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ] + for b in Blocks: + self.AddNewBuildBlock(b) + + def BuildMultiFrame(self): + for builder in self.__build_blocks: + builder.AddModule() + + + + + + + + + + + + From 6f620733e069ea9995c0a9efcee280e18dfa9e32 Mon Sep 17 00:00:00 2001 From: Afshin Date: Tue, 30 Jun 2020 18:17:10 -0400 Subject: [PATCH 02/44] mend --- src/highdicom/legacy/sop.py | 698 ++++++++++++++++++++++++++++++------ 1 file changed, 585 insertions(+), 113 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 6a5f7410..2fc79963 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -11,7 +11,7 @@ from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DicomSequence from pydicom.multival import MultiValue -from datetime import date, datetime, time +from datetime import date, datetime, time, timedelta from pydicom.valuerep import DT, DA, TM from copy 
import deepcopy @@ -309,17 +309,13 @@ def _convert_legacy_to_enhanced( except AttributeError: pass - # Cardiac Synchronization (U) - # TODO: http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.7 # noqa + # Cardiac Synchronization (U # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.7 # noqa - # Contrast/Bolus Usage (U) - MR/CT only - # TODO: http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.12 # noqa + # Contrast/Bolus Usage (U) - MR/CT onl # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.12 # noqa - # Respiratory Synchronization (U) - # TODO: http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.17 # noqa + # Respiratory Synchronization (U # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.17 # noqa - # Real World Value Mapping (U) - PET only - # TODO: http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.11 # noqa + # Real World Value Mapping (U) - PET onl # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.11 # noqa perframe_items.append(perframe_item) @@ -646,6 +642,9 @@ def __init__(self, sf_datasets:Sequence[Dataset] self.EarliestDate = DA('00010101') self.EarliestTime = TM('000000') self.EarliestDateTime = DT('00010101000000') + self.FarthestFutureDate = DA('99991231') + self.FarthestFutureTime = TM('235959') + self.FarthestFutureDateTime = DT('99991231235959') def _is_empty_or_empty_items(self, attribute:DataElement)->bool: if attribute.is_empty: return True @@ -657,7 +656,7 @@ def _is_empty_or_empty_items(self, attribute:DataElement)->bool: v = item[tg] if not self._is_empty_or_empty_items(v): return False - return True + return False def 
_mark_tag_as_used(self, tg:Tag): if tg in self.SharedTags: @@ -682,14 +681,14 @@ def _copy_attrib_if_present(self, src_ds:Dataset, dest_ds:Dataset, if src_kw_or_tg in src_ds: elem = src_ds[src_kw_or_tg] if check_not_to_be_empty: - if _is_empty_or_empty_items(elem): + if self._is_empty_or_empty_items(elem): return new_elem = deepcopy(elem) if dest_kw_or_tg == src_kw_or_tg: dest_ds[dest_kw_or_tg] = new_elem else: new_elem1 = DataElement(dest_kw_or_tg, - dictionary_VR(dest_kw_or_tg), newelem.value) + dictionary_VR(dest_kw_or_tg), new_elem.value) dest_ds[dest_kw_or_tg] = new_elem1 # now mark the attrib as used/done to keep track of every one of it self._mark_tag_as_used(src_kw_or_tg) @@ -732,7 +731,9 @@ def _get_or_create_attribute(self, src:Dataset, kw:str, default)->DataElement: self._mark_tag_as_used(tg) return a - def _add_module(self, module_name: str, check_not_to_be_perframe=True): + def _add_module(self, module_name: str, excepted_attributes = [] + , check_not_to_be_perframe=True + , check_not_to_be_empty=False): # sf_sop_instance_uid = sf_datasets[0] # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[sf_sop_instance_uid] # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] @@ -741,14 +742,35 @@ def _add_module(self, module_name: str, check_not_to_be_perframe=True): attribs = MODULE_ATTRIBUTE_MAP[module_name] ref_dataset = self.SingleFrameSet[0] for a in attribs: + if a in excepted_attributes: + continue if len(a['path']) == 0: - self._copy_attrib_if_present(ref_dataset, self.TargetDataset, a['keyword'], - check_not_to_be_perframe=check_not_to_be_perframe) + self._copy_attrib_if_present(ref_dataset, self.TargetDataset, a['keyword'] + , check_not_to_be_perframe=check_not_to_be_perframe + , check_not_to_be_empty=check_not_to_be_empty) @abstractmethod def AddModule(self): pass -class PixelImageModule(Abstract_MultiframeModuleAdder): +class ImagePixelModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , 
excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def AddModule(self): + self._add_module('image-pixel',excepted_attributes=[], check_not_to_be_empty=False, + check_not_to_be_perframe=False) # don't check the perframe set +class CompositeInstanceContex(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets:Sequence[Dataset] , excluded_from_perframe_tags:Sequence[Tag] , excluded_from_functional_tags:Sequence[Tag] @@ -763,8 +785,90 @@ def __init__(self, sf_datasets:Sequence[Dataset] , shared_tags , multi_frame_output ) + self._module_excepted_list = { + "patient" : [] + , "clinical-trial-subject" : [] + , "general-study" : + [ + "RequestingService" + ] + , "patient-study" : + [ + "ReasonForVisit" + , "ReasonForVisitCodeSequence" + ] + , "clinical-trial-study" : [] + , "general-series" : + [ + "SmallestPixelValueInSeries" + , "LargestPixelValueInSeries" + , "PerformedProcedureStepEndDate" + , "PerformedProcedureStepEndTime" + ] + , "clinical-trial-series" : [] + , "general-equipment" : + [ + "InstitutionalDepartmentTypeCodeSequence" + ] + , "frame-of-reference" : [] + , "sop-common" : + [ + "SpecificCharacterSet" + , "EncryptedAttributesSequence" + , "MACParametersSequence" + , "DigitalSignaturesSequence" + ] + , "general-image" : + [ + "ImageType" + , "AcquisitionDate" + , "AcquisitionDateTime" + , "AcquisitionTime" + , "AnatomicRegionSequence" + , "PrimaryAnatomicStructureSequence" + , "IrradiationEventUID" + , "AcquisitionNumber" + , "InstanceNumber" + , "PatientOrientation" + , "ImageLaterality" + , "ImagesInAcquisition" + , "ImageComments" + , "QualityControlImage" + , "BurnedInAnnotation" + , "RecognizableVisualFeatures" + , "LossyImageCompression" + , 
"LossyImageCompressionRatio" + , "LossyImageCompressionMethod" + , "RealWorldValueMappingSequence" + , "IconImageSequence" + , "PresentationLUTShape" + ] + , "sr-document-general" : + [ + "ContentDate" + , "ContentTime" + , "ReferencedInstanceSequence" + , "InstanceNumber" + , "VerifyingObserverSequence" + , "AuthorObserverSequence" + , "ParticipantSequence" + , "CustodialOrganizationSequence" + , "PredecessorDocumentsSequence" + , "CurrentRequestedProcedureEvidenceSequence" + , "PertinentOtherEvidenceSequence" + , "CompletionFlag" + , "CompletionFlagDescription" + , "VerificationFlag" + , "PreliminaryFlag" + , "IdenticalDocumentsSequence" + ] +} def AddModule(self): - self._add_module('image-pixel', False) # don't check the perframe set + for module_name, excpeted_a in self._module_excepted_list.items(): + self._add_module(module_name + , excepted_attributes = excpeted_a + , check_not_to_be_empty=False + , check_not_to_be_perframe=False) # don't check the perframe set class CommonCTMRPETImageDescriptionMacro(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets:Sequence[Dataset] , excluded_from_perframe_tags:Sequence[Tag] @@ -783,7 +887,7 @@ def __init__(self, sf_datasets:Sequence[Dataset] ) self.Modality = modality def _get_value_for_frame_type(self, attrib:DataElement): - if type(attrib) == DataElement: + if type(attrib) != DataElement: return None output = ['', '', '', ''] v = attrib.value @@ -793,41 +897,53 @@ def _get_value_for_frame_type(self, attrib:DataElement): output[2] = 'VOLUME' if l<3 else v[2] output[3] = 'NONE' return output - - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + def _get_frame_type_seq_tag(self)->int: seq_kw = '{}{}FrameTypeSequence' if self.Modality == 'PET': seq_kw = seq_kw.format(self.Modality, '') else: seq_kw = seq_kw.format(self.Modality, 'Image') - seq_tg = tag_for_keyword(seq_kw) + return tag_for_keyword(seq_kw) + + + def _add_module_to_functional_group(self, src_fg:Dataset, 
dest_fg:Dataset, level): + FrameType_a = src_fg['ImageType'] + if level ==0: + FrameType_tg = tag_for_keyword('ImageType') + else: + FrameType_tg = tag_for_keyword('FrameType') + new_val = self._get_value_for_frame_type(FrameType_a) - inner_item = Dataset() - FrameType_tg = tag_for_keyword('FrameType') - inner_item[FrameType_tg] = DataElement(FrameType_tg, + dest_fg[FrameType_tg] = DataElement(FrameType_tg, FrameType_a.VR, new_val) - dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), - [inner_item]) + element_generator = lambda kw, val:DataElement(tag_for_keyword(kw) + , dictionary_VR(tag_for_keyword(kw)), val) + dest_fg['PixelPresentation'] = element_generator('PixelPresentation', "MONOCHROME") + dest_fg['VolumetricProperties'] = element_generator('VolumetricProperties', "VOLUME") + dest_fg['VolumeBasedCalculationTechnique'] = element_generator('VolumeBasedCalculationTechnique', "NONE") def AddModule(self): im_type_tag = tag_for_keyword('ImageType') fm_type_tag = tag_for_keyword('FrameType') + + seq_tg=self._get_frame_type_seq_tag() if not im_type_tag in self.PerFrameTags: - im_type_a = self.SingleFrameSet[0][im_type_tag] - new_val = self._get_value_for_frame_type(im_type_a) - self.TargetDataset[im_type_tag] = DataElement(im_type_tag, - im_type_a.VR, new_val) + self._add_module_to_functional_group(self.SingleFrameSet[0],self.TargetDataset,0) #---------------------------- - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + inner_item = Dataset() + self._add_module_to_functional_group(self.SingleFrameSet[0],inner_item,1) + item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) else: for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) + inner_item = Dataset() + self._add_module_to_functional_group(self.SingleFrameSet[i],inner_item, 1) + item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) + class 
EnhancedCommonImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets:Sequence[Dataset] @@ -928,6 +1044,82 @@ def __init__(self, sf_datasets:Sequence[Dataset] def AddModule(self): pass #David's code doesn't hold anything for this module ... should ask him +class EnhancedPETImageModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def AddModule(self): + pass + #David's code doesn't hold anything for this module ... should ask him +class EnhancedMRImageModule(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + + + def AddModule(self): + self._copy_attrib_if_present(self.SingleFrameSet[0] + , self.TargetDataset + , "ResonantNucleus" + , check_not_to_be_perframe = True + , check_not_to_be_empty = True) + + if not 'ResonantNucleus' in self.TargetDataset: + # derive from ImagedNucleus, which is the one used in legacy MR IOD, + # but does not have a standard list of defined terms ... 
(could check these :() + self._copy_attrib_if_present(self.SingleFrameSet[0] + , self.TargetDataset + , "ImagedNucleus" + , check_not_to_be_perframe = True + , check_not_to_be_empty = True) + + self._copy_attrib_if_present(self.SingleFrameSet[0] + , self.TargetDataset + , "KSpaceFiltering" + , check_not_to_be_perframe = True + , check_not_to_be_empty = True) + + self._copy_attrib_if_present(self.SingleFrameSet[0] + , self.TargetDataset + , "MagneticFieldStrength" + , check_not_to_be_perframe = True + , check_not_to_be_empty = True) + + self._copy_attrib_if_present(self.SingleFrameSet[0] + , self.TargetDataset + , "ApplicableSafetyStandardAgency" + , check_not_to_be_perframe = True + , check_not_to_be_empty = True) + + self._copy_attrib_if_present(self.SingleFrameSet[0] + , self.TargetDataset + , "ApplicableSafetyStandardDescription" + , check_not_to_be_perframe = True + , check_not_to_be_empty = True) + + class AcquisitionContextModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets:Sequence[Dataset] , excluded_from_perframe_tags:Sequence[Tag] @@ -968,20 +1160,28 @@ def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): #Should check it out later. 
fa_seq_tg = tag_for_keyword('FrameAnatomySequence') item = Dataset() - self._copy_attrib_if_present(item, src_fg, 'AnatomicRegionSequence' - , check_not_to_be_perframe=False) - self._copy_attrib_if_present(item, src_fg, 'FrameLaterality' + self._copy_attrib_if_present(src_fg, item, 'AnatomicRegionSequence' + , check_not_to_be_perframe=False + , check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, item, 'FrameLaterality' , check_not_to_be_perframe=False , check_not_to_be_empty=True) if not 'FrameLaterality' in item: - self._copy_attrib_if_present(item, src_fg, 'ImageLaterality' + self._copy_attrib_if_present(src_fg, item, 'ImageLaterality' , 'FrameLaterality' - , check_not_to_be_perframe=False) + , check_not_to_be_perframe=False + , check_not_to_be_empty=True) if not 'FrameLaterality' in item: - self._copy_attrib_if_present(item, src_fg, 'Laterality' + self._copy_attrib_if_present(src_fg, item, 'Laterality' , 'FrameLaterality' - , check_not_to_be_perframe=False) + , check_not_to_be_perframe=False + , check_not_to_be_empty=True) + if not 'FrameLaterality' in item: + FrameLaterality_a=self._get_or_create_attribute(src_fg, 'FrameLaterality', "U") + item['FrameLaterality'] = FrameLaterality_a + + FrameAnatomy_a = DataElement(fa_seq_tg, dictionary_VR(fa_seq_tg), [item] ) @@ -1113,51 +1313,7 @@ def AddModule(self): item = self._get_perframe_item(i) self._add_module_to_functional_group(self.SingleFrameSet[i],item) -class PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): - super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _contains_right_attributes(self, tags:dict) ->bool: - 
ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient' ) - - return ImagePositionPatient_tg in tags - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): - item = Dataset() - self._copy_attrib_if_present(src_fg - , item - ,'ImagePositionPatient' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - PlanePositionSequence_kw = 'PlanePositionSequence' - PlanePositionSequence_tg = tag_for_keyword(PlanePositionSequence_kw) - seq = DataElement(PlanePositionSequence_tg - , dictionary_VR(PlanePositionSequence_tg) - , [item]) - dest_fg[PlanePositionSequence_tg] = seq - - def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) - ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) - elif self._contains_right_attributes(self.PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + class PlaneOrientationFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets:Sequence[Dataset] , excluded_from_perframe_tags:Sequence[Tag] @@ -1517,11 +1673,13 @@ def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): item = Dataset() self._copy_attrib_if_present(src_fg , item + ,'SOPClassUID' ,'ReferencedSOPClassUID' , check_not_to_be_perframe=False , check_not_to_be_empty=True) self._copy_attrib_if_present(src_fg , item + ,'SOPInstanceUID' ,'ReferencedSOPInstanceUID' , check_not_to_be_perframe=False , check_not_to_be_empty=True) @@ -1554,7 +1712,7 @@ def __init__(self, sf_datasets:Sequence[Dataset] , shared_tags , multi_frame_output ) - self.EarliestFrameAcquisitionDateTime = DT('99991231235959') + self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime def 
_contains_right_attributes(self, tags:dict) ->bool: AcquisitionDateTime_tg = tag_for_keyword('AcquisitionDateTime') AcquisitionDate_tg = tag_for_keyword('AcquisitionDate') @@ -1574,33 +1732,228 @@ def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): AcquisitionTime_a = self._get_or_create_attribute(src_fg,'AcquisitionTime', self.EarliestTime) d = AcquisitionDate_a.value t = AcquisitionTime_a.value - AcquisitionDateTime_a.value = DT(d.original_string+t.original_string) + AcquisitionDateTime_a.value = DT(d.strftime('%Y%m%d')+t.strftime('%H%M%S')) if AcquisitionDateTime_a.value > self.EarliestDateTime: if AcquisitionDateTime_a.value < self.EarliestFrameAcquisitionDateTime: self.EarliestFrameAcquisitionDateTime = AcquisitionDateTime_a.value if not AcquisitionDateTime_is_perframe: if 'TriggerTime' in src_fg and not 'FrameReferenceDateTime' in src_fg: TriggerTime_a = self._get_or_create_attribute(src_fg,'TriggerTime', self.EarliestTime) - AcquisitionDateTime_a.value = DT( - AcquisitionDate_a.value.original_string+TriggerTime_a.value.original_string) + trigger_time_in_millisecond = int(TriggerTime_a.value) + if trigger_time_in_millisecond>0: + t_delta = timedelta(trigger_time_in_millisecond) + # this is so rediculous. 
I'm not able to cnvert the DT to datetime (cast to superclass) + d_t = datetime.combine(AcquisitionDateTime_a.value.date() , AcquisitionDateTime_a.value.time()) + d_t = d_t + t_delta + AcquisitionDateTime_a.value = DT(d_t.strftime('%Y%m%d%H%M%S')) item['AcquisitionDateTime'] =AcquisitionDateTime_a #--------------------------------- - self._copy_attrib_if_present(item, src_fg, "AcquisitionDuration", + self._copy_attrib_if_present(src_fg, item, "AcquisitionDuration", "FrameAcquisitionDuration", check_not_to_be_perframe=False, check_not_to_be_empty=True) - self._copy_attrib_if_present(item, src_fg, 'TemporalPositionIndex' + self._copy_attrib_if_present(src_fg, item, 'TemporalPositionIndex' , check_not_to_be_perframe=False, check_not_to_be_empty=True) - self._copy_attrib_if_present(item, src_fg, "ImageComments", + self._copy_attrib_if_present(src_fg, item, "ImageComments", "FrameComments", check_not_to_be_perframe=False, check_not_to_be_empty=True) #----------------------------------- seq_tg = tag_for_keyword('FrameContentSequence') dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), [item]) - - + # Also we want to add the earliest frame acq date time to the multiframe: + def AddModule(self): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + if self.EarliestFrameAcquisitionDateTime < self.FarthestFutureDateTime: + kw = 'AcquisitionDateTime' + self.TargetDataset[kw] = DataElement(tag_for_keyword(kw), + 'DT', self.EarliestFrameAcquisitionDateTime) +class PixelData(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , 
shared_tags + , multi_frame_output + ) + + self._byte_data = bytearray() + self._word_data = bytearray() + + def _is_other_byte_vr(self, vr:str) -> bool: + return vr[0]=='O' and vr[1]=='B' + def _is_other_word_vr(self, vr:str) -> bool: + return vr[0]=='O' and vr[1]=='W' + # def _contains_right_attributes(self, tags:dict) ->bool: + # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient' ) + + # return ImagePositionPatient_tg in tags + + + def AddModule(self): + kw = 'NumberOfFrames' + tg = tag_for_keyword(kw) + FrameCount = len (self.SingleFrameSet) + self.TargetDataset[kw] = DataElement(tg, dictionary_VR(tg), FrameCount) + kw = "PixelData" + for i in range(0, len(self.SingleFrameSet)): + PixelData_a = self.SingleFrameSet[i][kw] + if self._is_other_byte_vr(PixelData_a.VR) : + if len(self._word_data) != 0: + raise TypeError( + 'Cannot mix OB and OW Pixel Data VR from different frames' + ) + self._byte_data.extend(PixelData_a.value) + elif self._is_other_word_vr(PixelData_a.VR) : + if len(self._byte_data) !=0: + raise TypeError( + 'Cannot mix OB and OW Pixel Data VR from different frames' + ) + self._word_data.extend(PixelData_a.value) + else: + raise TypeError( + 'Cannot mix OB and OW Pixel Data VR from different frames') + if len(self._byte_data)!=0: + MF_PixelData = DataElement(tag_for_keyword(kw), + 'OB', bytes(self._byte_data)) + elif len(self._word_data)!=0: + MF_PixelData = DataElement(tag_for_keyword(kw), + 'OW', bytes(self._word_data)) + self.TargetDataset[kw] = MF_PixelData + + +class ContentDateTime(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + 
self.EarliestContentDateTime = self.FarthestFutureDateTime + + def AddModule(self): + + for i in range(0, len(self.SingleFrameSet)): + src = self.SingleFrameSet[i] + ContentDate_a = self._get_or_create_attribute(src,'ContentDate', self.EarliestDate) + ContentTime_a = self._get_or_create_attribute(src,'ContentTime', self.EarliestTime) + d = ContentDate_a.value + t = ContentTime_a.value + value = DT(d.strftime('%Y%m%d')+t.strftime('%H%M%S')) + if self.EarliestContentDateTime > value: + self.EarliestContentDateTime = value + if self.EarliestContentDateTime < self.FarthestFutureDateTime: + n_d = DA(self.EarliestContentDateTime.date().strftime('%Y%m%d')) + n_t = TM(self.EarliestContentDateTime.time().strftime('%H%M%S')) + kw = 'ContentDate' + self.TargetDataset[kw] = DataElement( + tag_for_keyword(kw), 'DA', n_d) + kw = 'ContentTime' + self.TargetDataset[kw] = DataElement( + tag_for_keyword(kw), 'TM', n_t) + + + +class InstanceCreationDateTime(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + + def AddModule(self): + nnooww = datetime.now() + n_d = DA(nnooww.date().strftime('%Y%m%d')) + n_t = TM(nnooww.time().strftime('%H%M%S')) + kw = 'InstanceCreationDate' + self.TargetDataset[kw] = DataElement( + tag_for_keyword(kw), 'DA', n_d) + kw = 'InstanceCreationTime' + self.TargetDataset[kw] = DataElement( + tag_for_keyword(kw), 'TM', n_t) + +class ContributingEquipmentSequence(Abstract_MultiframeModuleAdder): + def __init__(self, sf_datasets:Sequence[Dataset] + , excluded_from_perframe_tags:Sequence[Tag] + , excluded_from_functional_tags:Sequence[Tag] + , perframe_tags: Sequence[Tag] + , 
shared_tags: Sequence[Tag] + , multi_frame_output:Dataset=None): + super().__init__( + sf_datasets + , excluded_from_perframe_tags + , excluded_from_functional_tags + , perframe_tags + , shared_tags + , multi_frame_output + ) + def _add_data_element_to_target(self, kw:str, value)->None: + tg = tag_for_keyword(kw) + self.TargetDataset[kw] = DataElement(tg, dictionary_VR(tg), value) + + + def AddModule(self): + ds = Dataset() + CodeValue_tg = tag_for_keyword('CodeValue') + CodeMeaning_tg = tag_for_keyword('CodeMeaning') + CodingSchemeDesignator_tg = tag_for_keyword('CodingSchemeDesignator') + PurposeOfReferenceCode_item = Dataset() + PurposeOfReferenceCode_item['CodeValue'] = DataElement( + CodeValue_tg + , dictionary_VR(CodeValue_tg) + ,'109106') + PurposeOfReferenceCode_item['CodeMeaning'] = DataElement( + CodeMeaning_tg + , dictionary_VR(CodeMeaning_tg) + ,'Enhanced Multi-frame Conversion Equipment') + PurposeOfReferenceCode_item['CodingSchemeDesignator'] = DataElement( + CodingSchemeDesignator_tg + , dictionary_VR(CodingSchemeDesignator_tg) + ,'DCM') + + PurposeOfReferenceCode_seq = DataElement( + tag_for_keyword('PurposeOfReferenceCodeSequence'), + 'SQ',[PurposeOfReferenceCode_item] + ) + self.TargetDataset['PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq + + self._add_data_element_to_target("Manufacturer", 'HighDicom') + self._add_data_element_to_target("InstitutionName", 'HighDicom') + self._add_data_element_to_target("InstitutionalDepartmentName", 'Software Development' ) + self._add_data_element_to_target("InstitutionAddress", 'Radialogy Department, B&W Hospital, Boston, MA') + self._add_data_element_to_target("SoftwareVersions", '1.4') # get sw version + self._add_data_element_to_target("ContributionDescription", 'Legacy Enhanced Image created from Classic Images') + + + + + + + + @@ -1615,6 +1968,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, + sort_key=None, **kwargs: Any ) -> None: """ @@ -1643,7 +1997,8 
@@ def __init__( raise ValueError('No DICOM data sets of provided.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - + if sort_key == None: + sort_key = LegacyConvertedEnhanceImage.default_sort_key super().__init__( study_instance_uid=ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, @@ -1742,6 +2097,33 @@ def __init__( self._find_per_frame_and_shared_tags() #---------------------------------------------------------------------- self.__build_blocks = [] + #===================================================== + new_ds = [] + for item in sorted(self._legacy_datasets, key=sort_key): + new_ds.append(item) + self.legacy_datasets = new_ds + if _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-ct-image': + self.AddBuildBlocksForCT() + elif _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-mr-image': + self.AddBuildBlocksForMR() + elif _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-pet-image': + self.AddBuildBlocksForPET() + + + + + def default_sort_key(x:Dataset)->tuple: + out = tuple() + if 'SeriesNumber' in x: + out += (x['SeriesNumber'].value,) + if 'InstanceNumber' in x: + out += (x['InstanceNumber'].value,) + if 'SOPInstanceUID' in x: + out += (x['SOPInstanceUID'].value,) + return out + + + def _find_per_frame_and_shared_tags(self): rough_shared = {} sfs = self._legacy_datasets @@ -1839,33 +2221,22 @@ def AddNewBuildBlock(self, element:Abstract_MultiframeModuleAdder): if not isinstance(element, Abstract_MultiframeModuleAdder) : raise ValueError('Build block must be an instance of Abstract_MultiframeModuleAdder') self.__build_blocks.append(element) - def AddBuildBlocksForCT(self): - Blocks= [PixelImageModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,CommonCTMRPETImageDescriptionMacro(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , 
self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self - , 'CT') - ,EnhancedCommonImageModule(self._legacy_datasets + def ClearBuildBlocks(self): + self.__build_blocks = [] + def AddCommonCT_PET_MR_BuildBlocks(self): + Blocks= [ImagePixelModule(self._legacy_datasets , self.ExcludedFromPerFrameTags , self.ExcludedFromFunctionalGroupsTags , self.PerFrameTags , self.SharedTags , self) - ,ContrastBolusModule(self._legacy_datasets + ,CompositeInstanceContex(self._legacy_datasets , self.ExcludedFromPerFrameTags , self.ExcludedFromFunctionalGroupsTags , self.PerFrameTags , self.SharedTags , self) - ,EnhancedCTImageModule(self._legacy_datasets + ,EnhancedCommonImageModule(self._legacy_datasets , self.ExcludedFromPerFrameTags , self.ExcludedFromFunctionalGroupsTags , self.PerFrameTags @@ -1943,9 +2314,110 @@ def AddBuildBlocksForCT(self): , self.PerFrameTags , self.SharedTags , self) + ,PixelData(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,ContentDateTime(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,InstanceCreationDateTime(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,ContributingEquipmentSequence(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) ] for b in Blocks: self.AddNewBuildBlock(b) + + def AddCTSpecificBuildBlocks(self): + Blocks= [CommonCTMRPETImageDescriptionMacro(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self + , 'CT') + ,EnhancedCTImageModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , 
self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,ContrastBolusModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ] + for b in Blocks: + self.AddNewBuildBlock(b) + def AddMRSpecificBuildBlocks(self): + Blocks= [CommonCTMRPETImageDescriptionMacro(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self + , 'MR') + ,EnhancedMRImageModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ,ContrastBolusModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ] + for b in Blocks: + self.AddNewBuildBlock(b) + def AddPETSpecificBuildBlocks(self): + Blocks= [CommonCTMRPETImageDescriptionMacro(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self + , 'PET') + ,EnhancedPETImageModule(self._legacy_datasets + , self.ExcludedFromPerFrameTags + , self.ExcludedFromFunctionalGroupsTags + , self.PerFrameTags + , self.SharedTags + , self) + ] + for b in Blocks: + self.AddNewBuildBlock(b) + + def AddBuildBlocksForCT(self): + self.ClearBuildBlocks() + self.AddCommonCT_PET_MR_BuildBlocks() + self.AddCTSpecificBuildBlocks() + def AddBuildBlocksForMR(self): + self.ClearBuildBlocks() + self.AddCommonCT_PET_MR_BuildBlocks() + self.AddMRSpecificBuildBlocks() + def AddBuildBlocksForPET(self): + self.ClearBuildBlocks() + self.AddCommonCT_PET_MR_BuildBlocks() + self.AddPETSpecificBuildBlocks() def BuildMultiFrame(self): for builder in self.__build_blocks: From 5af4d97211da7b0c519deb8c2d12d9829cf9f413 Mon Sep 17 00:00:00 2001 From: Afshin Date: Wed, 1 Jul 2020 
19:41:18 -0400 Subject: [PATCH 03/44] Flake8 verified code --- src/highdicom/legacy/sop.py | 2559 ++++++++++++++++++----------------- 1 file changed, 1302 insertions(+), 1257 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 2fc79963..aa8728b8 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,9 +1,7 @@ """Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" - import logging from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union - import numpy as np from pydicom.datadict import tag_for_keyword, dictionary_VR from pydicom.dataset import Dataset @@ -14,18 +12,12 @@ from datetime import date, datetime, time, timedelta from pydicom.valuerep import DT, DA, TM from copy import deepcopy - from pydicom.uid import UID - from highdicom.base import SOPClass from highdicom.legacy import SOP_CLASS_UIDS from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP - - logger = logging.getLogger(__name__) - - LEGACY_ENHANCED_SOP_CLASS_UID_MAP = { # CT Image Storage '1.2.840.10008.5.1.4.1.1.2': '1.2.840.10008.5.1.4.1.1.2.2', @@ -34,11 +26,9 @@ # PET Image Storage '1.2.840.10008.5.1.4.1.1.128': '1.2.840.10008.5.1.4.1.1.128.1', } - - _SOP_CLASS_UID_IOD_KEY_MAP = { - '1.2.840.10008.5.1.4.1.1.2.2': 'legacy-converted-enhanced-ct-image', - '1.2.840.10008.5.1.4.1.1.4.4': 'legacy-converted-enhanced-mr-image', + '1.2.840.10008.5.1.4.1.1.2.2': 'legacy-converted-enhanced-ct-image', + '1.2.840.10008.5.1.4.1.1.4.4': 'legacy-converted-enhanced-mr-image', '1.2.840.10008.5.1.4.1.1.128.1': 'legacy-converted-enhanced-pet-image', } @@ -46,38 +36,31 @@ def _convert_legacy_to_enhanced( sf_datasets: Sequence[Dataset], mf_dataset: Optional[Dataset] = None - ) -> Dataset: +) -> Dataset: """Converts one or more MR, CT or PET Image instances into one Legacy Converted Enhanced MR/CT/PET Image instance by copying information from `sf_datasets` into 
`mf_dataset`. - Parameters ---------- sf_datasets: Sequence[pydicom.dataset.Dataset] DICOM data sets of single-frame legacy image instances mf_dataset: pydicom.dataset.Dataset, optional DICOM data set of multi-frame enhanced image instance - Returns ------- pydicom.dataset.Dataset DICOM data set of enhanced multi-frame image instance - Note ---- Frames will be included into the Pixel Data element in the order in which instances are provided via `sf_datasets`. - """ try: ref_ds = sf_datasets[0] except IndexError: raise ValueError('No data sets of single-frame legacy images provided.') - if mf_dataset is None: mf_dataset = Dataset() - - transfer_syntaxes = set() series = set() studies = set() @@ -89,25 +72,18 @@ def _convert_legacy_to_enhanced( modalities.add(ds.Modality) if len(series) > 1: raise ValueError( - 'All instances must belong to the same series.' - ) + 'All instances must belong to the same series.') if len(studies) > 1: raise ValueError( - 'All instances must belong to the same study.' - ) + 'All instances must belong to the same study.') if len(modalities) > 1: raise ValueError( - 'All instances must have the same modality.' - ) + 'All instances must have the same modality.') if len(transfer_syntaxes) > 1: raise ValueError( - 'All instances must have the same transfer syntaxes.' - ) - + 'All instances must have the same transfer syntaxes.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - mf_dataset.NumberOfFrames = len(sf_datasets) - # We will ignore some attributes, because they will get assigned new # values in the legacy converted enhanced image instance. 
ignored_attributes = { @@ -118,7 +94,6 @@ def _convert_legacy_to_enhanced( tag_for_keyword('PixelData'), tag_for_keyword('SeriesInstanceUID'), } - mf_attributes = [] iod_key = _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] for module_item in IOD_MODULE_MAP[iod_key]: @@ -131,7 +106,6 @@ def _convert_legacy_to_enhanced( if tag in ignored_attributes: continue mf_attributes.append(tag) - # Assign attributes that are not defined at the root level of the # Lecacy Converted Enhanced MR/CT/PET Image IOD to the appropriate # sequence attributes of the SharedFunctinoalGroupsSequence or @@ -163,41 +137,34 @@ def _convert_legacy_to_enhanced( tag_for_keyword('RescaleSlope'), tag_for_keyword('RescaleType'), } - if ref_ds.ImageType[0] == 'ORIGINAL': mf_dataset.VolumeBasedCalculationTechnique = 'NONE' else: mf_dataset.VolumeBasedCalculationTechnique = 'MIXED' - pixel_representation = sf_datasets[0].PixelRepresentation volumetric_properties = 'VOLUME' unique_image_types = set() unassigned_dataelements: Dict[str, List[Dataset]] = defaultdict(list) - # Per-Frame Functional Groups perframe_items = [] for i, ds in enumerate(sf_datasets): perframe_item = Dataset() - # Frame Content (M) frame_content_item = Dataset() if 'AcquisitionDate' in ds and 'AcquisitionTime' in ds: frame_content_item.FrameAcquisitionDateTime = '{}{}'.format( ds.AcquisitionDate, - ds.AcquisitionTime - ) + ds.AcquisitionTime) frame_content_item.FrameAcquisitionNumber = ds.InstanceNumber perframe_item.FrameContentSequence = [ frame_content_item, ] - # Plane Position (Patient) (M) plane_position_item = Dataset() plane_position_item.ImagePositionPatient = ds.ImagePositionPatient perframe_item.PlanePositionSequence = [ plane_position_item, ] - frame_type = list(ds.ImageType) if len(frame_type) < 4: if frame_type[0] == 'ORIGINAL': @@ -214,19 +181,16 @@ def _convert_legacy_to_enhanced( frame_type_item.FrameVolumeBasedCalculationTechnique = 'NONE' else: frame_type_item.FrameVolumeBasedCalculationTechnique = 'MIXED' - if 
sop_class_uid == '1.2.840.10008.5.1.4.1.1.4.4': # MR Image Frame Type (M) perframe_item.MRImageFrameTypeSequence = [ frame_type_item, ] - elif sop_class_uid == '1.2.840.10008.5.1.4.1.1.2.2': # CT Image Frame Type (M) perframe_item.CTImageFrameTypeSequence = [ frame_type_item, ] - # CT Pixel Value Transformation (M) pixel_val_transform_item = Dataset() pixel_val_transform_item.RescaleIntercept = ds.RescaleIntercept @@ -238,13 +202,11 @@ def _convert_legacy_to_enhanced( perframe_item.PixelValueTransformationSequence = [ pixel_val_transform_item, ] - elif sop_class_uid == '1.2.840.10008.5.1.4.1.1.128.1': # PET Image Frame Type (M) perframe_item.PETImageFrameTypeSequence = [ frame_type_item, ] - # Frame VOI LUT (U) try: frame_voi_lut_item = Dataset() @@ -255,20 +217,17 @@ def _convert_legacy_to_enhanced( ] except AttributeError: pass - # Referenced Image (C) try: perframe_item.ReferencedImageSequence = \ ds.ReferencedImageSequence except AttributeError: pass - # Derivation Image (C) try: perframe_item.SourceImageSequence = ds.SourceImageSequence except AttributeError: pass - # Frame Anatomy (C) try: frame_anatomy_item = Dataset() @@ -278,7 +237,6 @@ def _convert_legacy_to_enhanced( ] except AttributeError: pass - # Image Frame Conversion Source (C) conv_src_attr_item = Dataset() conv_src_attr_item.ReferencedSOPClassUID = ds.SOPClassUID @@ -286,7 +244,6 @@ def _convert_legacy_to_enhanced( perframe_item.ConversionSourceAttributesSequence = [ conv_src_attr_item, ] - # Irradiation Event Identification (C) - CT/PET only try: irradiation_event_id_item = Dataset() @@ -297,7 +254,6 @@ def _convert_legacy_to_enhanced( ] except AttributeError: pass - # Temporal Position (U) try: temporal_position_item = Dataset() @@ -308,20 +264,13 @@ def _convert_legacy_to_enhanced( ] except AttributeError: pass - - # Cardiac Synchronization (U # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.7 # noqa - - # Contrast/Bolus Usage (U) - 
MR/CT onl # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.12 # noqa - - # Respiratory Synchronization (U # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.17 # noqa - - # Real World Value Mapping (U) - PET onl # TODO: http:#dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html#sect_C.7.6.16.2.11 # noqa - + # Cardiac Synchronization (U # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.7 # noqa + # Contrast/Bolus Usage (U) - MR/CT onl # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.12 # noqa + # Respiratory Synchronization (U # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.17 # noqa + # Real World Value Mapping (U) - PET onl # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.11 # noqa perframe_items.append(perframe_item) - # All other attributes that are not assigned to functional groups. 
- - for tag, da in ds.items(): + for tag, da in ds.items(): if tag in assigned_attributes: continue elif tag in mf_attributes: @@ -329,7 +278,6 @@ def _convert_legacy_to_enhanced( else: if tag not in ignored_attributes: unassigned_dataelements[tag].append(da) - # All remaining unassigned attributes will be collected in either the # UnassignedSharedConvertedAttributesSequence or the # UnassignedPerFrameConvertedAttributesSequence, depending on whether @@ -347,16 +295,13 @@ def _convert_legacy_to_enhanced( else: for i, da in enumerate(dataelements): unassigned_perframe_ca_items[i].add(da) - mf_dataset.ImageType = list(list(unique_image_types)[0]) if len(unique_image_types) > 1: mf_dataset.ImageType[2] = 'MIXED' mf_dataset.PixelRepresentation = pixel_representation mf_dataset.VolumetricProperties = volumetric_properties - # Shared Functional Groups shared_item = Dataset() - # Pixel Measures (M) pixel_measures_item = Dataset() pixel_measures_item.PixelSpacing = ref_ds.PixelSpacing @@ -369,7 +314,6 @@ def _convert_legacy_to_enhanced( shared_item.PixelMeasuresSequence = [ pixel_measures_item, ] - # Plane Orientation (Patient) (M) plane_orientation_item = Dataset() plane_orientation_item.ImageOrientationPatient = \ @@ -377,37 +321,30 @@ def _convert_legacy_to_enhanced( shared_item.PlaneOrientationSequence = [ plane_orientation_item, ] - shared_item.UnassignedSharedConvertedAttributesSequence = [ unassigned_shared_ca_item, ] mf_dataset.SharedFunctionalGroupsSequence = [ shared_item, ] - for i, ca_item in enumerate(unassigned_perframe_ca_items): perframe_items[i].UnassignedPerFrameConvertedAttributesSequence = [ ca_item, ] mf_dataset.PerFrameFunctionalGroupsSequence = perframe_items - mf_dataset.AcquisitionContextSequence = [] - # TODO: Encapsulated Pixel Data with compressed frame items. 
- # Create the Pixel Data element of the mulit-frame image instance using # native encoding (simply concatenating pixels of individual frames) - # Sometimes there may be numpy types such as ">i2". The (* 1) hack + # Sometimes there may be numpy types such as " > i2". The (* 1) hack # ensures that pixel values have the correct integer type. mf_dataset.PixelData = b''.join([ (ds.pixel_array * 1).data for ds in sf_datasets ]) - return mf_dataset class LegacyConvertedEnhancedMRImage(SOPClass): - """SOP class for Legacy Converted Enhanced MR Image instances.""" def __init__( @@ -417,8 +354,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - **kwargs: Any - ) -> None: + **kwargs: Any) -> None: """ Parameters ---------- @@ -436,25 +372,18 @@ def __init__( **kwargs: Any, optional Additional keyword arguments that will be passed to the constructor of `highdicom.base.SOPClass` - """ - try: ref_ds = legacy_datasets[0] except IndexError: raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'MR': raise ValueError( - 'Wrong modality for conversion of legacy MR images.' - ) + 'Wrong modality for conversion of legacy MR images.') if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.4': raise ValueError( - 'Wrong SOP class for conversion of legacy MR images.' 
- ) - + 'Wrong SOP class for conversion of legacy MR images.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( study_instance_uid=ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, @@ -474,14 +403,12 @@ def __init__( study_date=ref_ds.StudyDate, study_time=ref_ds.StudyTime, referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs - ) + **kwargs) _convert_legacy_to_enhanced(legacy_datasets, self) self.PresentationLUTShape = 'IDENTITY' class LegacyConvertedEnhancedCTImage(SOPClass): - """SOP class for Legacy Converted Enhanced CT Image instances.""" def __init__( @@ -491,8 +418,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - **kwargs: Any - ) -> None: + **kwargs: Any) -> None: """ Parameters ---------- @@ -510,25 +436,18 @@ def __init__( **kwargs: Any, optional Additional keyword arguments that will be passed to the constructor of `highdicom.base.SOPClass` - """ - try: ref_ds = legacy_datasets[0] except IndexError: raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'CT': raise ValueError( - 'Wrong modality for conversion of legacy CT images.' - ) + 'Wrong modality for conversion of legacy CT images.') if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.2': raise ValueError( - 'Wrong SOP class for conversion of legacy CT images.' 
- ) - + 'Wrong SOP class for conversion of legacy CT images.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( study_instance_uid=ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, @@ -548,13 +467,11 @@ def __init__( study_date=ref_ds.StudyDate, study_time=ref_ds.StudyTime, referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs - ) + **kwargs) _convert_legacy_to_enhanced(legacy_datasets, self) class LegacyConvertedEnhancedPETImage(SOPClass): - """SOP class for Legacy Converted Enhanced PET Image instances.""" def __init__( @@ -564,8 +481,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - **kwargs: Any - ) -> None: + **kwargs: Any) -> None: """ Parameters ---------- @@ -583,25 +499,18 @@ def __init__( **kwargs: Any, optional Additional keyword arguments that will be passed to the constructor of `highdicom.base.SOPClass` - """ - try: ref_ds = legacy_datasets[0] except IndexError: raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'PT': raise ValueError( - 'Wrong modality for conversion of legacy PET images.' - ) + 'Wrong modality for conversion of legacy PET images.') if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.128': raise ValueError( - 'Wrong SOP class for conversion of legacy PET images.' 
- ) - + 'Wrong SOP class for conversion of legacy PET images.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( study_instance_uid=ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, @@ -621,21 +530,24 @@ def __init__( study_date=ref_ds.StudyDate, study_time=ref_ds.StudyTime, referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs - ) + **kwargs) _convert_legacy_to_enhanced(legacy_datasets, self) + + from abc import ABC, abstractmethod + + class Abstract_MultiframeModuleAdder(ABC): - - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None ): - self.ExcludedFromPerFrameTags = excluded_from_perframe_tags + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): + self.ExcludedFromPerFrameTags = excluded_from_perframe_tags self.ExcludedFromFunctionalGroupsTags = excluded_from_functional_tags - self.PerFrameTags = perframe_tags + self.PerFrameTags = perframe_tags self.SharedTags = shared_tags self.TargetDataset = multi_frame_output self.SingleFrameSet = sf_datasets @@ -645,7 +557,8 @@ def __init__(self, sf_datasets:Sequence[Dataset] self.FarthestFutureDate = DA('99991231') self.FarthestFutureTime = TM('235959') self.FarthestFutureDateTime = DT('99991231235959') - def _is_empty_or_empty_items(self, attribute:DataElement)->bool: + + def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: if attribute.is_empty: return True if type(attribute.value) == Sequence: @@ -657,85 +570,91 @@ def _is_empty_or_empty_items(self, attribute:DataElement)->bool: if not self._is_empty_or_empty_items(v): return False return False - - def 
_mark_tag_as_used(self, tg:Tag): + + def _mark_tag_as_used(self, tg: Tag): if tg in self.SharedTags: - self.SharedTags[tg] = True + self.SharedTags[tg] = True elif tg in self.ExcludedFromPerFrameTags: self.ExcludedFromPerFrameTags[tg] = True elif tg in self.PerFrameTags: self.PerFrameTags[tg] = True - def _copy_attrib_if_present(self, src_ds:Dataset, dest_ds:Dataset, - src_kw_or_tg:str, dest_kw_or_tg:str=None, - check_not_to_be_perframe=True, check_not_to_be_empty=False): + def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, + src_kw_or_tg: str, dest_kw_or_tg: str = None, + check_not_to_be_perframe=True, + check_not_to_be_empty=False): if type(src_kw_or_tg) == str: src_kw_or_tg = tag_for_keyword(src_kw_or_tg) - if dest_kw_or_tg == None: + if dest_kw_or_tg is None: dest_kw_or_tg = src_kw_or_tg elif type(dest_kw_or_tg) == str: dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) if check_not_to_be_perframe: - if src_kw_or_tg in self.PerFrameTags: - return + if src_kw_or_tg in self.PerFrameTags: + return if src_kw_or_tg in src_ds: elem = src_ds[src_kw_or_tg] if check_not_to_be_empty: if self._is_empty_or_empty_items(elem): - return + return new_elem = deepcopy(elem) if dest_kw_or_tg == src_kw_or_tg: dest_ds[dest_kw_or_tg] = new_elem else: new_elem1 = DataElement(dest_kw_or_tg, - dictionary_VR(dest_kw_or_tg), new_elem.value) + dictionary_VR(dest_kw_or_tg), + new_elem.value) dest_ds[dest_kw_or_tg] = new_elem1 # now mark the attrib as used/done to keep track of every one of it self._mark_tag_as_used(src_kw_or_tg) - - - - def _get_perframe_item(self, index:int)->Dataset: + def _get_perframe_item(self, index: int) -> Dataset: if index > len(self.SingleFrameSet): return None pf_kw = 'PerFrameFunctionalGroupsSequence' pf_tg = tag_for_keyword(pf_kw) - if not pf_kw in self.TargetDataset: + if pf_kw not in self.TargetDataset: seq = [] for i in range(0, len(self.SingleFrameSet)): seq.append(Dataset()) - self.TargetDataset[pf_tg] = DataElement(pf_tg, 'SQ', 
DicomSequence(seq)) + self.TargetDataset[pf_tg] = DataElement(pf_tg, + 'SQ', + DicomSequence(seq)) return self.TargetDataset[pf_tg].value[index] - def _get_shared_item(self)->Dataset: + + def _get_shared_item(self) -> Dataset: sf_kw = 'SharedFunctionalGroupsSequence' sf_tg = tag_for_keyword(sf_kw) - if not sf_kw in self.TargetDataset: + if sf_kw not in self.TargetDataset: seq = [Dataset()] - self.TargetDataset[sf_tg] = DataElement(sf_tg, 'SQ', DicomSequence(seq)) + self.TargetDataset[sf_tg] = DataElement(sf_tg, + 'SQ', + DicomSequence(seq)) return self.TargetDataset[sf_tg].value[0] - def _get_or_create_attribute(self, src:Dataset, kw:str, default)->DataElement: + + def _get_or_create_attribute( + self, src: Dataset, kw: str, default) -> DataElement: tg = tag_for_keyword(kw) if kw in src: a = deepcopy(src[kw]) else: - a = DataElement(tg, - dictionary_VR(tg), default ) + a = DataElement(tg, dictionary_VR(tg), default) from pydicom.valuerep import DT, TM, DA - if a.VR == 'DA' and type(a.value)==str: + if a.VR == 'DA' and type(a.value) == str: a.value = DA(a.value) - if a.VR == 'DT' and type(a.value)==str: + if a.VR == 'DT' and type(a.value) == str: a.value = DT(a.value) - if a.VR == 'TM' and type(a.value)==str: + if a.VR == 'TM' and type(a.value) == str: a.value = TM(a.value) - self._mark_tag_as_used(tg) return a - def _add_module(self, module_name: str, excepted_attributes = [] - , check_not_to_be_perframe=True - , check_not_to_be_empty=False): + + def _add_module(self, module_name: str, excepted_attributes=[], + check_not_to_be_perframe=True, + check_not_to_be_empty=False): # sf_sop_instance_uid = sf_datasets[0] - # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[sf_sop_instance_uid] + # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ + # sf_sop_instance_uid] # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] # modules = IOD_MODULE_MAP[iod_name] from copy import deepcopy @@ -745,159 +664,185 @@ def _add_module(self, module_name: str, 
excepted_attributes = [] if a in excepted_attributes: continue if len(a['path']) == 0: - self._copy_attrib_if_present(ref_dataset, self.TargetDataset, a['keyword'] - , check_not_to_be_perframe=check_not_to_be_perframe - , check_not_to_be_empty=check_not_to_be_empty) + self._copy_attrib_if_present( + ref_dataset, self.TargetDataset, a['keyword'], + check_not_to_be_perframe=check_not_to_be_perframe, + check_not_to_be_empty=check_not_to_be_empty) @abstractmethod def AddModule(self): pass + + class ImagePixelModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + def AddModule(self): - self._add_module('image-pixel',excepted_attributes=[], check_not_to_be_empty=False, - check_not_to_be_perframe=False) # don't check the perframe set + module_and_excepted_at = { + "image-pixel": + [ + "ColorSpace", + "PixelDataProviderURL", + "ExtendedOffsetTable", + "ExtendedOffsetTableLengths", + "PixelData" + ] + } + for module, except_at in module_and_excepted_at.items(): + self._add_module( + module, + excepted_attributes=except_at, + check_not_to_be_empty=False, + check_not_to_be_perframe=False) # don't check the perframe set + + class CompositeInstanceContex(Abstract_MultiframeModuleAdder): - def __init__(self, 
sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) self._module_excepted_list = { - "patient" : [] - , "clinical-trial-subject" : [] - , "general-study" : - [ - "RequestingService" - ] - , "patient-study" : - [ - "ReasonForVisit" - , "ReasonForVisitCodeSequence" - ] - , "clinical-trial-study" : [] - , "general-series" : - [ - "SmallestPixelValueInSeries" - , "LargestPixelValueInSeries" - , "PerformedProcedureStepEndDate" - , "PerformedProcedureStepEndTime" - ] - , "clinical-trial-series" : [] - , "general-equipment" : - [ - "InstitutionalDepartmentTypeCodeSequence" - ] - , "frame-of-reference" : [] - , "sop-common" : - [ - "SpecificCharacterSet" - , "EncryptedAttributesSequence" - , "MACParametersSequence" - , "DigitalSignaturesSequence" - ] - , "general-image" : - [ - "ImageType" - , "AcquisitionDate" - , "AcquisitionDateTime" - , "AcquisitionTime" - , "AnatomicRegionSequence" - , "PrimaryAnatomicStructureSequence" - , "IrradiationEventUID" - , "AcquisitionNumber" - , "InstanceNumber" - , "PatientOrientation" - , "ImageLaterality" - , "ImagesInAcquisition" - , "ImageComments" - , "QualityControlImage" - , "BurnedInAnnotation" - , "RecognizableVisualFeatures" - , "LossyImageCompression" - , "LossyImageCompressionRatio" - , "LossyImageCompressionMethod" - , 
"RealWorldValueMappingSequence" - , "IconImageSequence" - , "PresentationLUTShape" - ] - , "sr-document-general" : - [ - "ContentDate" - , "ContentTime" - , "ReferencedInstanceSequence" - , "InstanceNumber" - , "VerifyingObserverSequence" - , "AuthorObserverSequence" - , "ParticipantSequence" - , "CustodialOrganizationSequence" - , "PredecessorDocumentsSequence" - , "CurrentRequestedProcedureEvidenceSequence" - , "PertinentOtherEvidenceSequence" - , "CompletionFlag" - , "CompletionFlagDescription" - , "VerificationFlag" - , "PreliminaryFlag" - , "IdenticalDocumentsSequence" - ] -} + "patient": [], + "clinical-trial-subject": [], + "general-study": + [ + "RequestingService" + ], + "patient-study": + [ + "ReasonForVisit", + "ReasonForVisitCodeSequence" + ], + "clinical-trial-study": [], + "general-series": + [ + "SmallestPixelValueInSeries", + "LargestPixelValueInSeries", + "PerformedProcedureStepEndDate", + "PerformedProcedureStepEndTime" + ], + "clinical-trial-series": [], + "general-equipment": + [ + "InstitutionalDepartmentTypeCodeSequence" + ], + "frame-of-reference": [], + "sop-common": + [ + "SpecificCharacterSet", + "EncryptedAttributesSequence", + "MACParametersSequence", + "DigitalSignaturesSequence" + ], + "general-image": + [ + "ImageType", + "AcquisitionDate", + "AcquisitionDateTime", + "AcquisitionTime", + "AnatomicRegionSequence", + "PrimaryAnatomicStructureSequence", + "IrradiationEventUID", + "AcquisitionNumber", + "InstanceNumber", + "PatientOrientation", + "ImageLaterality", + "ImagesInAcquisition", + "ImageComments", + "QualityControlImage", + "BurnedInAnnotation", + "RecognizableVisualFeatures", + "LossyImageCompression", + "LossyImageCompressionRatio", + "LossyImageCompressionMethod", + "RealWorldValueMappingSequence", + "IconImageSequence", + "PresentationLUTShape" + ], + "sr-document-general": + [ + "ContentDate", + "ContentTime", + "ReferencedInstanceSequence", + "InstanceNumber", + "VerifyingObserverSequence", + "AuthorObserverSequence", + 
"ParticipantSequence", + "CustodialOrganizationSequence", + "PredecessorDocumentsSequence", + "CurrentRequestedProcedureEvidenceSequence", + "PertinentOtherEvidenceSequence", + "CompletionFlag", + "CompletionFlagDescription", + "VerificationFlag", + "PreliminaryFlag", + "IdenticalDocumentsSequence" + ] + } + def AddModule(self): for module_name, excpeted_a in self._module_excepted_list.items(): - self._add_module(module_name - , excepted_attributes = excpeted_a - , check_not_to_be_empty=False - , check_not_to_be_perframe=False) # don't check the perframe set + self._add_module( + module_name, + excepted_attributes=excpeted_a, + check_not_to_be_empty=False, + check_not_to_be_perframe=False) # don't check the perframe set + + class CommonCTMRPETImageDescriptionMacro(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None - , modality:str='CT'): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None, + modality: str = 'CT'): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) self.Modality = modality - def _get_value_for_frame_type(self, attrib:DataElement): + + def _get_value_for_frame_type(self, attrib: DataElement): if type(attrib) != DataElement: return None output = ['', '', '', ''] v = attrib.value - l = len(v) - output[0] = 'ORIGINAL' if l == 0 else v[0] + lng = len(v) + output[0] = 'ORIGINAL' if lng == 0 else v[0] 
output[1] = 'PRIMARY' - output[2] = 'VOLUME' if l<3 else v[2] + output[2] = 'VOLUME' if lng < 3 else v[2] output[3] = 'NONE' return output - def _get_frame_type_seq_tag(self)->int: + + def _get_frame_type_seq_tag(self) -> int: seq_kw = '{}{}FrameTypeSequence' if self.Modality == 'PET': seq_kw = seq_kw.format(self.Modality, '') @@ -905,61 +850,63 @@ def _get_frame_type_seq_tag(self)->int: seq_kw = seq_kw.format(self.Modality, 'Image') return tag_for_keyword(seq_kw) - - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset, level): - - + def _add_module_to_functional_group(self, src_fg: Dataset, + dest_fg: Dataset, level): FrameType_a = src_fg['ImageType'] - if level ==0: + if level == 0: FrameType_tg = tag_for_keyword('ImageType') else: FrameType_tg = tag_for_keyword('FrameType') - new_val = self._get_value_for_frame_type(FrameType_a) dest_fg[FrameType_tg] = DataElement(FrameType_tg, - FrameType_a.VR, new_val) - element_generator = lambda kw, val:DataElement(tag_for_keyword(kw) - , dictionary_VR(tag_for_keyword(kw)), val) - dest_fg['PixelPresentation'] = element_generator('PixelPresentation', "MONOCHROME") - dest_fg['VolumetricProperties'] = element_generator('VolumetricProperties', "VOLUME") - dest_fg['VolumeBasedCalculationTechnique'] = element_generator('VolumeBasedCalculationTechnique', "NONE") + FrameType_a.VR, new_val) + element_generator = lambda kw, val: DataElement( + tag_for_keyword(kw), + dictionary_VR(tag_for_keyword(kw)), val) + dest_fg['PixelPresentation'] = element_generator( + 'PixelPresentation', "MONOCHROME") + dest_fg['VolumetricProperties'] = element_generator( + 'VolumetricProperties', "VOLUME") + dest_fg['VolumeBasedCalculationTechnique'] = element_generator( + 'VolumeBasedCalculationTechnique', "NONE") def AddModule(self): im_type_tag = tag_for_keyword('ImageType') - fm_type_tag = tag_for_keyword('FrameType') - - seq_tg=self._get_frame_type_seq_tag() - - if not im_type_tag in self.PerFrameTags: - 
self._add_module_to_functional_group(self.SingleFrameSet[0],self.TargetDataset,0) - #---------------------------- + seq_tg = self._get_frame_type_seq_tag() + if im_type_tag not in self.PerFrameTags: + self._add_module_to_functional_group(self.SingleFrameSet[0], + self.TargetDataset, 0) + # ---------------------------- item = self._get_shared_item() inner_item = Dataset() - self._add_module_to_functional_group(self.SingleFrameSet[0],inner_item,1) + self._add_module_to_functional_group(self.SingleFrameSet[0], + inner_item, 1) item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) else: for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) inner_item = Dataset() - self._add_module_to_functional_group(self.SingleFrameSet[i],inner_item, 1) + self._add_module_to_functional_group(self.SingleFrameSet[i], + inner_item, 1) item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) - + class EnhancedCommonImageModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + def AddModule(self): # ct_mr = CommonCTMRImageDescriptionMacro(self.SingleFrameSet # , self.ExcludedFromPerFrameTags @@ -967,25 +914,25 @@ def AddModule(self): # , self.SharedTags # , self.TargetDataset) # ct_mr.AddModule() - - - # 
Acquisition Number - # Acquisition DateTime - should be able to find earliest amongst all frames, if present (required if ORIGINAL) - # Acquisition Duration - should be able to work this out, but type 2C, so can send empty - - # Referenced Raw Data Sequence - optional - ignore - too hard to merge - # Referenced Waveform Sequence - optional - ignore - too hard to merge - # Referenced Image Evidence Sequence - should add if we have references :( - # Source Image Evidence Sequence - should add if we have sources :( - # Referenced Presentation State Sequence - should merge if present in any source frame :( - - # Samples per Pixel - handled by distinguishingAttribute copy - # Photometric Interpretation - handled by distinguishingAttribute copy - # Bits Allocated - handled by distinguishingAttribute copy - # Bits Stored - handled by distinguishingAttribute copy - # High Bit - handled by distinguishingAttribute copy + # Acquisition Number + # Acquisition DateTime - should be able to find earliest amongst all + # frames, if present (required if ORIGINAL) + # Acquisition Duration - should be able to work this out, but type 2C, + # so can send empty + # Referenced Raw Data Sequence - optional - ignore - too hard to merge + # Referenced Waveform Sequence - optional - ignore - too hard to merge + # Referenced Image Evidence Sequence - should add if we have references + # Source Image Evidence Sequence - should add if we have sources : ( + # Referenced Presentation State Sequence - should merge if present in + # any source frame + # Samples per Pixel - handled by distinguishingAttribute copy + # Photometric Interpretation - handled by distinguishingAttribute copy + # Bits Allocated - handled by distinguishingAttribute copy + # Bits Stored - handled by distinguishingAttribute copy + # High Bit - handled by distinguishingAttribute copy ref_dataset = self.SingleFrameSet[0] - attribs_to_be_added = ['ContentQualification', + attribs_to_be_added = [ + 'ContentQualification', 
'ImageComments', 'BurnedInAnnotation', 'RecognizableVisualFeatures', @@ -994,866 +941,946 @@ def AddModule(self): 'LossyImageCompressionMethod'] for kw in attribs_to_be_added: self._copy_attrib_if_present(ref_dataset, self.TargetDataset, kw) - - - if not tag_for_keyword('PresentationLUTShape') in self.PerFrameTags : - # actually should really invert the pixel data if MONOCHROME1, since only MONOCHROME2 is permitted :( - # also, do not need to check if PhotometricInterpretation is per-frame, since a distinguishing attribute + if tag_for_keyword('PresentationLUTShape') not in self.PerFrameTags: + # actually should really invert the pixel data if MONOCHROME1, + # since only MONOCHROME2 is permitted : ( + # also, do not need to check if PhotometricInterpretation is + # per-frame, since a distinguishing attribute phmi_kw = 'PhotometricInterpretation' - phmi_tg = tag_for_keyword(phmi_kw) - phmi_a = self._get_or_create_attribute(self.SingleFrameSet[0], phmi_kw, "MONOCHROME2") - LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1' else "IDENTITY" + phmi_a = self._get_or_create_attribute(self.SingleFrameSet[0], + phmi_kw, + "MONOCHROME2") + LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ + else "IDENTITY" LUT_shape_a = self._get_or_create_attribute(self.SingleFrameSet[0], - 'PresentationLUTShape', + 'PresentationLUTShape', LUT_shape_default) if not LUT_shape_a.is_empty: self.TargetDataset['PresentationLUTShape'] = LUT_shape_a - # Icon Image Sequence - always discard these + # Icon Image Sequence - always discard these + + class ContrastBolusModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + 
excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + def AddModule(self): self._add_module('contrast-bolus') + + class EnhancedCTImageModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + def AddModule(self): pass - #David's code doesn't hold anything for this module ... should ask him + # David's code doesn't hold anything for this module ... 
should ask him + + class EnhancedPETImageModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + def AddModule(self): pass - #David's code doesn't hold anything for this module ... should ask him + # David's code doesn't hold anything for this module ... 
should ask him + + class EnhancedMRImageModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) def AddModule(self): - self._copy_attrib_if_present(self.SingleFrameSet[0] - , self.TargetDataset - , "ResonantNucleus" - , check_not_to_be_perframe = True - , check_not_to_be_empty = True) - - if not 'ResonantNucleus' in self.TargetDataset: - # derive from ImagedNucleus, which is the one used in legacy MR IOD, - # but does not have a standard list of defined terms ... 
(could check these :() - self._copy_attrib_if_present(self.SingleFrameSet[0] - , self.TargetDataset - , "ImagedNucleus" - , check_not_to_be_perframe = True - , check_not_to_be_empty = True) - - self._copy_attrib_if_present(self.SingleFrameSet[0] - , self.TargetDataset - , "KSpaceFiltering" - , check_not_to_be_perframe = True - , check_not_to_be_empty = True) - - self._copy_attrib_if_present(self.SingleFrameSet[0] - , self.TargetDataset - , "MagneticFieldStrength" - , check_not_to_be_perframe = True - , check_not_to_be_empty = True) - - self._copy_attrib_if_present(self.SingleFrameSet[0] - , self.TargetDataset - , "ApplicableSafetyStandardAgency" - , check_not_to_be_perframe = True - , check_not_to_be_empty = True) - - self._copy_attrib_if_present(self.SingleFrameSet[0] - , self.TargetDataset - , "ApplicableSafetyStandardDescription" - , check_not_to_be_perframe = True - , check_not_to_be_empty = True) - - + self._copy_attrib_if_present( + self.SingleFrameSet[0], + self.TargetDataset, + "ResonantNucleus", + check_not_to_be_perframe=True, + check_not_to_be_empty=True) + if 'ResonantNucleus' not in self.TargetDataset: + # derive from ImagedNucleus, which is the one used in legacy MR + # IOD, but does not have a standard list of defined terms ... 
+ # (could check these : () + self._copy_attrib_if_present( + self.SingleFrameSet[0], + self.TargetDataset, + "ImagedNucleus", + check_not_to_be_perframe=True, + check_not_to_be_empty=True) + self._copy_attrib_if_present( + self.SingleFrameSet[0], + self.TargetDataset, + "KSpaceFiltering", + check_not_to_be_perframe=True, + check_not_to_be_empty=True) + self._copy_attrib_if_present( + self.SingleFrameSet[0], + self.TargetDataset, + "MagneticFieldStrength", + check_not_to_be_perframe=True, + check_not_to_be_empty=True) + self._copy_attrib_if_present( + self.SingleFrameSet[0], + self.TargetDataset, + "ApplicableSafetyStandardAgency", + check_not_to_be_perframe=True, + check_not_to_be_empty=True) + self._copy_attrib_if_present( + self.SingleFrameSet[0], + self.TargetDataset, + "ApplicableSafetyStandardDescription", + check_not_to_be_perframe=True, + check_not_to_be_empty=True) + + class AcquisitionContextModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + def AddModule(self): - self._copy_attrib_if_present(self.SingleFrameSet - , self.TargetDataset - , 'AcquisitionContextSequence' - , check_not_to_be_perframe=True)#check not to be in perframe + self._copy_attrib_if_present( + self.SingleFrameSet, + 
self.TargetDataset, + 'AcquisitionContextSequence', + check_not_to_be_perframe=True) # check not to be in perframe + + class FrameAnatomyFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): - #David's code is more complicaated than mine - #Should check it out later. + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): + # David's code is more complicaated than mine + # Should check it out later. 
fa_seq_tg = tag_for_keyword('FrameAnatomySequence') item = Dataset() - self._copy_attrib_if_present(src_fg, item, 'AnatomicRegionSequence' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg, item, 'FrameLaterality' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) - - if not 'FrameLaterality' in item: - self._copy_attrib_if_present(src_fg, item, 'ImageLaterality' - , 'FrameLaterality' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) - if not 'FrameLaterality' in item: - self._copy_attrib_if_present(src_fg, item, 'Laterality' - , 'FrameLaterality' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) - if not 'FrameLaterality' in item: - FrameLaterality_a=self._get_or_create_attribute(src_fg, 'FrameLaterality', "U") + self._copy_attrib_if_present(src_fg, item, 'AnatomicRegionSequence', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, item, 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + if 'FrameLaterality' not in item: + self._copy_attrib_if_present(src_fg, item, 'ImageLaterality', + 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + if 'FrameLaterality' not in item: + self._copy_attrib_if_present(src_fg, item, 'Laterality', + 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + if 'FrameLaterality' not in item: + FrameLaterality_a = self._get_or_create_attribute( + src_fg, 'FrameLaterality', "U") item['FrameLaterality'] = FrameLaterality_a - - - - FrameAnatomy_a = DataElement(fa_seq_tg, dictionary_VR(fa_seq_tg), - [item] ) + FrameAnatomy_a = DataElement(fa_seq_tg, + dictionary_VR(fa_seq_tg), + [item]) dest_fg['FrameAnatomySequence'] = FrameAnatomy_a - def _contains_right_attributes(self, tags:dict) ->bool: + + def _contains_right_attributes(self, tags: dict) -> bool: laterality_tg = 
tag_for_keyword('Laterality') im_laterality_tg = tag_for_keyword('ImageLaterality') bodypart_tg = tag_for_keyword('BodyPartExamined') anatomical_reg_tg = tag_for_keyword('AnatomicRegionSequence') - - return (laterality_tg in tags - or im_laterality_tg in tags - or bodypart_tg in tags - or anatomical_reg_tg) + return (laterality_tg in tags or + im_laterality_tg in tags or + bodypart_tg in tags or + anatomical_reg_tg) def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + class PixelMeasuresFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def 
_contains_right_attributes(self, tags:dict) ->bool: - PixelSpacing_tg = tag_for_keyword('PixelSpacing' ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _contains_right_attributes(self, tags: dict) -> bool: + PixelSpacing_tg = tag_for_keyword('PixelSpacing') SliceThickness_tg = tag_for_keyword('SliceThickness') ImagerPixelSpacing_tg = tag_for_keyword('ImagerPixelSpacing') + return (PixelSpacing_tg in tags or + SliceThickness_tg in tags or + ImagerPixelSpacing_tg in tags) - return (PixelSpacing_tg in tags - or SliceThickness_tg in tags - or ImagerPixelSpacing_tg in tags) - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - self._copy_attrib_if_present(src_fg - ,item - ,'PixelSpacing' - , check_not_to_be_perframe=False) - self._copy_attrib_if_present(src_fg - ,item - ,'SliceThickness' - , check_not_to_be_perframe=False) - if not 'PixelSpacing' in item: - self._copy_attrib_if_present(src_fg - ,item - ,'ImagerPixelSpacing' - ,'PixelSpacing' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) + self._copy_attrib_if_present(src_fg, + item, + 'PixelSpacing', + check_not_to_be_perframe=False) + self._copy_attrib_if_present(src_fg, + item, + 'SliceThickness', + check_not_to_be_perframe=False) + if 'PixelSpacing' not in item: + self._copy_attrib_if_present(src_fg, + item, + 'ImagerPixelSpacing', + 'PixelSpacing', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) pixel_measures_kw = 'PixelMeasuresSequence' pixel_measures_tg = tag_for_keyword(pixel_measures_kw) - seq = DataElement(pixel_measures_tg - , dictionary_VR(pixel_measures_tg) - , [item]) + seq = DataElement(pixel_measures_tg, + dictionary_VR(pixel_measures_tg), + [item]) dest_fg[pixel_measures_tg] = seq def AddModule(self): - if (not 
self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): - super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _contains_right_attributes(self, tags:dict) ->bool: - ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient' ) + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): + super().__init__( + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _contains_right_attributes(self, tags: dict) -> bool: + ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') return ImagePositionPatient_tg in tags - def 
_add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - self._copy_attrib_if_present(src_fg - , item - ,'ImagePositionPatient' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'ImagePositionPatient', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) PlanePositionSequence_kw = 'PlanePositionSequence' PlanePositionSequence_tg = tag_for_keyword(PlanePositionSequence_kw) - seq = DataElement(PlanePositionSequence_tg - , dictionary_VR(PlanePositionSequence_tg) - , [item]) + seq = DataElement(PlanePositionSequence_tg, + dictionary_VR(PlanePositionSequence_tg), + [item]) dest_fg[PlanePositionSequence_tg] = seq def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class PlaneOrientationFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): - super().__init__( 
- sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _contains_right_attributes(self, tags:dict) ->bool: - ImageOrientationPatient_tg = tag_for_keyword('ImageOrientationPatient' ) + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): + super().__init__( + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _contains_right_attributes(self, tags: dict) -> bool: + ImageOrientationPatient_tg = tag_for_keyword('ImageOrientationPatient') return ImageOrientationPatient_tg in tags - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - self._copy_attrib_if_present(src_fg - , item - ,'ImageOrientationPatient' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'ImageOrientationPatient', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) kw = 'PlaneOrientationSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], 
item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class FrameVOILUTFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _contains_right_attributes(self, tags:dict) ->bool: + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _contains_right_attributes(self, tags: dict) -> bool: WindowWidth_tg = tag_for_keyword('WindowWidth') WindowCenter_tg = tag_for_keyword('WindowCenter') - WindowCenterWidthExplanation_tg = tag_for_keyword('WindowCenterWidthExplanation') - - return (WindowWidth_tg in tags - or WindowCenter_tg in tags - or WindowCenterWidthExplanation_tg in tags) - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + WindowCenterWidthExplanation_tg = tag_for_keyword( + 'WindowCenterWidthExplanation') + return (WindowWidth_tg in tags or + WindowCenter_tg in tags or + WindowCenterWidthExplanation_tg in tags) + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - self._copy_attrib_if_present(src_fg - , item - ,'WindowWidth' - , 
check_not_to_be_perframe=False - , check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg - , item - ,'WindowCenter' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg - , item - ,'WindowCenterWidthExplanation' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'WindowWidth', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'WindowCenter', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'WindowCenterWidthExplanation', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) kw = 'FrameVOILUTSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class PixelValueTransformationFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: 
Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _contains_right_attributes(self, tags:dict) ->bool: + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _contains_right_attributes(self, tags: dict) -> bool: RescaleIntercept_tg = tag_for_keyword('RescaleIntercept') RescaleSlope_tg = tag_for_keyword('RescaleSlope') RescaleType_tg = tag_for_keyword('RescaleType') + return (RescaleIntercept_tg in tags or + RescaleSlope_tg in tags or + RescaleType_tg in tags) - return (RescaleIntercept_tg in tags - or RescaleSlope_tg in tags - or RescaleType_tg in tags) - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - self._copy_attrib_if_present(src_fg - , item - ,'RescaleSlope' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg - , item - ,'RescaleIntercept' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - haveValuesSoAddType = 'RescaleSlope' in item or 'RescaleIntercept' in item - self._copy_attrib_if_present(src_fg - , item - , 'RescaleType' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) - if not "RescaleType" in item: + self._copy_attrib_if_present(src_fg, + item, + 'RescaleSlope', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'RescaleIntercept', + check_not_to_be_perframe=False, + 
check_not_to_be_empty=False) + haveValuesSoAddType = ('RescaleSlope' in item or + 'RescaleIntercept' in item) + self._copy_attrib_if_present(src_fg, + item, + 'RescaleType', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + if "RescaleType" not in item: value = '' - modality = '' if not 'Modality' in src_fg else src_fg["Modality"].value + modality = '' if 'Modality' not in src_fg\ + else src_fg["Modality"].value if haveValuesSoAddType: value = 'US' if modality == 'CT': containes_localizer = False - ImageType_v = [] if not 'ImageType' in src_fg else src_fg['ImageType'].value + ImageType_v = [] if 'ImageType' not in src_fg\ + else src_fg['ImageType'].value for i in ImageType_v: - if i=='LOCALIZER': + if i == 'LOCALIZER': containes_localizer = True break if not containes_localizer: value = "HU" elif modality == 'PT': - value = 'US' if not 'Units' in src_fg else src_fg['Units'].value + value = 'US' if 'Units' not in src_fg\ + else src_fg['Units'].value if value != '': - tg = tag_for_keyword('RescaleType') - item[tg]= DataElement(tg, dictionary_VR(tg), value) - + tg = tag_for_keyword('RescaleType') + item[tg] = DataElement(tg, dictionary_VR(tg), value) kw = 'PixelValueTransformationSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = 
self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class ReferencedImageFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _contains_right_attributes(self, tags:dict) ->bool: + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _contains_right_attributes(self, tags: dict) -> bool: return tag_for_keyword('ReferencedImageSequence') in tags - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): - self._copy_attrib_if_present(src_fg - , dest_fg - ,'ReferencedImageSequence' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): + self._copy_attrib_if_present(src_fg, + dest_fg, + 'ReferencedImageSequence', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + 
self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class DerivationImageFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _contains_right_attributes(self, tags:dict) ->bool: + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _contains_right_attributes(self, tags: dict) -> bool: return tag_for_keyword('SourceImageSequence') in tags - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - self._copy_attrib_if_present(src_fg - , item - ,'DerivationDescription' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) - haveValuesSoAddType = 'RescaleSlope' in item or 'RescaleIntercept' in item - self._copy_attrib_if_present(src_fg - , item - , 
'DerivationCodeSequence' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg - , item - ,'SourceImageSequence' - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'DerivationDescription', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + self._copy_attrib_if_present(src_fg, + item, + 'DerivationCodeSequence', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) + self._copy_attrib_if_present(src_fg, + item, + 'SourceImageSequence', + check_not_to_be_perframe=False, + check_not_to_be_empty=False) kw = 'DerivationImageSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq def AddModule(self): - if (not self._contains_right_attributes(self.PerFrameTags) - and (self._contains_right_attributes(self.SharedTags) - or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + if (not self._contains_right_attributes(self.PerFrameTags) and + (self._contains_right_attributes(self.SharedTags) or + self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) + self._add_module_to_functional_group(self.SingleFrameSet[0], item) elif self._contains_right_attributes(self.PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class UnassignedPerFrame(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + 
excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() for tg, used in self.PerFrameTags.items(): - if not used not in self.ExcludedFromFunctionalGroupsTags: - self._copy_attrib_if_present(src_fg - , item - ,tg - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - + if not used and tg not in self.ExcludedFromFunctionalGroupsTags: + self._copy_attrib_if_present(src_fg, + item, + tg, + check_not_to_be_perframe=False, + check_not_to_be_empty=False) kw = 'UnassignedPerFrameConvertedAttributesSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class UnassignedShared(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: 
Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() for tg, used in self.SharedTags.items(): - if (not used - and tg not in self.ExcludedFromFunctionalGroupsTags): - self._copy_attrib_if_present(src_fg - , item - ,tg - , check_not_to_be_perframe=False - , check_not_to_be_empty=False) - + if (not used and + tg not in self.ExcludedFromFunctionalGroupsTags): + self._copy_attrib_if_present(src_fg, + item, + tg, + check_not_to_be_perframe=False, + check_not_to_be_empty=False) kw = 'UnassignedSharedConvertedAttributesSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0],item) - + self._add_module_to_functional_group(self.SingleFrameSet[0], item) + + class ConversionSourceFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def 
_add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - self._copy_attrib_if_present(src_fg - , item - ,'SOPClassUID' - ,'ReferencedSOPClassUID' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) - self._copy_attrib_if_present(src_fg - , item - ,'SOPInstanceUID' - ,'ReferencedSOPInstanceUID' - , check_not_to_be_perframe=False - , check_not_to_be_empty=True) + self._copy_attrib_if_present(src_fg, + item, + 'SOPClassUID', + 'ReferencedSOPClassUID', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + self._copy_attrib_if_present(src_fg, + item, + 'SOPInstanceUID', + 'ReferencedSOPInstanceUID', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) kw = 'ConversionSourceAttributesSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - - - def AddModule(self): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) - + self._add_module_to_functional_group( + self.SingleFrameSet[i], item) + + class FrameContentFunctionalGroup(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): - + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , 
perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime - def _contains_right_attributes(self, tags:dict) ->bool: + + def _contains_right_attributes(self, tags: dict) -> bool: AcquisitionDateTime_tg = tag_for_keyword('AcquisitionDateTime') AcquisitionDate_tg = tag_for_keyword('AcquisitionDate') AcquisitionTime_tg = tag_for_keyword('AcquisitionTime') + return (AcquisitionDateTime_tg in tags or + AcquisitionTime_tg in tags or + AcquisitionDate_tg in tags) - return (AcquisitionDateTime_tg in tags - or AcquisitionTime_tg in tags - or AcquisitionDate_tg in tags) - def _add_module_to_functional_group(self, src_fg:Dataset, dest_fg:Dataset): + def _add_module_to_functional_group( + self, src_fg: Dataset, dest_fg: Dataset): item = Dataset() - item['AcquisitionNumber'] = self._get_or_create_attribute(src_fg, 'AcquisitionNumber',0) - AcquisitionDateTime_a = self._get_or_create_attribute(src_fg,'AcquisitionDateTime', self.EarliestDateTime) - AcquisitionDateTime_is_perframe = self._contains_right_attributes(self.PerFrameTags) + item['AcquisitionNumber'] = self._get_or_create_attribute( + src_fg, 'AcquisitionNumber', 0) + AcquisitionDateTime_a = self._get_or_create_attribute( + src_fg, 'AcquisitionDateTime', self.EarliestDateTime) + AcquisitionDateTime_is_perframe = self._contains_right_attributes( + self.PerFrameTags) if AcquisitionDateTime_a.value == self.EarliestDateTime: - AcquisitionDate_a = self._get_or_create_attribute(src_fg,'AcquisitionDate', self.EarliestDate) - - AcquisitionTime_a = self._get_or_create_attribute(src_fg,'AcquisitionTime', self.EarliestTime) + AcquisitionDate_a = self._get_or_create_attribute( + src_fg, 'AcquisitionDate', self.EarliestDate) + AcquisitionTime_a = self._get_or_create_attribute( + src_fg, 'AcquisitionTime', self.EarliestTime) d = 
AcquisitionDate_a.value t = AcquisitionTime_a.value - AcquisitionDateTime_a.value = DT(d.strftime('%Y%m%d')+t.strftime('%H%M%S')) + AcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + + t.strftime('%H%M%S'))) if AcquisitionDateTime_a.value > self.EarliestDateTime: - if AcquisitionDateTime_a.value < self.EarliestFrameAcquisitionDateTime: - self.EarliestFrameAcquisitionDateTime = AcquisitionDateTime_a.value + if (AcquisitionDateTime_a.value < + self.EarliestFrameAcquisitionDateTime): + self.EarliestFrameAcquisitionDateTime =\ + AcquisitionDateTime_a.value if not AcquisitionDateTime_is_perframe: - if 'TriggerTime' in src_fg and not 'FrameReferenceDateTime' in src_fg: - TriggerTime_a = self._get_or_create_attribute(src_fg,'TriggerTime', self.EarliestTime) + if ('TriggerTime' in src_fg and + 'FrameReferenceDateTime' not in src_fg): + TriggerTime_a = self._get_or_create_attribute( + src_fg, 'TriggerTime', self.EarliestTime) trigger_time_in_millisecond = int(TriggerTime_a.value) - if trigger_time_in_millisecond>0: + if trigger_time_in_millisecond > 0: t_delta = timedelta(trigger_time_in_millisecond) - # this is so rediculous. I'm not able to cnvert the DT to datetime (cast to superclass) - d_t = datetime.combine(AcquisitionDateTime_a.value.date() , AcquisitionDateTime_a.value.time()) + # this is so ridiculous. 
I'm not able to convert + # the DT to datetime (cast to superclass) + d_t = datetime.combine( + AcquisitionDateTime_a.value.date(), + AcquisitionDateTime_a.value.time()) d_t = d_t + t_delta - AcquisitionDateTime_a.value = DT(d_t.strftime('%Y%m%d%H%M%S')) - item['AcquisitionDateTime'] =AcquisitionDateTime_a - #--------------------------------- - self._copy_attrib_if_present(src_fg, item, "AcquisitionDuration", - "FrameAcquisitionDuration", check_not_to_be_perframe=False, check_not_to_be_empty=True) - self._copy_attrib_if_present(src_fg, item, 'TemporalPositionIndex' - , check_not_to_be_perframe=False, check_not_to_be_empty=True) - self._copy_attrib_if_present(src_fg, item, "ImageComments", - "FrameComments", check_not_to_be_perframe=False, check_not_to_be_empty=True) - #----------------------------------- + AcquisitionDateTime_a.value =\ + DT(d_t.strftime('%Y%m%d%H%M%S')) + item['AcquisitionDateTime'] = AcquisitionDateTime_a + # --------------------------------- + self._copy_attrib_if_present( + src_fg, item, "AcquisitionDuration", + "FrameAcquisitionDuration", + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + self._copy_attrib_if_present( + src_fg, item, + 'TemporalPositionIndex', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + self._copy_attrib_if_present( + src_fg, item, "ImageComments", + "FrameComments", + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + # ----------------------------------- seq_tg = tag_for_keyword('FrameContentSequence') - dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), [item]) - # Also we want to add the earliest frame acq date time to the multiframe: - + dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), [item]) + # Also we want to add the earliest frame acq date time to the multiframe: + def AddModule(self): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) - self._add_module_to_functional_group(self.SingleFrameSet[i],item) + 
self._add_module_to_functional_group( + self.SingleFrameSet[i], item) if self.EarliestFrameAcquisitionDateTime < self.FarthestFutureDateTime: kw = 'AcquisitionDateTime' - self.TargetDataset[kw] = DataElement(tag_for_keyword(kw), - 'DT', self.EarliestFrameAcquisitionDateTime) + self.TargetDataset[kw] = DataElement( + tag_for_keyword(kw), + 'DT', self.EarliestFrameAcquisitionDateTime) + + class PixelData(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): - super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): + super().__init__( + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) self._byte_data = bytearray() self._word_data = bytearray() - def _is_other_byte_vr(self, vr:str) -> bool: - return vr[0]=='O' and vr[1]=='B' - def _is_other_word_vr(self, vr:str) -> bool: - return vr[0]=='O' and vr[1]=='W' - # def _contains_right_attributes(self, tags:dict) ->bool: - # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient' ) + def _is_other_byte_vr(self, vr: str) -> bool: + return vr[0] == 'O' and vr[1] == 'B' + def _is_other_word_vr(self, vr: str) -> bool: + return vr[0] == 'O' and vr[1] == 'W' + # def _contains_right_attributes(self, tags: dict) -> bool: + # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') # return ImagePositionPatient_tg in tags - def AddModule(self): kw = 'NumberOfFrames' tg = 
tag_for_keyword(kw) - FrameCount = len (self.SingleFrameSet) + FrameCount = len(self.SingleFrameSet) self.TargetDataset[kw] = DataElement(tg, dictionary_VR(tg), FrameCount) kw = "PixelData" for i in range(0, len(self.SingleFrameSet)): PixelData_a = self.SingleFrameSet[i][kw] - if self._is_other_byte_vr(PixelData_a.VR) : + if self._is_other_byte_vr(PixelData_a.VR): if len(self._word_data) != 0: raise TypeError( - 'Cannot mix OB and OW Pixel Data VR from different frames' - ) + 'Cannot mix OB and OW Pixel Data ' + 'VR from different frames') self._byte_data.extend(PixelData_a.value) - elif self._is_other_word_vr(PixelData_a.VR) : - if len(self._byte_data) !=0: + elif self._is_other_word_vr(PixelData_a.VR): + if len(self._byte_data) != 0: raise TypeError( - 'Cannot mix OB and OW Pixel Data VR from different frames' - ) + 'Cannot mix OB and OW Pixel Data ' + 'VR from different frames') self._word_data.extend(PixelData_a.value) else: raise TypeError( 'Cannot mix OB and OW Pixel Data VR from different frames') - if len(self._byte_data)!=0: + if len(self._byte_data) != 0: MF_PixelData = DataElement(tag_for_keyword(kw), - 'OB', bytes(self._byte_data)) - elif len(self._word_data)!=0: + 'OB', bytes(self._byte_data)) + elif len(self._word_data) != 0: MF_PixelData = DataElement(tag_for_keyword(kw), - 'OW', bytes(self._word_data)) + 'OW', bytes(self._word_data)) self.TargetDataset[kw] = MF_PixelData - + class ContentDateTime(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - 
, excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) self.EarliestContentDateTime = self.FarthestFutureDateTime def AddModule(self): - for i in range(0, len(self.SingleFrameSet)): src = self.SingleFrameSet[i] - ContentDate_a = self._get_or_create_attribute(src,'ContentDate', self.EarliestDate) - ContentTime_a = self._get_or_create_attribute(src,'ContentTime', self.EarliestTime) + ContentDate_a = self._get_or_create_attribute( + src, 'ContentDate', self.EarliestDate) + ContentTime_a = self._get_or_create_attribute( + src, 'ContentTime', self.EarliestTime) d = ContentDate_a.value t = ContentTime_a.value - value = DT(d.strftime('%Y%m%d')+t.strftime('%H%M%S')) + value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S')) if self.EarliestContentDateTime > value: self.EarliestContentDateTime = value if self.EarliestContentDateTime < self.FarthestFutureDateTime: @@ -1867,22 +1894,21 @@ def AddModule(self): tag_for_keyword(kw), 'TM', n_t) - class InstanceCreationDateTime(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + 
multi_frame_output) def AddModule(self): nnooww = datetime.now() @@ -1895,70 +1921,66 @@ def AddModule(self): self.TargetDataset[kw] = DataElement( tag_for_keyword(kw), 'TM', n_t) + class ContributingEquipmentSequence(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets:Sequence[Dataset] - , excluded_from_perframe_tags:Sequence[Tag] - , excluded_from_functional_tags:Sequence[Tag] - , perframe_tags: Sequence[Tag] - , shared_tags: Sequence[Tag] - , multi_frame_output:Dataset=None): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: Sequence[Tag], + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: Sequence[Tag], + shared_tags: Sequence[Tag], + multi_frame_output: Dataset = None): super().__init__( - sf_datasets - , excluded_from_perframe_tags - , excluded_from_functional_tags - , perframe_tags - , shared_tags - , multi_frame_output - ) - def _add_data_element_to_target(self, kw:str, value)->None: + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def _add_data_element_to_target(self, kw: str, value) -> None: tg = tag_for_keyword(kw) self.TargetDataset[kw] = DataElement(tg, dictionary_VR(tg), value) - def AddModule(self): - ds = Dataset() CodeValue_tg = tag_for_keyword('CodeValue') CodeMeaning_tg = tag_for_keyword('CodeMeaning') CodingSchemeDesignator_tg = tag_for_keyword('CodingSchemeDesignator') PurposeOfReferenceCode_item = Dataset() PurposeOfReferenceCode_item['CodeValue'] = DataElement( - CodeValue_tg - , dictionary_VR(CodeValue_tg) - ,'109106') + CodeValue_tg, + dictionary_VR(CodeValue_tg), + '109106') PurposeOfReferenceCode_item['CodeMeaning'] = DataElement( - CodeMeaning_tg - , dictionary_VR(CodeMeaning_tg) - ,'Enhanced Multi-frame Conversion Equipment') + CodeMeaning_tg, + dictionary_VR(CodeMeaning_tg), + 'Enhanced Multi-frame Conversion Equipment') PurposeOfReferenceCode_item['CodingSchemeDesignator'] = 
DataElement( - CodingSchemeDesignator_tg - , dictionary_VR(CodingSchemeDesignator_tg) - ,'DCM') - + CodingSchemeDesignator_tg, + dictionary_VR(CodingSchemeDesignator_tg), + 'DCM') PurposeOfReferenceCode_seq = DataElement( tag_for_keyword('PurposeOfReferenceCodeSequence'), - 'SQ',[PurposeOfReferenceCode_item] - ) - self.TargetDataset['PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq - + 'SQ', [PurposeOfReferenceCode_item]) + self.TargetDataset[ + 'PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq self._add_data_element_to_target("Manufacturer", 'HighDicom') self._add_data_element_to_target("InstitutionName", 'HighDicom') - self._add_data_element_to_target("InstitutionalDepartmentName", 'Software Development' ) - self._add_data_element_to_target("InstitutionAddress", 'Radialogy Department, B&W Hospital, Boston, MA') - self._add_data_element_to_target("SoftwareVersions", '1.4') # get sw version - self._add_data_element_to_target("ContributionDescription", 'Legacy Enhanced Image created from Classic Images') - - - - - - - - + self._add_data_element_to_target( + "InstitutionalDepartmentName", + 'Software Development') + self._add_data_element_to_target( + "InstitutionAddress", + 'Radialogy Department, B&W Hospital, Boston, MA') + self._add_data_element_to_target( + "SoftwareVersions", + '1.4') # get sw version + self._add_data_element_to_target( + "ContributionDescription", + 'Legacy Enhanced Image created from Classic Images') - class LegacyConvertedEnhanceImage(SOPClass): - """SOP class for Legacy Converted Enhanced PET Image instances.""" def __init__( @@ -1969,8 +1991,7 @@ def __init__( sop_instance_uid: str, instance_number: int, sort_key=None, - **kwargs: Any - ) -> None: + **kwargs: Any) -> None: """ Parameters ---------- @@ -1988,16 +2009,13 @@ def __init__( **kwargs: Any, optional Additional keyword arguments that will be passed to the constructor of `highdicom.base.SOPClass` - """ - try: ref_ds = legacy_datasets[0] except IndexError: 
raise ValueError('No DICOM data sets of provided.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - if sort_key == None: + if sort_key is None: sort_key = LegacyConvertedEnhanceImage.default_sort_key super().__init__( study_instance_uid=ref_ds.StudyInstanceUID, @@ -2018,8 +2036,7 @@ def __init__( study_date=ref_ds.StudyDate, study_time=ref_ds.StudyTime, referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs - ) + **kwargs) self._legacy_datasets = legacy_datasets self.DistinguishingAttributeKeywords = [ 'PatientID', @@ -2071,95 +2088,95 @@ def __init__( else: to_be_removed_from_distinguishing_attribs.add(kw) not_present_attribute_count += 1 - if not_present_attribute_count != len(legacy_datasets) \ - and not_present_attribute_count!=0: - raise ValueError('One or more datasets lack {} distinguishing attributes'.format(kw)) - if len(x)>1: - error_msg = 'All instances must have the same value for {}.\n\tExisting values:' + if not_present_attribute_count != len(legacy_datasets)\ + and not_present_attribute_count != 0: + raise ValueError('One or more datasets lack {} ' + 'distinguishing attributes'.format(kw)) + if len(x) > 1: + error_msg = 'All instances must have the same' + ' value for {}.\n\tExisting values: ' for x_elem in x: error_msg += '\n\t\t{}'.format(x_elem.value) raise ValueError(error_msg) for kw in to_be_removed_from_distinguishing_attribs: self.DistinguishingAttributeKeywords.remove(kw) - self.ExcludedFromPerFrameTags = {} for i in self.DistinguishingAttributeKeywords: self.ExcludedFromPerFrameTags[tag_for_keyword(i)] = False - - self.ExcludedFromPerFrameTags[tag_for_keyword('AcquisitionDateTime')] = False - self.ExcludedFromPerFrameTags[tag_for_keyword('AcquisitionDate')] = False - self.ExcludedFromPerFrameTags[tag_for_keyword('AcquisitionTime')] = False - - self.ExcludedFromFunctionalGroupsTags={tag_for_keyword('SpecificCharacterSet'): False} - #--------------------------------------------------------------------- + 
self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDateTime')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDate')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionTime')] = False + self.ExcludedFromFunctionalGroupsTags = { + tag_for_keyword('SpecificCharacterSet'): False} + + # --------------------------------------------------------------------- self.PerFrameTags = {} self.SharedTags = {} self._find_per_frame_and_shared_tags() - #---------------------------------------------------------------------- + # ---------------------------------------------------------------------- self.__build_blocks = [] - #===================================================== + # == == == == == == == == == == == == == == == == == == == == == == == = new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): new_ds.append(item) self.legacy_datasets = new_ds - if _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-ct-image': + if (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == + 'legacy-converted-enhanced-ct-image'): self.AddBuildBlocksForCT() - elif _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-mr-image': + elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == + 'legacy-converted-enhanced-mr-image'): self.AddBuildBlocksForMR() - elif _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-pet-image': + elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == + 'legacy-converted-enhanced-pet-image'): self.AddBuildBlocksForPET() - - - - def default_sort_key(x:Dataset)->tuple: + def default_sort_key(x: Dataset) -> tuple: out = tuple() if 'SeriesNumber' in x: - out += (x['SeriesNumber'].value,) + out += (x['SeriesNumber'].value, ) if 'InstanceNumber' in x: - out += (x['InstanceNumber'].value,) + out += (x['InstanceNumber'].value, ) if 'SOPInstanceUID' in x: - out += (x['SOPInstanceUID'].value,) + out += (x['SOPInstanceUID'].value, ) return out - - def 
_find_per_frame_and_shared_tags(self): rough_shared = {} sfs = self._legacy_datasets for ds in sfs: for ttag, elem in ds.items(): - if (not ttag.is_private - and not self._istag_file_meta_information_group(ttag) - and not self._istag_repeating_group(ttag) - and not self._istag_group_length(ttag) - and not self._istag_excluded_from_perframe(ttag)): + if (not ttag.is_private and not + self._istag_file_meta_information_group(ttag) and not + self._istag_repeating_group(ttag) and not + self._istag_group_length(ttag) and not + self._istag_excluded_from_perframe(ttag) and + ttag != tag_for_keyword('PixelData')): elem = ds[ttag] self.PerFrameTags[ttag] = False - if ttag in rough_shared: + if ttag in rough_shared: rough_shared[ttag].append(elem.value) else: rough_shared[ttag] = [elem.value] to_be_removed_from_shared = [] for ttag, v in rough_shared.items(): v = rough_shared[ttag] - N = len(v) if len(v) < len(self._legacy_datasets): to_be_removed_from_shared.append(ttag) else: all_values_are_equal = True for v_i in v: - if not self._isequal(v_i,v[0]): + if not self._isequal(v_i, v[0]): all_values_are_equal = False break if not all_values_are_equal: to_be_removed_from_shared.append(ttag) from pydicom.datadict import keyword_for_tag for t, v in rough_shared.items(): - if keyword_for_tag(t)!='PatientSex': + if keyword_for_tag(t) != 'PatientSex': continue - - for t in to_be_removed_from_shared: del rough_shared[t] for t, v in rough_shared.items(): @@ -2171,34 +2188,34 @@ def _find_per_frame_and_shared_tags(self): # for t in self.PerFrameTags: # print (keyword_for_tag(t)) - - def _istag_excluded_from_perframe(self, t:Tag)->bool: + def _istag_excluded_from_perframe(self, t: Tag) -> bool: return t in self.ExcludedFromPerFrameTags - - - def _istag_file_meta_information_group(self, t:Tag)->bool: + + def _istag_file_meta_information_group(self, t: Tag) -> bool: return t.group == 0x0002 - def _istag_repeating_group(self, t:Tag)->bool: + + def _istag_repeating_group(self, t: Tag) -> 
bool: g = t.group return (g >= 0x5000 and g <= 0x501e) or\ (g >= 0x6000 and g <= 0x601e) - def _istag_group_length(self, t:Tag)->bool: + + def _istag_group_length(self, t: Tag) -> bool: return t.element == 0 + def _isequal(self, v1, v2): float_tolerance = 1.0e-5 - is_equal_float = lambda x1, x2: abs(x1-x2)bool: + + def _isequal_dicom_dataset(self, ds1, ds2) -> bool: if type(ds1) != type(ds2): return False if type(ds1) != Dataset: return False for k1, elem1 in ds1.items(): - if not k1 in ds2: + if k1 not in ds2: return False - elem2 = ds2[k1] - return self._isequal(elem2.value, elem1.value) - def AddNewBuildBlock(self, element:Abstract_MultiframeModuleAdder): - if not isinstance(element, Abstract_MultiframeModuleAdder) : - raise ValueError('Build block must be an instance of Abstract_MultiframeModuleAdder') + elem2 = ds2[k1] + return self._isequal(elem2.value, elem1.value) + + def AddNewBuildBlock(self, element: Abstract_MultiframeModuleAdder): + if not isinstance(element, Abstract_MultiframeModuleAdder): + raise ValueError('Build block must be an instance ' + 'of Abstract_MultiframeModuleAdder') self.__build_blocks.append(element) + def ClearBuildBlocks(self): self.__build_blocks = [] + def AddCommonCT_PET_MR_BuildBlocks(self): - Blocks= [ImagePixelModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,CompositeInstanceContex(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,EnhancedCommonImageModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,AcquisitionContextModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - 
,FrameAnatomyFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,PixelMeasuresFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,PlaneOrientationFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,PlanePositionFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,FrameVOILUTFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,PixelValueTransformationFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,ReferencedImageFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,ConversionSourceFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,FrameContentFunctionalGroup(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,UnassignedPerFrame(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,UnassignedShared(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - 
,PixelData(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,ContentDateTime(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,InstanceCreationDateTime(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,ContributingEquipmentSequence(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ] + Blocks = [ + ImagePixelModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + CompositeInstanceContex( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + EnhancedCommonImageModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + AcquisitionContextModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + FrameAnatomyFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + PixelMeasuresFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + PlaneOrientationFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + PlanePositionFunctionalGroup( + 
self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + FrameVOILUTFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + PixelValueTransformationFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + ReferencedImageFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + ConversionSourceFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + FrameContentFunctionalGroup( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + UnassignedPerFrame( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + UnassignedShared( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + PixelData( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + ContentDateTime( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + InstanceCreationDateTime( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + ContributingEquipmentSequence( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + 
self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self) + ] for b in Blocks: self.AddNewBuildBlock(b) def AddCTSpecificBuildBlocks(self): - Blocks= [CommonCTMRPETImageDescriptionMacro(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self - , 'CT') - ,EnhancedCTImageModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,ContrastBolusModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) + Blocks = [ + CommonCTMRPETImageDescriptionMacro( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self, + 'CT'), + EnhancedCTImageModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + ContrastBolusModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self) ] for b in Blocks: self.AddNewBuildBlock(b) + def AddMRSpecificBuildBlocks(self): - Blocks= [CommonCTMRPETImageDescriptionMacro(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self - , 'MR') - ,EnhancedMRImageModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) - ,ContrastBolusModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) + Blocks = [ + CommonCTMRPETImageDescriptionMacro( + self._legacy_datasets, + 
self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self, + 'MR'), + EnhancedMRImageModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self), + ContrastBolusModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self) ] for b in Blocks: self.AddNewBuildBlock(b) + def AddPETSpecificBuildBlocks(self): - Blocks= [CommonCTMRPETImageDescriptionMacro(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self - , 'PET') - ,EnhancedPETImageModule(self._legacy_datasets - , self.ExcludedFromPerFrameTags - , self.ExcludedFromFunctionalGroupsTags - , self.PerFrameTags - , self.SharedTags - , self) + Blocks = [ + CommonCTMRPETImageDescriptionMacro( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self, + 'PET'), + EnhancedPETImageModule( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + self.PerFrameTags, + self.SharedTags, + self) ] for b in Blocks: self.AddNewBuildBlock(b) @@ -2410,27 +2465,17 @@ def AddBuildBlocksForCT(self): self.ClearBuildBlocks() self.AddCommonCT_PET_MR_BuildBlocks() self.AddCTSpecificBuildBlocks() + def AddBuildBlocksForMR(self): self.ClearBuildBlocks() self.AddCommonCT_PET_MR_BuildBlocks() self.AddMRSpecificBuildBlocks() + def AddBuildBlocksForPET(self): self.ClearBuildBlocks() self.AddCommonCT_PET_MR_BuildBlocks() self.AddPETSpecificBuildBlocks() - + def BuildMultiFrame(self): for builder in self.__build_blocks: builder.AddModule() - - - - - - - - - - - - From a05e189a8bc42c5e560df7f08ff83ca9dd156c03 Mon Sep 17 00:00:00 2001 From: Afshin Date: Thu, 9 Jul 2020 12:23:57 -0400 
Subject: [PATCH 04/44] Verified by mypy and flake8. Added stack module adder --- src/highdicom/legacy/sop.py | 421 +++++++++++++++++++++++++----------- 1 file changed, 294 insertions(+), 127 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index aa8728b8..c8c9dced 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,4 +1,5 @@ """Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" +from __future__ import annotations import logging from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union @@ -540,11 +541,12 @@ def __init__( class Abstract_MultiframeModuleAdder(ABC): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): + self.ExcludedFromPerFrameTags = excluded_from_perframe_tags self.ExcludedFromFunctionalGroupsTags = excluded_from_functional_tags self.PerFrameTags = perframe_tags @@ -611,9 +613,9 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, def _get_perframe_item(self, index: int) -> Dataset: if index > len(self.SingleFrameSet): return None - pf_kw = 'PerFrameFunctionalGroupsSequence' + pf_kw: str = 'PerFrameFunctionalGroupsSequence' pf_tg = tag_for_keyword(pf_kw) - if pf_kw not in self.TargetDataset: + if pf_tg not in self.TargetDataset: seq = [] for i in range(0, len(self.SingleFrameSet)): seq.append(Dataset()) @@ -658,14 +660,15 @@ def _add_module(self, module_name: str, excepted_attributes=[], # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] # modules = IOD_MODULE_MAP[iod_name] from copy import deepcopy - attribs = MODULE_ATTRIBUTE_MAP[module_name] + attribs: list = MODULE_ATTRIBUTE_MAP[module_name] 
ref_dataset = self.SingleFrameSet[0] for a in attribs: - if a in excepted_attributes: + kw: str = a['keyword'] + if kw in excepted_attributes: continue if len(a['path']) == 0: self._copy_attrib_if_present( - ref_dataset, self.TargetDataset, a['keyword'], + ref_dataset, self.TargetDataset, kw, check_not_to_be_perframe=check_not_to_be_perframe, check_not_to_be_empty=check_not_to_be_empty) @@ -677,11 +680,11 @@ def AddModule(self): class ImagePixelModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -712,11 +715,11 @@ def AddModule(self): class CompositeInstanceContex(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -729,6 +732,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], "clinical-trial-subject": [], "general-study": [ + "StudyInstanceUID", "RequestingService" ], "patient-study": @@ -739,6 +743,8 @@ def __init__(self, sf_datasets: Sequence[Dataset], "clinical-trial-study": [], "general-series": [ + "SeriesInstanceUID", + "SeriesNumber", "SmallestPixelValueInSeries", "LargestPixelValueInSeries", "PerformedProcedureStepEndDate", @@ -752,6 +758,9 @@ def __init__(self, sf_datasets: Sequence[Dataset], "frame-of-reference": [], "sop-common": [ + "SOPClassUID", + 
"SOPInstanceUID", + "InstanceNumber", "SpecificCharacterSet", "EncryptedAttributesSequence", "MACParametersSequence", @@ -815,11 +824,11 @@ def AddModule(self): class CommonCTMRPETImageDescriptionMacro(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None, + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset, modality: str = 'CT'): super().__init__( sf_datasets, @@ -860,9 +869,11 @@ def _add_module_to_functional_group(self, src_fg: Dataset, new_val = self._get_value_for_frame_type(FrameType_a) dest_fg[FrameType_tg] = DataElement(FrameType_tg, FrameType_a.VR, new_val) - element_generator = lambda kw, val: DataElement( - tag_for_keyword(kw), - dictionary_VR(tag_for_keyword(kw)), val) + + def element_generator(kw, val) -> DataElement: + return DataElement( + tag_for_keyword(kw), + dictionary_VR(tag_for_keyword(kw)), val) dest_fg['PixelPresentation'] = element_generator( 'PixelPresentation', "MONOCHROME") dest_fg['VolumetricProperties'] = element_generator( @@ -894,11 +905,11 @@ def AddModule(self): class EnhancedCommonImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -963,11 +974,11 @@ def AddModule(self): class ContrastBolusModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + 
excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -983,11 +994,11 @@ def AddModule(self): class EnhancedCTImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1004,11 +1015,11 @@ def AddModule(self): class EnhancedPETImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1025,11 +1036,11 @@ def AddModule(self): class EnhancedMRImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1084,11 +1095,11 @@ def AddModule(self): class AcquisitionContextModule(Abstract_MultiframeModuleAdder): def 
__init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1108,11 +1119,11 @@ def AddModule(self): class FrameAnatomyFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1179,11 +1190,11 @@ def AddModule(self): class PixelMeasuresFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1242,11 +1253,11 @@ def AddModule(self): class PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, 
excluded_from_perframe_tags, @@ -1291,11 +1302,11 @@ def AddModule(self): class PlaneOrientationFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1338,11 +1349,11 @@ def AddModule(self): class FrameVOILUTFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1400,11 +1411,11 @@ def AddModule(self): class PixelValueTransformationFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1485,11 +1496,11 @@ def AddModule(self): class ReferencedImageFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - 
multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1526,11 +1537,11 @@ def AddModule(self): class DerivationImageFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1582,11 +1593,11 @@ def AddModule(self): class UnassignedPerFrame(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1620,11 +1631,11 @@ def AddModule(self): class UnassignedShared(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1657,11 +1668,11 @@ def AddModule(self): class ConversionSourceFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, 
excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1700,11 +1711,11 @@ def AddModule(self): class FrameContentFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1798,11 +1809,11 @@ def AddModule(self): class PixelData(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1857,11 +1868,11 @@ def AddModule(self): class ContentDateTime(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1897,11 +1908,11 @@ def AddModule(self): class InstanceCreationDateTime(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], 
- excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1925,11 +1936,11 @@ def AddModule(self): class ContributingEquipmentSequence(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: Sequence[Tag], + excluded_from_perframe_tags: dict, excluded_from_functional_tags: Sequence[Tag], - perframe_tags: Sequence[Tag], - shared_tags: Sequence[Tag], - multi_frame_output: Dataset = None): + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): super().__init__( sf_datasets, excluded_from_perframe_tags, @@ -1980,6 +1991,113 @@ def AddModule(self): 'Legacy Enhanced Image created from Classic Images') +class StackInformation(Abstract_MultiframeModuleAdder): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: dict, + excluded_from_functional_tags: Sequence[Tag], + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): + super().__init__( + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + self._slices: list = [] + self._tolerance = 0.0001 + self._slice_location_map: dict = {} + + def _build_slices_geometry(self) -> None: + frame_count = len(self.SingleFrameSet) + for i in range(0, frame_count): + curr_frame = self.SingleFrameSet[i] + ImagePositionPatient_v = None \ + if 'ImagePositionPatient' not in curr_frame\ + else curr_frame['ImagePositionPatient'].value + ImageOrientationPatient_v = None \ + if 'ImageOrientationPatient' not in curr_frame\ + else curr_frame['ImageOrientationPatient'].value + PixelSpacing_v = None \ + if 'PixelSpacing' not in 
curr_frame\ + else curr_frame['PixelSpacing'].value + SliceThickness_v = 0.0 \ + if 'SliceThickness' not in curr_frame\ + else curr_frame['SliceThickness'].value + # SliceLocation_v = None \ + # if 'SliceLocation' not in curr_frame\ + # else curr_frame['SliceLocation'].value + Rows_v = 0 \ + if 'Rows' not in curr_frame\ + else curr_frame['Rows'].value + Columns_v = 0 \ + if 'Columns' not in curr_frame\ + else curr_frame['Columns'].value + if (ImageOrientationPatient_v is not None and + ImagePositionPatient_v is not None and + PixelSpacing_v is not None): + row = np.array(ImageOrientationPatient_v[0:3]) + col = np.array(ImageOrientationPatient_v[3:]) + voxel_spaceing = np.array([PixelSpacing_v[0], + PixelSpacing_v[1], + SliceThickness_v]) + tpl = np.array(ImagePositionPatient_v) + dim = (Rows_v, Columns_v, 1) + self._slices.append(GeometryOfSlice(row, col, + tpl, voxel_spaceing, dim)) + else: + print("Error in geometri ...") + self._slices = [] # clear the slices + break + + def _are_all_slices_parallel(self) -> bool: + slice_count = len(self._slices) + if slice_count >= 2: + last_slice = self._slices[0] + for i in range(1, slice_count): + curr_slice = self._slices[i] + if not GeometryOfSlice.AreParallel( + curr_slice, last_slice, self._tolerance): + return False + last_slice = curr_slice + return True + elif slice_count == 1: + return True + else: + return False + + def AddModule(self): + self._build_slices_geometry() + round_digits = int(np.ceil(-np.log10(self._tolerance))) + if self._are_all_slices_parallel(): + for idx, s in enumerate(self._slices): + dist = round(s.GetDistanceAlongOrigin(), round_digits) + if dist in self._slice_location_map: + self._slice_location_map[dist].append(idx) + else: + self._slice_location_map[dist] = [idx] + distance_index = 0 + for loc, idx in sorted(self._slice_location_map.items()): + if len(idx) != 1: + print('Error') + return + frame_index = idx[0] + frame = self._get_perframe_item(frame_index) + new_item = Dataset() + 
new_item["StackID"] = self._get_or_create_attribute( + self.SingleFrameSet[0], + "StackID", "0") + new_item["InStackPositionNumber"] =\ + self._get_or_create_attribute( + self.SingleFrameSet[0], + "InStackPositionNumber", distance_index) + tg = tag_for_keyword("FrameContentSequence") + frame[tg] = DataElement(tg, "SQ", [new_item]) + distance_index += 1 + + class LegacyConvertedEnhanceImage(SOPClass): """SOP class for Legacy Converted Enhanced PET Image instances.""" @@ -2026,7 +2144,6 @@ def __init__( instance_number=instance_number, manufacturer=ref_ds.Manufacturer, modality=ref_ds.Modality, - transfer_syntax_uid=None, # FIXME: frame encoding patient_id=ref_ds.PatientID, patient_name=ref_ds.PatientName, patient_birth_date=ref_ds.PatientBirthDate, @@ -2071,7 +2188,7 @@ def __init__( 'AcquisitionContextSequence'] to_be_removed_from_distinguishing_attribs = set() for kw in self.DistinguishingAttributeKeywords: - x = [] + x: list = [] not_present_attribute_count = 0 for ds in legacy_datasets: if kw in ds: @@ -2113,11 +2230,11 @@ def __init__( tag_for_keyword('SpecificCharacterSet'): False} # --------------------------------------------------------------------- - self.PerFrameTags = {} - self.SharedTags = {} + self.PerFrameTags: dict = {} + self.SharedTags: dict = {} self._find_per_frame_and_shared_tags() # ---------------------------------------------------------------------- - self.__build_blocks = [] + self.__build_blocks: list = [] # == == == == == == == == == == == == == == == == == == == == == == == = new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): @@ -2134,7 +2251,7 @@ def __init__( self.AddBuildBlocksForPET() def default_sort_key(x: Dataset) -> tuple: - out = tuple() + out: tuple = tuple() if 'SeriesNumber' in x: out += (x['SeriesNumber'].value, ) if 'InstanceNumber' in x: @@ -2203,8 +2320,11 @@ def _istag_group_length(self, t: Tag) -> bool: return t.element == 0 def _isequal(self, v1, v2): + from pydicom.valuerep import DSfloat 
float_tolerance = 1.0e-5 - is_equal_float = lambda x1, x2: abs(x1 - x2) < float_tolerance + + def is_equal_float(x1, x2) -> bool: + return abs(x1 - x2) < float_tolerance if type(v1) != type(v2): return False if type(v1) == DicomSequence: @@ -2217,7 +2337,7 @@ def _isequal(self, v1, v2): v11 = v1 v22 = v2 for xx, yy in zip(v11, v22): - if type(xx) == float: + if type(xx) == DSfloat or type(xx) == float: if not is_equal_float(xx, yy): return False else: @@ -2234,7 +2354,9 @@ def _isequal_dicom_dataset(self, ds1, ds2) -> bool: if k1 not in ds2: return False elem2 = ds2[k1] - return self._isequal(elem2.value, elem1.value) + if not self._isequal(elem2.value, elem1.value): + return False + return True def AddNewBuildBlock(self, element: Abstract_MultiframeModuleAdder): if not isinstance(element, Abstract_MultiframeModuleAdder): @@ -2338,42 +2460,49 @@ def AddCommonCT_PET_MR_BuildBlocks(self): self.PerFrameTags, self.SharedTags, self), - UnassignedPerFrame( + PixelData( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self.PerFrameTags, self.SharedTags, self), - UnassignedShared( + ContentDateTime( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self.PerFrameTags, self.SharedTags, self), - PixelData( + InstanceCreationDateTime( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self.PerFrameTags, self.SharedTags, self), - ContentDateTime( + ContributingEquipmentSequence( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self.PerFrameTags, self.SharedTags, self), - InstanceCreationDateTime( + UnassignedPerFrame( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self.PerFrameTags, self.SharedTags, self), - ContributingEquipmentSequence( + UnassignedShared( + self._legacy_datasets, + self.ExcludedFromPerFrameTags, + self.ExcludedFromFunctionalGroupsTags, + 
self.PerFrameTags, + self.SharedTags, + self), + StackInformation( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, @@ -2479,3 +2608,41 @@ def AddBuildBlocksForPET(self): def BuildMultiFrame(self): for builder in self.__build_blocks: builder.AddModule() + + +class GeometryOfSlice: + def __init__(self, + row_vector: np.array, + col_vector: np.array, + top_left_corner_pos: np.array, + voxel_spaceing: np.array, + dimensions: tuple): + self.RowVector = row_vector + self.ColVector = col_vector + self.TopLeftCornerPosition = top_left_corner_pos + self.VoxelSpacing = voxel_spaceing + self.Dim = dimensions + + def GetNormalVector(self) -> np.array: + return np.cross(self.RowVector, + self.ColVector) + + def GetDistanceAlongOrigin(self) -> float: + n = self.GetNormalVector() + return float( + self.TopLeftCornerPosition.dot(n)) + + def AreParallel(slice1: GeometryOfSlice, + slice2: GeometryOfSlice, + tolerance=0.0001) -> bool: + if (type(slice1) != GeometryOfSlice or + type(slice2) != GeometryOfSlice): + print('Error') + return False + else: + n1 = slice1.GetNormalVector() + n2 = slice2.GetNormalVector() + for el1, el2 in zip(n1, n2): + if abs(el1 - el2) > tolerance: + return False + return True From 408cdde24a1a8088c399db3fd38ed43fe2c0ade5 Mon Sep 17 00:00:00 2001 From: Afshin Date: Thu, 9 Jul 2020 14:51:14 -0400 Subject: [PATCH 05/44] mend --- src/highdicom/legacy/sop.py | 199 ++++++++++++++++++------------------ 1 file changed, 100 insertions(+), 99 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index c8c9dced..816c3ac3 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -2,7 +2,7 @@ from __future__ import annotations import logging from collections import defaultdict -from typing import Any, Dict, List, Optional, Sequence, Union +from typing import Any, Dict, List, Optional, Sequence, Union, Callable import numpy as np from pydicom.datadict import tag_for_keyword, 
dictionary_VR from pydicom.dataset import Dataset @@ -542,7 +542,7 @@ class Abstract_MultiframeModuleAdder(ABC): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -573,7 +573,7 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: return False return False - def _mark_tag_as_used(self, tg: Tag): + def _mark_tag_as_used(self, tg: Tag) -> None: if tg in self.SharedTags: self.SharedTags[tg] = True elif tg in self.ExcludedFromPerFrameTags: @@ -583,8 +583,8 @@ def _mark_tag_as_used(self, tg: Tag): def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, src_kw_or_tg: str, dest_kw_or_tg: str = None, - check_not_to_be_perframe=True, - check_not_to_be_empty=False): + check_not_to_be_perframe: bool = True, + check_not_to_be_empty: bool = False) -> None: if type(src_kw_or_tg) == str: src_kw_or_tg = tag_for_keyword(src_kw_or_tg) if dest_kw_or_tg is None: @@ -635,7 +635,7 @@ def _get_shared_item(self) -> Dataset: return self.TargetDataset[sf_tg].value[0] def _get_or_create_attribute( - self, src: Dataset, kw: str, default) -> DataElement: + self, src: Dataset, kw: str, default: Any) -> DataElement: tg = tag_for_keyword(kw) if kw in src: a = deepcopy(src[kw]) @@ -651,9 +651,9 @@ def _get_or_create_attribute( self._mark_tag_as_used(tg) return a - def _add_module(self, module_name: str, excepted_attributes=[], - check_not_to_be_perframe=True, - check_not_to_be_empty=False): + def _add_module(self, module_name: str, excepted_attributes: list = [], + check_not_to_be_perframe: bool = True, + check_not_to_be_empty: bool = False) -> None: # sf_sop_instance_uid = sf_datasets[0] # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ # sf_sop_instance_uid] @@ -673,7 +673,7 @@ def _add_module(self, module_name: str, excepted_attributes=[], 
check_not_to_be_empty=check_not_to_be_empty) @abstractmethod - def AddModule(self): + def AddModule(self) -> None: pass @@ -681,7 +681,7 @@ class ImagePixelModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -693,7 +693,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: module_and_excepted_at = { "image-pixel": [ @@ -716,7 +716,7 @@ class CompositeInstanceContex(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -727,7 +727,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], perframe_tags, shared_tags, multi_frame_output) - self._module_excepted_list = { + self._module_excepted_list: dict = { "patient": [], "clinical-trial-subject": [], "general-study": @@ -812,7 +812,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], ] } - def AddModule(self): + def AddModule(self) -> None: for module_name, excpeted_a in self._module_excepted_list.items(): self._add_module( module_name, @@ -825,7 +825,7 @@ class CommonCTMRPETImageDescriptionMacro(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset, @@ -839,7 +839,8 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) self.Modality = modality - def _get_value_for_frame_type(self, attrib: DataElement): + def _get_value_for_frame_type(self, + 
attrib: DataElement) -> Union[list, None]: if type(attrib) != DataElement: return None output = ['', '', '', ''] @@ -860,7 +861,7 @@ def _get_frame_type_seq_tag(self) -> int: return tag_for_keyword(seq_kw) def _add_module_to_functional_group(self, src_fg: Dataset, - dest_fg: Dataset, level): + dest_fg: Dataset, level: int) -> None: FrameType_a = src_fg['ImageType'] if level == 0: FrameType_tg = tag_for_keyword('ImageType') @@ -870,7 +871,7 @@ def _add_module_to_functional_group(self, src_fg: Dataset, dest_fg[FrameType_tg] = DataElement(FrameType_tg, FrameType_a.VR, new_val) - def element_generator(kw, val) -> DataElement: + def element_generator(kw: str, val: Any) -> DataElement: return DataElement( tag_for_keyword(kw), dictionary_VR(tag_for_keyword(kw)), val) @@ -881,7 +882,7 @@ def element_generator(kw, val) -> DataElement: dest_fg['VolumeBasedCalculationTechnique'] = element_generator( 'VolumeBasedCalculationTechnique', "NONE") - def AddModule(self): + def AddModule(self) -> None: im_type_tag = tag_for_keyword('ImageType') seq_tg = self._get_frame_type_seq_tag() if im_type_tag not in self.PerFrameTags: @@ -906,7 +907,7 @@ class EnhancedCommonImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -918,7 +919,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: # ct_mr = CommonCTMRImageDescriptionMacro(self.SingleFrameSet # , self.ExcludedFromPerFrameTags # , self.PerFrameTags @@ -975,7 +976,7 @@ class ContrastBolusModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, 
shared_tags: dict, multi_frame_output: Dataset): @@ -987,7 +988,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: self._add_module('contrast-bolus') @@ -995,7 +996,7 @@ class EnhancedCTImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1007,7 +1008,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: pass # David's code doesn't hold anything for this module ... should ask him @@ -1016,7 +1017,7 @@ class EnhancedPETImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1028,7 +1029,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: pass # David's code doesn't hold anything for this module ... 
should ask him @@ -1037,7 +1038,7 @@ class EnhancedMRImageModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1049,7 +1050,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: self._copy_attrib_if_present( self.SingleFrameSet[0], self.TargetDataset, @@ -1096,7 +1097,7 @@ class AcquisitionContextModule(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1108,7 +1109,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: self._copy_attrib_if_present( self.SingleFrameSet, self.TargetDataset, @@ -1120,7 +1121,7 @@ class FrameAnatomyFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1133,7 +1134,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: # David's code is more complicaated than mine # Should check it out later. 
fa_seq_tg = tag_for_keyword('FrameAnatomySequence') @@ -1173,7 +1174,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: bodypart_tg in tags or anatomical_reg_tg) - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1191,7 +1192,7 @@ class PixelMeasuresFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1212,7 +1213,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: ImagerPixelSpacing_tg in tags) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, item, @@ -1236,7 +1237,7 @@ def _add_module_to_functional_group( [item]) dest_fg[pixel_measures_tg] = seq - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1254,7 +1255,7 @@ class PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1271,7 +1272,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: return ImagePositionPatient_tg in tags def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() 
self._copy_attrib_if_present(src_fg, item, @@ -1285,7 +1286,7 @@ def _add_module_to_functional_group( [item]) dest_fg[PlanePositionSequence_tg] = seq - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1303,7 +1304,7 @@ class PlaneOrientationFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1320,7 +1321,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: return ImageOrientationPatient_tg in tags def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, item, @@ -1332,7 +1333,7 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1350,7 +1351,7 @@ class FrameVOILUTFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1372,7 +1373,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: WindowCenterWidthExplanation_tg in tags) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() 
self._copy_attrib_if_present(src_fg, item, @@ -1394,7 +1395,7 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1412,7 +1413,7 @@ class PixelValueTransformationFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1433,7 +1434,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: RescaleType_tg in tags) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, item, @@ -1479,7 +1480,7 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1497,7 +1498,7 @@ class ReferencedImageFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1513,14 +1514,14 @@ def _contains_right_attributes(self, tags: dict) -> bool: return tag_for_keyword('ReferencedImageSequence') in tags def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: 
Dataset) -> None: self._copy_attrib_if_present(src_fg, dest_fg, 'ReferencedImageSequence', check_not_to_be_perframe=False, check_not_to_be_empty=False) - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1538,7 +1539,7 @@ class DerivationImageFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1554,7 +1555,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: return tag_for_keyword('SourceImageSequence') in tags def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, item, @@ -1576,7 +1577,7 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): + def AddModule(self) -> None: if (not self._contains_right_attributes(self.PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) @@ -1594,7 +1595,7 @@ class UnassignedPerFrame(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1607,7 +1608,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() for tg, used in 
self.PerFrameTags.items(): if used not in self.ExcludedFromFunctionalGroupsTags: @@ -1621,7 +1622,7 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): + def AddModule(self) -> None: for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1632,7 +1633,7 @@ class UnassignedShared(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1645,7 +1646,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() for tg, used in self.SharedTags.items(): if (not used and @@ -1660,7 +1661,7 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self): + def AddModule(self) -> None: item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) @@ -1669,7 +1670,7 @@ class ConversionSourceFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1682,7 +1683,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, item, @@ -1701,7 +1702,7 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), 
[item]) dest_fg[tg] = seq - def AddModule(self): + def AddModule(self) -> None: for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1712,7 +1713,7 @@ class FrameContentFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1734,7 +1735,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: AcquisitionDate_tg in tags) def _add_module_to_functional_group( - self, src_fg: Dataset, dest_fg: Dataset): + self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() item['AcquisitionNumber'] = self._get_or_create_attribute( src_fg, 'AcquisitionNumber', 0) @@ -1794,7 +1795,7 @@ def _add_module_to_functional_group( dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), [item]) # Also we want to add the earliest frame acq date time to the multiframe: - def AddModule(self): + def AddModule(self) -> None: for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1810,7 +1811,7 @@ class PixelData(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1833,7 +1834,7 @@ def _is_other_word_vr(self, vr: str) -> bool: # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') # return ImagePositionPatient_tg in tags - def AddModule(self): + def AddModule(self) -> None: kw = 'NumberOfFrames' tg = tag_for_keyword(kw) FrameCount = len(self.SingleFrameSet) @@ -1869,7 +1870,7 @@ class ContentDateTime(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: 
Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1882,7 +1883,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) self.EarliestContentDateTime = self.FarthestFutureDateTime - def AddModule(self): + def AddModule(self) -> None: for i in range(0, len(self.SingleFrameSet)): src = self.SingleFrameSet[i] ContentDate_a = self._get_or_create_attribute( @@ -1909,7 +1910,7 @@ class InstanceCreationDateTime(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1921,7 +1922,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self): + def AddModule(self) -> None: nnooww = datetime.now() n_d = DA(nnooww.date().strftime('%Y%m%d')) n_t = TM(nnooww.time().strftime('%H%M%S')) @@ -1937,7 +1938,7 @@ class ContributingEquipmentSequence(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -1949,11 +1950,11 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def _add_data_element_to_target(self, kw: str, value) -> None: + def _add_data_element_to_target(self, kw: str, value: Any) -> None: tg = tag_for_keyword(kw) self.TargetDataset[kw] = DataElement(tg, dictionary_VR(tg), value) - def AddModule(self): + def AddModule(self) -> None: CodeValue_tg = tag_for_keyword('CodeValue') CodeMeaning_tg = tag_for_keyword('CodeMeaning') CodingSchemeDesignator_tg = 
tag_for_keyword('CodingSchemeDesignator') @@ -1995,7 +1996,7 @@ class StackInformation(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], excluded_from_perframe_tags: dict, - excluded_from_functional_tags: Sequence[Tag], + excluded_from_functional_tags: dict, perframe_tags: dict, shared_tags: dict, multi_frame_output: Dataset): @@ -2068,7 +2069,7 @@ def _are_all_slices_parallel(self) -> bool: else: return False - def AddModule(self): + def AddModule(self) -> None: self._build_slices_geometry() round_digits = int(np.ceil(-np.log10(self._tolerance))) if self._are_all_slices_parallel(): @@ -2079,11 +2080,11 @@ def AddModule(self): else: self._slice_location_map[dist] = [idx] distance_index = 0 - for loc, idx in sorted(self._slice_location_map.items()): - if len(idx) != 1: + for loc, idxs in sorted(self._slice_location_map.items()): + if len(idxs) != 1: print('Error') return - frame_index = idx[0] + frame_index = idxs[0] frame = self._get_perframe_item(frame_index) new_item = Dataset() new_item["StackID"] = self._get_or_create_attribute( @@ -2108,7 +2109,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - sort_key=None, + sort_key: Callable = None, **kwargs: Any) -> None: """ Parameters @@ -2260,8 +2261,8 @@ def default_sort_key(x: Dataset) -> tuple: out += (x['SOPInstanceUID'].value, ) return out - def _find_per_frame_and_shared_tags(self): - rough_shared = {} + def _find_per_frame_and_shared_tags(self) -> None: + rough_shared: dict = {} sfs = self._legacy_datasets for ds in sfs: for ttag, elem in ds.items(): @@ -2319,11 +2320,11 @@ def _istag_repeating_group(self, t: Tag) -> bool: def _istag_group_length(self, t: Tag) -> bool: return t.element == 0 - def _isequal(self, v1, v2): + def _isequal(self, v1: Any, v2: Any) -> bool: from pydicom.valuerep import DSfloat float_tolerance = 1.0e-5 - def is_equal_float(x1, x2) -> bool: + def is_equal_float(x1: float, x2: float) -> bool: return abs(x1 - x2) < 
float_tolerance if type(v1) != type(v2): return False @@ -2345,7 +2346,7 @@ def is_equal_float(x1, x2) -> bool: return False return True - def _isequal_dicom_dataset(self, ds1, ds2) -> bool: + def _isequal_dicom_dataset(self, ds1: Dataset, ds2: Dataset) -> bool: if type(ds1) != type(ds2): return False if type(ds1) != Dataset: @@ -2358,16 +2359,16 @@ def _isequal_dicom_dataset(self, ds1, ds2) -> bool: return False return True - def AddNewBuildBlock(self, element: Abstract_MultiframeModuleAdder): + def AddNewBuildBlock(self, element: Abstract_MultiframeModuleAdder) -> None: if not isinstance(element, Abstract_MultiframeModuleAdder): raise ValueError('Build block must be an instance ' 'of Abstract_MultiframeModuleAdder') self.__build_blocks.append(element) - def ClearBuildBlocks(self): + def ClearBuildBlocks(self) -> None: self.__build_blocks = [] - def AddCommonCT_PET_MR_BuildBlocks(self): + def AddCommonCT_PET_MR_BuildBlocks(self) -> None: Blocks = [ ImagePixelModule( self._legacy_datasets, @@ -2513,7 +2514,7 @@ def AddCommonCT_PET_MR_BuildBlocks(self): for b in Blocks: self.AddNewBuildBlock(b) - def AddCTSpecificBuildBlocks(self): + def AddCTSpecificBuildBlocks(self) -> None: Blocks = [ CommonCTMRPETImageDescriptionMacro( self._legacy_datasets, @@ -2541,7 +2542,7 @@ def AddCTSpecificBuildBlocks(self): for b in Blocks: self.AddNewBuildBlock(b) - def AddMRSpecificBuildBlocks(self): + def AddMRSpecificBuildBlocks(self) -> None: Blocks = [ CommonCTMRPETImageDescriptionMacro( self._legacy_datasets, @@ -2569,7 +2570,7 @@ def AddMRSpecificBuildBlocks(self): for b in Blocks: self.AddNewBuildBlock(b) - def AddPETSpecificBuildBlocks(self): + def AddPETSpecificBuildBlocks(self) -> None: Blocks = [ CommonCTMRPETImageDescriptionMacro( self._legacy_datasets, @@ -2590,22 +2591,22 @@ def AddPETSpecificBuildBlocks(self): for b in Blocks: self.AddNewBuildBlock(b) - def AddBuildBlocksForCT(self): + def AddBuildBlocksForCT(self) -> None: self.ClearBuildBlocks() 
self.AddCommonCT_PET_MR_BuildBlocks() self.AddCTSpecificBuildBlocks() - def AddBuildBlocksForMR(self): + def AddBuildBlocksForMR(self) -> None: self.ClearBuildBlocks() self.AddCommonCT_PET_MR_BuildBlocks() self.AddMRSpecificBuildBlocks() - def AddBuildBlocksForPET(self): + def AddBuildBlocksForPET(self) -> None: self.ClearBuildBlocks() self.AddCommonCT_PET_MR_BuildBlocks() self.AddPETSpecificBuildBlocks() - def BuildMultiFrame(self): + def BuildMultiFrame(self) -> None: for builder in self.__build_blocks: builder.AddModule() @@ -2634,7 +2635,7 @@ def GetDistanceAlongOrigin(self) -> float: def AreParallel(slice1: GeometryOfSlice, slice2: GeometryOfSlice, - tolerance=0.0001) -> bool: + tolerance: float = 0.0001) -> bool: if (type(slice1) != GeometryOfSlice or type(slice2) != GeometryOfSlice): print('Error') From 2444cc59bd80b268d31812d163081cd7cd195017 Mon Sep 17 00:00:00 2001 From: Afshin Date: Thu, 9 Jul 2020 17:36:24 -0400 Subject: [PATCH 06/44] mend --- src/highdicom/legacy/sop.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 816c3ac3..0f2f10df 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -3,7 +3,7 @@ import logging from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union, Callable -import numpy as np +from numpy import log10, array, ceil, cross from pydicom.datadict import tag_for_keyword, dictionary_VR from pydicom.dataset import Dataset from pydicom.tag import Tag @@ -2039,12 +2039,12 @@ def _build_slices_geometry(self) -> None: if (ImageOrientationPatient_v is not None and ImagePositionPatient_v is not None and PixelSpacing_v is not None): - row = np.array(ImageOrientationPatient_v[0:3]) - col = np.array(ImageOrientationPatient_v[3:]) - voxel_spaceing = np.array([PixelSpacing_v[0], - PixelSpacing_v[1], - SliceThickness_v]) - tpl = np.array(ImagePositionPatient_v) + row = 
array(ImageOrientationPatient_v[0:3]) + col = array(ImageOrientationPatient_v[3:]) + voxel_spaceing = array([PixelSpacing_v[0], + PixelSpacing_v[1], + SliceThickness_v]) + tpl = array(ImagePositionPatient_v) dim = (Rows_v, Columns_v, 1) self._slices.append(GeometryOfSlice(row, col, tpl, voxel_spaceing, dim)) @@ -2071,7 +2071,7 @@ def _are_all_slices_parallel(self) -> bool: def AddModule(self) -> None: self._build_slices_geometry() - round_digits = int(np.ceil(-np.log10(self._tolerance))) + round_digits = int(ceil(-log10(self._tolerance))) if self._are_all_slices_parallel(): for idx, s in enumerate(self._slices): dist = round(s.GetDistanceAlongOrigin(), round_digits) @@ -2109,7 +2109,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - sort_key: Callable = None, + sort_key: Callable[..., Any] = None, **kwargs: Any) -> None: """ Parameters @@ -2613,10 +2613,10 @@ def BuildMultiFrame(self) -> None: class GeometryOfSlice: def __init__(self, - row_vector: np.array, - col_vector: np.array, - top_left_corner_pos: np.array, - voxel_spaceing: np.array, + row_vector: array, + col_vector: array, + top_left_corner_pos: array, + voxel_spaceing: array, dimensions: tuple): self.RowVector = row_vector self.ColVector = col_vector @@ -2624,9 +2624,8 @@ def __init__(self, self.VoxelSpacing = voxel_spaceing self.Dim = dimensions - def GetNormalVector(self) -> np.array: - return np.cross(self.RowVector, - self.ColVector) + def GetNormalVector(self) -> array: + return cross(self.RowVector, self.ColVector) def GetDistanceAlongOrigin(self) -> float: n = self.GetNormalVector() From 09467cebfcfb832a828b3923bef87e7feedbe5b4 Mon Sep 17 00:00:00 2001 From: Afshin Date: Thu, 9 Jul 2020 17:57:36 -0400 Subject: [PATCH 07/44] mend --- src/highdicom/legacy/sop.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 0f2f10df..31937682 100644 --- 
a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -3,7 +3,7 @@ import logging from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union, Callable -from numpy import log10, array, ceil, cross +from numpy import log10, array, ceil, cross, dot, ndarray from pydicom.datadict import tag_for_keyword, dictionary_VR from pydicom.dataset import Dataset from pydicom.tag import Tag @@ -2109,7 +2109,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - sort_key: Callable[..., Any] = None, + sort_key: Callable[...] = None, **kwargs: Any) -> None: """ Parameters @@ -2613,10 +2613,10 @@ def BuildMultiFrame(self) -> None: class GeometryOfSlice: def __init__(self, - row_vector: array, - col_vector: array, - top_left_corner_pos: array, - voxel_spaceing: array, + row_vector: ndarray, + col_vector: ndarray, + top_left_corner_pos: ndarray, + voxel_spaceing: ndarray, dimensions: tuple): self.RowVector = row_vector self.ColVector = col_vector @@ -2624,13 +2624,13 @@ def __init__(self, self.VoxelSpacing = voxel_spaceing self.Dim = dimensions - def GetNormalVector(self) -> array: + def GetNormalVector(self) -> ndarray: return cross(self.RowVector, self.ColVector) def GetDistanceAlongOrigin(self) -> float: n = self.GetNormalVector() return float( - self.TopLeftCornerPosition.dot(n)) + dot(self.TopLeftCornerPosition, n)) def AreParallel(slice1: GeometryOfSlice, slice2: GeometryOfSlice, @@ -2640,8 +2640,8 @@ def AreParallel(slice1: GeometryOfSlice, print('Error') return False else: - n1 = slice1.GetNormalVector() - n2 = slice2.GetNormalVector() + n1: ndarray = slice1.GetNormalVector() + n2: ndarray = slice2.GetNormalVector() for el1, el2 in zip(n1, n2): if abs(el1 - el2) > tolerance: return False From 8f92d685822b000e1143dce3f5493d3f7db2d5a0 Mon Sep 17 00:00:00 2001 From: Afshin Date: Thu, 9 Jul 2020 18:08:10 -0400 Subject: [PATCH 08/44] mend --- src/highdicom/legacy/sop.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 31937682..80c26a72 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -2109,7 +2109,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - sort_key: Callable[...] = None, + sort_key: Callable = None, **kwargs: Any) -> None: """ Parameters From 69b18922f98e08182fea6800d2992e1b1cb7f134 Mon Sep 17 00:00:00 2001 From: Afshin Date: Tue, 14 Jul 2020 14:02:52 -0400 Subject: [PATCH 09/44] Added frameset collection class --- src/highdicom/legacy/sop.py | 531 ++++++++++++++++++++---------------- 1 file changed, 302 insertions(+), 229 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 80c26a72..0daf0804 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,4 +1,4 @@ -"""Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" +""" Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" from __future__ import annotations import logging from collections import defaultdict @@ -549,7 +549,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], self.ExcludedFromPerFrameTags = excluded_from_perframe_tags self.ExcludedFromFunctionalGroupsTags = excluded_from_functional_tags - self.PerFrameTags = perframe_tags + self._PerFrameTags = perframe_tags self.SharedTags = shared_tags self.TargetDataset = multi_frame_output self.SingleFrameSet = sf_datasets @@ -578,8 +578,8 @@ def _mark_tag_as_used(self, tg: Tag) -> None: self.SharedTags[tg] = True elif tg in self.ExcludedFromPerFrameTags: self.ExcludedFromPerFrameTags[tg] = True - elif tg in self.PerFrameTags: - self.PerFrameTags[tg] = True + elif tg in self._PerFrameTags: + self._PerFrameTags[tg] = True def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, src_kw_or_tg: str, dest_kw_or_tg: str = None, @@ -592,7 +592,7 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: 
Dataset, elif type(dest_kw_or_tg) == str: dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) if check_not_to_be_perframe: - if src_kw_or_tg in self.PerFrameTags: + if src_kw_or_tg in self._PerFrameTags: return if src_kw_or_tg in src_ds: elem = src_ds[src_kw_or_tg] @@ -885,7 +885,7 @@ def element_generator(kw: str, val: Any) -> DataElement: def AddModule(self) -> None: im_type_tag = tag_for_keyword('ImageType') seq_tg = self._get_frame_type_seq_tag() - if im_type_tag not in self.PerFrameTags: + if im_type_tag not in self._PerFrameTags: self._add_module_to_functional_group(self.SingleFrameSet[0], self.TargetDataset, 0) # ---------------------------- @@ -922,7 +922,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], def AddModule(self) -> None: # ct_mr = CommonCTMRImageDescriptionMacro(self.SingleFrameSet # , self.ExcludedFromPerFrameTags - # , self.PerFrameTags + # , self._PerFrameTags # , self.SharedTags # , self.TargetDataset) # ct_mr.AddModule() @@ -953,7 +953,7 @@ def AddModule(self) -> None: 'LossyImageCompressionMethod'] for kw in attribs_to_be_added: self._copy_attrib_if_present(ref_dataset, self.TargetDataset, kw) - if tag_for_keyword('PresentationLUTShape') not in self.PerFrameTags: + if tag_for_keyword('PresentationLUTShape') not in self._PerFrameTags: # actually should really invert the pixel data if MONOCHROME1, # since only MONOCHROME2 is permitted : ( # also, do not need to check if PhotometricInterpretation is @@ -1175,13 +1175,13 @@ def _contains_right_attributes(self, tags: dict) -> bool: anatomical_reg_tg) def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif 
self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1238,13 +1238,13 @@ def _add_module_to_functional_group( dest_fg[pixel_measures_tg] = seq def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1287,13 +1287,13 @@ def _add_module_to_functional_group( dest_fg[PlanePositionSequence_tg] = seq def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1334,13 +1334,13 @@ def _add_module_to_functional_group( dest_fg[tg] = seq def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() 
self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1396,13 +1396,13 @@ def _add_module_to_functional_group( dest_fg[tg] = seq def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1481,13 +1481,13 @@ def _add_module_to_functional_group( dest_fg[tg] = seq def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1522,13 +1522,13 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or 
self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1578,13 +1578,13 @@ def _add_module_to_functional_group( dest_fg[tg] = seq def AddModule(self) -> None: - if (not self._contains_right_attributes(self.PerFrameTags) and + if (not self._contains_right_attributes(self._PerFrameTags) and (self._contains_right_attributes(self.SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self.PerFrameTags): + elif self._contains_right_attributes(self._PerFrameTags): for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1610,7 +1610,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], def _add_module_to_functional_group( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() - for tg, used in self.PerFrameTags.items(): + for tg, used in self._PerFrameTags.items(): if used not in self.ExcludedFromFunctionalGroupsTags: self._copy_attrib_if_present(src_fg, item, @@ -1742,7 +1742,7 @@ def _add_module_to_functional_group( AcquisitionDateTime_a = self._get_or_create_attribute( src_fg, 'AcquisitionDateTime', self.EarliestDateTime) AcquisitionDateTime_is_perframe = self._contains_right_attributes( - self.PerFrameTags) + self._PerFrameTags) if AcquisitionDateTime_a.value == self.EarliestDateTime: AcquisitionDate_a = self._get_or_create_attribute( src_fg, 'AcquisitionDate', self.EarliestDate) @@ -2104,7 +2104,7 @@ class LegacyConvertedEnhanceImage(SOPClass): def __init__( self, - legacy_datasets: 
Sequence[Dataset], + frame_set: FrameSet, series_instance_uid: str, series_number: int, sop_instance_uid: str, @@ -2129,6 +2129,7 @@ def __init__( Additional keyword arguments that will be passed to the constructor of `highdicom.base.SOPClass` """ + legacy_datasets = frame_set.Frames try: ref_ds = legacy_datasets[0] except IndexError: @@ -2156,84 +2157,16 @@ def __init__( referring_physician_name=ref_ds.ReferringPhysicianName, **kwargs) self._legacy_datasets = legacy_datasets - self.DistinguishingAttributeKeywords = [ - 'PatientID', - 'PatientName', - 'Manufacturer', - 'InstitutionName', - 'InstitutionAddress', - 'StationName', - 'InstitutionalDepartmentName', - 'ManufacturerModelName', - 'DeviceSerialNumber', - 'SoftwareVersions', - 'GantryID', - 'PixelPaddingValue', - 'Modality', - 'ImageType', - 'BurnedInAnnotation', - 'SOPClassUID', - 'Rows', - 'Columns', - 'BitsStored', - 'BitsAllocated', - 'HighBit', - 'PixelRepresentation', - 'PhotometricInterpretation', - 'PlanarConfiguration', - 'SamplesPerPixel', - 'ProtocolName', - 'ImageOrientationPatient', - 'PixelSpacing', - 'SliceThickness', - 'AcquisitionContextSequence'] - to_be_removed_from_distinguishing_attribs = set() - for kw in self.DistinguishingAttributeKeywords: - x: list = [] - not_present_attribute_count = 0 - for ds in legacy_datasets: - if kw in ds: - if len(x) == 0: - x.append(ds[kw]) - else: - already_has_new_value = False - for x_elem in x: - if self._isequal(x_elem.value, ds[kw].value): - already_has_new_value = True - break - if not already_has_new_value: - x.append(ds[kw]) - else: - to_be_removed_from_distinguishing_attribs.add(kw) - not_present_attribute_count += 1 - if not_present_attribute_count != len(legacy_datasets)\ - and not_present_attribute_count != 0: - raise ValueError('One or more datasets lack {} ' - 'distinguishing attributes'.format(kw)) - if len(x) > 1: - error_msg = 'All instances must have the same' - ' value for {}.\n\tExisting values: ' - for x_elem in x: - error_msg += 
'\n\t\t{}'.format(x_elem.value) - raise ValueError(error_msg) - for kw in to_be_removed_from_distinguishing_attribs: - self.DistinguishingAttributeKeywords.remove(kw) - self.ExcludedFromPerFrameTags = {} - for i in self.DistinguishingAttributeKeywords: - self.ExcludedFromPerFrameTags[tag_for_keyword(i)] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionDateTime')] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionDate')] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionTime')] = False + self.DistinguishingAttributesTags = self._get_tag_used_dictionary( + frame_set.DistinguishingAttributesTags) + self.ExcludedFromPerFrameTags = self._get_tag_used_dictionary( + frame_set.ExcludedFromPerFrameTags) + self._PerFrameTags = self._get_tag_used_dictionary( + frame_set.PerFrameTags) + self.SharedTags = self._get_tag_used_dictionary( + frame_set.SharedTags) self.ExcludedFromFunctionalGroupsTags = { tag_for_keyword('SpecificCharacterSet'): False} - - # --------------------------------------------------------------------- - self.PerFrameTags: dict = {} - self.SharedTags: dict = {} - self._find_per_frame_and_shared_tags() # ---------------------------------------------------------------------- self.__build_blocks: list = [] # == == == == == == == == == == == == == == == == == == == == == == == = @@ -2251,6 +2184,12 @@ def __init__( 'legacy-converted-enhanced-pet-image'): self.AddBuildBlocksForPET() + def _get_tag_used_dictionary(self, input: list) -> dict: + out: dict = {} + for item in input: + out[item] = False + return out + def default_sort_key(x: Dataset) -> tuple: out: tuple = tuple() if 'SeriesNumber' in x: @@ -2261,104 +2200,6 @@ def default_sort_key(x: Dataset) -> tuple: out += (x['SOPInstanceUID'].value, ) return out - def _find_per_frame_and_shared_tags(self) -> None: - rough_shared: dict = {} - sfs = self._legacy_datasets - for ds in sfs: - for ttag, elem in ds.items(): - if (not ttag.is_private and not 
- self._istag_file_meta_information_group(ttag) and not - self._istag_repeating_group(ttag) and not - self._istag_group_length(ttag) and not - self._istag_excluded_from_perframe(ttag) and - ttag != tag_for_keyword('PixelData')): - elem = ds[ttag] - self.PerFrameTags[ttag] = False - if ttag in rough_shared: - rough_shared[ttag].append(elem.value) - else: - rough_shared[ttag] = [elem.value] - to_be_removed_from_shared = [] - for ttag, v in rough_shared.items(): - v = rough_shared[ttag] - if len(v) < len(self._legacy_datasets): - to_be_removed_from_shared.append(ttag) - else: - all_values_are_equal = True - for v_i in v: - if not self._isequal(v_i, v[0]): - all_values_are_equal = False - break - if not all_values_are_equal: - to_be_removed_from_shared.append(ttag) - from pydicom.datadict import keyword_for_tag - for t, v in rough_shared.items(): - if keyword_for_tag(t) != 'PatientSex': - continue - for t in to_be_removed_from_shared: - del rough_shared[t] - for t, v in rough_shared.items(): - self.SharedTags[t] = False - del self.PerFrameTags[t] - # for t in self.SharedTags: - # print(keyword_for_tag(t)) - # print('perframe ---------------------------') - # for t in self.PerFrameTags: - # print (keyword_for_tag(t)) - - def _istag_excluded_from_perframe(self, t: Tag) -> bool: - return t in self.ExcludedFromPerFrameTags - - def _istag_file_meta_information_group(self, t: Tag) -> bool: - return t.group == 0x0002 - - def _istag_repeating_group(self, t: Tag) -> bool: - g = t.group - return (g >= 0x5000 and g <= 0x501e) or\ - (g >= 0x6000 and g <= 0x601e) - - def _istag_group_length(self, t: Tag) -> bool: - return t.element == 0 - - def _isequal(self, v1: Any, v2: Any) -> bool: - from pydicom.valuerep import DSfloat - float_tolerance = 1.0e-5 - - def is_equal_float(x1: float, x2: float) -> bool: - return abs(x1 - x2) < float_tolerance - if type(v1) != type(v2): - return False - if type(v1) == DicomSequence: - for item1, item2 in zip(v1, v2): - 
self._isequal_dicom_dataset(item1, item2) - if type(v1) != MultiValue: - v11 = [v1] - v22 = [v2] - else: - v11 = v1 - v22 = v2 - for xx, yy in zip(v11, v22): - if type(xx) == DSfloat or type(xx) == float: - if not is_equal_float(xx, yy): - return False - else: - if xx != yy: - return False - return True - - def _isequal_dicom_dataset(self, ds1: Dataset, ds2: Dataset) -> bool: - if type(ds1) != type(ds2): - return False - if type(ds1) != Dataset: - return False - for k1, elem1 in ds1.items(): - if k1 not in ds2: - return False - elem2 = ds2[k1] - if not self._isequal(elem2.value, elem1.value): - return False - return True - def AddNewBuildBlock(self, element: Abstract_MultiframeModuleAdder) -> None: if not isinstance(element, Abstract_MultiframeModuleAdder): raise ValueError('Build block must be an instance ' @@ -2374,140 +2215,140 @@ def AddCommonCT_PET_MR_BuildBlocks(self) -> None: self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), CompositeInstanceContex( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), EnhancedCommonImageModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), AcquisitionContextModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), FrameAnatomyFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), PixelMeasuresFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), 
PlaneOrientationFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), PlanePositionFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), FrameVOILUTFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), PixelValueTransformationFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), ReferencedImageFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), ConversionSourceFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), FrameContentFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), PixelData( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), ContentDateTime( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), InstanceCreationDateTime( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), ContributingEquipmentSequence( self._legacy_datasets, self.ExcludedFromPerFrameTags, 
self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), UnassignedPerFrame( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), UnassignedShared( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), StackInformation( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self) ] @@ -2520,7 +2361,7 @@ def AddCTSpecificBuildBlocks(self) -> None: self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self, 'CT'), @@ -2528,14 +2369,14 @@ def AddCTSpecificBuildBlocks(self) -> None: self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), ContrastBolusModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self) ] @@ -2548,7 +2389,7 @@ def AddMRSpecificBuildBlocks(self) -> None: self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self, 'MR'), @@ -2556,14 +2397,14 @@ def AddMRSpecificBuildBlocks(self) -> None: self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self), ContrastBolusModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self) ] @@ -2576,7 +2417,7 @@ def AddPETSpecificBuildBlocks(self) -> None: 
self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self, 'PET'), @@ -2584,7 +2425,7 @@ def AddPETSpecificBuildBlocks(self) -> None: self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, - self.PerFrameTags, + self._PerFrameTags, self.SharedTags, self) ] @@ -2646,3 +2487,235 @@ def AreParallel(slice1: GeometryOfSlice, if abs(el1 - el2) > tolerance: return False return True + + +class DicomHelper: + def __init__(self): + pass + + def istag_file_meta_information_group(t: Tag) -> bool: + return t.group == 0x0002 + + def istag_repeating_group(t: Tag) -> bool: + g = t.group + return (g >= 0x5000 and g <= 0x501e) or\ + (g >= 0x6000 and g <= 0x601e) + + def istag_group_length(t: Tag) -> bool: + return t.element == 0 + + def isequal(v1: Any, v2: Any) -> bool: + from pydicom.valuerep import DSfloat + float_tolerance = 1.0e-5 + + def is_equal_float(x1: float, x2: float) -> bool: + return abs(x1 - x2) < float_tolerance + if type(v1) != type(v2): + return False + if type(v1) == DicomSequence: + for item1, item2 in zip(v1, v2): + DicomHelper.isequal_dicom_dataset(item1, item2) + if type(v1) != MultiValue: + v11 = [v1] + v22 = [v2] + else: + v11 = v1 + v22 = v2 + for xx, yy in zip(v11, v22): + if type(xx) == DSfloat or type(xx) == float: + if not is_equal_float(xx, yy): + return False + else: + if xx != yy: + return False + return True + + def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: + if type(ds1) != type(ds2): + return False + if type(ds1) != Dataset: + return False + for k1, elem1 in ds1.items(): + if k1 not in ds2: + return False + elem2 = ds2[k1] + if not DicomHelper.isequal(elem2.value, elem1.value): + return False + return True + + +class FrameSet: + def __init__(self, single_frame_list: list, + distinguishing_tags: list): + self._Frames = single_frame_list + self._DistinguishingAttributesTags = distinguishing_tags + 
tmp = [ + tag_for_keyword('AcquisitionDateTime'), + tag_for_keyword('AcquisitionDate'), + tag_for_keyword('AcquisitionTime'), + tag_for_keyword('SpecificCharacterSet')] + self._ExcludedFromPerFrameTags = self.DistinguishingAttributesTags + tmp + self._PerFrameTags = [] + self._SharedTags = [] + self._find_per_frame_and_shared_tags() + + @property + def Frames(self): + return self._Frames[:] + + @property + def DistinguishingAttributesTags(self): + return self._DistinguishingAttributesTags[:] + + @property + def ExcludedFromPerFrameTags(self): + return self._ExcludedFromPerFrameTags[:] + + @property + def PerFrameTags(self): + return self._PerFrameTags[:] + + @property + def SharedTags(self): + return self._SharedTags[:] + + def _find_per_frame_and_shared_tags(self) -> None: + rough_shared: dict = {} + sfs = self.Frames + for ds in sfs: + for ttag, elem in ds.items(): + if (not ttag.is_private and not + DicomHelper.istag_file_meta_information_group(ttag) and not + DicomHelper.istag_repeating_group(ttag) and not + DicomHelper.istag_group_length(ttag) and not + self._istag_excluded_from_perframe(ttag) and + ttag != tag_for_keyword('PixelData')): + elem = ds[ttag] + self._PerFrameTags.append(ttag) + if ttag in rough_shared: + rough_shared[ttag].append(elem.value) + else: + rough_shared[ttag] = [elem.value] + to_be_removed_from_shared = [] + for ttag, v in rough_shared.items(): + v = rough_shared[ttag] + if len(v) < len(self.Frames): + to_be_removed_from_shared.append(ttag) + else: + all_values_are_equal = True + for v_i in v: + if not DicomHelper.isequal(v_i, v[0]): + all_values_are_equal = False + break + if not all_values_are_equal: + to_be_removed_from_shared.append(ttag) + from pydicom.datadict import keyword_for_tag + for t, v in rough_shared.items(): + if keyword_for_tag(t) != 'PatientSex': + continue + for t in to_be_removed_from_shared: + del rough_shared[t] + for t, v in rough_shared.items(): + self._SharedTags.append(t) + self._PerFrameTags.remove(t) + + def 
_istag_excluded_from_perframe(self, t: Tag) -> bool: + return t in self.ExcludedFromPerFrameTags + + +class FrameSetCollection: + def __init__(self, single_frame_list: list): + self.MixedFrames = single_frame_list + self.MixedFramesCopy = self.MixedFrames[:] + self._DistinguishingAttributeKeywords = [ + 'PatientID', + 'PatientName', + 'Manufacturer', + 'InstitutionName', + 'InstitutionAddress', + 'StationName', + 'InstitutionalDepartmentName', + 'ManufacturerModelName', + 'DeviceSerialNumber', + 'SoftwareVersions', + 'GantryID', + 'PixelPaddingValue', + 'Modality', + 'ImageType', + 'BurnedInAnnotation', + 'SOPClassUID', + 'Rows', + 'Columns', + 'BitsStored', + 'BitsAllocated', + 'HighBit', + 'PixelRepresentation', + 'PhotometricInterpretation', + 'PlanarConfiguration', + 'SamplesPerPixel', + 'ProtocolName', + 'ImageOrientationPatient', + 'PixelSpacing', + 'SliceThickness', + 'AcquisitionContextSequence'] + to_be_removed_from_distinguishing_attribs = set() + self._FrameSets = [] + while len(self.MixedFramesCopy) != 0: + x = self._find_all_similar_to_first_datasets() + self._FrameSets.append(FrameSet(x[0], x[1])) + for kw in to_be_removed_from_distinguishing_attribs: + self.DistinguishingAttributeKeywords.remove(kw) + self.ExcludedFromPerFrameTags = {} + for i in self.DistinguishingAttributeKeywords: + self.ExcludedFromPerFrameTags[tag_for_keyword(i)] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDateTime')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDate')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionTime')] = False + self.ExcludedFromFunctionalGroupsTags = { + tag_for_keyword('SpecificCharacterSet'): False} + + def _find_all_similar_to_first_datasets(self) -> tuple: + similar_ds: list = [self.MixedFramesCopy[0]] + distinguishing_tags_existing = [] + distinguishing_tags_missing = [] + self.MixedFramesCopy = self.MixedFramesCopy[1:] + for kw in self.DistinguishingAttributeKeywords: + tg = 
tag_for_keyword(kw) + if tg in similar_ds[0]: + distinguishing_tags_existing.append(tg) + else: + distinguishing_tags_missing.append(tg) + for ds in self.MixedFramesCopy: + all_equal = True + for tg in distinguishing_tags_missing: + if tg in ds: + all_equal = False + break + if not all_equal: + continue + for tg in distinguishing_tags_existing: + ref_val = similar_ds[0][tg].value + if tg not in ds: + all_equal = False + break + new_val = ds[tg].value + if not DicomHelper.isequal(ref_val, new_val): + all_equal = False + break + if all_equal: + similar_ds.append(ds) + for ds in similar_ds: + if ds in self.MixedFramesCopy: + self.MixedFramesCopy.remove(ds) + return (similar_ds, distinguishing_tags_existing) + + @property + def DistinguishingAttributeKeywords(self): + return self._DistinguishingAttributeKeywords[:] + + @property + def FrameSets(self): + return self._FrameSets From 55ebc0afc20c6e9c022b2ffad85fc6050259424f Mon Sep 17 00:00:00 2001 From: Afshin Date: Wed, 15 Jul 2020 13:47:38 -0400 Subject: [PATCH 10/44] mend --- src/highdicom/legacy/sop.py | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 0daf0804..53207a20 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -2060,7 +2060,7 @@ def _are_all_slices_parallel(self) -> bool: for i in range(1, slice_count): curr_slice = self._slices[i] if not GeometryOfSlice.AreParallel( - curr_slice, last_slice, self._tolerance): + curr_slice, last_slice, self._tolerance): return False last_slice = curr_slice return True @@ -2167,9 +2167,9 @@ def __init__( frame_set.SharedTags) self.ExcludedFromFunctionalGroupsTags = { tag_for_keyword('SpecificCharacterSet'): False} - # ---------------------------------------------------------------------- + # -------------------------------------------------------------------- self.__build_blocks: list = [] - # == == == == == == == == == == == == == 
== == == == == == == == == == = + # == == == == == == == == == == == == == == == == == == == == == == == new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): new_ds.append(item) @@ -2200,7 +2200,8 @@ def default_sort_key(x: Dataset) -> tuple: out += (x['SOPInstanceUID'].value, ) return out - def AddNewBuildBlock(self, element: Abstract_MultiframeModuleAdder) -> None: + def AddNewBuildBlock( + self, element: Abstract_MultiframeModuleAdder) -> None: if not isinstance(element, Abstract_MultiframeModuleAdder): raise ValueError('Build block must be an instance ' 'of Abstract_MultiframeModuleAdder') @@ -2490,7 +2491,7 @@ def AreParallel(slice1: GeometryOfSlice, class DicomHelper: - def __init__(self): + def __init__(self) -> None: pass def istag_file_meta_information_group(t: Tag) -> bool: @@ -2554,29 +2555,30 @@ def __init__(self, single_frame_list: list, tag_for_keyword('AcquisitionDate'), tag_for_keyword('AcquisitionTime'), tag_for_keyword('SpecificCharacterSet')] - self._ExcludedFromPerFrameTags = self.DistinguishingAttributesTags + tmp - self._PerFrameTags = [] - self._SharedTags = [] + self._ExcludedFromPerFrameTags =\ + self.DistinguishingAttributesTags + tmp + self._PerFrameTags: list = [] + self._SharedTags: list = [] self._find_per_frame_and_shared_tags() @property - def Frames(self): + def Frames(self) -> List[Dataset]: return self._Frames[:] @property - def DistinguishingAttributesTags(self): + def DistinguishingAttributesTags(self) -> List[Tag]: return self._DistinguishingAttributesTags[:] @property - def ExcludedFromPerFrameTags(self): + def ExcludedFromPerFrameTags(self) -> List[Tag]: return self._ExcludedFromPerFrameTags[:] @property - def PerFrameTags(self): + def PerFrameTags(self) -> List[Tag]: return self._PerFrameTags[:] @property - def SharedTags(self): + def SharedTags(self) -> List[Tag]: return self._SharedTags[:] def _find_per_frame_and_shared_tags(self) -> None: @@ -2658,8 +2660,8 @@ def __init__(self, single_frame_list: list): 
'PixelSpacing', 'SliceThickness', 'AcquisitionContextSequence'] - to_be_removed_from_distinguishing_attribs = set() - self._FrameSets = [] + to_be_removed_from_distinguishing_attribs: set = set() + self._FrameSets: list = [] while len(self.MixedFramesCopy) != 0: x = self._find_all_similar_to_first_datasets() self._FrameSets.append(FrameSet(x[0], x[1])) @@ -2713,9 +2715,9 @@ def _find_all_similar_to_first_datasets(self) -> tuple: return (similar_ds, distinguishing_tags_existing) @property - def DistinguishingAttributeKeywords(self): + def DistinguishingAttributeKeywords(self) -> List[str]: return self._DistinguishingAttributeKeywords[:] @property - def FrameSets(self): + def FrameSets(self) -> List[FrameSet]: return self._FrameSets From 716c58180face081e3501ec6cd817f2a01070824 Mon Sep 17 00:00:00 2001 From: Afshin Date: Thu, 13 Aug 2020 15:51:43 -0400 Subject: [PATCH 11/44] mend --- src/highdicom/legacy/sop.py | 541 ++++++++++++++++++++++-------------- 1 file changed, 334 insertions(+), 207 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 53207a20..4f22ba3b 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -550,7 +550,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], self.ExcludedFromPerFrameTags = excluded_from_perframe_tags self.ExcludedFromFunctionalGroupsTags = excluded_from_functional_tags self._PerFrameTags = perframe_tags - self.SharedTags = shared_tags + self._SharedTags = shared_tags self.TargetDataset = multi_frame_output self.SingleFrameSet = sf_datasets self.EarliestDate = DA('00010101') @@ -574,8 +574,8 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: return False def _mark_tag_as_used(self, tg: Tag) -> None: - if tg in self.SharedTags: - self.SharedTags[tg] = True + if tg in self._SharedTags: + self._SharedTags[tg] = True elif tg in self.ExcludedFromPerFrameTags: self.ExcludedFromPerFrameTags[tg] = True elif tg in self._PerFrameTags: @@ -635,8 +635,11 @@ 
def _get_shared_item(self) -> Dataset: return self.TargetDataset[sf_tg].value[0] def _get_or_create_attribute( - self, src: Dataset, kw: str, default: Any) -> DataElement: - tg = tag_for_keyword(kw) + self, src: Dataset, kw: Union[str, Tag], default: Any) -> DataElement: + if kw is str: + tg = tag_for_keyword(kw) + else: + tg = kw if kw in src: a = deepcopy(src[kw]) else: @@ -709,7 +712,7 @@ def AddModule(self) -> None: module, excepted_attributes=except_at, check_not_to_be_empty=False, - check_not_to_be_perframe=False) # don't check the perframe set + check_not_to_be_perframe=True) # don't check the perframe set class CompositeInstanceContex(Abstract_MultiframeModuleAdder): @@ -818,7 +821,7 @@ def AddModule(self) -> None: module_name, excepted_attributes=excpeted_a, check_not_to_be_empty=False, - check_not_to_be_perframe=False) # don't check the perframe set + check_not_to_be_perframe=True) # don't check the perframe set class CommonCTMRPETImageDescriptionMacro(Abstract_MultiframeModuleAdder): @@ -923,7 +926,7 @@ def AddModule(self) -> None: # ct_mr = CommonCTMRImageDescriptionMacro(self.SingleFrameSet # , self.ExcludedFromPerFrameTags # , self._PerFrameTags - # , self.SharedTags + # , self._SharedTags # , self.TargetDataset) # ct_mr.AddModule() # Acquisition Number @@ -952,7 +955,10 @@ def AddModule(self) -> None: 'LossyImageCompressionRatio', 'LossyImageCompressionMethod'] for kw in attribs_to_be_added: - self._copy_attrib_if_present(ref_dataset, self.TargetDataset, kw) + self._copy_attrib_if_present( + ref_dataset, self.TargetDataset, kw, + check_not_to_be_perframe=True, + check_not_to_be_empty=False) if tag_for_keyword('PresentationLUTShape') not in self._PerFrameTags: # actually should really invert the pixel data if MONOCHROME1, # since only MONOCHROME2 is permitted : ( @@ -1030,8 +1036,12 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) def AddModule(self) -> None: - pass # David's code doesn't hold anything for this module ... 
should ask him + kw = 'ContentQualification' + tg = tag_for_keyword(kw) + elem = self._get_or_create_attribute( + self.SingleFrameSet[0], kw, 'RESEARCH') + self.TargetDataset[tg] = elem class EnhancedMRImageModule(Abstract_MultiframeModuleAdder): @@ -1110,11 +1120,12 @@ def __init__(self, sf_datasets: Sequence[Dataset], multi_frame_output) def AddModule(self) -> None: - self._copy_attrib_if_present( - self.SingleFrameSet, - self.TargetDataset, - 'AcquisitionContextSequence', - check_not_to_be_perframe=True) # check not to be in perframe + tg = tag_for_keyword('AcquisitionContextSequence') + if tg not in self._PerFrameTags: + self.TargetDataset[tg] = self._get_or_create_attribute( + self.SingleFrameSet[0], + tg, + None) class FrameAnatomyFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1176,7 +1187,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: def AddModule(self) -> None: if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1239,7 +1250,7 @@ def _add_module_to_functional_group( def AddModule(self) -> None: if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1288,7 +1299,7 @@ def _add_module_to_functional_group( def AddModule(self) -> None: if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1335,7 +1346,7 @@ def _add_module_to_functional_group( def AddModule(self) -> None: if (not 
self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1397,7 +1408,7 @@ def _add_module_to_functional_group( def AddModule(self) -> None: if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1482,7 +1493,7 @@ def _add_module_to_functional_group( def AddModule(self) -> None: if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1523,7 +1534,7 @@ def _add_module_to_functional_group( def AddModule(self) -> None: if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1579,7 +1590,7 @@ def _add_module_to_functional_group( def AddModule(self) -> None: if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self.SharedTags) or + (self._contains_right_attributes(self._SharedTags) or self._contains_right_attributes(self.ExcludedFromPerFrameTags)) ): item = self._get_shared_item() @@ -1610,19 +1621,53 @@ def __init__(self, sf_datasets: Sequence[Dataset], def _add_module_to_functional_group( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() - for tg, used in self._PerFrameTags.items(): - if used not in self.ExcludedFromFunctionalGroupsTags: - 
self._copy_attrib_if_present(src_fg, - item, - tg, - check_not_to_be_perframe=False, - check_not_to_be_empty=False) + for tg in self._eligeible_tags: + self._copy_attrib_if_present(src_fg, + item, + tg, + check_not_to_be_perframe=False, + check_not_to_be_empty=False) kw = 'UnassignedPerFrameConvertedAttributesSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq + def _add_largest_smallest_pixle_value(self) -> None: + ltg = tag_for_keyword("LargestImagePixelValue") + from sys import float_info + lval = float_info.min + if ltg in self._PerFrameTags: + for frame in self.SingleFrameSet: + if ltg in frame: + nval = frame[ltg].value + else: + continue + lval = nval if lval < nval else lval + if lval > float_info.min: + self.TargetDataset[ltg] = DataElement(ltg, 'SS', int(lval)) + # ========================== + stg = tag_for_keyword("SmallestImagePixelValue") + sval = float_info.max + if stg in self._PerFrameTags: + for frame in self.SingleFrameSet: + if stg in frame: + nval = frame[stg].value + else: + continue + sval = nval if sval < nval else sval + if sval < float_info.max: + self.TargetDataset[stg] = DataElement(stg, 'SS', int(sval)) + + stg = "SmallestImagePixelValue" + def AddModule(self) -> None: + # first collect all not used tags + # note that this is module is order dependent + self._add_largest_smallest_pixle_value() + self._eligeible_tags: List[Tag] = [] + for tg, used in self._PerFrameTags.items(): + if not used and tg not in self.ExcludedFromFunctionalGroupsTags: + self._eligeible_tags.append(tg) for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1648,8 +1693,9 @@ def __init__(self, sf_datasets: Sequence[Dataset], def _add_module_to_functional_group( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() - for tg, used in self.SharedTags.items(): + for tg, used in self._SharedTags.items(): if (not used and + tg not in 
self.TargetDataset and tg not in self.ExcludedFromFunctionalGroupsTags): self._copy_attrib_if_present(src_fg, item, @@ -1666,6 +1712,44 @@ def AddModule(self) -> None: self._add_module_to_functional_group(self.SingleFrameSet[0], item) +class EmptyType2Attributes(Abstract_MultiframeModuleAdder): + + def __init__(self, sf_datasets: Sequence[Dataset], + excluded_from_perframe_tags: dict, + excluded_from_functional_tags: dict, + perframe_tags: dict, + shared_tags: dict, + multi_frame_output: Dataset): + super().__init__( + sf_datasets, + excluded_from_perframe_tags, + excluded_from_functional_tags, + perframe_tags, + shared_tags, + multi_frame_output) + + def CreateEmptyElement(self, tg: Tag) -> DataElement: + return DataElement(tg, dictionary_VR(tg), None) + + def AddModule(self) -> None: + iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[ + self.TargetDataset['SOPClassUID'].value] + modules = IOD_MODULE_MAP[iod_name] + for module in modules: + if module['usage'] == 'M': + mod_key = module['key'] + attrib_list = MODULE_ATTRIBUTE_MAP[mod_key] + for a in attrib_list: + if len(a['path']) == 0 and a['type'] == '2': + tg = tag_for_keyword(a['keyword']) + if (tg not in self.SingleFrameSet[0] and + tg not in self.TargetDataset and + tg not in self._PerFrameTags and + tg not in self._SharedTags): + self.TargetDataset[tg] =\ + self.CreateEmptyElement(tg) + + class ConversionSourceFunctionalGroup(Abstract_MultiframeModuleAdder): def __init__(self, sf_datasets: Sequence[Dataset], @@ -1725,6 +1809,95 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime + self._slices: list = [] + self._tolerance = 0.0001 + self._slice_location_map: dict = {} + + def _build_slices_geometry(self) -> None: + frame_count = len(self.SingleFrameSet) + for i in range(0, frame_count): + curr_frame = self.SingleFrameSet[i] + ImagePositionPatient_v = None \ + if 'ImagePositionPatient' not in curr_frame\ + else 
curr_frame['ImagePositionPatient'].value + ImageOrientationPatient_v = None \ + if 'ImageOrientationPatient' not in curr_frame\ + else curr_frame['ImageOrientationPatient'].value + PixelSpacing_v = None \ + if 'PixelSpacing' not in curr_frame\ + else curr_frame['PixelSpacing'].value + SliceThickness_v = 0.0 \ + if 'SliceThickness' not in curr_frame\ + else curr_frame['SliceThickness'].value + # SliceLocation_v = None \ + # if 'SliceLocation' not in curr_frame\ + # else curr_frame['SliceLocation'].value + Rows_v = 0 \ + if 'Rows' not in curr_frame\ + else curr_frame['Rows'].value + Columns_v = 0 \ + if 'Columns' not in curr_frame\ + else curr_frame['Columns'].value + if (ImageOrientationPatient_v is not None and + ImagePositionPatient_v is not None and + PixelSpacing_v is not None): + row = array(ImageOrientationPatient_v[0:3]) + col = array(ImageOrientationPatient_v[3:]) + voxel_spaceing = array([PixelSpacing_v[0], + PixelSpacing_v[1], + SliceThickness_v]) + tpl = array(ImagePositionPatient_v) + dim = (Rows_v, Columns_v, 1) + self._slices.append(GeometryOfSlice(row, col, + tpl, voxel_spaceing, dim)) + else: + print("Error in geometri ...") + self._slices = [] # clear the slices + break + + def _are_all_slices_parallel(self) -> bool: + slice_count = len(self._slices) + if slice_count >= 2: + last_slice = self._slices[0] + for i in range(1, slice_count): + curr_slice = self._slices[i] + if not GeometryOfSlice.AreParallel( + curr_slice, last_slice, self._tolerance): + return False + last_slice = curr_slice + return True + elif slice_count == 1: + return True + else: + return False + + def _add_stack_info(self) -> None: + self._build_slices_geometry() + round_digits = int(ceil(-log10(self._tolerance))) + if self._are_all_slices_parallel(): + self._slice_location_map = {} + for idx, s in enumerate(self._slices): + dist = round(s.GetDistanceAlongOrigin(), round_digits) + if dist in self._slice_location_map: + self._slice_location_map[dist].append(idx) + else: + 
self._slice_location_map[dist] = [idx] + distance_index = 1 + frame_content_tg = tag_for_keyword("FrameContentSequence") + for loc, idxs in sorted(self._slice_location_map.items()): + if len(idxs) != 1: + print('Error') + for frame_index in idxs: + frame = self._get_perframe_item(frame_index) + new_item = frame[frame_content_tg].value[0] + new_item["StackID"] = self._get_or_create_attribute( + self.SingleFrameSet[0], + "StackID", "0") + new_item["InStackPositionNumber"] =\ + self._get_or_create_attribute( + self.SingleFrameSet[0], + "InStackPositionNumber", distance_index) + distance_index += 1 def _contains_right_attributes(self, tags: dict) -> bool: AcquisitionDateTime_tg = tag_for_keyword('AcquisitionDateTime') @@ -1737,26 +1910,37 @@ def _contains_right_attributes(self, tags: dict) -> bool: def _add_module_to_functional_group( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() - item['AcquisitionNumber'] = self._get_or_create_attribute( - src_fg, 'AcquisitionNumber', 0) + fan_tg = tag_for_keyword('FrameAcquisitionNumber') + an_tg = tag_for_keyword('AcquisitionNumber') + if an_tg in src_fg: + fan_val = src_fg[an_tg].value + else: + fan_val = 0 + item[fan_tg] = DataElement(fan_tg, dictionary_VR(fan_tg), fan_val) + self._mark_tag_as_used(an_tg) + # ---------------------------------------------------------------- AcquisitionDateTime_a = self._get_or_create_attribute( src_fg, 'AcquisitionDateTime', self.EarliestDateTime) + # chnage the keyword to FrameAcquisitionDateTime: + FrameAcquisitionDateTime_a = DataElement( + tag_for_keyword('FrameAcquisitionDateTime'), + 'DT', AcquisitionDateTime_a.value) AcquisitionDateTime_is_perframe = self._contains_right_attributes( self._PerFrameTags) - if AcquisitionDateTime_a.value == self.EarliestDateTime: + if FrameAcquisitionDateTime_a.value == self.EarliestDateTime: AcquisitionDate_a = self._get_or_create_attribute( src_fg, 'AcquisitionDate', self.EarliestDate) AcquisitionTime_a = self._get_or_create_attribute( 
src_fg, 'AcquisitionTime', self.EarliestTime) d = AcquisitionDate_a.value t = AcquisitionTime_a.value - AcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + - t.strftime('%H%M%S'))) - if AcquisitionDateTime_a.value > self.EarliestDateTime: - if (AcquisitionDateTime_a.value < + FrameAcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + + t.strftime('%H%M%S'))) + if FrameAcquisitionDateTime_a.value > self.EarliestDateTime: + if (FrameAcquisitionDateTime_a.value < self.EarliestFrameAcquisitionDateTime): self.EarliestFrameAcquisitionDateTime =\ - AcquisitionDateTime_a.value + FrameAcquisitionDateTime_a.value if not AcquisitionDateTime_is_perframe: if ('TriggerTime' in src_fg and 'FrameReferenceDateTime' not in src_fg): @@ -1768,12 +1952,12 @@ def _add_module_to_functional_group( # this is so rediculous. I'm not able to cnvert # the DT to datetime (cast to superclass) d_t = datetime.combine( - AcquisitionDateTime_a.value.date(), - AcquisitionDateTime_a.value.time()) + FrameAcquisitionDateTime_a.value.date(), + FrameAcquisitionDateTime_a.value.time()) d_t = d_t + t_delta - AcquisitionDateTime_a.value =\ + FrameAcquisitionDateTime_a.value =\ DT(d_t.strftime('%Y%m%d%H%M%S')) - item['AcquisitionDateTime'] = AcquisitionDateTime_a + item['FrameAcquisitionDateTime'] = FrameAcquisitionDateTime_a # --------------------------------- self._copy_attrib_if_present( src_fg, item, "AcquisitionDuration", @@ -1795,7 +1979,7 @@ def _add_module_to_functional_group( dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), [item]) # Also we want to add the earliest frame acq date time to the multiframe: - def AddModule(self) -> None: + def _add_acquisition_info(self) -> None: for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) self._add_module_to_functional_group( @@ -1806,6 +1990,10 @@ def AddModule(self) -> None: tag_for_keyword(kw), 'DT', self.EarliestFrameAcquisitionDateTime) + def AddModule(self) -> None: + self._add_acquisition_info() + 
self._add_stack_info() + class PixelData(Abstract_MultiframeModuleAdder): @@ -1834,11 +2022,30 @@ def _is_other_word_vr(self, vr: str) -> bool: # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') # return ImagePositionPatient_tg in tags + def _copy_data(self, src: bytearray, word_data: bool = False) -> None: + # Make sure that the length complies by row and col + if word_data: + des = self._word_data + ByteCount = 2 * self._number_of_pixels_per_frame + else: + des = self._byte_data + ByteCount = self._number_of_pixels_per_frame + if len(src) != ByteCount: + tmp: bytearray = bytearray(ByteCount) + tmp[:len(src)] = src[:] + src = tmp + des.extend(src) + def AddModule(self) -> None: kw = 'NumberOfFrames' tg = tag_for_keyword(kw) - FrameCount = len(self.SingleFrameSet) - self.TargetDataset[kw] = DataElement(tg, dictionary_VR(tg), FrameCount) + self._frame_count = len(self.SingleFrameSet) + self.TargetDataset[kw] =\ + DataElement(tg, dictionary_VR(tg), self._frame_count) + row = self.SingleFrameSet[0]["Rows"].value + col = self.SingleFrameSet[0]["Columns"].value + self._number_of_pixels_per_frame = row * col + self._number_of_pixels = row * col * self._frame_count kw = "PixelData" for i in range(0, len(self.SingleFrameSet)): PixelData_a = self.SingleFrameSet[i][kw] @@ -1847,13 +2054,13 @@ def AddModule(self) -> None: raise TypeError( 'Cannot mix OB and OW Pixel Data ' 'VR from different frames') - self._byte_data.extend(PixelData_a.value) + self._copy_data(PixelData_a.value, False) elif self._is_other_word_vr(PixelData_a.VR): if len(self._byte_data) != 0: raise TypeError( 'Cannot mix OB and OW Pixel Data ' 'VR from different frames') - self._word_data.extend(PixelData_a.value) + self._copy_data(PixelData_a.value, True) else: raise TypeError( 'Cannot mix OB and OW Pixel Data VR from different frames') @@ -1886,18 +2093,16 @@ def __init__(self, sf_datasets: Sequence[Dataset], def AddModule(self) -> None: for i in range(0, len(self.SingleFrameSet)): src = 
self.SingleFrameSet[i] - ContentDate_a = self._get_or_create_attribute( - src, 'ContentDate', self.EarliestDate) - ContentTime_a = self._get_or_create_attribute( - src, 'ContentTime', self.EarliestTime) - d = ContentDate_a.value - t = ContentTime_a.value - value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S')) + kw = 'ContentDate' + d = DA(self.FarthestFutureDate if kw not in src else src[kw].value) + kw = 'ContentTime' + t = TM(self.FarthestFutureTime if kw not in src else src[kw].value) + value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S.%f')) if self.EarliestContentDateTime > value: self.EarliestContentDateTime = value if self.EarliestContentDateTime < self.FarthestFutureDateTime: n_d = DA(self.EarliestContentDateTime.date().strftime('%Y%m%d')) - n_t = TM(self.EarliestContentDateTime.time().strftime('%H%M%S')) + n_t = TM(self.EarliestContentDateTime.time().strftime('%H%M%S.%f')) kw = 'ContentDate' self.TargetDataset[kw] = DataElement( tag_for_keyword(kw), 'DA', n_d) @@ -1950,9 +2155,10 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def _add_data_element_to_target(self, kw: str, value: Any) -> None: + def _add_data_element_to_target(self, target: Dataset, + kw: str, value: Any) -> None: tg = tag_for_keyword(kw) - self.TargetDataset[kw] = DataElement(tg, dictionary_VR(tg), value) + target[kw] = DataElement(tg, dictionary_VR(tg), value) def AddModule(self) -> None: CodeValue_tg = tag_for_keyword('CodeValue') @@ -1974,129 +2180,29 @@ def AddModule(self) -> None: PurposeOfReferenceCode_seq = DataElement( tag_for_keyword('PurposeOfReferenceCodeSequence'), 'SQ', [PurposeOfReferenceCode_item]) - self.TargetDataset[ + item: Dataset = Dataset() + item[ 'PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq - self._add_data_element_to_target("Manufacturer", 'HighDicom') - self._add_data_element_to_target("InstitutionName", 'HighDicom') + self._add_data_element_to_target(item, "Manufacturer", 'HighDicom') + 
self._add_data_element_to_target(item, "InstitutionName", 'HighDicom') self._add_data_element_to_target( + item, "InstitutionalDepartmentName", 'Software Development') self._add_data_element_to_target( + item, "InstitutionAddress", 'Radialogy Department, B&W Hospital, Boston, MA') self._add_data_element_to_target( + item, "SoftwareVersions", '1.4') # get sw version self._add_data_element_to_target( + item, "ContributionDescription", 'Legacy Enhanced Image created from Classic Images') - - -class StackInformation(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - self._slices: list = [] - self._tolerance = 0.0001 - self._slice_location_map: dict = {} - - def _build_slices_geometry(self) -> None: - frame_count = len(self.SingleFrameSet) - for i in range(0, frame_count): - curr_frame = self.SingleFrameSet[i] - ImagePositionPatient_v = None \ - if 'ImagePositionPatient' not in curr_frame\ - else curr_frame['ImagePositionPatient'].value - ImageOrientationPatient_v = None \ - if 'ImageOrientationPatient' not in curr_frame\ - else curr_frame['ImageOrientationPatient'].value - PixelSpacing_v = None \ - if 'PixelSpacing' not in curr_frame\ - else curr_frame['PixelSpacing'].value - SliceThickness_v = 0.0 \ - if 'SliceThickness' not in curr_frame\ - else curr_frame['SliceThickness'].value - # SliceLocation_v = None \ - # if 'SliceLocation' not in curr_frame\ - # else curr_frame['SliceLocation'].value - Rows_v = 0 \ - if 'Rows' not in curr_frame\ - else curr_frame['Rows'].value - Columns_v = 0 \ - if 'Columns' not in curr_frame\ - else curr_frame['Columns'].value - if (ImageOrientationPatient_v is not None and - ImagePositionPatient_v is 
not None and - PixelSpacing_v is not None): - row = array(ImageOrientationPatient_v[0:3]) - col = array(ImageOrientationPatient_v[3:]) - voxel_spaceing = array([PixelSpacing_v[0], - PixelSpacing_v[1], - SliceThickness_v]) - tpl = array(ImagePositionPatient_v) - dim = (Rows_v, Columns_v, 1) - self._slices.append(GeometryOfSlice(row, col, - tpl, voxel_spaceing, dim)) - else: - print("Error in geometri ...") - self._slices = [] # clear the slices - break - - def _are_all_slices_parallel(self) -> bool: - slice_count = len(self._slices) - if slice_count >= 2: - last_slice = self._slices[0] - for i in range(1, slice_count): - curr_slice = self._slices[i] - if not GeometryOfSlice.AreParallel( - curr_slice, last_slice, self._tolerance): - return False - last_slice = curr_slice - return True - elif slice_count == 1: - return True - else: - return False - - def AddModule(self) -> None: - self._build_slices_geometry() - round_digits = int(ceil(-log10(self._tolerance))) - if self._are_all_slices_parallel(): - for idx, s in enumerate(self._slices): - dist = round(s.GetDistanceAlongOrigin(), round_digits) - if dist in self._slice_location_map: - self._slice_location_map[dist].append(idx) - else: - self._slice_location_map[dist] = [idx] - distance_index = 0 - for loc, idxs in sorted(self._slice_location_map.items()): - if len(idxs) != 1: - print('Error') - return - frame_index = idxs[0] - frame = self._get_perframe_item(frame_index) - new_item = Dataset() - new_item["StackID"] = self._get_or_create_attribute( - self.SingleFrameSet[0], - "StackID", "0") - new_item["InStackPositionNumber"] =\ - self._get_or_create_attribute( - self.SingleFrameSet[0], - "InStackPositionNumber", distance_index) - tg = tag_for_keyword("FrameContentSequence") - frame[tg] = DataElement(tg, "SQ", [new_item]) - distance_index += 1 + tg = tag_for_keyword('ContributingEquipmentSequence') + self.TargetDataset[tg] = DataElement(tg, 'SQ', [item]) class LegacyConvertedEnhanceImage(SOPClass): @@ -2163,7 +2269,7 
@@ def __init__( frame_set.ExcludedFromPerFrameTags) self._PerFrameTags = self._get_tag_used_dictionary( frame_set.PerFrameTags) - self.SharedTags = self._get_tag_used_dictionary( + self._SharedTags = self._get_tag_used_dictionary( frame_set.SharedTags) self.ExcludedFromFunctionalGroupsTags = { tag_for_keyword('SpecificCharacterSet'): False} @@ -2173,7 +2279,7 @@ def __init__( new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): new_ds.append(item) - self.legacy_datasets = new_ds + self._legacy_datasets = new_ds if (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-ct-image'): self.AddBuildBlocksForCT() @@ -2217,141 +2323,148 @@ def AddCommonCT_PET_MR_BuildBlocks(self) -> None: self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), CompositeInstanceContex( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), EnhancedCommonImageModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), AcquisitionContextModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), FrameAnatomyFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), PixelMeasuresFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), PlaneOrientationFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), 
PlanePositionFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), FrameVOILUTFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), PixelValueTransformationFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), ReferencedImageFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), ConversionSourceFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), FrameContentFunctionalGroup( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), PixelData( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), ContentDateTime( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), InstanceCreationDateTime( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), ContributingEquipmentSequence( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), UnassignedPerFrame( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, 
self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), UnassignedShared( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, - self), - StackInformation( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self.SharedTags, - self) + self._SharedTags, + self) # , + # StackInformation( + # self._legacy_datasets, + # self.ExcludedFromPerFrameTags, + # self.ExcludedFromFunctionalGroupsTags, + # self._PerFrameTags, + # self._SharedTags, + # self), + # EmptyType2Attributes( + # self._legacy_datasets, + # self.ExcludedFromPerFrameTags, + # self.ExcludedFromFunctionalGroupsTags, + # self._PerFrameTags, + # self._SharedTags, + # self) ] for b in Blocks: self.AddNewBuildBlock(b) @@ -2363,7 +2476,7 @@ def AddCTSpecificBuildBlocks(self) -> None: self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self, 'CT'), EnhancedCTImageModule( @@ -2371,14 +2484,14 @@ def AddCTSpecificBuildBlocks(self) -> None: self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), ContrastBolusModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self) ] for b in Blocks: @@ -2391,7 +2504,7 @@ def AddMRSpecificBuildBlocks(self) -> None: self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self, 'MR'), EnhancedMRImageModule( @@ -2399,14 +2512,14 @@ def AddMRSpecificBuildBlocks(self) -> None: self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self), ContrastBolusModule( self._legacy_datasets, self.ExcludedFromPerFrameTags, 
self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self) ] for b in Blocks: @@ -2419,7 +2532,7 @@ def AddPETSpecificBuildBlocks(self) -> None: self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self, 'PET'), EnhancedPETImageModule( @@ -2427,7 +2540,7 @@ def AddPETSpecificBuildBlocks(self) -> None: self.ExcludedFromPerFrameTags, self.ExcludedFromFunctionalGroupsTags, self._PerFrameTags, - self.SharedTags, + self._SharedTags, self) ] for b in Blocks: @@ -2467,7 +2580,9 @@ def __init__(self, self.Dim = dimensions def GetNormalVector(self) -> ndarray: - return cross(self.RowVector, self.ColVector) + n: ndarray = cross(self.RowVector, self.ColVector) + n[2] = -n[2] + return n def GetDistanceAlongOrigin(self) -> float: n = self.GetNormalVector() @@ -2522,6 +2637,8 @@ def is_equal_float(x1: float, x2: float) -> bool: else: v11 = v1 v22 = v2 + if len(v11) != len(v22): + return False for xx, yy in zip(v11, v22): if type(xx) == DSfloat or type(xx) == float: if not is_equal_float(xx, yy): @@ -2581,6 +2698,15 @@ def PerFrameTags(self) -> List[Tag]: def SharedTags(self) -> List[Tag]: return self._SharedTags[:] + def GetSOPInstanceUIDList(self) -> list: + OutputList: list = [] + for f in self._Frames: + OutputList.append(f.SOPInstanceUID) + return OutputList + + def GetSOPClassUID(self) -> UID: + return self._Frames[0].SOPClassUID + def _find_per_frame_and_shared_tags(self) -> None: rough_shared: dict = {} sfs = self.Frames @@ -2593,7 +2719,8 @@ def _find_per_frame_and_shared_tags(self) -> None: self._istag_excluded_from_perframe(ttag) and ttag != tag_for_keyword('PixelData')): elem = ds[ttag] - self._PerFrameTags.append(ttag) + if ttag not in self._PerFrameTags: + self._PerFrameTags.append(ttag) if ttag in rough_shared: rough_shared[ttag].append(elem.value) else: From 505d272e7436771229535e2461e641b0b70110df Mon Sep 17 00:00:00 2001 From: AfshinMessiah 
Date: Thu, 27 Aug 2020 17:58:06 +0000 Subject: [PATCH 12/44] Debugged for exception on string conversion to DA, DT, TM --- src/highdicom/legacy/sop.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 4f22ba3b..5a3dd9cb 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -646,11 +646,21 @@ def _get_or_create_attribute( a = DataElement(tg, dictionary_VR(tg), default) from pydicom.valuerep import DT, TM, DA if a.VR == 'DA' and type(a.value) == str: - a.value = DA(a.value) + try: + a.value = DA(a.value) + except:git + a.value = DA(default) if a.VR == 'DT' and type(a.value) == str: - a.value = DT(a.value) + try: + a.value = DT(a.value) + except: + a.value = DT(default) if a.VR == 'TM' and type(a.value) == str: - a.value = TM(a.value) + try: + a.value = TM(a.value) + except: + a.value = TM(default) + self._mark_tag_as_used(tg) return a From cd9e8c578c5bc4f8b56394459941a3cea2a2a847 Mon Sep 17 00:00:00 2001 From: AfshinMessiah Date: Thu, 27 Aug 2020 18:03:22 +0000 Subject: [PATCH 13/44] mend --- src/highdicom/legacy/sop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 5a3dd9cb..77b31d56 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -648,7 +648,7 @@ def _get_or_create_attribute( if a.VR == 'DA' and type(a.value) == str: try: a.value = DA(a.value) - except:git + except: a.value = DA(default) if a.VR == 'DT' and type(a.value) == str: try: From c3f27d6487e81471c312eb12771a419a3065cc21 Mon Sep 17 00:00:00 2001 From: AfshinMessiah Date: Tue, 1 Sep 2020 17:34:03 +0000 Subject: [PATCH 14/44] mend --- src/highdicom/legacy/sop.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 77b31d56..6c726bf2 100644 --- a/src/highdicom/legacy/sop.py +++ 
b/src/highdicom/legacy/sop.py @@ -1944,8 +1944,9 @@ def _add_module_to_functional_group( src_fg, 'AcquisitionTime', self.EarliestTime) d = AcquisitionDate_a.value t = AcquisitionTime_a.value - FrameAcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + - t.strftime('%H%M%S'))) + # FrameAcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + + # t.strftime('%H%M%S'))) + FrameAcquisitionDateTime_a.value = DT(str(d) + str(t)) if FrameAcquisitionDateTime_a.value > self.EarliestDateTime: if (FrameAcquisitionDateTime_a.value < self.EarliestFrameAcquisitionDateTime): From 17d39e69c7f453b5fceba835d4e8fedacb9aa7da Mon Sep 17 00:00:00 2001 From: Afshin Date: Mon, 7 Sep 2020 20:57:18 -0400 Subject: [PATCH 15/44] Fixed issues related to Marcus minor comments --- src/highdicom/legacy/sop.py | 1417 +++++++++++------------------------ 1 file changed, 458 insertions(+), 959 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 6c726bf2..55f1170d 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -4,20 +4,24 @@ from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union, Callable from numpy import log10, array, ceil, cross, dot, ndarray -from pydicom.datadict import tag_for_keyword, dictionary_VR +from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag from pydicom.dataset import Dataset from pydicom.tag import Tag from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DicomSequence from pydicom.multival import MultiValue from datetime import date, datetime, time, timedelta -from pydicom.valuerep import DT, DA, TM +from pydicom.valuerep import DT, DA, TM, DSfloat from copy import deepcopy from pydicom.uid import UID from highdicom.base import SOPClass +from sys import float_info from highdicom.legacy import SOP_CLASS_UIDS from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP +from abc import 
ABC, abstractmethod + + logger = logging.getLogger(__name__) LEGACY_ENHANCED_SOP_CLASS_UID_MAP = { # CT Image Storage @@ -34,510 +38,6 @@ } -def _convert_legacy_to_enhanced( - sf_datasets: Sequence[Dataset], - mf_dataset: Optional[Dataset] = None -) -> Dataset: - """Converts one or more MR, CT or PET Image instances into one - Legacy Converted Enhanced MR/CT/PET Image instance by copying information - from `sf_datasets` into `mf_dataset`. - Parameters - ---------- - sf_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of single-frame legacy image instances - mf_dataset: pydicom.dataset.Dataset, optional - DICOM data set of multi-frame enhanced image instance - Returns - ------- - pydicom.dataset.Dataset - DICOM data set of enhanced multi-frame image instance - Note - ---- - Frames will be included into the Pixel Data element in the order in - which instances are provided via `sf_datasets`. - """ - try: - ref_ds = sf_datasets[0] - except IndexError: - raise ValueError('No data sets of single-frame legacy images provided.') - if mf_dataset is None: - mf_dataset = Dataset() - transfer_syntaxes = set() - series = set() - studies = set() - modalities = set() - for ds in sf_datasets: - transfer_syntaxes.add(ds.file_meta.TransferSyntaxUID) - series.add(ds.SeriesInstanceUID) - studies.add(ds.StudyInstanceUID) - modalities.add(ds.Modality) - if len(series) > 1: - raise ValueError( - 'All instances must belong to the same series.') - if len(studies) > 1: - raise ValueError( - 'All instances must belong to the same study.') - if len(modalities) > 1: - raise ValueError( - 'All instances must have the same modality.') - if len(transfer_syntaxes) > 1: - raise ValueError( - 'All instances must have the same transfer syntaxes.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - mf_dataset.NumberOfFrames = len(sf_datasets) - # We will ignore some attributes, because they will get assigned new - # values in the legacy converted enhanced image 
instance. - ignored_attributes = { - tag_for_keyword('NumberOfFrames'), - tag_for_keyword('InstanceNumber'), - tag_for_keyword('SOPClassUID'), - tag_for_keyword('SOPInstanceUID'), - tag_for_keyword('PixelData'), - tag_for_keyword('SeriesInstanceUID'), - } - mf_attributes = [] - iod_key = _SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] - for module_item in IOD_MODULE_MAP[iod_key]: - module_key = module_item['key'] - for attr_item in MODULE_ATTRIBUTE_MAP[module_key]: - # Only root-level attributes - if len(attr_item['path']) > 0: - continue - tag = tag_for_keyword(attr_item['keyword']) - if tag in ignored_attributes: - continue - mf_attributes.append(tag) - # Assign attributes that are not defined at the root level of the - # Lecacy Converted Enhanced MR/CT/PET Image IOD to the appropriate - # sequence attributes of the SharedFunctinoalGroupsSequence or - # PerFrameFunctionalGroupsSequence attributes. Collect all unassigned - # attributes (we will deal with them later on). - # IODs only cover the modules, but not functional group macros. - # Therefore, we need to handle those separately. 
- assigned_attributes = { - # shared - tag_for_keyword('ImageOrientationPatient'), - tag_for_keyword('PixelSpacing'), - tag_for_keyword('SliceThickness'), - tag_for_keyword('SpacingBetweenSlices'), - # per-frame - tag_for_keyword('ImageType'), - tag_for_keyword('AcquisitionDate'), - tag_for_keyword('AcquisitionTime'), - tag_for_keyword('InstanceNumber'), - tag_for_keyword('SOPClassUID'), - tag_for_keyword('SOPInstanceUID'), - tag_for_keyword('ImagePositionPatient'), - tag_for_keyword('WindowCenter'), - tag_for_keyword('WindowWidth'), - tag_for_keyword('ReferencedImageSequence'), - tag_for_keyword('SourceImageSequence'), - tag_for_keyword('BodyPartExamined'), - tag_for_keyword('IrradiationEventUID'), - tag_for_keyword('RescaleIntercept'), - tag_for_keyword('RescaleSlope'), - tag_for_keyword('RescaleType'), - } - if ref_ds.ImageType[0] == 'ORIGINAL': - mf_dataset.VolumeBasedCalculationTechnique = 'NONE' - else: - mf_dataset.VolumeBasedCalculationTechnique = 'MIXED' - pixel_representation = sf_datasets[0].PixelRepresentation - volumetric_properties = 'VOLUME' - unique_image_types = set() - unassigned_dataelements: Dict[str, List[Dataset]] = defaultdict(list) - # Per-Frame Functional Groups - perframe_items = [] - for i, ds in enumerate(sf_datasets): - perframe_item = Dataset() - # Frame Content (M) - frame_content_item = Dataset() - if 'AcquisitionDate' in ds and 'AcquisitionTime' in ds: - frame_content_item.FrameAcquisitionDateTime = '{}{}'.format( - ds.AcquisitionDate, - ds.AcquisitionTime) - frame_content_item.FrameAcquisitionNumber = ds.InstanceNumber - perframe_item.FrameContentSequence = [ - frame_content_item, - ] - # Plane Position (Patient) (M) - plane_position_item = Dataset() - plane_position_item.ImagePositionPatient = ds.ImagePositionPatient - perframe_item.PlanePositionSequence = [ - plane_position_item, - ] - frame_type = list(ds.ImageType) - if len(frame_type) < 4: - if frame_type[0] == 'ORIGINAL': - frame_type.append('NONE') - else: - 
logger.warn('unknown derived pixel contrast') - frame_type.append('OTHER') - unique_image_types.add(tuple(frame_type)) - frame_type_item = Dataset() - frame_type_item.FrameType = frame_type - frame_type_item.PixelRepresentation = pixel_representation - frame_type_item.VolumetricProperties = volumetric_properties - if frame_type[0] == 'ORIGINAL': - frame_type_item.FrameVolumeBasedCalculationTechnique = 'NONE' - else: - frame_type_item.FrameVolumeBasedCalculationTechnique = 'MIXED' - if sop_class_uid == '1.2.840.10008.5.1.4.1.1.4.4': - # MR Image Frame Type (M) - perframe_item.MRImageFrameTypeSequence = [ - frame_type_item, - ] - elif sop_class_uid == '1.2.840.10008.5.1.4.1.1.2.2': - # CT Image Frame Type (M) - perframe_item.CTImageFrameTypeSequence = [ - frame_type_item, - ] - # CT Pixel Value Transformation (M) - pixel_val_transform_item = Dataset() - pixel_val_transform_item.RescaleIntercept = ds.RescaleIntercept - pixel_val_transform_item.RescaleSlope = ds.RescaleSlope - try: - pixel_val_transform_item.RescaleType = ds.RescaleType - except AttributeError: - pixel_val_transform_item.RescaleType = 'US' - perframe_item.PixelValueTransformationSequence = [ - pixel_val_transform_item, - ] - elif sop_class_uid == '1.2.840.10008.5.1.4.1.1.128.1': - # PET Image Frame Type (M) - perframe_item.PETImageFrameTypeSequence = [ - frame_type_item, - ] - # Frame VOI LUT (U) - try: - frame_voi_lut_item = Dataset() - frame_voi_lut_item.WindowCenter = ds.WindowCenter - frame_voi_lut_item.WindowWidth = ds.WindowWidth - perframe_item.FrameVOILUTSequence = [ - frame_voi_lut_item, - ] - except AttributeError: - pass - # Referenced Image (C) - try: - perframe_item.ReferencedImageSequence = \ - ds.ReferencedImageSequence - except AttributeError: - pass - # Derivation Image (C) - try: - perframe_item.SourceImageSequence = ds.SourceImageSequence - except AttributeError: - pass - # Frame Anatomy (C) - try: - frame_anatomy_item = Dataset() - frame_anatomy_item.BodyPartExamined = 
ds.BodyPartExamined - perframe_item.FrameAnatomySequence = [ - frame_anatomy_item, - ] - except AttributeError: - pass - # Image Frame Conversion Source (C) - conv_src_attr_item = Dataset() - conv_src_attr_item.ReferencedSOPClassUID = ds.SOPClassUID - conv_src_attr_item.ReferencedSOPInstanceUID = ds.SOPInstanceUID - perframe_item.ConversionSourceAttributesSequence = [ - conv_src_attr_item, - ] - # Irradiation Event Identification (C) - CT/PET only - try: - irradiation_event_id_item = Dataset() - irradiation_event_id_item.IrradiationEventUID = \ - ref_ds.IrradiationEventUID - perframe_item.IrradiationEventIdentificationSequence = [ - irradiation_event_id_item, - ] - except AttributeError: - pass - # Temporal Position (U) - try: - temporal_position_item = Dataset() - temporal_position_item.TemporalPositionTimeOffset = \ - ref_ds.TemporalPositionTimeOffset - perframe_item.TemporalPositionSequence = [ - temporal_position_item, - ] - except AttributeError: - pass - # Cardiac Synchronization (U # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.7 # noqa - # Contrast/Bolus Usage (U) - MR/CT onl # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.12 # noqa - # Respiratory Synchronization (U # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.17 # noqa - # Real World Value Mapping (U) - PET onl # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.11 # noqa - perframe_items.append(perframe_item) - # All other attributes that are not assigned to functional groups. 
- for tag, da in ds.items(): - if tag in assigned_attributes: - continue - elif tag in mf_attributes: - mf_dataset.add(da) - else: - if tag not in ignored_attributes: - unassigned_dataelements[tag].append(da) - # All remaining unassigned attributes will be collected in either the - # UnassignedSharedConvertedAttributesSequence or the - # UnassignedPerFrameConvertedAttributesSequence, depending on whether - # values vary accross frames (original single-frame image instances). - unassigned_shared_ca_item = Dataset() - unassigned_perframe_ca_items = [ - Dataset() - for _ in range(len(sf_datasets)) - ] - for tag, dataelements in unassigned_dataelements.items(): - values = [str(da.value) for da in dataelements] - unique_values = set(values) - if len(unique_values) == 1: - unassigned_shared_ca_item.add(dataelements[0]) - else: - for i, da in enumerate(dataelements): - unassigned_perframe_ca_items[i].add(da) - mf_dataset.ImageType = list(list(unique_image_types)[0]) - if len(unique_image_types) > 1: - mf_dataset.ImageType[2] = 'MIXED' - mf_dataset.PixelRepresentation = pixel_representation - mf_dataset.VolumetricProperties = volumetric_properties - # Shared Functional Groups - shared_item = Dataset() - # Pixel Measures (M) - pixel_measures_item = Dataset() - pixel_measures_item.PixelSpacing = ref_ds.PixelSpacing - pixel_measures_item.SliceThickness = ref_ds.SliceThickness - try: - pixel_measures_item.SpacingBetweenSlices = \ - ref_ds.SpacingBetweenSlices - except AttributeError: - pass - shared_item.PixelMeasuresSequence = [ - pixel_measures_item, - ] - # Plane Orientation (Patient) (M) - plane_orientation_item = Dataset() - plane_orientation_item.ImageOrientationPatient = \ - ref_ds.ImageOrientationPatient - shared_item.PlaneOrientationSequence = [ - plane_orientation_item, - ] - shared_item.UnassignedSharedConvertedAttributesSequence = [ - unassigned_shared_ca_item, - ] - mf_dataset.SharedFunctionalGroupsSequence = [ - shared_item, - ] - for i, ca_item in 
enumerate(unassigned_perframe_ca_items): - perframe_items[i].UnassignedPerFrameConvertedAttributesSequence = [ - ca_item, - ] - mf_dataset.PerFrameFunctionalGroupsSequence = perframe_items - mf_dataset.AcquisitionContextSequence = [] - # TODO: Encapsulated Pixel Data with compressed frame items. - # Create the Pixel Data element of the mulit-frame image instance using - # native encoding (simply concatenating pixels of individual frames) - # Sometimes there may be numpy types such as " > i2". The (* 1) hack - # ensures that pixel values have the correct integer type. - mf_dataset.PixelData = b''.join([ - (ds.pixel_array * 1).data for ds in sf_datasets - ]) - return mf_dataset - - -class LegacyConvertedEnhancedMRImage(SOPClass): - """SOP class for Legacy Converted Enhanced MR Image instances.""" - - def __init__( - self, - legacy_datasets: Sequence[Dataset], - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - **kwargs: Any) -> None: - """ - Parameters - ---------- - legacy_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of legacy single-frame image instances that should - be converted - series_instance_uid: str - UID of the series - series_number: Union[int, None] - Number of the series within the study - sop_instance_uid: str - UID that should be assigned to the instance - instance_number: int - Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of `highdicom.base.SOPClass` - """ - try: - ref_ds = legacy_datasets[0] - except IndexError: - raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'MR': - raise ValueError( - 'Wrong modality for conversion of legacy MR images.') - if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.4': - raise ValueError( - 'Wrong SOP class for conversion of legacy MR images.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - 
super().__init__( - study_instance_uid=ref_ds.StudyInstanceUID, - series_instance_uid=series_instance_uid, - series_number=series_number, - sop_instance_uid=sop_instance_uid, - sop_class_uid=sop_class_uid, - instance_number=instance_number, - manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, - transfer_syntax_uid=None, # FIXME: frame encoding - patient_id=ref_ds.PatientID, - patient_name=ref_ds.PatientName, - patient_birth_date=ref_ds.PatientBirthDate, - patient_sex=ref_ds.PatientSex, - accession_number=ref_ds.AccessionNumber, - study_id=ref_ds.StudyID, - study_date=ref_ds.StudyDate, - study_time=ref_ds.StudyTime, - referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs) - _convert_legacy_to_enhanced(legacy_datasets, self) - self.PresentationLUTShape = 'IDENTITY' - - -class LegacyConvertedEnhancedCTImage(SOPClass): - """SOP class for Legacy Converted Enhanced CT Image instances.""" - - def __init__( - self, - legacy_datasets: Sequence[Dataset], - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - **kwargs: Any) -> None: - """ - Parameters - ---------- - legacy_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of legacy single-frame image instances that should - be converted - series_instance_uid: str - UID of the series - series_number: Union[int, None] - Number of the series within the study - sop_instance_uid: str - UID that should be assigned to the instance - instance_number: int - Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of `highdicom.base.SOPClass` - """ - try: - ref_ds = legacy_datasets[0] - except IndexError: - raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'CT': - raise ValueError( - 'Wrong modality for conversion of legacy CT images.') - if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.2': - raise ValueError( - 'Wrong SOP class for 
conversion of legacy CT images.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( - study_instance_uid=ref_ds.StudyInstanceUID, - series_instance_uid=series_instance_uid, - series_number=series_number, - sop_instance_uid=sop_instance_uid, - sop_class_uid=sop_class_uid, - instance_number=instance_number, - manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, - transfer_syntax_uid=None, # FIXME: frame encoding - patient_id=ref_ds.PatientID, - patient_name=ref_ds.PatientName, - patient_birth_date=ref_ds.PatientBirthDate, - patient_sex=ref_ds.PatientSex, - accession_number=ref_ds.AccessionNumber, - study_id=ref_ds.StudyID, - study_date=ref_ds.StudyDate, - study_time=ref_ds.StudyTime, - referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs) - _convert_legacy_to_enhanced(legacy_datasets, self) - - -class LegacyConvertedEnhancedPETImage(SOPClass): - """SOP class for Legacy Converted Enhanced PET Image instances.""" - - def __init__( - self, - legacy_datasets: Sequence[Dataset], - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - **kwargs: Any) -> None: - """ - Parameters - ---------- - legacy_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of legacy single-frame image instances that should - be converted - series_instance_uid: str - UID of the series - series_number: Union[int, None] - Number of the series within the study - sop_instance_uid: str - UID that should be assigned to the instance - instance_number: int - Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of `highdicom.base.SOPClass` - """ - try: - ref_ds = legacy_datasets[0] - except IndexError: - raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'PT': - raise ValueError( - 'Wrong modality for conversion of legacy PET images.') - if ref_ds.SOPClassUID != 
'1.2.840.10008.5.1.4.1.1.128': - raise ValueError( - 'Wrong SOP class for conversion of legacy PET images.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( - study_instance_uid=ref_ds.StudyInstanceUID, - series_instance_uid=series_instance_uid, - series_number=series_number, - sop_instance_uid=sop_instance_uid, - sop_class_uid=sop_class_uid, - instance_number=instance_number, - manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, - transfer_syntax_uid=None, # FIXME: frame encoding - patient_id=ref_ds.PatientID, - patient_name=ref_ds.PatientName, - patient_birth_date=ref_ds.PatientBirthDate, - patient_sex=ref_ds.PatientSex, - accession_number=ref_ds.AccessionNumber, - study_id=ref_ds.StudyID, - study_date=ref_ds.StudyDate, - study_time=ref_ds.StudyTime, - referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs) - _convert_legacy_to_enhanced(legacy_datasets, self) - - -from abc import ABC, abstractmethod - - class Abstract_MultiframeModuleAdder(ABC): def __init__(self, sf_datasets: Sequence[Dataset], @@ -547,23 +47,23 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags: dict, multi_frame_output: Dataset): - self.ExcludedFromPerFrameTags = excluded_from_perframe_tags - self.ExcludedFromFunctionalGroupsTags = excluded_from_functional_tags - self._PerFrameTags = perframe_tags - self._SharedTags = shared_tags - self.TargetDataset = multi_frame_output - self.SingleFrameSet = sf_datasets - self.EarliestDate = DA('00010101') - self.EarliestTime = TM('000000') - self.EarliestDateTime = DT('00010101000000') - self.FarthestFutureDate = DA('99991231') - self.FarthestFutureTime = TM('235959') - self.FarthestFutureDateTime = DT('99991231235959') + self.excluded_from_per_frame_tags = excluded_from_perframe_tags + self.excluded_from_functional_group_tags = excluded_from_functional_tags + self._perframe_tags = perframe_tags + self._shared_tags = shared_tags + self.target_dataset = multi_frame_output + 
self.single_frame_set = sf_datasets + self.earliest_date = DA('00010101') + self.earliest_time = TM('000000') + self.earliest_date_time = DT('00010101000000') + self.farthest_future_date = DA('99991231') + self.farthest_future_time = TM('235959') + self.farthest_future_date_time = DT('99991231235959') def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: if attribute.is_empty: return True - if type(attribute.value) == Sequence: + if isinstance(attribute.value, Sequence): if len(attribute.value) == 0: return True for item in attribute.value: @@ -574,25 +74,25 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: return False def _mark_tag_as_used(self, tg: Tag) -> None: - if tg in self._SharedTags: - self._SharedTags[tg] = True - elif tg in self.ExcludedFromPerFrameTags: - self.ExcludedFromPerFrameTags[tg] = True - elif tg in self._PerFrameTags: - self._PerFrameTags[tg] = True + if tg in self._shared_tags: + self._shared_tags[tg] = True + elif tg in self.excluded_from_per_frame_tags: + self.excluded_from_per_frame_tags[tg] = True + elif tg in self._perframe_tags: + self._perframe_tags[tg] = True def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, src_kw_or_tg: str, dest_kw_or_tg: str = None, check_not_to_be_perframe: bool = True, check_not_to_be_empty: bool = False) -> None: - if type(src_kw_or_tg) == str: + if isinstance(src_kw_or_tg, str): src_kw_or_tg = tag_for_keyword(src_kw_or_tg) if dest_kw_or_tg is None: dest_kw_or_tg = src_kw_or_tg - elif type(dest_kw_or_tg) == str: + elif isinstance(dest_kw_or_tg, str): dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) if check_not_to_be_perframe: - if src_kw_or_tg in self._PerFrameTags: + if src_kw_or_tg in self._perframe_tags: return if src_kw_or_tg in src_ds: elem = src_ds[src_kw_or_tg] @@ -611,28 +111,28 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, self._mark_tag_as_used(src_kw_or_tg) def _get_perframe_item(self, index: int) -> Dataset: - if index > 
len(self.SingleFrameSet): + if index > len(self.single_frame_set): return None pf_kw: str = 'PerFrameFunctionalGroupsSequence' pf_tg = tag_for_keyword(pf_kw) - if pf_tg not in self.TargetDataset: + if pf_tg not in self.target_dataset: seq = [] - for i in range(0, len(self.SingleFrameSet)): + for i in range(0, len(self.single_frame_set)): seq.append(Dataset()) - self.TargetDataset[pf_tg] = DataElement(pf_tg, + self.target_dataset[pf_tg] = DataElement(pf_tg, 'SQ', DicomSequence(seq)) - return self.TargetDataset[pf_tg].value[index] + return self.target_dataset[pf_tg].value[index] def _get_shared_item(self) -> Dataset: sf_kw = 'SharedFunctionalGroupsSequence' sf_tg = tag_for_keyword(sf_kw) - if sf_kw not in self.TargetDataset: + if sf_kw not in self.target_dataset: seq = [Dataset()] - self.TargetDataset[sf_tg] = DataElement(sf_tg, + self.target_dataset[sf_tg] = DataElement(sf_tg, 'SQ', DicomSequence(seq)) - return self.TargetDataset[sf_tg].value[0] + return self.target_dataset[sf_tg].value[0] def _get_or_create_attribute( self, src: Dataset, kw: Union[str, Tag], default: Any) -> DataElement: @@ -644,18 +144,17 @@ def _get_or_create_attribute( a = deepcopy(src[kw]) else: a = DataElement(tg, dictionary_VR(tg), default) - from pydicom.valuerep import DT, TM, DA - if a.VR == 'DA' and type(a.value) == str: + if a.VR == 'DA' and isinstance(a.value, str): try: a.value = DA(a.value) except: a.value = DA(default) - if a.VR == 'DT' and type(a.value) == str: + if a.VR == 'DT' and isinstance(a.value, str): try: a.value = DT(a.value) except: a.value = DT(default) - if a.VR == 'TM' and type(a.value) == str: + if a.VR == 'TM' and isinstance(a.value, str): try: a.value = TM(a.value) except: @@ -672,21 +171,20 @@ def _add_module(self, module_name: str, excepted_attributes: list = [], # sf_sop_instance_uid] # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] # modules = IOD_MODULE_MAP[iod_name] - from copy import deepcopy attribs: list = MODULE_ATTRIBUTE_MAP[module_name] - 
ref_dataset = self.SingleFrameSet[0] + ref_dataset = self.single_frame_set[0] for a in attribs: kw: str = a['keyword'] if kw in excepted_attributes: continue if len(a['path']) == 0: self._copy_attrib_if_present( - ref_dataset, self.TargetDataset, kw, + ref_dataset, self.target_dataset, kw, check_not_to_be_perframe=check_not_to_be_perframe, check_not_to_be_empty=check_not_to_be_empty) @abstractmethod - def AddModule(self) -> None: + def add_module(self) -> None: pass @@ -706,7 +204,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: + def add_module(self) -> None: module_and_excepted_at = { "image-pixel": [ @@ -825,7 +323,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], ] } - def AddModule(self) -> None: + def add_module(self) -> None: for module_name, excpeted_a in self._module_excepted_list.items(): self._add_module( module_name, @@ -850,11 +348,11 @@ def __init__(self, sf_datasets: Sequence[Dataset], perframe_tags, shared_tags, multi_frame_output) - self.Modality = modality + self.modality = modality def _get_value_for_frame_type(self, attrib: DataElement) -> Union[list, None]: - if type(attrib) != DataElement: + if not isinstance(attrib, DataElement): return None output = ['', '', '', ''] v = attrib.value @@ -867,10 +365,10 @@ def _get_value_for_frame_type(self, def _get_frame_type_seq_tag(self) -> int: seq_kw = '{}{}FrameTypeSequence' - if self.Modality == 'PET': - seq_kw = seq_kw.format(self.Modality, '') + if self.modality == 'PET': + seq_kw = seq_kw.format(self.modality, '') else: - seq_kw = seq_kw.format(self.Modality, 'Image') + seq_kw = seq_kw.format(self.modality, 'Image') return tag_for_keyword(seq_kw) def _add_module_to_functional_group(self, src_fg: Dataset, @@ -895,23 +393,23 @@ def element_generator(kw: str, val: Any) -> DataElement: dest_fg['VolumeBasedCalculationTechnique'] = element_generator( 'VolumeBasedCalculationTechnique', "NONE") - def AddModule(self) -> None: + def 
add_module(self) -> None: im_type_tag = tag_for_keyword('ImageType') seq_tg = self._get_frame_type_seq_tag() - if im_type_tag not in self._PerFrameTags: - self._add_module_to_functional_group(self.SingleFrameSet[0], - self.TargetDataset, 0) + if im_type_tag not in self._perframe_tags: + self._add_module_to_functional_group(self.single_frame_set[0], + self.target_dataset, 0) # ---------------------------- item = self._get_shared_item() inner_item = Dataset() - self._add_module_to_functional_group(self.SingleFrameSet[0], + self._add_module_to_functional_group(self.single_frame_set[0], inner_item, 1) item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) else: - for i in range(0, len(self.SingleFrameSet)): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) inner_item = Dataset() - self._add_module_to_functional_group(self.SingleFrameSet[i], + self._add_module_to_functional_group(self.single_frame_set[i], inner_item, 1) item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) @@ -932,13 +430,13 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: - # ct_mr = CommonCTMRImageDescriptionMacro(self.SingleFrameSet - # , self.ExcludedFromPerFrameTags - # , self._PerFrameTags - # , self._SharedTags - # , self.TargetDataset) - # ct_mr.AddModule() + def add_module(self) -> None: + # ct_mr = CommonCTMRImageDescriptionMacro(self.single_frame_set + # , self.excluded_from_per_frame_tags + # , self._perframe_tags + # , self._shared_tags + # , self.target_dataset) + # ct_mr.add_module() # Acquisition Number # Acquisition DateTime - should be able to find earliest amongst all # frames, if present (required if ORIGINAL) @@ -955,7 +453,7 @@ def AddModule(self) -> None: # Bits Allocated - handled by distinguishingAttribute copy # Bits Stored - handled by distinguishingAttribute copy # High Bit - handled by distinguishingAttribute copy - ref_dataset = self.SingleFrameSet[0] + ref_dataset = 
self.single_frame_set[0] attribs_to_be_added = [ 'ContentQualification', 'ImageComments', @@ -966,25 +464,25 @@ def AddModule(self) -> None: 'LossyImageCompressionMethod'] for kw in attribs_to_be_added: self._copy_attrib_if_present( - ref_dataset, self.TargetDataset, kw, + ref_dataset, self.target_dataset, kw, check_not_to_be_perframe=True, check_not_to_be_empty=False) - if tag_for_keyword('PresentationLUTShape') not in self._PerFrameTags: + if tag_for_keyword('PresentationLUTShape') not in self._perframe_tags: # actually should really invert the pixel data if MONOCHROME1, # since only MONOCHROME2 is permitted : ( # also, do not need to check if PhotometricInterpretation is # per-frame, since a distinguishing attribute phmi_kw = 'PhotometricInterpretation' - phmi_a = self._get_or_create_attribute(self.SingleFrameSet[0], + phmi_a = self._get_or_create_attribute(self.single_frame_set[0], phmi_kw, "MONOCHROME2") LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ else "IDENTITY" - LUT_shape_a = self._get_or_create_attribute(self.SingleFrameSet[0], + LUT_shape_a = self._get_or_create_attribute(self.single_frame_set[0], 'PresentationLUTShape', LUT_shape_default) if not LUT_shape_a.is_empty: - self.TargetDataset['PresentationLUTShape'] = LUT_shape_a + self.target_dataset['PresentationLUTShape'] = LUT_shape_a # Icon Image Sequence - always discard these @@ -1004,7 +502,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: + def add_module(self) -> None: self._add_module('contrast-bolus') @@ -1024,7 +522,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: + def add_module(self) -> None: pass # David's code doesn't hold anything for this module ... 
should ask him @@ -1045,13 +543,13 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: + def add_module(self) -> None: # David's code doesn't hold anything for this module ... should ask him kw = 'ContentQualification' tg = tag_for_keyword(kw) elem = self._get_or_create_attribute( - self.SingleFrameSet[0], kw, 'RESEARCH') - self.TargetDataset[tg] = elem + self.single_frame_set[0], kw, 'RESEARCH') + self.target_dataset[tg] = elem class EnhancedMRImageModule(Abstract_MultiframeModuleAdder): @@ -1070,44 +568,44 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: + def add_module(self) -> None: self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self.single_frame_set[0], + self.target_dataset, "ResonantNucleus", check_not_to_be_perframe=True, check_not_to_be_empty=True) - if 'ResonantNucleus' not in self.TargetDataset: + if 'ResonantNucleus' not in self.target_dataset: # derive from ImagedNucleus, which is the one used in legacy MR # IOD, but does not have a standard list of defined terms ... 
# (could check these : () self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self.single_frame_set[0], + self.target_dataset, "ImagedNucleus", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self.single_frame_set[0], + self.target_dataset, "KSpaceFiltering", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self.single_frame_set[0], + self.target_dataset, "MagneticFieldStrength", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self.single_frame_set[0], + self.target_dataset, "ApplicableSafetyStandardAgency", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self.single_frame_set[0], + self.target_dataset, "ApplicableSafetyStandardDescription", check_not_to_be_perframe=True, check_not_to_be_empty=True) @@ -1129,11 +627,11 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: + def add_module(self) -> None: tg = tag_for_keyword('AcquisitionContextSequence') - if tg not in self._PerFrameTags: - self.TargetDataset[tg] = self._get_or_create_attribute( - self.SingleFrameSet[0], + if tg not in self._perframe_tags: + self.target_dataset[tg] = self._get_or_create_attribute( + self.single_frame_set[0], tg, None) @@ -1195,18 +693,18 @@ def _contains_right_attributes(self, tags: dict) -> bool: bodypart_tg in tags or anatomical_reg_tg) - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not 
self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class PixelMeasuresFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1258,18 +756,18 @@ def _add_module_to_functional_group( [item]) dest_fg[pixel_measures_tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1307,18 +805,18 @@ def _add_module_to_functional_group( [item]) 
dest_fg[PlanePositionSequence_tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class PlaneOrientationFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1354,18 +852,18 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): 
+ for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class FrameVOILUTFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1416,18 +914,18 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class PixelValueTransformationFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1501,18 +999,18 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + 
self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class ReferencedImageFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1542,18 +1040,18 @@ def _add_module_to_functional_group( check_not_to_be_perframe=False, check_not_to_be_empty=False) - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class DerivationImageFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1598,18 +1096,18 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self) -> None: - if 
(not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module(self) -> None: + if (not self._contains_right_attributes(self._perframe_tags) and + (self._contains_right_attributes(self._shared_tags) or + self._contains_right_attributes(self.excluded_from_per_frame_tags)) ): item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): + self._add_module_to_functional_group(self.single_frame_set[0], item) + elif self._contains_right_attributes(self._perframe_tags): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class UnassignedPerFrame(Abstract_MultiframeModuleAdder): @@ -1644,44 +1142,43 @@ def _add_module_to_functional_group( def _add_largest_smallest_pixle_value(self) -> None: ltg = tag_for_keyword("LargestImagePixelValue") - from sys import float_info lval = float_info.min - if ltg in self._PerFrameTags: - for frame in self.SingleFrameSet: + if ltg in self._perframe_tags: + for frame in self.single_frame_set: if ltg in frame: nval = frame[ltg].value else: continue lval = nval if lval < nval else lval if lval > float_info.min: - self.TargetDataset[ltg] = DataElement(ltg, 'SS', int(lval)) + self.target_dataset[ltg] = DataElement(ltg, 'SS', int(lval)) # ========================== stg = tag_for_keyword("SmallestImagePixelValue") sval = float_info.max - if stg in self._PerFrameTags: - for frame in self.SingleFrameSet: + if stg in self._perframe_tags: + for frame in self.single_frame_set: if stg in frame: nval = frame[stg].value else: continue sval = nval if sval < nval else sval if sval < float_info.max: - self.TargetDataset[stg] = DataElement(stg, 
'SS', int(sval)) + self.target_dataset[stg] = DataElement(stg, 'SS', int(sval)) stg = "SmallestImagePixelValue" - def AddModule(self) -> None: + def add_module(self) -> None: # first collect all not used tags # note that this is module is order dependent self._add_largest_smallest_pixle_value() self._eligeible_tags: List[Tag] = [] - for tg, used in self._PerFrameTags.items(): - if not used and tg not in self.ExcludedFromFunctionalGroupsTags: + for tg, used in self._perframe_tags.items(): + if not used and tg not in self.excluded_from_functional_group_tags: self._eligeible_tags.append(tg) - for i in range(0, len(self.SingleFrameSet)): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class UnassignedShared(Abstract_MultiframeModuleAdder): @@ -1703,10 +1200,10 @@ def __init__(self, sf_datasets: Sequence[Dataset], def _add_module_to_functional_group( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() - for tg, used in self._SharedTags.items(): + for tg, used in self._shared_tags.items(): if (not used and - tg not in self.TargetDataset and - tg not in self.ExcludedFromFunctionalGroupsTags): + tg not in self.target_dataset and + tg not in self.excluded_from_functional_group_tags): self._copy_attrib_if_present(src_fg, item, tg, @@ -1717,9 +1214,9 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self) -> None: + def add_module(self) -> None: item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) + self._add_module_to_functional_group(self.single_frame_set[0], item) class EmptyType2Attributes(Abstract_MultiframeModuleAdder): @@ -1738,12 +1235,12 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def CreateEmptyElement(self, tg: Tag) -> DataElement: + def 
create_empty_element(self, tg: Tag) -> DataElement: return DataElement(tg, dictionary_VR(tg), None) - def AddModule(self) -> None: + def add_module(self) -> None: iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[ - self.TargetDataset['SOPClassUID'].value] + self.target_dataset['SOPClassUID'].value] modules = IOD_MODULE_MAP[iod_name] for module in modules: if module['usage'] == 'M': @@ -1752,12 +1249,12 @@ def AddModule(self) -> None: for a in attrib_list: if len(a['path']) == 0 and a['type'] == '2': tg = tag_for_keyword(a['keyword']) - if (tg not in self.SingleFrameSet[0] and - tg not in self.TargetDataset and - tg not in self._PerFrameTags and - tg not in self._SharedTags): - self.TargetDataset[tg] =\ - self.CreateEmptyElement(tg) + if (tg not in self.single_frame_set[0] and + tg not in self.target_dataset and + tg not in self._perframe_tags and + tg not in self._shared_tags): + self.target_dataset[tg] =\ + self.create_empty_element(tg) class ConversionSourceFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1796,11 +1293,11 @@ def _add_module_to_functional_group( seq = DataElement(tg, dictionary_VR(tg), [item]) dest_fg[tg] = seq - def AddModule(self) -> None: - for i in range(0, len(self.SingleFrameSet)): + def add_module(self) -> None: + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) + self.single_frame_set[i], item) class FrameContentFunctionalGroup(Abstract_MultiframeModuleAdder): @@ -1818,15 +1315,16 @@ def __init__(self, sf_datasets: Sequence[Dataset], perframe_tags, shared_tags, multi_frame_output) - self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime + self.earliest_frame_acquisition_date_time = \ + self.farthest_future_date_time self._slices: list = [] self._tolerance = 0.0001 self._slice_location_map: dict = {} def _build_slices_geometry(self) -> None: - frame_count = len(self.SingleFrameSet) + frame_count = len(self.single_frame_set) for i in 
range(0, frame_count): - curr_frame = self.SingleFrameSet[i] + curr_frame = self.single_frame_set[i] ImagePositionPatient_v = None \ if 'ImagePositionPatient' not in curr_frame\ else curr_frame['ImagePositionPatient'].value @@ -1871,7 +1369,7 @@ def _are_all_slices_parallel(self) -> bool: last_slice = self._slices[0] for i in range(1, slice_count): curr_slice = self._slices[i] - if not GeometryOfSlice.AreParallel( + if not GeometryOfSlice.are_parallel( curr_slice, last_slice, self._tolerance): return False last_slice = curr_slice @@ -1887,7 +1385,7 @@ def _add_stack_info(self) -> None: if self._are_all_slices_parallel(): self._slice_location_map = {} for idx, s in enumerate(self._slices): - dist = round(s.GetDistanceAlongOrigin(), round_digits) + dist = round(s.get_distance_along_origin(), round_digits) if dist in self._slice_location_map: self._slice_location_map[dist].append(idx) else: @@ -1901,11 +1399,11 @@ def _add_stack_info(self) -> None: frame = self._get_perframe_item(frame_index) new_item = frame[frame_content_tg].value[0] new_item["StackID"] = self._get_or_create_attribute( - self.SingleFrameSet[0], + self.single_frame_set[0], "StackID", "0") new_item["InStackPositionNumber"] =\ self._get_or_create_attribute( - self.SingleFrameSet[0], + self.single_frame_set[0], "InStackPositionNumber", distance_index) distance_index += 1 @@ -1930,33 +1428,33 @@ def _add_module_to_functional_group( self._mark_tag_as_used(an_tg) # ---------------------------------------------------------------- AcquisitionDateTime_a = self._get_or_create_attribute( - src_fg, 'AcquisitionDateTime', self.EarliestDateTime) + src_fg, 'AcquisitionDateTime', self.earliest_date_time) # chnage the keyword to FrameAcquisitionDateTime: FrameAcquisitionDateTime_a = DataElement( tag_for_keyword('FrameAcquisitionDateTime'), 'DT', AcquisitionDateTime_a.value) AcquisitionDateTime_is_perframe = self._contains_right_attributes( - self._PerFrameTags) - if FrameAcquisitionDateTime_a.value == 
self.EarliestDateTime: + self._perframe_tags) + if FrameAcquisitionDateTime_a.value == self.earliest_date_time: AcquisitionDate_a = self._get_or_create_attribute( - src_fg, 'AcquisitionDate', self.EarliestDate) + src_fg, 'AcquisitionDate', self.earliest_date) AcquisitionTime_a = self._get_or_create_attribute( - src_fg, 'AcquisitionTime', self.EarliestTime) + src_fg, 'AcquisitionTime', self.earliest_time) d = AcquisitionDate_a.value t = AcquisitionTime_a.value # FrameAcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + # t.strftime('%H%M%S'))) FrameAcquisitionDateTime_a.value = DT(str(d) + str(t)) - if FrameAcquisitionDateTime_a.value > self.EarliestDateTime: + if FrameAcquisitionDateTime_a.value > self.earliest_date_time: if (FrameAcquisitionDateTime_a.value < - self.EarliestFrameAcquisitionDateTime): - self.EarliestFrameAcquisitionDateTime =\ + self.earliest_frame_acquisition_date_time): + self.earliest_frame_acquisition_date_time =\ FrameAcquisitionDateTime_a.value if not AcquisitionDateTime_is_perframe: if ('TriggerTime' in src_fg and 'FrameReferenceDateTime' not in src_fg): TriggerTime_a = self._get_or_create_attribute( - src_fg, 'TriggerTime', self.EarliestTime) + src_fg, 'TriggerTime', self.earliest_time) trigger_time_in_millisecond = int(TriggerTime_a.value) if trigger_time_in_millisecond > 0: t_delta = timedelta(trigger_time_in_millisecond) @@ -1991,17 +1489,17 @@ def _add_module_to_functional_group( # Also we want to add the earliest frame acq date time to the multiframe: def _add_acquisition_info(self) -> None: - for i in range(0, len(self.SingleFrameSet)): + for i in range(0, len(self.single_frame_set)): item = self._get_perframe_item(i) self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - if self.EarliestFrameAcquisitionDateTime < self.FarthestFutureDateTime: + self.single_frame_set[i], item) + if self.earliest_frame_acquisition_date_time < self.farthest_future_date_time: kw = 'AcquisitionDateTime' - self.TargetDataset[kw] = 
DataElement( + self.target_dataset[kw] = DataElement( tag_for_keyword(kw), - 'DT', self.EarliestFrameAcquisitionDateTime) + 'DT', self.earliest_frame_acquisition_date_time) - def AddModule(self) -> None: + def add_module(self) -> None: self._add_acquisition_info() self._add_stack_info() @@ -2047,19 +1545,19 @@ def _copy_data(self, src: bytearray, word_data: bool = False) -> None: src = tmp des.extend(src) - def AddModule(self) -> None: - kw = 'NumberOfFrames' + def add_module(self) -> None: + kw = 'NumberOfframes' tg = tag_for_keyword(kw) - self._frame_count = len(self.SingleFrameSet) - self.TargetDataset[kw] =\ + self._frame_count = len(self.single_frame_set) + self.target_dataset[kw] =\ DataElement(tg, dictionary_VR(tg), self._frame_count) - row = self.SingleFrameSet[0]["Rows"].value - col = self.SingleFrameSet[0]["Columns"].value + row = self.single_frame_set[0]["Rows"].value + col = self.single_frame_set[0]["Columns"].value self._number_of_pixels_per_frame = row * col self._number_of_pixels = row * col * self._frame_count kw = "PixelData" - for i in range(0, len(self.SingleFrameSet)): - PixelData_a = self.SingleFrameSet[i][kw] + for i in range(0, len(self.single_frame_set)): + PixelData_a = self.single_frame_set[i][kw] if self._is_other_byte_vr(PixelData_a.VR): if len(self._word_data) != 0: raise TypeError( @@ -2081,7 +1579,7 @@ def AddModule(self) -> None: elif len(self._word_data) != 0: MF_PixelData = DataElement(tag_for_keyword(kw), 'OW', bytes(self._word_data)) - self.TargetDataset[kw] = MF_PixelData + self.target_dataset[kw] = MF_PixelData class ContentDateTime(Abstract_MultiframeModuleAdder): @@ -2099,26 +1597,29 @@ def __init__(self, sf_datasets: Sequence[Dataset], perframe_tags, shared_tags, multi_frame_output) - self.EarliestContentDateTime = self.FarthestFutureDateTime + self.earliest_content_date_time = self.farthest_future_date_time - def AddModule(self) -> None: - for i in range(0, len(self.SingleFrameSet)): - src = self.SingleFrameSet[i] + def 
add_module(self) -> None: + for i in range(0, len(self.single_frame_set)): + src = self.single_frame_set[i] kw = 'ContentDate' - d = DA(self.FarthestFutureDate if kw not in src else src[kw].value) + d = DA( + self.farthest_future_date if kw not in src else src[kw].value) kw = 'ContentTime' - t = TM(self.FarthestFutureTime if kw not in src else src[kw].value) + t = TM( + self.farthest_future_time if kw not in src else src[kw].value) value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S.%f')) - if self.EarliestContentDateTime > value: - self.EarliestContentDateTime = value - if self.EarliestContentDateTime < self.FarthestFutureDateTime: - n_d = DA(self.EarliestContentDateTime.date().strftime('%Y%m%d')) - n_t = TM(self.EarliestContentDateTime.time().strftime('%H%M%S.%f')) + if self.earliest_content_date_time > value: + self.earliest_content_date_time = value + if self.earliest_content_date_time < self.farthest_future_date_time: + n_d = DA(self.earliest_content_date_time.date().strftime('%Y%m%d')) + n_t = TM( + self.earliest_content_date_time.time().strftime('%H%M%S.%f')) kw = 'ContentDate' - self.TargetDataset[kw] = DataElement( + self.target_dataset[kw] = DataElement( tag_for_keyword(kw), 'DA', n_d) kw = 'ContentTime' - self.TargetDataset[kw] = DataElement( + self.target_dataset[kw] = DataElement( tag_for_keyword(kw), 'TM', n_t) @@ -2138,15 +1639,15 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def AddModule(self) -> None: + def add_module(self) -> None: nnooww = datetime.now() n_d = DA(nnooww.date().strftime('%Y%m%d')) n_t = TM(nnooww.time().strftime('%H%M%S')) kw = 'InstanceCreationDate' - self.TargetDataset[kw] = DataElement( + self.target_dataset[kw] = DataElement( tag_for_keyword(kw), 'DA', n_d) kw = 'InstanceCreationTime' - self.TargetDataset[kw] = DataElement( + self.target_dataset[kw] = DataElement( tag_for_keyword(kw), 'TM', n_t) @@ -2171,7 +1672,7 @@ def _add_data_element_to_target(self, target: Dataset, tg = 
tag_for_keyword(kw) target[kw] = DataElement(tg, dictionary_VR(tg), value) - def AddModule(self) -> None: + def add_module(self) -> None: CodeValue_tg = tag_for_keyword('CodeValue') CodeMeaning_tg = tag_for_keyword('CodeMeaning') CodingSchemeDesignator_tg = tag_for_keyword('CodingSchemeDesignator') @@ -2213,7 +1714,7 @@ def AddModule(self) -> None: "ContributionDescription", 'Legacy Enhanced Image created from Classic Images') tg = tag_for_keyword('ContributingEquipmentSequence') - self.TargetDataset[tg] = DataElement(tg, 'SQ', [item]) + self.target_dataset[tg] = DataElement(tg, 'SQ', [item]) class LegacyConvertedEnhanceImage(SOPClass): @@ -2246,7 +1747,7 @@ def __init__( Additional keyword arguments that will be passed to the constructor of `highdicom.base.SOPClass` """ - legacy_datasets = frame_set.Frames + legacy_datasets = frame_set.frames try: ref_ds = legacy_datasets[0] except IndexError: @@ -2262,7 +1763,7 @@ def __init__( sop_class_uid=sop_class_uid, instance_number=instance_number, manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, + modality=ref_ds.modality, patient_id=ref_ds.PatientID, patient_name=ref_ds.PatientName, patient_birth_date=ref_ds.PatientBirthDate, @@ -2274,15 +1775,15 @@ def __init__( referring_physician_name=ref_ds.ReferringPhysicianName, **kwargs) self._legacy_datasets = legacy_datasets - self.DistinguishingAttributesTags = self._get_tag_used_dictionary( - frame_set.DistinguishingAttributesTags) - self.ExcludedFromPerFrameTags = self._get_tag_used_dictionary( - frame_set.ExcludedFromPerFrameTags) - self._PerFrameTags = self._get_tag_used_dictionary( - frame_set.PerFrameTags) - self._SharedTags = self._get_tag_used_dictionary( - frame_set.SharedTags) - self.ExcludedFromFunctionalGroupsTags = { + self.distinguishing_attributes_tags = self._get_tag_used_dictionary( + frame_set.distinguishing_attributes_tags) + self.excluded_from_per_frame_tags = self._get_tag_used_dictionary( + frame_set.excluded_from_per_frame_tags) + 
self._perframe_tags = self._get_tag_used_dictionary( + frame_set.perframe_tags) + self._shared_tags = self._get_tag_used_dictionary( + frame_set.shared_tags) + self.excluded_from_functional_group_tags = { tag_for_keyword('SpecificCharacterSet'): False} # -------------------------------------------------------------------- self.__build_blocks: list = [] @@ -2293,13 +1794,13 @@ def __init__( self._legacy_datasets = new_ds if (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-ct-image'): - self.AddBuildBlocksForCT() + self.add_build_blocks_for_ct() elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-mr-image'): - self.AddBuildBlocksForMR() + self.add_build_blocks_for_mr() elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-pet-image'): - self.AddBuildBlocksForPET() + self.add_build_blocks_for_pet() def _get_tag_used_dictionary(self, input: list) -> dict: out: dict = {} @@ -2317,264 +1818,264 @@ def default_sort_key(x: Dataset) -> tuple: out += (x['SOPInstanceUID'].value, ) return out - def AddNewBuildBlock( + def add_new_build_block( self, element: Abstract_MultiframeModuleAdder) -> None: if not isinstance(element, Abstract_MultiframeModuleAdder): raise ValueError('Build block must be an instance ' 'of Abstract_MultiframeModuleAdder') self.__build_blocks.append(element) - def ClearBuildBlocks(self) -> None: + def clear_build_blocks(self) -> None: self.__build_blocks = [] - def AddCommonCT_PET_MR_BuildBlocks(self) -> None: + def add_common_ct_pet_mr_build_blocks(self) -> None: Blocks = [ ImagePixelModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), CompositeInstanceContex( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - 
self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), EnhancedCommonImageModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), AcquisitionContextModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), FrameAnatomyFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), PixelMeasuresFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), PlaneOrientationFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), PlanePositionFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), 
FrameVOILUTFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), PixelValueTransformationFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), ReferencedImageFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), ConversionSourceFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), FrameContentFunctionalGroup( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), PixelData( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), ContentDateTime( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + 
self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), InstanceCreationDateTime( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), ContributingEquipmentSequence( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), UnassignedPerFrame( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), UnassignedShared( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self) # , # StackInformation( # self._legacy_datasets, - # self.ExcludedFromPerFrameTags, - # self.ExcludedFromFunctionalGroupsTags, - # self._PerFrameTags, - # self._SharedTags, + # self.excluded_from_per_frame_tags, + # self.excluded_from_functional_group_tags, + # self._perframe_tags, + # self._shared_tags, # self), # EmptyType2Attributes( # self._legacy_datasets, - # self.ExcludedFromPerFrameTags, - # self.ExcludedFromFunctionalGroupsTags, - # self._PerFrameTags, - # self._SharedTags, + # self.excluded_from_per_frame_tags, + # self.excluded_from_functional_group_tags, + # self._perframe_tags, + # self._shared_tags, # self) ] for b in Blocks: - self.AddNewBuildBlock(b) + self.add_new_build_block(b) - def 
AddCTSpecificBuildBlocks(self) -> None: + def add_ct_specific_build_blocks(self) -> None: Blocks = [ CommonCTMRPETImageDescriptionMacro( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self, 'CT'), EnhancedCTImageModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), ContrastBolusModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self) ] for b in Blocks: - self.AddNewBuildBlock(b) + self.add_new_build_block(b) def AddMRSpecificBuildBlocks(self) -> None: Blocks = [ CommonCTMRPETImageDescriptionMacro( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self, 'MR'), EnhancedMRImageModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self), ContrastBolusModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + 
self._perframe_tags, + self._shared_tags, self) ] for b in Blocks: - self.AddNewBuildBlock(b) + self.add_new_build_block(b) - def AddPETSpecificBuildBlocks(self) -> None: + def add_pet_specific_build_blocks(self) -> None: Blocks = [ CommonCTMRPETImageDescriptionMacro( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self, 'PET'), EnhancedPETImageModule( self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, + self.excluded_from_per_frame_tags, + self.excluded_from_functional_group_tags, + self._perframe_tags, + self._shared_tags, self) ] for b in Blocks: - self.AddNewBuildBlock(b) + self.add_new_build_block(b) - def AddBuildBlocksForCT(self) -> None: - self.ClearBuildBlocks() - self.AddCommonCT_PET_MR_BuildBlocks() - self.AddCTSpecificBuildBlocks() + def add_build_blocks_for_ct(self) -> None: + self.clear_build_blocks() + self.add_common_ct_pet_mr_build_blocks() + self.add_ct_specific_build_blocks() - def AddBuildBlocksForMR(self) -> None: - self.ClearBuildBlocks() - self.AddCommonCT_PET_MR_BuildBlocks() - self.AddMRSpecificBuildBlocks() + def add_build_blocks_for_mr(self) -> None: + self.clear_build_blocks() + self.add_common_ct_pet_mr_build_blocks() + self.add_mr_specific_build_blocks() - def AddBuildBlocksForPET(self) -> None: - self.ClearBuildBlocks() - self.AddCommonCT_PET_MR_BuildBlocks() - self.AddPETSpecificBuildBlocks() + def add_build_blocks_for_pet(self) -> None: + self.clear_build_blocks() + self.add_common_ct_pet_mr_build_blocks() + self.add_pet_specific_build_blocks() def BuildMultiFrame(self) -> None: for builder in self.__build_blocks: - builder.AddModule() + builder.add_module() class GeometryOfSlice: @@ -2584,32 +2085,32 @@ def __init__(self, 
top_left_corner_pos: ndarray, voxel_spaceing: ndarray, dimensions: tuple): - self.RowVector = row_vector - self.ColVector = col_vector - self.TopLeftCornerPosition = top_left_corner_pos - self.VoxelSpacing = voxel_spaceing - self.Dim = dimensions - - def GetNormalVector(self) -> ndarray: - n: ndarray = cross(self.RowVector, self.ColVector) + self.row_vector = row_vector + self.col_vector = col_vector + self.top_left_corner_position = top_left_corner_pos + self.voxel_spacing = voxel_spaceing + self.dim = dimensions + + def get_normal_vector(self) -> ndarray: + n: ndarray = cross(self.row_vector, self.col_vector) n[2] = -n[2] return n - def GetDistanceAlongOrigin(self) -> float: - n = self.GetNormalVector() + def get_distance_along_origin(self) -> float: + n = self.get_normal_vector() return float( - dot(self.TopLeftCornerPosition, n)) + dot(self.top_left_corner_position, n)) - def AreParallel(slice1: GeometryOfSlice, + def are_parallel(slice1: GeometryOfSlice, slice2: GeometryOfSlice, tolerance: float = 0.0001) -> bool: - if (type(slice1) != GeometryOfSlice or - type(slice2) != GeometryOfSlice): + if (not isinstance(slice1, GeometryOfSlice) or + not isinstance(slice2, GeometryOfSlice)): print('Error') return False else: - n1: ndarray = slice1.GetNormalVector() - n2: ndarray = slice2.GetNormalVector() + n1: ndarray = slice1.get_normal_vector() + n2: ndarray = slice2.get_normal_vector() for el1, el2 in zip(n1, n2): if abs(el1 - el2) > tolerance: return False @@ -2632,17 +2133,16 @@ def istag_group_length(t: Tag) -> bool: return t.element == 0 def isequal(v1: Any, v2: Any) -> bool: - from pydicom.valuerep import DSfloat float_tolerance = 1.0e-5 def is_equal_float(x1: float, x2: float) -> bool: return abs(x1 - x2) < float_tolerance - if type(v1) != type(v2): + if not type(v1) == type(v2): return False - if type(v1) == DicomSequence: + if isinstance(v1, DicomSequence): for item1, item2 in zip(v1, v2): DicomHelper.isequal_dicom_dataset(item1, item2) - if type(v1) != 
MultiValue: + if not isinstance(v1, MultiValue): v11 = [v1] v22 = [v2] else: @@ -2651,7 +2151,7 @@ def is_equal_float(x1: float, x2: float) -> bool: if len(v11) != len(v22): return False for xx, yy in zip(v11, v22): - if type(xx) == DSfloat or type(xx) == float: + if isinstance(xx) == DSfloat or isinstance(xx, float): if not is_equal_float(xx, yy): return False else: @@ -2662,7 +2162,7 @@ def is_equal_float(x1: float, x2: float) -> bool: def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: if type(ds1) != type(ds2): return False - if type(ds1) != Dataset: + if not isinstance(ds1, Dataset): return False for k1, elem1 in ds1.items(): if k1 not in ds2: @@ -2676,51 +2176,51 @@ def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: class FrameSet: def __init__(self, single_frame_list: list, distinguishing_tags: list): - self._Frames = single_frame_list - self._DistinguishingAttributesTags = distinguishing_tags + self._frames = single_frame_list + self._distinguishing_attributes_tags = distinguishing_tags tmp = [ tag_for_keyword('AcquisitionDateTime'), tag_for_keyword('AcquisitionDate'), tag_for_keyword('AcquisitionTime'), tag_for_keyword('SpecificCharacterSet')] - self._ExcludedFromPerFrameTags =\ - self.DistinguishingAttributesTags + tmp - self._PerFrameTags: list = [] - self._SharedTags: list = [] + self._excluded_from_per_frame_tags =\ + self.distinguishing_attributes_tags + tmp + self._perframe_tags: list = [] + self._shared_tags: list = [] self._find_per_frame_and_shared_tags() @property - def Frames(self) -> List[Dataset]: - return self._Frames[:] + def frames(self) -> List[Dataset]: + return self._frames[:] @property - def DistinguishingAttributesTags(self) -> List[Tag]: - return self._DistinguishingAttributesTags[:] + def distinguishing_attributes_tags(self) -> List[Tag]: + return self._distinguishing_attributes_tags[:] @property - def ExcludedFromPerFrameTags(self) -> List[Tag]: - return self._ExcludedFromPerFrameTags[:] + def 
excluded_from_per_frame_tags(self) -> List[Tag]: + return self._excluded_from_per_frame_tags[:] @property - def PerFrameTags(self) -> List[Tag]: - return self._PerFrameTags[:] + def perframe_tags(self) -> List[Tag]: + return self._perframe_tags[:] @property - def SharedTags(self) -> List[Tag]: - return self._SharedTags[:] + def shared_tags(self) -> List[Tag]: + return self._shared_tags[:] def GetSOPInstanceUIDList(self) -> list: OutputList: list = [] - for f in self._Frames: + for f in self._frames: OutputList.append(f.SOPInstanceUID) return OutputList def GetSOPClassUID(self) -> UID: - return self._Frames[0].SOPClassUID + return self._frames[0].SOPClassUID def _find_per_frame_and_shared_tags(self) -> None: rough_shared: dict = {} - sfs = self.Frames + sfs = self.frames for ds in sfs: for ttag, elem in ds.items(): if (not ttag.is_private and not @@ -2730,8 +2230,8 @@ def _find_per_frame_and_shared_tags(self) -> None: self._istag_excluded_from_perframe(ttag) and ttag != tag_for_keyword('PixelData')): elem = ds[ttag] - if ttag not in self._PerFrameTags: - self._PerFrameTags.append(ttag) + if ttag not in self._perframe_tags: + self._perframe_tags.append(ttag) if ttag in rough_shared: rough_shared[ttag].append(elem.value) else: @@ -2739,7 +2239,7 @@ def _find_per_frame_and_shared_tags(self) -> None: to_be_removed_from_shared = [] for ttag, v in rough_shared.items(): v = rough_shared[ttag] - if len(v) < len(self.Frames): + if len(v) < len(self.frames): to_be_removed_from_shared.append(ttag) else: all_values_are_equal = True @@ -2749,25 +2249,24 @@ def _find_per_frame_and_shared_tags(self) -> None: break if not all_values_are_equal: to_be_removed_from_shared.append(ttag) - from pydicom.datadict import keyword_for_tag for t, v in rough_shared.items(): if keyword_for_tag(t) != 'PatientSex': continue for t in to_be_removed_from_shared: del rough_shared[t] for t, v in rough_shared.items(): - self._SharedTags.append(t) - self._PerFrameTags.remove(t) + 
self._shared_tags.append(t) + self._perframe_tags.remove(t) def _istag_excluded_from_perframe(self, t: Tag) -> bool: - return t in self.ExcludedFromPerFrameTags + return t in self.excluded_from_per_frame_tags class FrameSetCollection: def __init__(self, single_frame_list: list): - self.MixedFrames = single_frame_list - self.MixedFramesCopy = self.MixedFrames[:] - self._DistinguishingAttributeKeywords = [ + self.mixed_frames = single_frame_list + self.mixed_frames_copy = self.mixed_frames[:] + self._distinguishing_attribute_keywords = [ 'PatientID', 'PatientName', 'Manufacturer', @@ -2799,36 +2298,36 @@ def __init__(self, single_frame_list: list): 'SliceThickness', 'AcquisitionContextSequence'] to_be_removed_from_distinguishing_attribs: set = set() - self._FrameSets: list = [] - while len(self.MixedFramesCopy) != 0: + self._frame_sets: list = [] + while len(self.mixed_frames_copy) != 0: x = self._find_all_similar_to_first_datasets() - self._FrameSets.append(FrameSet(x[0], x[1])) + self._frame_sets.append(FrameSet(x[0], x[1])) for kw in to_be_removed_from_distinguishing_attribs: - self.DistinguishingAttributeKeywords.remove(kw) - self.ExcludedFromPerFrameTags = {} - for i in self.DistinguishingAttributeKeywords: - self.ExcludedFromPerFrameTags[tag_for_keyword(i)] = False - self.ExcludedFromPerFrameTags[ + self.distinguishing_attribute_keywords.remove(kw) + self.excluded_from_per_frame_tags = {} + for i in self.distinguishing_attribute_keywords: + self.excluded_from_per_frame_tags[tag_for_keyword(i)] = False + self.excluded_from_per_frame_tags[ tag_for_keyword('AcquisitionDateTime')] = False - self.ExcludedFromPerFrameTags[ + self.excluded_from_per_frame_tags[ tag_for_keyword('AcquisitionDate')] = False - self.ExcludedFromPerFrameTags[ + self.excluded_from_per_frame_tags[ tag_for_keyword('AcquisitionTime')] = False - self.ExcludedFromFunctionalGroupsTags = { + self.excluded_from_functional_group_tags = { tag_for_keyword('SpecificCharacterSet'): False} def 
_find_all_similar_to_first_datasets(self) -> tuple: - similar_ds: list = [self.MixedFramesCopy[0]] + similar_ds: list = [self.mixed_frames_copy[0]] distinguishing_tags_existing = [] distinguishing_tags_missing = [] - self.MixedFramesCopy = self.MixedFramesCopy[1:] - for kw in self.DistinguishingAttributeKeywords: + self.mixed_frames_copy = self.mixed_frames_copy[1:] + for kw in self.distinguishing_attribute_keywords: tg = tag_for_keyword(kw) if tg in similar_ds[0]: distinguishing_tags_existing.append(tg) else: distinguishing_tags_missing.append(tg) - for ds in self.MixedFramesCopy: + for ds in self.mixed_frames_copy: all_equal = True for tg in distinguishing_tags_missing: if tg in ds: @@ -2848,14 +2347,14 @@ def _find_all_similar_to_first_datasets(self) -> tuple: if all_equal: similar_ds.append(ds) for ds in similar_ds: - if ds in self.MixedFramesCopy: - self.MixedFramesCopy.remove(ds) + if ds in self.mixed_frames_copy: + self.mixed_frames_copy.remove(ds) return (similar_ds, distinguishing_tags_existing) @property - def DistinguishingAttributeKeywords(self) -> List[str]: - return self._DistinguishingAttributeKeywords[:] + def distinguishing_attribute_keywords(self) -> List[str]: + return self._distinguishing_attribute_keywords[:] @property def FrameSets(self) -> List[FrameSet]: - return self._FrameSets + return self._frame_sets From 531b3ce6d9e5ec4ce8375dbc6ed01d783b6fc919 Mon Sep 17 00:00:00 2001 From: Afshin Date: Mon, 7 Sep 2020 21:10:32 -0400 Subject: [PATCH 16/44] mend --- src/highdicom/legacy/sop.py | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 55f1170d..87e3e47d 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -120,8 +120,8 @@ def _get_perframe_item(self, index: int) -> Dataset: for i in range(0, len(self.single_frame_set)): seq.append(Dataset()) self.target_dataset[pf_tg] = DataElement(pf_tg, - 'SQ', - 
DicomSequence(seq)) + 'SQ', + DicomSequence(seq)) return self.target_dataset[pf_tg].value[index] def _get_shared_item(self) -> Dataset: @@ -130,8 +130,8 @@ def _get_shared_item(self) -> Dataset: if sf_kw not in self.target_dataset: seq = [Dataset()] self.target_dataset[sf_tg] = DataElement(sf_tg, - 'SQ', - DicomSequence(seq)) + 'SQ', + DicomSequence(seq)) return self.target_dataset[sf_tg].value[0] def _get_or_create_attribute( @@ -147,17 +147,17 @@ def _get_or_create_attribute( if a.VR == 'DA' and isinstance(a.value, str): try: a.value = DA(a.value) - except: + except BaseException: a.value = DA(default) if a.VR == 'DT' and isinstance(a.value, str): try: a.value = DT(a.value) - except: + except BaseException: a.value = DT(default) if a.VR == 'TM' and isinstance(a.value, str): try: a.value = TM(a.value) - except: + except BaseException: a.value = TM(default) self._mark_tag_as_used(tg) @@ -478,9 +478,10 @@ def add_module(self) -> None: "MONOCHROME2") LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ else "IDENTITY" - LUT_shape_a = self._get_or_create_attribute(self.single_frame_set[0], - 'PresentationLUTShape', - LUT_shape_default) + LUT_shape_a = self._get_or_create_attribute( + self.single_frame_set[0], + 'PresentationLUTShape', + LUT_shape_default) if not LUT_shape_a.is_empty: self.target_dataset['PresentationLUTShape'] = LUT_shape_a # Icon Image Sequence - always discard these @@ -1493,7 +1494,8 @@ def _add_acquisition_info(self) -> None: item = self._get_perframe_item(i) self._add_module_to_functional_group( self.single_frame_set[i], item) - if self.earliest_frame_acquisition_date_time < self.farthest_future_date_time: + if self.earliest_frame_acquisition_date_time <\ + self.farthest_future_date_time: kw = 'AcquisitionDateTime' self.target_dataset[kw] = DataElement( tag_for_keyword(kw), @@ -2102,10 +2104,10 @@ def get_distance_along_origin(self) -> float: dot(self.top_left_corner_position, n)) def are_parallel(slice1: GeometryOfSlice, - slice2: 
GeometryOfSlice, - tolerance: float = 0.0001) -> bool: - if (not isinstance(slice1, GeometryOfSlice) or - not isinstance(slice2, GeometryOfSlice)): + slice2: GeometryOfSlice, + tolerance: float = 0.0001) -> bool: + if (isinstance(slice1, GeometryOfSlice) == False) or\ + (isinstance(slice2, GeometryOfSlice) == False): print('Error') return False else: @@ -2151,7 +2153,7 @@ def is_equal_float(x1: float, x2: float) -> bool: if len(v11) != len(v22): return False for xx, yy in zip(v11, v22): - if isinstance(xx) == DSfloat or isinstance(xx, float): + if isinstance(xx, DSfloat) or isinstance(xx, float): if not is_equal_float(xx, yy): return False else: From 79f0194fe6baac910c40c0699e35d3aebca825cd Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 4 Oct 2020 18:27:52 +0000 Subject: [PATCH 17/44] mend --- src/highdicom/legacy/sop.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 6c726bf2..0243b1d5 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -647,12 +647,14 @@ def _get_or_create_attribute( from pydicom.valuerep import DT, TM, DA if a.VR == 'DA' and type(a.value) == str: try: - a.value = DA(a.value) + dtmp = DA(a.value) + a.value = DA(default) if dtmp is None else dtmp except: a.value = DA(default) if a.VR == 'DT' and type(a.value) == str: try: - a.value = DT(a.value) + ttmp = DT(a.value) + a.value = DT(default) if ttmp is None else ttmp except: a.value = DT(default) if a.VR == 'TM' and type(a.value) == str: @@ -2105,9 +2107,13 @@ def AddModule(self) -> None: for i in range(0, len(self.SingleFrameSet)): src = self.SingleFrameSet[i] kw = 'ContentDate' - d = DA(self.FarthestFutureDate if kw not in src else src[kw].value) + d_a = self._get_or_create_attribute( + src, kw, self.FarthestFutureDate) + d = d_a.value kw = 'ContentTime' - t = TM(self.FarthestFutureTime if kw not in src else src[kw].value) + t_a = self._get_or_create_attribute( + 
src, kw, self.FarthestFutureTime) + t = t_a.value value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S.%f')) if self.EarliestContentDateTime > value: self.EarliestContentDateTime = value @@ -2708,6 +2714,15 @@ def PerFrameTags(self) -> List[Tag]: @property def SharedTags(self) -> List[Tag]: return self._SharedTags[:] + + @property + def SeriesInstanceUID(self) -> UID: + return self._Frames[0].SeriesInstanceUID + + @property + def StudyInstanceUID(self) -> UID: + return self._Frames[0].StudyInstanceUID + def GetSOPInstanceUIDList(self) -> list: OutputList: list = [] From 942d6fca404ea0b6ba8c5ab4e42ba541dbdad6f2 Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 4 Oct 2020 19:06:51 +0000 Subject: [PATCH 18/44] mend --- src/highdicom/legacy/sop.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 0243b1d5..dbf0c421 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -647,19 +647,20 @@ def _get_or_create_attribute( from pydicom.valuerep import DT, TM, DA if a.VR == 'DA' and type(a.value) == str: try: - dtmp = DA(a.value) - a.value = DA(default) if dtmp is None else dtmp + d_tmp = DA(a.value) + a.value = DA(default) if d_tmp is None else d_tmp except: a.value = DA(default) if a.VR == 'DT' and type(a.value) == str: try: - ttmp = DT(a.value) - a.value = DT(default) if ttmp is None else ttmp + dt_tmp = DT(a.value) + a.value = DT(default) if dt_tmp is None else dt_tmp except: a.value = DT(default) if a.VR == 'TM' and type(a.value) == str: try: - a.value = TM(a.value) + t_tmp = TM(a.value) + a.value = TM(default) if t_tmp is None else t_tmp except: a.value = TM(default) From 4c8da89f4af3d777837042dc98fb6210c37a1929 Mon Sep 17 00:00:00 2001 From: Afshin Date: Mon, 12 Oct 2020 19:29:55 -0400 Subject: [PATCH 19/44] added logger for conversion --- src/highdicom/legacy/sop.py | 215 ++++++++++++++++++++++++++++-------- 1 file changed, 167 insertions(+), 48 
deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index dbf0c421..53c17760 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -4,9 +4,9 @@ from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union, Callable from numpy import log10, array, ceil, cross, dot, ndarray -from pydicom.datadict import tag_for_keyword, dictionary_VR +from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag from pydicom.dataset import Dataset -from pydicom.tag import Tag +from pydicom.tag import Tag, BaseTag from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DicomSequence from pydicom.multival import MultiValue @@ -18,7 +18,7 @@ from highdicom.legacy import SOP_CLASS_UIDS from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP -logger = logging.getLogger(__name__) +# logger = logging.getLogger(__name__) LEGACY_ENHANCED_SOP_CLASS_UID_MAP = { # CT Image Storage '1.2.840.10008.5.1.4.1.1.2': '1.2.840.10008.5.1.4.1.1.2.2', @@ -573,7 +573,7 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: return False return False - def _mark_tag_as_used(self, tg: Tag) -> None: + def _mark_tag_as_used(self, tg: BaseTag) -> None: if tg in self._SharedTags: self._SharedTags[tg] = True elif tg in self.ExcludedFromPerFrameTags: @@ -972,6 +972,27 @@ def AddModule(self) -> None: ref_dataset, self.TargetDataset, kw, check_not_to_be_perframe=True, check_not_to_be_empty=False) + sum_compression_ratio = 0 + c_ratio_tag = tag_for_keyword('LossyImageCompressionRatio') + if tag_for_keyword('LossyImageCompression') in self._SharedTags and \ + tag_for_keyword( + 'LossyImageCompressionMethod') in self._SharedTags and \ + c_ratio_tag in self._PerFrameTags: + for fr_ds in self.SingleFrameSet: + if c_ratio_tag in fr_ds: + ratio = fr_ds[c_ratio_tag].value + try: + sum_compression_ratio += float(ratio) + except: + 
sum_compression_ratio += 1 # supposing uncompressed + else: + supe_compression_ratio += 1 + avg_compression_ratio = sum_compression_ratio /\ + len(self.SingleFrameSet) + avg_ratio_str = '{:.6f}'.format(avg_compression_ratio) + self.TargetDataset[c_ratio_tag] = \ + DataElement(c_ratio_tag, 'DS', avg_ratio_str) + if tag_for_keyword('PresentationLUTShape') not in self._PerFrameTags: # actually should really invert the pixel data if MONOCHROME1, # since only MONOCHROME2 is permitted : ( @@ -1477,28 +1498,39 @@ def _add_module_to_functional_group( 'RescaleType', check_not_to_be_perframe=False, check_not_to_be_empty=True) - if "RescaleType" not in item: - value = '' - modality = '' if 'Modality' not in src_fg\ - else src_fg["Modality"].value - if haveValuesSoAddType: + + value = '' + modality = '' if 'Modality' not in src_fg\ + else src_fg["Modality"].value + if haveValuesSoAddType: + value = 'US' + if modality == 'CT': + containes_localizer = False + ImageType_v = [] if 'ImageType' not in src_fg\ + else src_fg['ImageType'].value + for i in ImageType_v: + if i == 'LOCALIZER': + containes_localizer = True + break + if not containes_localizer: + value = "HU" + # elif modality == 'PT': + # value = 'US' if 'Units' not in src_fg\ + # else src_fg['Units'].value + else: value = 'US' - if modality == 'CT': - containes_localizer = False - ImageType_v = [] if 'ImageType' not in src_fg\ - else src_fg['ImageType'].value - for i in ImageType_v: - if i == 'LOCALIZER': - containes_localizer = True - break - if not containes_localizer: - value = "HU" - elif modality == 'PT': - value = 'US' if 'Units' not in src_fg\ - else src_fg['Units'].value - if value != '': - tg = tag_for_keyword('RescaleType') - item[tg] = DataElement(tg, dictionary_VR(tg), value) + tg = tag_for_keyword('RescaleType') + if "RescaleType" not in item: + item[tg] = DataElement(tg, dictionary_VR(tg), value) + elif item[tg].value != value: + # keep the copied value as LUT explanation + voi_exp_tg = 
tag_for_keyword('LUTExplanation') + item[voi_exp_tg] = DataElement( + voi_exp_tg, dictionary_VR(voi_exp_tg), item[tg].value) + item[tg].value = value + + + kw = 'PixelValueTransformationSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), [item]) @@ -1741,7 +1773,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], shared_tags, multi_frame_output) - def CreateEmptyElement(self, tg: Tag) -> DataElement: + def CreateEmptyElement(self, tg: BaseTag) -> DataElement: return DataElement(tg, dictionary_VR(tg), None) def AddModule(self) -> None: @@ -1827,6 +1859,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], self._slice_location_map: dict = {} def _build_slices_geometry(self) -> None: + logger = logging.getLogger(__name__) frame_count = len(self.SingleFrameSet) for i in range(0, frame_count): curr_frame = self.SingleFrameSet[i] @@ -1864,7 +1897,14 @@ def _build_slices_geometry(self) -> None: self._slices.append(GeometryOfSlice(row, col, tpl, voxel_spaceing, dim)) else: - print("Error in geometri ...") + logger.error( + "Error in geometry. 
One or more required " + "attributes are not available") + logger.error("\tImageOrientationPatient = {}".format( + ImageOrientationPatient_v)) + logger.error("\tImagePositionPatient = {}".format( + ImagePositionPatient_v)) + logger.error("\tPixelSpacing = {}".format(PixelSpacing_v)) self._slices = [] # clear the slices break @@ -1885,12 +1925,18 @@ def _are_all_slices_parallel(self) -> bool: return False def _add_stack_info(self) -> None: + logger = logging.getLogger(__name__) self._build_slices_geometry() round_digits = int(ceil(-log10(self._tolerance))) if self._are_all_slices_parallel(): self._slice_location_map = {} for idx, s in enumerate(self._slices): - dist = round(s.GetDistanceAlongOrigin(), round_digits) + not_round_dist = s.GetDistanceAlongOrigin() + dist = round(not_round_dist, round_digits) + logger.debug( + 'Slice locaation {} rounded by {} digits to {}'.format( + not_round_dist, round_digits, dist + )) if dist in self._slice_location_map: self._slice_location_map[dist].append(idx) else: @@ -1899,7 +1945,10 @@ def _add_stack_info(self) -> None: frame_content_tg = tag_for_keyword("FrameContentSequence") for loc, idxs in sorted(self._slice_location_map.items()): if len(idxs) != 1: - print('Error') + logger.warning( + 'There are {} slices in one location {}'.format( + len(idx), loc) + ) for frame_index in idxs: frame = self._get_perframe_item(frame_index) new_item = frame[frame_content_tg].value[0] @@ -2105,15 +2154,32 @@ def __init__(self, sf_datasets: Sequence[Dataset], self.EarliestContentDateTime = self.FarthestFutureDateTime def AddModule(self) -> None: + default_atrs = ["Acquisition", "Series", "Study"] for i in range(0, len(self.SingleFrameSet)): src = self.SingleFrameSet[i] + default_date = self.FarthestFutureDate + for def_atr in default_atrs: + at_tg = tag_for_keyword(def_atr + "Date") + if at_tg in src: + val = src[at_tg].value + if isinstance(val, DA): + default_date = val + break kw = 'ContentDate' d_a = self._get_or_create_attribute( - src, 
kw, self.FarthestFutureDate) + src, kw, default_date) d = d_a.value + default_time = self.FarthestFutureTime + for def_atr in default_atrs: + at_tg = tag_for_keyword(def_atr + "Time") + if at_tg in src: + val = src[at_tg].value + if isinstance(val, TM): + default_time = val + break kw = 'ContentTime' t_a = self._get_or_create_attribute( - src, kw, self.FarthestFutureTime) + src, kw, default_time) t = t_a.value value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S.%f')) if self.EarliestContentDateTime > value: @@ -2262,23 +2328,35 @@ def __init__( if sort_key is None: sort_key = LegacyConvertedEnhanceImage.default_sort_key super().__init__( - study_instance_uid=ref_ds.StudyInstanceUID, + study_instance_uid= None if 'StudyInstanceUID' not in ref_ds \ + else ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, series_number=series_number, sop_instance_uid=sop_instance_uid, sop_class_uid=sop_class_uid, instance_number=instance_number, - manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, - patient_id=ref_ds.PatientID, - patient_name=ref_ds.PatientName, - patient_birth_date=ref_ds.PatientBirthDate, - patient_sex=ref_ds.PatientSex, - accession_number=ref_ds.AccessionNumber, - study_id=ref_ds.StudyID, - study_date=ref_ds.StudyDate, - study_time=ref_ds.StudyTime, - referring_physician_name=ref_ds.ReferringPhysicianName, + manufacturer= None if 'Manufacturer' not in ref_ds \ + else ref_ds.Manufacturer, + modality= None if 'Modality' not in ref_ds \ + else ref_ds.Modality, + patient_id= None if 'PatientID' not in ref_ds \ + else ref_ds.PatientID, + patient_name= None if 'PatientName' not in ref_ds \ + else ref_ds.PatientName, + patient_birth_date= None if 'PatientBirthDate' not in ref_ds \ + else ref_ds.PatientBirthDate, + patient_sex= None if 'PatientSex' not in ref_ds \ + else ref_ds.PatientSex, + accession_number= None if 'AccessionNumber' not in ref_ds \ + else ref_ds.AccessionNumber, + study_id= None if 'StudyID' not in ref_ds \ + else 
ref_ds.StudyID, + study_date= None if 'StudyDate' not in ref_ds \ + else ref_ds.StudyDate, + study_time= None if 'StudyTime' not in ref_ds \ + else ref_ds.StudyTime, + referring_physician_name= None if 'ReferringPhysicianName' not in ref_ds \ + else ref_ds.ReferringPhysicianName, **kwargs) self._legacy_datasets = legacy_datasets self.DistinguishingAttributesTags = self._get_tag_used_dictionary( @@ -2580,8 +2658,11 @@ def AddBuildBlocksForPET(self) -> None: self.AddPETSpecificBuildBlocks() def BuildMultiFrame(self) -> None: + logger = logging.getLogger(__name__) + logger.debug('Strt singleframe to multiframe conversion') for builder in self.__build_blocks: builder.AddModule() + logger.debug('Conversion succeeded') class GeometryOfSlice: @@ -2610,9 +2691,14 @@ def GetDistanceAlongOrigin(self) -> float: def AreParallel(slice1: GeometryOfSlice, slice2: GeometryOfSlice, tolerance: float = 0.0001) -> bool: + logger = logging.getLogger(__name__) if (type(slice1) != GeometryOfSlice or type(slice2) != GeometryOfSlice): - print('Error') + logger.warning( + 'slice1 and slice2 are not of the same ' + 'type: type(slice1) = {} and type(slice2) = {}'.format( + type(slice1), type(slice2) + )) return False else: n1: ndarray = slice1.GetNormalVector() @@ -2627,15 +2713,15 @@ class DicomHelper: def __init__(self) -> None: pass - def istag_file_meta_information_group(t: Tag) -> bool: + def istag_file_meta_information_group(t: BaseTag) -> bool: return t.group == 0x0002 - def istag_repeating_group(t: Tag) -> bool: + def istag_repeating_group(t: BaseTag) -> bool: g = t.group return (g >= 0x5000 and g <= 0x501e) or\ (g >= 0x6000 and g <= 0x601e) - def istag_group_length(t: Tag) -> bool: + def istag_group_length(t: BaseTag) -> bool: return t.element == 0 def isequal(v1: Any, v2: Any) -> bool: @@ -2679,6 +2765,15 @@ def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: return False return True + def tag2str(tg: BaseTag) -> str: + if not isinstance(tg, BaseTag): + tg = Tag(tg) + 
return '(0x{:0>4x}, 0x{:0>4x})'.format(tg.group, tg.element) + + def tag2kwstr(tg: BaseTag) -> str: + return '{}-{:32.32s}'.format( + DicomHelper.tag2str(tg), keyword_for_tag(tg)) + class FrameSet: def __init__(self, single_frame_list: list, @@ -2735,6 +2830,7 @@ def GetSOPClassUID(self) -> UID: return self._Frames[0].SOPClassUID def _find_per_frame_and_shared_tags(self) -> None: + logger = logging.getLogger(__name__) rough_shared: dict = {} sfs = self.Frames for ds in sfs: @@ -2775,12 +2871,13 @@ def _find_per_frame_and_shared_tags(self) -> None: self._SharedTags.append(t) self._PerFrameTags.remove(t) - def _istag_excluded_from_perframe(self, t: Tag) -> bool: + def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: return t in self.ExcludedFromPerFrameTags class FrameSetCollection: def __init__(self, single_frame_list: list): + logger = logging.getLogger(__name__) self.MixedFrames = single_frame_list self.MixedFramesCopy = self.MixedFrames[:] self._DistinguishingAttributeKeywords = [ @@ -2816,9 +2913,25 @@ def __init__(self, single_frame_list: list): 'AcquisitionContextSequence'] to_be_removed_from_distinguishing_attribs: set = set() self._FrameSets: list = [] + frameset_counter = 0 while len(self.MixedFramesCopy) != 0: + frameset_counter += 1 x = self._find_all_similar_to_first_datasets() self._FrameSets.append(FrameSet(x[0], x[1])) + # log information + logger.debug("Frameset({:02d}) including {:03d} frames".format( + frameset_counter, len(x[0]))) + logger.debug('\t Distinguishing tags:') + for dg_i, dg_tg in enumerate(x[1], 1): + logger.debug('\t\t{:02d}/{})\t{}-{:32.32s} = {:32.32s}'.format( + dg_i, len(x[1]), DicomHelper.tag2str(dg_tg), + keyword_for_tag(dg_tg), + str(x[0][0][dg_tg].value))) + logger.debug('\t dicom datasets in this frame set:') + for dicom_i, dicom_ds in enumerate(x[0], 1): + logger.debug('\t\t{}/{})\t {}'.format( + dicom_i, len(x[0]), dicom_ds['SOPInstanceUID'])) + for kw in to_be_removed_from_distinguishing_attribs: 
self.DistinguishingAttributeKeywords.remove(kw) self.ExcludedFromPerFrameTags = {} @@ -2834,6 +2947,7 @@ def __init__(self, single_frame_list: list): tag_for_keyword('SpecificCharacterSet'): False} def _find_all_similar_to_first_datasets(self) -> tuple: + logger = logging.getLogger(__name__) similar_ds: list = [self.MixedFramesCopy[0]] distinguishing_tags_existing = [] distinguishing_tags_missing = [] @@ -2848,6 +2962,8 @@ def _find_all_similar_to_first_datasets(self) -> tuple: all_equal = True for tg in distinguishing_tags_missing: if tg in ds: + logger.info('{} is missing in all but {}'.format( + DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) all_equal = False break if not all_equal: @@ -2859,6 +2975,9 @@ def _find_all_similar_to_first_datasets(self) -> tuple: break new_val = ds[tg].value if not DicomHelper.isequal(ref_val, new_val): + logger.info( + 'Inequality on distinguishing attribute{} -> {} != {}'.format( + DicomHelper.tag2kwstr(tg), ref_val, new_val)) all_equal = False break if all_equal: From 477f6147bfa5e110fb1f438ff6da81cde9ff6a2d Mon Sep 17 00:00:00 2001 From: afshin Date: Sat, 24 Oct 2020 00:02:17 +0000 Subject: [PATCH 20/44] mend --- src/highdicom/legacy/sop.py | 77 ++++++++++++++++++++++--------------- 1 file changed, 46 insertions(+), 31 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 53c17760..bc3e66be 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -56,6 +56,7 @@ def _convert_legacy_to_enhanced( Frames will be included into the Pixel Data element in the order in which instances are provided via `sf_datasets`. 
""" + logger = logging.getLogger(__name__) try: ref_ds = sf_datasets[0] except IndexError: @@ -1947,7 +1948,7 @@ def _add_stack_info(self) -> None: if len(idxs) != 1: logger.warning( 'There are {} slices in one location {}'.format( - len(idx), loc) + len(idxs), loc) ) for frame_index in idxs: frame = self._get_perframe_item(frame_index) @@ -2328,35 +2329,35 @@ def __init__( if sort_key is None: sort_key = LegacyConvertedEnhanceImage.default_sort_key super().__init__( - study_instance_uid= None if 'StudyInstanceUID' not in ref_ds \ - else ref_ds.StudyInstanceUID, + study_instance_uid=None if 'StudyInstanceUID' not in ref_ds + else ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, series_number=series_number, sop_instance_uid=sop_instance_uid, sop_class_uid=sop_class_uid, instance_number=instance_number, - manufacturer= None if 'Manufacturer' not in ref_ds \ - else ref_ds.Manufacturer, - modality= None if 'Modality' not in ref_ds \ - else ref_ds.Modality, - patient_id= None if 'PatientID' not in ref_ds \ - else ref_ds.PatientID, - patient_name= None if 'PatientName' not in ref_ds \ - else ref_ds.PatientName, - patient_birth_date= None if 'PatientBirthDate' not in ref_ds \ - else ref_ds.PatientBirthDate, - patient_sex= None if 'PatientSex' not in ref_ds \ - else ref_ds.PatientSex, - accession_number= None if 'AccessionNumber' not in ref_ds \ - else ref_ds.AccessionNumber, - study_id= None if 'StudyID' not in ref_ds \ - else ref_ds.StudyID, - study_date= None if 'StudyDate' not in ref_ds \ - else ref_ds.StudyDate, - study_time= None if 'StudyTime' not in ref_ds \ - else ref_ds.StudyTime, - referring_physician_name= None if 'ReferringPhysicianName' not in ref_ds \ - else ref_ds.ReferringPhysicianName, + manufacturer=None if 'Manufacturer' not in ref_ds + else ref_ds.Manufacturer, + modality=None if 'Modality' not in ref_ds + else ref_ds.Modality, + patient_id=None if 'PatientID' not in ref_ds + else ref_ds.PatientID, + patient_name=None if 'PatientName' 
not in ref_ds + else ref_ds.PatientName, + patient_birth_date=None if 'PatientBirthDate' not in ref_ds + else ref_ds.PatientBirthDate, + patient_sex=None if 'PatientSex' not in ref_ds + else ref_ds.PatientSex, + accession_number=None if 'AccessionNumber' not in ref_ds + else ref_ds.AccessionNumber, + study_id=None if 'StudyID' not in ref_ds + else ref_ds.StudyID, + study_date=None if 'StudyDate' not in ref_ds + else ref_ds.StudyDate, + study_time=None if 'StudyTime' not in ref_ds + else ref_ds.StudyTime, + referring_physician_name=None if 'ReferringPhysicianName' not in + ref_ds else ref_ds.ReferringPhysicianName, **kwargs) self._legacy_datasets = legacy_datasets self.DistinguishingAttributesTags = self._get_tag_used_dictionary( @@ -2913,11 +2914,13 @@ def __init__(self, single_frame_list: list): 'AcquisitionContextSequence'] to_be_removed_from_distinguishing_attribs: set = set() self._FrameSets: list = [] + frame_counts = [] frameset_counter = 0 while len(self.MixedFramesCopy) != 0: frameset_counter += 1 x = self._find_all_similar_to_first_datasets() self._FrameSets.append(FrameSet(x[0], x[1])) + frame_counts.append(len(x[0])) # log information logger.debug("Frameset({:02d}) including {:03d} frames".format( frameset_counter, len(x[0]))) @@ -2931,7 +2934,12 @@ def __init__(self, single_frame_list: list): for dicom_i, dicom_ds in enumerate(x[0], 1): logger.debug('\t\t{}/{})\t {}'.format( dicom_i, len(x[0]), dicom_ds['SOPInstanceUID'])) - + frames = '' + for i, f_count in enumerate(frame_counts, 1): + frames += '{: 2d}){:03d}\t'.format(i, f_count) + frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( + len(frame_counts), len(self.MixedFrames)) + frames + logger.info(frames) for kw in to_be_removed_from_distinguishing_attribs: self.DistinguishingAttributeKeywords.remove(kw) self.ExcludedFromPerFrameTags = {} @@ -2958,12 +2966,15 @@ def _find_all_similar_to_first_datasets(self) -> tuple: distinguishing_tags_existing.append(tg) else: 
distinguishing_tags_missing.append(tg) + logger_msg = set() for ds in self.MixedFramesCopy: all_equal = True for tg in distinguishing_tags_missing: if tg in ds: - logger.info('{} is missing in all but {}'.format( - DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) + logger.info() + logger_msg.add( + '{} is missing in all but {}'.format( + DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) all_equal = False break if not all_equal: @@ -2975,13 +2986,17 @@ def _find_all_similar_to_first_datasets(self) -> tuple: break new_val = ds[tg].value if not DicomHelper.isequal(ref_val, new_val): - logger.info( - 'Inequality on distinguishing attribute{} -> {} != {}'.format( - DicomHelper.tag2kwstr(tg), ref_val, new_val)) + logger_msg.add( + 'Inequality on distinguishing ' + 'attribute{} -> {} != {} \n series uid = {}'.format( + DicomHelper.tag2kwstr(tg), ref_val, new_val, + ds.SeriesInstanceUID)) all_equal = False break if all_equal: similar_ds.append(ds) + for msg_ in logger_msg: + logger.info(msg_) for ds in similar_ds: if ds in self.MixedFramesCopy: self.MixedFramesCopy.remove(ds) From 4a7c9069874d85f5b2571bb7a43113d7860d214a Mon Sep 17 00:00:00 2001 From: Afshin Date: Wed, 28 Oct 2020 18:59:00 -0400 Subject: [PATCH 21/44] Modified the way frameanatomysequence was added --- src/highdicom/legacy/sop.py | 43 +++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index bc3e66be..2c7f8c63 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1188,27 +1188,28 @@ def _add_module_to_functional_group( self._copy_attrib_if_present(src_fg, item, 'AnatomicRegionSequence', check_not_to_be_perframe=False, check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg, item, 'FrameLaterality', - check_not_to_be_perframe=False, - check_not_to_be_empty=True) - if 'FrameLaterality' not in item: - self._copy_attrib_if_present(src_fg, item, 'ImageLaterality', - 
'FrameLaterality', - check_not_to_be_perframe=False, - check_not_to_be_empty=True) - if 'FrameLaterality' not in item: - self._copy_attrib_if_present(src_fg, item, 'Laterality', - 'FrameLaterality', - check_not_to_be_perframe=False, - check_not_to_be_empty=True) - if 'FrameLaterality' not in item: - FrameLaterality_a = self._get_or_create_attribute( - src_fg, 'FrameLaterality', "U") - item['FrameLaterality'] = FrameLaterality_a - FrameAnatomy_a = DataElement(fa_seq_tg, - dictionary_VR(fa_seq_tg), - [item]) - dest_fg['FrameAnatomySequence'] = FrameAnatomy_a + if len(item) != 0: + self._copy_attrib_if_present(src_fg, item, 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + if 'FrameLaterality' not in item: + self._copy_attrib_if_present(src_fg, item, 'ImageLaterality', + 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + if 'FrameLaterality' not in item: + self._copy_attrib_if_present(src_fg, item, 'Laterality', + 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + if 'FrameLaterality' not in item: + FrameLaterality_a = self._get_or_create_attribute( + src_fg, 'FrameLaterality', "U") + item['FrameLaterality'] = FrameLaterality_a + FrameAnatomy_a = DataElement(fa_seq_tg, + dictionary_VR(fa_seq_tg), + [item]) + dest_fg['FrameAnatomySequence'] = FrameAnatomy_a def _contains_right_attributes(self, tags: dict) -> bool: laterality_tg = tag_for_keyword('Laterality') From 4f17d1184cdc8ac19dc5c7f608fdb42dc4ca16d3 Mon Sep 17 00:00:00 2001 From: Afshin Date: Sat, 7 Nov 2020 20:46:04 -0500 Subject: [PATCH 22/44] mend --- src/highdicom/legacy/sop.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 2c7f8c63..c6a7e7a0 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -910,14 +910,16 @@ def AddModule(self) -> None: inner_item = Dataset() 
self._add_module_to_functional_group(self.SingleFrameSet[0], inner_item, 1) - item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) + item[seq_tg] = DataElement( + seq_tg, 'SQ', DicomSequence([inner_item])) else: for i in range(0, len(self.SingleFrameSet)): item = self._get_perframe_item(i) inner_item = Dataset() self._add_module_to_functional_group(self.SingleFrameSet[i], inner_item, 1) - item[seq_tg] = DataElement(seq_tg, 'SQ', [inner_item]) + item[seq_tg] = DataElement( + seq_tg, 'SQ', DicomSequence([inner_item])) class EnhancedCommonImageModule(Abstract_MultiframeModuleAdder): @@ -1208,7 +1210,7 @@ def _add_module_to_functional_group( item['FrameLaterality'] = FrameLaterality_a FrameAnatomy_a = DataElement(fa_seq_tg, dictionary_VR(fa_seq_tg), - [item]) + DicomSequence([item])) dest_fg['FrameAnatomySequence'] = FrameAnatomy_a def _contains_right_attributes(self, tags: dict) -> bool: @@ -1281,7 +1283,7 @@ def _add_module_to_functional_group( pixel_measures_tg = tag_for_keyword(pixel_measures_kw) seq = DataElement(pixel_measures_tg, dictionary_VR(pixel_measures_tg), - [item]) + DicomSequence([item])) dest_fg[pixel_measures_tg] = seq def AddModule(self) -> None: @@ -1330,7 +1332,7 @@ def _add_module_to_functional_group( PlanePositionSequence_tg = tag_for_keyword(PlanePositionSequence_kw) seq = DataElement(PlanePositionSequence_tg, dictionary_VR(PlanePositionSequence_tg), - [item]) + DicomSequence([item])) dest_fg[PlanePositionSequence_tg] = seq def AddModule(self) -> None: @@ -1377,7 +1379,7 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'PlaneOrientationSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), [item]) + seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) dest_fg[tg] = seq def AddModule(self) -> None: @@ -1439,7 +1441,7 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'FrameVOILUTSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), [item]) 
+ seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) dest_fg[tg] = seq def AddModule(self) -> None: @@ -1535,7 +1537,7 @@ def _add_module_to_functional_group( kw = 'PixelValueTransformationSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), [item]) + seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) dest_fg[tg] = seq def AddModule(self) -> None: @@ -1632,7 +1634,7 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'DerivationImageSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), [item]) + seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) dest_fg[tg] = seq def AddModule(self) -> None: @@ -1676,7 +1678,7 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'UnassignedPerFrameConvertedAttributesSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), [item]) + seq = DataElement(tg, dictionary_VR(tg),DicomSequence([item])) dest_fg[tg] = seq def _add_largest_smallest_pixle_value(self) -> None: @@ -1751,7 +1753,7 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'UnassignedSharedConvertedAttributesSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), [item]) + seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) dest_fg[tg] = seq def AddModule(self) -> None: @@ -1830,7 +1832,7 @@ def _add_module_to_functional_group( check_not_to_be_empty=True) kw = 'ConversionSourceAttributesSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), [item]) + seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) dest_fg[tg] = seq def AddModule(self) -> None: @@ -2041,7 +2043,8 @@ def _add_module_to_functional_group( check_not_to_be_empty=True) # ----------------------------------- seq_tg = tag_for_keyword('FrameContentSequence') - dest_fg[seq_tg] = DataElement(seq_tg, dictionary_VR(seq_tg), [item]) + dest_fg[seq_tg] = DataElement( + seq_tg, 
dictionary_VR(seq_tg), DicomSequence([item])) # Also we want to add the earliest frame acq date time to the multiframe: def _add_acquisition_info(self) -> None: @@ -2265,7 +2268,7 @@ def AddModule(self) -> None: 'DCM') PurposeOfReferenceCode_seq = DataElement( tag_for_keyword('PurposeOfReferenceCodeSequence'), - 'SQ', [PurposeOfReferenceCode_item]) + 'SQ', DicomSequence([PurposeOfReferenceCode_item])) item: Dataset = Dataset() item[ 'PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq @@ -2288,7 +2291,7 @@ def AddModule(self) -> None: "ContributionDescription", 'Legacy Enhanced Image created from Classic Images') tg = tag_for_keyword('ContributingEquipmentSequence') - self.TargetDataset[tg] = DataElement(tg, 'SQ', [item]) + self.TargetDataset[tg] = DataElement(tg, 'SQ', DicomSequence([item])) class LegacyConvertedEnhanceImage(SOPClass): From 7e2aa9ca83766c0a7eee16b3d7ceb04be1a04b33 Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 8 Nov 2020 02:37:40 +0000 Subject: [PATCH 23/44] mend --- src/highdicom/legacy/sop.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index c6a7e7a0..136c6aba 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1932,6 +1932,7 @@ def _add_stack_info(self) -> None: logger = logging.getLogger(__name__) self._build_slices_geometry() round_digits = int(ceil(-log10(self._tolerance))) + source_series_uid = '' if self._are_all_slices_parallel(): self._slice_location_map = {} for idx, s in enumerate(self._slices): @@ -1949,9 +1950,13 @@ def _add_stack_info(self) -> None: frame_content_tg = tag_for_keyword("FrameContentSequence") for loc, idxs in sorted(self._slice_location_map.items()): if len(idxs) != 1: + if source_series_uid == '': + source_series_uid = \ + self.SingleFrameSet[0].SeriesInstanceUID logger.warning( - 'There are {} slices in one location {}'.format( - len(idxs), loc) + 'There are {} slices in one location {} 
on ' + 'series = {}'.format( + len(idxs), loc, source_series_uid) ) for frame_index in idxs: frame = self._get_perframe_item(frame_index) @@ -2975,7 +2980,6 @@ def _find_all_similar_to_first_datasets(self) -> tuple: all_equal = True for tg in distinguishing_tags_missing: if tg in ds: - logger.info() logger_msg.add( '{} is missing in all but {}'.format( DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) From a179f71983f0a2f848b8241a4c4223f29193b77e Mon Sep 17 00:00:00 2001 From: afshin Date: Mon, 9 Nov 2020 00:12:34 +0000 Subject: [PATCH 24/44] mend --- src/highdicom/legacy/sop.py | 86 +++++++++++++++++++------------------ 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 136c6aba..e345fc1e 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,5 +1,9 @@ """ Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" -from __future__ import annotations +import sys +expect_major = 3 +expect_minor = 7 +if sys.version_info[:2] != (expect_major, expect_minor): + from __future__ import annotations import logging from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union, Callable @@ -650,19 +654,19 @@ def _get_or_create_attribute( try: d_tmp = DA(a.value) a.value = DA(default) if d_tmp is None else d_tmp - except: + except BaseException: a.value = DA(default) if a.VR == 'DT' and type(a.value) == str: try: dt_tmp = DT(a.value) a.value = DT(default) if dt_tmp is None else dt_tmp - except: + except BaseException: a.value = DT(default) if a.VR == 'TM' and type(a.value) == str: try: t_tmp = TM(a.value) a.value = TM(default) if t_tmp is None else t_tmp - except: + except BaseException: a.value = TM(default) self._mark_tag_as_used(tg) @@ -975,9 +979,9 @@ def AddModule(self) -> None: ref_dataset, self.TargetDataset, kw, check_not_to_be_perframe=True, check_not_to_be_empty=False) - sum_compression_ratio = 0 + sum_compression_ratio: 
float = 0 c_ratio_tag = tag_for_keyword('LossyImageCompressionRatio') - if tag_for_keyword('LossyImageCompression') in self._SharedTags and \ + if tag_for_keyword('LossyImageCompression') in self._SharedTags and \ tag_for_keyword( 'LossyImageCompressionMethod') in self._SharedTags and \ c_ratio_tag in self._PerFrameTags: @@ -986,10 +990,10 @@ def AddModule(self) -> None: ratio = fr_ds[c_ratio_tag].value try: sum_compression_ratio += float(ratio) - except: - sum_compression_ratio += 1 # supposing uncompressed + except BaseException: + sum_compression_ratio += 1 # supposing uncompressed else: - supe_compression_ratio += 1 + sum_compression_ratio += 1 avg_compression_ratio = sum_compression_ratio /\ len(self.SingleFrameSet) avg_ratio_str = '{:.6f}'.format(avg_compression_ratio) @@ -1191,26 +1195,30 @@ def _add_module_to_functional_group( check_not_to_be_perframe=False, check_not_to_be_empty=False) if len(item) != 0: - self._copy_attrib_if_present(src_fg, item, 'FrameLaterality', - check_not_to_be_perframe=False, - check_not_to_be_empty=True) + self._copy_attrib_if_present( + src_fg, item, 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) if 'FrameLaterality' not in item: - self._copy_attrib_if_present(src_fg, item, 'ImageLaterality', - 'FrameLaterality', - check_not_to_be_perframe=False, - check_not_to_be_empty=True) + self._copy_attrib_if_present( + src_fg, item, 'ImageLaterality', + 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) if 'FrameLaterality' not in item: - self._copy_attrib_if_present(src_fg, item, 'Laterality', - 'FrameLaterality', - check_not_to_be_perframe=False, - check_not_to_be_empty=True) + self._copy_attrib_if_present( + src_fg, item, 'Laterality', + 'FrameLaterality', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) if 'FrameLaterality' not in item: FrameLaterality_a = self._get_or_create_attribute( src_fg, 'FrameLaterality', "U") item['FrameLaterality'] = 
FrameLaterality_a - FrameAnatomy_a = DataElement(fa_seq_tg, - dictionary_VR(fa_seq_tg), - DicomSequence([item])) + FrameAnatomy_a = DataElement( + fa_seq_tg, + dictionary_VR(fa_seq_tg), + DicomSequence([item])) dest_fg['FrameAnatomySequence'] = FrameAnatomy_a def _contains_right_attributes(self, tags: dict) -> bool: @@ -1502,7 +1510,6 @@ def _add_module_to_functional_group( 'RescaleType', check_not_to_be_perframe=False, check_not_to_be_empty=True) - value = '' modality = '' if 'Modality' not in src_fg\ else src_fg["Modality"].value @@ -1525,16 +1532,13 @@ def _add_module_to_functional_group( value = 'US' tg = tag_for_keyword('RescaleType') if "RescaleType" not in item: - item[tg] = DataElement(tg, dictionary_VR(tg), value) + item[tg] = DataElement(tg, dictionary_VR(tg), value) elif item[tg].value != value: # keep the copied value as LUT explanation voi_exp_tg = tag_for_keyword('LUTExplanation') item[voi_exp_tg] = DataElement( voi_exp_tg, dictionary_VR(voi_exp_tg), item[tg].value) item[tg].value = value - - - kw = 'PixelValueTransformationSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) @@ -1678,7 +1682,7 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'UnassignedPerFrameConvertedAttributesSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg),DicomSequence([item])) + seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) dest_fg[tg] = seq def _add_largest_smallest_pixle_value(self) -> None: @@ -1956,8 +1960,7 @@ def _add_stack_info(self) -> None: logger.warning( 'There are {} slices in one location {} on ' 'series = {}'.format( - len(idxs), loc, source_series_uid) - ) + len(idxs), loc, source_series_uid)) for frame_index in idxs: frame = self._get_perframe_item(frame_index) new_item = frame[frame_content_tg].value[0] @@ -2121,6 +2124,8 @@ def AddModule(self) -> None: self._number_of_pixels = row * col * self._frame_count kw = "PixelData" for i in range(0, 
len(self.SingleFrameSet)): + if kw not in self.SingleFrameSet[i]: + continue PixelData_a = self.SingleFrameSet[i][kw] if self._is_other_byte_vr(PixelData_a.VR): if len(self._word_data) != 0: @@ -2338,16 +2343,16 @@ def __init__( if sort_key is None: sort_key = LegacyConvertedEnhanceImage.default_sort_key super().__init__( - study_instance_uid=None if 'StudyInstanceUID' not in ref_ds + study_instance_uid="" if 'StudyInstanceUID' not in ref_ds else ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, series_number=series_number, sop_instance_uid=sop_instance_uid, sop_class_uid=sop_class_uid, instance_number=instance_number, - manufacturer=None if 'Manufacturer' not in ref_ds + manufacturer="" if 'Manufacturer' not in ref_ds else ref_ds.Manufacturer, - modality=None if 'Modality' not in ref_ds + modality="" if 'Modality' not in ref_ds else ref_ds.Modality, patient_id=None if 'PatientID' not in ref_ds else ref_ds.PatientID, @@ -2820,16 +2825,15 @@ def PerFrameTags(self) -> List[Tag]: @property def SharedTags(self) -> List[Tag]: return self._SharedTags[:] - + @property def SeriesInstanceUID(self) -> UID: return self._Frames[0].SeriesInstanceUID - + @property def StudyInstanceUID(self) -> UID: return self._Frames[0].StudyInstanceUID - def GetSOPInstanceUIDList(self) -> list: OutputList: list = [] for f in self._Frames: @@ -2840,7 +2844,7 @@ def GetSOPClassUID(self) -> UID: return self._Frames[0].SOPClassUID def _find_per_frame_and_shared_tags(self) -> None: - logger = logging.getLogger(__name__) + # logger = logging.getLogger(__name__) rough_shared: dict = {} sfs = self.Frames for ds in sfs: @@ -2948,12 +2952,12 @@ def __init__(self, single_frame_list: list): frames += '{: 2d}){:03d}\t'.format(i, f_count) frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( len(frame_counts), len(self.MixedFrames)) + frames - logger.info(frames) + logger.info(frames) for kw in to_be_removed_from_distinguishing_attribs: 
self.DistinguishingAttributeKeywords.remove(kw) self.ExcludedFromPerFrameTags = {} - for i in self.DistinguishingAttributeKeywords: - self.ExcludedFromPerFrameTags[tag_for_keyword(i)] = False + for kwkw in self.DistinguishingAttributeKeywords: + self.ExcludedFromPerFrameTags[tag_for_keyword(kwkw)] = False self.ExcludedFromPerFrameTags[ tag_for_keyword('AcquisitionDateTime')] = False self.ExcludedFromPerFrameTags[ From 5b0bdba6881b46057c72816f53e745a837bb61fc Mon Sep 17 00:00:00 2001 From: afshin Date: Mon, 9 Nov 2020 00:35:45 +0000 Subject: [PATCH 25/44] mend --- src/highdicom/legacy/sop.py | 472 ++++++++++++++++++------------------ 1 file changed, 234 insertions(+), 238 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index e345fc1e..6045f918 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,9 +1,5 @@ """ Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" -import sys -expect_major = 3 -expect_minor = 7 -if sys.version_info[:2] != (expect_major, expect_minor): - from __future__ import annotations +from __future__ import annotations import logging from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union, Callable @@ -2304,6 +2300,239 @@ def AddModule(self) -> None: self.TargetDataset[tg] = DataElement(tg, 'SQ', DicomSequence([item])) +class FrameSet: + def __init__(self, single_frame_list: list, + distinguishing_tags: list): + self._Frames = single_frame_list + self._DistinguishingAttributesTags = distinguishing_tags + tmp = [ + tag_for_keyword('AcquisitionDateTime'), + tag_for_keyword('AcquisitionDate'), + tag_for_keyword('AcquisitionTime'), + tag_for_keyword('SpecificCharacterSet')] + self._ExcludedFromPerFrameTags =\ + self.DistinguishingAttributesTags + tmp + self._PerFrameTags: list = [] + self._SharedTags: list = [] + self._find_per_frame_and_shared_tags() + + @property + def Frames(self) -> List[Dataset]: + return self._Frames[:] + + 
@property + def DistinguishingAttributesTags(self) -> List[Tag]: + return self._DistinguishingAttributesTags[:] + + @property + def ExcludedFromPerFrameTags(self) -> List[Tag]: + return self._ExcludedFromPerFrameTags[:] + + @property + def PerFrameTags(self) -> List[Tag]: + return self._PerFrameTags[:] + + @property + def SharedTags(self) -> List[Tag]: + return self._SharedTags[:] + + @property + def SeriesInstanceUID(self) -> UID: + return self._Frames[0].SeriesInstanceUID + + @property + def StudyInstanceUID(self) -> UID: + return self._Frames[0].StudyInstanceUID + + def GetSOPInstanceUIDList(self) -> list: + OutputList: list = [] + for f in self._Frames: + OutputList.append(f.SOPInstanceUID) + return OutputList + + def GetSOPClassUID(self) -> UID: + return self._Frames[0].SOPClassUID + + def _find_per_frame_and_shared_tags(self) -> None: + # logger = logging.getLogger(__name__) + rough_shared: dict = {} + sfs = self.Frames + for ds in sfs: + for ttag, elem in ds.items(): + if (not ttag.is_private and not + DicomHelper.istag_file_meta_information_group(ttag) and not + DicomHelper.istag_repeating_group(ttag) and not + DicomHelper.istag_group_length(ttag) and not + self._istag_excluded_from_perframe(ttag) and + ttag != tag_for_keyword('PixelData')): + elem = ds[ttag] + if ttag not in self._PerFrameTags: + self._PerFrameTags.append(ttag) + if ttag in rough_shared: + rough_shared[ttag].append(elem.value) + else: + rough_shared[ttag] = [elem.value] + to_be_removed_from_shared = [] + for ttag, v in rough_shared.items(): + v = rough_shared[ttag] + if len(v) < len(self.Frames): + to_be_removed_from_shared.append(ttag) + else: + all_values_are_equal = True + for v_i in v: + if not DicomHelper.isequal(v_i, v[0]): + all_values_are_equal = False + break + if not all_values_are_equal: + to_be_removed_from_shared.append(ttag) + from pydicom.datadict import keyword_for_tag + for t, v in rough_shared.items(): + if keyword_for_tag(t) != 'PatientSex': + continue + for t in 
to_be_removed_from_shared: + del rough_shared[t] + for t, v in rough_shared.items(): + self._SharedTags.append(t) + self._PerFrameTags.remove(t) + + def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: + return t in self.ExcludedFromPerFrameTags + + +class FrameSetCollection: + def __init__(self, single_frame_list: list): + logger = logging.getLogger(__name__) + self.MixedFrames = single_frame_list + self.MixedFramesCopy = self.MixedFrames[:] + self._DistinguishingAttributeKeywords = [ + 'PatientID', + 'PatientName', + 'Manufacturer', + 'InstitutionName', + 'InstitutionAddress', + 'StationName', + 'InstitutionalDepartmentName', + 'ManufacturerModelName', + 'DeviceSerialNumber', + 'SoftwareVersions', + 'GantryID', + 'PixelPaddingValue', + 'Modality', + 'ImageType', + 'BurnedInAnnotation', + 'SOPClassUID', + 'Rows', + 'Columns', + 'BitsStored', + 'BitsAllocated', + 'HighBit', + 'PixelRepresentation', + 'PhotometricInterpretation', + 'PlanarConfiguration', + 'SamplesPerPixel', + 'ProtocolName', + 'ImageOrientationPatient', + 'PixelSpacing', + 'SliceThickness', + 'AcquisitionContextSequence'] + to_be_removed_from_distinguishing_attribs: set = set() + self._FrameSets: list = [] + frame_counts = [] + frameset_counter = 0 + while len(self.MixedFramesCopy) != 0: + frameset_counter += 1 + x = self._find_all_similar_to_first_datasets() + self._FrameSets.append(FrameSet(x[0], x[1])) + frame_counts.append(len(x[0])) + # log information + logger.debug("Frameset({:02d}) including {:03d} frames".format( + frameset_counter, len(x[0]))) + logger.debug('\t Distinguishing tags:') + for dg_i, dg_tg in enumerate(x[1], 1): + logger.debug('\t\t{:02d}/{})\t{}-{:32.32s} = {:32.32s}'.format( + dg_i, len(x[1]), DicomHelper.tag2str(dg_tg), + keyword_for_tag(dg_tg), + str(x[0][0][dg_tg].value))) + logger.debug('\t dicom datasets in this frame set:') + for dicom_i, dicom_ds in enumerate(x[0], 1): + logger.debug('\t\t{}/{})\t {}'.format( + dicom_i, len(x[0]), dicom_ds['SOPInstanceUID'])) 
+ frames = '' + for i, f_count in enumerate(frame_counts, 1): + frames += '{: 2d}){:03d}\t'.format(i, f_count) + frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( + len(frame_counts), len(self.MixedFrames)) + frames + logger.info(frames) + for kw in to_be_removed_from_distinguishing_attribs: + self.DistinguishingAttributeKeywords.remove(kw) + self.ExcludedFromPerFrameTags = {} + for kwkw in self.DistinguishingAttributeKeywords: + self.ExcludedFromPerFrameTags[tag_for_keyword(kwkw)] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDateTime')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDate')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionTime')] = False + self.ExcludedFromFunctionalGroupsTags = { + tag_for_keyword('SpecificCharacterSet'): False} + + def _find_all_similar_to_first_datasets(self) -> tuple: + logger = logging.getLogger(__name__) + similar_ds: list = [self.MixedFramesCopy[0]] + distinguishing_tags_existing = [] + distinguishing_tags_missing = [] + self.MixedFramesCopy = self.MixedFramesCopy[1:] + for kw in self.DistinguishingAttributeKeywords: + tg = tag_for_keyword(kw) + if tg in similar_ds[0]: + distinguishing_tags_existing.append(tg) + else: + distinguishing_tags_missing.append(tg) + logger_msg = set() + for ds in self.MixedFramesCopy: + all_equal = True + for tg in distinguishing_tags_missing: + if tg in ds: + logger_msg.add( + '{} is missing in all but {}'.format( + DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) + all_equal = False + break + if not all_equal: + continue + for tg in distinguishing_tags_existing: + ref_val = similar_ds[0][tg].value + if tg not in ds: + all_equal = False + break + new_val = ds[tg].value + if not DicomHelper.isequal(ref_val, new_val): + logger_msg.add( + 'Inequality on distinguishing ' + 'attribute{} -> {} != {} \n series uid = {}'.format( + DicomHelper.tag2kwstr(tg), ref_val, new_val, + ds.SeriesInstanceUID)) + all_equal = 
False + break + if all_equal: + similar_ds.append(ds) + for msg_ in logger_msg: + logger.info(msg_) + for ds in similar_ds: + if ds in self.MixedFramesCopy: + self.MixedFramesCopy.remove(ds) + return (similar_ds, distinguishing_tags_existing) + + @property + def DistinguishingAttributeKeywords(self) -> List[str]: + return self._DistinguishingAttributeKeywords[:] + + @property + def FrameSets(self) -> List[FrameSet]: + return self._FrameSets + + class LegacyConvertedEnhanceImage(SOPClass): """SOP class for Legacy Converted Enhanced PET Image instances.""" @@ -2788,236 +3017,3 @@ def tag2str(tg: BaseTag) -> str: def tag2kwstr(tg: BaseTag) -> str: return '{}-{:32.32s}'.format( DicomHelper.tag2str(tg), keyword_for_tag(tg)) - - -class FrameSet: - def __init__(self, single_frame_list: list, - distinguishing_tags: list): - self._Frames = single_frame_list - self._DistinguishingAttributesTags = distinguishing_tags - tmp = [ - tag_for_keyword('AcquisitionDateTime'), - tag_for_keyword('AcquisitionDate'), - tag_for_keyword('AcquisitionTime'), - tag_for_keyword('SpecificCharacterSet')] - self._ExcludedFromPerFrameTags =\ - self.DistinguishingAttributesTags + tmp - self._PerFrameTags: list = [] - self._SharedTags: list = [] - self._find_per_frame_and_shared_tags() - - @property - def Frames(self) -> List[Dataset]: - return self._Frames[:] - - @property - def DistinguishingAttributesTags(self) -> List[Tag]: - return self._DistinguishingAttributesTags[:] - - @property - def ExcludedFromPerFrameTags(self) -> List[Tag]: - return self._ExcludedFromPerFrameTags[:] - - @property - def PerFrameTags(self) -> List[Tag]: - return self._PerFrameTags[:] - - @property - def SharedTags(self) -> List[Tag]: - return self._SharedTags[:] - - @property - def SeriesInstanceUID(self) -> UID: - return self._Frames[0].SeriesInstanceUID - - @property - def StudyInstanceUID(self) -> UID: - return self._Frames[0].StudyInstanceUID - - def GetSOPInstanceUIDList(self) -> list: - OutputList: list = [] - for 
f in self._Frames: - OutputList.append(f.SOPInstanceUID) - return OutputList - - def GetSOPClassUID(self) -> UID: - return self._Frames[0].SOPClassUID - - def _find_per_frame_and_shared_tags(self) -> None: - # logger = logging.getLogger(__name__) - rough_shared: dict = {} - sfs = self.Frames - for ds in sfs: - for ttag, elem in ds.items(): - if (not ttag.is_private and not - DicomHelper.istag_file_meta_information_group(ttag) and not - DicomHelper.istag_repeating_group(ttag) and not - DicomHelper.istag_group_length(ttag) and not - self._istag_excluded_from_perframe(ttag) and - ttag != tag_for_keyword('PixelData')): - elem = ds[ttag] - if ttag not in self._PerFrameTags: - self._PerFrameTags.append(ttag) - if ttag in rough_shared: - rough_shared[ttag].append(elem.value) - else: - rough_shared[ttag] = [elem.value] - to_be_removed_from_shared = [] - for ttag, v in rough_shared.items(): - v = rough_shared[ttag] - if len(v) < len(self.Frames): - to_be_removed_from_shared.append(ttag) - else: - all_values_are_equal = True - for v_i in v: - if not DicomHelper.isequal(v_i, v[0]): - all_values_are_equal = False - break - if not all_values_are_equal: - to_be_removed_from_shared.append(ttag) - from pydicom.datadict import keyword_for_tag - for t, v in rough_shared.items(): - if keyword_for_tag(t) != 'PatientSex': - continue - for t in to_be_removed_from_shared: - del rough_shared[t] - for t, v in rough_shared.items(): - self._SharedTags.append(t) - self._PerFrameTags.remove(t) - - def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: - return t in self.ExcludedFromPerFrameTags - - -class FrameSetCollection: - def __init__(self, single_frame_list: list): - logger = logging.getLogger(__name__) - self.MixedFrames = single_frame_list - self.MixedFramesCopy = self.MixedFrames[:] - self._DistinguishingAttributeKeywords = [ - 'PatientID', - 'PatientName', - 'Manufacturer', - 'InstitutionName', - 'InstitutionAddress', - 'StationName', - 'InstitutionalDepartmentName', - 
'ManufacturerModelName', - 'DeviceSerialNumber', - 'SoftwareVersions', - 'GantryID', - 'PixelPaddingValue', - 'Modality', - 'ImageType', - 'BurnedInAnnotation', - 'SOPClassUID', - 'Rows', - 'Columns', - 'BitsStored', - 'BitsAllocated', - 'HighBit', - 'PixelRepresentation', - 'PhotometricInterpretation', - 'PlanarConfiguration', - 'SamplesPerPixel', - 'ProtocolName', - 'ImageOrientationPatient', - 'PixelSpacing', - 'SliceThickness', - 'AcquisitionContextSequence'] - to_be_removed_from_distinguishing_attribs: set = set() - self._FrameSets: list = [] - frame_counts = [] - frameset_counter = 0 - while len(self.MixedFramesCopy) != 0: - frameset_counter += 1 - x = self._find_all_similar_to_first_datasets() - self._FrameSets.append(FrameSet(x[0], x[1])) - frame_counts.append(len(x[0])) - # log information - logger.debug("Frameset({:02d}) including {:03d} frames".format( - frameset_counter, len(x[0]))) - logger.debug('\t Distinguishing tags:') - for dg_i, dg_tg in enumerate(x[1], 1): - logger.debug('\t\t{:02d}/{})\t{}-{:32.32s} = {:32.32s}'.format( - dg_i, len(x[1]), DicomHelper.tag2str(dg_tg), - keyword_for_tag(dg_tg), - str(x[0][0][dg_tg].value))) - logger.debug('\t dicom datasets in this frame set:') - for dicom_i, dicom_ds in enumerate(x[0], 1): - logger.debug('\t\t{}/{})\t {}'.format( - dicom_i, len(x[0]), dicom_ds['SOPInstanceUID'])) - frames = '' - for i, f_count in enumerate(frame_counts, 1): - frames += '{: 2d}){:03d}\t'.format(i, f_count) - frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( - len(frame_counts), len(self.MixedFrames)) + frames - logger.info(frames) - for kw in to_be_removed_from_distinguishing_attribs: - self.DistinguishingAttributeKeywords.remove(kw) - self.ExcludedFromPerFrameTags = {} - for kwkw in self.DistinguishingAttributeKeywords: - self.ExcludedFromPerFrameTags[tag_for_keyword(kwkw)] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionDateTime')] = False - self.ExcludedFromPerFrameTags[ - 
tag_for_keyword('AcquisitionDate')] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionTime')] = False - self.ExcludedFromFunctionalGroupsTags = { - tag_for_keyword('SpecificCharacterSet'): False} - - def _find_all_similar_to_first_datasets(self) -> tuple: - logger = logging.getLogger(__name__) - similar_ds: list = [self.MixedFramesCopy[0]] - distinguishing_tags_existing = [] - distinguishing_tags_missing = [] - self.MixedFramesCopy = self.MixedFramesCopy[1:] - for kw in self.DistinguishingAttributeKeywords: - tg = tag_for_keyword(kw) - if tg in similar_ds[0]: - distinguishing_tags_existing.append(tg) - else: - distinguishing_tags_missing.append(tg) - logger_msg = set() - for ds in self.MixedFramesCopy: - all_equal = True - for tg in distinguishing_tags_missing: - if tg in ds: - logger_msg.add( - '{} is missing in all but {}'.format( - DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) - all_equal = False - break - if not all_equal: - continue - for tg in distinguishing_tags_existing: - ref_val = similar_ds[0][tg].value - if tg not in ds: - all_equal = False - break - new_val = ds[tg].value - if not DicomHelper.isequal(ref_val, new_val): - logger_msg.add( - 'Inequality on distinguishing ' - 'attribute{} -> {} != {} \n series uid = {}'.format( - DicomHelper.tag2kwstr(tg), ref_val, new_val, - ds.SeriesInstanceUID)) - all_equal = False - break - if all_equal: - similar_ds.append(ds) - for msg_ in logger_msg: - logger.info(msg_) - for ds in similar_ds: - if ds in self.MixedFramesCopy: - self.MixedFramesCopy.remove(ds) - return (similar_ds, distinguishing_tags_existing) - - @property - def DistinguishingAttributeKeywords(self) -> List[str]: - return self._DistinguishingAttributeKeywords[:] - - @property - def FrameSets(self) -> List[FrameSet]: - return self._FrameSets From 32e9983a2a778d0f9a710e300aaebaeb1bb30f6d Mon Sep 17 00:00:00 2001 From: afshin Date: Tue, 9 Feb 2021 19:04:08 +0000 Subject: [PATCH 26/44] Image/Frame-Type bug was fixed --- 
src/highdicom/legacy/sop.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 6045f918..890dcf0d 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -564,7 +564,7 @@ def __init__(self, sf_datasets: Sequence[Dataset], def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: if attribute.is_empty: return True - if type(attribute.value) == Sequence: + if isinstance(attribute.value, DicomSequence): if len(attribute.value) == 0: return True for item in attribute.value: @@ -586,11 +586,11 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, src_kw_or_tg: str, dest_kw_or_tg: str = None, check_not_to_be_perframe: bool = True, check_not_to_be_empty: bool = False) -> None: - if type(src_kw_or_tg) == str: + if isinstance(src_kw_or_tg, str): src_kw_or_tg = tag_for_keyword(src_kw_or_tg) if dest_kw_or_tg is None: dest_kw_or_tg = src_kw_or_tg - elif type(dest_kw_or_tg) == str: + elif isinstance(dest_kw_or_tg, str): dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) if check_not_to_be_perframe: if src_kw_or_tg in self._PerFrameTags: @@ -646,19 +646,19 @@ def _get_or_create_attribute( else: a = DataElement(tg, dictionary_VR(tg), default) from pydicom.valuerep import DT, TM, DA - if a.VR == 'DA' and type(a.value) == str: + if a.VR == 'DA' and isinstance(a.value, str): try: d_tmp = DA(a.value) a.value = DA(default) if d_tmp is None else d_tmp except BaseException: a.value = DA(default) - if a.VR == 'DT' and type(a.value) == str: + if a.VR == 'DT' and isinstance(a.value, str): try: dt_tmp = DT(a.value) a.value = DT(default) if dt_tmp is None else dt_tmp except BaseException: a.value = DT(default) - if a.VR == 'TM' and type(a.value) == str: + if a.VR == 'TM' and isinstance(a.value, str): try: t_tmp = TM(a.value) a.value = TM(default) if t_tmp is None else t_tmp @@ -858,7 +858,7 @@ def __init__(self, sf_datasets: 
Sequence[Dataset], def _get_value_for_frame_type(self, attrib: DataElement) -> Union[list, None]: - if type(attrib) != DataElement: + if not isinstance(attrib, DataElement): return None output = ['', '', '', ''] v = attrib.value @@ -2936,8 +2936,8 @@ def AreParallel(slice1: GeometryOfSlice, slice2: GeometryOfSlice, tolerance: float = 0.0001) -> bool: logger = logging.getLogger(__name__) - if (type(slice1) != GeometryOfSlice or - type(slice2) != GeometryOfSlice): + if (not isinstance(slice1, GeometryOfSlice) or + not isinstance(slice2, GeometryOfSlice)): logger.warning( 'slice1 and slice2 are not of the same ' 'type: type(slice1) = {} and type(slice2) = {}'.format( @@ -2976,10 +2976,10 @@ def is_equal_float(x1: float, x2: float) -> bool: return abs(x1 - x2) < float_tolerance if type(v1) != type(v2): return False - if type(v1) == DicomSequence: + if isinstance(v1, DicomSequence): for item1, item2 in zip(v1, v2): DicomHelper.isequal_dicom_dataset(item1, item2) - if type(v1) != MultiValue: + if not isinstance(v1, MultiValue): v11 = [v1] v22 = [v2] else: @@ -2988,7 +2988,7 @@ def is_equal_float(x1: float, x2: float) -> bool: if len(v11) != len(v22): return False for xx, yy in zip(v11, v22): - if type(xx) == DSfloat or type(xx) == float: + if isinstance(xx, DSfloat) or isinstance(xx, float): if not is_equal_float(xx, yy): return False else: @@ -2999,7 +2999,7 @@ def is_equal_float(x1: float, x2: float) -> bool: def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: if type(ds1) != type(ds2): return False - if type(ds1) != Dataset: + if not isinstance(ds1, Dataset): return False for k1, elem1 in ds1.items(): if k1 not in ds2: From aafa65c2b69e5ebe71eb2366b467206d4e1b5075 Mon Sep 17 00:00:00 2001 From: afshin Date: Thu, 1 Apr 2021 20:21:37 -0400 Subject: [PATCH 27/44] changed the class structures for sop --- src/highdicom/legacy/sop.py | 2921 ++++++++++++++--------------------- 1 file changed, 1131 insertions(+), 1790 deletions(-) diff --git 
a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 8b92421d..7da8005b 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -8,7 +8,7 @@ from pydicom.dataset import Dataset from pydicom.tag import Tag, BaseTag from pydicom.dataelem import DataElement -from pydicom.sequence import Sequence as DicomSequence +from pydicom.sequence import Sequence as DataElementSequence from pydicom.multival import MultiValue from datetime import date, datetime, time, timedelta from pydicom.valuerep import DT, DA, TM @@ -534,654 +534,777 @@ def __init__( referring_physician_name=ref_ds.ReferringPhysicianName, **kwargs) _convert_legacy_to_enhanced(legacy_datasets, self) + +class PerframeFunctionalGroup(DataElementSequence): -from abc import ABC, abstractmethod + def __init__(self, number_of_frames: int) -> None: + super().__init__() + for i in range(0, number_of_frames): + item = Dataset() + self.append(item) -class Abstract_MultiframeModuleAdder(ABC): +class SharedFunctionalGroup(DataElementSequence): - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - - self.ExcludedFromPerFrameTags = excluded_from_perframe_tags - self.ExcludedFromFunctionalGroupsTags = excluded_from_functional_tags - self._PerFrameTags = perframe_tags - self._SharedTags = shared_tags - self.TargetDataset = multi_frame_output - self.SingleFrameSet = sf_datasets - self.EarliestDate = DA('00010101') - self.EarliestTime = TM('000000') - self.EarliestDateTime = DT('00010101000000') - self.FarthestFutureDate = DA('99991231') - self.FarthestFutureTime = TM('235959') - self.FarthestFutureDateTime = DT('99991231235959') + def __init__(self) -> None: + super().__init__() + item = Dataset() + self.append(item) - def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: - if attribute.is_empty: - return True - if 
isinstance(attribute.value, DicomSequence): - if len(attribute.value) == 0: - return True - for item in attribute.value: - for tg, v in item.items(): - v = item[tg] - if not self._is_empty_or_empty_items(v): - return False - return False + +class FrameSet: + def __init__(self, single_frame_list: list, + distinguishing_tags: list): + self._Frames = single_frame_list + self._DistinguishingAttributesTags = distinguishing_tags + tmp = [ + tag_for_keyword('AcquisitionDateTime'), + tag_for_keyword('AcquisitionDate'), + tag_for_keyword('AcquisitionTime'), + tag_for_keyword('SpecificCharacterSet')] + self._ExcludedFromPerFrameTags =\ + self.DistinguishingAttributesTags + tmp + self._PerFrameTags: list = [] + self._SharedTags: list = [] + self._find_per_frame_and_shared_tags() - def _mark_tag_as_used(self, tg: BaseTag) -> None: - if tg in self._SharedTags: - self._SharedTags[tg] = True - elif tg in self.ExcludedFromPerFrameTags: - self.ExcludedFromPerFrameTags[tg] = True - elif tg in self._PerFrameTags: - self._PerFrameTags[tg] = True + @property + def Frames(self) -> List[Dataset]: + return self._Frames[:] - def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, - src_kw_or_tg: str, dest_kw_or_tg: str = None, - check_not_to_be_perframe: bool = True, - check_not_to_be_empty: bool = False) -> None: - if isinstance(src_kw_or_tg, str): - src_kw_or_tg = tag_for_keyword(src_kw_or_tg) - if dest_kw_or_tg is None: - dest_kw_or_tg = src_kw_or_tg - elif isinstance(dest_kw_or_tg, str): - dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) - if check_not_to_be_perframe: - if src_kw_or_tg in self._PerFrameTags: - return - if src_kw_or_tg in src_ds: - elem = src_ds[src_kw_or_tg] - if check_not_to_be_empty: - if self._is_empty_or_empty_items(elem): - return - new_elem = deepcopy(elem) - if dest_kw_or_tg == src_kw_or_tg: - dest_ds[dest_kw_or_tg] = new_elem - else: - new_elem1 = DataElement(dest_kw_or_tg, - dictionary_VR(dest_kw_or_tg), - new_elem.value) - dest_ds[dest_kw_or_tg] = 
new_elem1 - # now mark the attrib as used/done to keep track of every one of it - self._mark_tag_as_used(src_kw_or_tg) + @property + def DistinguishingAttributesTags(self) -> List[Tag]: + return self._DistinguishingAttributesTags[:] - def _get_perframe_item(self, index: int) -> Dataset: - if index > len(self.SingleFrameSet): - return None - pf_kw: str = 'PerFrameFunctionalGroupsSequence' - pf_tg = tag_for_keyword(pf_kw) - if pf_tg not in self.TargetDataset: - seq = [] - for i in range(0, len(self.SingleFrameSet)): - seq.append(Dataset()) - self.TargetDataset[pf_tg] = DataElement(pf_tg, - 'SQ', - DicomSequence(seq)) - return self.TargetDataset[pf_tg].value[index] - - def _get_shared_item(self) -> Dataset: - sf_kw = 'SharedFunctionalGroupsSequence' - sf_tg = tag_for_keyword(sf_kw) - if sf_kw not in self.TargetDataset: - seq = [Dataset()] - self.TargetDataset[sf_tg] = DataElement(sf_tg, - 'SQ', - DicomSequence(seq)) - return self.TargetDataset[sf_tg].value[0] + @property + def ExcludedFromPerFrameTags(self) -> List[Tag]: + return self._ExcludedFromPerFrameTags[:] - def _get_or_create_attribute( - self, src: Dataset, kw: Union[str, Tag], default: Any) -> DataElement: - if kw is str: - tg = tag_for_keyword(kw) - else: - tg = kw - if kw in src: - a = deepcopy(src[kw]) - else: - a = DataElement(tg, dictionary_VR(tg), default) - from pydicom.valuerep import DT, TM, DA - if a.VR == 'DA' and isinstance(a.value, str): - try: - d_tmp = DA(a.value) - a.value = DA(default) if d_tmp is None else d_tmp - except BaseException: - a.value = DA(default) - if a.VR == 'DT' and isinstance(a.value, str): - try: - dt_tmp = DT(a.value) - a.value = DT(default) if dt_tmp is None else dt_tmp - except BaseException: - a.value = DT(default) - if a.VR == 'TM' and isinstance(a.value, str): - try: - t_tmp = TM(a.value) - a.value = TM(default) if t_tmp is None else t_tmp - except BaseException: - a.value = TM(default) + @property + def PerFrameTags(self) -> List[Tag]: + return self._PerFrameTags[:] 
- self._mark_tag_as_used(tg) - return a + @property + def SharedTags(self) -> List[Tag]: + return self._SharedTags[:] - def _add_module(self, module_name: str, excepted_attributes: list = [], - check_not_to_be_perframe: bool = True, - check_not_to_be_empty: bool = False) -> None: - # sf_sop_instance_uid = sf_datasets[0] - # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ - # sf_sop_instance_uid] - # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] - # modules = IOD_MODULE_MAP[iod_name] - from copy import deepcopy - attribs: list = MODULE_ATTRIBUTE_MAP[module_name] - ref_dataset = self.SingleFrameSet[0] - for a in attribs: - kw: str = a['keyword'] - if kw in excepted_attributes: - continue - if len(a['path']) == 0: - self._copy_attrib_if_present( - ref_dataset, self.TargetDataset, kw, - check_not_to_be_perframe=check_not_to_be_perframe, - check_not_to_be_empty=check_not_to_be_empty) + @property + def SeriesInstanceUID(self) -> UID: + return self._Frames[0].SeriesInstanceUID - @abstractmethod - def AddModule(self) -> None: - pass + @property + def StudyInstanceUID(self) -> UID: + return self._Frames[0].StudyInstanceUID + def GetSOPInstanceUIDList(self) -> list: + OutputList: list = [] + for f in self._Frames: + OutputList.append(f.SOPInstanceUID) + return OutputList -class ImagePixelModule(Abstract_MultiframeModuleAdder): + def GetSOPClassUID(self) -> UID: + return self._Frames[0].SOPClassUID - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: - module_and_excepted_at = { - "image-pixel": - [ - "ColorSpace", - "PixelDataProviderURL", - "ExtendedOffsetTable", - "ExtendedOffsetTableLengths", - "PixelData" - ] - } - 
for module, except_at in module_and_excepted_at.items(): - self._add_module( - module, - excepted_attributes=except_at, - check_not_to_be_empty=False, - check_not_to_be_perframe=True) # don't check the perframe set + def _find_per_frame_and_shared_tags(self) -> None: + # logger = logging.getLogger(__name__) + rough_shared: dict = {} + sfs = self.Frames + for ds in sfs: + for ttag, elem in ds.items(): + if (not ttag.is_private and not + DicomHelper.istag_file_meta_information_group(ttag) and not + DicomHelper.istag_repeating_group(ttag) and not + DicomHelper.istag_group_length(ttag) and not + self._istag_excluded_from_perframe(ttag) and + ttag != tag_for_keyword('PixelData')): + elem = ds[ttag] + if ttag not in self._PerFrameTags: + self._PerFrameTags.append(ttag) + if ttag in rough_shared: + rough_shared[ttag].append(elem.value) + else: + rough_shared[ttag] = [elem.value] + to_be_removed_from_shared = [] + for ttag, v in rough_shared.items(): + v = rough_shared[ttag] + if len(v) < len(self.Frames): + to_be_removed_from_shared.append(ttag) + else: + all_values_are_equal = True + for v_i in v: + if not DicomHelper.isequal(v_i, v[0]): + all_values_are_equal = False + break + if not all_values_are_equal: + to_be_removed_from_shared.append(ttag) + from pydicom.datadict import keyword_for_tag + for t, v in rough_shared.items(): + if keyword_for_tag(t) != 'PatientSex': + continue + for t in to_be_removed_from_shared: + del rough_shared[t] + for t, v in rough_shared.items(): + self._SharedTags.append(t) + self._PerFrameTags.remove(t) + def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: + return t in self.ExcludedFromPerFrameTags -class CompositeInstanceContex(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - 
excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - self._module_excepted_list: dict = { - "patient": [], - "clinical-trial-subject": [], - "general-study": - [ - "StudyInstanceUID", - "RequestingService" - ], - "patient-study": - [ - "ReasonForVisit", - "ReasonForVisitCodeSequence" - ], - "clinical-trial-study": [], - "general-series": - [ - "SeriesInstanceUID", - "SeriesNumber", - "SmallestPixelValueInSeries", - "LargestPixelValueInSeries", - "PerformedProcedureStepEndDate", - "PerformedProcedureStepEndTime" - ], - "clinical-trial-series": [], - "general-equipment": - [ - "InstitutionalDepartmentTypeCodeSequence" - ], - "frame-of-reference": [], - "sop-common": - [ - "SOPClassUID", - "SOPInstanceUID", - "InstanceNumber", - "SpecificCharacterSet", - "EncryptedAttributesSequence", - "MACParametersSequence", - "DigitalSignaturesSequence" - ], - "general-image": - [ - "ImageType", - "AcquisitionDate", - "AcquisitionDateTime", - "AcquisitionTime", - "AnatomicRegionSequence", - "PrimaryAnatomicStructureSequence", - "IrradiationEventUID", - "AcquisitionNumber", - "InstanceNumber", - "PatientOrientation", - "ImageLaterality", - "ImagesInAcquisition", - "ImageComments", - "QualityControlImage", - "BurnedInAnnotation", - "RecognizableVisualFeatures", - "LossyImageCompression", - "LossyImageCompressionRatio", - "LossyImageCompressionMethod", - "RealWorldValueMappingSequence", - "IconImageSequence", - "PresentationLUTShape" - ], - "sr-document-general": - [ - "ContentDate", - "ContentTime", - "ReferencedInstanceSequence", - "InstanceNumber", - "VerifyingObserverSequence", - "AuthorObserverSequence", - "ParticipantSequence", - "CustodialOrganizationSequence", - "PredecessorDocumentsSequence", - "CurrentRequestedProcedureEvidenceSequence", - "PertinentOtherEvidenceSequence", - "CompletionFlag", - "CompletionFlagDescription", - "VerificationFlag", - "PreliminaryFlag", - "IdenticalDocumentsSequence" - ] - } - - def AddModule(self) -> None: - 
for module_name, excpeted_a in self._module_excepted_list.items(): - self._add_module( - module_name, - excepted_attributes=excpeted_a, - check_not_to_be_empty=False, - check_not_to_be_perframe=True) # don't check the perframe set - - -class CommonCTMRPETImageDescriptionMacro(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset, - modality: str = 'CT'): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - self.Modality = modality - - def _get_value_for_frame_type(self, - attrib: DataElement) -> Union[list, None]: - if not isinstance(attrib, DataElement): - return None - output = ['', '', '', ''] - v = attrib.value - lng = len(v) - output[0] = 'ORIGINAL' if lng == 0 else v[0] - output[1] = 'PRIMARY' - output[2] = 'VOLUME' if lng < 3 else v[2] - output[3] = 'NONE' - return output - - def _get_frame_type_seq_tag(self) -> int: - seq_kw = '{}{}FrameTypeSequence' - if self.Modality == 'PET': - seq_kw = seq_kw.format(self.Modality, '') - else: - seq_kw = seq_kw.format(self.Modality, 'Image') - return tag_for_keyword(seq_kw) - - def _add_module_to_functional_group(self, src_fg: Dataset, - dest_fg: Dataset, level: int) -> None: - FrameType_a = src_fg['ImageType'] - if level == 0: - FrameType_tg = tag_for_keyword('ImageType') - else: - FrameType_tg = tag_for_keyword('FrameType') - new_val = self._get_value_for_frame_type(FrameType_a) - dest_fg[FrameType_tg] = DataElement(FrameType_tg, - FrameType_a.VR, new_val) +class FrameSetCollection: + def __init__(self, single_frame_list: list): + logger = logging.getLogger(__name__) + self.MixedFrames = single_frame_list + self.MixedFramesCopy = self.MixedFrames[:] + self._DistinguishingAttributeKeywords = [ + 'PatientID', + 'PatientName', + 
'Manufacturer', + 'InstitutionName', + 'InstitutionAddress', + 'StationName', + 'InstitutionalDepartmentName', + 'ManufacturerModelName', + 'DeviceSerialNumber', + 'SoftwareVersions', + 'GantryID', + 'PixelPaddingValue', + 'Modality', + 'ImageType', + 'BurnedInAnnotation', + 'SOPClassUID', + 'Rows', + 'Columns', + 'BitsStored', + 'BitsAllocated', + 'HighBit', + 'PixelRepresentation', + 'PhotometricInterpretation', + 'PlanarConfiguration', + 'SamplesPerPixel', + 'ProtocolName', + 'ImageOrientationPatient', + 'PixelSpacing', + 'SliceThickness', + 'AcquisitionContextSequence'] + to_be_removed_from_distinguishing_attribs: set = set() + self._FrameSets: list = [] + frame_counts = [] + frameset_counter = 0 + while len(self.MixedFramesCopy) != 0: + frameset_counter += 1 + x = self._find_all_similar_to_first_datasets() + self._FrameSets.append(FrameSet(x[0], x[1])) + frame_counts.append(len(x[0])) + # log information + logger.debug("Frameset({:02d}) including {:03d} frames".format( + frameset_counter, len(x[0]))) + logger.debug('\t Distinguishing tags:') + for dg_i, dg_tg in enumerate(x[1], 1): + logger.debug('\t\t{:02d}/{})\t{}-{:32.32s} = {:32.32s}'.format( + dg_i, len(x[1]), DicomHelper.tag2str(dg_tg), + keyword_for_tag(dg_tg), + str(x[0][0][dg_tg].value))) + logger.debug('\t dicom datasets in this frame set:') + for dicom_i, dicom_ds in enumerate(x[0], 1): + logger.debug('\t\t{}/{})\t {}'.format( + dicom_i, len(x[0]), dicom_ds['SOPInstanceUID'])) + frames = '' + for i, f_count in enumerate(frame_counts, 1): + frames += '{: 2d}){:03d}\t'.format(i, f_count) + frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( + len(frame_counts), len(self.MixedFrames)) + frames + logger.info(frames) + for kw in to_be_removed_from_distinguishing_attribs: + self.DistinguishingAttributeKeywords.remove(kw) + self.ExcludedFromPerFrameTags = {} + for kwkw in self.DistinguishingAttributeKeywords: + self.ExcludedFromPerFrameTags[tag_for_keyword(kwkw)] = False + 
self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDateTime')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionDate')] = False + self.ExcludedFromPerFrameTags[ + tag_for_keyword('AcquisitionTime')] = False + self.ExcludedFromFunctionalGroupsTags = { + tag_for_keyword('SpecificCharacterSet'): False} - def element_generator(kw: str, val: Any) -> DataElement: - return DataElement( - tag_for_keyword(kw), - dictionary_VR(tag_for_keyword(kw)), val) - dest_fg['PixelPresentation'] = element_generator( - 'PixelPresentation', "MONOCHROME") - dest_fg['VolumetricProperties'] = element_generator( - 'VolumetricProperties', "VOLUME") - dest_fg['VolumeBasedCalculationTechnique'] = element_generator( - 'VolumeBasedCalculationTechnique', "NONE") + def _find_all_similar_to_first_datasets(self) -> tuple: + logger = logging.getLogger(__name__) + similar_ds: list = [self.MixedFramesCopy[0]] + distinguishing_tags_existing = [] + distinguishing_tags_missing = [] + self.MixedFramesCopy = self.MixedFramesCopy[1:] + for kw in self.DistinguishingAttributeKeywords: + tg = tag_for_keyword(kw) + if tg in similar_ds[0]: + distinguishing_tags_existing.append(tg) + else: + distinguishing_tags_missing.append(tg) + logger_msg = set() + for ds in self.MixedFramesCopy: + all_equal = True + for tg in distinguishing_tags_missing: + if tg in ds: + logger_msg.add( + '{} is missing in all but {}'.format( + DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) + all_equal = False + break + if not all_equal: + continue + for tg in distinguishing_tags_existing: + ref_val = similar_ds[0][tg].value + if tg not in ds: + all_equal = False + break + new_val = ds[tg].value + if not DicomHelper.isequal(ref_val, new_val): + logger_msg.add( + 'Inequality on distinguishing ' + 'attribute{} -> {} != {} \n series uid = {}'.format( + DicomHelper.tag2kwstr(tg), ref_val, new_val, + ds.SeriesInstanceUID)) + all_equal = False + break + if all_equal: + similar_ds.append(ds) + for msg_ in logger_msg: 
+ logger.info(msg_) + for ds in similar_ds: + if ds in self.MixedFramesCopy: + self.MixedFramesCopy.remove(ds) + return (similar_ds, distinguishing_tags_existing) - def AddModule(self) -> None: - im_type_tag = tag_for_keyword('ImageType') - seq_tg = self._get_frame_type_seq_tag() - if im_type_tag not in self._PerFrameTags: - self._add_module_to_functional_group(self.SingleFrameSet[0], - self.TargetDataset, 0) - # ---------------------------- - item = self._get_shared_item() - inner_item = Dataset() - self._add_module_to_functional_group(self.SingleFrameSet[0], - inner_item, 1) - item[seq_tg] = DataElement( - seq_tg, 'SQ', DicomSequence([inner_item])) - else: - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - inner_item = Dataset() - self._add_module_to_functional_group(self.SingleFrameSet[i], - inner_item, 1) - item[seq_tg] = DataElement( - seq_tg, 'SQ', DicomSequence([inner_item])) + @property + def DistinguishingAttributeKeywords(self) -> List[str]: + return self._DistinguishingAttributeKeywords[:] + @property + def FrameSets(self) -> List[FrameSet]: + return self._FrameSets -class EnhancedCommonImageModule(Abstract_MultiframeModuleAdder): - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: - # ct_mr = CommonCTMRImageDescriptionMacro(self.SingleFrameSet - # , self.ExcludedFromPerFrameTags - # , self._PerFrameTags - # , self._SharedTags - # , self.TargetDataset) - # ct_mr.AddModule() - # Acquisition Number - # Acquisition DateTime - should be able to find earliest amongst all - # frames, if present (required if ORIGINAL) - # Acquisition Duration - should be able to work this out, but type 2C, - # 
so can send empty - # Referenced Raw Data Sequence - optional - ignore - too hard to merge - # Referenced Waveform Sequence - optional - ignore - too hard to merge - # Referenced Image Evidence Sequence - should add if we have references - # Source Image Evidence Sequence - should add if we have sources : ( - # Referenced Presentation State Sequence - should merge if present in - # any source frame - # Samples per Pixel - handled by distinguishingAttribute copy - # Photometric Interpretation - handled by distinguishingAttribute copy - # Bits Allocated - handled by distinguishingAttribute copy - # Bits Stored - handled by distinguishingAttribute copy - # High Bit - handled by distinguishingAttribute copy - ref_dataset = self.SingleFrameSet[0] - attribs_to_be_added = [ - 'ContentQualification', - 'ImageComments', - 'BurnedInAnnotation', - 'RecognizableVisualFeatures', - 'LossyImageCompression', - 'LossyImageCompressionRatio', - 'LossyImageCompressionMethod'] - for kw in attribs_to_be_added: - self._copy_attrib_if_present( - ref_dataset, self.TargetDataset, kw, - check_not_to_be_perframe=True, - check_not_to_be_empty=False) - sum_compression_ratio: float = 0 - c_ratio_tag = tag_for_keyword('LossyImageCompressionRatio') - if tag_for_keyword('LossyImageCompression') in self._SharedTags and \ - tag_for_keyword( - 'LossyImageCompressionMethod') in self._SharedTags and \ - c_ratio_tag in self._PerFrameTags: - for fr_ds in self.SingleFrameSet: - if c_ratio_tag in fr_ds: - ratio = fr_ds[c_ratio_tag].value - try: - sum_compression_ratio += float(ratio) - except BaseException: - sum_compression_ratio += 1 # supposing uncompressed - else: - sum_compression_ratio += 1 - avg_compression_ratio = sum_compression_ratio /\ - len(self.SingleFrameSet) - avg_ratio_str = '{:.6f}'.format(avg_compression_ratio) - self.TargetDataset[c_ratio_tag] = \ - DataElement(c_ratio_tag, 'DS', avg_ratio_str) +class LegacyConvertedEnhanceImage(SOPClass): + """SOP class for Legacy Converted Enhanced PET 
Image instances.""" - if tag_for_keyword('PresentationLUTShape') not in self._PerFrameTags: - # actually should really invert the pixel data if MONOCHROME1, - # since only MONOCHROME2 is permitted : ( - # also, do not need to check if PhotometricInterpretation is - # per-frame, since a distinguishing attribute - phmi_kw = 'PhotometricInterpretation' - phmi_a = self._get_or_create_attribute(self.SingleFrameSet[0], - phmi_kw, - "MONOCHROME2") - LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ - else "IDENTITY" - LUT_shape_a = self._get_or_create_attribute(self.SingleFrameSet[0], - 'PresentationLUTShape', - LUT_shape_default) - if not LUT_shape_a.is_empty: - self.TargetDataset['PresentationLUTShape'] = LUT_shape_a - # Icon Image Sequence - always discard these - - -class ContrastBolusModule(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): + def __init__( + self, + frame_set: FrameSet, + series_instance_uid: str, + series_number: int, + sop_instance_uid: str, + instance_number: int, + sort_key: Callable = None, + **kwargs: Any) -> None: + """ + Parameters + ---------- + legacy_datasets: Sequence[pydicom.dataset.Dataset] + DICOM data sets of legacy single-frame image instances that should + be converted + series_instance_uid: str + UID of the series + series_number: Union[int, None] + Number of the series within the study + sop_instance_uid: str + UID that should be assigned to the instance + instance_number: int + Number that should be assigned to the instance + **kwargs: Any, optional + Additional keyword arguments that will be passed to the constructor + of `highdicom.base.SOPClass` + """ + legacy_datasets = frame_set.Frames + try: + ref_ds = legacy_datasets[0] + except IndexError: + raise ValueError('No DICOM data sets of provided.') + sop_class_uid = 
LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] + if sort_key is None: + sort_key = LegacyConvertedEnhanceImage.default_sort_key super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: - self._add_module('contrast-bolus') + study_instance_uid="" if 'StudyInstanceUID' not in ref_ds + else ref_ds.StudyInstanceUID, + series_instance_uid=series_instance_uid, + series_number=series_number, + sop_instance_uid=sop_instance_uid, + sop_class_uid=sop_class_uid, + instance_number=instance_number, + manufacturer="" if 'Manufacturer' not in ref_ds + else ref_ds.Manufacturer, + modality="" if 'Modality' not in ref_ds + else ref_ds.Modality, + patient_id=None if 'PatientID' not in ref_ds + else ref_ds.PatientID, + patient_name=None if 'PatientName' not in ref_ds + else ref_ds.PatientName, + patient_birth_date=None if 'PatientBirthDate' not in ref_ds + else ref_ds.PatientBirthDate, + patient_sex=None if 'PatientSex' not in ref_ds + else ref_ds.PatientSex, + accession_number=None if 'AccessionNumber' not in ref_ds + else ref_ds.AccessionNumber, + study_id=None if 'StudyID' not in ref_ds + else ref_ds.StudyID, + study_date=None if 'StudyDate' not in ref_ds + else ref_ds.StudyDate, + study_time=None if 'StudyTime' not in ref_ds + else ref_ds.StudyTime, + referring_physician_name=None if 'ReferringPhysicianName' not in + ref_ds else ref_ds.ReferringPhysicianName, + **kwargs) + self._legacy_datasets = legacy_datasets + self._perframe_functional_group = PerframeFunctionalGroup( + len(legacy_datasets)) + tg = tag_for_keyword('PerFrameFunctionalGroupsSequence') + self[tg] = DataElement(tg, 'SQ', self._perframe_functional_group) + self._shared_functional_group = SharedFunctionalGroup() + tg = tag_for_keyword('SharedFunctionalGroupsSequence') + self[tg] = DataElement(tg, 'SQ', self._perframe_functional_group) + self.DistinguishingAttributesTags = 
self._get_tag_used_dictionary( + frame_set.DistinguishingAttributesTags) + self.ExcludedFromPerFrameTags = self._get_tag_used_dictionary( + frame_set.ExcludedFromPerFrameTags) + self._PerFrameTags = self._get_tag_used_dictionary( + frame_set.PerFrameTags) + self._SharedTags = self._get_tag_used_dictionary( + frame_set.SharedTags) + self.ExcludedFromFunctionalGroupsTags = { + tag_for_keyword('SpecificCharacterSet'): False} + + # -------------------------------------------------------------------- + self.__build_blocks: list = [] + # == == == == == == == == == == == == == == == == == == == == == == == + new_ds = [] + for item in sorted(self._legacy_datasets, key=sort_key): + new_ds.append(item) + + # self = multi_frame_output + self._module_excepted_list: dict = { + "patient": [], + "clinical-trial-subject": [], + "general-study": + [ + "StudyInstanceUID", + "RequestingService" + ], + "patient-study": + [ + "ReasonForVisit", + "ReasonForVisitCodeSequence" + ], + "clinical-trial-study": [], + "general-series": + [ + "SeriesInstanceUID", + "SeriesNumber", + "SmallestPixelValueInSeries", + "LargestPixelValueInSeries", + "PerformedProcedureStepEndDate", + "PerformedProcedureStepEndTime" + ], + "clinical-trial-series": [], + "general-equipment": + [ + "InstitutionalDepartmentTypeCodeSequence" + ], + "frame-of-reference": [], + "sop-common": + [ + "SOPClassUID", + "SOPInstanceUID", + "InstanceNumber", + "SpecificCharacterSet", + "EncryptedAttributesSequence", + "MACParametersSequence", + "DigitalSignaturesSequence" + ], + "general-image": + [ + "ImageType", + "AcquisitionDate", + "AcquisitionDateTime", + "AcquisitionTime", + "AnatomicRegionSequence", + "PrimaryAnatomicStructureSequence", + "IrradiationEventUID", + "AcquisitionNumber", + "InstanceNumber", + "PatientOrientation", + "ImageLaterality", + "ImagesInAcquisition", + "ImageComments", + "QualityControlImage", + "BurnedInAnnotation", + "RecognizableVisualFeatures", + "LossyImageCompression", + 
"LossyImageCompressionRatio", + "LossyImageCompressionMethod", + "RealWorldValueMappingSequence", + "IconImageSequence", + "PresentationLUTShape" + ], + "sr-document-general": + [ + "ContentDate", + "ContentTime", + "ReferencedInstanceSequence", + "InstanceNumber", + "VerifyingObserverSequence", + "AuthorObserverSequence", + "ParticipantSequence", + "CustodialOrganizationSequence", + "PredecessorDocumentsSequence", + "CurrentRequestedProcedureEvidenceSequence", + "PertinentOtherEvidenceSequence", + "CompletionFlag", + "CompletionFlagDescription", + "VerificationFlag", + "PreliminaryFlag", + "IdenticalDocumentsSequence" + ] + } + self.EarliestDate = DA('00010101') + self.EarliestTime = TM('000000') + self.EarliestDateTime = DT('00010101000000') + self.FarthestFutureDate = DA('99991231') + self.FarthestFutureTime = TM('235959') + self.FarthestFutureDateTime = DT('99991231235959') + self._slices: list = [] + self._tolerance = 0.0001 + self._slice_location_map: dict = {} + self._byte_data = bytearray() + self._word_data = bytearray() + self.EarliestContentDateTime = self.FarthestFutureDateTime -class EnhancedCTImageModule(Abstract_MultiframeModuleAdder): + if (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == + 'legacy-converted-enhanced-ct-image'): + self.AddBuildBlocksForCT() + elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == + 'legacy-converted-enhanced-mr-image'): + self.AddBuildBlocksForMR() + elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == + 'legacy-converted-enhanced-pet-image'): + self.AddBuildBlocksForPET() - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: - pass - # David's code doesn't hold anything for this module ... 
should ask him + def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: + if attribute.is_empty: + return True + if isinstance(attribute.value, DataElementSequence): + if len(attribute.value) == 0: + return True + for item in attribute.value: + for tg, v in item.items(): + v = item[tg] + if not self._is_empty_or_empty_items(v): + return False + return False + def _mark_tag_as_used(self, tg: BaseTag) -> None: + if tg in self._SharedTags: + self._SharedTags[tg] = True + elif tg in self.ExcludedFromPerFrameTags: + self.ExcludedFromPerFrameTags[tg] = True + elif tg in self._PerFrameTags: + self._PerFrameTags[tg] = True -class EnhancedPETImageModule(Abstract_MultiframeModuleAdder): + def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, + src_kw_or_tg: str, dest_kw_or_tg: str = None, + check_not_to_be_perframe: bool = True, + check_not_to_be_empty: bool = False) -> None: + if isinstance(src_kw_or_tg, str): + src_kw_or_tg = tag_for_keyword(src_kw_or_tg) + if dest_kw_or_tg is None: + dest_kw_or_tg = src_kw_or_tg + elif isinstance(dest_kw_or_tg, str): + dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) + if check_not_to_be_perframe: + if src_kw_or_tg in self._PerFrameTags: + return + if src_kw_or_tg in src_ds: + elem = src_ds[src_kw_or_tg] + if check_not_to_be_empty: + if self._is_empty_or_empty_items(elem): + return + new_elem = deepcopy(elem) + if dest_kw_or_tg == src_kw_or_tg: + dest_ds[dest_kw_or_tg] = new_elem + else: + new_elem1 = DataElement(dest_kw_or_tg, + dictionary_VR(dest_kw_or_tg), + new_elem.value) + dest_ds[dest_kw_or_tg] = new_elem1 + # now mark the attrib as used/done to keep track of every one of it + self._mark_tag_as_used(src_kw_or_tg) - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - 
excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: - # David's code doesn't hold anything for this module ... should ask him - kw = 'ContentQualification' - tg = tag_for_keyword(kw) - elem = self._get_or_create_attribute( - self.SingleFrameSet[0], kw, 'RESEARCH') - self.TargetDataset[tg] = elem + def _get_or_create_attribute( + self, src: Dataset, kw: Union[str, Tag], default: Any) -> DataElement: + if kw is str: + tg = tag_for_keyword(kw) + else: + tg = kw + if kw in src: + a = deepcopy(src[kw]) + else: + a = DataElement(tg, dictionary_VR(tg), default) + from pydicom.valuerep import DT, TM, DA + if a.VR == 'DA' and isinstance(a.value, str): + try: + d_tmp = DA(a.value) + a.value = DA(default) if d_tmp is None else d_tmp + except BaseException: + a.value = DA(default) + if a.VR == 'DT' and isinstance(a.value, str): + try: + dt_tmp = DT(a.value) + a.value = DT(default) if dt_tmp is None else dt_tmp + except BaseException: + a.value = DT(default) + if a.VR == 'TM' and isinstance(a.value, str): + try: + t_tmp = TM(a.value) + a.value = TM(default) if t_tmp is None else t_tmp + except BaseException: + a.value = TM(default) + self._mark_tag_as_used(tg) + return a -class EnhancedMRImageModule(Abstract_MultiframeModuleAdder): + def _add_module(self, module_name: str, excepted_attributes: list = [], + check_not_to_be_perframe: bool = True, + check_not_to_be_empty: bool = False) -> None: + # sf_sop_instance_uid = sf_datasets[0] + # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ + # sf_sop_instance_uid] + # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] + # modules = IOD_MODULE_MAP[iod_name] + from copy import deepcopy + attribs: list = MODULE_ATTRIBUTE_MAP[module_name] + ref_dataset = self._legacy_datasets[0] + for a in attribs: + kw: str = a['keyword'] + if kw in excepted_attributes: + continue + if len(a['path']) == 0: + self._copy_attrib_if_present( + ref_dataset, self, kw, + 
check_not_to_be_perframe=check_not_to_be_perframe, + check_not_to_be_empty=check_not_to_be_empty) - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: + def add_module_image_pixel(self) -> None: + module_and_excepted_at = { + "image-pixel": + [ + "ColorSpace", + "PixelDataProviderURL", + "ExtendedOffsetTable", + "ExtendedOffsetTableLengths", + "PixelData" + ] + } + for module, except_at in module_and_excepted_at.items(): + self._add_module( + module, + excepted_attributes=except_at, + check_not_to_be_empty=False, + check_not_to_be_perframe=True) # don't check the perframe set + + def add_module_enhanced_common_image(self) -> None: + ref_dataset = self._legacy_datasets[0] + attribs_to_be_added = [ + 'ContentQualification', + 'ImageComments', + 'BurnedInAnnotation', + 'RecognizableVisualFeatures', + 'LossyImageCompression', + 'LossyImageCompressionRatio', + 'LossyImageCompressionMethod'] + for kw in attribs_to_be_added: + self._copy_attrib_if_present( + ref_dataset, self, kw, + check_not_to_be_perframe=True, + check_not_to_be_empty=False) + sum_compression_ratio: float = 0 + c_ratio_tag = tag_for_keyword('LossyImageCompressionRatio') + if tag_for_keyword('LossyImageCompression') in self._SharedTags and \ + tag_for_keyword( + 'LossyImageCompressionMethod') in self._SharedTags and \ + c_ratio_tag in self._PerFrameTags: + for fr_ds in self._legacy_datasets: + if c_ratio_tag in fr_ds: + ratio = fr_ds[c_ratio_tag].value + try: + sum_compression_ratio += float(ratio) + except BaseException: + sum_compression_ratio += 1 # supposing uncompressed + else: + sum_compression_ratio += 1 + avg_compression_ratio = sum_compression_ratio /\ + 
len(self._legacy_datasets) + avg_ratio_str = '{:.6f}'.format(avg_compression_ratio) + self[c_ratio_tag] = \ + DataElement(c_ratio_tag, 'DS', avg_ratio_str) + + if tag_for_keyword('PresentationLUTShape') not in self._PerFrameTags: + # actually should really invert the pixel data if MONOCHROME1, + # since only MONOCHROME2 is permitted : ( + # also, do not need to check if PhotometricInterpretation is + # per-frame, since a distinguishing attribute + phmi_kw = 'PhotometricInterpretation' + phmi_a = self._get_or_create_attribute(self._legacy_datasets[0], + phmi_kw, + "MONOCHROME2") + LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ + else "IDENTITY" + LUT_shape_a = self._get_or_create_attribute(self._legacy_datasets[0], + 'PresentationLUTShape', + LUT_shape_default) + if not LUT_shape_a.is_empty: + self['PresentationLUTShape'] = LUT_shape_a + # Icon Image Sequence - always discard these + + def add_module_contrast_bolus(self) -> None: + self._add_module('contrast-bolus') + + def add_module_enhanced_ct_image(self) -> None: + pass + # David's code doesn't hold anything for this module ... should ask him + + def add_module_enhanced_pet_image(self) -> None: + # David's code doesn't hold anything for this module ... should ask him + kw = 'ContentQualification' + tg = tag_for_keyword(kw) + elem = self._get_or_create_attribute( + self._legacy_datasets[0], kw, 'RESEARCH') + self[tg] = elem + + def add_module_enhanced_mr_image(self) -> None: self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self._legacy_datasets[0], + self, "ResonantNucleus", check_not_to_be_perframe=True, check_not_to_be_empty=True) - if 'ResonantNucleus' not in self.TargetDataset: + if 'ResonantNucleus' not in self: # derive from ImagedNucleus, which is the one used in legacy MR # IOD, but does not have a standard list of defined terms ... 
# (could check these : () self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self._legacy_datasets[0], + self, "ImagedNucleus", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self._legacy_datasets[0], + self, "KSpaceFiltering", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self._legacy_datasets[0], + self, "MagneticFieldStrength", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self._legacy_datasets[0], + self, "ApplicableSafetyStandardAgency", check_not_to_be_perframe=True, check_not_to_be_empty=True) self._copy_attrib_if_present( - self.SingleFrameSet[0], - self.TargetDataset, + self._legacy_datasets[0], + self, "ApplicableSafetyStandardDescription", check_not_to_be_perframe=True, check_not_to_be_empty=True) - -class AcquisitionContextModule(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: + def add_module_acquisition_context(self) -> None: tg = tag_for_keyword('AcquisitionContextSequence') if tg not in self._PerFrameTags: - self.TargetDataset[tg] = self._get_or_create_attribute( - self.SingleFrameSet[0], + self[tg] = self._get_or_create_attribute( + self._legacy_datasets[0], tg, None) + + def _get_value_for_frame_type_common_ct_mr_pet_image_description(self, + attrib: DataElement) -> Union[list, None]: + if not isinstance(attrib, DataElement): + return None + output = ['', '', '', ''] + v = 
attrib.value + lng = len(v) + output[0] = 'ORIGINAL' if lng == 0 else v[0] + output[1] = 'PRIMARY' + output[2] = 'VOLUME' if lng < 3 else v[2] + output[3] = 'NONE' + return output + def _get_frame_type_seq_tag_common_ct_mr_pet_image_description(self, modality: str) -> int: + seq_kw = '{}{}FrameTypeSequence' + if modality == 'PET': + seq_kw = seq_kw.format(modality, '') + else: + seq_kw = seq_kw.format(modality, 'Image') + return tag_for_keyword(seq_kw) -class FrameAnatomyFunctionalGroup(Abstract_MultiframeModuleAdder): + def _add_module_to_functional_group_common_ct_mr_pet_image_description(self, src_fg: Dataset, + dest_fg: Dataset, level: int) -> None: + FrameType_a = src_fg['ImageType'] + if level == 0: + FrameType_tg = tag_for_keyword('ImageType') + else: + FrameType_tg = tag_for_keyword('FrameType') + new_val = self._get_value_for_frame_type_common_ct_mr_pet_image_description(FrameType_a) + dest_fg[FrameType_tg] = DataElement(FrameType_tg, + FrameType_a.VR, new_val) - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _add_module_to_functional_group( + def element_generator(kw: str, val: Any) -> DataElement: + return DataElement( + tag_for_keyword(kw), + dictionary_VR(tag_for_keyword(kw)), val) + dest_fg['PixelPresentation'] = element_generator( + 'PixelPresentation', "MONOCHROME") + dest_fg['VolumetricProperties'] = element_generator( + 'VolumetricProperties', "VOLUME") + dest_fg['VolumeBasedCalculationTechnique'] = element_generator( + 'VolumeBasedCalculationTechnique', "NONE") + + def add_module_common_ct_mr_pet_image_description(self, modality: str) -> None: + im_type_tag = tag_for_keyword('ImageType') + seq_tg = 
self._get_frame_type_seq_tag_common_ct_mr_pet_image_description(modality) + if im_type_tag not in self._PerFrameTags: + self._add_module_to_functional_group_common_ct_mr_pet_image_description(self._legacy_datasets[0], + self, 0) + # ---------------------------- + item = self._shared_functional_group[0] + inner_item = Dataset() + self._add_module_to_functional_group_common_ct_mr_pet_image_description(self._legacy_datasets[0], + inner_item, 1) + item[seq_tg] = DataElement( + seq_tg, 'SQ', DataElementSequence([inner_item])) + else: + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + inner_item = Dataset() + self._add_module_to_functional_group_common_ct_mr_pet_image_description(self._legacy_datasets[i], + inner_item, 1) + item[seq_tg] = DataElement( + seq_tg, 'SQ', DataElementSequence([inner_item])) + + def add_module_composite_instance_contex(self) -> None: + for module_name, excpeted_a in self._module_excepted_list.items(): + self._add_module( + module_name, + excepted_attributes=excpeted_a, + check_not_to_be_empty=False, + check_not_to_be_perframe=True) # don't check the perframe set + + def _add_module_to_functional_group_frame_anatomy( self, src_fg: Dataset, dest_fg: Dataset) -> None: # David's code is more complicaated than mine # Should check it out later. 
@@ -1214,10 +1337,10 @@ def _add_module_to_functional_group( FrameAnatomy_a = DataElement( fa_seq_tg, dictionary_VR(fa_seq_tg), - DicomSequence([item])) + DataElementSequence([item])) dest_fg['FrameAnatomySequence'] = FrameAnatomy_a - def _contains_right_attributes(self, tags: dict) -> bool: + def _contains_right_attributes_frame_anatomy(self, tags: dict) -> bool: laterality_tg = tag_for_keyword('Laterality') im_laterality_tg = tag_for_keyword('ImageLaterality') bodypart_tg = tag_for_keyword('BodyPartExamined') @@ -1227,37 +1350,20 @@ def _contains_right_attributes(self, tags: dict) -> bool: bodypart_tg in tags or anatomical_reg_tg) - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_frame_anatomy(self) -> None: + if (not self._contains_right_attributes_frame_anatomy(self._PerFrameTags) and + (self._contains_right_attributes_frame_anatomy(self._SharedTags) or + self._contains_right_attributes_frame_anatomy(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class PixelMeasuresFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _contains_right_attributes(self, tags: dict) -> bool: + item = self._shared_functional_group[0] + 
self._add_module_to_functional_group_frame_anatomy(self._legacy_datasets[0], item) + elif self._contains_right_attributes_frame_anatomy(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_frame_anatomy( + self._legacy_datasets[i], item) + + def _contains_right_attributes_pixel_measures(self, tags: dict) -> bool: PixelSpacing_tg = tag_for_keyword('PixelSpacing') SliceThickness_tg = tag_for_keyword('SliceThickness') ImagerPixelSpacing_tg = tag_for_keyword('ImagerPixelSpacing') @@ -1265,7 +1371,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: SliceThickness_tg in tags or ImagerPixelSpacing_tg in tags) - def _add_module_to_functional_group( + def _add_module_to_functional_group_pixel_measures( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, @@ -1287,44 +1393,27 @@ def _add_module_to_functional_group( pixel_measures_tg = tag_for_keyword(pixel_measures_kw) seq = DataElement(pixel_measures_tg, dictionary_VR(pixel_measures_tg), - DicomSequence([item])) + DataElementSequence([item])) dest_fg[pixel_measures_tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_pixel_measures(self) -> None: + if (not self._contains_right_attributes_pixel_measures(self._PerFrameTags) and + (self._contains_right_attributes_pixel_measures(self._SharedTags) or + self._contains_right_attributes_pixel_measures(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - 
self.SingleFrameSet[i], item) - - -class PlanePositionFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _contains_right_attributes(self, tags: dict) -> bool: + item = self._shared_functional_group[0] + self._add_module_to_functional_group_pixel_measures(self._legacy_datasets[0], item) + elif self._contains_right_attributes_pixel_measures(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_pixel_measures( + self._legacy_datasets[i], item) + + def _contains_right_attributes_plane_position(self, tags: dict) -> bool: ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') return ImagePositionPatient_tg in tags - def _add_module_to_functional_group( + def _add_module_to_functional_group_plane_position( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, @@ -1336,44 +1425,27 @@ def _add_module_to_functional_group( PlanePositionSequence_tg = tag_for_keyword(PlanePositionSequence_kw) seq = DataElement(PlanePositionSequence_tg, dictionary_VR(PlanePositionSequence_tg), - DicomSequence([item])) + DataElementSequence([item])) dest_fg[PlanePositionSequence_tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_plane_position(self) -> None: + if (not self._contains_right_attributes_plane_position(self._PerFrameTags) and + 
(self._contains_right_attributes_plane_position(self._SharedTags) or + self._contains_right_attributes_plane_position(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class PlaneOrientationFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _contains_right_attributes(self, tags: dict) -> bool: + item = self._shared_functional_group[0] + self._add_module_to_functional_group_plane_position(self._legacy_datasets[0], item) + elif self._contains_right_attributes_plane_position(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_plane_position( + self._legacy_datasets[i], item) + + def _contains_right_attributes_plane_orientation(self, tags: dict) -> bool: ImageOrientationPatient_tg = tag_for_keyword('ImageOrientationPatient') return ImageOrientationPatient_tg in tags - def _add_module_to_functional_group( + def _add_module_to_functional_group_plane_orientation( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, @@ -1383,40 +1455,25 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'PlaneOrientationSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) + seq = DataElement(tg, 
dictionary_VR(tg), DataElementSequence([item])) dest_fg[tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_plane_orientation(self) -> None: + if (not self._contains_right_attributes_plane_orientation(self._PerFrameTags) and + (self._contains_right_attributes_plane_orientation(self._SharedTags) or + self._contains_right_attributes_plane_orientation(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class FrameVOILUTFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _contains_right_attributes(self, tags: dict) -> bool: + item = self._shared_functional_group[0] + self._add_module_to_functional_group_plane_orientation(self._legacy_datasets[0], item) + elif self._contains_right_attributes_plane_orientation(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_plane_orientation( + self._legacy_datasets[i], item) + + + + def _contains_right_attributes_frame_voi_lut(self, tags: dict) -> bool: WindowWidth_tg = tag_for_keyword('WindowWidth') WindowCenter_tg = tag_for_keyword('WindowCenter') WindowCenterWidthExplanation_tg = 
tag_for_keyword( @@ -1425,7 +1482,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: WindowCenter_tg in tags or WindowCenterWidthExplanation_tg in tags) - def _add_module_to_functional_group( + def _add_module_to_functional_group_frame_voi_lut( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, @@ -1445,40 +1502,23 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'FrameVOILUTSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) + seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) dest_fg[tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_frame_voi_lut(self) -> None: + if (not self._contains_right_attributes_frame_voi_lut(self._PerFrameTags) and + (self._contains_right_attributes_frame_voi_lut(self._SharedTags) or + self._contains_right_attributes_frame_voi_lut(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class PixelValueTransformationFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _contains_right_attributes(self, tags: dict) -> bool: + item = 
self._shared_functional_group[0] + self._add_module_to_functional_group_frame_voi_lut(self._legacy_datasets[0], item) + elif self._contains_right_attributes_frame_voi_lut(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_frame_voi_lut( + self._legacy_datasets[i], item) + + def _contains_right_attributes_pixel_value_transformation(self, tags: dict) -> bool: RescaleIntercept_tg = tag_for_keyword('RescaleIntercept') RescaleSlope_tg = tag_for_keyword('RescaleSlope') RescaleType_tg = tag_for_keyword('RescaleType') @@ -1486,7 +1526,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: RescaleSlope_tg in tags or RescaleType_tg in tags) - def _add_module_to_functional_group( + def _add_module_to_functional_group_pixel_value_transformation( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, @@ -1537,43 +1577,26 @@ def _add_module_to_functional_group( item[tg].value = value kw = 'PixelValueTransformationSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) + seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) dest_fg[tg] = seq - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_pixel_value_transformation(self) -> None: + if (not self._contains_right_attributes_pixel_value_transformation(self._PerFrameTags) and + (self._contains_right_attributes_pixel_value_transformation(self._SharedTags) or + self._contains_right_attributes_pixel_value_transformation(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, 
len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class ReferencedImageFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _contains_right_attributes(self, tags: dict) -> bool: + item = self._shared_functional_group[0] + self._add_module_to_functional_group_pixel_value_transformation(self._legacy_datasets[0], item) + elif self._contains_right_attributes_pixel_value_transformation(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_pixel_value_transformation( + self._legacy_datasets[i], item) + + def _contains_right_attributes_referenced_image(self, tags: dict) -> bool: return tag_for_keyword('ReferencedImageSequence') in tags - def _add_module_to_functional_group( + def _add_module_to_functional_group_referenced_image( self, src_fg: Dataset, dest_fg: Dataset) -> None: self._copy_attrib_if_present(src_fg, dest_fg, @@ -1581,40 +1604,23 @@ def _add_module_to_functional_group( check_not_to_be_perframe=False, check_not_to_be_empty=False) - def AddModule(self) -> None: - if (not self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_referenced_image(self) -> None: + if (not self._contains_right_attributes_referenced_image(self._PerFrameTags) and + (self._contains_right_attributes_referenced_image(self._SharedTags) or + 
self._contains_right_attributes_referenced_image(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class DerivationImageFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _contains_right_attributes(self, tags: dict) -> bool: + item = self._shared_functional_group[0] + self._add_module_to_functional_group_referenced_image(self._legacy_datasets[0], item) + elif self._contains_right_attributes_referenced_image(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_referenced_image( + self._legacy_datasets[i], item) + + def _contains_right_attributes_derivation_image(self, tags: dict) -> bool: return tag_for_keyword('SourceImageSequence') in tags - def _add_module_to_functional_group( + def _add_module_to_functional_group_derivation_image( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, @@ -1634,40 +1640,29 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'DerivationImageSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) + seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) dest_fg[tg] = seq - def AddModule(self) -> None: - if (not 
self._contains_right_attributes(self._PerFrameTags) and - (self._contains_right_attributes(self._SharedTags) or - self._contains_right_attributes(self.ExcludedFromPerFrameTags)) + def add_module_derivation_image(self) -> None: + if (not self._contains_right_attributes_derivation_image(self._PerFrameTags) and + (self._contains_right_attributes_derivation_image(self._SharedTags) or + self._contains_right_attributes_derivation_image(self.ExcludedFromPerFrameTags)) ): - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - elif self._contains_right_attributes(self._PerFrameTags): - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class UnassignedPerFrame(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _add_module_to_functional_group( + item = self._shared_functional_group[0] + self._add_module_to_functional_group_derivation_image(self._legacy_datasets[0], item) + elif self._contains_right_attributes_derivation_image(self._PerFrameTags): + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_derivation_image( + self._legacy_datasets[i], item) + + def _get_tag_used_dictionary(self, input: list) -> dict: + out: dict = {} + for item in input: + out[item] = False + return out + + def _add_module_to_functional_group_unassigned_perframe( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() for tg in self._eligeible_tags: @@ -1678,7 +1673,7 @@ def _add_module_to_functional_group( 
check_not_to_be_empty=False) kw = 'UnassignedPerFrameConvertedAttributesSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) + seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) dest_fg[tg] = seq def _add_largest_smallest_pixle_value(self) -> None: @@ -1686,30 +1681,30 @@ def _add_largest_smallest_pixle_value(self) -> None: from sys import float_info lval = float_info.min if ltg in self._PerFrameTags: - for frame in self.SingleFrameSet: + for frame in self._legacy_datasets: if ltg in frame: nval = frame[ltg].value else: continue lval = nval if lval < nval else lval if lval > float_info.min: - self.TargetDataset[ltg] = DataElement(ltg, 'SS', int(lval)) + self[ltg] = DataElement(ltg, 'SS', int(lval)) # ========================== stg = tag_for_keyword("SmallestImagePixelValue") sval = float_info.max if stg in self._PerFrameTags: - for frame in self.SingleFrameSet: + for frame in self._legacy_datasets: if stg in frame: nval = frame[stg].value else: continue sval = nval if sval < nval else sval if sval < float_info.max: - self.TargetDataset[stg] = DataElement(stg, 'SS', int(sval)) + self[stg] = DataElement(stg, 'SS', int(sval)) stg = "SmallestImagePixelValue" - def AddModule(self) -> None: + def add_module_unassigned_perframe(self) -> None: # first collect all not used tags # note that this is module is order dependent self._add_largest_smallest_pixle_value() @@ -1717,34 +1712,17 @@ def AddModule(self) -> None: for tg, used in self._PerFrameTags.items(): if not used and tg not in self.ExcludedFromFunctionalGroupsTags: self._eligeible_tags.append(tg) - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_unassigned_perframe( + self._legacy_datasets[i], item) -class 
UnassignedShared(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _add_module_to_functional_group( + def _add_module_to_functional_group_unassigned_shared( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() for tg, used in self._SharedTags.items(): if (not used and - tg not in self.TargetDataset and + tg not in self and tg not in self.ExcludedFromFunctionalGroupsTags): self._copy_attrib_if_present(src_fg, item, @@ -1753,36 +1731,19 @@ def _add_module_to_functional_group( check_not_to_be_empty=False) kw = 'UnassignedSharedConvertedAttributesSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) + seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) dest_fg[tg] = seq - def AddModule(self) -> None: - item = self._get_shared_item() - self._add_module_to_functional_group(self.SingleFrameSet[0], item) - - -class EmptyType2Attributes(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def CreateEmptyElement(self, tg: BaseTag) -> DataElement: + def add_module_unassigned_shared(self) -> None: + item = self._shared_functional_group[0] + self._add_module_to_functional_group_unassigned_shared(self._legacy_datasets[0], item) + + def _create_empty_element(self, tg: BaseTag) -> DataElement: return DataElement(tg, 
dictionary_VR(tg), None) - def AddModule(self) -> None: + def add_module_empty_type2_attributes(self) -> None: iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[ - self.TargetDataset['SOPClassUID'].value] + self['SOPClassUID'].value] modules = IOD_MODULE_MAP[iod_name] for module in modules: if module['usage'] == 'M': @@ -1791,31 +1752,14 @@ def AddModule(self) -> None: for a in attrib_list: if len(a['path']) == 0 and a['type'] == '2': tg = tag_for_keyword(a['keyword']) - if (tg not in self.SingleFrameSet[0] and - tg not in self.TargetDataset and + if (tg not in self._legacy_datasets[0] and + tg not in self and tg not in self._PerFrameTags and tg not in self._SharedTags): - self.TargetDataset[tg] =\ - self.CreateEmptyElement(tg) - - -class ConversionSourceFunctionalGroup(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _add_module_to_functional_group( + self[tg] =\ + self._create_empty_element(tg) + + def _add_module_to_functional_group_conversion_source( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() self._copy_attrib_if_present(src_fg, @@ -1832,41 +1776,22 @@ def _add_module_to_functional_group( check_not_to_be_empty=True) kw = 'ConversionSourceAttributesSequence' tg = tag_for_keyword(kw) - seq = DataElement(tg, dictionary_VR(tg), DicomSequence([item])) + seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) dest_fg[tg] = seq - def AddModule(self) -> None: - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - - -class FrameContentFunctionalGroup(Abstract_MultiframeModuleAdder): - - def 
__init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime - self._slices: list = [] - self._tolerance = 0.0001 - self._slice_location_map: dict = {} + def add_module_conversion_source(self) -> None: + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_conversion_source( + self._legacy_datasets[i], item) + + self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime - def _build_slices_geometry(self) -> None: + def _build_slices_geometry_frame_content(self) -> None: logger = logging.getLogger(__name__) - frame_count = len(self.SingleFrameSet) + frame_count = len(self._legacy_datasets) for i in range(0, frame_count): - curr_frame = self.SingleFrameSet[i] + curr_frame = self._legacy_datasets[i] ImagePositionPatient_v = None \ if 'ImagePositionPatient' not in curr_frame\ else curr_frame['ImagePositionPatient'].value @@ -1912,7 +1837,7 @@ def _build_slices_geometry(self) -> None: self._slices = [] # clear the slices break - def _are_all_slices_parallel(self) -> bool: + def _are_all_slices_parallel_frame_content(self) -> bool: slice_count = len(self._slices) if slice_count >= 2: last_slice = self._slices[0] @@ -1928,12 +1853,12 @@ def _are_all_slices_parallel(self) -> bool: else: return False - def _add_stack_info(self) -> None: + def _add_stack_info_frame_content(self) -> None: logger = logging.getLogger(__name__) - self._build_slices_geometry() + self._build_slices_geometry_frame_content() round_digits = int(ceil(-log10(self._tolerance))) source_series_uid = '' - if self._are_all_slices_parallel(): + if 
self._are_all_slices_parallel_frame_content(): self._slice_location_map = {} for idx, s in enumerate(self._slices): not_round_dist = s.GetDistanceAlongOrigin() @@ -1952,24 +1877,24 @@ def _add_stack_info(self) -> None: if len(idxs) != 1: if source_series_uid == '': source_series_uid = \ - self.SingleFrameSet[0].SeriesInstanceUID + self._legacy_datasets[0].SeriesInstanceUID logger.warning( 'There are {} slices in one location {} on ' 'series = {}'.format( len(idxs), loc, source_series_uid)) for frame_index in idxs: - frame = self._get_perframe_item(frame_index) + frame = self._perframe_functional_group[frame_index] new_item = frame[frame_content_tg].value[0] new_item["StackID"] = self._get_or_create_attribute( - self.SingleFrameSet[0], + self._legacy_datasets[0], "StackID", "0") new_item["InStackPositionNumber"] =\ self._get_or_create_attribute( - self.SingleFrameSet[0], + self._legacy_datasets[0], "InStackPositionNumber", distance_index) distance_index += 1 - def _contains_right_attributes(self, tags: dict) -> bool: + def _contains_right_attributes_frame_content(self, tags: dict) -> bool: AcquisitionDateTime_tg = tag_for_keyword('AcquisitionDateTime') AcquisitionDate_tg = tag_for_keyword('AcquisitionDate') AcquisitionTime_tg = tag_for_keyword('AcquisitionTime') @@ -1977,7 +1902,7 @@ def _contains_right_attributes(self, tags: dict) -> bool: AcquisitionTime_tg in tags or AcquisitionDate_tg in tags) - def _add_module_to_functional_group( + def _add_module_to_functional_group_frame_content( self, src_fg: Dataset, dest_fg: Dataset) -> None: item = Dataset() fan_tg = tag_for_keyword('FrameAcquisitionNumber') @@ -1995,7 +1920,7 @@ def _add_module_to_functional_group( FrameAcquisitionDateTime_a = DataElement( tag_for_keyword('FrameAcquisitionDateTime'), 'DT', AcquisitionDateTime_a.value) - AcquisitionDateTime_is_perframe = self._contains_right_attributes( + AcquisitionDateTime_is_perframe = self._contains_right_attributes_frame_content( self._PerFrameTags) if 
FrameAcquisitionDateTime_a.value == self.EarliestDateTime: AcquisitionDate_a = self._get_or_create_attribute( @@ -2030,612 +1955,204 @@ def _add_module_to_functional_group( DT(d_t.strftime('%Y%m%d%H%M%S')) item['FrameAcquisitionDateTime'] = FrameAcquisitionDateTime_a # --------------------------------- - self._copy_attrib_if_present( - src_fg, item, "AcquisitionDuration", - "FrameAcquisitionDuration", - check_not_to_be_perframe=False, - check_not_to_be_empty=True) - self._copy_attrib_if_present( - src_fg, item, - 'TemporalPositionIndex', - check_not_to_be_perframe=False, - check_not_to_be_empty=True) - self._copy_attrib_if_present( - src_fg, item, "ImageComments", - "FrameComments", - check_not_to_be_perframe=False, - check_not_to_be_empty=True) - # ----------------------------------- - seq_tg = tag_for_keyword('FrameContentSequence') - dest_fg[seq_tg] = DataElement( - seq_tg, dictionary_VR(seq_tg), DicomSequence([item])) - # Also we want to add the earliest frame acq date time to the multiframe: - - def _add_acquisition_info(self) -> None: - for i in range(0, len(self.SingleFrameSet)): - item = self._get_perframe_item(i) - self._add_module_to_functional_group( - self.SingleFrameSet[i], item) - if self.EarliestFrameAcquisitionDateTime < self.FarthestFutureDateTime: - kw = 'AcquisitionDateTime' - self.TargetDataset[kw] = DataElement( - tag_for_keyword(kw), - 'DT', self.EarliestFrameAcquisitionDateTime) - - def AddModule(self) -> None: - self._add_acquisition_info() - self._add_stack_info() - - -class PixelData(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - self._byte_data = bytearray() - self._word_data = bytearray() - - def 
_is_other_byte_vr(self, vr: str) -> bool: - return vr[0] == 'O' and vr[1] == 'B' - - def _is_other_word_vr(self, vr: str) -> bool: - return vr[0] == 'O' and vr[1] == 'W' - # def _contains_right_attributes(self, tags: dict) -> bool: - # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') - # return ImagePositionPatient_tg in tags - - def _copy_data(self, src: bytearray, word_data: bool = False) -> None: - # Make sure that the length complies by row and col - if word_data: - des = self._word_data - ByteCount = 2 * self._number_of_pixels_per_frame - else: - des = self._byte_data - ByteCount = self._number_of_pixels_per_frame - if len(src) != ByteCount: - tmp: bytearray = bytearray(ByteCount) - tmp[:len(src)] = src[:] - src = tmp - des.extend(src) - - def AddModule(self) -> None: - kw = 'NumberOfFrames' - tg = tag_for_keyword(kw) - self._frame_count = len(self.SingleFrameSet) - self.TargetDataset[kw] =\ - DataElement(tg, dictionary_VR(tg), self._frame_count) - row = self.SingleFrameSet[0]["Rows"].value - col = self.SingleFrameSet[0]["Columns"].value - self._number_of_pixels_per_frame = row * col - self._number_of_pixels = row * col * self._frame_count - kw = "PixelData" - for i in range(0, len(self.SingleFrameSet)): - if kw not in self.SingleFrameSet[i]: - continue - PixelData_a = self.SingleFrameSet[i][kw] - if self._is_other_byte_vr(PixelData_a.VR): - if len(self._word_data) != 0: - raise TypeError( - 'Cannot mix OB and OW Pixel Data ' - 'VR from different frames') - self._copy_data(PixelData_a.value, False) - elif self._is_other_word_vr(PixelData_a.VR): - if len(self._byte_data) != 0: - raise TypeError( - 'Cannot mix OB and OW Pixel Data ' - 'VR from different frames') - self._copy_data(PixelData_a.value, True) - else: - raise TypeError( - 'Cannot mix OB and OW Pixel Data VR from different frames') - if len(self._byte_data) != 0: - MF_PixelData = DataElement(tag_for_keyword(kw), - 'OB', bytes(self._byte_data)) - elif len(self._word_data) != 0: - 
MF_PixelData = DataElement(tag_for_keyword(kw), - 'OW', bytes(self._word_data)) - self.TargetDataset[kw] = MF_PixelData - - -class ContentDateTime(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - self.EarliestContentDateTime = self.FarthestFutureDateTime - - def AddModule(self) -> None: - default_atrs = ["Acquisition", "Series", "Study"] - for i in range(0, len(self.SingleFrameSet)): - src = self.SingleFrameSet[i] - default_date = self.FarthestFutureDate - for def_atr in default_atrs: - at_tg = tag_for_keyword(def_atr + "Date") - if at_tg in src: - val = src[at_tg].value - if isinstance(val, DA): - default_date = val - break - kw = 'ContentDate' - d_a = self._get_or_create_attribute( - src, kw, default_date) - d = d_a.value - default_time = self.FarthestFutureTime - for def_atr in default_atrs: - at_tg = tag_for_keyword(def_atr + "Time") - if at_tg in src: - val = src[at_tg].value - if isinstance(val, TM): - default_time = val - break - kw = 'ContentTime' - t_a = self._get_or_create_attribute( - src, kw, default_time) - t = t_a.value - value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S.%f')) - if self.EarliestContentDateTime > value: - self.EarliestContentDateTime = value - if self.EarliestContentDateTime < self.FarthestFutureDateTime: - n_d = DA(self.EarliestContentDateTime.date().strftime('%Y%m%d')) - n_t = TM(self.EarliestContentDateTime.time().strftime('%H%M%S.%f')) - kw = 'ContentDate' - self.TargetDataset[kw] = DataElement( - tag_for_keyword(kw), 'DA', n_d) - kw = 'ContentTime' - self.TargetDataset[kw] = DataElement( - tag_for_keyword(kw), 'TM', n_t) - - -class InstanceCreationDateTime(Abstract_MultiframeModuleAdder): 
- - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def AddModule(self) -> None: - nnooww = datetime.now() - n_d = DA(nnooww.date().strftime('%Y%m%d')) - n_t = TM(nnooww.time().strftime('%H%M%S')) - kw = 'InstanceCreationDate' - self.TargetDataset[kw] = DataElement( - tag_for_keyword(kw), 'DA', n_d) - kw = 'InstanceCreationTime' - self.TargetDataset[kw] = DataElement( - tag_for_keyword(kw), 'TM', n_t) - - -class ContributingEquipmentSequence(Abstract_MultiframeModuleAdder): - - def __init__(self, sf_datasets: Sequence[Dataset], - excluded_from_perframe_tags: dict, - excluded_from_functional_tags: dict, - perframe_tags: dict, - shared_tags: dict, - multi_frame_output: Dataset): - super().__init__( - sf_datasets, - excluded_from_perframe_tags, - excluded_from_functional_tags, - perframe_tags, - shared_tags, - multi_frame_output) - - def _add_data_element_to_target(self, target: Dataset, - kw: str, value: Any) -> None: - tg = tag_for_keyword(kw) - target[kw] = DataElement(tg, dictionary_VR(tg), value) - - def AddModule(self) -> None: - CodeValue_tg = tag_for_keyword('CodeValue') - CodeMeaning_tg = tag_for_keyword('CodeMeaning') - CodingSchemeDesignator_tg = tag_for_keyword('CodingSchemeDesignator') - PurposeOfReferenceCode_item = Dataset() - PurposeOfReferenceCode_item['CodeValue'] = DataElement( - CodeValue_tg, - dictionary_VR(CodeValue_tg), - '109106') - PurposeOfReferenceCode_item['CodeMeaning'] = DataElement( - CodeMeaning_tg, - dictionary_VR(CodeMeaning_tg), - 'Enhanced Multi-frame Conversion Equipment') - PurposeOfReferenceCode_item['CodingSchemeDesignator'] = DataElement( - CodingSchemeDesignator_tg, - dictionary_VR(CodingSchemeDesignator_tg), - 'DCM') - 
PurposeOfReferenceCode_seq = DataElement( - tag_for_keyword('PurposeOfReferenceCodeSequence'), - 'SQ', DicomSequence([PurposeOfReferenceCode_item])) - item: Dataset = Dataset() - item[ - 'PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq - self._add_data_element_to_target(item, "Manufacturer", 'HighDicom') - self._add_data_element_to_target(item, "InstitutionName", 'HighDicom') - self._add_data_element_to_target( - item, - "InstitutionalDepartmentName", - 'Software Development') - self._add_data_element_to_target( - item, - "InstitutionAddress", - 'Radialogy Department, B&W Hospital, Boston, MA') - self._add_data_element_to_target( - item, - "SoftwareVersions", - '1.4') # get sw version - self._add_data_element_to_target( - item, - "ContributionDescription", - 'Legacy Enhanced Image created from Classic Images') - tg = tag_for_keyword('ContributingEquipmentSequence') - self.TargetDataset[tg] = DataElement(tg, 'SQ', DicomSequence([item])) - - -class FrameSet: - def __init__(self, single_frame_list: list, - distinguishing_tags: list): - self._Frames = single_frame_list - self._DistinguishingAttributesTags = distinguishing_tags - tmp = [ - tag_for_keyword('AcquisitionDateTime'), - tag_for_keyword('AcquisitionDate'), - tag_for_keyword('AcquisitionTime'), - tag_for_keyword('SpecificCharacterSet')] - self._ExcludedFromPerFrameTags =\ - self.DistinguishingAttributesTags + tmp - self._PerFrameTags: list = [] - self._SharedTags: list = [] - self._find_per_frame_and_shared_tags() - - @property - def Frames(self) -> List[Dataset]: - return self._Frames[:] - - @property - def DistinguishingAttributesTags(self) -> List[Tag]: - return self._DistinguishingAttributesTags[:] - - @property - def ExcludedFromPerFrameTags(self) -> List[Tag]: - return self._ExcludedFromPerFrameTags[:] - - @property - def PerFrameTags(self) -> List[Tag]: - return self._PerFrameTags[:] - - @property - def SharedTags(self) -> List[Tag]: - return self._SharedTags[:] - - @property - def 
SeriesInstanceUID(self) -> UID: - return self._Frames[0].SeriesInstanceUID - - @property - def StudyInstanceUID(self) -> UID: - return self._Frames[0].StudyInstanceUID - - def GetSOPInstanceUIDList(self) -> list: - OutputList: list = [] - for f in self._Frames: - OutputList.append(f.SOPInstanceUID) - return OutputList - - def GetSOPClassUID(self) -> UID: - return self._Frames[0].SOPClassUID - - def _find_per_frame_and_shared_tags(self) -> None: - # logger = logging.getLogger(__name__) - rough_shared: dict = {} - sfs = self.Frames - for ds in sfs: - for ttag, elem in ds.items(): - if (not ttag.is_private and not - DicomHelper.istag_file_meta_information_group(ttag) and not - DicomHelper.istag_repeating_group(ttag) and not - DicomHelper.istag_group_length(ttag) and not - self._istag_excluded_from_perframe(ttag) and - ttag != tag_for_keyword('PixelData')): - elem = ds[ttag] - if ttag not in self._PerFrameTags: - self._PerFrameTags.append(ttag) - if ttag in rough_shared: - rough_shared[ttag].append(elem.value) - else: - rough_shared[ttag] = [elem.value] - to_be_removed_from_shared = [] - for ttag, v in rough_shared.items(): - v = rough_shared[ttag] - if len(v) < len(self.Frames): - to_be_removed_from_shared.append(ttag) - else: - all_values_are_equal = True - for v_i in v: - if not DicomHelper.isequal(v_i, v[0]): - all_values_are_equal = False - break - if not all_values_are_equal: - to_be_removed_from_shared.append(ttag) - from pydicom.datadict import keyword_for_tag - for t, v in rough_shared.items(): - if keyword_for_tag(t) != 'PatientSex': - continue - for t in to_be_removed_from_shared: - del rough_shared[t] - for t, v in rough_shared.items(): - self._SharedTags.append(t) - self._PerFrameTags.remove(t) - - def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: - return t in self.ExcludedFromPerFrameTags - - -class FrameSetCollection: - def __init__(self, single_frame_list: list): - logger = logging.getLogger(__name__) - self.MixedFrames = single_frame_list - 
self.MixedFramesCopy = self.MixedFrames[:] - self._DistinguishingAttributeKeywords = [ - 'PatientID', - 'PatientName', - 'Manufacturer', - 'InstitutionName', - 'InstitutionAddress', - 'StationName', - 'InstitutionalDepartmentName', - 'ManufacturerModelName', - 'DeviceSerialNumber', - 'SoftwareVersions', - 'GantryID', - 'PixelPaddingValue', - 'Modality', - 'ImageType', - 'BurnedInAnnotation', - 'SOPClassUID', - 'Rows', - 'Columns', - 'BitsStored', - 'BitsAllocated', - 'HighBit', - 'PixelRepresentation', - 'PhotometricInterpretation', - 'PlanarConfiguration', - 'SamplesPerPixel', - 'ProtocolName', - 'ImageOrientationPatient', - 'PixelSpacing', - 'SliceThickness', - 'AcquisitionContextSequence'] - to_be_removed_from_distinguishing_attribs: set = set() - self._FrameSets: list = [] - frame_counts = [] - frameset_counter = 0 - while len(self.MixedFramesCopy) != 0: - frameset_counter += 1 - x = self._find_all_similar_to_first_datasets() - self._FrameSets.append(FrameSet(x[0], x[1])) - frame_counts.append(len(x[0])) - # log information - logger.debug("Frameset({:02d}) including {:03d} frames".format( - frameset_counter, len(x[0]))) - logger.debug('\t Distinguishing tags:') - for dg_i, dg_tg in enumerate(x[1], 1): - logger.debug('\t\t{:02d}/{})\t{}-{:32.32s} = {:32.32s}'.format( - dg_i, len(x[1]), DicomHelper.tag2str(dg_tg), - keyword_for_tag(dg_tg), - str(x[0][0][dg_tg].value))) - logger.debug('\t dicom datasets in this frame set:') - for dicom_i, dicom_ds in enumerate(x[0], 1): - logger.debug('\t\t{}/{})\t {}'.format( - dicom_i, len(x[0]), dicom_ds['SOPInstanceUID'])) - frames = '' - for i, f_count in enumerate(frame_counts, 1): - frames += '{: 2d}){:03d}\t'.format(i, f_count) - frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( - len(frame_counts), len(self.MixedFrames)) + frames - logger.info(frames) - for kw in to_be_removed_from_distinguishing_attribs: - self.DistinguishingAttributeKeywords.remove(kw) - self.ExcludedFromPerFrameTags = {} - for kwkw in 
self.DistinguishingAttributeKeywords: - self.ExcludedFromPerFrameTags[tag_for_keyword(kwkw)] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionDateTime')] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionDate')] = False - self.ExcludedFromPerFrameTags[ - tag_for_keyword('AcquisitionTime')] = False - self.ExcludedFromFunctionalGroupsTags = { - tag_for_keyword('SpecificCharacterSet'): False} - - def _find_all_similar_to_first_datasets(self) -> tuple: - logger = logging.getLogger(__name__) - similar_ds: list = [self.MixedFramesCopy[0]] - distinguishing_tags_existing = [] - distinguishing_tags_missing = [] - self.MixedFramesCopy = self.MixedFramesCopy[1:] - for kw in self.DistinguishingAttributeKeywords: - tg = tag_for_keyword(kw) - if tg in similar_ds[0]: - distinguishing_tags_existing.append(tg) - else: - distinguishing_tags_missing.append(tg) - logger_msg = set() - for ds in self.MixedFramesCopy: - all_equal = True - for tg in distinguishing_tags_missing: - if tg in ds: - logger_msg.add( - '{} is missing in all but {}'.format( - DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) - all_equal = False - break - if not all_equal: - continue - for tg in distinguishing_tags_existing: - ref_val = similar_ds[0][tg].value - if tg not in ds: - all_equal = False - break - new_val = ds[tg].value - if not DicomHelper.isequal(ref_val, new_val): - logger_msg.add( - 'Inequality on distinguishing ' - 'attribute{} -> {} != {} \n series uid = {}'.format( - DicomHelper.tag2kwstr(tg), ref_val, new_val, - ds.SeriesInstanceUID)) - all_equal = False - break - if all_equal: - similar_ds.append(ds) - for msg_ in logger_msg: - logger.info(msg_) - for ds in similar_ds: - if ds in self.MixedFramesCopy: - self.MixedFramesCopy.remove(ds) - return (similar_ds, distinguishing_tags_existing) + self._copy_attrib_if_present( + src_fg, item, "AcquisitionDuration", + "FrameAcquisitionDuration", + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + 
self._copy_attrib_if_present( + src_fg, item, + 'TemporalPositionIndex', + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + self._copy_attrib_if_present( + src_fg, item, "ImageComments", + "FrameComments", + check_not_to_be_perframe=False, + check_not_to_be_empty=True) + # ----------------------------------- + seq_tg = tag_for_keyword('FrameContentSequence') + dest_fg[seq_tg] = DataElement( + seq_tg, dictionary_VR(seq_tg), DataElementSequence([item])) + # Also we want to add the earliest frame acq date time to the multiframe: - @property - def DistinguishingAttributeKeywords(self) -> List[str]: - return self._DistinguishingAttributeKeywords[:] + def _add_acquisition_info_frame_content(self) -> None: + for i in range(0, len(self._legacy_datasets)): + item = self._perframe_functional_group[i] + self._add_module_to_functional_group_frame_content( + self._legacy_datasets[i], item) + if self.EarliestFrameAcquisitionDateTime < self.FarthestFutureDateTime: + kw = 'AcquisitionDateTime' + self[kw] = DataElement( + tag_for_keyword(kw), + 'DT', self.EarliestFrameAcquisitionDateTime) - @property - def FrameSets(self) -> List[FrameSet]: - return self._FrameSets + def add_module_frame_content(self) -> None: + self._add_acquisition_info_frame_content() + self._add_stack_info_frame_content() + def _is_other_byte_vr_pixel_data(self, vr: str) -> bool: + return vr[0] == 'O' and vr[1] == 'B' -class LegacyConvertedEnhanceImage(SOPClass): - """SOP class for Legacy Converted Enhanced PET Image instances.""" + def _is_other_word_vr_pixel_data(self, vr: str) -> bool: + return vr[0] == 'O' and vr[1] == 'W' + # def _contains_right_attributes(self, tags: dict) -> bool: + # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') + # return ImagePositionPatient_tg in tags - def __init__( - self, - frame_set: FrameSet, - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - sort_key: Callable = None, - **kwargs: Any) -> None: 
- """ - Parameters - ---------- - legacy_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of legacy single-frame image instances that should - be converted - series_instance_uid: str - UID of the series - series_number: Union[int, None] - Number of the series within the study - sop_instance_uid: str - UID that should be assigned to the instance - instance_number: int - Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of `highdicom.base.SOPClass` - """ - legacy_datasets = frame_set.Frames - try: - ref_ds = legacy_datasets[0] - except IndexError: - raise ValueError('No DICOM data sets of provided.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - if sort_key is None: - sort_key = LegacyConvertedEnhanceImage.default_sort_key - super().__init__( - study_instance_uid="" if 'StudyInstanceUID' not in ref_ds - else ref_ds.StudyInstanceUID, - series_instance_uid=series_instance_uid, - series_number=series_number, - sop_instance_uid=sop_instance_uid, - sop_class_uid=sop_class_uid, - instance_number=instance_number, - manufacturer="" if 'Manufacturer' not in ref_ds - else ref_ds.Manufacturer, - modality="" if 'Modality' not in ref_ds - else ref_ds.Modality, - patient_id=None if 'PatientID' not in ref_ds - else ref_ds.PatientID, - patient_name=None if 'PatientName' not in ref_ds - else ref_ds.PatientName, - patient_birth_date=None if 'PatientBirthDate' not in ref_ds - else ref_ds.PatientBirthDate, - patient_sex=None if 'PatientSex' not in ref_ds - else ref_ds.PatientSex, - accession_number=None if 'AccessionNumber' not in ref_ds - else ref_ds.AccessionNumber, - study_id=None if 'StudyID' not in ref_ds - else ref_ds.StudyID, - study_date=None if 'StudyDate' not in ref_ds - else ref_ds.StudyDate, - study_time=None if 'StudyTime' not in ref_ds - else ref_ds.StudyTime, - referring_physician_name=None if 'ReferringPhysicianName' not in - ref_ds else 
ref_ds.ReferringPhysicianName, - **kwargs) - self._legacy_datasets = legacy_datasets - self.DistinguishingAttributesTags = self._get_tag_used_dictionary( - frame_set.DistinguishingAttributesTags) - self.ExcludedFromPerFrameTags = self._get_tag_used_dictionary( - frame_set.ExcludedFromPerFrameTags) - self._PerFrameTags = self._get_tag_used_dictionary( - frame_set.PerFrameTags) - self._SharedTags = self._get_tag_used_dictionary( - frame_set.SharedTags) - self.ExcludedFromFunctionalGroupsTags = { - tag_for_keyword('SpecificCharacterSet'): False} - # -------------------------------------------------------------------- - self.__build_blocks: list = [] - # == == == == == == == == == == == == == == == == == == == == == == == - new_ds = [] - for item in sorted(self._legacy_datasets, key=sort_key): - new_ds.append(item) - self._legacy_datasets = new_ds - if (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == - 'legacy-converted-enhanced-ct-image'): - self.AddBuildBlocksForCT() - elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == - 'legacy-converted-enhanced-mr-image'): - self.AddBuildBlocksForMR() - elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == - 'legacy-converted-enhanced-pet-image'): - self.AddBuildBlocksForPET() + def _copy_data_pixel_data(self, src: bytearray, word_data: bool = False) -> None: + # Make sure that the length complies by row and col + if word_data: + des = self._word_data + ByteCount = 2 * self._number_of_pixels_per_frame + else: + des = self._byte_data + ByteCount = self._number_of_pixels_per_frame + if len(src) != ByteCount: + tmp: bytearray = bytearray(ByteCount) + tmp[:len(src)] = src[:] + src = tmp + des.extend(src) - def _get_tag_used_dictionary(self, input: list) -> dict: - out: dict = {} - for item in input: - out[item] = False - return out + def add_module_pixel_data(self) -> None: + kw = 'NumberOfFrames' + tg = tag_for_keyword(kw) + self._frame_count = len(self._legacy_datasets) + self[kw] =\ + DataElement(tg, dictionary_VR(tg), self._frame_count) 
+ row = self._legacy_datasets[0]["Rows"].value + col = self._legacy_datasets[0]["Columns"].value + self._number_of_pixels_per_frame = row * col + self._number_of_pixels = row * col * self._frame_count + kw = "PixelData" + for i in range(0, len(self._legacy_datasets)): + if kw not in self._legacy_datasets[i]: + continue + PixelData_a = self._legacy_datasets[i][kw] + if self._is_other_byte_vr_pixel_data(PixelData_a.VR): + if len(self._word_data) != 0: + raise TypeError( + 'Cannot mix OB and OW Pixel Data ' + 'VR from different frames') + self._copy_data_pixel_data(PixelData_a.value, False) + elif self._is_other_word_vr_pixel_data(PixelData_a.VR): + if len(self._byte_data) != 0: + raise TypeError( + 'Cannot mix OB and OW Pixel Data ' + 'VR from different frames') + self._copy_data_pixel_data(PixelData_a.value, True) + else: + raise TypeError( + 'Cannot mix OB and OW Pixel Data VR from different frames') + if len(self._byte_data) != 0: + MF_PixelData = DataElement(tag_for_keyword(kw), + 'OB', bytes(self._byte_data)) + elif len(self._word_data) != 0: + MF_PixelData = DataElement(tag_for_keyword(kw), + 'OW', bytes(self._word_data)) + self[kw] = MF_PixelData + + def add_module_content_date_time(self) -> None: + default_atrs = ["Acquisition", "Series", "Study"] + for i in range(0, len(self._legacy_datasets)): + src = self._legacy_datasets[i] + default_date = self.FarthestFutureDate + for def_atr in default_atrs: + at_tg = tag_for_keyword(def_atr + "Date") + if at_tg in src: + val = src[at_tg].value + if isinstance(val, DA): + default_date = val + break + kw = 'ContentDate' + d_a = self._get_or_create_attribute( + src, kw, default_date) + d = d_a.value + default_time = self.FarthestFutureTime + for def_atr in default_atrs: + at_tg = tag_for_keyword(def_atr + "Time") + if at_tg in src: + val = src[at_tg].value + if isinstance(val, TM): + default_time = val + break + kw = 'ContentTime' + t_a = self._get_or_create_attribute( + src, kw, default_time) + t = t_a.value + value = 
DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S.%f')) + if self.EarliestContentDateTime > value: + self.EarliestContentDateTime = value + if self.EarliestContentDateTime < self.FarthestFutureDateTime: + n_d = DA(self.EarliestContentDateTime.date().strftime('%Y%m%d')) + n_t = TM(self.EarliestContentDateTime.time().strftime('%H%M%S.%f')) + kw = 'ContentDate' + self[kw] = DataElement( + tag_for_keyword(kw), 'DA', n_d) + kw = 'ContentTime' + self[kw] = DataElement( + tag_for_keyword(kw), 'TM', n_t) + + def _add_data_element_to_target_contributing_equipment(self, target: Dataset, + kw: str, value: Any) -> None: + tg = tag_for_keyword(kw) + target[kw] = DataElement(tg, dictionary_VR(tg), value) + + def add_module_contributing_equipment(self) -> None: + CodeValue_tg = tag_for_keyword('CodeValue') + CodeMeaning_tg = tag_for_keyword('CodeMeaning') + CodingSchemeDesignator_tg = tag_for_keyword('CodingSchemeDesignator') + PurposeOfReferenceCode_item = Dataset() + PurposeOfReferenceCode_item['CodeValue'] = DataElement( + CodeValue_tg, + dictionary_VR(CodeValue_tg), + '109106') + PurposeOfReferenceCode_item['CodeMeaning'] = DataElement( + CodeMeaning_tg, + dictionary_VR(CodeMeaning_tg), + 'Enhanced Multi-frame Conversion Equipment') + PurposeOfReferenceCode_item['CodingSchemeDesignator'] = DataElement( + CodingSchemeDesignator_tg, + dictionary_VR(CodingSchemeDesignator_tg), + 'DCM') + PurposeOfReferenceCode_seq = DataElement( + tag_for_keyword('PurposeOfReferenceCodeSequence'), + 'SQ', DataElementSequence([PurposeOfReferenceCode_item])) + item: Dataset = Dataset() + item[ + 'PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq + self._add_data_element_to_target_contributing_equipment(item, "Manufacturer", 'HighDicom') + self._add_data_element_to_target_contributing_equipment(item, "InstitutionName", 'HighDicom') + self._add_data_element_to_target_contributing_equipment( + item, + "InstitutionalDepartmentName", + 'Software Development') + 
self._add_data_element_to_target_contributing_equipment( + item, + "InstitutionAddress", + 'Radialogy Department, B&W Hospital, Boston, MA') + self._add_data_element_to_target_contributing_equipment( + item, + "SoftwareVersions", + '1.4') # get sw version + self._add_data_element_to_target_contributing_equipment( + item, + "ContributionDescription", + 'Legacy Enhanced Image created from Classic Images') + tg = tag_for_keyword('ContributingEquipmentSequence') + self[tg] = DataElement(tg, 'SQ', DataElementSequence([item])) + def add_module_instance_creation_date_time(self) -> None: + nnooww = datetime.now() + n_d = DA(nnooww.date().strftime('%Y%m%d')) + n_t = TM(nnooww.time().strftime('%H%M%S')) + kw = 'InstanceCreationDate' + self[kw] = DataElement( + tag_for_keyword(kw), 'DA', n_d) + kw = 'InstanceCreationTime' + self[kw] = DataElement( + tag_for_keyword(kw), 'TM', n_t) + def default_sort_key(x: Dataset) -> tuple: out: tuple = tuple() if 'SeriesNumber' in x: @@ -2646,266 +2163,90 @@ def default_sort_key(x: Dataset) -> tuple: out += (x['SOPInstanceUID'].value, ) return out - def AddNewBuildBlock( - self, element: Abstract_MultiframeModuleAdder) -> None: - if not isinstance(element, Abstract_MultiframeModuleAdder): - raise ValueError('Build block must be an instance ' - 'of Abstract_MultiframeModuleAdder') + def add_new_build_block( + self, element) -> None: + # if not isinstance(element, Abstract_MultiframeModuleAdder): + # raise ValueError('Build block must be an instance ' + # 'of Abstract_MultiframeModuleAdder') self.__build_blocks.append(element) def ClearBuildBlocks(self) -> None: self.__build_blocks = [] - def AddCommonCT_PET_MR_BuildBlocks(self) -> None: - Blocks = [ - ImagePixelModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - CompositeInstanceContex( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - 
self._PerFrameTags, - self._SharedTags, - self), - EnhancedCommonImageModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - AcquisitionContextModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - FrameAnatomyFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - PixelMeasuresFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - PlaneOrientationFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - PlanePositionFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - FrameVOILUTFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - PixelValueTransformationFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - ReferencedImageFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - ConversionSourceFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - FrameContentFunctionalGroup( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - 
self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - PixelData( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - ContentDateTime( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - InstanceCreationDateTime( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - ContributingEquipmentSequence( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - UnassignedPerFrame( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - UnassignedShared( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self) # , - # StackInformation( - # self._legacy_datasets, - # self.ExcludedFromPerFrameTags, - # self.ExcludedFromFunctionalGroupsTags, - # self._PerFrameTags, - # self._SharedTags, - # self), - # EmptyType2Attributes( - # self._legacy_datasets, - # self.ExcludedFromPerFrameTags, - # self.ExcludedFromFunctionalGroupsTags, - # self._PerFrameTags, - # self._SharedTags, - # self) + def add_commonct_pet_mr_build_blocks(self) -> None: + blocks = [ + [self.add_module_image_pixel, None], + [self.add_module_composite_instance_contex, None], + [self.add_module_enhanced_common_image, None], + [self.add_module_acquisition_context, None], + [self.add_module_frame_anatomy, None], + [self.add_module_pixel_measures, None], + [self.add_module_plane_orientation, None], + [self.add_module_plane_position, None], + [self.add_module_frame_voi_lut, None], + 
[self.add_module_pixel_value_transformation, None], + [self.add_module_referenced_image, None], + [self.add_module_conversion_source, None], + [self.add_module_frame_content, None], + [self.add_module_pixel_data, None], + [self.add_module_content_date_time, None], + [self.add_module_instance_creation_date_time, None], + [self.add_module_contributing_equipment, None], + [self.add_module_unassigned_perframe, None], + [self.add_module_unassigned_shared, None], ] - for b in Blocks: - self.AddNewBuildBlock(b) - - def AddCTSpecificBuildBlocks(self) -> None: - Blocks = [ - CommonCTMRPETImageDescriptionMacro( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self, - 'CT'), - EnhancedCTImageModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - ContrastBolusModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self) + for b in blocks: + self.add_new_build_block(b) + + def add_ct_specific_build_blocks(self) -> None: + blocks = [ + [self.add_module_common_ct_mr_pet_image_description, ('CT',)], + [self.add_module_enhanced_ct_image, None], + [self.add_module_contrast_bolus, None], ] + for b in blocks: + self.add_new_build_block(b) + + def add_mr_specific_build_blocks(self) -> None: + blocks = [ + [self.add_module_common_ct_mr_pet_image_description, ('MR',)], + [self.add_module_enhanced_mr_image, None], + [self.add_module_contrast_bolus, None], ] - for b in Blocks: - self.AddNewBuildBlock(b) - - def AddMRSpecificBuildBlocks(self) -> None: - Blocks = [ - CommonCTMRPETImageDescriptionMacro( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self, - 'MR'), - EnhancedMRImageModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self), - ContrastBolusModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - 
self._PerFrameTags, - self._SharedTags, - self) + for b in blocks: + self.add_new_build_block(b) + + def add_mr_specific_build_blocks(self) -> None: + blocks = [ + [self.add_module_common_ct_mr_pet_image_description, ('MR',)], + [self.add_module_enhanced_mr_image, None], + [self.add_module_contrast_bolus, None], ] - for b in Blocks: - self.AddNewBuildBlock(b) - - def AddPETSpecificBuildBlocks(self) -> None: - Blocks = [ - CommonCTMRPETImageDescriptionMacro( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self, - 'PET'), - EnhancedPETImageModule( - self._legacy_datasets, - self.ExcludedFromPerFrameTags, - self.ExcludedFromFunctionalGroupsTags, - self._PerFrameTags, - self._SharedTags, - self) + for b in blocks: + self.add_new_build_block(b) + + def add_pet_specific_build_blocks(self) -> None: + blocks = [ + [self.add_module_common_ct_mr_pet_image_description, ('PET',)], + [self.add_module_enhanced_pet_image, None], ] - for b in Blocks: - self.AddNewBuildBlock(b) + for b in blocks: + self.add_new_build_block(b) def AddBuildBlocksForCT(self) -> None: self.ClearBuildBlocks() - self.AddCommonCT_PET_MR_BuildBlocks() - self.AddCTSpecificBuildBlocks() + self.add_commonct_pet_mr_build_blocks() + self.add_ct_specific_build_blocks() def AddBuildBlocksForMR(self) -> None: self.ClearBuildBlocks() - self.AddCommonCT_PET_MR_BuildBlocks() - self.AddMRSpecificBuildBlocks() + self.add_commonct_pet_mr_build_blocks() + self.add_mr_specific_build_blocks() def AddBuildBlocksForPET(self) -> None: self.ClearBuildBlocks() - self.AddCommonCT_PET_MR_BuildBlocks() - self.AddPETSpecificBuildBlocks() + self.add_commonct_pet_mr_build_blocks() + self.add_pet_specific_build_blocks() def BuildMultiFrame(self) -> None: logger = logging.getLogger(__name__) logger.debug('Strt singleframe to multiframe conversion') - for builder in self.__build_blocks: - builder.AddModule() + for fun, args in 
self.__build_blocks: + if not args: + fun() + else: + fun(*args) logger.debug('Conversion succeeded') @@ -2976,7 +2317,7 @@ def is_equal_float(x1: float, x2: float) -> bool: return abs(x1 - x2) < float_tolerance if type(v1) != type(v2): return False - if isinstance(v1, DicomSequence): + if isinstance(v1, DataElementSequence): for item1, item2 in zip(v1, v2): DicomHelper.isequal_dicom_dataset(item1, item2) if not isinstance(v1, MultiValue): From ac45b5e531c77ac737b46a819ef584fb277446cd Mon Sep 17 00:00:00 2001 From: afshin Date: Thu, 1 Apr 2021 23:54:07 -0400 Subject: [PATCH 28/44] modified the code to apply all major comments --- src/highdicom/legacy/sop.py | 603 ++++++++++++++++++------------------ 1 file changed, 304 insertions(+), 299 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 7da8005b..0ecf505f 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -10,7 +10,7 @@ from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DataElementSequence from pydicom.multival import MultiValue -from datetime import date, datetime, time, timedelta +from datetime import datetime, timedelta from pydicom.valuerep import DT, DA, TM from copy import deepcopy from pydicom.uid import UID @@ -27,17 +27,16 @@ '1.2.840.10008.5.1.4.1.1.128': '1.2.840.10008.5.1.4.1.1.128.1', } _SOP_CLASS_UID_IOD_KEY_MAP = { - '1.2.840.10008.5.1.4.1.1.2.2': 'legacy-converted-enhanced-ct-image', - '1.2.840.10008.5.1.4.1.1.4.4': 'legacy-converted-enhanced-mr-image', + '1.2.840.10008.5.1.4.1.1.2.2': 'legacy-converted-enhanced-ct-image', + '1.2.840.10008.5.1.4.1.1.4.4': 'legacy-converted-enhanced-mr-image', '1.2.840.10008.5.1.4.1.1.128.1': 'legacy-converted-enhanced-pet-image', } - def _convert_legacy_to_enhanced( sf_datasets: Sequence[Dataset], mf_dataset: Optional[Dataset] = None -) -> Dataset: + ) -> Dataset: """Converts one or more MR, CT or PET Image instances into one Legacy Converted Enhanced MR/CT/PET Image 
instance by copying information from `sf_datasets` into `mf_dataset`. @@ -534,11 +533,11 @@ def __init__( referring_physician_name=ref_ds.ReferringPhysicianName, **kwargs) _convert_legacy_to_enhanced(legacy_datasets, self) - + class PerframeFunctionalGroup(DataElementSequence): - def __init__(self, number_of_frames: int) -> None: + def __init__(self, number_of_frames: int) -> None: super().__init__() for i in range(0, number_of_frames): item = Dataset() @@ -552,7 +551,7 @@ def __init__(self) -> None: item = Dataset() self.append(item) - + class FrameSet: def __init__(self, single_frame_list: list, distinguishing_tags: list): @@ -862,7 +861,7 @@ def __init__( self[tg] = DataElement(tg, 'SQ', self._perframe_functional_group) self._shared_functional_group = SharedFunctionalGroup() tg = tag_for_keyword('SharedFunctionalGroupsSequence') - self[tg] = DataElement(tg, 'SQ', self._perframe_functional_group) + self[tg] = DataElement(tg, 'SQ', self._shared_functional_group) self.DistinguishingAttributesTags = self._get_tag_used_dictionary( frame_set.DistinguishingAttributesTags) self.ExcludedFromPerFrameTags = self._get_tag_used_dictionary( @@ -873,14 +872,14 @@ def __init__( frame_set.SharedTags) self.ExcludedFromFunctionalGroupsTags = { tag_for_keyword('SpecificCharacterSet'): False} - + # -------------------------------------------------------------------- self.__build_blocks: list = [] # == == == == == == == == == == == == == == == == == == == == == == == new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): new_ds.append(item) - + # self = multi_frame_output self._module_excepted_list: dict = { "patient": [], @@ -978,17 +977,15 @@ def __init__( self._byte_data = bytearray() self._word_data = bytearray() self.EarliestContentDateTime = self.FarthestFutureDateTime - - if (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-ct-image'): - self.AddBuildBlocksForCT() + self._add_build_blocks_for_ct() elif 
(_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-mr-image'): - self.AddBuildBlocksForMR() + self._add_build_blocks_for_mr() elif (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-pet-image'): - self.AddBuildBlocksForPET() + self._add_build_blocks_for_pet() def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: if attribute.is_empty: @@ -1076,12 +1073,6 @@ def _get_or_create_attribute( def _add_module(self, module_name: str, excepted_attributes: list = [], check_not_to_be_perframe: bool = True, check_not_to_be_empty: bool = False) -> None: - # sf_sop_instance_uid = sf_datasets[0] - # mf_sop_instance_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ - # sf_sop_instance_uid] - # iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[mf_sop_instance_uid] - # modules = IOD_MODULE_MAP[iod_name] - from copy import deepcopy attribs: list = MODULE_ATTRIBUTE_MAP[module_name] ref_dataset = self._legacy_datasets[0] for a in attribs: @@ -1094,7 +1085,7 @@ def _add_module(self, module_name: str, excepted_attributes: list = [], check_not_to_be_perframe=check_not_to_be_perframe, check_not_to_be_empty=check_not_to_be_empty) - def add_module_image_pixel(self) -> None: + def _add_module_to_mf_image_pixel(self) -> None: module_and_excepted_at = { "image-pixel": [ @@ -1111,8 +1102,8 @@ def add_module_image_pixel(self) -> None: excepted_attributes=except_at, check_not_to_be_empty=False, check_not_to_be_perframe=True) # don't check the perframe set - - def add_module_enhanced_common_image(self) -> None: + + def _add_module_to_mf_enhanced_common_image(self) -> None: ref_dataset = self._legacy_datasets[0] attribs_to_be_added = [ 'ContentQualification', @@ -1154,26 +1145,26 @@ def add_module_enhanced_common_image(self) -> None: # also, do not need to check if PhotometricInterpretation is # per-frame, since a distinguishing attribute phmi_kw = 'PhotometricInterpretation' - phmi_a = self._get_or_create_attribute(self._legacy_datasets[0], - phmi_kw, - 
"MONOCHROME2") + phmi_a = self._get_or_create_attribute( + self._legacy_datasets[0], phmi_kw, "MONOCHROME2") LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ else "IDENTITY" - LUT_shape_a = self._get_or_create_attribute(self._legacy_datasets[0], - 'PresentationLUTShape', - LUT_shape_default) + LUT_shape_a = self._get_or_create_attribute( + self._legacy_datasets[0], + 'PresentationLUTShape', + LUT_shape_default) if not LUT_shape_a.is_empty: self['PresentationLUTShape'] = LUT_shape_a # Icon Image Sequence - always discard these - def add_module_contrast_bolus(self) -> None: + def _add_module_to_mf_contrast_bolus(self) -> None: self._add_module('contrast-bolus') - def add_module_enhanced_ct_image(self) -> None: + def _add_module_to_mf_enhanced_ct_image(self) -> None: pass # David's code doesn't hold anything for this module ... should ask him - def add_module_enhanced_pet_image(self) -> None: + def _add_module_to_mf_enhanced_pet_image(self) -> None: # David's code doesn't hold anything for this module ... 
should ask him kw = 'ContentQualification' tg = tag_for_keyword(kw) @@ -1181,7 +1172,7 @@ def add_module_enhanced_pet_image(self) -> None: self._legacy_datasets[0], kw, 'RESEARCH') self[tg] = elem - def add_module_enhanced_mr_image(self) -> None: + def _add_module_to_mf_enhanced_mr_image(self) -> None: self._copy_attrib_if_present( self._legacy_datasets[0], self, @@ -1223,16 +1214,16 @@ def add_module_enhanced_mr_image(self) -> None: check_not_to_be_perframe=True, check_not_to_be_empty=True) - def add_module_acquisition_context(self) -> None: + def _add_module_to_mf_acquisition_context(self) -> None: tg = tag_for_keyword('AcquisitionContextSequence') if tg not in self._PerFrameTags: self[tg] = self._get_or_create_attribute( self._legacy_datasets[0], tg, None) - - def _get_value_for_frame_type_common_ct_mr_pet_image_description(self, - attrib: DataElement) -> Union[list, None]: + + def _get_value_for_frame_type( + self, attrib: DataElement) -> Union[list, None]: if not isinstance(attrib, DataElement): return None output = ['', '', '', ''] @@ -1244,7 +1235,8 @@ def _get_value_for_frame_type_common_ct_mr_pet_image_description(self, output[3] = 'NONE' return output - def _get_frame_type_seq_tag_common_ct_mr_pet_image_description(self, modality: str) -> int: + def _get_frame_type_seq_tag( + self, modality: str) -> int: seq_kw = '{}{}FrameTypeSequence' if modality == 'PET': seq_kw = seq_kw.format(modality, '') @@ -1252,95 +1244,96 @@ def _get_frame_type_seq_tag_common_ct_mr_pet_image_description(self, modality: s seq_kw = seq_kw.format(modality, 'Image') return tag_for_keyword(seq_kw) - def _add_module_to_functional_group_common_ct_mr_pet_image_description(self, src_fg: Dataset, - dest_fg: Dataset, level: int) -> None: - FrameType_a = src_fg['ImageType'] + def _add_module_to_dataset_common_ct_mr_pet_image_description( + self, source: Dataset, destination: Dataset, level: int) -> None: + FrameType_a = source['ImageType'] if level == 0: FrameType_tg = 
tag_for_keyword('ImageType') else: FrameType_tg = tag_for_keyword('FrameType') - new_val = self._get_value_for_frame_type_common_ct_mr_pet_image_description(FrameType_a) - dest_fg[FrameType_tg] = DataElement(FrameType_tg, - FrameType_a.VR, new_val) + new_val = self._get_value_for_frame_type(FrameType_a) + destination[FrameType_tg] = DataElement( + FrameType_tg, FrameType_a.VR, new_val) def element_generator(kw: str, val: Any) -> DataElement: return DataElement( tag_for_keyword(kw), dictionary_VR(tag_for_keyword(kw)), val) - dest_fg['PixelPresentation'] = element_generator( + destination['PixelPresentation'] = element_generator( 'PixelPresentation', "MONOCHROME") - dest_fg['VolumetricProperties'] = element_generator( + destination['VolumetricProperties'] = element_generator( 'VolumetricProperties', "VOLUME") - dest_fg['VolumeBasedCalculationTechnique'] = element_generator( + destination['VolumeBasedCalculationTechnique'] = element_generator( 'VolumeBasedCalculationTechnique', "NONE") - def add_module_common_ct_mr_pet_image_description(self, modality: str) -> None: + def _add_module_to_mf_common_ct_mr_pet_image_description( + self, modality: str) -> None: im_type_tag = tag_for_keyword('ImageType') - seq_tg = self._get_frame_type_seq_tag_common_ct_mr_pet_image_description(modality) + seq_tg = self._get_frame_type_seq_tag(modality) if im_type_tag not in self._PerFrameTags: - self._add_module_to_functional_group_common_ct_mr_pet_image_description(self._legacy_datasets[0], - self, 0) + self._add_module_to_dataset_common_ct_mr_pet_image_description( + self._legacy_datasets[0], self, 0) # ---------------------------- item = self._shared_functional_group[0] inner_item = Dataset() - self._add_module_to_functional_group_common_ct_mr_pet_image_description(self._legacy_datasets[0], - inner_item, 1) + self._add_module_to_dataset_common_ct_mr_pet_image_description( + self._legacy_datasets[0], inner_item, 1) item[seq_tg] = DataElement( seq_tg, 'SQ', 
DataElementSequence([inner_item])) else: for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] inner_item = Dataset() - self._add_module_to_functional_group_common_ct_mr_pet_image_description(self._legacy_datasets[i], - inner_item, 1) + self._add_module_to_dataset_common_ct_mr_pet_image_description( + self._legacy_datasets[i], inner_item, 1) item[seq_tg] = DataElement( seq_tg, 'SQ', DataElementSequence([inner_item])) - def add_module_composite_instance_contex(self) -> None: + def _add_module_to_mf_composite_instance_contex(self) -> None: for module_name, excpeted_a in self._module_excepted_list.items(): self._add_module( module_name, excepted_attributes=excpeted_a, check_not_to_be_empty=False, check_not_to_be_perframe=True) # don't check the perframe set - - def _add_module_to_functional_group_frame_anatomy( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + + def _add_module_to_dataset_frame_anatomy( + self, source: Dataset, destination: Dataset) -> None: # David's code is more complicaated than mine # Should check it out later. 
fa_seq_tg = tag_for_keyword('FrameAnatomySequence') item = Dataset() - self._copy_attrib_if_present(src_fg, item, 'AnatomicRegionSequence', + self._copy_attrib_if_present(source, item, 'AnatomicRegionSequence', check_not_to_be_perframe=False, check_not_to_be_empty=False) if len(item) != 0: self._copy_attrib_if_present( - src_fg, item, 'FrameLaterality', + source, item, 'FrameLaterality', check_not_to_be_perframe=False, check_not_to_be_empty=True) if 'FrameLaterality' not in item: self._copy_attrib_if_present( - src_fg, item, 'ImageLaterality', + source, item, 'ImageLaterality', 'FrameLaterality', check_not_to_be_perframe=False, check_not_to_be_empty=True) if 'FrameLaterality' not in item: self._copy_attrib_if_present( - src_fg, item, 'Laterality', + source, item, 'Laterality', 'FrameLaterality', check_not_to_be_perframe=False, check_not_to_be_empty=True) if 'FrameLaterality' not in item: FrameLaterality_a = self._get_or_create_attribute( - src_fg, 'FrameLaterality', "U") + source, 'FrameLaterality', "U") item['FrameLaterality'] = FrameLaterality_a FrameAnatomy_a = DataElement( fa_seq_tg, dictionary_VR(fa_seq_tg), DataElementSequence([item])) - dest_fg['FrameAnatomySequence'] = FrameAnatomy_a + destination['FrameAnatomySequence'] = FrameAnatomy_a - def _contains_right_attributes_frame_anatomy(self, tags: dict) -> bool: + def _has_frame_anatomy(self, tags: dict) -> bool: laterality_tg = tag_for_keyword('Laterality') im_laterality_tg = tag_for_keyword('ImageLaterality') bodypart_tg = tag_for_keyword('BodyPartExamined') @@ -1350,20 +1343,21 @@ def _contains_right_attributes_frame_anatomy(self, tags: dict) -> bool: bodypart_tg in tags or anatomical_reg_tg) - def add_module_frame_anatomy(self) -> None: - if (not self._contains_right_attributes_frame_anatomy(self._PerFrameTags) and - (self._contains_right_attributes_frame_anatomy(self._SharedTags) or - self._contains_right_attributes_frame_anatomy(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_frame_anatomy(self) 
-> None: + if (not self._has_frame_anatomy(self._PerFrameTags) and + (self._has_frame_anatomy(self._SharedTags) or + self._has_frame_anatomy(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_frame_anatomy(self._legacy_datasets[0], item) - elif self._contains_right_attributes_frame_anatomy(self._PerFrameTags): + self._add_module_to_dataset_frame_anatomy( + self._legacy_datasets[0], item) + elif self._has_frame_anatomy(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_frame_anatomy( + self._add_module_to_dataset_frame_anatomy( self._legacy_datasets[i], item) - def _contains_right_attributes_pixel_measures(self, tags: dict) -> bool: + def _has_pixel_measures(self, tags: dict) -> bool: PixelSpacing_tg = tag_for_keyword('PixelSpacing') SliceThickness_tg = tag_for_keyword('SliceThickness') ImagerPixelSpacing_tg = tag_for_keyword('ImagerPixelSpacing') @@ -1371,19 +1365,19 @@ def _contains_right_attributes_pixel_measures(self, tags: dict) -> bool: SliceThickness_tg in tags or ImagerPixelSpacing_tg in tags) - def _add_module_to_functional_group_pixel_measures( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_pixel_measures( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'PixelSpacing', check_not_to_be_perframe=False) - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'SliceThickness', check_not_to_be_perframe=False) if 'PixelSpacing' not in item: - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'ImagerPixelSpacing', 'PixelSpacing', @@ -1394,29 +1388,30 @@ def _add_module_to_functional_group_pixel_measures( seq = DataElement(pixel_measures_tg, dictionary_VR(pixel_measures_tg), DataElementSequence([item])) - 
dest_fg[pixel_measures_tg] = seq + destination[pixel_measures_tg] = seq - def add_module_pixel_measures(self) -> None: - if (not self._contains_right_attributes_pixel_measures(self._PerFrameTags) and - (self._contains_right_attributes_pixel_measures(self._SharedTags) or - self._contains_right_attributes_pixel_measures(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_pixel_measures(self) -> None: + if (not self._has_pixel_measures(self._PerFrameTags) and + (self._has_pixel_measures(self._SharedTags) or + self._has_pixel_measures(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_pixel_measures(self._legacy_datasets[0], item) - elif self._contains_right_attributes_pixel_measures(self._PerFrameTags): + self._add_module_to_dataset_pixel_measures( + self._legacy_datasets[0], item) + elif self._has_pixel_measures(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_pixel_measures( + self._add_module_to_dataset_pixel_measures( self._legacy_datasets[i], item) - def _contains_right_attributes_plane_position(self, tags: dict) -> bool: + def _has_plane_position(self, tags: dict) -> bool: ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') return ImagePositionPatient_tg in tags - def _add_module_to_functional_group_plane_position( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_plane_position( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'ImagePositionPatient', check_not_to_be_perframe=False, @@ -1426,29 +1421,30 @@ def _add_module_to_functional_group_plane_position( seq = DataElement(PlanePositionSequence_tg, dictionary_VR(PlanePositionSequence_tg), DataElementSequence([item])) - dest_fg[PlanePositionSequence_tg] = seq + 
destination[PlanePositionSequence_tg] = seq - def add_module_plane_position(self) -> None: - if (not self._contains_right_attributes_plane_position(self._PerFrameTags) and - (self._contains_right_attributes_plane_position(self._SharedTags) or - self._contains_right_attributes_plane_position(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_plane_position(self) -> None: + if (not self._has_plane_position(self._PerFrameTags) and + (self._has_plane_position(self._SharedTags) or + self._has_plane_position(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_plane_position(self._legacy_datasets[0], item) - elif self._contains_right_attributes_plane_position(self._PerFrameTags): + self._add_module_to_dataset_plane_position( + self._legacy_datasets[0], item) + elif self._has_plane_position(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_plane_position( - self._legacy_datasets[i], item) - - def _contains_right_attributes_plane_orientation(self, tags: dict) -> bool: + self._add_module_to_dataset_plane_position( + self._legacy_datasets[i], item) + + def _has_plane_orientation(self, tags: dict) -> bool: ImageOrientationPatient_tg = tag_for_keyword('ImageOrientationPatient') return ImageOrientationPatient_tg in tags - def _add_module_to_functional_group_plane_orientation( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_plane_orientation( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'ImageOrientationPatient', check_not_to_be_perframe=False, @@ -1456,24 +1452,23 @@ def _add_module_to_functional_group_plane_orientation( kw = 'PlaneOrientationSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) - dest_fg[tg] = seq + 
destination[tg] = seq - def add_module_plane_orientation(self) -> None: - if (not self._contains_right_attributes_plane_orientation(self._PerFrameTags) and - (self._contains_right_attributes_plane_orientation(self._SharedTags) or - self._contains_right_attributes_plane_orientation(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_plane_orientation(self) -> None: + if (not self._has_plane_orientation(self._PerFrameTags) and + (self._has_plane_orientation(self._SharedTags) or + self._has_plane_orientation(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_plane_orientation(self._legacy_datasets[0], item) - elif self._contains_right_attributes_plane_orientation(self._PerFrameTags): + self._add_module_to_dataset_plane_orientation( + self._legacy_datasets[0], item) + elif self._has_plane_orientation(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_plane_orientation( + self._add_module_to_dataset_plane_orientation( self._legacy_datasets[i], item) - - - def _contains_right_attributes_frame_voi_lut(self, tags: dict) -> bool: + def _has_frame_voi_lut(self, tags: dict) -> bool: WindowWidth_tg = tag_for_keyword('WindowWidth') WindowCenter_tg = tag_for_keyword('WindowCenter') WindowCenterWidthExplanation_tg = tag_for_keyword( @@ -1482,20 +1477,20 @@ def _contains_right_attributes_frame_voi_lut(self, tags: dict) -> bool: WindowCenter_tg in tags or WindowCenterWidthExplanation_tg in tags) - def _add_module_to_functional_group_frame_voi_lut( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_frame_voi_lut( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'WindowWidth', check_not_to_be_perframe=False, check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg, + 
self._copy_attrib_if_present(source, item, 'WindowCenter', check_not_to_be_perframe=False, check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'WindowCenterWidthExplanation', check_not_to_be_perframe=False, @@ -1503,22 +1498,23 @@ def _add_module_to_functional_group_frame_voi_lut( kw = 'FrameVOILUTSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) - dest_fg[tg] = seq + destination[tg] = seq - def add_module_frame_voi_lut(self) -> None: - if (not self._contains_right_attributes_frame_voi_lut(self._PerFrameTags) and - (self._contains_right_attributes_frame_voi_lut(self._SharedTags) or - self._contains_right_attributes_frame_voi_lut(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_frame_voi_lut(self) -> None: + if (not self._has_frame_voi_lut(self._PerFrameTags) and + (self._has_frame_voi_lut(self._SharedTags) or + self._has_frame_voi_lut(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_frame_voi_lut(self._legacy_datasets[0], item) - elif self._contains_right_attributes_frame_voi_lut(self._PerFrameTags): + self._add_module_to_dataset_frame_voi_lut( + self._legacy_datasets[0], item) + elif self._has_frame_voi_lut(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_frame_voi_lut( + self._add_module_to_dataset_frame_voi_lut( self._legacy_datasets[i], item) - - def _contains_right_attributes_pixel_value_transformation(self, tags: dict) -> bool: + + def _has_pixel_value_transformation(self, tags: dict) -> bool: RescaleIntercept_tg = tag_for_keyword('RescaleIntercept') RescaleSlope_tg = tag_for_keyword('RescaleSlope') RescaleType_tg = tag_for_keyword('RescaleType') @@ -1526,35 +1522,35 @@ def _contains_right_attributes_pixel_value_transformation(self, tags: dict) -> b RescaleSlope_tg in tags or 
RescaleType_tg in tags) - def _add_module_to_functional_group_pixel_value_transformation( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_pixel_value_transformation( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'RescaleSlope', check_not_to_be_perframe=False, check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'RescaleIntercept', check_not_to_be_perframe=False, check_not_to_be_empty=False) haveValuesSoAddType = ('RescaleSlope' in item or 'RescaleIntercept' in item) - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'RescaleType', check_not_to_be_perframe=False, check_not_to_be_empty=True) value = '' - modality = '' if 'Modality' not in src_fg\ - else src_fg["Modality"].value + modality = '' if 'Modality' not in source\ + else source["Modality"].value if haveValuesSoAddType: value = 'US' if modality == 'CT': containes_localizer = False - ImageType_v = [] if 'ImageType' not in src_fg\ - else src_fg['ImageType'].value + ImageType_v = [] if 'ImageType' not in source\ + else source['ImageType'].value for i in ImageType_v: if i == 'LOCALIZER': containes_localizer = True @@ -1562,8 +1558,8 @@ def _add_module_to_functional_group_pixel_value_transformation( if not containes_localizer: value = "HU" # elif modality == 'PT': - # value = 'US' if 'Units' not in src_fg\ - # else src_fg['Units'].value + # value = 'US' if 'Units' not in source\ + # else source['Units'].value else: value = 'US' tg = tag_for_keyword('RescaleType') @@ -1578,62 +1574,64 @@ def _add_module_to_functional_group_pixel_value_transformation( kw = 'PixelValueTransformationSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) - dest_fg[tg] = seq + destination[tg] = seq - def add_module_pixel_value_transformation(self) -> None: 
- if (not self._contains_right_attributes_pixel_value_transformation(self._PerFrameTags) and - (self._contains_right_attributes_pixel_value_transformation(self._SharedTags) or - self._contains_right_attributes_pixel_value_transformation(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_pixel_value_transformation(self) -> None: + if (not self._has_pixel_value_transformation(self._PerFrameTags) and + (self._has_pixel_value_transformation(self._SharedTags) or + self._has_pixel_value_transformation(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_pixel_value_transformation(self._legacy_datasets[0], item) - elif self._contains_right_attributes_pixel_value_transformation(self._PerFrameTags): + self._add_module_to_dataset_pixel_value_transformation( + self._legacy_datasets[0], item) + elif self._has_pixel_value_transformation(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_pixel_value_transformation( + self._add_module_to_dataset_pixel_value_transformation( self._legacy_datasets[i], item) - - def _contains_right_attributes_referenced_image(self, tags: dict) -> bool: + + def _has_referenced_image(self, tags: dict) -> bool: return tag_for_keyword('ReferencedImageSequence') in tags - def _add_module_to_functional_group_referenced_image( - self, src_fg: Dataset, dest_fg: Dataset) -> None: - self._copy_attrib_if_present(src_fg, - dest_fg, + def _add_module_to_dataset_referenced_image( + self, source: Dataset, destination: Dataset) -> None: + self._copy_attrib_if_present(source, + destination, 'ReferencedImageSequence', check_not_to_be_perframe=False, check_not_to_be_empty=False) - def add_module_referenced_image(self) -> None: - if (not self._contains_right_attributes_referenced_image(self._PerFrameTags) and - (self._contains_right_attributes_referenced_image(self._SharedTags) or - 
self._contains_right_attributes_referenced_image(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_referenced_image(self) -> None: + if (not self._has_referenced_image(self._PerFrameTags) and + (self._has_referenced_image(self._SharedTags) or + self._has_referenced_image(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_referenced_image(self._legacy_datasets[0], item) - elif self._contains_right_attributes_referenced_image(self._PerFrameTags): + self._add_module_to_dataset_referenced_image( + self._legacy_datasets[0], item) + elif self._has_referenced_image(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_referenced_image( + self._add_module_to_dataset_referenced_image( self._legacy_datasets[i], item) - def _contains_right_attributes_derivation_image(self, tags: dict) -> bool: + def _has_derivation_image(self, tags: dict) -> bool: return tag_for_keyword('SourceImageSequence') in tags - def _add_module_to_functional_group_derivation_image( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_derivation_image( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'DerivationDescription', check_not_to_be_perframe=False, check_not_to_be_empty=True) - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'DerivationCodeSequence', check_not_to_be_perframe=False, check_not_to_be_empty=False) - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'SourceImageSequence', check_not_to_be_perframe=False, @@ -1641,19 +1639,20 @@ def _add_module_to_functional_group_derivation_image( kw = 'DerivationImageSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) - dest_fg[tg] = seq + 
destination[tg] = seq - def add_module_derivation_image(self) -> None: - if (not self._contains_right_attributes_derivation_image(self._PerFrameTags) and - (self._contains_right_attributes_derivation_image(self._SharedTags) or - self._contains_right_attributes_derivation_image(self.ExcludedFromPerFrameTags)) + def _add_module_to_mf_derivation_image(self) -> None: + if (not self._has_derivation_image(self._PerFrameTags) and + (self._has_derivation_image(self._SharedTags) or + self._has_derivation_image(self.ExcludedFromPerFrameTags)) ): item = self._shared_functional_group[0] - self._add_module_to_functional_group_derivation_image(self._legacy_datasets[0], item) - elif self._contains_right_attributes_derivation_image(self._PerFrameTags): + self._add_module_to_dataset_derivation_image( + self._legacy_datasets[0], item) + elif self._has_derivation_image(self._PerFrameTags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_derivation_image( + self._add_module_to_dataset_derivation_image( self._legacy_datasets[i], item) def _get_tag_used_dictionary(self, input: list) -> dict: @@ -1662,11 +1661,11 @@ def _get_tag_used_dictionary(self, input: list) -> dict: out[item] = False return out - def _add_module_to_functional_group_unassigned_perframe( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_unassigned_perframe( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() for tg in self._eligeible_tags: - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, tg, check_not_to_be_perframe=False, @@ -1674,7 +1673,7 @@ def _add_module_to_functional_group_unassigned_perframe( kw = 'UnassignedPerFrameConvertedAttributesSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) - dest_fg[tg] = seq + destination[tg] = seq def _add_largest_smallest_pixle_value(self) -> None: ltg = 
tag_for_keyword("LargestImagePixelValue") @@ -1704,7 +1703,7 @@ def _add_largest_smallest_pixle_value(self) -> None: stg = "SmallestImagePixelValue" - def add_module_unassigned_perframe(self) -> None: + def _add_module_to_mf_unassigned_perframe(self) -> None: # first collect all not used tags # note that this is module is order dependent self._add_largest_smallest_pixle_value() @@ -1714,17 +1713,17 @@ def add_module_unassigned_perframe(self) -> None: self._eligeible_tags.append(tg) for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_unassigned_perframe( + self._add_module_to_dataset_unassigned_perframe( self._legacy_datasets[i], item) - def _add_module_to_functional_group_unassigned_shared( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_unassigned_shared( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() for tg, used in self._SharedTags.items(): if (not used and tg not in self and tg not in self.ExcludedFromFunctionalGroupsTags): - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, tg, check_not_to_be_perframe=False, @@ -1732,16 +1731,17 @@ def _add_module_to_functional_group_unassigned_shared( kw = 'UnassignedSharedConvertedAttributesSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) - dest_fg[tg] = seq + destination[tg] = seq - def add_module_unassigned_shared(self) -> None: + def _add_module_to_mf_unassigned_shared(self) -> None: item = self._shared_functional_group[0] - self._add_module_to_functional_group_unassigned_shared(self._legacy_datasets[0], item) - + self._add_module_to_dataset_unassigned_shared( + self._legacy_datasets[0], item) + def _create_empty_element(self, tg: BaseTag) -> DataElement: return DataElement(tg, dictionary_VR(tg), None) - def add_module_empty_type2_attributes(self) -> None: + def 
_add_module_to_mf_empty_type2_attributes(self) -> None: iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[ self['SOPClassUID'].value] modules = IOD_MODULE_MAP[iod_name] @@ -1758,17 +1758,17 @@ def add_module_empty_type2_attributes(self) -> None: tg not in self._SharedTags): self[tg] =\ self._create_empty_element(tg) - - def _add_module_to_functional_group_conversion_source( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + + def _add_module_to_dataset_conversion_source( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'SOPClassUID', 'ReferencedSOPClassUID', check_not_to_be_perframe=False, check_not_to_be_empty=True) - self._copy_attrib_if_present(src_fg, + self._copy_attrib_if_present(source, item, 'SOPInstanceUID', 'ReferencedSOPInstanceUID', @@ -1777,14 +1777,14 @@ def _add_module_to_functional_group_conversion_source( kw = 'ConversionSourceAttributesSequence' tg = tag_for_keyword(kw) seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) - dest_fg[tg] = seq + destination[tg] = seq - def add_module_conversion_source(self) -> None: + def _add_module_to_mf_conversion_source(self) -> None: for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_conversion_source( + self._add_module_to_dataset_conversion_source( self._legacy_datasets[i], item) - + self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime def _build_slices_geometry_frame_content(self) -> None: @@ -1843,7 +1843,7 @@ def _are_all_slices_parallel_frame_content(self) -> bool: last_slice = self._slices[0] for i in range(1, slice_count): curr_slice = self._slices[i] - if not GeometryOfSlice.AreParallel( + if not GeometryOfSlice.are_parallel( curr_slice, last_slice, self._tolerance): return False last_slice = curr_slice @@ -1861,7 +1861,7 @@ def _add_stack_info_frame_content(self) -> None: if 
self._are_all_slices_parallel_frame_content(): self._slice_location_map = {} for idx, s in enumerate(self._slices): - not_round_dist = s.GetDistanceAlongOrigin() + not_round_dist = s.get_distance_along_origin() dist = round(not_round_dist, round_digits) logger.debug( 'Slice locaation {} rounded by {} digits to {}'.format( @@ -1894,7 +1894,7 @@ def _add_stack_info_frame_content(self) -> None: "InStackPositionNumber", distance_index) distance_index += 1 - def _contains_right_attributes_frame_content(self, tags: dict) -> bool: + def _has_frame_content(self, tags: dict) -> bool: AcquisitionDateTime_tg = tag_for_keyword('AcquisitionDateTime') AcquisitionDate_tg = tag_for_keyword('AcquisitionDate') AcquisitionTime_tg = tag_for_keyword('AcquisitionTime') @@ -1902,31 +1902,31 @@ def _contains_right_attributes_frame_content(self, tags: dict) -> bool: AcquisitionTime_tg in tags or AcquisitionDate_tg in tags) - def _add_module_to_functional_group_frame_content( - self, src_fg: Dataset, dest_fg: Dataset) -> None: + def _add_module_to_dataset_frame_content( + self, source: Dataset, destination: Dataset) -> None: item = Dataset() fan_tg = tag_for_keyword('FrameAcquisitionNumber') an_tg = tag_for_keyword('AcquisitionNumber') - if an_tg in src_fg: - fan_val = src_fg[an_tg].value + if an_tg in source: + fan_val = source[an_tg].value else: fan_val = 0 item[fan_tg] = DataElement(fan_tg, dictionary_VR(fan_tg), fan_val) self._mark_tag_as_used(an_tg) # ---------------------------------------------------------------- AcquisitionDateTime_a = self._get_or_create_attribute( - src_fg, 'AcquisitionDateTime', self.EarliestDateTime) + source, 'AcquisitionDateTime', self.EarliestDateTime) # chnage the keyword to FrameAcquisitionDateTime: FrameAcquisitionDateTime_a = DataElement( tag_for_keyword('FrameAcquisitionDateTime'), 'DT', AcquisitionDateTime_a.value) - AcquisitionDateTime_is_perframe = self._contains_right_attributes_frame_content( + AcquisitionDateTime_is_perframe = 
self._has_frame_content( self._PerFrameTags) if FrameAcquisitionDateTime_a.value == self.EarliestDateTime: AcquisitionDate_a = self._get_or_create_attribute( - src_fg, 'AcquisitionDate', self.EarliestDate) + source, 'AcquisitionDate', self.EarliestDate) AcquisitionTime_a = self._get_or_create_attribute( - src_fg, 'AcquisitionTime', self.EarliestTime) + source, 'AcquisitionTime', self.EarliestTime) d = AcquisitionDate_a.value t = AcquisitionTime_a.value # FrameAcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + @@ -1938,10 +1938,10 @@ def _add_module_to_functional_group_frame_content( self.EarliestFrameAcquisitionDateTime =\ FrameAcquisitionDateTime_a.value if not AcquisitionDateTime_is_perframe: - if ('TriggerTime' in src_fg and - 'FrameReferenceDateTime' not in src_fg): + if ('TriggerTime' in source and + 'FrameReferenceDateTime' not in source): TriggerTime_a = self._get_or_create_attribute( - src_fg, 'TriggerTime', self.EarliestTime) + source, 'TriggerTime', self.EarliestTime) trigger_time_in_millisecond = int(TriggerTime_a.value) if trigger_time_in_millisecond > 0: t_delta = timedelta(trigger_time_in_millisecond) @@ -1956,30 +1956,30 @@ def _add_module_to_functional_group_frame_content( item['FrameAcquisitionDateTime'] = FrameAcquisitionDateTime_a # --------------------------------- self._copy_attrib_if_present( - src_fg, item, "AcquisitionDuration", + source, item, "AcquisitionDuration", "FrameAcquisitionDuration", check_not_to_be_perframe=False, check_not_to_be_empty=True) self._copy_attrib_if_present( - src_fg, item, + source, item, 'TemporalPositionIndex', check_not_to_be_perframe=False, check_not_to_be_empty=True) self._copy_attrib_if_present( - src_fg, item, "ImageComments", + source, item, "ImageComments", "FrameComments", check_not_to_be_perframe=False, check_not_to_be_empty=True) # ----------------------------------- seq_tg = tag_for_keyword('FrameContentSequence') - dest_fg[seq_tg] = DataElement( + destination[seq_tg] = DataElement( seq_tg, 
dictionary_VR(seq_tg), DataElementSequence([item])) # Also we want to add the earliest frame acq date time to the multiframe: def _add_acquisition_info_frame_content(self) -> None: for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] - self._add_module_to_functional_group_frame_content( + self._add_module_to_dataset_frame_content( self._legacy_datasets[i], item) if self.EarliestFrameAcquisitionDateTime < self.FarthestFutureDateTime: kw = 'AcquisitionDateTime' @@ -1987,7 +1987,7 @@ def _add_acquisition_info_frame_content(self) -> None: tag_for_keyword(kw), 'DT', self.EarliestFrameAcquisitionDateTime) - def add_module_frame_content(self) -> None: + def _add_module_to_mf_frame_content(self) -> None: self._add_acquisition_info_frame_content() self._add_stack_info_frame_content() @@ -1996,11 +1996,12 @@ def _is_other_byte_vr_pixel_data(self, vr: str) -> bool: def _is_other_word_vr_pixel_data(self, vr: str) -> bool: return vr[0] == 'O' and vr[1] == 'W' - # def _contains_right_attributes(self, tags: dict) -> bool: + # def _has(self, tags: dict) -> bool: # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') # return ImagePositionPatient_tg in tags - def _copy_data_pixel_data(self, src: bytearray, word_data: bool = False) -> None: + def _copy_data_pixel_data( + self, src: bytearray, word_data: bool = False) -> None: # Make sure that the length complies by row and col if word_data: des = self._word_data @@ -2014,7 +2015,7 @@ def _copy_data_pixel_data(self, src: bytearray, word_data: bool = False) -> None src = tmp des.extend(src) - def add_module_pixel_data(self) -> None: + def _add_module_to_mf_pixel_data(self) -> None: kw = 'NumberOfFrames' tg = tag_for_keyword(kw) self._frame_count = len(self._legacy_datasets) @@ -2052,7 +2053,7 @@ def add_module_pixel_data(self) -> None: 'OW', bytes(self._word_data)) self[kw] = MF_PixelData - def add_module_content_date_time(self) -> None: + def _add_module_to_mf_content_date_time(self) 
-> None: default_atrs = ["Acquisition", "Series", "Study"] for i in range(0, len(self._legacy_datasets)): src = self._legacy_datasets[i] @@ -2093,12 +2094,12 @@ def add_module_content_date_time(self) -> None: self[kw] = DataElement( tag_for_keyword(kw), 'TM', n_t) - def _add_data_element_to_target_contributing_equipment(self, target: Dataset, - kw: str, value: Any) -> None: + def _add_data_element_to_target_contributing_equipment( + self, target: Dataset, kw: str, value: Any) -> None: tg = tag_for_keyword(kw) target[kw] = DataElement(tg, dictionary_VR(tg), value) - def add_module_contributing_equipment(self) -> None: + def _add_module_to_mf_contributing_equipment(self) -> None: CodeValue_tg = tag_for_keyword('CodeValue') CodeMeaning_tg = tag_for_keyword('CodeMeaning') CodingSchemeDesignator_tg = tag_for_keyword('CodingSchemeDesignator') @@ -2121,8 +2122,10 @@ def add_module_contributing_equipment(self) -> None: item: Dataset = Dataset() item[ 'PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq - self._add_data_element_to_target_contributing_equipment(item, "Manufacturer", 'HighDicom') - self._add_data_element_to_target_contributing_equipment(item, "InstitutionName", 'HighDicom') + self._add_data_element_to_target_contributing_equipment( + item, "Manufacturer", 'HighDicom') + self._add_data_element_to_target_contributing_equipment( + item, "InstitutionName", 'HighDicom') self._add_data_element_to_target_contributing_equipment( item, "InstitutionalDepartmentName", @@ -2142,7 +2145,7 @@ def add_module_contributing_equipment(self) -> None: tg = tag_for_keyword('ContributingEquipmentSequence') self[tg] = DataElement(tg, 'SQ', DataElementSequence([item])) - def add_module_instance_creation_date_time(self) -> None: + def _add_module_to_mf_instance_creation_date_time(self) -> None: nnooww = datetime.now() n_d = DA(nnooww.date().strftime('%Y%m%d')) n_t = TM(nnooww.time().strftime('%H%M%S')) @@ -2152,7 +2155,7 @@ def add_module_instance_creation_date_time(self) -> 
None: kw = 'InstanceCreationTime' self[kw] = DataElement( tag_for_keyword(kw), 'TM', n_t) - + def default_sort_key(x: Dataset) -> tuple: out: tuple = tuple() if 'SeriesNumber' in x: @@ -2163,83 +2166,85 @@ def default_sort_key(x: Dataset) -> tuple: out += (x['SOPInstanceUID'].value, ) return out - def add_new_build_block( - self, element) -> None: - # if not isinstance(element, Abstract_MultiframeModuleAdder): - # raise ValueError('Build block must be an instance ' - # 'of Abstract_MultiframeModuleAdder') - self.__build_blocks.append(element) - - def ClearBuildBlocks(self) -> None: + def _clear_build_blocks(self) -> None: self.__build_blocks = [] - def add_commonct_pet_mr_build_blocks(self) -> none: + def _add_common_ct_pet_mr_build_blocks(self) -> None: blocks = [ - [self.add_module_image_pixel, None], - [self.add_module_composite_instance_contex, None], - [self.add_module_enhanced_common_image, None], - [self.add_module_acquisition_context, None], - [self.add_module_frame_anatomy, None], - [self.add_module_pixel_measures, None], - [self.add_module_plane_orientation, None], - [self.add_module_plane_position, None], - [self.add_module_frame_voi_lut, None], - [self.add_module_pixel_value_transformation, None], - [self.add_module_referenced_image, None], - [self.add_module_conversion_source, None], - [self.add_module_frame_content, None], - [self.add_module_pixel_data, None], - [self.add_module_content_date_time, None], - [self.add_module_instance_creation_date_time, None], - [self.add_module_contributing_equipment, None], - [self.add_module_unassigned_perframe, None], - [self.add_module_unassigned_shared, None], + [self._add_module_to_mf_image_pixel, None], + [self._add_module_to_mf_composite_instance_contex, None], + [self._add_module_to_mf_enhanced_common_image, None], + [self._add_module_to_mf_acquisition_context, None], + [self._add_module_to_mf_frame_anatomy, None], + [self._add_module_to_mf_pixel_measures, None], + [self._add_module_to_mf_plane_orientation, 
None], + [self._add_module_to_mf_plane_position, None], + [self._add_module_to_mf_frame_voi_lut, None], + [self._add_module_to_mf_pixel_value_transformation, None], + [self._add_module_to_mf_referenced_image, None], + [self._add_module_to_mf_conversion_source, None], + [self._add_module_to_mf_frame_content, None], + [self._add_module_to_mf_pixel_data, None], + [self._add_module_to_mf_content_date_time, None], + [self._add_module_to_mf_instance_creation_date_time, None], + [self._add_module_to_mf_contributing_equipment, None], + [self._add_module_to_mf_unassigned_perframe, None], + [self._add_module_to_mf_unassigned_shared, None], ] for b in blocks: - self.add_new_build_block(b) + self.__build_blocks.append(b) - def add_ct_specific_build_blocks(self) -> none: + def _add_ct_specific_build_blocks(self) -> None: blocks = [ - [self.add_module_common_ct_mr_pet_image_description, ('CT',)], - [self.add_module_enhanced_ct_image, None], - [self.add_module_contrast_bolus, None], + [ + self._add_module_to_mf_common_ct_mr_pet_image_description, + ('CT',) + ], + [self._add_module_to_mf_enhanced_ct_image, None], + [self._add_module_to_mf_contrast_bolus, None], ] for b in blocks: - self.add_new_build_block(b) + self.__build_blocks.append(b) - def add_mr_specific_build_blocks(self) -> none: + def _add_mr_specific_build_blocks(self) -> None: blocks = [ - [self.add_module_common_ct_mr_pet_image_description, ('MR',)], - [self.add_module_enhanced_mr_image, None], - [self.add_module_contrast_bolus, None], + [ + self._add_module_to_mf_common_ct_mr_pet_image_description, + ('MR',) + ], + [self._add_module_to_mf_enhanced_mr_image, None], + [self._add_module_to_mf_contrast_bolus, None], ] for b in blocks: - self.add_new_build_block(b) + self.__build_blocks.append(b) - def add_pet_specific_build_blocks(self) -> none: + def _add_pet_specific_build_blocks(self) -> None: blocks = [ - [self.add_module_common_ct_mr_pet_image_description, ('PET',)], - [self.add_module_enhanced_pet_image, None], + 
[ + self._add_module_to_mf_common_ct_mr_pet_image_description, + ('PET',) + ], + [self._add_module_to_mf_enhanced_pet_image, None], ] for b in blocks: - self.add_new_build_block(b) + self.__build_blocks.append(b) - def AddBuildBlocksForCT(self) -> None: - self.ClearBuildBlocks() - self.add_commonct_pet_mr_build_blocks() - self.add_ct_specific_build_blocks() + def _add_build_blocks_for_ct(self) -> None: + self._clear_build_blocks() + self._add_common_ct_pet_mr_build_blocks() + self._add_ct_specific_build_blocks() - def AddBuildBlocksForMR(self) -> None: - self.ClearBuildBlocks() - self.add_commonct_pet_mr_build_blocks() - self.add_mr_specific_build_blocks() + def _add_build_blocks_for_mr(self) -> None: + self._clear_build_blocks() + self._add_common_ct_pet_mr_build_blocks() + self._add_mr_specific_build_blocks() - def AddBuildBlocksForPET(self) -> None: - self.ClearBuildBlocks() - self.add_commonct_pet_mr_build_blocks() - self.add_pet_specific_build_blocks() + def _add_build_blocks_for_pet(self) -> None: + self._clear_build_blocks() + self._add_common_ct_pet_mr_build_blocks() + self._add_pet_specific_build_blocks() - def BuildMultiFrame(self) -> None: + def convert2mf(self) -> None: logger = logging.getLogger(__name__) logger.debug('Strt singleframe to multiframe conversion') for fun, args in self.__build_blocks: @@ -2263,31 +2268,31 @@ def __init__(self, self.VoxelSpacing = voxel_spaceing self.Dim = dimensions - def GetNormalVector(self) -> ndarray: + def get_normal_vector(self) -> ndarray: n: ndarray = cross(self.RowVector, self.ColVector) n[2] = -n[2] return n - def GetDistanceAlongOrigin(self) -> float: - n = self.GetNormalVector() + def get_distance_along_origin(self) -> float: + n = self.get_normal_vector() return float( dot(self.TopLeftCornerPosition, n)) - def AreParallel(slice1: GeometryOfSlice, - slice2: GeometryOfSlice, - tolerance: float = 0.0001) -> bool: + def are_parallel( + slice1: GeometryOfSlice, + slice2: GeometryOfSlice, + tolerance: float = 
0.0001) -> bool: logger = logging.getLogger(__name__) if (not isinstance(slice1, GeometryOfSlice) or not isinstance(slice2, GeometryOfSlice)): logger.warning( 'slice1 and slice2 are not of the same ' 'type: type(slice1) = {} and type(slice2) = {}'.format( - type(slice1), type(slice2) - )) + type(slice1), type(slice2))) return False else: - n1: ndarray = slice1.GetNormalVector() - n2: ndarray = slice2.GetNormalVector() + n1: ndarray = slice1.get_normal_vector() + n2: ndarray = slice2.get_normal_vector() for el1, el2 in zip(n1, n2): if abs(el1 - el2) > tolerance: return False From 3277b49672bf1a6a8bf07906355ce629649b5caa Mon Sep 17 00:00:00 2001 From: afshin Date: Fri, 2 Apr 2021 10:58:42 -0400 Subject: [PATCH 29/44] mend --- src/highdicom/legacy/sop.py | 701 +++++------------------------------- 1 file changed, 99 insertions(+), 602 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 0ecf505f..d948d560 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,5 +1,4 @@ """ Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" -from __future__ import annotations import logging from collections import defaultdict from typing import Any, Dict, List, Optional, Sequence, Union, Callable @@ -33,506 +32,114 @@ } -def _convert_legacy_to_enhanced( - sf_datasets: Sequence[Dataset], - mf_dataset: Optional[Dataset] = None - ) -> Dataset: - """Converts one or more MR, CT or PET Image instances into one - Legacy Converted Enhanced MR/CT/PET Image instance by copying information - from `sf_datasets` into `mf_dataset`. 
- Parameters - ---------- - sf_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of single-frame legacy image instances - mf_dataset: pydicom.dataset.Dataset, optional - DICOM data set of multi-frame enhanced image instance - Returns - ------- - pydicom.dataset.Dataset - DICOM data set of enhanced multi-frame image instance - Note - ---- - Frames will be included into the Pixel Data element in the order in - which instances are provided via `sf_datasets`. - """ - logger = logging.getLogger(__name__) - try: - ref_ds = sf_datasets[0] - except IndexError: - raise ValueError('No data sets of single-frame legacy images provided.') - if mf_dataset is None: - mf_dataset = Dataset() - transfer_syntaxes = set() - series = set() - studies = set() - modalities = set() - for ds in sf_datasets: - transfer_syntaxes.add(ds.file_meta.TransferSyntaxUID) - series.add(ds.SeriesInstanceUID) - studies.add(ds.StudyInstanceUID) - modalities.add(ds.Modality) - if len(series) > 1: - raise ValueError( - 'All instances must belong to the same series.') - if len(studies) > 1: - raise ValueError( - 'All instances must belong to the same study.') - if len(modalities) > 1: - raise ValueError( - 'All instances must have the same modality.') - if len(transfer_syntaxes) > 1: - raise ValueError( - 'All instances must have the same transfer syntaxes.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - mf_dataset.NumberOfFrames = len(sf_datasets) - # We will ignore some attributes, because they will get assigned new - # values in the legacy converted enhanced image instance. 
- ignored_attributes = { - tag_for_keyword('NumberOfFrames'), - tag_for_keyword('InstanceNumber'), - tag_for_keyword('SOPClassUID'), - tag_for_keyword('SOPInstanceUID'), - tag_for_keyword('PixelData'), - tag_for_keyword('SeriesInstanceUID'), - } - mf_attributes = [] - iod_key = SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] - for module_item in IOD_MODULE_MAP[iod_key]: - module_key = module_item['key'] - for attr_item in MODULE_ATTRIBUTE_MAP[module_key]: - # Only root-level attributes - if len(attr_item['path']) > 0: - continue - tag = tag_for_keyword(attr_item['keyword']) - if tag in ignored_attributes: - continue - mf_attributes.append(tag) - # Assign attributes that are not defined at the root level of the - # Lecacy Converted Enhanced MR/CT/PET Image IOD to the appropriate - # sequence attributes of the SharedFunctinoalGroupsSequence or - # PerFrameFunctionalGroupsSequence attributes. Collect all unassigned - # attributes (we will deal with them later on). - # IODs only cover the modules, but not functional group macros. - # Therefore, we need to handle those separately. 
- assigned_attributes = { - # shared - tag_for_keyword('ImageOrientationPatient'), - tag_for_keyword('PixelSpacing'), - tag_for_keyword('SliceThickness'), - tag_for_keyword('SpacingBetweenSlices'), - # per-frame - tag_for_keyword('ImageType'), - tag_for_keyword('AcquisitionDate'), - tag_for_keyword('AcquisitionTime'), - tag_for_keyword('InstanceNumber'), - tag_for_keyword('SOPClassUID'), - tag_for_keyword('SOPInstanceUID'), - tag_for_keyword('ImagePositionPatient'), - tag_for_keyword('WindowCenter'), - tag_for_keyword('WindowWidth'), - tag_for_keyword('ReferencedImageSequence'), - tag_for_keyword('SourceImageSequence'), - tag_for_keyword('BodyPartExamined'), - tag_for_keyword('IrradiationEventUID'), - tag_for_keyword('RescaleIntercept'), - tag_for_keyword('RescaleSlope'), - tag_for_keyword('RescaleType'), - } - if ref_ds.ImageType[0] == 'ORIGINAL': - mf_dataset.VolumeBasedCalculationTechnique = 'NONE' - else: - mf_dataset.VolumeBasedCalculationTechnique = 'MIXED' - pixel_representation = sf_datasets[0].PixelRepresentation - volumetric_properties = 'VOLUME' - unique_image_types = set() - unassigned_dataelements: Dict[str, List[Dataset]] = defaultdict(list) - # Per-Frame Functional Groups - perframe_items = [] - for i, ds in enumerate(sf_datasets): - perframe_item = Dataset() - # Frame Content (M) - frame_content_item = Dataset() - if 'AcquisitionDate' in ds and 'AcquisitionTime' in ds: - frame_content_item.FrameAcquisitionDateTime = '{}{}'.format( - ds.AcquisitionDate, - ds.AcquisitionTime) - frame_content_item.FrameAcquisitionNumber = ds.InstanceNumber - perframe_item.FrameContentSequence = [ - frame_content_item, - ] - # Plane Position (Patient) (M) - plane_position_item = Dataset() - plane_position_item.ImagePositionPatient = ds.ImagePositionPatient - perframe_item.PlanePositionSequence = [ - plane_position_item, - ] - frame_type = list(ds.ImageType) - if len(frame_type) < 4: - if frame_type[0] == 'ORIGINAL': - frame_type.append('NONE') - else: - 
logger.warn('unknown derived pixel contrast') - frame_type.append('OTHER') - unique_image_types.add(tuple(frame_type)) - frame_type_item = Dataset() - frame_type_item.FrameType = frame_type - frame_type_item.PixelRepresentation = pixel_representation - frame_type_item.VolumetricProperties = volumetric_properties - if frame_type[0] == 'ORIGINAL': - frame_type_item.FrameVolumeBasedCalculationTechnique = 'NONE' +class DicomHelper: + def __init__(self) -> None: + pass + + def istag_file_meta_information_group(t: BaseTag) -> bool: + return t.group == 0x0002 + + def istag_repeating_group(t: BaseTag) -> bool: + g = t.group + return (g >= 0x5000 and g <= 0x501e) or\ + (g >= 0x6000 and g <= 0x601e) + + def istag_group_length(t: BaseTag) -> bool: + return t.element == 0 + + def isequal(v1: Any, v2: Any) -> bool: + from pydicom.valuerep import DSfloat + float_tolerance = 1.0e-5 + + def is_equal_float(x1: float, x2: float) -> bool: + return abs(x1 - x2) < float_tolerance + if type(v1) != type(v2): + return False + if isinstance(v1, DataElementSequence): + for item1, item2 in zip(v1, v2): + DicomHelper.isequal_dicom_dataset(item1, item2) + if not isinstance(v1, MultiValue): + v11 = [v1] + v22 = [v2] else: - frame_type_item.FrameVolumeBasedCalculationTechnique = 'MIXED' - if sop_class_uid == '1.2.840.10008.5.1.4.1.1.4.4': - # MR Image Frame Type (M) - perframe_item.MRImageFrameTypeSequence = [ - frame_type_item, - ] - elif sop_class_uid == '1.2.840.10008.5.1.4.1.1.2.2': - # CT Image Frame Type (M) - perframe_item.CTImageFrameTypeSequence = [ - frame_type_item, - ] - # CT Pixel Value Transformation (M) - pixel_val_transform_item = Dataset() - pixel_val_transform_item.RescaleIntercept = ds.RescaleIntercept - pixel_val_transform_item.RescaleSlope = ds.RescaleSlope - try: - pixel_val_transform_item.RescaleType = ds.RescaleType - except AttributeError: - pixel_val_transform_item.RescaleType = 'US' - perframe_item.PixelValueTransformationSequence = [ - pixel_val_transform_item, - ] - 
elif sop_class_uid == '1.2.840.10008.5.1.4.1.1.128.1': - # PET Image Frame Type (M) - perframe_item.PETImageFrameTypeSequence = [ - frame_type_item, - ] - # Frame VOI LUT (U) - try: - frame_voi_lut_item = Dataset() - frame_voi_lut_item.WindowCenter = ds.WindowCenter - frame_voi_lut_item.WindowWidth = ds.WindowWidth - perframe_item.FrameVOILUTSequence = [ - frame_voi_lut_item, - ] - except AttributeError: - pass - # Referenced Image (C) - try: - perframe_item.ReferencedImageSequence = \ - ds.ReferencedImageSequence - except AttributeError: - pass - # Derivation Image (C) - try: - perframe_item.SourceImageSequence = ds.SourceImageSequence - except AttributeError: - pass - # Frame Anatomy (C) - try: - frame_anatomy_item = Dataset() - frame_anatomy_item.BodyPartExamined = ds.BodyPartExamined - perframe_item.FrameAnatomySequence = [ - frame_anatomy_item, - ] - except AttributeError: - pass - # Image Frame Conversion Source (C) - conv_src_attr_item = Dataset() - conv_src_attr_item.ReferencedSOPClassUID = ds.SOPClassUID - conv_src_attr_item.ReferencedSOPInstanceUID = ds.SOPInstanceUID - perframe_item.ConversionSourceAttributesSequence = [ - conv_src_attr_item, - ] - # Irradiation Event Identification (C) - CT/PET only - try: - irradiation_event_id_item = Dataset() - irradiation_event_id_item.IrradiationEventUID = \ - ref_ds.IrradiationEventUID - perframe_item.IrradiationEventIdentificationSequence = [ - irradiation_event_id_item, - ] - except AttributeError: - pass - # Temporal Position (U) - try: - temporal_position_item = Dataset() - temporal_position_item.TemporalPositionTimeOffset = \ - ref_ds.TemporalPositionTimeOffset - perframe_item.TemporalPositionSequence = [ - temporal_position_item, - ] - except AttributeError: - pass - # Cardiac Synchronization (U # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.7 # noqa - # Contrast/Bolus Usage (U) - MR/CT onl # TODO: http: # 
dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.12 # noqa - # Respiratory Synchronization (U # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.17 # noqa - # Real World Value Mapping (U) - PET onl # TODO: http: # dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.2.html# sect_C.7.6.16.2.11 # noqa - perframe_items.append(perframe_item) - # All other attributes that are not assigned to functional groups. - for tag, da in ds.items(): - if tag in assigned_attributes: - continue - elif tag in mf_attributes: - mf_dataset.add(da) + v11 = v1 + v22 = v2 + if len(v11) != len(v22): + return False + for xx, yy in zip(v11, v22): + if isinstance(xx, DSfloat) or isinstance(xx, float): + if not is_equal_float(xx, yy): + return False else: - if tag not in ignored_attributes: - unassigned_dataelements[tag].append(da) - # All remaining unassigned attributes will be collected in either the - # UnassignedSharedConvertedAttributesSequence or the - # UnassignedPerFrameConvertedAttributesSequence, depending on whether - # values vary accross frames (original single-frame image instances). 
- unassigned_shared_ca_item = Dataset() - unassigned_perframe_ca_items = [ - Dataset() - for _ in range(len(sf_datasets)) - ] - for tag, dataelements in unassigned_dataelements.items(): - values = [str(da.value) for da in dataelements] - unique_values = set(values) - if len(unique_values) == 1: - unassigned_shared_ca_item.add(dataelements[0]) - else: - for i, da in enumerate(dataelements): - unassigned_perframe_ca_items[i].add(da) - mf_dataset.ImageType = list(list(unique_image_types)[0]) - if len(unique_image_types) > 1: - mf_dataset.ImageType[2] = 'MIXED' - mf_dataset.PixelRepresentation = pixel_representation - mf_dataset.VolumetricProperties = volumetric_properties - # Shared Functional Groups - shared_item = Dataset() - # Pixel Measures (M) - pixel_measures_item = Dataset() - pixel_measures_item.PixelSpacing = ref_ds.PixelSpacing - pixel_measures_item.SliceThickness = ref_ds.SliceThickness - try: - pixel_measures_item.SpacingBetweenSlices = \ - ref_ds.SpacingBetweenSlices - except AttributeError: - pass - shared_item.PixelMeasuresSequence = [ - pixel_measures_item, - ] - # Plane Orientation (Patient) (M) - plane_orientation_item = Dataset() - plane_orientation_item.ImageOrientationPatient = \ - ref_ds.ImageOrientationPatient - shared_item.PlaneOrientationSequence = [ - plane_orientation_item, - ] - shared_item.UnassignedSharedConvertedAttributesSequence = [ - unassigned_shared_ca_item, - ] - mf_dataset.SharedFunctionalGroupsSequence = [ - shared_item, - ] - for i, ca_item in enumerate(unassigned_perframe_ca_items): - perframe_items[i].UnassignedPerFrameConvertedAttributesSequence = [ - ca_item, - ] - mf_dataset.PerFrameFunctionalGroupsSequence = perframe_items - mf_dataset.AcquisitionContextSequence = [] - # TODO: Encapsulated Pixel Data with compressed frame items. 
- # Create the Pixel Data element of the mulit-frame image instance using - # native encoding (simply concatenating pixels of individual frames) - # Sometimes there may be numpy types such as " > i2". The (* 1) hack - # ensures that pixel values have the correct integer type. - mf_dataset.PixelData = b''.join([ - (ds.pixel_array * 1).data for ds in sf_datasets - ]) - return mf_dataset - - -class LegacyConvertedEnhancedMRImage(SOPClass): - """SOP class for Legacy Converted Enhanced MR Image instances.""" + if xx != yy: + return False + return True - def __init__( - self, - legacy_datasets: Sequence[Dataset], - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - **kwargs: Any) -> None: - """ - Parameters - ---------- - legacy_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of legacy single-frame image instances that should - be converted - series_instance_uid: str - UID of the series - series_number: Union[int, None] - Number of the series within the study - sop_instance_uid: str - UID that should be assigned to the instance - instance_number: int - Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of `highdicom.base.SOPClass` - """ - try: - ref_ds = legacy_datasets[0] - except IndexError: - raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'MR': - raise ValueError( - 'Wrong modality for conversion of legacy MR images.') - if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.4': - raise ValueError( - 'Wrong SOP class for conversion of legacy MR images.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( - study_instance_uid=ref_ds.StudyInstanceUID, - series_instance_uid=series_instance_uid, - series_number=series_number, - sop_instance_uid=sop_instance_uid, - sop_class_uid=sop_class_uid, - instance_number=instance_number, - 
manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, - transfer_syntax_uid=None, # FIXME: frame encoding - patient_id=ref_ds.PatientID, - patient_name=ref_ds.PatientName, - patient_birth_date=ref_ds.PatientBirthDate, - patient_sex=ref_ds.PatientSex, - accession_number=ref_ds.AccessionNumber, - study_id=ref_ds.StudyID, - study_date=ref_ds.StudyDate, - study_time=ref_ds.StudyTime, - referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs) - _convert_legacy_to_enhanced(legacy_datasets, self) - self.PresentationLUTShape = 'IDENTITY' + def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: + if type(ds1) != type(ds2): + return False + if not isinstance(ds1, Dataset): + return False + for k1, elem1 in ds1.items(): + if k1 not in ds2: + return False + elem2 = ds2[k1] + if not DicomHelper.isequal(elem2.value, elem1.value): + return False + return True + def tag2str(tg: BaseTag) -> str: + if not isinstance(tg, BaseTag): + tg = Tag(tg) + return '(0x{:0>4x}, 0x{:0>4x})'.format(tg.group, tg.element) -class LegacyConvertedEnhancedCTImage(SOPClass): - """SOP class for Legacy Converted Enhanced CT Image instances.""" + def tag2kwstr(tg: BaseTag) -> str: + return '{}-{:32.32s}'.format( + DicomHelper.tag2str(tg), keyword_for_tag(tg)) - def __init__( - self, - legacy_datasets: Sequence[Dataset], - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - **kwargs: Any) -> None: - """ - Parameters - ---------- - legacy_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of legacy single-frame image instances that should - be converted - series_instance_uid: str - UID of the series - series_number: Union[int, None] - Number of the series within the study - sop_instance_uid: str - UID that should be assigned to the instance - instance_number: int - Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of 
`highdicom.base.SOPClass` - """ - try: - ref_ds = legacy_datasets[0] - except IndexError: - raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'CT': - raise ValueError( - 'Wrong modality for conversion of legacy CT images.') - if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.2': - raise ValueError( - 'Wrong SOP class for conversion of legacy CT images.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( - study_instance_uid=ref_ds.StudyInstanceUID, - series_instance_uid=series_instance_uid, - series_number=series_number, - sop_instance_uid=sop_instance_uid, - sop_class_uid=sop_class_uid, - instance_number=instance_number, - manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, - transfer_syntax_uid=None, # FIXME: frame encoding - patient_id=ref_ds.PatientID, - patient_name=ref_ds.PatientName, - patient_birth_date=ref_ds.PatientBirthDate, - patient_sex=ref_ds.PatientSex, - accession_number=ref_ds.AccessionNumber, - study_id=ref_ds.StudyID, - study_date=ref_ds.StudyDate, - study_time=ref_ds.StudyTime, - referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs) - _convert_legacy_to_enhanced(legacy_datasets, self) +class GeometryOfSlice: + def __init__(self, + row_vector: ndarray, + col_vector: ndarray, + top_left_corner_pos: ndarray, + voxel_spaceing: ndarray, + dimensions: tuple): + self.RowVector = row_vector + self.ColVector = col_vector + self.TopLeftCornerPosition = top_left_corner_pos + self.VoxelSpacing = voxel_spaceing + self.Dim = dimensions -class LegacyConvertedEnhancedPETImage(SOPClass): - """SOP class for Legacy Converted Enhanced PET Image instances.""" + def get_normal_vector(self) -> ndarray: + n: ndarray = cross(self.RowVector, self.ColVector) + n[2] = -n[2] + return n - def __init__( - self, - legacy_datasets: Sequence[Dataset], - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - **kwargs: Any) -> None: - """ - 
Parameters - ---------- - legacy_datasets: Sequence[pydicom.dataset.Dataset] - DICOM data sets of legacy single-frame image instances that should - be converted - series_instance_uid: str - UID of the series - series_number: Union[int, None] - Number of the series within the study - sop_instance_uid: str - UID that should be assigned to the instance - instance_number: int - Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of `highdicom.base.SOPClass` - """ - try: - ref_ds = legacy_datasets[0] - except IndexError: - raise ValueError('No DICOM data sets of provided.') - if ref_ds.Modality != 'PT': - raise ValueError( - 'Wrong modality for conversion of legacy PET images.') - if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.128': - raise ValueError( - 'Wrong SOP class for conversion of legacy PET images.') - sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - super().__init__( - study_instance_uid=ref_ds.StudyInstanceUID, - series_instance_uid=series_instance_uid, - series_number=series_number, - sop_instance_uid=sop_instance_uid, - sop_class_uid=sop_class_uid, - instance_number=instance_number, - manufacturer=ref_ds.Manufacturer, - modality=ref_ds.Modality, - transfer_syntax_uid=None, # FIXME: frame encoding - patient_id=ref_ds.PatientID, - patient_name=ref_ds.PatientName, - patient_birth_date=ref_ds.PatientBirthDate, - patient_sex=ref_ds.PatientSex, - accession_number=ref_ds.AccessionNumber, - study_id=ref_ds.StudyID, - study_date=ref_ds.StudyDate, - study_time=ref_ds.StudyTime, - referring_physician_name=ref_ds.ReferringPhysicianName, - **kwargs) - _convert_legacy_to_enhanced(legacy_datasets, self) + def get_distance_along_origin(self) -> float: + n = self.get_normal_vector() + return float( + dot(self.TopLeftCornerPosition, n)) + + def are_parallel( + slice1: Any, + slice2: Any, + tolerance: float = 0.0001) -> bool: + logger = 
logging.getLogger(__name__) + if (not isinstance(slice1, GeometryOfSlice) or + not isinstance(slice2, GeometryOfSlice)): + logger.warning( + 'slice1 and slice2 are not of the same ' + 'type: type(slice1) = {} and type(slice2) = {}'.format( + type(slice1), type(slice2))) + return False + else: + n1: ndarray = slice1.get_normal_vector() + n2: ndarray = slice2.get_normal_vector() + for el1, el2 in zip(n1, n2): + if abs(el1 - el2) > tolerance: + return False + return True class PerframeFunctionalGroup(DataElementSequence): @@ -2253,113 +1860,3 @@ def convert2mf(self) -> None: else: fun(*args) logger.debug('Conversion succeeded') - - -class GeometryOfSlice: - def __init__(self, - row_vector: ndarray, - col_vector: ndarray, - top_left_corner_pos: ndarray, - voxel_spaceing: ndarray, - dimensions: tuple): - self.RowVector = row_vector - self.ColVector = col_vector - self.TopLeftCornerPosition = top_left_corner_pos - self.VoxelSpacing = voxel_spaceing - self.Dim = dimensions - - def get_normal_vector(self) -> ndarray: - n: ndarray = cross(self.RowVector, self.ColVector) - n[2] = -n[2] - return n - - def get_distance_along_origin(self) -> float: - n = self.get_normal_vector() - return float( - dot(self.TopLeftCornerPosition, n)) - - def are_parallel( - slice1: GeometryOfSlice, - slice2: GeometryOfSlice, - tolerance: float = 0.0001) -> bool: - logger = logging.getLogger(__name__) - if (not isinstance(slice1, GeometryOfSlice) or - not isinstance(slice2, GeometryOfSlice)): - logger.warning( - 'slice1 and slice2 are not of the same ' - 'type: type(slice1) = {} and type(slice2) = {}'.format( - type(slice1), type(slice2))) - return False - else: - n1: ndarray = slice1.get_normal_vector() - n2: ndarray = slice2.get_normal_vector() - for el1, el2 in zip(n1, n2): - if abs(el1 - el2) > tolerance: - return False - return True - - -class DicomHelper: - def __init__(self) -> None: - pass - - def istag_file_meta_information_group(t: BaseTag) -> bool: - return t.group == 0x0002 - - def 
istag_repeating_group(t: BaseTag) -> bool: - g = t.group - return (g >= 0x5000 and g <= 0x501e) or\ - (g >= 0x6000 and g <= 0x601e) - - def istag_group_length(t: BaseTag) -> bool: - return t.element == 0 - - def isequal(v1: Any, v2: Any) -> bool: - from pydicom.valuerep import DSfloat - float_tolerance = 1.0e-5 - - def is_equal_float(x1: float, x2: float) -> bool: - return abs(x1 - x2) < float_tolerance - if type(v1) != type(v2): - return False - if isinstance(v1, DataElementSequence): - for item1, item2 in zip(v1, v2): - DicomHelper.isequal_dicom_dataset(item1, item2) - if not isinstance(v1, MultiValue): - v11 = [v1] - v22 = [v2] - else: - v11 = v1 - v22 = v2 - if len(v11) != len(v22): - return False - for xx, yy in zip(v11, v22): - if isinstance(xx, DSfloat) or isinstance(xx, float): - if not is_equal_float(xx, yy): - return False - else: - if xx != yy: - return False - return True - - def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: - if type(ds1) != type(ds2): - return False - if not isinstance(ds1, Dataset): - return False - for k1, elem1 in ds1.items(): - if k1 not in ds2: - return False - elem2 = ds2[k1] - if not DicomHelper.isequal(elem2.value, elem1.value): - return False - return True - - def tag2str(tg: BaseTag) -> str: - if not isinstance(tg, BaseTag): - tg = Tag(tg) - return '(0x{:0>4x}, 0x{:0>4x})'.format(tg.group, tg.element) - - def tag2kwstr(tg: BaseTag) -> str: - return '{}-{:32.32s}'.format( - DicomHelper.tag2str(tg), keyword_for_tag(tg)) From 5c1687c4ad237ab798b714d8b1bc39b98ad2743f Mon Sep 17 00:00:00 2001 From: afshin Date: Fri, 2 Apr 2021 11:57:42 -0400 Subject: [PATCH 30/44] mend --- tests/test_legacy.py | 518 +++++++++++++++++++++---------------------- 1 file changed, 259 insertions(+), 259 deletions(-) diff --git a/tests/test_legacy.py b/tests/test_legacy.py index cf75aa2d..59972784 100644 --- a/tests/test_legacy.py +++ b/tests/test_legacy.py @@ -6,280 +6,280 @@ import enum -class Modality(enum.IntEnum): - CT = 0 - MR = 1 - 
PT = 2 +# class Modality(enum.IntEnum): +# CT = 0 +# MR = 1 +# PT = 2 -sop_classes = [('CT', '1.2.840.10008.5.1.4.1.1.2'), - ('MR', '1.2.840.10008.5.1.4.1.1.4'), - ('PT', '1.2.840.10008.5.1.4.1.1.128')] +# sop_classes = [('CT', '1.2.840.10008.5.1.4.1.1.2'), +# ('MR', '1.2.840.10008.5.1.4.1.1.4'), +# ('PT', '1.2.840.10008.5.1.4.1.1.128')] -class TestLegacyConvertedEnhancedImage(unittest.TestCase): +# class TestLegacyConvertedEnhancedImage(unittest.TestCase): - def setUp(self): - super().setUp() - self._modalities = ('CT', 'MR', 'PET') - self._ref_dataset_seq_CT = \ - self.generate_common_dicom_dataset_series(3, Modality.CT) - self._ref_dataset_seq_MR = \ - self.generate_common_dicom_dataset_series(3, Modality.MR) - self._ref_dataset_seq_PET = \ - self.generate_common_dicom_dataset_series(3, Modality.PT) - self._output_series_instance_uid = generate_uid() - self._output_sop_instance_uid = generate_uid() - self._output_series_number = '1' - self._output_instance_number = '1' +# def setUp(self): +# super().setUp() +# self._modalities = ('CT', 'MR', 'PET') +# self._ref_dataset_seq_CT = \ +# self.generate_common_dicom_dataset_series(3, Modality.CT) +# self._ref_dataset_seq_MR = \ +# self.generate_common_dicom_dataset_series(3, Modality.MR) +# self._ref_dataset_seq_PET = \ +# self.generate_common_dicom_dataset_series(3, Modality.PT) +# self._output_series_instance_uid = generate_uid() +# self._output_sop_instance_uid = generate_uid() +# self._output_series_number = '1' +# self._output_instance_number = '1' - def test_output_attributes(self): - for m in self._modalities: - with self.subTest(m=m): - LegacyConverterClass = getattr( - sop, - "LegacyConvertedEnhanced{}Image".format(m) - ) - ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# def test_output_attributes(self): +# for m in self._modalities: +# with self.subTest(m=m): +# LegacyConverterClass = getattr( +# sop, +# "LegacyConvertedEnhanced{}Image".format(m) +# ) +# ref_dataset_seq = getattr(self, 
"_ref_dataset_seq_{}".format(m)) - multiframe_item = LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - assert multiframe_item.SeriesInstanceUID == \ - self._output_series_instance_uid - assert multiframe_item.SOPInstanceUID == \ - self._output_sop_instance_uid - assert int(multiframe_item.SeriesNumber) == int( - self._output_series_number) - assert int(multiframe_item.InstanceNumber) == int( - self._output_instance_number) +# multiframe_item = LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# assert multiframe_item.SeriesInstanceUID == \ +# self._output_series_instance_uid +# assert multiframe_item.SOPInstanceUID == \ +# self._output_sop_instance_uid +# assert int(multiframe_item.SeriesNumber) == int( +# self._output_series_number) +# assert int(multiframe_item.InstanceNumber) == int( +# self._output_instance_number) - def test_empty_dataset(self): - for m in self._modalities: - with self.subTest(m=m): - LegacyConverterClass = getattr( - sop, - "LegacyConvertedEnhanced{}Image".format(m) - ) - with self.assertRaises(ValueError): - LegacyConverterClass( - [], - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) +# def test_empty_dataset(self): +# for m in self._modalities: +# with self.subTest(m=m): +# LegacyConverterClass = getattr( +# sop, +# "LegacyConvertedEnhanced{}Image".format(m) +# ) +# with self.assertRaises(ValueError): +# LegacyConverterClass( +# [], +# 
series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) - def test_wrong_modality(self): +# def test_wrong_modality(self): - for m in self._modalities: - with self.subTest(m=m): - LegacyConverterClass = getattr( - sop, - "LegacyConvertedEnhanced{}Image".format(m) - ) - ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) - tmp_orig_modality = ref_dataset_seq[0].Modality - ref_dataset_seq[0].Modality = '' - with self.assertRaises(ValueError): - LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - ref_dataset_seq[0].Modality = tmp_orig_modality +# for m in self._modalities: +# with self.subTest(m=m): +# LegacyConverterClass = getattr( +# sop, +# "LegacyConvertedEnhanced{}Image".format(m) +# ) +# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# tmp_orig_modality = ref_dataset_seq[0].Modality +# ref_dataset_seq[0].Modality = '' +# with self.assertRaises(ValueError): +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# ref_dataset_seq[0].Modality = tmp_orig_modality - def test_wrong_sop_class_uid(self): - for m in self._modalities: - with self.subTest(m=m): - LegacyConverterClass = getattr( - sop, - "LegacyConvertedEnhanced{}Image".format(m) - ) - ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) - tmp_orig_sop_class_id = ref_dataset_seq[0].SOPClassUID - ref_dataset_seq[0].SOPClassUID = '1.2.3.4.5.6.7.8.9' - with self.assertRaises(ValueError): - LegacyConverterClass( - 
legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - ref_dataset_seq[0].SOPClassUID = tmp_orig_sop_class_id +# def test_wrong_sop_class_uid(self): +# for m in self._modalities: +# with self.subTest(m=m): +# LegacyConverterClass = getattr( +# sop, +# "LegacyConvertedEnhanced{}Image".format(m) +# ) +# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# tmp_orig_sop_class_id = ref_dataset_seq[0].SOPClassUID +# ref_dataset_seq[0].SOPClassUID = '1.2.3.4.5.6.7.8.9' +# with self.assertRaises(ValueError): +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# ref_dataset_seq[0].SOPClassUID = tmp_orig_sop_class_id - def test_mixed_studies(self): - for m in self._modalities: - with self.subTest(m=m): - LegacyConverterClass = getattr( - sop, - "LegacyConvertedEnhanced{}Image".format(m) - ) - ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) - # first run with intact input +# def test_mixed_studies(self): +# for m in self._modalities: +# with self.subTest(m=m): +# LegacyConverterClass = getattr( +# sop, +# "LegacyConvertedEnhanced{}Image".format(m) +# ) +# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# # first run with intact input - LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - # second run with defected input - tmp_orig_study_instance_uid = ref_dataset_seq[ - 0].StudyInstanceUID - ref_dataset_seq[0].StudyInstanceUID = 
'1.2.3.4.5.6.7.8.9' - with self.assertRaises(ValueError): - LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - ref_dataset_seq[ - 0].StudyInstanceUID = tmp_orig_study_instance_uid +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# # second run with defected input +# tmp_orig_study_instance_uid = ref_dataset_seq[ +# 0].StudyInstanceUID +# ref_dataset_seq[0].StudyInstanceUID = '1.2.3.4.5.6.7.8.9' +# with self.assertRaises(ValueError): +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# ref_dataset_seq[ +# 0].StudyInstanceUID = tmp_orig_study_instance_uid - def test_mixed_series(self): - for m in self._modalities: - with self.subTest(m=m): - LegacyConverterClass = getattr( - sop, - "LegacyConvertedEnhanced{}Image".format(m) - ) - ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) - # first run with intact input - LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - # second run with defected input - tmp_series_instance_uid = ref_dataset_seq[0].SeriesInstanceUID - ref_dataset_seq[0].SeriesInstanceUID = '1.2.3.4.5.6.7.8.9' - with self.assertRaises(ValueError): - LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - 
series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - ref_dataset_seq[0].SeriesInstanceUID = tmp_series_instance_uid +# def test_mixed_series(self): +# for m in self._modalities: +# with self.subTest(m=m): +# LegacyConverterClass = getattr( +# sop, +# "LegacyConvertedEnhanced{}Image".format(m) +# ) +# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# # first run with intact input +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# # second run with defected input +# tmp_series_instance_uid = ref_dataset_seq[0].SeriesInstanceUID +# ref_dataset_seq[0].SeriesInstanceUID = '1.2.3.4.5.6.7.8.9' +# with self.assertRaises(ValueError): +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# ref_dataset_seq[0].SeriesInstanceUID = tmp_series_instance_uid - def test_mixed_transfer_syntax(self): - for m in self._modalities: - with self.subTest(m=m): - LegacyConverterClass = getattr( - sop, - "LegacyConvertedEnhanced{}Image".format(m) - ) - ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) - # first run with intact input - LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - # second run with defected input - tmp_transfer_syntax_uid = ref_dataset_seq[ - 
0].file_meta.TransferSyntaxUID - ref_dataset_seq[ - 0].file_meta.TransferSyntaxUID = '1.2.3.4.5.6.7.8.9' - with self.assertRaises(ValueError): - LegacyConverterClass( - legacy_datasets=ref_dataset_seq, - series_instance_uid=self._output_series_instance_uid, - series_number=self._output_instance_number, - sop_instance_uid=self._output_sop_instance_uid, - instance_number=self._output_instance_number) - ref_dataset_seq[ - 0].file_meta.TransferSyntaxUID = tmp_transfer_syntax_uid +# def test_mixed_transfer_syntax(self): +# for m in self._modalities: +# with self.subTest(m=m): +# LegacyConverterClass = getattr( +# sop, +# "LegacyConvertedEnhanced{}Image".format(m) +# ) +# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# # first run with intact input +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# # second run with defected input +# tmp_transfer_syntax_uid = ref_dataset_seq[ +# 0].file_meta.TransferSyntaxUID +# ref_dataset_seq[ +# 0].file_meta.TransferSyntaxUID = '1.2.3.4.5.6.7.8.9' +# with self.assertRaises(ValueError): +# LegacyConverterClass( +# legacy_datasets=ref_dataset_seq, +# series_instance_uid=self._output_series_instance_uid, +# series_number=self._output_instance_number, +# sop_instance_uid=self._output_sop_instance_uid, +# instance_number=self._output_instance_number) +# ref_dataset_seq[ +# 0].file_meta.TransferSyntaxUID = tmp_transfer_syntax_uid - def generate_common_dicom_dataset_series(self, slice_count: int, - system: Modality) -> list: - output_dataset = [] - slice_pos = 0 - slice_thickness = 0 - study_uid = generate_uid() - series_uid = generate_uid() - frame_of_ref_uid = generate_uid() - date_ = datetime.now().date() - age = timedelta(days=45 * 365) - time_ = datetime.now().time() - cols = 2 - rows = 2 - 
bytes_per_voxel = 2 +# def generate_common_dicom_dataset_series(self, slice_count: int, +# system: Modality) -> list: +# output_dataset = [] +# slice_pos = 0 +# slice_thickness = 0 +# study_uid = generate_uid() +# series_uid = generate_uid() +# frame_of_ref_uid = generate_uid() +# date_ = datetime.now().date() +# age = timedelta(days=45 * 365) +# time_ = datetime.now().time() +# cols = 2 +# rows = 2 +# bytes_per_voxel = 2 - for i in range(0, slice_count): - file_meta = Dataset() - pixel_array = b"\0" * cols * rows * bytes_per_voxel - file_meta.MediaStorageSOPClassUID = sop_classes[system][1] - file_meta.MediaStorageSOPInstanceUID = generate_uid() - file_meta.ImplementationClassUID = generate_uid() +# for i in range(0, slice_count): +# file_meta = Dataset() +# pixel_array = b"\0" * cols * rows * bytes_per_voxel +# file_meta.MediaStorageSOPClassUID = sop_classes[system][1] +# file_meta.MediaStorageSOPInstanceUID = generate_uid() +# file_meta.ImplementationClassUID = generate_uid() - tmp_dataset = FileDataset('', {}, file_meta=file_meta, - preamble=pixel_array) - tmp_dataset.file_meta.TransferSyntaxUID = "1.2.840.10008.1.2.1" - tmp_dataset.SliceLocation = slice_pos + i * slice_thickness - tmp_dataset.SliceThickness = slice_thickness - tmp_dataset.WindowCenter = 1 - tmp_dataset.WindowWidth = 2 - tmp_dataset.AcquisitionNumber = 1 - tmp_dataset.InstanceNumber = i - tmp_dataset.SeriesNumber = 1 - tmp_dataset.ImageOrientationPatient = [1.000000, 0.000000, 0.000000, - 0.000000, 1.000000, 0.000000] - tmp_dataset.ImagePositionPatient = [0.0, 0.0, - tmp_dataset.SliceLocation] - tmp_dataset.ImageType = ['ORIGINAL', 'PRIMARY', 'AXIAL'] - tmp_dataset.PixelSpacing = [1, 1] - tmp_dataset.PatientName = 'John Doe' - tmp_dataset.FrameOfReferenceUID = frame_of_ref_uid - tmp_dataset.SOPClassUID = sop_classes[system][1] - tmp_dataset.SOPInstanceUID = generate_uid() - tmp_dataset.SeriesInstanceUID = series_uid - tmp_dataset.StudyInstanceUID = study_uid - tmp_dataset.BitsAllocated = 
bytes_per_voxel * 8 - tmp_dataset.BitsStored = bytes_per_voxel * 8 - tmp_dataset.HighBit = (bytes_per_voxel * 8 - 1) - tmp_dataset.PixelRepresentation = 1 - tmp_dataset.Columns = cols - tmp_dataset.Rows = rows - tmp_dataset.SamplesPerPixel = 1 - tmp_dataset.AccessionNumber = '2' - tmp_dataset.AcquisitionDate = date_ - tmp_dataset.AcquisitionTime = datetime.now().time() - tmp_dataset.AdditionalPatientHistory = 'UTERINE CA PRE-OP EVAL' - tmp_dataset.ContentDate = date_ - tmp_dataset.ContentTime = datetime.now().time() - tmp_dataset.Manufacturer = 'Mnufacturer' - tmp_dataset.ManufacturerModelName = 'Model' - tmp_dataset.Modality = sop_classes[system][0] - tmp_dataset.PatientAge = '064Y' - tmp_dataset.PatientBirthDate = date_ - age - tmp_dataset.PatientID = 'ID0001' - tmp_dataset.PatientIdentityRemoved = 'YES' - tmp_dataset.PatientPosition = 'FFS' - tmp_dataset.PatientSex = 'F' - tmp_dataset.PhotometricInterpretation = 'MONOCHROME2' - tmp_dataset.PixelData = pixel_array - tmp_dataset.PositionReferenceIndicator = 'XY' - tmp_dataset.ProtocolName = 'some protocole' - tmp_dataset.ReferringPhysicianName = '' - tmp_dataset.SeriesDate = date_ - tmp_dataset.SeriesDescription = 'test series ' - tmp_dataset.SeriesTime = time_ - tmp_dataset.SoftwareVersions = '01' - tmp_dataset.SpecificCharacterSet = 'ISO_IR 100' - tmp_dataset.StudyDate = date_ - tmp_dataset.StudyDescription = 'test study' - tmp_dataset.StudyID = '' - if (system == Modality.CT): - tmp_dataset.RescaleIntercept = 0 - tmp_dataset.RescaleSlope = 1 - tmp_dataset.StudyTime = time_ - output_dataset.append(tmp_dataset) - return output_dataset +# tmp_dataset = FileDataset('', {}, file_meta=file_meta, +# preamble=pixel_array) +# tmp_dataset.file_meta.TransferSyntaxUID = "1.2.840.10008.1.2.1" +# tmp_dataset.SliceLocation = slice_pos + i * slice_thickness +# tmp_dataset.SliceThickness = slice_thickness +# tmp_dataset.WindowCenter = 1 +# tmp_dataset.WindowWidth = 2 +# tmp_dataset.AcquisitionNumber = 1 +# 
tmp_dataset.InstanceNumber = i +# tmp_dataset.SeriesNumber = 1 +# tmp_dataset.ImageOrientationPatient = [1.000000, 0.000000, 0.000000, +# 0.000000, 1.000000, 0.000000] +# tmp_dataset.ImagePositionPatient = [0.0, 0.0, +# tmp_dataset.SliceLocation] +# tmp_dataset.ImageType = ['ORIGINAL', 'PRIMARY', 'AXIAL'] +# tmp_dataset.PixelSpacing = [1, 1] +# tmp_dataset.PatientName = 'John Doe' +# tmp_dataset.FrameOfReferenceUID = frame_of_ref_uid +# tmp_dataset.SOPClassUID = sop_classes[system][1] +# tmp_dataset.SOPInstanceUID = generate_uid() +# tmp_dataset.SeriesInstanceUID = series_uid +# tmp_dataset.StudyInstanceUID = study_uid +# tmp_dataset.BitsAllocated = bytes_per_voxel * 8 +# tmp_dataset.BitsStored = bytes_per_voxel * 8 +# tmp_dataset.HighBit = (bytes_per_voxel * 8 - 1) +# tmp_dataset.PixelRepresentation = 1 +# tmp_dataset.Columns = cols +# tmp_dataset.Rows = rows +# tmp_dataset.SamplesPerPixel = 1 +# tmp_dataset.AccessionNumber = '2' +# tmp_dataset.AcquisitionDate = date_ +# tmp_dataset.AcquisitionTime = datetime.now().time() +# tmp_dataset.AdditionalPatientHistory = 'UTERINE CA PRE-OP EVAL' +# tmp_dataset.ContentDate = date_ +# tmp_dataset.ContentTime = datetime.now().time() +# tmp_dataset.Manufacturer = 'Mnufacturer' +# tmp_dataset.ManufacturerModelName = 'Model' +# tmp_dataset.Modality = sop_classes[system][0] +# tmp_dataset.PatientAge = '064Y' +# tmp_dataset.PatientBirthDate = date_ - age +# tmp_dataset.PatientID = 'ID0001' +# tmp_dataset.PatientIdentityRemoved = 'YES' +# tmp_dataset.PatientPosition = 'FFS' +# tmp_dataset.PatientSex = 'F' +# tmp_dataset.PhotometricInterpretation = 'MONOCHROME2' +# tmp_dataset.PixelData = pixel_array +# tmp_dataset.PositionReferenceIndicator = 'XY' +# tmp_dataset.ProtocolName = 'some protocole' +# tmp_dataset.ReferringPhysicianName = '' +# tmp_dataset.SeriesDate = date_ +# tmp_dataset.SeriesDescription = 'test series ' +# tmp_dataset.SeriesTime = time_ +# tmp_dataset.SoftwareVersions = '01' +# tmp_dataset.SpecificCharacterSet = 
'ISO_IR 100' +# tmp_dataset.StudyDate = date_ +# tmp_dataset.StudyDescription = 'test study' +# tmp_dataset.StudyID = '' +# if (system == Modality.CT): +# tmp_dataset.RescaleIntercept = 0 +# tmp_dataset.RescaleSlope = 1 +# tmp_dataset.StudyTime = time_ +# output_dataset.append(tmp_dataset) +# return output_dataset From fe51ca2b858bc899e2be828c5bd04da8f5526d90 Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 4 Apr 2021 17:07:15 -0400 Subject: [PATCH 31/44] Added tests to dicom legacy converter --- tests/test_legacy.py | 387 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 371 insertions(+), 16 deletions(-) diff --git a/tests/test_legacy.py b/tests/test_legacy.py index 59972784..c05e5ce0 100644 --- a/tests/test_legacy.py +++ b/tests/test_legacy.py @@ -1,23 +1,371 @@ import unittest from pydicom import FileDataset, Dataset +from pydicom.dataelem import DataElement from pydicom.uid import generate_uid from highdicom.legacy import sop from datetime import datetime, timedelta import enum -# class Modality(enum.IntEnum): -# CT = 0 -# MR = 1 -# PT = 2 +class Modality(enum.IntEnum): + CT = 0 + MR = 1 + PT = 2 -# sop_classes = [('CT', '1.2.840.10008.5.1.4.1.1.2'), -# ('MR', '1.2.840.10008.5.1.4.1.1.4'), -# ('PT', '1.2.840.10008.5.1.4.1.1.128')] +sop_classes = [('CT', '1.2.840.10008.5.1.4.1.1.2'), + ('MR', '1.2.840.10008.5.1.4.1.1.4'), + ('PT', '1.2.840.10008.5.1.4.1.1.128')] -# class TestLegacyConvertedEnhancedImage(unittest.TestCase): +class DicomGenerator: + + def __init__( + self, + slice_per_frameset: int = 3, + slice_thickness: float = 0.1, + pixel_spacing: float = 0.1, + row: int = 2, + col: int = 2,) -> None: + self._slice_per_frameset = slice_per_frameset + self._slice_thickness = slice_thickness + self._pixel_spacing = pixel_spacing + self._row = row + self._col = col + self._study_uid = generate_uid() + self._z_orientation_mat = [ + 1.000000, 0.000000, 0.000000, + 0.000000, 1.000000, 0.000000] + self._z_position_vec = [0.0, 0.0, 1.0] + 
self._y_orientation_mat = [ + 0.000000, 0.000000, 1.000000, + 1.000000, 0.000000, 0.000000] + self._y_position_vec = [0.0, 1.0, 0.0] + self._x_orientation_mat = [ + 0.000000, 1.000000, 0.000000, + 0.000000, 0.000000, 1.000000] + self._x_position_vec = [1.0, 0.0, 0.0] + + def _generate_frameset(self, + system: Modality, + orientation_mat: list, + position_vec: list, + series_uid: str, + first_slice_offset: float = 0, + frameset_idx: int = 0) -> list: + output_dataset = [] + slice_pos = first_slice_offset + slice_thickness = self._slice_thickness + study_uid = self._study_uid + frame_of_ref_uid = generate_uid() + date_ = datetime.now().date() + age = timedelta(days=45 * 365) + time_ = datetime.now().time() + cols = self._col + rows = self._row + bytes_per_voxel = 2 + + for i in range(0, self._slice_per_frameset): + file_meta = Dataset() + pixel_array = b"\0" * cols * rows * bytes_per_voxel + file_meta.MediaStorageSOPClassUID = sop_classes[system][1] + file_meta.MediaStorageSOPInstanceUID = generate_uid() + file_meta.ImplementationClassUID = generate_uid() + tmp_dataset = FileDataset('', {}, file_meta=file_meta, + preamble=pixel_array) + tmp_dataset.file_meta.TransferSyntaxUID = "1.2.840.10008.1.2.1" + tmp_dataset.SliceLocation = slice_pos + i * slice_thickness + tmp_dataset.SliceThickness = slice_thickness + tmp_dataset.WindowCenter = 1 + tmp_dataset.WindowWidth = 2 + tmp_dataset.AcquisitionNumber = 1 + tmp_dataset.InstanceNumber = i + tmp_dataset.SeriesNumber = 1 + tmp_dataset.ImageOrientationPatient = orientation_mat + tmp_dataset.ImagePositionPatient = [ + tmp_dataset.SliceLocation * i for i in position_vec] + if system == Modality.CT: + tmp_dataset.ImageType = ['ORIGINAL', 'PRIMARY', 'AXIAL'] + elif system == Modality.MR: + tmp_dataset.ImageType = ['ORIGINAL', 'PRIMARY', 'OTHER'] + elif system == Modality.PT: + tmp_dataset.ImageType = [ + 'ORIGINAL', 'PRIMARY', 'RECON', 'EMISSION'] + tmp_dataset.PixelSpacing = [ + self._pixel_spacing, self._pixel_spacing] + 
tmp_dataset.PatientName = 'John^Doe' + tmp_dataset.FrameOfReferenceUID = frame_of_ref_uid + tmp_dataset.SOPClassUID = sop_classes[system][1] + tmp_dataset.SOPInstanceUID = generate_uid() + tmp_dataset.SeriesInstanceUID = series_uid + tmp_dataset.StudyInstanceUID = study_uid + tmp_dataset.BitsAllocated = bytes_per_voxel * 8 + tmp_dataset.BitsStored = bytes_per_voxel * 8 + tmp_dataset.HighBit = (bytes_per_voxel * 8 - 1) + tmp_dataset.PixelRepresentation = 1 + tmp_dataset.Columns = cols + tmp_dataset.Rows = rows + tmp_dataset.SamplesPerPixel = 1 + tmp_dataset.AccessionNumber = '1{:05d}'.format(frameset_idx) + tmp_dataset.AcquisitionDate = date_ + tmp_dataset.AcquisitionTime = datetime.now().time() + tmp_dataset.AdditionalPatientHistory = 'UTERINE CA PRE-OP EVAL' + tmp_dataset.ContentDate = date_ + tmp_dataset.ContentTime = datetime.now().time() + tmp_dataset.Manufacturer = 'Mnufacturer' + tmp_dataset.ManufacturerModelName = 'Model' + tmp_dataset.Modality = sop_classes[system][0] + tmp_dataset.PatientAge = '064Y' + tmp_dataset.PatientBirthDate = date_ - age + tmp_dataset.PatientID = 'ID{:05d}'.format(frameset_idx) + tmp_dataset.PatientIdentityRemoved = 'YES' + tmp_dataset.PatientPosition = 'FFS' + tmp_dataset.PatientSex = 'F' + tmp_dataset.PhotometricInterpretation = 'MONOCHROME2' + tmp_dataset.PixelData = pixel_array + tmp_dataset.PositionReferenceIndicator = 'XY' + tmp_dataset.ProtocolName = 'some protocole' + tmp_dataset.ReferringPhysicianName = '' + tmp_dataset.SeriesDate = date_ + tmp_dataset.SeriesDescription = \ + 'test series_frameset{:05d}'.format(frameset_idx) + tmp_dataset.SeriesTime = time_ + tmp_dataset.SoftwareVersions = '01' + tmp_dataset.SpecificCharacterSet = 'ISO_IR 100' + tmp_dataset.StudyDate = date_ + tmp_dataset.StudyDescription = 'test study' + tmp_dataset.StudyID = '' + if (system == Modality.CT): + tmp_dataset.RescaleIntercept = 0 + tmp_dataset.RescaleSlope = 1 + tmp_dataset.StudyTime = time_ + output_dataset.append(tmp_dataset) + return 
output_dataset + + def generate_mixed_framesets( + self, system: Modality, + frame_set_count: int, parallel: bool = True, + flatten_output: bool = True) -> list: + out = [] + orients = [ + self._z_orientation_mat, + self._y_orientation_mat, + self._x_orientation_mat, ] + poses = [ + self._z_position_vec, + self._y_position_vec, + self._x_position_vec, + ] + se_uid = generate_uid() + for i in range(frame_set_count): + if parallel: + pos = poses[0] + orient = orients[0] + else: + pos = poses[i % len(poses)] + orient = orients[i % len(orients)] + if flatten_output: + out.extend( + self._generate_frameset( + system, orient, pos, se_uid, i * 50, i) + ) + else: + out.append( + self._generate_frameset( + system, orient, pos, se_uid, i * 50, i) + ) + return out + + +class TestDicomHelper(unittest.TestCase): + + def setUp(self) -> None: + super().setUp() + # Build data element for all value representations: + # vrs = [ + # 'AE', 'AS', 'AT', 'CS', 'DA', 'DS', 'DT', 'FL', 'FD', 'IS', 'LO', + # 'LT', 'OB', 'OD', 'OF', 'OL', 'OV', 'OW', 'PN', 'SH', 'SL', 'SQ', + # 'SS', 'ST', 'SV', 'TM', 'UC', 'UI', 'UL', 'UN', 'UR', + # 'US', 'UT', 'UV'] + self.data = { + "UL": [ + # Keyword: (0008, 0000) + DataElement(524288, "UL", 506), + DataElement(524288, "UL", 506), + DataElement(524288, "UL", 6), + ], + "CS": [ + # Keyword: (0008, 0005) SpecificCharacterSet + DataElement(524293, "CS", "ISO_IR 100"), + DataElement(524293, "CS", "ISO_IR 100"), + DataElement(524293, "CS", "ISO_IR 00"), + ], + "UI": [ + # Keyword: (0008, 0016) SOPClassUID + DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1.1"), + DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1.1"), + DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1."), + ], + "DA": [ + # Keyword: (0008, 0020) StudyDate + DataElement(524320, "DA", "19950809"), + DataElement(524320, "DA", "19950809"), + DataElement(524320, "DA", "9950809"), + ], + "TM": [ + # Keyword: (0008, 0030) StudyTime + DataElement(524336, "TM", "100044"), + DataElement(524336, 
"TM", "100044"), + DataElement(524336, "TM", "00044"), + ], + "US": [ + # Keyword: (0008, 0040) DataSetType + DataElement(524352, "US", 0), + DataElement(524352, "US", 0), + DataElement(524352, "US", 1), + ], + "LO": [ + # Keyword: (0008, 0041) DataSetSubtype + DataElement(524353, "LO", "IMA NONE"), + DataElement(524353, "LO", "IMA NONE"), + DataElement(524353, "LO", "IMA ONE"), + ], + "SH": [ + # Keyword: (0008, 0050) AccessionNumber + DataElement(524368, "SH", "1157687691469610"), + DataElement(524368, "SH", "1157687691469610"), + DataElement(524368, "SH", "157687691469610"), + ], + "PN": [ + # Keyword: (0008, 0090) ReferringPhysicianName + DataElement(524432, "PN", "Dr Alpha"), + DataElement(524432, "PN", "Dr Alpha"), + DataElement(524432, "PN", "Dr Beta"), + ], + "ST": [ + # Keyword: (0008, 2111) DerivationDescription + DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,"), + DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,"), + DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.,,,"), + ], + "UN": [ + # Keyword: (0013, 0000) + DataElement(1245184, "UN", b'\x00\x00\x00'), + DataElement(1245184, "UN", b'\x00\x00\x00'), + DataElement(1245184, "UN", b'\x00\x00\x01'), + ], + "DS": [ + # Keyword: (0018, 0060) KVP + DataElement(1572960, "DS", 110), + DataElement(1572960, "DS", 110), + DataElement(1572960, "DS", 10), + ], + "IS": [ + # Keyword: (0018, 1150) ExposureTime + DataElement(1577296, "IS", 32), + DataElement(1577296, "IS", 32), + DataElement(1577296, "IS", 2), + ], + "AS": [ + # Keyword: (0010, 1010) PatientAge + DataElement(1052688, "AS", "075Y"), + DataElement(1052688, "AS", "075Y"), + DataElement(1052688, "AS", "75Y"), + ], + "OW": [ + # Keyword: (7fe0, 0010) PixelData + DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x00'), + DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x00'), + DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x01'), + ], + "SS": [ + # Keyword: (0028, 0106) SmallestImagePixelValue + 
DataElement(2621702, "SS", 0), + DataElement(2621702, "SS", 0), + DataElement(2621702, "SS", 1), + ], + "DT": [ + # Keyword: (0008, 002a) AcquisitionDateTime + DataElement(524330, "DT", "20030922101033.000000"), + DataElement(524330, "DT", "20030922101033.000000"), + DataElement(524330, "DT", "20030922101033.00000"), + ], + "LT": [ + # Keyword: (0018, 7006) DetectorDescription + DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1.0"), + DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1.0"), + DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1."), + ], + "OB": [ + # Keyword: (0029, 1131) + DataElement(2691377, "OB", b'4.0.701169981 '), + DataElement(2691377, "OB", b'4.0.701169981 '), + DataElement(2691377, "OB", b'4.0.01169981 '), + ], + "AT": [ + # Keyword: (0028, 0009) FrameIncrementPointer + DataElement(2621449, "AT", 5505152), + DataElement(2621449, "AT", 5505152), + DataElement(2621449, "AT", 505152), + ], + } + + def test_attribute_equality(self) -> None: + for vr, [v1, v2, v3] in self.data.items(): + assert sop.DicomHelper.isequal(v1.value, v2.value) is True + assert sop.DicomHelper.isequal(v1.value, v3.value) is False + + +class TestFrameSetCollection(unittest.TestCase): + + def setUp(self) -> None: + super().setUp() + + def test_frameset_detection(self) -> None: + data_generator = DicomGenerator() + for i in range(1, 10): + data = data_generator.generate_mixed_framesets( + Modality.CT, i, True, True) + fset_collection = sop.FrameSetCollection(data) + assert len(fset_collection.FrameSets) == i + + def test_frameset_framecount_detection(self) -> None: + for i in range(1, 10): + data_generator = DicomGenerator(i) + data = data_generator.generate_mixed_framesets( + Modality.CT, 1, True, True) + fset_collection = sop.FrameSetCollection(data) + assert len(fset_collection.FrameSets) == 1 + assert len(fset_collection.FrameSets[0].Frames) == i + + +class TestLegacyConvertedEnhanceImage(unittest.TestCase): + + def setUp(self) -> None: + 
super().setUp() + + def test_conversion(self) -> None: + for i in range(1, 10): + for j in range(3): + data_generator = DicomGenerator(i) + data = data_generator.generate_mixed_framesets( + Modality(j), 1, True, True) + fset_collection = sop.FrameSetCollection(data) + assert len(fset_collection.FrameSets) == 1 + assert len(fset_collection.FrameSets[0].Frames) == i + convertor = sop.LegacyConvertedEnhanceImage( + fset_collection.FrameSets[0], + generate_uid(), + 555, + generate_uid(), + 111) + convertor.convert2mf() + assert convertor.NumberOfFrames == i + assert convertor.SOPClassUID == \ + sop.LEGACY_ENHANCED_SOP_CLASS_UID_MAP[sop_classes[j][1]] + + +# class TestLegacyConvertedEnhancedImage(unittest.TestCase): # def setUp(self): # super().setUp() @@ -40,7 +388,8 @@ # sop, # "LegacyConvertedEnhanced{}Image".format(m) # ) -# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# ref_dataset_seq = getattr( +# self, "_ref_dataset_seq_{}".format(m)) # multiframe_item = LegacyConverterClass( # legacy_datasets=ref_dataset_seq, @@ -80,7 +429,8 @@ # sop, # "LegacyConvertedEnhanced{}Image".format(m) # ) -# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# ref_dataset_seq = getattr( +# self, "_ref_dataset_seq_{}".format(m)) # tmp_orig_modality = ref_dataset_seq[0].Modality # ref_dataset_seq[0].Modality = '' # with self.assertRaises(ValueError): @@ -99,7 +449,8 @@ # sop, # "LegacyConvertedEnhanced{}Image".format(m) # ) -# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# ref_dataset_seq = getattr( +# self, "_ref_dataset_seq_{}".format(m)) # tmp_orig_sop_class_id = ref_dataset_seq[0].SOPClassUID # ref_dataset_seq[0].SOPClassUID = '1.2.3.4.5.6.7.8.9' # with self.assertRaises(ValueError): @@ -118,7 +469,8 @@ # sop, # "LegacyConvertedEnhanced{}Image".format(m) # ) -# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# ref_dataset_seq = getattr( +# self, "_ref_dataset_seq_{}".format(m)) # # first run with intact 
input # LegacyConverterClass( @@ -148,7 +500,8 @@ # sop, # "LegacyConvertedEnhanced{}Image".format(m) # ) -# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# ref_dataset_seq = getattr( +# self, "_ref_dataset_seq_{}".format(m)) # # first run with intact input # LegacyConverterClass( # legacy_datasets=ref_dataset_seq, @@ -175,7 +528,8 @@ # sop, # "LegacyConvertedEnhanced{}Image".format(m) # ) -# ref_dataset_seq = getattr(self, "_ref_dataset_seq_{}".format(m)) +# ref_dataset_seq = getattr( +# self, "_ref_dataset_seq_{}".format(m)) # # first run with intact input # LegacyConverterClass( # legacy_datasets=ref_dataset_seq, @@ -230,8 +584,9 @@ # tmp_dataset.AcquisitionNumber = 1 # tmp_dataset.InstanceNumber = i # tmp_dataset.SeriesNumber = 1 -# tmp_dataset.ImageOrientationPatient = [1.000000, 0.000000, 0.000000, -# 0.000000, 1.000000, 0.000000] +# tmp_dataset.ImageOrientationPatient =\ +# [1.000000, 0.000000, 0.000000, +# 0.000000, 1.000000, 0.000000] # tmp_dataset.ImagePositionPatient = [0.0, 0.0, # tmp_dataset.SliceLocation] # tmp_dataset.ImageType = ['ORIGINAL', 'PRIMARY', 'AXIAL'] From b6559c362cc7db5a189c8309cee568e10147f0f6 Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 4 Apr 2021 17:20:32 -0400 Subject: [PATCH 32/44] mend --- src/highdicom/legacy/sop.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index d948d560..a5277e18 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,7 +1,6 @@ """ Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" import logging -from collections import defaultdict -from typing import Any, Dict, List, Optional, Sequence, Union, Callable +from typing import Any, List, Union, Callable from numpy import log10, array, ceil, cross, dot, ndarray from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag from pydicom.dataset import Dataset @@ -14,7 +13,7 @@ from copy import deepcopy 
from pydicom.uid import UID from highdicom.base import SOPClass -from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP +from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP # logger = logging.getLogger(__name__) LEGACY_ENHANCED_SOP_CLASS_UID_MAP = { From 7f7df4a886d446797655f97c6a413564d8f38508 Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 11 Apr 2021 18:08:34 -0400 Subject: [PATCH 33/44] Modified code based on Markus's comments --- src/highdicom/legacy/sop.py | 1339 ++++++++++++++++++++++++++++------- tests/test_legacy.py | 505 ++++++------- 2 files changed, 1275 insertions(+), 569 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index a5277e18..10936905 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -12,6 +12,7 @@ from pydicom.valuerep import DT, DA, TM from copy import deepcopy from pydicom.uid import UID + from highdicom.base import SOPClass from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP @@ -32,31 +33,104 @@ class DicomHelper: + + """A class for checking dicom tags and comparing dicom attributes""" + def __init__(self) -> None: + """ + Parameters + ---------- + + Returns + ------- + + Note + ---- + + """ pass - def istag_file_meta_information_group(t: BaseTag) -> bool: + @classmethod + def istag_file_meta_information_group(cls, t: BaseTag) -> bool: + """ + Parameters + ---------- + + Returns + ------- + + Note + ---- + + """ return t.group == 0x0002 - def istag_repeating_group(t: BaseTag) -> bool: + @classmethod + def istag_repeating_group(cls, t: BaseTag) -> bool: + """ + Parameters + ---------- + + Returns + ------- + + Note + ---- + + """ g = t.group return (g >= 0x5000 and g <= 0x501e) or\ (g >= 0x6000 and g <= 0x601e) - def istag_group_length(t: BaseTag) -> bool: + @classmethod + def istag_group_length(cls, t: BaseTag) -> bool: + """ + Parameters + ---------- + + Returns + ------- + + 
Note + ---- + + """ return t.element == 0 - def isequal(v1: Any, v2: Any) -> bool: + @classmethod + def isequal(cls, v1: Any, v2: Any) -> bool: + """ + Parameters + ---------- + + Returns + ------- + + Note + ---- + + """ from pydicom.valuerep import DSfloat float_tolerance = 1.0e-5 def is_equal_float(x1: float, x2: float) -> bool: + """ + Parameters + ---------- + + Returns + ------- + + Note + ---- + + """ return abs(x1 - x2) < float_tolerance if type(v1) != type(v2): return False if isinstance(v1, DataElementSequence): for item1, item2 in zip(v1, v2): - DicomHelper.isequal_dicom_dataset(item1, item2) + cls.isequal_dicom_dataset(item1, item2) if not isinstance(v1, MultiValue): v11 = [v1] v22 = [v2] @@ -74,7 +148,19 @@ def is_equal_float(x1: float, x2: float) -> bool: return False return True - def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: + @classmethod + def isequal_dicom_dataset(cls, ds1: Dataset, ds2: Dataset) -> bool: + """Checks if two dicom dataset have the same value in all attributes + Parameters + ---------- + ds1: Dataset + 1st dicom dataset + ds2: Dataset + 2nd dicom dataset + Returns + ------- + True of dicom datasets are equal otherwise False + """ if type(ds1) != type(ds2): return False if not isinstance(ds1, Dataset): @@ -83,27 +169,54 @@ def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: if k1 not in ds2: return False elem2 = ds2[k1] - if not DicomHelper.isequal(elem2.value, elem1.value): + if not cls.isequal(elem2.value, elem1.value): return False return True - def tag2str(tg: BaseTag) -> str: + @classmethod + def tag2str(cls, tg: BaseTag) -> str: + """Converts tag to hex format like (group, element) + + """ if not isinstance(tg, BaseTag): tg = Tag(tg) return '(0x{:0>4x}, 0x{:0>4x})'.format(tg.group, tg.element) - def tag2kwstr(tg: BaseTag) -> str: + @classmethod + def tag2kwstr(cls, tg: BaseTag) -> str: + """Converts tag to keyword and (group, element) form + + """ return '{}-{:32.32s}'.format( - 
DicomHelper.tag2str(tg), keyword_for_tag(tg)) + cls.tag2str(tg), keyword_for_tag(tg)) class GeometryOfSlice: + + """A class for checking dicom slices geomtery/parallelization""" + def __init__(self, row_vector: ndarray, col_vector: ndarray, top_left_corner_pos: ndarray, voxel_spaceing: ndarray, - dimensions: tuple): + dimensions: tuple) -> None: + """ + Parameters + ---------- + row_vector: ndarray + 3D vector representing row of the input slice + col_vector: ndarray + 3D vector representing column the input aslice + top_left_corner_pos: ndarray + 3D point representing top left coner position of the input slice + voxel_spaceing: ndarray + Three element array. 1st and 2nd copied from PexelSpacing and the + 3rd copied from SliceThickness + dimensions: tuple + 3 element tuple holding x as number of cols, y as number of rows + and z as 1 + """ self.RowVector = row_vector self.ColVector = col_vector self.TopLeftCornerPosition = top_left_corner_pos @@ -111,11 +224,17 @@ def __init__(self, self.Dim = dimensions def get_normal_vector(self) -> ndarray: + """Returns the normal vector of the input slice + + """ n: ndarray = cross(self.RowVector, self.ColVector) n[2] = -n[2] return n def get_distance_along_origin(self) -> float: + """Returns the shortest distince of the slice from the origin + + """ n = self.get_normal_vector() return float( dot(self.TopLeftCornerPosition, n)) @@ -124,6 +243,9 @@ def are_parallel( slice1: Any, slice2: Any, tolerance: float = 0.0001) -> bool: + """Returns False if two slices are not prallel else True + + """ logger = logging.getLogger(__name__) if (not isinstance(slice1, GeometryOfSlice) or not isinstance(slice2, GeometryOfSlice)): @@ -143,7 +265,16 @@ def are_parallel( class PerframeFunctionalGroup(DataElementSequence): + """A sequence class for perframe functional group""" + def __init__(self, number_of_frames: int) -> None: + """ + Parameters + ---------- + number_of_frames: int + The perframe functional group sequence will have items equal 
to + the whole number of frames + """ super().__init__() for i in range(0, number_of_frames): item = Dataset() @@ -152,69 +283,103 @@ def __init__(self, number_of_frames: int) -> None: class SharedFunctionalGroup(DataElementSequence): + """A sequence class for shared functional group""" + def __init__(self) -> None: + """Construncts a shared frame functional group holding only one item + + """ super().__init__() item = Dataset() self.append(item) class FrameSet: + + """ + A class containing the dicom frames that hold equal distinguishing + attributes to detect all perframe and shared dicom attributes + """ + def __init__(self, single_frame_list: list, - distinguishing_tags: list): - self._Frames = single_frame_list - self._DistinguishingAttributesTags = distinguishing_tags + distinguishing_tags: list) -> None: + """ + Parameters + ---------- + single_frame_list: list + list of single frames that have equal distinguising attributes + distinguishing_tags: list + list of distinguishing attributes tags + """ + self._frames = single_frame_list + self._distinguishing_attributes_tags = distinguishing_tags tmp = [ tag_for_keyword('AcquisitionDateTime'), tag_for_keyword('AcquisitionDate'), tag_for_keyword('AcquisitionTime'), tag_for_keyword('SpecificCharacterSet')] - self._ExcludedFromPerFrameTags =\ - self.DistinguishingAttributesTags + tmp - self._PerFrameTags: list = [] - self._SharedTags: list = [] + self._excluded_fromperframe_tags =\ + self.distinguishing_attributes_tags + tmp + self._perframe_tags: list = [] + self._shared_tags: list = [] self._find_per_frame_and_shared_tags() @property - def Frames(self) -> List[Dataset]: - return self._Frames[:] + def frames(self) -> List[Dataset]: + return self._frames[:] @property - def DistinguishingAttributesTags(self) -> List[Tag]: - return self._DistinguishingAttributesTags[:] + def distinguishing_attributes_tags(self) -> List[Tag]: + return self._distinguishing_attributes_tags[:] @property - def 
ExcludedFromPerFrameTags(self) -> List[Tag]: - return self._ExcludedFromPerFrameTags[:] + def excluded_from_perframe_tags(self) -> List[Tag]: + return self._excluded_fromperframe_tags[:] @property - def PerFrameTags(self) -> List[Tag]: - return self._PerFrameTags[:] + def perframe_tags(self) -> List[Tag]: + return self._perframe_tags[:] @property - def SharedTags(self) -> List[Tag]: - return self._SharedTags[:] + def shared_tags(self) -> List[Tag]: + return self._shared_tags[:] @property def SeriesInstanceUID(self) -> UID: - return self._Frames[0].SeriesInstanceUID + """Returns the sereis instance uid of the FrameSet + + """ + return self._frames[0].SeriesInstanceUID @property def StudyInstanceUID(self) -> UID: - return self._Frames[0].StudyInstanceUID + """Returns the study instance uid of the FrameSet + + """ + return self._frames[0].StudyInstanceUID + + def get_sop_instance_uid_list(self) -> list: + """Returns a list containing all SOPInstanceUID of the FrameSet - def GetSOPInstanceUIDList(self) -> list: + """ OutputList: list = [] - for f in self._Frames: + for f in self._frames: OutputList.append(f.SOPInstanceUID) return OutputList - def GetSOPClassUID(self) -> UID: - return self._Frames[0].SOPClassUID + def get_sop_class_uid(self) -> UID: + """Returns the sop class uid of the FrameSet + + """ + return self._frames[0].SOPClassUID def _find_per_frame_and_shared_tags(self) -> None: + """Detects and collects all shared and perframe attributes + + """ # logger = logging.getLogger(__name__) rough_shared: dict = {} - sfs = self.Frames + sfs = self.frames for ds in sfs: for ttag, elem in ds.items(): if (not ttag.is_private and not @@ -224,8 +389,8 @@ def _find_per_frame_and_shared_tags(self) -> None: self._istag_excluded_from_perframe(ttag) and ttag != tag_for_keyword('PixelData')): elem = ds[ttag] - if ttag not in self._PerFrameTags: - self._PerFrameTags.append(ttag) + if ttag not in self._perframe_tags: + self._perframe_tags.append(ttag) if ttag in rough_shared: 
rough_shared[ttag].append(elem.value) else: @@ -233,7 +398,7 @@ def _find_per_frame_and_shared_tags(self) -> None: to_be_removed_from_shared = [] for ttag, v in rough_shared.items(): v = rough_shared[ttag] - if len(v) < len(self.Frames): + if len(v) < len(self.frames): to_be_removed_from_shared.append(ttag) else: all_values_are_equal = True @@ -250,19 +415,28 @@ def _find_per_frame_and_shared_tags(self) -> None: for t in to_be_removed_from_shared: del rough_shared[t] for t, v in rough_shared.items(): - self._SharedTags.append(t) - self._PerFrameTags.remove(t) + self._shared_tags.append(t) + self._perframe_tags.remove(t) def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: - return t in self.ExcludedFromPerFrameTags + return t in self.excluded_from_perframe_tags class FrameSetCollection: - def __init__(self, single_frame_list: list): + + """A calss to extract framesets based on distinguishing dicom attributes""" + + def __init__(self, single_frame_list: list) -> None: + """ + Parameters + ---------- + single_frame_list: list + lisf of mixed or non-mixed single frame dicom images + """ logger = logging.getLogger(__name__) - self.MixedFrames = single_frame_list - self.MixedFramesCopy = self.MixedFrames[:] - self._DistinguishingAttributeKeywords = [ + self.mixed_frames = single_frame_list + self.mixed_frames_copy = self.mixed_frames[:] + self._distinguishing_attribute_keywords = [ 'PatientID', 'PatientName', 'Manufacturer', @@ -294,13 +468,13 @@ def __init__(self, single_frame_list: list): 'SliceThickness', 'AcquisitionContextSequence'] to_be_removed_from_distinguishing_attribs: set = set() - self._FrameSets: list = [] + self._frame_sets: list = [] frame_counts = [] frameset_counter = 0 - while len(self.MixedFramesCopy) != 0: + while len(self.mixed_frames_copy) != 0: frameset_counter += 1 x = self._find_all_similar_to_first_datasets() - self._FrameSets.append(FrameSet(x[0], x[1])) + self._frame_sets.append(FrameSet(x[0], x[1])) frame_counts.append(len(x[0])) # 
log information logger.debug("Frameset({:02d}) including {:03d} frames".format( @@ -319,36 +493,39 @@ def __init__(self, single_frame_list: list): for i, f_count in enumerate(frame_counts, 1): frames += '{: 2d}){:03d}\t'.format(i, f_count) frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( - len(frame_counts), len(self.MixedFrames)) + frames + len(frame_counts), len(self.mixed_frames)) + frames logger.info(frames) for kw in to_be_removed_from_distinguishing_attribs: - self.DistinguishingAttributeKeywords.remove(kw) - self.ExcludedFromPerFrameTags = {} - for kwkw in self.DistinguishingAttributeKeywords: - self.ExcludedFromPerFrameTags[tag_for_keyword(kwkw)] = False - self.ExcludedFromPerFrameTags[ + self.distinguishing_attribute_keywords.remove(kw) + self.excluded_from_perframe_tags = {} + for kwkw in self.distinguishing_attribute_keywords: + self.excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False + self.excluded_from_perframe_tags[ tag_for_keyword('AcquisitionDateTime')] = False - self.ExcludedFromPerFrameTags[ + self.excluded_from_perframe_tags[ tag_for_keyword('AcquisitionDate')] = False - self.ExcludedFromPerFrameTags[ + self.excluded_from_perframe_tags[ tag_for_keyword('AcquisitionTime')] = False self.ExcludedFromFunctionalGroupsTags = { tag_for_keyword('SpecificCharacterSet'): False} def _find_all_similar_to_first_datasets(self) -> tuple: + """Takes the fist instance from mixed-frames and finds all dicom images + that have the same distinguishing attributes. 
+ """ logger = logging.getLogger(__name__) - similar_ds: list = [self.MixedFramesCopy[0]] + similar_ds: list = [self.mixed_frames_copy[0]] distinguishing_tags_existing = [] distinguishing_tags_missing = [] - self.MixedFramesCopy = self.MixedFramesCopy[1:] - for kw in self.DistinguishingAttributeKeywords: + self.mixed_frames_copy = self.mixed_frames_copy[1:] + for kw in self.distinguishing_attribute_keywords: tg = tag_for_keyword(kw) if tg in similar_ds[0]: distinguishing_tags_existing.append(tg) else: distinguishing_tags_missing.append(tg) logger_msg = set() - for ds in self.MixedFramesCopy: + for ds in self.mixed_frames_copy: all_equal = True for tg in distinguishing_tags_missing: if tg in ds: @@ -378,21 +555,24 @@ def _find_all_similar_to_first_datasets(self) -> tuple: for msg_ in logger_msg: logger.info(msg_) for ds in similar_ds: - if ds in self.MixedFramesCopy: - self.MixedFramesCopy.remove(ds) + if ds in self.mixed_frames_copy: + self.mixed_frames_copy.remove(ds) return (similar_ds, distinguishing_tags_existing) @property - def DistinguishingAttributeKeywords(self) -> List[str]: - return self._DistinguishingAttributeKeywords[:] + def distinguishing_attribute_keywords(self) -> List[str]: + """Returns the list of all distinguising attributes found.""" + return self._distinguishing_attribute_keywords[:] @property - def FrameSets(self) -> List[FrameSet]: - return self._FrameSets + def frame_sets(self) -> List[FrameSet]: + """Returns the list of all FrameSets found.""" + return self._frame_sets -class LegacyConvertedEnhanceImage(SOPClass): - """SOP class for Legacy Converted Enhanced PET Image instances.""" +class _CommonLegacyConvertedEnhanceImage(SOPClass): + + """SOP class for common Legacy Converted Enhanced instances.""" def __init__( self, @@ -402,7 +582,7 @@ def __init__( sop_instance_uid: str, instance_number: int, sort_key: Callable = None, - **kwargs: Any) -> None: + ) -> None: """ Parameters ---------- @@ -417,18 +597,15 @@ def __init__( UID that should 
be assigned to the instance instance_number: int Number that should be assigned to the instance - **kwargs: Any, optional - Additional keyword arguments that will be passed to the constructor - of `highdicom.base.SOPClass` """ - legacy_datasets = frame_set.Frames + legacy_datasets = frame_set.frames try: ref_ds = legacy_datasets[0] except IndexError: raise ValueError('No DICOM data sets of provided.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] if sort_key is None: - sort_key = LegacyConvertedEnhanceImage.default_sort_key + sort_key = _CommonLegacyConvertedEnhanceImage.default_sort_key super().__init__( study_instance_uid="" if 'StudyInstanceUID' not in ref_ds else ref_ds.StudyInstanceUID, @@ -459,7 +636,7 @@ def __init__( else ref_ds.StudyTime, referring_physician_name=None if 'ReferringPhysicianName' not in ref_ds else ref_ds.ReferringPhysicianName, - **kwargs) + ) self._legacy_datasets = legacy_datasets self._perframe_functional_group = PerframeFunctionalGroup( len(legacy_datasets)) @@ -468,14 +645,14 @@ def __init__( self._shared_functional_group = SharedFunctionalGroup() tg = tag_for_keyword('SharedFunctionalGroupsSequence') self[tg] = DataElement(tg, 'SQ', self._shared_functional_group) - self.DistinguishingAttributesTags = self._get_tag_used_dictionary( - frame_set.DistinguishingAttributesTags) - self.ExcludedFromPerFrameTags = self._get_tag_used_dictionary( - frame_set.ExcludedFromPerFrameTags) - self._PerFrameTags = self._get_tag_used_dictionary( - frame_set.PerFrameTags) - self._SharedTags = self._get_tag_used_dictionary( - frame_set.SharedTags) + self.distinguishing_attributes_tags = self._get_tag_used_dictionary( + frame_set.distinguishing_attributes_tags) + self.excluded_from_perframe_tags = self._get_tag_used_dictionary( + frame_set.excluded_from_perframe_tags) + self._perframe_tags = self._get_tag_used_dictionary( + frame_set.perframe_tags) + self._shared_tags = self._get_tag_used_dictionary( + frame_set.shared_tags) 
self.ExcludedFromFunctionalGroupsTags = { tag_for_keyword('SpecificCharacterSet'): False} @@ -571,18 +748,19 @@ def __init__( "IdenticalDocumentsSequence" ] } - self.EarliestDate = DA('00010101') - self.EarliestTime = TM('000000') - self.EarliestDateTime = DT('00010101000000') - self.FarthestFutureDate = DA('99991231') - self.FarthestFutureTime = TM('235959') - self.FarthestFutureDateTime = DT('99991231235959') + self.earliest_date = DA('00010101') + self.earliest_time = TM('000000') + self.earliest_date_time = DT('00010101000000') + self.farthest_future_date = DA('99991231') + self.farthest_future_time = TM('235959') + self.farthest_future_date_time = DT('99991231235959') self._slices: list = [] self._tolerance = 0.0001 self._slice_location_map: dict = {} self._byte_data = bytearray() self._word_data = bytearray() - self.EarliestContentDateTime = self.FarthestFutureDateTime + self.earliest_content_date_time = self.farthest_future_date_time + self._add_common_ct_pet_mr_build_blocks() if (_SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid] == 'legacy-converted-enhanced-ct-image'): self._add_build_blocks_for_ct() @@ -594,6 +772,10 @@ def __init__( self._add_build_blocks_for_pet() def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: + """Takes a dicom DataElement and check if DataElement is empty or in + case of Sequence returns True if there is not item or all the items + are empty. 
+ """ if attribute.is_empty: return True if isinstance(attribute.value, DataElementSequence): @@ -607,17 +789,41 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: return False def _mark_tag_as_used(self, tg: BaseTag) -> None: - if tg in self._SharedTags: - self._SharedTags[tg] = True - elif tg in self.ExcludedFromPerFrameTags: - self.ExcludedFromPerFrameTags[tg] = True - elif tg in self._PerFrameTags: - self._PerFrameTags[tg] = True + """Checks what group the input tag belongs to and marks it as used to + keep track of all used and unused tags + """ + if tg in self._shared_tags: + self._shared_tags[tg] = True + elif tg in self.excluded_from_perframe_tags: + self.excluded_from_perframe_tags[tg] = True + elif tg in self._perframe_tags: + self._perframe_tags[tg] = True def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, src_kw_or_tg: str, dest_kw_or_tg: str = None, check_not_to_be_perframe: bool = True, check_not_to_be_empty: bool = False) -> None: + """Copies a dicom attribute value from a keyword in the source Dataset + to a similar or different keyword in the destination Dataset + Parameters + ---------- + src_ds: Dataset + Source Dataset to copy the attribute from. + dest_ds: Dataset + Destination Dataset to copy the attribute to. + src_kw_or_tg: str + The keyword from the souce Dataset to copy its value. + dest_kw_or_tg: str = None + The keyword of the destination Dataset, the value is copied to. If + its value is None, then the destination keyword will be exactly the + source keyword. + check_not_to_be_perframe: bool = True + If this arg is true, then copy is aborted if the src attribute is + perframe. + check_not_to_be_empty: bool = False + If this arg is true, then copy is aborted if the source attribute is + empty. 
+ """ if isinstance(src_kw_or_tg, str): src_kw_or_tg = tag_for_keyword(src_kw_or_tg) if dest_kw_or_tg is None: @@ -625,7 +831,7 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, elif isinstance(dest_kw_or_tg, str): dest_kw_or_tg = tag_for_keyword(dest_kw_or_tg) if check_not_to_be_perframe: - if src_kw_or_tg in self._PerFrameTags: + if src_kw_or_tg in self._perframe_tags: return if src_kw_or_tg in src_ds: elem = src_ds[src_kw_or_tg] @@ -645,6 +851,22 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, def _get_or_create_attribute( self, src: Dataset, kw: Union[str, Tag], default: Any) -> DataElement: + """Creates a new DataElement with a value copied from the source + Dataset. If the attribute is absent in source Dataset, then its value + will be the default value. + Parameters + ---------- + src: Dataset + Source Dataset to copy the value if available. + kw: Union[str, Tag] + The keyword of created DataElement. + default: Any + The default value created DataElement if the keyword was absent in + the source Dataset. + Returns + ------- + A new DataElement created. + """ if kw is str: tg = tag_for_keyword(kw) else: @@ -679,6 +901,21 @@ def _get_or_create_attribute( def _add_module(self, module_name: str, excepted_attributes: list = [], check_not_to_be_perframe: bool = True, check_not_to_be_empty: bool = False) -> None: + """Copies all attribute of a particular module to current SOPClass, + excepting the excepted_attributes, from a reference frame (the first + frame on the single frame list). + Parameters + ---------- + module_name: str: + A hiphenated module name like `image-pixel`. + excepted_attributes: list = [] + List of all attributes that are not allowed to be copied + check_not_to_be_perframe: bool = True + If this flag is true, then the perframe attributes will not be + copied. + check_not_to_be_empty: bool = False + If this flag is true, then the empty attributes will not be copied. 
+ """ attribs: list = MODULE_ATTRIBUTE_MAP[module_name] ref_dataset = self._legacy_datasets[0] for a in attribs: @@ -692,6 +929,9 @@ def _add_module(self, module_name: str, excepted_attributes: list = [], check_not_to_be_empty=check_not_to_be_empty) def _add_module_to_mf_image_pixel(self) -> None: + """Copies/adds` a/an image_pixel` multiframe module to + the current SOPClass from its single frame source. + """ module_and_excepted_at = { "image-pixel": [ @@ -710,6 +950,9 @@ def _add_module_to_mf_image_pixel(self) -> None: check_not_to_be_perframe=True) # don't check the perframe set def _add_module_to_mf_enhanced_common_image(self) -> None: + """Copies/adds a/an `enhanced_common_image` multiframe module to + the current SOPClass from its single frame source. + """ ref_dataset = self._legacy_datasets[0] attribs_to_be_added = [ 'ContentQualification', @@ -726,10 +969,10 @@ def _add_module_to_mf_enhanced_common_image(self) -> None: check_not_to_be_empty=False) sum_compression_ratio: float = 0 c_ratio_tag = tag_for_keyword('LossyImageCompressionRatio') - if tag_for_keyword('LossyImageCompression') in self._SharedTags and \ + if tag_for_keyword('LossyImageCompression') in self._shared_tags and \ tag_for_keyword( - 'LossyImageCompressionMethod') in self._SharedTags and \ - c_ratio_tag in self._PerFrameTags: + 'LossyImageCompressionMethod') in self._shared_tags and \ + c_ratio_tag in self._perframe_tags: for fr_ds in self._legacy_datasets: if c_ratio_tag in fr_ds: ratio = fr_ds[c_ratio_tag].value @@ -745,7 +988,7 @@ def _add_module_to_mf_enhanced_common_image(self) -> None: self[c_ratio_tag] = \ DataElement(c_ratio_tag, 'DS', avg_ratio_str) - if tag_for_keyword('PresentationLUTShape') not in self._PerFrameTags: + if tag_for_keyword('PresentationLUTShape') not in self._perframe_tags: # actually should really invert the pixel data if MONOCHROME1, # since only MONOCHROME2 is permitted : ( # also, do not need to check if PhotometricInterpretation is @@ -764,13 +1007,22 @@ def 
_add_module_to_mf_enhanced_common_image(self) -> None: # Icon Image Sequence - always discard these def _add_module_to_mf_contrast_bolus(self) -> None: + """Copies/adds a/an `contrast_bolus` multiframe module to + the current SOPClass from its single frame source. + """ self._add_module('contrast-bolus') def _add_module_to_mf_enhanced_ct_image(self) -> None: + """Copies/adds a/an `enhanced_ct_image` multiframe module to + the current SOPClass from its single frame source. + """ pass # David's code doesn't hold anything for this module ... should ask him def _add_module_to_mf_enhanced_pet_image(self) -> None: + """Copies/adds a/an `enhanced_pet_image` multiframe module to + the current SOPClass from its single frame source. + """ # David's code doesn't hold anything for this module ... should ask him kw = 'ContentQualification' tg = tag_for_keyword(kw) @@ -779,6 +1031,9 @@ def _add_module_to_mf_enhanced_pet_image(self) -> None: self[tg] = elem def _add_module_to_mf_enhanced_mr_image(self) -> None: + """Copies/adds a/an `enhanced_mr_image` multiframe module to + the current SOPClass from its single frame source. + """ self._copy_attrib_if_present( self._legacy_datasets[0], self, @@ -821,8 +1076,11 @@ def _add_module_to_mf_enhanced_mr_image(self) -> None: check_not_to_be_empty=True) def _add_module_to_mf_acquisition_context(self) -> None: + """Copies/adds a/an `acquisition_context` multiframe module to + the current SOPClass from its single frame source. + """ tg = tag_for_keyword('AcquisitionContextSequence') - if tg not in self._PerFrameTags: + if tg not in self._perframe_tags: self[tg] = self._get_or_create_attribute( self._legacy_datasets[0], tg, @@ -830,6 +1088,9 @@ def _add_module_to_mf_acquisition_context(self) -> None: def _get_value_for_frame_type( self, attrib: DataElement) -> Union[list, None]: + """Guesses the appropriate FrameType attribute value from ImageType. 
+ + """ if not isinstance(attrib, DataElement): return None output = ['', '', '', ''] @@ -843,6 +1104,9 @@ def _get_value_for_frame_type( def _get_frame_type_seq_tag( self, modality: str) -> int: + """Detects the correct tag/keyword for the frame type sq based on the + modality name. + """ seq_kw = '{}{}FrameTypeSequence' if modality == 'PET': seq_kw = seq_kw.format(modality, '') @@ -852,14 +1116,31 @@ def _get_frame_type_seq_tag( def _add_module_to_dataset_common_ct_mr_pet_image_description( self, source: Dataset, destination: Dataset, level: int) -> None: - FrameType_a = source['ImageType'] + """Copies/adds attributes related to `common_ct_mr_pet_image_description` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + level: int + If level is `0` then the destination attributes will be in the root + of dicom Dataset like `ImageType`. 
If level is not `0`, then the + destination attributes will be in functional groups items like + `FrameType` + """ + frame_type_a = source['ImageType'] if level == 0: - FrameType_tg = tag_for_keyword('ImageType') + frame_type_tg = tag_for_keyword('ImageType') else: - FrameType_tg = tag_for_keyword('FrameType') - new_val = self._get_value_for_frame_type(FrameType_a) - destination[FrameType_tg] = DataElement( - FrameType_tg, FrameType_a.VR, new_val) + frame_type_tg = tag_for_keyword('FrameType') + new_val = self._get_value_for_frame_type(frame_type_a) + destination[frame_type_tg] = DataElement( + frame_type_tg, frame_type_a.VR, new_val) def element_generator(kw: str, val: Any) -> DataElement: return DataElement( @@ -874,9 +1155,12 @@ def element_generator(kw: str, val: Any) -> DataElement: def _add_module_to_mf_common_ct_mr_pet_image_description( self, modality: str) -> None: + """Copies/adds the common attrabutes for ct/mr/pet description + module to the current SOPClass from its single frame source. + """ im_type_tag = tag_for_keyword('ImageType') seq_tg = self._get_frame_type_seq_tag(modality) - if im_type_tag not in self._PerFrameTags: + if im_type_tag not in self._perframe_tags: self._add_module_to_dataset_common_ct_mr_pet_image_description( self._legacy_datasets[0], self, 0) # ---------------------------- @@ -896,6 +1180,9 @@ def _add_module_to_mf_common_ct_mr_pet_image_description( seq_tg, 'SQ', DataElementSequence([inner_item])) def _add_module_to_mf_composite_instance_contex(self) -> None: + """Copies/adds a/an `composite_instance_contex` multiframe module to + the current SOPClass from its single frame source. 
+ """ for module_name, excpeted_a in self._module_excepted_list.items(): self._add_module( module_name, @@ -905,6 +1192,18 @@ def _add_module_to_mf_composite_instance_contex(self) -> None: def _add_module_to_dataset_frame_anatomy( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `frame_anatomy` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ # David's code is more complicaated than mine # Should check it out later. fa_seq_tg = tag_for_keyword('FrameAnatomySequence') @@ -930,16 +1229,20 @@ def _add_module_to_dataset_frame_anatomy( check_not_to_be_perframe=False, check_not_to_be_empty=True) if 'FrameLaterality' not in item: - FrameLaterality_a = self._get_or_create_attribute( + frame_laterality_a = self._get_or_create_attribute( source, 'FrameLaterality', "U") - item['FrameLaterality'] = FrameLaterality_a - FrameAnatomy_a = DataElement( + item['FrameLaterality'] = frame_laterality_a + frame_anatomy_a = DataElement( fa_seq_tg, dictionary_VR(fa_seq_tg), DataElementSequence([item])) - destination['FrameAnatomySequence'] = FrameAnatomy_a + destination['FrameAnatomySequence'] = frame_anatomy_a def _has_frame_anatomy(self, tags: dict) -> bool: + """returns true if attributes specific to + `frame_anatomy` present in source single frames. + Otherwise returns false. 
+ """ laterality_tg = tag_for_keyword('Laterality') im_laterality_tg = tag_for_keyword('ImageLaterality') bodypart_tg = tag_for_keyword('BodyPartExamined') @@ -950,29 +1253,48 @@ def _has_frame_anatomy(self, tags: dict) -> bool: anatomical_reg_tg) def _add_module_to_mf_frame_anatomy(self) -> None: - if (not self._has_frame_anatomy(self._PerFrameTags) and - (self._has_frame_anatomy(self._SharedTags) or - self._has_frame_anatomy(self.ExcludedFromPerFrameTags)) + """Copies/adds a/an `frame_anatomy` multiframe module to + the current SOPClass from its single frame source. + """ + if (not self._has_frame_anatomy(self._perframe_tags) and + (self._has_frame_anatomy(self._shared_tags) or + self._has_frame_anatomy(self.excluded_from_perframe_tags)) ): item = self._shared_functional_group[0] self._add_module_to_dataset_frame_anatomy( self._legacy_datasets[0], item) - elif self._has_frame_anatomy(self._PerFrameTags): + elif self._has_frame_anatomy(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_frame_anatomy( self._legacy_datasets[i], item) def _has_pixel_measures(self, tags: dict) -> bool: - PixelSpacing_tg = tag_for_keyword('PixelSpacing') - SliceThickness_tg = tag_for_keyword('SliceThickness') - ImagerPixelSpacing_tg = tag_for_keyword('ImagerPixelSpacing') - return (PixelSpacing_tg in tags or - SliceThickness_tg in tags or - ImagerPixelSpacing_tg in tags) + """returns true if attributes specific to + `pixel_measures` present in source single frames. + Otherwise returns false. 
+ """ + pixel_spacing_tg = tag_for_keyword('PixelSpacing') + slice_thickness_tg = tag_for_keyword('SliceThickness') + imager_pixel_spacing_tg = tag_for_keyword('ImagerPixelSpacing') + return (pixel_spacing_tg in tags or + slice_thickness_tg in tags or + imager_pixel_spacing_tg in tags) def _add_module_to_dataset_pixel_measures( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `pixel_measures` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -997,58 +1319,97 @@ def _add_module_to_dataset_pixel_measures( destination[pixel_measures_tg] = seq def _add_module_to_mf_pixel_measures(self) -> None: - if (not self._has_pixel_measures(self._PerFrameTags) and - (self._has_pixel_measures(self._SharedTags) or - self._has_pixel_measures(self.ExcludedFromPerFrameTags)) + """Copies/adds a/an `pixel_measures` multiframe module to + the current SOPClass from its single frame source. 
+ """ + if (not self._has_pixel_measures(self._perframe_tags) and + (self._has_pixel_measures(self._shared_tags) or + self._has_pixel_measures(self.excluded_from_perframe_tags)) ): item = self._shared_functional_group[0] self._add_module_to_dataset_pixel_measures( self._legacy_datasets[0], item) - elif self._has_pixel_measures(self._PerFrameTags): + elif self._has_pixel_measures(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_pixel_measures( self._legacy_datasets[i], item) def _has_plane_position(self, tags: dict) -> bool: - ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') - return ImagePositionPatient_tg in tags + """returns true if attributes specific to + `plane_position` present in source single frames. + Otherwise returns false. + """ + image_position_patient_tg = tag_for_keyword('ImagePositionPatient') + return image_position_patient_tg in tags def _add_module_to_dataset_plane_position( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `plane_position` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. 
+ """ item = Dataset() self._copy_attrib_if_present(source, item, 'ImagePositionPatient', check_not_to_be_perframe=False, check_not_to_be_empty=False) - PlanePositionSequence_kw = 'PlanePositionSequence' - PlanePositionSequence_tg = tag_for_keyword(PlanePositionSequence_kw) - seq = DataElement(PlanePositionSequence_tg, - dictionary_VR(PlanePositionSequence_tg), + plane_position_sequence_kw = 'PlanePositionSequence' + plane_position_sequence_tg = tag_for_keyword(plane_position_sequence_kw) + seq = DataElement(plane_position_sequence_tg, + dictionary_VR(plane_position_sequence_tg), DataElementSequence([item])) - destination[PlanePositionSequence_tg] = seq + destination[plane_position_sequence_tg] = seq def _add_module_to_mf_plane_position(self) -> None: - if (not self._has_plane_position(self._PerFrameTags) and - (self._has_plane_position(self._SharedTags) or - self._has_plane_position(self.ExcludedFromPerFrameTags)) + """Copies/adds a/an `plane_position` multiframe module to + the current SOPClass from its single frame source. + """ + if (not self._has_plane_position(self._perframe_tags) and + (self._has_plane_position(self._shared_tags) or + self._has_plane_position(self.excluded_from_perframe_tags)) ): item = self._shared_functional_group[0] self._add_module_to_dataset_plane_position( self._legacy_datasets[0], item) - elif self._has_plane_position(self._PerFrameTags): + elif self._has_plane_position(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_plane_position( self._legacy_datasets[i], item) def _has_plane_orientation(self, tags: dict) -> bool: - ImageOrientationPatient_tg = tag_for_keyword('ImageOrientationPatient') - return ImageOrientationPatient_tg in tags + """returns true if attributes specific to + `plane_orientation` present in source single frames. + Otherwise returns false. 
+ """ + image_orientation_patient_tg = tag_for_keyword( + 'ImageOrientationPatient') + return image_orientation_patient_tg in tags def _add_module_to_dataset_plane_orientation( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `plane_orientation` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1061,30 +1422,49 @@ def _add_module_to_dataset_plane_orientation( destination[tg] = seq def _add_module_to_mf_plane_orientation(self) -> None: - if (not self._has_plane_orientation(self._PerFrameTags) and - (self._has_plane_orientation(self._SharedTags) or - self._has_plane_orientation(self.ExcludedFromPerFrameTags)) + """Copies/adds a/an `plane_orientation` multiframe module to + the current SOPClass from its single frame source. 
+ """ + if (not self._has_plane_orientation(self._perframe_tags) and + (self._has_plane_orientation(self._shared_tags) or + self._has_plane_orientation(self.excluded_from_perframe_tags)) ): item = self._shared_functional_group[0] self._add_module_to_dataset_plane_orientation( self._legacy_datasets[0], item) - elif self._has_plane_orientation(self._PerFrameTags): + elif self._has_plane_orientation(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_plane_orientation( self._legacy_datasets[i], item) def _has_frame_voi_lut(self, tags: dict) -> bool: - WindowWidth_tg = tag_for_keyword('WindowWidth') - WindowCenter_tg = tag_for_keyword('WindowCenter') - WindowCenterWidthExplanation_tg = tag_for_keyword( + """returns true if attributes specific to + `frame_voi_lut` present in source single frames. + Otherwise returns false. + """ + window_width_tg = tag_for_keyword('WindowWidth') + window_center_tg = tag_for_keyword('WindowCenter') + window_center_width_explanation_tg = tag_for_keyword( 'WindowCenterWidthExplanation') - return (WindowWidth_tg in tags or - WindowCenter_tg in tags or - WindowCenterWidthExplanation_tg in tags) + return (window_width_tg in tags or + window_center_tg in tags or + window_center_width_explanation_tg in tags) def _add_module_to_dataset_frame_voi_lut( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `frame_voi_lut` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. 
+ """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1107,29 +1487,48 @@ def _add_module_to_dataset_frame_voi_lut( destination[tg] = seq def _add_module_to_mf_frame_voi_lut(self) -> None: - if (not self._has_frame_voi_lut(self._PerFrameTags) and - (self._has_frame_voi_lut(self._SharedTags) or - self._has_frame_voi_lut(self.ExcludedFromPerFrameTags)) + """Copies/adds a/an `frame_voi_lut` multiframe module to + the current SOPClass from its single frame source. + """ + if (not self._has_frame_voi_lut(self._perframe_tags) and + (self._has_frame_voi_lut(self._shared_tags) or + self._has_frame_voi_lut(self.excluded_from_perframe_tags)) ): item = self._shared_functional_group[0] self._add_module_to_dataset_frame_voi_lut( self._legacy_datasets[0], item) - elif self._has_frame_voi_lut(self._PerFrameTags): + elif self._has_frame_voi_lut(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_frame_voi_lut( self._legacy_datasets[i], item) def _has_pixel_value_transformation(self, tags: dict) -> bool: - RescaleIntercept_tg = tag_for_keyword('RescaleIntercept') - RescaleSlope_tg = tag_for_keyword('RescaleSlope') - RescaleType_tg = tag_for_keyword('RescaleType') - return (RescaleIntercept_tg in tags or - RescaleSlope_tg in tags or - RescaleType_tg in tags) + """returns true if attributes specific to + `pixel_value_transformation` present in source single frames. + Otherwise returns false. 
+ """ + rescale_intercept_tg = tag_for_keyword('RescaleIntercept') + rescale_slope_tg = tag_for_keyword('RescaleSlope') + rescale_type_tg = tag_for_keyword('RescaleType') + return (rescale_intercept_tg in tags or + rescale_slope_tg in tags or + rescale_type_tg in tags) def _add_module_to_dataset_pixel_value_transformation( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `pixel_value_transformation` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1155,9 +1554,9 @@ def _add_module_to_dataset_pixel_value_transformation( value = 'US' if modality == 'CT': containes_localizer = False - ImageType_v = [] if 'ImageType' not in source\ + image_type_v = [] if 'ImageType' not in source\ else source['ImageType'].value - for i in ImageType_v: + for i in image_type_v: if i == 'LOCALIZER': containes_localizer = True break @@ -1183,24 +1582,43 @@ def _add_module_to_dataset_pixel_value_transformation( destination[tg] = seq def _add_module_to_mf_pixel_value_transformation(self) -> None: - if (not self._has_pixel_value_transformation(self._PerFrameTags) and - (self._has_pixel_value_transformation(self._SharedTags) or - self._has_pixel_value_transformation(self.ExcludedFromPerFrameTags)) - ): + """Copies/adds a/an `pixel_value_transformation` multiframe module to + the current SOPClass from its single frame source. 
+ """ + if (not self._has_pixel_value_transformation(self._perframe_tags) and + (self._has_pixel_value_transformation(self._shared_tags) or + self._has_pixel_value_transformation( + self.excluded_from_perframe_tags))): item = self._shared_functional_group[0] self._add_module_to_dataset_pixel_value_transformation( self._legacy_datasets[0], item) - elif self._has_pixel_value_transformation(self._PerFrameTags): + elif self._has_pixel_value_transformation(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_pixel_value_transformation( self._legacy_datasets[i], item) def _has_referenced_image(self, tags: dict) -> bool: + """returns true if attributes specific to + `referenced_image` present in source single frames. + Otherwise returns false. + """ return tag_for_keyword('ReferencedImageSequence') in tags def _add_module_to_dataset_referenced_image( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `referenced_image` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ self._copy_attrib_if_present(source, destination, 'ReferencedImageSequence', @@ -1208,24 +1626,43 @@ def _add_module_to_dataset_referenced_image( check_not_to_be_empty=False) def _add_module_to_mf_referenced_image(self) -> None: - if (not self._has_referenced_image(self._PerFrameTags) and - (self._has_referenced_image(self._SharedTags) or - self._has_referenced_image(self.ExcludedFromPerFrameTags)) + """Copies/adds a/an `referenced_image` multiframe module to + the current SOPClass from its single frame source. 
+ """ + if (not self._has_referenced_image(self._perframe_tags) and + (self._has_referenced_image(self._shared_tags) or + self._has_referenced_image(self.excluded_from_perframe_tags)) ): item = self._shared_functional_group[0] self._add_module_to_dataset_referenced_image( self._legacy_datasets[0], item) - elif self._has_referenced_image(self._PerFrameTags): + elif self._has_referenced_image(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_referenced_image( self._legacy_datasets[i], item) def _has_derivation_image(self, tags: dict) -> bool: + """returns true if attributes specific to + `derivation_image` present in source single frames. + Otherwise returns false. + """ return tag_for_keyword('SourceImageSequence') in tags def _add_module_to_dataset_derivation_image( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `derivation_image` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1248,20 +1685,26 @@ def _add_module_to_dataset_derivation_image( destination[tg] = seq def _add_module_to_mf_derivation_image(self) -> None: - if (not self._has_derivation_image(self._PerFrameTags) and - (self._has_derivation_image(self._SharedTags) or - self._has_derivation_image(self.ExcludedFromPerFrameTags)) + """Copies/adds a/an `derivation_image` multiframe module to + the current SOPClass from its single frame source. 
+ """ + if (not self._has_derivation_image(self._perframe_tags) and + (self._has_derivation_image(self._shared_tags) or + self._has_derivation_image(self.excluded_from_perframe_tags)) ): item = self._shared_functional_group[0] self._add_module_to_dataset_derivation_image( self._legacy_datasets[0], item) - elif self._has_derivation_image(self._PerFrameTags): + elif self._has_derivation_image(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_derivation_image( self._legacy_datasets[i], item) def _get_tag_used_dictionary(self, input: list) -> dict: + """Returns a dictionary of input tags with a use flag + + """ out: dict = {} for item in input: out[item] = False @@ -1269,6 +1712,18 @@ def _get_tag_used_dictionary(self, input: list) -> dict: def _add_module_to_dataset_unassigned_perframe( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `unassigned_perframe` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. 
+ """ item = Dataset() for tg in self._eligeible_tags: self._copy_attrib_if_present(source, @@ -1282,10 +1737,14 @@ def _add_module_to_dataset_unassigned_perframe( destination[tg] = seq def _add_largest_smallest_pixle_value(self) -> None: + """Adds the attributes for largest and smallest pixel value to + current SOPClass object + + """ ltg = tag_for_keyword("LargestImagePixelValue") from sys import float_info lval = float_info.min - if ltg in self._PerFrameTags: + if ltg in self._perframe_tags: for frame in self._legacy_datasets: if ltg in frame: nval = frame[ltg].value @@ -1294,10 +1753,10 @@ def _add_largest_smallest_pixle_value(self) -> None: lval = nval if lval < nval else lval if lval > float_info.min: self[ltg] = DataElement(ltg, 'SS', int(lval)) - # ========================== + # ========================== stg = tag_for_keyword("SmallestImagePixelValue") sval = float_info.max - if stg in self._PerFrameTags: + if stg in self._perframe_tags: for frame in self._legacy_datasets: if stg in frame: nval = frame[stg].value @@ -1310,11 +1769,14 @@ def _add_largest_smallest_pixle_value(self) -> None: stg = "SmallestImagePixelValue" def _add_module_to_mf_unassigned_perframe(self) -> None: + """Copies/adds a/an `unassigned_perframe` multiframe module to + the current SOPClass from its single frame source. 
+ """ # first collect all not used tags # note that this is module is order dependent self._add_largest_smallest_pixle_value() self._eligeible_tags: List[Tag] = [] - for tg, used in self._PerFrameTags.items(): + for tg, used in self._perframe_tags.items(): if not used and tg not in self.ExcludedFromFunctionalGroupsTags: self._eligeible_tags.append(tg) for i in range(0, len(self._legacy_datasets)): @@ -1324,8 +1786,20 @@ def _add_module_to_mf_unassigned_perframe(self) -> None: def _add_module_to_dataset_unassigned_shared( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `unassigned_shared` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ item = Dataset() - for tg, used in self._SharedTags.items(): + for tg, used in self._shared_tags.items(): if (not used and tg not in self and tg not in self.ExcludedFromFunctionalGroupsTags): @@ -1340,14 +1814,23 @@ def _add_module_to_dataset_unassigned_shared( destination[tg] = seq def _add_module_to_mf_unassigned_shared(self) -> None: + """Copies/adds a/an `unassigned_shared` multiframe module to + the current SOPClass from its single frame source. + """ item = self._shared_functional_group[0] self._add_module_to_dataset_unassigned_shared( self._legacy_datasets[0], item) def _create_empty_element(self, tg: BaseTag) -> DataElement: + """Creates an empty dicom DataElement for input tag + + """ return DataElement(tg, dictionary_VR(tg), None) def _add_module_to_mf_empty_type2_attributes(self) -> None: + """Adds empty type2 attributes to the current SOPClass to avoid + type2 missing error. 
+ """ iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[ self['SOPClassUID'].value] modules = IOD_MODULE_MAP[iod_name] @@ -1360,13 +1843,25 @@ def _add_module_to_mf_empty_type2_attributes(self) -> None: tg = tag_for_keyword(a['keyword']) if (tg not in self._legacy_datasets[0] and tg not in self and - tg not in self._PerFrameTags and - tg not in self._SharedTags): + tg not in self._perframe_tags and + tg not in self._shared_tags): self[tg] =\ self._create_empty_element(tg) def _add_module_to_dataset_conversion_source( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `conversion_source` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1386,49 +1881,56 @@ def _add_module_to_dataset_conversion_source( destination[tg] = seq def _add_module_to_mf_conversion_source(self) -> None: + """Copies/adds a/an `conversion_source` multiframe module to + the current SOPClass from its single frame source. + """ for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_conversion_source( self._legacy_datasets[i], item) - self.EarliestFrameAcquisitionDateTime = self.FarthestFutureDateTime + self.EarliestFrameAcquisitionDateTime =\ + self.farthest_future_date_time def _build_slices_geometry_frame_content(self) -> None: + """Instantiates an object of GeometryOfSlice for each sice. 
+ + """ logger = logging.getLogger(__name__) frame_count = len(self._legacy_datasets) for i in range(0, frame_count): curr_frame = self._legacy_datasets[i] - ImagePositionPatient_v = None \ + image_position_patient_v = None \ if 'ImagePositionPatient' not in curr_frame\ else curr_frame['ImagePositionPatient'].value - ImageOrientationPatient_v = None \ + image_orientation_patient_v = None \ if 'ImageOrientationPatient' not in curr_frame\ else curr_frame['ImageOrientationPatient'].value - PixelSpacing_v = None \ + pixel_spacing_v = None \ if 'PixelSpacing' not in curr_frame\ else curr_frame['PixelSpacing'].value - SliceThickness_v = 0.0 \ + slice_thickness_v = 0.0 \ if 'SliceThickness' not in curr_frame\ else curr_frame['SliceThickness'].value - # SliceLocation_v = None \ + # slice_location_v = None \ # if 'SliceLocation' not in curr_frame\ # else curr_frame['SliceLocation'].value - Rows_v = 0 \ + rows_v = 0 \ if 'Rows' not in curr_frame\ else curr_frame['Rows'].value - Columns_v = 0 \ + columns_v = 0 \ if 'Columns' not in curr_frame\ else curr_frame['Columns'].value - if (ImageOrientationPatient_v is not None and - ImagePositionPatient_v is not None and - PixelSpacing_v is not None): - row = array(ImageOrientationPatient_v[0:3]) - col = array(ImageOrientationPatient_v[3:]) - voxel_spaceing = array([PixelSpacing_v[0], - PixelSpacing_v[1], - SliceThickness_v]) - tpl = array(ImagePositionPatient_v) - dim = (Rows_v, Columns_v, 1) + if (image_orientation_patient_v is not None and + image_position_patient_v is not None and + pixel_spacing_v is not None): + row = array(image_orientation_patient_v[0:3]) + col = array(image_orientation_patient_v[3:]) + voxel_spaceing = array([pixel_spacing_v[0], + pixel_spacing_v[1], + slice_thickness_v]) + tpl = array(image_position_patient_v) + dim = (rows_v, columns_v, 1) self._slices.append(GeometryOfSlice(row, col, tpl, voxel_spaceing, dim)) else: @@ -1436,14 +1938,17 @@ def _build_slices_geometry_frame_content(self) -> None: "Error in 
geometry. One or more required " "attributes are not available") logger.error("\tImageOrientationPatient = {}".format( - ImageOrientationPatient_v)) + image_orientation_patient_v)) logger.error("\tImagePositionPatient = {}".format( - ImagePositionPatient_v)) - logger.error("\tPixelSpacing = {}".format(PixelSpacing_v)) + image_position_patient_v)) + logger.error("\tPixelSpacing = {}".format(pixel_spacing_v)) self._slices = [] # clear the slices break def _are_all_slices_parallel_frame_content(self) -> bool: + """Returns true if all slices are prallel otherwise, false. + + """ slice_count = len(self._slices) if slice_count >= 2: last_slice = self._slices[0] @@ -1460,6 +1965,9 @@ def _are_all_slices_parallel_frame_content(self) -> bool: return False def _add_stack_info_frame_content(self) -> None: + """Adds stack information to the FrameContentSequence dicom attribute. + + """ logger = logging.getLogger(__name__) self._build_slices_geometry_frame_content() round_digits = int(ceil(-log10(self._tolerance))) @@ -1501,15 +2009,31 @@ def _add_stack_info_frame_content(self) -> None: distance_index += 1 def _has_frame_content(self, tags: dict) -> bool: - AcquisitionDateTime_tg = tag_for_keyword('AcquisitionDateTime') - AcquisitionDate_tg = tag_for_keyword('AcquisitionDate') - AcquisitionTime_tg = tag_for_keyword('AcquisitionTime') - return (AcquisitionDateTime_tg in tags or - AcquisitionTime_tg in tags or - AcquisitionDate_tg in tags) + """returns true if attributes specific to + `frame_content` present in source single frames. + Otherwise returns false. 
+ """ + acquisition_date_time_tg = tag_for_keyword('AcquisitionDateTime') + acquisition_date_tg = tag_for_keyword('AcquisitionDate') + acquisition_time_tg = tag_for_keyword('AcquisitionTime') + return (acquisition_date_time_tg in tags or + acquisition_time_tg in tags or + acquisition_date_tg in tags) def _add_module_to_dataset_frame_content( self, source: Dataset, destination: Dataset) -> None: + """Copies/adds attributes related to `frame_content` + to destination dicom Dataset + Parameters + ---------- + source: Dataset + the source dicom Dataset from which the modules attributes values + are copied + destination: Dataset + the destination dicom Dataset to which the modules attributes + values are copied. The destination Dataset usually is an item + from a perframe/shared functional group sequence. + """ item = Dataset() fan_tg = tag_for_keyword('FrameAcquisitionNumber') an_tg = tag_for_keyword('AcquisitionNumber') @@ -1520,46 +2044,46 @@ def _add_module_to_dataset_frame_content( item[fan_tg] = DataElement(fan_tg, dictionary_VR(fan_tg), fan_val) self._mark_tag_as_used(an_tg) # ---------------------------------------------------------------- - AcquisitionDateTime_a = self._get_or_create_attribute( - source, 'AcquisitionDateTime', self.EarliestDateTime) + acquisition_date_time_a = self._get_or_create_attribute( + source, 'AcquisitionDateTime', self.earliest_date_time) # chnage the keyword to FrameAcquisitionDateTime: - FrameAcquisitionDateTime_a = DataElement( + frame_acquisition_date_time_a = DataElement( tag_for_keyword('FrameAcquisitionDateTime'), - 'DT', AcquisitionDateTime_a.value) - AcquisitionDateTime_is_perframe = self._has_frame_content( - self._PerFrameTags) - if FrameAcquisitionDateTime_a.value == self.EarliestDateTime: - AcquisitionDate_a = self._get_or_create_attribute( - source, 'AcquisitionDate', self.EarliestDate) - AcquisitionTime_a = self._get_or_create_attribute( - source, 'AcquisitionTime', self.EarliestTime) - d = AcquisitionDate_a.value - t = 
AcquisitionTime_a.value - # FrameAcquisitionDateTime_a.value = (DT(d.strftime('%Y%m%d') + + 'DT', acquisition_date_time_a.value) + acquisition_date_time_is_perframe = self._has_frame_content( + self._perframe_tags) + if frame_acquisition_date_time_a.value == self.earliest_date_time: + acquisition_date_a = self._get_or_create_attribute( + source, 'AcquisitionDate', self.earliest_date) + acquisition_time_a = self._get_or_create_attribute( + source, 'AcquisitionTime', self.earliest_time) + d = acquisition_date_a.value + t = acquisition_time_a.value + # frame_acquisition_date_time_a.value = (DT(d.strftime('%Y%m%d') + # t.strftime('%H%M%S'))) - FrameAcquisitionDateTime_a.value = DT(str(d) + str(t)) - if FrameAcquisitionDateTime_a.value > self.EarliestDateTime: - if (FrameAcquisitionDateTime_a.value < + frame_acquisition_date_time_a.value = DT(str(d) + str(t)) + if frame_acquisition_date_time_a.value > self.earliest_date_time: + if (frame_acquisition_date_time_a.value < self.EarliestFrameAcquisitionDateTime): self.EarliestFrameAcquisitionDateTime =\ - FrameAcquisitionDateTime_a.value - if not AcquisitionDateTime_is_perframe: + frame_acquisition_date_time_a.value + if not acquisition_date_time_is_perframe: if ('TriggerTime' in source and 'FrameReferenceDateTime' not in source): - TriggerTime_a = self._get_or_create_attribute( - source, 'TriggerTime', self.EarliestTime) - trigger_time_in_millisecond = int(TriggerTime_a.value) + trigger_time_a = self._get_or_create_attribute( + source, 'TriggerTime', self.earliest_time) + trigger_time_in_millisecond = int(trigger_time_a.value) if trigger_time_in_millisecond > 0: t_delta = timedelta(trigger_time_in_millisecond) # this is so rediculous. 
I'm not able to cnvert # the DT to datetime (cast to superclass) d_t = datetime.combine( - FrameAcquisitionDateTime_a.value.date(), - FrameAcquisitionDateTime_a.value.time()) + frame_acquisition_date_time_a.value.date(), + frame_acquisition_date_time_a.value.time()) d_t = d_t + t_delta - FrameAcquisitionDateTime_a.value =\ + frame_acquisition_date_time_a.value =\ DT(d_t.strftime('%Y%m%d%H%M%S')) - item['FrameAcquisitionDateTime'] = FrameAcquisitionDateTime_a + item['FrameAcquisitionDateTime'] = frame_acquisition_date_time_a # --------------------------------- self._copy_attrib_if_present( source, item, "AcquisitionDuration", @@ -1580,34 +2104,57 @@ def _add_module_to_dataset_frame_content( seq_tg = tag_for_keyword('FrameContentSequence') destination[seq_tg] = DataElement( seq_tg, dictionary_VR(seq_tg), DataElementSequence([item])) - # Also we want to add the earliest frame acq date time to the multiframe: def _add_acquisition_info_frame_content(self) -> None: + """Adds acquisition information to the FrameContentSequence dicom + attribute. + + """ for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_group[i] self._add_module_to_dataset_frame_content( self._legacy_datasets[i], item) - if self.EarliestFrameAcquisitionDateTime < self.FarthestFutureDateTime: + if (self.EarliestFrameAcquisitionDateTime < + self.farthest_future_date_time): kw = 'AcquisitionDateTime' self[kw] = DataElement( tag_for_keyword(kw), 'DT', self.EarliestFrameAcquisitionDateTime) def _add_module_to_mf_frame_content(self) -> None: + """Copies/adds a/an 'frame_content` multiframe module to + the current SOPClass from its single frame source. + """ self._add_acquisition_info_frame_content() self._add_stack_info_frame_content() def _is_other_byte_vr_pixel_data(self, vr: str) -> bool: + """checks if `PixelData` dicom value representation is OB. 
+ + """ return vr[0] == 'O' and vr[1] == 'B' def _is_other_word_vr_pixel_data(self, vr: str) -> bool: + """checks if `PixelData` dicom value representation is OW. + + """ return vr[0] == 'O' and vr[1] == 'W' - # def _has(self, tags: dict) -> bool: - # ImagePositionPatient_tg = tag_for_keyword('ImagePositionPatient') - # return ImagePositionPatient_tg in tags + # def _has(self, tags: dict) -> bool: """ + # image_position_patient_tg = tag_for_keyword('ImagePositionPatient') + # return image_position_patient_tg in tags def _copy_data_pixel_data( self, src: bytearray, word_data: bool = False) -> None: + """Copies contnet of PixelData from one frame and appends it to the + content of PixelData for multiframe + Parameters + ---------- + src: bytearray + content of pixel data from source frame(one of the single frames) + word_data: bool = False + flag representing if the data is word-wise instead of blyte-wise + + """ # Make sure that the length complies by row and col if word_data: des = self._word_data @@ -1622,6 +2169,9 @@ def _copy_data_pixel_data( des.extend(src) def _add_module_to_mf_pixel_data(self) -> None: + """Copies/add`s a/an pixel_data` multiframe module to + the current SOPClass from its single frame source. 
+ """ kw = 'NumberOfFrames' tg = tag_for_keyword(kw) self._frame_count = len(self._legacy_datasets) @@ -1635,19 +2185,19 @@ def _add_module_to_mf_pixel_data(self) -> None: for i in range(0, len(self._legacy_datasets)): if kw not in self._legacy_datasets[i]: continue - PixelData_a = self._legacy_datasets[i][kw] - if self._is_other_byte_vr_pixel_data(PixelData_a.VR): + pixel_data_a = self._legacy_datasets[i][kw] + if self._is_other_byte_vr_pixel_data(pixel_data_a.VR): if len(self._word_data) != 0: raise TypeError( 'Cannot mix OB and OW Pixel Data ' 'VR from different frames') - self._copy_data_pixel_data(PixelData_a.value, False) - elif self._is_other_word_vr_pixel_data(PixelData_a.VR): + self._copy_data_pixel_data(pixel_data_a.value, False) + elif self._is_other_word_vr_pixel_data(pixel_data_a.VR): if len(self._byte_data) != 0: raise TypeError( 'Cannot mix OB and OW Pixel Data ' 'VR from different frames') - self._copy_data_pixel_data(PixelData_a.value, True) + self._copy_data_pixel_data(pixel_data_a.value, True) else: raise TypeError( 'Cannot mix OB and OW Pixel Data VR from different frames') @@ -1660,10 +2210,13 @@ def _add_module_to_mf_pixel_data(self) -> None: self[kw] = MF_PixelData def _add_module_to_mf_content_date_time(self) -> None: + """Copies/adds a/an `content_date_time` multiframe module to + the current SOPClass from its single frame source. 
+ """ default_atrs = ["Acquisition", "Series", "Study"] for i in range(0, len(self._legacy_datasets)): src = self._legacy_datasets[i] - default_date = self.FarthestFutureDate + default_date = self.farthest_future_date for def_atr in default_atrs: at_tg = tag_for_keyword(def_atr + "Date") if at_tg in src: @@ -1675,7 +2228,7 @@ def _add_module_to_mf_content_date_time(self) -> None: d_a = self._get_or_create_attribute( src, kw, default_date) d = d_a.value - default_time = self.FarthestFutureTime + default_time = self.farthest_future_time for def_atr in default_atrs: at_tg = tag_for_keyword(def_atr + "Time") if at_tg in src: @@ -1688,11 +2241,12 @@ def _add_module_to_mf_content_date_time(self) -> None: src, kw, default_time) t = t_a.value value = DT(d.strftime('%Y%m%d') + t.strftime('%H%M%S.%f')) - if self.EarliestContentDateTime > value: - self.EarliestContentDateTime = value - if self.EarliestContentDateTime < self.FarthestFutureDateTime: - n_d = DA(self.EarliestContentDateTime.date().strftime('%Y%m%d')) - n_t = TM(self.EarliestContentDateTime.time().strftime('%H%M%S.%f')) + if self.earliest_content_date_time > value: + self.earliest_content_date_time = value + if self.earliest_content_date_time < self.farthest_future_date_time: + n_d = DA(self.earliest_content_date_time.date().strftime('%Y%m%d')) + n_t = TM( + self.earliest_content_date_time.time().strftime('%H%M%S.%f')) kw = 'ContentDate' self[kw] = DataElement( tag_for_keyword(kw), 'DA', n_d) @@ -1702,32 +2256,46 @@ def _add_module_to_mf_content_date_time(self) -> None: def _add_data_element_to_target_contributing_equipment( self, target: Dataset, kw: str, value: Any) -> None: + """Add new data element related to ContributingEquipmentSequence to a + target dataset(usually an item). + Parameters + ---------- + target: Dataset + destination dicom Dataset. + kw: str + keyword if the attribute being added. + value: Any + value if the attribute being added. 
+ """ tg = tag_for_keyword(kw) target[kw] = DataElement(tg, dictionary_VR(tg), value) def _add_module_to_mf_contributing_equipment(self) -> None: - CodeValue_tg = tag_for_keyword('CodeValue') - CodeMeaning_tg = tag_for_keyword('CodeMeaning') - CodingSchemeDesignator_tg = tag_for_keyword('CodingSchemeDesignator') - PurposeOfReferenceCode_item = Dataset() - PurposeOfReferenceCode_item['CodeValue'] = DataElement( - CodeValue_tg, - dictionary_VR(CodeValue_tg), + """Copies/adds a/an `contributing_equipment` multiframe module to + the current SOPClass from its single frame source. + """ + code_value_tg = tag_for_keyword('CodeValue') + code_meaning_tg = tag_for_keyword('CodeMeaning') + coding_scheme_designator_tg = tag_for_keyword('CodingSchemeDesignator') + purpose_of_reference_code_item = Dataset() + purpose_of_reference_code_item['CodeValue'] = DataElement( + code_value_tg, + dictionary_VR(code_value_tg), '109106') - PurposeOfReferenceCode_item['CodeMeaning'] = DataElement( - CodeMeaning_tg, - dictionary_VR(CodeMeaning_tg), + purpose_of_reference_code_item['CodeMeaning'] = DataElement( + code_meaning_tg, + dictionary_VR(code_meaning_tg), 'Enhanced Multi-frame Conversion Equipment') - PurposeOfReferenceCode_item['CodingSchemeDesignator'] = DataElement( - CodingSchemeDesignator_tg, - dictionary_VR(CodingSchemeDesignator_tg), + purpose_of_reference_code_item['CodingSchemeDesignator'] = DataElement( + coding_scheme_designator_tg, + dictionary_VR(coding_scheme_designator_tg), 'DCM') - PurposeOfReferenceCode_seq = DataElement( + purpose_of_reference_code_seq = DataElement( tag_for_keyword('PurposeOfReferenceCodeSequence'), - 'SQ', DataElementSequence([PurposeOfReferenceCode_item])) + 'SQ', DataElementSequence([purpose_of_reference_code_item])) item: Dataset = Dataset() item[ - 'PurposeOfReferenceCodeSequence'] = PurposeOfReferenceCode_seq + 'PurposeOfReferenceCodeSequence'] = purpose_of_reference_code_seq self._add_data_element_to_target_contributing_equipment( item, 
"Manufacturer", 'HighDicom') self._add_data_element_to_target_contributing_equipment( @@ -1752,6 +2320,9 @@ def _add_module_to_mf_contributing_equipment(self) -> None: self[tg] = DataElement(tg, 'SQ', DataElementSequence([item])) def _add_module_to_mf_instance_creation_date_time(self) -> None: + """Copies/adds a/an `instance_creation_date_time` multiframe module to + the current SOPClass from its single frame source. + """ nnooww = datetime.now() n_d = DA(nnooww.date().strftime('%Y%m%d')) n_t = TM(nnooww.time().strftime('%H%M%S')) @@ -1762,7 +2333,11 @@ def _add_module_to_mf_instance_creation_date_time(self) -> None: self[kw] = DataElement( tag_for_keyword(kw), 'TM', n_t) - def default_sort_key(x: Dataset) -> tuple: + @classmethod + def default_sort_key(cls, x: Dataset) -> tuple: + """The default sort key to sort all single frames before conversion + + """ out: tuple = tuple() if 'SeriesNumber' in x: out += (x['SeriesNumber'].value, ) @@ -1773,9 +2348,16 @@ def default_sort_key(x: Dataset) -> tuple: return out def _clear_build_blocks(self) -> None: + """Clears the array containing all methods for multiframe conversion + + """ self.__build_blocks = [] def _add_common_ct_pet_mr_build_blocks(self) -> None: + """Arranges common methods for multiframe conversion and + put them in place. + + """ blocks = [ [self._add_module_to_mf_image_pixel, None], [self._add_module_to_mf_composite_instance_contex, None], @@ -1801,6 +2383,10 @@ def _add_common_ct_pet_mr_build_blocks(self) -> None: self.__build_blocks.append(b) def _add_ct_specific_build_blocks(self) -> None: + """Arranges CT specific methods for multiframe conversion and + put them in place. 
+ + """ blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -1813,6 +2399,10 @@ def _add_ct_specific_build_blocks(self) -> None: self.__build_blocks.append(b) def _add_mr_specific_build_blocks(self) -> None: + """Arranges MRI specific methods for multiframe conversion and + put them in place + + """ blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -1825,6 +2415,10 @@ def _add_mr_specific_build_blocks(self) -> None: self.__build_blocks.append(b) def _add_pet_specific_build_blocks(self) -> None: + """Arranges PET specific methods for multiframe conversion and + put them in place + + """ blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -1835,22 +2429,38 @@ def _add_pet_specific_build_blocks(self) -> None: for b in blocks: self.__build_blocks.append(b) - def _add_build_blocks_for_ct(self) -> None: - self._clear_build_blocks() - self._add_common_ct_pet_mr_build_blocks() - self._add_ct_specific_build_blocks() - def _add_build_blocks_for_mr(self) -> None: + """Arranges all methods necessary for MRI multiframe conversion and + put them in place + + """ self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_mr_specific_build_blocks() def _add_build_blocks_for_pet(self) -> None: + """Arranges all methods necessary for PET multiframe conversion and + put them in place + + """ self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_pet_specific_build_blocks() + def _add_build_blocks_for_ct(self) -> None: + """Arranges all methods necessary for CT multiframe conversion and + put them in place. + + """ + self._clear_build_blocks() + self._add_common_ct_pet_mr_build_blocks() + self._add_ct_specific_build_blocks() + def convert2mf(self) -> None: + """Runs all necessary methods to conver from single frame to + multi-frame. 
+ + """ logger = logging.getLogger(__name__) logger.debug('Strt singleframe to multiframe conversion') for fun, args in self.__build_blocks: @@ -1859,3 +2469,184 @@ def convert2mf(self) -> None: else: fun(*args) logger.debug('Conversion succeeded') + + +class LegacyConvertedEnhancedCTImage(_CommonLegacyConvertedEnhanceImage): + + """SOP class for Legacy Converted Enhanced CT Image instances.""" + + def __init__( + self, + frame_set: FrameSet, + series_instance_uid: str, + series_number: int, + sop_instance_uid: str, + instance_number: int, + sort_key: Callable = None, + ) -> None: + """ + Parameters + ---------- + + Returns + ------- + + Note + ---- + + """ + """ + Parameters + ---------- + legacy_datasets: Sequence[pydicom.dataset.Dataset] + DICOM data sets of legacy single-frame image instances that should + be converted + series_instance_uid: str + UID of the series + series_number: Union[int, None] + Number of the series within the study + sop_instance_uid: str + UID that should be assigned to the instance + instance_number: int + Number that should be assigned to the instance + """ + legacy_datasets = frame_set.frames + try: + ref_ds = legacy_datasets[0] + except IndexError: + raise ValueError('No DICOM data sets of provided.') + if ref_ds.Modality != 'CT': + raise ValueError( + 'Wrong modality for conversion of legacy CT images.' + ) + if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.2': + raise ValueError( + 'Wrong SOP class for conversion of legacy CT images.' 
+ ) + super().__init__( + frame_set, + series_instance_uid=series_instance_uid, + series_number=series_number, + sop_instance_uid=sop_instance_uid, + instance_number=instance_number, + sort_key=sort_key + ) + self._add_build_blocks_for_ct() + self.convert2mf() + + +class LegacyConvertedEnhancedPETImage(_CommonLegacyConvertedEnhanceImage): + + """SOP class for Legacy Converted Enhanced PET Image instances.""" + + def __init__( + self, + frame_set: FrameSet, + series_instance_uid: str, + series_number: int, + sop_instance_uid: str, + instance_number: int, + sort_key: Callable = None, + ) -> None: + """ + Parameters + ---------- + legacy_datasets: Sequence[pydicom.dataset.Dataset] + DICOM data sets of legacy single-frame image instances that should + be converted + series_instance_uid: str + UID of the series + series_number: Union[int, None] + Number of the series within the study + sop_instance_uid: str + UID that should be assigned to the instance + instance_number: int + Number that should be assigned to the instance + """ + legacy_datasets = frame_set.frames + try: + ref_ds = legacy_datasets[0] + except IndexError: + raise ValueError('No DICOM data sets of provided.') + if ref_ds.Modality != 'PT': + raise ValueError( + 'Wrong modality for conversion of legacy PET images.' + ) + if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.128': + raise ValueError( + 'Wrong SOP class for conversion of legacy PET images.' 
+ ) + super().__init__( + frame_set, + series_instance_uid=series_instance_uid, + series_number=series_number, + sop_instance_uid=sop_instance_uid, + instance_number=instance_number, + sort_key=sort_key + ) + self._add_build_blocks_for_pet() + self.convert2mf() + + +class LegacyConvertedEnhancedMRImage(_CommonLegacyConvertedEnhanceImage): + + """SOP class for Legacy Converted Enhanced MR Image instances.""" + + def __init__( + self, + frame_set: FrameSet, + series_instance_uid: str, + series_number: int, + sop_instance_uid: str, + instance_number: int, + sort_key: Callable = None, + ) -> None: + """ + Parameters + ---------- + + Returns + ------- + + Note + ---- + + """ + """ + Parameters + ---------- + legacy_datasets: Sequence[pydicom.dataset.Dataset] + DICOM data sets of legacy single-frame image instances that should + be converted + series_instance_uid: str + UID of the series + series_number: Union[int, None] + Number of the series within the study + sop_instance_uid: str + UID that should be assigned to the instance + instance_number: int + Number that should be assigned to the instance + """ + legacy_datasets = frame_set.frames + try: + ref_ds = legacy_datasets[0] + except IndexError: + raise ValueError('No DICOM data sets of provided.') + if ref_ds.Modality != 'MR': + raise ValueError( + 'Wrong modality for conversion of legacy MR images.' + ) + if ref_ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.4': + raise ValueError( + 'Wrong SOP class for conversion of legacy MR images.' 
+ ) + super().__init__( + frame_set, + series_instance_uid=series_instance_uid, + series_number=series_number, + sop_instance_uid=sop_instance_uid, + instance_number=instance_number, + sort_key=sort_key + ) + self._add_build_blocks_for_mr() + self.convert2mf() diff --git a/tests/test_legacy.py b/tests/test_legacy.py index c05e5ce0..8397a6bc 100644 --- a/tests/test_legacy.py +++ b/tests/test_legacy.py @@ -327,7 +327,7 @@ def test_frameset_detection(self) -> None: data = data_generator.generate_mixed_framesets( Modality.CT, i, True, True) fset_collection = sop.FrameSetCollection(data) - assert len(fset_collection.FrameSets) == i + assert len(fset_collection.frame_sets) == i def test_frameset_framecount_detection(self) -> None: for i in range(1, 10): @@ -335,306 +335,221 @@ def test_frameset_framecount_detection(self) -> None: data = data_generator.generate_mixed_framesets( Modality.CT, 1, True, True) fset_collection = sop.FrameSetCollection(data) - assert len(fset_collection.FrameSets) == 1 - assert len(fset_collection.FrameSets[0].Frames) == i + assert len(fset_collection.frame_sets) == 1 + assert len(fset_collection.frame_sets[0].frames) == i class TestLegacyConvertedEnhanceImage(unittest.TestCase): def setUp(self) -> None: super().setUp() + self._modalities = ('CT', 'MR', 'PET') + self._dicom_generator = DicomGenerator(slice_per_frameset=5) + self._ref_dataset_seq_CT = \ + self._dicom_generator.generate_mixed_framesets(Modality.CT, 1) + self._ref_dataset_seq_MR = \ + self._dicom_generator.generate_mixed_framesets(Modality.MR, 1) + self._ref_dataset_seq_PET = \ + self._dicom_generator.generate_mixed_framesets(Modality.PT, 1) + self._output_series_instance_uid = generate_uid() + self._output_sop_instance_uid = generate_uid() + self._output_series_number = '1' + self._output_instance_number = '1' def test_conversion(self) -> None: for i in range(1, 10): - for j in range(3): - data_generator = DicomGenerator(i) - data = data_generator.generate_mixed_framesets( - 
Modality(j), 1, True, True) - fset_collection = sop.FrameSetCollection(data) - assert len(fset_collection.FrameSets) == 1 - assert len(fset_collection.FrameSets[0].Frames) == i - convertor = sop.LegacyConvertedEnhanceImage( - fset_collection.FrameSets[0], - generate_uid(), - 555, - generate_uid(), - 111) - convertor.convert2mf() - assert convertor.NumberOfFrames == i - assert convertor.SOPClassUID == \ - sop.LEGACY_ENHANCED_SOP_CLASS_UID_MAP[sop_classes[j][1]] - - -# class TestLegacyConvertedEnhancedImage(unittest.TestCase): - -# def setUp(self): -# super().setUp() -# self._modalities = ('CT', 'MR', 'PET') -# self._ref_dataset_seq_CT = \ -# self.generate_common_dicom_dataset_series(3, Modality.CT) -# self._ref_dataset_seq_MR = \ -# self.generate_common_dicom_dataset_series(3, Modality.MR) -# self._ref_dataset_seq_PET = \ -# self.generate_common_dicom_dataset_series(3, Modality.PT) -# self._output_series_instance_uid = generate_uid() -# self._output_sop_instance_uid = generate_uid() -# self._output_series_number = '1' -# self._output_instance_number = '1' - -# def test_output_attributes(self): -# for m in self._modalities: -# with self.subTest(m=m): -# LegacyConverterClass = getattr( -# sop, -# "LegacyConvertedEnhanced{}Image".format(m) -# ) -# ref_dataset_seq = getattr( -# self, "_ref_dataset_seq_{}".format(m)) - -# multiframe_item = LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# assert multiframe_item.SeriesInstanceUID == \ -# self._output_series_instance_uid -# assert multiframe_item.SOPInstanceUID == \ -# self._output_sop_instance_uid -# assert int(multiframe_item.SeriesNumber) == int( -# self._output_series_number) -# assert int(multiframe_item.InstanceNumber) == int( -# self._output_instance_number) - -# def test_empty_dataset(self): -# for m in 
self._modalities: -# with self.subTest(m=m): -# LegacyConverterClass = getattr( -# sop, -# "LegacyConvertedEnhanced{}Image".format(m) -# ) -# with self.assertRaises(ValueError): -# LegacyConverterClass( -# [], -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) - -# def test_wrong_modality(self): - -# for m in self._modalities: -# with self.subTest(m=m): -# LegacyConverterClass = getattr( -# sop, -# "LegacyConvertedEnhanced{}Image".format(m) -# ) -# ref_dataset_seq = getattr( -# self, "_ref_dataset_seq_{}".format(m)) -# tmp_orig_modality = ref_dataset_seq[0].Modality -# ref_dataset_seq[0].Modality = '' -# with self.assertRaises(ValueError): -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# ref_dataset_seq[0].Modality = tmp_orig_modality - -# def test_wrong_sop_class_uid(self): -# for m in self._modalities: -# with self.subTest(m=m): -# LegacyConverterClass = getattr( -# sop, -# "LegacyConvertedEnhanced{}Image".format(m) -# ) -# ref_dataset_seq = getattr( -# self, "_ref_dataset_seq_{}".format(m)) -# tmp_orig_sop_class_id = ref_dataset_seq[0].SOPClassUID -# ref_dataset_seq[0].SOPClassUID = '1.2.3.4.5.6.7.8.9' -# with self.assertRaises(ValueError): -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# ref_dataset_seq[0].SOPClassUID = tmp_orig_sop_class_id - -# def test_mixed_studies(self): -# for m in self._modalities: -# with self.subTest(m=m): -# LegacyConverterClass = getattr( -# sop, -# 
"LegacyConvertedEnhanced{}Image".format(m) -# ) -# ref_dataset_seq = getattr( -# self, "_ref_dataset_seq_{}".format(m)) -# # first run with intact input - -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# # second run with defected input -# tmp_orig_study_instance_uid = ref_dataset_seq[ -# 0].StudyInstanceUID -# ref_dataset_seq[0].StudyInstanceUID = '1.2.3.4.5.6.7.8.9' -# with self.assertRaises(ValueError): -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# ref_dataset_seq[ -# 0].StudyInstanceUID = tmp_orig_study_instance_uid - -# def test_mixed_series(self): -# for m in self._modalities: -# with self.subTest(m=m): -# LegacyConverterClass = getattr( -# sop, -# "LegacyConvertedEnhanced{}Image".format(m) -# ) -# ref_dataset_seq = getattr( -# self, "_ref_dataset_seq_{}".format(m)) -# # first run with intact input -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# # second run with defected input -# tmp_series_instance_uid = ref_dataset_seq[0].SeriesInstanceUID -# ref_dataset_seq[0].SeriesInstanceUID = '1.2.3.4.5.6.7.8.9' -# with self.assertRaises(ValueError): -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# 
ref_dataset_seq[0].SeriesInstanceUID = tmp_series_instance_uid - -# def test_mixed_transfer_syntax(self): -# for m in self._modalities: -# with self.subTest(m=m): -# LegacyConverterClass = getattr( -# sop, -# "LegacyConvertedEnhanced{}Image".format(m) -# ) -# ref_dataset_seq = getattr( -# self, "_ref_dataset_seq_{}".format(m)) -# # first run with intact input -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# # second run with defected input -# tmp_transfer_syntax_uid = ref_dataset_seq[ -# 0].file_meta.TransferSyntaxUID -# ref_dataset_seq[ -# 0].file_meta.TransferSyntaxUID = '1.2.3.4.5.6.7.8.9' -# with self.assertRaises(ValueError): -# LegacyConverterClass( -# legacy_datasets=ref_dataset_seq, -# series_instance_uid=self._output_series_instance_uid, -# series_number=self._output_instance_number, -# sop_instance_uid=self._output_sop_instance_uid, -# instance_number=self._output_instance_number) -# ref_dataset_seq[ -# 0].file_meta.TransferSyntaxUID = tmp_transfer_syntax_uid - -# def generate_common_dicom_dataset_series(self, slice_count: int, -# system: Modality) -> list: -# output_dataset = [] -# slice_pos = 0 -# slice_thickness = 0 -# study_uid = generate_uid() -# series_uid = generate_uid() -# frame_of_ref_uid = generate_uid() -# date_ = datetime.now().date() -# age = timedelta(days=45 * 365) -# time_ = datetime.now().time() -# cols = 2 -# rows = 2 -# bytes_per_voxel = 2 - -# for i in range(0, slice_count): -# file_meta = Dataset() -# pixel_array = b"\0" * cols * rows * bytes_per_voxel -# file_meta.MediaStorageSOPClassUID = sop_classes[system][1] -# file_meta.MediaStorageSOPInstanceUID = generate_uid() -# file_meta.ImplementationClassUID = generate_uid() - -# tmp_dataset = FileDataset('', {}, file_meta=file_meta, -# preamble=pixel_array) -# 
tmp_dataset.file_meta.TransferSyntaxUID = "1.2.840.10008.1.2.1" -# tmp_dataset.SliceLocation = slice_pos + i * slice_thickness -# tmp_dataset.SliceThickness = slice_thickness -# tmp_dataset.WindowCenter = 1 -# tmp_dataset.WindowWidth = 2 -# tmp_dataset.AcquisitionNumber = 1 -# tmp_dataset.InstanceNumber = i -# tmp_dataset.SeriesNumber = 1 -# tmp_dataset.ImageOrientationPatient =\ -# [1.000000, 0.000000, 0.000000, -# 0.000000, 1.000000, 0.000000] -# tmp_dataset.ImagePositionPatient = [0.0, 0.0, -# tmp_dataset.SliceLocation] -# tmp_dataset.ImageType = ['ORIGINAL', 'PRIMARY', 'AXIAL'] -# tmp_dataset.PixelSpacing = [1, 1] -# tmp_dataset.PatientName = 'John Doe' -# tmp_dataset.FrameOfReferenceUID = frame_of_ref_uid -# tmp_dataset.SOPClassUID = sop_classes[system][1] -# tmp_dataset.SOPInstanceUID = generate_uid() -# tmp_dataset.SeriesInstanceUID = series_uid -# tmp_dataset.StudyInstanceUID = study_uid -# tmp_dataset.BitsAllocated = bytes_per_voxel * 8 -# tmp_dataset.BitsStored = bytes_per_voxel * 8 -# tmp_dataset.HighBit = (bytes_per_voxel * 8 - 1) -# tmp_dataset.PixelRepresentation = 1 -# tmp_dataset.Columns = cols -# tmp_dataset.Rows = rows -# tmp_dataset.SamplesPerPixel = 1 -# tmp_dataset.AccessionNumber = '2' -# tmp_dataset.AcquisitionDate = date_ -# tmp_dataset.AcquisitionTime = datetime.now().time() -# tmp_dataset.AdditionalPatientHistory = 'UTERINE CA PRE-OP EVAL' -# tmp_dataset.ContentDate = date_ -# tmp_dataset.ContentTime = datetime.now().time() -# tmp_dataset.Manufacturer = 'Mnufacturer' -# tmp_dataset.ManufacturerModelName = 'Model' -# tmp_dataset.Modality = sop_classes[system][0] -# tmp_dataset.PatientAge = '064Y' -# tmp_dataset.PatientBirthDate = date_ - age -# tmp_dataset.PatientID = 'ID0001' -# tmp_dataset.PatientIdentityRemoved = 'YES' -# tmp_dataset.PatientPosition = 'FFS' -# tmp_dataset.PatientSex = 'F' -# tmp_dataset.PhotometricInterpretation = 'MONOCHROME2' -# tmp_dataset.PixelData = pixel_array -# tmp_dataset.PositionReferenceIndicator = 'XY' -# 
tmp_dataset.ProtocolName = 'some protocole' -# tmp_dataset.ReferringPhysicianName = '' -# tmp_dataset.SeriesDate = date_ -# tmp_dataset.SeriesDescription = 'test series ' -# tmp_dataset.SeriesTime = time_ -# tmp_dataset.SoftwareVersions = '01' -# tmp_dataset.SpecificCharacterSet = 'ISO_IR 100' -# tmp_dataset.StudyDate = date_ -# tmp_dataset.StudyDescription = 'test study' -# tmp_dataset.StudyID = '' -# if (system == Modality.CT): -# tmp_dataset.RescaleIntercept = 0 -# tmp_dataset.RescaleSlope = 1 -# tmp_dataset.StudyTime = time_ -# output_dataset.append(tmp_dataset) -# return output_dataset + for j, m in enumerate(self._modalities): + with self.subTest(m=m): + LegacyConverterClass = getattr( + sop, + "LegacyConvertedEnhanced{}Image".format(m) + ) + data_generator = DicomGenerator(i) + data = data_generator.generate_mixed_framesets( + Modality(j), 1, True, True) + fset_collection = sop.FrameSetCollection(data) + assert len(fset_collection.frame_sets) == 1 + assert len(fset_collection.frame_sets[0].frames) == i + convertor = LegacyConverterClass( + fset_collection.frame_sets[0], + generate_uid(), + 555, + generate_uid(), + 111) + assert convertor.NumberOfFrames == i + assert convertor.SOPClassUID == \ + sop.LEGACY_ENHANCED_SOP_CLASS_UID_MAP[sop_classes[j][1]] + + def test_output_attributes(self) -> None: + for m in self._modalities: + with self.subTest(m=m): + LegacyConverterClass = getattr( + sop, + "LegacyConvertedEnhanced{}Image".format(m) + ) + ref_dataset_seq = getattr( + self, "_ref_dataset_seq_{}".format(m)) + fset_collection = sop.FrameSetCollection(ref_dataset_seq) + multiframe_item = LegacyConverterClass( + fset_collection.frame_sets[0], + series_instance_uid=self._output_series_instance_uid, + series_number=self._output_instance_number, + sop_instance_uid=self._output_sop_instance_uid, + instance_number=self._output_instance_number) + assert multiframe_item.SeriesInstanceUID == \ + self._output_series_instance_uid + assert multiframe_item.SOPInstanceUID == 
\ + self._output_sop_instance_uid + assert int(multiframe_item.SeriesNumber) == int( + self._output_series_number) + assert int(multiframe_item.InstanceNumber) == int( + self._output_instance_number) + + # def test_empty_dataset(self): + # for m in self._modalities: + # with self.subTest(m=m): + # LegacyConverterClass = getattr( + # sop, + # "LegacyConvertedEnhanced{}Image".format(m) + # ) + # with self.assertRaises(ValueError): + # LegacyConverterClass( + # FrameSet(), + # series_instance_uid=self._output_series_instance_uid, + # series_number=self._output_instance_number, + # sop_instance_uid=self._output_sop_instance_uid, + # instance_number=self._output_instance_number) + + def test_wrong_modality(self) -> None: + + for j, m in enumerate(self._modalities): + with self.subTest(m=m): + LegacyConverterClass = getattr( + sop, + "LegacyConvertedEnhanced{}Image".format(m) + ) + next_idx = (j + 1) % len(self._modalities) + ref_dataset_seq = getattr( + self, "_ref_dataset_seq_{}".format( + self._modalities[next_idx])) + fset_collection = sop.FrameSetCollection(ref_dataset_seq) + with self.assertRaises(ValueError): + LegacyConverterClass( + fset_collection.frame_sets[0], + series_instance_uid=self._output_series_instance_uid, + series_number=self._output_instance_number, + sop_instance_uid=self._output_sop_instance_uid, + instance_number=self._output_instance_number) + + def test_wrong_sop_class_uid(self) -> None: + for m in self._modalities: + with self.subTest(m=m): + LegacyConverterClass = getattr( + sop, + "LegacyConvertedEnhanced{}Image".format(m) + ) + ref_dataset_seq = getattr( + self, "_ref_dataset_seq_{}".format(m)) + tmp_orig_sop_class_id = ref_dataset_seq[0].SOPClassUID + for ddss in ref_dataset_seq: + ddss.SOPClassUID = '1.2.3.4.5.6.7.8.9' + fset_collection = sop.FrameSetCollection(ref_dataset_seq) + with self.assertRaises(ValueError): + LegacyConverterClass( + fset_collection.frame_sets[0], + series_instance_uid=self._output_series_instance_uid, + 
series_number=self._output_instance_number, + sop_instance_uid=self._output_sop_instance_uid, + instance_number=self._output_instance_number) + for ddss in ref_dataset_seq: + ddss.SOPClassUID = tmp_orig_sop_class_id + + # def test_mixed_studies(self): + # for m in self._modalities: + # with self.subTest(m=m): + # LegacyConverterClass = getattr( + # sop, + # "LegacyConvertedEnhanced{}Image".format(m) + # ) + # ref_dataset_seq = getattr( + # self, "_ref_dataset_seq_{}".format(m)) + # # first run with intact input + + # LegacyConverterClass( + # legacy_datasets=ref_dataset_seq, + # series_instance_uid=self._output_series_instance_uid, + # series_number=self._output_instance_number, + # sop_instance_uid=self._output_sop_instance_uid, + # instance_number=self._output_instance_number) + # # second run with defected input + # tmp_orig_study_instance_uid = ref_dataset_seq[ + # 0].StudyInstanceUID + # ref_dataset_seq[0].StudyInstanceUID = '1.2.3.4.5.6.7.8.9' + # with self.assertRaises(ValueError): + # LegacyConverterClass( + # legacy_datasets=ref_dataset_seq, + # series_instance_uid=self._output_series_instance_uid, + # series_number=self._output_instance_number, + # sop_instance_uid=self._output_sop_instance_uid, + # instance_number=self._output_instance_number) + # ref_dataset_seq[ + # 0].StudyInstanceUID = tmp_orig_study_instance_uid + + # def test_mixed_series(self): + # for m in self._modalities: + # with self.subTest(m=m): + # LegacyConverterClass = getattr( + # sop, + # "LegacyConvertedEnhanced{}Image".format(m) + # ) + # ref_dataset_seq = getattr( + # self, "_ref_dataset_seq_{}".format(m)) + # # first run with intact input + # LegacyConverterClass( + # legacy_datasets=ref_dataset_seq, + # series_instance_uid=self._output_series_instance_uid, + # series_number=self._output_instance_number, + # sop_instance_uid=self._output_sop_instance_uid, + # instance_number=self._output_instance_number) + # # second run with defected input + # tmp_series_instance_uid = 
ref_dataset_seq[0].SeriesInstanceUID + # ref_dataset_seq[0].SeriesInstanceUID = '1.2.3.4.5.6.7.8.9' + # with self.assertRaises(ValueError): + # LegacyConverterClass( + # legacy_datasets=ref_dataset_seq, + # series_instance_uid=self._output_series_instance_uid, + # series_number=self._output_instance_number, + # sop_instance_uid=self._output_sop_instance_uid, + # instance_number=self._output_instance_number) + # ref_dataset_seq[0].SeriesInstanceUID = tmp_series_instance_uid + + # def test_mixed_transfer_syntax(self): + # for m in self._modalities: + # with self.subTest(m=m): + # LegacyConverterClass = getattr( + # sop, + # "LegacyConvertedEnhanced{}Image".format(m) + # ) + # ref_dataset_seq = getattr( + # self, "_ref_dataset_seq_{}".format(m)) + # # first run with intact input + # LegacyConverterClass( + # legacy_datasets=ref_dataset_seq, + # series_instance_uid=self._output_series_instance_uid, + # series_number=self._output_instance_number, + # sop_instance_uid=self._output_sop_instance_uid, + # instance_number=self._output_instance_number) + # # second run with defected input + # tmp_transfer_syntax_uid = ref_dataset_seq[ + # 0].file_meta.TransferSyntaxUID + # ref_dataset_seq[ + # 0].file_meta.TransferSyntaxUID = '1.2.3.4.5.6.7.8.9' + # with self.assertRaises(ValueError): + # LegacyConverterClass( + # legacy_datasets=ref_dataset_seq, + # series_instance_uid=self._output_series_instance_uid, + # series_number=self._output_instance_number, + # sop_instance_uid=self._output_sop_instance_uid, + # instance_number=self._output_instance_number) + # ref_dataset_seq[ + # 0].file_meta.TransferSyntaxUID = tmp_transfer_syntax_uid From a6551fb50fbc2bce8f94b80961a563288b26fb0b Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 11 Apr 2021 18:18:04 -0400 Subject: [PATCH 34/44] Modified code based on Markus's comments --- src/highdicom/legacy/sop.py | 82 +++++-------------------------------- 1 file changed, 11 insertions(+), 71 deletions(-) diff --git a/src/highdicom/legacy/sop.py 
b/src/highdicom/legacy/sop.py index 10936905..22bd8f33 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -16,7 +16,11 @@ from highdicom.base import SOPClass from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP -# logger = logging.getLogger(__name__) + + +logger = logging.getLogger(__name__) + + LEGACY_ENHANCED_SOP_CLASS_UID_MAP = { # CT Image Storage '1.2.840.10008.5.1.4.1.1.2': '1.2.840.10008.5.1.4.1.1.2.2', @@ -25,6 +29,8 @@ # PET Image Storage '1.2.840.10008.5.1.4.1.1.128': '1.2.840.10008.5.1.4.1.1.128.1', } + + _SOP_CLASS_UID_IOD_KEY_MAP = { '1.2.840.10008.5.1.4.1.1.2.2': 'legacy-converted-enhanced-ct-image', '1.2.840.10008.5.1.4.1.1.4.4': 'legacy-converted-enhanced-mr-image', @@ -37,94 +43,28 @@ class DicomHelper: """A class for checking dicom tags and comparing dicom attributes""" def __init__(self) -> None: - """ - Parameters - ---------- - - Returns - ------- - - Note - ---- - - """ pass @classmethod def istag_file_meta_information_group(cls, t: BaseTag) -> bool: - """ - Parameters - ---------- - - Returns - ------- - - Note - ---- - - """ return t.group == 0x0002 @classmethod def istag_repeating_group(cls, t: BaseTag) -> bool: - """ - Parameters - ---------- - - Returns - ------- - - Note - ---- - - """ g = t.group return (g >= 0x5000 and g <= 0x501e) or\ (g >= 0x6000 and g <= 0x601e) @classmethod def istag_group_length(cls, t: BaseTag) -> bool: - """ - Parameters - ---------- - - Returns - ------- - - Note - ---- - - """ return t.element == 0 @classmethod def isequal(cls, v1: Any, v2: Any) -> bool: - """ - Parameters - ---------- - - Returns - ------- - - Note - ---- - - """ from pydicom.valuerep import DSfloat float_tolerance = 1.0e-5 def is_equal_float(x1: float, x2: float) -> bool: - """ - Parameters - ---------- - - Returns - ------- - - Note - ---- - - """ return abs(x1 - x2) < float_tolerance if type(v1) != type(v2): return False @@ -2456,7 +2396,7 @@ def 
_add_build_blocks_for_ct(self) -> None: self._add_common_ct_pet_mr_build_blocks() self._add_ct_specific_build_blocks() - def convert2mf(self) -> None: + def _convert2multiframe(self) -> None: """Runs all necessary methods to conver from single frame to multi-frame. @@ -2532,7 +2472,7 @@ def __init__( sort_key=sort_key ) self._add_build_blocks_for_ct() - self.convert2mf() + self._convert2multiframe() class LegacyConvertedEnhancedPETImage(_CommonLegacyConvertedEnhanceImage): @@ -2585,7 +2525,7 @@ def __init__( sort_key=sort_key ) self._add_build_blocks_for_pet() - self.convert2mf() + self._convert2multiframe() class LegacyConvertedEnhancedMRImage(_CommonLegacyConvertedEnhanceImage): @@ -2649,4 +2589,4 @@ def __init__( sort_key=sort_key ) self._add_build_blocks_for_mr() - self.convert2mf() + self._convert2multiframe() From c417da5373debdeb54fdfbeb71620e26bc8ad712 Mon Sep 17 00:00:00 2001 From: afshin Date: Mon, 12 Apr 2021 09:40:44 -0400 Subject: [PATCH 35/44] finished applying Markus's comments --- src/highdicom/legacy/sop.py | 116 ++++++++++++++++++++---------------- tests/test_legacy.py | 101 +++++++++++++++---------------- 2 files changed, 112 insertions(+), 105 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 22bd8f33..9af832e3 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,6 +1,6 @@ """ Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" import logging -from typing import Any, List, Union, Callable +from typing import Any, List, Union, Callable, Sequence from numpy import log10, array, ceil, cross, dot, ndarray from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag from pydicom.dataset import Dataset @@ -38,13 +38,10 @@ } -class DicomHelper: +class Ù€DicomHelper: """A class for checking dicom tags and comparing dicom attributes""" - def __init__(self) -> None: - pass - @classmethod def istag_file_meta_information_group(cls, t: BaseTag) -> bool: return 
t.group == 0x0002 @@ -186,7 +183,6 @@ def are_parallel( """Returns False if two slices are not prallel else True """ - logger = logging.getLogger(__name__) if (not isinstance(slice1, GeometryOfSlice) or not isinstance(slice2, GeometryOfSlice)): logger.warning( @@ -203,7 +199,7 @@ def are_parallel( return True -class PerframeFunctionalGroup(DataElementSequence): +class Ù€PerframeFunctionalGroup(DataElementSequence): """A sequence class for perframe functional group""" @@ -221,7 +217,7 @@ def __init__(self, number_of_frames: int) -> None: self.append(item) -class SharedFunctionalGroup(DataElementSequence): +class Ù€SharedFunctionalGroup(DataElementSequence): """A sequence class for shared functional group""" @@ -317,15 +313,14 @@ def _find_per_frame_and_shared_tags(self) -> None: """Detects and collects all shared and perframe attributes """ - # logger = logging.getLogger(__name__) rough_shared: dict = {} sfs = self.frames for ds in sfs: for ttag, elem in ds.items(): if (not ttag.is_private and not - DicomHelper.istag_file_meta_information_group(ttag) and not - DicomHelper.istag_repeating_group(ttag) and not - DicomHelper.istag_group_length(ttag) and not + Ù€DicomHelper.istag_file_meta_information_group(ttag) and not + Ù€DicomHelper.istag_repeating_group(ttag) and not + Ù€DicomHelper.istag_group_length(ttag) and not self._istag_excluded_from_perframe(ttag) and ttag != tag_for_keyword('PixelData')): elem = ds[ttag] @@ -343,7 +338,7 @@ def _find_per_frame_and_shared_tags(self) -> None: else: all_values_are_equal = True for v_i in v: - if not DicomHelper.isequal(v_i, v[0]): + if not Ù€DicomHelper.isequal(v_i, v[0]): all_values_are_equal = False break if not all_values_are_equal: @@ -366,19 +361,32 @@ class FrameSetCollection: """A calss to extract framesets based on distinguishing dicom attributes""" - def __init__(self, single_frame_list: list) -> None: - """ + def __init__(self, single_frame_list: Sequence[Any]) -> None: + """Forms framesets based on a list of 
distinguishing attributes. + The list of "distinguishing" attributes that are used to determine + commonality is currently fixed, and includes the unique identifying + attributes at the Patient, Study, Equipment levels, the Modality and + SOP Class, and ImageType as well as the characteristics of the Pixel + Data, and those attributes that for cross-sectional images imply + consistent sampling, such as ImageOrientationPatient, PixelSpacing and + SliceThickness, and in addition AcquisitionContextSequence and + BurnedInAnnotation. Parameters ---------- single_frame_list: list lisf of mixed or non-mixed single frame dicom images + Note + ----- + Note that Series identification, specifically SeriesInstanceUID is NOT + a distinguishing attribute; i.e. FrameSets may span Series """ - logger = logging.getLogger(__name__) self.mixed_frames = single_frame_list self.mixed_frames_copy = self.mixed_frames[:] self._distinguishing_attribute_keywords = [ 'PatientID', 'PatientName', + 'StudyInstanceUID', + 'FrameOfReferenceUID', 'Manufacturer', 'InstitutionName', 'InstitutionAddress', @@ -422,7 +430,7 @@ def __init__(self, single_frame_list: list) -> None: logger.debug('\t Distinguishing tags:') for dg_i, dg_tg in enumerate(x[1], 1): logger.debug('\t\t{:02d}/{})\t{}-{:32.32s} = {:32.32s}'.format( - dg_i, len(x[1]), DicomHelper.tag2str(dg_tg), + dg_i, len(x[1]), Ù€DicomHelper.tag2str(dg_tg), keyword_for_tag(dg_tg), str(x[0][0][dg_tg].value))) logger.debug('\t dicom datasets in this frame set:') @@ -453,7 +461,6 @@ def _find_all_similar_to_first_datasets(self) -> tuple: """Takes the fist instance from mixed-frames and finds all dicom images that have the same distinguishing attributes. 
""" - logger = logging.getLogger(__name__) similar_ds: list = [self.mixed_frames_copy[0]] distinguishing_tags_existing = [] distinguishing_tags_missing = [] @@ -471,7 +478,7 @@ def _find_all_similar_to_first_datasets(self) -> tuple: if tg in ds: logger_msg.add( '{} is missing in all but {}'.format( - DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) + Ù€DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) all_equal = False break if not all_equal: @@ -482,11 +489,11 @@ def _find_all_similar_to_first_datasets(self) -> tuple: all_equal = False break new_val = ds[tg].value - if not DicomHelper.isequal(ref_val, new_val): + if not Ù€DicomHelper.isequal(ref_val, new_val): logger_msg.add( 'Inequality on distinguishing ' 'attribute{} -> {} != {} \n series uid = {}'.format( - DicomHelper.tag2kwstr(tg), ref_val, new_val, + Ù€DicomHelper.tag2kwstr(tg), ref_val, new_val, ds.SeriesInstanceUID)) all_equal = False break @@ -496,7 +503,8 @@ def _find_all_similar_to_first_datasets(self) -> tuple: logger.info(msg_) for ds in similar_ds: if ds in self.mixed_frames_copy: - self.mixed_frames_copy.remove(ds) + self.mixed_frames_copy = [ + nds for nds in self.mixed_frames_copy if nds != ds] return (similar_ds, distinguishing_tags_existing) @property @@ -516,7 +524,7 @@ class _CommonLegacyConvertedEnhanceImage(SOPClass): def __init__( self, - frame_set: FrameSet, + legacy_datasets: Sequence[Dataset], series_instance_uid: str, series_number: int, sop_instance_uid: str, @@ -538,17 +546,21 @@ def __init__( instance_number: int Number that should be assigned to the instance """ - legacy_datasets = frame_set.frames try: ref_ds = legacy_datasets[0] except IndexError: raise ValueError('No DICOM data sets of provided.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] + all_framesets = FrameSetCollection(legacy_datasets) + if len(all_framesets.frame_sets) > 1: + raise ValueError( + 'Mixed frames sets: the input single frame list contain more ' + 'than one multiframe collection') 
+ frame_set = all_framesets.frame_sets[0] if sort_key is None: sort_key = _CommonLegacyConvertedEnhanceImage.default_sort_key super().__init__( - study_instance_uid="" if 'StudyInstanceUID' not in ref_ds - else ref_ds.StudyInstanceUID, + study_instance_uid=ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, series_number=series_number, sop_instance_uid=sop_instance_uid, @@ -578,11 +590,11 @@ def __init__( ref_ds else ref_ds.ReferringPhysicianName, ) self._legacy_datasets = legacy_datasets - self._perframe_functional_group = PerframeFunctionalGroup( + self._perframe_functional_group = Ù€PerframeFunctionalGroup( len(legacy_datasets)) tg = tag_for_keyword('PerFrameFunctionalGroupsSequence') self[tg] = DataElement(tg, 'SQ', self._perframe_functional_group) - self._shared_functional_group = SharedFunctionalGroup() + self._shared_functional_group = Ù€SharedFunctionalGroup() tg = tag_for_keyword('SharedFunctionalGroupsSequence') self[tg] = DataElement(tg, 'SQ', self._shared_functional_group) self.distinguishing_attributes_tags = self._get_tag_used_dictionary( @@ -1836,22 +1848,27 @@ def _build_slices_geometry_frame_content(self) -> None: """Instantiates an object of GeometryOfSlice for each sice. 
""" - logger = logging.getLogger(__name__) frame_count = len(self._legacy_datasets) for i in range(0, frame_count): curr_frame = self._legacy_datasets[i] - image_position_patient_v = None \ - if 'ImagePositionPatient' not in curr_frame\ - else curr_frame['ImagePositionPatient'].value - image_orientation_patient_v = None \ - if 'ImageOrientationPatient' not in curr_frame\ - else curr_frame['ImageOrientationPatient'].value - pixel_spacing_v = None \ - if 'PixelSpacing' not in curr_frame\ - else curr_frame['PixelSpacing'].value - slice_thickness_v = 0.0 \ - if 'SliceThickness' not in curr_frame\ - else curr_frame['SliceThickness'].value + if 'ImagePositionPatient' not in curr_frame: + image_position_patient_v = None + else: + image_position_patient_v =\ + curr_frame['ImagePositionPatient'].value + if 'ImageOrientationPatient' not in curr_frame: + image_orientation_patient_v = None + else: + image_orientation_patient_v =\ + curr_frame['ImageOrientationPatient'].value + if 'PixelSpacing' not in curr_frame: + pixel_spacing_v = None + else: + pixel_spacing_v = curr_frame['PixelSpacing'].value + if 'SliceThickness' not in curr_frame: + slice_thickness_v = 0.0 + else: + slice_thickness_v = curr_frame['SliceThickness'].value # slice_location_v = None \ # if 'SliceLocation' not in curr_frame\ # else curr_frame['SliceLocation'].value @@ -1908,7 +1925,6 @@ def _add_stack_info_frame_content(self) -> None: """Adds stack information to the FrameContentSequence dicom attribute. """ - logger = logging.getLogger(__name__) self._build_slices_geometry_frame_content() round_digits = int(ceil(-log10(self._tolerance))) source_series_uid = '' @@ -2401,7 +2417,6 @@ def _convert2multiframe(self) -> None: multi-frame. 
""" - logger = logging.getLogger(__name__) logger.debug('Strt singleframe to multiframe conversion') for fun, args in self.__build_blocks: if not args: @@ -2417,7 +2432,7 @@ class LegacyConvertedEnhancedCTImage(_CommonLegacyConvertedEnhanceImage): def __init__( self, - frame_set: FrameSet, + legacy_datasets: Sequence[Dataset], series_instance_uid: str, series_number: int, sop_instance_uid: str, @@ -2450,7 +2465,6 @@ def __init__( instance_number: int Number that should be assigned to the instance """ - legacy_datasets = frame_set.frames try: ref_ds = legacy_datasets[0] except IndexError: @@ -2464,7 +2478,7 @@ def __init__( 'Wrong SOP class for conversion of legacy CT images.' ) super().__init__( - frame_set, + legacy_datasets, series_instance_uid=series_instance_uid, series_number=series_number, sop_instance_uid=sop_instance_uid, @@ -2481,7 +2495,7 @@ class LegacyConvertedEnhancedPETImage(_CommonLegacyConvertedEnhanceImage): def __init__( self, - frame_set: FrameSet, + legacy_datasets: Sequence[Dataset], series_instance_uid: str, series_number: int, sop_instance_uid: str, @@ -2503,7 +2517,6 @@ def __init__( instance_number: int Number that should be assigned to the instance """ - legacy_datasets = frame_set.frames try: ref_ds = legacy_datasets[0] except IndexError: @@ -2517,7 +2530,7 @@ def __init__( 'Wrong SOP class for conversion of legacy PET images.' 
) super().__init__( - frame_set, + legacy_datasets, series_instance_uid=series_instance_uid, series_number=series_number, sop_instance_uid=sop_instance_uid, @@ -2534,7 +2547,7 @@ class LegacyConvertedEnhancedMRImage(_CommonLegacyConvertedEnhanceImage): def __init__( self, - frame_set: FrameSet, + legacy_datasets: Sequence[Dataset], series_instance_uid: str, series_number: int, sop_instance_uid: str, @@ -2567,7 +2580,6 @@ def __init__( instance_number: int Number that should be assigned to the instance """ - legacy_datasets = frame_set.frames try: ref_ds = legacy_datasets[0] except IndexError: @@ -2581,7 +2593,7 @@ def __init__( 'Wrong SOP class for conversion of legacy MR images.' ) super().__init__( - frame_set, + legacy_datasets, series_instance_uid=series_instance_uid, series_number=series_number, sop_instance_uid=sop_instance_uid, diff --git a/tests/test_legacy.py b/tests/test_legacy.py index 8397a6bc..aec14d3a 100644 --- a/tests/test_legacy.py +++ b/tests/test_legacy.py @@ -312,8 +312,8 @@ def setUp(self) -> None: def test_attribute_equality(self) -> None: for vr, [v1, v2, v3] in self.data.items(): - assert sop.DicomHelper.isequal(v1.value, v2.value) is True - assert sop.DicomHelper.isequal(v1.value, v3.value) is False + assert sop.Ù€DicomHelper.isequal(v1.value, v2.value) is True + assert sop.Ù€DicomHelper.isequal(v1.value, v3.value) is False class TestFrameSetCollection(unittest.TestCase): @@ -371,7 +371,7 @@ def test_conversion(self) -> None: assert len(fset_collection.frame_sets) == 1 assert len(fset_collection.frame_sets[0].frames) == i convertor = LegacyConverterClass( - fset_collection.frame_sets[0], + data, generate_uid(), 555, generate_uid(), @@ -389,9 +389,8 @@ def test_output_attributes(self) -> None: ) ref_dataset_seq = getattr( self, "_ref_dataset_seq_{}".format(m)) - fset_collection = sop.FrameSetCollection(ref_dataset_seq) multiframe_item = LegacyConverterClass( - fset_collection.frame_sets[0], + ref_dataset_seq, 
series_instance_uid=self._output_series_instance_uid, series_number=self._output_instance_number, sop_instance_uid=self._output_sop_instance_uid, @@ -405,20 +404,20 @@ def test_output_attributes(self) -> None: assert int(multiframe_item.InstanceNumber) == int( self._output_instance_number) - # def test_empty_dataset(self): - # for m in self._modalities: - # with self.subTest(m=m): - # LegacyConverterClass = getattr( - # sop, - # "LegacyConvertedEnhanced{}Image".format(m) - # ) - # with self.assertRaises(ValueError): - # LegacyConverterClass( - # FrameSet(), - # series_instance_uid=self._output_series_instance_uid, - # series_number=self._output_instance_number, - # sop_instance_uid=self._output_sop_instance_uid, - # instance_number=self._output_instance_number) + def test_empty_dataset(self) -> None: + for m in self._modalities: + with self.subTest(m=m): + LegacyConverterClass = getattr( + sop, + "LegacyConvertedEnhanced{}Image".format(m) + ) + with self.assertRaises(ValueError): + LegacyConverterClass( + [], + series_instance_uid=self._output_series_instance_uid, + series_number=self._output_instance_number, + sop_instance_uid=self._output_sop_instance_uid, + instance_number=self._output_instance_number) def test_wrong_modality(self) -> None: @@ -432,10 +431,9 @@ def test_wrong_modality(self) -> None: ref_dataset_seq = getattr( self, "_ref_dataset_seq_{}".format( self._modalities[next_idx])) - fset_collection = sop.FrameSetCollection(ref_dataset_seq) with self.assertRaises(ValueError): LegacyConverterClass( - fset_collection.frame_sets[0], + ref_dataset_seq, series_instance_uid=self._output_series_instance_uid, series_number=self._output_instance_number, sop_instance_uid=self._output_sop_instance_uid, @@ -453,10 +451,9 @@ def test_wrong_sop_class_uid(self) -> None: tmp_orig_sop_class_id = ref_dataset_seq[0].SOPClassUID for ddss in ref_dataset_seq: ddss.SOPClassUID = '1.2.3.4.5.6.7.8.9' - fset_collection = sop.FrameSetCollection(ref_dataset_seq) with 
self.assertRaises(ValueError): LegacyConverterClass( - fset_collection.frame_sets[0], + ref_dataset_seq, series_instance_uid=self._output_series_instance_uid, series_number=self._output_instance_number, sop_instance_uid=self._output_sop_instance_uid, @@ -464,36 +461,34 @@ def test_wrong_sop_class_uid(self) -> None: for ddss in ref_dataset_seq: ddss.SOPClassUID = tmp_orig_sop_class_id - # def test_mixed_studies(self): - # for m in self._modalities: - # with self.subTest(m=m): - # LegacyConverterClass = getattr( - # sop, - # "LegacyConvertedEnhanced{}Image".format(m) - # ) - # ref_dataset_seq = getattr( - # self, "_ref_dataset_seq_{}".format(m)) - # # first run with intact input - - # LegacyConverterClass( - # legacy_datasets=ref_dataset_seq, - # series_instance_uid=self._output_series_instance_uid, - # series_number=self._output_instance_number, - # sop_instance_uid=self._output_sop_instance_uid, - # instance_number=self._output_instance_number) - # # second run with defected input - # tmp_orig_study_instance_uid = ref_dataset_seq[ - # 0].StudyInstanceUID - # ref_dataset_seq[0].StudyInstanceUID = '1.2.3.4.5.6.7.8.9' - # with self.assertRaises(ValueError): - # LegacyConverterClass( - # legacy_datasets=ref_dataset_seq, - # series_instance_uid=self._output_series_instance_uid, - # series_number=self._output_instance_number, - # sop_instance_uid=self._output_sop_instance_uid, - # instance_number=self._output_instance_number) - # ref_dataset_seq[ - # 0].StudyInstanceUID = tmp_orig_study_instance_uid + def test_mixed_studies(self) -> None: + for m in self._modalities: + with self.subTest(m=m): + LegacyConverterClass = getattr( + sop, + "LegacyConvertedEnhanced{}Image".format(m) + ) + ref_dataset_seq = getattr( + self, "_ref_dataset_seq_{}".format(m)) + LegacyConverterClass( + legacy_datasets=ref_dataset_seq, + series_instance_uid=self._output_series_instance_uid, + series_number=self._output_instance_number, + sop_instance_uid=self._output_sop_instance_uid, + 
instance_number=self._output_instance_number) + # second run with defected input + tmp_orig_study_instance_uid = ref_dataset_seq[ + 0].StudyInstanceUID + ref_dataset_seq[0].StudyInstanceUID = '1.2.3.4.5.6.7.8.9' + with self.assertRaises(ValueError): + LegacyConverterClass( + legacy_datasets=ref_dataset_seq, + series_instance_uid=self._output_series_instance_uid, + series_number=self._output_instance_number, + sop_instance_uid=self._output_sop_instance_uid, + instance_number=self._output_instance_number) + ref_dataset_seq[ + 0].StudyInstanceUID = tmp_orig_study_instance_uid # def test_mixed_series(self): # for m in self._modalities: From 831e3da76de4ee43ff0a54d516e38e91f4689960 Mon Sep 17 00:00:00 2001 From: afshin Date: Wed, 14 Apr 2021 23:27:04 -0400 Subject: [PATCH 36/44] Finished applying Markus/Andrey's commnets --- src/highdicom/legacy/sop.py | 643 +++++++++++++++++++++++------------- 1 file changed, 421 insertions(+), 222 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 9af832e3..0645540c 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -1,6 +1,13 @@ -""" Module for SOP Classes of Legacy Converted Enhanced Image IODs.""" +""" Module for SOP Classes of Legacy Converted Enhanced Image IODs. 
+For the most part the single frame to multi-frame conversion logic is taken +from `PixelMed `_ by David Clunie + +""" import logging from typing import Any, List, Union, Callable, Sequence +from datetime import datetime, timedelta +from copy import deepcopy + from numpy import log10, array, ceil, cross, dot, ndarray from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag from pydicom.dataset import Dataset @@ -8,9 +15,7 @@ from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DataElementSequence from pydicom.multival import MultiValue -from datetime import datetime, timedelta from pydicom.valuerep import DT, DA, TM -from copy import deepcopy from pydicom.uid import UID from highdicom.base import SOPClass @@ -57,9 +62,8 @@ def istag_group_length(cls, t: BaseTag) -> bool: return t.element == 0 @classmethod - def isequal(cls, v1: Any, v2: Any) -> bool: + def isequal(cls, v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool: from pydicom.valuerep import DSfloat - float_tolerance = 1.0e-5 def is_equal_float(x1: float, x2: float) -> bool: return abs(x1 - x2) < float_tolerance @@ -67,7 +71,8 @@ def is_equal_float(x1: float, x2: float) -> bool: return False if isinstance(v1, DataElementSequence): for item1, item2 in zip(v1, v2): - cls.isequal_dicom_dataset(item1, item2) + if not cls.isequal_dicom_dataset(item1, item2): + return False if not isinstance(v1, MultiValue): v11 = [v1] v22 = [v2] @@ -90,14 +95,15 @@ def isequal_dicom_dataset(cls, ds1: Dataset, ds2: Dataset) -> bool: """Checks if two dicom dataset have the same value in all attributes Parameters ---------- - ds1: Dataset + ds1: pydicom.dataset.Dataset 1st dicom dataset - ds2: Dataset + ds2: pydicom.dataset.Dataset 2nd dicom dataset Returns ------- True of dicom datasets are equal otherwise False """ + if type(ds1) != type(ds2): return False if not isinstance(ds1, Dataset): @@ -115,6 +121,7 @@ def tag2str(cls, tg: BaseTag) -> str: """Converts tag to hex format 
like (group, element) """ + if not isinstance(tg, BaseTag): tg = Tag(tg) return '(0x{:0>4x}, 0x{:0>4x})'.format(tg.group, tg.element) @@ -124,65 +131,71 @@ def tag2kwstr(cls, tg: BaseTag) -> str: """Converts tag to keyword and (group, element) form """ + return '{}-{:32.32s}'.format( cls.tag2str(tg), keyword_for_tag(tg)) class GeometryOfSlice: - """A class for checking dicom slices geomtery/parallelization""" + """A class for checking dicom slices geometry/parallelity""" def __init__(self, row_vector: ndarray, col_vector: ndarray, top_left_corner_pos: ndarray, - voxel_spaceing: ndarray, + voxel_spacing: ndarray, dimensions: tuple) -> None: """ Parameters ---------- - row_vector: ndarray + row_vector: numpy.ndarray 3D vector representing row of the input slice - col_vector: ndarray - 3D vector representing column the input aslice - top_left_corner_pos: ndarray - 3D point representing top left coner position of the input slice - voxel_spaceing: ndarray - Three element array. 1st and 2nd copied from PexelSpacing and the + col_vector: numpy.ndarray + 3D vector representing column the input slice + top_left_corner_pos: numpy.ndarray + 3D point representing top left corner position of the input slice + voxel_spacing: numpy.ndarray + Three element array. 
1st and 2nd copied from PixelSpacing and the 3rd copied from SliceThickness dimensions: tuple 3 element tuple holding x as number of cols, y as number of rows and z as 1 """ - self.RowVector = row_vector - self.ColVector = col_vector - self.TopLeftCornerPosition = top_left_corner_pos - self.VoxelSpacing = voxel_spaceing + + self.row_vector = row_vector + self.col_vector = col_vector + self.top_left_corner_position = top_left_corner_pos + self.voxel_spacing = voxel_spacing self.Dim = dimensions def get_normal_vector(self) -> ndarray: """Returns the normal vector of the input slice """ - n: ndarray = cross(self.RowVector, self.ColVector) + + n: ndarray = cross(self.row_vector, self.col_vector) n[2] = -n[2] return n def get_distance_along_origin(self) -> float: - """Returns the shortest distince of the slice from the origin + """Returns the shortest distance of the slice from the origin """ + n = self.get_normal_vector() return float( - dot(self.TopLeftCornerPosition, n)) + dot(self.top_left_corner_position, n)) def are_parallel( slice1: Any, slice2: Any, - tolerance: float = 0.0001) -> bool: - """Returns False if two slices are not prallel else True + tolerance: float = 0.0001, + ) -> bool: + """Returns False if two slices are not parallel else True """ + if (not isinstance(slice1, GeometryOfSlice) or not isinstance(slice2, GeometryOfSlice)): logger.warning( @@ -199,7 +212,7 @@ def are_parallel( return True -class Ù€PerframeFunctionalGroup(DataElementSequence): +class Ù€PerframeFunctionalGroups(DataElementSequence): """A sequence class for perframe functional group""" @@ -211,20 +224,22 @@ def __init__(self, number_of_frames: int) -> None: The perframe functional group sequence will have items equal to the whole number of frames """ + super().__init__() for i in range(0, number_of_frames): item = Dataset() self.append(item) -class Ù€SharedFunctionalGroup(DataElementSequence): +class Ù€SharedFunctionalGroups(DataElementSequence): """A sequence class for shared 
functional group""" def __init__(self) -> None: - """Construncts a shared frame functional group holding only one item + """Constructs a shared frame functional group holding only one item """ + super().__init__() item = Dataset() self.append(item) @@ -233,20 +248,25 @@ def __init__(self) -> None: class FrameSet: """ + A class containing the dicom frames that hold equal distinguishing attributes to detect all perframe and shared dicom attributes """ - def __init__(self, single_frame_list: list, - distinguishing_tags: list) -> None: + def __init__( + self, + single_frame_list: List[Dataset], + distinguishing_tags: List[BaseTag], + ) -> None: """ Parameters ---------- - single_frame_list: list + single_frame_list: List[pydicom.dataset.Dataset] list of single frames that have equal distinguising attributes - distinguishing_tags: list + distinguishing_tags: List[pydicom.tag.BaseTag] list of distinguishing attributes tags """ + self._frames = single_frame_list self._distinguishing_attributes_tags = distinguishing_tags tmp = [ @@ -254,10 +274,10 @@ def __init__(self, single_frame_list: list, tag_for_keyword('AcquisitionDate'), tag_for_keyword('AcquisitionTime'), tag_for_keyword('SpecificCharacterSet')] - self._excluded_fromperframe_tags =\ - self.distinguishing_attributes_tags + tmp - self._perframe_tags: list = [] - self._shared_tags: list = [] + self._excluded_from_perframe_tags =\ + self._distinguishing_attributes_tags + tmp + self._perframe_tags: List[BaseTag] = [] + self._shared_tags: List[BaseTag] = [] self._find_per_frame_and_shared_tags() @property @@ -270,7 +290,7 @@ def distinguishing_attributes_tags(self) -> List[Tag]: @property def excluded_from_perframe_tags(self) -> List[Tag]: - return self._excluded_fromperframe_tags[:] + return self._excluded_from_perframe_tags[:] @property def perframe_tags(self) -> List[Tag]: @@ -281,38 +301,43 @@ def shared_tags(self) -> List[Tag]: return self._shared_tags[:] @property - def SeriesInstanceUID(self) -> UID: - """Returns 
the sereis instance uid of the FrameSet + def series_instance_uid(self) -> UID: + """Returns the series instance uid of the FrameSet """ + return self._frames[0].SeriesInstanceUID @property - def StudyInstanceUID(self) -> UID: + def study_instance_uid(self) -> UID: """Returns the study instance uid of the FrameSet """ + return self._frames[0].StudyInstanceUID def get_sop_instance_uid_list(self) -> list: """Returns a list containing all SOPInstanceUID of the FrameSet """ - OutputList: list = [] + + output_list: List[UID] = [] for f in self._frames: - OutputList.append(f.SOPInstanceUID) - return OutputList + output_list.append(f.SOPInstanceUID) + return output_list def get_sop_class_uid(self) -> UID: """Returns the sop class uid of the FrameSet """ + return self._frames[0].SOPClassUID def _find_per_frame_and_shared_tags(self) -> None: """Detects and collects all shared and perframe attributes """ + rough_shared: dict = {} sfs = self.frames for ds in sfs: @@ -354,12 +379,12 @@ def _find_per_frame_and_shared_tags(self) -> None: self._perframe_tags.remove(t) def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: - return t in self.excluded_from_perframe_tags + return t in self._excluded_from_perframe_tags class FrameSetCollection: - """A calss to extract framesets based on distinguishing dicom attributes""" + """A class to extract framesets based on distinguishing dicom attributes""" def __init__(self, single_frame_list: Sequence[Any]) -> None: """Forms framesets based on a list of distinguishing attributes. @@ -373,13 +398,14 @@ def __init__(self, single_frame_list: Sequence[Any]) -> None: BurnedInAnnotation. Parameters ---------- - single_frame_list: list - lisf of mixed or non-mixed single frame dicom images + single_frame_list: Sequence[Any] + list of mixed or non-mixed single frame dicom images Note ----- Note that Series identification, specifically SeriesInstanceUID is NOT a distinguishing attribute; i.e. 
FrameSets may span Series """ + self.mixed_frames = single_frame_list self.mixed_frames_copy = self.mixed_frames[:] self._distinguishing_attribute_keywords = [ @@ -416,7 +442,7 @@ def __init__(self, single_frame_list: Sequence[Any]) -> None: 'SliceThickness', 'AcquisitionContextSequence'] to_be_removed_from_distinguishing_attribs: set = set() - self._frame_sets: list = [] + self._frame_sets: List[FrameSet] = [] frame_counts = [] frameset_counter = 0 while len(self.mixed_frames_copy) != 0: @@ -445,23 +471,24 @@ def __init__(self, single_frame_list: Sequence[Any]) -> None: logger.info(frames) for kw in to_be_removed_from_distinguishing_attribs: self.distinguishing_attribute_keywords.remove(kw) - self.excluded_from_perframe_tags = {} + self._excluded_from_perframe_tags = {} for kwkw in self.distinguishing_attribute_keywords: - self.excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False - self.excluded_from_perframe_tags[ + self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False + self._excluded_from_perframe_tags[ tag_for_keyword('AcquisitionDateTime')] = False - self.excluded_from_perframe_tags[ + self._excluded_from_perframe_tags[ tag_for_keyword('AcquisitionDate')] = False - self.excluded_from_perframe_tags[ + self._excluded_from_perframe_tags[ tag_for_keyword('AcquisitionTime')] = False - self.ExcludedFromFunctionalGroupsTags = { + self.excluded_from_functional_groups_tags = { tag_for_keyword('SpecificCharacterSet'): False} def _find_all_similar_to_first_datasets(self) -> tuple: """Takes the fist instance from mixed-frames and finds all dicom images that have the same distinguishing attributes. 
""" - similar_ds: list = [self.mixed_frames_copy[0]] + + similar_ds: List[Dataset] = [self.mixed_frames_copy[0]] distinguishing_tags_existing = [] distinguishing_tags_missing = [] self.mixed_frames_copy = self.mixed_frames_copy[1:] @@ -510,11 +537,13 @@ def _find_all_similar_to_first_datasets(self) -> tuple: @property def distinguishing_attribute_keywords(self) -> List[str]: """Returns the list of all distinguising attributes found.""" + return self._distinguishing_attribute_keywords[:] @property def frame_sets(self) -> List[FrameSet]: """Returns the list of all FrameSets found.""" + return self._frame_sets @@ -546,6 +575,7 @@ def __init__( instance_number: int Number that should be assigned to the instance """ + try: ref_ds = legacy_datasets[0] except IndexError: @@ -590,26 +620,26 @@ def __init__( ref_ds else ref_ds.ReferringPhysicianName, ) self._legacy_datasets = legacy_datasets - self._perframe_functional_group = Ù€PerframeFunctionalGroup( + self._perframe_functional_groups = Ù€PerframeFunctionalGroups( len(legacy_datasets)) tg = tag_for_keyword('PerFrameFunctionalGroupsSequence') - self[tg] = DataElement(tg, 'SQ', self._perframe_functional_group) - self._shared_functional_group = Ù€SharedFunctionalGroup() + self[tg] = DataElement(tg, 'SQ', self._perframe_functional_groups) + self._shared_functional_groups = Ù€SharedFunctionalGroups() tg = tag_for_keyword('SharedFunctionalGroupsSequence') - self[tg] = DataElement(tg, 'SQ', self._shared_functional_group) - self.distinguishing_attributes_tags = self._get_tag_used_dictionary( + self[tg] = DataElement(tg, 'SQ', self._shared_functional_groups) + self._distinguishing_attributes_tags = self._get_tag_used_dictionary( frame_set.distinguishing_attributes_tags) - self.excluded_from_perframe_tags = self._get_tag_used_dictionary( + self._excluded_from_perframe_tags = self._get_tag_used_dictionary( frame_set.excluded_from_perframe_tags) self._perframe_tags = self._get_tag_used_dictionary( frame_set.perframe_tags) 
self._shared_tags = self._get_tag_used_dictionary( frame_set.shared_tags) - self.ExcludedFromFunctionalGroupsTags = { + self.excluded_from_functional_groups_tags = { tag_for_keyword('SpecificCharacterSet'): False} # -------------------------------------------------------------------- - self.__build_blocks: list = [] + self.__build_blocks: List[Any] = [] # == == == == == == == == == == == == == == == == == == == == == == == new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): @@ -706,7 +736,7 @@ def __init__( self.farthest_future_date = DA('99991231') self.farthest_future_time = TM('235959') self.farthest_future_date_time = DT('99991231235959') - self._slices: list = [] + self._slices: List[GeometryOfSlice] = [] self._tolerance = 0.0001 self._slice_location_map: dict = {} self._byte_data = bytearray() @@ -727,7 +757,12 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: """Takes a dicom DataElement and check if DataElement is empty or in case of Sequence returns True if there is not item or all the items are empty. + Parameters + ---------- + attrib: pydicom.dataelem.DataElement + input DICOM attribute whose emptiness will be checked. 
""" + if attribute.is_empty: return True if isinstance(attribute.value, DataElementSequence): @@ -744,27 +779,33 @@ def _mark_tag_as_used(self, tg: BaseTag) -> None: """Checks what group the input tag belongs to and marks it as used to keep track of all used and unused tags """ + if tg in self._shared_tags: self._shared_tags[tg] = True - elif tg in self.excluded_from_perframe_tags: - self.excluded_from_perframe_tags[tg] = True + elif tg in self._excluded_from_perframe_tags: + self._excluded_from_perframe_tags[tg] = True elif tg in self._perframe_tags: self._perframe_tags[tg] = True - def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, - src_kw_or_tg: str, dest_kw_or_tg: str = None, - check_not_to_be_perframe: bool = True, - check_not_to_be_empty: bool = False) -> None: + def _copy_attrib_if_present( + self, + src_ds: Dataset, + dest_ds: Dataset, + src_kw_or_tg: str, + dest_kw_or_tg: str = None, + check_not_to_be_perframe: bool = True, + check_not_to_be_empty: bool = False + ) -> None: """Copies a dicom attribute value from a keyword in the source Dataset to a similar or different keyword in the destination Dataset Parameters ---------- - src_ds: Dataset + src_ds: pydicom.dataset.Dataset Source Dataset to copy the attribute from. - dest_ds: Dataset + dest_ds: pydicom.dataset.Dataset Destination Dataset to copy the attribute to. src_kw_or_tg: str - The keyword from the souce Dataset to copy its value. + The keyword from the source Dataset to copy its value. dest_kw_or_tg: str = None The keyword of the destination Dataset, the value is copied to. If its value is None, then the destination keyword will be exactly the @@ -776,6 +817,7 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, If this arg is true, then copy is aborted if the source attribute is empty. 
""" + if isinstance(src_kw_or_tg, str): src_kw_or_tg = tag_for_keyword(src_kw_or_tg) if dest_kw_or_tg is None: @@ -802,23 +844,29 @@ def _copy_attrib_if_present(self, src_ds: Dataset, dest_ds: Dataset, self._mark_tag_as_used(src_kw_or_tg) def _get_or_create_attribute( - self, src: Dataset, kw: Union[str, Tag], default: Any) -> DataElement: + self, + src: Dataset, + kw: Union[str, Tag], + default: Any, + ) -> DataElement: """Creates a new DataElement with a value copied from the source Dataset. If the attribute is absent in source Dataset, then its value will be the default value. Parameters ---------- - src: Dataset + src: pydicom.dataset.Dataset Source Dataset to copy the value if available. kw: Union[str, Tag] - The keyword of created DataElement. + The keyword for created DataElement. default: Any The default value created DataElement if the keyword was absent in the source Dataset. Returns ------- - A new DataElement created. + pydicom.dataelem.DataElement + A new DataElement created. """ + if kw is str: tg = tag_for_keyword(kw) else: @@ -850,17 +898,21 @@ def _get_or_create_attribute( self._mark_tag_as_used(tg) return a - def _add_module(self, module_name: str, excepted_attributes: list = [], - check_not_to_be_perframe: bool = True, - check_not_to_be_empty: bool = False) -> None: + def _add_module( + self, + module_name: str, + excepted_attributes: List[str] = [], + check_not_to_be_perframe: bool = True, + check_not_to_be_empty: bool = False + ) -> None: """Copies all attribute of a particular module to current SOPClass, excepting the excepted_attributes, from a reference frame (the first frame on the single frame list). Parameters ---------- module_name: str: - A hiphenated module name like `image-pixel`. - excepted_attributes: list = [] + A hyphenated module name like `image-pixel`. 
+ excepted_attributes: List[str] = [] List of all attributes that are not allowed to be copied check_not_to_be_perframe: bool = True If this flag is true, then the perframe attributes will not be @@ -868,7 +920,8 @@ def _add_module(self, module_name: str, excepted_attributes: list = [], check_not_to_be_empty: bool = False If this flag is true, then the empty attributes will not be copied. """ - attribs: list = MODULE_ATTRIBUTE_MAP[module_name] + + attribs: List[dict] = MODULE_ATTRIBUTE_MAP[module_name] ref_dataset = self._legacy_datasets[0] for a in attribs: kw: str = a['keyword'] @@ -884,6 +937,7 @@ def _add_module_to_mf_image_pixel(self) -> None: """Copies/adds` a/an image_pixel` multiframe module to the current SOPClass from its single frame source. """ + module_and_excepted_at = { "image-pixel": [ @@ -905,6 +959,7 @@ def _add_module_to_mf_enhanced_common_image(self) -> None: """Copies/adds a/an `enhanced_common_image` multiframe module to the current SOPClass from its single frame source. """ + ref_dataset = self._legacy_datasets[0] attribs_to_be_added = [ 'ContentQualification', @@ -962,12 +1017,14 @@ def _add_module_to_mf_contrast_bolus(self) -> None: """Copies/adds a/an `contrast_bolus` multiframe module to the current SOPClass from its single frame source. """ + self._add_module('contrast-bolus') def _add_module_to_mf_enhanced_ct_image(self) -> None: """Copies/adds a/an `enhanced_ct_image` multiframe module to the current SOPClass from its single frame source. """ + pass # David's code doesn't hold anything for this module ... should ask him @@ -986,6 +1043,7 @@ def _add_module_to_mf_enhanced_mr_image(self) -> None: """Copies/adds a/an `enhanced_mr_image` multiframe module to the current SOPClass from its single frame source. 
""" + self._copy_attrib_if_present( self._legacy_datasets[0], self, @@ -1031,6 +1089,7 @@ def _add_module_to_mf_acquisition_context(self) -> None: """Copies/adds a/an `acquisition_context` multiframe module to the current SOPClass from its single frame source. """ + tg = tag_for_keyword('AcquisitionContextSequence') if tg not in self._perframe_tags: self[tg] = self._get_or_create_attribute( @@ -1039,10 +1098,21 @@ def _add_module_to_mf_acquisition_context(self) -> None: None) def _get_value_for_frame_type( - self, attrib: DataElement) -> Union[list, None]: + self, + attrib: DataElement, + ) -> Union[list, None]: """Guesses the appropriate FrameType attribute value from ImageType. - + Parameters + ---------- + attrib: pydicom.dataelem.DataElement + source attribute from which the frame type is inferred. + Returns + ------- + Union[list, None] + A new list of FrameType value is returned. If attrib is not of type + DataElement None is returned. """ + if not isinstance(attrib, DataElement): return None output = ['', '', '', ''] @@ -1055,10 +1125,21 @@ def _get_value_for_frame_type( return output def _get_frame_type_seq_tag( - self, modality: str) -> int: + self, + modality: str, + ) -> int: """Detects the correct tag/keyword for the frame type sq based on the modality name. + Parameters + ---------- + modality: str: + A string representing DICOM image Modality. + Returns + ------- + int: + Appropriate DICOM tag integer is returned. 
""" + seq_kw = '{}{}FrameTypeSequence' if modality == 'PET': seq_kw = seq_kw.format(modality, '') @@ -1067,15 +1148,19 @@ def _get_frame_type_seq_tag( return tag_for_keyword(seq_kw) def _add_module_to_dataset_common_ct_mr_pet_image_description( - self, source: Dataset, destination: Dataset, level: int) -> None: + self, + source: Dataset, + destination: Dataset, + level: int, + ) -> None: """Copies/adds attributes related to `common_ct_mr_pet_image_description` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. @@ -1085,6 +1170,7 @@ def _add_module_to_dataset_common_ct_mr_pet_image_description( destination attributes will be in functional groups items like `FrameType` """ + frame_type_a = source['ImageType'] if level == 0: frame_type_tg = tag_for_keyword('ImageType') @@ -1106,17 +1192,20 @@ def element_generator(kw: str, val: Any) -> DataElement: 'VolumeBasedCalculationTechnique', "NONE") def _add_module_to_mf_common_ct_mr_pet_image_description( - self, modality: str) -> None: - """Copies/adds the common attrabutes for ct/mr/pet description + self, + modality: str, + ) -> None: + """Copies/adds the common attributes for ct/mr/pet description module to the current SOPClass from its single frame source. 
""" + im_type_tag = tag_for_keyword('ImageType') seq_tg = self._get_frame_type_seq_tag(modality) if im_type_tag not in self._perframe_tags: self._add_module_to_dataset_common_ct_mr_pet_image_description( self._legacy_datasets[0], self, 0) # ---------------------------- - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] inner_item = Dataset() self._add_module_to_dataset_common_ct_mr_pet_image_description( self._legacy_datasets[0], inner_item, 1) @@ -1124,7 +1213,7 @@ def _add_module_to_mf_common_ct_mr_pet_image_description( seq_tg, 'SQ', DataElementSequence([inner_item])) else: for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] inner_item = Dataset() self._add_module_to_dataset_common_ct_mr_pet_image_description( self._legacy_datasets[i], inner_item, 1) @@ -1135,28 +1224,32 @@ def _add_module_to_mf_composite_instance_contex(self) -> None: """Copies/adds a/an `composite_instance_contex` multiframe module to the current SOPClass from its single frame source. """ - for module_name, excpeted_a in self._module_excepted_list.items(): + + for module_name, excepted_a in self._module_excepted_list.items(): self._add_module( module_name, - excepted_attributes=excpeted_a, + excepted_attributes=excepted_a, check_not_to_be_empty=False, check_not_to_be_perframe=True) # don't check the perframe set def _add_module_to_dataset_frame_anatomy( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `frame_anatomy` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. 
The destination Dataset usually is an item from a perframe/shared functional group sequence. """ - # David's code is more complicaated than mine + # David's code is more complicated than mine # Should check it out later. fa_seq_tg = tag_for_keyword('FrameAnatomySequence') item = Dataset() @@ -1195,6 +1288,7 @@ def _has_frame_anatomy(self, tags: dict) -> bool: `frame_anatomy` present in source single frames. Otherwise returns false. """ + laterality_tg = tag_for_keyword('Laterality') im_laterality_tg = tag_for_keyword('ImageLaterality') bodypart_tg = tag_for_keyword('BodyPartExamined') @@ -1208,16 +1302,17 @@ def _add_module_to_mf_frame_anatomy(self) -> None: """Copies/adds a/an `frame_anatomy` multiframe module to the current SOPClass from its single frame source. """ + if (not self._has_frame_anatomy(self._perframe_tags) and (self._has_frame_anatomy(self._shared_tags) or - self._has_frame_anatomy(self.excluded_from_perframe_tags)) + self._has_frame_anatomy(self._excluded_from_perframe_tags)) ): - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] self._add_module_to_dataset_frame_anatomy( self._legacy_datasets[0], item) elif self._has_frame_anatomy(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_frame_anatomy( self._legacy_datasets[i], item) @@ -1226,6 +1321,7 @@ def _has_pixel_measures(self, tags: dict) -> bool: `pixel_measures` present in source single frames. Otherwise returns false. 
""" + pixel_spacing_tg = tag_for_keyword('PixelSpacing') slice_thickness_tg = tag_for_keyword('SliceThickness') imager_pixel_spacing_tg = tag_for_keyword('ImagerPixelSpacing') @@ -1234,19 +1330,23 @@ def _has_pixel_measures(self, tags: dict) -> bool: imager_pixel_spacing_tg in tags) def _add_module_to_dataset_pixel_measures( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `pixel_measures` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() self._copy_attrib_if_present(source, item, @@ -1274,16 +1374,17 @@ def _add_module_to_mf_pixel_measures(self) -> None: """Copies/adds a/an `pixel_measures` multiframe module to the current SOPClass from its single frame source. """ + if (not self._has_pixel_measures(self._perframe_tags) and (self._has_pixel_measures(self._shared_tags) or - self._has_pixel_measures(self.excluded_from_perframe_tags)) + self._has_pixel_measures(self._excluded_from_perframe_tags)) ): - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] self._add_module_to_dataset_pixel_measures( self._legacy_datasets[0], item) elif self._has_pixel_measures(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_pixel_measures( self._legacy_datasets[i], item) @@ -1292,23 +1393,28 @@ def _has_plane_position(self, tags: dict) -> bool: `plane_position` present in source single frames. Otherwise returns false. 
""" + image_position_patient_tg = tag_for_keyword('ImagePositionPatient') return image_position_patient_tg in tags def _add_module_to_dataset_plane_position( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `plane_position` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() self._copy_attrib_if_present(source, item, @@ -1326,16 +1432,17 @@ def _add_module_to_mf_plane_position(self) -> None: """Copies/adds a/an `plane_position` multiframe module to the current SOPClass from its single frame source. """ + if (not self._has_plane_position(self._perframe_tags) and (self._has_plane_position(self._shared_tags) or - self._has_plane_position(self.excluded_from_perframe_tags)) + self._has_plane_position(self._excluded_from_perframe_tags)) ): - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] self._add_module_to_dataset_plane_position( self._legacy_datasets[0], item) elif self._has_plane_position(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_plane_position( self._legacy_datasets[i], item) @@ -1344,24 +1451,29 @@ def _has_plane_orientation(self, tags: dict) -> bool: `plane_orientation` present in source single frames. Otherwise returns false. 
""" + image_orientation_patient_tg = tag_for_keyword( 'ImageOrientationPatient') return image_orientation_patient_tg in tags def _add_module_to_dataset_plane_orientation( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `plane_orientation` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() self._copy_attrib_if_present(source, item, @@ -1377,16 +1489,17 @@ def _add_module_to_mf_plane_orientation(self) -> None: """Copies/adds a/an `plane_orientation` multiframe module to the current SOPClass from its single frame source. """ + if (not self._has_plane_orientation(self._perframe_tags) and (self._has_plane_orientation(self._shared_tags) or - self._has_plane_orientation(self.excluded_from_perframe_tags)) + self._has_plane_orientation(self._excluded_from_perframe_tags)) ): - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] self._add_module_to_dataset_plane_orientation( self._legacy_datasets[0], item) elif self._has_plane_orientation(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_plane_orientation( self._legacy_datasets[i], item) @@ -1395,6 +1508,7 @@ def _has_frame_voi_lut(self, tags: dict) -> bool: `frame_voi_lut` present in source single frames. Otherwise returns false. 
""" + window_width_tg = tag_for_keyword('WindowWidth') window_center_tg = tag_for_keyword('WindowCenter') window_center_width_explanation_tg = tag_for_keyword( @@ -1404,19 +1518,23 @@ def _has_frame_voi_lut(self, tags: dict) -> bool: window_center_width_explanation_tg in tags) def _add_module_to_dataset_frame_voi_lut( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `frame_voi_lut` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() self._copy_attrib_if_present(source, item, @@ -1442,16 +1560,17 @@ def _add_module_to_mf_frame_voi_lut(self) -> None: """Copies/adds a/an `frame_voi_lut` multiframe module to the current SOPClass from its single frame source. """ + if (not self._has_frame_voi_lut(self._perframe_tags) and (self._has_frame_voi_lut(self._shared_tags) or - self._has_frame_voi_lut(self.excluded_from_perframe_tags)) + self._has_frame_voi_lut(self._excluded_from_perframe_tags)) ): - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] self._add_module_to_dataset_frame_voi_lut( self._legacy_datasets[0], item) elif self._has_frame_voi_lut(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_frame_voi_lut( self._legacy_datasets[i], item) @@ -1460,6 +1579,7 @@ def _has_pixel_value_transformation(self, tags: dict) -> bool: `pixel_value_transformation` present in source single frames. Otherwise returns false. 
""" + rescale_intercept_tg = tag_for_keyword('RescaleIntercept') rescale_slope_tg = tag_for_keyword('RescaleSlope') rescale_type_tg = tag_for_keyword('RescaleType') @@ -1468,19 +1588,23 @@ def _has_pixel_value_transformation(self, tags: dict) -> bool: rescale_type_tg in tags) def _add_module_to_dataset_pixel_value_transformation( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `pixel_value_transformation` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() self._copy_attrib_if_present(source, item, @@ -1537,16 +1661,17 @@ def _add_module_to_mf_pixel_value_transformation(self) -> None: """Copies/adds a/an `pixel_value_transformation` multiframe module to the current SOPClass from its single frame source. 
""" + if (not self._has_pixel_value_transformation(self._perframe_tags) and (self._has_pixel_value_transformation(self._shared_tags) or self._has_pixel_value_transformation( - self.excluded_from_perframe_tags))): - item = self._shared_functional_group[0] + self._excluded_from_perframe_tags))): + item = self._shared_functional_groups[0] self._add_module_to_dataset_pixel_value_transformation( self._legacy_datasets[0], item) elif self._has_pixel_value_transformation(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_pixel_value_transformation( self._legacy_datasets[i], item) @@ -1555,22 +1680,27 @@ def _has_referenced_image(self, tags: dict) -> bool: `referenced_image` present in source single frames. Otherwise returns false. """ + return tag_for_keyword('ReferencedImageSequence') in tags def _add_module_to_dataset_referenced_image( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `referenced_image` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + self._copy_attrib_if_present(source, destination, 'ReferencedImageSequence', @@ -1581,16 +1711,17 @@ def _add_module_to_mf_referenced_image(self) -> None: """Copies/adds a/an `referenced_image` multiframe module to the current SOPClass from its single frame source. 
""" + if (not self._has_referenced_image(self._perframe_tags) and (self._has_referenced_image(self._shared_tags) or - self._has_referenced_image(self.excluded_from_perframe_tags)) + self._has_referenced_image(self._excluded_from_perframe_tags)) ): - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] self._add_module_to_dataset_referenced_image( self._legacy_datasets[0], item) elif self._has_referenced_image(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_referenced_image( self._legacy_datasets[i], item) @@ -1599,22 +1730,27 @@ def _has_derivation_image(self, tags: dict) -> bool: `derivation_image` present in source single frames. Otherwise returns false. """ + return tag_for_keyword('SourceImageSequence') in tags def _add_module_to_dataset_derivation_image( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `derivation_image` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() self._copy_attrib_if_present(source, item, @@ -1640,44 +1776,58 @@ def _add_module_to_mf_derivation_image(self) -> None: """Copies/adds a/an `derivation_image` multiframe module to the current SOPClass from its single frame source. 
""" + if (not self._has_derivation_image(self._perframe_tags) and (self._has_derivation_image(self._shared_tags) or - self._has_derivation_image(self.excluded_from_perframe_tags)) + self._has_derivation_image(self._excluded_from_perframe_tags)) ): - item = self._shared_functional_group[0] + item = self._shared_functional_groups[0] self._add_module_to_dataset_derivation_image( self._legacy_datasets[0], item) elif self._has_derivation_image(self._perframe_tags): for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_derivation_image( self._legacy_datasets[i], item) - def _get_tag_used_dictionary(self, input: list) -> dict: + def _get_tag_used_dictionary(self, input: List[BaseTag]) -> dict: """Returns a dictionary of input tags with a use flag + Parameters + ---------- + input: List[pydicom.tag.BaseTag] + list of tags to build dictionary holding their used flag. + Returns + ------- + dict: + a dictionary type of tags with used flag. """ + out: dict = {} for item in input: out[item] = False return out def _add_module_to_dataset_unassigned_perframe( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `unassigned_perframe` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. 
""" + item = Dataset() - for tg in self._eligeible_tags: + for tg in self._eligible_tags: self._copy_attrib_if_present(source, item, tg, @@ -1688,11 +1838,12 @@ def _add_module_to_dataset_unassigned_perframe( seq = DataElement(tg, dictionary_VR(tg), DataElementSequence([item])) destination[tg] = seq - def _add_largest_smallest_pixle_value(self) -> None: + def _add_largest_smallest_pixel_value(self) -> None: """Adds the attributes for largest and smallest pixel value to current SOPClass object """ + ltg = tag_for_keyword("LargestImagePixelValue") from sys import float_info lval = float_info.min @@ -1726,35 +1877,38 @@ def _add_module_to_mf_unassigned_perframe(self) -> None: """ # first collect all not used tags # note that this is module is order dependent - self._add_largest_smallest_pixle_value() - self._eligeible_tags: List[Tag] = [] + self._add_largest_smallest_pixel_value() + self._eligible_tags: List[BaseTag] = [] for tg, used in self._perframe_tags.items(): - if not used and tg not in self.ExcludedFromFunctionalGroupsTags: - self._eligeible_tags.append(tg) + if not used and tg not in self.excluded_from_functional_groups_tags: + self._eligible_tags.append(tg) for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_unassigned_perframe( self._legacy_datasets[i], item) def _add_module_to_dataset_unassigned_shared( - self, source: Dataset, destination: Dataset) -> None: + self, source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `unassigned_shared` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. 
The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() for tg, used in self._shared_tags.items(): if (not used and tg not in self and - tg not in self.ExcludedFromFunctionalGroupsTags): + tg not in self.excluded_from_functional_groups_tags): self._copy_attrib_if_present(source, item, tg, @@ -1769,20 +1923,31 @@ def _add_module_to_mf_unassigned_shared(self) -> None: """Copies/adds a/an `unassigned_shared` multiframe module to the current SOPClass from its single frame source. """ - item = self._shared_functional_group[0] + + item = self._shared_functional_groups[0] self._add_module_to_dataset_unassigned_shared( self._legacy_datasets[0], item) def _create_empty_element(self, tg: BaseTag) -> DataElement: """Creates an empty dicom DataElement for input tag + Parameters + ---------- + tg: pydicom.tag.BaseTag + input tag. + Returns + ------- + pydicom.dataelem.DataElement + an empty DataElement created. """ + return DataElement(tg, dictionary_VR(tg), None) def _add_module_to_mf_empty_type2_attributes(self) -> None: """Adds empty type2 attributes to the current SOPClass to avoid type2 missing error. """ + iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[ self['SOPClassUID'].value] modules = IOD_MODULE_MAP[iod_name] @@ -1801,19 +1966,23 @@ def _add_module_to_mf_empty_type2_attributes(self) -> None: self._create_empty_element(tg) def _add_module_to_dataset_conversion_source( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `conversion_source` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. 
The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() self._copy_attrib_if_present(source, item, @@ -1836,18 +2005,20 @@ def _add_module_to_mf_conversion_source(self) -> None: """Copies/adds a/an `conversion_source` multiframe module to the current SOPClass from its single frame source. """ + for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_conversion_source( self._legacy_datasets[i], item) - self.EarliestFrameAcquisitionDateTime =\ + self.earliest_frame_acquisition_date_time =\ self.farthest_future_date_time def _build_slices_geometry_frame_content(self) -> None: - """Instantiates an object of GeometryOfSlice for each sice. + """Instantiates an object of GeometryOfSlice for each slice. """ + frame_count = len(self._legacy_datasets) for i in range(0, frame_count): curr_frame = self._legacy_datasets[i] @@ -1883,13 +2054,16 @@ def _build_slices_geometry_frame_content(self) -> None: pixel_spacing_v is not None): row = array(image_orientation_patient_v[0:3]) col = array(image_orientation_patient_v[3:]) - voxel_spaceing = array([pixel_spacing_v[0], - pixel_spacing_v[1], - slice_thickness_v]) + voxel_spacing = array( + [ + pixel_spacing_v[0], + pixel_spacing_v[1], + slice_thickness_v + ]) tpl = array(image_position_patient_v) dim = (rows_v, columns_v, 1) self._slices.append(GeometryOfSlice(row, col, - tpl, voxel_spaceing, dim)) + tpl, voxel_spacing, dim)) else: logger.error( "Error in geometry. One or more required " @@ -1903,9 +2077,10 @@ def _build_slices_geometry_frame_content(self) -> None: break def _are_all_slices_parallel_frame_content(self) -> bool: - """Returns true if all slices are prallel otherwise, false. + """Returns true if all slices are parallel otherwise, false. 
""" + slice_count = len(self._slices) if slice_count >= 2: last_slice = self._slices[0] @@ -1925,6 +2100,7 @@ def _add_stack_info_frame_content(self) -> None: """Adds stack information to the FrameContentSequence dicom attribute. """ + self._build_slices_geometry_frame_content() round_digits = int(ceil(-log10(self._tolerance))) source_series_uid = '' @@ -1934,7 +2110,7 @@ def _add_stack_info_frame_content(self) -> None: not_round_dist = s.get_distance_along_origin() dist = round(not_round_dist, round_digits) logger.debug( - 'Slice locaation {} rounded by {} digits to {}'.format( + 'Slice location {} rounded by {} digits to {}'.format( not_round_dist, round_digits, dist )) if dist in self._slice_location_map: @@ -1953,7 +2129,7 @@ def _add_stack_info_frame_content(self) -> None: 'series = {}'.format( len(idxs), loc, source_series_uid)) for frame_index in idxs: - frame = self._perframe_functional_group[frame_index] + frame = self._perframe_functional_groups[frame_index] new_item = frame[frame_content_tg].value[0] new_item["StackID"] = self._get_or_create_attribute( self._legacy_datasets[0], @@ -1969,6 +2145,7 @@ def _has_frame_content(self, tags: dict) -> bool: `frame_content` present in source single frames. Otherwise returns false. 
""" + acquisition_date_time_tg = tag_for_keyword('AcquisitionDateTime') acquisition_date_tg = tag_for_keyword('AcquisitionDate') acquisition_time_tg = tag_for_keyword('AcquisitionTime') @@ -1977,19 +2154,23 @@ def _has_frame_content(self, tags: dict) -> bool: acquisition_date_tg in tags) def _add_module_to_dataset_frame_content( - self, source: Dataset, destination: Dataset) -> None: + self, + source: Dataset, + destination: Dataset, + ) -> None: """Copies/adds attributes related to `frame_content` to destination dicom Dataset Parameters ---------- - source: Dataset + source: pydicom.dataset.Dataset the source dicom Dataset from which the modules attributes values are copied - destination: Dataset + destination: pydicom.dataset.Dataset the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. """ + item = Dataset() fan_tg = tag_for_keyword('FrameAcquisitionNumber') an_tg = tag_for_keyword('AcquisitionNumber') @@ -2002,7 +2183,7 @@ def _add_module_to_dataset_frame_content( # ---------------------------------------------------------------- acquisition_date_time_a = self._get_or_create_attribute( source, 'AcquisitionDateTime', self.earliest_date_time) - # chnage the keyword to FrameAcquisitionDateTime: + # change the keyword to FrameAcquisitionDateTime: frame_acquisition_date_time_a = DataElement( tag_for_keyword('FrameAcquisitionDateTime'), 'DT', acquisition_date_time_a.value) @@ -2020,8 +2201,8 @@ def _add_module_to_dataset_frame_content( frame_acquisition_date_time_a.value = DT(str(d) + str(t)) if frame_acquisition_date_time_a.value > self.earliest_date_time: if (frame_acquisition_date_time_a.value < - self.EarliestFrameAcquisitionDateTime): - self.EarliestFrameAcquisitionDateTime =\ + self.earliest_frame_acquisition_date_time): + self.earliest_frame_acquisition_date_time =\ frame_acquisition_date_time_a.value if not acquisition_date_time_is_perframe: 
if ('TriggerTime' in source and @@ -2031,7 +2212,7 @@ def _add_module_to_dataset_frame_content( trigger_time_in_millisecond = int(trigger_time_a.value) if trigger_time_in_millisecond > 0: t_delta = timedelta(trigger_time_in_millisecond) - # this is so rediculous. I'm not able to cnvert + # this is so ridiculous. I'm not able to convert # the DT to datetime (cast to superclass) d_t = datetime.combine( frame_acquisition_date_time_a.value.date(), @@ -2066,21 +2247,23 @@ def _add_acquisition_info_frame_content(self) -> None: attribute. """ + for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_group[i] + item = self._perframe_functional_groups[i] self._add_module_to_dataset_frame_content( self._legacy_datasets[i], item) - if (self.EarliestFrameAcquisitionDateTime < + if (self.earliest_frame_acquisition_date_time < self.farthest_future_date_time): kw = 'AcquisitionDateTime' self[kw] = DataElement( tag_for_keyword(kw), - 'DT', self.EarliestFrameAcquisitionDateTime) + 'DT', self.earliest_frame_acquisition_date_time) def _add_module_to_mf_frame_content(self) -> None: """Copies/adds a/an 'frame_content` multiframe module to the current SOPClass from its single frame source. """ + self._add_acquisition_info_frame_content() self._add_stack_info_frame_content() @@ -2088,19 +2271,24 @@ def _is_other_byte_vr_pixel_data(self, vr: str) -> bool: """checks if `PixelData` dicom value representation is OB. """ + return vr[0] == 'O' and vr[1] == 'B' def _is_other_word_vr_pixel_data(self, vr: str) -> bool: """checks if `PixelData` dicom value representation is OW. 
""" + return vr[0] == 'O' and vr[1] == 'W' # def _has(self, tags: dict) -> bool: """ # image_position_patient_tg = tag_for_keyword('ImagePositionPatient') # return image_position_patient_tg in tags def _copy_data_pixel_data( - self, src: bytearray, word_data: bool = False) -> None: + self, + src: bytearray, + word_data: bool = False, + ) -> None: """Copies contnet of PixelData from one frame and appends it to the content of PixelData for multiframe Parameters @@ -2108,18 +2296,18 @@ def _copy_data_pixel_data( src: bytearray content of pixel data from source frame(one of the single frames) word_data: bool = False - flag representing if the data is word-wise instead of blyte-wise + flag representing if the data is word-wise instead of byte-wise """ # Make sure that the length complies by row and col if word_data: des = self._word_data - ByteCount = 2 * self._number_of_pixels_per_frame + byte_count = 2 * self._number_of_pixels_per_frame else: des = self._byte_data - ByteCount = self._number_of_pixels_per_frame - if len(src) != ByteCount: - tmp: bytearray = bytearray(ByteCount) + byte_count = self._number_of_pixels_per_frame + if len(src) != byte_count: + tmp: bytearray = bytearray(byte_count) tmp[:len(src)] = src[:] src = tmp des.extend(src) @@ -2128,6 +2316,7 @@ def _add_module_to_mf_pixel_data(self) -> None: """Copies/add`s a/an pixel_data` multiframe module to the current SOPClass from its single frame source. 
""" + kw = 'NumberOfFrames' tg = tag_for_keyword(kw) self._frame_count = len(self._legacy_datasets) @@ -2158,17 +2347,18 @@ def _add_module_to_mf_pixel_data(self) -> None: raise TypeError( 'Cannot mix OB and OW Pixel Data VR from different frames') if len(self._byte_data) != 0: - MF_PixelData = DataElement(tag_for_keyword(kw), - 'OB', bytes(self._byte_data)) + mf_pixel_data = DataElement( + tag_for_keyword(kw), 'OB', bytes(self._byte_data)) elif len(self._word_data) != 0: - MF_PixelData = DataElement(tag_for_keyword(kw), - 'OW', bytes(self._word_data)) - self[kw] = MF_PixelData + mf_pixel_data = DataElement( + tag_for_keyword(kw), 'OW', bytes(self._word_data)) + self[kw] = mf_pixel_data def _add_module_to_mf_content_date_time(self) -> None: """Copies/adds a/an `content_date_time` multiframe module to the current SOPClass from its single frame source. """ + default_atrs = ["Acquisition", "Series", "Study"] for i in range(0, len(self._legacy_datasets)): src = self._legacy_datasets[i] @@ -2211,18 +2401,23 @@ def _add_module_to_mf_content_date_time(self) -> None: tag_for_keyword(kw), 'TM', n_t) def _add_data_element_to_target_contributing_equipment( - self, target: Dataset, kw: str, value: Any) -> None: + self, + target: Dataset, + kw: str, + value: Any, + ) -> None: """Add new data element related to ContributingEquipmentSequence to a target dataset(usually an item). Parameters ---------- - target: Dataset + target: pydicom.dataset.Dataset destination dicom Dataset. kw: str keyword if the attribute being added. value: Any value if the attribute being added. """ + tg = tag_for_keyword(kw) target[kw] = DataElement(tg, dictionary_VR(tg), value) @@ -2230,6 +2425,7 @@ def _add_module_to_mf_contributing_equipment(self) -> None: """Copies/adds a/an `contributing_equipment` multiframe module to the current SOPClass from its single frame source. 
""" + code_value_tg = tag_for_keyword('CodeValue') code_meaning_tg = tag_for_keyword('CodeMeaning') coding_scheme_designator_tg = tag_for_keyword('CodingSchemeDesignator') @@ -2263,7 +2459,7 @@ def _add_module_to_mf_contributing_equipment(self) -> None: self._add_data_element_to_target_contributing_equipment( item, "InstitutionAddress", - 'Radialogy Department, B&W Hospital, Boston, MA') + 'Radiology Department, B&W Hospital, Boston, MA') self._add_data_element_to_target_contributing_equipment( item, "SoftwareVersions", @@ -2279,6 +2475,7 @@ def _add_module_to_mf_instance_creation_date_time(self) -> None: """Copies/adds a/an `instance_creation_date_time` multiframe module to the current SOPClass from its single frame source. """ + nnooww = datetime.now() n_d = DA(nnooww.date().strftime('%Y%m%d')) n_t = TM(nnooww.time().strftime('%H%M%S')) @@ -2292,8 +2489,20 @@ def _add_module_to_mf_instance_creation_date_time(self) -> None: @classmethod def default_sort_key(cls, x: Dataset) -> tuple: """The default sort key to sort all single frames before conversion + Parameters + ---------- + x: pydicom.dataset.Dataset + input Dataset to be sorted. + Returns + ------- + tuple: + a sort key of three elements. + 1st priority: SeriesNumber + 2nd priority: InstanceNumber + 3rd priority: SOPInstanceUID """ + out: tuple = tuple() if 'SeriesNumber' in x: out += (x['SeriesNumber'].value, ) @@ -2307,6 +2516,7 @@ def _clear_build_blocks(self) -> None: """Clears the array containing all methods for multiframe conversion """ + self.__build_blocks = [] def _add_common_ct_pet_mr_build_blocks(self) -> None: @@ -2314,6 +2524,7 @@ def _add_common_ct_pet_mr_build_blocks(self) -> None: put them in place. """ + blocks = [ [self._add_module_to_mf_image_pixel, None], [self._add_module_to_mf_composite_instance_contex, None], @@ -2343,6 +2554,7 @@ def _add_ct_specific_build_blocks(self) -> None: put them in place. 
""" + blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -2359,6 +2571,7 @@ def _add_mr_specific_build_blocks(self) -> None: put them in place """ + blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -2375,6 +2588,7 @@ def _add_pet_specific_build_blocks(self) -> None: put them in place """ + blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -2390,6 +2604,7 @@ def _add_build_blocks_for_mr(self) -> None: put them in place """ + self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_mr_specific_build_blocks() @@ -2399,6 +2614,7 @@ def _add_build_blocks_for_pet(self) -> None: put them in place """ + self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_pet_specific_build_blocks() @@ -2408,16 +2624,18 @@ def _add_build_blocks_for_ct(self) -> None: put them in place. """ + self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_ct_specific_build_blocks() def _convert2multiframe(self) -> None: - """Runs all necessary methods to conver from single frame to + """Runs all necessary methods to convert from single frame to multi-frame. 
""" - logger.debug('Strt singleframe to multiframe conversion') + + logger.debug('Start singleframe to multiframe conversion') for fun, args in self.__build_blocks: if not args: fun() @@ -2442,17 +2660,6 @@ def __init__( """ Parameters ---------- - - Returns - ------- - - Note - ---- - - """ - """ - Parameters - ---------- legacy_datasets: Sequence[pydicom.dataset.Dataset] DICOM data sets of legacy single-frame image instances that should be converted @@ -2465,6 +2672,7 @@ def __init__( instance_number: int Number that should be assigned to the instance """ + try: ref_ds = legacy_datasets[0] except IndexError: @@ -2517,6 +2725,7 @@ def __init__( instance_number: int Number that should be assigned to the instance """ + try: ref_ds = legacy_datasets[0] except IndexError: @@ -2557,17 +2766,6 @@ def __init__( """ Parameters ---------- - - Returns - ------- - - Note - ---- - - """ - """ - Parameters - ---------- legacy_datasets: Sequence[pydicom.dataset.Dataset] DICOM data sets of legacy single-frame image instances that should be converted @@ -2580,6 +2778,7 @@ def __init__( instance_number: int Number that should be assigned to the instance """ + try: ref_ds = legacy_datasets[0] except IndexError: From 98501258c8ea3a863cf4bfbacc9277d2392f45a9 Mon Sep 17 00:00:00 2001 From: afshin Date: Tue, 27 Apr 2021 10:56:31 -0400 Subject: [PATCH 37/44] modified commnets for 3rd round --- src/highdicom/legacy/sop.py | 67 +++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 29 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index 0645540c..be99d64c 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -4,7 +4,7 @@ """ import logging -from typing import Any, List, Union, Callable, Sequence +from typing import Any, List, Union, Callable, Sequence, Optional from datetime import datetime, timedelta from copy import deepcopy @@ -558,7 +558,7 @@ def __init__( series_number: int, sop_instance_uid: str, 
instance_number: int, - sort_key: Callable = None, + sort_key: Optional[Callable] = None, ) -> None: """ Parameters @@ -574,6 +574,8 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance + sort_key: Callable, optional + A function by which the single-frame instances will be sorted """ try: @@ -596,28 +598,29 @@ def __init__( sop_instance_uid=sop_instance_uid, sop_class_uid=sop_class_uid, instance_number=instance_number, - manufacturer="" if 'Manufacturer' not in ref_ds - else ref_ds.Manufacturer, - modality="" if 'Modality' not in ref_ds - else ref_ds.Modality, - patient_id=None if 'PatientID' not in ref_ds - else ref_ds.PatientID, - patient_name=None if 'PatientName' not in ref_ds - else ref_ds.PatientName, - patient_birth_date=None if 'PatientBirthDate' not in ref_ds - else ref_ds.PatientBirthDate, - patient_sex=None if 'PatientSex' not in ref_ds - else ref_ds.PatientSex, - accession_number=None if 'AccessionNumber' not in ref_ds - else ref_ds.AccessionNumber, - study_id=None if 'StudyID' not in ref_ds - else ref_ds.StudyID, - study_date=None if 'StudyDate' not in ref_ds - else ref_ds.StudyDate, - study_time=None if 'StudyTime' not in ref_ds - else ref_ds.StudyTime, - referring_physician_name=None if 'ReferringPhysicianName' not in - ref_ds else ref_ds.ReferringPhysicianName, + # Manufacturer is type 2 + manufacturer=getattr(ref_ds, "Manufacturer", None), + # Modality is type 1 + modality=ref_ds.Modality, + # PatientID is type 2 + patient_id=getattr(ref_ds, "PatientID", None), + # PatientName is type 2 + patient_name=getattr(ref_ds, "PatientName", None), + # PatientBirthDate is type 2 + patient_birth_date=getattr(ref_ds, "PatientBirthDate", None), + # PatientSex is type 2 + patient_sex=getattr(ref_ds, "PatientSex", None), + # AccessionNumber is type 2 + accession_number=getattr(ref_ds, "AccessionNumber", None), + # StudyID is type 2 + study_id=getattr(ref_ds, "StudyID", None), + # 
StudyDate is type 2 + study_date=getattr(ref_ds, "StudyDate", None), + # StudyTime is type 2 + study_time=getattr(ref_ds, "StudyTime", None), + # ReferringPhysicianName is type 2 + referring_physician_name=getattr( + ref_ds, "ReferringPhysicianName", None) ) self._legacy_datasets = legacy_datasets self._perframe_functional_groups = Ù€PerframeFunctionalGroups( @@ -792,7 +795,7 @@ def _copy_attrib_if_present( src_ds: Dataset, dest_ds: Dataset, src_kw_or_tg: str, - dest_kw_or_tg: str = None, + dest_kw_or_tg: Optional[str] = None, check_not_to_be_perframe: bool = True, check_not_to_be_empty: bool = False ) -> None: @@ -806,7 +809,7 @@ def _copy_attrib_if_present( Destination Dataset to copy the attribute to. src_kw_or_tg: str The keyword from the source Dataset to copy its value. - dest_kw_or_tg: str = None + dest_kw_or_tg: str, optional The keyword of the destination Dataset, the value is copied to. If its value is None, then the destination keyword will be exactly the source keyword. @@ -2655,7 +2658,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - sort_key: Callable = None, + sort_key: Optional[Callable] = None, ) -> None: """ Parameters @@ -2671,6 +2674,8 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance + sort_key: Callable, optional + A function by which the single-frame instances will be sorted """ try: @@ -2708,7 +2713,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - sort_key: Callable = None, + sort_key: Optional[Callable] = None, ) -> None: """ Parameters @@ -2724,6 +2729,8 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance + sort_key: Callable, optional + A function by which the single-frame instances will be sorted """ try: @@ -2761,7 +2768,7 @@ def __init__( series_number: int, sop_instance_uid: str, instance_number: int, - 
sort_key: Callable = None, + sort_key: Optional[Callable] = None, ) -> None: """ Parameters @@ -2777,6 +2784,8 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance + sort_key: Callable, optional + A function by which the single-frame instances will be sorted """ try: From 6455b85edc5e37742de615468ce8e2bb355dc19a Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 9 May 2021 04:31:05 -0400 Subject: [PATCH 38/44] Applied Chris's comments --- src/highdicom/legacy/sop.py | 623 ++++++++++++++---------------------- src/highdicom/spatial.py | 78 +++++ tests/test_legacy.py | 4 +- 3 files changed, 324 insertions(+), 381 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index be99d64c..a0dec55f 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -4,11 +4,11 @@ """ import logging -from typing import Any, List, Union, Callable, Sequence, Optional +from typing import Any, List, Union, Callable, Sequence, Optional, Dict, Tuple from datetime import datetime, timedelta from copy import deepcopy -from numpy import log10, array, ceil, cross, dot, ndarray +from numpy import log10, array, ceil from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag from pydicom.dataset import Dataset from pydicom.tag import Tag, BaseTag @@ -21,6 +21,7 @@ from highdicom.base import SOPClass from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP +from highdicom.spatial import _GeometryOfSlice logger = logging.getLogger(__name__) @@ -43,26 +44,26 @@ } -class Ù€DicomHelper: +class _DicomHelper: """A class for checking dicom tags and comparing dicom attributes""" - @classmethod - def istag_file_meta_information_group(cls, t: BaseTag) -> bool: + @staticmethod + def istag_file_meta_information_group(t: BaseTag) -> bool: return t.group == 0x0002 - @classmethod - def istag_repeating_group(cls, t: BaseTag) -> bool: + 
@staticmethod + def istag_repeating_group(t: BaseTag) -> bool: g = t.group return (g >= 0x5000 and g <= 0x501e) or\ (g >= 0x6000 and g <= 0x601e) - @classmethod - def istag_group_length(cls, t: BaseTag) -> bool: + @staticmethod + def istag_group_length(t: BaseTag) -> bool: return t.element == 0 - @classmethod - def isequal(cls, v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool: + @staticmethod + def isequal(v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool: from pydicom.valuerep import DSfloat def is_equal_float(x1: float, x2: float) -> bool: @@ -71,7 +72,7 @@ def is_equal_float(x1: float, x2: float) -> bool: return False if isinstance(v1, DataElementSequence): for item1, item2 in zip(v1, v2): - if not cls.isequal_dicom_dataset(item1, item2): + if not _DicomHelper.isequal_dicom_dataset(item1, item2): return False if not isinstance(v1, MultiValue): v11 = [v1] @@ -90,20 +91,22 @@ def is_equal_float(x1: float, x2: float) -> bool: return False return True - @classmethod - def isequal_dicom_dataset(cls, ds1: Dataset, ds2: Dataset) -> bool: + @staticmethod + def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: """Checks if two dicom dataset have the same value in all attributes + Parameters ---------- ds1: pydicom.dataset.Dataset 1st dicom dataset ds2: pydicom.dataset.Dataset 2nd dicom dataset + Returns ------- - True of dicom datasets are equal otherwise False - """ + True if dicom datasets are equal otherwise False + """ if type(ds1) != type(ds2): return False if not isinstance(ds1, Dataset): @@ -112,137 +115,15 @@ def isequal_dicom_dataset(cls, ds1: Dataset, ds2: Dataset) -> bool: if k1 not in ds2: return False elem2 = ds2[k1] - if not cls.isequal(elem2.value, elem1.value): + if not _DicomHelper.isequal(elem2.value, elem1.value): return False return True - @classmethod - def tag2str(cls, tg: BaseTag) -> str: - """Converts tag to hex format like (group, element) - - """ - - if not isinstance(tg, BaseTag): - tg = Tag(tg) - return '(0x{:0>4x}, 
0x{:0>4x})'.format(tg.group, tg.element) - - @classmethod - def tag2kwstr(cls, tg: BaseTag) -> str: - """Converts tag to keyword and (group, element) form - - """ - + @staticmethod + def tag2kwstr(tg: BaseTag) -> str: + """Converts tag to keyword and (group, element) form""" return '{}-{:32.32s}'.format( - cls.tag2str(tg), keyword_for_tag(tg)) - - -class GeometryOfSlice: - - """A class for checking dicom slices geometry/parallelity""" - - def __init__(self, - row_vector: ndarray, - col_vector: ndarray, - top_left_corner_pos: ndarray, - voxel_spacing: ndarray, - dimensions: tuple) -> None: - """ - Parameters - ---------- - row_vector: numpy.ndarray - 3D vector representing row of the input slice - col_vector: numpy.ndarray - 3D vector representing column the input slice - top_left_corner_pos: numpy.ndarray - 3D point representing top left corner position of the input slice - voxel_spacing: numpy.ndarray - Three element array. 1st and 2nd copied from PixelSpacing and the - 3rd copied from SliceThickness - dimensions: tuple - 3 element tuple holding x as number of cols, y as number of rows - and z as 1 - """ - - self.row_vector = row_vector - self.col_vector = col_vector - self.top_left_corner_position = top_left_corner_pos - self.voxel_spacing = voxel_spacing - self.Dim = dimensions - - def get_normal_vector(self) -> ndarray: - """Returns the normal vector of the input slice - - """ - - n: ndarray = cross(self.row_vector, self.col_vector) - n[2] = -n[2] - return n - - def get_distance_along_origin(self) -> float: - """Returns the shortest distance of the slice from the origin - - """ - - n = self.get_normal_vector() - return float( - dot(self.top_left_corner_position, n)) - - def are_parallel( - slice1: Any, - slice2: Any, - tolerance: float = 0.0001, - ) -> bool: - """Returns False if two slices are not parallel else True - - """ - - if (not isinstance(slice1, GeometryOfSlice) or - not isinstance(slice2, GeometryOfSlice)): - logger.warning( - 'slice1 and slice2 are 
not of the same ' - 'type: type(slice1) = {} and type(slice2) = {}'.format( - type(slice1), type(slice2))) - return False - else: - n1: ndarray = slice1.get_normal_vector() - n2: ndarray = slice2.get_normal_vector() - for el1, el2 in zip(n1, n2): - if abs(el1 - el2) > tolerance: - return False - return True - - -class Ù€PerframeFunctionalGroups(DataElementSequence): - - """A sequence class for perframe functional group""" - - def __init__(self, number_of_frames: int) -> None: - """ - Parameters - ---------- - number_of_frames: int - The perframe functional group sequence will have items equal to - the whole number of frames - """ - - super().__init__() - for i in range(0, number_of_frames): - item = Dataset() - self.append(item) - - -class Ù€SharedFunctionalGroups(DataElementSequence): - - """A sequence class for shared functional group""" - - def __init__(self) -> None: - """Constructs a shared frame functional group holding only one item - - """ - - super().__init__() - item = Dataset() - self.append(item) + str(tg), keyword_for_tag(tg)) class FrameSet: @@ -259,14 +140,15 @@ def __init__( distinguishing_tags: List[BaseTag], ) -> None: """ + Parameters ---------- single_frame_list: List[pydicom.dataset.Dataset] list of single frames that have equal distinguising attributes distinguishing_tags: List[pydicom.tag.BaseTag] list of distinguishing attributes tags - """ + """ self._frames = single_frame_list self._distinguishing_attributes_tags = distinguishing_tags tmp = [ @@ -302,50 +184,33 @@ def shared_tags(self) -> List[Tag]: @property def series_instance_uid(self) -> UID: - """Returns the series instance uid of the FrameSet - - """ - + """Returns the series instance uid of the FrameSet""" return self._frames[0].SeriesInstanceUID @property def study_instance_uid(self) -> UID: - """Returns the study instance uid of the FrameSet - - """ - + """Returns the study instance uid of the FrameSet""" return self._frames[0].StudyInstanceUID def get_sop_instance_uid_list(self) 
-> list: - """Returns a list containing all SOPInstanceUID of the FrameSet - - """ - - output_list: List[UID] = [] - for f in self._frames: - output_list.append(f.SOPInstanceUID) + """Returns a list containing all SOPInstanceUID of the FrameSet""" + output_list = [f.SOPInstanceUID for f in self._frames] return output_list def get_sop_class_uid(self) -> UID: - """Returns the sop class uid of the FrameSet - - """ - + """Returns the sop class uid of the FrameSet""" return self._frames[0].SOPClassUID def _find_per_frame_and_shared_tags(self) -> None: - """Detects and collects all shared and perframe attributes - - """ - + """Detects and collects all shared and perframe attributes""" rough_shared: dict = {} sfs = self.frames for ds in sfs: for ttag, elem in ds.items(): if (not ttag.is_private and not - Ù€DicomHelper.istag_file_meta_information_group(ttag) and not - Ù€DicomHelper.istag_repeating_group(ttag) and not - Ù€DicomHelper.istag_group_length(ttag) and not + _DicomHelper.istag_file_meta_information_group(ttag) and not + _DicomHelper.istag_repeating_group(ttag) and not + _DicomHelper.istag_group_length(ttag) and not self._istag_excluded_from_perframe(ttag) and ttag != tag_for_keyword('PixelData')): elem = ds[ttag] @@ -361,17 +226,10 @@ def _find_per_frame_and_shared_tags(self) -> None: if len(v) < len(self.frames): to_be_removed_from_shared.append(ttag) else: - all_values_are_equal = True - for v_i in v: - if not Ù€DicomHelper.isequal(v_i, v[0]): - all_values_are_equal = False - break + all_values_are_equal = all( + _DicomHelper.isequal(v_i, v[0]) for v_i in v) if not all_values_are_equal: to_be_removed_from_shared.append(ttag) - from pydicom.datadict import keyword_for_tag - for t, v in rough_shared.items(): - if keyword_for_tag(t) != 'PatientSex': - continue for t in to_be_removed_from_shared: del rough_shared[t] for t, v in rough_shared.items(): @@ -386,7 +244,7 @@ class FrameSetCollection: """A class to extract framesets based on distinguishing dicom 
attributes""" - def __init__(self, single_frame_list: Sequence[Any]) -> None: + def __init__(self, single_frame_list: Sequence[Dataset]) -> None: """Forms framesets based on a list of distinguishing attributes. The list of "distinguishing" attributes that are used to determine commonality is currently fixed, and includes the unique identifying @@ -396,16 +254,18 @@ def __init__(self, single_frame_list: Sequence[Any]) -> None: consistent sampling, such as ImageOrientationPatient, PixelSpacing and SliceThickness, and in addition AcquisitionContextSequence and BurnedInAnnotation. + Parameters ---------- - single_frame_list: Sequence[Any] + single_frame_list: Sequence[pydicom.dataset.Dataset] list of mixed or non-mixed single frame dicom images - Note + + Notes ----- Note that Series identification, specifically SeriesInstanceUID is NOT a distinguishing attribute; i.e. FrameSets may span Series - """ + """ self.mixed_frames = single_frame_list self.mixed_frames_copy = self.mixed_frames[:] self._distinguishing_attribute_keywords = [ @@ -441,7 +301,6 @@ def __init__(self, single_frame_list: Sequence[Any]) -> None: 'PixelSpacing', 'SliceThickness', 'AcquisitionContextSequence'] - to_be_removed_from_distinguishing_attribs: set = set() self._frame_sets: List[FrameSet] = [] frame_counts = [] frameset_counter = 0 @@ -451,48 +310,48 @@ def __init__(self, single_frame_list: Sequence[Any]) -> None: self._frame_sets.append(FrameSet(x[0], x[1])) frame_counts.append(len(x[0])) # log information - logger.debug("Frameset({:02d}) including {:03d} frames".format( - frameset_counter, len(x[0]))) + logger.debug( + f"Frameset({frameset_counter:02d}) " + "including {len(x[0]):03d} frames") logger.debug('\t Distinguishing tags:') for dg_i, dg_tg in enumerate(x[1], 1): - logger.debug('\t\t{:02d}/{})\t{}-{:32.32s} = {:32.32s}'.format( - dg_i, len(x[1]), Ù€DicomHelper.tag2str(dg_tg), - keyword_for_tag(dg_tg), - str(x[0][0][dg_tg].value))) + logger.debug( + 
f'\t\t{dg_i:02d}/{len(x[1])})\t{str(dg_tg)}-' + '{keyword_for_tag(dg_tg):32.32s} = ' + '{str(x[0][0][dg_tg].value):32.32s}') logger.debug('\t dicom datasets in this frame set:') for dicom_i, dicom_ds in enumerate(x[0], 1): - logger.debug('\t\t{}/{})\t {}'.format( - dicom_i, len(x[0]), dicom_ds['SOPInstanceUID'])) + logger.debug( + f'\t\t{dicom_i}/{len(x[0])})\t ' + '{dicom_ds["SOPInstanceUID"]}') frames = '' for i, f_count in enumerate(frame_counts, 1): frames += '{: 2d}){:03d}\t'.format(i, f_count) frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( len(frame_counts), len(self.mixed_frames)) + frames logger.info(frames) - for kw in to_be_removed_from_distinguishing_attribs: - self.distinguishing_attribute_keywords.remove(kw) self._excluded_from_perframe_tags = {} - for kwkw in self.distinguishing_attribute_keywords: + for kwkw in self._distinguishing_attribute_keywords: + self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False + excluded_kws = [ + 'AcquisitionDateTime' + 'AcquisitionDate' + 'AcquisitionTime' + 'SpecificCharacterSet' + ] + for kwkw in excluded_kws: self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False - self._excluded_from_perframe_tags[ - tag_for_keyword('AcquisitionDateTime')] = False - self._excluded_from_perframe_tags[ - tag_for_keyword('AcquisitionDate')] = False - self._excluded_from_perframe_tags[ - tag_for_keyword('AcquisitionTime')] = False - self.excluded_from_functional_groups_tags = { - tag_for_keyword('SpecificCharacterSet'): False} def _find_all_similar_to_first_datasets(self) -> tuple: """Takes the fist instance from mixed-frames and finds all dicom images that have the same distinguishing attributes. 
- """ + """ similar_ds: List[Dataset] = [self.mixed_frames_copy[0]] distinguishing_tags_existing = [] distinguishing_tags_missing = [] self.mixed_frames_copy = self.mixed_frames_copy[1:] - for kw in self.distinguishing_attribute_keywords: + for kw in self._distinguishing_attribute_keywords: tg = tag_for_keyword(kw) if tg in similar_ds[0]: distinguishing_tags_existing.append(tg) @@ -505,7 +364,7 @@ def _find_all_similar_to_first_datasets(self) -> tuple: if tg in ds: logger_msg.add( '{} is missing in all but {}'.format( - Ù€DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) + _DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) all_equal = False break if not all_equal: @@ -516,11 +375,11 @@ def _find_all_similar_to_first_datasets(self) -> tuple: all_equal = False break new_val = ds[tg].value - if not Ù€DicomHelper.isequal(ref_val, new_val): + if not _DicomHelper.isequal(ref_val, new_val): logger_msg.add( 'Inequality on distinguishing ' 'attribute{} -> {} != {} \n series uid = {}'.format( - Ù€DicomHelper.tag2kwstr(tg), ref_val, new_val, + _DicomHelper.tag2kwstr(tg), ref_val, new_val, ds.SeriesInstanceUID)) all_equal = False break @@ -559,8 +418,10 @@ def __init__( sop_instance_uid: str, instance_number: int, sort_key: Optional[Callable] = None, + **kwargs: Any, ) -> None: """ + Parameters ---------- legacy_datasets: Sequence[pydicom.dataset.Dataset] @@ -574,10 +435,10 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance - sort_key: Callable, optional + sort_key: Optional[Callable], optional A function by which the single-frame instances will be sorted - """ + """ try: ref_ds = legacy_datasets[0] except IndexError: @@ -620,14 +481,19 @@ def __init__( study_time=getattr(ref_ds, "StudyTime", None), # ReferringPhysicianName is type 2 referring_physician_name=getattr( - ref_ds, "ReferringPhysicianName", None) + ref_ds, "ReferringPhysicianName", None), + **kwargs ) self._legacy_datasets = 
legacy_datasets - self._perframe_functional_groups = Ù€PerframeFunctionalGroups( - len(legacy_datasets)) + self._perframe_functional_groups = DataElementSequence() + for i in range(0, len(legacy_datasets)): + item = Dataset() + self._perframe_functional_groups.append(item) tg = tag_for_keyword('PerFrameFunctionalGroupsSequence') self[tg] = DataElement(tg, 'SQ', self._perframe_functional_groups) - self._shared_functional_groups = Ù€SharedFunctionalGroups() + self._shared_functional_groups = DataElementSequence() + item = Dataset() + self._shared_functional_groups.append(item) tg = tag_for_keyword('SharedFunctionalGroupsSequence') self[tg] = DataElement(tg, 'SQ', self._shared_functional_groups) self._distinguishing_attributes_tags = self._get_tag_used_dictionary( @@ -640,10 +506,8 @@ def __init__( frame_set.shared_tags) self.excluded_from_functional_groups_tags = { tag_for_keyword('SpecificCharacterSet'): False} - - # -------------------------------------------------------------------- self.__build_blocks: List[Any] = [] - # == == == == == == == == == == == == == == == == == == == == == == == + new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): new_ds.append(item) @@ -739,7 +603,7 @@ def __init__( self.farthest_future_date = DA('99991231') self.farthest_future_time = TM('235959') self.farthest_future_date_time = DT('99991231235959') - self._slices: List[GeometryOfSlice] = [] + self._slices: List[_GeometryOfSlice] = [] self._tolerance = 0.0001 self._slice_location_map: dict = {} self._byte_data = bytearray() @@ -760,12 +624,13 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: """Takes a dicom DataElement and check if DataElement is empty or in case of Sequence returns True if there is not item or all the items are empty. + Parameters ---------- attrib: pydicom.dataelem.DataElement input DICOM attribute whose emptiness will be checked. 
- """ + """ if attribute.is_empty: return True if isinstance(attribute.value, DataElementSequence): @@ -781,8 +646,8 @@ def _is_empty_or_empty_items(self, attribute: DataElement) -> bool: def _mark_tag_as_used(self, tg: BaseTag) -> None: """Checks what group the input tag belongs to and marks it as used to keep track of all used and unused tags - """ + """ if tg in self._shared_tags: self._shared_tags[tg] = True elif tg in self._excluded_from_perframe_tags: @@ -801,6 +666,7 @@ def _copy_attrib_if_present( ) -> None: """Copies a dicom attribute value from a keyword in the source Dataset to a similar or different keyword in the destination Dataset + Parameters ---------- src_ds: pydicom.dataset.Dataset @@ -809,7 +675,7 @@ def _copy_attrib_if_present( Destination Dataset to copy the attribute to. src_kw_or_tg: str The keyword from the source Dataset to copy its value. - dest_kw_or_tg: str, optional + dest_kw_or_tg: Optional[str], optional The keyword of the destination Dataset, the value is copied to. If its value is None, then the destination keyword will be exactly the source keyword. @@ -819,8 +685,8 @@ def _copy_attrib_if_present( check_not_to_be_empty: bool = False If this arg is true, then copy is aborted if the source attribute is empty. - """ + """ if isinstance(src_kw_or_tg, str): src_kw_or_tg = tag_for_keyword(src_kw_or_tg) if dest_kw_or_tg is None: @@ -855,6 +721,7 @@ def _get_or_create_attribute( """Creates a new DataElement with a value copied from the source Dataset. If the attribute is absent in source Dataset, then its value will be the default value. + Parameters ---------- src: pydicom.dataset.Dataset @@ -864,12 +731,13 @@ def _get_or_create_attribute( default: Any The default value created DataElement if the keyword was absent in the source Dataset. + Returns ------- pydicom.dataelem.DataElement A new DataElement created. 
- """ + """ if kw is str: tg = tag_for_keyword(kw) else: @@ -911,6 +779,7 @@ def _add_module( """Copies all attribute of a particular module to current SOPClass, excepting the excepted_attributes, from a reference frame (the first frame on the single frame list). + Parameters ---------- module_name: str: @@ -922,8 +791,8 @@ def _add_module( copied. check_not_to_be_empty: bool = False If this flag is true, then the empty attributes will not be copied. - """ + """ attribs: List[dict] = MODULE_ATTRIBUTE_MAP[module_name] ref_dataset = self._legacy_datasets[0] for a in attribs: @@ -937,10 +806,10 @@ def _add_module( check_not_to_be_empty=check_not_to_be_empty) def _add_module_to_mf_image_pixel(self) -> None: - """Copies/adds` a/an image_pixel` multiframe module to + """Copies/adds an `image_pixel` multiframe module to the current SOPClass from its single frame source. - """ + """ module_and_excepted_at = { "image-pixel": [ @@ -959,10 +828,10 @@ def _add_module_to_mf_image_pixel(self) -> None: check_not_to_be_perframe=True) # don't check the perframe set def _add_module_to_mf_enhanced_common_image(self) -> None: - """Copies/adds a/an `enhanced_common_image` multiframe module to + """Copies/adds an `enhanced_common_image` multiframe module to the current SOPClass from its single frame source. - """ + """ ref_dataset = self._legacy_datasets[0] attribs_to_be_added = [ 'ContentQualification', @@ -1017,23 +886,24 @@ def _add_module_to_mf_enhanced_common_image(self) -> None: # Icon Image Sequence - always discard these def _add_module_to_mf_contrast_bolus(self) -> None: - """Copies/adds a/an `contrast_bolus` multiframe module to + """Copies/adds a `contrast_bolus` multiframe module to the current SOPClass from its single frame source. 
- """ + """ self._add_module('contrast-bolus') def _add_module_to_mf_enhanced_ct_image(self) -> None: - """Copies/adds a/an `enhanced_ct_image` multiframe module to + """Copies/adds an `enhanced_ct_image` multiframe module to the current SOPClass from its single frame source. - """ + """ pass # David's code doesn't hold anything for this module ... should ask him def _add_module_to_mf_enhanced_pet_image(self) -> None: - """Copies/adds a/an `enhanced_pet_image` multiframe module to + """Copies/adds an `enhanced_pet_image` multiframe module to the current SOPClass from its single frame source. + """ # David's code doesn't hold anything for this module ... should ask him kw = 'ContentQualification' @@ -1043,10 +913,10 @@ def _add_module_to_mf_enhanced_pet_image(self) -> None: self[tg] = elem def _add_module_to_mf_enhanced_mr_image(self) -> None: - """Copies/adds a/an `enhanced_mr_image` multiframe module to + """Copies/adds an `enhanced_mr_image` multiframe module to the current SOPClass from its single frame source. - """ + """ self._copy_attrib_if_present( self._legacy_datasets[0], self, @@ -1089,10 +959,10 @@ def _add_module_to_mf_enhanced_mr_image(self) -> None: check_not_to_be_empty=True) def _add_module_to_mf_acquisition_context(self) -> None: - """Copies/adds a/an `acquisition_context` multiframe module to + """Copies/adds an `acquisition_context` multiframe module to the current SOPClass from its single frame source. - """ + """ tg = tag_for_keyword('AcquisitionContextSequence') if tg not in self._perframe_tags: self[tg] = self._get_or_create_attribute( @@ -1105,17 +975,19 @@ def _get_value_for_frame_type( attrib: DataElement, ) -> Union[list, None]: """Guesses the appropriate FrameType attribute value from ImageType. + Parameters ---------- attrib: pydicom.dataelem.DataElement source attribute from which the frame type is inferred. + Returns ------- Union[list, None] A new list of FrameType value is returned. 
If attrib is not of type DataElement None is returned. - """ + """ if not isinstance(attrib, DataElement): return None output = ['', '', '', ''] @@ -1133,16 +1005,18 @@ def _get_frame_type_seq_tag( ) -> int: """Detects the correct tag/keyword for the frame type sq based on the modality name. + Parameters ---------- modality: str: A string representing DICOM image Modality. + Returns ------- int: Appropriate DICOM tag integer is returned. - """ + """ seq_kw = '{}{}FrameTypeSequence' if modality == 'PET': seq_kw = seq_kw.format(modality, '') @@ -1158,6 +1032,7 @@ def _add_module_to_dataset_common_ct_mr_pet_image_description( ) -> None: """Copies/adds attributes related to `common_ct_mr_pet_image_description` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1172,8 +1047,8 @@ def _add_module_to_dataset_common_ct_mr_pet_image_description( of dicom Dataset like `ImageType`. If level is not `0`, then the destination attributes will be in functional groups items like `FrameType` - """ + """ frame_type_a = source['ImageType'] if level == 0: frame_type_tg = tag_for_keyword('ImageType') @@ -1200,14 +1075,13 @@ def _add_module_to_mf_common_ct_mr_pet_image_description( ) -> None: """Copies/adds the common attributes for ct/mr/pet description module to the current SOPClass from its single frame source. 
- """ + """ im_type_tag = tag_for_keyword('ImageType') seq_tg = self._get_frame_type_seq_tag(modality) if im_type_tag not in self._perframe_tags: self._add_module_to_dataset_common_ct_mr_pet_image_description( self._legacy_datasets[0], self, 0) - # ---------------------------- item = self._shared_functional_groups[0] inner_item = Dataset() self._add_module_to_dataset_common_ct_mr_pet_image_description( @@ -1224,10 +1098,10 @@ def _add_module_to_mf_common_ct_mr_pet_image_description( seq_tg, 'SQ', DataElementSequence([inner_item])) def _add_module_to_mf_composite_instance_contex(self) -> None: - """Copies/adds a/an `composite_instance_contex` multiframe module to + """Copies/adds a `composite_instance_contex` multiframe module to the current SOPClass from its single frame source. - """ + """ for module_name, excepted_a in self._module_excepted_list.items(): self._add_module( module_name, @@ -1242,6 +1116,7 @@ def _add_module_to_dataset_frame_anatomy( ) -> None: """Copies/adds attributes related to `frame_anatomy` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1251,6 +1126,7 @@ def _add_module_to_dataset_frame_anatomy( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. + """ # David's code is more complicated than mine # Should check it out later. @@ -1286,12 +1162,12 @@ def _add_module_to_dataset_frame_anatomy( DataElementSequence([item])) destination['FrameAnatomySequence'] = frame_anatomy_a - def _has_frame_anatomy(self, tags: dict) -> bool: + def _has_frame_anatomy(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `frame_anatomy` present in source single frames. Otherwise returns false. 
- """ + """ laterality_tg = tag_for_keyword('Laterality') im_laterality_tg = tag_for_keyword('ImageLaterality') bodypart_tg = tag_for_keyword('BodyPartExamined') @@ -1302,10 +1178,10 @@ def _has_frame_anatomy(self, tags: dict) -> bool: anatomical_reg_tg) def _add_module_to_mf_frame_anatomy(self) -> None: - """Copies/adds a/an `frame_anatomy` multiframe module to + """Copies/adds a `frame_anatomy` multiframe module to the current SOPClass from its single frame source. - """ + """ if (not self._has_frame_anatomy(self._perframe_tags) and (self._has_frame_anatomy(self._shared_tags) or self._has_frame_anatomy(self._excluded_from_perframe_tags)) @@ -1319,12 +1195,12 @@ def _add_module_to_mf_frame_anatomy(self) -> None: self._add_module_to_dataset_frame_anatomy( self._legacy_datasets[i], item) - def _has_pixel_measures(self, tags: dict) -> bool: + def _has_pixel_measures(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `pixel_measures` present in source single frames. Otherwise returns false. - """ + """ pixel_spacing_tg = tag_for_keyword('PixelSpacing') slice_thickness_tg = tag_for_keyword('SliceThickness') imager_pixel_spacing_tg = tag_for_keyword('ImagerPixelSpacing') @@ -1339,6 +1215,7 @@ def _add_module_to_dataset_pixel_measures( ) -> None: """Copies/adds attributes related to `pixel_measures` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1348,8 +1225,8 @@ def _add_module_to_dataset_pixel_measures( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. 
- """ + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1374,10 +1251,10 @@ def _add_module_to_dataset_pixel_measures( destination[pixel_measures_tg] = seq def _add_module_to_mf_pixel_measures(self) -> None: - """Copies/adds a/an `pixel_measures` multiframe module to + """Copies/adds a `pixel_measures` multiframe module to the current SOPClass from its single frame source. - """ + """ if (not self._has_pixel_measures(self._perframe_tags) and (self._has_pixel_measures(self._shared_tags) or self._has_pixel_measures(self._excluded_from_perframe_tags)) @@ -1391,12 +1268,12 @@ def _add_module_to_mf_pixel_measures(self) -> None: self._add_module_to_dataset_pixel_measures( self._legacy_datasets[i], item) - def _has_plane_position(self, tags: dict) -> bool: + def _has_plane_position(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `plane_position` present in source single frames. Otherwise returns false. - """ + """ image_position_patient_tg = tag_for_keyword('ImagePositionPatient') return image_position_patient_tg in tags @@ -1407,6 +1284,7 @@ def _add_module_to_dataset_plane_position( ) -> None: """Copies/adds attributes related to `plane_position` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1416,8 +1294,8 @@ def _add_module_to_dataset_plane_position( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1432,10 +1310,10 @@ def _add_module_to_dataset_plane_position( destination[plane_position_sequence_tg] = seq def _add_module_to_mf_plane_position(self) -> None: - """Copies/adds a/an `plane_position` multiframe module to + """Copies/adds a `plane_position` multiframe module to the current SOPClass from its single frame source. 
- """ + """ if (not self._has_plane_position(self._perframe_tags) and (self._has_plane_position(self._shared_tags) or self._has_plane_position(self._excluded_from_perframe_tags)) @@ -1449,12 +1327,12 @@ def _add_module_to_mf_plane_position(self) -> None: self._add_module_to_dataset_plane_position( self._legacy_datasets[i], item) - def _has_plane_orientation(self, tags: dict) -> bool: + def _has_plane_orientation(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `plane_orientation` present in source single frames. Otherwise returns false. - """ + """ image_orientation_patient_tg = tag_for_keyword( 'ImageOrientationPatient') return image_orientation_patient_tg in tags @@ -1466,6 +1344,7 @@ def _add_module_to_dataset_plane_orientation( ) -> None: """Copies/adds attributes related to `plane_orientation` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1475,8 +1354,8 @@ def _add_module_to_dataset_plane_orientation( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1489,10 +1368,10 @@ def _add_module_to_dataset_plane_orientation( destination[tg] = seq def _add_module_to_mf_plane_orientation(self) -> None: - """Copies/adds a/an `plane_orientation` multiframe module to + """Copies/adds a `plane_orientation` multiframe module to the current SOPClass from its single frame source. 
- """ + """ if (not self._has_plane_orientation(self._perframe_tags) and (self._has_plane_orientation(self._shared_tags) or self._has_plane_orientation(self._excluded_from_perframe_tags)) @@ -1506,12 +1385,12 @@ def _add_module_to_mf_plane_orientation(self) -> None: self._add_module_to_dataset_plane_orientation( self._legacy_datasets[i], item) - def _has_frame_voi_lut(self, tags: dict) -> bool: + def _has_frame_voi_lut(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `frame_voi_lut` present in source single frames. Otherwise returns false. - """ + """ window_width_tg = tag_for_keyword('WindowWidth') window_center_tg = tag_for_keyword('WindowCenter') window_center_width_explanation_tg = tag_for_keyword( @@ -1527,6 +1406,7 @@ def _add_module_to_dataset_frame_voi_lut( ) -> None: """Copies/adds attributes related to `frame_voi_lut` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1536,8 +1416,8 @@ def _add_module_to_dataset_frame_voi_lut( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1560,10 +1440,10 @@ def _add_module_to_dataset_frame_voi_lut( destination[tg] = seq def _add_module_to_mf_frame_voi_lut(self) -> None: - """Copies/adds a/an `frame_voi_lut` multiframe module to + """Copies/adds a `frame_voi_lut` multiframe module to the current SOPClass from its single frame source. 
- """ + """ if (not self._has_frame_voi_lut(self._perframe_tags) and (self._has_frame_voi_lut(self._shared_tags) or self._has_frame_voi_lut(self._excluded_from_perframe_tags)) @@ -1577,12 +1457,13 @@ def _add_module_to_mf_frame_voi_lut(self) -> None: self._add_module_to_dataset_frame_voi_lut( self._legacy_datasets[i], item) - def _has_pixel_value_transformation(self, tags: dict) -> bool: + def _has_pixel_value_transformation( + self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `pixel_value_transformation` present in source single frames. Otherwise returns false. - """ + """ rescale_intercept_tg = tag_for_keyword('RescaleIntercept') rescale_slope_tg = tag_for_keyword('RescaleSlope') rescale_type_tg = tag_for_keyword('RescaleType') @@ -1597,6 +1478,7 @@ def _add_module_to_dataset_pixel_value_transformation( ) -> None: """Copies/adds attributes related to `pixel_value_transformation` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1606,8 +1488,8 @@ def _add_module_to_dataset_pixel_value_transformation( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1661,10 +1543,10 @@ def _add_module_to_dataset_pixel_value_transformation( destination[tg] = seq def _add_module_to_mf_pixel_value_transformation(self) -> None: - """Copies/adds a/an `pixel_value_transformation` multiframe module to + """Copies/adds a `pixel_value_transformation` multiframe module to the current SOPClass from its single frame source. 
- """ + """ if (not self._has_pixel_value_transformation(self._perframe_tags) and (self._has_pixel_value_transformation(self._shared_tags) or self._has_pixel_value_transformation( @@ -1678,12 +1560,12 @@ def _add_module_to_mf_pixel_value_transformation(self) -> None: self._add_module_to_dataset_pixel_value_transformation( self._legacy_datasets[i], item) - def _has_referenced_image(self, tags: dict) -> bool: + def _has_referenced_image(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `referenced_image` present in source single frames. Otherwise returns false. - """ + """ return tag_for_keyword('ReferencedImageSequence') in tags def _add_module_to_dataset_referenced_image( @@ -1693,6 +1575,7 @@ def _add_module_to_dataset_referenced_image( ) -> None: """Copies/adds attributes related to `referenced_image` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1702,8 +1585,8 @@ def _add_module_to_dataset_referenced_image( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ self._copy_attrib_if_present(source, destination, 'ReferencedImageSequence', @@ -1711,10 +1594,10 @@ def _add_module_to_dataset_referenced_image( check_not_to_be_empty=False) def _add_module_to_mf_referenced_image(self) -> None: - """Copies/adds a/an `referenced_image` multiframe module to + """Copies/adds a `referenced_image` multiframe module to the current SOPClass from its single frame source. 
- """ + """ if (not self._has_referenced_image(self._perframe_tags) and (self._has_referenced_image(self._shared_tags) or self._has_referenced_image(self._excluded_from_perframe_tags)) @@ -1728,12 +1611,12 @@ def _add_module_to_mf_referenced_image(self) -> None: self._add_module_to_dataset_referenced_image( self._legacy_datasets[i], item) - def _has_derivation_image(self, tags: dict) -> bool: + def _has_derivation_image(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `derivation_image` present in source single frames. Otherwise returns false. - """ + """ return tag_for_keyword('SourceImageSequence') in tags def _add_module_to_dataset_derivation_image( @@ -1743,6 +1626,7 @@ def _add_module_to_dataset_derivation_image( ) -> None: """Copies/adds attributes related to `derivation_image` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1752,8 +1636,8 @@ def _add_module_to_dataset_derivation_image( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -1776,10 +1660,10 @@ def _add_module_to_dataset_derivation_image( destination[tg] = seq def _add_module_to_mf_derivation_image(self) -> None: - """Copies/adds a/an `derivation_image` multiframe module to + """Copies/adds a `derivation_image` multiframe module to the current SOPClass from its single frame source. 
- """ + """ if (not self._has_derivation_image(self._perframe_tags) and (self._has_derivation_image(self._shared_tags) or self._has_derivation_image(self._excluded_from_perframe_tags)) @@ -1793,22 +1677,22 @@ def _add_module_to_mf_derivation_image(self) -> None: self._add_module_to_dataset_derivation_image( self._legacy_datasets[i], item) - def _get_tag_used_dictionary(self, input: List[BaseTag]) -> dict: + def _get_tag_used_dictionary( + self, input: List[BaseTag]) -> Dict[BaseTag, bool]: """Returns a dictionary of input tags with a use flag + Parameters ---------- input: List[pydicom.tag.BaseTag] list of tags to build dictionary holding their used flag. + Returns ------- - dict: + dict: Dict[pydicom.tag.BaseTag, bool] a dictionary type of tags with used flag. """ - - out: dict = {} - for item in input: - out[item] = False + out = {item: False for item in input} return out def _add_module_to_dataset_unassigned_perframe( @@ -1818,6 +1702,7 @@ def _add_module_to_dataset_unassigned_perframe( ) -> None: """Copies/adds attributes related to `unassigned_perframe` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1827,8 +1712,8 @@ def _add_module_to_dataset_unassigned_perframe( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. 
- """ + """ item = Dataset() for tg in self._eligible_tags: self._copy_attrib_if_present(source, @@ -1846,7 +1731,6 @@ def _add_largest_smallest_pixel_value(self) -> None: current SOPClass object """ - ltg = tag_for_keyword("LargestImagePixelValue") from sys import float_info lval = float_info.min @@ -1859,7 +1743,7 @@ def _add_largest_smallest_pixel_value(self) -> None: lval = nval if lval < nval else lval if lval > float_info.min: self[ltg] = DataElement(ltg, 'SS', int(lval)) - # ========================== + stg = tag_for_keyword("SmallestImagePixelValue") sval = float_info.max if stg in self._perframe_tags: @@ -1875,8 +1759,9 @@ def _add_largest_smallest_pixel_value(self) -> None: stg = "SmallestImagePixelValue" def _add_module_to_mf_unassigned_perframe(self) -> None: - """Copies/adds a/an `unassigned_perframe` multiframe module to + """Copies/adds an `unassigned_perframe` multiframe module to the current SOPClass from its single frame source. + """ # first collect all not used tags # note that this is module is order dependent @@ -1896,6 +1781,7 @@ def _add_module_to_dataset_unassigned_shared( ) -> None: """Copies/adds attributes related to `unassigned_shared` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1905,8 +1791,8 @@ def _add_module_to_dataset_unassigned_shared( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() for tg, used in self._shared_tags.items(): if (not used and @@ -1923,34 +1809,35 @@ def _add_module_to_dataset_unassigned_shared( destination[tg] = seq def _add_module_to_mf_unassigned_shared(self) -> None: - """Copies/adds a/an `unassigned_shared` multiframe module to + """Copies/adds an `unassigned_shared` multiframe module to the current SOPClass from its single frame source. 
- """ + """ item = self._shared_functional_groups[0] self._add_module_to_dataset_unassigned_shared( self._legacy_datasets[0], item) def _create_empty_element(self, tg: BaseTag) -> DataElement: """Creates an empty dicom DataElement for input tag + Parameters ---------- tg: pydicom.tag.BaseTag input tag. + Returns ------- pydicom.dataelem.DataElement an empty DataElement created. """ - return DataElement(tg, dictionary_VR(tg), None) def _add_module_to_mf_empty_type2_attributes(self) -> None: """Adds empty type2 attributes to the current SOPClass to avoid type2 missing error. - """ + """ iod_name = _SOP_CLASS_UID_IOD_KEY_MAP[ self['SOPClassUID'].value] modules = IOD_MODULE_MAP[iod_name] @@ -1975,6 +1862,7 @@ def _add_module_to_dataset_conversion_source( ) -> None: """Copies/adds attributes related to `conversion_source` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -1984,8 +1872,8 @@ def _add_module_to_dataset_conversion_source( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() self._copy_attrib_if_present(source, item, @@ -2005,10 +1893,10 @@ def _add_module_to_dataset_conversion_source( destination[tg] = seq def _add_module_to_mf_conversion_source(self) -> None: - """Copies/adds a/an `conversion_source` multiframe module to + """Copies/adds a `conversion_source` multiframe module to the current SOPClass from its single frame source. - """ + """ for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_groups[i] self._add_module_to_dataset_conversion_source( @@ -2018,10 +1906,7 @@ def _add_module_to_mf_conversion_source(self) -> None: self.farthest_future_date_time def _build_slices_geometry_frame_content(self) -> None: - """Instantiates an object of GeometryOfSlice for each slice. 
- - """ - + """Instantiates an object of _GeometryOfSlice for each slice.""" frame_count = len(self._legacy_datasets) for i in range(0, frame_count): curr_frame = self._legacy_datasets[i] @@ -2046,12 +1931,7 @@ def _build_slices_geometry_frame_content(self) -> None: # slice_location_v = None \ # if 'SliceLocation' not in curr_frame\ # else curr_frame['SliceLocation'].value - rows_v = 0 \ - if 'Rows' not in curr_frame\ - else curr_frame['Rows'].value - columns_v = 0 \ - if 'Columns' not in curr_frame\ - else curr_frame['Columns'].value + if (image_orientation_patient_v is not None and image_position_patient_v is not None and pixel_spacing_v is not None): @@ -2064,32 +1944,31 @@ def _build_slices_geometry_frame_content(self) -> None: slice_thickness_v ]) tpl = array(image_position_patient_v) - dim = (rows_v, columns_v, 1) - self._slices.append(GeometryOfSlice(row, col, - tpl, voxel_spacing, dim)) + + self._slices.append(_GeometryOfSlice(row, col, + tpl, voxel_spacing)) else: logger.error( "Error in geometry. One or more required " "attributes are not available") - logger.error("\tImageOrientationPatient = {}".format( - image_orientation_patient_v)) - logger.error("\tImagePositionPatient = {}".format( - image_position_patient_v)) - logger.error("\tPixelSpacing = {}".format(pixel_spacing_v)) + logger.error( + "\tImageOrientationPatient =" + f" {image_orientation_patient_v}") + logger.error( + "\tImagePositionPatient =" + f" {image_position_patient_v}") + logger.error(f"\tPixelSpacing = {pixel_spacing_v}") self._slices = [] # clear the slices break def _are_all_slices_parallel_frame_content(self) -> bool: - """Returns true if all slices are parallel otherwise, false. 
- - """ - + """Returns true if all slices are parallel otherwise, false.""" slice_count = len(self._slices) if slice_count >= 2: last_slice = self._slices[0] for i in range(1, slice_count): curr_slice = self._slices[i] - if not GeometryOfSlice.are_parallel( + if not _GeometryOfSlice.are_parallel( curr_slice, last_slice, self._tolerance): return False last_slice = curr_slice @@ -2100,10 +1979,7 @@ def _are_all_slices_parallel_frame_content(self) -> bool: return False def _add_stack_info_frame_content(self) -> None: - """Adds stack information to the FrameContentSequence dicom attribute. - - """ - + """Adds stack info to the FrameContentSequence dicom attribute.""" self._build_slices_geometry_frame_content() round_digits = int(ceil(-log10(self._tolerance))) source_series_uid = '' @@ -2113,9 +1989,8 @@ def _add_stack_info_frame_content(self) -> None: not_round_dist = s.get_distance_along_origin() dist = round(not_round_dist, round_digits) logger.debug( - 'Slice location {} rounded by {} digits to {}'.format( - not_round_dist, round_digits, dist - )) + f'Slice location {not_round_dist} ' + f'rounded by {round_digits} digits to {dist}') if dist in self._slice_location_map: self._slice_location_map[dist].append(idx) else: @@ -2143,12 +2018,12 @@ def _add_stack_info_frame_content(self) -> None: "InStackPositionNumber", distance_index) distance_index += 1 - def _has_frame_content(self, tags: dict) -> bool: + def _has_frame_content(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to `frame_content` present in source single frames. Otherwise returns false. 
- """ + """ acquisition_date_time_tg = tag_for_keyword('AcquisitionDateTime') acquisition_date_tg = tag_for_keyword('AcquisitionDate') acquisition_time_tg = tag_for_keyword('AcquisitionTime') @@ -2163,6 +2038,7 @@ def _add_module_to_dataset_frame_content( ) -> None: """Copies/adds attributes related to `frame_content` to destination dicom Dataset + Parameters ---------- source: pydicom.dataset.Dataset @@ -2172,8 +2048,8 @@ def _add_module_to_dataset_frame_content( the destination dicom Dataset to which the modules attributes values are copied. The destination Dataset usually is an item from a perframe/shared functional group sequence. - """ + """ item = Dataset() fan_tg = tag_for_keyword('FrameAcquisitionNumber') an_tg = tag_for_keyword('AcquisitionNumber') @@ -2183,7 +2059,6 @@ def _add_module_to_dataset_frame_content( fan_val = 0 item[fan_tg] = DataElement(fan_tg, dictionary_VR(fan_tg), fan_val) self._mark_tag_as_used(an_tg) - # ---------------------------------------------------------------- acquisition_date_time_a = self._get_or_create_attribute( source, 'AcquisitionDateTime', self.earliest_date_time) # change the keyword to FrameAcquisitionDateTime: @@ -2224,7 +2099,6 @@ def _add_module_to_dataset_frame_content( frame_acquisition_date_time_a.value =\ DT(d_t.strftime('%Y%m%d%H%M%S')) item['FrameAcquisitionDateTime'] = frame_acquisition_date_time_a - # --------------------------------- self._copy_attrib_if_present( source, item, "AcquisitionDuration", "FrameAcquisitionDuration", @@ -2240,7 +2114,6 @@ def _add_module_to_dataset_frame_content( "FrameComments", check_not_to_be_perframe=False, check_not_to_be_empty=True) - # ----------------------------------- seq_tg = tag_for_keyword('FrameContentSequence') destination[seq_tg] = DataElement( seq_tg, dictionary_VR(seq_tg), DataElementSequence([item])) @@ -2250,7 +2123,6 @@ def _add_acquisition_info_frame_content(self) -> None: attribute. 
""" - for i in range(0, len(self._legacy_datasets)): item = self._perframe_functional_groups[i] self._add_module_to_dataset_frame_content( @@ -2263,25 +2135,19 @@ def _add_acquisition_info_frame_content(self) -> None: 'DT', self.earliest_frame_acquisition_date_time) def _add_module_to_mf_frame_content(self) -> None: - """Copies/adds a/an 'frame_content` multiframe module to + """Copies/adds a 'frame_content` multiframe module to the current SOPClass from its single frame source. - """ + """ self._add_acquisition_info_frame_content() self._add_stack_info_frame_content() def _is_other_byte_vr_pixel_data(self, vr: str) -> bool: - """checks if `PixelData` dicom value representation is OB. - - """ - + """checks if `PixelData` dicom value representation is OB.""" return vr[0] == 'O' and vr[1] == 'B' def _is_other_word_vr_pixel_data(self, vr: str) -> bool: - """checks if `PixelData` dicom value representation is OW. - - """ - + """checks if `PixelData` dicom value representation is OW.""" return vr[0] == 'O' and vr[1] == 'W' # def _has(self, tags: dict) -> bool: """ # image_position_patient_tg = tag_for_keyword('ImagePositionPatient') @@ -2294,6 +2160,7 @@ def _copy_data_pixel_data( ) -> None: """Copies contnet of PixelData from one frame and appends it to the content of PixelData for multiframe + Parameters ---------- src: bytearray @@ -2316,10 +2183,10 @@ def _copy_data_pixel_data( des.extend(src) def _add_module_to_mf_pixel_data(self) -> None: - """Copies/add`s a/an pixel_data` multiframe module to + """Copies/add`s a pixel_data` multiframe module to the current SOPClass from its single frame source. 
- """ + """ kw = 'NumberOfFrames' tg = tag_for_keyword(kw) self._frame_count = len(self._legacy_datasets) @@ -2358,10 +2225,10 @@ def _add_module_to_mf_pixel_data(self) -> None: self[kw] = mf_pixel_data def _add_module_to_mf_content_date_time(self) -> None: - """Copies/adds a/an `content_date_time` multiframe module to + """Copies/adds a `content_date_time` multiframe module to the current SOPClass from its single frame source. - """ + """ default_atrs = ["Acquisition", "Series", "Study"] for i in range(0, len(self._legacy_datasets)): src = self._legacy_datasets[i] @@ -2411,6 +2278,7 @@ def _add_data_element_to_target_contributing_equipment( ) -> None: """Add new data element related to ContributingEquipmentSequence to a target dataset(usually an item). + Parameters ---------- target: pydicom.dataset.Dataset @@ -2419,16 +2287,16 @@ def _add_data_element_to_target_contributing_equipment( keyword if the attribute being added. value: Any value if the attribute being added. - """ + """ tg = tag_for_keyword(kw) target[kw] = DataElement(tg, dictionary_VR(tg), value) def _add_module_to_mf_contributing_equipment(self) -> None: - """Copies/adds a/an `contributing_equipment` multiframe module to + """Copies/adds a `contributing_equipment` multiframe module to the current SOPClass from its single frame source. - """ + """ code_value_tg = tag_for_keyword('CodeValue') code_meaning_tg = tag_for_keyword('CodeMeaning') coding_scheme_designator_tg = tag_for_keyword('CodingSchemeDesignator') @@ -2475,10 +2343,10 @@ def _add_module_to_mf_contributing_equipment(self) -> None: self[tg] = DataElement(tg, 'SQ', DataElementSequence([item])) def _add_module_to_mf_instance_creation_date_time(self) -> None: - """Copies/adds a/an `instance_creation_date_time` multiframe module to + """Copies/adds an `instance_creation_date_time` multiframe module to the current SOPClass from its single frame source. 
- """ + """ nnooww = datetime.now() n_d = DA(nnooww.date().strftime('%Y%m%d')) n_t = TM(nnooww.time().strftime('%H%M%S')) @@ -2489,23 +2357,25 @@ def _add_module_to_mf_instance_creation_date_time(self) -> None: self[kw] = DataElement( tag_for_keyword(kw), 'TM', n_t) - @classmethod - def default_sort_key(cls, x: Dataset) -> tuple: + @staticmethod + def default_sort_key( + x: Dataset) -> Tuple[Union[int, str, UID], ...]: """The default sort key to sort all single frames before conversion + Parameters ---------- x: pydicom.dataset.Dataset input Dataset to be sorted. + Returns ------- - tuple: + tuple: Tuple[Union[int, str, UID]] a sort key of three elements. 1st priority: SeriesNumber 2nd priority: InstanceNumber 3rd priority: SOPInstanceUID """ - out: tuple = tuple() if 'SeriesNumber' in x: out += (x['SeriesNumber'].value, ) @@ -2516,10 +2386,7 @@ def default_sort_key(cls, x: Dataset) -> tuple: return out def _clear_build_blocks(self) -> None: - """Clears the array containing all methods for multiframe conversion - - """ - + """Clears the array containing all methods for multiframe conversion""" self.__build_blocks = [] def _add_common_ct_pet_mr_build_blocks(self) -> None: @@ -2527,7 +2394,6 @@ def _add_common_ct_pet_mr_build_blocks(self) -> None: put them in place. """ - blocks = [ [self._add_module_to_mf_image_pixel, None], [self._add_module_to_mf_composite_instance_contex, None], @@ -2557,7 +2423,6 @@ def _add_ct_specific_build_blocks(self) -> None: put them in place. 
""" - blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -2574,7 +2439,6 @@ def _add_mr_specific_build_blocks(self) -> None: put them in place """ - blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -2591,7 +2455,6 @@ def _add_pet_specific_build_blocks(self) -> None: put them in place """ - blocks = [ [ self._add_module_to_mf_common_ct_mr_pet_image_description, @@ -2607,7 +2470,6 @@ def _add_build_blocks_for_mr(self) -> None: put them in place """ - self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_mr_specific_build_blocks() @@ -2617,7 +2479,6 @@ def _add_build_blocks_for_pet(self) -> None: put them in place """ - self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_pet_specific_build_blocks() @@ -2627,7 +2488,6 @@ def _add_build_blocks_for_ct(self) -> None: put them in place. """ - self._clear_build_blocks() self._add_common_ct_pet_mr_build_blocks() self._add_ct_specific_build_blocks() @@ -2637,7 +2497,6 @@ def _convert2multiframe(self) -> None: multi-frame. 
""" - logger.debug('Start singleframe to multiframe conversion') for fun, args in self.__build_blocks: if not args: @@ -2659,8 +2518,10 @@ def __init__( sop_instance_uid: str, instance_number: int, sort_key: Optional[Callable] = None, + **kwargs: Any, ) -> None: """ + Parameters ---------- legacy_datasets: Sequence[pydicom.dataset.Dataset] @@ -2674,10 +2535,10 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance - sort_key: Callable, optional + sort_key: Optional[Callable], optional A function by which the single-frame instances will be sorted - """ + """ try: ref_ds = legacy_datasets[0] except IndexError: @@ -2714,8 +2575,10 @@ def __init__( sop_instance_uid: str, instance_number: int, sort_key: Optional[Callable] = None, + **kwargs: Any, ) -> None: """ + Parameters ---------- legacy_datasets: Sequence[pydicom.dataset.Dataset] @@ -2729,10 +2592,10 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance - sort_key: Callable, optional + sort_key: Optional[Callable], optional A function by which the single-frame instances will be sorted - """ + """ try: ref_ds = legacy_datasets[0] except IndexError: @@ -2769,8 +2632,10 @@ def __init__( sop_instance_uid: str, instance_number: int, sort_key: Optional[Callable] = None, + **kwargs: Any, ) -> None: """ + Parameters ---------- legacy_datasets: Sequence[pydicom.dataset.Dataset] @@ -2784,10 +2649,10 @@ def __init__( UID that should be assigned to the instance instance_number: int Number that should be assigned to the instance - sort_key: Callable, optional + sort_key: Optional[Callable], optional A function by which the single-frame instances will be sorted - """ + """ try: ref_ds = legacy_datasets[0] except IndexError: diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 3c4a56a6..594f9039 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ 
-456,3 +456,81 @@ def map_coordinate_into_pixel_matrix( pixel_matrix_coordinates[1], pixel_matrix_coordinates[2], ) + + +class _GeometryOfSlice: + + """A class for checking dicom slices geometry/parallelity""" + + def __init__(self, + row_vector: np.ndarray, + col_vector: np.ndarray, + top_left_corner_pos: np.ndarray, + voxel_spacing: np.ndarray) -> None: + """ + + Parameters + ---------- + row_vector: numpy.ndarray + 3D vector representing row of the input slice + col_vector: numpy.ndarray + 3D vector representing column the input slice + top_left_corner_pos: numpy.ndarray + 3D point representing top left corner position of the input slice + voxel_spacing: numpy.ndarray + Three element array. 1st and 2nd copied from PixelSpacing and the + 3rd copied from SliceThickness + + """ + + self.row_vector = row_vector + self.col_vector = col_vector + self.top_left_corner_position = top_left_corner_pos + self.voxel_spacing = voxel_spacing + + def get_normal_vector(self) -> np.ndarray: + """Returns the normal vector of the input slice + + """ + + n: np.ndarray = np.cross(self.row_vector, self.col_vector) + n[2] = -n[2] + return n + + def get_distance_along_origin(self) -> float: + """Returns the shortest distance of the slice from the origin + + """ + + n = self.get_normal_vector() + return float( + np.dot(self.top_left_corner_position, n)) + + @staticmethod + def are_parallel( + slice1: '_GeometryOfSlice', + slice2: '_GeometryOfSlice', + tolerance: float = 0.0001, + ) -> bool: + """Returns False if two slices are not parallel else True + + """ + + if (not isinstance(slice1, _GeometryOfSlice) or + not isinstance(slice2, _GeometryOfSlice)): + raise TypeError( + 'slice1 and slice2 are not of the same ' + f'type: type(slice1) = {type(slice1)} and ' + f'type(slice2) = {type(slice2)}') + # logger.warning( + # 'slice1 and slice2 are not of the same ' + # 'type: type(slice1) = {} and type(slice2) = {}'.format( + # type(slice1), type(slice2))) + # return False + else: + n1: 
np.ndarray = slice1.get_normal_vector() + n2: np.ndarray = slice2.get_normal_vector() + for el1, el2 in zip(n1, n2): + if abs(el1 - el2) > tolerance: + return False + return True diff --git a/tests/test_legacy.py b/tests/test_legacy.py index aec14d3a..0244f561 100644 --- a/tests/test_legacy.py +++ b/tests/test_legacy.py @@ -312,8 +312,8 @@ def setUp(self) -> None: def test_attribute_equality(self) -> None: for vr, [v1, v2, v3] in self.data.items(): - assert sop.Ù€DicomHelper.isequal(v1.value, v2.value) is True - assert sop.Ù€DicomHelper.isequal(v1.value, v3.value) is False + assert sop._DicomHelper.isequal(v1.value, v2.value) is True + assert sop._DicomHelper.isequal(v1.value, v3.value) is False class TestFrameSetCollection(unittest.TestCase): From 64190d71222712eebe1c3bef8def4ecb6408f6fc Mon Sep 17 00:00:00 2001 From: afshin Date: Sun, 9 May 2021 19:50:31 -0400 Subject: [PATCH 39/44] Swapped voctors in normal calculation --- src/highdicom/spatial.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index c9c1f04e..2a78ee89 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -531,8 +531,9 @@ def get_normal_vector(self) -> np.ndarray: """ - n: np.ndarray = np.cross(self.row_vector, self.col_vector) - n[2] = -n[2] + n: np.ndarray = np.cross(self.col_vector, self.row_vector) + # n: np.ndarray = np.cross(self.row_vector, self.col_vector) + # n[2] = -n[2] return n def get_distance_along_origin(self) -> float: From ee44fd18016bd7659c8fa4e700ec1163a03ae725 Mon Sep 17 00:00:00 2001 From: afshin Date: Mon, 10 May 2021 10:49:01 -0400 Subject: [PATCH 40/44] Corrected how the normal vector was calculated --- src/highdicom/spatial.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 2a78ee89..b77e7817 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -520,7 +520,6 @@ def __init__(self, 
3rd copied from SliceThickness """ - self.row_vector = row_vector self.col_vector = col_vector self.top_left_corner_position = top_left_corner_pos @@ -530,17 +529,13 @@ def get_normal_vector(self) -> np.ndarray: """Returns the normal vector of the input slice """ - - n: np.ndarray = np.cross(self.col_vector, self.row_vector) - # n: np.ndarray = np.cross(self.row_vector, self.col_vector) - # n[2] = -n[2] + n: np.ndarray = np.cross(self.row_vector, self.col_vector) return n def get_distance_along_origin(self) -> float: """Returns the shortest distance of the slice from the origin """ - n = self.get_normal_vector() return float( np.dot(self.top_left_corner_position, n)) @@ -554,7 +549,6 @@ def are_parallel( """Returns False if two slices are not parallel else True """ - if (not isinstance(slice1, _GeometryOfSlice) or not isinstance(slice2, _GeometryOfSlice)): raise TypeError( From 6c6fbe0b4fc11ee40cadf524ae1a0ac492a8669f Mon Sep 17 00:00:00 2001 From: afshin Date: Wed, 12 May 2021 11:03:13 -0400 Subject: [PATCH 41/44] Moved FrameSet, FrameSetCollection and _DicomHelper classes into highdicom.utils --- src/highdicom/legacy/sop.py | 401 ++---------------------------------- src/highdicom/utils.py | 373 ++++++++++++++++++++++++++++++++- tests/test_legacy.py | 140 ------------- tests/test_utils.py | 144 ++++++++++++- 4 files changed, 527 insertions(+), 531 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index a0dec55f..d9c1ea04 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -9,12 +9,11 @@ from copy import deepcopy from numpy import log10, array, ceil -from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag +from pydicom.datadict import tag_for_keyword, dictionary_VR from pydicom.dataset import Dataset from pydicom.tag import Tag, BaseTag from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DataElementSequence -from pydicom.multival import MultiValue from 
pydicom.valuerep import DT, DA, TM from pydicom.uid import UID @@ -22,6 +21,7 @@ from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP from highdicom.spatial import _GeometryOfSlice +from highdicom.utils import FrameSetCollection logger = logging.getLogger(__name__) @@ -44,368 +44,6 @@ } -class _DicomHelper: - - """A class for checking dicom tags and comparing dicom attributes""" - - @staticmethod - def istag_file_meta_information_group(t: BaseTag) -> bool: - return t.group == 0x0002 - - @staticmethod - def istag_repeating_group(t: BaseTag) -> bool: - g = t.group - return (g >= 0x5000 and g <= 0x501e) or\ - (g >= 0x6000 and g <= 0x601e) - - @staticmethod - def istag_group_length(t: BaseTag) -> bool: - return t.element == 0 - - @staticmethod - def isequal(v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool: - from pydicom.valuerep import DSfloat - - def is_equal_float(x1: float, x2: float) -> bool: - return abs(x1 - x2) < float_tolerance - if type(v1) != type(v2): - return False - if isinstance(v1, DataElementSequence): - for item1, item2 in zip(v1, v2): - if not _DicomHelper.isequal_dicom_dataset(item1, item2): - return False - if not isinstance(v1, MultiValue): - v11 = [v1] - v22 = [v2] - else: - v11 = v1 - v22 = v2 - if len(v11) != len(v22): - return False - for xx, yy in zip(v11, v22): - if isinstance(xx, DSfloat) or isinstance(xx, float): - if not is_equal_float(xx, yy): - return False - else: - if xx != yy: - return False - return True - - @staticmethod - def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: - """Checks if two dicom dataset have the same value in all attributes - - Parameters - ---------- - ds1: pydicom.dataset.Dataset - 1st dicom dataset - ds2: pydicom.dataset.Dataset - 2nd dicom dataset - - Returns - ------- - True if dicom datasets are equal otherwise False - - """ - if type(ds1) != type(ds2): - return False - if not isinstance(ds1, Dataset): - return False - for k1, elem1 in 
ds1.items(): - if k1 not in ds2: - return False - elem2 = ds2[k1] - if not _DicomHelper.isequal(elem2.value, elem1.value): - return False - return True - - @staticmethod - def tag2kwstr(tg: BaseTag) -> str: - """Converts tag to keyword and (group, element) form""" - return '{}-{:32.32s}'.format( - str(tg), keyword_for_tag(tg)) - - -class FrameSet: - - """ - - A class containing the dicom frames that hold equal distinguishing - attributes to detect all perframe and shared dicom attributes - """ - - def __init__( - self, - single_frame_list: List[Dataset], - distinguishing_tags: List[BaseTag], - ) -> None: - """ - - Parameters - ---------- - single_frame_list: List[pydicom.dataset.Dataset] - list of single frames that have equal distinguising attributes - distinguishing_tags: List[pydicom.tag.BaseTag] - list of distinguishing attributes tags - - """ - self._frames = single_frame_list - self._distinguishing_attributes_tags = distinguishing_tags - tmp = [ - tag_for_keyword('AcquisitionDateTime'), - tag_for_keyword('AcquisitionDate'), - tag_for_keyword('AcquisitionTime'), - tag_for_keyword('SpecificCharacterSet')] - self._excluded_from_perframe_tags =\ - self._distinguishing_attributes_tags + tmp - self._perframe_tags: List[BaseTag] = [] - self._shared_tags: List[BaseTag] = [] - self._find_per_frame_and_shared_tags() - - @property - def frames(self) -> List[Dataset]: - return self._frames[:] - - @property - def distinguishing_attributes_tags(self) -> List[Tag]: - return self._distinguishing_attributes_tags[:] - - @property - def excluded_from_perframe_tags(self) -> List[Tag]: - return self._excluded_from_perframe_tags[:] - - @property - def perframe_tags(self) -> List[Tag]: - return self._perframe_tags[:] - - @property - def shared_tags(self) -> List[Tag]: - return self._shared_tags[:] - - @property - def series_instance_uid(self) -> UID: - """Returns the series instance uid of the FrameSet""" - return self._frames[0].SeriesInstanceUID - - @property - def 
study_instance_uid(self) -> UID: - """Returns the study instance uid of the FrameSet""" - return self._frames[0].StudyInstanceUID - - def get_sop_instance_uid_list(self) -> list: - """Returns a list containing all SOPInstanceUID of the FrameSet""" - output_list = [f.SOPInstanceUID for f in self._frames] - return output_list - - def get_sop_class_uid(self) -> UID: - """Returns the sop class uid of the FrameSet""" - return self._frames[0].SOPClassUID - - def _find_per_frame_and_shared_tags(self) -> None: - """Detects and collects all shared and perframe attributes""" - rough_shared: dict = {} - sfs = self.frames - for ds in sfs: - for ttag, elem in ds.items(): - if (not ttag.is_private and not - _DicomHelper.istag_file_meta_information_group(ttag) and not - _DicomHelper.istag_repeating_group(ttag) and not - _DicomHelper.istag_group_length(ttag) and not - self._istag_excluded_from_perframe(ttag) and - ttag != tag_for_keyword('PixelData')): - elem = ds[ttag] - if ttag not in self._perframe_tags: - self._perframe_tags.append(ttag) - if ttag in rough_shared: - rough_shared[ttag].append(elem.value) - else: - rough_shared[ttag] = [elem.value] - to_be_removed_from_shared = [] - for ttag, v in rough_shared.items(): - v = rough_shared[ttag] - if len(v) < len(self.frames): - to_be_removed_from_shared.append(ttag) - else: - all_values_are_equal = all( - _DicomHelper.isequal(v_i, v[0]) for v_i in v) - if not all_values_are_equal: - to_be_removed_from_shared.append(ttag) - for t in to_be_removed_from_shared: - del rough_shared[t] - for t, v in rough_shared.items(): - self._shared_tags.append(t) - self._perframe_tags.remove(t) - - def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: - return t in self._excluded_from_perframe_tags - - -class FrameSetCollection: - - """A class to extract framesets based on distinguishing dicom attributes""" - - def __init__(self, single_frame_list: Sequence[Dataset]) -> None: - """Forms framesets based on a list of distinguishing attributes. 
- The list of "distinguishing" attributes that are used to determine - commonality is currently fixed, and includes the unique identifying - attributes at the Patient, Study, Equipment levels, the Modality and - SOP Class, and ImageType as well as the characteristics of the Pixel - Data, and those attributes that for cross-sectional images imply - consistent sampling, such as ImageOrientationPatient, PixelSpacing and - SliceThickness, and in addition AcquisitionContextSequence and - BurnedInAnnotation. - - Parameters - ---------- - single_frame_list: Sequence[pydicom.dataset.Dataset] - list of mixed or non-mixed single frame dicom images - - Notes - ----- - Note that Series identification, specifically SeriesInstanceUID is NOT - a distinguishing attribute; i.e. FrameSets may span Series - - """ - self.mixed_frames = single_frame_list - self.mixed_frames_copy = self.mixed_frames[:] - self._distinguishing_attribute_keywords = [ - 'PatientID', - 'PatientName', - 'StudyInstanceUID', - 'FrameOfReferenceUID', - 'Manufacturer', - 'InstitutionName', - 'InstitutionAddress', - 'StationName', - 'InstitutionalDepartmentName', - 'ManufacturerModelName', - 'DeviceSerialNumber', - 'SoftwareVersions', - 'GantryID', - 'PixelPaddingValue', - 'Modality', - 'ImageType', - 'BurnedInAnnotation', - 'SOPClassUID', - 'Rows', - 'Columns', - 'BitsStored', - 'BitsAllocated', - 'HighBit', - 'PixelRepresentation', - 'PhotometricInterpretation', - 'PlanarConfiguration', - 'SamplesPerPixel', - 'ProtocolName', - 'ImageOrientationPatient', - 'PixelSpacing', - 'SliceThickness', - 'AcquisitionContextSequence'] - self._frame_sets: List[FrameSet] = [] - frame_counts = [] - frameset_counter = 0 - while len(self.mixed_frames_copy) != 0: - frameset_counter += 1 - x = self._find_all_similar_to_first_datasets() - self._frame_sets.append(FrameSet(x[0], x[1])) - frame_counts.append(len(x[0])) - # log information - logger.debug( - f"Frameset({frameset_counter:02d}) " - "including {len(x[0]):03d} frames") - 
logger.debug('\t Distinguishing tags:') - for dg_i, dg_tg in enumerate(x[1], 1): - logger.debug( - f'\t\t{dg_i:02d}/{len(x[1])})\t{str(dg_tg)}-' - '{keyword_for_tag(dg_tg):32.32s} = ' - '{str(x[0][0][dg_tg].value):32.32s}') - logger.debug('\t dicom datasets in this frame set:') - for dicom_i, dicom_ds in enumerate(x[0], 1): - logger.debug( - f'\t\t{dicom_i}/{len(x[0])})\t ' - '{dicom_ds["SOPInstanceUID"]}') - frames = '' - for i, f_count in enumerate(frame_counts, 1): - frames += '{: 2d}){:03d}\t'.format(i, f_count) - frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( - len(frame_counts), len(self.mixed_frames)) + frames - logger.info(frames) - self._excluded_from_perframe_tags = {} - for kwkw in self._distinguishing_attribute_keywords: - self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False - excluded_kws = [ - 'AcquisitionDateTime' - 'AcquisitionDate' - 'AcquisitionTime' - 'SpecificCharacterSet' - ] - for kwkw in excluded_kws: - self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False - - def _find_all_similar_to_first_datasets(self) -> tuple: - """Takes the fist instance from mixed-frames and finds all dicom images - that have the same distinguishing attributes. 
- - """ - similar_ds: List[Dataset] = [self.mixed_frames_copy[0]] - distinguishing_tags_existing = [] - distinguishing_tags_missing = [] - self.mixed_frames_copy = self.mixed_frames_copy[1:] - for kw in self._distinguishing_attribute_keywords: - tg = tag_for_keyword(kw) - if tg in similar_ds[0]: - distinguishing_tags_existing.append(tg) - else: - distinguishing_tags_missing.append(tg) - logger_msg = set() - for ds in self.mixed_frames_copy: - all_equal = True - for tg in distinguishing_tags_missing: - if tg in ds: - logger_msg.add( - '{} is missing in all but {}'.format( - _DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) - all_equal = False - break - if not all_equal: - continue - for tg in distinguishing_tags_existing: - ref_val = similar_ds[0][tg].value - if tg not in ds: - all_equal = False - break - new_val = ds[tg].value - if not _DicomHelper.isequal(ref_val, new_val): - logger_msg.add( - 'Inequality on distinguishing ' - 'attribute{} -> {} != {} \n series uid = {}'.format( - _DicomHelper.tag2kwstr(tg), ref_val, new_val, - ds.SeriesInstanceUID)) - all_equal = False - break - if all_equal: - similar_ds.append(ds) - for msg_ in logger_msg: - logger.info(msg_) - for ds in similar_ds: - if ds in self.mixed_frames_copy: - self.mixed_frames_copy = [ - nds for nds in self.mixed_frames_copy if nds != ds] - return (similar_ds, distinguishing_tags_existing) - - @property - def distinguishing_attribute_keywords(self) -> List[str]: - """Returns the list of all distinguising attributes found.""" - - return self._distinguishing_attribute_keywords[:] - - @property - def frame_sets(self) -> List[FrameSet]: - """Returns the list of all FrameSets found.""" - - return self._frame_sets - - class _CommonLegacyConvertedEnhanceImage(SOPClass): """SOP class for common Legacy Converted Enhanced instances.""" @@ -512,7 +150,6 @@ def __init__( for item in sorted(self._legacy_datasets, key=sort_key): new_ds.append(item) - # self = multi_frame_output self._module_excepted_list: dict = { 
"patient": [], "clinical-trial-subject": [], @@ -825,7 +462,7 @@ def _add_module_to_mf_image_pixel(self) -> None: module, excepted_attributes=except_at, check_not_to_be_empty=False, - check_not_to_be_perframe=True) # don't check the perframe set + check_not_to_be_perframe=True) def _add_module_to_mf_enhanced_common_image(self) -> None: """Copies/adds an `enhanced_common_image` multiframe module to @@ -869,7 +506,7 @@ def _add_module_to_mf_enhanced_common_image(self) -> None: if tag_for_keyword('PresentationLUTShape') not in self._perframe_tags: # actually should really invert the pixel data if MONOCHROME1, - # since only MONOCHROME2 is permitted : ( + # since only MONOCHROME2 is permitted :( # also, do not need to check if PhotometricInterpretation is # per-frame, since a distinguishing attribute phmi_kw = 'PhotometricInterpretation' @@ -905,7 +542,6 @@ def _add_module_to_mf_enhanced_pet_image(self) -> None: the current SOPClass from its single frame source. """ - # David's code doesn't hold anything for this module ... should ask him kw = 'ContentQualification' tg = tag_for_keyword(kw) elem = self._get_or_create_attribute( @@ -926,7 +562,7 @@ def _add_module_to_mf_enhanced_mr_image(self) -> None: if 'ResonantNucleus' not in self: # derive from ImagedNucleus, which is the one used in legacy MR # IOD, but does not have a standard list of defined terms ... - # (could check these : () + # (could check these :() self._copy_attrib_if_present( self._legacy_datasets[0], self, @@ -1107,7 +743,7 @@ def _add_module_to_mf_composite_instance_contex(self) -> None: module_name, excepted_attributes=excepted_a, check_not_to_be_empty=False, - check_not_to_be_perframe=True) # don't check the perframe set + check_not_to_be_perframe=True) def _add_module_to_dataset_frame_anatomy( self, @@ -1128,8 +764,6 @@ def _add_module_to_dataset_frame_anatomy( from a perframe/shared functional group sequence. """ - # David's code is more complicated than mine - # Should check it out later. 
fa_seq_tg = tag_for_keyword('FrameAnatomySequence') item = Dataset() self._copy_attrib_if_present(source, item, 'AnatomicRegionSequence', @@ -1523,9 +1157,6 @@ def _add_module_to_dataset_pixel_value_transformation( break if not containes_localizer: value = "HU" - # elif modality == 'PT': - # value = 'US' if 'Units' not in source\ - # else source['Units'].value else: value = 'US' tg = tag_for_keyword('RescaleType') @@ -1928,10 +1559,6 @@ def _build_slices_geometry_frame_content(self) -> None: slice_thickness_v = 0.0 else: slice_thickness_v = curr_frame['SliceThickness'].value - # slice_location_v = None \ - # if 'SliceLocation' not in curr_frame\ - # else curr_frame['SliceLocation'].value - if (image_orientation_patient_v is not None and image_position_patient_v is not None and pixel_spacing_v is not None): @@ -2074,8 +1701,6 @@ def _add_module_to_dataset_frame_content( source, 'AcquisitionTime', self.earliest_time) d = acquisition_date_a.value t = acquisition_time_a.value - # frame_acquisition_date_time_a.value = (DT(d.strftime('%Y%m%d') + - # t.strftime('%H%M%S'))) frame_acquisition_date_time_a.value = DT(str(d) + str(t)) if frame_acquisition_date_time_a.value > self.earliest_date_time: if (frame_acquisition_date_time_a.value < @@ -2090,8 +1715,6 @@ def _add_module_to_dataset_frame_content( trigger_time_in_millisecond = int(trigger_time_a.value) if trigger_time_in_millisecond > 0: t_delta = timedelta(trigger_time_in_millisecond) - # this is so ridiculous. 
I'm not able to convert - # the DT to datetime (cast to superclass) d_t = datetime.combine( frame_acquisition_date_time_a.value.date(), frame_acquisition_date_time_a.value.time()) @@ -2149,9 +1772,6 @@ def _is_other_byte_vr_pixel_data(self, vr: str) -> bool: def _is_other_word_vr_pixel_data(self, vr: str) -> bool: """checks if `PixelData` dicom value representation is OW.""" return vr[0] == 'O' and vr[1] == 'W' - # def _has(self, tags: dict) -> bool: """ - # image_position_patient_tg = tag_for_keyword('ImagePositionPatient') - # return image_position_patient_tg in tags def _copy_data_pixel_data( self, @@ -2557,7 +2177,8 @@ def __init__( series_number=series_number, sop_instance_uid=sop_instance_uid, instance_number=instance_number, - sort_key=sort_key + sort_key=sort_key, + **kwargs ) self._add_build_blocks_for_ct() self._convert2multiframe() @@ -2614,7 +2235,8 @@ def __init__( series_number=series_number, sop_instance_uid=sop_instance_uid, instance_number=instance_number, - sort_key=sort_key + sort_key=sort_key, + **kwargs ) self._add_build_blocks_for_pet() self._convert2multiframe() @@ -2671,7 +2293,8 @@ def __init__( series_number=series_number, sop_instance_uid=sop_instance_uid, instance_number=instance_number, - sort_key=sort_key + sort_key=sort_key, + **kwargs ) self._add_build_blocks_for_mr() self._convert2multiframe() diff --git a/src/highdicom/utils.py b/src/highdicom/utils.py index eea35425..7282f686 100644 --- a/src/highdicom/utils.py +++ b/src/highdicom/utils.py @@ -1,14 +1,23 @@ +import logging import itertools -from typing import Iterator, List, Optional, Sequence, Tuple +from typing import Iterator, List, Optional, Sequence, Tuple, Any import numpy as np +from pydicom.datadict import tag_for_keyword, keyword_for_tag from pydicom.dataset import Dataset +from pydicom.tag import Tag, BaseTag +from pydicom.sequence import Sequence as DataElementSequence +from pydicom.multival import MultiValue +from pydicom.uid import UID from highdicom.content import 
class _DicomHelper:

    """A class for checking dicom tags and comparing dicom attributes"""

    @staticmethod
    def istag_file_meta_information_group(t: BaseTag) -> bool:
        # File meta information elements all live in group 0x0002.
        return t.group == 0x0002

    @staticmethod
    def istag_repeating_group(t: BaseTag) -> bool:
        # Repeating groups (curve/overlay data) occupy the even groups
        # 0x5000-0x501e and 0x6000-0x601e.
        g = t.group
        return (g >= 0x5000 and g <= 0x501e) or\
            (g >= 0x6000 and g <= 0x601e)

    @staticmethod
    def istag_group_length(t: BaseTag) -> bool:
        # Group length elements always have element number 0x0000.
        return t.element == 0

    @staticmethod
    def isequal(v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool:
        """Checks if two dicom attribute values are equal.

        Floats are compared within ``float_tolerance``; sequences are
        compared item by item via :func:`isequal_dicom_dataset`;
        multi-valued attributes are compared element-wise.

        Parameters
        ----------
        v1: Any
            first attribute value
        v2: Any
            second attribute value
        float_tolerance: float
            absolute tolerance used for float comparison

        Returns
        -------
        True if the two values are equal otherwise False

        """
        from pydicom.valuerep import DSfloat

        def is_equal_float(x1: float, x2: float) -> bool:
            return abs(x1 - x2) < float_tolerance
        if type(v1) != type(v2):
            return False
        if isinstance(v1, DataElementSequence):
            # Fix: compare lengths explicitly and return the item-wise
            # result here. The original fell through after this loop and
            # re-compared the Dataset items with `!=` (relying on
            # pydicom's Dataset.__eq__), and `zip` silently truncated
            # sequences of unequal length.
            if len(v1) != len(v2):
                return False
            for item1, item2 in zip(v1, v2):
                if not _DicomHelper.isequal_dicom_dataset(item1, item2):
                    return False
            return True
        if not isinstance(v1, MultiValue):
            v11 = [v1]
            v22 = [v2]
        else:
            v11 = v1
            v22 = v2
        if len(v11) != len(v22):
            return False
        for xx, yy in zip(v11, v22):
            if isinstance(xx, DSfloat) or isinstance(xx, float):
                if not is_equal_float(xx, yy):
                    return False
            else:
                if xx != yy:
                    return False
        return True

    @staticmethod
    def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool:
        """Checks if two dicom dataset have the same value in all attributes

        Parameters
        ----------
        ds1: pydicom.dataset.Dataset
            1st dicom dataset
        ds2: pydicom.dataset.Dataset
            2nd dicom dataset

        Returns
        -------
        True if dicom datasets are equal otherwise False

        """
        if type(ds1) != type(ds2):
            return False
        if not isinstance(ds1, Dataset):
            return False
        for k1, elem1 in ds1.items():
            if k1 not in ds2:
                return False
            elem2 = ds2[k1]
            if not _DicomHelper.isequal(elem2.value, elem1.value):
                return False
        return True

    @staticmethod
    def tag2kwstr(tg: BaseTag) -> str:
        """Converts tag to keyword and (group, element) form"""
        return '{}-{:32.32s}'.format(
            str(tg), keyword_for_tag(tg))


class FrameSet:

    """
    A class containing the dicom frames that hold equal distinguishing
    attributes to detect all perframe and shared dicom attributes
    """

    def __init__(
        self,
        single_frame_list: List[Dataset],
        distinguishing_tags: List[BaseTag],
    ) -> None:
        """
        Parameters
        ----------
        single_frame_list: List[pydicom.dataset.Dataset]
            list of single frames that have equal distinguishing attributes
        distinguishing_tags: List[pydicom.tag.BaseTag]
            list of distinguishing attributes tags

        """
        self._frames = single_frame_list
        self._distinguishing_attributes_tags = distinguishing_tags
        # Acquisition timing attributes and the character set vary per
        # instance but must never end up in the per-frame groups.
        tmp = [
            tag_for_keyword('AcquisitionDateTime'),
            tag_for_keyword('AcquisitionDate'),
            tag_for_keyword('AcquisitionTime'),
            tag_for_keyword('SpecificCharacterSet')]
        self._excluded_from_perframe_tags =\
            self._distinguishing_attributes_tags + tmp
        self._perframe_tags: List[BaseTag] = []
        self._shared_tags: List[BaseTag] = []
        self._find_per_frame_and_shared_tags()

    @property
    def frames(self) -> List[Dataset]:
        return self._frames[:]

    @property
    def distinguishing_attributes_tags(self) -> List[Tag]:
        return self._distinguishing_attributes_tags[:]

    @property
    def excluded_from_perframe_tags(self) -> List[Tag]:
        return self._excluded_from_perframe_tags[:]

    @property
    def perframe_tags(self) -> List[Tag]:
        return self._perframe_tags[:]

    @property
    def shared_tags(self) -> List[Tag]:
        return self._shared_tags[:]

    @property
    def series_instance_uid(self) -> UID:
        """Returns the series instance uid of the FrameSet"""
        return self._frames[0].SeriesInstanceUID

    @property
    def study_instance_uid(self) -> UID:
        """Returns the study instance uid of the FrameSet"""
        return self._frames[0].StudyInstanceUID

    def get_sop_instance_uid_list(self) -> list:
        """Returns a list containing all SOPInstanceUID of the FrameSet"""
        output_list = [f.SOPInstanceUID for f in self._frames]
        return output_list

    def get_sop_class_uid(self) -> UID:
        """Returns the sop class uid of the FrameSet"""
        return self._frames[0].SOPClassUID

    def _find_per_frame_and_shared_tags(self) -> None:
        """Detects and collects all shared and perframe attributes"""
        # First pass: every eligible public tag starts out as per-frame,
        # while rough_shared accumulates each tag's values across frames.
        rough_shared: dict = {}
        sfs = self.frames
        for ds in sfs:
            for ttag, elem in ds.items():
                if (not ttag.is_private and not
                        _DicomHelper.istag_file_meta_information_group(
                            ttag) and not
                        _DicomHelper.istag_repeating_group(ttag) and not
                        _DicomHelper.istag_group_length(ttag) and not
                        self._istag_excluded_from_perframe(ttag) and
                        ttag != tag_for_keyword('PixelData')):
                    elem = ds[ttag]
                    if ttag not in self._perframe_tags:
                        self._perframe_tags.append(ttag)
                    if ttag in rough_shared:
                        rough_shared[ttag].append(elem.value)
                    else:
                        rough_shared[ttag] = [elem.value]
        # Second pass: a tag is shared only if it appears in every frame
        # with the same value; everything else remains per-frame.
        to_be_removed_from_shared = []
        for ttag, v in rough_shared.items():
            v = rough_shared[ttag]
            if len(v) < len(self.frames):
                to_be_removed_from_shared.append(ttag)
            else:
                all_values_are_equal = all(
                    _DicomHelper.isequal(v_i, v[0]) for v_i in v)
                if not all_values_are_equal:
                    to_be_removed_from_shared.append(ttag)
        for t in to_be_removed_from_shared:
            del rough_shared[t]
        for t, v in rough_shared.items():
            self._shared_tags.append(t)
            self._perframe_tags.remove(t)

    def _istag_excluded_from_perframe(self, t: BaseTag) -> bool:
        return t in self._excluded_from_perframe_tags
class FrameSetCollection:

    """A class to extract framesets based on distinguishing dicom attributes"""

    def __init__(self, single_frame_list: Sequence[Dataset]) -> None:
        """Forms framesets based on a list of distinguishing attributes.

        The list of "distinguishing" attributes that are used to determine
        commonality is currently fixed, and includes the unique identifying
        attributes at the Patient, Study, Equipment levels, the Modality and
        SOP Class, and ImageType as well as the characteristics of the Pixel
        Data, and those attributes that for cross-sectional images imply
        consistent sampling, such as ImageOrientationPatient, PixelSpacing and
        SliceThickness, and in addition AcquisitionContextSequence and
        BurnedInAnnotation.

        Parameters
        ----------
        single_frame_list: Sequence[pydicom.dataset.Dataset]
            list of mixed or non-mixed single frame dicom images

        Notes
        -----
        Note that Series identification, specifically SeriesInstanceUID is NOT
        a distinguishing attribute; i.e. FrameSets may span Series

        """
        self.mixed_frames = single_frame_list
        self.mixed_frames_copy = self.mixed_frames[:]
        self._distinguishing_attribute_keywords = [
            'PatientID',
            'PatientName',
            'StudyInstanceUID',
            'FrameOfReferenceUID',
            'Manufacturer',
            'InstitutionName',
            'InstitutionAddress',
            'StationName',
            'InstitutionalDepartmentName',
            'ManufacturerModelName',
            'DeviceSerialNumber',
            'SoftwareVersions',
            'GantryID',
            'PixelPaddingValue',
            'Modality',
            'ImageType',
            'BurnedInAnnotation',
            'SOPClassUID',
            'Rows',
            'Columns',
            'BitsStored',
            'BitsAllocated',
            'HighBit',
            'PixelRepresentation',
            'PhotometricInterpretation',
            'PlanarConfiguration',
            'SamplesPerPixel',
            'ProtocolName',
            'ImageOrientationPatient',
            'PixelSpacing',
            'SliceThickness',
            'AcquisitionContextSequence']
        self._frame_sets: List[FrameSet] = []
        frame_counts = []
        frameset_counter = 0
        # Greedily peel off one frameset at a time until every input
        # instance has been assigned to some frameset.
        while len(self.mixed_frames_copy) != 0:
            frameset_counter += 1
            x = self._find_all_similar_to_first_datasets()
            self._frame_sets.append(FrameSet(x[0], x[1]))
            frame_counts.append(len(x[0]))
            # log information
            # Fix: the continuation literals below were missing the `f`
            # prefix, so `{...}` placeholders were logged literally.
            logger.debug(
                f"Frameset({frameset_counter:02d}) "
                f"including {len(x[0]):03d} frames")
            logger.debug('\t Distinguishing tags:')
            for dg_i, dg_tg in enumerate(x[1], 1):
                logger.debug(
                    f'\t\t{dg_i:02d}/{len(x[1])})\t{str(dg_tg)}-'
                    f'{keyword_for_tag(dg_tg):32.32s} = '
                    f'{str(x[0][0][dg_tg].value):32.32s}')
            logger.debug('\t dicom datasets in this frame set:')
            for dicom_i, dicom_ds in enumerate(x[0], 1):
                logger.debug(
                    f'\t\t{dicom_i}/{len(x[0])})\t '
                    f'{dicom_ds["SOPInstanceUID"]}')
        frames = ''
        for i, f_count in enumerate(frame_counts, 1):
            frames += '{: 2d}){:03d}\t'.format(i, f_count)
        frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format(
            len(frame_counts), len(self.mixed_frames)) + frames
        logger.info(frames)
        self._excluded_from_perframe_tags = {}
        for kwkw in self._distinguishing_attribute_keywords:
            self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False
        # Fix: the original list was missing commas between the string
        # literals, so implicit concatenation produced one bogus keyword
        # ('AcquisitionDateTimeAcquisitionDate...') and none of these
        # four attributes was actually registered as excluded.
        excluded_kws = [
            'AcquisitionDateTime',
            'AcquisitionDate',
            'AcquisitionTime',
            'SpecificCharacterSet',
        ]
        for kwkw in excluded_kws:
            self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False

    def _find_all_similar_to_first_datasets(self) -> tuple:
        """Takes the first instance from mixed-frames and finds all dicom
        images that have the same distinguishing attributes.

        Returns
        -------
        tuple
            (list of similar datasets, list of distinguishing tags that
            were present on the reference instance)

        """
        similar_ds: List[Dataset] = [self.mixed_frames_copy[0]]
        distinguishing_tags_existing = []
        distinguishing_tags_missing = []
        self.mixed_frames_copy = self.mixed_frames_copy[1:]
        for kw in self._distinguishing_attribute_keywords:
            tg = tag_for_keyword(kw)
            if tg in similar_ds[0]:
                distinguishing_tags_existing.append(tg)
            else:
                distinguishing_tags_missing.append(tg)
        logger_msg = set()
        for ds in self.mixed_frames_copy:
            all_equal = True
            # A tag absent on the reference must be absent here too.
            for tg in distinguishing_tags_missing:
                if tg in ds:
                    logger_msg.add(
                        '{} is missing in all but {}'.format(
                            _DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID']))
                    all_equal = False
                    break
            if not all_equal:
                continue
            # Every tag present on the reference must match in value.
            for tg in distinguishing_tags_existing:
                ref_val = similar_ds[0][tg].value
                if tg not in ds:
                    all_equal = False
                    break
                new_val = ds[tg].value
                if not _DicomHelper.isequal(ref_val, new_val):
                    logger_msg.add(
                        'Inequality on distinguishing '
                        'attribute{} -> {} != {} \n series uid = {}'.format(
                            _DicomHelper.tag2kwstr(tg), ref_val, new_val,
                            ds.SeriesInstanceUID))
                    all_equal = False
                    break
            if all_equal:
                similar_ds.append(ds)
        for msg_ in logger_msg:
            logger.info(msg_)
        for ds in similar_ds:
            if ds in self.mixed_frames_copy:
                self.mixed_frames_copy = [
                    nds for nds in self.mixed_frames_copy if nds != ds]
        return (similar_ds, distinguishing_tags_existing)

    @property
    def distinguishing_attribute_keywords(self) -> List[str]:
        """Returns the list of all distinguishing attributes found."""

        return self._distinguishing_attribute_keywords[:]

    @property
    def frame_sets(self) -> List[FrameSet]:
        """Returns the list of all FrameSets found."""

        return self._frame_sets
class TestDicomHelper(unittest.TestCase):

    def setUp(self) -> None:
        super().setUp()
        # One entry per value representation: tag, then three raw values
        # where the first two are equal and the third differs. Each VR
        # therefore exercises both outcomes of _DicomHelper.isequal.
        specs = {
            # (0008, 0000)
            "UL": (524288, [506, 506, 6]),
            # (0008, 0005) SpecificCharacterSet
            "CS": (524293, ["ISO_IR 100", "ISO_IR 100", "ISO_IR 00"]),
            # (0008, 0016) SOPClassUID
            "UI": (524310, ["1.2.840.10008.5.1.4.1.1.1",
                            "1.2.840.10008.5.1.4.1.1.1",
                            "1.2.840.10008.5.1.4.1.1."]),
            # (0008, 0020) StudyDate
            "DA": (524320, ["19950809", "19950809", "9950809"]),
            # (0008, 0030) StudyTime
            "TM": (524336, ["100044", "100044", "00044"]),
            # (0008, 0040) DataSetType
            "US": (524352, [0, 0, 1]),
            # (0008, 0041) DataSetSubtype
            "LO": (524353, ["IMA NONE", "IMA NONE", "IMA ONE"]),
            # (0008, 0050) AccessionNumber
            "SH": (524368, ["1157687691469610",
                            "1157687691469610",
                            "157687691469610"]),
            # (0008, 0090) ReferringPhysicianName
            "PN": (524432, ["Dr Alpha", "Dr Alpha", "Dr Beta"]),
            # (0008, 2111) DerivationDescription
            "ST": (532753, ["G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,",
                            "G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,",
                            "G0.9D#1.60+0.00,R4R0.5,,D2B0.,,,"]),
            # (0013, 0000)
            "UN": (1245184, [b'\x00\x00\x00',
                             b'\x00\x00\x00',
                             b'\x00\x00\x01']),
            # (0018, 0060) KVP
            "DS": (1572960, [110, 110, 10]),
            # (0018, 1150) ExposureTime
            "IS": (1577296, [32, 32, 2]),
            # (0010, 1010) PatientAge
            "AS": (1052688, ["075Y", "075Y", "75Y"]),
            # (7fe0, 0010) PixelData
            "OW": (2145386512, [b'\x00\x00\x00\x00\x00\x00',
                                b'\x00\x00\x00\x00\x00\x00',
                                b'\x00\x00\x00\x00\x00\x01']),
            # (0028, 0106) SmallestImagePixelValue
            "SS": (2621702, [0, 0, 1]),
            # (0008, 002a) AcquisitionDateTime
            "DT": (524330, ["20030922101033.000000",
                            "20030922101033.000000",
                            "20030922101033.00000"]),
            # (0018, 7006) DetectorDescription
            "LT": (1601542, ["DETECTOR VERSION 1.0 MTFCOMP 1.0",
                             "DETECTOR VERSION 1.0 MTFCOMP 1.0",
                             "DETECTOR VERSION 1.0 MTFCOMP 1."]),
            # (0029, 1131)
            "OB": (2691377, [b'4.0.701169981 ',
                             b'4.0.701169981 ',
                             b'4.0.01169981 ']),
            # (0028, 0009) FrameIncrementPointer
            "AT": (2621449, [5505152, 5505152, 505152]),
        }
        self.data = {
            vr: [DataElement(tag, vr, value) for value in values]
            for vr, (tag, values) in specs.items()
        }

    def test_attribute_equality(self) -> None:
        for vr, [v1, v2, v3] in self.data.items():
            assert _DicomHelper.isequal(v1.value, v2.value) is True
            assert _DicomHelper.isequal(v1.value, v3.value) is False
a/tests/test_utils.py +++ b/tests/test_utils.py @@ -131,7 +131,6 @@ def test_should_raise_error_when_3d_param_is_missing(): ) - class TestDicomHelper(unittest.TestCase): def setUp(self) -> None: @@ -268,4 +267,4 @@ def setUp(self) -> None: def test_attribute_equality(self) -> None: for vr, [v1, v2, v3] in self.data.items(): assert _DicomHelper.isequal(v1.value, v2.value) is True - assert _DicomHelper.isequal(v1.value, v3.value) is False \ No newline at end of file + assert _DicomHelper.isequal(v1.value, v3.value) is False From 796dc5d2e0b949de31110f5d5f265396977715e7 Mon Sep 17 00:00:00 2001 From: afshin Date: Wed, 12 May 2021 18:21:26 -0400 Subject: [PATCH 43/44] made both FrameSet and FrameSetCollection classes private --- src/highdicom/legacy/sop.py | 368 ++++++++++++++++++++++++++++++++++- src/highdicom/utils.py | 373 +----------------------------------- tests/test_legacy.py | 146 +++++++++++++- tests/test_utils.py | 143 +------------- 4 files changed, 510 insertions(+), 520 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index d9c1ea04..d76754ef 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -9,11 +9,12 @@ from copy import deepcopy from numpy import log10, array, ceil -from pydicom.datadict import tag_for_keyword, dictionary_VR +from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag from pydicom.dataset import Dataset from pydicom.tag import Tag, BaseTag from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DataElementSequence +from pydicom.multival import MultiValue from pydicom.valuerep import DT, DA, TM from pydicom.uid import UID @@ -21,7 +22,6 @@ from highdicom._iods import IOD_MODULE_MAP from highdicom._modules import MODULE_ATTRIBUTE_MAP from highdicom.spatial import _GeometryOfSlice -from highdicom.utils import FrameSetCollection logger = logging.getLogger(__name__) @@ -44,6 +44,368 @@ } +class _DicomHelper: + + """A class for checking 
dicom tags and comparing dicom attributes""" + + @staticmethod + def istag_file_meta_information_group(t: BaseTag) -> bool: + return t.group == 0x0002 + + @staticmethod + def istag_repeating_group(t: BaseTag) -> bool: + g = t.group + return (g >= 0x5000 and g <= 0x501e) or\ + (g >= 0x6000 and g <= 0x601e) + + @staticmethod + def istag_group_length(t: BaseTag) -> bool: + return t.element == 0 + + @staticmethod + def isequal(v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool: + from pydicom.valuerep import DSfloat + + def is_equal_float(x1: float, x2: float) -> bool: + return abs(x1 - x2) < float_tolerance + if type(v1) != type(v2): + return False + if isinstance(v1, DataElementSequence): + for item1, item2 in zip(v1, v2): + if not _DicomHelper.isequal_dicom_dataset(item1, item2): + return False + if not isinstance(v1, MultiValue): + v11 = [v1] + v22 = [v2] + else: + v11 = v1 + v22 = v2 + if len(v11) != len(v22): + return False + for xx, yy in zip(v11, v22): + if isinstance(xx, DSfloat) or isinstance(xx, float): + if not is_equal_float(xx, yy): + return False + else: + if xx != yy: + return False + return True + + @staticmethod + def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: + """Checks if two dicom dataset have the same value in all attributes + + Parameters + ---------- + ds1: pydicom.dataset.Dataset + 1st dicom dataset + ds2: pydicom.dataset.Dataset + 2nd dicom dataset + + Returns + ------- + True if dicom datasets are equal otherwise False + + """ + if type(ds1) != type(ds2): + return False + if not isinstance(ds1, Dataset): + return False + for k1, elem1 in ds1.items(): + if k1 not in ds2: + return False + elem2 = ds2[k1] + if not _DicomHelper.isequal(elem2.value, elem1.value): + return False + return True + + @staticmethod + def tag2kwstr(tg: BaseTag) -> str: + """Converts tag to keyword and (group, element) form""" + return '{}-{:32.32s}'.format( + str(tg), keyword_for_tag(tg)) + + +class _FrameSet: + + """ + + A class containing the 
dicom frames that hold equal distinguishing + attributes to detect all perframe and shared dicom attributes + """ + + def __init__( + self, + single_frame_list: List[Dataset], + distinguishing_tags: List[BaseTag], + ) -> None: + """ + + Parameters + ---------- + single_frame_list: List[pydicom.dataset.Dataset] + list of single frames that have equal distinguising attributes + distinguishing_tags: List[pydicom.tag.BaseTag] + list of distinguishing attributes tags + + """ + self._frames = single_frame_list + self._distinguishing_attributes_tags = distinguishing_tags + tmp = [ + tag_for_keyword('AcquisitionDateTime'), + tag_for_keyword('AcquisitionDate'), + tag_for_keyword('AcquisitionTime'), + tag_for_keyword('SpecificCharacterSet')] + self._excluded_from_perframe_tags =\ + self._distinguishing_attributes_tags + tmp + self._perframe_tags: List[BaseTag] = [] + self._shared_tags: List[BaseTag] = [] + self._find_per_frame_and_shared_tags() + + @property + def frames(self) -> List[Dataset]: + return self._frames[:] + + @property + def distinguishing_attributes_tags(self) -> List[Tag]: + return self._distinguishing_attributes_tags[:] + + @property + def excluded_from_perframe_tags(self) -> List[Tag]: + return self._excluded_from_perframe_tags[:] + + @property + def perframe_tags(self) -> List[Tag]: + return self._perframe_tags[:] + + @property + def shared_tags(self) -> List[Tag]: + return self._shared_tags[:] + + @property + def series_instance_uid(self) -> UID: + """Returns the series instance uid of the _FrameSet""" + return self._frames[0].SeriesInstanceUID + + @property + def study_instance_uid(self) -> UID: + """Returns the study instance uid of the _FrameSet""" + return self._frames[0].StudyInstanceUID + + def get_sop_instance_uid_list(self) -> list: + """Returns a list containing all SOPInstanceUID of the _FrameSet""" + output_list = [f.SOPInstanceUID for f in self._frames] + return output_list + + def get_sop_class_uid(self) -> UID: + """Returns the sop class uid 
of the _FrameSet""" + return self._frames[0].SOPClassUID + + def _find_per_frame_and_shared_tags(self) -> None: + """Detects and collects all shared and perframe attributes""" + rough_shared: dict = {} + sfs = self.frames + for ds in sfs: + for ttag, elem in ds.items(): + if (not ttag.is_private and not + _DicomHelper.istag_file_meta_information_group(ttag) and not + _DicomHelper.istag_repeating_group(ttag) and not + _DicomHelper.istag_group_length(ttag) and not + self._istag_excluded_from_perframe(ttag) and + ttag != tag_for_keyword('PixelData')): + elem = ds[ttag] + if ttag not in self._perframe_tags: + self._perframe_tags.append(ttag) + if ttag in rough_shared: + rough_shared[ttag].append(elem.value) + else: + rough_shared[ttag] = [elem.value] + to_be_removed_from_shared = [] + for ttag, v in rough_shared.items(): + v = rough_shared[ttag] + if len(v) < len(self.frames): + to_be_removed_from_shared.append(ttag) + else: + all_values_are_equal = all( + _DicomHelper.isequal(v_i, v[0]) for v_i in v) + if not all_values_are_equal: + to_be_removed_from_shared.append(ttag) + for t in to_be_removed_from_shared: + del rough_shared[t] + for t, v in rough_shared.items(): + self._shared_tags.append(t) + self._perframe_tags.remove(t) + + def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: + return t in self._excluded_from_perframe_tags + + +class _FrameSetCollection: + + """A class to extract framesets based on distinguishing dicom attributes""" + + def __init__(self, single_frame_list: Sequence[Dataset]) -> None: + """Forms framesets based on a list of distinguishing attributes. 
+ The list of "distinguishing" attributes that are used to determine + commonality is currently fixed, and includes the unique identifying + attributes at the Patient, Study, Equipment levels, the Modality and + SOP Class, and ImageType as well as the characteristics of the Pixel + Data, and those attributes that for cross-sectional images imply + consistent sampling, such as ImageOrientationPatient, PixelSpacing and + SliceThickness, and in addition AcquisitionContextSequence and + BurnedInAnnotation. + + Parameters + ---------- + single_frame_list: Sequence[pydicom.dataset.Dataset] + list of mixed or non-mixed single frame dicom images + + Notes + ----- + Note that Series identification, specifically SeriesInstanceUID is NOT + a distinguishing attribute; i.e. FrameSets may span Series + + """ + self.mixed_frames = single_frame_list + self.mixed_frames_copy = self.mixed_frames[:] + self._distinguishing_attribute_keywords = [ + 'PatientID', + 'PatientName', + 'StudyInstanceUID', + 'FrameOfReferenceUID', + 'Manufacturer', + 'InstitutionName', + 'InstitutionAddress', + 'StationName', + 'InstitutionalDepartmentName', + 'ManufacturerModelName', + 'DeviceSerialNumber', + 'SoftwareVersions', + 'GantryID', + 'PixelPaddingValue', + 'Modality', + 'ImageType', + 'BurnedInAnnotation', + 'SOPClassUID', + 'Rows', + 'Columns', + 'BitsStored', + 'BitsAllocated', + 'HighBit', + 'PixelRepresentation', + 'PhotometricInterpretation', + 'PlanarConfiguration', + 'SamplesPerPixel', + 'ProtocolName', + 'ImageOrientationPatient', + 'PixelSpacing', + 'SliceThickness', + 'AcquisitionContextSequence'] + self._frame_sets: List[_FrameSet] = [] + frame_counts = [] + frameset_counter = 0 + while len(self.mixed_frames_copy) != 0: + frameset_counter += 1 + x = self._find_all_similar_to_first_datasets() + self._frame_sets.append(_FrameSet(x[0], x[1])) + frame_counts.append(len(x[0])) + # log information + logger.debug( + f"Frameset({frameset_counter:02d}) " + "including {len(x[0]):03d} frames") + 
logger.debug('\t Distinguishing tags:') + for dg_i, dg_tg in enumerate(x[1], 1): + logger.debug( + f'\t\t{dg_i:02d}/{len(x[1])})\t{str(dg_tg)}-' + '{keyword_for_tag(dg_tg):32.32s} = ' + '{str(x[0][0][dg_tg].value):32.32s}') + logger.debug('\t dicom datasets in this frame set:') + for dicom_i, dicom_ds in enumerate(x[0], 1): + logger.debug( + f'\t\t{dicom_i}/{len(x[0])})\t ' + '{dicom_ds["SOPInstanceUID"]}') + frames = '' + for i, f_count in enumerate(frame_counts, 1): + frames += '{: 2d}){:03d}\t'.format(i, f_count) + frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( + len(frame_counts), len(self.mixed_frames)) + frames + logger.info(frames) + self._excluded_from_perframe_tags = {} + for kwkw in self._distinguishing_attribute_keywords: + self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False + excluded_kws = [ + 'AcquisitionDateTime', + 'AcquisitionDate', + 'AcquisitionTime', + 'SpecificCharacterSet', + ] + for kwkw in excluded_kws: + self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False + + def _find_all_similar_to_first_datasets(self) -> tuple: + """Takes the first instance from mixed-frames and finds all dicom images + that have the same distinguishing attributes.
+ + """ + similar_ds: List[Dataset] = [self.mixed_frames_copy[0]] + distinguishing_tags_existing = [] + distinguishing_tags_missing = [] + self.mixed_frames_copy = self.mixed_frames_copy[1:] + for kw in self._distinguishing_attribute_keywords: + tg = tag_for_keyword(kw) + if tg in similar_ds[0]: + distinguishing_tags_existing.append(tg) + else: + distinguishing_tags_missing.append(tg) + logger_msg = set() + for ds in self.mixed_frames_copy: + all_equal = True + for tg in distinguishing_tags_missing: + if tg in ds: + logger_msg.add( + '{} is missing in all but {}'.format( + _DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) + all_equal = False + break + if not all_equal: + continue + for tg in distinguishing_tags_existing: + ref_val = similar_ds[0][tg].value + if tg not in ds: + all_equal = False + break + new_val = ds[tg].value + if not _DicomHelper.isequal(ref_val, new_val): + logger_msg.add( + 'Inequality on distinguishing ' + 'attribute{} -> {} != {} \n series uid = {}'.format( + _DicomHelper.tag2kwstr(tg), ref_val, new_val, + ds.SeriesInstanceUID)) + all_equal = False + break + if all_equal: + similar_ds.append(ds) + for msg_ in logger_msg: + logger.info(msg_) + for ds in similar_ds: + if ds in self.mixed_frames_copy: + self.mixed_frames_copy = [ + nds for nds in self.mixed_frames_copy if nds != ds] + return (similar_ds, distinguishing_tags_existing) + + @property + def distinguishing_attribute_keywords(self) -> List[str]: + """Returns the list of all distinguising attributes found.""" + + return self._distinguishing_attribute_keywords[:] + + @property + def frame_sets(self) -> List[_FrameSet]: + """Returns the list of all FrameSets found.""" + + return self._frame_sets + + class _CommonLegacyConvertedEnhanceImage(SOPClass): """SOP class for common Legacy Converted Enhanced instances.""" @@ -82,7 +444,7 @@ def __init__( except IndexError: raise ValueError('No DICOM data sets of provided.') sop_class_uid = LEGACY_ENHANCED_SOP_CLASS_UID_MAP[ref_ds.SOPClassUID] - 
all_framesets = FrameSetCollection(legacy_datasets) + all_framesets = _FrameSetCollection(legacy_datasets) if len(all_framesets.frame_sets) > 1: raise ValueError( 'Mixed frames sets: the input single frame list contain more ' diff --git a/src/highdicom/utils.py b/src/highdicom/utils.py index 7282f686..eea35425 100644 --- a/src/highdicom/utils.py +++ b/src/highdicom/utils.py @@ -1,23 +1,14 @@ -import logging import itertools -from typing import Iterator, List, Optional, Sequence, Tuple, Any +from typing import Iterator, List, Optional, Sequence, Tuple import numpy as np -from pydicom.datadict import tag_for_keyword, keyword_for_tag from pydicom.dataset import Dataset -from pydicom.tag import Tag, BaseTag -from pydicom.sequence import Sequence as DataElementSequence -from pydicom.multival import MultiValue -from pydicom.uid import UID from highdicom.content import PlanePositionSequence from highdicom.enum import CoordinateSystemNames from highdicom.spatial import map_pixel_into_coordinate_system -logger = logging.getLogger(__name__) - - def tile_pixel_matrix( total_pixel_matrix_rows: int, total_pixel_matrix_columns: int, @@ -239,365 +230,3 @@ def compute_plane_position_slide_per_frame( range(1, tiles_per_row + 1), # row direction, left to right ) ] - - -class _DicomHelper: - - """A class for checking dicom tags and comparing dicom attributes""" - - @staticmethod - def istag_file_meta_information_group(t: BaseTag) -> bool: - return t.group == 0x0002 - - @staticmethod - def istag_repeating_group(t: BaseTag) -> bool: - g = t.group - return (g >= 0x5000 and g <= 0x501e) or\ - (g >= 0x6000 and g <= 0x601e) - - @staticmethod - def istag_group_length(t: BaseTag) -> bool: - return t.element == 0 - - @staticmethod - def isequal(v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool: - from pydicom.valuerep import DSfloat - - def is_equal_float(x1: float, x2: float) -> bool: - return abs(x1 - x2) < float_tolerance - if type(v1) != type(v2): - return False - if 
isinstance(v1, DataElementSequence): - for item1, item2 in zip(v1, v2): - if not _DicomHelper.isequal_dicom_dataset(item1, item2): - return False - if not isinstance(v1, MultiValue): - v11 = [v1] - v22 = [v2] - else: - v11 = v1 - v22 = v2 - if len(v11) != len(v22): - return False - for xx, yy in zip(v11, v22): - if isinstance(xx, DSfloat) or isinstance(xx, float): - if not is_equal_float(xx, yy): - return False - else: - if xx != yy: - return False - return True - - @staticmethod - def isequal_dicom_dataset(ds1: Dataset, ds2: Dataset) -> bool: - """Checks if two dicom dataset have the same value in all attributes - - Parameters - ---------- - ds1: pydicom.dataset.Dataset - 1st dicom dataset - ds2: pydicom.dataset.Dataset - 2nd dicom dataset - - Returns - ------- - True if dicom datasets are equal otherwise False - - """ - if type(ds1) != type(ds2): - return False - if not isinstance(ds1, Dataset): - return False - for k1, elem1 in ds1.items(): - if k1 not in ds2: - return False - elem2 = ds2[k1] - if not _DicomHelper.isequal(elem2.value, elem1.value): - return False - return True - - @staticmethod - def tag2kwstr(tg: BaseTag) -> str: - """Converts tag to keyword and (group, element) form""" - return '{}-{:32.32s}'.format( - str(tg), keyword_for_tag(tg)) - - -class FrameSet: - - """ - - A class containing the dicom frames that hold equal distinguishing - attributes to detect all perframe and shared dicom attributes - """ - - def __init__( - self, - single_frame_list: List[Dataset], - distinguishing_tags: List[BaseTag], - ) -> None: - """ - - Parameters - ---------- - single_frame_list: List[pydicom.dataset.Dataset] - list of single frames that have equal distinguising attributes - distinguishing_tags: List[pydicom.tag.BaseTag] - list of distinguishing attributes tags - - """ - self._frames = single_frame_list - self._distinguishing_attributes_tags = distinguishing_tags - tmp = [ - tag_for_keyword('AcquisitionDateTime'), - tag_for_keyword('AcquisitionDate'), - 
tag_for_keyword('AcquisitionTime'), - tag_for_keyword('SpecificCharacterSet')] - self._excluded_from_perframe_tags =\ - self._distinguishing_attributes_tags + tmp - self._perframe_tags: List[BaseTag] = [] - self._shared_tags: List[BaseTag] = [] - self._find_per_frame_and_shared_tags() - - @property - def frames(self) -> List[Dataset]: - return self._frames[:] - - @property - def distinguishing_attributes_tags(self) -> List[Tag]: - return self._distinguishing_attributes_tags[:] - - @property - def excluded_from_perframe_tags(self) -> List[Tag]: - return self._excluded_from_perframe_tags[:] - - @property - def perframe_tags(self) -> List[Tag]: - return self._perframe_tags[:] - - @property - def shared_tags(self) -> List[Tag]: - return self._shared_tags[:] - - @property - def series_instance_uid(self) -> UID: - """Returns the series instance uid of the FrameSet""" - return self._frames[0].SeriesInstanceUID - - @property - def study_instance_uid(self) -> UID: - """Returns the study instance uid of the FrameSet""" - return self._frames[0].StudyInstanceUID - - def get_sop_instance_uid_list(self) -> list: - """Returns a list containing all SOPInstanceUID of the FrameSet""" - output_list = [f.SOPInstanceUID for f in self._frames] - return output_list - - def get_sop_class_uid(self) -> UID: - """Returns the sop class uid of the FrameSet""" - return self._frames[0].SOPClassUID - - def _find_per_frame_and_shared_tags(self) -> None: - """Detects and collects all shared and perframe attributes""" - rough_shared: dict = {} - sfs = self.frames - for ds in sfs: - for ttag, elem in ds.items(): - if (not ttag.is_private and not - _DicomHelper.istag_file_meta_information_group(ttag) and not - _DicomHelper.istag_repeating_group(ttag) and not - _DicomHelper.istag_group_length(ttag) and not - self._istag_excluded_from_perframe(ttag) and - ttag != tag_for_keyword('PixelData')): - elem = ds[ttag] - if ttag not in self._perframe_tags: - self._perframe_tags.append(ttag) - if ttag in 
rough_shared: - rough_shared[ttag].append(elem.value) - else: - rough_shared[ttag] = [elem.value] - to_be_removed_from_shared = [] - for ttag, v in rough_shared.items(): - v = rough_shared[ttag] - if len(v) < len(self.frames): - to_be_removed_from_shared.append(ttag) - else: - all_values_are_equal = all( - _DicomHelper.isequal(v_i, v[0]) for v_i in v) - if not all_values_are_equal: - to_be_removed_from_shared.append(ttag) - for t in to_be_removed_from_shared: - del rough_shared[t] - for t, v in rough_shared.items(): - self._shared_tags.append(t) - self._perframe_tags.remove(t) - - def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: - return t in self._excluded_from_perframe_tags - - -class FrameSetCollection: - - """A class to extract framesets based on distinguishing dicom attributes""" - - def __init__(self, single_frame_list: Sequence[Dataset]) -> None: - """Forms framesets based on a list of distinguishing attributes. - The list of "distinguishing" attributes that are used to determine - commonality is currently fixed, and includes the unique identifying - attributes at the Patient, Study, Equipment levels, the Modality and - SOP Class, and ImageType as well as the characteristics of the Pixel - Data, and those attributes that for cross-sectional images imply - consistent sampling, such as ImageOrientationPatient, PixelSpacing and - SliceThickness, and in addition AcquisitionContextSequence and - BurnedInAnnotation. - - Parameters - ---------- - single_frame_list: Sequence[pydicom.dataset.Dataset] - list of mixed or non-mixed single frame dicom images - - Notes - ----- - Note that Series identification, specifically SeriesInstanceUID is NOT - a distinguishing attribute; i.e. 
FrameSets may span Series - - """ - self.mixed_frames = single_frame_list - self.mixed_frames_copy = self.mixed_frames[:] - self._distinguishing_attribute_keywords = [ - 'PatientID', - 'PatientName', - 'StudyInstanceUID', - 'FrameOfReferenceUID', - 'Manufacturer', - 'InstitutionName', - 'InstitutionAddress', - 'StationName', - 'InstitutionalDepartmentName', - 'ManufacturerModelName', - 'DeviceSerialNumber', - 'SoftwareVersions', - 'GantryID', - 'PixelPaddingValue', - 'Modality', - 'ImageType', - 'BurnedInAnnotation', - 'SOPClassUID', - 'Rows', - 'Columns', - 'BitsStored', - 'BitsAllocated', - 'HighBit', - 'PixelRepresentation', - 'PhotometricInterpretation', - 'PlanarConfiguration', - 'SamplesPerPixel', - 'ProtocolName', - 'ImageOrientationPatient', - 'PixelSpacing', - 'SliceThickness', - 'AcquisitionContextSequence'] - self._frame_sets: List[FrameSet] = [] - frame_counts = [] - frameset_counter = 0 - while len(self.mixed_frames_copy) != 0: - frameset_counter += 1 - x = self._find_all_similar_to_first_datasets() - self._frame_sets.append(FrameSet(x[0], x[1])) - frame_counts.append(len(x[0])) - # log information - logger.debug( - f"Frameset({frameset_counter:02d}) " - "including {len(x[0]):03d} frames") - logger.debug('\t Distinguishing tags:') - for dg_i, dg_tg in enumerate(x[1], 1): - logger.debug( - f'\t\t{dg_i:02d}/{len(x[1])})\t{str(dg_tg)}-' - '{keyword_for_tag(dg_tg):32.32s} = ' - '{str(x[0][0][dg_tg].value):32.32s}') - logger.debug('\t dicom datasets in this frame set:') - for dicom_i, dicom_ds in enumerate(x[0], 1): - logger.debug( - f'\t\t{dicom_i}/{len(x[0])})\t ' - '{dicom_ds["SOPInstanceUID"]}') - frames = '' - for i, f_count in enumerate(frame_counts, 1): - frames += '{: 2d}){:03d}\t'.format(i, f_count) - frames = '{: 2d} frameset(s) out of all {: 3d} instances:'.format( - len(frame_counts), len(self.mixed_frames)) + frames - logger.info(frames) - self._excluded_from_perframe_tags = {} - for kwkw in self._distinguishing_attribute_keywords: - 
self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False - excluded_kws = [ - 'AcquisitionDateTime' - 'AcquisitionDate' - 'AcquisitionTime' - 'SpecificCharacterSet' - ] - for kwkw in excluded_kws: - self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False - - def _find_all_similar_to_first_datasets(self) -> tuple: - """Takes the fist instance from mixed-frames and finds all dicom images - that have the same distinguishing attributes. - - """ - similar_ds: List[Dataset] = [self.mixed_frames_copy[0]] - distinguishing_tags_existing = [] - distinguishing_tags_missing = [] - self.mixed_frames_copy = self.mixed_frames_copy[1:] - for kw in self._distinguishing_attribute_keywords: - tg = tag_for_keyword(kw) - if tg in similar_ds[0]: - distinguishing_tags_existing.append(tg) - else: - distinguishing_tags_missing.append(tg) - logger_msg = set() - for ds in self.mixed_frames_copy: - all_equal = True - for tg in distinguishing_tags_missing: - if tg in ds: - logger_msg.add( - '{} is missing in all but {}'.format( - _DicomHelper.tag2kwstr(tg), ds['SOPInstanceUID'])) - all_equal = False - break - if not all_equal: - continue - for tg in distinguishing_tags_existing: - ref_val = similar_ds[0][tg].value - if tg not in ds: - all_equal = False - break - new_val = ds[tg].value - if not _DicomHelper.isequal(ref_val, new_val): - logger_msg.add( - 'Inequality on distinguishing ' - 'attribute{} -> {} != {} \n series uid = {}'.format( - _DicomHelper.tag2kwstr(tg), ref_val, new_val, - ds.SeriesInstanceUID)) - all_equal = False - break - if all_equal: - similar_ds.append(ds) - for msg_ in logger_msg: - logger.info(msg_) - for ds in similar_ds: - if ds in self.mixed_frames_copy: - self.mixed_frames_copy = [ - nds for nds in self.mixed_frames_copy if nds != ds] - return (similar_ds, distinguishing_tags_existing) - - @property - def distinguishing_attribute_keywords(self) -> List[str]: - """Returns the list of all distinguising attributes found.""" - - return 
self._distinguishing_attribute_keywords[:] - - @property - def frame_sets(self) -> List[FrameSet]: - """Returns the list of all FrameSets found.""" - - return self._frame_sets diff --git a/tests/test_legacy.py b/tests/test_legacy.py index 07959f5b..973095fe 100644 --- a/tests/test_legacy.py +++ b/tests/test_legacy.py @@ -1,5 +1,6 @@ import unittest from pydicom import FileDataset, Dataset +from pydicom.dataelem import DataElement from pydicom.uid import generate_uid from highdicom.legacy import sop from datetime import datetime, timedelta @@ -176,6 +177,145 @@ def generate_mixed_framesets( return out +class TestDicomHelper(unittest.TestCase): + + def setUp(self) -> None: + super().setUp() + # Build data element for all value representations: + # vrs = [ + # 'AE', 'AS', 'AT', 'CS', 'DA', 'DS', 'DT', 'FL', 'FD', 'IS', 'LO', + # 'LT', 'OB', 'OD', 'OF', 'OL', 'OV', 'OW', 'PN', 'SH', 'SL', 'SQ', + # 'SS', 'ST', 'SV', 'TM', 'UC', 'UI', 'UL', 'UN', 'UR', + # 'US', 'UT', 'UV'] + self.data = { + "UL": [ + # Keyword: (0008, 0000) + DataElement(524288, "UL", 506), + DataElement(524288, "UL", 506), + DataElement(524288, "UL", 6), + ], + "CS": [ + # Keyword: (0008, 0005) SpecificCharacterSet + DataElement(524293, "CS", "ISO_IR 100"), + DataElement(524293, "CS", "ISO_IR 100"), + DataElement(524293, "CS", "ISO_IR 00"), + ], + "UI": [ + # Keyword: (0008, 0016) SOPClassUID + DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1.1"), + DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1.1"), + DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1."), + ], + "DA": [ + # Keyword: (0008, 0020) StudyDate + DataElement(524320, "DA", "19950809"), + DataElement(524320, "DA", "19950809"), + DataElement(524320, "DA", "9950809"), + ], + "TM": [ + # Keyword: (0008, 0030) StudyTime + DataElement(524336, "TM", "100044"), + DataElement(524336, "TM", "100044"), + DataElement(524336, "TM", "00044"), + ], + "US": [ + # Keyword: (0008, 0040) DataSetType + DataElement(524352, "US", 0), + DataElement(524352, 
"US", 0), + DataElement(524352, "US", 1), + ], + "LO": [ + # Keyword: (0008, 0041) DataSetSubtype + DataElement(524353, "LO", "IMA NONE"), + DataElement(524353, "LO", "IMA NONE"), + DataElement(524353, "LO", "IMA ONE"), + ], + "SH": [ + # Keyword: (0008, 0050) AccessionNumber + DataElement(524368, "SH", "1157687691469610"), + DataElement(524368, "SH", "1157687691469610"), + DataElement(524368, "SH", "157687691469610"), + ], + "PN": [ + # Keyword: (0008, 0090) ReferringPhysicianName + DataElement(524432, "PN", "Dr Alpha"), + DataElement(524432, "PN", "Dr Alpha"), + DataElement(524432, "PN", "Dr Beta"), + ], + "ST": [ + # Keyword: (0008, 2111) DerivationDescription + DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,"), + DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,"), + DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.,,,"), + ], + "UN": [ + # Keyword: (0013, 0000) + DataElement(1245184, "UN", b'\x00\x00\x00'), + DataElement(1245184, "UN", b'\x00\x00\x00'), + DataElement(1245184, "UN", b'\x00\x00\x01'), + ], + "DS": [ + # Keyword: (0018, 0060) KVP + DataElement(1572960, "DS", 110), + DataElement(1572960, "DS", 110), + DataElement(1572960, "DS", 10), + ], + "IS": [ + # Keyword: (0018, 1150) ExposureTime + DataElement(1577296, "IS", 32), + DataElement(1577296, "IS", 32), + DataElement(1577296, "IS", 2), + ], + "AS": [ + # Keyword: (0010, 1010) PatientAge + DataElement(1052688, "AS", "075Y"), + DataElement(1052688, "AS", "075Y"), + DataElement(1052688, "AS", "75Y"), + ], + "OW": [ + # Keyword: (7fe0, 0010) PixelData + DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x00'), + DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x00'), + DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x01'), + ], + "SS": [ + # Keyword: (0028, 0106) SmallestImagePixelValue + DataElement(2621702, "SS", 0), + DataElement(2621702, "SS", 0), + DataElement(2621702, "SS", 1), + ], + "DT": [ + # Keyword: (0008, 002a) AcquisitionDateTime + 
DataElement(524330, "DT", "20030922101033.000000"), + DataElement(524330, "DT", "20030922101033.000000"), + DataElement(524330, "DT", "20030922101033.00000"), + ], + "LT": [ + # Keyword: (0018, 7006) DetectorDescription + DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1.0"), + DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1.0"), + DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1."), + ], + "OB": [ + # Keyword: (0029, 1131) + DataElement(2691377, "OB", b'4.0.701169981 '), + DataElement(2691377, "OB", b'4.0.701169981 '), + DataElement(2691377, "OB", b'4.0.01169981 '), + ], + "AT": [ + # Keyword: (0028, 0009) FrameIncrementPointer + DataElement(2621449, "AT", 5505152), + DataElement(2621449, "AT", 5505152), + DataElement(2621449, "AT", 505152), + ], + } + + def test_attribute_equality(self) -> None: + for vr, [v1, v2, v3] in self.data.items(): + assert sop._DicomHelper.isequal(v1.value, v2.value) is True + assert sop._DicomHelper.isequal(v1.value, v3.value) is False + + class TestFrameSetCollection(unittest.TestCase): def setUp(self) -> None: @@ -186,7 +326,7 @@ def test_frameset_detection(self) -> None: for i in range(1, 10): data = data_generator.generate_mixed_framesets( Modality.CT, i, True, True) - fset_collection = sop.FrameSetCollection(data) + fset_collection = sop._FrameSetCollection(data) assert len(fset_collection.frame_sets) == i def test_frameset_framecount_detection(self) -> None: @@ -194,7 +334,7 @@ def test_frameset_framecount_detection(self) -> None: data_generator = DicomGenerator(i) data = data_generator.generate_mixed_framesets( Modality.CT, 1, True, True) - fset_collection = sop.FrameSetCollection(data) + fset_collection = sop._FrameSetCollection(data) assert len(fset_collection.frame_sets) == 1 assert len(fset_collection.frame_sets[0].frames) == i @@ -227,7 +367,7 @@ def test_conversion(self) -> None: data_generator = DicomGenerator(i) data = data_generator.generate_mixed_framesets( Modality(j), 1, True, True) - 
fset_collection = sop.FrameSetCollection(data) + fset_collection = sop._FrameSetCollection(data) assert len(fset_collection.frame_sets) == 1 assert len(fset_collection.frame_sets[0].frames) == i convertor = LegacyConverterClass( diff --git a/tests/test_utils.py b/tests/test_utils.py index 6a256020..01921c47 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,10 +1,8 @@ import pytest -import unittest -from pydicom.dataelem import DataElement from highdicom.content import PlanePositionSequence from highdicom.enum import CoordinateSystemNames -from highdicom.utils import compute_plane_position_tiled_full, _DicomHelper +from highdicom.utils import compute_plane_position_tiled_full params_plane_positions = [ @@ -129,142 +127,3 @@ def test_should_raise_error_when_3d_param_is_missing(): pixel_spacing=(1.0, 1.0), spacing_between_slices=1.0 ) - - -class TestDicomHelper(unittest.TestCase): - - def setUp(self) -> None: - super().setUp() - # Build data element for all value representations: - # vrs = [ - # 'AE', 'AS', 'AT', 'CS', 'DA', 'DS', 'DT', 'FL', 'FD', 'IS', 'LO', - # 'LT', 'OB', 'OD', 'OF', 'OL', 'OV', 'OW', 'PN', 'SH', 'SL', 'SQ', - # 'SS', 'ST', 'SV', 'TM', 'UC', 'UI', 'UL', 'UN', 'UR', - # 'US', 'UT', 'UV'] - self.data = { - "UL": [ - # Keyword: (0008, 0000) - DataElement(524288, "UL", 506), - DataElement(524288, "UL", 506), - DataElement(524288, "UL", 6), - ], - "CS": [ - # Keyword: (0008, 0005) SpecificCharacterSet - DataElement(524293, "CS", "ISO_IR 100"), - DataElement(524293, "CS", "ISO_IR 100"), - DataElement(524293, "CS", "ISO_IR 00"), - ], - "UI": [ - # Keyword: (0008, 0016) SOPClassUID - DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1.1"), - DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1.1"), - DataElement(524310, "UI", "1.2.840.10008.5.1.4.1.1."), - ], - "DA": [ - # Keyword: (0008, 0020) StudyDate - DataElement(524320, "DA", "19950809"), - DataElement(524320, "DA", "19950809"), - DataElement(524320, "DA", "9950809"), - ], - "TM": [ - # 
Keyword: (0008, 0030) StudyTime - DataElement(524336, "TM", "100044"), - DataElement(524336, "TM", "100044"), - DataElement(524336, "TM", "00044"), - ], - "US": [ - # Keyword: (0008, 0040) DataSetType - DataElement(524352, "US", 0), - DataElement(524352, "US", 0), - DataElement(524352, "US", 1), - ], - "LO": [ - # Keyword: (0008, 0041) DataSetSubtype - DataElement(524353, "LO", "IMA NONE"), - DataElement(524353, "LO", "IMA NONE"), - DataElement(524353, "LO", "IMA ONE"), - ], - "SH": [ - # Keyword: (0008, 0050) AccessionNumber - DataElement(524368, "SH", "1157687691469610"), - DataElement(524368, "SH", "1157687691469610"), - DataElement(524368, "SH", "157687691469610"), - ], - "PN": [ - # Keyword: (0008, 0090) ReferringPhysicianName - DataElement(524432, "PN", "Dr Alpha"), - DataElement(524432, "PN", "Dr Alpha"), - DataElement(524432, "PN", "Dr Beta"), - ], - "ST": [ - # Keyword: (0008, 2111) DerivationDescription - DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,"), - DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.6,,,"), - DataElement(532753, "ST", "G0.9D#1.60+0.00,R4R0.5,,D2B0.,,,"), - ], - "UN": [ - # Keyword: (0013, 0000) - DataElement(1245184, "UN", b'\x00\x00\x00'), - DataElement(1245184, "UN", b'\x00\x00\x00'), - DataElement(1245184, "UN", b'\x00\x00\x01'), - ], - "DS": [ - # Keyword: (0018, 0060) KVP - DataElement(1572960, "DS", 110), - DataElement(1572960, "DS", 110), - DataElement(1572960, "DS", 10), - ], - "IS": [ - # Keyword: (0018, 1150) ExposureTime - DataElement(1577296, "IS", 32), - DataElement(1577296, "IS", 32), - DataElement(1577296, "IS", 2), - ], - "AS": [ - # Keyword: (0010, 1010) PatientAge - DataElement(1052688, "AS", "075Y"), - DataElement(1052688, "AS", "075Y"), - DataElement(1052688, "AS", "75Y"), - ], - "OW": [ - # Keyword: (7fe0, 0010) PixelData - DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x00'), - DataElement(2145386512, "OW", b'\x00\x00\x00\x00\x00\x00'), - DataElement(2145386512, "OW", 
b'\x00\x00\x00\x00\x00\x01'), - ], - "SS": [ - # Keyword: (0028, 0106) SmallestImagePixelValue - DataElement(2621702, "SS", 0), - DataElement(2621702, "SS", 0), - DataElement(2621702, "SS", 1), - ], - "DT": [ - # Keyword: (0008, 002a) AcquisitionDateTime - DataElement(524330, "DT", "20030922101033.000000"), - DataElement(524330, "DT", "20030922101033.000000"), - DataElement(524330, "DT", "20030922101033.00000"), - ], - "LT": [ - # Keyword: (0018, 7006) DetectorDescription - DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1.0"), - DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1.0"), - DataElement(1601542, "LT", "DETECTOR VERSION 1.0 MTFCOMP 1."), - ], - "OB": [ - # Keyword: (0029, 1131) - DataElement(2691377, "OB", b'4.0.701169981 '), - DataElement(2691377, "OB", b'4.0.701169981 '), - DataElement(2691377, "OB", b'4.0.01169981 '), - ], - "AT": [ - # Keyword: (0028, 0009) FrameIncrementPointer - DataElement(2621449, "AT", 5505152), - DataElement(2621449, "AT", 5505152), - DataElement(2621449, "AT", 505152), - ], - } - - def test_attribute_equality(self) -> None: - for vr, [v1, v2, v3] in self.data.items(): - assert _DicomHelper.isequal(v1.value, v2.value) is True - assert _DicomHelper.isequal(v1.value, v3.value) is False From 55fec52198381e82ed25bcecbc2202bcce08785f Mon Sep 17 00:00:00 2001 From: afshin Date: Fri, 14 May 2021 11:57:53 -0400 Subject: [PATCH 44/44] applied Chris's comments for the 2nd round --- src/highdicom/legacy/sop.py | 261 ++++++++++++++++-------------------- tests/test_legacy.py | 6 +- 2 files changed, 121 insertions(+), 146 deletions(-) diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py index d76754ef..18151744 100644 --- a/src/highdicom/legacy/sop.py +++ b/src/highdicom/legacy/sop.py @@ -7,6 +7,8 @@ from typing import Any, List, Union, Callable, Sequence, Optional, Dict, Tuple from datetime import datetime, timedelta from copy import deepcopy +from collections import defaultdict +from sys import float_info 
from numpy import log10, array, ceil from pydicom.datadict import tag_for_keyword, dictionary_VR, keyword_for_tag @@ -15,7 +17,7 @@ from pydicom.dataelem import DataElement from pydicom.sequence import Sequence as DataElementSequence from pydicom.multival import MultiValue -from pydicom.valuerep import DT, DA, TM +from pydicom.valuerep import DT, DA, TM, DSfloat from pydicom.uid import UID from highdicom.base import SOPClass @@ -64,8 +66,6 @@ def istag_group_length(t: BaseTag) -> bool: @staticmethod def isequal(v1: Any, v2: Any, float_tolerance: float = 1.0e-5) -> bool: - from pydicom.valuerep import DSfloat - def is_equal_float(x1: float, x2: float) -> bool: return abs(x1 - x2) < float_tolerance if type(v1) != type(v2): @@ -144,7 +144,7 @@ def __init__( Parameters ---------- single_frame_list: List[pydicom.dataset.Dataset] - list of single frames that have equal distinguising attributes + list of single frames that have equal distinguishing attributes distinguishing_tags: List[pydicom.tag.BaseTag] list of distinguishing attributes tags @@ -203,7 +203,9 @@ def get_sop_class_uid(self) -> UID: def _find_per_frame_and_shared_tags(self) -> None: """Detects and collects all shared and perframe attributes""" - rough_shared: dict = {} + rough_shared: Dict[BaseTag, List[DataElement]] = defaultdict(list) + sh_tgs = set() + pf_tgs = set() sfs = self.frames for ds in sfs: for ttag, elem in ds.items(): @@ -213,28 +215,23 @@ def _find_per_frame_and_shared_tags(self) -> None: _DicomHelper.istag_group_length(ttag) and not self._istag_excluded_from_perframe(ttag) and ttag != tag_for_keyword('PixelData')): + # Since elem could be a RawDataElement so __getattr__ is + # safer and gives DataElement type as output elem = ds[ttag] - if ttag not in self._perframe_tags: - self._perframe_tags.append(ttag) - if ttag in rough_shared: - rough_shared[ttag].append(elem.value) - else: - rough_shared[ttag] = [elem.value] - to_be_removed_from_shared = [] + pf_tgs.add(ttag) + 
rough_shared[ttag].append(elem.value) + sh_tgs = set(rough_shared.keys()) for ttag, v in rough_shared.items(): - v = rough_shared[ttag] if len(v) < len(self.frames): - to_be_removed_from_shared.append(ttag) + sh_tgs.remove(ttag) else: all_values_are_equal = all( _DicomHelper.isequal(v_i, v[0]) for v_i in v) if not all_values_are_equal: - to_be_removed_from_shared.append(ttag) - for t in to_be_removed_from_shared: - del rough_shared[t] - for t, v in rough_shared.items(): - self._shared_tags.append(t) - self._perframe_tags.remove(t) + sh_tgs.remove(ttag) + pf_tgs -= sh_tgs + self._shared_tags = list(sh_tgs) + self._perframe_tags = list(pf_tgs) def _istag_excluded_from_perframe(self, t: BaseTag) -> bool: return t in self._excluded_from_perframe_tags @@ -306,23 +303,29 @@ def __init__(self, single_frame_list: Sequence[Dataset]) -> None: frameset_counter = 0 while len(self.mixed_frames_copy) != 0: frameset_counter += 1 - x = self._find_all_similar_to_first_datasets() - self._frame_sets.append(_FrameSet(x[0], x[1])) - frame_counts.append(len(x[0])) + ds_list, distinguishing_tgs = \ + self._find_all_similar_to_first_datasets() + # removing similar datasets from mixed frames + for ds in ds_list: + if ds in self.mixed_frames_copy: + self.mixed_frames_copy = [ + nds for nds in self.mixed_frames_copy if nds != ds] + self._frame_sets.append(_FrameSet(ds_list, distinguishing_tgs)) + frame_counts.append(len(ds_list)) # log information logger.debug( f"Frameset({frameset_counter:02d}) " - "including {len(x[0]):03d} frames") + f"including {len(ds_list):03d} frames") logger.debug('\t Distinguishing tags:') - for dg_i, dg_tg in enumerate(x[1], 1): + for dg_i, dg_tg in enumerate(distinguishing_tgs, 1): logger.debug( - f'\t\t{dg_i:02d}/{len(x[1])})\t{str(dg_tg)}-' + f'\t\t{dg_i:02d}/{len(distinguishing_tgs)})\t{str(dg_tg)}-' '{keyword_for_tag(dg_tg):32.32s} = ' - '{str(x[0][0][dg_tg].value):32.32s}') + f'{str(ds_list[0][dg_tg].value):32.32s}') logger.debug('\t dicom datasets in this 
frame set:') - for dicom_i, dicom_ds in enumerate(x[0], 1): + for dicom_i, dicom_ds in enumerate(ds_list, 1): logger.debug( - f'\t\t{dicom_i}/{len(x[0])})\t ' + f'\t\t{dicom_i}/{len(ds_list)})\t ' '{dicom_ds["SOPInstanceUID"]}') frames = '' for i, f_count in enumerate(frame_counts, 1): @@ -342,10 +345,17 @@ def __init__(self, single_frame_list: Sequence[Dataset]) -> None: for kwkw in excluded_kws: self._excluded_from_perframe_tags[tag_for_keyword(kwkw)] = False - def _find_all_similar_to_first_datasets(self) -> tuple: + def _find_all_similar_to_first_datasets( + self) -> Tuple[List[Dataset], List[BaseTag]]: """Takes the fist instance from mixed-frames and finds all dicom images that have the same distinguishing attributes. + Returns + ------- + Tuple[List[pydicom.dataset.Dataset], List[pydicom.tag.BaseTag]] + a pair of similar datasets and the corresponding list of + distinguishing tags + """ similar_ds: List[Dataset] = [self.mixed_frames_copy[0]] distinguishing_tags_existing = [] @@ -387,26 +397,20 @@ def _find_all_similar_to_first_datasets(self) -> tuple: similar_ds.append(ds) for msg_ in logger_msg: logger.info(msg_) - for ds in similar_ds: - if ds in self.mixed_frames_copy: - self.mixed_frames_copy = [ - nds for nds in self.mixed_frames_copy if nds != ds] return (similar_ds, distinguishing_tags_existing) @property def distinguishing_attribute_keywords(self) -> List[str]: - """Returns the list of all distinguising attributes found.""" - + """Returns the list of all distinguishing attributes found.""" return self._distinguishing_attribute_keywords[:] @property def frame_sets(self) -> List[_FrameSet]: """Returns the list of all FrameSets found.""" - return self._frame_sets -class _CommonLegacyConvertedEnhanceImage(SOPClass): +class _CommonLegacyConvertedEnhancedImage(SOPClass): """SOP class for common Legacy Converted Enhanced instances.""" @@ -451,7 +455,7 @@ def __init__( 'than one multiframe collection') frame_set = all_framesets.frame_sets[0] if sort_key is 
None: - sort_key = _CommonLegacyConvertedEnhanceImage.default_sort_key + sort_key = _CommonLegacyConvertedEnhancedImage.default_sort_key super().__init__( study_instance_uid=ref_ds.StudyInstanceUID, series_instance_uid=series_instance_uid, @@ -488,12 +492,12 @@ def __init__( self._perframe_functional_groups = DataElementSequence() for i in range(0, len(legacy_datasets)): item = Dataset() - self._perframe_functional_groups.append(item) + self._perframe_functional_groups = [ + Dataset() for _ in range(len(legacy_datasets))] tg = tag_for_keyword('PerFrameFunctionalGroupsSequence') self[tg] = DataElement(tg, 'SQ', self._perframe_functional_groups) self._shared_functional_groups = DataElementSequence() - item = Dataset() - self._shared_functional_groups.append(item) + self._shared_functional_groups.append(Dataset()) tg = tag_for_keyword('SharedFunctionalGroupsSequence') self[tg] = DataElement(tg, 'SQ', self._shared_functional_groups) self._distinguishing_attributes_tags = self._get_tag_used_dictionary( @@ -506,13 +510,13 @@ def __init__( frame_set.shared_tags) self.excluded_from_functional_groups_tags = { tag_for_keyword('SpecificCharacterSet'): False} - self.__build_blocks: List[Any] = [] + self._build_blocks: List[Any] = [] new_ds = [] for item in sorted(self._legacy_datasets, key=sort_key): new_ds.append(item) - self._module_excepted_list: dict = { + self._module_excepted_list: Dict[str, List[str]] = { "patient": [], "clinical-trial-subject": [], "general-study": @@ -737,7 +741,7 @@ def _get_or_create_attribute( A new DataElement created. 
""" - if kw is str: + if isinstance(kw, str): tg = tag_for_keyword(kw) else: tg = kw @@ -745,7 +749,6 @@ def _get_or_create_attribute( a = deepcopy(src[kw]) else: a = DataElement(tg, dictionary_VR(tg), default) - from pydicom.valuerep import DT, TM, DA if a.VR == 'DA' and isinstance(a.value, str): try: d_tmp = DA(a.value) @@ -809,17 +812,16 @@ def _add_module_to_mf_image_pixel(self) -> None: the current SOPClass from its single frame source. """ - module_and_excepted_at = { - "image-pixel": - [ - "ColorSpace", - "PixelDataProviderURL", - "ExtendedOffsetTable", - "ExtendedOffsetTableLengths", - "PixelData" - ] + module_and_excepted_attr = { + "image-pixel": [ + "ColorSpace", + "PixelDataProviderURL", + "ExtendedOffsetTable", + "ExtendedOffsetTableLengths", + "PixelData" + ] } - for module, except_at in module_and_excepted_at.items(): + for module, except_at in module_and_excepted_attr.items(): self._add_module( module, excepted_attributes=except_at, @@ -845,7 +847,7 @@ def _add_module_to_mf_enhanced_common_image(self) -> None: ref_dataset, self, kw, check_not_to_be_perframe=True, check_not_to_be_empty=False) - sum_compression_ratio: float = 0 + sum_compression_ratio = 0.0 c_ratio_tag = tag_for_keyword('LossyImageCompressionRatio') if tag_for_keyword('LossyImageCompression') in self._shared_tags and \ tag_for_keyword( @@ -874,14 +876,14 @@ def _add_module_to_mf_enhanced_common_image(self) -> None: phmi_kw = 'PhotometricInterpretation' phmi_a = self._get_or_create_attribute( self._legacy_datasets[0], phmi_kw, "MONOCHROME2") - LUT_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ + lut_shape_default = "INVERTED" if phmi_a.value == 'MONOCHROME1'\ else "IDENTITY" - LUT_shape_a = self._get_or_create_attribute( + lut_shape_a = self._get_or_create_attribute( self._legacy_datasets[0], 'PresentationLUTShape', - LUT_shape_default) - if not LUT_shape_a.is_empty: - self['PresentationLUTShape'] = LUT_shape_a + lut_shape_default) + if not lut_shape_a.is_empty: + 
self['PresentationLUTShape'] = lut_shape_a # Icon Image Sequence - always discard these def _add_module_to_mf_contrast_bolus(self) -> None: @@ -931,30 +933,19 @@ def _add_module_to_mf_enhanced_mr_image(self) -> None: "ImagedNucleus", check_not_to_be_perframe=True, check_not_to_be_empty=True) - self._copy_attrib_if_present( - self._legacy_datasets[0], - self, + attr_to_be_copied = [ "KSpaceFiltering", - check_not_to_be_perframe=True, - check_not_to_be_empty=True) - self._copy_attrib_if_present( - self._legacy_datasets[0], - self, "MagneticFieldStrength", - check_not_to_be_perframe=True, - check_not_to_be_empty=True) - self._copy_attrib_if_present( - self._legacy_datasets[0], - self, "ApplicableSafetyStandardAgency", - check_not_to_be_perframe=True, - check_not_to_be_empty=True) - self._copy_attrib_if_present( - self._legacy_datasets[0], - self, "ApplicableSafetyStandardDescription", - check_not_to_be_perframe=True, - check_not_to_be_empty=True) + ] + for attr in attr_to_be_copied: + self._copy_attrib_if_present( + self._legacy_datasets[0], + self, + attr, + check_not_to_be_perframe=True, + check_not_to_be_empty=True) def _add_module_to_mf_acquisition_context(self) -> None: """Copies/adds an `acquisition_context` multiframe module to @@ -971,7 +962,7 @@ def _add_module_to_mf_acquisition_context(self) -> None: def _get_value_for_frame_type( self, attrib: DataElement, - ) -> Union[list, None]: + ) -> Optional[List[str]]: """Guesses the appropriate FrameType attribute value from ImageType. Parameters ---------- @@ -981,7 +972,7 @@ def _get_value_for_frame_type( Returns ------- - Union[list, None] + Optional[List[str]] A new list of FrameType value is returned. If attrib is not of type DataElement None is returned. 
@@ -1497,8 +1488,8 @@ def _add_module_to_dataset_pixel_value_transformation( 'RescaleIntercept', check_not_to_be_perframe=False, check_not_to_be_empty=False) - haveValuesSoAddType = ('RescaleSlope' in item or - 'RescaleIntercept' in item) + have_values_so_add_type = ('RescaleSlope' in item or + 'RescaleIntercept' in item) self._copy_attrib_if_present(source, item, 'RescaleType', @@ -1507,17 +1498,15 @@ def _add_module_to_dataset_pixel_value_transformation( value = '' modality = '' if 'Modality' not in source\ else source["Modality"].value - if haveValuesSoAddType: + if have_values_so_add_type: value = 'US' if modality == 'CT': - containes_localizer = False + contains_localizer = False image_type_v = [] if 'ImageType' not in source\ else source['ImageType'].value - for i in image_type_v: - if i == 'LOCALIZER': - containes_localizer = True - break - if not containes_localizer: + contains_localizer = any( + i == 'LOCALIZER' for i in image_type_v) + if not contains_localizer: value = "HU" else: value = 'US' @@ -1548,10 +1537,11 @@ def _add_module_to_mf_pixel_value_transformation(self) -> None: self._add_module_to_dataset_pixel_value_transformation( self._legacy_datasets[0], item) elif self._has_pixel_value_transformation(self._perframe_tags): - for i in range(0, len(self._legacy_datasets)): - item = self._perframe_functional_groups[i] - self._add_module_to_dataset_pixel_value_transformation( - self._legacy_datasets[i], item) + for item, legacy in zip( + self._perframe_functional_groups, + self._legacy_datasets + ): + self._add_module_to_dataset_pixel_value_transformation(legacy, item) def _has_referenced_image(self, tags: Dict[BaseTag, bool]) -> bool: """returns true if attributes specific to @@ -1725,7 +1715,6 @@ def _add_largest_smallest_pixel_value(self) -> None: """ ltg = tag_for_keyword("LargestImagePixelValue") - from sys import float_info lval = float_info.min if ltg in self._perframe_tags: for frame in self._legacy_datasets: @@ -1900,27 +1889,23 @@ def 
_add_module_to_mf_conversion_source(self) -> None: def _build_slices_geometry_frame_content(self) -> None: """Instantiates an object of _GeometryOfSlice for each slice.""" - frame_count = len(self._legacy_datasets) - for i in range(0, frame_count): - curr_frame = self._legacy_datasets[i] + for curr_frame in self._legacy_datasets: if 'ImagePositionPatient' not in curr_frame: image_position_patient_v = None else: - image_position_patient_v =\ - curr_frame['ImagePositionPatient'].value + image_position_patient_v = curr_frame.ImagePositionPatient if 'ImageOrientationPatient' not in curr_frame: image_orientation_patient_v = None else: - image_orientation_patient_v =\ - curr_frame['ImageOrientationPatient'].value + image_orientation_patient_v = curr_frame.ImageOrientationPatient if 'PixelSpacing' not in curr_frame: pixel_spacing_v = None else: - pixel_spacing_v = curr_frame['PixelSpacing'].value + pixel_spacing_v = curr_frame.PixelSpacing if 'SliceThickness' not in curr_frame: slice_thickness_v = 0.0 else: - slice_thickness_v = curr_frame['SliceThickness'].value + slice_thickness_v = curr_frame.SliceThickness if (image_orientation_patient_v is not None and image_position_patient_v is not None and pixel_spacing_v is not None): @@ -1937,35 +1922,26 @@ def _build_slices_geometry_frame_content(self) -> None: self._slices.append(_GeometryOfSlice(row, col, tpl, voxel_spacing)) else: - logger.error( + logger.warning( "Error in geometry. 
One or more required " "attributes are not available") - logger.error( + logger.warning( "\tImageOrientationPatient =" f" {image_orientation_patient_v}") - logger.error( + logger.warning( "\tImagePositionPatient =" f" {image_position_patient_v}") - logger.error(f"\tPixelSpacing = {pixel_spacing_v}") + logger.warning(f"\tPixelSpacing = {pixel_spacing_v}") self._slices = [] # clear the slices break def _are_all_slices_parallel_frame_content(self) -> bool: """Returns true if all slices are parallel otherwise, false.""" - slice_count = len(self._slices) - if slice_count >= 2: - last_slice = self._slices[0] - for i in range(1, slice_count): - curr_slice = self._slices[i] - if not _GeometryOfSlice.are_parallel( - curr_slice, last_slice, self._tolerance): - return False - last_slice = curr_slice - return True - elif slice_count == 1: - return True - else: - return False + return all( + _GeometryOfSlice.are_parallel( + sl, self._slices[0], self._tolerance) + for sl in self._slices + ) def _add_stack_info_frame_content(self) -> None: """Adds stack info to the FrameContentSequence dicom attribute.""" @@ -2165,7 +2141,7 @@ def _copy_data_pixel_data( des.extend(src) def _add_module_to_mf_pixel_data(self) -> None: - """Copies/add`s a pixel_data` multiframe module to + """Copies/adds a `pixel_data` multiframe module to the current SOPClass from its single frame source. 
""" @@ -2179,10 +2155,10 @@ def _add_module_to_mf_pixel_data(self) -> None: self._number_of_pixels_per_frame = row * col self._number_of_pixels = row * col * self._frame_count kw = "PixelData" - for i in range(0, len(self._legacy_datasets)): - if kw not in self._legacy_datasets[i]: + for legacy_ds in self._legacy_datasets: + if kw not in legacy_ds: continue - pixel_data_a = self._legacy_datasets[i][kw] + pixel_data_a = legacy_ds[kw] if self._is_other_byte_vr_pixel_data(pixel_data_a.VR): if len(self._word_data) != 0: raise TypeError( @@ -2212,8 +2188,7 @@ def _add_module_to_mf_content_date_time(self) -> None: """ default_atrs = ["Acquisition", "Series", "Study"] - for i in range(0, len(self._legacy_datasets)): - src = self._legacy_datasets[i] + for src in self._legacy_datasets: default_date = self.farthest_future_date for def_atr in default_atrs: at_tg = tag_for_keyword(def_atr + "Date") @@ -2360,16 +2335,16 @@ def default_sort_key( """ out: tuple = tuple() if 'SeriesNumber' in x: - out += (x['SeriesNumber'].value, ) + out += (x.SeriesNumber, ) if 'InstanceNumber' in x: - out += (x['InstanceNumber'].value, ) + out += (x.InstanceNumber, ) if 'SOPInstanceUID' in x: - out += (x['SOPInstanceUID'].value, ) + out += (x.SOPInstanceUID, ) return out def _clear_build_blocks(self) -> None: """Clears the array containing all methods for multiframe conversion""" - self.__build_blocks = [] + self._build_blocks = [] def _add_common_ct_pet_mr_build_blocks(self) -> None: """Arranges common methods for multiframe conversion and @@ -2398,7 +2373,7 @@ def _add_common_ct_pet_mr_build_blocks(self) -> None: [self._add_module_to_mf_unassigned_shared, None], ] for b in blocks: - self.__build_blocks.append(b) + self._build_blocks.append(b) def _add_ct_specific_build_blocks(self) -> None: """Arranges CT specific methods for multiframe conversion and @@ -2414,7 +2389,7 @@ def _add_ct_specific_build_blocks(self) -> None: [self._add_module_to_mf_contrast_bolus, None], ] for b in blocks: - 
self.__build_blocks.append(b) + self._build_blocks.append(b) def _add_mr_specific_build_blocks(self) -> None: """Arranges MRI specific methods for multiframe conversion and @@ -2430,7 +2405,7 @@ def _add_mr_specific_build_blocks(self) -> None: [self._add_module_to_mf_contrast_bolus, None], ] for b in blocks: - self.__build_blocks.append(b) + self._build_blocks.append(b) def _add_pet_specific_build_blocks(self) -> None: """Arranges PET specific methods for multiframe conversion and @@ -2445,7 +2420,7 @@ def _add_pet_specific_build_blocks(self) -> None: [self._add_module_to_mf_enhanced_pet_image, None], ] for b in blocks: - self.__build_blocks.append(b) + self._build_blocks.append(b) def _add_build_blocks_for_mr(self) -> None: """Arranges all methods necessary for MRI multiframe conversion and @@ -2480,7 +2455,7 @@ def _convert2multiframe(self) -> None: """ logger.debug('Start singleframe to multiframe conversion') - for fun, args in self.__build_blocks: + for fun, args in self._build_blocks: if not args: fun() else: @@ -2488,7 +2463,7 @@ def _convert2multiframe(self) -> None: logger.debug('Conversion succeeded') -class LegacyConvertedEnhancedCTImage(_CommonLegacyConvertedEnhanceImage): +class LegacyConvertedEnhancedCTImage(_CommonLegacyConvertedEnhancedImage): """SOP class for Legacy Converted Enhanced CT Image instances.""" @@ -2546,7 +2521,7 @@ def __init__( self._convert2multiframe() -class LegacyConvertedEnhancedPETImage(_CommonLegacyConvertedEnhanceImage): +class LegacyConvertedEnhancedPETImage(_CommonLegacyConvertedEnhancedImage): """SOP class for Legacy Converted Enhanced PET Image instances.""" @@ -2604,7 +2579,7 @@ def __init__( self._convert2multiframe() -class LegacyConvertedEnhancedMRImage(_CommonLegacyConvertedEnhanceImage): +class LegacyConvertedEnhancedMRImage(_CommonLegacyConvertedEnhancedImage): """SOP class for Legacy Converted Enhanced MR Image instances.""" diff --git a/tests/test_legacy.py b/tests/test_legacy.py index 973095fe..1d4b354a 100644 
--- a/tests/test_legacy.py +++ b/tests/test_legacy.py @@ -93,7 +93,7 @@ def _generate_frameset(self, 'ORIGINAL', 'PRIMARY', 'RECON', 'EMISSION'] tmp_dataset.PixelSpacing = [ self._pixel_spacing, self._pixel_spacing] - tmp_dataset.PatientName = 'John^Doe' + tmp_dataset.PatientName = 'Doe^John' tmp_dataset.FrameOfReferenceUID = frame_of_ref_uid tmp_dataset.SOPClassUID = sop_classes[system][1] tmp_dataset.SOPInstanceUID = generate_uid() @@ -312,8 +312,8 @@ def setUp(self) -> None: def test_attribute_equality(self) -> None: for vr, [v1, v2, v3] in self.data.items(): - assert sop._DicomHelper.isequal(v1.value, v2.value) is True - assert sop._DicomHelper.isequal(v1.value, v3.value) is False + assert sop._DicomHelper.isequal(v1.value, v2.value) + assert not sop._DicomHelper.isequal(v1.value, v3.value) class TestFrameSetCollection(unittest.TestCase):