Example #1
from pydicom.datadict import DicomDictionary
from pydicom.dataelem import DataElement
from pydicom.dataset import Dataset


def create_attribute_list(attrs):
    ds = Dataset()
    ds.PatientID = '12345'
    ds.PatientName = 'Test^User'
    for attr in attrs:
        # Each DicomDictionary entry is a (VR, VM, Name, Retired, Keyword) tuple
        print('(VR, VM, Name, Retired, Keyword)\n', DicomDictionary.get(attr))
        didic = DicomDictionary.get(attr)
        # Create an empty element using the VR from the dictionary entry
        elem = DataElement(attr, didic[0], '')
        ds.add(elem)
    return ds
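
A brief usage sketch (added here, not part of the original snippet): the tags passed in are standard public tags, and each element is created empty with the VR taken from its dictionary entry.

# 0x00080060 is Modality (CS), 0x00080070 is Manufacturer (LO)
ds = create_attribute_list([0x00080060, 0x00080070])
print(ds)  # PatientID and PatientName plus the two empty elements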
Example #2
from pydicom.datadict import DicomDictionary


def fix_VM1_callback(dataset, data_element):
    r"""Update the data element, fixing its VM based on the public tag definition.

    This addresses a non-conformance for elements with a string VR that contain
    a `\` in their value, which pydicom interprets as a multi-valued array.
    This function re-joins the string and is meant to be used as a callback,
    e.g. with ``Dataset.walk``.

    From the DICOM Standard, Part 5, Section 6.2, for elements with a VR of LO,
    such as Series Description: A character string that may be padded with
    leading and/or trailing spaces. The character code 5CH (the BACKSLASH "\"
    in ISO-IR 6) shall not be present, as it is used as the delimiter between
    values in multi-valued data elements. The string shall not have Control
    Characters except for ESC.

    Args:
        dataset (pydicom.Dataset): A pydicom Dataset
        data_element (pydicom.DataElement): A pydicom DataElement from the
            Dataset; it is updated in place.
    """
    vr, vm, _, _, _ = DicomDictionary.get(data_element.tag)
    # Only single-valued string VRs are re-joined; binary, numeric and
    # sequence VRs are skipped.
    if vr not in ['UT', 'ST', 'LT', 'FL', 'FD', 'AT', 'OB', 'OW', 'OF', 'SL', 'SQ',
                  'SS', 'UL', 'OB/OW', 'OW/OB', 'OB or OW', 'OW or OB', 'UN'] \
            and 'US' not in vr:
        if vm == '1' and hasattr(data_element, 'VM') and data_element.VM > 1:
            # Assign to the private _value so pydicom does not split the
            # string on the backslash again.
            data_element._value = '\\'.join(data_element.value)
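
A minimal usage sketch (added here, not in the original excerpt): pydicom's Dataset.walk calls its callback with (dataset, data_element) for every element, which matches this signature. The file names below are placeholders.

import pydicom

ds = pydicom.dcmread('input.dcm')   # placeholder input path
ds.walk(fix_VM1_callback)           # re-joins offending string values in place
ds.save_as('fixed.dcm')             # placeholder output path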
Example #3
def fix_type_based_on_dicom_vm(header):
    """Coerce each header value to the shape implied by its VM in the DICOM dictionary."""
    exc_keys = []
    for key, val in header.items():
        try:
            vr, vm, _, _, _ = DicomDictionary.get(tag_for_keyword(key))
        except (ValueError, TypeError):
            exc_keys.append(key)
            continue

        if vr != "SQ":
            if vm != "1" and not isinstance(val, list):  # anything else is a list
                header[key] = [val]
            elif vm == "1" and isinstance(val, list):
                if len(val) == 1:
                    header[key] = val[0]
                else:
                    # Re-join multi-valued content only for single-valued
                    # string VRs (skip binary, numeric and sequence VRs).
                    if (
                        vr not in [
                            "UT", "ST", "LT", "FL", "FD", "AT", "OB", "OW",
                            "OF", "SL", "SQ", "SS", "UL", "OB/OW", "OW/OB",
                            "OB or OW", "OW or OB", "UN",
                        ]
                        and "US" not in vr
                    ):
                        val = cast_castable_floats(val)
                        header[key] = "\\".join([str(item) for item in val])
        else:
            for dataset in val:
                if isinstance(dataset, dict):
                    fix_type_based_on_dicom_vm(dataset)
                else:
                    log.warning(
                        "%s SQ list item is not a dictionary - value = %s", key, dataset
                    )
    if len(exc_keys) > 0:
        log.warning(
            "%s Dicom data elements were not type fixed based on VM", len(exc_keys)
        )
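
A short illustration of the intended effect (added; not from the original source). It assumes `header` is a plain dict keyed by DICOM keywords and that the module-level helpers used above (DicomDictionary, tag_for_keyword, cast_castable_floats, log) are available.

header = {
    "ImageType": "ORIGINAL",    # VM is "2-n", so the scalar gets wrapped in a list
    "PatientID": ["12345"],     # VM is "1", so the one-element list gets unwrapped
}
fix_type_based_on_dicom_vm(header)
print(header)  # {'ImageType': ['ORIGINAL'], 'PatientID': '12345'}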
Example #4
    def __init__(self, file_name, IsCreate=False):
        ## This code block was taken from the output of a MATLAB secondary
        ## capture.  I do not know what the long dotted UIDs mean, but
        ## this code works.
        # Define items as (VR, VM, description, is_retired flag, keyword)
        #   Leave is_retired flag blank.

        # Update the dictionary itself
        DicomDictionary.update(self.AI_ANLAYSIS_ITEM)

        if IsCreate:
            self.__file_name__ = file_name
            file_meta = Dataset()

            # Ultrasound Multiframe Image Storage - https://www.dicomlibrary.com/dicom/sop/
            file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.3.1'
            file_meta.MediaStorageSOPInstanceUID = '999.999.2.19941105.134500.2.101'
            file_meta.ImplementationClassUID = '999.999'
            # Transfer Syntax - https://www.dicomlibrary.com/dicom/transfer-syntax/
            file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'

            ds = FileDataset(file_name, {},
                             file_meta=file_meta,
                             preamble=b"\0" * 128)
            # DICOM modality, that represents DICOM file type - https://www.dicomlibrary.com/dicom/modality/
            ds.Modality = 'US'  # Ultrasound
            ds.ContentDate = str(datetime.date.today()).replace('-', '')
            ds.ContentTime = str(time.time())  # seconds since the epoch (not a valid DICOM TM value)
            ds.StudyInstanceUID = '999.999.2.19941105.134500'
            ds.SeriesInstanceUID = '999.999.2.19941105.134500.2'
            ds.SOPInstanceUID = '999.999.2.19941105.134500.2.101'

            # https://searchcode.com/codesearch/view/13929148/
            ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.3.1'  # 'Ultrasound Multi-frame Image Storage' - 1.2.840.10008.5.1.4.1.1.3.1
            # ds.SecondaryCaptureDeviceManufctur = 'Python 2.7.3'
            self.Dataset = ds
        else:
            self.Dataset = dicom.read_file(file_name)
Example #5
def write(path: Path) -> None:
    # read methods up to start of tag enum values
    lines: List[str] = []
    with open(path, "r") as f:
        for line in f.readlines():
            lines.append(line)
            if line.strip() == MARKER:
                break

    # write tag enum values
    with open(path, "w") as f:
        f.writelines(lines)
        for tag, v in DicomDictionary.items():
            keyword = v[-1]
            if keyword:
                f.write(f"    {keyword} = {tag}\n")
Example #6
def fix_type_based_on_dicom_vm(header):
    exc_keys = []
    for key, val in header.items():
        try:
            vr, vm, _, _, _ = DicomDictionary.get(tag_for_keyword(key))
        except (ValueError, TypeError):
            exc_keys.append(key)
            continue

        if vr != 'SQ':
            if vm != '1' and not isinstance(val, list):  # anything else is a list
                header[key] = [val]
        else:
            for dataset in val:
                fix_type_based_on_dicom_vm(dataset)
    if len(exc_keys) > 0:
        log.warning('%s Dicom data elements were not type fixed based on VM',
                    len(exc_keys))
Example #7
def fix_type_based_on_dicom_vm(header):
    exc_keys = []
    for key, val in header.items():
        try:
            vr, vm, _, _, _ = DicomDictionary.get(tag_for_keyword(key))
        except (ValueError, TypeError):
            exc_keys.append(key)
            continue

        if vr != 'SQ':
            if vm != '1' and not isinstance(val, list):  # anything else is a list
                header[key] = [val]
        elif not isinstance(val, list):
            # To deal with DataElement that pydicom did not read as sequence
            # (e.g. stored as OB and pydicom parsing them as binary string)
            exc_keys.append(key)
        else:
            for dataset in val:
                fix_type_based_on_dicom_vm(dataset)
    if len(exc_keys) > 0:
        log.warning('%s Dicom data elements were not type fixed based on VM', len(exc_keys))
Example #8
"""Add dictionary items to the standard DICOM dictionary

This example shows how to dynamically add a Python dictionary to the
standard DICOM dictionary.
"""

from __future__ import print_function

from pydicom.datadict import DicomDictionary, keyword_dict
from pydicom.dataset import Dataset

print(__doc__)

new_dict_items = {
    0x10011001: ('UL', '1', 'Test One', '', 'TestOne'),
    0x10011002: ('OB', '1', 'Test Two', '', 'TestTwo'),
    0x10011003: ('UI', '1', 'Test Three', '', 'TestThree')
}

DicomDictionary.update(new_dict_items)

new_names_dict = dict([(val[4], tag) for tag, val in new_dict_items.items()])
print(new_names_dict)
keyword_dict.update(new_names_dict)

ds = Dataset()

ds.TestOne = 42
ds.TestTwo = '12345'
ds.TestThree = '1.2.3.4.5'

print(ds.top())
Example #9
""" Add DICOM Attributes not yet part of DICOM

    This is from CP-1570, and should be removed once the CP becomes part of
    the main standard. Note that tags may differ.
"""
from pydicom.datadict import DicomDictionary, keyword_dict

# Define items as (VR, VM, description, is_retired flag, keyword)
#   Leave is_retired flag blank.
dental_acquisition_context = {
    0x10011001: ('SQ', '1', "Acquisition View", '', 'AcquisitionView'),
    0x10011002: ('SQ', '1', "Image View", '', 'ImageView'),
    0x10011003: ('SQ', '1', "Functional condition present during "
                 "acquisition", '', 'FunctionalCondition'),
    0x10011004: ('SQ', '1', "Occlusal Relationship", '', 'OcclusalRelationship'),
}

# Update the dictionary itself
DicomDictionary.update(dental_acquisition_context)

# Update the reverse mapping from name to tag
keyword_dict.update(
    dict([(val[4], tag) for tag, val in dental_acquisition_context.items()]))
Example #10

from copy import deepcopy

import numpy as np

from pydicom import Dataset
from pydicom.datadict import DicomDictionary

from ...libutils import get_imports
IMPORTS = get_imports(globals())

DICOM_NAMES = [item[-1] for _, item in DicomDictionary.items()]


def convert_nparray_and_set_key_value_in_dataset(dataset, key, value):
    if isinstance(value, np.ndarray):
        value = value.tolist()

    setattr(dataset, key, value)
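
A small usage sketch for the helper above (added here; the attribute and values are illustrative):

ds = Dataset()
convert_nparray_and_set_key_value_in_dataset(
    ds, "ImagePositionPatient", np.array([0.0, 0.0, 1.5]))
print(ds.ImagePositionPatient)  # stored as a regular DICOM multi-value, not an ndarray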


def dicom_dataset_from_dict(input_dict: dict, template_ds=None):
    """Create a pydicom DICOM object from a dictionary"""
    if template_ds is None:
        dataset = Dataset()
    else:
        dataset = deepcopy(template_ds)
Example #11
from pydicom.datadict import DicomDictionary, keyword_dict
from pydicom.dataset import Dataset

print(__doc__)

# Define items as (VR, VM, description, is_retired flag, keyword)
#   Leave is_retired flag blank.
new_dict_items = {
    0x10011001: ('UL', '1', "Test One", '', 'TestOne'),
    0x10011002: ('OB', '1', "Test Two", '', 'TestTwo'),
    0x10011003: ('UI', '1', "Test Three", '', 'TestThree'),
}

# Update the dictionary itself
DicomDictionary.update(new_dict_items)

# Update the reverse mapping from name to tag
new_names_dict = dict([(val[4], tag) for tag, val in
                       new_dict_items.items()])
keyword_dict.update(new_names_dict)

# Test that it is working
ds = Dataset()  # or could get one from dcmread, etc

ds.TestOne = 42
ds.TestTwo = '12345'
ds.TestThree = '1.2.3.4.5'

print(ds.top())
#!/usr/bin/python3

# Gets a list of tags from the pyDicom dictionary

import csv
import pydicom

from pydicom.datadict import DicomDictionary

keyList = list(DicomDictionary.keys())

keyList.sort()

tagsAndKeywords = len(keyList) * [None]

for i in range(len(keyList)):
    hexKey = '%08X' % keyList[i]
    tag = '(' + hexKey[0:4] + ',' + hexKey[4:] + ')'
    tagsAndKeywords[i] = [tag, DicomDictionary[keyList[i]][4]]

# Add header
tagsAndKeywords.insert(0, ['Tag', 'Keyword'])

# newline='' prevents blank rows on Windows; the with block closes the file.
with open('pydicomTags.csv', 'w', newline='') as csvFile:
    writer = csv.writer(csvFile)
    writer.writerows(tagsAndKeywords)

def table_dicom_data(dicom_tree):
    if VERBOSE > 1:
        print_this(currLog,
                   st.RESET_ALL + fg.GREEN + '* VISITING' + st.RESET_ALL)

    new_dict_items = {
        0x0021105e: ('DS', '1', "FloatSlopRTIATimer", '', 'FloatSlopRTIATimer'),
        0x00181060: ('DS', '1', "TriggerTime", '', 'TriggerTime'),
    }
    DicomDictionary.update(new_dict_items)
    new_names_dict = dict([(val[4], tag)
                           for tag, val in new_dict_items.items()])
    keyword_dict.update(new_names_dict)

    props = [
        'Path',
        'NumFiles',
        'StudyID',
        'StudyDate',
        'StudyTime',
        'PatientID',
        'PatientName',
        'ProtocolName',
        'SeriesDescription',
        'SeriesNumber',
        'SeriesTime',
        'ImagesInAcquisition',
        'InStackPositionNumber',
        'InstanceNumber',
        'SliceLocation',
        'TriggerTime',
        'FloatSlopRTIATimer',
    ]

    db = OrderedDict([(ii, []) for ii in props])
    if isinstance(dicom_tree, dict):
        for dicom in dicom_tree:
            if not isinstance(dicom_tree[dicom], dict):
                continue
            for patient in dicom_tree[dicom]:
                if not isinstance(dicom_tree[dicom][patient], dict):
                    continue
                for exam in dicom_tree[dicom][patient]:
                    if not isinstance(dicom_tree[dicom][patient][exam], dict):
                        continue
                    for serie in dicom_tree[dicom][patient][exam]:
                        series = dicom_tree[dicom][patient][exam][serie]
                        if not isinstance(series, dict):
                            continue
                        for fn in series:
                            if VERBOSE > 1:
                                print_this(currLog,
                                           fg.CYAN + '  ' + fn + st.RESET_ALL)

                            ds = pydicom.dcmread(fn, stop_before_pixels=True)
                            ds.decode()
                            for prop in props:
                                if prop == 'Path':
                                    db[prop] += [os.path.dirname(fn)]
                                elif prop == 'NumFiles':
                                    db[prop] += [
                                        len(glob.glob(os.path.dirname(fn) + '/*'))
                                    ]
                                else:
                                    db[prop] += ([getattr(ds, prop)]
                                                 if hasattr(ds, prop) else [''])

                            # Disabled debug output kept from the original:
                            if False:
                                print_this(currLog, str(db))
                            if False:
                                print_this(currLog, str(ds))

    df = pd.DataFrame(db)
    df = df.sort_values(by=['StudyID', 'SeriesNumber', 'InstanceNumber'],
                        axis=0)
    if not args.include_stacked_screen_saves:
        df = df[df.SeriesDescription.str.contains("Stacked Screen Save") == False]
        df = df[df.SeriesDescription.str.contains("Screen Save") == False]
    if not args.keep_sequence_information:
        # Trim path.
        df.Path = df.Path.apply(lambda xx: os.path.dirname(xx))
        # Remove duplicates (must be done AFTER path trimming).
        df = df.drop_duplicates(subset='Path', keep="last").reset_index()
    if not args.more_info:
        # df = df.drop(labels='index',                 axis='columns')
        df = df.drop(labels='SeriesDescription', axis='columns')
        df = df.drop(labels='SeriesNumber', axis='columns')
        df = df.drop(labels='SeriesTime', axis='columns')
        df = df.drop(labels='NumFiles', axis='columns')
    if not args.report_files:
        df = df.drop(labels='ImagesInAcquisition', axis='columns')
        df = df.drop(labels='InStackPositionNumber', axis='columns')
        df = df.drop(labels='InstanceNumber', axis='columns')
        df = df.drop(labels='SliceLocation', axis='columns')
        df = df.drop(labels='FloatSlopRTIATimer', axis='columns')
        df = df.drop(labels='TriggerTime', axis='columns')

    print_this(currLog, st.RESET_ALL + fg.GREEN + '* TABLE' + st.RESET_ALL)
    print_this(currLog, tabulate(df, headers='keys', tablefmt="orgtbl"))
    if args.write_csv:
        fn = args.log_file
        df.to_csv(path_or_buf=rchop(fn, '.org') + '.csv', sep=',')