コード例 #1
0
 def setUp(self):
     """Load uncompressed and RLE-compressed test datasets and switch
     the pixel data handlers to RLE-first for the duration of the test."""
     # Remember the current handler configuration so it can be restored.
     self.original_handlers = pydicom.config.image_handlers
     pydicom.config.image_handlers = [rle_handler, numpy_handler]
     # Reference (uncompressed) datasets and their RLE counterparts.
     self.mr_small = dcmread(mr_name)
     self.mr_rle = dcmread(mr_rle)
     self.emri_small = dcmread(emri_name)
     self.emri_rle = dcmread(emri_rle)
コード例 #2
0
ファイル: test_charset.py プロジェクト: scaramallion/pydicom
    def test_charset_patient_names(self, filename, patient_name):
        """Round-trip check: patient names decode correctly after reading
        and survive an explicit-VR little-endian write/read cycle."""

        def _roundtrip(dataset):
            # Write explicit VR little endian and read the result back.
            buffer = DicomBytesIO()
            buffer.is_implicit_VR = False
            buffer.is_little_endian = True
            dataset.save_as(buffer, write_like_original=False)
            buffer.seek(0)
            return dcmread(buffer)

        # Patient names must be decoded correctly on read.
        ds = dcmread(get_charset_files(filename + '.dcm')[0])
        ds.decode()
        assert patient_name == ds.PatientName

        # The name must survive being written back out.
        ds = _roundtrip(ds)
        assert patient_name == ds.PatientName

        # Same check without the cached original byte string
        # (only PersonName3 objects have ``original_string``).
        if hasattr(ds.PatientName, 'original_string'):
            ds.PatientName.original_string = None
            ds = _roundtrip(ds)
            assert patient_name == ds.PatientName
コード例 #3
0
ファイル: fsbrowser.py プロジェクト: mjirik/io3d
 def get_path_info_preview(self, path):
     """Preview the content at ``path``.

     If ``path`` points at a .jpg/.png/.dcm file, display that image.
     Otherwise treat it as a directory and display the first previewable
     image file found inside it.

     Bug fixes vs. the previous version:
     * ``(".dcm" or ".Dcm" or ".DCM") in x`` only ever tested the first
       literal (``or`` returns the first truthy operand); extensions are
       now matched case-insensitively with ``str.endswith``.
     * the directory scan no longer aborts on the first file that is not
       a previewable image; it keeps scanning.
     """
     path_lower = path.lower()
     # name of the deepest path component
     name = os.path.basename(os.path.normpath(path))
     name_final = ("name: " + name)
     path_sl = path + "/"

     def _show_image(img_path):
         # Display a raster image with scikit-image.
         img = io.imread(img_path)
         io.imshow(img)
         io.show()

     if path_lower.endswith((".jpg", ".png")):
         preview = ("Used path leads to current image.")
         _show_image(path)
     elif path_lower.endswith(".dcm"):
         preview = ("Used path leads to current image.")
         ds = pdicom.dcmread(path)
         plt.imshow(ds.pixel_array, cmap=plt.cm.bone)
     else:
         preview = ("Preview of files in dir: " + name)
         only_files = [f for f in listdir(path) if isfile(join(path, f))]

         for x in only_files:
             x_lower = x.lower()
             preview_path = path_sl + os.path.basename(
                 os.path.normpath(path_sl + x))
             if x_lower.endswith(".dcm"):
                 ds = pdicom.dcmread(preview_path)
                 plt.imshow(ds.pixel_array, cmap=plt.cm.bone)
                 break
             elif x_lower.endswith((".jpg", ".png")):
                 _show_image(preview_path)
                 break
コード例 #4
0
ファイル: test_charset.py プロジェクト: scaramallion/pydicom
 def test_changed_character_set(self):
     """Regression test for #629: changing SpecificCharacterSet before
     writing must re-encode text elements with the new charset."""
     from pydicom.filebase import DicomBytesIO
     # The test dataset is Latin-1 encoded on disk.
     ds = dcmread(get_charset_files("chrFrenMulti.dcm")[0])
     # Switch the declared charset to UTF-8 before writing back.
     ds.SpecificCharacterSet = 'ISO_IR 192'
     buffer = DicomBytesIO()
     ds.save_as(buffer, write_like_original=False)
     buffer.seek(0)
     ds_out = dcmread(buffer)
     # The raw element bytes must now be UTF-8 encoded.
     assert b'Buc^J\xc3\xa9r\xc3\xb4me' == ds_out.get_item(0x00100010).value
コード例 #5
0
ファイル: test_encaps.py プロジェクト: darcymason/pydicom
    def test_encapsulate_single_fragment_per_frame_bot(self):
        """Test encapsulating one fragment per frame with a BOT present."""
        ds = dcmread(JP2K_10FRAME_NOBOT)
        frames = decode_data_sequence(ds.PixelData)
        assert 10 == len(frames)

        # Re-encapsulate with a Basic Offset Table; the decoded frame
        # data must survive unchanged.
        data = encapsulate(frames, fragments_per_frame=1, has_bot=True)
        for out, ref in zip(decode_data_sequence(data), frames):
            assert out == ref

        # The BOT must contain the expected byte offset of every frame.
        fp = DicomBytesIO(data)
        fp.is_little_endian = True
        expected = [
            0x0000,  # 0
            0x0eee,  # 3822
            0x1df6,  # 7670
            0x2cf8,  # 11512
            0x3bfc,  # 15356
            0x4ade,  # 19166
            0x59a2,  # 22946
            0x6834,  # 26676
            0x76e2,  # 30434
            0x8594,  # 34196
        ]
        assert expected == get_frame_offsets(fp)
コード例 #6
0
    def setup(self):
        """Load the 10-frame RLE EMRI dataset used by the benchmarks."""
        # MONOCHROME2, 64x64, 1 sample/pixel, 16 bits allocated, 12 stored
        self.ds = dcmread(EMRI_RLE_10F)
        self.frames = decode_data_sequence(self.ds.PixelData)
        assert 10 == len(self.frames)

        # Number of benchmark repetitions.
        self.no_runs = 100
コード例 #7
0
ファイル: image.py プロジェクト: jrkerns/pylinac
 def is_CT_slice(file: str) -> bool:
     """Return True if *file* is a CT Image Storage DICOM file."""
     try:
         # The header is enough to identify the SOP class; skip pixels.
         header = pydicom.dcmread(file, force=True, stop_before_pixels=True)
         return 'CT Image Storage' == header.SOPClassUID.name
     except (InvalidDicomError, AttributeError, MemoryError):
         # Unparseable or malformed files are simply "not a CT slice".
         return False
コード例 #8
0
ファイル: test_charset.py プロジェクト: scaramallion/pydicom
 def test_invalid_character_set_enforce_valid(self):
     """charset: decoding raises LookupError for an unknown encoding
     when enforce_valid_values is enabled."""
     config.enforce_valid_values = True
     dataset = dcmread(get_testdata_files("CT_small.dcm")[0])
     # Clear the cached encoding so the declared charset is re-evaluated.
     dataset.read_encoding = None
     dataset.SpecificCharacterSet = 'Unsupported'
     with pytest.raises(LookupError, match='unknown encoding: Unsupported'):
         dataset.decode()
コード例 #9
0
ファイル: test_dataset.py プロジェクト: moloney/pydicom
 def test_trait_names(self):
     """Dataset.trait_names lists element keywords alongside methods."""
     ds = dcmread(get_testdata_files('CT_small.dcm')[0], force=True)
     names = ds.trait_names()
     # Element keywords and ordinary attributes must both appear.
     for expected in ('PatientName', 'save_as', 'PixelData'):
         assert expected in names
コード例 #10
0
ファイル: tools.py プロジェクト: jrkerns/pylinac
def is_dicom(path):
    """Whether the file is a readable DICOM file via pydicom.

    Parameters
    ----------
    path : str
        Path of the file to check.

    Returns
    -------
    bool
        True if the file parses as DICOM and its pixel data is decodable.
    """
    try:
        ds = pydicom.dcmread(path, force=True)
        # Accessing pixel_array forces pixel data decoding; failure here
        # means the file is not usable as an image.
        ds.pixel_array
        return True
    except Exception:
        # Narrowed from a bare ``except:``, which would also swallow
        # SystemExit and KeyboardInterrupt.
        return False
コード例 #11
0
ファイル: test_charset.py プロジェクト: scaramallion/pydicom
 def test_decoding_with_specific_tags(self):
     """Decoding is applied even when Specific Character Set is absent
     from the requested specific tags."""
     dataset = dcmread(get_charset_files("chrRuss.dcm")[0],
                       specific_tags=['PatientName'])
     dataset.decode()
     # The charset element is always read alongside the requested tags.
     assert len(dataset) == 2
     assert dataset.PatientName == u'Люкceмбypг'
コード例 #12
0
ファイル: test_charset.py プロジェクト: kayarre/pydicom
 def test_latin1(self):
     """charset: can read and decode a latin_1 encoded file."""
     ds = dcmread(latin1_file)
     ds.decode()
     # Converting the name to text must not raise a unicode error.
     expected = u'Buc^J\xe9r\xf4me'
     actual = ds.PatientName
     self.assertEqual(expected, actual,
                      "Expected %r, got %r" % (expected, actual))
コード例 #13
0
ファイル: test_unicode.py プロジェクト: jrkerns/pydicom
    def testRead(self):
        """Unicode: Can read a file with unicode characters in name..."""
        uni_name = u'test°'

        # Skip when the filesystem cannot encode the name at all.
        try:
            uni_name.encode(sys.getfilesystemencoding())
        except UnicodeEncodeError:
            print("SKIP: Environment doesn't support unicode filenames")
            return

        # The read is expected to fail with IOError (file missing) but
        # must never fail on encoding the filename itself.
        try:
            dcmread(uni_name)
        except UnicodeEncodeError:
            self.fail("UnicodeEncodeError generated for unicode name")
        except IOError:
            # ignore file doesn't exist error
            pass
コード例 #14
0
ファイル: utilities.py プロジェクト: midamo/pylinac
def assign2machine(source_file: str, machine_file: str):
    """Assign a DICOM RT Plan file to a specific machine. The source file
    is overwritten to contain the machine of the machine file.

    Parameters
    ----------
    source_file : str
        Path to the DICOM RTPlan file that contains the fields/plan desired
        (e.g. a Winston Lutz set of fields or Varian's default PF files).
    machine_file : str
        Path to a DICOM RTPlan file that has the desired machine. This is
        easily obtained by pushing a plan from the TPS for that specific
        machine. The file must contain at least one valid field.
    """
    # The donor machine name comes from the first beam of the machine file.
    machine_name = (
        pydicom.dcmread(machine_file).BeamSequence[0].TreatmentMachineName)
    plan = pydicom.dcmread(source_file)
    # Stamp every beam of the source plan with the donor machine name.
    for beam in plan.BeamSequence:
        beam.TreatmentMachineName = machine_name
    plan.save_as(source_file)
コード例 #15
0
ファイル: test_dataset.py プロジェクト: moloney/pydicom
    def test_equality_file_meta(self):
        """Dataset: equality returns correct value if with metadata"""
        d = dcmread(self.test_file)
        e = dcmread(self.test_file)
        self.assertTrue(d == e)

        # Flipping each file-meta flag must break equality; restoring it
        # must bring equality back.
        for attr in ('is_implicit_VR', 'is_little_endian'):
            setattr(e, attr, not getattr(e, attr))
            self.assertFalse(d == e)
            setattr(e, attr, not getattr(e, attr))
            self.assertTrue(d == e)

        # A differing filename also breaks equality.
        e.filename = 'test_filename.dcm'
        self.assertFalse(d == e)
コード例 #16
0
ファイル: test_dataset.py プロジェクト: moloney/pydicom
    def test_get_item(self):
        """Test Dataset.get_item"""
        ds = Dataset()
        ds.CommandGroupLength = 120  # 0000,0000
        ds.SOPInstanceUID = '1.2.3.4'  # 0008,0018

        # Without deferred reads, get_item behaves like __getitem__.
        for tag, value in ((0x00000000, 120), (0x00080018, '1.2.3.4')):
            assert ds.get_item(tag) == ds[tag]
            assert value == ds.get_item(tag).value

        # With a deferred read, get_item must still fetch the value.
        test_file = get_testdata_files('MR_small.dcm')[0]
        deferred = dcmread(test_file, force=True, defer_size='0.8 kB')
        reference = dcmread(test_file, force=True)
        # get_item follows the deferred-read branch here
        assert deferred.get_item(0x7fe00010).value == reference.PixelData
コード例 #17
0
ファイル: test_charset.py プロジェクト: kayarre/pydicom
 def test_encoding_with_specific_tags(self):
     """Encoding is applied even when Specific Character Set is absent
     from the requested specific tags."""
     dataset = dcmread(jp_file, specific_tags=['PatientName'])
     dataset.decode()
     self.assertEqual(1, len(dataset))
     # ISO 2022 escape sequences select the Japanese character sets.
     expected = ('Yamada^Tarou='
                 '\033$B;3ED\033(B^\033$BB@O:\033(B='
                 '\033$B$d$^$@\033(B^\033$B$?$m$&\033(B')
     self.assertEqual(expected, dataset.PatientName)
コード例 #18
0
ファイル: test_charset.py プロジェクト: scaramallion/pydicom
    def test_inherited_character_set_in_sequence(self):
        """charset: SQ items are decoded with the parent's encoding."""
        ds = dcmread(get_charset_files('chrSQEncoding1.dcm')[0])
        ds.decode()

        # The items inside the SQ declare no charset of their own and
        # must inherit the parent dataset's encoding.
        item = ds[0x32, 0x1064][0]
        assert item._character_set == ['shift_jis', 'iso2022_jp']
        assert item.PatientName == u'ヤマダ^タロウ=山田^太郎=やまだ^たろう'
コード例 #19
0
ファイル: test_charset.py プロジェクト: scaramallion/pydicom
 def test_invalid_character_set(self):
     """charset: an unknown encoding falls back to the default encoding
     with a warning instead of raising."""
     dataset = dcmread(get_testdata_files("CT_small.dcm")[0])
     # Clear the cached encoding so the declared charset is re-evaluated.
     dataset.read_encoding = None
     dataset.SpecificCharacterSet = 'Unsupported'
     expected_msg = (u"Unknown encoding 'Unsupported' "
                     u"- using default encoding instead")
     with pytest.warns(UserWarning, match=expected_msg):
         dataset.decode()
         assert dataset.PatientName == u'CompressedSamples^CT1'
コード例 #20
0
ファイル: test_dataset.py プロジェクト: moloney/pydicom
    def test_set_convert_private_elem_from_raw(self):
        """Test Dataset.__setitem__ with a raw private element"""
        ds = dcmread(get_testdata_files('CT_small.dcm')[0], force=True)
        # RawDataElement fields:
        # tag VR length value value_tell is_implicit_VR is_little_endian
        raw = RawDataElement((0x0043, 0x1029), 'OB', 2, b'\x00\x01', 0,
                             True, True)
        ds.__setitem__((0x0043, 0x1029), raw)

        # Storing a raw element must convert it to a real DataElement.
        converted = ds[(0x0043, 0x1029)]
        assert b'\x00\x01' == converted.value
        assert DataElement == type(converted)
コード例 #21
0
ファイル: parse_dicom.py プロジェクト: amirkogit/QtTestGround
    def openFileOfItem(self, row, column):
        """Open the DICOM file selected in the files table and display
        all of its data elements."""
        item = self.filesTable.item(row, 0)
        path = self.directoryComboBox.currentText() + "/" + item.text()
        dataset = pydicom.dcmread(path)

        # One printable line per element (also echoed to stdout).
        lines = []
        for elem in dataset:
            print(elem)
            lines.append(str(elem) + '\n')

        self.displayDicomInformation(''.join(lines))
コード例 #22
0
ファイル: io.py プロジェクト: jrkerns/pylinac
def retrieve_dicom_file(file: str) -> pydicom.FileDataset:
    """Read and return the DICOM dataset.

    Parameters
    ----------
    file : str
        The path to the file.
    """
    dataset = pydicom.dcmread(file, force=True)
    # Force-read files may lack a transfer syntax; default to implicit
    # VR little endian so the pixel data can still be interpreted.
    if 'TransferSyntaxUID' not in dataset.file_meta:
        dataset.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
    return dataset
コード例 #23
0
ファイル: test_encaps.py プロジェクト: darcymason/pydicom
    def test_encapsulate_single_fragment_per_frame_no_bot(self):
        """Test encapsulating one fragment per frame without BOT values."""
        ds = dcmread(JP2K_10FRAME_NOBOT)
        frames = decode_data_sequence(ds.PixelData)
        assert 10 == len(frames)

        data = encapsulate(frames, fragments_per_frame=1, has_bot=False)
        # Decoded frames must round-trip unchanged.
        for out, ref in zip(decode_data_sequence(data), frames):
            assert out == ref

        # The source had no BOT either, so the bytes must match exactly.
        assert ds.PixelData == data
コード例 #24
0
ファイル: test_charset.py プロジェクト: scaramallion/pydicom
    def test_nested_character_sets(self):
        """charset: SQ items carrying their own charset decode correctly."""
        ds = dcmread(get_charset_files("chrSQEncoding.dcm")[0])
        ds.decode()

        # The nested items cannot be decoded with the default encoding or
        # with the parent dataset's encoding (UTF-8); their own
        # (0008,0005) value must be used instead.
        item = ds[0x32, 0x1064][0]
        assert item._character_set == ['shift_jis', 'iso2022_jp']
        assert item.PatientName == u'ヤマダ^タロウ=山田^太郎=やまだ^たろう'
コード例 #25
0
ファイル: input.py プロジェクト: john-kloss/aneurysmDetection
def import_dicom(file):
    """Load ``<cwd>/data/<file>`` and wrap it in a :class:`Dicom`.

    Parameters
    ----------
    file : str
        File name (with ``.dcm`` extension) inside the ``data`` directory.

    Returns
    -------
    Dicom
        Wrapper holding the base name, any known aneurysm coordinates for
        it (empty list when unknown), and the pixel array.
    """
    ds = pydicom.dcmread(os.path.join(os.getcwd(), "data", file))
    # The coordinates table is keyed by the base name without extension.
    key = file.replace(".dcm", "")
    # dict.get replaces the `in d.keys()` check + if/else branches.
    ac = aneurysm_coordinates.get(key, [])
    return Dicom(key, ac, ds.pixel_array)
コード例 #26
0
ファイル: __init__.py プロジェクト: girder/girder
def _parseFile(f):
    """Parse a Girder file as DICOM metadata.

    Returns the coerced metadata dict, or None when the content is not
    valid DICOM.
    """
    try:
        # Stream the file out of Girder and hand it straight to pydicom.
        with File().open(f) as stream:
            ds = pydicom.dcmread(
                stream,
                # Don't read huge fields, esp. if this isn't really DICOM.
                defer_size=1024,
                # Metadata only; skip the image data entirely.
                stop_before_pixels=True)
            # Coerce while the stream is still open (deferred reads).
            return _coerceMetadata(ds)
    except pydicom.errors.InvalidDicomError:
        # Probably not a DICOM file at all.
        return None
コード例 #27
0
ファイル: worker.py プロジェクト: data-exp-lab/girder
def _getImage(mimeType, extension, data):
    """
    Check extension of image and opens it.

    :param extension: The extension of the image that needs to be opened.
    :param data: The image file stream.
    """
    isDicom = ((extension and extension[-1] == 'dcm')
               or mimeType == 'application/dicom')
    if isDicom:
        # Open the dicom image
        return scaleDicomLevels(pydicom.dcmread(six.BytesIO(data)))
    # Open other types of images
    return Image.open(six.BytesIO(data))
コード例 #28
0
ファイル: test_charset.py プロジェクト: kayarre/pydicom
    def test_inherited_character_set_in_sequence(self):
        """charset: SQ items decode using the parent dataset's encoding."""
        ds = dcmread(sq_encoding1_file)
        ds.decode()

        # The nested items declare no charset of their own and must
        # inherit the parent's encodings.
        item = ds[0x32, 0x1064][0]
        assert ['shift_jis', 'iso2022_jp', 'iso2022_jp'] == \
            item._character_set
        expected = (u'\uff94\uff8f\uff80\uff9e^\uff80\uff9b\uff73='
                    u'\u5c71\u7530^\u592a\u90ce='
                    u'\u3084\u307e\u3060^\u305f\u308d\u3046')
        assert expected == item.PatientName
コード例 #29
0
    def setup(self):
        """Load one single-frame RLE dataset per (bit depth, samples per
        pixel) combination used by the benchmarks."""
        self.ds_8_1_1 = dcmread(OB_RLE_1F)       # 8-bit, 1 sample/px
        self.ds_8_3_1 = dcmread(SC_RLE_1F)       # 8-bit, 3 samples/px
        self.ds_16_1_1 = dcmread(MR_RLE_1F)      # 16-bit, 1 sample/px
        self.ds_16_3_1 = dcmread(SC_RLE_16_1F)   # 16-bit, 3 samples/px
        self.ds_32_1_1 = dcmread(RTDOSE_RLE_1F)  # 32-bit, 1 sample/px
        self.ds_32_3_1 = dcmread(SC_RLE_32_1F)   # 32-bit, 3 samples/px

        # Number of benchmark repetitions.
        self.no_runs = 100
コード例 #30
0
ファイル: codify.py プロジェクト: jrkerns/pydicom
def code_file(filename, exclude_size=None, include_private=False):
    """Write a complete source code file to recreate a DICOM file

    :arg filename: complete path and filename of a DICOM file to convert
    :arg exclude_size: if specified, values longer than this (in bytes)
                       will only have a commented string for a value,
                       causing a syntax error when the code is run,
                       and thus prompting the user to remove or fix that line.
    :arg include_private: If True, private data elements will be coded.
                          If False, private elements are skipped
    :return: a string containing code lines to recreate entire file

    """
    ds = pydicom.dcmread(filename, force=True)

    # Header comment and the imports every generated file needs.
    lines = [
        "# Coded version of DICOM file '{0}'".format(filename),
        "# Produced by pydicom codify utility script",
        code_imports(),
        '',
    ]

    # The file_meta information, coded as its own dataset.
    lines.append("# File meta info data elements")
    lines.append(code_dataset(ds.file_meta, "file_meta", exclude_size,
                              include_private))
    lines.append('')

    # The main dataset itself.
    lines.append("# Main data elements")
    lines.append(code_dataset(
        ds, exclude_size=exclude_size, include_private=include_private))
    lines.append('')

    # Attach the file meta and record the transfer syntax flags.
    lines.append("ds.file_meta = file_meta")
    lines.append("ds.is_implicit_VR = " + str(ds.is_implicit_VR))
    lines.append("ds.is_little_endian = " + str(ds.is_little_endian))

    # Return the complete code string
    return line_term.join(lines)
コード例 #31
0
import pydicom
import os

filePath = "C:/zhaoyl/Backup/01.DICOM/04.DICOM_Files/07.Multi-ISO-Center/2 iso plan/RP1.2.752.243.1.1.20190128151700217.4000.28177.dcm"

ds = pydicom.dcmread(filePath)

# Rewrite machine/snout identifiers on every ion beam.
# (The previous version used enumerate() but never used the indices.)
for beam in ds.IonBeamSequence:
    ## Change the Machine Name
    beam.TreatmentMachineName = "GTR2-PBS"
    ## Change the Snout ID
    for snout in beam.SnoutSequence:
        snout.SnoutID = "snout40"

# Get Save As File Name: <original stem>_modified<ext> in the same folder
folder = os.path.dirname(filePath)
fileName = os.path.basename(filePath)
fileName, ext = os.path.splitext(fileName)
fileName = fileName + "_modified" + ext
saveFileName = os.path.join(folder, fileName)

## Save to DICOM file
ds.save_as(saveFileName)
コード例 #32
0
ファイル: Instance.py プロジェクト: wendyrvllr/Dicom-To-CNN
 def __load_full_instance(self):
     """Read the complete DICOM dataset (metadata and pixel data) from
     ``self.path`` into ``self.dicomData``, forcing the read even when
     the file lacks a DICOM preamble."""
     self.dicomData = pydicom.dcmread(self.path, force=True)
コード例 #33
0
data_dir = '/projectnb/ece601/kaggle-pulmonary-embolism/rsna-str-pulmonary-embolism-detection/'
train_csv = data_dir + 'train.csv'
train_dir = data_dir + 'train/'

pedataframe = pd.read_csv(train_csv)

print(len(pedataframe))

# Pack the DICOM pixel data into HDF5 shards of 100k images each.
for file_num in range(18):
    h5py_file = h5py.File('/scratch/npy-' + str(file_num + 1) + '.hdf5', "w")
    for idx in range(file_num * 100000,
                     min((file_num + 1) * 100000, len(pedataframe))):
        img_name = os.path.join(train_dir, pedataframe.StudyInstanceUID[idx],
                                pedataframe.SeriesInstanceUID[idx],
                                pedataframe.SOPInstanceUID[idx] + '.dcm')
        dicom_image = pydicom.dcmread(img_name)

        # Progress marker every 10k images.
        if idx % 10000 == 0:
            print(idx)

        try:
            # RuntimeError: The following handlers are available to decode the pixel ...
            # data however they are missing required dependencies: GDCM (req. GDCM)
            image = dicom_image.pixel_array
        except Exception:
            # Narrowed from a bare ``except:`` which would also swallow
            # KeyboardInterrupt during this long-running loop.
            print('Error parsing ', img_name)
            continue

        # in OSIC we find outside-scanner-regions with raw-values of -2000.
        # Let's threshold between air (0) and this default (-2000) using -1000
        image[image <= -1000] = 0
コード例 #34
0
from pydicom import dcmread
from pydicom.data import get_testdata_files

from pynetdicom import AE
from pynetdicom.sop_class import CTImageStorage

ae = AE()

# We intend to send CT images, so request that presentation context.
ae.add_requested_context(CTImageStorage)

# Any bundled CT test file will do for this demo.
ds = dcmread(get_testdata_files('CT_small')[0])

assoc = ae.associate('127.0.0.1', 11112)

if not assoc.is_established:
    print('Association rejected, aborted or never connected')
else:
    # Send the dataset and report the returned status (if any).
    status = assoc.send_c_store(ds)

    if status:
        print('C-STORE request status: 0x{0:04x}'.format(status.Status))
    else:
        print('Connection timed out, was aborted or received invalid response')

    assoc.release()
コード例 #35
0
#coding: utf8

import pydicom

# NOTE: the previous version first assigned "./dataone/1/I9500000" and then
# immediately overwrote it; the dead assignment has been removed.
filepath = "/tmp/I9500000"
ds = pydicom.dcmread(filepath)

# Patient/institution identification elements to dump.
metas = [
    "PatientID",
    "PatientName",
    "PatientBirthDate",
    "PatientSex",
    "InstitutionName",
]

for meta in metas:
    print(ds.data_element(meta))
コード例 #36
0
# Bonus points:
# 1) What is the modality that you are dealing with here?
# 2) Try to figure out which axis corresponds to what plane is which by searching online.
# You should have a good guess of what anatomy you are looking at if you visualize a middle slice
# 3) Try plotting the slices in non-primary planes with proper aspect ratio
#
# Hints:
# - You may want to rescale the output because your voxels are non-square.
# - Don't forget that you need to order your slices properly. Filename
# may not be the best indicator of the slice order.
# If you're confused, try looking up the first value of ImagePositionPatient

# %%
# Load the volume into array of slices
# Load the volume into array of slices
path = "volume"  # plain literal; the f-string here had no placeholders
slices = [pydicom.dcmread(os.path.join(path, f)) for f in os.listdir(path)]
# Filenames are not a reliable slice order; sort on ImagePositionPatient.
slices = sorted(slices, key=lambda dcm: dcm.ImagePositionPatient[0])

# What are the dimensions?
print(f"{len(slices)} of size {slices[0].Rows}x{slices[0].Columns}")

# What is the modality?
print(f"Modality: {slices[0].Modality}")

# %%
# What is the slice spacing?
print(
    f"Pixel Spacing: {slices[0].PixelSpacing}, slice thickness: {slices[0].SliceThickness}"
)

# Load into numpy array
コード例 #37
0
ファイル: main.py プロジェクト: ACRCode/AILAB_documentation
    logging.error(e, exc_info=True)

predictions_list = []  # empty list of predictions
i = 0  # number of successfully processed studies

### Generate predictions ###
for study in studies:
    # load study and create prediction
    try:
        # predictions are at a study level for this particular data element
        # for each studyinstanceUID, find all filepaths
        filesByStudy = data.loc[data['studyInstanceUID'] ==
                                study]['filepath'].to_numpy()
        prediction = []
        for file in filesByStudy:
            dcm = pydicom.dcmread('/input/' + file)  # load each image file

            image = (255 * (dcm.pixel_array.astype(float) /
                            ((2**dcm.BitsStored) - 1))).astype(np.uint8)

            # move image data to device
            image = torch.from_numpy(image).float().to(device)
            # generate a new prediction for each image
            prediction.append(torch.rand(num_classes))

        # normalize prediction
        prediction = sum(prediction) / (sum(sum(prediction)))
        prediction = prediction.numpy().astype(np.float64)

        # save study prediction in JSON format
        predictions_list.append({
コード例 #38
0
def main(args):
    """Build the COVIDx train/test split.

    Combines the covid-chestxray-dataset (plain image files) with the RSNA
    Kaggle pneumonia dataset (DICOMs converted to PNG), copies images into
    ``<save_path>/test`` and ``<save_path>/train`` and writes one metadata
    file per split.

    Fixes vs. the previous version: iterate dicts directly instead of
    ``.keys()``; test-patient membership now uses a set (O(1) per lookup
    instead of scanning an ndarray per patient); the duplicated
    copy/append/count branches are collapsed.
    """
    train = []
    test = []
    test_count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}
    train_count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}

    # Create export test and train dirs
    TEST_EXPORT = os.path.join(args.save_path, 'test')
    os.makedirs(TEST_EXPORT, exist_ok=True)
    TRAIN_EXPORT = os.path.join(args.save_path, 'train')
    os.makedirs(TRAIN_EXPORT, exist_ok=True)

    # Map source findings onto the three COVIDx classes.
    mapping = {
        'COVID-19': 'COVID-19',
        'SARS': 'pneumonia',
        'MERS': 'pneumonia',
        'Streptococcus': 'pneumonia',
        'No Finding': 'normal',
        'Lung Opacity': 'pneumonia',
        '1': 'pneumonia',
    }

    covid_imgs = os.path.join(args.covid_dir, "images")
    covid_csv = os.path.join(args.covid_dir, "metadata.csv")

    csv = pd.read_csv(covid_csv, nrows=None)
    csv = csv[csv["view"] == "PA"]  # PA-view X-rays only
    log.info("Metadata contains {} items with PA".format(len(csv)))

    pneumonias = ["COVID-19", "SARS", "MERS", "ARDS", "Streptococcus"]
    pathologies = [
        "Pneumonia", "Viral Pneumonia", "Bacterial Pneumonia", "No Finding"
    ] + pneumonias
    pathologies = sorted(pathologies)

    # Group [patientid, filename, class] entries by class.
    filename_label = {'normal': [], 'pneumonia': [], 'COVID-19': []}
    count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}
    for row in csv.itertuples():
        f = row.finding.split('/')[-1]
        if f in mapping:
            count[mapping[f]] += 1
            entry = [row.patientid, row.filename, mapping[f]]
            filename_label[mapping[f]].append(entry)

    log.info('Data distribution from covid-chestxray-dataset:')
    log.info(count)

    # add covid-chestxray-dataset into COVIDx dataset
    for key in filename_label:
        arr = np.array(filename_label[key])
        if arr.size == 0:
            continue

        # Randomly sample test set patients
        patient_ids = np.unique(arr[:, 0])
        test_size = int(len(patient_ids) * args.test_size)
        test_patients = set(
            np.random.choice(patient_ids, test_size, replace=False))
        log.info('Category: {}, N test patients {}'.format(key, test_size))

        # go through all the patients
        for patient in arr:
            src_img_pth = os.path.join(covid_imgs, patient[1])
            if patient[0] in test_patients:
                dst_dir, records, counts = TEST_EXPORT, test, test_count
            else:
                dst_dir, records, counts = TRAIN_EXPORT, train, train_count
            copyfile(src_img_pth, os.path.join(dst_dir, patient[1]))
            records.append(patient)
            counts[patient[2]] += 1

    log.info('test count: {}'.format(test_count))
    log.info('train count: {}'.format(train_count))

    # add normal and rest of pneumonia cases from
    # https://www.kaggle.com/c/rsna-pneumonia-detection-challenge
    kaggle_csv_normal = os.path.join(args.kaggle_data,
                                     "stage_2_detailed_class_info.csv")
    kaggle_csv_pneu = os.path.join(args.kaggle_data,
                                   "stage_2_train_labels.csv")
    csv_normal = pd.read_csv(kaggle_csv_normal, nrows=None)
    csv_pneu = pd.read_csv(kaggle_csv_pneu, nrows=None)
    patients = {'normal': [], 'pneumonia': []}

    for row in csv_normal.itertuples():
        if row[2] == 'Normal':
            patients['normal'].append(row.patientId)

    for row in csv_pneu.itertuples():
        if row.Target == 1:
            patients['pneumonia'].append(row.patientId)

    log.info("Preparing Kaggle dataset...")
    counter = 0
    for key in patients:
        arr = np.array(patients[key])
        if arr.size == 0:
            continue

        # Choose random test patients
        patient_ids = np.unique(arr)
        test_size = int(len(patient_ids) * args.test_size)
        test_patients = set(
            np.random.choice(patient_ids, test_size, replace=False))
        log.info('Category: {}, N Test examples: {}'.format(key, test_size))

        for patient in arr:
            # Convert the DICOM pixel data to a PNG image.
            ds = dicom.dcmread(
                os.path.join(args.kaggle_data, "stage_2_train_images",
                             patient + '.dcm'))
            pil_img = Image.fromarray(ds.pixel_array)
            imgname = patient + '.png'

            if patient in test_patients:
                pil_img.save(os.path.join(TEST_EXPORT, imgname))
                test.append([patient, imgname, key])
                test_count[key] += 1
            else:
                pil_img.save(os.path.join(TRAIN_EXPORT, imgname))
                train.append([patient, imgname, key])
                train_count[key] += 1
            counter += 1

            if counter % 500 == 0 and counter > 0:
                log.info("Converted {} Kaggle dataset images".format(counter))

    log.info('test count: {}'.format(test_count))
    log.info('train count: {}'.format(train_count))

    write_metadata(os.path.join(args.save_path, 'train_metadata.txt'), train)
    write_metadata(os.path.join(args.save_path, 'test_metadata.txt'), test)
コード例 #39
0
def dicomloaddir(files, filenamepattern='*.dcm', maxtoread=None, phasemode=None,
                 desiredinplansize=None, dformat='float'):
    '''
    dicomloaddir(files, filenamepattern='*.dcm', maxtoread=None, phasemode=None,
        desiredinplansize=None, dformat='float'):

    Load multiple dicom files from one or more directories.

    Input:
        <files>: can be:
            (1) a string of a directory
            (2) a list of (1)
            (3) a rzpath object
            (4) a list of (3)
        <filenamepattern>: str, the wildcard for the dicom files in a directory
        <maxtoread>: int, maximum number of dicom files to read per directory
        <phasemode>: ...implement later, ignore for now...
        <desiredinplansize>: ...implement later, ignore for now...,
            a 1x2 array, desired inplane size, if dicom files
            do not follow this size, we resize it.
        <dformat>: ...implement later, ignore for now...
            read in data format
    Output:
        <vollist>: a list of volume arrays for multiple runs; if just one run,
            we return the array itself
        <dicominfolist>: a list of dicom info dicts for multiple runs; if just
            one run, we return the dict itself

    Note:
        1. This function currently works with Siemens Prisma 3T and Magnetom 7T,
        not sure about other scanners like GE. For Siemens, we focus on these
        attributes (can update this):
            (0018, 0050) Slice Thickness
            (0028, 0030) Pixel Spacing
            (0051, 100b) AcquisitionMatrixText
            (0019, 100a) NumberOfImagesInMosaic, 1 if anatomical data
            (0018, 0080) Time (TR)
            (0018, 0081) Echo Time (TE)
            (0018, 1312) Inplane Phase Encoding Direction
            (0019, 1029) MosaicRefAcqTimes (slicetimeorder), None if anatomical data
            (0051, 1016) a str, check mosaic, read from the dicom file
            (0051, 100c) FOV
            We also add keys:
            'ismosaic': boolean, whether this is a mosaic image
            'voxelsize': 1x3 list, based on Slice Thickness and Pixel Spacing
            'AcquisitionMatrix': [phase, frequency] matrix, derived from
                AcquisitionMatrixText. Phase step has no meaning if data is
                structural?
            'FovSize': [phase_len, frequency_len] mm, derived from FOV
            'epireadouttime': calculated from rz.mri.dicom_readout_msec, only
                valid for epi, None for other files

        2. Note that all these keys are scanner specific. Most of these should
            work for Siemens scanners but might not work for GE or Philips.

    Todo:
        1. figure out how to add read phase data
        2. check if some of the fields do not exist
        3. resize image to accommodate desired inplane size
        4. save all metadata using pickle

    History:
        20180720 <files> now can accept path-like objects
        20180626 RZ fixed the bug for reading the anatomical files
        20180605 RZ use nibabel.nicom.csareader.get_csa_header() function to read
            csa file and get the [BandWidthPerPixelPhaseEncode]
        20180422 RZ change the stack images in the last step so user can see
            report while waiting for image stack
        20180420 RZ created this function
    '''

    from pydicom import dcmread
    from RZutilpy.rzio import matchfiles
    from RZutilpy.array import split2d
    from RZutilpy.mri import dicom_readout_msec
    from RZutilpy.system import rzpath
    from numpy import stack
    from progressbar import progressbar as pbar
    import re

    # Normalize <files> to a list of rzpath objects.
    files = [files] if not isinstance(files, list) else files
    files = [rzpath(p) if not isinstance(p, rzpath) else p for p in files]

    # start to load
    dicominfolist = []
    vollist = []
    for filedir in files:  # loop over directories
        filepattern = filedir / filenamepattern
        dcmnames = matchfiles(filepattern.str)
        if not dcmnames:
            print(
                'This {} does not appear to be a directory containing {} files, so skipping.\n'
                .format(filedir, filenamepattern))
            # Bug fix: this used to `break`, which silently dropped every
            # remaining directory; the message says "skipping", so only skip
            # the empty one and keep going.
            continue
        else:
            print(
                'This {} appear to be a directory containing {} files, so loading.\n'
                .format(filedir, filenamepattern))

        dcmnames = dcmnames[:maxtoread]  # keep at most <maxtoread> files

        # ====== deal with dicom info, save a customized dicominfo dict =======
        ds = dcmread(dcmnames[0].str)  # read 1st vol for info purpose
        # note currently we assume this dicom has all fields below!! And we
        # save the very raw dicom info here
        dcminfothisrun = dict()
        dcminfothisrun['SliceThickness'] = ds.SliceThickness
        dcminfothisrun['PixelSpacing'] = ds.PixelSpacing
        dcminfothisrun['AcquisitionMatrixText'] = ds.AcquisitionMatrixText
        dcminfothisrun['RepetitionTime'] = ds.RepetitionTime
        dcminfothisrun['EchoTime'] = ds.EchoTime
        dcminfothisrun[
            'InPlanePhaseEncodingDirection'] = ds.InPlanePhaseEncodingDirection
        # Siemens private tags, written as hex literals instead of int(.., 16).
        dcminfothisrun['FOV'] = ds[0x0051, 0x100c].value
        dcminfothisrun['checkmosaic'] = ds[0x0051, 0x1016].value

        # figure out whether it is a mosaic image (i.e. likely EPI data)
        if dcminfothisrun['checkmosaic'].find('MOSAIC') >= 0:
            dcminfothisrun['ismosaic'] = True  # indicate this is a epi file
            print(
                'We are loading some mosaic images, need to convert a mosaic image to 3d,\
                this directory might contain epi data ...\n')
        else:
            dcminfothisrun[
                'ismosaic'] = False  # indicate this is not a epi file

        if [0x0019, 0x100a] in ds:  # Siemens
            dcminfothisrun['NumberOfImagesInMosaic'] = \
                ds[0x0019, 0x100a].value if dcminfothisrun['ismosaic'] else 1
        elif [0x0021, 0x104f] in ds:  # GE
            dcminfothisrun['NumberOfImagesInMosaic'] = \
                ds[0x0021, 0x104f].value if dcminfothisrun['ismosaic'] else 1

        # slice timing and EPI readout only exist for mosaic (EPI) data
        dcminfothisrun['MosaicRefAcqTimes'] = \
            ds[0x0019, 0x1029].value if dcminfothisrun['ismosaic'] else None
        dcminfothisrun['epireadouttime'] = \
            dicom_readout_msec(ds)[0] if dcminfothisrun['ismosaic'] else None

        # voxel size = in-plane pixel spacing + slice thickness
        dcminfothisrun['voxelsize'] = list(dcminfothisrun['PixelSpacing']) + [
            dcminfothisrun['SliceThickness']
        ]

        # Parse the in-plane acquisition matrix, e.g. '96p*96'.
        # note this regular expression might fail in normal resolution imaging
        p = re.compile(r'^(\d{1,4}).?\*(\d{1,4}).?$')
        matchgroup = p.match(dcminfothisrun['AcquisitionMatrixText'])
        if matchgroup:
            plines = int(
                matchgroup.group(1))  # step in phase encoding direction
            flines = int(
                matchgroup.group(2))  # step in frequency encoding direction
            dcminfothisrun['AcquisitionMatrix'] = [plines, flines]
        else:
            # Bug fix: the exception was instantiated but never raised.
            raise ValueError('can not find the phase encoding direction!')

        # Parse the FOV string, e.g. 'FoV 192*192'.
        p = re.compile(r'^FoV (\d{1,6})\*(\d{1,6})$')
        matchgroup = p.match(dcminfothisrun['FOV'])
        if matchgroup is None:
            # Robustness: fail with a clear message instead of an
            # AttributeError on matchgroup.group below.
            raise ValueError('can not parse the FOV string!')
        p_len = int(matchgroup.group(1))  # length in phase encoding direction
        f_len = int(
            matchgroup.group(2))  # length in frequency encoding direction
        dcminfothisrun['FovSize'] = [
            p_len / 10, f_len / 10
        ] if dcminfothisrun['ismosaic'] else [p_len, f_len]
        # have to divide this number by 10 for epidata, not sure why....

        # save dicom info in this run
        dicominfolist.append(dcminfothisrun)
        # show some information
        print(dcminfothisrun)

        # ================  deal with the volumes ====================
        print('\nReading in dicoms ......')
        vol = [dcmread(i.str).pixel_array
               for i in pbar(dcmnames)]  # read pixel data
        # split mosaic images
        if dcminfothisrun['ismosaic']:
            # Note that we assume plines and flines exactly divide the mosaic
            # image dimensions; this is typically true.
            vol = [split2d(i, plines, flines)
                   for i in vol]  # split each 2d mosaic image to 3d image
            # only keep acquired slices, the last several images are sometimes black
            vol = [
                i[:, :, :dcminfothisrun['NumberOfImagesInMosaic']] for i in vol
            ]

        # stack images, takes a while
        print('\n\nStack images ......\n')
        vol = stack(vol, axis=-1)  # stack to a 3d/4d array
        if vol.ndim == 3:  # expand to 4d if only 3d
            vol = vol[..., None]
        vollist.append(vol)

        # report info
        print(
            'The 3D dimensions of the final returned volume are {}.\n'.format(
                vol.shape[:3]))
        print('There are {} volumes in the fourth dimension.\n'.format(
            vol.shape[-1]))
        if dcminfothisrun['ismosaic']:
            print('These are mosaic images, might be epi data.\n')
        else:
            print('These are not mosaic images, might not be epi data.\n')
        print('The voxel size (mm) of the final returned volume is {}.\n'.format\
            (dcminfothisrun['voxelsize']))
        print('The in-plane matrix size (PE x FE) appears to be {}.\n'.format\
            (dcminfothisrun['AcquisitionMatrix']))
        print('The field-of-view (mm) of the final returned volume is {}.\n'.format\
            (dcminfothisrun['FovSize']))
        print('The TR is {} ms.\n\n\n\n'.format(
            dcminfothisrun['RepetitionTime']))

    # Unwrap single-run results for caller convenience.
    if len(vollist) == 1:
        vollist = vollist[0]
    if len(dicominfolist) == 1:
        dicominfolist = dicominfolist[0]
    return vollist, dicominfolist
コード例 #40
0
# Streamlit UI: when the selected test is "Cart", show paired text inputs for
# the selected vs. measured displacements (forward and backward).
# NOTE(review): `test`, `st`, `file` and `get_data` are defined earlier in the
# file; `st.beta_columns` is the pre-1.0 Streamlit API (renamed `st.columns`).
if test == "Cart":
    col1, col2 = st.beta_columns(2)
    default_displacement_forward = col1.text_input(
        "Selected displacement forward", "", key="1")
    measured_displacement_forward = col2.text_input(
        "Measured displacement forward", "", key="2")

    default_displacement_backward = col1.text_input(
        "Selected displacement backward", "", key="3")
    measured_displacement_backward = col2.text_input(
        "Measured displacement backward", "", key="4")

# Display the uploaded DICOM file(s); use a slider when several were uploaded.
if file is not None:

    if len(file) > 0:
        # Parse every uploaded file into a pydicom dataset.
        im_list = [pydicom.dcmread(i) for i in file]
        if len(im_list) > 1:
            # 1-based slider so the label matches "image number".
            slider = st.slider("Image:", min_value=1, max_value=len(im_list))

            # test_img=pydicom.dcmread(file[0])
            # get_data() appears to return (pixel_array, pixel_dims); only the
            # image itself is needed here -- TODO confirm against get_data.
            images = [get_data(x)[0] for x in im_list]

            fig, ax = plt.subplots()
            ax.imshow(images[slider - 1], cmap="gray")
            st.write(fig)

        else:
            #test_img=pydicom.dcmread(file[0])
            # Single upload: also keep the pixel dimensions.
            img, px_dim = get_data(im_list[0])
            # dst=np.zeros(img.shape)
            # cv_img=cv2.normalize(img,dst,0,255,cv2.NORM_MINMAX)
コード例 #41
0
# Delete every "Enhanced MR Image Storage" DICOM file from the given scans of
# an XNAT session (or just report what would be deleted with --dry-run).
with xnatutils.connect() as xlogin:

    # Look up the target session on the XNAT server.
    xsession = xlogin.experiments[args.xnat_id]  # noqa pylint:disable=no-member

    for scan_id in args.scan_ids:

        xscan = xsession.scans[scan_id]

        # Download the whole scan so the headers can be inspected locally.
        xscan.download_dir(args.download_dir)
        files_path = op.join(
            args.download_dir, args.xnat_id,
            'scans', '{}-{}'.format(xscan.id,
                                    re.sub(r'[^a-zA-Z_0-9]', '_', xscan.type)),
            'resources', 'DICOM', 'files')

        # Iterate files in deterministic (name-sorted) order.
        for fname, xfile in sorted(xscan.files.items(), key=itemgetter(0)):
            fpath = op.join(files_path, fname)
            # Bug fix: the file was opened but the handle was ignored while
            # dcmread re-opened the same path; read from the open handle.
            with open(fpath, 'rb') as f:
                dcm = pydicom.dcmread(f)
            if dcm.file_meta.MediaStorageSOPClassUID == ENHANCED_MR_STORAGE:
                # Bug fix: message was missing the closing quote.
                print("Deleting '{}'".format(fname))
                if not args.dry_run:
                    # Remove the file on the server, not just locally.
                    xfile.delete()

if args.dry_run:
    print('Would delete proceeding "Enhanced MR Image Storage" from {}:[{}]'.
          format(args.xnat_id, ', '.join(args.scan_ids)))
else:
    print('Deleted all "Enhanced MR Image Storage" from {}:[{}]'.format(
        args.xnat_id, ', '.join(args.scan_ids)))
def load_image(full_path):
    """Read the DICOM file at *full_path* and return its pixels as an int array.

    Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; it
    was always an alias for the builtin ``int``, which is used instead, so
    the returned dtype is unchanged.
    """
    f = pydicom.dcmread(full_path)
    return f.pixel_array.astype(int)
コード例 #43
0
         os.makedirs(new_dir_CT)
      
      #Change directory to the original CT data
      os.chdir(original_dir_CT)

      #Get the list of file names in the original data directory.
      orig_filelist = os.listdir(original_dir_CT)

      #Start iterating through the file list in the original DICOM dataset.
      for num, file in enumerate(orig_filelist):

         #Check to make sure that the file being interrogated is an actual DICOM file in the folder.
         if file.endswith('.dcm'):
            
            #Read the first DICOM file in the original data directory.
            ds = pydicom.dcmread(file)

            #Check if the SOPClassUID tag says that the images are
            #from a CT dataset.  Since we are only looking for CT images
            #with this command, it will also ignore all secondary captures
            #and snapshots, so that these won't be part of the de-identified dataset.
            #These secondary captures can also contain sensitive, identifying patient
            #information.
            if ds[0x0008,0x0016].repval == 'CT Image Storage':
                
                #Begin the deidentification of all relevant tags.
                ds.PatientName = 'CT_Patient'
                ds.AccessionNumber = ' '
                ds.Manufacturer = ' '
                ds.InstitutionName = ' '
                ds.InstitutionAddress = ' '
コード例 #44
0
    def slotAdd3(self, a, b):
        """Qt slot: receive raw liver (`a`) and tumor (`b`) mask buffers,
        rebuild the windowed CT slice stacks from `path`, and display the
        middle slice with prediction contours overlaid.

        NOTE(review): `path`, `flag`, `sample_num` and `WL` come from module
        scope -- confirm they are initialized before this slot fires.
        """

        filenames = os.listdir(path)

        self.slices_liver = []
        self.slices_tumor = []

        # Window/level each DICOM slice twice: a wide window for the liver
        # view (center 0, width 2048) and a narrow one for the tumor view.
        idx = []
        for i, name in enumerate(filenames):
            name = os.path.join(path, name)
            slice = pydicom.dcmread(name)  # NOTE(review): shadows builtin `slice`
            idx.append(slice.InstanceNumber)
            self.slices_liver.append(WL(slice, 0, 2048))
            self.slices_tumor.append(WL(slice, 100, 150))

        # Reorder the stacks by InstanceNumber (directory order is arbitrary).
        idx_new = np.argsort(idx)

        self.slices_liver = np.stack(self.slices_liver)[idx_new]
        self.slices_tumor = np.stack(self.slices_tumor)[idx_new]

        # Decode the raw mask bytes into (n_slices, 512, 512) float arrays.
        # NOTE(review): np.fromstring is deprecated (np.frombuffer is the
        # replacement), and `self.liver_backup = self.liver` aliases the same
        # array, so the in-place `*= 255.` scales the backup too -- confirm
        # this aliasing is intended.
        self.liver = np.fromstring(a)
        self.liver = np.reshape(self.liver, (len(filenames), 512, 512))
        self.liver_backup = self.liver
        self.liver *= 255.

        self.tumor = np.fromstring(b)
        self.tumor = np.reshape(self.tumor, (len(filenames), 512, 512))
        self.tumor_backup = self.tumor
        self.tumor *= 255.

        # Default the slice selector to the middle of the stack.
        global sum_number
        sum_number = filenames
        half_sample_num = sample_num/2
        self.num.setText(str(int(half_sample_num)))

        num_pic = self.num.text()
        num2 = int(num_pic)

        self.show_img(self.tumor[num2])

        a = self.liver[num2]#liver
        b = self.slices_tumor[num2]
        c = self.tumor[num2]
        overlay = b

        # Draw the predicted tumor contour (pink) on the windowed slice.
        overlay = np.uint8(overlay)
        overlay = cv.cvtColor(overlay, cv.COLOR_GRAY2RGB)
        mask = np.uint8(c)

        _, binary_pred = cv.threshold(mask, 127, 255, cv.THRESH_BINARY)
        contours_pred, _ = cv.findContours(binary_pred, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        cv.drawContours(overlay, contours_pred, -1, (255, 20, 147), 2) # pink contours stand for prediction

        # Draw the predicted liver contour (green) on top of the overlay.
        mask_2 = np.uint8(a)

        _, binary_pred = cv.threshold(mask_2, 127, 255, cv.THRESH_BINARY)
        contours_pred, _ = cv.findContours(binary_pred, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        cv.drawContours(overlay, contours_pred, -1, (0, 255, 0), 2) # green contours stand for the liver prediction
        self.show_img(overlay)

        # Enable the layer checkboxes according to which masks are available.
        if flag == 1:
            self.liver_layer.setEnabled(True)
            self.liver_layer.setChecked(True)

        elif flag == 3 or flag == 2:
            self.liver_layer.setEnabled(True)
            self.tumor_layer.setEnabled(True)

            self.liver_layer.setChecked(True)
            self.tumor_layer.setChecked(True)


        # Unlock the 3D-rendering and navigation controls.
        self.tri_d_run.setEnabled(True)
        self.left.setEnabled(True)
        self.right_R.setEnabled(True)
        self.show_1.setEnabled(True)
コード例 #45
0
def read_dicom(folder):
    """read_dicom reads MRI dicom files from Siemens, GE, or Philips scanners.

    Return values:
        raw_data: complex-valued, H x W x D x E
        voxel_size: resolution of scan in mm/voxel (ex: .4688 .4688 2.0000)
        CF: center frequency in Hz (ex: 127782747)
        delta_TE: difference between echoes in ms (ex: 3.6)
        TE: times in ms of echoes (ex: 4.5 8.1 ...)
        B0_dir: direction of B0 (ex: 0 0 1)
        B0: strength of B0 in Tesla (ex: 3)

    Sample usage:
    >>> file_path = '.../test_data_GE'
    >>> raw_data, params = read_dicom(file_path)

    Steven Cao, Hongjiang Wei, Chunlei Liu
    University of California, Berkeley

    Bug fix: ``np.float`` (deprecated in NumPy 1.20, removed in 1.24) replaced
    with the builtin ``float`` it aliased; ``lstsq`` now passes ``rcond=None``
    to use the modern default and silence the FutureWarning.
    """
    files = [join(folder, f) for f in listdir(folder) if is_dcm(f)]
    assert len(files) > 0, 'No dicom files in {0}'.format(folder)
    info = dcmread(files[0])
    maker = info.Manufacturer
    print('Reading', len(files), maker, 'dicom files...')

    # Scan all headers for min/max slice location and the number of echoes.
    min_slice = float(info.SliceLocation)
    max_slice = float(info.SliceLocation)
    min_pos = np.array(info.ImagePositionPatient)
    max_pos = np.array(info.ImagePositionPatient)
    max_echo = int(info.EchoNumbers)
    for f in files[1:]:
        file = dcmread(f)
        slice_loc = float(file.SliceLocation)
        echo = int(file.EchoNumbers)
        if slice_loc < min_slice:
            min_slice = slice_loc
            min_pos = np.array(file.ImagePositionPatient)
        if slice_loc > max_slice:
            max_slice = slice_loc
            max_pos = np.array(file.ImagePositionPatient)
        if echo > max_echo:
            max_echo = echo

    voxel_size = np.array(
        [info.PixelSpacing[0], info.PixelSpacing[1], info.SliceThickness])
    # Number of slices inferred from the physical span of the volume.
    slices = np.round(norm(max_pos - min_pos) / voxel_size[2]) + 1

    # Fill magnitude, phase, and TE arrays, placing each file by its physical
    # slice position and (0-based) echo number.
    shape = (int(info.Rows), int(info.Columns), int(slices), int(max_echo))
    mag = np.zeros(shape)
    phase = np.zeros(shape)
    TE = np.zeros(max_echo)
    for f in files:
        file = dcmread(f)
        slice_num = int(
            np.round((norm(np.array(file.ImagePositionPatient) - min_pos) /
                      voxel_size[2])))
        echo = int(file.EchoNumbers) - 1
        TE[echo] = float(file.EchoTime)
        if maker.startswith('GE'):
            # GE interleaves magnitude (odd instance numbers) and phase (even).
            if int(file.InstanceNumber) % 2 == 1:
                mag[:, :, slice_num, echo] = file.pixel_array
            else:
                phase[:, :, slice_num, echo] = file.pixel_array
        elif maker.startswith('Ph'):
            # Philips marks magnitude/phase in the ImageType attribute.
            if 'm' in file.ImageType or 'M' in file.ImageType:
                mag[:, :, slice_num, echo] = file.pixel_array
            elif 'p' in file.ImageType or 'P' in file.ImageType:
                phase[:, :, slice_num, echo] = file.pixel_array
        elif maker.startswith('SIE'):  # does not work with multiple coils
            if 'm' in file.ImageType or 'M' in file.ImageType:
                mag[:, :, slice_num, echo] = file.pixel_array
            elif 'p' in file.ImageType or 'P' in file.ImageType:
                # Apply the stored rescale to recover phase values.
                phase[:, :, slice_num, echo] = (
                    (file.pixel_array * float(file.RescaleSlope) +
                     float(file.RescaleIntercept)) /
                    (float(file.LargestImagePixelValue) * np.pi))
    if maker.startswith('GE') or maker.startswith('Ph'):
        # Normalize stored phase to the [-pi, pi) range.
        phase = 2 * np.pi * phase / (np.max(phase) - np.min(phase))
        if maker.startswith('GE'):
            phase[:, :, ::2, :] = phase[:, :, ::2, :] + np.pi
    data = mag * np.exp(-1j * phase)

    # Acquisition parameters.
    CF = info.ImagingFrequency * 1e6
    if len(TE) == 1:
        delta_TE = TE
    else:
        delta_TE = TE[1] - TE[0]
    affine_2d = np.array(info.ImageOrientationPatient).reshape(3, 2)
    # NOTE(review): the trailing '- 1' in this denominator looks suspicious
    # (plain (slices - 1) * voxel_size[2] would be the slab length) -- confirm.
    z = (max_pos - min_pos) / ((slices - 1) * voxel_size[2] - 1)
    z = np.array([z]).T
    affine_3d = np.concatenate((affine_2d, z), axis=1)
    # Solve for the B0 direction in image coordinates.
    B0_dir = np.linalg.lstsq(affine_3d, [0, 0, 1], rcond=None)[0]
    B0 = int(info.MagneticFieldStrength)
    params = {
        'voxel_size': voxel_size,
        'CF': CF,
        'delta_TE': delta_TE,
        'TE': TE,
        'B0_dir': B0_dir,
        'B0': B0
    }
    return data, params
コード例 #46
0
def addMetadata(frame, filepath):
    """Store the file path and selected DICOM header values into *frame*."""
    ds = dicom.dcmread(filepath)
    frame['filepath'] = filepath
    # Copy the identifying/geometry attributes straight from the header.
    for tag in ('PatientID', 'StudyInstanceUID', 'SeriesInstanceUID',
                'ImagePositionPatient', 'ImageOrientationPatient'):
        frame[tag] = ds[tag].value
コード例 #47
0
     ]
     list_series = sorted(list_series, key=int)
     list_series = ['{}/{}'.format(folder, i) for i in list_series]
 except:
     call(['rmdir', os.path.join(patient_folder, 'Neeb')])
     log.info("8. Non-integer named series... Deleting Neeb folder.")
     sys.exit(1)
 if (list_series):
     for series in list_series:
         #print(series)
         list_dicoms = sorted(glob.glob(series + '/*[dD][cC][mM]*'))
         #print(os.listdir(os.path.join(folder, series)))
         if "gz" in list_dicoms[0]:
             call(['gunzip', list_dicoms[0]])
             list_dicoms[0] = list_dicoms[0][:-3]
         ds = pydicom.dcmread(list_dicoms[0])
         if ("epi_bh90" in ds.SeriesDescription):
             log.info("9. " + str(ds.SeriesDescription))
             # call(['cp', '-r', series, os.path.join(patient_folder,'Neeb/epi_highflip')])
             #pdb.set_trace()
             copytree(series,
                      os.path.join(patient_folder, 'Neeb/epi_highflip'))
         if ("epi_bh30" in ds.SeriesDescription):
             log.info("10. " + str(ds.SeriesDescription))
             call([
                 'cp', '-r', series,
                 os.path.join(patient_folder, 'Neeb/epi_lowflip')
             ])
         if ("epi_bb90" in ds.SeriesDescription):
             log.info("11. " + str(ds.SeriesDescription))
             call([
コード例 #48
0
import pydicom
from itertools import cycle
from pyTG43 import *
from bokeh.plotting import figure, show, output_file
from bokeh.palettes import Category20
from bokeh.models import CrosshairTool, HoverTool, Legend

# Load the example PDR brachytherapy plan: RT Plan, RT Structure Set and
# RT Dose files.
rp = pydicom.dcmread('examples/PDR/RP.PDR.dcm')
rs = pydicom.dcmread('examples/PDR/RS.PDR.dcm')
rd = pydicom.dcmread('examples/PDR/RD.PDR.dcm')

# Build the pyTG43 source and plan objects from the DICOM data.
source = Source(rp, 'examples/PDR/')
plan = Plan(source, rp, rs, rd)

# Sample DVH points for the structures of interest and pull the TPS-computed
# DVHs for comparison.
for roi in plan.ROIs:
    if roi.name in ['ctv','bladder','rectum']:
        roi.get_DVH_pts()
        roi.get_TPS_DVH(rp,rs,rd)

# Recompute DVHs with pyTG43 up to 10x the prescription dose.
calcDVHs(source,plan,plan.rx*10,['ctv','bladder','rectum'])

# Cycle through a categorical palette so each curve gets its own colour.
cmap = Category20[20]
itr = cycle(cmap)

output_file("dvh.html")

# DVH figure: dose on x (0..10x rx), relative volume 0-101% on y.
p = figure(plot_width=900, plot_height=560, x_range=(0,plan.rx*10), y_range=(0,101), active_scroll='wheel_zoom', toolbar_location='above')
p.xaxis.axis_label = 'Dose (Gy)'
p.yaxis.axis_label = 'Relative Volume (%)'

# Collected (label, renderer) pairs for the legend.
items = []
コード例 #49
0
})

slice_size = {}
# COVID-19
all_paths = sorted(
    [x for x in os.listdir(dataset_path_covid) if not x.startswith('.')])

for patient_number, path in enumerate(all_paths):
    subpath = sorted([
        x for x in os.listdir(dataset_path_covid + path)
        if not x.startswith('.')
    ])

    slice_numbers = len(subpath)  #number of slices
    dataset = pydicom.dcmread(
        os.path.join(dataset_path_covid + path,
                     subpath[0]))  #read a sample image

    #information
    patient_csv_data = {}
    patient_csv_data['Diagnosis'] = 'COVID-19'
    patient_csv_data['Patient Sex'] = [dataset.PatientSex]
    patient_csv_data['Patient Age'] = [dataset.PatientAge]
    patient_csv_data['Slice Thickness'] = [dataset.SliceThickness]
    patient_csv_data['Slices'] = [slice_numbers]
    patient_csv_data['Study Date'] = [dataset.StudyDate]
    patient_csv_data['XRayTubeCurrent'] = [dataset.XRayTubeCurrent]
    patient_csv_data['KVP'] = [dataset.KVP]
    patient_csv_data['Exposure Time'] = [dataset.ExposureTime]
    patient_csv_data['Study Date'] = [dataset.StudyDate]
    patient_csv_data['Date of Last Calibration'] = [
コード例 #50
0
ファイル: main.py プロジェクト: ibrandiay/imagerie-medical-
import matplotlib.pyplot as plt
import pydicom
from pydicom.data import get_testdata_files

# Change the image path below to wherever the file actually lives.

base = "/home/zoheir/Documents/Imageire médicale/Projet/AAA/DICOM/S00001/SER00001/"
pass_dicom = "I00002"
# Resolve the file named `pass_dicom` inside `base` via pydicom's data manager.
filename = pydicom.data.data_manager.get_files(base, pass_dicom)[0]

dataset = pydicom.dcmread(filename)

# Normal mode: print the main identifying attributes of the dataset.
print()
print("Nom de Fichier.........:", filename)
print("Type de sauvegarde.....:", dataset.SOPClassUID)
print()

# PersonName exposes the family and given name components separately.
pat_name = dataset.PatientName
display_name = pat_name.family_name + ", " + pat_name.given_name
print("Nom du Patient...:", display_name)
print("Id du Patient.......:", dataset.PatientID)
print("Modalité.........:", dataset.Modality)
print("Date de passage.......:", dataset.StudyDate)

# Describe the embedded image, if the dataset carries pixel data.
if 'PixelData' in dataset:
    rows = int(dataset.Rows)
    cols = int(dataset.Columns)
    print("Image size.......: {rows:d} x {cols:d}, {size:d} bytes".format(
        rows=rows, cols=cols, size=len(dataset.PixelData)))
    if 'PixelSpacing' in dataset:
コード例 #51
0
ファイル: window_v2.py プロジェクト: kartik-nighania/rsna
        dicom = pydicom.dcmread(DicomBytesIO(data))
        image = dicom.pixel_array
        rescale_slope, rescale_intercept = int(dicom.RescaleSlope), int(dicom.RescaleIntercept)
        image = rescale_image(image, rescale_slope, rescale_intercept)
        image = apply_window_policy(image)
        image -= image.min((0,1))
        image = (255*image).astype(np.uint8)
        Image.fromarray(image)
        cv2.imwrite(os.path.join(path_proc, imgnm)+'.jpg', image)
    except:
        print(name)

    
# Scan the CQ500 zip archives and parse the first DICOM member of each
# in memory (no extraction to disk).
path_img = '/Users/dhanley2/Documents/Personal/rsna/data/CQ500'
zipms = glob.glob(path_img+'/*zip')
namesls = []
import zipfile
from pydicom.filebase import DicomBytesIO
for zipf in zipms:
    with zipfile.ZipFile(zipf,  "r") as f:
        for t, name in enumerate(tqdm(f.namelist())):
            namesls.append(name)
            # Read the member fully into memory and parse it via DicomBytesIO.
            data = f.read(name)
            dicom = pydicom.dcmread(DicomBytesIO(data))
            break
            # NOTE(review): everything below this `break` is unreachable --
            # apparently leftover scratch code from debugging.

            imgnm = (name.split('/')[-1]).replace('.dcm', '')
            dicom = pydicom.dcmread(DicomBytesIO(data))

        # NOTE(review): runs once per archive with `name` still bound to the
        # first member only -- presumably a debugging leftover; confirm.
        convert_dicom_to_jpg(name)
コード例 #52
0
ファイル: run.py プロジェクト: PBibiloni/11763_E2
def load_dcm(filename):
    """Read and return the DICOM dataset stored as ``data/<filename>``.

    Bug fix: the f-string contained no placeholder, so the *filename*
    argument was silently ignored and every call loaded the same file.
    """
    return pydicom.dcmread(f'data/{filename}')
コード例 #53
0
   This example requires the Numpy library to manipulate the pixel data.

"""

# authors : Guillaume Lemaitre <*****@*****.**>
# license : MIT

import pydicom
from pydicom.data import get_testdata_files

print(__doc__)

# FIXME: add a full-sized MR image in the testing data
# Use the small MR file bundled with pydicom's test data as input.
filename = get_testdata_files('MR_small.dcm')[0]
ds = pydicom.dcmread(filename)

# get the pixel information into a numpy array
data = ds.pixel_array
print('The image has {} x {} voxels'.format(data.shape[0], data.shape[1]))
# Downsample by keeping every 8th voxel along both axes.
data_downsampling = data[::8, ::8]
print('The downsampled image has {} x {} voxels'.format(
    data_downsampling.shape[0], data_downsampling.shape[1]))

# copy the data back to the original data set
ds.PixelData = data_downsampling.tobytes()
# update the information regarding the shape of the data array
ds.Rows, ds.Columns = data_downsampling.shape

# print the image information given in the dataset
print('The information of the data set after downsampling: \n')
コード例 #54
0
ファイル: mdai_deploy.py プロジェクト: mdai/model-deploy
    def predict(self, data):
        """
        Run the classifier on each DICOM file in `data` and return MD.ai
        annotation outputs with Grad-CAM and SmoothGrad explanation images.

        See https://github.com/mdai/model-deploy/blob/master/mdai/server.py for details on the
        schema of `data` and the required schema of the outputs returned by this function.
        """
        input_files = data["files"]
        input_annotations = data["annotations"]  # part of the schema; unused here
        input_args = data["args"]  # part of the schema; unused here

        outputs = []

        for file in input_files:
            # Only DICOM inputs are supported; anything else is skipped.
            if file["content_type"] != "application/dicom":
                continue

            # Parse the in-memory DICOM and preprocess its pixels for the model.
            ds = pydicom.dcmread(BytesIO(file["content"]))
            image = ds.pixel_array
            x = preprocess_image(image)

            # Class probabilities -> predicted class index and its probability.
            y_prob = self.model.predict(x)
            y_classes = y_prob.argmax(axis=-1)

            class_index = y_classes[0]
            probability = y_prob[0][class_index]

            # Grad-CAM heatmap over a named conv layer.
            # NOTE(review): "conv_pw_13_relu" is a MobileNet layer name --
            # confirm it matches the deployed model architecture.
            gradcam = GradCAM()
            gradcam_output = gradcam.explain(
                validation_data=(x, None),
                model=self.model,
                layer_name="conv_pw_13_relu",
                class_index=class_index,
                colormap=cv2.COLORMAP_TURBO,
            )
            gradcam_output_buffer = BytesIO()
            Image.fromarray(gradcam_output).save(gradcam_output_buffer, format="PNG")

            # SmoothGrad saliency for the same predicted class.
            smoothgrad = SmoothGrad()
            smoothgrad_output = smoothgrad.explain(
                validation_data=(x, None), model=self.model, class_index=class_index
            )
            smoothgrad_output_buffer = BytesIO()
            Image.fromarray(smoothgrad_output).save(smoothgrad_output_buffer, format="PNG")

            # Assemble the MD.ai annotation payload with both explanations
            # attached as PNG images.
            output = {
                "type": "ANNOTATION",
                "study_uid": str(ds.StudyInstanceUID),
                "series_uid": str(ds.SeriesInstanceUID),
                "instance_uid": str(ds.SOPInstanceUID),
                "class_index": int(class_index),
                "probability": float(probability),
                "explanations": [
                    {
                        "name": "Grad-CAM",
                        "description": "Visualize how parts of the image affects neural network’s output by looking into the activation maps. From _Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization_ (https://arxiv.org/abs/1610.02391)",
                        "content": gradcam_output_buffer.getvalue(),
                        "content_type": "image/png",
                    },
                    {
                        "name": "SmoothGrad",
                        "description": "Visualize stabilized gradients on the inputs towards the decision. From _SmoothGrad: removing noise by adding noise_ (https://arxiv.org/abs/1706.03825)",
                        "content": smoothgrad_output_buffer.getvalue(),
                        "content_type": "image/png",
                    },
                ],
            }
            outputs.append(output)

        return outputs
コード例 #55
0
    r'C:\Users\Julia Scott\Desktop\Varian 2020_2021\Prashul\Excel\OralCavityImages.xlsx',
    'rb'),
                       sheet_name='Sheet1')

# Map each DICOM folder to its [start, end] slice range from the spreadsheet,
# then convert that slice range from DICOM to JPG.
# Bug fix: the accumulator was named `dict`, shadowing the builtin type.
slice_ranges = defaultdict(list)

for i in df.index:
    try:
        startSlice = int(df['Start Slice'][i])
        endSlice = int(df['End Slice'][i])
        fileLocation = df['File Location'][i]
        print(str(startSlice) + "," + str(endSlice) + "   " + fileLocation)
        slice_ranges[fileLocation].append(startSlice)
        slice_ranges[fileLocation].append(endSlice)
    except ValueError:
        # Skip rows whose slice columns are blank or non-numeric.
        continue

for folderPath in slice_ranges:
    # Only the first recorded [start, end] pair per folder is used.
    startSlice = slice_ranges[folderPath][0]
    endSlice = slice_ranges[folderPath][1]
    for sliceNo in range(startSlice, endSlice + 1):
        fileName = "1-" + str(sliceNo).zfill(3) + ".dcm"
        jpegFileName = "1-" + str(sliceNo).zfill(3) + ".jpeg"
        jpgFileName = "1-" + str(sliceNo).zfill(3) + ".jpg"
        # force=True tolerates files missing the standard DICOM preamble.
        ds = dicom.dcmread(os.path.join(folderPath, fileName), force=True)
        pixel_array_numpy = ds.pixel_array
        cv2.imwrite(os.path.join(folderPath, jpgFileName), pixel_array_numpy)
        # NOTE(review): raises if the .jpeg twin does not exist -- confirm the
        # source folders really contain earlier .jpeg conversions.
        os.remove(os.path.join(folderPath, jpegFileName))
コード例 #56
0
# import pdb

# Module logger: INFO and above, mirrored to a log file and to the console.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler("storage_scu_log.txt")
handler.setLevel(logging.INFO)
# NOTE(review): the backslash continuation embeds the source indentation into
# the format string, producing large gaps in log lines -- likely unintended.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s\
                              - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(console)

# Dataset to be sent via C-STORE.
ds = dcmread('/Volumes/Transcend/mediclouds/PACS/MRI.dcm')
# ds = dcmread('/Volumes/Transcend/mediclouds/PACS/IMG00001')
# Our Storage SCU application entity.
ae = AE(ae_title=b'ilab_scu')

# Request all standard storage presentation contexts.
ae.requested_contexts = StoragePresentationContexts
# ae.add_requested_context(VerificationPresentationContexts)
# ae.add_requested_context(VerificationSOPClass)

# for cx in ae.requested_contexts:
#     print(cx)

# Open an association with the remote SCP.
assoc = ae.associate('192.168.3.5', 4100, ae_title=b'lkjds')

if assoc.is_established:
    logger.info('assoc is established')
    dataset = dcmread('./MRI.dcm')
コード例 #57
0
ファイル: Instance.py プロジェクト: wendyrvllr/Dicom-To-CNN
 def __load_metadata(self):
     """Parse only the DICOM header, skipping the (potentially large) pixels."""
     # force=True tolerates files missing the standard DICOM preamble.
     self.dicomData = pydicom.dcmread(
         self.path, stop_before_pixels=True, force=True)
コード例 #58
0
 def test_standard_file(self):
     """charset: a standard file without special characters decodes fine."""
     dataset = dcmread(get_testdata_file("CT_small.dcm"))
     dataset.decode()
     assert dataset.PatientName == 'CompressedSamples^CT1'
コード例 #59
0
def loadDicom(fullpath: str):
    """Read the DICOM file at *fullpath* and return the parsed dataset."""
    dataset = pydicom.dcmread(fullpath)
    return dataset
コード例 #60
0
    def run(self):
        """Segment a DICOM series found at the module-level ``path``.

        Behaviour is selected by the module-level ``flag``:
          * ``flag == 1``      — liver segmentation only; the tumor result
            is an all-zero array of the same shape.
          * ``flag in (2, 3)`` — liver segmentation followed by tumor
            segmentation with a second model.

        Side effects (Qt signals):
          * ``self.sinOut``  — per-slice progress text.
          * ``self.sinOut4`` — elapsed wall time, seconds.
          * ``self.sinOut3`` — liver and tumor label maps as raw bytes.

        Also stores the elapsed time in the module global ``t_elapsed``.
        """
        global t_elapsed

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        gpu_flag = '(on GPU)' if torch.cuda.is_available() else '(on CPU)'
        t_start = time.time()

        if flag == 1:
            print('flag = 1')
            liver_tensor, tumor_tensor = self._load_windowed_series()
            results_liver = self._segment(
                'liver_7WL.pth', liver_tensor, "标记肝脏: ", gpu_flag, device)
            # Tumor segmentation is skipped for flag 1; emit zeros instead.
            results_tumor = np.zeros([liver_tensor.shape[0], 512, 512])
            print(results_liver.shape)

            # BUG FIX: ndarray.tostring() is deprecated (removed in modern
            # NumPy); tobytes() returns the identical byte string.
            a = results_liver.tobytes()
            b = results_tumor.tobytes()

            t_elapsed = time.time() - t_start
            self.sinOut4.emit("耗时: ", str(round(t_elapsed, 4)))
            self.sinOut3.emit(a, b)

        elif flag == 3 or flag == 2:
            liver_tensor, tumor_tensor = self._load_windowed_series()
            results_liver = self._segment(
                'liver_7WL.pth', liver_tensor, "标记肝脏: ", gpu_flag, device)
            a = results_liver.tobytes()
            # The liver model goes out of scope when _segment returns,
            # releasing its memory before the tumor model is loaded.
            results_tumor = self._segment(
                './best_tumor.pth', tumor_tensor, "标记肿瘤: ", gpu_flag,
                device)
            b = results_tumor.tobytes()

            t_elapsed = time.time() - t_start
            self.sinOut4.emit("耗时: ", str(round(t_elapsed, 4)))
            self.sinOut3.emit(a, b)

    def _load_windowed_series(self):
        """Read every DICOM in ``path``, sort by InstanceNumber, and return
        (liver, tumor) float tensors of shape (N, 1, H, W) scaled to [0, 1].

        Two window/level presets are applied via the module-level ``WL``
        helper: (0, 2048) for liver, (100, 150) for tumor.
        """
        filenames = os.listdir(path)
        slices_liver, slices_tumor, idx = [], [], []
        for name in filenames:
            ds = pydicom.dcmread(os.path.join(path, name))
            idx.append(int(ds.InstanceNumber))
            slices_liver.append(WL(ds, 0, 2048))    # wide window for liver
            slices_tumor.append(WL(ds, 100, 150))   # narrow window for tumor

        # Files may list in arbitrary order; sort slices anatomically.
        order = np.argsort(idx)
        liver = np.stack(slices_liver)[order]
        tumor = np.stack(slices_tumor)[order]

        liver_tensor = torch.tensor(liver).unsqueeze(1).float() / 255.
        tumor_tensor = torch.tensor(tumor).unsqueeze(1).float() / 255.
        return liver_tensor, tumor_tensor

    def _segment(self, model_path, slices_tensor, label, gpu_flag, device):
        """Run the model at ``model_path`` slice-by-slice over
        ``slices_tensor`` and return an (N, 512, 512) array of per-pixel
        argmax class labels.  Emits per-slice progress on ``self.sinOut``
        prefixed with ``label``.
        """
        model = torch.load(model_path, map_location=device)
        model = model.to(device)
        model = model.eval()
        sm = nn.Softmax(dim=1)

        n_slices = slices_tensor.shape[0]
        results = np.zeros([n_slices, 512, 512])
        for i in range(n_slices):
            self.sinOut.emit(label, str(i + 1) + "/" +
                             str(n_slices) + gpu_flag)
            output = model(slices_tensor[i, :].unsqueeze(0).to(device))
            _, result = torch.max(sm(output), dim=1)
            results[i] = result[0, :].cpu().detach().numpy()
        return results