Example #1
def DicomRead (exam, series, noEchos):
    #Call in the images within the exam and series of interest
    directory = 'exam_' + str(exam) + '/Ser' + str(series)
    os.chdir(directory)
    listFilesDCM = natsorted(glob.glob('E*S*I*.MR.dcm'))
    #Get the reference information from the very first image of that list of images    
    RefDs = pydicom.read_file(listFilesDCM[0])  #Stored ref file 
    #Import array dims are the dimensions of the imported 3D array. ConstPixelDims are the dimensions of the 4D matrix (row, column, slice, echo)
    #spacing is the size of each voxel in 3D space along the three axes.    
    ImportArrayPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(listFilesDCM)) #Dimensions of rows, columns, slices
    ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), int(len(listFilesDCM)/noEchos), noEchos)
    ConstPixelSpacing = (float(RefDs.PixelSpacing[0]), float(RefDs.PixelSpacing[1]), float(RefDs.SliceThickness))  #spacing values (mm)
    
    #These are the coordinate arrays along each axis. Not used at the moment.
    x = numpy.arange(0.0, (ConstPixelDims[0]+1)*ConstPixelSpacing[0], ConstPixelSpacing[0])
    y = numpy.arange(0.0, (ConstPixelDims[1]+1)*ConstPixelSpacing[1], ConstPixelSpacing[1])
    z = numpy.arange(0.0, (ConstPixelDims[2]+1)*ConstPixelSpacing[2], ConstPixelSpacing[2])  
    
    #The section of code below imports each dcm in the folder of interest. The array data is all saved to ArrayDicom.
    #The header data is saved to header.imageX, where X is the index of the image of interest.
    #The first Y echo times are saved to echos, where Y is the number of echos for each slice.
    ArrayDicom = numpy.zeros(ImportArrayPixelDims, dtype=RefDs.pixel_array.dtype)    
    echoTimes =[]
    class container:
        pass
    header = container()
    for filenameDCM in listFilesDCM:
        ds = pydicom.read_file(filenameDCM)
        ArrayDicom[:,:,listFilesDCM.index(filenameDCM)] = ds.pixel_array
        echoTimes = numpy.hstack((echoTimes, ds.EchoTime))
        echos = echoTimes[0:noEchos]
    for image in range(0, len(listFilesDCM)):
        ds = pydicom.read_file(listFilesDCM[image])
        setattr(header, 'image' + str(image), ds)  #store every dataset as header.imageN instead of overwriting header.image
    return(ArrayDicom, echos, ConstPixelDims, RefDs, header)
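A minimal usage sketch (the exam/series numbers and echo count are hypothetical; note that DicomRead changes the working directory, so call it from the study root):

volume, echos, dims, ref_ds, header = DicomRead(1234, 5, 3)
print(volume.shape)  # (rows, columns, number_of_files)
print(dims)          # (rows, columns, slices, echoes)
print(echos)         # echo times of the first 3 images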
Example #2
def is_dicom_dir(datapath):
    """
    Check if in dir is one or more dicom file. We use two methods.
    First is based on dcm extension detection.
    """
    # Second tries open files
    # with dicom module.

    retval = False
    datapath = op.expanduser(datapath)
    for f in os.listdir(datapath):
        if f.endswith((".dcm", ".DCM")):
            retval = True
            return True
        # @todo not working and I don't know why
        try:
            pydicom.read_file(os.path.join(datapath, f))

            retval = True
        # except pydicom.errors.InvalidDicomError:
        #     logger.debug("Invalid Dicom while reading file " + str(f))
        except Exception as e:
            logger.warning("Unable to read dicom file " + str(f))
            logger.warning(e)
            # import traceback
            # traceback.print_exc()

        if retval:
            return True
    return False
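A hedged usage sketch (the path is hypothetical; the function expands '~' itself):

if is_dicom_dir('~/data/patient01'):
    print('directory contains at least one DICOM file')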
Example #3
    def setUp(self):
        dcm_path = path.join(test_dir,
                             'data',
                             'dcmstack',
                             '2D_16Echo_qT2',
                             'TE_20_SlcPos_-33.707626341697.dcm')
        self.dcm = pydicom.read_file(dcm_path)

        self.stack = dcmstack.DicomStack()
        self.stack.add_dcm(self.dcm)
        self.dcm = pydicom.read_file(dcm_path)
Example #4
def _extract_series_frames(simulation, dicom_dir):
    #TODO(pjm): give user a choice between multiple study/series if present
    selected_series = None
    frames = {}
    dicom_dose = None
    rt_struct_path = None
    res = {
        'description': '',
    }
    for path in pkio.walk_tree(dicom_dir):
        if pkio.has_file_extension(str(path), 'dcm'):
            plan = dicom.read_file(str(path))
            if plan.SOPClassUID == _DICOM_CLASS['RT_STRUCT']:
                rt_struct_path = str(path)
            elif plan.SOPClassUID == _DICOM_CLASS['RT_DOSE']:
                res['dicom_dose'] = _summarize_rt_dose(simulation, plan)
                plan.save_as(_dose_dicom_filename(simulation))
            if plan.SOPClassUID != _DICOM_CLASS['CT_IMAGE']:
                continue
            orientation = _float_list(plan.ImageOrientationPatient)
            if not (_EXPECTED_ORIENTATION == orientation).all():
                continue
            if not selected_series:
                selected_series = plan.SeriesInstanceUID
                res['StudyInstanceUID'] = plan.StudyInstanceUID
                res['PixelSpacing'] = plan.PixelSpacing
                if hasattr(plan, 'SeriesDescription'):
                    res['description'] = plan.SeriesDescription
            if selected_series != plan.SeriesInstanceUID:
                continue
            info = {
                'pixels': np.float32(plan.pixel_array),
                'shape': plan.pixel_array.shape,
                'ImagePositionPatient': _string_list(plan.ImagePositionPatient),
                'ImageOrientationPatient': _float_list(plan.ImageOrientationPatient),
                'PixelSpacing': _float_list(plan.PixelSpacing),
            }
            for f in ('FrameOfReferenceUID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID'):
                info[f] = getattr(plan, f)
            z = _frame_id(info['ImagePositionPatient'][2])
            info['frameId'] = z
            if z in frames:
                raise RuntimeError('duplicate frame with z coord: {}'.format(z))
            _scale_pixel_data(plan, info['pixels'])
            frames[z] = info
    if not selected_series:
        raise RuntimeError('No series found with {} orientation'.format(_EXPECTED_ORIENTATION))
    if rt_struct_path:
        res['regionsOfInterest'] = _summarize_rt_structure(simulation, dicom.read_file(rt_struct_path), frames.keys())
    sorted_frames = []
    res['frames'] = sorted_frames
    for z in sorted(_float_list(frames.keys())):
        sorted_frames.append(frames[_frame_id(z)])
    return res
Example #5
def pydicom_zapping(dicom_file, dicom_fields):
    """
    Actual zapping method for PyDICOM

    :param dicom_file: DICOM to anonymize
     :type dicom_file: str
    :param dicom_fields: Dictionary with DICOM fields & values to use
     :type dicom_fields: dict

    :return: None

    """

    dicom_dataset = dicom.read_file(dicom_file)

    for name in dicom_fields:
        new_val = ""
        if 'Value' in dicom_fields[name]:
            new_val = dicom_fields[name]['Value']

        if dicom_fields[name]['Editable'] is True:
            try:
                dicom_dataset.data_element(
                    dicom_fields[name]['Description']).value = new_val
            except:
                continue
        else:
            try:
                dicom_dataset.data_element(
                    dicom_fields[name]['Description']).value = ''
            except:
                continue
    dicom_dataset.save_as(dicom_file)
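The dicom_fields argument is assumed to map arbitrary keys to entries carrying 'Description', 'Editable' and optionally 'Value', as read by the loop above; a hypothetical example:

fields = {
    'patient_name': {'Description': 'PatientName', 'Editable': True, 'Value': 'ANONYMOUS'},
    'patient_id':   {'Description': 'PatientID',   'Editable': False},
}
pydicom_zapping('/path/to/scan.dcm', fields)  # hypothetical file path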
Example #6
File: client.py Project: cni/rtfmri
 def retrieve_dicom(self, filename):
     """Return a file as a dicom object."""
     try:
         return pydicom.read_file(self.retrieve_file(filename), force=True)
     except Exception as e:
         raise RuntimeError("Exception {} raised with {}".format(
             type(e).__name__, filename))
Example #7
def read_slice(path, d):
    """Read a single slice."""
    try:
        df = pydicom.read_file(str(path))
    except pydicom.filereader.InvalidDicomError as e:
        log.error('Error reading %s: %s', path, e)
        return
    if 'PixelData' not in df:
        return
    d.setdefault('orientation', df.ImageOrientationPatient)
    if d['orientation'] != df.ImageOrientationPatient:
        raise Exception('Orientation mismatch.')
    d.setdefault('shape', df.pixel_array.shape)
    if d['shape'] != df.pixel_array.shape:
        raise Exception('Shape mismatch: {}'.format(path))
    d.setdefault('dtype', df.pixel_array.dtype)
    if d['dtype'] != df.pixel_array.dtype:
        raise Exception('Type mismatch.')
    d.setdefault('voxel_spacing', get_voxel_spacing(df))
    position = tuple(float(x) for x in df.ImagePositionPatient)
    bvalue = get_bvalue(df)
    echotime = get_echotime(df)
    pixels = get_pixels(df)
    d.setdefault('positions', set()).add(position)
    d.setdefault('bvalues', set()).add(bvalue)
    d.setdefault('echotimes', set()).add(echotime)
    key = (position, bvalue, echotime)
    slices = d.setdefault('slices', {})
    if key in slices:
        log.error('Overlapping slices (%s), discarding %s', key, path)
        s = 'Overlapping slices, discarding {}'.format(path)
        d['errors'].append(s)
    slices[key] = pixels
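A hedged usage sketch: read_slice fills the shared dict d in place, so the slices of one series are accumulated like this (get_voxel_spacing, get_bvalue, get_echotime and get_pixels are helpers from the same project; the series path is hypothetical):

from pathlib import Path
d = {'errors': []}
for p in sorted(Path('/path/to/series').glob('*.dcm')):
    read_slice(p, d)
print(len(d.get('slices', {})), 'slices read,', len(d['errors']), 'errors')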
Example #8
def anon(fname):
    patient_list = contained_dirs(fname)

    for i in patient_list:
        if not os.listdir(i):
            continue
        elif os.path.split(os.path.split(i)[1])[1][:4] == 'AAA_':
            acquis_list = contained_dirs(i)
            for j in acquis_list:
                f = []
                for (dirpath, dirnames, filenames) in os.walk(j):
                    f.extend(filenames)
                    break
                ll = 0
                for k in filenames:
                    image = j + '/' + k
                    ds = pydicom.read_file(image)
                    ID = ds.PatientID
                    new_PN = ID
                    new_ID = 'FLOW_' + os.path.split(i)[1] + '_' + ID
                    anonymize(image, image, new_person_name=new_PN,
                              new_patient_id=new_ID, remove_curves=True, remove_private_tags=True)

                    if ll % 50 == 0:
                        print 'anonymized', ll, 'over', len(filenames)
                    ll += 1

        else:
            continue
    return
Example #9
def read_dicom_with_pydicom(dicom_file, dicom_fields):
    """
    Read DICOM file using PyDICOM python library.

    :param dicom_file: DICOM file to read
     :type dicom_file: str
    :param dicom_fields: Dictionary containing DICOM fields and values
     :type dicom_fields: dict

    :return: updated dictionary of DICOM fields and values
     :rtype : dict

    """

    # Read DICOM file
    dicom_dataset = dicom.read_file(dicom_file)

    # Grab information from the DICOM header and store it
    # in the dicom_fields dictionary under the 'Value' flag.
    # Dictionary of DICOM values to be returned
    for name in dicom_fields:
        try:
            description = dicom_fields[name]['Description']
            value = dicom_dataset.data_element(description).value
            dicom_fields[name]['Value'] = value
        except:
            continue

    return dicom_fields
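A hedged usage sketch, reusing the same dicom_fields structure as in the zapping example above (file path hypothetical):

fields = {'patient_name': {'Description': 'PatientName'}}
fields = read_dicom_with_pydicom('/path/to/scan.dcm', fields)
print(fields['patient_name'].get('Value'))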
Example #10
File: tools.py Project: mjirik/lisa
def read_data(dcmdir, indices=None, wildcard='*.dcm', type=np.int16):

    dcmlist = []
    for infile in glob.glob(os.path.join(dcmdir, wildcard)):
        dcmlist.append(infile)

    if indices is None:
        indices = range(len(dcmlist))

    data3d = []
    for i in range(len(indices)):
        ind = indices[i]
        onefile = dcmlist[ind]
        if wildcard == '*.dcm':
            data = pydicom.read_file(onefile)
            data2d = data.pixel_array
            try:
                data2d = (float(data.RescaleSlope) * data2d) + float(data.RescaleIntercept)
            except:
                print('problem with RescaleSlope and RescaleIntercept')
        else:
            data2d = cv2.imread(onefile, 0)

        if len(data3d) == 0:
            shp2 = data2d.shape
            data3d = np.zeros([shp2[0], shp2[1], len(indices)], dtype=type)

        data3d[:,:,i] = data2d

    #need to reshape data to have slice index (ndim==3)
    if data3d.ndim == 2:
        data3d.resize(np.hstack((data3d.shape,1)))

    return data3d
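A hedged usage sketch with a hypothetical directory (the default wildcard selects DICOM files):

data3d = read_data('/path/to/dicomdir')
print(data3d.shape)  # (rows, columns, number_of_slices)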
Example #11
def isdicom(filename):
    if os.path.basename(filename).lower() == 'dicomdir':
        return False
    try:
        return pydicom.read_file(filename)
    except pydicom.filereader.InvalidDicomError:
        return False
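Note that on success the function returns the parsed dataset (which is truthy) rather than True; a short usage sketch with a hypothetical path:

ds = isdicom('/path/to/file')
if ds:
    print(ds.SOPClassUID)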
Example #12
 def test_invalid_sop_file_meta(self):
     """Test exception raised if SOP Class is not Media Storage Directory"""
     ds = read_file(get_testdata_files('CT_small.dcm')[0])
     with pytest.raises(InvalidDicomError,
                        match="SOP Class is not Media Storage "
                              "Directory \(DICOMDIR\)"):
         DicomDir("some_name", ds, b'\x00' * 128, ds.file_meta, True, True)
Example #13
    def loadDicomImages(self, dicomFilesPath):

        # TODO: Implement verification that DICOM files exist
        dicomPath = os.path.join(dicomFilesPath, os.listdir(dicomFilesPath)[1])
        try:
            dicomFile = pydicom.read_file(dicomPath)
        except Exception as e:
            raise e

        if 0x00280106 in dicomFile and 0x00280107 in dicomFile:
            self.IMAGE_SMALLEST_PIXEL = dicomFile[0x00280106].value  # SmallestImagePixelValue
            self.IMAGE_LARGEST_PIXEL = dicomFile[0x00280107].value  # LargestImagePixelValue
        else:
            self.IMAGE_SMALLEST_PIXEL = 0
            self.IMAGE_LARGEST_PIXEL = 100

        self.wxUpperSlider.SetRange(self.IMAGE_SMALLEST_PIXEL, self.IMAGE_LARGEST_PIXEL)
        self.wxLowerSlider.SetRange(self.IMAGE_SMALLEST_PIXEL, self.IMAGE_LARGEST_PIXEL)
        self.wxLowerSlider.Update()
        self.Layout()

        dicomImages = vtkDICOMImageReader()
        dicomImages.SetDirectoryName(dicomFilesPath)
        dicomImages.Update()

        self.DICOM_IMAGES = dicomImages
        self.adjustImageThreshold(800)
        #self.__plotImage(self.ROOT_PIPE)
        self.createVolume()
        self.decimateVolume(0.5)
        self.view3DImage(self.ROOT_PIPE)
Example #14
 def test_invalid_sop_no_file_meta(self):
     """Test exception raised if invalid sop class but no file_meta"""
     ds = read_file(get_testdata_files('CT_small.dcm')[0])
     with pytest.raises(AttributeError,
                        match="'DicomDir' object has no attribute "
                              "'DirectoryRecordSequence'"):
         DicomDir("some_name", ds, b'\x00' * 128, None, True, True)
Example #15
    def _create_dicomdir_info(self):
        """
        Function creates a list of all files in the dicom dir with all IDs
        """

        filelist = files_in_dir(self.dirpath)
        files = []
        metadataline = {}

        for filepath in filelist:
            head, teil = os.path.split(filepath)
            dcmdata = None
            if os.path.isdir(filepath):
                logger.debug("Subdirectory found in series dir is ignored: " + str(filepath))
                continue
            try:
                dcmdata = pydicom.read_file(filepath)

            except pydicom.errors.InvalidDicomError as e:
                # some files don't have the DICM marker
                try:
                    dcmdata = pydicom.read_file(filepath, force=self.force_read)

                    # if e.[0].startswith("File is missing \\'DICM\\' marker. Use force=True to force reading")
                except Exception as e:
                    if teil != self.dicomdir_filename:
                        # print('Dicom read problem with file ' + filepath)
                        logger.info('Dicom read problem with file ' + filepath)
                        import traceback
                        logger.debug(traceback.format_exc())
            if hasattr(dcmdata, "DirectoryRecordSequence"):
                # file is DICOMDIR - metainfo about files in directory
                # we are not using this info
                dcmdata = None

            if dcmdata is not None:
                metadataline = _prepare_metadata_line(dcmdata, teil)
                files.append(metadataline)

        # if SliceLocation is None, it is sorted to the end
        # this is not necessary; it can be deleted
        files.sort(key=lambda x: (x['SliceLocation'] is None, x["SliceLocation"]))

        dcmdirplus = {'version': __version__, 'filesinfo': files, }
        if "StudyDate" in metadataline:
            dcmdirplus["StudyDate"] = metadataline["StudyDate"]
        return dcmdirplus
Example #16
 def test_parse_records(self):
     """Test DicomDir.parse_records"""
     ds = read_file(TEST_FILE)
     assert hasattr(ds, 'patient_records')
     # There are two top level PATIENT records
     assert len(ds.patient_records) == 2
     assert ds.patient_records[0].PatientName == 'Doe^Archibald'
     assert ds.patient_records[1].PatientName == 'Doe^Peter'
Example #17
def is_file_a_dicom(file):
    """
    Check whether a given file is of type DICOM

    :param file: path to the file to identify
     :type file: str

    :return: True if the file is DICOM, False otherwise
     :rtype: bool

    """

    try:
        dicom.read_file(file)
    except InvalidDicomError:
        return False
    return True
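A hedged usage sketch that filters a directory with this helper (directory path hypothetical):

import os
dicom_files = [f for f in os.listdir('/path/to/dir')
               if is_file_a_dicom(os.path.join('/path/to/dir', f))]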
Example #18
def on_c_get(dataset):
    basedir = '../test/dicom_files/'
    dcm_files = ['CTImageStorage.dcm']
    dcm_files = [os.path.join(basedir, x) for x in dcm_files]
    yield len(dcm_files)
    
    for dcm in dcm_files:
        data = read_file(dcm, force=True)
        yield data
Example #19
 def on_receive_store(self, context, ds):
     d = dicom.read_file(ds)
     self.test.assertEqual(context.sop_class, self.rq.SOPClassUID)
     self.test.assertEqual(d.PatientName, self.rq.PatientName)
     self.test.assertEqual(d.StudyInstanceUID, self.rq.StudyInstanceUID)
     self.test.assertEqual(d.SeriesInstanceUID, self.rq.SeriesInstanceUID)
     self.test.assertEqual(d.SOPInstanceUID, self.rq.SOPInstanceUID)
     self.test.assertEqual(d.SOPClassUID, self.rq.SOPClassUID)
     return statuses.SUCCESS
Example #20
    def testRead(self):
        """Unicode: Can read a file with unicode characters in name................"""
        uni_name = u'test°'

        # verify first that we could encode file name in this environment
        try:
            _ = uni_name.encode(sys.getfilesystemencoding())
        except UnicodeEncodeError:
            print("SKIP: Environment doesn't support unicode filenames")
            return

        try:
            pydicom.read_file(uni_name)
        except UnicodeEncodeError:
            self.fail("UnicodeEncodeError generated for unicode name")
        # ignore file doesn't exist error
        except IOError:
            pass
Example #21
 def testLatin1(self):
     """charset: can read and decode latin_1 file........................"""
     ds = pydicom.read_file(latin1_file)
     ds.decode()
      # Make sure we don't get a unicode encode error when converting to string
     expected = u'Buc^J\xe9r\xf4me'
     got = ds.PatientName
     self.assertEqual(expected, got,
                      "Expected %r, got %r" % (expected, got))
Example #22
 def test_default(self):
     res = dcmstack.parse_and_group(self.in_paths)
     eq_(len(res), 1)
     ds = pydicom.read_file(self.in_paths[0])
     group_key = list(res.keys())[0]
     for attr_idx, attr in enumerate(dcmstack.default_group_keys):
         if attr in dcmstack.default_close_keys:
             ok_(np.allclose(group_key[attr_idx], getattr(ds, attr)))
         else:
             eq_(group_key[attr_idx], getattr(ds, attr))
Example #23
    def read_from_dicom(self, path):  # TODO: not implemented
        """ Load a Dicom file from 'path'

        Currently, this function merely stores the dicom data into self.data.
        No interpretation is done.

        :param str path: Full path to Dicom file.
        """
        self.data = dicom.read_file(path)
        logger.warning("Rst.read_from_dicom() is not implemented.")
Example #24
 def setUp(self):
     self.data_dir = path.join(test_dir,
                          'data',
                          'dcmstack',
                          '2D_16Echo_qT2')
     self.inputs = [pydicom.read_file(path.join(self.data_dir, fn))
                    for fn in ('TE_20_SlcPos_-33.707626341697.dcm',
                               'TE_20_SlcPos_-23.207628249046.dcm'
                              )
                   ]
Example #25
 def testNestedCharacterSets(self):
     """charset: can read and decode SQ with different encodings........."""
     ds = pydicom.read_file(sq_encoding_file)
     ds.decode()
     # These datasets inside of the SQ cannot be decoded with default_encoding
     # OR UTF-8 (the parent dataset's encoding). Instead, we make sure that it
     # is decoded using the (0008,0005) tag of the dataset
     expected = u'\uff94\uff8f\uff80\uff9e^\uff80\uff9b\uff73=\u5c71\u7530^\u592a\u90ce=\u3084\u307e\u3060^\u305f\u308d\u3046'
     got = ds[0x32, 0x1064][0].PatientName
     self.assertEqual(expected, got,
                      "Expected %r, got %r" % (expected, got))
Example #26
def load_scans(path):
    """Reads data files and returns a list of Pandas dataframes"""
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)   
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
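A common follow-up, not part of load_scans itself: stack the sorted slices and apply the rescale slope/intercept, assuming CT data where both attributes are present (path hypothetical):

import numpy as np
slices = load_scans('/path/to/series')
volume = np.stack([s.pixel_array for s in slices]).astype(np.int16)
volume = volume * int(slices[0].RescaleSlope) + int(slices[0].RescaleIntercept)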
Example #27
def draw_center_for_check(dcm_path, id, sax, point, points):
    debug_folder = os.path.join('..', 'calc', 'center_find')
    if not os.path.isdir(debug_folder):
        os.mkdir(debug_folder)
    ds = pydicom.read_file(dcm_path)
    img = convert_to_grayscale_with_increase_brightness_fast(ds.pixel_array, 1)
    cv2.circle(img, (int(round(point[1], 0)), int(round(point[0], 0))), 5, 255, 3)
    img = cv2.line(img, (points[1], points[0]), (points[3], points[2]), 127, thickness=2)
    img = cv2.line(img, (points[5], points[4]), (points[7], points[6]), 127, thickness=2)
    # show_image(img)
    cv2.imwrite(os.path.join(debug_folder, str(id) + '_' + sax + '.jpg'), img)
Example #28
def on_c_find(dataset):
    basedir = "../test/dicom_files/"
    dcm_files = ["CTImageStorage.dcm"]
    dcm_files = [os.path.join(basedir, x) for x in dcm_files]
    for dcm in dcm_files:
        data = read_file(dcm, force=True)

        d = Dataset()
        d.QueryRetrieveLevel = dataset.QueryRetrieveLevel
        d.RetrieveAETitle = args.aetitle
        d.PatientName = data.PatientName
        yield d
Example #29
def test_image_collision():
    dcm_path = path.join(test_dir,
                         'data',
                         'dcmstack',
                         '2D_16Echo_qT2',
                         'TE_20_SlcPos_-33.707626341697.dcm')
    dcm = pydicom.read_file(dcm_path)
    stack = dcmstack.DicomStack('EchoTime')
    stack.add_dcm(dcm)
    assert_raises(dcmstack.ImageCollisionError,
                  stack.add_dcm,
                  dcm)
Example #30
    def testPlanSum(self):
        ss = pydicom.read_file('./testdata/rtss.dcm')
        rtd1 = pydicom.read_file('./testdata/rtdose1.dcm')
        rtd2 = pydicom.read_file('./testdata/rtdose2.dcm')

        sum = SumPlan(rtd1, rtd2, None)
        
        self.assertEqual(sum.pixel_array.shape,(4,10,8))
        self.assertEqual(sum.PixelSpacing, [5.,5.])
        
        for pos1,pos2 in zip(sum.ImagePositionPatient,[-15.44,-20.44,-7.5]):
            self.assertAlmostEqual(pos1,pos2,2)
            
        difference = abs(sum.pixel_array[1,4,3]*sum.DoseGridScaling - 3.006)
        delta = 0.01
        self.assertTrue(difference < delta,
                "difference: %s is not less than %s" % (difference, delta))
        
        difference = abs(sum.pixel_array[1,1,1]*sum.DoseGridScaling - 2.9)
        self.assertTrue(difference < delta,
                "difference: %s is not less than %s" % (difference, delta))
Example #31
 def DcmToPng(self):
     self.d = dicom.read_file(image_path)
     self.Mate = plt.imsave("defalt.png",
                            self.d.pixel_array,
                            cmap=plt.cm.bone)
Example #32
 def __init__(self, template):
     self.RTStruct = pydicom.read_file(template, force=True)
Example #33
    labels_img2[ tmp==1  , 2 ] = opacity * img[tmp==1]
    labels_img2[ tmp==1  , 1 ] = opacity * img[tmp==1]
    labels_img2[ tmp==1  , 0 ] = opacity * img[tmp==1] + (1 - opacity) * 255
    
    skimage.io.imsave(INPUT_FOLDER+pat_id+'_Labels.tiff', labels_img2.astype(np.uint8), plugin='tifffile', compress = 1)

#test_single_patient()
#sys.exit(0)

for patient in patients:
    contours = {}
    for subdir, dirs, files in os.walk(patient):
        # print("Folders: ",subdir,dirs)
        dcms = glob.glob(os.path.join(subdir, "*.dcm"))
        if len(dcms) == 1:
            structure = dicom.read_file(os.path.join(subdir, files[0]))
            contours = read_structure(structure)
        elif len(dcms) > 1:
            slices = [dicom.read_file(dcm,force=True) for dcm in dcms]

    pat_id = slices[0].PatientID        
    print ("Patient ID : ",pat_id)

    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
    image = np.stack([s.pixel_array for s in slices], axis=-1)
    #print contours
    if(len(contours) !=0 ):
        label, colors = get_mask(contours, slices)
    
    image = image.astype(np.int16)
    image = image* slices[0].RescaleSlope + slices[0].RescaleIntercept
Example #34
import cv2
import numpy as np
import pydicom
cotourpngpath = r'C:\Users\fafafa\Desktop\wuda\result\8512contourimage'
contourlist = glob.glob(os.path.join(cotourpngpath,'*.png'))
contourlist.sort()
origindcmpath = r'C:\Users\fafafa\Desktop\wuda\result\8dcm'
originlist = glob.glob(os.path.join(origindcmpath,'*.dcm'))
originlist.sort()

preddcmpath = r'C:\Users\fafafa\Desktop\wuda\result\8512imagedcm'
predlist = glob.glob(os.path.join(preddcmpath,'*.dcm'))
predlist.sort()
savecontourpngpath = r'C:\Users\fafafa\Desktop\wuda\result\rg\\'
for slice in range(32):
    origin = pydicom.read_file(originlist[slice])
    pred = pydicom.read_file(predlist[slice])
    contour = cv2.imread(contourlist[slice], 0)
    matrixcontour = np.asarray(contour)
    for i in range(512):
        for j in range(512):
            if matrixcontour[i][j] != 0:
                for x in range(-1, 2):
                    for y in range(-1, 2):
                        if (origin.pixel_array[i + x][j + y] >= 100) & (origin.pixel_array[i + x][j + y] <= 300):
                            pred.pixel_array[i + x][j + y] = 65535
                            print('change', i + x, i + y, pred.pixel_array[i + x][j + y])
                            pred.PixelData = pred.pixel_array.tostring()
    if slice <= 9:  # save the dilated prediction back to the original path
        pred.save_as(savecontourpngpath + "0{}.dcm".format(slice))
    else:
Example #35
# Generate list of all files in directory
file_list = []
for dirname, dirnames, filenames in os.walk(args.in_dir):
    for filename in filenames:
        file_list.append(os.path.abspath(os.path.join(dirname, filename)))

num_files = len(file_list)
print "Creating map for " + str(num_files) + " files."

# Create dict mapping instance numbers to gradient directions, b values, and acquisition times.
file_dict = {}
InstanceNumber_max = 0
zero_based_gradient_index = False  # whether first non-zero gradient is numbered '#0' or '#1'
for file_path in file_list:
    ds = pydicom.read_file(file_path, force='force')
    InstanceNumber = int(ds[0x0020, 0x0013].value)
    if (InstanceNumber > InstanceNumber_max):
        InstanceNumber_max = InstanceNumber
    AcquisitionTime = ds[0x0008, 0x0032].value
    SequenceName = ds[0x0018, 0x0024].value
    #    print InstanceNumber, type(InstanceNumber) # int (after forcing it to be)
    #    print AcquisitionTime, type(AcquisitionTime) # str
    #    print SequenceName, type(SequenceName) # str

    # Try to extract gradient direction and b value from SequenceName.
    # Examples of SequenceName values in the UBC preterm cohort are (including the asterisks):
    # *ep_b0
    # *ep_b600#6
    # *ep_b700#12
    BValue = SequenceName.split('b')[1].split('#')[0]
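For a SequenceName such as '*ep_b600#6' the split above yields the b value '600', and the text after '#' is the gradient index; a minimal sketch of the same parsing on a literal example:

SequenceName = '*ep_b600#6'  # example value taken from the comments above
BValue = SequenceName.split('b')[1].split('#')[0]                       # '600'
gradient = SequenceName.split('#')[1] if '#' in SequenceName else None  # '6'
print(BValue, gradient)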
Example #36
def dicom_to_sql(start_path=None,
                 force_update=False,
                 move_files=True,
                 update_dicom_catalogue_table=True,
                 import_latest_plan_only=True):

    start_time = datetime.now()
    print(str(start_time), 'Beginning import', sep=' ')

    dicom_catalogue_update = []

    # Read SQL configuration file
    abs_file_path = get_settings('import')
    import_settings = parse_settings_file(abs_file_path)

    if start_path:
        abs_file_path = os.path.join(SCRIPT_DIR, start_path)
        import_settings['inbox'] = abs_file_path

    sqlcnx = DVH_SQL()

    file_paths = get_file_paths(import_settings['inbox'])

    for uid in list(file_paths):

        if is_uid_imported(uid):
            print("The UID from the following files is already imported.")
            if not force_update:
                print(
                    "Must delete content associated with this UID from database before reimporting."
                )
                print(
                    "These files have been moved into the 'misc' folder within your 'imported' folder."
                )
                for file_type in FILE_TYPES:
                    print(file_paths[uid][file_type]['file_path'])
                print("The UID is %s" % uid)
                continue

            else:
                print("Force Update set to True. Processing with import.")
                print(
                    "WARNING: This import may contain duplicate data already in the database."
                )

        dicom_catalogue_update.append(uid)

        # Collect and print the file paths
        plan_file = file_paths[uid]['rtplan']['latest_file']
        struct_file = file_paths[uid]['rtstruct']['latest_file']
        dose_file = file_paths[uid]['rtdose']['latest_file']
        if import_latest_plan_only:
            print("plan file: %s" % plan_file)
        else:
            for f in file_paths[uid]['rtplan']['file_path']:
                print("plan file: %s" % f)
        print("struct file: %s" % struct_file)
        print("dose file: %s" % dose_file)

        # Process DICOM files into Python objects
        plan, beams, dvhs, rxs = [], [], [], []
        mp, ms, md = [], [], []
        if plan_file:
            mp = dicom.read_file(plan_file).ManufacturerModelName.lower()
        if struct_file:
            ms = dicom.read_file(struct_file).ManufacturerModelName.lower()
        if dose_file:
            md = dicom.read_file(dose_file).ManufacturerModelName.lower()

        if 'gammaplan' in "%s %s %s" % (mp, ms, md):
            print(
                "Leksell Gamma Plan is not currently supported. Skipping import."
            )
            continue

        if plan_file and struct_file and dose_file:
            if import_latest_plan_only:
                plan = PlanRow(plan_file, struct_file, dose_file)
                sqlcnx.insert_plan(plan)
            else:
                for f in file_paths[uid]['rtplan']['file_path']:
                    plan = PlanRow(f, struct_file, dose_file)
                    sqlcnx.insert_plan(plan)
        else:
            print(
                'WARNING: Missing complete set of plan, struct, and dose files for uid %s'
                % uid)
            if not force_update:
                print(
                    'WARNING: Skipping this import. If you wish to import an incomplete DICOM set, use Force Update'
                )
                print(
                    'WARNING: The current file will be moved to the misc folder within your imported folder'
                )
                continue

        if plan_file:
            if not hasattr(dicom.read_file(plan_file), 'BrachyTreatmentType'):
                if import_latest_plan_only:
                    beams = BeamTable(plan_file)
                    sqlcnx.insert_beams(beams)
                else:
                    for f in file_paths[uid]['rtplan']['file_path']:
                        sqlcnx.insert_beams(BeamTable(f))
        if struct_file and dose_file:
            dvhs = DVHTable(struct_file, dose_file)
            setattr(dvhs, 'ptv_number', rank_ptvs_by_D95(dvhs))
            sqlcnx.insert_dvhs(dvhs)
        if plan_file and struct_file:
            if import_latest_plan_only:
                rxs = RxTable(plan_file, struct_file)
                sqlcnx.insert_rxs(rxs)
            else:
                for f in file_paths[uid]['rtplan']['file_path']:
                    sqlcnx.insert_rxs(RxTable(f, struct_file))

        # get mrn for folder name, can't assume a complete set of dose, plan, struct files
        mrn = []
        if dose_file:
            mrn = dicom.read_file(dose_file).PatientID
        elif plan_file:
            mrn = dicom.read_file(plan_file).PatientID
        elif struct_file:
            mrn = dicom.read_file(struct_file).PatientID
        if mrn:
            mrn = "".join(x for x in mrn
                          if x.isalnum())  # remove any special characters
        else:
            mrn = 'NoMRN'

        # convert file_paths[uid] into a list of file paths
        if move_files:
            files_to_move = []
            move_types = list(FILE_TYPES) + ['other']
            for file_type in move_types:
                files_to_move.extend(file_paths[uid][file_type]['file_path'])

            new_folder = os.path.join(import_settings['imported'], mrn)
            move_files_to_new_path(files_to_move, new_folder)

        if plan_file:
            plan_file = os.path.basename(plan_file)
        if struct_file:
            struct_file = os.path.basename(struct_file)
        if dose_file:
            dose_file = os.path.basename(dose_file)

        if update_dicom_catalogue_table:
            if not import_latest_plan_only:
                plan_file = ', '.join([
                    os.path.basename(fp)
                    for fp in file_paths[uid]['rtplan']['file_path']
                ])
            update_dicom_catalogue(mrn, uid, new_folder, plan_file,
                                   struct_file, dose_file)

    # Move remaining files, if any
    if move_files:
        move_all_files(import_settings['imported'], import_settings['inbox'])
        remove_empty_folders(import_settings['inbox'])

    sqlcnx.close()

    end_time = datetime.now()
    print(str(end_time), 'Import complete', sep=' ')

    total_time = end_time - start_time
    seconds = total_time.seconds
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    if h:
        print("This import took %dhrs %02dmin %02dsec to complete" % (h, m, s))
    elif m:
        print("This import took %02dmin %02dsec to complete" % (m, s))
    else:
        print("This import took %02dsec to complete" % s)
Example #37
def read_rtstruct_contour_data(rtstruct_file, roinames=None):
    """Read dicom RTSTRUCT contour data

  Parameters
  ----------
  rtstruct_file : str
    a dicom RTSTRUCT file

  roinames : list of strings
    ROINames to read - default None means read all ROIs

  Returns
  -------
  list of length n
    with the contour data of all n ROIs saved in the RTSTRUCT.
    Every element of the list is a dictionary with several keys.
    The actual contour points are saved in the key 'contour_points'
    which itself is a list of (x,3) numpy arrays containing the coordinates
    of the 2D planar contours.

  Note
  ----
  The most important dicom fields for RTSTRUCT are:
  -FrameOfReferenceUID
  -ROIContourSequence  (1 element for every ROI)
    -ReferenceROINumber
    -ContourSequence   (1 element for every 2D contour in a given ROI)
      -ContourData
      -ContourGeometricType
  """
    ds = pydicom.read_file(rtstruct_file)

    # get the Frame of Reference UID
    FrameOfReferenceUID = [
        x.FrameOfReferenceUID for x in ds.ReferencedFrameOfReferenceSequence
    ]

    ctrs = ds.ROIContourSequence

    contour_data = []

    allroinames = [
        x.ROIName if 'ROIName' in x else '' for x in ds.StructureSetROISequence
    ]

    if roinames is None: roinames = allroinames.copy()

    for roiname in roinames:
        i = allroinames.index(roiname)

        contour_seq = ctrs[i].ContourSequence
        contour_points = []
        contour_orientations = []

        for cs in contour_seq:
            cp = np.array(cs.ContourData).reshape(-1, 3)
            if cp.shape[0] >= 3:
                contour_points.append(cp)
                contour_orientations.append(contour_orientation(cp[:, :2]))

        if len(contour_points) > 0:
            cd = {
                'contour_points': contour_points,
                'contour_orientations': contour_orientations,
                'GeometricType': cs.ContourGeometricType,
                'Number': ctrs[i].ReferencedROINumber,
                'FrameOfReferenceUID': FrameOfReferenceUID
            }

            for key in [
                    'ROIName', 'ROIDescription', 'ROINumber',
                    'ReferencedFrameOfReferenceUID', 'ROIGenerationAlgorithm'
            ]:
                if key in ds.StructureSetROISequence[i]:
                    cd[key] = getattr(ds.StructureSetROISequence[i], key)

            contour_data.append(cd)

    return contour_data
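A hedged usage sketch (the RTSTRUCT path is hypothetical):

rois = read_rtstruct_contour_data('/path/to/rtstruct.dcm')
for roi in rois:
    print(roi.get('ROIName'), len(roi['contour_points']), 'planar contours')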
Example #38
def getChaosDataset():
    base = 'CHAOS_Train_Sets/Train_Sets/MR'
    patients = os.listdir('CHAOS_Train_Sets/Train_Sets/MR')
    strucs = []
    labels = []
    liver = [55, 70]
    Rkidney = [110, 135]  #,126]
    Lkidney = [175, 200]  #,189]
    spleen = [240, 255]  #,252]
    #BR=0
    for idx in range(len(patients)):

        pat = str(patients[idx])

        typeMR2 = ('T2SPIR')

        patiType = os.path.join(base, pat, typeMR2)
        imgs = os.path.join(patiType, 'DICOM_anon')
        ground = os.path.join(patiType, 'Ground')
        names = os.listdir(imgs)
        names = np.sort(names)
        names_g = os.listdir(ground)
        names_g = np.sort(names_g)

        struct = np.zeros((len(names), 256, 256))
        label = np.zeros((len(names), 256, 256))

        for jdx in range(len(names)):
            time = names[jdx]
            ann = names_g[jdx]

            OGimagePath = os.path.join(imgs, time)
            ann_path = os.path.join(ground, ann)
            annotation = io.imread(ann_path)

            heigth, width = annotation.shape
            if (heigth != 256 or width != 256):
                annotation = cv2.resize(annotation, (256, 256))
            transformedAnnotation = np.zeros((256, 256))
            for kdx in range(256):
                for ldx in range(256):
                    if (annotation[kdx, ldx] >= liver[0]
                            and annotation[kdx, ldx] <= liver[1]):
                        transformedAnnotation[kdx, ldx] = 1
                    elif (annotation[kdx, ldx] >= Rkidney[0]
                          and annotation[kdx, ldx] <= Rkidney[1]):
                        transformedAnnotation[kdx, ldx] = 2
                    elif (annotation[kdx, ldx] >= Lkidney[0]
                          and annotation[kdx, ldx] <= Lkidney[1]):
                        transformedAnnotation[kdx, ldx] = 3
                    elif (annotation[kdx, ldx] >= spleen[0]
                          and annotation[kdx, ldx] <= spleen[1]):
                        transformedAnnotation[kdx, ldx] = 4
                    else:
                        transformedAnnotation[kdx, ldx] = 0

            heigth, width = transformedAnnotation.shape

            ds = pydicom.read_file(OGimagePath)
            mapp = ds.pixel_array

            heigth, width = mapp.shape
            if (heigth != 256 or width != 256):
                mapp = cv2.resize(mapp, (256, 256))

            mapp = (mapp)

            struct[jdx, :, :] = mapp.astype('double')
            label[jdx, :, :] = transformedAnnotation

        strucs.append(struct)
        labels.append(label)
    with open('dataSet_network.pickle', 'wb') as handle:
        pickle.dump([strucs, labels], handle, protocol=pickle.HIGHEST_PROTOCOL)
Example #39
import pydicom
import numpy as np
import matplotlib.pyplot as plt
import sys
import glob

files = []
print('glob: {}'.format(sys.argv[1]))
for fname in glob.glob(sys.argv[1], recursive=False):
    print('loading: {}'.format(fname))
    files.append(pydicom.read_file(fname))

print("file count: {}".format(len(files)))

# skip files with no SliceLocation (e.g. scout views)
slices = []
skipcount = 0
for f in files:
    if hasattr(f, 'SliceLocation'):
        slices.append(f)
    else:
        skipcount = skipcount + 1

print("skipped, no SliceLocation: {}".format(skipcount))

# ensure they are in correct order
slices = sorted(slices, key=lambda s: s.SliceLocation)

# pixel aspects, assuming all slices are the same
ps = slices[0].PixelSpacing
ss = slices[0].SliceThickness
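A hedged continuation of this sketch: use the spacings read above to compute aspect ratios and stack the sorted slices into a 3D array:

ax_aspect = ps[1] / ps[0]
sag_aspect = ps[1] / ss
cor_aspect = ss / ps[0]

img_shape = list(slices[0].pixel_array.shape) + [len(slices)]
img3d = np.zeros(img_shape)
for i, s in enumerate(slices):
    img3d[:, :, i] = s.pixel_array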
Example #40
def generate():
    model = load_model('./savedmodel/unet_lung_seg.hdf5',
                       custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef})
    for split in ["val","test","train"]:
        for path in ['D:/dataset/COVID/'+split+'/','D:/dataset/JPG/'+split+'/NORMAL/','D:/dataset/JPG/' + split + '/PNEUMONIA/','D:/dataset/JPG/' + split + '/VIRUS/']:
            makedirs(path)
            if "COVID" in path:
                for root, dirs, files in os.walk(path):
                    for f in sorted(files):
                        print(f)
                        ds = pydicom.read_file(os.path.join(root, f))
                        try:
                            if ds[0x0028, 0x0004].value == "MONOCHROME1":
                                # print(ds.pixel_array.dtype)
                                h = np.invert(ds.pixel_array)
                                small = np.min(h)
                                high = np.max(h)
                                image = (h - small) / (high - small)
                            else:
                                h=ds.pixel_array
                                print(ds[0x0028, 0x0004].value ,ds[0x0028, 0x1050].value,ds[0x0028, 0x1051].value)
                                h[h<=(ds[0x0028, 0x1050].value-ds[0x0028, 0x1051].value/2)]=0
                                h[h>=(ds[0x0028, 0x1050].value + ds[0x0028, 0x1051].value / 2)] = ds[0x0028, 0x1050].value + ds[0x0028, 0x1051].value / 2
                                image = (h) / (ds[0x0028, 0x1050].value + ds[0x0028, 0x1051].value / 2)
                        except:
                            if ds[0x0028, 0x0004].value=="MONOCHROME1":
                                h=np.invert(ds.pixel_array)
                            else:
                                h=ds.pixel_array
                            small = np.min(h)
                            high = np.max(h)
                            image = (h - small) / (high - small)

                        # cv2.imshow("img", image)
                        # cv2.waitKey(0)
                        # image=image*255

                        # image = cv2.imread(path, 0)
                        image = cv2.resize(image, (512, 512))
                        image = np.array(image).reshape(1, 512, 512, 1).astype(np.float32)
                        preds = model.predict(image)
                        #
                        # plt.figure(figsize=(30, 10))
                        # plt.subplot(1, 3, 1)
                        # plt.imshow(np.squeeze(image))
                        # plt.xlabel("Base Image")
                        # plt.subplot(1, 3, 2)
                        # plt.imshow(np.squeeze(preds))
                        # print(np.squeeze(preds))


                        # gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
                        #
                        ret, binary = cv2.threshold(np.squeeze(preds), 0.5, 1, cv2.THRESH_BINARY)
                        img = ndimage.binary_fill_holes(binary).astype(np.uint8)
                        # img=ndimage.binary_fill_holes(np.squeeze(preds)).astype(np.uint8)
                        img[img>0]=255
                        img=img.astype(np.uint8)
                        emptyimage=np.zeros((512,512))
                        # emptyimage=np.zeros((512,512)).astype(np.uint8)
                        # ret, binary = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
                        contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                        # cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
                        # cv2.imshow("img", img)
                        # cv2.waitKey(0)
                        corrd={}
                        for i in range(0,len(contours)):
                            corrd[cv2.contourArea(contours[i])]=i
                        sorteddict=sorted(corrd.items(), key = lambda kv:(kv[0], kv[1]),reverse=True)
                        # print(sorteddict[0][1],sorteddict[1][1])
                        try:
                            newcontours=[contours[sorteddict[0][1]],contours[sorteddict[1][1]]]
                        except:
                            newcontours = [contours[sorteddict[0][1]]]
                        cv2.drawContours(emptyimage, newcontours, -1, (1,1,1), -1)
                        # cv2.imshow("img", emptyimage)
                        # cv2.waitKey(0)





                        # plt.xlabel("Mask")
                        # plt.subplot(1, 3, 3)
                        # for i in range(0,emptyimage.shape[0]):
                        #     for j in range(emptyimage.shape[1]):
                        #         if emptyimage[i][j]!=0 and  emptyimage[i][j]!=1:
                        #             print(emptyimage[i][j])
                        final=np.squeeze(image)*emptyimage*255
                        storepath='D:/dataset/Segmentation/' + split + '/COVID/'
                        makedirs(storepath)
                        cv2.imwrite(storepath + '' + f + '.png', final)
                        # plt.imshow(final)
                        # plt.xlabel("Result")
                        # plt.show()
            else:
                for root, dirs, files in os.walk(path):
                    for f in sorted(files):
                        image = cv2.imread(os.path.join(root, f), 0)
                        image = cv2.resize(image, (512, 512))
                        image = np.array(image).reshape(1, 512, 512, 1).astype(np.float32)
                        preds = model.predict(image)
                        #
                        # plt.figure(figsize=(30, 10))
                        # plt.subplot(1, 3, 1)
                        # plt.imshow(np.squeeze(image))
                        # plt.xlabel("Base Image")
                        # plt.subplot(1, 3, 2)
                        # plt.imshow(np.squeeze(preds))
                        # print(np.squeeze(preds))

                        # gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
                        #
                        img = ndimage.binary_fill_holes(np.squeeze(preds)).astype(np.uint8)
                        img[img > 0] = 255
                        img = img.astype(np.uint8)
                        emptyimage = np.zeros((512, 512))
                        # emptyimage=np.zeros((512,512)).astype(np.uint8)
                        # ret, binary = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
                        contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                        # cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
                        # cv2.imshow("img", img)
                        # cv2.waitKey(0)
                        corrd = {}
                        for i in range(0, len(contours)):
                            corrd[cv2.contourArea(contours[i])] = i
                        sorteddict = sorted(corrd.items(), key=lambda kv: (kv[0], kv[1]), reverse=True)
                        # print(sorteddict[0][1],sorteddict[1][1])
                        try:
                            newcontours = [contours[sorteddict[0][1]], contours[sorteddict[1][1]]]
                        except:
                            newcontours = [contours[sorteddict[0][1]]]
                        cv2.drawContours(emptyimage, newcontours, -1, (1, 1, 1), -1)
                        # cv2.imshow("img", emptyimage)
                        # cv2.waitKey(0)

                        # plt.xlabel("Mask")
                        # plt.subplot(1, 3, 3)
                        # for i in range(0,emptyimage.shape[0]):
                        #     for j in range(emptyimage.shape[1]):
                        #         if emptyimage[i][j]!=0 and  emptyimage[i][j]!=1:
                        #             print(emptyimage[i][j])
                        final = np.squeeze(image) * emptyimage
                        storepath=path.replace("JPG","Segmentation")
                        makedirs(storepath)
                        cv2.imwrite(storepath+'' + f + '.png', final)
Example #41
def pydicom_read_image(filename):
    return get_pixels_hu(pydicom.read_file(filename))
Example #42
        os.makedirs(pathout)
    if not os.path.exists(pathoutTBI):
        os.makedirs(pathoutTBI)
    num_files = len([
        f for f in os.listdir(OriginalPath)
        if os.path.isfile(os.path.join(OriginalPath, f))
    ])
    print("That folder has " + str(num_files) + " total files")
else:
    print("Paitent folder " + var + " dose not exist!")
    exit()

for root, dirs, filenames in os.walk(OriginalPath):
    for f in filenames:
        filepath = OriginalPath + f
        plan = pydicom.read_file(filepath)
        if ("scout" in plan.SeriesDescription):
            num_files = num_files - 1
            print("Scout file (" + f +
                  ") removed from folder, new total for folder is " +
                  str(num_files) + " files")
            os.remove(filepath)

coronal_plane = []
coronal_grid = numpy.zeros((num_files, 4, 512))
i = 0
fcount = 0
head_count = 0
feet_count = 0
hfound = 0
Example #43
    # gather test DICOM images
    test_images = glob.glob(os.path.join(args.test_dir, '*.dcm'))

    # create the outputdir if it doesn't exist already
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    # dictionary for submission file
    submit_dict = {'patientId': [], 'PredictionString': []}

    for batch in tqdm(batchify(test_images, args.batch_size)):
        # create a numpy array of the current batch
        image_nps = []
        for image in batch:
            dcm = pydicom.read_file(image)
            image_nps.append(load_dcm_into_numpy_array(dcm))

        # run the inference on the batch
        output = run_inference_for_image_batch(np.stack(image_nps),
                                               frozen_graph)

        # draw the detection boxes
        # TODO: draw the ground truth boxes if available
        for i in range(args.batch_size):
            instance_masks = output.get('detection_masks')
            if instance_masks:
                instance_masks = instance_masks[i]
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_nps[i],
                output['detection_boxes'][i],
Example #44
def parse_single_dcm(fn):
    d = pydicom.read_file(fn)
    output = extract_metadata(d)
    df = pd.DataFrame.from_dict(output, orient='index')
    df.columns = ['value']
    return df
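A hedged usage sketch (the path is hypothetical; extract_metadata is defined elsewhere in the same project):

df = parse_single_dcm('/path/to/scan.dcm')
print(df.head())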
Example #45
    def _run_interface(self, runtime):

        list_dicom = sorted(glob.glob(self.inputs.dicom_folder + '/*'))
        multivol = self.inputs.multivol
        _, out_name, _ = split_filename(self.inputs.dicom_folder)
        ped = ''
        phase_offset = ''
        self.dict_output = {}
        dwi_directions = None

        try:
            phase_offset, ped = self.get_phase_encoding_direction(
                list_dicom[0])
        except KeyError:
            pass  # image does not have ped info in the header

        with open(list_dicom[0], 'rb') as f:
            for line in f:
                try:
                    line = line[:-1].decode('utf-8')
                except UnicodeDecodeError:
                    continue
                if 'TotalScan' in line:
                    total_duration = line.split('=')[-1].strip()
                    if not multivol:
                        real_duration = total_duration
                elif 'alTR[0]' in line:
                    tr = float(line.split('=')[-1].strip()) / 1000000
                elif ('SliceArray.asSlice[0].dInPlaneRot' in line
                      and (not phase_offset or not ped)):
                    if len(line.split('=')) > 1:
                        phase_offset = float(line.split('=')[-1].strip())
                        if (np.abs(phase_offset) > 1
                                and np.abs(phase_offset) < 3):
                            ped = 'ROW'
                        elif (np.abs(phase_offset) < 1
                              or np.abs(phase_offset) > 3):
                            ped = 'COL'
                            if np.abs(phase_offset) > 3:
                                phase_offset = -1
                            else:
                                phase_offset = 1
                elif 'lDiffDirections' in line:
                    dwi_directions = float(line.split('=')[-1].strip())
        if multivol:
            if dwi_directions:
                n_vols = dwi_directions
            else:
                n_vols = len(list_dicom)
            real_duration = n_vols * tr

        hd = pydicom.read_file(list_dicom[0])
        try:
            start_time = str(hd.AcquisitionTime)
        except AttributeError:
            try:
                start_time = str(hd.AcquisitionDateTime)[8:]
            except AttributeError:
                raise Exception('No acquisition time found for this scan.')
        self.dict_output['start_time'] = str(start_time)
        self.dict_output['tr'] = tr
        self.dict_output['total_duration'] = str(total_duration)
        self.dict_output['real_duration'] = str(real_duration)
        self.dict_output['ped'] = ped
        self.dict_output['pe_angle'] = str(phase_offset)
        keys = [
            'start_time', 'tr', 'total_duration', 'real_duration', 'ped',
            'pe_angle'
        ]
        with open('scan_header_info.txt', 'w') as f:
            f.write(str(out_name) + '\n')
            for k in keys:
                f.write(k + ' ' + str(self.dict_output[k]) + '\n')
            f.close()
        if self.inputs.reference:
            os.mkdir('reference_motion_mats')
            np.savetxt('reference_motion_mats/reference_motion_mat.mat',
                       np.eye(4))
            np.savetxt('reference_motion_mats/reference_motion_mat_inv.mat',
                       np.eye(4))

        return runtime
Example #46
    def exportData(self):
        if self.inputDirectory:
            self.outputDirectory = QFileDialog.getSaveFileName(
                None, "Save file", "", "Excel (*.xlsx)")
            files = os.listdir(self.inputDirectory)
            dfAll = pd.DataFrame(columns=[
                'Acquisition Date', 'kVp', 'Exposure', 'Grid',
                'Anode Target Material', 'Detector ID',
                'Date of Last Detector Calibration', 'Filter Material LT'
            ])
            for file in files:
                if file.endswith(".dcm"):
                    ds = dicom.read_file(self.inputDirectory + '/' + file,
                                         force=True)
                    ImageType = ' | '.join(filter(None, ds.ImageType))
                    SoftwareVersions = ''.join(
                        filter(None, ds.SoftwareVersions))
                    attribut = [
                        'AccessionNumber', 'AcquisitionContextSequence',
                        'AcquisitionDate', 'AcquisitionDeviceProcessingCode',
                        'AcquisitionDeviceProcessingDescription',
                        'AcquisitionNumber', 'AcquisitionTime',
                        'AnatomicRegionSequence', 'AnodeTargetMaterial',
                        'BitsAllocated', 'BitsStored', 'BodyPartExamined',
                        'BodyPartThickness', 'BreastImplantPresent',
                        'BurnedInAnnotation', 'CalibrationImage', 'Columns',
                        'CompressionForce', 'ContentDate', 'ContentTime',
                        'ContrastBolusAgent', 'DateOfLastDetectorCalibration',
                        'DerivationDescription',
                        'DetectorActivationOffsetFromExposure',
                        'DetectorActiveDimensions', 'DetectorActiveOrigin',
                        'DetectorActiveShape', 'DetectorActiveTime',
                        'DetectorBinning', 'DetectorConditionsNominalFlag',
                        'DetectorConfiguration', 'DetectorDescription',
                        'DetectorElementPhysicalSize',
                        'DetectorElementSpacing', 'DetectorID', 'DetectorMode',
                        'DetectorPrimaryAngle', 'DetectorSecondaryAngle',
                        'DetectorTemperature', 'DetectorTimeSinceLastExposure',
                        'DetectorType', 'DeviceSerialNumber',
                        'DistanceSourceToDetector', 'DistanceSourceToEntrance',
                        'DistanceSourceToPatient', 'EntranceDose',
                        'EntranceDoseInmGy',
                        'EstimatedRadiographicMagnificationFactor', 'Exposure',
                        'ExposureControlMode',
                        'ExposureControlModeDescription', 'ExposureInuAs',
                        'ExposureStatus', 'ExposureTime', 'ExposureTimeInuS',
                        'ExposuresOnDetectorSinceLastCalibration',
                        'ExposuresOnDetectorSinceManufactured',
                        'FieldOfViewDimensions', 'FieldOfViewHorizontalFlip',
                        'FieldOfViewOrigin', 'FieldOfViewRotation',
                        'FieldOfViewShape', 'FilterMaterial',
                        'FilterThicknessMaximum', 'FilterThicknessMinimum',
                        'FocalSpots', 'Grid', 'HighBit', 'ImageComments',
                        'ImageLaterality', 'ImageType', 'ImagerPixelSpacing',
                        'InstanceNumber', 'InstitutionAddress',
                        'InstitutionName', 'InstitutionalDepartmentName',
                        'KVP', 'LossyImageCompression', 'Manufacturer',
                        'ManufacturerModelName', 'Modality', 'OrganDose',
                        'OrganExposed', 'PartialView', 'PatientAge',
                        'PatientBirthDate', 'PatientID', 'PatientName',
                        'PatientOrientation', 'PatientSex',
                        'PerformedProcedureStepDescription',
                        'PerformedProcedureStepID',
                        'PerformedProcedureStepStartDate',
                        'PerformedProcedureStepStartTime',
                        'PhotometricInterpretation', 'PixelData',
                        'PixelIntensityRelationship',
                        'PixelIntensityRelationshipSign',
                        'PixelRepresentation', 'PositionerPrimaryAngle',
                        'PositionerSecondaryAngle', 'PositionerType',
                        'PresentationIntentType', 'PresentationLUTShape',
                        'ProtocolName', 'QualityControlImage',
                        'RectificationType',
                        'ReferencedPerformedProcedureStepSequence',
                        'ReferringPhysicianName', 'RelativeXRayExposure',
                        'RequestAttributesSequence',
                        'RequestedProcedureDescription', 'RequestingPhysician',
                        'RescaleIntercept', 'RescaleSlope', 'RescaleType',
                        'Rows', 'SOPClassUID', 'SOPInstanceUID',
                        'SamplesPerPixel', 'SeriesDate', 'SeriesDescription',
                        'SeriesInstanceUID', 'SeriesNumber', 'SeriesTime',
                        'ShutterLeftVerticalEdge',
                        'ShutterLowerHorizontalEdge',
                        'ShutterRightVerticalEdge', 'ShutterShape',
                        'ShutterUpperHorizontalEdge', 'SoftwareVersions',
                        'SpecificCharacterSet', 'StationName', 'StudyDate',
                        'StudyDescription', 'StudyID', 'StudyInstanceUID',
                        'StudyTime', 'TableAngle', 'TableType',
                        'TimeOfLastDetectorCalibration', 'TomoLayerHeight',
                        'ViewCodeSequence', 'ViewPosition',
                        'WindowCenterWidthExplanation', 'XRayTubeCurrent',
                        'XRayTubeCurrentInuA'
                    ]
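                    # Set any missing header attribute to None so every exported row has the same columns.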
                    for at in attribut:
                        if not hasattr(ds, at):
                            setattr(ds, at, None)
                    dataDicom = {
                        'Implementation Version Name':
                        [ds.file_meta.ImplementationVersionName],
                        'Image Type': [ImageType],
                        'Acquisition Date': [ds.AcquisitionDate],
                        'Acquisition Time': [ds.AcquisitionTime],
                        'Modality': [ds.Modality],
                        'Presentation Intent Type':
                        [ds.PresentationIntentType],
                        'Manufacturer': [ds.Manufacturer],
                        'Institution Name': [ds.InstitutionName],
                        'Manufacturer Model Name': [ds.ManufacturerModelName],
                        'Patient Name': [ds.PatientName],
                        'kVp': [ds.KVP],
                        'Device Serial Number': [ds.DeviceSerialNumber],
                        'Software Version(s)': [SoftwareVersions],
                        'Distance Source to Detector':
                        [ds.DistanceSourceToDetector],
                        'Distance Source to Patient':
                        [ds.DistanceSourceToPatient],
                        'Field of View Shape': [ds.FieldOfViewShape],
                        'Field of View Dimensions(s)':
                        [str(ds.FieldOfViewDimensions)],
                        'Exposure Time': [ds.ExposureTime],
                        'X-ray Tube Current': [ds.XRayTubeCurrent],
                        'Exposure': [ds.Exposure],
                        'Exposure in uAs': [ds.ExposureInuAs],
                        'Rectification Type': [ds.RectificationType],
                        'Imager Pixel Spacing': [str(ds.ImagerPixelSpacing)],
                        'Grid': [ds.Grid],
                        'Focal Spot(s)': [ds.FocalSpots],
                        'Anode Target Material': [ds.AnodeTargetMaterial],
                        'Body Part Thickness': [ds.BodyPartThickness],
                        'Compression Force': [ds.CompressionForce],
                        'Detector Type': [ds.DetectorType],
                        'Detector Configuration': [ds.DetectorConfiguration],
                        'Detector Description': [ds.DetectorDescription],
                        'Detector Mode': [ds.DetectorMode],
                        'Detector ID': [ds.DetectorID],
                        'Date of Last Detector Calibration':
                        [ds.DateOfLastDetectorCalibration],
                        'Time of Last Detector Calibration':
                        [ds.TimeOfLastDetectorCalibration],
                        'Exposures on Detector Since Last Calibration':
                        [ds.ExposuresOnDetectorSinceLastCalibration],
                        'Exposures on Detector Since Manufactured':
                        [ds.ExposuresOnDetectorSinceManufactured],
                        'Detector Time Since Last Exposure':
                        [ds.DetectorTimeSinceLastExposure],
                        'Detector Active Time': [ds.DetectorActiveTime],
                        'Detector Activation Offset From Exposure':
                        [ds.DetectorActivationOffsetFromExposure],
                        'Detector Binning': [str(ds.DetectorBinning)],
                        'Detector Element Physical Size':
                        [str(ds.DetectorElementPhysicalSize)],
                        'Detector Element Spacing': [
                            str(ds.DetectorElementSpacing)
                        ],
                        'Detector Active Shape': [ds.DetectorActiveShape],
                        'Detector Active Dimension(s)': [
                            str(ds.DetectorActiveDimensions)
                        ],
                        'Filter Material LT': [ds.FilterMaterial],
                        'Filter Thickness Minimum': [
                            ds.FilterThicknessMinimum
                        ],
                        'Filter Thickness Maximum': [
                            ds.FilterThicknessMaximum
                        ],
                        'Exposure Control Mode': [ds.ExposureControlMode],
                        'Exposure Control Mode Description': [
                            ds.ExposureControlModeDescription
                        ],
                        'Photometric Interpretation': [
                            ds.PhotometricInterpretation
                        ],
                        'Rows': [ds.Rows],
                        'Columns': [ds.Columns],
                        'Bits Allocated': [ds.BitsAllocated],
                        'Bits Stored': [ds.BitsStored],
                        'High Bit': [ds.HighBit],
                        'Pixel Intensity Relationship': [
                            ds.PixelIntensityRelationship
                        ],
                        'Distance Source to Entrance': [
                            ds.DistanceSourceToEntrance
                        ],
                        'Organ Dose': [ds.OrganDose],
                        'Entrance Dose in mGy': [ds.EntranceDoseInmGy]
                    }
                    df = pd.DataFrame(dataDicom)
                    dfAll = pd.concat([dfAll, df], ignore_index=True)  # DataFrame.append was removed in pandas 2.0

            dfAll.to_excel(self.outputDirectory[0], index=False, header=True)
            self.show_popup('Process complete')
        else:
            self.show_popup('No folder has been selected')
Ejemplo n.º 47
0
def get_file_paths(start_path):
    print('Collecting DICOM file paths')
    f = []
    for root, dirs, files in os.walk(start_path, topdown=False):
        for name in files:
            f.append(os.path.join(root, name))

    # Collect all dicom files by UID, separate non-dicom files into misc
    file_paths = {}
    for file_path in f:
        try:
            dicom_file = dicom.read_file(file_path)
        except Exception:
            dicom_file = False

        if dicom_file:
            uid = dicom_file.StudyInstanceUID
            file_type = dicom_file.Modality.lower(
            )  # (rtplan, rtstruct, rtdose)
            timestamp = os.path.getmtime(file_path)

            if uid not in list(file_paths):
                file_paths[uid] = {
                    'rtplan': {
                        'file_path': [],
                        'timestamp': [],
                        'latest_file': []
                    },
                    'rtstruct': {
                        'file_path': [],
                        'timestamp': [],
                        'latest_file': []
                    },
                    'rtdose': {
                        'file_path': [],
                        'timestamp': [],
                        'latest_file': []
                    },
                    'other': {
                        'file_path': [],
                        'timestamp': []
                    }
                }

            if file_type not in FILE_TYPES:
                file_type = 'other'

            file_paths[uid][file_type]['file_path'].append(file_path)
            file_paths[uid][file_type]['timestamp'].append(timestamp)

    for uid in list(file_paths):
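        # Keep the most recently modified file of each RT type for this study.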
        for file_type in FILE_TYPES:
            latest_index, latest_time = [], []
            for i, ts in enumerate(file_paths[uid][file_type]['timestamp']):
                if not latest_time or ts > latest_time:
                    latest_index, latest_time = i, ts
            if isinstance(latest_index, int):
                file_paths[uid][file_type]['latest_file'] = file_paths[uid][
                    file_type]['file_path'][latest_index]
            else:
                file_paths[uid][file_type]['latest_file'] = []
    print('DICOM file paths collected')
    return file_paths
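
FILE_TYPES is referenced but not defined in this excerpt; a minimal usage sketch, assuming it is the tuple of RT modalities handled above (the directory path below is hypothetical):

FILE_TYPES = ('rtplan', 'rtstruct', 'rtdose')  # assumed constant, not shown in the excerpt

paths = get_file_paths('/path/to/dicom/export')  # hypothetical directory
for uid, groups in paths.items():
    # Print the latest RT plan found for each study UID
    print(uid, groups['rtplan']['latest_file'])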
Ejemplo n.º 48
0
                if 'SeriesDescription' not in series:
                    series.SeriesDescription = "N/A"
                print(" " * 8 + "Series {0.SeriesNumber}:  {0.Modality}: {0.SeriesDescription}"
                      " ({1} image{2})".format(series, image_count, plural))

                # Open and read something from each image, for demonstration purposes
                # For simple quick overview of DICOMDIR, leave the following out
                print(" " * 12 + "Reading images...")
                image_records = series.children
                image_filenames = [os.path.join(base_dir, *image_rec.ReferencedFileID)
                                   for image_rec in image_records]

                # slice_locations = [pydicom.read_file(image_filename).SliceLocation
                #                   for image_filename in image_filenames]

                datasets = [pydicom.read_file(image_filename)
                            for image_filename in image_filenames]

                patient_names = set(ds.PatientName for ds in datasets)
                patient_IDs = set(ds.PatientID for ds in datasets)

                # List the image filenames
                print("\n" + " " * 12 + "Image filenames:")
                print(" " * 12, end=' ')
                pprint(image_filenames, indent=12)

                # Expect all images to have same patient name, id
                # Show the set of all names, IDs found (should each have one)
                # In python3.4+, must make conversion to str explicit
                print(" " * 12 + "Patient Names in images..: "
                      "{0:s}".format(str(patient_names)))
Ejemplo n.º 49
0
def main(dicom_file_path: Path, TR: str, TE: str, pat_id: str):
    """
    Curve fitting a series of SE images with respect to variable TE values to generate a T2 map.

    Parameters
    ----------
    dicom_file_path : path
        Path of folder where dicom files reside
    TR : str
        TR value used in SE experiments (unit in milliseconds, should be constant)
    TE : str
        TE value used in SE experiments (unit in milliseconds)
    pat_id : str
        primary key in REGISTRATION table

    Returns
    -------
    png_map_name : str
        File name of T2 map in png format
    dicom_map_path : str
        Path of T2 map in dicom format
    """
    TR = np.fromstring(TR, dtype=int, sep=',')
    TE_acq1 = np.fromstring(TE, dtype=float, sep=',')
    TE_acq2 = np.array([12, 15, 18, 21])
    TR = TR / 1000
    TE_acq1 = TE_acq1 / 1000
    TE_acq2 = TE_acq2 / 1000

    lstFilesDCM = sorted(list(dicom_file_path.glob('*.dcm')))  # collect and sort the DICOM file paths
    ref_image = pydicom.read_file(str(lstFilesDCM[0]))  # Get ref file
    image_size = (int(ref_image.Rows), int(ref_image.Columns), len(lstFilesDCM))  # Load dimensions
    image_data_final = np.zeros(image_size, dtype=ref_image.pixel_array.dtype)

    for filenameDCM in lstFilesDCM:
        ds = pydicom.read_file(str(filenameDCM))  # read the file, data type is uint16 (0~65535)
        image_data_final[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array
    image_data_final = image_data_final.astype(np.float64)  # convert data type
    image_data_final_acq1 = image_data_final[:, :, :7]
    image_data_final_acq2 = image_data_final[:, :, 7:]  # to separate two acqs

    # image_data_final_acq1 = np.divide(image_data_final_acq1, np.amax(image_data_final_acq1))
    # image_data_final_acq2 = np.divide(image_data_final_acq2, np.amax(image_data_final_acq2))
    image_data_final_acq1 = image_data_final_acq1/1000
    image_data_final_acq2 = image_data_final_acq2/1000

    T2_map = np.zeros([image_size[0], image_size[1]])
    # p0 = (0.655477890177557, 0.171186687811562)  # initial guess for parameters
    p0 = (0.65, 0.65)
    # n2 = 35
    # n3 = 64
    # y_data = image_data_final_acq1[n2, n3, :]
    # popt, pcov = curve_fit(T2_sig_eq, TE_acq1, y_data, p0, bounds=([0, 0], [10, 6]))
    #
    # plt.figure()
    # plt.plot(TE_acq1, y_data, label='Data', marker='o')
    # plt.plot(TE_acq1, T2_sig_eq(TE_acq1, popt[0], popt[1]), 'g--')
    # plt.show()
    x_range = np.arange(43, 83, 1)
    y_range = np.arange(70, 84, 1)
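    # Voxels inside this x/y window are fitted against the second acquisition's echo times; all others use the first.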

    for n2 in range(image_size[0]):
        for n3 in range(image_size[1]):
            if n2 in y_range and n3 in x_range:
                y_data = image_data_final_acq2[n2, n3, :]
                popt, pcov = curve_fit(T2_sig_eq, TE_acq2, y_data, p0, bounds=([0, 0], [10, 6]))
            else:
                y_data = image_data_final_acq1[n2, n3, :]
                popt, pcov = curve_fit(T2_sig_eq, TE_acq1, y_data, p0, bounds=([0, 0], [10, 6]))
            T2_map[n2, n3] = popt[1]

    T2_map[T2_map > 2] = 2
    # plt.figure()
    # imshowobj = plt.imshow(T2_map, cmap='hot')
    # imshowobj.set_clim(0, 2)
    # plt.show()

    timestr = time.strftime("%Y%m%d%H%M%S")
    png_map_path = COMS_ANALYZE_PATH / 'static' / 'ana' / 'outputs' / pat_id
    dicom_map_path = SERVER_ANALYZE_PATH / 'outputs' / pat_id / 'T2_map'
    np_map_path = SERVER_ANALYZE_PATH / 'outputs' / pat_id / 'T2_map'

    if not os.path.isdir(png_map_path):
        os.makedirs(png_map_path)
    if not os.path.isdir(dicom_map_path):
        os.makedirs(dicom_map_path)
    if not os.path.isdir(np_map_path):
        os.makedirs(np_map_path)

    np_map_name = 'T2_map' + timestr + '.npy'
    np.save(str(np_map_path) + '/' + np_map_name, T2_map)

    plt.figure(frameon=False)
    plt.imshow(T2_map, cmap='hot')
    cb = plt.colorbar()
    cb.set_label('Time (s)')
    plt.axis('off')
    plt.gca().set_axis_off()
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
                        hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.savefig(str(png_map_path) + '/T2_map' + timestr + '.png', bbox_inches='tight', pad_inches=0)

    png_map_name = "T2_map" + timestr + ".png"

    pixel_array = (T2_map / 2) * 65535
    pixel_array_int = pixel_array.astype(np.uint16)
    ds.PixelData = pixel_array_int.tobytes()  # tostring() is deprecated in NumPy
    ds.save_as(str(dicom_map_path) + '/T2_map' + timestr + '.dcm')

    return png_map_name, dicom_map_path, np_map_name
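
T2_sig_eq is called by curve_fit above but not shown in this excerpt; a minimal mono-exponential sketch, assuming the two fitted parameters are the equilibrium signal and T2 (in the same units as the TE arrays):

import numpy as np

def T2_sig_eq(TE, M0, T2):
    # Hypothetical stand-in for the signal model used above: S(TE) = M0 * exp(-TE / T2)
    return M0 * np.exp(-TE / T2)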
Ejemplo n.º 50
0
def get_array(path):
    ds = pydicom.read_file(path)
    dt = ds.pixel_array
    return dt
Ejemplo n.º 51
0
import skimage
import numpy as np
import os
import pydicom

from skimage import measure, filters

from utils import display


def contours(ds):
    # Find iso-valued contours at the mean intensity and burn them into the pixel data
    level = ds.pixel_array.mean()
    contours = measure.find_contours(ds.pixel_array, level)
    for contour in contours:
        for x, y in contour:
            # find_contours returns float coordinates; cast to int before indexing
            ds.pixel_array[int(x), int(y)] = ds.pixel_array.max()

PathDicom = "/Users/Mariana/Desktop/ILDdatabase/ILD_DB_txtROIs/3"

lstFilesDCM = []
for dirName, subdirList, fileList in os.walk(PathDicom):
    for filename in fileList:
        if ".dcm" in filename.lower():
            lstFilesDCM.append(os.path.join(dirName, filename))

for fileDCM in lstFilesDCM:
    ds = pydicom.read_file(fileDCM)
    contours(ds)
    display(ds)
def testThreshold():
    path2 = 'D:\\MyLab\\GraduateProject\\LIDC-IDRI\\LIDC-IDRI-0256\\01-01-2000-CT  CAP  WO CONT-35073\\4-Recon 3 C-A-P-08658\\dicoms_detail.pkl'
    path1 = 'D:\\MyLab\\GraduateProject\\LIDC-IDRI\\LIDC-IDRI-0256\\01-01-2000-CT  CAP  WO CONT-35073\\4-Recon 3 C-A-P-08658\\annotation_flatten.pkl'
    step1Path = 'D:\\MyLab\\GraduateProject\\Imgs_\\step1-.jpg'
    step2Path2 = 'D:\\MyLab\\GraduateProject\\Imgs_\\step2-5-.jpg'
    patha = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-nosmooth.jpg'
    pathb = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-triangle.jpg'
    pathc = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-ostu.jpg'
    pathd = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-thr128.jpg'

    f = readfile(path1)
    f2 = readfile(path2)
    # print(f)
    for i in f:
        if i.endswith(
                '1.3.6.1.4.1.14519.5.2.1.6279.6001.334276986366937900163861106093'
        ):
            print(f[i])

    for i2 in f2:
        if f2[i2]['InstanceNumber'] == 84:
            print(f2[i2])
            pathdic = f2[i2]['Path']

    # _dcm = pydicom.read_file(pathdic)
    # dicomPixel = _dcm.pixel_array

    # DICOM source (PIXEL)
    _dcm = pydicom.read_file(pathdic)
    dicomPixel = _dcm.pixel_array
    # plt.imshow(dicomPixel,'gray')
    # plt.show()

    # Step1 Get Histogram of JPEG pixel (0-256)
    cv2.imwrite(step1Path, dicomPixel)
    pixel1DImg = cv2.imread(step1Path, 0)

    # Step2 Filtering (SMOOTHING)
    pixelForFilteration = pixel1DImg
    plt.figure("Before Smoothing")
    arr = pixelForFilteration.flatten()
    n, bins, patches = plt.hist(arr,
                                bins=256,
                                density=True,  # `normed` was removed from matplotlib
                                edgecolor='None',
                                facecolor='red')
    plt.show()
    ret, thresh = cv2.threshold(pixelForFilteration, 0, 255,
                                cv2.THRESH_TRIANGLE)
    print('Before smoothing : {ret}'.format(ret=ret))
    ioski.imsave(patha, thresh)
    img_median = cv2.medianBlur(pixelForFilteration, 5)
    cv2.imwrite(step2Path2, img_median)

    # Step3 Optimal binarization
    pixelForBinarization = cv2.imread(step2Path2, 0)
    plt.figure("After Smoothing")
    arr = pixelForBinarization.flatten()
    n, bins, patches = plt.hist(arr,
                                bins=256,
                                density=True,  # `normed` was removed from matplotlib
                                edgecolor='None',
                                facecolor='red')
    plt.show()

    ret, thresh = cv2.threshold(pixelForBinarization, 0, 255,
                                cv2.THRESH_TRIANGLE)
    print('TRIANGLE : {ret}'.format(ret=ret))
    ioski.imsave(pathb, thresh)
    pixelForBinarization = cv2.imread(step2Path2, 0)
    ret, thresh = cv2.threshold(pixelForBinarization, 0, 255, cv2.THRESH_OTSU)
    print('THRESH_OTSU : {ret}'.format(ret=ret))
    ioski.imsave(pathc, thresh)
    pixelForBinarization = cv2.imread(step2Path2, 0)
    ret, thresh = cv2.threshold(pixelForBinarization, 128, 255,
                                cv2.THRESH_BINARY)
    print('THR128 : {ret}'.format(ret=ret))
    ioski.imsave(pathd, thresh)
def convert_images(filename, outdir):
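    # Convert one DICOM file to PNG; the optional 128x128 resize relies on a module-level `args` namespace.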
    ds = pydicom.read_file(str(filename))
    img = ds.pixel_array
    if args.resize != '':
        img = cv2.resize(img, (128, 128))
    cv2.imwrite(outdir + filename.split('/')[-1][:-4] + '.png', img)
def dicom_header_extract(zip_file_path):
    """
    Extract the DICOM header from an archive or a single file.

    Args:
        zip_file_path (str): Path to a zip archive of DICOM files, or to a single DICOM file.

    Returns:
        dict: Header fields of the selected DICOM file, as returned by get_pydicom_header.
    """
    # Extract the last file in the zip_file to /tmp/ and read it
    if zipfile.is_zipfile(zip_file_path):
        dcm_list = []
        zip_file = zipfile.ZipFile(zip_file_path)
        num_files = len(zip_file.namelist())
        for n in range((num_files - 1), -1, -1):
            dcm_path = zip_file.extract(zip_file.namelist()[n], "/tmp")
            dcm_tmp = None
            if os.path.isfile(dcm_path):
                try:
                    log.info("reading %s", dcm_path)
                    dcm_tmp = pydicom.read_file(dcm_path)
                    # Here we check for the Raw Data Storage SOP Class, if there
                    # are other pydicom files in the zip_file then we read the next one,
                    # if this is the only class of pydicom in the file, we accept
                    # our fate and move on.
                    if (dcm_tmp.get("SOPClassUID") == "Raw Data Storage"
                            and n != range((num_files - 1), -1, -1)[-1]):
                        continue
                    else:
                        dcm_list.append(dcm_tmp)
                except Exception:
                    pass
            else:
                log.warning("%s does not exist!", dcm_path)
        dcm = dcm_list[-1]
    else:
        log.info("Not a zip_file. Attempting to read %s directly",
                 os.path.basename(zip_file_path))
        dcm = pydicom.read_file(zip_file_path)
        dcm_list = [dcm]
    if not dcm:
        log.warning("dcm is empty!!!")
        os.sys.exit(1)

    # Handle date on dcm
    dcm = dicom_date_handler(dcm)

    # Create pandas object for comparing headers
    df_list = []
    for header in dcm_list:
        tmp_dict = get_pydicom_header(header)
        for key in tmp_dict:
            if type(tmp_dict[key]) == list:
                tmp_dict[key] = str(tmp_dict[key])
            else:
                tmp_dict[key] = [tmp_dict[key]]
        df_tmp = pd.DataFrame.from_dict(tmp_dict)
        df_list.append(df_tmp)
    df_headers = pd.concat(df_list, ignore_index=True, sort=True)

    # File classification
    pydicom_file = {}
    pydicom_file["name"] = os.path.basename(zip_file_path)
    pydicom_file["modality"] = format_string(dcm.get("Modality"))
    pydicom_file["info"] = {"header": {"dicom": {}}}

    # File metadata from pydicom header
    pydicom_file["info"]["header"]["dicom"] = get_pydicom_header(dcm)

    # # Add CSAHeader to DICOM
    # if dcm.get('Manufacturer') == 'SIEMENS':
    #     csa_header = get_csa_header(dcm)
    #     if csa_header:
    #         pydicom_file['info']['header']['dicom']['CSAHeader'] = csa_header

    return pydicom_file["info"]["header"]["dicom"]
Ejemplo n.º 55
0
def extract_CT(CT_path):
    dicom_meta_dictionary = {}
    for current_file in os.listdir(CT_path):
        current_file = pydicom.read_file(os.path.join(CT_path, current_file))

        current_image_position = current_file.ImagePositionPatient
        current_z_location = current_image_position[2]

        intercept = np.float32(current_file.RescaleIntercept)  # np.float was removed from NumPy
        slope = np.float32(current_file.RescaleSlope)

        current_slice_thickness = np.float32(current_file.SliceThickness)
        current_slice_spacing = [np.float32(x) for x in current_file.PixelSpacing]

        current_pixel_data = current_file.PixelData
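        # Infer the stored byte depth from the buffer length, decode PixelData via byte_depth_table (defined elsewhere), then apply the rescale slope/intercept.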
        current_byte_depth = len(current_pixel_data) // (current_file.Rows * current_file.Columns)
        current_pixel_data = np.copy(np.reshape(np.frombuffer(current_pixel_data,
                                                              dtype=byte_depth_table[current_byte_depth],
                                                              count=current_file.Rows * current_file.Columns),
                                                (current_file.Rows, current_file.Columns))).astype(np.float32)
        current_pixel_data = current_pixel_data * slope + intercept

        dicom_meta_dictionary[current_z_location] = {}
        dicom_meta_dictionary[current_z_location]['ImagePositionPatient'] = current_image_position
        dicom_meta_dictionary[current_z_location]['SliceThickness'] = current_slice_thickness
        dicom_meta_dictionary[current_z_location]['PixelSpacing'] = current_slice_spacing
        dicom_meta_dictionary[current_z_location]['PixelData'] = current_pixel_data
        dicom_meta_dictionary[current_z_location]['ByteDepth'] = np.float32

    # generate CT numpy array
    slice_thickness = [v['SliceThickness'] for k, v in dicom_meta_dictionary.items()]
    pixel_spacing_x = [v['PixelSpacing'][0] for k, v in dicom_meta_dictionary.items()]
    pixel_spacing_y = [v['PixelSpacing'][1] for k, v in dicom_meta_dictionary.items()]

    slice_thickness = list(set(slice_thickness))
    pixel_spacing_x = list(set(pixel_spacing_x))
    pixel_spacing_y = list(set(pixel_spacing_y))

    if len(slice_thickness) != 1:
        logger.info(f'Warning! The number of distinct slice_thickness values is not 1!'
                    f' Data path: {CT_path}')

    if len(pixel_spacing_x) != 1 or len(pixel_spacing_y) != 1:
        logger.info(f'Warning! The number of distinct pixel_spacing values is not 1!'
                    f' Data path: {CT_path}')

    slice_thickness = slice_thickness[0]
    pixel_spacing = (pixel_spacing_x[0], pixel_spacing_y[0])

    # get z-position
    z_positions = [k for k, v in dicom_meta_dictionary.items()]
    sorted_z_positions = sorted(z_positions)
    slice_thickness_list = [sorted_z_positions[i] - sorted_z_positions[i - 1] for i in
                            range(1, len(sorted_z_positions))]
    if 'nan' in str(slice_thickness):
        slice_thickness = np.mean(slice_thickness_list)
        logger.info('Using averaged slice thickness {}'.format(slice_thickness))
    pixel_data = [dicom_meta_dictionary[current_z_location]['PixelData'] for current_z_location in sorted_z_positions]
    pixel_data = np.stack(pixel_data, axis=0)
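    # Shift HU values by +1000 so air (-1000 HU) maps to 0 and the volume can be stored as uint16.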

    pixel_data = (pixel_data + 1000)
    pixel_data[pixel_data < 0] = 0

    current_study = {}
    current_study['CT'] = pixel_data.astype(np.uint16)
    current_study['z_positions'] = sorted_z_positions
    current_study['pixel_spacing'] = pixel_spacing
    current_study['slice_thickness'] = slice_thickness
    current_study['offset'] = {'start': dicom_meta_dictionary[sorted_z_positions[0]]['ImagePositionPatient'],
                               'end': dicom_meta_dictionary[sorted_z_positions[-1]]['ImagePositionPatient']}

    return current_study
Ejemplo n.º 56
0
def read_single_pixmap(path):
    dicom_file = pydicom.read_file(path)
    return dicom_file.pixel_array
import pydicom
import os
import numpy
from matplotlib import pyplot, cm
import cv2

PathDicom = "C://Users//arvin//Documents//CNN Project//cleaned"
lstFilesDCM = []  # create an empty list
for dirName, subdirList, fileList in os.walk(PathDicom):
    for filename in fileList:
        if ".dcm" in filename.lower():  # check whether the file's DICOM
            lstFilesDCM.append(os.path.join(dirName,filename))

lstFilesDCM = lstFilesDCM[0:100]

RefDs = pydicom.read_file(lstFilesDCM[0])
ConstPixelDims = (len(lstFilesDCM),368, 368)

# The array is sized based on 'ConstPixelDims'
arraydicom = numpy.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)

def img_resize(img,size):
    desired_size = size
    im = img
    old_size = im.shape[:2]
    ratio = float(desired_size)/max(old_size)
    new_size = tuple([int(x*ratio) for x in old_size])
    im = cv2.resize(im, (new_size[1], new_size[0]))
    delta_w = desired_size - new_size[1]
    delta_h = desired_size - new_size[0]
    top, bottom = delta_h//2, delta_h-(delta_h//2)
Ejemplo n.º 58
0
def get_random_data(annotation_line,
                    input_shape,
                    random=True,
                    max_boxes=20,
                    jitter=.3,
                    hue=.1,
                    sat=1.5,
                    val=1.5,
                    proc_img=True):
    '''random preprocessing for real-time data augmentation'''
    line = annotation_line.split()
    d = pydicom.read_file(line[0])
    image = d.pixel_array.astype('uint8')
    image = Image.fromarray(np.stack((image, ) * 3, -1), 'RGB')
    iw, ih = image.size
    h, w = input_shape
    box = np.array(
        [np.array(list(map(int, box.split(',')))) for box in line[1:]])
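    # Each annotation line is '<dicom path> x_min,y_min,x_max,y_max,class ...', one comma-separated box per token (class id assumed in the last field).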

    if not random:
        # resize image
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255.

        # correct boxes
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes: box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
            box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    print('coockoo')

    # resize image
    new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(
        1 - jitter, 1 + jitter)
    print(new_ar)
    scale = rand(.8, 1.2)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)

    print(new_ar, scale, nh, nw)
    image = image.resize((nw, nh), Image.BICUBIC)

    # place image
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand() < .5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)

    image_data = np.array(image) / 255.
    # distort image
    #     hue = rand(-hue, hue)
    #     sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
    #     val = rand(1, val) if rand()<.5 else 1/rand(1, val)
    #     x = rgb_to_hsv(np.array(image)/255.)
    #     x[..., 0] += hue
    #     x[..., 0][x[..., 0]>1] -= 1
    #     x[..., 0][x[..., 0]<0] += 1
    #     x[..., 1] *= sat
    #     x[..., 2] *= val
    #     x[x>1] = 1
    #     x[x<0] = 0
    #     image_data = hsv_to_rgb(x) # numpy array, 0 to 1

    # correct boxes
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
        if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box

    return image_data, box_data
Ejemplo n.º 59
0
def test_single_patient():
    patient = patients[89]  # Pick a single patient for demo
    print (patient)
    for subdir, dirs, files in os.walk(patient):
        #print subdir
        #print dirs
        #print files
        dcms = glob.glob(os.path.join(subdir, "*.dcm"))
        #print dcms
        if len(dcms) == 1:
            structure = dicom.read_file(os.path.join(subdir, files[0]))
            contours = read_structure(structure)
        elif len(dcms) > 1:
            slices = [dicom.read_file(dcm) for dcm in dcms]
    
    pat_id = slices[0].PatientID        
    print ("Patient ID : ",pat_id)

    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
    image = np.stack([s.pixel_array for s in slices], axis=-1)

    print (image.dtype)
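    # Convert stored pixel values to Hounsfield units using the first slice's rescale slope/intercept.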
    image = image.astype(np.int16)
    image = image* slices[0].RescaleSlope + slices[0].RescaleIntercept

    #print contours
    if(len(contours) !=0 ):
        label, colors = get_mask(contours, slices)

    
    image = np.swapaxes(image,0,2)
    image = np.swapaxes(image,1,2)
    skimage.io.imsave(INPUT_FOLDER+pat_id+'.tiff', image.astype(np.int16), plugin='tifffile', compress = 1)
    if(len(contours) ==0 ):
        sys.exit(0)
    label = (label > 0).astype(np.uint8)*255
    label = np.swapaxes(label,0,2)
    label = np.swapaxes(label,1,2)
    skimage.io.imsave(INPUT_FOLDER+pat_id+'_GT.tiff', label.astype(np.uint8), plugin='tifffile', compress = 1)

    print ("Img shape and # contours :: ",image.shape, len(contours))
    # Plot to check slices, for example 50 to 59
    plt.figure(figsize=(15, 15))
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        plt.imshow(image[ i + 64, ...], cmap="gray",label="%d"%(i+64) )
        plt.contour(label[ i + 64, ...], levels=[0.5, 1.5, 2.5, 3.5, 4.5], colors=colors)
        #plt.imshow(label[..., i + 64], cmap="gray")
    plt.show()

    labels_img2 = np.zeros((image.shape[0], image.shape[1], image.shape[2], 3)) 
    print (image.min(), image.max())
    img = 255*normalize_array(image.astype(np.float32), -500,300)
    print (img.min(), img.max())
    skimage.io.imsave(INPUT_FOLDER+pat_id+'_uc.tiff', img.astype(np.uint8), plugin='tifffile', compress = 1)
    labels_img2[ ... , 0 ] = img; labels_img2[ ... , 1 ] = img; labels_img2[ ... , 2 ] = img
    tmp = np.where( label > 0, 1, 0)
    opacity = 0.5
    print (tmp.shape)
    
    labels_img2[ tmp==1  , 2 ] = opacity * img[tmp==1]
    labels_img2[ tmp==1  , 1 ] = opacity * img[tmp==1]
    labels_img2[ tmp==1  , 0 ] = opacity * img[tmp==1] + (1 - opacity) * 255
    
    skimage.io.imsave(INPUT_FOLDER+pat_id+'_Labels.tiff', labels_img2.astype(np.uint8), plugin='tifffile', compress = 1)
def testMedian():
    path2 = 'D:\\MyLab\\GraduateProject\\LIDC-IDRI\\LIDC-IDRI-0256\\01-01-2000-CT  CAP  WO CONT-35073\\4-Recon 3 C-A-P-08658\\dicoms_detail.pkl'
    path1 = 'D:\\MyLab\\GraduateProject\\LIDC-IDRI\\LIDC-IDRI-0256\\01-01-2000-CT  CAP  WO CONT-35073\\4-Recon 3 C-A-P-08658\\annotation_flatten.pkl'
    step1Path = 'D:\\MyLab\\GraduateProject\\Imgs_\\step1.jpg'
    step2Path = 'D:\\MyLab\\GraduateProject\\Imgs_\\step2-3.jpg'
    step2Path2 = 'D:\\MyLab\\GraduateProject\\Imgs_\\step2-5.jpg'
    step2Path3 = 'D:\\MyLab\\GraduateProject\\Imgs_\\step2-7.jpg'
    step3Path0 = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-0.jpg'
    step3Path = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-3.jpg'
    step3Path2 = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-5.jpg'
    step3Path3 = 'D:\\MyLab\\GraduateProject\\Imgs_\\step3-7.jpg'
    f = readfile(path1)
    f2 = readfile(path2)
    # print(f)
    for i in f:
        if i.endswith(
                '1.3.6.1.4.1.14519.5.2.1.6279.6001.334276986366937900163861106093'
        ):
            print(f[i])

    for i2 in f2:
        if f2[i2]['InstanceNumber'] == 84:
            print(f2[i2])
            pathdic = f2[i2]['Path']

    # _dcm = pydicom.read_file(pathdic)
    # dicomPixel = _dcm.pixel_array

    # DICOM source (PIXEL)
    _dcm = pydicom.read_file(pathdic)
    dicomPixel = _dcm.pixel_array
    plt.imshow(dicomPixel, 'gray')
    plt.show()

    # Step1 Get Histogram of JPEG pixel (0-256)
    cv2.imwrite(step1Path, dicomPixel)
    pixel1DImg = cv2.imread(step1Path, 0)

    # Step2 Filtering (SMOOTHING)
    pixelForFilteration = pixel1DImg
    img_median = cv2.medianBlur(pixelForFilteration, 3)
    cv2.imwrite(step2Path, img_median)
    img_median = cv2.medianBlur(pixelForFilteration, 5)
    cv2.imwrite(step2Path2, img_median)
    img_median = cv2.medianBlur(pixelForFilteration, 7)
    cv2.imwrite(step2Path3, img_median)

    # Step3 Optimal binarization
    pixelForBinarization = cv2.imread(step1Path, 0)
    ret, thresh = cv2.threshold(pixelForBinarization, 0, 255,
                                cv2.THRESH_TRIANGLE)
    ioski.imsave(step3Path0, thresh)
    pixelForBinarization = cv2.imread(step2Path, 0)
    ret, thresh = cv2.threshold(pixelForBinarization, 0, 255,
                                cv2.THRESH_TRIANGLE)
    ioski.imsave(step3Path, thresh)
    pixelForBinarization = cv2.imread(step2Path2, 0)
    ret, thresh = cv2.threshold(pixelForBinarization, 0, 255,
                                cv2.THRESH_TRIANGLE)
    ioski.imsave(step3Path2, thresh)
    pixelForBinarization = cv2.imread(step2Path3, 0)
    ret, thresh = cv2.threshold(pixelForBinarization, 0, 255,
                                cv2.THRESH_TRIANGLE)
    ioski.imsave(step3Path3, thresh)