def main(tgz, newid, outdir=None):
    """Untar *tgz*, replace PatientID and PatientName in every contained
    DICOM file with *newid*, and write a new archive to *outdir*
    (default: <origdir>/newtgz).

    Returns the path of the newly written .tgz archive.
    """
    # Work on a copy in a temp directory so the original archive is untouched.
    tgz = os.path.abspath(tgz)
    origdir, orignme = os.path.split(tgz)
    tmptgz = bg.copy_tmpdir(tgz)
    # un-archive; presumably bg.tar_cmd returns the extraction dir -- confirm in bg
    pth = bg.tar_cmd(tmptgz)
    newdir, exists = bg.make_dir(pth, dirname='dicomfiles')
    startdir = os.getcwd()
    os.chdir(pth)
    dcms = bg.find_dicoms(pth)
    keys = dcms.keys()
    # Skip anything already inside the output directory to avoid re-processing.
    keys = [x for x in keys if not newdir in x]
    for k in keys:
        for dcm in dcms[k]:
            plan = dicom.read_file(dcm)
            plan.PatientID = newid
            plan.PatientName = newid
            _, dcm_name = os.path.split(dcm)
            newdcm = os.path.join(newdir, dcm_name)
            dicom.write_file(newdcm, plan)
    # create tar archive of updated dicoms
    if outdir is None:
        outdir, _ = bg.make_dir(origdir, dirname="newtgz")
    newtgz = os.path.join(outdir, orignme)
    # NOTE(review): shell interpolation -- paths containing spaces or shell
    # metacharacters will break these os.system calls.
    cmd = 'tar cfvz %s  dicomfiles'%(newtgz)
    os.system(cmd)
    os.chdir(startdir)
    print 'removing %s'%pth
    os.system('rm -rf %s'%(pth))
    print 'wrote ', newtgz
    return newtgz
Beispiel #2
0
    def save_as(self, filename, write_like_original=True):
        """Persist this dataset to *filename* on disk.

        :param filename: destination path for the DICOM file
        :param write_like_original: forwarded to dicom.filewriter.write_file;
            see that function for details on this parameter.
        """
        # Delegate serialization to the module-level writer.
        dicom.write_file(filename, self, write_like_original)
Beispiel #3
0
def main(tgz, newid, outdir=None):
    """Untar *tgz*, replace PatientID and PatientName in every contained
    DICOM file with *newid*, and write a new archive to *outdir*
    (default: <origdir>/newtgz).

    Returns the path of the newly written .tgz archive.
    """
    # Copy the archive into a temp directory so the original stays untouched.
    tgz = os.path.abspath(tgz)
    origdir, orignme = os.path.split(tgz)
    tmptgz = bg.copy_tmpdir(tgz)
    # un-archive; presumably bg.tar_cmd returns the extraction dir -- confirm in bg
    pth = bg.tar_cmd(tmptgz)
    newdir, exists = bg.make_dir(pth, dirname='dicomfiles')
    startdir = os.getcwd()
    os.chdir(pth)
    dcms = bg.find_dicoms(pth)
    keys = dcms.keys()
    # Exclude files already inside the output directory.
    keys = [x for x in keys if not newdir in x]
    for k in keys:
        for dcm in dcms[k]:
            plan = dicom.read_file(dcm)
            plan.PatientID = newid
            plan.PatientName = newid
            _, dcm_name = os.path.split(dcm)
            newdcm = os.path.join(newdir, dcm_name)
            dicom.write_file(newdcm, plan)
    # create tar archive of updated dicoms
    if outdir is None:
        outdir, _ = bg.make_dir(origdir, dirname="newtgz")
    newtgz = os.path.join(outdir, orignme)
    # NOTE(review): shell interpolation -- paths with spaces/metachars break this.
    cmd = 'tar cfvz %s  dicomfiles' % (newtgz)
    os.system(cmd)
    os.chdir(startdir)
    print 'removing %s' % pth
    os.system('rm -rf %s' % (pth))
    print 'wrote ', newtgz
    return newtgz
Beispiel #4
0
    def save_as(self, filename, write_like_original=True):
        """Save this dataset as a DICOM file at *filename*.

        :param filename: full path of the file to create
        :param write_like_original: passed through unchanged to
            dicom.filewriter.write_file; consult it for the exact semantics.
        """
        # The dataset itself is the payload handed to the writer.
        dicom.write_file(filename, self, write_like_original)
Beispiel #5
0
    def save_as(self, filename, WriteLikeOriginal=True):
        """Write this dataset out as a DICOM file.

        filename -- full path of the destination file
        WriteLikeOriginal -- forwarded to dicom.filewriter.write_file; see
            that function for what this flag controls.
        """
        # Hand the dataset to the module-level writer for serialization.
        dicom.write_file(filename, self, WriteLikeOriginal)
Beispiel #6
0
    def save_as(self, filename, WriteLikeOriginal=True):
        """Serialize this dataset to disk at *filename*.

        filename -- full path and filename to save the file to
        WriteLikeOriginal -- passed straight to dicom.filewriter.write_file;
            see it for details.
        """
        # Single delegation call; the writer handles all encoding concerns.
        dicom.write_file(filename, self, WriteLikeOriginal)
Beispiel #7
0
 def write(self, outdir='.', print_filenames=False):
     for modality in self.modalityorder:
         for sb in self.seriesbuilders[modality]:
             print modality, sb
             for ds in sb.build():
                 dicom.write_file(os.path.join(outdir, ds.filename), ds)
                 if print_filenames:
                     print ds.filename
Beispiel #8
0
 def write(self, outdir='.', print_filenames=False):
     for modality in self.modalityorder:
         for sb in self.seriesbuilders[modality]:
             print modality, sb
             for ds in sb.build():
                 dicom.write_file(os.path.join(outdir, ds.filename), ds)
                 if print_filenames:
                     print ds.filename
Beispiel #9
0
 def _fix_sitk_bug(self, path, metadata):
     """
     Work around a SimpleITK bug affecting the Z axis of 3D images by
     rewriting SpacingBetweenSlices in place from our own metadata.
     :param path: DICOM file to patch in place
     :param metadata: mapping holding "voxelsize_mm"; index 0 is used
     :return: None
     """
     dataset = dicom.read_file(path)
     # DICOM DS values are capped at 16 characters, hence the slice.
     spacing = str(metadata["voxelsize_mm"][0])[:16]
     dataset.SpacingBetweenSlices = spacing
     dicom.write_file(path, dataset)
Beispiel #10
0
 def _fix_sitk_bug(self, path, metadata):
     """
     Compensate for a SimpleITK bug on the Z axis of 3D images: reload the
     file, overwrite SpacingBetweenSlices from metadata, and save it back.
     :param path: path of the DICOM file to fix
     :param metadata: dict with a "voxelsize_mm" sequence; element 0 is used
     :return: None
     """
     ds = dicom.read_file(path)
     # Truncate to 16 chars to respect the DICOM DS value-length limit.
     ds.SpacingBetweenSlices = str(metadata["voxelsize_mm"][0])[:16]
     dicom.write_file(path, ds)
Beispiel #11
0
def replace_iso_gantry_spots(pdir,
                             x=None,
                             y=None,
                             z=None,
                             angle=None,
                             fgs=None,
                             ibs=None):
    """Edit the RT Ion Plan (RTIP) file found in *pdir* in place.

    Optionally overrides the isocenter position (x, y, z) and the gantry
    angle on every beam, optionally replaces the FractionGroupSequence and
    IonBeamSequence, then normalizes a few tags so the plan stays
    DICOM-compliant, and rewrites the same file.

    :param pdir: directory path (with trailing separator, since it is
        string-concatenated with the filename) containing a '*RTIP*.dcm' file
    :param x: new isocenter X coordinate (all of x, y, z must be truthy)
    :param y: new isocenter Y coordinate
    :param z: new isocenter Z coordinate
    :param angle: new gantry angle applied to every beam's first control point
    :param fgs: replacement FractionGroupSequence (used together with *ibs*)
    :param ibs: replacement IonBeamSequence (used together with *fgs*)
    """
    # Locate the first RT Ion Plan file in the directory.
    rtip = [
        f for f in os.listdir(pdir) if 'RTIP' in f and '.dcm' in f.lower()
    ][0]
    data = pd.read_file(pdir + rtip)

    num_beams = len(data.IonBeamSequence)

    # Only move the isocenter when a complete coordinate triple is supplied.
    if x and y and z:
        for i in range(num_beams):
            data.IonBeamSequence[i].IonControlPointSequence[
                0].IsocenterPosition[0] = str(x)
            data.IonBeamSequence[i].IonControlPointSequence[
                0].IsocenterPosition[1] = str(y)
            data.IonBeamSequence[i].IonControlPointSequence[
                0].IsocenterPosition[2] = str(z)

    if angle:
        for i in range(num_beams):
            data.IonBeamSequence[i].IonControlPointSequence[
                0].GantryAngle = str(angle)

    # Copy fraction group sequence and ion beam sequence if we are making
    # a new plan.
    if fgs and ibs:
        data.FractionGroupSequence = fgs
        data.IonBeamSequence = ibs

    # Make sure other items are dicom-compliant (labels/names/geometry).
    data.RTPlanLabel = 'label'
    data.RTPlanName = 'name'
    data.RTPlanGeometry = 'PATIENT'
    data.FractionGroupSequence[0].FractionGroupNumber = '1'

    # Check the setup beam: a zero-energy first beam marks a setup field.
    if data.IonBeamSequence[0].IonControlPointSequence[
            0].NominalBeamEnergy == '0':
        data.IonBeamSequence[0].TreatmentDeliveryType = 'SETUP'
        data.IonBeamSequence[0].IonControlPointSequence[
            0].PatientSupportAngle = '90'

    # Write data back to the same file that was read.
    # BUG FIX: was `pdir + rtip[0]`, which concatenated only the FIRST
    # CHARACTER of the filename, silently writing to the wrong path.
    pd.write_file(pdir + rtip, data)
Beispiel #12
0
def write_ds(ds, fn, default_sopclass=None):
    """Write dataset *ds* to file *fn*, synthesizing the file meta header.

    Builds a fresh file_meta (Implicit VR Little Endian), filling the media
    storage SOP class/instance UIDs from the dataset when present.

    :param ds: dataset to write (file_meta is overwritten)
    :param fn: destination filename
    :param default_sopclass: SOP Class UID used when the dataset lacks one;
        defaults to the Study Root Query/Retrieve - FIND class.
    """
    ds.file_meta = dicom.dataset.Dataset()
    ds.file_meta.TransferSyntaxUID = dicom.UID.ImplicitVRLittleEndian
    # Idiom fix: identity comparison with None instead of ==.
    if default_sopclass is None:
        default_sopclass = get_uid("Study Root Query/Retrieve Information Model - FIND")
    ds.file_meta.MediaStorageSOPClassUID = getattr(ds, 'SOPClassUID', default_sopclass)
    ds.file_meta.MediaStorageSOPInstanceUID = getattr(ds, 'SOPInstanceUID', generate_uid())
    ds.is_little_endian = True
    ds.is_implicit_VR = True
    ds.file_meta.ImplementationClassUID = '2.25.4282708245307149051252828097685724107'
    # WriteLikeOriginal=False forces a standards-conformant file header.
    dicom.write_file(fn, ds, WriteLikeOriginal=False)
Beispiel #13
0
 def updateDicomFile(self, event):
     """
         Overwrites the byte section responsible for the PatientName (PN),
         writes the changes to the current file, and re-renders its signature.
     """
     # (0010,0010) is the PatientName element.
     self.pre_save_ds[0x10, 0x10].value = self.patientname
     dicom.write_file(self.filename, self.pre_save_ds)
     self.status_text.delete(3.0, END)
     # Renamed from `str`, which shadowed the builtin of the same name.
     status_msg = u"Выбран файл: %s" % self.filename
     # Re-read the file so the displayed dataset reflects what is on disk.
     ds = dicom.read_file(self.filename)
     self.fillTextField(status_msg, ds)
Beispiel #14
0
    def export_dynalog_plan(self,plan_name,filename,export_expected=False,leafgap=0.7):
        """
        Parameters
        -----------------------------------------------------------------------
        plan_name : str
            Value of the RTPlanLabel tag in the exported plan.  At most 13
            characters; longer names are truncated.

        filename : str
            File name under which the exported RTPLAN object is stored.

        export_expected : bool
            Forwarded to each beam's export_logbeam -- presumably selects
            expected rather than actual values (TODO confirm in export_logbeam).

        leafgap : float
            Forwarded to each beam's export_logbeam.

        Description
        -----------------------------------------------------------------------
        Exports the current plan state as a DICOM object.  The basis is a
        copy of the DICOM file the plan was initialized with.  StudyInstance,
        SeriesInstance and Study UIDs are replaced/modified.  All values
        contained in the DynaLog are exported in place of the original
        parameters.

        The plan must be validated before the export can take place!

        Output
        -----------------------------------------------------------------------
        output : DICOM RTPLAN object
            Plan object with DynaLog values, written to *filename*.

        """
        # Refuse to export a plan that has not passed validation.
        if self.validated == False:
            raise PlanMismatchError(self.header["plan_uid"],"validation",
            "can't export unvalidated plan.")
        # Deep-copy so the in-memory original dataset stays untouched.
        exportplan = copy.deepcopy(self.dicom_data)
        for num in range(len(self.beams)):
            exportplan.BeamSequence[num] = self.beams[num].export_logbeam(export_expected,leafgap)
        # RTPlanLabel is limited to 13 characters.
        exportplan.RTPlanLabel = ("dyn_"+plan_name)[:13]

        # Derive fresh UIDs by replacing the last UID component with a
        # localtime-based suffix so the export does not collide with the
        # original object's UIDs.
        ltime = time.localtime()
        study_instance = exportplan.StudyInstanceUID.split(".")
        series_instance = exportplan.SeriesInstanceUID.split(".")
        instance_id = exportplan.SOPInstanceUID.split(".")

        exportplan.StudyInstanceUID = ".".join(study_instance[:-1])+\
            "."+"".join([str(t) for t in ltime[3:6]])
        exportplan.SeriesInstanceUID = ".".join(series_instance[:-1])+\
            "."+"".join([str(t) for t in ltime[:6]])
        exportplan.StudyID = "Id"+\
            "".join([str(t) for t in ltime[3:6]])
        exportplan.SOPInstanceUID = ".".join(instance_id[:-1])+\
            "."+"".join([str(t) for t in ltime[:6]])
        exportplan.ApprovalStatus = "UNAPPROVED"
        dcm.write_file(filename,exportplan)
Beispiel #15
0
    def _write_file(cls, ds, filename):
        """ Write out the Dicom files to an internal location.
            Could be extended to use S3 or another file store later on.

            :param ds: DICOM dataset to persist
            :param filename: base filename; '.dcm' is appended if missing
            :return: dict with 'raw' filename, plus 'img' and 'thumb' when
                an image rendering succeeds
            :raises Exception: if no unique filename is found after
                MAX_RETRIES attempts
        """
        if not filename.endswith('.dcm'):
            filename = '%s.dcm' % filename

        # Generate a unique filename to avoid collisions.
        retries = 0
        raw_base_path = current_app.config['DICOM_ABS_UPLOAD_DIR']
        while retries < cls.MAX_RETRIES:
            prefix = str(uuid.uuid4().hex[0:6])
            raw_filename = '%s_%s' % (prefix, filename)
            raw_path = os.path.join(raw_base_path, raw_filename)
            if not os.path.exists(raw_path):
                break
            retries += 1

        # BUG FIX: the loop exits with retries == MAX_RETRIES when every
        # attempt collided, so the old `>` comparison never fired and a
        # colliding path could be used silently.  Use `>=`.
        if retries >= cls.MAX_RETRIES:
            # We didn't get a unique filename?!
            raise Exception("Unable to generate a unique filename after "
                            "%d retries!" % cls.MAX_RETRIES)

        # Write the raw file.
        with open(raw_path, 'wb+') as f:
            dicom.write_file(f, ds)

        # Attempt to convert to an image and write out.
        img_base_path = current_app.config['IMG_ABS_UPLOAD_DIR']
        try:
            img = dicom_lib.pil_from_dataset(ds)
        except Exception:
            # Just move on if we can't get an image.
            return {'raw': raw_filename}

        img_filename = raw_filename.rsplit('.', 1)[0] + '.jpg'
        img_path = os.path.join(img_base_path, img_filename)
        with open(img_path, 'wb+') as f:
            img.save(f, format=cls.IMG_FORMAT)

        # Also save a thumbnail (in-place resize of the PIL image).
        img.thumbnail((64, 64))
        thumb_filename = raw_filename.rsplit('.', 1)[0] + '.thumb.jpg'
        thumb_path = os.path.join(img_base_path, thumb_filename)
        with open(thumb_path, 'wb+') as f:
            img.save(f, format=cls.IMG_FORMAT)

        return {
            'raw': raw_filename,
            'img': img_filename,
            'thumb': thumb_filename,
        }
Beispiel #16
0
def prepare_data(dicom_dir, new_dicom_foldername, FS_folder, dict_new_metadata, ignore_FS_folder):
    """Copy a test DICOM directory, rewrite selected header tags, and
    optionally clone the matching FreeSurfer results folder.

    dicom_dir -- source folder name under TEST_DATA/dicoms
    new_dicom_foldername -- destination folder name under TEST_DATA/dicoms
    FS_folder -- source folder name under TEST_DATA/fs_res
    dict_new_metadata -- maps tag names (e.g. "PatientID") to new values
    ignore_FS_folder -- when True, skip the FreeSurfer folder copy
    """
    print new_dicom_foldername
    try:
        shutil.copytree(os.path.join(TEST_DATA, "dicoms", dicom_dir), os.path.join(TEST_DATA, "dicoms", new_dicom_foldername))
    except OSError as e:
        # errno 17 == EEXIST: reuse the already-copied directory.
        if(e.errno == 17):
            print "New DICOM directory already exists, using this."
        else:
            print "Problem copying files into new directory: %s" % e
            raise e
    except Exception as e:
        raise e


    try:
        files = os.listdir(os.path.join(TEST_DATA, "dicoms", new_dicom_foldername))
    except Exception as e:
        print "Problem getting files in new DICOM directory: %s" %e
        raise e


    # Rewrite the requested tags in every file of the new directory.
    for f in files:
        d = dicom.read_file(os.path.join(TEST_DATA, "dicoms", new_dicom_foldername, f))


        # Map friendly names to the dataset's data elements by tag.
        change_meta = {"PatientName" : d[0x10,0x10],
                       "AcquisitionDate" : d[0x08,0x22],
                       "PatientID" : d[0x10, 0x20],
                       "AccessionNumber": d[0x08, 0x50],
                       "SeriesDescription": d[0x08, 0x103e]
        }

        for key in dict_new_metadata.keys():
            change_meta[key].value = dict_new_metadata[key]

        dicom.write_file(os.path.join(TEST_DATA, "dicoms", new_dicom_foldername, f), d)

    # Clone the FreeSurfer folder only when identifying tags changed,
    # since the internal patient id is derived from them.
    if(files and not ignore_FS_folder and
       ("PatientID" in dict_new_metadata.keys() or
       "AccessionNumber" in dict_new_metadata.keys() or
       "SeriesDescription" in dict_new_metadata.keys())
       ):

        d = dicom.read_file(os.path.join(TEST_DATA, "dicoms", new_dicom_foldername, files[0]))
        ptuniqid = create_internal_ptid_uniq(d.PatientID, d.AccessionNumber, d.SeriesDescription)
        try:
            shutil.copytree(os.path.join(TEST_DATA, "fs_res", FS_folder),
                            os.path.join(TEST_DATA, "fs_res", ptuniqid))
        except OSError, e:
            if(e.errno == 17): print "New FS folder already exists."
            else: raise IOError("Problem copying content from original FS folder %s" %e)
Beispiel #17
0
def save_dicom_data(dicom_file_data,
                    file_path_to_save,
                    with_original_meta=True):
    """
    Write *dicom_file_data* out as a DICOM file.

    :param dicom_file_data: DICOM data such as pydicom.dataset.FileDataset
    :param file_path_to_save: full destination path for the DICOM file
    :param with_original_meta: whether the new file keeps the original
        metadata; if the original file had no metadata, None is written
    """
    dicom.write_file(
        file_path_to_save,
        dicom_file_data,
        write_like_original=with_original_meta,
    )
Beispiel #18
0
def write_ds(ds, fn, default_sopclass=None):
    """Write dataset *ds* to file *fn* with a freshly built file meta header.

    The header uses Implicit VR Little Endian; media storage SOP class and
    instance UIDs are taken from the dataset when present.

    :param ds: dataset to write (its file_meta is overwritten)
    :param fn: destination filename
    :param default_sopclass: SOP Class UID to use when the dataset lacks
        one; defaults to Study Root Query/Retrieve - FIND.
    """
    ds.file_meta = dicom.dataset.Dataset()
    ds.file_meta.TransferSyntaxUID = dicom.UID.ImplicitVRLittleEndian
    # Idiom fix: compare to None with `is`, not `==`.
    if default_sopclass is None:
        default_sopclass = get_uid(
            "Study Root Query/Retrieve Information Model - FIND")
    ds.file_meta.MediaStorageSOPClassUID = getattr(ds, 'SOPClassUID',
                                                   default_sopclass)
    ds.file_meta.MediaStorageSOPInstanceUID = getattr(ds, 'SOPInstanceUID',
                                                      generate_uid())
    ds.is_little_endian = True
    ds.is_implicit_VR = True
    ds.file_meta.ImplementationClassUID = '2.25.4282708245307149051252828097685724107'
    # WriteLikeOriginal=False forces a standards-conformant file header.
    dicom.write_file(fn, ds, WriteLikeOriginal=False)
Beispiel #19
0
 def C_STORE_RQ_received(self, presentation_context_id, store_rq, dimse_data):
     """Handle an incoming DIMSE C-STORE request: synthesize file meta for
     the received dataset and persist it as '<Modality>_<SOPInstanceUID>.dcm'.

     presentation_context_id -- id of the presentation context (unused here)
     store_rq -- the C_STORE_RQ DIMSE command object
     dimse_data -- the dataset carried by the request
     """
     log.msg("received DIMSE command %s" % store_rq)
     assert store_rq.__class__ == dimsemessages.C_STORE_RQ
     log.msg("replying to %s" % store_rq)
     # DICOM status 0 == success; set to 1 on any failure below.
     status = 0
     try:
         # Build a fresh file meta header (Implicit VR Little Endian).
         dimse_data.file_meta = dicom.dataset.Dataset()
         dimse_data.file_meta.TransferSyntaxUID = dicom.UID.ImplicitVRLittleEndian
         dimse_data.file_meta.MediaStorageSOPClassUID = dimse_data.SOPClassUID
         dimse_data.file_meta.MediaStorageSOPInstanceUID = generate_uid()
         dimse_data.is_little_endian = True
         dimse_data.is_implicit_VR = True
         dimse_data.file_meta.ImplementationClassUID = '2.25.4282708245307149051252828097685724107'
         dicom.write_file("%s_%s.dcm" % (getattr(dimse_data, 'Modality', 'XX'), dimse_data.SOPInstanceUID), dimse_data, WriteLikeOriginal=False)
     except Exception, e:
         log.err(e)
         # NOTE(review): `status` looks intended for a reply message that is
         # not visible in this chunk -- confirm downstream use.
         status = 1
Beispiel #20
0
    def over_write_file(self, outputDir):
        """Rewrite the structure-set (ROI) DICOM file so it contains every
        ROI known to this object, then save it under *outputDir*.

        ROIs present on the patient but missing from the file get fresh
        observation / structure-set / contour entries appended; every
        contour sequence is then rebuilt from the image geometry.

        outputDir -- directory the updated structure-set file is written to
        """

        print("over writing ROI file! ")
        # Image geometry: pixel->patient transform and index->location map.
        pix2pat = self.imageInfo['Pix2Pat']
        ind2loc = self.imageInfo['Ind2Loc']

        for ROI in self.ROI_List:
            print('{}: {}'.format(ROI.Number, ROI.Name))

        # Collect the (lowercased) ROI names already present in the file.
        ROInames_in_file = []
        for SSROISeq in self.di.StructureSetROISequence:
            ROInames_in_file.append(SSROISeq.ROIName.lower())
        fileROI_set = set(ROInames_in_file)

        # ROIs known for the patient but absent from the file.
        PatientROI_set = set(self.ROI_byName.keys())
        patient_not_file = list(PatientROI_set.difference(fileROI_set))
        print('ROIs in pat but not file', patient_not_file)

        # Append the three parallel sequence entries for each missing ROI.
        for patientName in patient_not_file:
            thisROI = self.ROI_byName[patientName]

            ROIObsSeq = mkNewROIObs_dataset(thisROI)
            self.di.RTROIObservationsSequence.append(ROIObsSeq)

            SSROI = mkNewStructureSetROI_dataset(thisROI, self.FrameRef_UID)
            self.di.StructureSetROISequence.append(SSROI)

            ROIContour = mkNewROIContour_dataset(thisROI)
            self.di.ROIContourSequence.append(ROIContour)

        # Rebuild every contour sequence from the current geometry;
        # relies on StructureSetROISequence and ROIContourSequence being
        # index-aligned after the appends above.
        for index, SS in enumerate(self.di.StructureSetROISequence):

            thisROI = self.ROI_byName[SS.ROIName.lower()]

            ContourSequence = mkNewContour_Sequence(thisROI, ind2loc, pix2pat)

            self.di.ROIContourSequence[index].ContourSequence = ContourSequence

        outFile = self.SSFile
        outpath = os.path.join(outputDir, outFile)
        print('saving to {}'.format(outpath))
        dicom.write_file(outpath, self.di)
Beispiel #21
0
def swap(source, t1, t2, force=False, backup=True):
    ''' Swaps values from two tags in a DICOM file or a directory.

    source -- DICOM file or directory to process
    t1, t2 -- attribute names of the two tags to swap
    force -- skip the interactive confirmation prompt
    backup -- tar up *source* to the module-level `backup_archive` first

    Returns a pandas DataFrame logging the swapped values per file, or
    None when the user declines the prompt.
    '''
    if backup:
        # NOTE(review): `backup_archive` is a module-level name not visible
        # here; shell interpolation breaks on paths with spaces/metachars.
        print 'Backup to ', backup_archive
        os.system('tar cfz %s %s'%(backup_archive, source))
    dcm = collect_dicom(source)
    # First row is the header of the report table returned at the end.
    table = [['PatientID', 'first_tag', 'tag1_value', 'second_tag', 'tag2_value', 'filepath']]

    print 'WARNING: will swap tags %s and %s in the following files:\n%s\n(%s files)'\
        %(t1, t2, '\n'.join(dcm), len(dcm))
    if (force or raw_input('Proceed? y/N ') == 'y'):
        for i, each in enumerate(dcm):
            # Lightweight progress indicator on a single terminal line.
            progress = i/float(len(dcm)) * 100.0
            sys.stdout.write("Operation progress: %d%%   \r" % (progress) )
            sys.stdout.flush()

            try:
                d = dicom.read_file(each)
                first = getattr(d, t1)
                second = getattr(d, t2)
                pid = d.PatientID

                # Record the pre-swap values, then swap and rewrite in place.
                table.append([pid, t1, first, t2, second, each])
                setattr(d, t2, first)
                setattr(d, t1, second)
                dicom.write_file(each, d)

            except (dicom.filereader.InvalidDicomError, IOError) as e:
                print 'WARNING: file %s raised the following error:\n%s'%(each, e)
            except KeyboardInterrupt:
                # Return the partial report instead of losing the work so far.
                print '<Keyboard Interrupt>'
                df = pd.DataFrame(table[1:], columns=table[0])
                return df
            except Exception as e:
                print e
                df = pd.DataFrame(table[1:], columns=table[0])
                return df
        df = pd.DataFrame(table[1:], columns=table[0])
        return df
Beispiel #22
0
    def run(self):
        """
        Reads the DICOM file, anonymizes it and write the result.

        Reads self._dicom_filein, skips DICOMDIR files, applies
        self.anonymize to every data element via Dataset.walk, and writes
        the result to self._dicom_fileout.  All failures are reported on
        stdout rather than raised (deliberate best-effort behavior).
        """
        try:
            ds = dicom.read_file(self._dicom_filein)
            try:
                # (0002,0002) MediaStorageSOPClassUID identifies DICOMDIRs,
                # which must not be anonymized like ordinary images.
                meta_data = dicom.filereader.read_file_meta_info(
                    self._dicom_filein)
                if meta_data[0x0002, 0x0002].value == "Media Storage Directory Storage":
                    print "This file is a DICOMDIR:", self._dicom_filein
                    return
            except:
                # Missing/unreadable meta info: treat as a regular file.
                pass
            ds.walk(self.anonymize)
        except:
            print "This file is not a DICOM file:", self._dicom_filein
            return

        try:
            dicom.write_file(self._dicom_fileout, ds)
        except:
            print "The anonymization fails on", self._dicom_filein
            return
Beispiel #23
0
def remove(source, tag, value='', force=False, backup=False):
    ''' Clears the value of a given tag in a DICOM file or directory and replaces
    it with a new value (default='').

    source -- DICOM file or directory to process
    tag -- attribute name of the tag to clear
    value -- replacement value written into the tag
    force -- skip the interactive confirmation prompt
    backup -- tar up *source* to the module-level `backup_archive` first

    Returns a pandas DataFrame logging the removed values per file, or
    None when the user declines the prompt.
    '''
    if backup:
        # NOTE(review): `backup_archive` is a module-level name not visible
        # here; shell interpolation breaks on paths with spaces/metachars.
        print 'Backup to ', backup_archive
        os.system('tar cfz %s %s'%(backup_archive, source))
    dcm = collect_dicom(source)
    # First row is the header of the report table returned at the end.
    table = [['PatientID', 'removed_tag', 'tag_value', 'filepath']]

    print 'WARNING: will remove tag %s from the following files:\n%s\n(%s files)'\
        %(tag, '\n'.join(dcm), len(dcm))
    if (force or raw_input('Proceed? y/N ') == 'y'):
        for i, each in enumerate(dcm):
            # Lightweight progress indicator on a single terminal line.
            progress = i/float(len(dcm)) * 100.0
            sys.stdout.write("Operation progress: %d%%   \r" % (progress) )
            sys.stdout.flush()
            try:
                d = dicom.read_file(each)
                pid = d.PatientID
                # Record the old value, then overwrite and rewrite in place.
                tagval =  getattr(d, tag)
                table.append([pid, tag, tagval, each])
                setattr(d, tag, value)
                dicom.write_file(each, d)
            except (dicom.filereader.InvalidDicomError, IOError) as e:
                print 'WARNING: file %s raised the following error:\n%s'%(each, e)
            except KeyboardInterrupt:
                # Return the partial report instead of losing the work so far.
                print '<Keyboard Interrupt>'
                df = pd.DataFrame(table[1:], columns=table[0])
                return df
            except Exception as e:
                print e
                df = pd.DataFrame(table[1:], columns=table[0])
                return df

        df = pd.DataFrame(table[1:], columns=table[0])
        return df
Beispiel #24
0
def sabr_deid(subj_info, scan_df, raw_dir, deid_outdir):

    #Join raw dir with subject name (assumes directory structure is ./rawdir/subj_name/...
    subj_main_dir = os.path.join(raw_dir, subj_info['subj_name'])

    new_id = subj_info['subj_id']

    #Get list of sessions within main subj directory, make dir and loop over sessions.
    subj_sessions = os.walk(subj_main_dir).next()[1]
    subj_sessions.sort()

    print('\n***{} has {} session(s)'.format(new_id, len(subj_sessions)))

    #Create deidentified main (root) directory for subject
    subj_deid_main_dir = os.path.join(deid_outdir, new_id)
    try:
        os.mkdir(subj_deid_main_dir)
    except:
        print('\nDirectory {} exists\n'.format(subj_deid_main_dir))

    #WARNING! LAZY CODING AHEAD!
    if len(subj_sessions) == 0:
        raise Exception(
            '\n***ERROR! NUMBER OF SESSIONS = 0!***\nPlease check directory structure of {}'
            .format(subj_info['subj_name']))

    elif len(subj_sessions) == 1:
        session = subj_sessions[0]
        subj_session_dir = os.path.join(subj_main_dir, session)
        subj_deid_session_dir = os.path.join(subj_deid_main_dir, 'ses-01')

        try:
            os.mkdir(subj_deid_session_dir)
        except:
            print('\nSession folder {} exists\n'.format(subj_deid_session_dir))

        for j, scan_type in enumerate(scan_df['scan_type']):
            subj_deid_meta_dir = os.path.join(subj_deid_session_dir, scan_type)

            try:
                os.mkdir(subj_deid_meta_dir)
            except:
                print('Meta directory {} exists.'.format(scan_type))

            #Match common sequence substring with path in os.walk
            for root, dr, files in os.walk(subj_session_dir):
                match = scan_df.scan_match[j]

                match_regex = fnmatch.translate(match)
                found = re.search(match_regex, root)
                #print('\n***{}***\n'.format(found))

                #If match, start deid process. If not, move onto next folder.
                if found != None:

                    subj_deid_sequence_dir = os.path.join(
                        subj_deid_meta_dir, scan_df.scan_filename[j])
                    print('Making directory {}'.format(subj_deid_sequence_dir))
                    try:
                        os.mkdir(
                            subj_deid_sequence_dir
                        )  #Make "housing" directory to keep dicoms of different sequences but same meta-category separate.
                    except:
                        print(
                            '\n***SEQUENCE DIRECTORY ALREADY EXISTS!***\nSkipping.'
                        )
                        continue

                    #Create list of dicoms in sequence dir rather than use
                    #files (more control in case any non-dicoms)
                    anon_files = os.listdir(root)
                    anon_files = [
                        x for x in anon_files if 'nii' not in x
                    ]  #Remove any previous nii files that may be present < To do - expand to other file types (mgh, analyze, etc)
                    anon_files.sort()

                    for anon_file in anon_files:
                        #Read files in 1 at a time, remove the remove / alter the below tags.
                        dcm = dicom.read_file(
                            os.path.join(root, anon_file), force=True
                        )  #Uses force = True incase dicoms haven't had identifier added to header

                        #Strip aquisition date information
                        aqusition_date_list = [[0x0008, 0x0020],
                                               [0x0008, 0x0021],
                                               [0x0008, 0x0022],
                                               [0x0008, 0x0023]]

                        for tag in aqusition_date_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                        #Strip aquisition time information
                        aqusition_time_list = [[0x0008, 0x0030],
                                               [0x0008, 0x0031],
                                               [0x0008, 0x0032],
                                               [0x0008, 0x0033]]

                        for tag in aqusition_time_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                        #Strip physician information
                        physician_list = [[0x0008, 0x0090], [0x0008, 0x1050]]

                        for tag in physician_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                            #Strip study description
                            #dcm[0x0008,0x1030].value = ''

                            #Strip subject name / patient ID
                        subj_name_list = [[0x0010, 0x0010], [0x0010, 0x0020]]
                        #PatientName, PatientID

                        for tag in subj_name_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = new_id
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                            #Strip subject attributes
                        subj_attrib_list = [[0x0010, 0x0030], [0x0010, 0x1010],
                                            [0x0010, 0x1020], [0x0010, 0x1030]]
                        #, DoB, Age, PatientHeight, PatientWeight

                        for tag in subj_attrib_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                        #Write anonymised file
                        dicom.write_file(
                            os.path.join(subj_deid_sequence_dir, anon_file),
                            dcm)

    elif len(subj_sessions) > 1:
        for sn, session in enumerate(subj_sessions):

            #MAKE DIRECTORIES BUT ZERO PAD SESSION
            subj_deid_session_dir = os.path.join(
                subj_deid_main_dir, 'ses-'
                '{:02d}'.format(sn + 1))

            try:
                os.mkdir(subj_deid_session_dir)
            except:
                print('\nSession folder {} exists\n'.format(
                    subj_deid_session_dir))

            #Session folder for identifiable subject
            subj_session_dir = os.path.join(subj_main_dir, session)

            #Loop over scan folder types within scan dataframe (anat, task, etc)
            for j, scan_type in enumerate(scan_df['scan_type']):
                subj_deid_meta_dir = os.path.join(subj_deid_session_dir,
                                                  scan_type)
                try:
                    os.mkdir(subj_deid_meta_dir)
                except:
                    print('Meta directory {} exists.'.format(scan_type))

                #Match common sequence substring with path in os.walk
                for root, dr, files in os.walk(subj_session_dir):
                    match = scan_df.scan_match[j]

                    match_regex = fnmatch.translate(match)
                    found = re.search(match_regex, root)

                    #If match, start deid process, not not, move onto next folder.
                    if found != None:

                        subj_deid_sequence_dir = os.path.join(
                            subj_deid_meta_dir, scan_df.scan_filename[j])
                        print('Making directory {}'.format(
                            subj_deid_sequence_dir))
                        try:
                            os.mkdir(
                                subj_deid_sequence_dir
                            )  #Make "housing" directory to keep dicoms of different sequences but same meta-category separate.
                        except:
                            print(
                                '\n***SEQUENCE DIRECTORY ALREADY EXISTS!***\nSkipping.'
                            )
                            continue

                        #Create list of dicoms in sequence dir rather than use
                        #files (more control in case any non-dicoms)
                        anon_files = os.listdir(root)
                        anon_files = [
                            x for x in anon_files if 'nii' not in x
                        ]  #Remove any previous nii files that may be present < To do - expand to other file types (mgh, analyze, etc)
                        anon_files.sort()

                        for anon_file in anon_files:
                            #Read files in 1 at a time, remove the remove / alter the below tags.
                            dcm = dicom.read_file(
                                os.path.join(root, anon_file), force=True
                            )  #Uses force = True incase dicoms haven't had identifier added to header

                            #Strip aquisition date information
                            aqusition_date_list = [[0x0008, 0x0020],
                                                   [0x0008, 0x0021],
                                                   [0x0008, 0x0022],
                                                   [0x0008, 0x0023]]

                            for tag in aqusition_date_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                            #Strip aquisition time information
                            aqusition_time_list = [[0x0008, 0x0030],
                                                   [0x0008, 0x0031],
                                                   [0x0008, 0x0032],
                                                   [0x0008, 0x0033]]

                            for tag in aqusition_time_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                            #Strip physician information
                            physician_list = [[0x0008, 0x0090],
                                              [0x0008, 0x1050]]

                            for tag in physician_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                                #Strip study description
                                #dcm[0x0008,0x1030].value = ''

                                #Strip subject name / patient ID
                            subj_name_list = [[0x0010, 0x0010],
                                              [0x0010, 0x0020]]
                            #PatientName, PatientID

                            for tag in subj_name_list:
                                try:
                                    dcm[hex(tag[0]),
                                        hex(tag[1])].value = new_id
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                                #Strip subject attributes
                            subj_attrib_list = [[0x0010, 0x0030],
                                                [0x0010, 0x1010],
                                                [0x0010, 0x1020],
                                                [0x0010, 0x1030]]
                            #, DoB, Age, PatientHeight, PatientWeight

                            for tag in subj_attrib_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                            #Write anonymised file
                            dicom.write_file(
                                os.path.join(subj_deid_sequence_dir,
                                             anon_file), dcm)

    return (subj_deid_main_dir, subj_sessions, new_id)
Beispiel #25
0
import dicom

# Rebuild a motion-corrected DMSA SPECT series: take the per-frame pixel data
# from the Hermes motion-corrected file, rotate the frame order by half the
# number of frames, and graft the result back onto the original dataset so the
# output keeps the original's geometry and metadata.
path = "/media/sf_hserver1_images/hugh/patient/"
orig = dicom.read_file(path + "orig.ima")
mcor = dicom.read_file(path + "mcor_hermes_axis.ima")

nf = mcor.NumberOfFrames
nf2 = nf ** 2
# Slice PixelData into nf frames of nf*nf pixels, 2 bytes per pixel
pixel_list = [mcor.PixelData[2 * a * nf2:2 * (a + 1) * nf2] for a in range(nf)]
# Rotate the frame order by nf/2 via negative indexing
# (Python 2 code: nf / 2 relies on integer division -- TODO confirm)
pixel_list = [pixel_list[p - nf / 2] for p in range(nf)]
orig.PixelData = ''.join(pixel_list)

# Re-label and mint fresh UIDs so PACS treats this as a new series/instance
orig.SeriesDescription = "DMSA SPECT [E1 Corrected]"
orig.SeriesInstanceUID = dicom.UID.generate_uid()
orig.SOPInstanceUID = dicom.UID.generate_uid()

dicom.write_file(path + "fixed.ima", orig, WriteLikeOriginal=True)
Beispiel #26
0
def write_dicom(path, dataset):
    """Write a pydicom dataset to a DICOM file.

    Parameters
    ----------
    path : str
        Destination file path; a '.dcm' extension is appended if missing.
    dataset : Dataset
        The pydicom dataset to serialize.
    """
    # idiom fix: was the non-idiomatic `if not os.path.splitext(path)[1] == '.dcm'`
    if os.path.splitext(path)[1] != '.dcm':
        path += '.dcm'
    dicom.write_file(path, dataset)
Beispiel #27
0
    def patchDicomDir(self,
                      inputDirPath,
                      outputDirPath,
                      exportDicom=True,
                      anonymizeDicom=False,
                      exportUltrasoundToNrrd=False):
        """
    Since CTK (rightly) requires certain basic information [1] before it can import
    data files that purport to be dicom, this code patches the files in a directory
    with some needed fields.  Apparently it is possible to export files from the
    Philips PMS QLAB system with these fields missing.

    Calling this function with a directory path will make a patched copy of each file.
    Importing the old files to CTK should still fail, but the new ones should work.

    The directory is assumed to have a set of instances that are all from the
    same study of the same patient.  Also that each instance (file) is an
    independent (multiframe) series.

    [1] https://github.com/commontk/CTK/blob/16aa09540dcb59c6eafde4d9a88dfee1f0948edc/Libs/DICOM/Core/ctkDICOMDatabase.cpp#L1283-L1287
    """

        # Slicer <= 4.10 bundles the legacy 'dicom' package; newer bundles pydicom.
        if slicer.app.majorVersion == 4 and slicer.app.minorVersion <= 10:
            import dicom
        else:
            import pydicom as dicom

        if not outputDirPath:
            outputDirPath = inputDirPath

        self.addLog('DICOM patching started...')
        logging.debug('DICOM patch input directory: ' + inputDirPath)
        logging.debug('DICOM patch output directory: ' + outputDirPath)

        # Maps so that repeated IDs/UIDs are anonymized consistently across files
        patientIDToRandomIDMap = {}
        studyUIDToRandomUIDMap = {}
        seriesUIDToRandomUIDMap = {}
        numberOfSeriesInStudyMap = {}

        # All files without a patient ID will be assigned to the same patient
        randomPatientID = self.generateUid()

        requiredTags = [
            'PatientName', 'PatientID', 'StudyInstanceUID',
            'SeriesInstanceUID', 'SeriesNumber'
        ]
        for root, subFolders, files in os.walk(inputDirPath):

            # Assume that all files in a directory belongs to the same study
            randomStudyUID = self.generateUid()

            currentSubDir = os.path.relpath(root, inputDirPath)
            rootOutput = os.path.join(outputDirPath, currentSubDir)

            for file in files:
                filePath = os.path.join(root, file)
                self.addLog('Examining %s...' %
                            os.path.join(currentSubDir, file))
                try:
                    ds = dicom.read_file(filePath)
                except (IOError, dicom.filereader.InvalidDicomError):
                    self.addLog('  Not DICOM file. Skipped.')
                    continue

                if not hasattr(ds, 'SOPClassUID'):
                    self.addLog('  No SOPClassUID tag found. Skipped.')
                    continue

                if ds.SOPClassUID != '1.2.840.113543.6.6.1.3.10002':
                    self.addLog(
                        '  Not recognized as Philips Cartesian 4D ultrasound DICOM file. Skipped.'
                    )
                    continue

                self.addLog('  Patching...')

                # Ensure required tags exist (empty values trigger the fills below)
                for tag in requiredTags:
                    if not hasattr(ds, tag):
                        setattr(ds, tag, '')

                # Generate a new SOPInstanceUID to avoid different files having the same SOPInstanceUID
                ds.SOPInstanceUID = self.generateUid()

                if ds.PatientName == '':
                    ds.PatientName = "Unspecified Patient"
                if ds.PatientID == '':
                    ds.PatientID = randomPatientID
                if ds.StudyInstanceUID == '':
                    ds.StudyInstanceUID = randomStudyUID
                if ds.SeriesInstanceUID == '':
                    ds.SeriesInstanceUID = self.generateUid()

                # Generate series number to make it easier to identify a sequence within a study
                if ds.SeriesNumber == '':
                    if ds.StudyInstanceUID not in numberOfSeriesInStudyMap:
                        numberOfSeriesInStudyMap[ds.StudyInstanceUID] = 0
                    numberOfSeriesInStudyMap[
                        ds.StudyInstanceUID] = numberOfSeriesInStudyMap[
                            ds.StudyInstanceUID] + 1
                    ds.SeriesNumber = numberOfSeriesInStudyMap[
                        ds.StudyInstanceUID]

                if anonymizeDicom:

                    self.addLog('  Anonymizing...')

                    ds.StudyDate = ''
                    ds.StudyTime = ''
                    ds.ContentDate = ''
                    ds.ContentTime = ''
                    ds.AccessionNumber = ''
                    # NOTE(review): ReferringPhysiciansName/PatientsBirthDate/PatientsSex
                    # are legacy (pre-pydicom-1.0) keyword spellings -- verify against
                    # the pydicom version bundled with the target Slicer release.
                    ds.ReferringPhysiciansName = ''
                    ds.PatientsBirthDate = ''
                    ds.PatientsSex = ''
                    ds.StudyID = ''
                    # Some ID in a private tag - clear it, just in case.
                    # Robustness fix: guard the access so files lacking this private
                    # tag no longer raise KeyError and abort the whole run.
                    if (0x3001, 0x1004) in ds:
                        ds[(0x3001, 0x1004)].value = ''
                    ds.PatientName = "Unspecified Patient"

                    # replace ids with random values - re-use if we have seen them before
                    if ds.PatientID not in patientIDToRandomIDMap:
                        patientIDToRandomIDMap[
                            ds.PatientID] = self.generateUid()
                    ds.PatientID = patientIDToRandomIDMap[ds.PatientID]
                    if ds.StudyInstanceUID not in studyUIDToRandomUIDMap:
                        studyUIDToRandomUIDMap[
                            ds.StudyInstanceUID] = self.generateUid()
                    ds.StudyInstanceUID = studyUIDToRandomUIDMap[
                        ds.StudyInstanceUID]
                    # Bug fix: membership was checked against studyUIDToRandomUIDMap,
                    # so a previously-seen series UID was never found and every file
                    # of a series received a fresh random SeriesInstanceUID,
                    # splitting each series into one series per file.
                    if ds.SeriesInstanceUID not in seriesUIDToRandomUIDMap:
                        seriesUIDToRandomUIDMap[
                            ds.SeriesInstanceUID] = self.generateUid()
                    ds.SeriesInstanceUID = seriesUIDToRandomUIDMap[
                        ds.SeriesInstanceUID]

                if inputDirPath == outputDirPath:
                    # In-place mode: write alongside the original with a suffix
                    (name, ext) = os.path.splitext(filePath)
                    patchedFilePath = name + ('-anon' if anonymizeDicom else
                                              '') + '-patched' + ext
                    nrrdFilePath = name + '.seq.nrrd'
                else:
                    patchedFilePath = os.path.abspath(
                        os.path.join(rootOutput, file))
                    nrrdFilePath = os.path.splitext(
                        patchedFilePath)[0] + '.seq.nrrd'
                    if not os.path.exists(rootOutput):
                        os.makedirs(rootOutput)

                self.addLog('  Writing DICOM...')
                dicom.write_file(patchedFilePath, ds)
                self.addLog('  Created DICOM file: %s' % patchedFilePath)

                if exportUltrasoundToNrrd and self.isDicomUltrasoundFile(
                        patchedFilePath):
                    self.addLog('  Writing NRRD...')

                    if self.convertUltrasoundDicomToNrrd(
                            patchedFilePath, nrrdFilePath):
                        self.addLog('  Created NRRD file: %s' % nrrdFilePath)
                    else:
                        self.addLog('  NRRD file save failed')

                if not exportDicom:
                    # DICOM was only needed as an intermediate for NRRD export
                    os.remove(patchedFilePath)
                    self.addLog('  Deleted temporary DICOM file')

        self.addLog('DICOM patching completed.')
Beispiel #28
0
def save_scan(path, dataset):
    """Write each slice of *dataset* to its own DICOM file under *path*.

    Parameters
    ----------
    path : str
        Output directory; files are named slice0, slice1, ...
    dataset : sequence
        Sequence of pydicom datasets, one per slice.

    Returns
    -------
    int
        0 (kept for backward compatibility with existing callers).
    """
    # Bug fixes vs. original: range(0, len(dataset) - 1) silently dropped the
    # last slice, and "/slice" + i raised TypeError (str + int concatenation).
    for i, ds in enumerate(dataset):
        dicom.write_file(os.path.join(path, "slice" + str(i)), ds)
    return 0
Beispiel #29
0
        pix[j][i] = 0

    if (output_dcm==1):  
      bin_value = int(round(pix[j][i]*100))        # Convert cm floats into tenths of mm integers to store in dicom image
      for k1 in range(0, output_binning):
        for k2 in range(0, output_binning):      
          mammo.pixel_array[j+k1][i+k2] = bin_value
          
sys.stdout.write('\n'); sys.stdout.flush()
  
if (output_dcm==1):  
  # Save final data as dicom:      help(dicom.write_file)  
  print '\n\n Saving final data as DICOM image in integer units of tenths of mm of plastic...'
  mammo.PixelData = mammo.pixel_array.tostring()   # Save the data array into the actual pixel data string
  
  dicom.write_file(image_file_name+"_bin"+str(output_binning)+".dcm", mammo)


# Generate triangle mesh:
print '\n Generating the output triangle mesh focused to the x-ray source focal spot (num_air_pix='+str(num_air_pix)+')...'
num_binned_pixels = (num_columns/output_binning) * (num_rows/output_binning) - num_air_pix
num_vertices  = num_binned_pixels * 8     #  8 vertices per printed column  

if (output_base==1):
  num_triangles = num_binned_pixels * 8 + 2*(num_rows/output_binning) + 2*(num_columns/output_binning)        #!!ReducedTriangles!!
else:    
  num_triangles = num_binned_pixels * 6 + 2*(num_rows/output_binning) + 2*(num_columns/output_binning)

  
image_file_name2 = image_file_name+"_RawData_bin"+str(output_binning)+"_focusedZ"+str(source_coord[2])
if (subtract_layer>0.00001):
import dicom, os, shutil, sys, string
from dicom.tag import Tag

# input: directory with the DICOM files

inputDir = sys.argv[1]
project = sys.argv[2]
subject = sys.argv[3]
outputDir = sys.argv[4]

for r,d,files in os.walk(inputDir):
  for f in files:
    print 'Processing ', f
    inputFile = r+'/'+f
    dcm = dicom.read_file(inputFile)
    outputFile = outputDir+'/'+f

    pcTag = Tag(0x10,0x4000)
    time = string.split(dcm.StudyTime,'.')[0]
    dcm.add_new(pcTag,'LT',"Project: "+project+"; Subject: "+subject+"; Session: "+subject+"_"+dcm.StudyDate+"_"+time+';')

    print dcm.PatientComments

    f = open(outputFile,'wb')
    dicom.write_file(outputFile,dcm)
    # shutil.copyfile(inputFile, outputFile)
    def patchDicomDir(self,
                      inputDirPath,
                      outputDirPath,
                      generateMissingIds=True,
                      generateImagePositionFromSliceThickness=True,
                      anonymize=False):
        """
    Since CTK (rightly) requires certain basic information [1] before it can import
    data files that purport to be dicom, this code patches the files in a directory
    with some needed fields.  Apparently it is possible to export files from the
    Philips PMS QLAB system with these fields missing.

    Calling this function with a directory path will make a patched copy of each file.
    Importing the old files to CTK should still fail, but the new ones should work.

    The directory is assumed to have a set of instances that are all from the
    same study of the same patient.  Also that each instance (file) is an
    independent (multiframe) series.

    [1] https://github.com/commontk/CTK/blob/16aa09540dcb59c6eafde4d9a88dfee1f0948edc/Libs/DICOM/Core/ctkDICOMDatabase.cpp#L1283-L1287
    """

        import dicom

        if not outputDirPath:
            outputDirPath = inputDirPath

        self.addLog('DICOM patching started...')
        logging.debug('DICOM patch input directory: ' + inputDirPath)
        logging.debug('DICOM patch output directory: ' + outputDirPath)

        # Maps so that repeated IDs/UIDs are anonymized consistently across files
        patientIDToRandomIDMap = {}
        studyUIDToRandomUIDMap = {}
        seriesUIDToRandomUIDMap = {}
        numberOfSeriesInStudyMap = {}

        # All files without a patient ID will be assigned to the same patient
        randomPatientID = dicom.UID.generate_uid(None)

        requiredTags = [
            'PatientName', 'PatientID', 'StudyInstanceUID',
            'SeriesInstanceUID', 'SeriesNumber'
        ]
        for root, subFolders, files in os.walk(inputDirPath):

            # Assume that all files in a directory belongs to the same study
            randomStudyUID = dicom.UID.generate_uid(None)

            # Assume that all files in a directory belongs to the same series
            randomSeriesInstanceUID = dicom.UID.generate_uid(None)

            currentSubDir = os.path.relpath(root, inputDirPath)
            rootOutput = os.path.join(outputDirPath, currentSubDir)

            for file in files:
                filePath = os.path.join(root, file)
                self.addLog('Examining %s...' %
                            os.path.join(currentSubDir, file))
                if file == 'DICOMDIR':
                    self.addLog(
                        'DICOMDIR file is ignored (its contents may be inconsistent with the contents of the indexed DICOM files, therefore it is safer not to use it)'
                    )
                    continue
                try:
                    ds = dicom.read_file(filePath)
                except (IOError, dicom.filereader.InvalidDicomError):
                    self.addLog('  Not DICOM file. Skipped.')
                    continue

                self.addLog('  Patching...')

                ######################################################
                # Add missing IDs
                if generateMissingIds:

                    for tag in requiredTags:
                        if not hasattr(ds, tag):
                            setattr(ds, tag, '')

                    # Generate a new SOPInstanceUID to avoid different files having the same SOPInstanceUID
                    ds.SOPInstanceUID = dicom.UID.generate_uid(None)

                    if ds.PatientName == '':
                        ds.PatientName = "Unspecified Patient"
                    if ds.PatientID == '':
                        ds.PatientID = randomPatientID
                    if ds.StudyInstanceUID == '':
                        ds.StudyInstanceUID = randomStudyUID
                    if ds.SeriesInstanceUID == '':
                        #ds.SeriesInstanceUID = dicom.UID.generate_uid(None) # this line has to be used if each file is a separate series
                        ds.SeriesInstanceUID = randomSeriesInstanceUID

                    # Generate series number to make it easier to identify a sequence within a study
                    if ds.SeriesNumber == '':
                        if ds.StudyInstanceUID not in numberOfSeriesInStudyMap:
                            numberOfSeriesInStudyMap[ds.StudyInstanceUID] = 0
                        numberOfSeriesInStudyMap[
                            ds.StudyInstanceUID] = numberOfSeriesInStudyMap[
                                ds.StudyInstanceUID] + 1
                        ds.SeriesNumber = numberOfSeriesInStudyMap[
                            ds.StudyInstanceUID]

                ######################################################
                # Add missing slice spacing info to multiframe files
                numberOfFrames = ds.NumberOfFrames if hasattr(
                    ds, 'NumberOfFrames') else 1
                if generateImagePositionFromSliceThickness and numberOfFrames > 1:
                    # Multi-frame sequence, we may need to add slice positions

                    # Error in Dolphin 3D CBCT scanners, they store multiple frames but they keep using CTImageStorage as storage class
                    if ds.SOPClassUID == '1.2.840.10008.5.1.4.1.1.2':  # Computed Tomography Image IOD
                        ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2.1'  # Enhanced CT Image IOD

                    sliceStartPosition = ds.ImagePositionPatient if hasattr(
                        ds, 'ImagePositionPatient') else [0, 0, 0]
                    # Bug fix: the hasattr() guard tested ImagePositionPatient while
                    # reading ImageOrientationPatient, raising AttributeError for
                    # files that have position but no orientation.
                    sliceAxes = ds.ImageOrientationPatient if hasattr(
                        ds, 'ImageOrientationPatient') else [1, 0, 0, 0, 1, 0]
                    x = sliceAxes[:3]
                    y = sliceAxes[3:]
                    z = [
                        x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2],
                        x[0] * y[1] - x[1] * y[0]
                    ]  # cross(x,y)
                    sliceSpacing = ds.SliceThickness if hasattr(
                        ds, 'SliceThickness') else 1.0
                    pixelSpacing = ds.PixelSpacing if hasattr(
                        ds, 'PixelSpacing') else [1.0, 1.0]

                    if not (dicom.tag.Tag(0x5200, 0x9229) in ds):

                        # (5200,9229) SQ (Sequence with undefined length #=1)     # u/l, 1 SharedFunctionalGroupsSequence
                        #   (0020,9116) SQ (Sequence with undefined length #=1)     # u/l, 1 PlaneOrientationSequence
                        #       (0020,0037) DS [1.00000\0.00000\0.00000\0.00000\1.00000\0.00000] #  48, 6 ImageOrientationPatient
                        #   (0028,9110) SQ (Sequence with undefined length #=1)     # u/l, 1 PixelMeasuresSequence
                        #       (0018,0050) DS [3.00000]                                #   8, 1 SliceThickness
                        #       (0028,0030) DS [0.597656\0.597656]                      #  18, 2 PixelSpacing

                        planeOrientationDataSet = dicom.dataset.Dataset()
                        planeOrientationDataSet.ImageOrientationPatient = sliceAxes
                        planeOrientationSequence = dicom.sequence.Sequence()
                        planeOrientationSequence.insert(
                            dicom.tag.Tag(0x0020, 0x9116),
                            planeOrientationDataSet)

                        pixelMeasuresDataSet = dicom.dataset.Dataset()
                        pixelMeasuresDataSet.SliceThickness = sliceSpacing
                        pixelMeasuresDataSet.PixelSpacing = pixelSpacing
                        pixelMeasuresSequence = dicom.sequence.Sequence()
                        pixelMeasuresSequence.insert(
                            dicom.tag.Tag(0x0028, 0x9110),
                            pixelMeasuresDataSet)

                        sharedFunctionalGroupsDataSet = dicom.dataset.Dataset()
                        sharedFunctionalGroupsDataSet.PlaneOrientationSequence = planeOrientationSequence
                        sharedFunctionalGroupsDataSet.PixelMeasuresSequence = pixelMeasuresSequence
                        sharedFunctionalGroupsSequence = dicom.sequence.Sequence(
                        )
                        sharedFunctionalGroupsSequence.insert(
                            dicom.tag.Tag(0x5200, 0x9229),
                            sharedFunctionalGroupsDataSet)
                        ds.SharedFunctionalGroupsSequence = sharedFunctionalGroupsSequence

                    if not (dicom.tag.Tag(0x5200, 0x9230) in ds):

                        #(5200,9230) SQ (Sequence with undefined length #=54)    # u/l, 1 PerFrameFunctionalGroupsSequence
                        #  (0020,9113) SQ (Sequence with undefined length #=1)     # u/l, 1 PlanePositionSequence
                        #    (0020,0032) DS [-94.7012\-312.701\-806.500]             #  26, 3 ImagePositionPatient
                        #  (0020,9113) SQ (Sequence with undefined length #=1)     # u/l, 1 PlanePositionSequence
                        #    (0020,0032) DS [-94.7012\-312.701\-809.500]             #  26, 3 ImagePositionPatient
                        #  ...

                        perFrameFunctionalGroupsSequence = dicom.sequence.Sequence(
                        )

                        # One PlanePositionSequence per frame, stepping along the
                        # slice normal z by sliceSpacing
                        for frameIndex in range(numberOfFrames):
                            planePositionDataSet = dicom.dataset.Dataset()
                            slicePosition = [
                                sliceStartPosition[0] +
                                frameIndex * z[0] * sliceSpacing,
                                sliceStartPosition[1] +
                                frameIndex * z[1] * sliceSpacing,
                                sliceStartPosition[2] +
                                frameIndex * z[2] * sliceSpacing
                            ]
                            planePositionDataSet.ImagePositionPatient = slicePosition
                            planePositionSequence = dicom.sequence.Sequence()
                            planePositionSequence.insert(
                                dicom.tag.Tag(0x0020, 0x9113),
                                planePositionDataSet)
                            perFrameFunctionalGroupsDataSet = dicom.dataset.Dataset(
                            )
                            perFrameFunctionalGroupsDataSet.PlanePositionSequence = planePositionSequence
                            perFrameFunctionalGroupsSequence.insert(
                                dicom.tag.Tag(0x5200, 0x9230),
                                perFrameFunctionalGroupsDataSet)

                        ds.PerFrameFunctionalGroupsSequence = perFrameFunctionalGroupsSequence

                ######################################################
                # Anonymize
                if anonymize:

                    self.addLog('  Anonymizing...')

                    ds.StudyDate = ''
                    ds.StudyTime = ''
                    ds.ContentDate = ''
                    ds.ContentTime = ''
                    ds.AccessionNumber = ''
                    # NOTE(review): ReferringPhysiciansName/PatientsBirthDate/PatientsSex
                    # are legacy (pre-pydicom-1.0) keyword spellings -- verify against
                    # the bundled pydicom version.
                    ds.ReferringPhysiciansName = ''
                    ds.PatientsBirthDate = ''
                    ds.PatientsSex = ''
                    ds.StudyID = ''
                    ds.PatientName = "Unspecified Patient"

                    # replace ids with random values - re-use if we have seen them before
                    if ds.PatientID not in patientIDToRandomIDMap:
                        patientIDToRandomIDMap[
                            ds.PatientID] = dicom.UID.generate_uid(None)
                    ds.PatientID = patientIDToRandomIDMap[ds.PatientID]
                    if ds.StudyInstanceUID not in studyUIDToRandomUIDMap:
                        studyUIDToRandomUIDMap[
                            ds.StudyInstanceUID] = dicom.UID.generate_uid(None)
                    ds.StudyInstanceUID = studyUIDToRandomUIDMap[
                        ds.StudyInstanceUID]
                    # Bug fix: membership was checked against studyUIDToRandomUIDMap,
                    # so a previously-seen series UID was never found and every file
                    # of a series received a fresh random SeriesInstanceUID,
                    # splitting each series into one series per file.
                    if ds.SeriesInstanceUID not in seriesUIDToRandomUIDMap:
                        seriesUIDToRandomUIDMap[
                            ds.SeriesInstanceUID] = dicom.UID.generate_uid(
                                None)
                    ds.SeriesInstanceUID = seriesUIDToRandomUIDMap[
                        ds.SeriesInstanceUID]

                ######################################################
                # Write
                if inputDirPath == outputDirPath:
                    # In-place mode: write alongside the original with a suffix
                    (name, ext) = os.path.splitext(filePath)
                    patchedFilePath = name + ('-anon' if anonymize else
                                              '') + '-patched' + ext
                else:
                    patchedFilePath = os.path.abspath(
                        os.path.join(rootOutput, file))
                    if not os.path.exists(rootOutput):
                        os.makedirs(rootOutput)

                self.addLog('  Writing DICOM...')
                dicom.write_file(patchedFilePath, ds)
                self.addLog('  Created DICOM file: %s' % patchedFilePath)

        self.addLog('DICOM patching completed.')
 def export(self, filename):
     """Write the stored plan dataset (self.plan) to *filename* as DICOM."""
     # self.plan is presumably a pydicom dataset -- TODO confirm against the class init
     dcm.write_file(filename, self.plan)
Beispiel #33
0
# di = np.array(0.01*(d/d_cal5cm)*MU*nFrac/dosescaling,np.int)
di = np.array((d / d_ch) * nFrac * MU * D_ch_MU / dosescaling, np.int)
dcmdose.PixelData = struct.pack(format, *(di.tolist()))

print 'check dose volume sizes'
print 'Pixel Range:', x[0], x[-1], y[0], y[-1], z[0], z[-1]
print 'dx, dy, dz:', dx, dy, dz
print 'ImageOrigien:', dcmdose.ImagePositionPatient
print 'sizes:', xsize, ysize, zsize, np.size(x), np.size(y), np.size(z)
print 'Max D/Ne:', d.max(
), 'Abs Dmax:', d.max() * MU * nFrac * D_ch_MU / d_ch, '<--'
print 'PixelSpacing:', dcmdose.PixelSpacing, 'MaxDose:', di.max()
print length, np.size(d), np.size(e)
print 'New Image Position:', dcmdose.ImagePositionPatient
print 'OffsetVector Length:', np.size(dcmdose.GridFrameOffsetVector)
print

dcmdose.SOPInstanceUID = get_dicomuid()
dcmdose.StudyUID = get_dicomuid()
dcmdose.SeriesUID = get_dicomuid()
dcmdose.FrameUID = get_dicomuid()
dcmdose.SyncUID = get_dicomuid()
dcmdose.SrUID = get_dicomuid()
dcmdose.StudyInstanceUID = get_dicomuid()
dcmdose.SeriesInstanceUID = get_dicomuid()

dicom.write_file(outfile, dcmdose)
print 'New DCM RD file created: ', outfile

#print xsize, ysize, zsize, length, length*dcmdose.BitsAllocated/8, max(di)
 def export(self,filename):
     """Write the stored plan dataset (self.plan) to *filename* as DICOM."""
     # self.plan is presumably a pydicom dataset -- TODO confirm against the class init
     dcm.write_file(filename,self.plan)
Beispiel #35
0
# Earlier scaling variants kept for reference:
# di = np.array(d*pdmax/d.max(),np.int)
# di = np.array(0.01*(d/d_cal5cm)*MU*nFrac/dosescaling,np.int)
# Convert the per-voxel dose to integer pixel values using the chamber
# calibration (d_ch, D_ch_MU), fractionation and the dataset's dose scaling.
di = np.array((d/d_ch)*nFrac*MU*D_ch_MU/dosescaling,np.int)
# Pack the integer dose grid into the DICOM PixelData byte string.
dcmdose.PixelData = struct.pack(format,*(di.tolist()))

# --- Diagnostics: sanity-check the rescaled dose grid before writing ---
print 'check dose volume sizes'
print 'Pixel Range:', x[0],x[-1],y[0],y[-1],z[0],z[-1]
print 'dx, dy, dz:', dx, dy, dz
print 'ImageOrigien:',dcmdose.ImagePositionPatient
print 'sizes:',xsize, ysize, zsize, np.size(x),np.size(y),np.size(z)
print 'Max D/Ne:',d.max(), 'Abs Dmax:',d.max()*MU*nFrac*D_ch_MU/d_ch, '<--'
print 'PixelSpacing:',dcmdose.PixelSpacing, 'MaxDose:',di.max()
print length, np.size(d), np.size(e)
print 'New Image Position:', dcmdose.ImagePositionPatient
print 'OffsetVector Length:', np.size(dcmdose.GridFrameOffsetVector)
print

# Assign fresh UIDs (via get_dicomuid) so the modified dose file does not
# reuse identifiers from the original instance.
# NOTE(review): StudyUID/SeriesUID/FrameUID/SyncUID/SrUID are not standard
# DICOM keywords -- pydicom will store them as plain attributes, not tags.
dcmdose.SOPInstanceUID = get_dicomuid()
dcmdose.StudyUID = get_dicomuid()
dcmdose.SeriesUID = get_dicomuid()
dcmdose.FrameUID = get_dicomuid()
dcmdose.SyncUID = get_dicomuid()
dcmdose.SrUID = get_dicomuid()
dcmdose.StudyInstanceUID = get_dicomuid()
dcmdose.SeriesInstanceUID = get_dicomuid()

# Write the patched RT Dose dataset to disk.
dicom.write_file(outfile, dcmdose)
print 'New DCM RD file created: ',outfile

#print xsize, ysize, zsize, length, length*dcmdose.BitsAllocated/8, max(di)
    # Command-line options for batch anonymization (handled below via
    # anonymize_dicom, defined elsewhere in this file).
    parser.add_option('--in_dir',\
                      help='Input dicom directory. All dicom images inside \
                      this directory will be anonymized. It is assumed that \
                      all files inside this directory are dicom files.',\
                      dest='in_dir', metavar='<string>', default=None)
    parser.add_option('--out_dir',\
                      help='Output dicom directory. If none specified, the input \
                      directory be used as the output directory, overwriting \
                      the dicom images that were read from the directory.',\
                      dest='out_dir', metavar='<string>', default=None)

    (options, args) = parser.parse_args()

    # Single-image mode: anonymize one file in memory; it is written out
    # only when an output path was supplied.
    if options.in_im is not None:
        assert isfile(options.in_im), 'File does not exist'
        ds = dicom.read_file(options.in_im)
        anonymize_dicom(ds)
        if options.out_im is not None:
            dicom.write_file(options.out_im, ds)

    # Directory mode: every regular file in in_dir is treated as DICOM.
    # NOTE(review): despite the --out_dir help text, nothing is written when
    # out_dir is omitted -- the anonymized datasets are silently discarded.
    if options.in_dir is not None:
        assert isdir(options.in_dir), 'Directory does not exist'
        files = [f for f in listdir(options.in_dir) if \
                 isfile(join(options.in_dir, f))]
        for f in files:
            ds = dicom.read_file(join(options.in_dir, f))
            anonymize_dicom(ds)
            if options.out_dir is not None:
                assert isdir(options.out_dir), 'Directory does not exist'
                dicom.write_file(join(options.out_dir, f), ds)
Beispiel #37
0
def encrypt_dicom_name(dcm):
    """Encrypt or decrypt the anonymizable name fields of one DICOM file.

    :param dcm: tuple ``(dicomname, (bool_encrypt, bool_digitcheck))`` --
        *dicomname* is the file path; *bool_encrypt* selects encryption
        (True) vs. decryption (False); *bool_digitcheck* enables skipping
        of names containing digits (assumed already anonymized).

    Operates on the module-level ``fields_to_anon`` list using the
    module-level ``KEY``; the file is rewritten in place only if at least
    one field actually changed.
    """
    # ( dicomname, (bool_encrypt, bool_digitcheck))
    dcmname = dcm[0]
    bool_encrypt, bool_digitcheck = dcm[1]

    bool_write = False

    try:
        dcminf = dicom.read_file(dcmname)
    except Exception:
        # Not a readable DICOM file: bail out silently (best-effort batch
        # run). BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return

    # If it is a dicom, scramble (or restore) every configured field.
    for field in fields_to_anon:

        # Skip fields absent from this dataset.
        if not hasattr(dcminf, field):
            continue
        name = getattr(dcminf, field)

        # Only string-valued fields are anonymized.
        if not isinstance(name, basestring):
            continue
        name = name.encode('ascii', 'ignore')

        if bool_encrypt:
            # Heuristic: a name containing digits is assumed to be
            # already anonymized, so leave it alone.
            if bool_digitcheck:
                bool_hasdigits = bool(re.findall(r'\d+', name))
            else:
                bool_hasdigits = False

            # Ignore names explicitly marked anonymous/volunteer.
            # (BUGFIX: boolean logic previously used bitwise |/& operators.)
            lowered = name.lower()
            if lowered.find("anonymous") >= 0 or lowered.find("volunteer") >= 0:
                continue

            # The additional "_JNO" ending is a safety to prevent items
            # from being re-encrypted. It is assumed that if the name
            # has any numbers, the patient field has already been anonymized.
            if name[-4:] != "_JNO" and not bool_hasdigits:
                anon_name = encrypt_string(name, KEY) + "_JNO"
                setattr(dcminf, field, anon_name)
                bool_write = True

        else:
            # If the name has "_JNO" as the ending, it has been encrypted
            # and needs to be unencrypted.
            if name[-4:] == "_JNO":
                anon_name = unencrypt_string(name[:-4], KEY)
                setattr(dcminf, field, anon_name)
                bool_write = True

    if bool_write:
        dicom.write_file(dcmname, dcminf)
      else:
        # Pixel outside the usable region: zero plastic thickness.
        pix[j][i] = 0

    if (output_dcm==1):
      bin_value = int(round(pix[j][i]*100))        # Convert cm floats into tenths of mm integers to store in dicom image
      # Replicate the binned value across the output_binning x output_binning
      # footprint of the original image.
      for k1 in range(0, output_binning):
        for k2 in range(0, output_binning):
          mammo.pixel_array[j+k1][i+k2] = bin_value

if (output_dcm==1):
  # Save final data as dicom:      help(dicom.write_file)
  print '\n\n Saving final data as DICOM image in integer units of tenths of mm of plastic...'
  mammo.PixelData = mammo.pixel_array.tostring()   # Save the data array into the actual pixel data string
  #dicom.write_file(image_file_name+"_plastic_bin"+str(output_binning)+".dcm", mammo)

  dicom.write_file(image_file_name+"_STL_bin"+str(output_binning)+".dcm", mammo)       # !!STL!!


# Generate triangle mesh:
print '\n Generating the output triangle mesh focused to the x-ray source focal spot (num_air_pix='+str(num_air_pix)+')...'
num_binned_pixels = (num_columns/output_binning) * (num_rows/output_binning) - num_air_pix
num_vertices  = num_binned_pixels * 8     #  8 vertices per printed column
num_triangles = num_binned_pixels * 8 + 2*(num_rows/output_binning) + 2*(num_columns/output_binning)        #!!ReducedTriangles!!

print ' Writing ',num_triangles,' triangles in output file: '+image_file_name+"_bin"+str(output_binning)+'.ply'


# Output file name encodes the binning factor, the focal-spot Z coordinate
# and (below) an optional subtracted layer thickness.
image_file_name2 = image_file_name+"_STL_bin"+str(output_binning)+"_focusedZ"+str(source_coord[2])      # !!STL!!
if (subtract_layer>0.00001):
  image_file_name2 = image_file_name2+"_-"+str(subtract_layer)+"cm"
if (output_mm==1):
Beispiel #39
0
    def BrowseFile(self, fname):
        """Load a user-selected file, probe it for DICOM metadata and
        refresh the import-dialog state.

        Side effects: resets the reader, rebuilds the candidate file list,
        updates the on-disk DICOMDIR (best effort), and pre-fills a
        suggested image title for DICOM data.

        :param fname: path of the file chosen by the user
        """

        self._savedFileName = fname
        self._reader._reader = None
        full_name = os.path.abspath(fname)
        _dir, fname = os.path.split(full_name)

        self._num_list = []

        self._file_list = [full_name]
        self._slice_locations = []

        # is file a DICOM file?
        ds = {}
        examiner = MicroViewIO.wxMicroViewDICOMExaminer(_dir)
        self.bIsDICOM = examiner.IsDICOMFile(full_name)

        # Tags used later to match sibling slices of the same acquisition.
        self.match_tags = {}

        if self.bIsDICOM:

            try:
                # Header only -- pixel data is not needed at this stage.
                ds = dicom.read_file(full_name,
                                     stop_before_pixels=True,
                                     force=True)
                # force=True can parse non-image data; require an image
                # attribute before treating the file as DICOM image data.
                if 'PhotometricInterpretation' not in ds:
                    self.bIsDICOM = False
                if 'AcquisitionNumber' in ds:
                    self.bHasAcquisitionNumber = True

                tags = ['StudyID', 'SeriesNumber', 'AcquisitionNumber']
                if ds.Modality == 'MR':
                    tags.append('EchoNumbers')

                for tag in tags:
                    if tag in ds:
                        self.match_tags[tag] = ds.get(tag)
            except:
                # NOTE(review): bare except deliberately treats any header
                # parsing failure as "not matchable"; consider narrowing.
                pass

        with wx.BusyCursor():
            self.update_file_list(full_name, ds, examiner)

            # update DICOMDIR file
            if self.bIsDICOM:
                dicomdir_filename = os.path.join(_dir, 'DICOMDIR')
                dicomdir = examiner.GetDICOMDIR().get_dicomdir()
                if dicomdir:
                    try:
                        dicom.write_file(dicomdir_filename, dicomdir)
                        logging.info(
                            "Successfully wrote {}.".format(dicomdir_filename))
                    except:
                        # Best effort: e.g. a read-only directory must not
                        # abort the import.
                        pass

        self.guess_image_pixel_size(full_name)

        self.updateGUIState()
        self.updateButtonState()

        # if files are DICOM-format, fill in a reasonable guess at a name
        # built from StudyID, SeriesDescription and PatientName.
        if self.bIsDICOM:
            name = ''
            if 'StudyID' in ds:
                name += '{}_'.format(ds.StudyID)
            if 'SeriesDescription' in ds:
                name += '{}_'.format(ds.SeriesDescription.strip()).replace(
                    ' ', '_')
            if 'PatientName' in ds:
                name += '{}'.format(ds.PatientName.strip()).replace(' ', '_')
            if not name:
                name = "DICOM Import"

            self.m_textCtrlImageTitle.SetValue(name)
Beispiel #40
0
import dicom, os, shutil, sys, string
from dicom.tag import Tag

# for each DICOM file in inputDir, set the content of the tagName
# to be the same as in the first file in that dir

if len(sys.argv)<4:
  print 'Usage: ',sys.argv[0],' inputDir tagName outputDir'
  exit

inputDir = sys.argv[1]
inputTag = sys.argv[2]
outputDir = sys.argv[3]

files = os.listdir(inputDir)
f = files[0]

dcm = dicom.read_file(os.path.join(inputDir,f))
dicom.write_file(os.path.join(outputDir,f),dcm)

firstVal = dcm.data_element(inputTag).value

for f in files[1:]:
  dcm = dicom.read_file(os.path.join(inputDir,f))
  dcm.data_element(inputTag).value = firstVal
  dicom.write_file(os.path.join(outputDir,f),dcm)
  def patchDicomDir(self, inputDirPath, outputDirPath, exportDicom = True, anonymizeDicom = False, exportUltrasoundToNrrd = False):
    """
    Since CTK (rightly) requires certain basic information [1] before it can import
    data files that purport to be dicom, this code patches the files in a directory
    with some needed fields.  Apparently it is possible to export files from the
    Philips PMS QLAB system with these fields missing.

    Calling this function with a directory path will make a patched copy of each file.
    Importing the old files to CTK should still fail, but the new ones should work.

    The directory is assumed to have a set of instances that are all from the
    same study of the same patient.  Also that each instance (file) is an
    independent (multiframe) series.

    :param inputDirPath: directory tree scanned for Philips 4D US DICOM files
    :param outputDirPath: destination for patched copies (defaults to the input dir)
    :param exportDicom: keep the patched DICOM file (if False, it is deleted
      after the optional NRRD export)
    :param anonymizeDicom: scrub identifying fields and replace IDs/UIDs
    :param exportUltrasoundToNrrd: also convert each patched file to NRRD

    [1] https://github.com/commontk/CTK/blob/16aa09540dcb59c6eafde4d9a88dfee1f0948edc/Libs/DICOM/Core/ctkDICOMDatabase.cpp#L1283-L1287
    """

    import dicom

    if not outputDirPath:
      outputDirPath = inputDirPath

    self.addLog('DICOM patching started...')
    logging.debug('DICOM patch input directory: '+inputDirPath)
    logging.debug('DICOM patch output directory: '+outputDirPath)

    # Maps original IDs/UIDs to their random replacements so that repeated
    # values are anonymized consistently across files.
    patientIDToRandomIDMap = {}
    studyUIDToRandomUIDMap = {}
    seriesUIDToRandomUIDMap = {}
    numberOfSeriesInStudyMap = {}

    # All files without a patient ID will be assigned to the same patient
    randomPatientID = dicom.UID.generate_uid(None)

    requiredTags = ['PatientName', 'PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SeriesNumber']
    for root, subFolders, files in os.walk(inputDirPath):

      # Assume that all files in a directory belongs to the same study
      randomStudyUID = dicom.UID.generate_uid(None)

      currentSubDir = os.path.relpath(root, inputDirPath)
      rootOutput = os.path.join(outputDirPath, currentSubDir)

      for file in files:
        filePath = os.path.join(root,file)
        self.addLog('Examining %s...' % os.path.join(currentSubDir,file))
        try:
          ds = dicom.read_file(filePath)
        except (IOError, dicom.filereader.InvalidDicomError):
          self.addLog('  Not DICOM file. Skipped.')
          continue

        # Only the Philips private 4D ultrasound SOP class is processed.
        if ds.SOPClassUID != '1.2.840.113543.6.6.1.3.10002':
          self.addLog('  Not Philips 4D ultrasound DICOM file. Skipped.')
          continue

        self.addLog('  Patching...')

        # Ensure all tags required by CTK exist, at least as empty values.
        for tag in requiredTags:
          if not hasattr(ds,tag):
            setattr(ds,tag,'')

        # Generate a new SOPInstanceUID to avoid different files having the same SOPInstanceUID
        ds.SOPInstanceUID = dicom.UID.generate_uid(None)

        if ds.PatientName == '':
          ds.PatientName = "Unspecified Patient"
        if ds.PatientID == '':
          ds.PatientID = randomPatientID
        if ds.StudyInstanceUID == '':
          ds.StudyInstanceUID = randomStudyUID
        if ds.SeriesInstanceUID == '':
          ds.SeriesInstanceUID = dicom.UID.generate_uid(None)

        # Generate series number to make it easier to identify a sequence within a study
        if ds.SeriesNumber == '':
          if ds.StudyInstanceUID not in numberOfSeriesInStudyMap:
            numberOfSeriesInStudyMap[ds.StudyInstanceUID] = 0
          numberOfSeriesInStudyMap[ds.StudyInstanceUID] = numberOfSeriesInStudyMap[ds.StudyInstanceUID] + 1
          ds.SeriesNumber = numberOfSeriesInStudyMap[ds.StudyInstanceUID]

        if anonymizeDicom:

          self.addLog('  Anonymizing...')

          ds.StudyDate = ''
          ds.StudyTime = ''
          ds.ContentDate = ''
          ds.ContentTime = ''
          ds.AccessionNumber = ''
          ds.ReferringPhysiciansName = ''
          ds.PatientsBirthDate = ''
          ds.PatientsSex = ''
          ds.StudyID = ''
          # Some ID in a private tag - clear it, just in case.
          # BUGFIX: guard the lookup; indexing raised KeyError when the
          # private tag was absent from the file.
          if (0x3001,0x1004) in ds:
            ds[(0x3001,0x1004)].value = ''
          ds.PatientName = "Unspecified Patient"

          # replace ids with random values - re-use if we have seen them before
          if ds.PatientID not in patientIDToRandomIDMap:
            patientIDToRandomIDMap[ds.PatientID] = dicom.UID.generate_uid(None)
          ds.PatientID = patientIDToRandomIDMap[ds.PatientID]
          if ds.StudyInstanceUID not in studyUIDToRandomUIDMap:
            studyUIDToRandomUIDMap[ds.StudyInstanceUID] = dicom.UID.generate_uid(None)
          ds.StudyInstanceUID = studyUIDToRandomUIDMap[ds.StudyInstanceUID]
          # BUGFIX: this lookup previously tested studyUIDToRandomUIDMap, so
          # the series map was never consulted and a fresh series UID was
          # generated for every file even within the same original series.
          if ds.SeriesInstanceUID not in seriesUIDToRandomUIDMap:
            seriesUIDToRandomUIDMap[ds.SeriesInstanceUID] = dicom.UID.generate_uid(None)
          ds.SeriesInstanceUID = seriesUIDToRandomUIDMap[ds.SeriesInstanceUID]

        # Choose the output path: in-place patching appends suffixes so the
        # originals are preserved; otherwise mirror the directory layout.
        if inputDirPath==outputDirPath:
          (name, ext) = os.path.splitext(filePath)
          patchedFilePath = name + ('-anon' if anonymizeDicom else '') + '-patched' + ext
          nrrdFilePath = name + '.seq.nrrd'
        else:
          patchedFilePath = os.path.abspath(os.path.join(rootOutput,file))
          nrrdFilePath = os.path.splitext(patchedFilePath)[0]+'.nrrd'
          if not os.path.exists(rootOutput):
            os.makedirs(rootOutput)

        self.addLog('  Writing DICOM...')
        dicom.write_file(patchedFilePath, ds)
        self.addLog('  Created DICOM file: %s' % patchedFilePath)

        if exportUltrasoundToNrrd and self.isDicomUltrasoundFile(patchedFilePath):
          self.addLog('  Writing NRRD...')

          if self.convertUltrasoundDicomToNrrd(patchedFilePath, nrrdFilePath):
            self.addLog('  Created NRRD file: %s' % nrrdFilePath)
          else:
            self.addLog('  NRRD file save failed')

        if not exportDicom:
          os.remove(patchedFilePath)
          self.addLog('  Deleted temporary DICOM file')

    self.addLog('DICOM patching completed.')
Beispiel #42
0
def anonymize(dir_input, dir_out_base, siteCodeDict, lut_dcm_type, lut_dcm_hdr,
              lut_rda_type, lut_twix_type):
    """Anonymize one scan directory into the output tree.

    The branch taken depends on the name of *dir_input*:
      * ``NNN-...`` series directories: DICOM files (patient name/ID replaced,
        series description normalized, extra header fields overwritten from
        *lut_dcm_hdr*)
      * ``MRS``: Siemens ``.rda`` spectroscopy files
      * ``twix``: Siemens ``.dat`` raw-data files

    :param dir_input: input series/MRS/twix directory
    :param dir_out_base: root of the anonymized output tree
    :param siteCodeDict: site-code lookup used by get_patientID
    :param lut_dcm_type: lookup table mapping DICOM dir names to scan types
    :param lut_dcm_hdr: DICOM header fields to overwrite in every file
    :param lut_rda_type: lookup table mapping rda series names to scan types
    :param lut_twix_type: lookup table mapping twix file names to scan types
    """
    # anonymize dicom
    if re.match('[0-9]{3}-*', dir_input.split('/')[-1]):
        [subjectID, sessionID] = get_patientID(dir_input, siteCodeDict)
        dir_input_clean = dir_input.replace('-', '_')
        series_num = int(
            dir_input_clean.split('/')[-1].split('_')[0].lstrip("0"))
        new_series_description = get_scan_type(dir_input_clean, lut_dcm_type)

        # series number is stripped of leading zeroes
        # pad with zeroes here so directories are listed in proper order
        dir_out_full = '%s/%s/%s/%03d-%s/DICOM' % (dir_out_base, subjectID,
                                                   sessionID, series_num,
                                                   new_series_description)
        if new_series_description is None:
            print('ERROR - Unable to determine scan type for: %s' % dir_input)
        else:
            shutil.copytree(dir_input, dir_out_full)
            os.system('chmod +w -R %s' % (dir_out_full))
            list_dcm_files = os.listdir(dir_input)
            for fname_scan in list_dcm_files:
                ds = pydicom.read_file('%s/%s' % (dir_out_full, fname_scan))
                ds.PatientName = subjectID
                ds.PatientID = sessionID
                ds.SeriesDescription = new_series_description
                ds.SeriesNumber = series_num
                # overwrite every configured header field with its fixed value
                for key in lut_dcm_hdr:
                    setattr(ds, key, lut_dcm_hdr[key])
                pydicom.write_file('%s/%s' % (dir_out_full, fname_scan), ds)

    # anonymize rda
    if dir_input.split('/')[-1] == 'MRS':
        [subjectID, sessionID] = get_patientID(dir_input, siteCodeDict)
        scan_types = os.listdir(dir_input)
        for scan_type in scan_types:
            if scan_type.endswith('.rda'):
                series_num = int(get_rda_field("SeriesNumber", scan_type))
                scan_name = get_rda_field("SeriesDescription", scan_type)
                # BUGFIX: was lut_mrs_type, which is undefined in this scope
                # (the lut_rda_type parameter was never used) -> NameError.
                new_series_description = get_scan_type(scan_name, lut_rda_type)
                dir_out_full = '%s/%s/%s/%03d-%s/RDA' % (
                    dir_out_base, subjectID, sessionID, series_num,
                    new_series_description)
                if new_series_description is None:
                    print('ERROR - Unable to determine scan type for: %s/%s' %
                          (dir_input, scan_type))
                else:
                    if not os.path.exists(dir_out_full):
                        os.makedirs(dir_out_full)
                    # TO DO: remove series_num from output rda filename, as per Steve Arnott's request
                    anonymize_rda(dir_input + '/' + scan_type,
                                  dir_out_full + '/' + scan_type)

    # anonymize twix
    if dir_input.split('/')[-1] == 'twix':
        [subjectID, sessionID] = get_patientID(dir_input, siteCodeDict)
        scan_types = os.listdir(dir_input)
        for scan_type in scan_types:
            if scan_type.endswith('.dat'):
                new_series_description = get_scan_type(scan_type,
                                                       lut_twix_type)
                if new_series_description is None:
                    print('ERROR - Unable to determine scan type for: %s/%s' %
                          (dir_input, scan_type))
                else:
                    dir_out_partial = '%s/%s/%s' % (dir_out_base, subjectID,
                                                    sessionID)
                    # Place the twix file next to the already-anonymized
                    # series whose directory name contains the scan type.
                    dir_to_match = os.listdir(dir_out_partial)
                    dir_match = [
                        x for x in dir_to_match
                        if '%s' % (new_series_description) in x
                    ]
                    if not len(dir_match) == 1:
                        print(
                            'ERROR - Unable to determine matching directory for: %s/%s'
                            % (dir_input, scan_type))
                    else:
                        dir_out_full = '%s/%s/TWIX' % (dir_out_partial,
                                                       dir_match[0])
                        if not os.path.exists(dir_out_full):
                            os.makedirs(dir_out_full)
                        anonymize_twix(dir_input + '/' + scan_type,
                                       dir_out_full + '/' + scan_type)
Beispiel #43
0
  def patchDicomDir(self, inputDirPath, outputDirPath):
    """
    Since CTK (rightly) requires certain basic information [1] before it can import
    data files that purport to be dicom, this code patches the files in a directory
    with some needed fields.

    Calling this function with a directory path will make a patched copy of each file.
    Importing the old files to CTK should still fail, but the new ones should work.

    The directory is assumed to have a set of instances that are all from the
    same study of the same patient.  Also that each instance (file) is an
    independent (multiframe) series.

    Patching behavior is delegated to self.patchingRules: each rule is
    initialized, notified per directory, may veto files, patches each
    dataset, and may rewrite the output file path.

    [1] https://github.com/commontk/CTK/blob/16aa09540dcb59c6eafde4d9a88dfee1f0948edc/Libs/DICOM/Core/ctkDICOMDatabase.cpp#L1283-L1287
    """

    import dicom

    self.addLog('DICOM patching started...')
    logging.debug('DICOM patch input directory: '+inputDirPath)
    logging.debug('DICOM patch output directory: '+outputDirPath)

    # Let every rule initialize itself for this run.
    for rule in self.patchingRules:
      rule.logCallback = self.addLog
      rule.processStart(inputDirPath, outputDirPath)

    for root, subFolders, files in os.walk(inputDirPath):

      currentSubDir = os.path.relpath(root, inputDirPath)
      rootOutput = os.path.join(outputDirPath, currentSubDir)

      # Notify rules that processing of a new subdirectory started
      for rule in self.patchingRules:
        rule.processDirectory(currentSubDir)

      for file in files:
        filePath = os.path.join(root,file)
        self.addLog('Examining %s...' % os.path.join(currentSubDir,file))

        # Give every rule a chance to veto this file.
        skipFileRequestingRule = None
        for rule in self.patchingRules:
          if rule.skipFile(currentSubDir):
            skipFileRequestingRule = rule
            break
        if skipFileRequestingRule:
          # BUGFIX: name the vetoing rule explicitly instead of relying on
          # the leaked `rule` loop variable.
          self.addLog('  Rule '+skipFileRequestingRule.__class__.__name__+' requested to skip this file.')
          continue

        try:
          ds = dicom.read_file(filePath)
        except (IOError, dicom.filereader.InvalidDicomError):
          self.addLog('  Not DICOM file. Skipped.')
          continue

        self.addLog('  Patching...')

        for rule in self.patchingRules:
          rule.processDataSet(ds)

        # Rules may rewrite the destination path (e.g. renaming by tag).
        patchedFilePath = os.path.abspath(os.path.join(rootOutput,file))
        for rule in self.patchingRules:
          patchedFilePath = rule.generateOutputFilePath(ds, patchedFilePath)

        ######################################################
        # Write

        dirName = os.path.dirname(patchedFilePath)
        if not os.path.exists(dirName):
          os.makedirs(dirName)

        self.addLog('  Writing DICOM...')
        dicom.write_file(patchedFilePath, ds)
        self.addLog('  Created DICOM file: %s' % patchedFilePath)

    self.addLog('DICOM patching completed. Patched files are written to:\n{0}'.format(outputDirPath))
Beispiel #44
0
    # Command-line options for batch anonymization (handled below via
    # anonymize_dicom, defined elsewhere in this file).
    parser.add_option('--in_dir',\
                      help='Input dicom directory. All dicom images inside \
                      this directory will be anonymized. It is assumed that \
                      all files inside this directory are dicom files.'                                                                       ,\
                      dest='in_dir', metavar='<string>', default=None)
    parser.add_option('--out_dir',\
                      help='Output dicom directory. If none specified, the input \
                      directory be used as the output directory, overwriting \
                      the dicom images that were read from the directory.'                                                                          ,\
                      dest='out_dir', metavar='<string>', default=None)

    (options, args) = parser.parse_args()

    # Single-image mode: anonymize one file in memory; it is written out
    # only when an output path was supplied.
    if options.in_im is not None:
        assert isfile(options.in_im), 'File does not exist'
        ds = dicom.read_file(options.in_im)
        anonymize_dicom(ds)
        if options.out_im is not None:
            dicom.write_file(options.out_im, ds)

    # Directory mode: every regular file in in_dir is treated as DICOM.
    # NOTE(review): despite the --out_dir help text, nothing is written when
    # out_dir is omitted -- the anonymized datasets are silently discarded.
    if options.in_dir is not None:
        assert isdir(options.in_dir), 'Directory does not exist'
        files = [f for f in listdir(options.in_dir) if \
                 isfile(join(options.in_dir, f))]
        for f in files:
            ds = dicom.read_file(join(options.in_dir, f))
            anonymize_dicom(ds)
            if options.out_dir is not None:
                assert isdir(options.out_dir), 'Directory does not exist'
                dicom.write_file(join(options.out_dir, f), ds)
  def patchDicomDir(self, inputDirPath, outputDirPath, generateMissingIds = True, generateImagePositionFromSliceThickness = True, anonymize = False):
    """
    Since CTK (rightly) requires certain basic information [1] before it can import
    data files that purport to be dicom, this code patches the files in a directory
    with some needed fields.  Apparently it is possible to export files from the
    Philips PMS QLAB system with these fields missing.

    Calling this function with a directory path will make a patched copy of each file.
    Importing the old files to CTK should still fail, but the new ones should work.

    The directory is assumed to have a set of instances that are all from the
    same study of the same patient.  Also that each instance (file) is an
    independent (multiframe) series.

    :param inputDirPath: directory tree scanned for DICOM files
    :param outputDirPath: destination for patched copies (defaults to the input dir)
    :param generateMissingIds: fill in absent patient/study/series identifiers
    :param generateImagePositionFromSliceThickness: synthesize per-frame plane
      positions for multiframe files that lack them
    :param anonymize: scrub identifying fields and replace IDs/UIDs

    [1] https://github.com/commontk/CTK/blob/16aa09540dcb59c6eafde4d9a88dfee1f0948edc/Libs/DICOM/Core/ctkDICOMDatabase.cpp#L1283-L1287
    """

    import dicom

    if not outputDirPath:
      outputDirPath = inputDirPath

    self.addLog('DICOM patching started...')
    logging.debug('DICOM patch input directory: '+inputDirPath)
    logging.debug('DICOM patch output directory: '+outputDirPath)

    # Maps original IDs/UIDs to their random replacements so repeated values
    # are anonymized consistently across files.
    patientIDToRandomIDMap = {}
    studyUIDToRandomUIDMap = {}
    seriesUIDToRandomUIDMap = {}
    numberOfSeriesInStudyMap = {}

    # All files without a patient ID will be assigned to the same patient
    randomPatientID = dicom.UID.generate_uid(None)

    requiredTags = ['PatientName', 'PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SeriesNumber']
    for root, subFolders, files in os.walk(inputDirPath):

      # Assume that all files in a directory belongs to the same study
      randomStudyUID = dicom.UID.generate_uid(None)

      # Assume that all files in a directory belongs to the same series
      randomSeriesInstanceUID = dicom.UID.generate_uid(None)

      currentSubDir = os.path.relpath(root, inputDirPath)
      rootOutput = os.path.join(outputDirPath, currentSubDir)

      for file in files:
        filePath = os.path.join(root,file)
        self.addLog('Examining %s...' % os.path.join(currentSubDir,file))
        if file=='DICOMDIR':
          self.addLog('DICOMDIR file is ignored (its contents may be inconsistent with the contents of the indexed DICOM files, therefore it is safer not to use it)')
          continue
        try:
          ds = dicom.read_file(filePath)
        except (IOError, dicom.filereader.InvalidDicomError):
          self.addLog('  Not DICOM file. Skipped.')
          continue

        self.addLog('  Patching...')

        ######################################################
        # Add missing IDs
        if generateMissingIds:

          # Ensure all tags required by CTK exist, at least as empty values.
          for tag in requiredTags:
            if not hasattr(ds,tag):
              setattr(ds,tag,'')

          # Generate a new SOPInstanceUID to avoid different files having the same SOPInstanceUID
          ds.SOPInstanceUID = dicom.UID.generate_uid(None)

          if ds.PatientName == '':
            ds.PatientName = "Unspecified Patient"
          if ds.PatientID == '':
            ds.PatientID = randomPatientID
          if ds.StudyInstanceUID == '':
            ds.StudyInstanceUID = randomStudyUID
          if ds.SeriesInstanceUID == '':
            #ds.SeriesInstanceUID = dicom.UID.generate_uid(None) # this line has to be used if each file is a separate series
            ds.SeriesInstanceUID = randomSeriesInstanceUID

          # Generate series number to make it easier to identify a sequence within a study
          if ds.SeriesNumber == '':
            if ds.StudyInstanceUID not in numberOfSeriesInStudyMap:
              numberOfSeriesInStudyMap[ds.StudyInstanceUID] = 0
            numberOfSeriesInStudyMap[ds.StudyInstanceUID] = numberOfSeriesInStudyMap[ds.StudyInstanceUID] + 1
            ds.SeriesNumber = numberOfSeriesInStudyMap[ds.StudyInstanceUID]

        ######################################################
        # Add missing slice spacing info to multiframe files
        numberOfFrames = ds.NumberOfFrames if hasattr(ds,'NumberOfFrames') else 1
        if generateImagePositionFromSliceThickness and numberOfFrames>1:
          # Multi-frame sequence, we may need to add slice positions

          # Error in Dolphin 3D CBCT scanners, they store multiple frames but they keep using CTImageStorage as storage class
          if ds.SOPClassUID == '1.2.840.10008.5.1.4.1.1.2': # Computed Tomography Image IOD
            ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2.1' # Enhanced CT Image IOD

          sliceStartPosition = ds.ImagePositionPatient if hasattr(ds,'ImagePositionPatient') else [0,0,0]
          # BUGFIX: the guard below previously tested 'ImagePositionPatient',
          # so files with a position but no orientation raised AttributeError.
          sliceAxes = ds.ImageOrientationPatient if hasattr(ds,'ImageOrientationPatient') else [1,0,0,0,1,0]
          x = sliceAxes[:3]
          y = sliceAxes[3:]
          z = [x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y[1] - x[1] * y[0]] # cross(x,y)
          sliceSpacing = ds.SliceThickness if hasattr(ds,'SliceThickness') else 1.0
          pixelSpacing = ds.PixelSpacing if hasattr(ds,'PixelSpacing') else [1.0, 1.0]

          if not (dicom.tag.Tag(0x5200,0x9229) in ds):

            # (5200,9229) SQ (Sequence with undefined length #=1)     # u/l, 1 SharedFunctionalGroupsSequence
            #   (0020,9116) SQ (Sequence with undefined length #=1)     # u/l, 1 PlaneOrientationSequence
            #       (0020,0037) DS [1.00000\0.00000\0.00000\0.00000\1.00000\0.00000] #  48, 6 ImageOrientationPatient
            #   (0028,9110) SQ (Sequence with undefined length #=1)     # u/l, 1 PixelMeasuresSequence
            #       (0018,0050) DS [3.00000]                                #   8, 1 SliceThickness
            #       (0028,0030) DS [0.597656\0.597656]                      #  18, 2 PixelSpacing

            planeOrientationDataSet = dicom.dataset.Dataset()
            planeOrientationDataSet.ImageOrientationPatient = sliceAxes
            planeOrientationSequence = dicom.sequence.Sequence()
            planeOrientationSequence.insert(dicom.tag.Tag(0x0020,0x9116),planeOrientationDataSet)

            pixelMeasuresDataSet = dicom.dataset.Dataset()
            pixelMeasuresDataSet.SliceThickness = sliceSpacing
            pixelMeasuresDataSet.PixelSpacing = pixelSpacing
            pixelMeasuresSequence = dicom.sequence.Sequence()
            pixelMeasuresSequence.insert(dicom.tag.Tag(0x0028,0x9110),pixelMeasuresDataSet)

            sharedFunctionalGroupsDataSet = dicom.dataset.Dataset()
            sharedFunctionalGroupsDataSet.PlaneOrientationSequence = planeOrientationSequence
            sharedFunctionalGroupsDataSet.PixelMeasuresSequence = pixelMeasuresSequence
            sharedFunctionalGroupsSequence = dicom.sequence.Sequence()
            sharedFunctionalGroupsSequence.insert(dicom.tag.Tag(0x5200,0x9229),sharedFunctionalGroupsDataSet)
            ds.SharedFunctionalGroupsSequence = sharedFunctionalGroupsSequence

          if not (dicom.tag.Tag(0x5200,0x9230) in ds):

            #(5200,9230) SQ (Sequence with undefined length #=54)    # u/l, 1 PerFrameFunctionalGroupsSequence
            #  (0020,9113) SQ (Sequence with undefined length #=1)     # u/l, 1 PlanePositionSequence
            #    (0020,0032) DS [-94.7012\-312.701\-806.500]             #  26, 3 ImagePositionPatient
            #  (0020,9113) SQ (Sequence with undefined length #=1)     # u/l, 1 PlanePositionSequence
            #    (0020,0032) DS [-94.7012\-312.701\-809.500]             #  26, 3 ImagePositionPatient
            #  ...

            perFrameFunctionalGroupsSequence = dicom.sequence.Sequence()

            # One plane position per frame, stepped along the slice normal z.
            for frameIndex in range(numberOfFrames):
              planePositionDataSet = dicom.dataset.Dataset()
              slicePosition = [sliceStartPosition[0]+frameIndex*z[0]*sliceSpacing, sliceStartPosition[1]+frameIndex*z[1]*sliceSpacing, sliceStartPosition[2]+frameIndex*z[2]*sliceSpacing]
              planePositionDataSet.ImagePositionPatient = slicePosition
              planePositionSequence = dicom.sequence.Sequence()
              planePositionSequence.insert(dicom.tag.Tag(0x0020,0x9113),planePositionDataSet)
              perFrameFunctionalGroupsDataSet = dicom.dataset.Dataset()
              perFrameFunctionalGroupsDataSet.PlanePositionSequence = planePositionSequence
              perFrameFunctionalGroupsSequence.insert(dicom.tag.Tag(0x5200,0x9230),perFrameFunctionalGroupsDataSet)

            ds.PerFrameFunctionalGroupsSequence = perFrameFunctionalGroupsSequence

        ######################################################
        # Anonymize
        if anonymize:

          self.addLog('  Anonymizing...')

          ds.StudyDate = ''
          ds.StudyTime = ''
          ds.ContentDate = ''
          ds.ContentTime = ''
          ds.AccessionNumber = ''
          ds.ReferringPhysiciansName = ''
          ds.PatientsBirthDate = ''
          ds.PatientsSex = ''
          ds.StudyID = ''
          ds.PatientName = "Unspecified Patient"

          # replace ids with random values - re-use if we have seen them before
          if ds.PatientID not in patientIDToRandomIDMap:
            patientIDToRandomIDMap[ds.PatientID] = dicom.UID.generate_uid(None)
          ds.PatientID = patientIDToRandomIDMap[ds.PatientID]
          if ds.StudyInstanceUID not in studyUIDToRandomUIDMap:
            studyUIDToRandomUIDMap[ds.StudyInstanceUID] = dicom.UID.generate_uid(None)
          ds.StudyInstanceUID = studyUIDToRandomUIDMap[ds.StudyInstanceUID]
          # BUGFIX: this lookup previously tested studyUIDToRandomUIDMap, so
          # the series map was never consulted and a fresh series UID was
          # generated for every file even within the same original series.
          if ds.SeriesInstanceUID not in seriesUIDToRandomUIDMap:
            seriesUIDToRandomUIDMap[ds.SeriesInstanceUID] = dicom.UID.generate_uid(None)
          ds.SeriesInstanceUID = seriesUIDToRandomUIDMap[ds.SeriesInstanceUID]

        ######################################################
        # Write
        if inputDirPath==outputDirPath:
          # In-place patching: keep originals, append suffixes.
          (name, ext) = os.path.splitext(filePath)
          patchedFilePath = name + ('-anon' if anonymize else '') + '-patched' + ext
        else:
          patchedFilePath = os.path.abspath(os.path.join(rootOutput,file))
          if not os.path.exists(rootOutput):
            os.makedirs(rootOutput)

        self.addLog('  Writing DICOM...')
        dicom.write_file(patchedFilePath, ds)
        self.addLog('  Created DICOM file: %s' % patchedFilePath)

    self.addLog('DICOM patching completed.')
Beispiel #46
0
  def patchDicomDir(self, inputDirPath, outputDirPath):
    """
    Since CTK (rightly) requires certain basic information [1] before it can import
    data files that purport to be dicom, this code patches the files in a directory
    with some needed fields.

    Calling this function with a directory path will make a patched copy of each file.
    Importing the old files to CTK should still fail, but the new ones should work.

    The directory is assumed to have a set of instances that are all from the
    same study of the same patient.  Also that each instance (file) is an
    independent (multiframe) series.

    :param inputDirPath: directory tree walked for candidate DICOM files
    :param outputDirPath: root under which the patched copies are written,
      mirroring the input subdirectory layout

    [1] https://github.com/commontk/CTK/blob/16aa09540dcb59c6eafde4d9a88dfee1f0948edc/Libs/DICOM/Core/ctkDICOMDatabase.cpp#L1283-L1287
    """

    import dicom

    self.addLog('DICOM patching started...')
    logging.debug('DICOM patch input directory: '+inputDirPath)
    logging.debug('DICOM patch output directory: '+outputDirPath)

    # Let every rule initialize itself for this run
    for rule in self.patchingRules:
      rule.logCallback = self.addLog
      rule.processStart(inputDirPath, outputDirPath)

    for root, subFolders, files in os.walk(inputDirPath):

      # Mirror the input directory layout under outputDirPath
      currentSubDir = os.path.relpath(root, inputDirPath)
      rootOutput = os.path.join(outputDirPath, currentSubDir)

      # Notify rules that processing of a new subdirectory started
      for rule in self.patchingRules:
        rule.processDirectory(currentSubDir)

      for file in files:
        filePath = os.path.join(root,file)
        self.addLog('Examining %s...' % os.path.join(currentSubDir,file))

        # Give each rule a chance to veto this file
        skipFileRequestingRule = None
        for rule in self.patchingRules:
          if rule.skipFile(currentSubDir):
            skipFileRequestingRule = rule
            break
        if skipFileRequestingRule:
          # BUGFIX: name the vetoing rule explicitly instead of relying on the
          # leaked 'rule' loop variable still being bound to it after 'break'.
          self.addLog('  Rule '+skipFileRequestingRule.__class__.__name__+' requested to skip this file.')
          continue

        # Not every file in the tree is DICOM; skip unparseable ones quietly.
        try:
          ds = dicom.read_file(filePath)
        except (IOError, dicom.filereader.InvalidDicomError):
          self.addLog('  Not DICOM file. Skipped.')
          continue

        self.addLog('  Patching...')

        # Rules mutate the dataset in place
        for rule in self.patchingRules:
          rule.processDataSet(ds)

        # Rules may also rewrite the destination path (e.g. based on patched UIDs)
        patchedFilePath = os.path.abspath(os.path.join(rootOutput,file))
        for rule in self.patchingRules:
          patchedFilePath = rule.generateOutputFilePath(ds, patchedFilePath)

        ######################################################
        # Write

        dirName = os.path.dirname(patchedFilePath)
        if not os.path.exists(dirName):
          os.makedirs(dirName)

        self.addLog('  Writing DICOM...')
        dicom.write_file(patchedFilePath, ds)
        self.addLog('  Created DICOM file: %s' % patchedFilePath)

    self.addLog('DICOM patching completed. Patched files are written to:\n{0}'.format(outputDirPath))
    def BrowseFile(self, fname):
        """Allows the user to select a filename

        Records the selection, probes whether the file is DICOM, refreshes the
        file list (and DICOMDIR, best-effort), and pre-fills a suggested image
        title from the DICOM header when available.
        """

        self._savedFileName = fname
        self._reader._reader = None
        full_name = os.path.abspath(fname)
        _dir, fname = os.path.split(full_name)

        self._num_list = []

        self._file_list = [full_name]
        self._slice_locations = []

        # is file a DICOM file?
        ds = {}
        examiner = MicroViewIO.wxMicroViewDICOMExaminer(_dir)
        self.bIsDICOM = examiner.IsDICOMFile(full_name)

        self.match_tags = {}

        if self.bIsDICOM:

            try:
                # Header-only read; force=True tolerates missing preambles
                ds = dicom.read_file(
                    full_name, stop_before_pixels=True, force=True)
                if 'PhotometricInterpretation' not in ds:
                    self.bIsDICOM = False
                if 'AcquisitionNumber' in ds:
                    self.bHasAcquisitionNumber = True

                # Tags used to match sibling files into the same volume
                tags = ['StudyID', 'SeriesNumber', 'AcquisitionNumber']
                if ds.Modality == 'MR':
                    tags.append('EchoNumbers')

                for tag in tags:
                    if tag in ds:
                        self.match_tags[tag] = ds.get(tag)
            except Exception:
                # BUGFIX: was a bare 'except: pass', which also swallowed
                # KeyboardInterrupt/SystemExit. Header probing stays
                # best-effort, but the failure is now logged.
                logging.exception(
                    "Failed to parse DICOM header for %s", full_name)

        with wx.BusyCursor():
            self.update_file_list(full_name, ds, examiner)

            # update DICOMDIR file
            if self.bIsDICOM:
                dicomdir_filename = os.path.join(_dir, 'DICOMDIR')
                dicomdir = examiner.GetDICOMDIR().get_dicomdir()
                if dicomdir:
                    try:
                        dicom.write_file(dicomdir_filename, dicomdir)
                        logging.info(
                            "Successfully wrote {}.".format(dicomdir_filename))
                    except Exception:
                        # BUGFIX: narrowed from a bare 'except:'. DICOMDIR
                        # update is optional; log the failure instead of
                        # hiding it completely.
                        logging.exception(
                            "Could not write %s", dicomdir_filename)

        self.guess_image_pixel_size(full_name)

        self.updateGUIState()
        self.updateButtonState()

        # if files are DICOM-format, fill in a reasonable guess at a name
        if self.bIsDICOM:
            name = ''
            if 'StudyID' in ds:
                name += '{}_'.format(ds.StudyID)
            if 'SeriesDescription' in ds:
                name += '{}_'.format(ds.SeriesDescription.strip()
                                     ).replace(' ', '_')
            if 'PatientName' in ds:
                name += '{}'.format(ds.PatientName.strip()).replace(' ', '_')
            if not name:
                name = "DICOM Import"

            self.m_textCtrlImageTitle.SetValue(name)
Beispiel #48
0
    anon_name = args.anon_name
else:
    anon_name = 'anon'
# Fall back to a fixed placeholder patient ID when none was given on the
# command line.
if args.anon_id:
    anon_id = args.anon_id
else:
    anon_id = '00000000'

# One shared FrameOfReferenceUID for the whole batch, so the anonymized
# instances still share a spatial frame of reference.
FOR_uid = get_dicomuid()

for dcmfile in dcm_list:
    dcmfilename = dcmfile.strip()
    dcm = dicom.read_file(dcmfilename)

    dcm.FrameOfReferenceUID = FOR_uid
    # 'PatientsName'/'PatientsSex' are the legacy (pre-1.0) pydicom keywords
    # for (0010,0010)/(0010,0040).
    # NOTE(review): on modern pydicom these are 'PatientName'/'PatientSex' —
    # confirm the pydicom version in use.
    dcm.PatientsName = anon_name
    dcm.PatientID = anon_id
    dcm.PatientsSex = 'O'
    dcm.SOPInstanceUID = get_dicomuid()
    # NOTE(review): 'StudyUID', 'SeriesUID', 'FrameUID', 'SyncUID' and 'SrUID'
    # are not standard DICOM keywords; pydicom stores unknown names as plain
    # Python attributes, so these five assignments are likely never written
    # into the output dataset — verify whether they are intended.
    dcm.StudyUID = get_dicomuid()
    dcm.SeriesUID = get_dicomuid()
    dcm.FrameUID = get_dicomuid()
    dcm.SyncUID = get_dicomuid()
    dcm.SrUID = get_dicomuid()
    # Fresh study/series instance UIDs sever linkage to the original exam.
    dcm.StudyInstanceUID = get_dicomuid()
    dcm.SeriesInstanceUID = get_dicomuid()

    # NOTE(review): prefixing the whole (possibly path-qualified) entry with
    # 'anon_' only works when dcm_list contains bare filenames relative to the
    # current directory — confirm against how dcm_list is built.
    outfile = 'anon_'+dcmfilename
    dicom.write_file(outfile, dcm)