示例#1
0
def _force_consistent_uids(data_dir: Union[str, PathLike],
                           target_uid: Union[str, None] = None):
    """
    Forces all DICOMs inside `data_dir` to have consistent study instance UID

    Will recursively crawl sub-directories of `data_dir` to check for DICOMs

    Parameters
    ----------
    data_dir : str or pathlib.Path
        Path to data directory containing DICOMs that should be overwritten
        with new study instance UID.
    target_uid : str, optional
        Study instance UID to assign to all DICOMs found in `data_dir`. If not
        specified the study instance UID from the first DICOM found will be
        used instead. Default: None
    """

    data_dir = pathlib.Path(data_dir).resolve()

    # (0020,000D) is the StudyInstanceUID tag.
    for n, fn in enumerate(data_dir.rglob('*dcm')):
        img = dcm.read_file(str(fn))
        if target_uid is None and n == 0:
            # No explicit target: adopt the first file's UID and leave that
            # file untouched -- it already carries the target value.
            target_uid = str(img[('0020', '000d')].value)
            continue
        img[('0020', '000d')].value = target_uid
        dcm.write_file(str(fn), img)
def create_new_uids(folder, dicom_tag, dry_run=True):
    """Search folder (and subfolders) for DICOM files and create new UIDs for specified DICOM tag

    Parameters
    ----------
    folder : Folder to (recursively) search for DICOM objects
    dicom_tag : DICOM tag specifying which UID to create new UIDs for (format as "0x0020000D")
    dry_run : Dry run, (True)/False. When True nothing is written; the
        planned old-UID -> new-UID mapping is printed instead.
    """
    # find dcm files
    dcm_files = glob.glob(os.path.join(folder, "**", "*.dcm"), recursive=True)
    tag = int(dicom_tag, 16)
    # Map each existing UID to its replacement so files that shared a UID
    # keep sharing one after the rewrite.
    uid_map = dict()
    # loop over dcm files
    for dcm_file in dcm_files:
        ds = pydicom.read_file(dcm_file)
        # if dicom tag exists in current dcm file
        if tag in ds:
            # record replaced uid and set new uid
            uid = ds[dicom_tag].value
            if uid not in uid_map:
                uid_map[uid] = pydicom.uid.generate_uid()
            ds[dicom_tag] = DataElement(tag, "UI", uid_map[uid])
        else:
            # add dicom tag with new uid
            ds[dicom_tag] = DataElement(tag, "UI", pydicom.uid.generate_uid())
        if not dry_run:
            # write dcm file to disk
            pydicom.write_file(dcm_file, ds, write_like_original=False)
    if dry_run:
        # BUG FIX: the original printed only the last processed filename
        # (and raised NameError when no files were found); report the actual
        # replacement mapping instead.
        print("Uids to update")
        for old_uid, new_uid in uid_map.items():
            print("{} -> {}".format(old_uid, new_uid))
示例#3
0
def _fill_slice_dataset(dataArray, sourceInfo, seriesNumber, seriesUID,
                        seriesDescription, imageComment):
    """Return a deep copy of `sourceInfo` with pixel data and series tags set."""
    ds = copy.deepcopy(sourceInfo)  # never mutate the caller's dataset
    if dataArray.dtype == 'uint16':  # check correct format
        # tobytes() replaces the deprecated/removed ndarray.tostring()
        ds.PixelData = dataArray.tobytes()
    else:
        # round + cast so float volumes serialize as 16-bit unsigned pixels
        ds.PixelData = dataArray.round().astype('uint16').tobytes()
    ds.SeriesNumber = seriesNumber
    # BUG FIX: the series description was computed by the caller but never
    # written to the dataset.
    ds.SeriesDescription = seriesDescription
    ds.SeriesInstanceUID = seriesUID
    ds.SOPInstanceUID = UID.generate_uid()  # every slice needs a unique SOP UID
    if imageComment is not None:
        ds.ImageComment = imageComment
    return ds


def save3dDicom(volume,
                info,
                path,
                newSeriesNumber=None,
                newSeriesDescription=None,
                newImageComment=None,
                startImageNumber=1):
    """Save a 3-D volume as a series of DICOM slices under `path`.

    Parameters
    ----------
    volume : array-like
        Pixel data, slices along the last axis; stored as uint16.
    info : list
        One template dataset per slice (pydicom datasets).
    path : str
        Output directory; created if missing.
    newSeriesNumber : optional
        When given, a fresh SeriesInstanceUID is generated; otherwise the
        first template's series number/UID are reused.
    newSeriesDescription, newImageComment : optional
        Tag overrides; description defaults to the first template's value.
    startImageNumber : int
        Number of the first written image file. Default: 1.
    """
    try:
        os.mkdir(path)
    except OSError:
        # directory already exists (or cannot be created -- writes fail later)
        pass
    if newSeriesNumber is None:
        newSeriesNumber = info[0].SeriesNumber
        newSeriesUID = info[0].SeriesInstanceUID
    else:
        newSeriesUID = UID.generate_uid()

    if newSeriesDescription is None:
        newSeriesDescription = info[0].SeriesDescription

    if len(info) == 1:
        dicomFileData = _fill_slice_dataset(volume[:, :, 0], info[0],
                                            newSeriesNumber, newSeriesUID,
                                            newSeriesDescription,
                                            newImageComment)
        fName = os.path.join(
            path, "image0001.dcm")  # if one wants to save a part of a dataset
        dicom.write_file(fName, dicomFileData)
    else:
        bar = Bar('Saving Dicom', max=len(info))
        for sl in range(len(info)):
            dicomFileData = _fill_slice_dataset(volume[..., sl], info[sl],
                                                newSeriesNumber, newSeriesUID,
                                                newSeriesDescription,
                                                newImageComment)
            fName = os.path.join(
                path, "image%04d.dcm" %
                (sl +
                 startImageNumber))  # if one wants to save a part of a dataset
            dicom.write_file(fName, dicomFileData)
            bar.next()
        bar.finish()
示例#4
0
def nii2dcm(dataset, filename):
    """Write a pydicom ``dataset`` to ``filename`` as a DICOM file.

    Parameters
    ----------
    dataset : pydicom dataset to serialize.
    filename : output path, e.g. 'test.dcm'.
    """
    # TODO transfer array to dicom directly
    # NOTE(review): pydicom.write_file is a deprecated alias (removed in
    # pydicom 3.x); dcmwrite / Dataset.save_as is the modern spelling --
    # confirm the pinned pydicom version before changing it.
    pydicom.write_file(filename, dataset)
示例#5
0
 def write(self, outdir='.', print_filenames=False):
     """Build every series and write the resulting DICOM datasets to disk.

     outdir : directory the files are written into (default: current dir).
     print_filenames : when True, echo each written filename to stdout.
     """
     # Iterate modalities in their configured order so output is deterministic.
     for modality in self.modalityorder:
         for sb in self.seriesbuilders[modality]:
             print modality, sb
             for ds in sb.build():
                 # ds.filename is assumed to be set by the series builder -- TODO confirm
                 dicom.write_file(os.path.join(outdir, ds.filename), ds)
                 if print_filenames:
                     print ds.filename
示例#6
0
 def write(self, outdir='.', print_filenames=False):
     """Build every series and write the resulting DICOM datasets to disk.

     outdir : directory the files are written into (default: current dir).
     print_filenames : when True, echo each written filename to stdout.
     """
     # Iterate modalities in their configured order so output is deterministic.
     for modality in self.modalityorder:
         for sb in self.seriesbuilders[modality]:
             print modality, sb
             for ds in sb.build():
                 # ds.filename is assumed to be set by the series builder -- TODO confirm
                 dicom.write_file(os.path.join(outdir, ds.filename), ds)
                 if print_filenames:
                     print ds.filename
 def test_non_fatal_errors(self):
     """A minimal CT dataset should validate with errors, but none fatal."""
     dataset = Dataset()
     dataset.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage
     file_dataset = FileDataset('test', dataset, file_meta=self.create_metadata())
     # write_like_original=False produces a conformant file (preamble + meta).
     write_file('test', file_dataset, write_like_original=False)
     error_dict = self.validator.validate('test')
     self.assertEqual(1, len(error_dict))
     errors = error_dict['test']
     # The dataset is deliberately incomplete, so errors are expected --
     # they just must not be classified as fatal.
     self.assertNotIn('fatal', errors)
示例#8
0
 def write_to_file(self, output_file, write_like_original=False):
     """Writes the current IOD to file

     Parameters
     ----------
     output_file : Complete path of file to write to
     write_like_original : bool
         Passed through to pydicom's writer; False (the default) writes a
         conformant file with preamble and file meta information.
     """
     write_file(output_file,
                self.dataset,
                write_like_original=write_like_original)
示例#9
0
def maskTheInfo(inp, opRoot, indexBegin=0):
    """De-identify paired CT/PET studies.

    Copies each patient's CT and PET DICOMs into a numbered folder under
    `opRoot`, replacing PatientID / PatientName / AccessionNumber with the
    anonymous index, copies the report document if present, and records an
    (index, original id) menu via `writeDic`.

    Parameters
    ----------
    inp : root directory of identifiable data, one folder per patient.
    opRoot : output root; one numbered folder per patient is created.
    indexBegin : first anonymous index to assign. Default: 0.
    """
    if not os.path.exists(opRoot):
        os.mkdir(opRoot)

    patientNames = natsort.natsorted(os.listdir(inp))
    index = indexBegin
    menu = []
    for patientFileId in patientNames:
        patientDirPath = os.path.join(opRoot, str(index))
        dirCtPath = os.path.join(inp, patientFileId, config.ctInDir)
        print(dirCtPath + ':' + str(index))
        fileCtNames = os.listdir(dirCtPath)
        os.mkdir(patientDirPath)
        opCtDir = os.path.join(patientDirPath, config.ctOutDir)
        os.mkdir(opCtDir)
        # BUG FIX: initialize pid so the menu entry below cannot raise
        # NameError when a patient has no PT_ files.
        pid = ''
        for fileCtName in fileCtNames:
            if fileCtName.startswith('CT_'):
                meta = pdm.read_file(os.path.join(dirCtPath, fileCtName))
                # Overwrite identifying tags with the anonymous index.
                meta.PatientID = str(index)
                # BUG FIX: the DICOM keyword is 'AccessionNumber' -- the
                # original 'Accession Number' (with a space) only created a
                # dead Python attribute, so the tag (which also contains the
                # patient id) was never anonymized.
                meta.AccessionNumber = str(index)
                meta.PatientName = str(index)
                pdm.write_file(os.path.join(opCtDir, fileCtName + '.dcm'),
                               meta, True)

        dirPetPath = os.path.join(inp, patientFileId, config.ptInDir)
        filePetNames = os.listdir(dirPetPath)
        opPetDir = os.path.join(patientDirPath, config.ptOutDir)
        os.mkdir(opPetDir)
        for filePetName in filePetNames:
            if filePetName.startswith('PT_'):
                petmeta = pdm.read_file(os.path.join(dirPetPath, filePetName))
                pid = petmeta.PatientID
                petmeta.PatientID = str(index)
                # BUG FIX: this line previously mutated `meta` (the last CT
                # dataset) instead of `petmeta`.
                petmeta.AccessionNumber = str(index)
                petmeta.PatientName = str(index)
                pdm.write_file(os.path.join(opPetDir, filePetName + '.dcm'),
                               petmeta, True)

        # Copy the report document, if one exists.
        tryDoc = glob.glob(
            os.path.join(inp, patientFileId, '*' + config.docEnd))
        if tryDoc:
            docDesPath = os.path.join(patientDirPath,
                                      str(index) + 'report' + config.docEnd)
            shutil.copy(tryDoc[0], docDesPath)

        menu.append((index, pid + '_' + patientFileId))
        index = index + 1

        # Written every iteration so the menu survives a mid-run crash.
        writeDic(menu)
def fix_file_and_write(dicom_file: str,
                       dicom_fixed_file: str,
                       anatomy: tuple,
                       reference: dict,
                       dicom_fix_report_file: str = ''):
    """Fix a DICOM file, persist the fixed dataset, and optionally save the
    fix report.

    Parameters
    ----------
    dicom_file : path of the input DICOM file.
    dicom_fixed_file : path the fixed dataset is written to.
    anatomy, reference : passed straight through to `fix_file`.
    dicom_fix_report_file : when non-empty, the textual report is written here.

    Returns
    -------
    tuple of (fixed dataset, fix report string).
    """
    fixed_ds, report = fix_file(dicom_file, anatomy, reference)
    pydicom.write_file(dicom_fixed_file, fixed_ds)
    if dicom_fix_report_file:
        ctools.WriteStringToFile(dicom_fix_report_file, report)
    return (fixed_ds, report)
示例#11
0
def generate_dicom_scans(dst, num_scans=10, intercept=0, slope=1):
    """Generate `num_scans` random synthetic DICOM scans under `dst`.

    Each scan gets its own 8-hex-digit folder containing one file per slice,
    with randomized pixel data, spacing and origin.

    Parameters
    ----------
    dst : output root directory.
    num_scans : number of scans to create. Default: 10.
    intercept, slope : rescale intercept/slope written into the files; pixel
        data is stored as (raw - intercept) / slope cast to uint16.
    """
    spacing = (0.4 + 0.4 * np.random.rand(num_scans, 3) +
               np.array([1 + 0.5 * np.random.rand(), 0, 0]))
    origin = np.random.randint(-200, 200, (num_scans, 3))
    for i in range(num_scans):
        num_slices = np.random.randint(128, 169)
        scan_id = np.random.randint(2**16)
        scan_data = np.random.randint(0, 256, (num_slices, 128, 128))
        folder = os.path.join(dst,
                              hex(scan_id).replace('x', '').upper().zfill(8))

        if not os.path.exists(folder):
            os.makedirs(folder)

        for k in range(num_slices):
            slice_name = (hex(scan_id + k).replace('x', '').upper().zfill(8))
            filename = os.path.join(folder, slice_name)
            pixel_array = (scan_data[k, ...] - intercept) / slope
            locZ = float(origin[i, 0] + spacing[i, 0] * k)
            locY, locX = float(origin[i, 1]), float(origin[i, 2])

            file_meta = DicomDataset()
            file_meta.MediaStorageSOPClassUID = "Secondary Capture Image Storage"
            # BUG FIX: attribute was misspelled 'MediaStorateSOPInstanceUID',
            # so the real MediaStorageSOPInstanceUID was never populated.
            file_meta.MediaStorageSOPInstanceUID = (hex(scan_id).replace(
                'x', '').upper().zfill(8))

            file_meta.ImplementationClassUID = slice_name

            dataset = DicomFileDataset(filename, {},
                                       file_meta=file_meta,
                                       preamble=b"\0" * 128)

            # tobytes() replaces the deprecated/removed ndarray.tostring().
            dataset.PixelData = pixel_array.astype(np.uint16).tobytes()
            dataset.RescaleSlope = slope
            dataset.RescaleIntercept = intercept

            dataset.ImagePositionPatient = MultiValue(
                type_constructor=float, iterable=[locZ, locY, locX])

            dataset.PixelSpacing = MultiValue(
                type_constructor=float,
                iterable=[float(spacing[i, 1]),
                          float(spacing[i, 2])])
            dataset.SliceThickness = float(spacing[i, 0])

            dataset.Modality = 'WSD'
            # BUG FIX: Rows is the first array axis and Columns the second;
            # the original swapped them (harmless only because slices are
            # square 128x128 here).
            dataset.Rows = pixel_array.shape[0]
            dataset.Columns = pixel_array.shape[1]
            dataset.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
            dataset.PixelRepresentation = 1
            dataset.BitsAllocated = 16
            dataset.BitsStored = 16
            dataset.SamplesPerPixel = 1

            write_file(filename, dataset)
示例#12
0
def compare_dicom_cli(command, original, expected):
    """Round-trip `original` through the CLI `command` and assert that the
    adjusted output file stringifies identically to `expected`.

    Both temporary files are removed whether or not the assertion passes.
    """
    pydicom.write_file(ORIGINAL_DICOM_FILENAME, original)

    try:
        subprocess.check_call(command)
        adjusted = pydicom.read_file(ADJUSTED_DICOM_FILENAME, force=True)
        assert str(adjusted) == str(expected)
    finally:
        for leftover in (ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME):
            remove_file(leftover)
def update_frame_of_ref(root):
    """Give every DICOM in `root` a consistent FrameOfReferenceUID per
    Rows x Columns group.

    Files are grouped by their (0028,0010)/(0028,0011) Rows/Columns values;
    each group reuses an existing (0020,0052) UID when one is present,
    otherwise a random one is generated. When more than one group exists the
    groups are moved into sub-folders. A 'Finished.txt' marker is written
    once processing is done.
    """
    finished = os.path.join(root, 'Finished.txt')
    folder_dict = {}
    image_reader = sitk.ImageFileReader()
    dicom_files = [
        os.path.join(root, i) for i in os.listdir(root) if i.endswith('.dcm')
    ]
    for file in dicom_files:
        image_reader.SetFileName(file)
        try:
            image_reader.Execute()
        except Exception:
            # FIX: narrowed from a bare `except:`; unreadable files are
            # deliberately skipped (best-effort crawl).
            continue
        rows_cols = '{}_{}'.format(image_reader.GetMetaData("0028|0010"),
                                   image_reader.GetMetaData("0028|0011"))
        if rows_cols not in folder_dict:
            folder_dict[rows_cols] = {
                'files': [],
                'uid': None,
                'needs_uid': []
            }
        # BUG FIX: compare strings with != rather than `is not` (identity
        # comparison with a literal is undefined behavior / SyntaxWarning).
        if image_reader.HasMetaDataKey(
                "0020|0052") and image_reader.GetMetaData(
                    "0020|0052") != '':
            if folder_dict[rows_cols]['uid'] is None:
                folder_dict[rows_cols]['uid'] = image_reader.GetMetaData(
                    "0020|0052")
            folder_dict[rows_cols]['needs_uid'].append(False)
        else:
            folder_dict[rows_cols]['needs_uid'].append(True)
        folder_dict[rows_cols]['files'].append(file)
    for key in folder_dict:
        base_uid = folder_dict[key]['uid']
        if base_uid is None:
            base_uid = '1.3.12.2.1107.5.1.4.95642.30000017092813491242400000003.{}'.format(
                np.random.randint(9999))
        # Boolean-mask the file list down to the files missing the UID.
        file_list = np.asarray(
            folder_dict[key]['files'])[folder_dict[key]['needs_uid']]
        for file in file_list:
            ds = pydicom.read_file(file)
            ds.FrameOfReferenceUID = base_uid
            pydicom.write_file(filename=file, dataset=ds)
        if len(folder_dict) > 1:
            os.makedirs(os.path.join(root, key))
            for file in folder_dict[key]['files']:
                file_name = os.path.split(file)[-1]
                os.rename(file, os.path.join(root, key, file_name))
            fid = open(os.path.join(root, key, 'Finished.txt'), 'w+')
            fid.close()
        else:
            fid = open(finished, 'w+')
            fid.close()
示例#14
0
def adjust_RED_cli(args):
    """CLI wrapper: read a DICOM file, adjust relative electron densities
    according to the flat key/value list in ``args.adjustment_map`` and
    write the result to ``args.output_file``.
    """
    # args.adjustment_map is a flat [key, value, key, value, ...] list;
    # pair up even and odd positions.
    structure_names = args.adjustment_map[::2]
    density_values = args.adjustment_map[1::2]
    adjustment_map = dict(zip(structure_names, density_values))

    dicom_dataset = pydicom.read_file(args.input_file, force=True)
    new_dicom_dataset = adjust_rel_elec_density(
        dicom_dataset, adjustment_map,
        ignore_missing_structure=args.ignore_missing_structure)

    pydicom.write_file(args.output_file, new_dicom_dataset)
def change_name_and_pat_id(patient_id, root):
    """Rename `IM*` files in `root` to `.dcm` and stamp `patient_id` into
    each file's PatientID tag.

    Stops at the first file that already carries `patient_id`, on the
    assumption that the whole folder was converted previously.
    """
    print(root)
    im_files = [name for name in os.listdir(root) if name.startswith('IM')]
    for name in im_files:
        if not name.endswith('.dcm'):
            # Give the file a .dcm extension before processing it.
            os.rename(os.path.join(root, name),
                      os.path.join(root, name + '.dcm'))
            name = name + '.dcm'
        full_path = os.path.join(root, name)
        ds = pydicom.read_file(full_path)
        if ds.PatientID == patient_id:  # Already have converted the files in this folder
            break
        ds.PatientID = patient_id
        pydicom.write_file(full_path, ds)
    return None
示例#16
0
def pseudo_anonymize(dicom_file):
    """Best-effort in-place pseudo-anonymization of a single DICOM file.

    PatientID is replaced via `anonymize`, patient/physician names are
    blanked, and the birth date is truncated to January 1st of the birth
    year. Any failure is deliberately swallowed so one bad file does not
    abort batch processing.
    """
    try:
        data = read_file(dicom_file)
        if hasattr(data, 'PatientID'):
            data.PatientID = anonymize(data.PatientID)
        if hasattr(data, 'PatientName'):
            data.PatientName = ''
        if hasattr(data, 'PatientBirthDate'):
            # Keep only the birth year for pseudo-anonymity.
            data.PatientBirthDate = data.PatientBirthDate[0:4] + "0101"
        if hasattr(data, 'PerformingPhysicianName'):
            data.PerformingPhysicianName = ''
        write_file(dicom_file, data)
    except Exception:
        # FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; the best-effort behavior is kept.
        pass
示例#17
0
def compare_dicom_cli(command, original, expected):
    """Round-trip `original` through the CLI `command` and assert the
    adjusted output stringifies identically to `expected`.

    Skipped entirely when the SUBPACKAGE environment variable is set;
    temp files are always cleaned up.
    """
    # TODO: Extract CLI definition from subpackages and test separately
    if "SUBPACKAGE" in os.environ:
        return

    pydicom.write_file(ORIGINAL_DICOM_FILENAME, original)

    try:
        subprocess.check_call(command)
        cli_adjusted_ds = pydicom.read_file(ADJUSTED_DICOM_FILENAME,
                                            force=True)

        assert str(cli_adjusted_ds) == str(expected)
    finally:
        remove_file(ORIGINAL_DICOM_FILENAME)
        remove_file(ADJUSTED_DICOM_FILENAME)
示例#18
0
    def run_ano(self):
        """
        Anonymize the loaded DICOM dataset, stamp the de-identification
        tags, and write the result to the configured output file.

        Returns 1 on completion.
        """
        # Walk the dataset with the anonymization callback unless it has
        # already been done.
        if not self.ano_run:
            self._dataset.walk(self._anonymize_check)

        # Patient Identity Removed (0012,0062)
        self._dataset.add_new((0x0012, 0x0062), 'CS', 'YES')
        # De-identification Method (0012,0063)
        deid_method = f'CATI DEIDENTIFICATION - {deid.__version__}'
        if self.config_profile:
            deid_method = f'{deid_method} - {self.config_profile}'
        self._dataset.add_new((0x0012, 0x0063), 'LO', deid_method)

        pydicom.write_file(self._dicom_fileout, self._dataset)
        return 1
示例#19
0
    def add(self, contours, roiname, roinumber, color):
        """Append a new ROI (structure, contour set and observation) to the
        RT Structure Set dataset and overwrite the file on disk.

        Parameters
        ----------
        contours : iterable of point arrays; each is flattened to x,y,z
            triplets and kept only if it has more than one point.
        roiname : display name (coerced to ASCII via `asASCII`).
        roinumber : ROI number used to cross-reference the three sequences.
        color : RGB display color for the contours.
        """
        def newROI():
            # Clone an existing closed-planar ROI as a template for the new one.
            old = self.findFirstClosedPlanarROI()
            new = copy.deepcopy(old)
            new.ROINumber = roinumber
            new.ROIGenerationAlgorithm = 'PW'
            new.ROIName = asASCII(roiname)
            return new

        self.dataset.StructureSetROIs.append(newROI())

        def newContours():
            old = self.findFirstClosedPlanarContour()
            new = copy.deepcopy(old)
            # NOTE(review): RefdROINumber / ReferencedROINumber appear to be
            # alternate spellings of the same tag -- both are set for safety.
            new.RefdROINumber = roinumber
            new.ReferencedROINumber = roinumber
            new.ROIDisplayColor = color
            # Keep one existing contour item as a template, then rebuild the list.
            template = copy.deepcopy(new.Contours[0])
            del new.Contours[:]
            for c in contours:
                c = c.flatten()
                numberOfPoints = int(len(c) / 3)  # 3 coordinates per point
                if numberOfPoints > 1:  # skip degenerate single-point contours
                    nc = copy.deepcopy(template)
                    nc.NumberofContourPoints = numberOfPoints
                    nc.ContourData = ['%g' % x for x in c]
                    nc.ContourGeometricType = 'CLOSED_PLANAR'
                    new.Contours.append(nc)
            return new

        self.dataset.ROIContours.append(newContours())

        def newObservation():
            # Clone the last observation and bump its number.
            old = self.findLastObservation()
            new = copy.deepcopy(old)
            new.ObservationNumber = old.ObservationNumber + 1
            new.ROIObservationLabel = asASCII(roiname)
            new.ReferencedROINumber = roinumber
            new.RefdROINumber = roinumber
            return new

        self.dataset.RTROIObservations.append(newObservation())
        # overwrite the dataset
        pydicom.write_file(self.dataset.filename, self.dataset)
示例#20
0
def handle_and_mv_export_dir(data, file):
    """Strip person names from `data`, then either discard it (encapsulated
    PDFs) or export it into a per-id subdirectory of EXPORT_DIR.

    The source `file` is removed in every path.
    """
    data.walk(person_names_callback)

    def _export_and_remove():
        # Export into EXPORT_DIR/<id>/ (created on demand), then drop the
        # source file. Shared by the non-PDF path and the fallback path.
        patient_dir = os.path.join(EXPORT_DIR, get_id(data))
        if not os.path.isdir(patient_dir):
            os.mkdir(patient_dir)
        write_file(os.path.join(patient_dir, os.path.basename(file)), data)
        os.remove(file)

    try:
        storage = data.MIMETypeOfEncapsulatedDocument
        if storage == 'application/pdf':
            # Encapsulated PDFs are not exported at all.
            os.remove(file)
        else:
            _export_and_remove()
    except Exception:
        # No (readable) MIMETypeOfEncapsulatedDocument tag: treat the file
        # as a regular exportable dataset.
        _export_and_remove()
def add_frame_of_reference_uid(path):
    """Ensure every DICOM under `path` carries a FrameOfReferenceUID
    (0020,0052).

    Each folder gets one shared, randomly-suffixed UID applied to any file
    missing the tag. A 'HasFrameOfReferenceUID.txt' marker makes the
    operation idempotent per folder.
    """
    reader = sitk.ImageFileReader()
    for folder, _, file_names in os.walk(path):
        marker = os.path.join(folder, 'HasFrameOfReferenceUID.txt')
        if os.path.exists(marker):
            # Folder was processed on a previous run.
            continue
        dicom_paths = [
            os.path.join(folder, name) for name in file_names
            if name.endswith('.dcm')
        ]
        if not dicom_paths:
            continue
        # One shared UID per folder, with a random suffix.
        shared_uid = '1.3.12.2.1107.5.1.4.95642.30000017092813491242400000003.{}'.format(
            np.random.randint(9999))
        for dicom_path in dicom_paths:
            reader.SetFileName(dicom_path)
            reader.Execute()
            if not reader.HasMetaDataKey("0020|0052"):
                ds = pydicom.read_file(dicom_path)
                ds.FrameOfReferenceUID = shared_uid
                pydicom.write_file(filename=dicom_path, dataset=ds)
        open(marker, 'w+').close()
    return None
示例#22
0
    def save_as(self, filename, write_like_original=True):
        """Write the dataset to a file.

        Parameters
        ----------
        filename : str
            Name of file to save new DICOM file to.
        write_like_original : boolean
            If True (default), preserves the following information from
            the dataset:
            - preamble -- if there was no preamble in the read file, then
              none is used here
            - hasFileMeta -- if the writer did not write file meta
              information, then don't write it here either
            - seq.is_undefined_length -- if the original had delimiters,
              write them now too, instead of the more sensible explicit
              length characters
            - is_undefined_length_sequence_item -- for datasets that belong
              to a sequence, write the undefined length delimiters if that
              is what the original had.
            If False, produces a "nicer" DICOM file for other readers,
            where all lengths are explicit.

        See Also
        --------
        pydicom.filewriter.write_file
            Write a DICOM file from a FileDataset instance.

        Notes
        -----
        Set ``dataset.preamble`` if you want something other than 128
        0-bytes. If the dataset was read from an existing DICOM file, then
        its preamble was stored at read time. It is up to the user to ensure
        the preamble is still correct for its purposes.

        If there is no Transfer Syntax tag in the dataset, then set
        ``dataset.is_implicit_VR`` and ``dataset.is_little_endian``
        to determine the transfer syntax used to write the file.
        """
        pydicom.write_file(filename, self, write_like_original)
示例#23
0
    def save_as(self, filename, write_like_original=True):
        """Write the dataset to a file.

        Parameters
        ----------
        filename : str
            Name of file to save new DICOM file to.
        write_like_original : boolean
            If True (default), preserves the following information from
            the dataset:
            - preamble -- if there was no preamble in the read file, then
              none is used here
            - hasFileMeta -- if the writer did not write file meta
              information, then don't write it here either
            - seq.is_undefined_length -- if the original had delimiters,
              write them now too, instead of the more sensible explicit
              length characters
            - is_undefined_length_sequence_item -- for datasets that belong
              to a sequence, write the undefined length delimiters if that
              is what the original had.
            If False, produces a "nicer" DICOM file for other readers,
            where all lengths are explicit.

        See Also
        --------
        pydicom.filewriter.write_file
            Write a DICOM file from a FileDataset instance.

        Notes
        -----
        Set ``dataset.preamble`` if you want something other than 128
        0-bytes. If the dataset was read from an existing DICOM file, then
        its preamble was stored at read time. It is up to the user to ensure
        the preamble is still correct for its purposes.

        If there is no Transfer Syntax tag in the dataset, then set
        ``dataset.is_implicit_VR`` and ``dataset.is_little_endian``
        to determine the transfer syntax used to write the file.
        """
        pydicom.write_file(filename, self, write_like_original)
示例#24
0
def Decompress(FileInput, FileOutput):
    """Read a (possibly compressed) DICOM file and write a decompressed copy.

    Parameters
    ----------
    FileInput : path of the source DICOM file (read with force=True).
    FileOutput : path the decompressed file is written to.
    """
    dataset = dc.read_file(FileInput, force=True)
    # If decompression fails, the file's TransferSyntaxUID may need to be
    # corrected manually before calling decompress().
    dataset.decompress()
    dc.write_file(FileOutput, dataset)
示例#25
0
def sabr_deid(participant_info, scan_df, raw_dir, deid_outdir):

    #Join raw dir with subject name (assumes directory structure is ./rawdir/subj_name/...
    subj_main_dir = os.path.join(raw_dir, participant_info['participant_name'])

    #Adds BIDS required specifier 'sub' to new id
    participant_id = 'sub-' + participant_info['participant_id']

    #Get list of sessions within main subj directory, make dir and loop over sessions.
    subj_sessions = next(os.walk(subj_main_dir))[1]
    subj_sessions.sort()

    print('\n***{} has {} session(s)'.format(participant_id,
                                             len(subj_sessions)))

    #Create deidentified main (root) directory for subject
    subj_deid_main_dir = os.path.join(deid_outdir, participant_id)
    try:
        os.mkdir(subj_deid_main_dir)
    except:
        print('\nDirectory {} exists\n'.format(subj_deid_main_dir))

    #WARNING! LAZY CODING AHEAD!
    if len(subj_sessions) == 0:
        raise Exception(
            '\n***ERROR! NUMBER OF SESSIONS = 0!***\nPlease check directory structure of {}'
            .format(participant_info['participant_name']))

    elif len(subj_sessions) == 1:
        session = subj_sessions[0]
        subj_session_dir = os.path.join(subj_main_dir, session)

        for j, scan_type in enumerate(scan_df['scan_type']):
            subj_deid_meta_dir = os.path.join(subj_deid_main_dir, scan_type)
            try:
                os.mkdir(subj_deid_meta_dir)
            except:
                print('Meta directory {} exists.'.format(scan_type))

            #Match common sequence substring with path in os.walk
            for root, dr, files in os.walk(subj_session_dir):
                match = scan_df.scan_match[j]

                match_regex = fnmatch.translate(match)
                found = re.search(match_regex, root)
                print('\n***{}***\n'.format(found))

                #If match, start deid process. If not, move onto next folder.
                if found != None:

                    subj_deid_sequence_dir = os.path.join(
                        subj_deid_meta_dir, scan_df.scan_filename[j])
                    print('Making directory {}'.format(subj_deid_sequence_dir))
                    try:
                        os.mkdir(
                            subj_deid_sequence_dir
                        )  #Make "housing" directory to keep dicoms of different sequences but same meta-category separate.
                    except:
                        print(
                            '\n***SEQUENCE DIRECTORY ALREADY EXISTS!***\nSkipping.'
                        )
                        continue

                    #Create list of dicoms in sequence dir rather than use
                    #files (more control in case any non-dicoms)
                    anon_files = os.listdir(root)
                    anon_files = [
                        x for x in anon_files if 'nii' not in x
                    ]  #Remove any previous nii files that may be present < To do - expand to other file types (mgh, analyze, etc)
                    anon_files.sort()

                    for anon_file in anon_files:
                        #Read files in 1 at a time, remove the remove / alter the below tags.
                        dcm = pydicom.read_file(
                            os.path.join(root, anon_file), force=True
                        )  #Uses force = True incase dicoms haven't had identifier added to header

                        #Strip aquisition date information
                        aqusition_date_list = [[0x0008, 0x0020],
                                               [0x0008, 0x0021],
                                               [0x0008, 0x0022],
                                               [0x0008, 0x0023]]

                        for tag in aqusition_date_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                        #Strip aquisition time information
                        aqusition_time_list = [[0x0008, 0x0030],
                                               [0x0008, 0x0031],
                                               [0x0008, 0x0032],
                                               [0x0008, 0x0033]]

                        for tag in aqusition_time_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                        #Strip physician information
                        physician_list = [[0x0008, 0x0090], [0x0008, 0x1050]]

                        for tag in physician_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                            #Strip study description
                            #dcm[0x0008,0x1030].value = ''

                            #Strip subject name / patient ID
                        subj_name_list = [[0x0010, 0x0010], [0x0010, 0x0020]]
                        #PatientName, PatientID

                        for tag in subj_name_list:
                            try:
                                dcm[hex(tag[0]),
                                    hex(tag[1])].value = participant_id
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                            #Strip subject attributes
                        subj_attrib_list = [[0x0010, 0x0030], [0x0010, 0x1010],
                                            [0x0010, 0x1020], [0x0010, 0x1030]]
                        #, DoB, Age, PatientHeight, PatientWeight

                        for tag in subj_attrib_list:
                            try:
                                dcm[hex(tag[0]), hex(tag[1])].value = ''
                            except:
                                print(
                                    'Tag {} {} does not exist in {}. Moving to next tag'
                                    .format(hex(tag[0]), hex(tag[1]),
                                            scan_df.scan_filename[j]))

                        #Write anonymised file
                        pydicom.write_file(
                            os.path.join(subj_deid_sequence_dir, anon_file),
                            dcm)

    elif len(subj_sessions) > 1:
        for sn, session in enumerate(subj_sessions):

            #MAKE DIRECTORIES BUT ZERO PAD SESSION
            subj_deid_session_dir = os.path.join(
                subj_deid_main_dir, 'ses-'
                '{:02d}'.format(sn + 1))

            try:
                os.mkdir(subj_deid_session_dir)
            except:
                print('\nSession folder {} exists\n'.format(
                    subj_deid_session_dir))

            #Session folder for identifiable subject
            subj_session_dir = os.path.join(subj_main_dir, session)

            #Loop over scan folder types within scan dataframe (anat, task, etc)
            for j, scan_type in enumerate(scan_df['scan_type']):
                subj_deid_meta_dir = os.path.join(subj_deid_session_dir,
                                                  scan_type)
                try:
                    os.mkdir(subj_deid_meta_dir)
                except:
                    print('Meta directory {} exists.'.format(scan_type))

                #Match common sequence substring with path in os.walk
                for root, dr, files in os.walk(subj_session_dir):
                    match = scan_df.scan_match[j]

                    match_regex = fnmatch.translate(match)
                    found = re.search(match_regex, root)

                    #If match, start deid process, not not, move onto next folder.
                    if found != None:

                        subj_deid_sequence_dir = os.path.join(
                            subj_deid_meta_dir, scan_df.scan_filename[j])
                        print('Making directory {}'.format(
                            subj_deid_sequence_dir))
                        try:
                            os.mkdir(
                                subj_deid_sequence_dir
                            )  #Make "housing" directory to keep dicoms of different sequences but same meta-category separate.
                        except:
                            print(
                                '\n***SEQUENCE DIRECTORY ALREADY EXISTS!***\nSkipping.'
                            )
                            continue

                        #Create list of dicoms in sequence dir rather than use
                        #files (more control in case any non-dicoms)
                        anon_files = os.listdir(root)
                        anon_files = [
                            x for x in anon_files if 'nii' not in x
                        ]  #Remove any previous nii files that may be present < To do - expand to other file types (mgh, analyze, etc)
                        anon_files.sort()

                        for anon_file in anon_files:
                            #Read files in 1 at a time, remove the remove / alter the below tags.
                            dcm = pydicom.read_file(
                                os.path.join(root, anon_file), force=True
                            )  #Uses force = True incase dicoms haven't had identifier added to header

                            #Strip aquisition date information
                            aqusition_date_list = [[0x0008, 0x0020],
                                                   [0x0008, 0x0021],
                                                   [0x0008, 0x0022],
                                                   [0x0008, 0x0023]]

                            for tag in aqusition_date_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                            #Strip aquisition time information
                            aqusition_time_list = [[0x0008, 0x0030],
                                                   [0x0008, 0x0031],
                                                   [0x0008, 0x0032],
                                                   [0x0008, 0x0033]]

                            for tag in aqusition_time_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                            #Strip physician information
                            physician_list = [[0x0008, 0x0090],
                                              [0x0008, 0x1050]]

                            for tag in physician_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                                #Strip study description
                                #dcm[0x0008,0x1030].value = ''

                                #Strip subject name / patient ID
                            subj_name_list = [[0x0010, 0x0010],
                                              [0x0010, 0x0020]]
                            #PatientName, PatientID

                            for tag in subj_name_list:
                                try:
                                    dcm[hex(tag[0]),
                                        hex(tag[1])].value = participant_id
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                                #Strip subject attributes
                            subj_attrib_list = [[0x0010, 0x0030],
                                                [0x0010, 0x1010],
                                                [0x0010, 0x1020],
                                                [0x0010, 0x1030]]
                            #, DoB, Age, PatientHeight, PatientWeight

                            for tag in subj_attrib_list:
                                try:
                                    dcm[hex(tag[0]), hex(tag[1])].value = ''
                                except:
                                    print(
                                        'Tag {} {} does not exist in {}. Moving to next tag'
                                        .format(hex(tag[0]), hex(tag[1]),
                                                scan_df.scan_filename[j]))

                            #Write anonymised file
                            pydicom.write_file(
                                os.path.join(subj_deid_sequence_dir,
                                             anon_file), dcm)

    return (subj_deid_main_dir, subj_sessions, participant_id)
Example #26
0
    [purposeOfReferenceDataset])
# NOTE(review): tail of a script — `sr_out`, `dcm_in`, `display_name` and
# `referencedSequenceDataset` are defined earlier, outside this excerpt.
# Builds a DICOM Structured Report (SR) dataset that references the input
# image and carries the patient name as a single TEXT content item.
sr_out.ReferencedInstanceSequence = Sequence([referencedSequenceDataset])
sr_out.ValueType = "CONTAINER"
# Concept name code: DCM 126000 "Imaging Measurement Report"
conceptNameDataset = Dataset()
conceptNameDataset.CodeValue = "126000"
conceptNameDataset.CodingSchemeDesignator = "DCM"
conceptNameDataset.CodeMeaning = "Imaging Measurement Report"
sr_out.ConceptCodeSequence = Sequence([conceptNameDataset])
# Reference back to the source image instance the SR describes
referencedSopSequence = Dataset()
referencedSopSequence.ReferencedSOPClassUID = dcm_in.SOPClassUID
referencedSopSequence.ReferencedSOPInstanceUID = dcm_in.SOPInstanceUID
sr_out.ReferencedSOPSequence = Sequence([referencedSopSequence])
# Content item: CONTAINS / TEXT with concept SNM3 371484003
# "Patient name (observable entity)", value taken from `display_name`
nameDataset = Dataset()
nameDataset.RelationshipType = "CONTAINS"
nameDataset.ValueType = "TEXT"
nameConceptNameDataset = Dataset()
nameConceptNameDataset.CodeValue = "371484003"
nameConceptNameDataset.CodingSchemeDesignator = "SNM3"
nameConceptNameDataset.CodeMeaning = "Patient name (observable entity)"
nameDataset.ConceptNameCodeSequence = Sequence([nameConceptNameDataset])
nameDataset.TextValue = display_name
sr_out.ContentSequence = Sequence([nameDataset])
pydicom.write_file('/envoyai/output/sr.dcm', sr_out)

print("SeriesInstanceUid............:", sr_out.SeriesInstanceUID)
print("SopInstanceUid...............:", sr_out.SOPInstanceUID)
print("MediaStorageSOPInstanceUID...:",
      sr_out.file_meta.MediaStorageSOPInstanceUID)
# NOTE(review): the same dataset is written twice (sr.dcm above, out.dcm
# here) — confirm both outputs are actually consumed downstream.
pydicom.write_file('/envoyai/output/out.dcm', sr_out)
exit(0)
 def test_missing_sop_class(self):
     """A file whose dataset lacks SOPClassUID must be flagged as a fatal error."""
     path = 'test.dcm'
     dataset = FileDataset(path, Dataset(), file_meta=self.create_metadata())
     write_file(path, dataset, write_like_original=False)
     self.assert_fatal_error(path, 'Missing SOPClassUID')
 def test_unknown_sop_class(self):
     """An unrecognized SOPClassUID value must be flagged as a fatal error."""
     ds = Dataset()
     ds.SOPClassUID = 'Unknown'
     write_file('test',
                FileDataset('test', ds, file_meta=self.create_metadata()),
                write_like_original=False)
     self.assert_fatal_error('test', 'Unknown SOPClassUID (probably retired): Unknown')
Example #29
0
  def patchDicomDir(self, inputDirPath, outputDirPath):
    """
    Since CTK (rightly) requires certain basic information [1] before it can import
    data files that purport to be dicom, this code patches the files in a directory
    with some needed fields.

    Calling this function with a directory path will make a patched copy of each file.
    Importing the old files to CTK should still fail, but the new ones should work.

    The directory is assumed to have a set of instances that are all from the
    same study of the same patient.  Also that each instance (file) is an
    independent (multiframe) series.

    [1] https://github.com/commontk/CTK/blob/16aa09540dcb59c6eafde4d9a88dfee1f0948edc/Libs/DICOM/Core/ctkDICOMDatabase.cpp#L1283-L1287
    """

    import pydicom

    self.addLog('DICOM patching started...')
    logging.debug('DICOM patch input directory: '+inputDirPath)
    logging.debug('DICOM patch output directory: '+outputDirPath)

    # Let every rule initialize before any file is touched
    for rule in self.patchingRules:
      rule.logCallback = self.addLog
      rule.processStart(inputDirPath, outputDirPath)

    for root, subFolders, files in os.walk(inputDirPath):

      # Mirror the input directory layout under the output directory
      currentSubDir = os.path.relpath(root, inputDirPath)
      rootOutput = os.path.join(outputDirPath, currentSubDir)

      # Notify rules that processing of a new subdirectory started
      for rule in self.patchingRules:
        rule.processDirectory(currentSubDir)

      for file in files:
        filePath = os.path.join(root,file)
        self.addLog('Examining %s...' % os.path.join(currentSubDir,file))

        # First rule that asks to skip this file wins
        skipFileRequestingRule = None
        for rule in self.patchingRules:
          if rule.skipFile(currentSubDir):
            skipFileRequestingRule = rule
            break
        if skipFileRequestingRule:
          # Fix: name the rule that requested the skip explicitly, instead of
          # relying on the leaked loop variable `rule` (which was only correct
          # by accident of the `break` above).
          self.addLog('  Rule '+skipFileRequestingRule.__class__.__name__+' requested to skip this file.')
          continue

        try:
          ds = pydicom.read_file(filePath)
        except (OSError, pydicom.filereader.InvalidDicomError):
          self.addLog('  Not DICOM file. Skipped.')
          continue

        self.addLog('  Patching...')

        for rule in self.patchingRules:
          rule.processDataSet(ds)

        # A rule may redirect the output path (e.g. rename by patched UIDs)
        patchedFilePath = os.path.abspath(os.path.join(rootOutput,file))
        for rule in self.patchingRules:
          patchedFilePath = rule.generateOutputFilePath(ds, patchedFilePath)

        ######################################################
        # Write

        # exist_ok avoids the race between the old exists() check and makedirs()
        os.makedirs(os.path.dirname(patchedFilePath), exist_ok=True)

        self.addLog('  Writing DICOM...')
        pydicom.write_file(patchedFilePath, ds)
        self.addLog('  Created DICOM file: %s' % patchedFilePath)

    self.addLog(f'DICOM patching completed. Patched files are written to:\n{outputDirPath}')
Example #30
0
    def Mask_to_Contours(self):
        """Convert the binary mask ``self.annotations`` into RTSTRUCT contours.

        For each name in ``self.ROI_Names`` a new ROI (StructureSetROI,
        RTROIObservations, ROIContour) is inserted at the front of
        ``self.RS_struct``; slice masks are converted to contour data by a
        pool of ``contour_worker`` threads fed through a queue.  The patched
        RTSTRUCT is written to ``self.output_dir`` and a ``Completed.txt``
        sentinel file is created.
        """
        self.RefDs = self.ds
        # Per-slice ImagePositionPatient (0020,0032): x/y/z offset of each file
        self.shift_list = [[
            float(i)
            for i in self.reader.GetMetaData(j, "0020|0032").split('\\')
        ] for j in range(len(self.reader.GetFileNames()))
                           ]  #ShiftRows, ShiftCols, ShiftZBase
        # ImageOrientationPatient (0020,0037): row (X*) and column (Y*) cosines
        self.mv = Xx, Xy, Xz, Yx, Yy, Yz = [
            float(i)
            for i in self.reader.GetMetaData(0, "0020|0037").split('\\')
        ]
        # Project each slice origin onto the row direction
        self.ShiftRows = [
            i[0] * Xx + i[1] * Xy + i[2] * Xz for i in self.shift_list
        ]
        # NOTE(review): first term uses Xy where a column projection would be
        # i[0] * Yx — confirm this asymmetry is intentional before changing.
        self.ShiftCols = [
            i[0] * Xy + i[1] * Yy + i[2] * Yz for i in self.shift_list
        ]
        self.ShiftZ = [i[2] for i in self.shift_list]
        self.mult1 = self.mult2 = 1
        self.PixelSize = self.dicom_handle.GetSpacing()
        # Names of ROIs already present in the RT structure set
        current_names = []
        for names in self.RS_struct.StructureSetROISequence:
            current_names.append(names.ROIName)
        Contour_Key = {}
        xxx = 1
        for name in self.ROI_Names:
            Contour_Key[name] = xxx
            xxx += 1
        self.all_annotations = self.annotations
        base_annotations = copy.deepcopy(self.annotations)
        temp_color_list = []
        color_list = [[128, 0, 0], [170, 110, 40], [0, 128, 128], [0, 0, 128],
                      [230, 25, 75], [225, 225, 25], [0, 130, 200],
                      [145, 30, 180], [255, 255, 255]]
        self.struct_index = 0
        new_ROINumber = 1000
        for Name in self.ROI_Names:
            new_ROINumber -= 1
            # Replenish the color pool when exhausted, pick a random color
            if not temp_color_list:
                temp_color_list = copy.deepcopy(color_list)
            color_int = np.random.randint(len(temp_color_list))
            print('Writing data for ' + Name)
            # Channel 0 is background; ROI channels start at 1
            self.annotations = copy.deepcopy(
                base_annotations[:, :, :,
                                 int(self.ROI_Names.index(Name) + 1)])
            self.annotations = self.annotations.astype('int')

            make_new = 1
            allow_slip_in = True
            # Insert a fresh ROI entry (cloned from entry 0) unless it exists
            if (Name not in current_names
                    and allow_slip_in) or self.delete_previous_rois:
                self.RS_struct.StructureSetROISequence.insert(
                    0,
                    copy.deepcopy(self.RS_struct.StructureSetROISequence[0]))
            else:
                print(
                    'Prediction ROI {} is already within RT structure'.format(
                        Name))
                continue
            self.RS_struct.StructureSetROISequence[
                self.struct_index].ROINumber = new_ROINumber
            self.RS_struct.StructureSetROISequence[self.struct_index].ReferencedFrameOfReferenceUID = \
                self.ds.FrameOfReferenceUID
            self.RS_struct.StructureSetROISequence[
                self.struct_index].ROIName = Name
            self.RS_struct.StructureSetROISequence[
                self.struct_index].ROIVolume = 0
            self.RS_struct.StructureSetROISequence[
                self.struct_index].ROIGenerationAlgorithm = 'SEMIAUTOMATIC'
            if make_new == 1:
                self.RS_struct.RTROIObservationsSequence.insert(
                    0,
                    copy.deepcopy(self.RS_struct.RTROIObservationsSequence[0]))
                # Drop MaterialID inherited from the cloned template entry
                if 'MaterialID' in self.RS_struct.RTROIObservationsSequence[
                        self.struct_index]:
                    del self.RS_struct.RTROIObservationsSequence[
                        self.struct_index].MaterialID
            self.RS_struct.RTROIObservationsSequence[
                self.struct_index].ObservationNumber = new_ROINumber
            self.RS_struct.RTROIObservationsSequence[
                self.struct_index].ReferencedROINumber = new_ROINumber
            self.RS_struct.RTROIObservationsSequence[
                self.struct_index].ROIObservationLabel = Name
            self.RS_struct.RTROIObservationsSequence[
                self.struct_index].RTROIInterpretedType = 'ORGAN'

            if make_new == 1:
                self.RS_struct.ROIContourSequence.insert(
                    0, copy.deepcopy(self.RS_struct.ROIContourSequence[0]))
            self.RS_struct.ROIContourSequence[
                self.struct_index].ReferencedROINumber = new_ROINumber
            # Keep only the first contour item as a template; rest are stale
            del self.RS_struct.ROIContourSequence[
                self.struct_index].ContourSequence[1:]
            self.RS_struct.ROIContourSequence[
                self.struct_index].ROIDisplayColor = temp_color_list[color_int]
            del temp_color_list[color_int]
            thread_count = int(cpu_count() * 0.9 - 1)
            # thread_count = 1
            contour_dict = {}
            q = Queue(maxsize=thread_count)
            threads = []
            kwargs = {
                'image_size_rows': self.image_size_rows,
                'image_size_cols': self.image_size_cols,
                'slice_info': self.slice_info,
                'PixelSize': self.PixelSize,
                'mult1': self.mult1,
                'mult2': self.mult2,
                'ShiftZ': self.ShiftZ,
                'mv': self.mv,
                'shift_list': self.shift_list,
                'ShiftRows': self.ShiftRows,
                'ShiftCols': self.ShiftCols,
                'contour_dict': contour_dict
            }

            A = [q, kwargs]
            for worker in range(thread_count):
                t = Thread(target=contour_worker, args=(A, ))
                t.start()
                threads.append(t)

            contour_num = 0
            if np.max(self.annotations
                      ) > 0:  # If we have an annotation, write it
                # Queue each slice that contains mask voxels, then send one
                # None sentinel per worker and wait for all threads to finish
                image_locations = np.max(self.annotations, axis=(1, 2))
                indexes = np.where(image_locations > 0)[0]
                for index in indexes:
                    item = [self.annotations[index, ...], index]
                    q.put(item)
                for i in range(thread_count):
                    q.put(None)
                for t in threads:
                    t.join()
                for i in contour_dict.keys():
                    for output in contour_dict[i]:
                        if contour_num > 0:
                            self.RS_struct.ROIContourSequence[
                                self.struct_index].ContourSequence.append(
                                    copy.
                                    deepcopy(self.RS_struct.ROIContourSequence[
                                        self.struct_index].ContourSequence[0]))
                        self.RS_struct.ROIContourSequence[
                            self.struct_index].ContourSequence[
                                contour_num].ContourNumber = str(contour_num)
                        self.RS_struct.ROIContourSequence[
                            self.struct_index].ContourSequence[
                                contour_num].ContourImageSequence[
                                    0].ReferencedSOPInstanceUID = self.SOPInstanceUIDs[
                                        i]
                        self.RS_struct.ROIContourSequence[
                            self.struct_index].ContourSequence[
                                contour_num].ContourData = output
                        # Fix: the DICOM keyword is NumberOfContourPoints
                        # (capital O).  The original 'NumberofContourPoints'
                        # set a plain Python attribute on the Dataset that was
                        # silently dropped when writing the RTSTRUCT file.
                        self.RS_struct.ROIContourSequence[
                            self.struct_index].ContourSequence[
                                contour_num].NumberOfContourPoints = round(
                                    len(output) / 3)
                        contour_num += 1
        # Make the SOPInstanceUID of the rewritten RTSTRUCT unique-ish
        self.RS_struct.SOPInstanceUID += '.' + str(np.random.randint(999))
        if self.template or self.delete_previous_rois:
            # Trim template/previous ROIs beyond the ones we just wrote,
            # then renumber everything consecutively from 1
            for i in range(len(self.RS_struct.StructureSetROISequence),
                           len(self.ROI_Names), -1):
                del self.RS_struct.StructureSetROISequence[-1]
            for i in range(len(self.RS_struct.RTROIObservationsSequence),
                           len(self.ROI_Names), -1):
                del self.RS_struct.RTROIObservationsSequence[-1]
            for i in range(len(self.RS_struct.ROIContourSequence),
                           len(self.ROI_Names), -1):
                del self.RS_struct.ROIContourSequence[-1]
            for i in range(len(self.RS_struct.StructureSetROISequence)):
                self.RS_struct.StructureSetROISequence[i].ROINumber = i + 1
                self.RS_struct.RTROIObservationsSequence[
                    i].ReferencedROINumber = i + 1
                self.RS_struct.ROIContourSequence[
                    i].ReferencedROINumber = i + 1
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        out_name = os.path.join(
            self.output_dir, 'RS_MRN' + self.RS_struct.PatientID + '_' +
            self.RS_struct.SeriesInstanceUID + '.dcm')
        if os.path.exists(out_name):
            out_name = os.path.join(
                self.output_dir, 'RS_MRN' + self.RS_struct.PatientID + '_' +
                self.RS_struct.SeriesInstanceUID + '1.dcm')
        print('Writing out data...')
        pydicom.write_file(out_name, self.RS_struct)
        fid = open(os.path.join(self.output_dir, 'Completed.txt'), 'w+')
        fid.close()
        print('Finished!')
        return None
Example #31
0
    def make_uid(self,dicom_path):
        """Group the DICOM files in ``dicom_path`` and write bookkeeping files.

        Splits the folder into per-series groups (keyed by SeriesTime /
        StudyTime, or by the keys chosen in the overlapping-images branch),
        moves multi-series folders into per-key sub-folders, and records
        ``UID_val.txt`` / ``MRN_val.txt`` / ``prepped.txt`` sentinels.

        NOTE(review): behavior summarized from the visible code; several bare
        ``except`` blocks silently skip unreadable files — confirm intended.
        """
        print(dicom_path)
        # Start clean: remove bookkeeping files left over from a prior run
        if os.path.exists(os.path.join(dicom_path,'UID_val.txt')):
            os.remove(os.path.join(dicom_path,'UID_val.txt'))
        if os.path.exists(os.path.join(dicom_path,'MRN_val.txt')):
            os.remove(os.path.join(dicom_path,'MRN_val.txt'))
            #)
        file_list = []
        dirs = []
        # os.walk + immediate break: inspect only the top level of dicom_path
        for root, dirs, file_list in os.walk(dicom_path):
            break
        files = [file for file in file_list if file.find('.dcm') != -1]
        if not files:
            # No .dcm extensions found; assume every file is a DICOM
            files = file_list
        uid_dic = {}
        if not self.overlapping_images:  # If we know there aren't overlapping image sets, move on
            for file in files:
                # Files with braces in the name are treated as junk and deleted
                if file.find('{') != -1 or file.find('}') != -1:
                    os.remove(os.path.join(dicom_path,file))
                    continue
                try:
                    ds = pydicom.read_file(os.path.join(dicom_path,file))
                    try:
                        Acquistion_time = str(ds.SeriesTime)
                    except:
                        # Fall back to StudyTime when SeriesTime is absent
                        Acquistion_time = ds.StudyTime
                        Acquistion_time = Acquistion_time.replace('.', '')
                    keys = uid_dic.keys()
                    if ds.Modality.find('RTSTRUCT') == 0:
                        # RTSTRUCTs get their own key (time + '1' suffix)
                        RT_val = 1
                        Acquistion_time += str(RT_val)
                        if Acquistion_time not in keys:
                            uid_dic[Acquistion_time] = [file]
                    elif len(files) > self.min_size:
                        if Acquistion_time not in keys:
                            uid_dic[Acquistion_time] = [file]
                    # Only the first readable file is examined in this branch
                    break
                except:
                    continue
        else:
            # Overlapping sets: group by (0008,0031) / (0020,0012) tag values
            data_keys = [(0x08, 0x031), (0x020, 0x012)]
            try:
                data_all, ds = self.make_uid_and_del_dic(dicom_path,files, data_keys) # Acquisition Time
            except:
                return None
            go_on = False
            output = 2
            # Choose the key that yields the most groups while keeping at
            # least `min_images` files per group (threshold relaxed by 10s)
            while not go_on:
                output -= 1
                min_images = 51
                while min_images > 0:
                    for key in data_all.keys():
                        if len(list(data_all[key][1].keys())) > output and \
                                len(files)/len(list(data_all[key][1].keys())) >= min_images: #len(list(data_all[key][1].keys())) <= 7 and
                            output = len(list(data_all[key][1].keys()))
                            del_dict, uid_dic = data_all[key]
                            go_on = True
                    if go_on:
                        break
                    min_images -= 10
            for key in del_dict.keys():
                if len(del_dict[key]) == 1: # If we have one image that wrong orientation, delete it
                    os.remove(os.path.join(dicom_path,del_dict[key][0]))
        if (len(uid_dic.keys()) > 1) or (len(dirs) > 0 and 'query_file' not in dirs): # We have multiple scans.. need to make new folders for each and change the uids
            i = -1
            for key in uid_dic.keys():
                i += 1
                files_total = uid_dic[key]
                for file_name in files_total:
                    if os.path.exists(os.path.join(dicom_path,key,file_name)):
                        os.remove(os.path.join(dicom_path,file_name)) # If it already exists, just move on
                        continue
                    if self.overlapping_liver and key.find('RT') != 0 and len(files_total) > self.min_size:
                        # Make each split series unique by suffixing its UID
                        ds = pydicom.read_file(os.path.join(dicom_path,file_name))
                        ds.SeriesInstanceUID += '.' + str(i)
                        pydicom.write_file(os.path.join(dicom_path,file_name),ds)
                    if ds.Modality != 'US': #Raystation can't handle ultrasound
                        if len(files_total) > self.min_size or key.find('RT') == 0:
                            if not os.path.exists(os.path.join(dicom_path,key)):
                                os.mkdir(os.path.join(dicom_path ,key))
                            os.rename(os.path.join(dicom_path,file_name),os.path.join(dicom_path,key,file_name))
                    else:
                        os.remove(os.path.join(dicom_path,file_name))
                if ds.Modality != 'US' and (len(files_total) > self.min_size or key.find('RT') == 0):
                    # Record UID (Series for images, Study for RTSTRUCT) + MRN
                    fid = open(os.path.join(dicom_path,key,'UID_val.txt'), 'w+')
                    if ds.Modality != 'RTSTRUCT':
                        fid.write(ds.SeriesInstanceUID)
                    else:
                        fid.write(ds.StudyInstanceUID)
                    fid.close()
                    fid = open(os.path.join(dicom_path,key,'MRN_val.txt'), 'w+')
                    fid.write(ds.PatientID)
                    fid.close()
                    fid = open(os.path.join(dicom_path,key,'prepped.txt'), 'w+')
                    fid.close()
        elif len(uid_dic.keys()) > 0:
            # Single scan: write bookkeeping files directly into dicom_path

            fid = open(os.path.join(dicom_path ,'UID_val.txt'), 'w+')
            if ds.Modality != 'RTSTRUCT' and ds.Modality != 'RTPLAN' and ds.Modality != 'RTDOSE':
                fid.write(ds.SeriesInstanceUID)
            else:
                fid.write(ds.StudyInstanceUID)
            fid.close()

            try:
                MRN_val = ds.PatientID
            except:
                MRN_val = 0

            # NOTE(review): MRN_val can be the int 0 here, and fid.write(0)
            # would raise TypeError — confirm PatientID is always present.
            fid = open(os.path.join(dicom_path, 'MRN_val.txt'), 'w+')
            fid.write(MRN_val)
            fid.close()
            fid = open(os.path.join(dicom_path, 'prepped.txt'), 'w+')
            fid.close()
        return None
Example #32
0
def adjust_machine_name_cli(args):
    """CLI entry point: rewrite the machine name in a DICOM file.

    Loads ``args.input_file`` (``force=True`` so header-less files still
    parse), applies ``adjust_machine_name`` with ``args.new_machine_name``,
    and saves the result to ``args.output_file``.
    """
    source_dataset = pydicom.read_file(args.input_file, force=True)
    renamed_dataset = adjust_machine_name(source_dataset,
                                          args.new_machine_name)
    pydicom.write_file(args.output_file, renamed_dataset)
Example #33
0
def adjust_RED_by_structure_name_cli(args):
    """CLI entry point: adjust relative electron density by structure name.

    Loads ``args.input_file`` (``force=True`` so header-less files still
    parse), runs ``adjust_RED_by_structure_name`` over it, and saves the
    result to ``args.output_file``.
    """
    source_dataset = pydicom.read_file(args.input_file, force=True)
    adjusted_dataset = adjust_RED_by_structure_name(source_dataset)
    pydicom.write_file(args.output_file, adjusted_dataset)
Example #34
0
    def save_as(self, filename, write_like_original=True):
        """Write this Dataset to `filename` via ``pydicom.write_file``.

        The Dataset must have ``is_implicit_VR`` and ``is_little_endian``
        attributes set appropriately before saving; if
        ``Dataset.file_meta.TransferSyntaxUID`` is present it should agree
        with them to ensure conformance.

        Parameters
        ----------
        filename : str or file-like
            Name of file or the file-like to write the new DICOM file to.
        write_like_original : bool
            If True (default), the Dataset is written as-is after minimal
            validation, preserving the original's preamble, file_meta,
            and undefined-length delimiters — which may produce a file
            that is not conformant with the DICOM File Format.
            If False, a conformant DICOM File Format file is produced
            (per DICOM Standard Part 10 Section 7), with explicit lengths
            for all elements; this requires ``Dataset.file_meta`` to hold
            the required Type 1 File Meta Information Group elements.

        Raises
        ------
        AttributeError
            If ``is_little_endian`` or ``is_implicit_VR`` is missing.

        See Also
        --------
        pydicom.filewriter.write_dataset
            Write a DICOM Dataset to a file.
        pydicom.filewriter.write_file_meta_info
            Write the DICOM File Meta Information Group elements to a file.
        pydicom.filewriter.write_file
            Write a DICOM file from a FileDataset instance.
        """
        # Guard: both encoding attributes must exist before delegating
        has_encoding_attrs = (hasattr(self, 'is_little_endian')
                              and hasattr(self, 'is_implicit_VR'))
        if not has_encoding_attrs:
            raise AttributeError("'{0}.is_little_endian' and "
                                 "'{0}.is_implicit_VR' must exist and be "
                                 "set appropriately before "
                                 "saving.".format(self.__class__.__name__))

        pydicom.write_file(filename, self, write_like_original)
Example #35
0
def dump_dicom(data, folder, spacing=(1, 1, 1),
               origin=(0, 0, 0), intercept=0, slope=1):
    """ Dump 3D scan in dicom format, one file per axial (z) slice.

    Parameters
    ----------
    data : ndarray
        3D numpy array containing ct scan's data, indexed (z, y, x).
    folder : str
        folder where dicom files will be dumped (created if missing).
    spacing : ArrayLike
        ndarray of shape (3,) that contains spacing along z, y, x axes.
    origin : ArrayLike
        ndarray of shape (3,) that contains origin for z, y, x axes.
    intercept : float
        rescale intercept value. Default is 0.
    slope : float
        rescale slope value. Default is 1.
    """
    spacing = np.array(spacing).reshape(-1)
    origin = np.array(origin).reshape(-1)

    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs(folder, exist_ok=True)

    num_slices = data.shape[0]
    scan_id = np.random.randint(2 ** 16)
    for i in range(num_slices):
        # Per-slice file name: zero-padded upper-case hex of a per-slice id.
        slice_name = (
            hex(scan_id + i)
            .replace('x', '')
            .upper()
            .zfill(8)
        )
        filename = os.path.join(folder, slice_name)
        # Store raw values so that raw * slope + intercept recovers `data`.
        pixel_array = (data[i, ...] - intercept) / slope
        locZ, locY, locX = (float(origin[0] + spacing[0] * i),
                            float(origin[1]), float(origin[2]))

        file_meta = Dataset()
        # Proper Secondary Capture Image Storage SOP Class UID; the original
        # stored the human-readable name, which is not a valid UID value.
        file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.7'
        # Must be unique per instance: the original used hex(scan_id) with
        # no slice offset, giving every slice the same SOP Instance UID.
        # NOTE(review): hex digits are still not a conformant dotted-numeric
        # UID; pydicom.uid.generate_uid() would be strictly correct.
        file_meta.MediaStorageSOPInstanceUID = slice_name
        file_meta.ImplementationClassUID = slice_name

        dataset = FileDataset(filename, {},
                              file_meta=file_meta,
                              preamble=b"\0"*128)

        # tostring() was deprecated and removed in modern numpy;
        # tobytes() is the exact equivalent.
        dataset.PixelData = pixel_array.astype(np.uint16).tobytes()
        dataset.RescaleSlope = slope
        dataset.RescaleIntercept = intercept

        dataset.ImagePositionPatient = MultiValue(type_constructor=float,
                                                  iterable=[locZ, locY, locX])

        # PixelSpacing is (row spacing, column spacing) == (y, x).
        dataset.PixelSpacing = MultiValue(type_constructor=float,
                                          iterable=[float(spacing[1]),
                                                    float(spacing[2])])
        dataset.SliceThickness = float(spacing[0])

        dataset.Modality = 'WSD'
        # Rows is the first (y) axis of a slice, Columns the second (x).
        # The original had them swapped, corrupting non-square slices.
        dataset.Rows = pixel_array.shape[0]
        dataset.Columns = pixel_array.shape[1]
        dataset.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
        # Pixels are written as unsigned uint16, so PixelRepresentation must
        # be 0; the original's 1 declared signed two's-complement data.
        dataset.PixelRepresentation = 0
        dataset.BitsAllocated = 16
        dataset.BitsStored = 16
        dataset.SamplesPerPixel = 1

        write_file(filename, dataset)