Example #1
def size_cube_phantom(reference_image, output_folder):
    """ Takes an input image and outputs
    """

    nifti_3d = nib.load(reference_image)
    image_3d = nifti_3d.get_fdata()  # get_data() was removed in recent nibabel releases.

    for size_ratio in np.arange(.1, 1, .1):
        phantom_3d = np.zeros((image_3d.shape[0], image_3d.shape[1], 1))
        # Cast the margins to int so they can be used as slice indices.
        x_margin = int(phantom_3d.shape[0] * (size_ratio / 2))
        y_margin = int(phantom_3d.shape[1] * (size_ratio / 2))
        phantom_3d[x_margin:(phantom_3d.shape[0] - x_margin),
                   y_margin:(phantom_3d.shape[1] - y_margin), 0] = 1
        nifti_util.save_numpy_2_nifti(
            phantom_3d, reference_image,
            os.path.join(
                output_folder,
                'Size_' + str(int(10 * (1 - size_ratio))) + '_Phantom.nii.gz'))
        nifti_util.save_numpy_2_nifti(
            phantom_3d, reference_image,
            os.path.join(
                output_folder, 'Size_' + str(int(10 * (1 - size_ratio))) +
                '_Phantom-label.nii.gz'))
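A minimal usage sketch; the paths below are hypothetical, and nifti_util is assumed to be the repository's NIfTI helper module:

# Writes Size_<n>_Phantom.nii.gz and matching *_Phantom-label.nii.gz files
# into the output folder, one pair per size ratio.
size_cube_phantom('reference_volume.nii.gz', './size_phantoms')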
Example #2
def convert_v9_phantoms():

    for folder in glob.glob(
            '../tofts_v9_phantom/QIBA_v9_Tofts/QIBA_v9_Tofts_GE_Orig/*/'):

        files = glob.glob(os.path.join(folder, 'DICOM', '*'))
        files = sorted(files)

        output_array = None

        for file in files:

            print(file)
            array = dicom.read_file(file).pixel_array.T[..., np.newaxis]
            print(array.shape)

            if output_array is None:
                output_array = array
            else:
                print(output_array.shape, array.shape)
                output_array = np.concatenate((output_array, array), axis=2)

        output_filepath = os.path.basename(os.path.dirname(folder)) + '.nii.gz'
        output_filepath = os.path.join('../tofts_v9_phantom/', output_filepath)
        save_numpy_2_nifti(output_array, None, output_filepath)
Example #3
def Slicer_PkModeling(input_folder,
                      Slicer_path="/opt/Slicer-4.5.0-1-linux-amd64/Slicer"):

    # os.chdir('C:/Users/azb22/Documents/Scripting/DCE_Motion_Phantom')
    # input_folder = '.'
    # Slicer_path = 'C:/Users/azb22/Documents/Software/SlicerNightly/Slicer_4.6.0/Slicer.exe'

    Slicer_Command = Slicer_path + ' --launch'

    T1Blood = '--T1Blood 1440'
    T1Tissue = '--T1Tissue 1000'
    relaxivity = '--relaxivity .0045'
    hematocrit = '--hematocrit .45'
    BAT_mode = '--BATCalculationMode UseConstantBAT'
    BAT_arrival = '--constantBAT 8'
    aif_mask_command = '--aifMask '
    roi_mask_command = '--roiMask '
    t1_map_command = '--T1Map '

    # image_list = glob.glob(input_folder + '/*.nrrd')
    image_list = os.listdir(input_folder)

    for nrrd_image in image_list:

        if '.nrrd' in nrrd_image:

            move(nrrd_image, nrrd_image.replace(' ', ''))
            nrrd_image = nrrd_image.replace(' ', '')

            output_ktrans_image = nrrd_image.split('.')[0] + '_ktrans.nii.gz'
            output_ve_image = nrrd_image.split('.')[0] + '_ve.nii.gz'

            output_ktrans_command = '--outputKtrans ' + output_ktrans_image
            output_ve_command = '--outputVe ' + output_ve_image

            PkModeling_command = ' '.join([
                Slicer_Command, 'PkModeling', nrrd_image, T1Blood, T1Tissue,
                relaxivity, hematocrit, BAT_mode, BAT_arrival, '--usePopAif',
                output_ve_command, output_ktrans_command
            ])

            # call(PkModeling_command, shell=True)

            ktrans_array = convert_input_2_numpy(output_ktrans_image)
            ve_array = convert_input_2_numpy(output_ve_image)

            for z in range(ktrans_array.shape[-1]):
                ktrans_array[..., z] = medfilt(ktrans_array[..., z], [3, 3])
                ve_array[..., z] = medfilt(ve_array[..., z], [3, 3])

            ktrans_array[ve_array == 0] = .001
            ve_array[ve_array == 0] = .001

            save_numpy_2_nifti(ktrans_array, output_ktrans_image,
                               output_ktrans_image)
            save_numpy_2_nifti(ve_array, output_ve_image, output_ve_image)

    return
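A usage sketch, assuming a folder of 4D DCE volumes stored as .nrrd files (the paths are hypothetical). Note that the PkModeling invocation itself is commented out above, so only the post-processing of existing output maps runs as written:

# Each foo.nrrd yields foo_ktrans.nii.gz and foo_ve.nii.gz, median-filtered
# slice-by-slice, with zero-ve voxels clamped to .001.
Slicer_PkModeling('./dce_volumes',
                  Slicer_path='/opt/Slicer-4.5.0-1-linux-amd64/Slicer')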
Example #4
def calc_DCE_properties_single(
        filepath, T1_tissue=1000, T1_blood=1440, relaxivity=.0045, TR=5,
        TE=2.1, scan_time_seconds=(11 * 60), hematocrit=0.45,
        injection_start_time_seconds=60, flip_angle_degrees=30,
        label_file=[], label_suffix=[], label_value=1, mask_value=0,
        mask_threshold=0, T1_map_file=[], T1_map_suffix='-T1Map',
        AIF_label_file=[], AIF_value_data=[], AIF_value_suffix=[],
        convert_AIF_values=True, AIF_mode='label_average',
        AIF_label_suffix=[], AIF_label_value=1, label_mode='separate',
        param_file=[], default_population_AIF=False,
        initial_fitting_function_parameters=[.01, .1],
        outputs=['ktrans', 've', 'auc'], outfile_prefix='', processes=1,
        gaussian_blur=.65, gaussian_blur_axis=2):

    """ This is a master function that creates ktrans, ve, and auc values from raw intensity 1D-4D volumes.
    """

    print('\n')

    # NaN values are cleaned for ease of calculation.
    if isinstance(filepath, str):
        image = np.nan_to_num(nifti_2_numpy(filepath))
    else:
        image = np.nan_to_num(np.copy(filepath))

    # Unlikely that this circumstance will apply to 4D images in the future.
    dimension = len(image.shape)
    if dimension > 4:
        print('Error: Images greater than dimension 4 are currently not supported. Skipping this volume...')
        return []

    # Convenience variables created from input parameters.
    flip_angle_radians = flip_angle_degrees*np.pi/180
    time_interval_seconds = float(scan_time_seconds / image.shape[dimension-1])
    timepoints = image.shape[-1]
    bolus_time = int(np.ceil((injection_start_time_seconds / scan_time_seconds) * timepoints))

    # This step applies a gaussian blur to the provided axes. Blurring greatly increases DCE accuracy on noisy data.
    image = preprocess_dce(image, gaussian_blur=gaussian_blur, gaussian_blur_axis=gaussian_blur_axis)

    # Data stored in other files may be relevant to DCE calculations. This utility function collects them and stores them in local variables.
    AIF_label_image, label_image, T1_image, AIF = retreive_data_from_files(filepath, label_file, label_mode, label_suffix, label_value, AIF_label_file, AIF_label_value, AIF_mode, AIF_label_suffix, T1_map_file, T1_map_suffix, AIF_value_data, AIF_value_suffix, image)

    # If no pre-set AIF text file is provided, one must be generated either from a label-map or a population AIF.
    if len(AIF) == 0:  # len() works for both the empty-list default and numpy arrays.
        AIF = generate_AIF(scan_time_seconds, injection_start_time_seconds, time_interval_seconds, image, AIF_label_image, AIF_value_data, AIF_mode, dimension, AIF_label_value)

    # Error-catching for broken AIFs.
    if len(AIF) == 0:
        print('Problem calculating AIF. Skipping this volume...')
        return []

    # Signal conversion is required in order for raw data to interface with the Tofts model.
    contrast_image = convert_intensity_to_concentration(image, T1_tissue, TR, flip_angle_degrees, injection_start_time_seconds, relaxivity, time_interval_seconds, hematocrit)

    # Depending on where the AIF is derived from, AIF values may also need to be converted into Gd concentration.
    if AIF_mode == 'population':
        contrast_AIF = AIF
    elif AIF_value_data != [] and not convert_AIF_values:
        contrast_AIF = AIF
    else:
        contrast_AIF = convert_intensity_to_concentration(AIF, T1_tissue, TR, flip_angle_degrees, injection_start_time_seconds, relaxivity, time_interval_seconds, hematocrit, T1_blood=T1_blood)

    # The optimization portion of the program is run here.
    parameter_maps = simplex_optimize(contrast_image, contrast_AIF, time_interval_seconds, bolus_time, image, label_image, mask_value, mask_threshold, initial_fitting_function_parameters, outputs, processes)

    # Outputs are saved, and then returned.
    for param_idx, param in enumerate(outputs):
        save_numpy_2_nifti(parameter_maps[...,param_idx], filepath, outfile_prefix + param + '.nii.gz')
    return outputs
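A sketch of a typical call with hypothetical filenames; the parameter names follow the signature above, and each requested map is written as <outfile_prefix><param>.nii.gz:

calc_DCE_properties_single('dce_4d.nii.gz',
                           T1_tissue=1000,
                           T1_blood=1440,
                           AIF_label_file='dce_4d-AIF-label.nii.gz',
                           AIF_mode='label_average',
                           outputs=['ktrans', 've', 'auc'],
                           outfile_prefix='patient01_')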
Example #5
def Slicer_Rotate(input_numpy, reference_nifti, affine_matrix, Slicer_path="/opt/Slicer-4.5.0-1-linux-amd64/Slicer"):

    save_numpy_2_nifti(input_numpy, reference_nifti, 'temp.nii.gz')
    save_affine(affine_matrix, 'temp.txt')

    Slicer_Command = [Slicer_path, '--launch', 'ResampleScalarVectorDWIVolume', 'temp.nii.gz', 'temp_out.nii.gz', '-f', 'temp.txt', '-i', 'bs']

    call(' '.join(Slicer_Command), shell=True)

    return convert_input_2_numpy('temp_out.nii.gz')
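A sketch of a call with a small in-plane rotation. The 4x4 matrix below is plain NumPy; whether ResampleScalarVectorDWIVolume accepts it in exactly this form depends on what save_affine writes, so treat the transform and paths as illustrative:

import numpy as np

theta = np.radians(5)  # rotate 5 degrees about the z-axis
affine = np.eye(4)
affine[0:2, 0:2] = [[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]]

input_numpy = np.random.rand(64, 64, 24)  # stand-in volume
rotated_numpy = Slicer_Rotate(input_numpy, 'reference.nii.gz', affine)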
Example #6
def fill_in_convex_outline(input_data,
                           output_file=None,
                           reference_nifti=None,
                           threshold_limit=[0, 100],
                           color_threshold_limits=[[100, 300], [0, 100],
                                                   [0, 100]],
                           output_label_num=1):
    """ Thresholds a jpg according to certain color parameters. Uses a hole-filling algorithm to color in
        regions of interest.

        TODO: Reorganize into two separate tracks, instead of two winding tracks.
    """

    image_nifti, image_type = convert_input_2_numpy(input_data,
                                                    return_type=True)

    label_nifti = np.zeros_like(image_nifti)

    if image_type == 'image':

        red_range = np.logical_and(
            color_threshold_limits[0][0] < image_nifti[:, :, 0],
            image_nifti[:, :, 0] < color_threshold_limits[0][1])
        green_range = np.logical_and(
            color_threshold_limits[1][0] < image_nifti[:, :, 1],
            image_nifti[:, :, 1] < color_threshold_limits[1][1])
        blue_range = np.logical_and(
            color_threshold_limits[2][0] < image_nifti[:, :, 2],
            image_nifti[:, :, 2] < color_threshold_limits[2][1])
        # np.logical_and takes only two arrays (a third is treated as the
        # output buffer), so chain the two calls.
        valid_range = np.logical_and(np.logical_and(red_range, green_range),
                                     blue_range)

        label_nifti[valid_range] = 1

        label_nifti = ndimage.morphology.binary_fill_holes(
            label_nifti[:, :, 0]).astype(label_nifti.dtype)

        if output_file is not None:
            # Note: scipy.misc.imsave was removed in SciPy 1.2;
            # imageio.imwrite is the usual replacement.
            misc.imsave(output_file, label_nifti * 255)

    else:
        image_nifti[image_nifti != 0] = output_label_num
        if image_nifti.ndim == 3:
            for z in range(image_nifti.shape[2]):
                label_nifti[..., z] = ndimage.morphology.binary_fill_holes(
                    image_nifti[..., z]).astype(image_nifti.dtype)
        else:
            label_nifti = ndimage.morphology.binary_fill_holes(
                image_nifti).astype(image_nifti.dtype)

        print(np.sum(label_nifti), 'HOLE FILLED SUM')

        if output_file is not None:
            save_numpy_2_nifti(label_nifti, reference_nifti, output_file)

    return label_nifti
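Two sketches of how the function's two tracks might be used; the filenames and thresholds are hypothetical:

# Image track: fill a red outline in a JPEG (red high, green/blue low).
label = fill_in_convex_outline('outline.jpg',
                               output_file='outline_filled.jpg',
                               color_threshold_limits=[[100, 300], [0, 100],
                                                       [0, 100]])

# Volume track: fill holes slice-by-slice in an existing label map.
label = fill_in_convex_outline('roi-label.nii.gz',
                               output_file='roi-label_filled.nii.gz',
                               reference_nifti='roi-label.nii.gz')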
Example #7
def Add_White_Noise(input_folder, noise_scale=1, noise_multiplier=10):

    input_niis = glob.glob(os.path.join(input_folder, '*Signal.nii*'))

    for input_4d_nifti in input_niis:

        input_numpy = convert_input_2_numpy(input_4d_nifti)

        for t in range(input_numpy.shape[-1]):
            input_numpy[..., t] = input_numpy[..., t] + np.random.normal(
                scale=noise_scale,
                size=input_numpy[..., t].shape) * noise_multiplier

        save_numpy_2_nifti(
            input_numpy, input_4d_nifti,
            input_4d_nifti.split('.')[0] + '_noise_' + str(noise_multiplier) +
            '.nii.gz')
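The per-timepoint update is plain additive Gaussian noise; a self-contained equivalent for a single frame (the shape is arbitrary):

import numpy as np

frame = np.zeros((64, 64, 12))  # stand-in for input_numpy[..., t]
noisy_frame = frame + np.random.normal(scale=1, size=frame.shape) * 10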
Example #8
def Add_Head_Jerks(input_folder,
                   random_rotations=5,
                   random_duration_range=[4, 9],
                   random_rotation_peaks=[[-4, 4], [-4, 4], [-4, 4]],
                   durations=7,
                   timepoints=7,
                   rotation_peaks=[4, 4, 0]):

    input_niis = glob.glob(os.path.join(input_folder, '*Signal*noise*'))
    print(os.path.join(input_folder, '*Signal*noise*'))
    input_niis = [x for x in input_niis if 'jerk' not in x]

    for input_4d_nifti in input_niis:

        print(input_4d_nifti)
        input_4d_numpy = convert_input_2_numpy(input_4d_nifti)
        print(input_4d_numpy.shape)
        output_motion_array = generate_identity_affine(input_4d_numpy.shape[-1])

        if random_rotations > 0:

            total_jerk_windows = []

            for random_rotation in range(random_rotations):

                # Will hang if more random_rotations are specified than can fit in available timepoints.
                overlapping = True
                while overlapping:
                    random_duration = np.random.randint(*random_duration_range)
                    random_timepoint = np.random.randint(0, input_4d_numpy.shape[-1]-random_duration)
                    random_jerk_window = np.arange(random_timepoint, random_timepoint + random_duration)
                    if not any(x in total_jerk_windows for x in random_jerk_window):
                        overlapping = False
                        total_jerk_windows.extend(random_jerk_window)

                random_motion = generate_motion_jerk(
                    duration=random_duration,
                    timepoint=random_timepoint,
                    rotation_peaks=[np.random.randint(*random_rotation_peaks[0]),
                                    np.random.randint(*random_rotation_peaks[1]),
                                    np.random.randint(*random_rotation_peaks[2])],
                    total_timepoints=input_4d_numpy.shape[-1])

                print(random_motion.shape)
                print(output_motion_array.shape)

                for t in range(input_4d_numpy.shape[-1]):
                    print(output_motion_array[..., t])

                output_motion_array = compose_affines(output_motion_array, random_motion)

            output_4d_numpy = np.zeros_like(input_4d_numpy)

            for t in range(input_4d_numpy.shape[-1]):
                print(output_motion_array[..., t])
                output_4d_numpy[..., t] = apply_affine(
                    input_4d_numpy[..., t], output_motion_array[..., t],
                    method='slicer',
                    Slicer_path="C:/Users/azb22/Documents/Software/SlicerNightly/Slicer_4.6.0/Slicer.exe")

        else:
            # With no random rotations requested, save the volume unchanged;
            # output_4d_numpy would otherwise be undefined below.
            output_4d_numpy = input_4d_numpy

        save_numpy_2_nifti(output_4d_numpy, input_4d_nifti,
                           input_4d_nifti.split('.')[0] + '_jerk.nii.gz')
Example #9
def intensity_cube_phantom(reference_image, output_folder):

    nifti_3d = nib.load(reference_image)
    image_3d = nifti_3d.get_fdata()

    for phantom_type in ['grey', 'split', 'checker', 'noisy_grey', 'one_spot']:
        phantom_3d = np.zeros((200, 200, 2))
        label_3d = np.zeros_like(phantom_3d)
        label_3d[70:130, 70:130, :] = 1
        phantom_3d[phantom_3d < 0] = 0

        if phantom_type == 'grey':
            phantom_3d[:, :, :] = 100

        elif phantom_type == 'split':
            phantom_3d[0:100, :, :] = 50
            phantom_3d[100:, :, :] = 150

        elif phantom_type == 'checker':
            indice_list = []
            for i in range(200):
                if i % 20 < 10:
                    indice_list += [i]
            phantom_3d[:, :, :] = 50
            for indice in indice_list:
                phantom_3d[indice, indice_list, :] = 150

        elif phantom_type == 'noisy_grey':
            phantom_3d = 100 + 10 * np.random.randn(200, 200, 2)
            phantom_3d[phantom_3d < 0] = 0

        elif phantom_type == 'one_spot':
            phantom_3d[:, :, :] = 100
            phantom_3d[80:100, 80:100, :] = 150

        nifti_util.save_numpy_2_nifti(
            phantom_3d, reference_image,
            os.path.join(output_folder,
                         'Intensity_' + phantom_type + '_Phantom.nii.gz'))
        nifti_util.save_numpy_2_nifti(
            label_3d, reference_image,
            os.path.join(output_folder, 'Intensity_' + phantom_type +
                         '_Phantom-label.nii.gz'))

        print(phantom_type)

    return
Example #10
    def reconstruct_parameter_maps(self):

        if self.sess is None:
            self.sess = tf.Session()
            self.saver = tf.train.Saver()
            self.sess.run(self.init_op)

        output_array = np.zeros((self.testset.voxel_count, 2), dtype=float)
        remainder = self.testset.voxel_count % self.test_batch_size
        output_row_idx = 0
        completed = False  # This is dumb, come back to it

        while not completed:
            batch_x, batch_seqlen = self.testset.next(self.test_batch_size)
            preds = self.sess.run(self.prediction,
                                  feed_dict={
                                      self.data: batch_x,
                                      self.seqlen: batch_seqlen
                                  })

            print(output_row_idx)
            print(output_array.shape)

            output_array[
                output_row_idx:min(output_row_idx +
                                   self.test_batch_size, output_row_idx +
                                   preds.shape[0]), :] = preds

            output_row_idx += self.test_batch_size

            if output_row_idx > self.testset.voxel_count:
                completed = True

        ktrans_array = output_array[:, 0].reshape(self.testset.data_shape)
        ve_array = output_array[:, 1].reshape(self.testset.data_shape)

        # Modify reference file to create 3D from 4D.
        save_numpy_2_nifti(ktrans_array,
                           self.ktrans_filepath,
                           output_filepath=self.output_ktrans_filepath)
        save_numpy_2_nifti(ve_array,
                           self.ktrans_filepath,
                           output_filepath=self.output_ve_filepath)

        return
Example #11
def correlation_analysis(input_volume):

    image_numpy = convert_input_2_numpy(input_volume)

    displacement_list = np.mgrid[1:17:1, 1:17:1, 1:17:1].reshape(3, -1).T - 8

    output_correlation_matrix = np.zeros((17, 17, 17), dtype=float)

    for displacement in displacement_list:
        print(displacement)
        x, y, z = displacement
        slice_list = []
        displacement_slice_list = []

        for axis in [x, y, z]:
            if axis < 0:
                slice_list += [slice(-axis, None, 1)]
                displacement_slice_list += [slice(0, axis, 1)]
            elif axis > 0:
                slice_list += [slice(0, -axis, 1)]
                displacement_slice_list += [slice(axis, None, 1)]
            else:
                slice_list += [slice(None)]
                displacement_slice_list += [slice(None)]

        print(slice_list)
        print(displacement_slice_list)

        # Indexing with a list of slices is an error in modern NumPy;
        # a tuple of slices is required.
        compare_array_1 = image_numpy[tuple(slice_list)]
        compare_array_2 = image_numpy[tuple(displacement_slice_list)]

        print(compare_array_1.shape)
        print(compare_array_2.shape)

        correlation = np.corrcoef(compare_array_1.reshape(-1),
                                  compare_array_2.reshape(-1))
        print(correlation)
        print('\n')

        output_correlation_matrix[x + 8, y + 8, z + 8] = correlation[1, 0]

    save_numpy_2_nifti(
        output_correlation_matrix, None,
        nifti_splitext(os.path.basename(input_volume))[0] + '_array' +
        nifti_splitext(os.path.basename(input_volume))[-1])
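Each iteration correlates the image against a shifted copy of itself. For a single displacement, the core computation reduces to this self-contained snippet:

import numpy as np

image = np.random.rand(32, 32, 32)
x, y, z = 2, 0, -1  # one displacement from the [-8, 8] grid

base = tuple(slice(-d, None) if d < 0 else
             slice(0, -d) if d > 0 else slice(None) for d in (x, y, z))
shifted = tuple(slice(0, d) if d < 0 else
                slice(d, None) if d > 0 else slice(None) for d in (x, y, z))

r = np.corrcoef(image[base].ravel(), image[shifted].ravel())[1, 0]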
Example #12
def Preprocess_Volumes(input_directory, output_directory, r2_threshold=.9):

    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    file_database = glob.glob(os.path.join(input_directory, '*r2*.nii*'))
    print(os.path.join(input_directory, '*r2*.nii*'))

    for file in file_database:

        print(file)

        input_ktrans = replace_suffix(file, 'r2', 'ktrans')
        input_ve = replace_suffix(file, 'r2', 've')

        output_ktrans = os.path.join(
            output_directory,
            replace_suffix(os.path.basename(file), 'r2',
                           'ktrans_r2_' + str(r2_threshold)))
        output_ve = os.path.join(
            output_directory,
            replace_suffix(os.path.basename(file), 'r2',
                           've_r2_' + str(r2_threshold)))
        output_kep = os.path.join(
            output_directory,
            replace_suffix(os.path.basename(file), 'r2',
                           'kep_r2_' + str(r2_threshold)))
        output_r2 = os.path.join(
            output_directory,
            replace_suffix(os.path.basename(file), 'r2',
                           'r2_r2_' + str(r2_threshold)))

        print(input_ktrans)

        r2_map = np.nan_to_num(convert_input_2_numpy(file))
        ktrans_map = convert_input_2_numpy(input_ktrans)
        ve_map = convert_input_2_numpy(input_ve)

        print((r2_map < r2_threshold).sum())

        ve_map[ktrans_map > 10] = 0
        ktrans_map[ktrans_map > 10] = 0
        ktrans_map[ve_map > 1] = 0
        ve_map[ve_map > 1] = 0

        ktrans_map[r2_map < r2_threshold] = -.01
        ve_map[r2_map < r2_threshold] = -.01
        kep_map = np.nan_to_num(ktrans_map / ve_map)
        kep_map[r2_map < r2_threshold] = -.01

        save_numpy_2_nifti(ktrans_map, input_ktrans, output_ktrans)
        save_numpy_2_nifti(ve_map, input_ktrans, output_ve)
        save_numpy_2_nifti(kep_map, input_ktrans, output_kep)
        save_numpy_2_nifti(r2_map, input_ktrans, output_r2)
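A usage sketch with hypothetical directories. kep is derived as ktrans / ve, and voxels whose model fit falls below the R^2 threshold are flagged with -.01:

Preprocess_Volumes('./raw_parameter_maps', './filtered_parameter_maps',
                   r2_threshold=.9)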
Example #13
def glcm_cube_phantom(reference_image, output_folder):

    nifti_3d = nib.load(reference_image)
    image_3d = nifti_3d.get_fdata()

    for direction in ['Vertical', 'Horizontal', 'Grid']:
        for alternation_rate in np.arange(0, 6, 1):
            phantom_3d = 100 + 10 * np.random.randn(200, 200, 2)
            label_3d = np.zeros_like(phantom_3d)
            label_3d[0:20, 0:20, :] = 1
            phantom_3d[phantom_3d < 0] = 0

            indice_list = []
            both_indice_list = []

            # Skip index generation when alternation_rate == 0 to avoid a
            # modulo-by-zero.
            if alternation_rate > 0:
                for i in range(200):
                    if i % (alternation_rate * 2) < alternation_rate:
                        indice_list += [i]
                        # both_indice_list += [i, i, 1]
                        # both_indice_list += [i, i, 2]

            if alternation_rate == 0:
                pass
            elif direction == 'Vertical':
                phantom_3d[indice_list, :, :] += 100
            elif direction == 'Horizontal':
                phantom_3d[:, indice_list, :] += 100
            elif direction == 'Grid':
                for indice in indice_list:
                    phantom_3d[indice, indice_list, :] += 100

            nifti_util.save_numpy_2_nifti(
                phantom_3d, reference_image,
                os.path.join(
                    output_folder, 'GLCM_' + direction + '_' +
                    str(alternation_rate) + '_Phantom.nii.gz'))
            nifti_util.save_numpy_2_nifti(
                label_3d, reference_image,
                os.path.join(
                    output_folder, 'GLCM_' + direction + '_' +
                    str(alternation_rate) + '_Phantom-label.nii.gz'))

            print(direction, alternation_rate)
Example #14
    def train(self,
              training_data_collection,
              validation_data_collection=None,
              output_model_filepath=None,
              input_groups=None,
              training_batch_size=32,
              validation_batch_size=32,
              training_steps_per_epoch=None,
              validation_steps_per_epoch=None,
              initial_learning_rate=.0001,
              learning_rate_drop=None,
              learning_rate_epochs=None,
              num_epochs=None,
              callbacks=['save_model'],
              **kwargs):

        with tf.Session() as sess:

            self.sess = sess
            self.batch_size = training_batch_size

            if training_steps_per_epoch is None:
                training_steps_per_epoch = training_data_collection.total_cases // training_batch_size + 1

            training_data_generator = training_data_collection.data_generator(
                perpetual=True,
                data_group_labels=input_groups,
                verbose=False,
                batch_size=training_batch_size)

            sample_vector = np.random.uniform(-1,
                                              1,
                                              size=(self.batch_size,
                                                    self.vector_size))

            discriminator_optimizer = tf.train.AdamOptimizer(
                initial_learning_rate).minimize(self.d_loss,
                                                var_list=self.d_vars)
            generator_optimizer = tf.train.AdamOptimizer(
                initial_learning_rate).minimize(self.g_loss,
                                                var_list=self.g_vars)

            try:
                tf.global_variables_initializer().run()
            except:
                tf.initialize_all_variables().run()

            start_time = time.time()

            try:
                # Save output. Text mode with newline='' is the Python 3 csv
                # idiom; binary 'ab' mode no longer works with csv.writer.
                with open('loss_log.csv', 'a', newline='') as writefile:
                    csvfile = csv.writer(writefile, delimiter=',')
                    csvfile.writerow(
                        ['g_loss', 'd_loss_real', 'd_loss_fake', 'd_loss'])
                    for epoch in range(num_epochs):

                        for batch_idx in range(training_steps_per_epoch):

                            batch_vector = np.random.uniform(
                                -1,
                                1,
                                size=(self.batch_size,
                                      self.vector_size)).astype(np.float32)
                            batch_images = next(training_data_generator)[0]

                            # batch_images = ((batch_images - np.min(batch_images)) / (np.max(batch_images) - np.min(batch_images))) * 2 - 1

                            if batch_idx % 10 == 0:
                                true = np.random.normal(
                                    0, 0.3,
                                    [self.batch_size, 1]).astype(np.float32)
                                false = np.random.normal(
                                    0.7, 1.3,
                                    [self.batch_size, 1]).astype(np.float32)
                            else:
                                false = np.random.normal(
                                    0, 0.3,
                                    [self.batch_size, 1]).astype(np.float32)
                                true = np.random.normal(
                                    0.7, 1.3,
                                    [self.batch_size, 1]).astype(np.float32)

                            # Update D network
                            _ = self.sess.run(
                                [discriminator_optimizer],
                                feed_dict={
                                    self.inputs: batch_images,
                                    self.vectors: batch_vector,
                                    self.true_labels: true,
                                    self.false_labels: false
                                })

                            # Update G network (twice to help training)
                            _ = self.sess.run(
                                [generator_optimizer],
                                feed_dict={
                                    self.vectors: batch_vector,
                                    self.true_labels: true,
                                    self.false_labels: false
                                })
                            # _ = self.sess.run([generator_optimizer], feed_dict={ self.vectors: batch_vector, self.true_labels: true, self.false_labels: false })

                            errD_fake = self.d_loss_fake.eval({
                                self.vectors: batch_vector,
                                self.true_labels: true,
                                self.false_labels: false
                            })
                            errD_real = self.d_loss_real.eval({
                                self.inputs: batch_images,
                                self.true_labels: true,
                                self.false_labels: false
                            })
                            errG = self.g_loss.eval({
                                self.vectors: batch_vector,
                                self.true_labels: true,
                                self.false_labels: false
                            })

                            # print 'FAKE ERR', errD_fake, 'REAL ERR', errD_real, 'G ERR', errG

                            # if batch_idx % 50 == 0:
                            #     print batch_idx
                            #     samples, d_loss, g_loss = self.sess.run([self.sampler, self.d_loss, self.g_loss],feed_dict={self.z: sample_z, self.inputs: sample_inputs})
                            #     save_images(samples, image_manifold_size(samples.shape[0]), './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))
                            #     print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))

                        print(
                            "Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f"
                            % (epoch, batch_idx, training_steps_per_epoch,
                               time.time() - start_time,
                               errD_fake + errD_real, errG))
                        self.save(epoch)
                        # csvfile.writerow([errG, errD_real, errD_fake, errD_fake+errD_real])

                        batch_vector = np.random.uniform(
                            -1, 1, [self.batch_size, self.vector_size]).astype(
                                np.float32)
                        test_output = self.sess.run(
                            [self.G], feed_dict={self.vectors: batch_vector})
                        for i in range(test_output[0].shape[0]):
                            data = test_output[0][i, ..., 0]
                            save_numpy_2_nifti(
                                data, np.eye(4),
                                'other_gan_test_' + str(i) + '.nii.gz')
                            if epoch == 0:
                                save_numpy_2_nifti(
                                    batch_images[i, ..., 0], np.eye(4),
                                    'sample_patch_' + str(i) + '.nii.gz')

            except KeyboardInterrupt:
                pass
Example #15
def resample(input_data,
             output_filename='',
             input_transform=None,
             method="slicer",
             command="Slicer",
             temp_dir='./',
             interpolation='linear',
             dimensions=[1, 1, 1],
             reference_volume=None):
    """ A catch-all function for resampling. Will resample a 3D volume to given dimensions according
        to the method provided.

        TODO: Add resampling for 4D volumes.
        TODO: Add dimension, interpolation, reference parameter. Currently set to linear/isotropic.

        Parameters
        ----------
        input_data: str or array
            Can be a 3D volume or a filename.
        output_filename: str
            Location to save output data to. If left as '', will return numpy array.
        input_transform: str
            details TBD, unimplemented
        method: str
            Will resample according to the provided method.
            Currently available: ['slicer']
        command: str
            The literal command-line string to be inputted via Python's subprocess module.
        temp_dir: str
            If temporary files are created, they will be saved here.

        Returns
        -------
        output: array
            Output data, only if output_filename is left as ''.
    """

    resample_methods = ['slicer']
    if method not in resample_methods:
        print(
            'Input \"method\" parameter is not available. Available methods: ',
            resample_methods)
        return

    if method == 'slicer':

        # A good reason to have a Class for qtim methods is to cut through all of this extra code.

        temp_input, temp_output = False, False

        if not isinstance(input_data, str):
            input_filename = os.path.join(temp_dir, 'temp.nii.gz')
            nifti_util.save_numpy_2_nifti(input_data, input_filename)
            temp_input = True
        else:
            input_filename = input_data

        if output_filename == '':
            temp_output = True
            output_filename = os.path.join(temp_dir, 'temp_out.nii.gz')

        dimensions = str(dimensions).strip('[]').replace(' ', '')

        if reference_volume is not None or input_transform is not None:
            resample_command = [
                command, '--launch', 'ResampleScalarVectorDWIVolume',
                input_filename, output_filename,
                '--interpolation', interpolation
            ]
            # Only pass '-R' / '-f' when the corresponding argument was
            # actually provided.
            if reference_volume is not None:
                resample_command += ['-R', reference_volume]
            if input_transform is not None:
                resample_command += ['-f', input_transform]
            print(' '.join(resample_command))
            subprocess.call(resample_command)
        else:
            resample_command = [
                command, '--launch', 'ResampleScalarVolume', '-i',
                interpolation, '-s', dimensions, input_filename,
                output_filename
            ]
            print(' '.join(resample_command))
            subprocess.call(resample_command)

        if temp_input:
            os.remove(input_filename)

        if temp_output:
            output = format_util.convert_input_2_numpy(output_filename)
            os.remove(output_filename)
            return output
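Usage sketches for both return modes described in the docstring; the paths are hypothetical, and Slicer is assumed to be invocable as 'Slicer':

import numpy as np

# File in, file out: resample to isotropic 1mm spacing.
resample('input.nii.gz', output_filename='resampled.nii.gz',
         dimensions=[1, 1, 1])

# Array in, array out: temp files are written and cleaned up internally.
volume = np.random.rand(64, 64, 64)
resampled = resample(volume, dimensions=[2, 2, 2])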
Example #16
def Convert_BRATS_Data():

    # GRADES, BRATS_PATH, and OUTPUT_PATH are assumed to be module-level
    # constants defined elsewhere.
    subdirs = []

    for grade in GRADES:
        subdirs += glob.glob(os.path.join(BRATS_PATH, grade, '*') + '/')

    for subdir in subdirs:

        print(subdir)

        #Flair
        Flair_dir = glob.glob(subdir + '*Flair*/')
        Flair_file = glob.glob(Flair_dir[0] + '/*Flair*.mha')[0]

        #T1
        T1_dir = glob.glob(subdir + '/*T1*/')
        T1_file = glob.glob(T1_dir[0] + '/*T1*.mha')[0]

        #T1c
        T1c_dir = glob.glob(subdir + '/*T1c*/')
        T1c_file = glob.glob(T1c_dir[0] + '/*T1c*.mha')[0]

        #T2
        T2_dir = glob.glob(subdir + '/*T2*/')
        T2_file = glob.glob(T2_dir[0] + '/*T2*.mha')[0]

        #ROI
        ROI_dir = glob.glob(subdir + '/*more*/')
        if len(ROI_dir) == 0:
            ROI_dir = glob.glob(subdir + '/*OT*/')
            ROI_file = glob.glob(ROI_dir[0] + '/*OT*.mha')[0]
        else:
            ROI_file = glob.glob(ROI_dir[0] + '/*more*.mha')[0]

        output_directory = os.path.join(
            OUTPUT_PATH, os.path.basename(os.path.dirname(subdir)))

        if not os.path.exists(output_directory):
            os.mkdir(output_directory)

        images_mha = [Flair_file, T1_file, T1c_file, T2_file, ROI_file]
        images_output = ['FLAIR', 'T1', 'T1c', 'T2', 'ROI']
        images_output_filenames = [
            label + '.nii.gz' for label in images_output
        ]
        images_output_filenames_preprocessed = [
            label + '_pp.nii.gz' for label in images_output
        ]

        for img_idx, image_mha in enumerate(images_mha):

            output_filename = os.path.join(output_directory,
                                           images_output_filenames[img_idx])
            output_filename_preprocessed = os.path.join(
                output_directory,
                images_output_filenames_preprocessed[img_idx])

            print(output_filename)
            if not os.path.exists(output_filename):
                Slicer_Command = [
                    'Slicer', '--launch', 'ResampleScalarVectorDWIVolume',
                    image_mha, output_filename
                ]
                call(' '.join(Slicer_Command), shell=True)

            if not os.path.exists(output_filename_preprocessed):
                img = nifti_2_numpy(output_filename)

                if images_output[img_idx] == 'FLAIR':
                    mask = np.copy(img)
                    mask[mask > 0] = 1
                    save_numpy_2_nifti(
                        mask, output_filename,
                        os.path.join(output_directory, 'MASK.nii.gz'))

                if images_output[img_idx] == 'ROI':
                    img[img > 0] = 1
                else:
                    masked_img = np.ma.masked_where(img == 0, img)
                    normed_img = (
                        img - np.ma.mean(masked_img)) / np.ma.std(masked_img)
                    normed_img[img == 0] = 0
                    img = normed_img

                print(images_output_filenames[img_idx])
                save_numpy_2_nifti(img, output_filename,
                                   output_filename_preprocessed)

    return
Example #17
def return_connected_components(input_volume,
                                mask_value=0,
                                return_split=True,
                                truncate=False,
                                truncate_padding=0,
                                output_filepath=None):
    """ This function takes in an N-dimensional array and uses scikit-image's measure.label function
        to split it into individual connected components. One can either return a split version of
        the original label, which will be stacked in a new batch dimension (N, ...), or return a renumbered
        version of the original label. One can also choose to truncate the output of the original image,
        instead returning a list of arrays of different sizes.

        Parameters
        ----------

        input_volume: N-dimensional array
            The volume to be queried.
        mask_value: int or float
            Islands composed of "mask_value" will be ignored.
        return_split: bool
            Whether to return a stacked output of equal-size binary arrays for each island,
            or to return one array with differently-labeled islands for each output.
        truncate: bool
            Whether or not to truncate the output. Irrelevant if return_split is False
        truncate_padding: int
            How many voxels of padding to leave when truncating.
        output_filepath: str
            If return_split is False, output will be saved to this file. If return_split
            is True, output will be saved to this file with the suffix "_[#]" for each island
            number.

        Returns
        -------
        output_array: N+1 or N-dimensional array
            Output array(s) depending on return_split

    """

    image_numpy = convert_input_2_numpy(input_volume)

    connected_components = measure.label(image_numpy,
                                         background=mask_value,
                                         connectivity=2)

    if not return_split:
        if output_filepath is not None:
            save_numpy_2_nifti(connected_components, input_volume,
                               output_filepath)
        return connected_components

    else:
        all_islands = split_image(connected_components)

        # Truncate only when requested, indexing islands by position.
        if truncate:
            for island_idx, island in enumerate(all_islands):
                all_islands[island_idx] = truncate_image(
                    island, truncate_padding=truncate_padding)

        if output_filepath is not None:
            # Save each island out individually.
            for island_idx, island in enumerate(all_islands):
                save_numpy_2_nifti(
                    island, input_volume,
                    replace_suffix(output_filepath, '', str(island_idx)))

        return all_islands
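A sketch with a toy volume; convert_input_2_numpy is expected to pass arrays through, per the docstring:

import numpy as np

toy = np.zeros((10, 10, 10), dtype=int)
toy[1:3, 1:3, 1:3] = 1  # island 1
toy[6:9, 6:9, 6:9] = 1  # island 2

# One relabeled array, islands numbered consecutively.
relabeled = return_connected_components(toy, return_split=False)

# Or a collection of per-island arrays, optionally truncated.
islands = return_connected_components(toy, return_split=True, truncate=True)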
Example #18
def Create_Ideal_DCE(input_folder, output_filepath='', input_aif=''):

    input_DCEs = []
    input_niis = glob.glob(os.path.join(input_folder, '*nrrd'))

    for nii in input_niis:
        if 'ktrans' in nii or 've' in nii:
            continue
        else:
            input_DCEs += [nii]

    for input_4d_nifti in input_DCEs:

        print('Regenerating...', input_4d_nifti)

        # if output_filepath == '':
        output_filepath = input_4d_nifti.split('.')[0]

        input_ktrans = output_filepath + '_ktrans.nii.gz'
        input_ve = output_filepath + '_ve.nii.gz'

        input_4d_nifti = Convert_NRRD_to_Nifti(input_4d_nifti, input_ktrans)

        input_numpy_4d = convert_input_2_numpy(input_4d_nifti)
        output_numpy_4d = np.zeros_like(input_numpy_4d)
        input_numpy_ktrans = convert_input_2_numpy(input_ktrans)
        input_numpy_ve = convert_input_2_numpy(input_ve)

        baseline_numpy = np.mean(input_numpy_4d[..., 0:7], axis=3)

        scan_time_seconds = 307.2
        time_interval_seconds = float(scan_time_seconds / input_numpy_4d.shape[-1])
        time_interval_minutes = time_interval_seconds / 60
        time_series = np.arange(0, input_numpy_4d.shape[-1]) / (60 / time_interval_seconds)
        injection_start_time_seconds = 38.4

        T1_tissue = 1000
        T1_blood = 1440
        TR = 3.8
        flip_angle_degrees = 25
        relaxivity = .0045
        hematocrit = .45

        if input_aif == '':
            population_AIF = parker_model_AIF(scan_time_seconds, injection_start_time_seconds, time_interval_seconds, input_numpy_4d)
            concentration_AIF = population_AIF
        else:
            print('extracting AIF...')
            AIF_label_numpy = convert_input_2_numpy(input_aif)
            AIF = generate_AIF(scan_time_seconds, injection_start_time_seconds, time_interval_seconds, input_numpy_4d, AIF_label_numpy)
            concentration_AIF = convert_intensity_to_concentration(AIF, T1_tissue, TR, flip_angle_degrees, injection_start_time_seconds, relaxivity, time_interval_seconds, hematocrit, T1_blood=T1_blood)

        for index in np.ndindex(input_numpy_ktrans.shape):
            output_numpy_4d[index] = np.array(estimate_concentration(
                [input_numpy_ktrans[index], input_numpy_ve[index]],
                concentration_AIF, time_interval_minutes))

        # Or load presaved..
        # output_numpy_4d = nifti_2_numpy('DCE_MRI_Phantom_Regenerated_Concentrations.nii.gz')

        save_numpy_2_nifti(output_numpy_4d, input_4d_nifti, output_filepath + '_Regenerated_Concentrations.nii.gz')

        output_numpy_4d = revert_concentration_to_intensity(
            data_numpy=output_numpy_4d, reference_data_numpy=input_numpy_4d,
            T1_tissue=T1_tissue, TR=TR, flip_angle_degrees=flip_angle_degrees,
            injection_start_time_seconds=injection_start_time_seconds,
            relaxivity=relaxivity, time_interval_seconds=time_interval_seconds,
            hematocrit=hematocrit, T1_blood=0, T1_map=[])

        save_numpy_2_nifti(output_numpy_4d, input_4d_nifti, output_filepath + '_Regenerated_Signal.nii.gz')

    return
Example #19
def store_preloaded_hdf5_file(
        data_directories,
        output_filepath,
        modalities=['FLAIR_pp.nii.gz', 'T1post_pp.nii.gz'],
        label='full_edemamask_pp.nii.gz',
        verbose=True,
        levels=[4, 8, 16, 32, 64, 128],
        boundary_padding=10,
        max_dimension=64,
        samples_per_patient=100,
        preload_levels=False,
        wholevolume=False):

    patient_vols = []
    for directory in data_directories:
        patients = glob.glob(os.path.join(directory, '*/'))
        for patient in patients:
            single_patient_vols = []
            for modality in modalities + [label]:
                if modality is None:
                    continue
                single_patient_vols += [
                    glob.glob(os.path.join(patient, modality))[0]
                ]
            patient_vols += [single_patient_vols]

    if wholevolume:
        num_cases = len(patient_vols)
    else:
        num_cases = len(modalities) * len(patient_vols)

    hdf5_file = tables.open_file(output_filepath, mode='w')
    filters = tables.Filters(complevel=5, complib='blosc')
    hdf5_file.create_earray(hdf5_file.root,
                            'imagenames',
                            tables.StringAtom(256),
                            shape=(0, 1),
                            filters=filters,
                            expectedrows=num_cases)

    # If we want to pre-store different levels...
    if preload_levels:
        for dimension in levels:
            data_shape = (0, dimension + boundary_padding,
                          dimension + boundary_padding,
                          dimension + boundary_padding, 2)
            hdf5_file.create_earray(hdf5_file.root,
                                    'data_' + str(dimension),
                                    tables.Float32Atom(),
                                    shape=data_shape,
                                    filters=filters,
                                    expectedrows=num_cases)
    else:
        # If we don't.
        if wholevolume:
            data_shape = (0, 200, 200, 200, len(modalities))
        else:
            data_shape = (0, max_dimension + boundary_padding,
                          max_dimension + boundary_padding,
                          max_dimension + boundary_padding, len(modalities))
        print(data_shape)
        hdf5_file.create_earray(hdf5_file.root,
                                'data',
                                tables.Float32Atom(),
                                shape=data_shape,
                                filters=filters,
                                expectedrows=num_cases)

    for p_idx, single_patient_vols in enumerate(patient_vols):

        hdf5_file.root.imagenames.append(
            np.array(os.path.basename(os.path.dirname(
                single_patient_vols[0])))[np.newaxis][np.newaxis])
        print(os.path.basename(os.path.dirname(single_patient_vols[0])))

        if label is not None:
            # Find tumor label center of mass
            label = single_patient_vols[-1]
            label_numpy = convert_input_2_numpy(label)
            label_center = [int(x) for x in center_of_mass(label_numpy)]

            # Load volumes,
            volumes = np.stack([
                convert_input_2_numpy(vol) for vol in single_patient_vols[:-1]
            ],
                               axis=3)

            # pad if necessary, using black magic
            pad_dims = []
            radius = (max_dimension + boundary_padding) // 2  # integer division: radius is used in slices below
            for idx, dim in enumerate(volumes.shape[:-1]):
                padding = (-1 * min(0, label_center[idx] - radius),
                           -1 * min(0, dim - (label_center[idx] + radius)))
                pad_dims += [padding]
            pad_dims += [(0, 0)]
            print(pad_dims)
            volumes = np.pad(volumes, pad_dims, mode='constant')

            # and subsample, with more black magic ;)
            print(label_center)
            label_center = [
                x + pad_dims[i][0] for i, x in enumerate(label_center)
            ]
            print(label_center)
            print(volumes.shape)
            patch = volumes[label_center[0] - radius:label_center[0] + radius,
                            label_center[1] - radius:label_center[1] + radius,
                            label_center[2] - radius:label_center[2] +
                            radius, :]
            print(patch.shape)

            # Add to HDF5
            getattr(hdf5_file.root, 'data').append(patch[np.newaxis])

            save_numpy_2_nifti(
                patch[..., 1], single_patient_vols[0],
                os.path.join(os.path.dirname(single_patient_vols[0]),
                             'gan_patch.nii.gz'))

        elif wholevolume:

            # Load volumes,
            volumes = np.stack(
                [convert_input_2_numpy(vol) for vol in single_patient_vols],
                axis=3)

            # Crop volumes
            volumes = crop2(volumes)

            large = False

            # Skip strangely processed volumes
            for dim in volumes.shape:
                if dim > 200:
                    large = True

            if large:
                continue

            same_size_volume = np.zeros((200, 200, 200, len(modalities)))
            same_size_volume[0:volumes.shape[0], 0:volumes.shape[1],
                             0:volumes.shape[2], :] = volumes

            # Add to HDF5
            getattr(hdf5_file.root,
                    'data').append(same_size_volume[np.newaxis])

        else:

            # Generic MRI patching goes on here..

            continue

        # Report progress for each processed patient.
        if verbose:
            print('Processed...',
                  os.path.basename(os.path.dirname(single_patient_vols[0])),
                  'idx', p_idx)

        # except KeyboardInterrupt:
        #     raise
        # except:
        #     print 'ERROR converting', filepath, 'at dimension', dimension

    hdf5_file.close()

    return
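A read-back sketch with PyTables; the filename is hypothetical, and the dataset names match those created by the writer above:

import tables

with tables.open_file('preloaded_patches.hdf5', mode='r') as hdf5_file:
    patches = hdf5_file.root.data        # (num_cases, d, d, d, num_modalities)
    names = hdf5_file.root.imagenames    # (num_cases, 1) patient identifiers
    print(patches.shape, names.shape)
    first_patch = patches[0]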
Example #20
def imsave(images, size, path):
    # 'merge' is assumed to be a helper that tiles a batch of images into a
    # single array, saved here with an identity affine.
    return save_numpy_2_nifti(merge(images, size), np.eye(4), path)
Example #21
def create_gradient_phantom(output_prefix,
                            output_shape=(20, 20),
                            ktrans_range=[.01, .5],
                            ve_range=[.01, .8],
                            scan_time_seconds=120,
                            time_interval_seconds=1,
                            injection_start_time_seconds=40,
                            flip_angle_degrees=15,
                            TR=6.8,
                            relaxivity=.0045,
                            hematocrit=.45,
                            T1_tissue=1350,
                            aif='population'):
    """ TO-DO: Fix the ktrans variation so that it correctly ends in 0.35, instead of whatever
        it currently ends in. Also parameterize and generalize everything for more interesting
        phantoms.
    """

    # Initialize variables
    timepoints = int(scan_time_seconds / time_interval_seconds)
    time_series_minutes = np.arange(0,
                                    timepoints) / (60 / time_interval_seconds)
    time_interval_minutes = time_interval_seconds / 60
    print(time_interval_minutes)

    # Create empty phantom, labels, and outputs
    output_phantom_concentration = np.zeros(
        output_shape + (2, int(scan_time_seconds / time_interval_seconds)),
        dtype=float)
    output_phantom_signal = np.zeros_like(output_phantom_concentration)

    output_AIF_mask = np.zeros(output_shape + (2, ))
    output_region_mask = np.zeros_like(output_AIF_mask)
    output_AIF_mask[:, :, 1] = 1
    output_region_mask[:, :, 0] = 1
    output_ktrans = np.zeros_like(output_AIF_mask)
    output_ve = np.zeros_like(output_AIF_mask)

    # Create Parker AIF
    AIF = np.array(
        parker_model_AIF(scan_time_seconds,
                         injection_start_time_seconds,
                         time_interval_seconds,
                         timepoints=int(scan_time_seconds /
                                        time_interval_seconds)))
    AIF = AIF[np.newaxis, np.newaxis, np.newaxis, :]
    output_phantom_concentration[:, :, 1, :] = AIF

    # Fill in answers
    # Pair each range with its matching variable: ve sweeps ve_range and
    # ktrans sweeps ktrans_range.
    for ve_idx, ve in enumerate(
            np.linspace(ve_range[0], ve_range[1], output_shape[0])):
        for ktrans_idx, ktrans in enumerate(
                np.linspace(ktrans_range[0], ktrans_range[1],
                            output_shape[1])):
            output_ktrans[ve_idx, ktrans_idx, 0] = float(ktrans)
            output_ve[ve_idx, ktrans_idx, 0] = float(ve)

            # print(np.squeeze(AIF).shape)
            print(ve_idx, ktrans_idx)
            print(ktrans, ve)
            # conc = estimate_concentration([ktrans, ve], np.squeeze(AIF)[:], time_series_minutes)
            # print(len(conc))
            # print(conc[-1])
            # print(len(conc[-1]))
            output_phantom_concentration[ve_idx, ktrans_idx,
                                         0, :] = estimate_concentration(
                                             [ktrans, ve], np.squeeze(AIF),
                                             time_interval_minutes)

    save_numpy_2_nifti(output_phantom_concentration, None,
                       output_prefix + '_concentrations.nii.gz')
    save_numpy_2_nifti(output_ktrans, None, output_prefix + '_ktrans.nii.gz')
    save_numpy_2_nifti(output_ve, None, output_prefix + '_ve.nii.gz')

    output_phantom_signal = revert_concentration_to_intensity(
        data_numpy=output_phantom_concentration,
        reference_data_numpy=None,
        T1_tissue=T1_tissue,
        TR=TR,
        flip_angle_degrees=flip_angle_degrees,
        injection_start_time_seconds=injection_start_time_seconds,
        relaxivity=relaxivity,
        time_interval_seconds=time_interval_seconds,
        hematocrit=hematocrit,
        T1_blood=0,
        T1_map=[],
        static_baseline=140)

    save_numpy_2_nifti(output_phantom_signal, None,
                       output_prefix + '_phantom.nii.gz')
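A usage sketch; the prefix is hypothetical, and four NIfTI files (_concentrations, _ktrans, _ve, _phantom) are written per the save calls above:

create_gradient_phantom('./gradient_phantom',
                        output_shape=(20, 20),
                        ktrans_range=[.01, .5],
                        ve_range=[.01, .8])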
Example #22
def dcm_2_numpy(input_folder, verbose=False):
    """ Uses pydicom to stack an alphabetical list of DICOM files. TODO: Make it
        take slice_order into account.
    """

    if verbose:
        print('Searching for dicom files...')

    found_files = grab_files_recursive(input_folder)

    if verbose:
        print('Found', len(found_files), 'in directory. \n')
        print('Checking DICOM compatibility...')

    dicom_files = []
    for file in found_files:
        try:
            temp_dicom = pydicom.dcmread(file)  # read_file is a deprecated alias for dcmread
            dicom_files += [[
                file, temp_dicom.data_element('SeriesInstanceUID').value
            ]]
        except:
            continue

    if verbose:
        print('Found', len(dicom_files), 'DICOM files in directory. \n')
        print('Counting volumes..')

    unique_dicoms = defaultdict(list)
    for dicom_file in dicom_files:
        UID = dicom_file[1]
        unique_dicoms[UID] += [dicom_file[0]]

    if verbose:
        print('Found', len(list(unique_dicoms.keys())), 'unique volumes \n')
        print('Saving out files from these volumes.')

    output_filenames = []
    for UID in list(unique_dicoms.keys()):

        try:
            # Grab DICOMs for a certain Instance
            current_files = unique_dicoms[UID]
            current_dicoms = [
                get_uncompressed_dicom(dcm) for dcm in unique_dicoms[UID]
            ]
            # print current_files

            # Sort DICOMs by Instance.
            dicom_instances = [
                x.data_element('InstanceNumber').value for x in current_dicoms
            ]
            current_dicoms = [
                x for _, x in sorted(zip(dicom_instances, current_dicoms))
            ]
            current_files = [
                x for _, x in sorted(zip(dicom_instances, current_files))
            ]
            first_dicom, last_dicom = current_dicoms[0], current_dicoms[-1]

            print(first_dicom.file_meta)
            print(first_dicom.file_meta.TransferSyntaxUID)

            # Create a filename for the DICOM
            volume_label = '_'.join([
                first_dicom.data_element(tag).value for tag in naming_tags
            ]).replace(" ", "")
            volume_label = prefix + sanitize_filename(
                volume_label) + suffix + '.nii.gz'

            if verbose:
                print('Saving...', volume_label)

        except:
            print(
                'Could not read DICOM volume SeriesDescription. Skipping UID...',
                str(UID))
            continue

        try:
            # Extract patient position information for affine creation.
            output_affine = np.eye(4)
            image_position_patient = np.array(
                first_dicom.data_element('ImagePositionPatient').value).astype(
                    float)
            image_orientation_patient = np.array(
                first_dicom.data_element(
                    'ImageOrientationPatient').value).astype(float)
            last_image_position_patient = np.array(
                last_dicom.data_element('ImagePositionPatient').value).astype(
                    float)
            pixel_spacing_patient = np.array(
                first_dicom.data_element('PixelSpacing').value).astype(float)

            # Create DICOM Space affine (don't fully understand, TODO)
            output_affine[
                0:3,
                0] = pixel_spacing_patient[0] * image_orientation_patient[0:3]
            output_affine[
                0:3,
                1] = pixel_spacing_patient[1] * image_orientation_patient[3:6]
            output_affine[
                0:3,
                2] = (image_position_patient -
                      last_image_position_patient) / (1 - len(current_dicoms))
            output_affine[0:3, 3] = image_position_patient

            # Transformations from DICOM to Nifti Space (don't fully understand, TODO)
            cr_flip = np.eye(4)
            cr_flip[0:2, 0:2] = [[0, 1], [1, 0]]
            neg_flip = np.eye(4)
            neg_flip[0:2, 0:2] = [[-1, 0], [0, -1]]
            output_affine = np.matmul(neg_flip,
                                      np.matmul(output_affine, cr_flip))

            # Create numpy array data...
            output_shape = get_dicom_pixel_array(current_dicoms[0],
                                                 current_files[0]).shape
            output_numpy = []
            for i in range(len(current_dicoms)):
                try:
                    output_numpy += [
                        get_dicom_pixel_array(current_dicoms[i],
                                              current_files[i])
                    ]
                except:
                    print('Warning, error at slice', i)
            output_numpy = np.stack(output_numpy, -1)

            # If preferred, harden to identity matrix space (LPS, maybe?)
            # Also unsure of the dynamic here, but they work.
            if harden_orientation is not None:

                cx, cy, cz = np.argmax(np.abs(output_affine[0:3, 0:3]), axis=0)

                output_numpy = np.transpose(output_numpy, (cx, cy, cz))

                harden_matrix = np.eye(4)
                for dim, i in enumerate([cx, cy, cz]):
                    harden_matrix[i, i] = 0
                    harden_matrix[dim, i] = 1
                output_affine = np.matmul(output_affine, harden_matrix)

                flip_matrix = np.eye(4)
                for i in range(3):
                    if output_affine[i, i] < 0:
                        flip_matrix[i, i] = -1
                        output_numpy = np.flip(output_numpy, i)

                output_affine = np.matmul(output_affine, flip_matrix)

            # Create output folder according to tags.
            specific_folder = output_folder
            for tag in folder_tags:
                if specific_folder == output_folder or folder_mode == 'recursive':
                    specific_folder = os.path.join(
                        specific_folder,
                        sanitize_filename(first_dicom.data_element(tag).value))
                elif folder_mode == 'combine':
                    specific_folder = specific_folder + '_' + sanitize_filename(
                        first_dicom.data_element(tag).value)
            if not os.path.exists(specific_folder):
                os.makedirs(specific_folder)

            # Save out file.
            output_filename = os.path.join(specific_folder, volume_label)
            if os.path.exists(
                    output_filename) and output_filename in output_filenames:
                output_filename = replace_suffix(output_filename, '', '_copy')
            save_numpy_2_nifti(output_numpy, output_affine, output_filename)
            output_filenames += [output_filename]

        except:
            print('Could not read DICOM at SeriesDescription...', volume_label)

    return output_filenames
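A usage sketch; the directory is hypothetical, and the module-level configuration noted in the docstring must be defined first:

nifti_files = dcm_2_numpy('/data/dicom_study', verbose=True)
print(len(nifti_files), 'volumes written')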