Example #1
def test_extract_patches_square():
    # test same patch size for all dimensions
    face = downsampled_face
    i_h, i_w = face.shape
    p = 8
    expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
    patches = extract_patches(face, patch_shape=p)
    assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
                                  p, p))
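# Added illustration (not part of the original test): the same square-patch view can be
# obtained with numpy's sliding_window_view, which for a 2-D input returns the
# (i_h - p + 1, i_w - p + 1, p, p) array that extract_patches produces.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

demo_image = np.arange(64 * 64).reshape(64, 64)   # stand-in for downsampled_face
p = 8
demo_patches = sliding_window_view(demo_image, (p, p))
assert demo_patches.shape == (64 - p + 1, 64 - p + 1, p, p)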
def image_conv_patches(image):
    # Convolution patches
    patches = extract_patches(image, 16)
    w, h = patches.shape[0], patches.shape[1]
    patches = patches.reshape((w*h,16 * 16))
    # GCN
    patches = patches - patches.mean(axis=1)[:, np.newaxis]
    patches /= np.sqrt(patches.var(axis=1) + 10)[:, np.newaxis]
    return (w, h, patches)
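# Added illustration: the GCN step above in isolation, on random flattened patches.
# Each row becomes zero-mean; the +10 added to the variance keeps near-constant
# patches from being blown up by the division (the array sizes here are assumptions).
import numpy as np

rng = np.random.default_rng(0)
flat = rng.normal(size=(100, 16 * 16))                  # 100 flattened 16x16 patches
flat = flat - flat.mean(axis=1)[:, np.newaxis]          # per-patch mean removal
flat /= np.sqrt(flat.var(axis=1) + 10)[:, np.newaxis]   # variance normalization with floor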
Example #3
	def __init__(self, n_in, n_features, n_patches, 
					sampling_method = None,
					*args, **kwargs):
		"""
		n_in: number of features in original space
		n_features: total number of features 
		n_patches: how many random local patches to use in the original feature space,
			so each patch contributes n_features/n_patches clusters
		sampling_method: {None, '1d', '2d'}: how the random patches are chosen
			None: no contiguity constraint
			'1d': features sampled in the same patch are contiguous in 1D
			'2d': features sampled in the same patch are contiguous in 2D (e.g. for images),
				  assuming the image dimension is sqrt(n_in) x sqrt(n_in)

		TODO: fix the conceptual bug that number_of_original_features_in_each_patch
		= number_clusters_in_each_patch
		"""
		self.n_in = n_in
		self.n_features = n_features
		self.n_patches = n_patches
		self.sampling_method = sampling_method
		if self.sampling_method is None: #pure randomness
			self.feat_indices_ = np.array_split(np.random.randint(low = 0, 
														high = n_in, 
														size = n_features),
												n_patches)
		elif self.sampling_method == '1d':
			nfeat_perpatch = n_features // n_patches
			nfeat_last = n_features - nfeat_perpatch * (n_patches-1)
			feat_starts = np.random.randint(low=0, high=n_in-nfeat_perpatch, size=n_patches)
			self.feat_indices_ = [np.arange(start, start+nfeat_perpatch) 
					for start in feat_starts[:-1]]
			self.feat_indices_.append(np.arange(feat_starts[-1], feat_starts[-1]+nfeat_last))
		elif self.sampling_method == '2d':
			nrows = int(math.sqrt(n_in))
			ncols = n_in // nrows
			grid = np.arange(nrows*ncols).reshape((nrows, ncols))
			nfeat_perpatch = n_features // n_patches
			nfeat_perrow = int(math.sqrt(nfeat_perpatch))
			nfeat_percol = nfeat_perpatch // nfeat_perrow
			nfeat_last = n_features - nfeat_perrow*nfeat_percol*n_patches

			self.feat_indices_ = (image.extract_patches(grid, (nfeat_perrow, nfeat_percol))
										.reshape(-1, nfeat_perrow*nfeat_percol))
			self.feat_indices_ = list(self.feat_indices_[np.random.choice(len(self.feat_indices_), n_patches)])  # list, so the remainder patch can be appended below
			if nfeat_last > 0:
				self.feat_indices_.append(np.random.randint(low = 0, 
														high = n_in, 
														size = nfeat_last))
		else:
			raise ValueError('sampling_method=%s is not a recognized option' % self.sampling_method)
		self.models_ = [cluster.MiniBatchKMeans(len(findex), *args, **kwargs) 
								for findex in self.feat_indices_]
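# Added illustration (standalone sketch of the '1d' branch above, with assumed sizes):
# each patch is a contiguous run of feature indices starting at a random offset, and
# the last patch absorbs the remainder so the total equals n_features.
import numpy as np

n_in, n_features, n_patches = 100, 42, 5
nfeat_perpatch = n_features // n_patches
nfeat_last = n_features - nfeat_perpatch * (n_patches - 1)
feat_starts = np.random.randint(0, n_in - nfeat_perpatch, size=n_patches)
feat_indices = [np.arange(s, s + nfeat_perpatch) for s in feat_starts[:-1]]
feat_indices.append(np.arange(feat_starts[-1], feat_starts[-1] + nfeat_last))
assert sum(len(f) for f in feat_indices) == n_features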
def featurize_image(shrink, alpha, dictionary, image):
    w, h, patches = image_conv_patches(image)
    nclusters = dictionary.shape[0]
    patches = dict_threshold(alpha, dictionary, patches)
    # Now, perform pooling (with averaging)
    patches = patches.reshape((w, h, -1))
    wr = int(w / shrink)
    hr = int(h / shrink)
    step = min(wr-1,hr-1)
    patches = extract_patches(patches,patch_shape=(wr,hr,nclusters),extraction_step=step)
    nw, nh = patches.shape[0], patches.shape[1]
    patches = patches.reshape((nw,nh,wr,hr,nclusters))
    patches = patches.mean(axis=(2,3))
    patches = patches.reshape((nw*nh*nclusters))
    return patches
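# Added illustration (assumed shapes, not from the original code): the pooling step in
# featurize_image amounts to averaging spatial blocks of the cluster-activation map;
# shown here with non-overlapping windows for simplicity.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

acts = np.random.rand(60, 60, 32)                               # (w, h, nclusters)
wr, hr = 20, 20
blocks = sliding_window_view(acts, (wr, hr, acts.shape[2]))[::wr, ::hr]
pooled = blocks.mean(axis=(-3, -2))                             # average each spatial window
features = pooled.reshape(-1)                                   # flatten to one feature vector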
Example #5
def test_extract_patches_strided():

    image_shapes_1D = [(10,), (10,), (11,), (10,)]
    patch_sizes_1D = [(1,), (2,), (3,), (8,)]
    patch_steps_1D = [(1,), (1,), (4,), (2,)]

    expected_views_1D = [(10,), (9,), (3,), (2,)]
    last_patch_1D = [(10,), (8,), (8,), (2,)]

    image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
    patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
    patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]

    expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
    last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]

    image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
    patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
    patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]

    expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
    last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]

    image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
    patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
    patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
    expected_views = expected_views_1D + expected_views_2D + expected_views_3D
    last_patches = last_patch_1D + last_patch_2D + last_patch_3D

    for (image_shape, patch_size, patch_step, expected_view,
         last_patch) in zip(image_shapes, patch_sizes, patch_steps,
                            expected_views, last_patches):
        image = np.arange(np.prod(image_shape)).reshape(image_shape)
        patches = extract_patches(image, patch_shape=patch_size,
                                  extraction_step=patch_step)

        ndim = len(image_shape)

        assert_true(patches.shape[:ndim] == expected_view)
        last_patch_slices = tuple(slice(i, i + j, None) for i, j in
                                  zip(last_patch, patch_size))
        assert_true((patches[(slice(-1, None, None),) * ndim] ==
                    image[last_patch_slices].squeeze()).all())
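# Added illustration: the expected view shapes tabulated above all follow the usual
# strided-window count, n = (image_dim - patch_dim) // step + 1 per axis.
def expected_view_shape(image_shape, patch_size, patch_step):
    return tuple((i - p) // s + 1
                 for i, p, s in zip(image_shape, patch_size, patch_step))

assert expected_view_shape((10, 20), (2, 2), (5, 5)) == (2, 4)
assert expected_view_shape((11, 20), (6, 6), (4, 2)) == (2, 8)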
def extract_patches_2d(image, patch_size, patch_stride=1,
                       max_patches=None, random_state=None, reshape=True):
    from sklearn.utils.validation import check_array, check_random_state
    from sklearn.feature_extraction.image import extract_patches, _compute_n_patches

    i_h, i_w = image.shape[:2]
    p_h, p_w = patch_size
    if p_h > i_h:
        raise ValueError("Height of the patch should be less than the height"
                         " of the image.")
    if p_w > i_w:
        raise ValueError("Width of the patch should be less than the width"
                         " of the image.")

    image = check_array(image, allow_nd=True)
    image = image.reshape((i_h, i_w, -1))
    n_colors = image.shape[-1]
    stride = patch_stride
    if type(patch_stride) is not int:
        assert len(stride) == 2
        stride = np.ones(len(image.shape), dtype=int)
        stride[:2] = patch_stride
    extracted_patches = extract_patches(image,
                                        patch_shape=(p_h, p_w, n_colors),
                                        extraction_step=stride)
    if reshape:
        extracted_patches = extracted_patches.reshape(extracted_patches.shape[0], extracted_patches.shape[1], -1)

    n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
    if max_patches:
        rng = check_random_state(random_state)
        i_s = rng.randint(extracted_patches.shape[0], size=n_patches)
        j_s = rng.randint(extracted_patches.shape[1], size=n_patches)
        patches = extracted_patches[i_s, j_s, :]
    else:
        patches = extracted_patches

    return patches
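# Added usage sketch (assumes an sklearn version that still ships
# feature_extraction.image.extract_patches; it is private/removed in recent releases):
# strided extraction on a random RGB image with reshape=True flattens each patch into
# its grid cell.
import numpy as np

rgb = np.random.rand(64, 64, 3)
grid = extract_patches_2d(rgb, (8, 8), patch_stride=(4, 4))
# grid.shape == (15, 15, 8 * 8 * 3) since (64 - 8) // 4 + 1 == 15 per spatial axis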
Example #7
def array_to_patches(arr, patch_shape=(3,3,3), extraction_step=1, normalization=False):
  #Make use of the sklearn function extract_patches
  #https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/feature_extraction/image.py
  """Extracts patches of any n-dimensional array in place using strides.
  Given an n-dimensional array it will return a 2n-dimensional array with
  the first n dimensions indexing patch position and the last n indexing
  the patch content. 
  Parameters
  ----------
  arr : 3darray
      3-dimensional array of which patches are to be extracted
  patch_shape : integer or tuple of length arr.ndim
      Indicates the shape of the patches to be extracted. If an
      integer is given, the shape will be a hypercube of
      sidelength given by its value.
  extraction_step : integer or tuple of length arr.ndim
      Indicates step size at which extraction shall be performed.
      If integer is given, then the step is uniform in all dimensions.
  Returns
  -------
  patches : strided ndarray
      2n-dimensional array indexing patches on first n dimensions and
      containing patches on the last n dimensions. These dimensions
      are fake, but this way no data is copied. A simple reshape invokes
      a copying operation to obtain a list of patches:
      result.reshape([-1] + list(patch_shape))
  """
  
  patches = extract_patches(arr, patch_shape, extraction_step)
  patches = patches.reshape(-1, patch_shape[0],patch_shape[1],patch_shape[2])
  patches = patches.reshape(patches.shape[0], -1) 
  if normalization==True:
    patches = patches.astype(np.float32)
    patches -= np.mean(patches, axis=0)
    patches /= np.std(patches, axis=0)
  #print('%.2d patches have been extracted' % patches.shape[0])  
  return patches
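# Added usage sketch (assumes the deprecated sklearn extract_patches referenced above
# is importable): flattened 3x3x3 patches from a random volume; each output row is
# one patch of 27 voxels.
import numpy as np

volume = np.random.rand(20, 20, 20).astype(np.float32)
flat_patches = array_to_patches(volume, patch_shape=(3, 3, 3), extraction_step=2)
# flat_patches.shape == (9 * 9 * 9, 27) because (20 - 3) // 2 + 1 == 9 per axis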
def classify_test_data_3d(activate2, W_list, b_list, path, patch_size_x, patch_size_y, patch_size_z, prefix, recon_flag, writer, itk_py_converter):
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Folder = []
    Recon = []
    Subdir_array = []
#    patch_size = 11

    for subdir, dirs, files in os.walk(path):
#        if(len(Flair) is 1):
#            break
        for file1 in files:
            if file1[-3:]=='mha' and ('Flair' in file1):
                Flair.append(file1)
                Folder.append(subdir+'/')
                Subdir_array.append(subdir[-5:])
            elif file1[-3:]=='mha' and ('t1_z' in file1 or 'T1' in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('t2' in file1 or 'T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('t1c_z' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)            
            elif file1[-3:]=='mha' and 'Recon' in file1:
                Recon.append(file1)
    number_of_images = len(Flair)
    
    for image_iterator in range(number_of_images):
        print 'Iteration : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        
        print '... predicting'

        Flair_image = mha.new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = mha.new(Folder[image_iterator]+T1[image_iterator])
        T2_image = mha.new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = mha.new(Folder[image_iterator]+T_1c[image_iterator])
        if recon_flag is True:
            Recon_image = mha.new(Folder[image_iterator]+Recon[image_iterator])
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        if recon_flag is True:
            Recon_image = Recon_image.data

        xdim, ydim, zdim = Flair_image.shape
        prediction_image = []
        Flair_patch = image.extract_patches(Flair_image, [patch_size_x,patch_size_y,patch_size_z])
        T1_patch = image.extract_patches(T1_image, [patch_size_x,patch_size_y,patch_size_z])
        T2_patch = image.extract_patches(T2_image, [patch_size_x,patch_size_y,patch_size_z])
        T_1c_patch = image.extract_patches(T_1c_image, [patch_size_x,patch_size_y,patch_size_z])
        if recon_flag is True:
            Recon_patch = image.extract_patches(Recon_image, [patch_size_x,patch_size_y,patch_size_z])
        
        print 'Raw patches extracted'
        #print Flair_patch.shape
        #print T1_patch.shape
        #print T2_patch.shape
        #print T_1c_patch.shape
        
        for j in range(Flair_patch.shape[2]):
            #print 'Slice : ',j+1
            F_slice = Flair_patch[:,:,j,:,:,:]
            T1_slice = T1_patch[:,:,j,:,:,:]
            T2_slice = T2_patch[:,:,j,:,:,:]
            T_1c_slice = T_1c_patch[:,:,j,:,:,:]
            if recon_flag is True:
                Recon_slice = Recon_patch[:,:,j,:,:,:]
            
            F_slice = F_slice.reshape(F_slice.shape[0]*F_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            T1_slice = T1_slice.reshape(T1_slice.shape[0]*T1_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            T2_slice = T2_slice.reshape(T2_slice.shape[0]*T2_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            T_1c_slice = T_1c_slice.reshape(T_1c_slice.shape[0]*T_1c_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            if recon_flag is True:
                Recon_slice = Recon_slice.reshape(Recon_slice.shape[0]*Recon_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            
            if recon_flag == True:
                temp_patch = np.concatenate([F_slice,T1_slice,T2_slice,T_1c_slice,Recon_slice],axis=1)
            else:
                temp_patch = np.concatenate([F_slice,T1_slice,T2_slice,T_1c_slice],axis=1)
            #print 'Size of temp_patch : ',temp_patch.shape
            prediction_slice = predictOutput(temp_patch, activate2, W_list, b_list)
            prediction_image.append(prediction_slice)
            
        prediction_image = np.array(prediction_image)
        prediction_image = np.transpose(prediction_image)
        prediction_image = prediction_image.reshape([xdim-patch_size_x+1, ydim-patch_size_y+1, zdim-patch_size_z+1])
        output_image = np.zeros([xdim,ydim,zdim])
        output_image[1+((patch_size_x-1)/2):xdim-((patch_size_x-1)/2)+1,1+((patch_size_y-1)/2):ydim-((patch_size_y-1)/2)+1,1+((patch_size_z-1)/2):zdim-((patch_size_z-1)/2)+1] = prediction_image      
#        np.save(Folder[image_iterator]+Subdir_array[image_iterator]+'_'+prefix+'_output_image.npy',output_image)#TODO: save it in meaningful name in corresponding folder
        
        a=np.transpose(output_image)
#        for j in xrange(a.shape[0]):
#            a[j,:,:] = np.transpose(a[j,:,:])        
        #a=a.reshape(155,240,240)
        print 'writing mha...'
        
        output_image = itk_py_converter.GetImageFromArray(a.tolist())
        writer.SetFileName(Folder[image_iterator]+Subdir_array[image_iterator]+'_'+prefix+'_.mha')
        writer.SetInput(output_image)
        writer.Update()
        print '########success########'
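# Added illustration (assumed helper, not part of the original script): the index
# arithmetic above places the "valid"-mode prediction map, of shape
# (dim - patch + 1) per axis, back at the centre of a zero-filled output volume.
import numpy as np

def center_embed(pred, full_shape, patch_shape):
    out = np.zeros(full_shape, dtype=pred.dtype)
    slices = tuple(slice((p - 1) // 2 + 1, dim - (p - 1) // 2 + 1)
                   for dim, p in zip(full_shape, patch_shape))
    out[slices] = pred            # same offsets as the output_image assignment above
    return out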
Example #9
def extract_multiscale_patches_from_mri(data_index_list, data_dir, is_test=False, is_oversampling=True, \
        row_size=16, channel_size=8, num_patch=100, proportion=0.1, \
        addClinical=False, patch_r_stride=4, patch_c_stride=4, \
        is_fixed_size=True, n_time_point=6, fixed_width=256, fixed_depth=32):
    """
	is_oversampling : indicator for oversampling patches near the lesion; this decides whether
	'extract_train_core_multiscale' or 'extract_val_core_multiscale' is used
	is_test : indicator for whether to extract y_label; y_label is extracted when 'is_test' is False.
	"""

    if is_test is not True:
        img_list, label_list=load_mri_from_directory(data_index_list, fixed_width, fixed_depth, \
         is_test=is_test, data_dir=data_dir, is_fixed_size=is_fixed_size)
    else:
        img_list=load_mri_from_directory(data_index_list, fixed_width, fixed_depth, \
         is_test=is_test, data_dir=data_dir, is_fixed_size=is_fixed_size)

    n = len(img_list)
    if is_test is not True:
        """
		Extract patches for training and validation time
		"""

        args = []
        if is_oversampling is True:
            for k in xrange(n):
                args.append((extract_train_core_multiscale, \
                 (img_list[k], label_list[k], row_size, channel_size, num_patch, n_time_point, proportion)))
        else:
            for k in xrange(n):
                args.append((extract_val_core_multiscale, \
                 (img_list[k], label_list[k], row_size, channel_size, num_patch, n_time_point, proportion)))

        pool = Pool(processes=MULTI)
        batchsize = MULTI
        batches = n // batchsize + (n % batchsize != 0)

        logging.info('pooling : begin')
        result = []
        for k in xrange(batches):
            logging.info("batch {:>2} / {}".format(k + 1, batches))
            result.extend(
                pool.map(process, args[k * batchsize:(k + 1) * batchsize]))
        pool.close()
        logging.info('pooling : done')

        X_main = []
        X_aug = []
        Y = []
        lesion_dicator_list = []

        logging.info('number of extracted cases are :{}'.format(len(result)))
        for j in xrange(len(result)):
            X_main = X_main + result[j][0]
            X_aug = X_aug + result[j][1]
            Y = Y + result[j][2]
            lesion_dicator_list = lesion_dicator_list + result[j][3]

        X_main, X_aug, Y, lesion_dicator_list = np.array(X_main).astype('float32'), np.array(X_aug).astype('float32'), \
                np.array(Y), np.array(lesion_dicator_list)

        if addClinical is True:
            pass
        else:
            return X_main, X_aug, Y, lesion_dicator_list

    else:
        """
		Extract patches for test time
		"""
        X_main = []
        X_aug = []
        img_shape_cache = []
        clinical_list = []
        for k in xrange(n):
            x_main = []
            x_aug = []
            for time_point in xrange(n_time_point):
                #Load image
                img = img_list[k][time_point]
                img_aug = np.ones(img.shape + np.array(
                    [row_size, row_size, channel_size])) * img[0, 0, 0]
                img_aug[(row_size/2):(img.shape[0] + row_size/2), \
                 (row_size/2):(img.shape[1] + row_size/2), \
                 (channel_size/2):(img.shape[2] + channel_size/2)] = img
                img_aug = transform_shrink(img_aug)

                #Transpose
                img = np.transpose(np.array(img), (2, 0, 1))
                img_aug = np.transpose(np.array(img_aug), (2, 0, 1))

                #Make patches
                patches = extract_patches(img, (channel_size, row_size, row_size), \
                 extraction_step=(patch_c_stride, patch_r_stride, patch_r_stride))
                x_main.append(patches)
                patches_aug = extract_patches(img_aug, (channel_size, row_size, row_size), \
                 extraction_step=(patch_c_stride/2, patch_r_stride/2, patch_r_stride/2))
                x_aug.append(patches_aug)

            N_patches = np.prod(patches.shape) // (channel_size *
                                                   (row_size**2))
            X_main.append(np.array(x_main))
            X_aug.append(np.array(x_aug))
            img_shape_cache.append(img.shape)

        if addClinical is True:
            pass
        else:
            return X_main, X_aug, img_shape_cache
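# Added illustration (simplified, with assumed sizes): the test-time branch above pads
# the volume by half a patch per side, filled with the corner intensity img[0, 0, 0],
# so that the second "augmented" scale covers the same centres as the original one.
import numpy as np

row_size, channel_size = 16, 8
img = np.random.rand(64, 64, 24)
img_aug = np.ones(np.array(img.shape) + [row_size, row_size, channel_size]) * img[0, 0, 0]
img_aug[row_size // 2:img.shape[0] + row_size // 2,
        row_size // 2:img.shape[1] + row_size // 2,
        channel_size // 2:img.shape[2] + channel_size // 2] = img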
Example #10
        print 'No truth file found'
        print ''
        continue    
    print(truth_data.shape)

    image_data = imread(Image[i])
    print(image_data.shape)
    image_data = image_data[:,:,2]
    print(image_data.shape)
    
    patch_size_x = 31
    patch_size_y = 31

    pixel_offset = 1

    Image_patch = image.extract_patches(image_data,[patch_size_x,patch_size_y],extraction_step=pixel_offset)
    Image_patch = Image_patch.reshape(Image_patch.shape[0]*Image_patch.shape[1], patch_size_x, patch_size_y)
    print(Image_patch.shape)

    T_patch = image.extract_patches(truth_data,[patch_size_x,patch_size_y],extraction_step=pixel_offset)
    T_patch = T_patch.reshape(T_patch.shape[0]*T_patch.shape[1], patch_size_x, patch_size_y)
    T_patch = T_patch[:,(patch_size_x-1)/2,(patch_size_y-1)/2]
    print(np.unique(T_patch))
    truth_patches = np.zeros(T_patch.shape)
    truth_patches[T_patch>200] = 1
    truth_patches = truth_patches.astype(int)
    print(np.unique(truth_patches))
    print(truth_patches.shape)
    # print T_patch.shape

    # grass_index = np.where(truth_patches == 0)
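# Added illustration (assumed arrays): the labelling trick above keeps only the centre
# pixel of each ground-truth patch and thresholds it, so every 31x31 image patch gets
# a single binary label for its central location.
import numpy as np

demo_truth = np.random.randint(0, 256, size=(1000, 31, 31))   # stand-in for T_patch
centre = demo_truth[:, 15, 15]                                 # (31 - 1) // 2 == 15
labels = (centre > 200).astype(int)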
Example #11
    def fit(self, modality, ground_truth=None, cat=None):
        """Compute the images images.

        Parameters
        ----------
        modality : object of type TemporalModality
            The modality object of interest.

        ground_truth : object of type GTModality or None
            The ground-truth of GTModality. If None, the whole data will be
            considered.

        cat : str or None
            String corresponding to the ground-truth category of interest. Cannot be
            None if ground_truth is not None.

        Returns
        -------
        self : object
             Return self.

        """
        super(DCTExtraction, self).fit(modality=modality,
                                       ground_truth=ground_truth,
                                       cat=cat)

        # Extract the set of patches from the modality data
        patches = extract_patches(modality.data_, patch_shape=self.patch_size)

        # Allocate the DCT maps, one for each feature that
        # will be computed
        nb_features = np.prod(self.patch_size)
        self.data_ = np.zeros((modality.data_.shape[0],
                               modality.data_.shape[1],
                               modality.data_.shape[2],
                               nb_features))

        # # Extract DCT feature for each patch
        # # Define the shift to apply
        if isinstance(self.patch_size, tuple):
            y_shift = int(np.ceil((self.patch_size[0] - 1) / 2.))
            x_shift = int(np.ceil((self.patch_size[1] - 1) / 2.))
            z_shift = int(np.ceil((self.patch_size[2] - 1) / 2.))
        elif isinstance(self.patch_size, int):
            y_shift = int(np.ceil((self.patch_size - 1) / 2.))
            x_shift = int(np.ceil((self.patch_size - 1) / 2.))
            z_shift = int(np.ceil((self.patch_size - 1) / 2.))

        # Create the list of indices to process
        yy, xx, zz = np.meshgrid(range(patches.shape[0]),
                                 range(patches.shape[1]),
                                 range(patches.shape[2]))
        # Linearize for fast processing
        yy = yy.reshape(-1)
        xx = xx.reshape(-1)
        zz = zz.reshape(-1)

        # Go for the parallel loop
        dct_features = Parallel(n_jobs=-1)(delayed(
            _compute_dct_features)(patches[y, x, z, :])
                                                for y, x, z in zip(yy, xx, zz))

        # Convert to numpy array
        dct_features = np.array(dct_features)
        # Reshape the feature matrix
        dct_features = dct_features.reshape((patches.shape[0],
                                             patches.shape[1],
                                             patches.shape[2],
                                             nb_features))

        # Copy the feature into the object
        self.data_[y_shift : -y_shift,
                   x_shift : -x_shift,
                   z_shift : -z_shift] = dct_features

        return self
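# Added sketch: _compute_dct_features is not shown in this excerpt. A plausible
# per-patch implementation (an assumption, not the project's actual code) is a
# flattened n-D type-II DCT of the patch, e.g. via scipy.
import numpy as np
from scipy.fft import dctn

def _compute_dct_features(patch):
    # orthonormal 3-D DCT of one patch, returned as a flat feature vector
    return dctn(np.asarray(patch, dtype=float), norm='ortho').ravel()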
Example #12
def B_Patch_Preprocess_recon_3D(patch_size_x=5,
                                patch_size_y=5,
                                patch_size_z=5,
                                prefix='SdA',
                                in_root='',
                                out_root='',
                                recon_flag=True):

    patch_pixels = patch_size_x * patch_size_y * patch_size_z

    pixel_offset_x = int(2 * patch_size_x * 0.7)
    pixel_offset_y = int(2 * patch_size_y * 0.7)
    pixel_offset_z = 1

    padding = patch_size_x
    #threshold = patch_pixels*0.3
    #patches = np.zeros(patch_pixels*4)
    if recon_flag is True:
        recon_num = 5
    else:
        recon_num = 4
    patches = np.zeros(patch_size_x * patch_size_y * patch_size_z * recon_num)
    ground_truth = np.zeros(1)

    #paths to images
    path = in_root

    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Recon = []
    Folder = []

    for subdir, dirs, files in os.walk(path):
        #        if len(Flair) is 1:
        #            break
        for file1 in files:

            if file1[-3:] == 'mha' and ('Flair' in file1):
                Flair.append(file1)
                Folder.append(subdir + '/')
            elif file1[-3:] == 'mha' and ('T1' in file1
                                          and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:] == 'mha' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:] == 'mha' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:] == 'mha' and 'OT' in file1:
                Truth.append(file1)
            elif file1[-3:] == 'mha' and 'Recon' in file1:
                Recon.append(file1)

    number_of_images = len(Flair)
    print 'Number of Patients : ', number_of_images

    #
    #
    for image_iterator in range(number_of_images):
        print 'Image number : ', image_iterator + 1
        print 'Folder : ', Folder[image_iterator]

        Flair_image = new(Folder[image_iterator] + Flair[image_iterator])
        T1_image = new(Folder[image_iterator] + T1[image_iterator])
        T2_image = new(Folder[image_iterator] + T2[image_iterator])
        T_1c_image = new(Folder[image_iterator] + T_1c[image_iterator])
        #        print 'image created'
        print Folder[image_iterator] + Truth[image_iterator]
        try:
            Truth_image = new(Folder[image_iterator] + Truth[image_iterator])
        except:
            Truth_image = new2(Folder[image_iterator] + Truth[image_iterator])
#        print 'image created'

        if recon_flag is True:
            Recon_image = new(Folder[image_iterator] + Recon[image_iterator])

        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        if recon_flag is True:
            Recon_image = Recon_image.data
        Truth_image = Truth_image.data

        x_span, y_span, z_span = np.where(Truth_image != 0)
        x_start = np.min(x_span) - padding
        x_stop = np.max(x_span) + padding + 1
        y_start = np.min(y_span) - padding
        y_stop = np.max(y_span) + padding + 1
        z_start = np.min(z_span) - padding
        z_stop = np.max(z_span) + padding + 1

        Flair_patch = image.extract_patches(
            Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z],
            (pixel_offset_x, pixel_offset_y, pixel_offset_z))
        T1_patch = image.extract_patches(
            T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z],
            (pixel_offset_x, pixel_offset_y, pixel_offset_z))
        T2_patch = image.extract_patches(
            T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z],
            (pixel_offset_x, pixel_offset_y, pixel_offset_z))
        T_1c_patch = image.extract_patches(
            T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z],
            (pixel_offset_x, pixel_offset_y, pixel_offset_z))
        if recon_flag is True:
            Recon_patch = image.extract_patches(
                Recon_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
                [patch_size_x, patch_size_y, patch_size_z],
                (pixel_offset_x, pixel_offset_y, pixel_offset_z))
        Truth_patch = image.extract_patches(
            Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z],
            (pixel_offset_x, pixel_offset_y, pixel_offset_z))

        print 'Raw patches extracted'

        Flair_patch = Flair_patch.reshape(
            Flair_patch.shape[0] * Flair_patch.shape[1] * Flair_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)
        T1_patch = T1_patch.reshape(
            T1_patch.shape[0] * T1_patch.shape[1] * T1_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)
        T2_patch = T2_patch.reshape(
            T2_patch.shape[0] * T2_patch.shape[1] * T2_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)
        T_1c_patch = T_1c_patch.reshape(
            T_1c_patch.shape[0] * T_1c_patch.shape[1] * T_1c_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)
        if recon_flag is True:
            Recon_patch = Recon_patch.reshape(
                Recon_patch.shape[0] * Recon_patch.shape[1] *
                Recon_patch.shape[2],
                patch_size_x * patch_size_y * patch_size_z)
        Truth_patch = Truth_patch.reshape(
            Truth_patch.shape[0] * Truth_patch.shape[1] * Truth_patch.shape[2],
            patch_size_x, patch_size_y, patch_size_z)

        print 'Patches reshaped'

        if recon_flag is True:
            slice_patch = np.concatenate(
                [Flair_patch, T1_patch, T2_patch, T_1c_patch, Recon_patch],
                axis=1)
        else:
            slice_patch = np.concatenate(
                [Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
        Truth_patch = Truth_patch[:, (patch_size_x - 1) / 2,
                                  (patch_size_y - 1) / 2,
                                  (patch_size_z - 1) / 2]
        Truth_patch = np.array(Truth_patch)
        Truth_patch = Truth_patch.reshape(len(Truth_patch), 1)
        #print '3. truth dimension :', Truth_patch.shape
        num_of_class = []
        for i in xrange(1, 5):
            num_of_class.append(np.sum((Truth_patch == i).astype(int)))
        max_num = max(num_of_class)
        max_num_2 = max(x for x in num_of_class if x != max_num)

        Flair_patch = image.extract_patches(
            Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z])
        Flair_patch = Flair_patch.reshape(
            Flair_patch.shape[0] * Flair_patch.shape[1] * Flair_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)

        T1_patch = image.extract_patches(
            T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z])
        T1_patch = T1_patch.reshape(
            T1_patch.shape[0] * T1_patch.shape[1] * T1_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)

        T2_patch = image.extract_patches(
            T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z])
        T2_patch = T2_patch.reshape(
            T2_patch.shape[0] * T2_patch.shape[1] * T2_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)

        T_1c_patch = image.extract_patches(
            T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z])
        T_1c_patch = T_1c_patch.reshape(
            T_1c_patch.shape[0] * T_1c_patch.shape[1] * T_1c_patch.shape[2],
            patch_size_x * patch_size_y * patch_size_z)

        T_patch = image.extract_patches(
            Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],
            [patch_size_x, patch_size_y, patch_size_z])
        T_patch = T_patch.reshape(
            T_patch.shape[0] * T_patch.shape[1] * T_patch.shape[2],
            patch_size_x, patch_size_y, patch_size_z)
        T_patch = T_patch[:, (patch_size_x - 1) / 2, (patch_size_y - 1) / 2,
                          (patch_size_z - 1) / 2]

        for i in xrange(1, 5):
            #print 'Max : ', max_num_2
            #print 'Present : ', np.sum(image_label==i).astype(int)
            diff = max_num_2 - np.sum(T_patch == i).astype(int)
            #print 'Difference: ', diff
            #print 'Diff : ', diff
            if np.sum(T_patch == i).astype(int) >= max_num_2:
                #print 'Continuing i = ', i
                continue
            #print 'TEST : ', Truth_patch.shape
            if i not in T_patch:
                continue
            #print T_patch.shape
            #print np.sum(T_patch==i).astype(int)
            index_x = np.where(T_patch == i)[0]
            #print 'Length : ',len(index_x)
            index = np.arange(len(index_x))
            shuffle(index)
            temp = T_patch[index_x[index[0:diff]]]
            temp = temp.reshape(len(temp), 1)
            Truth_patch = np.vstack([Truth_patch, temp])

            #print 'pppp'
            #print len(index_x[index[0:diff]])
            #print Flair_patch.shape

            F_p = Flair_patch[index_x[index[0:diff]], :]
            T1_p = T1_patch[index_x[index[0:diff]], :]
            T2_p = T2_patch[index_x[index[0:diff]], :]
            T_1c_p = T_1c_patch[index_x[index[0:diff]], :]
            temp_patch = np.concatenate([F_p, T1_p, T2_p, T_1c_p], axis=1)
            slice_patch = np.vstack([slice_patch, temp_patch])

        print 'No. of 1 : ', np.sum((Truth_patch == 1).astype(int))
        print 'No. of 2 : ', np.sum((Truth_patch == 2).astype(int))
        print 'No. of 3 : ', np.sum((Truth_patch == 3).astype(int))
        print 'No. of 4 : ', np.sum((Truth_patch == 4).astype(int))

        patches = np.vstack([patches, slice_patch])

        ground_truth = np.vstack([ground_truth, Truth_patch])
        print ground_truth.shape
        print patches.shape
    #
    #
    #print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
    #print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    #
    #print
    #
    ground_truth = ground_truth.reshape(len(ground_truth))

    if recon_flag == False:
        patches = patches[:, 0:patch_size_x * patch_size_y * patch_size_z * 4]

    #np.save('Training_patches.npy',patches)
    #np.save('Training_labels.npy',ground_truth)
    #print ground_truth.shape
    #print patches.shape
    if 'training' in out_root and recon_flag == True:
        print '... Saving the balanced training patches'
        np.save(out_root + 'b_trainpatch_3D_' + prefix + '_.npy', patches)
        np.save(out_root + 'b_trainlabel_3D_' + prefix + '_.npy', ground_truth)
    elif recon_flag == True:
        print '... Saving the balance validation patches'
        np.save(out_root + 'b_validpatch_3D_' + prefix + '_.npy', patches)
        np.save(out_root + 'b_validlabel_3D_' + prefix + '_.npy', ground_truth)

    if 'training' in out_root and recon_flag == False:
        print '... Saving the balanced training patches'
        np.save(out_root + 'b_trainpatch_3D_' + prefix + '_.npy', patches)
        np.save(out_root + 'b_trainlabel_3D_' + prefix + '_.npy', ground_truth)
    elif recon_flag == False:
        print '... Saving the balanced testing patches'
        np.save(out_root + 'b_validpatch_3D_' + prefix + '_.npy', patches)
        np.save(out_root + 'b_validlabel_3D_' + prefix + '_.npy', ground_truth)
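# Added illustration (standalone, assumed label array): the balancing above tops up
# every under-represented tumour class to the second-largest class count by drawing
# extra patch indices for it from the densely extracted grid.
import numpy as np

labels = np.random.randint(1, 5, size=5000)
counts = [np.sum(labels == c) for c in range(1, 5)]
second_largest = sorted(counts)[-2]
extra_idx = {}
for c, cnt in zip(range(1, 5), counts):
    if 0 < cnt < second_largest:
        idx = np.random.permutation(np.where(labels == c)[0])
        extra_idx[c] = idx[:second_largest - cnt]     # indices of extra patches to add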
Example #13
def slice_perfect_balance_2D(patch_size_x=5,
                             patch_size_y=5,
                             prefix='Sda',
                             in_root='',
                             out_root='',
                             slice_num=1):

    #Initialize user variables
    patch_size = patch_size_x
    patch_pixels = patch_size * patch_size
    pixel_offset = patch_size
    padding = patch_size
    #threshold = patch_pixels*0.3
    recon_num = 4
    label_num = 5
    patches = np.zeros(patch_pixels * recon_num)
    #    ground_truth = np.zeros(1)
    ground_truth = np.zeros(1)
    #paths to images
    path = in_root

    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Folder = []

    for subdir, dirs, files in os.walk(path):
        # if len(Flair) is 1:
        #     break
        for file1 in files:
            #print file1
            if file1[-3:] == 'nii' and ('Flair' in file1):
                Flair.append(file1)
                Folder.append(subdir + '/')
            elif file1[-3:] == 'nii' and ('T1' in file1
                                          and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:] == 'nii' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:] == 'nii' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:] == 'mha' and 'OT' in file1:
                Truth.append(file1)

    number_of_images = len(Flair)
    print 'Number of images : ', number_of_images

    for image_iterator in range(number_of_images):
        print 'Iteration : ', image_iterator + 1
        print 'Folder : ', Folder[image_iterator]
        Flair_image = nib.load(Folder[image_iterator] + Flair[image_iterator])
        T1_image = nib.load(Folder[image_iterator] + T1[image_iterator])
        T2_image = nib.load(Folder[image_iterator] + T2[image_iterator])
        T_1c_image = nib.load(Folder[image_iterator] + T_1c[image_iterator])
        try:
            Truth_image = new(Folder[image_iterator] + Truth[image_iterator])
        except:
            Truth_image = new2(Folder[image_iterator] + Truth[image_iterator])
        Flair_image = Flair_image.get_data()
        T1_image = T1_image.get_data()
        T2_image = T2_image.get_data()
        T_1c_image = T_1c_image.get_data()
        Truth_image = Truth_image.data

        if slice_num == 2:
            Flair_image = np.swapaxes(Flair_image, 0, 1)
            Flair_image = np.swapaxes(Flair_image, 1, 2)
            T1_image = np.swapaxes(T1_image, 0, 1)
            T1_image = np.swapaxes(T1_image, 1, 2)
            T2_image = np.swapaxes(T2_image, 0, 1)
            T2_image = np.swapaxes(T2_image, 1, 2)
            T_1c_image = np.swapaxes(T_1c_image, 0, 1)
            T_1c_image = np.swapaxes(T_1c_image, 1, 2)
            Truth_image = np.swapaxes(Truth_image, 0, 1)
            Truth_image = np.swapaxes(Truth_image, 1, 2)
        elif slice_num == 3:
            Flair_image = np.swapaxes(Flair_image, 0, 1)
            Flair_image = np.swapaxes(Flair_image, 0, 2)
            T1_image = np.swapaxes(T1_image, 0, 1)
            T1_image = np.swapaxes(T1_image, 0, 2)
            T2_image = np.swapaxes(T2_image, 0, 1)
            T2_image = np.swapaxes(T2_image, 0, 2)
            T_1c_image = np.swapaxes(T_1c_image, 0, 1)
            T_1c_image = np.swapaxes(T_1c_image, 0, 2)
            Truth_image = np.swapaxes(Truth_image, 0, 1)
            Truth_image = np.swapaxes(Truth_image, 0, 2)

        # Truth_image[np.where(Truth_image==3)]=1
        # Truth_image[np.where(Truth_image==4)]=1

        x_span, y_span, z_span = np.where(Truth_image != 0)
        start_slice = min(z_span)
        stop_slice = max(z_span)
        x_start = min(x_span) - padding
        x_stop = max(x_span) + padding
        y_start = min(y_span) - padding
        y_stop = max(y_span) + padding

        Flair_patch = image.extract_patches(
            Flair_image[x_start:x_stop, y_start:y_stop,
                        start_slice:stop_slice],
            [patch_size_x, patch_size_y, 1])
        Flair_patch = Flair_patch.reshape(
            Flair_patch.shape[0] * Flair_patch.shape[1] * Flair_patch.shape[2],
            patch_size_x * patch_size_y)

        T1_patch = image.extract_patches(
            T1_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice],
            [patch_size_x, patch_size_y, 1])
        T1_patch = T1_patch.reshape(
            T1_patch.shape[0] * T1_patch.shape[1] * T1_patch.shape[2],
            patch_size_x * patch_size_y)

        T2_patch = image.extract_patches(
            T2_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice],
            [patch_size_x, patch_size_y, 1])
        T2_patch = T2_patch.reshape(
            T2_patch.shape[0] * T2_patch.shape[1] * T2_patch.shape[2],
            patch_size_x * patch_size_y)

        T_1c_patch = image.extract_patches(
            T_1c_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice],
            [patch_size_x, patch_size_y, 1])
        T_1c_patch = T_1c_patch.reshape(
            T_1c_patch.shape[0] * T_1c_patch.shape[1] * T_1c_patch.shape[2],
            patch_size_x * patch_size_y)

        T_patch = image.extract_patches(
            Truth_image[x_start:x_stop, y_start:y_stop,
                        start_slice:stop_slice],
            [patch_size_x, patch_size_y, 1])
        T_patch = T_patch.reshape(
            T_patch.shape[0] * T_patch.shape[1] * T_patch.shape[2],
            patch_size_x, patch_size_y, 1)
        T_patch = T_patch[:, (patch_size - 1) / 2, (patch_size - 1) / 2]

        num_of_class = []
        for i in xrange(0, label_num):
            num_of_class.append(np.sum((T_patch == i).astype(int)))
        minim = min(x for x in num_of_class if x != 0)
        if minim > 3000:
            minim = 3000
#        flair_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
#        t1_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
#        t2_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
#        t1c_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
        slice_patch = np.zeros(patch_size_x * patch_size_y * recon_num)
        Truth_patch = np.zeros(1)
        #        print minim
        #        print
        for i in xrange(5):
            if num_of_class[i] == 0:
                continue
            index_x, index_y = np.where(T_patch == i)
            index1 = np.arange(len(index_x))
            shuffle(index1)
            #            print index1
            #            print index_x[index1[0:minim]].shape
            #print Flair_patch.shape
            slice_patch1 = np.concatenate([
                Flair_patch[index_x[index1[0:minim]], :],
                T1_patch[index_x[index1[0:minim]], :],
                T2_patch[index_x[index1[0:minim]], :],
                T_1c_patch[index_x[index1[0:minim]], :]
            ],
                                          axis=1)
            slice_patch = np.vstack([slice_patch, slice_patch1])
            #            flair_patch = np.vstack([flair_patch, Flair_patch[index_x[index1[0:minim]],:]])
            #            t1_patch = np.vstack([t1_patch,T1_patch[index_x[index1[0:minim]],:]])
            #            t2_patch = np.vstack([t2_patch, T2_patch[index_x[index1[0:minim]],:]])
            #            t1c_patch = np.vstack([t1c_patch, T_1c_patch[index_x[index1[0:minim]],:]])
            #            print Truth_patch.shape
            #            print T_patch.shape
            Truth_patch = np.vstack(
                [Truth_patch, T_patch[index_x[index1[0:minim]]]])
        print 'No. of 0 : ', np.sum((Truth_patch == 0).astype(int))
        print 'No. of 1 : ', np.sum((Truth_patch == 1).astype(int))
        print 'No. of 2 : ', np.sum((Truth_patch == 2).astype(int))
        print 'No. of 3 : ', np.sum((Truth_patch == 3).astype(int))
        print 'No. of 4 : ', np.sum((Truth_patch == 4).astype(int))

        Truth_patch = Truth_patch.reshape(len(Truth_patch))
        print 'look here ---->', Truth_patch.shape

        #        np.save(out_root+'patches_patient_flair'+str(image_iterator+1)+'.npy',flair_patch)
        #        np.save(out_root+'patches_patient_t1'+str(image_iterator+1)+'.npy',t1_patch)
        #        np.save(out_root+'patches_patient_t2'+str(image_iterator+1)+'.npy',t2_patch)
        #        np.save(out_root+'patches_patient_t1c'+str(image_iterator+1)+'.npy',t1c_patch)
        #        np.save(out_root+'patches_patient_'+str(image_iterator+1)+'.npy',slice_patch)
        #        np.save(out_root+'labels_patient_'+str(image_iterator+1)+'.npy',Truth_patch)
        patches = np.vstack([patches, slice_patch])
        print 'patches balanced shape', patches.shape
        ground_truth = np.append(ground_truth, Truth_patch)
        print 'ground shape--->', ground_truth.shape
    index1 = np.arange(patches.shape[0])
    shuffle(index1)
    print np.shape(patches)
    patches = patches[index1, :]
    ground_truth = ground_truth[index1]
    patches = np.float32(patches)
    ground_truth = np.float32(ground_truth)
    if 'training' in out_root:
        np.save(out_root + 'perfect_balance_trainpatch_' + prefix + '_.npy',
                patches)
        np.save(out_root + 'perfect_balance_traintruth_' + prefix + '_.npy',
                ground_truth)
    else:
        np.save(out_root + 'perfect_balance_validpatch_' + prefix + '_.npy',
                patches)
        np.save(out_root + 'perfect_balance_validtruth_' + prefix + '_.npy',
                ground_truth)
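# Added illustration (standalone, assumed label array): the "perfect balance" loop
# above keeps the same number of patches per class by shuffling each class's indices
# and retaining only the first `minim` of them (capped at 3000 above).
import numpy as np

labels = np.random.randint(0, 5, size=10000)
counts = [np.sum(labels == c) for c in range(5)]
minim = min(c for c in counts if c != 0)
keep = np.concatenate([np.random.permutation(np.where(labels == c)[0])[:minim]
                       for c in range(5) if np.sum(labels == c) > 0])
balanced_labels = labels[keep]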
Example #14
#!/usr/bin/env python
__author__ = 'ienek'

from skimage import io, filters, feature  # 'skimage.filter' was renamed to 'skimage.filters'
from skimage.color import rgb2gray

from sklearn.feature_extraction import image
# test to load any image
test = io.imread('n02854926_28_0.jpg')

# loading to array
img_arr = image.img_to_graph(test)

patch_g = image.extract_patches(test, (4, 4, 2))

patch_g2 = image.extract_patches_2d(test, (4, 4))

print("Patch 1: %s " % str(patch_g.shape))
print("Patch 2: %s " % str(patch_g2.shape))

# grayscaling
gray_test = rgb2gray(test)

# building hog
hog_ = feature.hog(gray_test)

# building surf
# ORB: An efficient alternative to SIFT and SURF
surf_ = feature.ORB()
surf_.detect_and_extract(gray_test)
Example #15
def perfect_balance_3D(patch_size_x=5,patch_size_y=5,patch_size_z=5,prefix='SdA',in_root='',out_root='',recon_flag=False):
    
    patch_pixels = patch_size_x*patch_size_y*patch_size_z
    
    pixel_offset_x = int(patch_size_x*0.5)
    pixel_offset_y = int(patch_size_y*0.5)
    pixel_offset_z = 1
    
    padding = patch_size_x
    #threshold = patch_pixels*0.3
    #patches = np.zeros(patch_pixels*4)
    if recon_flag is True:
        recon_num = 5
    else:
        recon_num = 4
    patches = np.zeros(patch_size_x*patch_size_y*patch_size_z*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Recon=[]
    Folder = []

    print 'Path: ',path
    
    for subdir, dirs, files in os.walk(path):
#        if len(Flair) is 1:
#            break
        for file1 in files:     

            if file1[-3:]=='mha' and ('Flair' in file1):
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and ('T1' in file1 and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)            
            elif file1[-3:]=='mha' and 'Recon' in file1:
                Recon.append(file1)
                
    number_of_images = len(T1)
    print 'Number of Patients : ', number_of_images
    

#    
#    
    for image_iterator in range(number_of_images):
        print 'Image number : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        
        Flair_image = new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = new(Folder[image_iterator]+T1[image_iterator])
        T2_image = new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = new(Folder[image_iterator]+T_1c[image_iterator])


#        print 'image created'
        print Folder[image_iterator] + Truth[image_iterator]
        try:
            Truth_image = new( Folder[image_iterator] + Truth[image_iterator] )
        except:
            Truth_image = new2( Folder[image_iterator] + Truth[image_iterator] )
#        print 'image created'
        
        
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        Truth_image = Truth_image.data
        
        x_span,y_span,z_span = np.where(Truth_image!=0)  ### have to change this and include condition i!=0 and i!=5... x,y,z=np.where(Truth_image!=0) and np.where(Truth_image!=5)
        x_start = np.min(x_span) - padding
        x_stop = np.max(x_span) + padding+1
        y_start = np.min(y_span) - padding
        y_stop = np.max(y_span) + padding+1
        z_start = np.min(z_span) - padding
        z_stop = np.max(z_span) +padding+1


        Flair_patch = image.extract_patches(Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z])
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T1_patch = image.extract_patches(T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T2_patch = image.extract_patches(T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T_1c_patch = image.extract_patches(T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T_patch = image.extract_patches(Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T_patch = T_patch.reshape(T_patch.shape[0]*T_patch.shape[1]*T_patch.shape[2],patch_size_x, patch_size_y, patch_size_z)
        T_patch = T_patch[:,(patch_size_x-1)/2,(patch_size_y-1)/2,(patch_size_z-1)/2]
        T_patch = T_patch.reshape(len(T_patch),1)
        num_of_class = []
        for i in xrange(0,5):   # have to change this too... 0,6
            num_of_class.append(np.sum((T_patch==i).astype(int)))
        minim = min(x for x in num_of_class if x!=0)   # have to change this too. include effect of class 5. Dont think this will create any issue and hence o change required..
        if minim > 1000:
            minim = 1000
        slice_patch = np.zeros(patch_size_x*patch_size_y*patch_size_z*recon_num)
#        print ' CHECK ', slice_patch.shape
        Truth_patch = np.zeros(1)
#        print minim
#        print 
        for i in xrange(5):  # change required. 6
            if num_of_class[i]==0:  # change required...
                continue
#            print 'LOOK ' , i
            index_x, index_y = np.where(T_patch==i)
#            print index_x
            index1 = np.arange(len(index_x))
            shuffle(index1)
#            print index1
#            print index_x[index1[0:minim]].shape
            slice_patch1 = np.concatenate([Flair_patch[index_x[index1[0:minim]],:],T1_patch[index_x[index1[0:minim]],:], T2_patch[index_x[index1[0:minim]],:], T_1c_patch[index_x[index1[0:minim]],:]], axis=1)
#            print 'Slice_patch 1', slice_patch1.shape
#            print 'Slice patch ', slice_patch.shape
            slice_patch = np.vstack([slice_patch, slice_patch1])
#            print Truth_patch.shape
#            print T_patch.shape
            Truth_patch = np.vstack([Truth_patch, T_patch[index_x[index1[0:minim]]]])
        print 'No. of 0 : ', np.sum((Truth_patch==0).astype(int))    
        print 'No. of 1 : ', np.sum((Truth_patch==1).astype(int))
        print 'No. of 2 : ', np.sum((Truth_patch==2).astype(int))
        print 'No. of 3 : ', np.sum((Truth_patch==3).astype(int))
        print 'No. of 4 : ', np.sum((Truth_patch==4).astype(int)) 

        print 'No. of class 5',np.sum((Truth_patch==5).astype(int))   # added  now ...
            
        patches = np.vstack([patches,slice_patch])
        
        ground_truth = np.vstack([ground_truth, Truth_patch])
        print ground_truth.shape
        print patches.shape

    ground_truth = ground_truth.reshape(len(ground_truth))
    print 'No. of 0 : ', np.sum((ground_truth==0).astype(int))    
    print 'No. of 1 : ', np.sum((ground_truth==1).astype(int))
    print 'No. of 2 : ', np.sum((ground_truth==2).astype(int))
    print 'No. of 3 : ', np.sum((ground_truth==3).astype(int))
    print 'No. of 4 : ', np.sum((ground_truth==4).astype(int))
    print 'No. of 5 : ', np.sum((ground_truth==5).astype(int))  # added now...
    if recon_flag==False:
        patches = patches[:,0:patch_size_x*patch_size_y*patch_size_z*4]
    
    #np.save('Training_patches.npy',patches)
    #np.save('Training_labels.npy',ground_truth)
    #print ground_truth.shape
    #print patches.shape
    if 'training' in out_root and recon_flag==True:
        print'... Saving the balanced training patches'
        np.save(out_root+'b_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==True:
        print '... Saving the balance validation patches'
        np.save(out_root+'b_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_3D_'+prefix+'_.npy',ground_truth)
        
    if 'training' in out_root and recon_flag==False:
        print'... Saving the balanced training patches'
        np.save(out_root+'b_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==False:
        print '... Saving the balanced testing patches'
        np.save(out_root+'b_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_3D_'+prefix+'_.npy',ground_truth)
        
Example #16
    def build(self):

        # assume last axis indexes images
        patch_shape = (self.patch_size, self.patch_size, 1)
        self.patches = extract_patches(self.image3d, patch_shape)
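# Added sketch (assumed stack layout, matching the "last axis indexes images" comment):
# with a (patch_size, patch_size, 1) window the image axis is never mixed, so every
# 2-D slice of the stack is patched independently. Equivalent numpy view:
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

stack = np.random.rand(32, 32, 10)               # ten 32x32 images
slice_patches = sliding_window_view(stack, (5, 5, 1))
# slice_patches.shape == (28, 28, 10, 5, 5, 1)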
Example #17
images_path = '../../test_jpg/'
# images_path_test = '../input/test_jpg/'
names = []
extracted_features = []

file_path = '../input/deepIQA_features_test.csv'
os.mknod(file_path)

train_ids = next(os.walk(images_path))[2]
f = True
for name in tqdm(train_ids):
    try:
        img = cv2.imread(images_path + name)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        patches = extract_patches(img, (32, 32, 3), 32)
        X = np.transpose(patches.reshape((-1, 32, 32, 3)), (0, 3, 1, 2))

        y = []
        weights = []
        batchsize = min(2000, X.shape[0])
        t = xp.zeros((1, 1), np.float32)
        for i in six.moves.range(0, X.shape[0], batchsize):
            X_batch = X[i:i + batchsize]
            X_batch = xp.array(X_batch.astype(np.float32))

            model.forward(X_batch, t, False, X_batch.shape[0])

            y.append(xp.asnumpy(model.y[0].data).reshape((-1,)))
            weights.append(xp.asnumpy(model.a[0].data).reshape((-1,)))
Example #18
def get_global_D(datasets,
                 outfilename,
                 block_size,
                 ncores=None,
                 batchsize=32,
                 niter=500,
                 use_std=False,
                 positivity=False,
                 fit_intercept=True,
                 center=True):

    split_b0s = True
    b0_threshold = 20

    # get the data shape so we can preallocate some arrays
    # we also have to assume all datasets have the same 3D shape obviously
    shape = nib.load(datasets[0]).header.get_data_shape()

    if len(block_size) < len(shape):
        # In the event that we only give out a 3D size and that we have b0s with different TE/TR, we remove those volumes
        # Therefore, we need to look at the total number of kept volumes, rather than the shape, to specify the last dimension properly
        last_shape = get_indexer(datasets[0]).sum(
        ) - 1  # We subtract 1 because block_size adds one b0 to it later down
        current_block_size = block_size + (last_shape, )
        print('Using full 4D stuff')
    else:
        current_block_size = block_size

    n_atoms = int(np.prod(current_block_size) * 2)
    b0_block_size = tuple(current_block_size[:-1]) + (
        (current_block_size[-1] + 1, ))
    overlap = b0_block_size
    to_denoise = np.empty(shape[:-1] + (current_block_size[-1] + 1, ),
                          dtype=np.float32)

    train_list = []
    variance_large = []

    for filename in datasets:

        print('Now feeding dataset {}'.format(filename))

        indexer = get_indexer(filename)
        mask = nib.load(filename.replace(
            '.nii', '_mask.nii.gz')).get_data(caching='unchanged')
        data = nib.load(filename).get_data(caching='unchanged') * mask[...,
                                                                       None]
        data = data[..., indexer]
        bvals = np.loadtxt(filename.replace('.nii', '.bval'))[indexer]
        bvecs = np.loadtxt(filename.replace('.nii', '.bvec'))[indexer]

        b0_loc = np.where(bvals <= b0_threshold)[0]
        dwis = np.where(bvals > b0_threshold)[0]
        num_b0s = len(b0_loc)

        # We also convert bvecs associated with b0s to exactly (0,0,0), which
        # is not always the case when we hack around with the scanner.
        bvecs = np.where(bvals[:, None] <= b0_threshold, 0, bvecs)

        # Average all b0s if we don't split them in the training set
        if num_b0s > 1 and not split_b0s:
            num_b0s = 1
            data[..., b0_loc] = np.mean(data[..., b0_loc],
                                        axis=-1,
                                        keepdims=True)

        # Split the b0s in a cyclic fashion along the training data
        # If we only have one, cycle just returns b0_loc indefinitely,
        # else we go through all indexes.
        np.random.shuffle(b0_loc)
        split_b0s_idx = cycle(b0_loc)
        sym_bvecs = np.vstack((bvecs, -bvecs))

        neighbors = angular_neighbors(
            sym_bvecs, current_block_size[-1] - 1) % data.shape[-1]
        neighbors = neighbors[:data.
                              shape[-1]]  # everything was doubled for symmetry

        full_indexes = [(dwi, ) + tuple(neighbors[dwi])
                        for dwi in range(data.shape[-1]) if dwi in dwis]
        indexes = greedy_set_finder(full_indexes)

        # If we have more b0s than indexes, then we have to add a few more blocks since
        # we won't do a full cycle. If we have more b0s than indexes after that, then it breaks.
        if num_b0s > len(indexes):
            the_rest = [rest for rest in full_indexes if rest not in indexes]
            indexes += the_rest[:(num_b0s - len(indexes))]

        if num_b0s > len(indexes):
            error = (
                'Seems like you still have more b0s {} than available blocks {},'
                ' either average them or deactivate subsampling.'.format(
                    num_b0s, len(indexes)))
            raise ValueError(error)

        # whole global centering
        if center:
            data -= data.mean(axis=-1, keepdims=True)

        for i, idx in enumerate(indexes):
            b0_loc = tuple((next(split_b0s_idx), ))
            to_denoise[..., 0] = data[..., b0_loc].squeeze()
            to_denoise[..., 1:] = data[..., idx]

            patches = extract_patches(to_denoise, b0_block_size, overlap)
            axis = tuple(range(patches.ndim // 2, patches.ndim))
            mask_patch = np.sum(patches > 0,
                                axis=axis) > np.prod(b0_block_size) // 2
            patches = patches[mask_patch].reshape(-1, np.prod(b0_block_size))

            if use_std:
                try:
                    variance = nib.load(filename.replace(
                        '.nii', '_std.nii.gz')).get_data()**2 * mask
                    variance = np.broadcast_to(variance[..., None], data.shape)
                    variance = extract_patches(variance, b0_block_size,
                                               overlap)
                    axis = tuple(range(variance.ndim // 2, variance.ndim))
                    variance = np.median(variance,
                                         axis=axis)[mask_patch].ravel()
                    print('variance shape', variance.shape)
                except IOError:
                    print('Volume {} not found!'.format(
                        filename.replace('.nii', '_std.nii.gz')))
                    variance = [None]
            else:
                variance = [None]

            # check to build with np.r_ the whole list from stringnames instead
            train_list += [patches]
            variance_large += list(variance)
            # train_data.extend(patches)
            # variance_large.extend(variance)

        print('train', len(train_list), data.shape, b0_block_size, overlap,
              patches.shape)

        del data, mask, patches, variance

    print('Fed everything in')

    lengths = [l.shape[0] for l in train_list]
    train_data = np.empty((np.sum(lengths), np.prod(b0_block_size)))
    print(train_data.shape)

    step = 0
    for i in range(len(train_list)):
        length = lengths[i]
        idx = slice(step, step + length)
        train_data[idx] = train_list[i].reshape(-1, np.prod(b0_block_size))
        step += length

    del train_list

    # if center:
    #     train_data -= train_data.mean(axis=1, keepdims=True)

    # variance is an N-element list - so check one element to see if it's an array
    if variance_large[0] is not None:
        variance_large = np.asarray(variance_large).ravel()
    else:
        variance_large = None

    savename = 'Dic_' + outfilename + '_size_{}.npy'.format(
        block_size).replace(' ', '')

    D = online_DL(train_data,
                  ncores=ncores,
                  positivity=positivity,
                  fit_intercept=fit_intercept,
                  standardize=True,
                  nlambdas=100,
                  niter=niter,
                  batchsize=batchsize,
                  n_atoms=n_atoms,
                  variance=variance_large,
                  progressbar=True,
                  disable_mkl=True,
                  saveback=savename,
                  use_joblib=False)

    return D
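
The mask_patch step above is what keeps mostly-background blocks out of the dictionary training set. A toy reproduction of just that filter on dense (step 1) patches from an invented 10x10x10x2 volume, with sliding_window_view standing in for extract_patches:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

block_size = (3, 3, 3, 2)
data = np.zeros((10, 10, 10, 2), dtype=np.float32)
data[4:, 4:, 4:] = 1.0        # pretend only this corner lies inside the brain mask

patches = sliding_window_view(data, block_size)
# Collapse the window axes, exactly like the axis/np.sum trick above.
axis = tuple(range(patches.ndim // 2, patches.ndim))
mask_patch = np.sum(patches > 0, axis=axis) > np.prod(block_size) // 2
patches = patches[mask_patch].reshape(-1, np.prod(block_size))
print(mask_patch.sum(), patches.shape)   # blocks that are more than half nonzero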
    
    Flair_image = Flair_image.data
    T1_image = T1_image.data
    T2_image = T2_image.data
    T_1c_image = T_1c_image.data
    Truth_image = Truth_image.data
    
    x_span,y_span,z_span = np.where(Truth_image!=0)
    x_start = np.min(x_span) - padding
    x_stop = np.max(x_span) + padding+1
    y_start = np.min(y_span) - padding
    y_stop = np.max(y_span) + padding+1
    z_start = np.min(z_span) - padding
    z_stop = np.max(z_span) +padding+1
    
    Flair_patch = image.extract_patches(Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,1))
    T1_patch = image.extract_patches(T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,1))
    T2_patch = image.extract_patches(T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,1))
    T_1c_patch = image.extract_patches(T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,1))
    Truth_patch = image.extract_patches(Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,1))
    
    print 'Raw patches extracted'    
    
    Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
    T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
    T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)  
    T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
    Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1]*Truth_patch.shape[2], patch_size_x, patch_size_y, patch_size_z)

    print 'Patches reshaped'    
    
Beispiel #20
0
def crop_tiles(region, tile_size):
    tiles = extract_patches(np.array(region), (tile_size, tile_size, 3), extraction_step=tile_size)
    tiles = tiles.squeeze()  # shape: (nrows, ncols, tile_size, tile_size, 3)
    return tiles
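
A sketch of the same non-overlapping tiling on modern numpy, where striding a sliding_window_view by tile_size plays the role of extraction_step=tile_size; the region size below is invented:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def crop_tiles_np(region, tile_size):
    arr = np.asarray(region)
    tiles = sliding_window_view(arr, (tile_size, tile_size, 3))[::tile_size, ::tile_size]
    return tiles.squeeze()    # shape: (nrows, ncols, tile_size, tile_size, 3)

region = np.random.randint(0, 255, (512, 768, 3), dtype=np.uint8)
print(crop_tiles_np(region, 256).shape)   # (2, 3, 256, 256, 3)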
Beispiel #21
0
if FR:
     model = FRModel(top=args.top)
else:
     model = Model(top=args.top)

cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
serializers.load_hdf5(args.model, model)
model.to_gpu()

if FR:
     ref_img = cv2.imread(args.REF)
     ref_img = cv2.cvtColor(ref_img, cv2.COLOR_BGR2RGB)
     patches = extract_patches(ref_img, (32,32,3), 32)
     X_ref = np.transpose(patches.reshape((-1, 32, 32, 3)), (0, 3, 1, 2))

img = cv2.imread(args.INPUT)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
patches = extract_patches(img, (32,32,3), 32)
X = np.transpose(patches.reshape((-1, 32, 32, 3)), (0, 3, 1, 2))


y = []
weights = []
batchsize = min(2000, X.shape[0])
t = xp.zeros((1, 1), np.float32)
for i in six.moves.range(0, X.shape[0], batchsize):
     X_batch = X[i:i + batchsize]
     X_batch = xp.array(X_batch.astype(np.float32))
def augment_slice_perfect_balance_2D(
    patch_size_x=5, patch_size_y=5, prefix="Sda", in_root="", out_root="", slice_num=1
):

    # Initialize user variables
    patch_size = patch_size_x
    patch_pixels = patch_size * patch_size
    pixel_offset = patch_size
    padding = 2 * patch_size
    # threshold = patch_pixels*0.3
    recon_num = 4
    label_num = 5
    patches = np.zeros(patch_pixels * recon_num)
    #    ground_truth = np.zeros(1)
    ground_truth = np.zeros(1)
    # paths to images
    path = in_root

    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Folder = []

    for subdir, dirs, files in os.walk(path):
        if len(Flair) == 1:
            break
        for file1 in files:
            # print file1
            if file1[-3:] == "nii" and ("Flair" in file1):
                Flair.append(file1)
                Folder.append(subdir + "/")
            elif file1[-3:] == "nii" and ("T1" in file1 and "T1c" not in file1):
                T1.append(file1)
            elif file1[-3:] == "nii" and ("T2" in file1):
                T2.append(file1)
            elif file1[-3:] == "nii" and ("T1c" in file1 or "T_1c" in file1):
                T_1c.append(file1)
            elif file1[-3:] == "mha" and "OT" in file1:
                Truth.append(file1)

    number_of_images = len(Flair)
    print "Number of images : ", number_of_images

    for image_iterator in range(number_of_images):
        print "Iteration : ", image_iterator + 1
        print "Folder : ", Folder[image_iterator]
        Flair_image = nib.load(Folder[image_iterator] + Flair[image_iterator])
        T1_image = nib.load(Folder[image_iterator] + T1[image_iterator])
        T2_image = nib.load(Folder[image_iterator] + T2[image_iterator])
        T_1c_image = nib.load(Folder[image_iterator] + T_1c[image_iterator])
        try:
            Truth_image = new(Folder[image_iterator] + Truth[image_iterator])
        except:
            Truth_image = new2(Folder[image_iterator] + Truth[image_iterator])
        Flair_image = Flair_image.get_data()
        T1_image = T1_image.get_data()
        T2_image = T2_image.get_data()
        T_1c_image = T_1c_image.get_data()
        Truth_image = Truth_image.data

        if slice_num == 2:
            Flair_image = np.swapaxes(Flair_image, 0, 1)
            Flair_image = np.swapaxes(Flair_image, 1, 2)
            T1_image = np.swapaxes(T1_image, 0, 1)
            T1_image = np.swapaxes(T1_image, 1, 2)
            T2_image = np.swapaxes(T2_image, 0, 1)
            T2_image = np.swapaxes(T2_image, 1, 2)
            T_1c_image = np.swapaxes(T_1c_image, 0, 1)
            T_1c_image = np.swapaxes(T_1c_image, 1, 2)
            Truth_image = np.swapaxes(Truth_image, 0, 1)
            Truth_image = np.swapaxes(Truth_image, 1, 2)
        elif slice_num == 3:
            Flair_image = np.swapaxes(Flair_image, 0, 1)
            Flair_image = np.swapaxes(Flair_image, 0, 2)
            T1_image = np.swapaxes(T1_image, 0, 1)
            T1_image = np.swapaxes(T1_image, 0, 2)
            T2_image = np.swapaxes(T2_image, 0, 1)
            T2_image = np.swapaxes(T2_image, 0, 2)
            T_1c_image = np.swapaxes(T_1c_image, 0, 1)
            T_1c_image = np.swapaxes(T_1c_image, 0, 2)
            Truth_image = np.swapaxes(Truth_image, 0, 1)
            Truth_image = np.swapaxes(Truth_image, 0, 2)

        # Truth_image[np.where(Truth_image==3)]=1
        # Truth_image[np.where(Truth_image==4)]=1

        x_span, y_span, z_span = np.where(Truth_image != 0)
        start_slice = min(z_span)
        stop_slice = max(z_span)
        x_start = min(x_span) - padding
        x_stop = max(x_span) + padding
        y_start = min(y_span) - padding
        y_stop = max(y_span) + padding

        Flair_patch = image.extract_patches(
            Flair_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice], [patch_size_x, patch_size_y, 1]
        )
        Flair_patch = Flair_patch.reshape(
            Flair_patch.shape[0] * Flair_patch.shape[1] * Flair_patch.shape[2], patch_size_x, patch_size_y
        )

        T1_patch = image.extract_patches(
            T1_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice], [patch_size_x, patch_size_y, 1]
        )
        T1_patch = T1_patch.reshape(
            T1_patch.shape[0] * T1_patch.shape[1] * T1_patch.shape[2], patch_size_x, patch_size_y
        )

        T2_patch = image.extract_patches(
            T2_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice], [patch_size_x, patch_size_y, 1]
        )
        T2_patch = T2_patch.reshape(
            T2_patch.shape[0] * T2_patch.shape[1] * T2_patch.shape[2], patch_size_x, patch_size_y
        )

        T_1c_patch = image.extract_patches(
            T_1c_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice], [patch_size_x, patch_size_y, 1]
        )
        T_1c_patch = T_1c_patch.reshape(
            T_1c_patch.shape[0] * T_1c_patch.shape[1] * T_1c_patch.shape[2], patch_size_x, patch_size_y
        )

        T_patch = image.extract_patches(
            Truth_image[x_start:x_stop, y_start:y_stop, start_slice:stop_slice], [patch_size_x, patch_size_y, 1]
        )
        T_patch = T_patch.reshape(T_patch.shape[0] * T_patch.shape[1] * T_patch.shape[2], patch_size_x, patch_size_y, 1)
        T_patch = T_patch[:, (patch_size - 1) / 2, (patch_size - 1) / 2]

        num_of_class = []
        for i in xrange(0, label_num):
            num_of_class.append(np.sum((T_patch == i).astype(int)))
        minim = min(x for x in num_of_class if x != 0)
        if minim > 3000:
            minim = 3000
        #        flair_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
        #        t1_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
        #        t2_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
        #        t1c_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
        slice_patch = np.zeros(patch_size_x * patch_size_y * recon_num)
        Truth_patch = np.zeros(1)
        #        print minim
        #        print
        for i in xrange(5):
            if num_of_class[i] == 0:
                continue
            index_x, index_y = np.where(T_patch == i)
            index1 = np.arange(len(index_x))
            shuffle(index1)
            #            print index1
            #            print index_x[index1[0:minim]].shape
            # print Flair_patch.shape
            if i == 0:
                minim1 = min(4 * minim, num_of_class[0])
                Fp = Flair_patch[index_x[index1[0:minim1]], :, :]
                T1p = T1_patch[index_x[index1[0:minim1]], :, :]
                T1cp = T_1c_patch[index_x[index1[0:minim1]], :, :]
                T2p = T2_patch[index_x[index1[0:minim1]], :, :]
                Fp = Fp.reshape(Fp.shape[0], patch_size_x * patch_size_y)
                T1p = T1p.reshape(T1p.shape[0], patch_size_x * patch_size_y)
                T1cp = T1cp.reshape(T1cp.shape[0], patch_size_x * patch_size_y)
                T2p = T2p.reshape(T2p.shape[0], patch_size_x * patch_size_y)

                slice_patch1 = np.concatenate([Fp, T1p, T2p, T1cp], axis=1)
                print "adding : ", slice_patch1.shape[0]
                slice_patch = np.vstack([slice_patch, slice_patch1])
                Truth_patch = np.vstack([Truth_patch, T_patch[index_x[index1[0:minim1]]]])
            else:
                Fp = Flair_patch[index_x[index1[0:minim]], :, :]
                T1p = T1_patch[index_x[index1[0:minim]], :, :]
                T1cp = T_1c_patch[index_x[index1[0:minim]], :, :]
                T2p = T2_patch[index_x[index1[0:minim]], :, :]

                Fp_n = np.asarray(Fp)
                T1p_n = np.asarray(T1p)
                T2p_n = np.asarray(T2p)
                T1cp_n = np.asarray(T1cp)

                Fp_n = Fp_n.reshape(Fp_n.shape[0], patch_size_x * patch_size_y)
                T1p_n = T1p_n.reshape(T1p_n.shape[0], patch_size_x * patch_size_y)
                T2p_n = T2p_n.reshape(T2p_n.shape[0], patch_size_x * patch_size_y)
                T1cp_n = T1cp_n.reshape(T1cp_n.shape[0], patch_size_x * patch_size_y)
                sp_n = np.concatenate([Fp_n, T1p_n, T2p_n, T1cp_n], axis=1)
                print "adding : ", sp_n.shape[0]

                slice_patch = np.vstack([slice_patch, sp_n])
                Truth_patch = np.vstack([Truth_patch, T_patch[index_x[index1[0:minim]]]])

                for angle in range(3):
                    Fp = np.asarray([np.rot90(Fp[x, :, :]) for x in range(Fp.shape[0])])
                    T1p = np.asarray([np.rot90(T1p[x, :, :]) for x in range(T1p.shape[0])])
                    T2p = np.asarray([np.rot90(T2p[x, :, :]) for x in range(T2p.shape[0])])
                    T1cp = np.asarray([np.rot90(T1cp[x, :, :]) for x in range(T1cp.shape[0])])

                    Fp_n = np.asarray(Fp)
                    T1p_n = np.asarray(T1p)
                    T2p_n = np.asarray(T2p)
                    T1cp_n = np.asarray(T1cp)

                    Fp_n = Fp_n.reshape(Fp_n.shape[0], patch_size_x * patch_size_y)
                    T1p_n = T1p_n.reshape(T1p_n.shape[0], patch_size_x * patch_size_y)
                    T2p_n = T2p_n.reshape(T2p_n.shape[0], patch_size_x * patch_size_y)
                    T1cp_n = T1cp_n.reshape(T1cp_n.shape[0], patch_size_x * patch_size_y)
                    sp_n = np.concatenate([Fp_n, T1p_n, T2p_n, T1cp_n], axis=1)

                    print "adding : ", sp_n.shape[0]

                    slice_patch = np.vstack([slice_patch, sp_n])
                    Truth_patch = np.vstack([Truth_patch, T_patch[index_x[index1[0:minim]]]])

        print "No. of 0 : ", np.sum((Truth_patch == 0).astype(int))
        print "No. of 1 : ", np.sum((Truth_patch == 1).astype(int))
        print "No. of 2 : ", np.sum((Truth_patch == 2).astype(int))
        print "No. of 3 : ", np.sum((Truth_patch == 3).astype(int))
        print "No. of 4 : ", np.sum((Truth_patch == 4).astype(int))

        Truth_patch = Truth_patch.reshape(len(Truth_patch))
        print "look here ---->", Truth_patch.shape

        #        np.save(out_root+'patches_patient_flair'+str(image_iterator+1)+'.npy',flair_patch)
        #        np.save(out_root+'patches_patient_t1'+str(image_iterator+1)+'.npy',t1_patch)
        #        np.save(out_root+'patches_patient_t2'+str(image_iterator+1)+'.npy',t2_patch)
        #        np.save(out_root+'patches_patient_t1c'+str(image_iterator+1)+'.npy',t1c_patch)
        #        np.save(out_root+'patches_patient_'+str(image_iterator+1)+'.npy',slice_patch)
        #        np.save(out_root+'labels_patient_'+str(image_iterator+1)+'.npy',Truth_patch)
        patches = np.vstack([patches, slice_patch])
        print "patches balanced shape", patches.shape
        ground_truth = np.append(ground_truth, Truth_patch)
        print "ground shape--->", ground_truth.shape
    index1 = np.arange(patches.shape[0])
    shuffle(index1)
    print np.shape(patches)
    patches = patches[index1, :]
    ground_truth = ground_truth[index1]
    patches = np.float32(patches)
    ground_truth = np.float32(ground_truth)
    if "training" in out_root:
        np.save(out_root + "perfect_balance_trainpatch_" + prefix + "_.npy", patches)
        np.save(out_root + "perfect_balance_traintruth_" + prefix + "_.npy", ground_truth)
    else:
        np.save(out_root + "perfect_balance_validpatch_" + prefix + "_.npy", patches)
        np.save(out_root + "perfect_balance_validtruth_" + prefix + "_.npy", ground_truth)
Beispiel #23
0
def style_transfer(content,
                   style,
                   segmentation_mask,
                   sigma_r=0.17,
                   sigma_s=15):
    content_arr = build_gaussian_pyramid(content, LMAX)
    style_arr = build_gaussian_pyramid(style, LMAX)
    segm_arr = build_gaussian_pyramid(segmentation_mask, LMAX)
    # Initialize X with the content + strong noise.
    X = random_noise(content_arr[LMAX - 1], mode='gaussian', var=50)
    # Set up Content Fusion constants.
    fus_const1 = []
    fus_const2 = []
    for i in range(LMAX):
        sx, sy = segm_arr[i].shape
        curr_segm = segm_arr[i].reshape(sx, sy, 1)
        fus_const1.append(curr_segm * content_arr[i])
        fus_const2.append(1.0 / (curr_segm + 1))
    print('Starting Style Transfer..')
    for L in range(LMAX - 1, -1, -1):  # over scale L
        print('Scale ', L)
        current_size = style_arr[L].shape[0]
        style_L_sx, style_L_sy, _ = style_arr[L].shape
        X = random_noise(X, mode='gaussian', var=20 / 250.0)
        for n in range(PATCH_SIZES.size):  # over patch size n
            p_size = PATCH_SIZES[n]
            print('Patch Size', p_size)
            npatchx = int((style_L_sx - p_size) / SAMPLING_GAPS[n] + 1)
            # The images are padded to avoid side artifacts.
            padding = p_size - (style_L_sx - npatchx * SAMPLING_GAPS[n])
            padding_arr = ((0, padding), (0, padding), (0, 0))
            current_style = pad(style_arr[L], padding_arr, mode=PADDING_MODE)
            X = pad(X, padding_arr, mode=PADDING_MODE)
            const1 = pad(fus_const1[L], padding_arr, mode=PADDING_MODE)
            const2 = pad(fus_const2[L], padding_arr, mode=PADDING_MODE)
            style_patches = extract_patches(current_style,
                                            patch_shape=(p_size, p_size, 3),
                                            extraction_step=SAMPLING_GAPS[n])
            npatchx, npatchy, _, _, _, _ = style_patches.shape
            npatches = npatchx * npatchy
            # Preparing for NN
            style_patches = style_patches.reshape(-1, p_size * p_size * 3)
            njobs = 1
            if (L == 0) or (L == 1 and p_size <= 13):
                njobs = -1
            projection_matrix = 0
            # for small patch sizes perform PCA
            if p_size <= 21:
                new_style_patches, projection_matrix = pca(style_patches)
                neighbors = NearestNeighbors(
                    n_neighbors=1, p=2, n_jobs=njobs).fit(new_style_patches)
            else:
                neighbors = NearestNeighbors(n_neighbors=1, p=2,
                                             n_jobs=njobs).fit(style_patches)
            style_patches = style_patches.reshape((-1, p_size, p_size, 3))
            for k in range(IALG):
                # Steps 1 & 2: Patch-Extraction and and Robust Patch Aggregation
                X_patches_raw = extract_patches(
                    X,
                    patch_shape=(p_size, p_size, 3),
                    extraction_step=SAMPLING_GAPS[n])
                for i in range(IRLS_it):
                    solve_irls(X, X_patches_raw, n, style_patches, neighbors,
                               projection_matrix)
                # Step 3: Content Fusion
                X = const2 * (X + const1)
                # Step 4: Color Transfer
                X = color_transfer(X, style)
                # Step 5: Denoising
                X[:style_L_sx, :style_L_sx, :] = denoise(
                    X[:style_L_sx, :style_L_sx, :],
                    sigma_r=sigma_r,
                    sigma_s=sigma_s)
            X = X[:style_L_sx, :style_L_sx, :]  # Discard padding.
        # Upscale X
        if (L > 0):
            sizex, sizey, _ = content_arr[L - 1].shape
            X = cv2.resize(X, (sizex, sizey))
    return X
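
The core of the patch-matching step above is a 1-nearest-neighbour search over flattened style patches, optionally in a PCA-reduced space. A self-contained sketch with random stand-in patches and sklearn's PCA in place of the pca() helper, which is not shown in the snippet:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.neighbors import NearestNeighbors

p_size = 7
style_patches = np.random.rand(500, p_size * p_size * 3)   # flattened style patches
X_patches = np.random.rand(200, p_size * p_size * 3)       # flattened patches of X

# Project both sets with the same PCA before the 1-NN query (the p_size <= 21 branch).
pca = PCA(n_components=25).fit(style_patches)
nn = NearestNeighbors(n_neighbors=1).fit(pca.transform(style_patches))
_, idx = nn.kneighbors(pca.transform(X_patches))
print(idx.shape)   # (200, 1): index of the best-matching style patch for each X patch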
Beispiel #24
0
                                           img.shape[-1]),
                              extraction_step=patch_spacing)
    patches_flattened = patches.reshape(
        -1, patch_size * patch_size * img.shape[-1])

    # Normalize data and create PCA object fitted to patches
    patches_normalized = patches_flattened - np.mean(patches_flattened, axis=0)
    pca = PCA(n_components=0.95, svd_solver="full")
    pca_patches = pca.fit_transform(patches_normalized)

    # Create nearest neighbor matcher based on PCA domain patches
    nn_matcher = NearestNeighbors(n_neighbors=1, algorithm="auto")
    nn_matcher = nn_matcher.fit(pca_patches)

    return patches.reshape(-1, patch_size * patch_size,
                           img.shape[-1]), pca, nn_matcher


if __name__ == "__main__":
    img = cv2.imread(os.path.join("images", "contents", "dog.jpg"),
                     cv2.IMREAD_COLOR)
    x = np.array([[0, 1, 3, 5], [0, 4, 3, 2], [4, 3, 0, 1]])
    patches = extract_patches(x.reshape(4, 3),
                              patch_shape=(1, 2),
                              extraction_step=2)
    print(patches)
    print(x)

    patches[0][0][0] = [100, 100]
    print(patches)
    print(x)
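
The demo above works because extract_patches returns a strided view rather than a copy, so writing into a patch writes into x. A short check of the same property with numpy's sliding_window_view (which returns its view read-only by default, unlike the writable view mutated above):

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(12).reshape(4, 3)
patches = sliding_window_view(x, (1, 2))

# No data is copied: the patch array shares memory with x.
print(np.shares_memory(patches, x))   # True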
def get_patch_matrix(source, invalid_pixels, patch_size, max_patches=None):
    MAX_MEM = 500e6  # Maximum memory allowed.  These things can get big fast.

    I_s = np.copy(source)
    I_ip = np.copy(invalid_pixels)

    h, w, d = I_s.shape

    # Downsize if the patch matrix will be too big.
    factor = 1
    mem = 8 * h * w * d * patch_size * patch_size
    if mem > MAX_MEM:
        factor = int(np.sqrt(mem / MAX_MEM)) + 1
        mem = 8 * h / factor * w / factor * d * patch_size * patch_size
    if mem > MAX_MEM:
        print("Patch matrix too large.  Size:", mem)
        sys.exit()

    # Mark invalid pixels.
    I_s[I_ip < 1, :] = 1e5

    ## Generate the patch_matrix.
    # Returns an array of dims [(h-ps)/factor + 1, (w-ps)/factor + 1, 1, ps, ps, nc],
    # where 'ps' is patch_size and 'nc' is the number of colors (3 assumed).
    patch_matrix = extract_patches(I_s, patch_shape=(patch_size, patch_size, 3), extraction_step=(factor, factor, 1))
    pmh, pmw = patch_matrix.shape[:2]
    patch_matrix = patch_matrix.reshape((pmh, pmw, patch_size, patch_size, 3))

    # Get the differences between adjacent patches in 2D.
    adh = patch_size * patch_size * 3 * np.ones((pmh, pmw))
    adw = np.copy(adh)

    adh[1:, :] = np.sum((patch_matrix[:-1] - patch_matrix[1:]) ** 2, axis=(2, 3, 4))
    adw[:, 1:] = np.sum((patch_matrix[:, :-1] - patch_matrix[:, 1:]) ** 2, axis=(2, 3, 4))

    ad = np.copy(adh)

    replace = adw < adh

    ad[replace] = adw[replace]

    # Reshape to new dims [(h-ps) * (w-ps), ps, ps, nc]
    patch_matrix = patch_matrix.reshape(-1, patch_size, patch_size, 3)
    ad = ad.reshape(pmh * pmw)

    # Remove invalid patches.  Assume image values in range 0.0 to 1.0 for now.
    patch_sum = np.sum(patch_matrix, axis=(1, 2, 3))
    valid = patch_sum <= patch_size * patch_size * I_s.shape[2]

    patch_matrix = patch_matrix[valid]
    ad = ad[valid]

    # If a maximum number of patches is specified, reduce the size if necessary.
    # For speed, only checking for differences of adjacent patches for now.
    if max_patches is not None:
        if patch_matrix.shape[0] > max_patches:
            # adjacent patch difference, 'apd'.  Keep patches with large differences.
            # apd = np.sum((patch_matrix[:-1] - patch_matrix[1:])**2, axis=(1,2,3))

            cutoff = np.sort(ad)[::-1][max_patches]

            # valid = np.zeros((1 + ad.shape[0]), dtype=np.bool)
            # valid[0] = True
            # valid[1:] = ad >= cutoff
            valid = ad >= cutoff

            patch_matrix = patch_matrix[valid]
            patch_matrix = patch_matrix[:max_patches]

    return patch_matrix
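
A hypothetical usage sketch of the function above, assuming get_patch_matrix and the deprecated extract_patches helper it relies on are importable; the image, mask, and sizes are invented:

import numpy as np

source = np.random.rand(64, 64, 3)     # small RGB image with values in [0, 1]
invalid = np.ones((64, 64))
invalid[20:30, 20:30] = 0              # a hole of pixels to be treated as invalid

patch_matrix = get_patch_matrix(source, invalid, patch_size=9, max_patches=1000)
print(patch_matrix.shape)              # (n_valid_patches, 9, 9, 3), capped at max_patches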
Beispiel #26
0
list_label = [line.split()[0]
              for line in f_label.readlines()]  ## get names of training images
list_patch_img = [line.split()[0] for line in f_patch.readlines()
                  ]  ## get names of image patches
f_patch.seek(0)  ## reset file pointer
list_patch = [line.split()
              for line in f_patch.readlines()]  ## get patches info

f_label.close()
f_patch.close()

print '-------------Load data-------------'
train_patches = []
for i in range(train_img_num):
    patches = extract_patches(train[i][0], (3, 32, 32), 32)
    temp = patches.reshape((-1, 3, 32, 32))

    ## select 128 top patches/image
    line_index = list_patch_img.index(
        list_label[i])  ## find the location of corresponding patches
    ## print i, line_index, list_patch_img[line_index], len(temp)

    temp_slice = [
        temp[int(index)]
        for index in list_patch[line_index][1:patches_per_img + 1]
    ]

    ## print temp_slice
    ## temp_slice = random.sample(temp, patches_per_img)
    for j in range(len(temp_slice)):
Beispiel #27
0
 def transform(self, X):
     if type(X) == csr_matrix:
         X = X.toarray()
     if type(X) == pd.DataFrame:
         X = X.as_matrix()
     return np.rollaxis(extract_patches(X, (self.patchsize, X.shape[1]), self.stepsize), 1, 4)
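
The transform above slides a window of patchsize rows over the whole feature matrix and appends a trailing channel axis. An equivalent shape check with sliding_window_view and made-up dimensions:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

X = np.random.rand(100, 6)             # 100 rows, 6 features
patchsize, stepsize = 10, 5

windows = sliding_window_view(X, (patchsize, X.shape[1]))[::stepsize, 0]
windows = windows[..., None]           # trailing axis added by np.rollaxis above
print(windows.shape)                   # (19, 10, 6, 1)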
Beispiel #28
0
 print(img_data.dtype)
 assert len(np.unique(img_data)) == 3
 print(img_shape)
 plt.imshow(img_data[:, :, img_shape[-1] // 2], cmap='gray')
 viz.matplot(plt, opts={'title': 'case_original', 'showlegend': True})
 mirrored = np.flip(img_data.copy(), axis=(0, 1, 2))
 # print(mirrored.shape)
 # plt.imshow(mirrored[img_shape[-1] // 2, :, :], cmap='gray')
 # viz.matplot(
 #     plt,
 #     opts={
 #         'title': 'case_mirrored',
 #         'showlegend': True
 #     }
 # )
 patches = image.extract_patches(img_data, patch_size, strides)
 print(patches.shape)
 m_patches = image.extract_patches(mirrored, patch_size, strides)
 print(m_patches.shape)
 # patches = patches.reshape((-1, patch_size[0], patch_size[1], patch_size[2]))
 recovered = reconstruct_labels(patches, img_shape, 3, strides, m_patches)
 plt.imshow(recovered[:, :, img_shape[-1] // 2], cmap='gray')
 viz.matplot(plt, opts={'title': 'case_recovered', 'showlegend': True})
 '''
 img_data = img_data[None, None, :, :, :]
 c = torch.zeros((img_data.shape))
 # ---------- Preprocess --------------------------------------------------------------
 img_shape = img_data.shape
 full_pred = torch.zeros((img_shape))
 background_color = img_data.min()
 img_data = torch.tensor(img_data, dtype=torch.float32)
def B_Patch_Preprocess_recon_3D(patch_size_x=5,patch_size_y=5,patch_size_z=5,prefix='SdA',in_root='',out_root='',recon_flag=True):
    
    patch_pixels = patch_size_x*patch_size_y*patch_size_z
    
    pixel_offset_x = int(2*patch_size_x*0.7)
    pixel_offset_y = int(2*patch_size_y*0.7)
    pixel_offset_z = 1
    
    padding = patch_size_x
    #threshold = patch_pixels*0.3
    #patches = np.zeros(patch_pixels*4)
    if recon_flag is True:
        recon_num = 5
    else:
        recon_num = 4
    patches = np.zeros(patch_size_x*patch_size_y*patch_size_z*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Recon=[]
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
#        if len(Flair) is 1:
#            break
        for file1 in files:     

            if file1[-3:]=='mha' and ('Flair' in file1):
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and ('T1' in file1 and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)            
            elif file1[-3:]=='mha' and 'Recon' in file1:
                Recon.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of Patients : ', number_of_images
    

#    
#    
    for image_iterator in range(number_of_images):
        print 'Image number : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        
        Flair_image = new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = new(Folder[image_iterator]+T1[image_iterator])
        T2_image = new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = new(Folder[image_iterator]+T_1c[image_iterator])
#        print 'image created'
        print Folder[image_iterator] + Truth[image_iterator]
        try:
            Truth_image = new( Folder[image_iterator] + Truth[image_iterator] )
        except:
            Truth_image = new2( Folder[image_iterator] + Truth[image_iterator] )
#        print 'image created'
        
        
        
        
        if recon_flag is True:
            Recon_image = new(Folder[image_iterator]+Recon[image_iterator])
        
       
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        if recon_flag is True:
            Recon_image=Recon_image.data
        Truth_image = Truth_image.data
        
        x_span,y_span,z_span = np.where(Truth_image!=0)
        x_start = np.min(x_span) - padding
        x_stop = np.max(x_span) + padding+1
        y_start = np.min(y_span) - padding
        y_stop = np.max(y_span) + padding+1
        z_start = np.min(z_span) - padding
        z_stop = np.max(z_span) +padding+1
        
        Flair_patch = image.extract_patches(Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T1_patch = image.extract_patches(T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T2_patch = image.extract_patches(T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T_1c_patch = image.extract_patches(T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        if recon_flag is True:        
            Recon_patch = image.extract_patches(Recon_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        Truth_patch = image.extract_patches(Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        
        print 'Raw patches extracted'    
        
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)  
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        if recon_flag is True:
            Recon_patch = Recon_patch.reshape(Recon_patch.shape[0]*Recon_patch.shape[1]*Recon_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1]*Truth_patch.shape[2], patch_size_x, patch_size_y, patch_size_z)
    
        print 'Patches reshaped'    
        
        if recon_flag is True:
            slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch, Recon_patch], axis=1)
        else:
            slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
        Truth_patch = Truth_patch[:,(patch_size_x-1)/2,(patch_size_y-1)/2,(patch_size_z-1)/2]
        Truth_patch = np.array(Truth_patch)
        Truth_patch = Truth_patch.reshape(len(Truth_patch),1)
        #print '3. truth dimension :', Truth_patch.shape
        num_of_class = []
        for i in xrange(1,5):
            num_of_class.append(np.sum((Truth_patch==i).astype(int)))
        max_num = max(num_of_class)
        max_num_2 = max(x for x in num_of_class if x!=max_num)
        
        Flair_patch = image.extract_patches(Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z])
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T1_patch = image.extract_patches(T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T2_patch = image.extract_patches(T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T_1c_patch = image.extract_patches(T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T_patch = image.extract_patches(Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T_patch = T_patch.reshape(T_patch.shape[0]*T_patch.shape[1]*T_patch.shape[2],patch_size_x, patch_size_y, patch_size_z)
        T_patch = T_patch[:,(patch_size_x-1)/2,(patch_size_y-1)/2,(patch_size_z-1)/2]
        
        
        
        
        for i in xrange(1,5):
            #print 'Max : ', max_num_2
            #print 'Present : ', np.sum(image_label==i).astype(int)
            diff = max_num_2-np.sum(T_patch==i).astype(int)
            #print 'Difference: ', diff
            #print 'Diff : ', diff
            if np.sum(T_patch==i).astype(int) >= max_num_2:
                #print 'Continuing i = ', i
                continue
            #print 'TEST : ', Truth_patch.shape
            if i not in T_patch:
                continue
            #print T_patch.shape
            #print np.sum(T_patch==i).astype(int)
            index_x = np.where(T_patch==i)[0]
            #print 'Length : ',len(index_x)
            index = np.arange(len(index_x))
            shuffle(index)
            temp = T_patch[index_x[index[0:diff]]]
            temp=temp.reshape(len(temp),1)
            Truth_patch = np.vstack([Truth_patch,temp])
            
            #print 'pppp'
            #print len(index_x[index[0:diff]])
            #print Flair_patch.shape
            
            F_p = Flair_patch[index_x[index[0:diff]],:]
            T1_p = T1_patch[index_x[index[0:diff]],:]
            T2_p = T2_patch[index_x[index[0:diff]],:]
            T_1c_p = T_1c_patch[index_x[index[0:diff]],:]
            temp_patch = np.concatenate([F_p, T1_p, T2_p, T_1c_p], axis=1)
            slice_patch = np.vstack([slice_patch, temp_patch])
            
        print 'No. of 1 : ', np.sum((Truth_patch==1).astype(int))
        print 'No. of 2 : ', np.sum((Truth_patch==2).astype(int))
        print 'No. of 3 : ', np.sum((Truth_patch==3).astype(int))
        print 'No. of 4 : ', np.sum((Truth_patch==4).astype(int))
            
        patches = np.vstack([patches,slice_patch])
        
        ground_truth = np.vstack([ground_truth, Truth_patch])
        print ground_truth.shape
        print patches.shape
    #
    #
    #print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
    #print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    #
    #print
    #
    ground_truth = ground_truth.reshape(len(ground_truth))
    
    if recon_flag==False:
        patches = patches[:,0:patch_size_x*patch_size_y*patch_size_z*4]
    
    #np.save('Training_patches.npy',patches)
    #np.save('Training_labels.npy',ground_truth)
    #print ground_truth.shape
    #print patches.shape
    if 'training' in out_root and recon_flag==True:
        print '... Saving the balanced training patches'
        np.save(out_root+'b_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==True:
        print '... Saving the balanced validation patches'
        np.save(out_root+'b_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_3D_'+prefix+'_.npy',ground_truth)
        
    if 'training' in out_root and recon_flag==False:
        print '... Saving the balanced training patches'
        np.save(out_root+'b_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==False:
        print '... Saving the balanced testing patches'
        np.save(out_root+'b_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_3D_'+prefix+'_.npy',ground_truth)
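
The balancing logic above oversamples every tumour class up to the second-largest class count (max_num_2). A simplified, self-contained sketch of that idea, drawing extra indices with replacement instead of reshuffling patch arrays:

import numpy as np

labels = np.array([1] * 500 + [2] * 120 + [3] * 60 + [4] * 20)   # made-up center labels

counts = [np.sum(labels == i) for i in range(1, 5)]
max_num = max(counts)
max_num_2 = max(c for c in counts if c != max_num)               # target count, as above

rng = np.random.default_rng(0)
extra = []
for i in range(1, 5):
    n = np.sum(labels == i)
    if n == 0 or n >= max_num_2:
        continue
    idx = np.where(labels == i)[0]
    extra.append(rng.choice(idx, size=max_num_2 - n))            # oversample the class

balanced = np.concatenate([np.arange(len(labels))] + extra)
print([np.sum(labels[balanced] == i) for i in range(1, 5)])      # [500, 120, 120, 120]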
Beispiel #30
0
def rebuild(data,
            mask,
            D,
            block_size,
            block_up,
            ncores=None,
            positivity=False,
            variance=None,
            fix_mean=True,
            fit_intercept=False,
            use_crossval=False):

    if ncores is None:
        ncores = cpu_count()
    elif ncores > cpu_count():
        ncores = cpu_count()

    data = data * mask[..., None]

    if len(block_size) == len(block_up):
        last = block_up[-1]
    else:
        last = block_size[-1]

    factor = np.divide(block_up, block_size)
    new_shape = (int(data.shape[0] * factor[0]),
                 int(data.shape[1] * factor[1]),
                 int(data.shape[2] * factor[2]), last)
    overlap = (1, 1, 1, last)
    new_overlap = overlap
    print(new_shape, new_overlap, factor)

    if block_size == block_up:
        D_depimpe = np.copy(D)
    else:
        D_depimpe = depimp_zoom(D, block_size, block_up, zoomarray=False)

    blocks = extract_patches(data, block_size,
                             overlap).reshape(-1, np.prod(block_size))
    del data
    # original_shape = blocks.shape

    # # check if recon is correct / only reconstruct old school way from the blocks to be on the safe side maybe?
    # recon = reconstruct_from_blocks(blocks, new_shape, block_size, block_up[:-1], new_overlap, weights=None)
    # return recon

    # blocks = np.asarray(blocks).reshape(-1, np.prod(block_size))

    # get the variance as blocks
    if variance is not None:
        variance *= mask
        print(variance.shape)
        variance = extract_patches(variance, block_size[:-1], overlap[:-1])
        print(variance.shape)
        # axis = list(range(variance.ndim//2, variance.ndim))
        # variance = np.median(variance, axis=axis)
        # print(variance.shape, np.prod(block_size[:-1]), np.prod(block_size), np.prod(variance.shape))
        variance = np.asarray(variance).reshape(-1, np.prod(block_size[:-1]))
        print(variance.shape)
    # skip empty rows from training since they are probably masked background
    mask = blocks.sum(axis=1) > np.prod(block_size) // 2

    if variance is not None:
        variance = np.median(variance, axis=-1)

        # if we are on an edge, variance can be 0, so truncate those cases as well
        np.logical_and(mask, variance > 0, out=mask)
        variance = variance[mask]
        print(variance.shape, np.sum(variance == 0))

    print(blocks.shape)
    blocks = blocks[mask]
    print(blocks.shape, D.shape, D_depimpe.shape, mask.shape, block_size,
          block_up, new_shape, new_overlap, 'pre l1 stuff')

    # if center:
    #     blocks_mean = blocks.mean(axis=1, keepdims=True)
    #     blocks -= blocks_mean
    tt = time()
    X_small_denoised, alpha, intercept, _ = solve_l1(blocks,
                                                     D_depimpe,
                                                     variance=variance,
                                                     return_all=True,
                                                     nlambdas=100,
                                                     use_joblib=True,
                                                     positivity=positivity,
                                                     fit_intercept=True,
                                                     standardize=True,
                                                     progressbar=True,
                                                     method='fork',
                                                     ncores=ncores,
                                                     use_crossval=use_crossval)

    print(X_small_denoised.shape, D_depimpe.shape, alpha.shape,
          intercept.shape)
    # print(np.min(alpha), np.max(alpha), np.abs(alpha).min(), np.abs(alpha).max())
    # print(np.min(intercept), np.max(intercept), np.abs(intercept).min(), np.abs(intercept).max())
    print('total time was {}'.format(time() - tt))
    # if fix_mean:
    #     mean = blocks.mean(axis=1)
    # else:
    #     mean = None

    # if center:
    #     intercept += blocks_mean

    # # reconstructor = None  # should we put block_up from the original?
    # tt = time()
    # recon = reconstruct_from_indexes(alpha, D, intercept, new_shape, new_overlap, mask, block_size, block_up)  #
    # print('reconstruction took {}'.format(time() - tt))
    # return recon

    # we actually only want alpha and intercept in this step and throw out X if we do upsampling normally
    # for ease of use reason, we now use directly X for the reconstruction, but we should
    # 1. multiply back X
    # 2a. feed X in an empty array with a mask
    #
    #   OR
    #
    # 2b. reconstruct the array and average things internally according to a mask

    tt = time()
    X_final = np.zeros(((mask.shape[0], ) + block_up), dtype=np.float32)

    if block_size != block_up:
        X_small_denoised = np.dot(D, alpha).T + intercept

    print('multiply time was {}'.format(time() - tt))
    tt = time()

    X_final[mask] = X_small_denoised.reshape(-1, *block_up)
    recon = reconstruct_from_blocks(X_final,
                                    new_shape,
                                    block_size,
                                    block_up[:-1],
                                    new_overlap,
                                    weights=None)
    print('recon time was {}'.format(time() - tt))

    del X_final, mask
    return recon
def U_Patch_Preprocess_recon_2D(patch_size_x=5,patch_size_y=5,prefix='SdA',in_root='',out_root=''):
    
    #Initialize user variables
    patch_size = patch_size_x
    patch_pixels = patch_size*patch_size
    pixel_offset = int(patch_size*0.5)
    padding = patch_size
    #threshold = patch_pixels*0.3
    recon_num = 4
    patches = np.zeros(patch_pixels*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
        # if len(Flair) is 1:
        #     break
        for file1 in files:
            #print file1
            if file1[-3:]=='mha' and ('Flair' in file1):
                
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and ('T1' in file1 and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of images : ', number_of_images
    
    
    for image_iterator in range(number_of_images):
        print 'Iteration : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        Flair_image = new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = new(Folder[image_iterator]+T1[image_iterator])
        T2_image = new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = new(Folder[image_iterator]+T_1c[image_iterator])
        try:
            Truth_image = new(Folder[image_iterator]+Truth[image_iterator])
        except:
            Truth_image = new2(Folder[image_iterator]+Truth[image_iterator])
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        Truth_image = Truth_image.data
        
        x_span,y_span,z_span = np.where(Truth_image!=0)
        
        start_slice = min(z_span)
        stop_slice = max(z_span)
        image_patch = np.zeros(patch_size*patch_size*recon_num)
        image_label = np.zeros(1)
        for i in range(start_slice, stop_slice+1):    
            Flair_slice = np.transpose(Flair_image[:,:,i])
            T1_slice = np.transpose(T1_image[:,:,i])
            
            T2_slice = np.transpose(T2_image[:,:,i])
            T_1c_slice = np.transpose(T_1c_image[:,:,i])    
            Truth_slice = np.transpose(Truth_image[:,:,i])
            
            x_dim,y_dim = np.size(Flair_slice,axis=0), np.size(Flair_slice, axis=1)
            
            x_span,y_span = np.where(Truth_slice!=0)
            if len(x_span)==0 or len(y_span)==0:
                continue
            x_start = np.min(x_span) - padding
            x_stop = np.max(x_span) + padding+1
            y_start = np.min(y_span) - padding
            y_stop = np.max(y_span) + padding+1
            
            Flair_patch = image.extract_patches(Flair_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T1_patch = image.extract_patches(T1_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T2_patch = image.extract_patches(T2_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T_1c_patch = image.extract_patches(T_1c_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)     
            Truth_patch = image.extract_patches(Truth_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            
            #print '1. truth dimension :', Truth_patch.shape
            
            Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1], patch_size*patch_size)
            T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1], patch_size*patch_size)
            T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1], patch_size*patch_size)  
            T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1], patch_size*patch_size)      
            Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1], patch_size, patch_size)
            
            #print '2. truth dimension :', Truth_patch.shape
            slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
            Truth_patch = Truth_patch[:,(patch_size-1)/2,(patch_size-1)/2]
            Truth_patch = np.array(Truth_patch)
            Truth_patch = Truth_patch.reshape(len(Truth_patch),1)
            
            patches = np.vstack([patches,slice_patch])
            ground_truth = np.vstack([ground_truth, Truth_patch])
    print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
    print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    # ground_truth[np.where(ground_truth==3)]=1
    # ground_truth[np.where(ground_truth==4)]=1
    print
    print 'No. of 1 : ', np.sum((ground_truth==1).astype(int))
    print 'No. of 2 : ', np.sum((ground_truth==2).astype(int))
    print 'No. of 3 : ', np.sum((ground_truth==3).astype(int))
    print 'No. of 4 : ', np.sum((ground_truth==4).astype(int))
    
    ground_truth = ground_truth.reshape(len(ground_truth))
    print 'Shape of Un-balanced patches numpy array : ',patches.shape
    print 'Shape of Un-balanced ground truth : ',ground_truth.shape
    
    patches = np.float32(patches)
    ground_truth = np.float32(ground_truth)
    if 'training' in out_root:
        print '... Saving the 2D training patches'
        np.save(out_root+'u_trainpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_trainlabel_2D_'+prefix+'_.npy',ground_truth)
        
    elif 'validation' in out_root:
        print '... Saving the 2D validation patches'
        np.save(out_root+'u_validpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_validlabel_2D_'+prefix+'_.npy',ground_truth)
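
Each truth patch above contributes a single label: the class of its center pixel. A small sketch of that center-pixel labelling with sliding_window_view, strided by the pixel offset, on an invented 40x40 slice:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

patch_size, offset = 5, 2
truth_slice = np.random.randint(0, 5, (40, 40))

truth_patches = sliding_window_view(truth_slice, (patch_size, patch_size))[::offset, ::offset]
truth_patches = truth_patches.reshape(-1, patch_size, patch_size)

labels = truth_patches[:, (patch_size - 1) // 2, (patch_size - 1) // 2]
print(truth_patches.shape, labels.shape)   # (324, 5, 5) (324,)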
Beispiel #32
0
def extract_patches(filename_base,
                    num_images,
                    patch_size=conf.patch_size,
                    phase='train'):
    patches = []
    for i in range(1, num_images + 1):
        if phase == 'train':
            imageid = "satImage_%.3d" % i
            image_filename = filename_base + imageid + ".png"
            if os.path.isfile(image_filename):
                img = mpimg.imread(image_filename)
                img = resize(
                    img, (conf.train_image_resize, conf.train_image_resize))
                patches.append(
                    image.extract_patches(img, (patch_size, patch_size),
                                          extraction_step=1))
                rot90img = rotate(img,
                                  90,
                                  reshape=False,
                                  mode='reflect',
                                  order=3)
                patches.append(
                    image.extract_patches(rot90img, (patch_size, patch_size),
                                          extraction_step=1))
                rot45img = rotate(img,
                                  45,
                                  reshape=False,
                                  mode='reflect',
                                  order=3)
                patches.append(
                    image.extract_patches(rot45img, (patch_size, patch_size),
                                          extraction_step=1))
                rot135img = rotate(img,
                                   135,
                                   reshape=False,
                                   mode='reflect',
                                   order=3)
                patches.append(
                    image.extract_patches(rot135img, (patch_size, patch_size),
                                          extraction_step=1))
        elif phase == 'test':
            imageid = "raw_test_%d_pixels" % i
            image_filename = filename_base + imageid + ".png"
            if os.path.isfile(image_filename):
                img = mpimg.imread(image_filename)
                img = resize(img,
                             (conf.test_image_resize, conf.test_image_resize))
                patches.append(
                    image.extract_patches(img, (patch_size, patch_size),
                                          extraction_step=1))
        elif phase == 'train_cnn_output':
            imageid = "raw_satImage_%.3d_pixels" % i
            image_filename = filename_base + imageid + ".png"
            if os.path.isfile(image_filename):
                img = mpimg.imread(image_filename)
                img = resize(
                    img, (conf.train_image_resize, conf.train_image_resize))
                patches.append(
                    image.extract_patches(img, (patch_size, patch_size),
                                          extraction_step=1))
        else:
            raise ValueError('incorrect phase')
    return patches
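
The function above returns one dense patch grid per (possibly rotated) image. A hypothetical follow-up step, with invented grid sizes, showing how such grids are typically stacked into a single training array:

import numpy as np

patch_size = 16
# Stand-ins for the grids appended above, e.g. for grayscale inputs:
# each has shape (rows, cols, patch_size, patch_size).
patch_grids = [np.random.rand(35, 35, patch_size, patch_size) for _ in range(4)]

train_patches = np.concatenate([p.reshape(-1, patch_size, patch_size) for p in patch_grids])
print(train_patches.shape)   # (4900, 16, 16)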
def B_Patch_Preprocess_recon_2D(patch_size_x=5,patch_size_y=5,prefix='Sda',in_root='',out_root='',recon_flag=True):
    
    #Initialize user variables
    patch_size = patch_size_x
    patch_pixels = patch_size*patch_size
    pixel_offset = int(patch_size*0.7)
    padding = patch_size*2
    #threshold = patch_pixels*0.3
    if recon_flag == False:
        recon_num = 4
    if recon_flag == True:
        recon_num = 5
    patches = np.zeros(patch_pixels*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Recon=[]
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
#        if len(Flair) is 1:
#            break
        for file1 in files:
            #print file1
            if file1[-3:]=='mha' and ( 'Flair' in file1):
                
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and ('t1_z' in file1 or 'T1' in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('t2' in file1 or 'T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('t1c_z' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)            
            elif file1[-3:]=='mha' and 'Recon' in file1:
                Recon.append(file1)
            #elif file1[-3:]=='mha' and 'Recon' in file1:
            #    Recon.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of images : ', number_of_images
    
    
    for image_iterator in range(number_of_images):
        print 'Iteration : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        Flair_image = new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = new(Folder[image_iterator]+T1[image_iterator])
        T2_image = new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = new(Folder[image_iterator]+T_1c[image_iterator])
        if recon_flag == True:
            Recon_image = new(Folder[image_iterator]+Recon[image_iterator])
        Truth_image = new(Folder[image_iterator]+Truth[image_iterator])
        
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        if recon_flag == True:
            Recon_image=Recon_image.data
        Truth_image = Truth_image.data
        
        x_span,y_span,z_span = np.where(Truth_image!=0)
        
        start_slice = min(z_span)
        stop_slice = max(z_span)
        image_patch = np.zeros(patch_size*patch_size*recon_num)
        image_label = np.zeros(1)
        for i in range(start_slice, stop_slice+1):    
            Flair_slice = np.transpose(Flair_image[:,:,i])
            T1_slice = np.transpose(T1_image[:,:,i])
            
            T2_slice = np.transpose(T2_image[:,:,i])
            T_1c_slice = np.transpose(T_1c_image[:,:,i])
            if recon_flag==True:
                Recon_slice = np.transpose(Recon_image[:,:,i])      
            Truth_slice = np.transpose(Truth_image[:,:,i])
            
            x_dim,y_dim = np.size(Flair_slice,axis=0), np.size(Flair_slice, axis=1)
            
            x_span,y_span = np.where(Truth_slice!=0)
            if len(x_span)==0 or len(y_span)==0:
                continue
            x_start = np.min(x_span) - padding
            x_stop = np.max(x_span) + padding+1
            y_start = np.min(y_span) - padding
            y_stop = np.max(y_span) + padding+1
            
            Flair_patch = image.extract_patches(Flair_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T1_patch = image.extract_patches(T1_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T2_patch = image.extract_patches(T2_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T_1c_patch = image.extract_patches(T_1c_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            if recon_flag==True:
                Recon_patch = image.extract_patches(Recon_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)      
            Truth_patch = image.extract_patches(Truth_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            
            #print '1. truth dimension :', Truth_patch.shape
            
            Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1], patch_size*patch_size)
            T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1], patch_size*patch_size)
            T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1], patch_size*patch_size)  
            T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1], patch_size*patch_size)
            if recon_flag==True:
                Recon_patch = Recon_patch.reshape(Recon_patch.shape[0]*Recon_patch.shape[1], patch_size*patch_size)        
            Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1], patch_size, patch_size)
            
            #print '2. truth dimension :', Truth_patch.shape
            if recon_flag == True:
                slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch,Recon_patch], axis=1)
            else:
                slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
            Truth_patch = Truth_patch[:,(patch_size-1)/2,(patch_size-1)/2]
            Truth_patch = np.array(Truth_patch)
            Truth_patch = Truth_patch.reshape(len(Truth_patch),1)
            #print '3. truth dimension :', Truth_patch.shape
            
            image_patch = np.vstack([image_patch,slice_patch])
            image_label = np.vstack([image_label, Truth_patch])
        num_of_class = []
        for i in xrange(1,5):
            num_of_class.append(np.sum((image_label==i).astype(int)))
        max_num = max(num_of_class)
        max_num_2 = max(x for x in num_of_class if x!=max_num)
        
        Flair_patch = image.extract_patches(Flair_image[:,:,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y)
        T1_patch = image.extract_patches(T1_image[:,:,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y)
        T2_patch = image.extract_patches(T2_image[:,:,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y)
        T_1c_patch = image.extract_patches(T_1c_image[:,:,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y)
        Truth_patch = image.extract_patches(Truth_image[:,:,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1]*Truth_patch.shape[2],patch_size_x, patch_size_y, 1)
        Truth_patch = Truth_patch[:,(patch_size-1)/2,(patch_size-1)/2]
        
        
        
        
        for i in xrange(1,5):
            #print 'Max : ', max_num_2
            #print 'Present : ', np.sum(image_label==i).astype(int)
            diff = max_num_2-np.sum(image_label==i).astype(int)
            #print 'Diff : ', diff
            if np.sum(image_label==i).astype(int) >= max_num_2:
                #print 'Continuing i = ', i
                continue
            #print 'TEST : ', Truth_patch.shape
            index_x,index_y = np.where(Truth_patch==i)
            #print 'Length : ',len(index_x)
            index = np.arange(len(index_x))
            shuffle(index)
            temp = Truth_patch[index_x[index[0:diff]],:]
            image_label = np.vstack([image_label,temp])
            F_p = Flair_patch[index_x[index[0:diff]],:]
            T1_p = T1_patch[index_x[index[0:diff]],:]
            T2_p = T2_patch[index_x[index[0:diff]],:]
            T_1c_p = T_1c_patch[index_x[index[0:diff]],:]
            temp_patch = np.concatenate([F_p, T1_p, T2_p, T_1c_p], axis=1)
            image_patch = np.vstack([image_patch, temp_patch])
            
            
        
            
#            
#            #print 'image patch : ', image_patch.shape
#            #print 'image_label : ', image_label.shape
#            index_x,index_y = np.where(image_label==i)
#            temp_patch = image_patch[index_x,:]
#            temp_label = image_label[index_x,:]
#            index = np.arange(len(temp_patch))
#            shuffle(index)
#            #print 'Temp patch : ', temp_patch.shape
#            #print 'Temp_label : ', temp_label.shape
#            if len(index)>min_num_2:
#                temp_patch = temp_patch[index[0:min_num_2],:]
#                temp_label = temp_label[index[0:min_num_2],:]
        patches = np.vstack([patches,image_patch])
        ground_truth = np.vstack([ground_truth, image_label])
        
        print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
        print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    
        print
        print 'No. of 1 : ', np.sum((ground_truth==1).astype(int))
        print 'No. of 2 : ', np.sum((ground_truth==2).astype(int))
        print 'No. of 3 : ', np.sum((ground_truth==3).astype(int))
        print 'No. of 4 : ', np.sum((ground_truth==4).astype(int))
            
            
            
            
#        patches = np.vstack([patches,slice_patch])
#        ground_truth = np.vstack([ground_truth, Truth_patch])
    print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
    print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    
    print
    print 'No. of 1 : ', np.sum((ground_truth==1).astype(int))
    print 'No. of 2 : ', np.sum((ground_truth==2).astype(int))
    print 'No. of 3 : ', np.sum((ground_truth==3).astype(int))
    print 'No. of 4 : ', np.sum((ground_truth==4).astype(int))
    
    ground_truth = ground_truth.reshape(len(ground_truth))
    print 'Shape of balanced patches numpy array : ',patches.shape
    print 'Shape of balanced ground truth : ',ground_truth.shape
    if recon_flag==False:
        patches = patches[:,0:patch_size*patch_size*4]
    
    if 'training' in out_root and recon_flag == True:
        print '... Saving the 2D training patches'
        np.save(out_root+'b_trainpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_2D_'+prefix+'_.npy',ground_truth)
    elif recon_flag == True:
        print '... Saving the 2D validation patches'
        np.save(out_root+'b_validpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_2D_'+prefix+'_.npy',ground_truth)
    
    if 'training' in out_root and recon_flag == False:
        print '... Saving the 2D training patches'
        np.save(out_root+'b_trainpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_2D_'+prefix+'_.npy',ground_truth)
        
    elif recon_flag == False:
        print '... Saving the 2D validation patches'
        np.save(out_root+'b_validpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_2D_'+prefix+'_.npy',ground_truth)
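
The labelling step above assigns each patch the class of its centre ground-truth pixel. A small hedged sketch of just that step on synthetic label patches (the shapes are assumptions):

import numpy as np

patch_size = 5
n_patches = 100

# Assumed ground-truth patches: one patch_size x patch_size window per row, labels 0..4
truth_patches = np.random.randint(0, 5, size=(n_patches, patch_size, patch_size))

# One label per patch: the value of its centre pixel
center = (patch_size - 1) // 2
labels = truth_patches[:, center, center].reshape(-1, 1)
print(labels.shape)   # (100, 1)
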
Beispiel #34
0
	truth_pixels = []

	patches = np.zeros((1,patch_size,patch_size))
	ground_truths = np.zeros((1))

	print '==> Extracting patches..'

	stop_slice = img.shape[2]


	for j in xrange(0, stop_slice):

		image_slice = img[:,:,j]
		truth_slice = truth[:,:,j]

		patch = image.extract_patches(image_slice[100:300,100:300], patch_size, extraction_step = 3)
		patch = patch.reshape(patch.shape[0]*patch.shape[1],patch_size,patch_size)

		truth_patch = image.extract_patches(truth_slice[100:300,100:300], patch_size, extraction_step = 3)
		truth_patch = truth_patch.reshape(truth_patch.shape[0]*truth_patch.shape[1],patch_size,patch_size)

		truth_values = truth_patch[:, (patch_size - 1)/2, (patch_size -1)/2]

		patches = np.append(patches,patch,axis=0)
		ground_truths = np.append(ground_truths,truth_values,axis=0)

	patches = patches[1:patches.shape[0]]
	ground_truths = ground_truths[1:ground_truths.shape[0]]

	# background = patches[np.where(np.mean(patches,axis=(1,2))<0)]
	# background_truths = ground_truths[np.where(np.mean(patches,axis=(1,2))<0)]
Beispiel #35
0
def classify_test_data_3d(activate2, W_list, b_list):
    path = '../BRATS/10_1/Normalised_Test/'
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Folder = []
    Subdir_array = []
#    patch_size = 11
    patch_size_x = 5
    patch_size_y = 5
    patch_size_z = 5

    for subdir, dirs, files in os.walk(path):
        if len(Flair) == 20:
            break
        for file1 in files:
            #print file1
            if file1[-3:]=='mha' and 'Flair' in file1:
                Flair.append(file1)
                Folder.append(subdir+'/')
                Subdir_array.append(subdir[-5:])
            elif file1[-3:]=='mha' and 'T1' in file1:
                T1.append(file1)
            elif file1[-3:]=='mha' and 'T2' in file1:
                T2.append(file1)
            elif file1[-3:]=='mha' and 'T_1c' in file1:
                T_1c.append(file1)
    number_of_images = len(Flair)
    
    for image_iterator in range(number_of_images):
        print 'Iteration : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        print '... predicting'

        Flair_image = mha.new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = mha.new(Folder[image_iterator]+T1[image_iterator])
        T2_image = mha.new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = mha.new(Folder[image_iterator]+T_1c[image_iterator])
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data

        xdim, ydim, zdim = Flair_image.shape
        prediction_image = []
        Flair_patch = image.extract_patches(Flair_image, [patch_size_x,patch_size_y,patch_size_z])
        T1_patch = image.extract_patches(T1_image, [patch_size_x,patch_size_y,patch_size_z])
        T2_patch = image.extract_patches(T2_image, [patch_size_x,patch_size_y,patch_size_z])
        T_1c_patch = image.extract_patches(T_1c_image, [patch_size_x,patch_size_y,patch_size_z])
        
        print 'Raw patches extracted'
        print Flair_patch.shape
        print T1_patch.shape
        print T2_patch.shape
        print T_1c_patch.shape
        
        for j in range(Flair_patch.shape[2]):
            print 'Slice : ',j+1
            F_slice = Flair_patch[:,:,j,:,:,:]
            T1_slice = T1_patch[:,:,j,:,:,:]
            T2_slice = T2_patch[:,:,j,:,:,:]
            T_1c_slice = T_1c_patch[:,:,j,:,:,:]
            
            F_slice = F_slice.reshape(F_slice.shape[0]*F_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            T1_slice = T1_slice.reshape(T1_slice.shape[0]*T1_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            T2_slice = T2_slice.reshape(T2_slice.shape[0]*T2_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            T_1c_slice = T_1c_slice.reshape(T_1c_slice.shape[0]*T_1c_slice.shape[1], patch_size_x*patch_size_y*patch_size_z)
            
            temp_patch = np.concatenate([F_slice,T1_slice,T2_slice,T_1c_slice],axis=1)
            print 'Size of temp_patch : ',temp_patch.shape
            prediction_slice = predictOutput(temp_patch, activate2, W_list, b_list)
            prediction_image.append(prediction_slice)
            
        prediction_image = np.array(prediction_image)
        prediction_image = np.transpose(prediction_image)
        prediction_image = prediction_image.reshape([xdim-patch_size_x+1, ydim-patch_size_y+1, zdim-patch_size_z+1])
        output_image = np.zeros([xdim,ydim,zdim])
        output_image[1+((patch_size_x-1)/2):xdim-((patch_size_x-1)/2)+1,1+((patch_size_y-1)/2):ydim-((patch_size_y-1)/2)+1,1+((patch_size_z-1)/2):zdim-((patch_size_z-1)/2)+1] = prediction_image      
        np.save(Folder[image_iterator]+Subdir_array[image_iterator]+'_output_image.npy',output_image)#TODO: save it in meaningful name in corresponding folder
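
A hedged sketch of placing a valid-size prediction block back into a full-size volume with half-patch margins, on assumed dimensions; note that the slicing above starts one voxel later than the centred placement shown here, which may or may not be intended.

import numpy as np

xdim, ydim, zdim = 40, 40, 20   # assumed volume size
p = 5                           # assumed cubic patch size
half = (p - 1) // 2

pred = np.random.rand(xdim - p + 1, ydim - p + 1, zdim - p + 1)   # valid-region predictions
out = np.zeros((xdim, ydim, zdim))

# Centred placement: the margins on each side are half a patch wide
out[half:xdim - half, half:ydim - half, half:zdim - half] = pred
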
 
def get_hcp_2dpatches_SegmentationHR(extract_pourcent,patch_size = 128, n_patches = 1000, data = None):
  
    (T1s,T2s,T3s,T4s,masks) = data
	
    n_images = len(T2s)
    T1_patches = None
    T2_patches = None
    T3_patches= None
    T4_patches = None

    T1 = []
    T2 = []
    T3 = []
    T4 = []

    mask_extract = extract_pourcent
    patch_shape = (patch_size,patch_size)
    random_state = None

    for i in tqdm(range(n_images)):
		
        #Normalize data using mask
        T2_norm = array_normalization(X=T2s[i],M=masks[i],norm=0)
        mask = masks[i]
        T1_norm = T1s[i]
        T3_norm= T3s[i]
        T4_norm= T4s[i]
		

        for j in range(T1_norm.shape[2]): #Loop over the slices
            pT1 = extract_patches(T1_norm[:,:,j], patch_shape, extraction_step = 1)
            pT2 = extract_patches(T2_norm[:,:,j], patch_shape, extraction_step = 1)
            pT3 = extract_patches(T3_norm[:,:,j], patch_shape, extraction_step = 1)
            pT4 = extract_patches(T4_norm[:,:,j], patch_shape, extraction_step = 1)

            pmask = extract_patches(mask[:,:,j], patch_shape, extraction_step = 1)
            rng = check_random_state(random_state)
            i_s = rng.randint(T1_norm.shape[0] - patch_shape[0] + 1, size = n_patches)
            j_s = rng.randint(T1_norm.shape[1] - patch_shape[1] + 1, size = n_patches)
            pT1 = pT1[i_s, j_s]
            pT2 = pT2[i_s, j_s]
            pT3 = pT3[i_s, j_s]
            pT4 = pT4[i_s, j_s]

            pmask = pmask[i_s, j_s]

            #Reshape to (n_patches, patch_h, patch_w)
            pT1 = pT1.reshape(-1, patch_shape[0], patch_shape[1])
            pT2 = pT2.reshape(-1, patch_shape[0], patch_shape[1])

            pT3 = pT3.reshape(-1, patch_shape[0], patch_shape[1])
            pT4 = pT4.reshape(-1, patch_shape[0], patch_shape[1])

            pmask = pmask.reshape(-1, patch_shape[0], patch_shape[1])
            pmask = pmask.reshape(pmask.shape[0],-1)

            #Keep only patches whose mean mask coverage is at least extract_pourcent
            pmT1 = pT1[ np.mean(pmask,axis=1)>=mask_extract ]
            pmT2 = pT2[ np.mean(pmask,axis=1)>=mask_extract ]
            pmT3 = pT3[ np.mean(pmask,axis=1)>=mask_extract ]
            pmT4 = pT4[ np.mean(pmask,axis=1)>=mask_extract ]
			

            T1.append(pmT1)
            T2.append(pmT2)
            T3.append(pmT3)
            T4.append(pmT4)

    T1_patches = np.concatenate(T1,axis=0)
    T2_patches = np.concatenate(T2,axis=0) 
    T3_patches = np.concatenate(T3,axis=0)
    T4_patches = np.concatenate(T4,axis=0)        

    return (T1_patches,T2_patches,T3_patches,T4_patches)
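
A minimal sketch of the per-slice random patch sampling used above: the dense patch view is indexed with random top-left offsets so that only n_patches windows are kept. The slice size, patch shape and seed are assumptions.

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from sklearn.utils import check_random_state

slice_2d = np.random.rand(160, 160)   # assumed 2D slice
patch_shape = (128, 128)
n_patches = 20

view = sliding_window_view(slice_2d, patch_shape)   # (33, 33, 128, 128)
rng = check_random_state(0)
i_s = rng.randint(slice_2d.shape[0] - patch_shape[0] + 1, size=n_patches)
j_s = rng.randint(slice_2d.shape[1] - patch_shape[1] + 1, size=n_patches)
sampled = view[i_s, j_s]                            # (20, 128, 128), one random patch per index pair
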
		
def U_Patch_Preprocess_recon_3D(patch_size_x=5,patch_size_y=5,patch_size_z=5,prefix='SdA',in_root='',out_root='',recon_flag=True):
    
        
    patch_pixels = patch_size_x*patch_size_y*patch_size_z
    
    pixel_offset_x = int(patch_size_x*0.7)
    pixel_offset_y = int(patch_size_y*0.7)
    pixel_offset_z = 1
    
    padding = patch_size_x
    #patches = np.zeros(patch_pixels*4)
    if recon_flag is True:
        recon_num = 5
    else:
        recon_num = 4
    patches = np.zeros(patch_size_x*patch_size_y*patch_size_z*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    print path
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Recon=[]
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
        if len(Flair) == 21:
            break
        for file1 in files:     
                
            #print file1
            if file1[-3:]=='mha' and ('Flair' in file1):
                
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and ('T1' in file1 and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)            
            elif file1[-3:]=='mha' and 'Recon' in file1:
                Recon.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of images : ', number_of_images
    
    
    for image_iterator in range(number_of_images):
        print 'Image number : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        Flair_image = new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = new(Folder[image_iterator]+T1[image_iterator])
        T2_image = new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = new(Folder[image_iterator]+T_1c[image_iterator])
        if recon_flag is True:
            Recon_image = new(Folder[image_iterator]+Recon[image_iterator])
        try:
            Truth_image = new(Folder[image_iterator]+Truth[image_iterator])
        except:
            Truth_image = new2(Folder[image_iterator]+Truth[image_iterator])
        
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        if recon_flag is True:
            Recon_image=Recon_image.data
        Truth_image = Truth_image.data
        
        x_span,y_span,z_span = np.where(Truth_image!=0)
        x_start = np.min(x_span) - padding
        x_stop = np.max(x_span) + padding+1
        y_start = np.min(y_span) - padding
        y_stop = np.max(y_span) + padding+1
        z_start = np.min(z_span) - padding
        z_stop = np.max(z_span) +padding+1
        
        Flair_patch = image.extract_patches(Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T1_patch = image.extract_patches(T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T2_patch = image.extract_patches(T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T_1c_patch = image.extract_patches(T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        if recon_flag is True:        
            Recon_patch = image.extract_patches(Recon_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        Truth_patch = image.extract_patches(Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        
        print 'Raw patches extracted'    
        
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)  
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        if recon_flag is True:
            Recon_patch = Recon_patch.reshape(Recon_patch.shape[0]*Recon_patch.shape[1]*Recon_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1]*Truth_patch.shape[2], patch_size_x, patch_size_y, patch_size_z)
    
        print 'Patches reshaped'    
        
        if recon_flag is True:
            slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch, Recon_patch], axis=1)
        else:
            slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
        Truth_patch = Truth_patch[:,(patch_size_x-1)/2,(patch_size_y-1)/2,(patch_size_z-1)/2]
        Truth_patch = np.array(Truth_patch)
        Truth_patch = Truth_patch.reshape(len(Truth_patch),1)
        #print '3. truth dimension :', Truth_patch.shape
            
        patches = np.vstack([patches,slice_patch])
        ground_truth = np.vstack([ground_truth, Truth_patch])
    #
    #
    print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
    print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    
    print
    print 'No. of 1 : ', np.sum((ground_truth==1).astype(int))
    print 'No. of 2 : ', np.sum((ground_truth==2).astype(int))
    print 'No. of 3 : ', np.sum((ground_truth==3).astype(int))
    print 'No. of 4 : ', np.sum((ground_truth==4).astype(int))
    
    ground_truth = ground_truth.reshape(len(ground_truth))
    
    if recon_flag==False:
        patches = patches[:,0:patch_size_x*patch_size_y*patch_size_z*4]
    
    #np.save('Training_patches.npy',patches)
    #np.save('Training_labels.npy',ground_truth)
    #print ground_truth.shape
    #print patches.shape
    if 'training' in out_root and recon_flag==True:
        print '... Saving the unbalanced training patches'
        np.save(out_root+'u_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==True:
        print '... Saving the unbalanced testing patches'
        np.save(out_root+'u_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_validlabel_3D_'+prefix+'_.npy',ground_truth)
        
    if 'training' in out_root and recon_flag==False:
        print '... Saving the unbalanced training patches'
        np.save(out_root+'u_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==False:
        print '... Saving the unbalanced testing patches'
        np.save(out_root+'u_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_validlabel_3D_'+prefix+'_.npy',ground_truth)
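
The strided 3D extraction above (extraction_step = (pixel_offset_x, pixel_offset_y, pixel_offset_z)) can be emulated by slicing a dense sliding-window view with those steps before flattening. A hedged NumPy sketch with an assumed volume size and the same int(0.7 * patch_size) offsets:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

vol = np.random.rand(40, 40, 20)   # assumed cropped volume
px, py, pz = 5, 5, 5
ox, oy, oz = 3, 3, 1               # int(5 * 0.7) = 3 in-plane, 1 through-plane

view = sliding_window_view(vol, (px, py, pz))   # (36, 36, 16, 5, 5, 5)
strided = view[::ox, ::oy, ::oz]                # keep every (ox, oy, oz)-th window
rows = strided.reshape(-1, px * py * pz)        # one flattened 3D patch per row
print(rows.shape)                               # (2304, 125)
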
        
def perfect_balance_3D(patch_size_x=5,patch_size_y=5,patch_size_z=5,prefix='SdA',in_root='',out_root='',recon_flag=False):
    
    patch_pixels = patch_size_x*patch_size_y*patch_size_z
    
    pixel_offset_x = int(patch_size_x*0.5)
    pixel_offset_y = int(patch_size_y*0.5)
    pixel_offset_z = 2
    
    padding = patch_size_x
    #threshold = patch_pixels*0.3
    #patches = np.zeros(patch_pixels*4)
    if recon_flag is True:
        recon_num = 5
    else:
        recon_num = 4
    patches = np.zeros(patch_size_x*patch_size_y*patch_size_z*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Recon=[]
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
#        if len(Flair) is 1:
#            break
        for file1 in files:     

            if file1[-3:]=='mha' and ('Flair' in file1):
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and ('T1' in file1 and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)            
            elif file1[-3:]=='mha' and 'Recon' in file1:
                Recon.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of Patients : ', number_of_images
    

#    
#    
    for image_iterator in range(number_of_images):
        print 'Image number : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        
        Flair_image = new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = new(Folder[image_iterator]+T1[image_iterator])
        T2_image = new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = new(Folder[image_iterator]+T_1c[image_iterator])
#        print 'image created'
        print Folder[image_iterator] + Truth[image_iterator]
        try:
            Truth_image = new( Folder[image_iterator] + Truth[image_iterator] )
        except:
            Truth_image = new2( Folder[image_iterator] + Truth[image_iterator] )
#        print 'image created'
        
        
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        Truth_image = Truth_image.data
        ######################################################################
        ##################Converting truth####################################
        # new_Truth_image = np.zeros(Truth_image.shape,dtype=int)
        # new_Truth_image[np.where(Truth_image==0)] = 0
        # new_Truth_image[np.where(Truth_image==1)] = 4
        # new_Truth_image[np.where(Truth_image==2)] = 1
        # new_Truth_image[np.where(Truth_image==3)] = 3
        # new_Truth_image[np.where(Truth_image==4)] = 2
        # Truth_image = new_Truth_image
        ######################################################################
        
        x_span,y_span,z_span = np.where(Truth_image!=0)
        x_start = np.min(x_span) - padding
        x_stop = np.max(x_span) + padding+1
        y_start = np.min(y_span) - padding
        y_stop = np.max(y_span) + padding+1
        z_start = np.min(z_span) - padding
        z_stop = np.max(z_span) +padding+1


        Flair_patch = image.extract_patches(Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop], [patch_size_x,patch_size_y,patch_size_z])
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T1_patch = image.extract_patches(T1_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T2_patch = image.extract_patches(T2_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T_1c_patch = image.extract_patches(T_1c_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        
        T_patch = image.extract_patches(Truth_image[x_start:x_stop, y_start:y_stop, z_start:z_stop],[patch_size_x,patch_size_y,patch_size_z])
        T_patch = T_patch.reshape(T_patch.shape[0]*T_patch.shape[1]*T_patch.shape[2],patch_size_x, patch_size_y, patch_size_z)
        T_patch = T_patch[:,(patch_size_x-1)/2,(patch_size_y-1)/2,(patch_size_z-1)/2]
        T_patch = T_patch.reshape(len(T_patch),1)
        num_of_class = []
        for i in xrange(0,5):
            num_of_class.append(np.sum((T_patch==i).astype(int)))
        minim = min(x for x in num_of_class if x!=0)
        if minim > 3000:
            minim = 3000
        slice_patch = np.zeros(patch_size_x*patch_size_y*patch_size_z*recon_num)
#        print ' CHECK ', slice_patch.shape
        Truth_patch = np.zeros(1)
#        print minim
#        print 
        for i in xrange(5):
            if num_of_class[i]==0:
                continue
#            print 'LOOK ' , i
            index_x, index_y = np.where(T_patch==i)
#            print index_x
            index1 = np.arange(len(index_x))
            shuffle(index1)
#            print index1
#            print index_x[index1[0:minim]].shape
            slice_patch1 = np.concatenate([Flair_patch[index_x[index1[0:minim]],:],T1_patch[index_x[index1[0:minim]],:], T2_patch[index_x[index1[0:minim]],:], T_1c_patch[index_x[index1[0:minim]],:]], axis=1)
#            print 'Slice_patch 1', slice_patch1.shape
#            print 'Slice patch ', slice_patch.shape
            slice_patch = np.vstack([slice_patch, slice_patch1])
#            print Truth_patch.shape
#            print T_patch.shape
            Truth_patch = np.vstack([Truth_patch, T_patch[index_x[index1[0:minim]]]])
        
        print 'No. of 0 Normal tissue : ', np.sum((Truth_patch==0).astype(int))    
        print 'No. of 1 Necrotic : ', np.sum((Truth_patch==1).astype(int))
        print 'No. of 2 Edema : ', np.sum((Truth_patch==2).astype(int))
        print 'No. of 3 Non-Enhancing : ', np.sum((Truth_patch==3).astype(int))
        print 'No. of 4 Enhancing : ', np.sum((Truth_patch==4).astype(int))
            
        patches = np.vstack([patches,slice_patch])        
        ground_truth = np.vstack([ground_truth, Truth_patch])
        
        #######################################################################
        ######Extracting additional (mostly normal-tissue) patches outside the tumor box######
        Flair_image[x_start:x_stop, y_start:y_stop, z_start:z_stop] = 0
        Flair_patch = image.extract_patches(Flair_image, [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T1_patch = image.extract_patches(T1_image, [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T2_patch = image.extract_patches(T2_image, [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        T_1c_patch = image.extract_patches(T_1c_image, [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        Truth_patch = image.extract_patches(Truth_image, [patch_size_x,patch_size_y,patch_size_z],(pixel_offset_x,pixel_offset_y,pixel_offset_z))
        
        print 'New Raw patches extracted' 
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)  
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y*patch_size_z)
        Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1]*Truth_patch.shape[2], patch_size_x, patch_size_y, patch_size_z)
        
        indexx = np.where(Flair_patch[:,int(patch_size_x*patch_size_y*patch_size_z/2)]>1.5)
        indexx = indexx[0]
        shuffle(indexx)
        if len(indexx)>500:
            indexx = indexx[0:500]
#        print indexx.shape
        Flair_patch = Flair_patch[indexx,:]
        T1_patch = T1_patch[indexx,:]
        T2_patch = T2_patch[indexx,:]
        T_1c_patch = T_1c_patch[indexx,:]
        
        Truth_patch =Truth_patch[indexx, :]
#        print Truth_patch.shape
#        print Flair_patch.shape
        slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
        Truth_patch = Truth_patch[:,(patch_size_x-1)/2,(patch_size_y-1)/2,(patch_size_z-1)/2]
        Truth_patch = np.array(Truth_patch)
#        print Truth_patch.shape
        Truth_patch = Truth_patch.reshape(len(Truth_patch),1)
#        slice_patch = slice_patch[0,:,:]
        
#        print patches.shape
#        print slice_patch.shape
        patches = np.vstack([patches,slice_patch])
        ground_truth = np.vstack([ground_truth, Truth_patch])
        print 'Extra patches added! ', len(indexx)
        print 'Patches reshaped'
        #######################################################################
        print ground_truth.shape
        print patches.shape

    ground_truth = ground_truth.reshape(len(ground_truth))
    print 'No. of 0 Normal tissue: ', np.sum((ground_truth==0).astype(int))    
    print 'No. of 1 Necrotic: ', np.sum((ground_truth==1).astype(int))
    print 'No. of 2 Edema: ', np.sum((ground_truth==2).astype(int))
    print 'No. of 3 Non-enhancing: ', np.sum((ground_truth==3).astype(int))
    print 'No. of 4 Enhancing: ', np.sum((ground_truth==4).astype(int))
    
    
    #np.save('Training_patches.npy',patches)
    #np.save('Training_labels.npy',ground_truth)
    #print ground_truth.shape
    #print patches.shape
    if 'training' in out_root and recon_flag==True:
        print '... Saving the balanced training patches'
        np.save(out_root+'b_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==True:
        print '... Saving the balanced validation patches'
        np.save(out_root+'b_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_3D_'+prefix+'_.npy',ground_truth)
        
    if 'training' in out_root and recon_flag==False:
        print '... Saving the balanced training patches'
        np.save(out_root+'b_trainpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_trainlabel_3D_'+prefix+'_.npy',ground_truth)
    elif recon_flag==False:
        print '... Saving the balanced testing patches'
        np.save(out_root+'b_validpatch_3D_'+prefix+'_.npy',patches)
        np.save(out_root+'b_validlabel_3D_'+prefix+'_.npy',ground_truth)
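
A hedged sketch of the per-class subsampling used above, which keeps at most min(minority class count, 3000) patches per label; the feature rows and labels here are synthetic assumptions.

import numpy as np

features = np.random.rand(1000, 125)                 # assumed: one flattened patch per row
labels = np.random.randint(0, 5, size=(1000, 1))     # assumed labels 0..4

counts = [int(np.sum(labels == c)) for c in range(5)]
cap = min(min(x for x in counts if x != 0), 3000)    # minority count, capped at 3000

balanced_feats, balanced_labels = [], []
for c in range(5):
    if counts[c] == 0:
        continue
    idx = np.where(labels[:, 0] == c)[0]
    np.random.shuffle(idx)                           # random subset of this class
    balanced_feats.append(features[idx[:cap]])
    balanced_labels.append(labels[idx[:cap]])

balanced_feats = np.vstack(balanced_feats)
balanced_labels = np.vstack(balanced_labels)
print(balanced_feats.shape, balanced_labels.shape)
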
Beispiel #40
0
        Grafull_path = os.path.join(test_Graimg_path, line)

        f = Image.open(full_path)
        Graf = Image.open(Grafull_path)
        img = np.asarray(f, dtype=np.float32)
        Gra = np.asarray(Graf, dtype=np.float32)
        img = img.transpose(2, 0, 1)
        Gra = Gra.transpose(2, 0, 1)

        img1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        img1[0, :, :, :] = img
        Gra1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        Gra1[0, :, :, :] = Gra


        patches = extract_patches(img, (3, patchSize, patchSize), patchSize)
        Grapatches = extract_patches(Gra, (3, patchSize, patchSize), patchSize)

        test_patches = []
        X = patches.reshape((-1, 3, patchSize, patchSize)) 
        GraX = Grapatches.reshape((-1, 3, patchSize, patchSize))

        y = []
        t = xp.zeros((1, 1), np.float32)
        # t[0][0] = int(la)                                  #for debug

        X_batch = np.zeros(X.shape)
        for i in range(len(X_batch)):
            X_batch[i] = X[int(i)]
        X_batch = X_batch[:patches_per_img]
        X_batch = xp.array(X_batch.astype(np.float32))
Beispiel #41
0
def perfect_balance_2D(patch_size_x=5,patch_size_y=5,prefix='Sda',in_root='',out_root=''):
    
    #Initialize user variables
    patch_size = patch_size_x
    patch_pixels = patch_size*patch_size
    pixel_offset = patch_size
    padding = patch_size
    #threshold = patch_pixels*0.3
    recon_num = 4
    label_num = 5
    patches = np.zeros(patch_pixels*recon_num)
#    ground_truth = np.zeros(1)
    ground_truth = np.zeros(1) 
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
        # if len(Flair) is 1:
        #     break
        for file1 in files:
            #print file1
            if file1[-3:]=='mha' and ('Flair' in file1):                
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and ('T1' in file1 and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:]=='mha' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='mha' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of images : ', number_of_images
    
    
    for image_iterator in range(number_of_images):
        print 'Iteration : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        Flair_image = new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = new(Folder[image_iterator]+T1[image_iterator])
        T2_image = new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = new(Folder[image_iterator]+T_1c[image_iterator])
        try:
            Truth_image = new(Folder[image_iterator]+Truth[image_iterator])
        except:
            Truth_image = new2(Folder[image_iterator]+Truth[image_iterator])
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        Truth_image = Truth_image.data
        

        # Truth_image[np.where(Truth_image==3)]=1
        # Truth_image[np.where(Truth_image==4)]=1
        
        x_span,y_span,z_span = np.where(Truth_image!=0)        
        start_slice = min(z_span)
        stop_slice = max(z_span)
        x_start = min(x_span)-30
        x_stop = max(x_span)+30
        y_start = min(y_span)-30
        y_stop = max(y_span)+30
        
        Flair_patch = image.extract_patches(Flair_image[x_start:x_stop,y_start:y_stop,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1]*Flair_patch.shape[2], patch_size_x*patch_size_y)
        
        T1_patch = image.extract_patches(T1_image[x_start:x_stop,y_start:y_stop,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1]*T1_patch.shape[2], patch_size_x*patch_size_y)
        
        T2_patch = image.extract_patches(T2_image[x_start:x_stop,y_start:y_stop,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1]*T2_patch.shape[2], patch_size_x*patch_size_y)
        
        T_1c_patch = image.extract_patches(T_1c_image[x_start:x_stop,y_start:y_stop,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1]*T_1c_patch.shape[2], patch_size_x*patch_size_y)
        
        T_patch = image.extract_patches(Truth_image[x_start:x_stop,y_start:y_stop,start_slice:stop_slice],[patch_size_x,patch_size_y,1])
        T_patch = T_patch.reshape(T_patch.shape[0]*T_patch.shape[1]*T_patch.shape[2],patch_size_x, patch_size_y, 1)
        T_patch = T_patch[:,(patch_size-1)/2,(patch_size-1)/2]
        
        num_of_class = []
        for i in xrange(0,label_num):
            num_of_class.append(np.sum((T_patch==i).astype(int)))
        minim = min(x for x in num_of_class if x!=0)
        if minim>3000:
            minim = 3000
#        flair_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
#        t1_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
#        t2_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
#        t1c_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
        slice_patch = np.zeros(patch_size_x*patch_size_y*recon_num)
        Truth_patch = np.zeros(1)
#        print minim
#        print 
        for i in xrange(5):
            if num_of_class[i]==0:
                continue
            index_x, index_y = np.where(T_patch==i)
            index1 = np.arange(len(index_x))
            shuffle(index1)
#            print index1
#            print index_x[index1[0:minim]].shape
            slice_patch1 = np.concatenate([Flair_patch[index_x[index1[0:minim]],:],T1_patch[index_x[index1[0:minim]],:], T2_patch[index_x[index1[0:minim]],:], T_1c_patch[index_x[index1[0:minim]],:]], axis=1)
            slice_patch = np.vstack([slice_patch, slice_patch1])
#            flair_patch = np.vstack([flair_patch, Flair_patch[index_x[index1[0:minim]],:]])
#            t1_patch = np.vstack([t1_patch,T1_patch[index_x[index1[0:minim]],:]])
#            t2_patch = np.vstack([t2_patch, T2_patch[index_x[index1[0:minim]],:]])
#            t1c_patch = np.vstack([t1c_patch, T_1c_patch[index_x[index1[0:minim]],:]])
#            print Truth_patch.shape
#            print T_patch.shape
            Truth_patch = np.vstack([Truth_patch, T_patch[index_x[index1[0:minim]]]])
        print 'No. of 0 : ', np.sum((Truth_patch==0).astype(int))    
        print 'No. of 1 : ', np.sum((Truth_patch==1).astype(int))
        print 'No. of 2 : ', np.sum((Truth_patch==2).astype(int))
        print 'No. of 3 : ', np.sum((Truth_patch==3).astype(int))
        print 'No. of 4 : ', np.sum((Truth_patch==4).astype(int))
        
        Truth_patch = Truth_patch.reshape(len(Truth_patch))
        print 'look here ---->',Truth_patch.shape

#        np.save(out_root+'patches_patient_flair'+str(image_iterator+1)+'.npy',flair_patch)
#        np.save(out_root+'patches_patient_t1'+str(image_iterator+1)+'.npy',t1_patch)
#        np.save(out_root+'patches_patient_t2'+str(image_iterator+1)+'.npy',t2_patch)
#        np.save(out_root+'patches_patient_t1c'+str(image_iterator+1)+'.npy',t1c_patch)
#        np.save(out_root+'patches_patient_'+str(image_iterator+1)+'.npy',slice_patch)
#        np.save(out_root+'labels_patient_'+str(image_iterator+1)+'.npy',Truth_patch)
        patches = np.vstack([patches,slice_patch])
        print 'patches balanced shape',patches.shape
        ground_truth = np.append(ground_truth, Truth_patch)
        print 'ground shape--->',ground_truth.shape
    index1 = np.arange(patches.shape[0])
    shuffle(index1)
    print np.shape(patches)
    patches = patches[index1,:]
    ground_truth = ground_truth[index1]
    patches = np.float32(patches)
    ground_truth = np.float32(ground_truth)
    if 'training' in out_root:
        np.save(out_root+'perfect_balance_trainpatch_'+prefix+'_.npy',patches)
        np.save(out_root+'perfect_balance_traintruth_'+prefix+'_.npy',ground_truth)
    else:        
        np.save(out_root+'perfect_balance_validpatch_'+prefix+'_.npy',patches)
        np.save(out_root+'perfect_balance_validtruth_'+prefix+'_.npy',ground_truth)
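
The final joint shuffle above reorders patches and labels with the same index permutation; np.random.permutation gives the same effect in one call. A hedged sketch on synthetic arrays:

import numpy as np

patches = np.random.rand(500, 100).astype(np.float32)        # assumed patch matrix
labels = np.random.randint(0, 5, size=500).astype(np.float32)

perm = np.random.permutation(patches.shape[0])
patches, labels = patches[perm], labels[perm]                # one permutation applied to both
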
if FR:
    model = FRModel(top=args.top)
else:
    model = Model(top=args.top)

cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
serializers.load_hdf5(args.model, model)
model.to_gpu()

if FR:
    ref_img = cv2.imread(args.REF)
    ref_img = cv2.cvtColor(ref_img, cv2.COLOR_BGR2RGB)
    patches = extract_patches(ref_img, (32, 32, 3), 32)
    X_ref = np.transpose(patches.reshape((-1, 32, 32, 3)), (0, 3, 1, 2))

img = cv2.imread(args.INPUT)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
patches = extract_patches(img, (32, 32, 3), 32)
X = np.transpose(patches.reshape((-1, 32, 32, 3)), (0, 3, 1, 2))

y = []
weights = []
batchsize = min(2000, X.shape[0])
t = xp.zeros((1, 1), np.float32)
for i in six.moves.range(0, X.shape[0], batchsize):
    X_batch = X[i:i + batchsize]
    X_batch = xp.array(X_batch.astype(np.float32))
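
Passing the patch size itself as the extraction step, as in the snippet above, yields non-overlapping 32x32 tiles. A hedged NumPy-only sketch of that tiling and the NHWC-to-NCHW transpose, on a synthetic image:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

img = np.random.randint(0, 256, size=(128, 160, 3), dtype=np.uint8)   # assumed RGB image

# Dense view over (32, 32, 3) windows, keeping every 32nd window in each spatial
# direction: non-overlapping tiles, like extract_patches(img, (32, 32, 3), 32).
tiles = sliding_window_view(img, (32, 32, 3))[::32, ::32, :]
X = tiles.reshape(-1, 32, 32, 3)                        # (n_tiles, 32, 32, 3)
X = np.transpose(X, (0, 3, 1, 2)).astype(np.float32)    # NCHW layout for the network
print(X.shape)                                          # (20, 3, 32, 32)
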
Beispiel #43
0
def U_Patch_Preprocess_recon_2D(patch_size_x=5,patch_size_y=5,prefix='SdA',in_root='',out_root='',recon_flag=True):
    
    #Initialize user variables
    patch_size = patch_size_x
    patch_pixels = patch_size*patch_size
    pixel_offset = int(patch_size*0.7)
    padding = patch_size/2
    threshold = patch_pixels*0.3
    if recon_flag:
        recon_num = 5
    else:
        recon_num = 4
    patches = np.zeros(patch_pixels*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Recon=[]
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
#        if len(Flair) is 4:
#            break
        for file1 in files:
            #print file1
            if file1[-3:]=='mha' and 'Flair' in file1:
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='mha' and 'T1' in file1:
                T1.append(file1)
            elif file1[-3:]=='mha' and 'T2' in file1:
                T2.append(file1)
            elif file1[-3:]=='mha' and 'T_1c' in file1:
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)
            #elif file1[-3:]=='mha' and 'Recon' in file1:
            #    Recon.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of images : ', number_of_images
    
    
    for image_iterator in range(number_of_images):
        print 'Iteration : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        Flair_image = mha.new(Folder[image_iterator]+Flair[image_iterator])
        T1_image = mha.new(Folder[image_iterator]+T1[image_iterator])
        T2_image = mha.new(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = mha.new(Folder[image_iterator]+T_1c[image_iterator])
        if recon_flag == True:
            Recon_image = mha.new(Folder[image_iterator]+Recon[image_iterator])
        Truth_image = mha.new(Folder[image_iterator]+Truth[image_iterator])
        
        Flair_image = Flair_image.data
        T1_image = T1_image.data
        T2_image = T2_image.data
        T_1c_image = T_1c_image.data
        if recon_flag == True:
            Recon_image=Recon_image.data
        Truth_image = Truth_image.data
        
        x_span,y_span,z_span = np.where(Truth_image!=0)
        
        start_slice = min(z_span)
        stop_slice = max(z_span)
        image_patch = np.zeros(patch_size*patch_size*recon_num)
        image_label = np.zeros(1)
        for i in range(start_slice, stop_slice+1):    
            Flair_slice = np.transpose(Flair_image[:,:,i])
            T1_slice = np.transpose(T1_image[:,:,i])
            
            T2_slice = np.transpose(T2_image[:,:,i])
            T_1c_slice = np.transpose(T_1c_image[:,:,i])
            if recon_flag==True:
                Recon_slice = np.transpose(Recon_image[:,:,i])      
            Truth_slice = np.transpose(Truth_image[:,:,i])
            
            x_dim,y_dim = np.size(Flair_slice,axis=0), np.size(Flair_slice, axis=1)
            
            x_span,y_span = np.where(Truth_slice!=0)
            if len(x_span)==0 or len(y_span)==0:
                continue
            x_start = np.min(x_span) - padding
            x_stop = np.max(x_span) + padding+1
            y_start = np.min(y_span) - padding
            y_stop = np.max(y_span) + padding+1
            
            Flair_patch = image.extract_patches(Flair_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T1_patch = image.extract_patches(T1_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T2_patch = image.extract_patches(T2_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T_1c_patch = image.extract_patches(T_1c_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            if recon_flag==True:
                Recon_patch = image.extract_patches(Recon_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)      
            Truth_patch = image.extract_patches(Truth_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            
            #print '1. truth dimension :', Truth_patch.shape
            
            Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1], patch_size*patch_size)
            T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1], patch_size*patch_size)
            T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1], patch_size*patch_size)  
            T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1], patch_size*patch_size)
            if recon_flag==True:
                Recon_patch = Recon_patch.reshape(Recon_patch.shape[0]*Recon_patch.shape[1], patch_size*patch_size)        
            Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1], patch_size, patch_size)
            
            #print '2. truth dimension :', Truth_patch.shape
            if recon_flag == True:
                slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch,Recon_patch], axis=1)
            else:
                slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
            Truth_patch = Truth_patch[:,(patch_size-1)/2,(patch_size-1)/2]
            Truth_patch = np.array(Truth_patch)
            Truth_patch = Truth_patch.reshape(len(Truth_patch),1)
            #print '3. truth dimension :', Truth_patch.shape
            
#            image_patch = np.vstack([image_patch,slice_patch])
#            image_label = np.vstack([image_label, Truth_patch])
#        num_of_class = []
#        for i in xrange(5):
#            num_of_class.append(np.sum((image_label==i).astype(int)))
#        min_num = min(num_of_class)
#        min_num_2 = min(x for x in num_of_class if x!=min_num)
#        for i in xrange(5):
#            #print 'image patch : ', image_patch.shape
#            #print 'image_label : ', image_label.shape
#            index_x,index_y = np.where(image_label==i)
#            temp_patch = image_patch[index_x,:]
#            temp_label = image_label[index_x,:]
#            index = np.arange(len(temp_patch))
#            shuffle(index)
#            #print 'Temp patch : ', temp_patch.shape
#            #print 'Temp_label : ', temp_label.shape
#            if len(index)>min_num_2:
#                temp_patch = temp_patch[index[0:min_num_2],:]
#                temp_label = temp_label[index[0:min_num_2],:]
#            patches = np.vstack([patches,temp_patch])
#            ground_truth = np.vstack([ground_truth, temp_label])
            
            
            
            
            #------check indentation-----#
            patches = np.vstack([patches,slice_patch])
            ground_truth = np.vstack([ground_truth, Truth_patch])
            for k, item in enumerate(ground_truth):
                if item != 0:
                    ground_truth[k] = 1
        
            
    print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
    print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    
    
    ground_truth = ground_truth.reshape(len(ground_truth))
    
    if recon_flag==False:
        patches = patches[:,0:patch_size*patch_size*4]
    
    if 'training' in out_root and recon_flag == True:
        print '... Saving the 2D training patches'
        np.save(out_root+'u_10_trainpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_10_trainlabel_2D_'+prefix+'_.npy',ground_truth)
    elif recon_flag == True:
        print '... Saving the 2D validation patches'
        np.save(out_root+'u_10_validpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_10_validlabel_2D_'+prefix+'_.npy',ground_truth)
    
    if 'training' in out_root and recon_flag == False:
        print'... Saving the 2D training patches'
        np.save(out_root+'u_10_trainpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_10_trainlabel_2D_'+prefix+'_.npy',ground_truth)
        
    elif recon_flag == False:
        print '... Saving the 2D validation patches'
        np.save(out_root+'u_10_validpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_10_validlabel_2D_'+prefix+'_.npy',ground_truth)
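# A minimal, self-contained sketch of the per-slice pattern used above: extract
# strided 2D patches from one modality slice, flatten them to rows, and label
# each patch by the centre pixel of the matching ground-truth patch. It assumes
# an older scikit-learn that still ships feature_extraction.image.extract_patches
# (removed in 0.24); the arrays are synthetic placeholders, not BRATS data.
import numpy as np
from sklearn.feature_extraction import image

patch_size = 11
pixel_offset = 5
flair_slice = np.random.rand(120, 120)                        # stand-in for one Flair slice
truth_slice = (np.random.rand(120, 120) > 0.9).astype(int)    # stand-in for the label slice

flair_patch = image.extract_patches(flair_slice, patch_size,
                                    extraction_step=pixel_offset)
truth_patch = image.extract_patches(truth_slice, patch_size,
                                    extraction_step=pixel_offset)

# flatten the patch grid to rows of patch_size*patch_size pixels
flair_rows = flair_patch.reshape(-1, patch_size * patch_size)
# each patch is labelled by its centre pixel, as in the function above
labels = truth_patch.reshape(-1, patch_size, patch_size)
labels = labels[:, (patch_size - 1) // 2, (patch_size - 1) // 2]
print(flair_rows.shape, labels.shape)                         # (484, 121) (484,)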
Beispiel #44
0
def make_pytable(img_path, label_path, patch_size, stride_size, pad_size,
                 split, num_classes, imgtype, labeltype):
    img_dtype = {}
    img_dtype['mask'] = tables.UInt8Atom()
    img_dtype['img'] = tables.Float32Atom()

    train_file = glob(img_path + "*." + imgtype)
    num_train = int(len(train_file) * (1 - split))
    val_file = train_file[num_train:]
    train_file = train_file[:num_train]

    phases = {}
    phases['train'], phases['val'] = train_file, val_file

    block_shape = {}
    block_shape['img'] = np.array((patch_size, patch_size, 3))
    block_shape['mask'] = np.array((patch_size, patch_size, num_classes))

    filters = tables.Filters(complevel=6, complib='zlib')

    storage = {}

    imgtypes = ['img', 'mask']

    for phase in phases.keys():
        print(phase)

        table_file = tables.open_file(f"./table_{phase}.pytable", mode='w')

        for type in imgtypes:
            storage[type] = table_file.create_earray(
                table_file.root,
                type,
                img_dtype[type],
                shape=np.append([0], block_shape[type]),
                chunkshape=np.append([1], block_shape[type]),
                filters=filters)

        for f in phases[phase]:
            print(f)

            for type in imgtypes:
                if type == "img":
                    img = cv2.cvtColor(cv2.imread(f), cv2.COLOR_BGR2RGB)
                    img = stain_normalization.normalizeStaining(img)
                    img = img / 255.0

                    img = np.pad(img, [(pad_size, pad_size),
                                       (pad_size, pad_size), (0, 0)])
                    img = extract_patches(img, (patch_size, patch_size, 3),
                                          stride_size)

                    img = img.reshape(-1, patch_size, patch_size, 3)

                else:
                    # read the label image directly as grayscale
                    # (cv2.cvtColor expects a colour-conversion code, not an imread flag)
                    img = cv2.imread(label_path + f.replace(imgtype, labeltype),
                                     cv2.IMREAD_GRAYSCALE)

                    if num_classes > 1:
                        img = to_categorical(img, num_classes=num_classes)
                    else:
                        img = img.reshape(img.shape[0], img.shape[1], 1)
                    # pad and patch the label; the image branch above has
                    # already been padded and patched manually
                    img = padAndPatch(img, pad_size, patch_size, stride_size)

                storage[type].append(img)
        table_file.close()
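# Hedged usage sketch for make_pytable above; the paths, extensions, patch
# geometry and class count are illustrative placeholders, not values taken
# from the original project.
make_pytable(img_path='./data/images/', label_path='./data/masks/',
             patch_size=256, stride_size=128, pad_size=128,
             split=0.2, num_classes=2, imgtype='png', labeltype='png')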
    def fit(self, modality, ground_truth=None, cat=None):
        """Compute the images images.

        Parameters
        ----------
        modality : object of type TemporalModality
            The modality object of interest.

        ground_truth : object of type GTModality or None
            The ground-truth of GTModality. If None, the whole data will be
            considered.

        cat : str or None
            String corresponding to the ground-truth of interest. Cannot be
            None if ground_truth is not None.

        Returns
        -------
        self : object
             Return self.

        """
        super(HaralickExtraction, self).fit(modality=modality,
                                              ground_truth=ground_truth,
                                              cat=cat)

        # Get the data and rescale as integers within the given levels
        vol_haralick = ((modality.data_ - np.ndarray.min(modality.data_)) *
                        ((self.levels -1) /
                         (np.ndarray.max(modality.data_) -
                          np.ndarray.min(modality.data_)))).astype(int)

        # Extract the set of patches from the modality data
        patches = extract_patches(vol_haralick, patch_shape=self.patch_size)

        # Allocate the haralick maps, one for each feature that
        # will be computed
        nb_directions = 13
        nb_features = 13
        self.data_ = np.zeros((modality.data_.shape[0],
                               modality.data_.shape[1],
                               modality.data_.shape[2],
                               nb_directions,
                               nb_features))

        # The serial per-patch loop below is kept commented out for reference;
        # the Parallel() call further down does the actual computation.

        # # Extract Haralick feature for each patch
        # # Define the shift to apply
        if isinstance(self.patch_size, tuple):
            y_shift = int(np.ceil((self.patch_size[0] - 1) / 2.))
            x_shift = int(np.ceil((self.patch_size[1] - 1) / 2.))
            z_shift = int(np.ceil((self.patch_size[2] - 1) / 2.))
        elif isinstance(self.patch_size, int):
            y_shift = int(np.ceil((self.patch_size - 1) / 2.))
            x_shift = int(np.ceil((self.patch_size - 1) / 2.))
            z_shift = int(np.ceil((self.patch_size - 1) / 2.))
        else:
            raise ValueError('patch_size must be an int or a tuple of ints.')

        # for y in range(patches.shape[0]):
        #     for x in range(patches.shape[1]):
        #         for z in range(patches.shape[2]):
        #             print 'Compute for the pixel at position {}{}{}'.format(
        #                 y, x, z)
        #             # Compute the haralick features
        #             self.data_[y + y_shift,
        #                        x + x_shift,
        #                        z + z_shift, :] = haralick(
        #                            patches[y, x, z, :],
        #                            distance=self.distance)

        # Create the list of indices to process
        yy, xx, zz = np.meshgrid(range(patches.shape[0]),
                                 range(patches.shape[1]),
                                 range(patches.shape[2]))
        # Linearize for fast processing
        yy = yy.reshape(-1)
        xx = xx.reshape(-1)
        zz = zz.reshape(-1)

        # Go for the parallel loop
        haralick_features = Parallel(n_jobs=-1)(delayed(
            _compute_haralick_features)(patches[y, x, z, :], self.distance)
                                                for y, x, z in zip(yy, xx, zz))

        # Convert to numpy array
        haralick_features = np.array(haralick_features)
        # Reshape the feature matrix
        haralick_features = haralick_features.reshape((patches.shape[0],
                                                       patches.shape[1],
                                                       patches.shape[2],
                                                       nb_directions,
                                                       nb_features))
        # Copy the feature into the object
        self.data_[y_shift : -y_shift,
                   x_shift : -x_shift,
                   z_shift : -z_shift] = haralick_features

        return self
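# A possible implementation of the _compute_haralick_features helper used in the
# Parallel loop above, assuming it wraps mahotas; this is a sketch under that
# assumption, not the original project's code.
import numpy as np
from mahotas.features import haralick

def _compute_haralick_features(patch, distance=1):
    # for a 3D integer patch mahotas returns a (13, 13) array:
    # 13 co-occurrence directions x 13 Haralick statistics, which matches
    # the (nb_directions, nb_features) layout allocated in fit() above
    return haralick(patch.astype(np.int32), distance=distance)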
def LBPpdfExtraction(im, **kwargs):
    # GOAL: extract the LBP pdf from a 2D image
    # We can handle both 2D and 3D images.

    # We need to get the minimum and maximum of the image
    range_lbp = kwargs.pop('range_lbp', (np.min(im), np.max(im)))
    # We need to decide the number of bins to be considered for
    # the computation of the pdf
    bins = range_lbp[1] - range_lbp[0]
    # We need to know whether to normalise the histogram or not
    density = kwargs.pop('density', True)

    # Check if we wish a sliding window
    strategy_win = kwargs.pop('strategy_win', None)

    # Check the dimension of the input image
    if len(im.shape) == 2:
        nd_im = 2
    elif len(im.shape) == 3:
        nd_im = 3
        # By default, we will extract the LBP along a given axis
        extr_3d = kwargs.pop('extr_3d', '2.5D')
        extr_axis = kwargs.pop('extr_axis', 'y')
    else:
        raise ValueError('LBPpdfExtraction: Can only handle 2D'
                         ' and 3D images.')

    if nd_im == 2:

        if strategy_win == 'sliding_win':
            win_size = kwargs.pop('win_size', (7, 7))
            overlap = kwargs.pop('overlap', False)

            if not overlap:
                # Import the function needed to extract patches
                from sklearn.feature_extraction.image import extract_patches

                patches = extract_patches(im, patch_shape=win_size,
                                          extraction_step=(win_size[0]))

                patches = patches.reshape(-1, win_size[0], win_size[1])

            else:

                patches = ExtractPatches2D(im, win_size)

            # The number of cores to use
            num_cores = kwargs.pop('num_cores', multiprocessing.cpu_count())

            hist_dict = {'bins' : bins, 'range' : range_lbp,
                         'density' : density}
            vol_hist = Parallel(n_jobs=num_cores)(delayed(hist_alone)
                                                  (p, **hist_dict)
                                                  for p in patches)

            return np.array(vol_hist)

        else:

            hist_dict = {'bins' : bins, 'range' : range_lbp,
                         'density' : density}
            return hist_alone(im, **hist_dict)

    elif nd_im == 3:

        if extr_3d == '2.5D':
            # We will process the different slices along the given axis
            # in parallel
            # The data are stored in (x, y, z) manner. We need to swap to the
            # first position the axis that is not involved in the 2D image
            if extr_axis == 'x':
                # Do not do anything
                vol = im
            elif extr_axis == 'y':
                # Move y at the beginning
                vol = np.swapaxes(im, 1, 0)
            elif extr_axis == 'z':
                # Move z at the beginning
                vol = np.swapaxes(im, 2, 0)

            # The number of cores to use
            num_cores = kwargs.pop('num_cores', multiprocessing.cpu_count())

            if strategy_win == 'sliding_win':
                win_size = kwargs.pop('win_size', (21, 21))
                overlap = kwargs.pop('overlap', False)

                if not overlap:

                    # Import the function needed to extract patches
                    from sklearn.feature_extraction.image import extract_patches

                    patches = Parallel(n_jobs=num_cores)(delayed(extract_patches)
                                                         (im, patch_shape=win_size,
                                                          extraction_step=(win_size[0]))
                                                         for im in vol)

                    patches = np.array(patches).reshape(-1, win_size[0],
                                                        win_size[1])

                else:

                    raise ValueError('This feature is not implemented yet;'
                                     ' it could require too much memory.')

                hist_dict = {'bins' : bins, 'range' : range_lbp,
                             'density' : density}
                vol_hist = Parallel(n_jobs=num_cores)(delayed(hist_alone)
                                                      (p, **hist_dict)
                                                      for p in patches)

                return np.array(vol_hist)

            else:

                # The number of cores to use
                num_cores = kwargs.pop('num_cores', multiprocessing.cpu_count())

                hist_dict = {'bins' : bins, 'range' : range_lbp,
                             'density' : density}
                vol_hist = Parallel(n_jobs=num_cores)(delayed(hist_alone)
                                                      (sl, **hist_dict)
                                                      for sl in vol)

                return np.array(vol_hist)

        elif extr_3d == '3D':
            # We need to use the LBP TOP
            print 'This strategy is not yet implemented.'
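# Hedged usage sketch for LBPpdfExtraction above; the input is a synthetic LBP
# code map and the keyword names simply mirror the kwargs popped inside the
# function (they are not documented elsewhere).
import numpy as np

lbp_map = np.random.randint(0, 256, size=(128, 128))
# one normalised histogram per non-overlapping 7x7 window
win_hists = LBPpdfExtraction(lbp_map, strategy_win='sliding_win',
                             win_size=(7, 7), overlap=False, num_cores=2)
# a single normalised histogram over the whole image
full_hist = LBPpdfExtraction(lbp_map, density=True)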
def extract_3d_patches(img_data, gt_data, mask_data, is_extract_more_csf):
    # patch details
    # patch_size = 32
    patch_shape = (patch_size, patch_size, patch_size)

    # empty matrix to hold patches
    imgs_patches_per_volume = np.empty(
        shape=[0, patch_size, patch_size, patch_size], dtype='int16')
    gt_patches_per_volume = np.empty(
        shape=[0, patch_size, patch_size, patch_size], dtype='int16')
    mask_patches_per_volume = np.empty(
        shape=[0, patch_size, patch_size, patch_size], dtype='int16')

    img_patches = extract_patches(img_data, patch_shape, extraction_step)
    gt_patches = extract_patches(gt_data, patch_shape, extraction_step)
    mask_patches = extract_patches(mask_data, patch_shape, extraction_step)

    rows = []
    cols = []
    depths = []
    for i in range(0, mask_patches.shape[0]):
        for j in range(0, mask_patches.shape[1]):
            for k in range(0, mask_patches.shape[2]):
                Point1 = int(patch_size / 2 - 1)
                Point2 = int(patch_size / 2)
                a1 = mask_patches.item((i, j, k, Point1, Point1, Point1))
                a2 = mask_patches.item((i, j, k, Point1, Point1, Point2))
                a3 = mask_patches.item((i, j, k, Point1, Point2, Point1))
                a4 = mask_patches.item((i, j, k, Point1, Point2, Point2))
                a5 = mask_patches.item((i, j, k, Point2, Point1, Point1))
                a6 = mask_patches.item((i, j, k, Point2, Point1, Point2))
                a7 = mask_patches.item((i, j, k, Point2, Point2, Point1))
                a8 = mask_patches.item((i, j, k, Point2, Point2, Point2))

                Sum = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8
                if Sum > 0:
                    rows.append(i)
                    cols.append(j)
                    depths.append(k)

    # number of non-zero patches
    N = len(rows)
    # select non-zero patches index
    selected_img_patches = img_patches[rows, cols, depths, :, :, :]
    selected_gt_patches = gt_patches[rows, cols, depths, :, :, :]

    # update database
    imgs_patches_per_volume = np.append(imgs_patches_per_volume,
                                        selected_img_patches,
                                        axis=0)
    gt_patches_per_volume = np.append(gt_patches_per_volume,
                                      selected_gt_patches,
                                      axis=0)

    # extract more patches for CSF
    if is_extract_more_csf:
        # create CSF mask
        extraction_step_csf = extraction_step_csf_only
        img_patches_csf = extract_patches(img_data, patch_shape,
                                          extraction_step_csf)
        gt_patches_csf = extract_patches(gt_data, patch_shape,
                                         extraction_step_csf)

        # extract CSF patches with small step

        rows = []
        cols = []
        depths = []
        for i in range(0, gt_patches_csf.shape[0]):
            for j in range(0, gt_patches_csf.shape[1]):
                for k in range(0, gt_patches_csf.shape[2]):
                    Point1 = int(patch_size / 2 - 1)
                    Point2 = int(patch_size / 2)
                    a1 = gt_patches_csf.item((i, j, k, Point1, Point1, Point1))
                    a2 = gt_patches_csf.item((i, j, k, Point1, Point1, Point2))
                    a3 = gt_patches_csf.item((i, j, k, Point1, Point2, Point1))
                    a4 = gt_patches_csf.item((i, j, k, Point1, Point2, Point2))
                    a5 = gt_patches_csf.item((i, j, k, Point2, Point1, Point1))
                    a6 = gt_patches_csf.item((i, j, k, Point2, Point1, Point2))
                    a7 = gt_patches_csf.item((i, j, k, Point2, Point2, Point1))
                    a8 = gt_patches_csf.item((i, j, k, Point2, Point2, Point2))

                    Sum = (a1 == 1 or a2 == 1 or a3 == 1 or a4 == 1 or a5 == 1
                           or a6 == 1 or a7 == 1 or a8 == 1)
                    if Sum:
                        rows.append(i)
                        cols.append(j)
                        depths.append(k)

        N = len(rows)
        if N != 0:
            csf_more_img_patches = img_patches_csf[rows, cols, depths, :, :, :]
            csf_more_gt_patches = gt_patches_csf[rows, cols, depths, :, :, :]

            # update database
            imgs_patches_per_volume = np.append(imgs_patches_per_volume,
                                                csf_more_img_patches,
                                                axis=0)
            gt_patches_per_volume = np.append(gt_patches_per_volume,
                                              csf_more_gt_patches,
                                              axis=0)

    # convert to categorical
    gt_patches_per_volume = separate_labels(gt_patches_per_volume)
    return imgs_patches_per_volume, gt_patches_per_volume
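# Side note: the triple loop above that inspects the 2x2x2 centre block of every
# mask patch can be vectorised; this sketch reproduces the same rows/cols/depths
# selection (and the same ordering) with plain NumPy indexing.
import numpy as np

def centre_block_indices(mask_patches, patch_size):
    p1 = patch_size // 2 - 1
    p2 = patch_size // 2 + 1                      # exclusive slice end
    centres = mask_patches[:, :, :, p1:p2, p1:p2, p1:p2]
    keep = centres.sum(axis=(3, 4, 5)) > 0        # same test as Sum > 0 above
    rows, cols, depths = np.nonzero(keep)         # row-major, like the loops
    return rows, cols, depths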
def extract_2d_patches(img_data, gt_data, mask_data, is_extract_more_csf):
    patch_shape = (patch_size, patch_size)
    # empty matrix to hold patches
    imgs_patches_per_slice = np.empty(shape=[0, patch_size, patch_size],
                                      dtype='int16')
    gt_patches_per_slice = np.empty(shape=[0, patch_size, patch_size],
                                    dtype='int16')
    mask_patches_per_slice = np.empty(shape=[0, patch_size, patch_size],
                                      dtype='int16')

    img_patches = extract_patches(img_data, patch_shape, extraction_step)
    gt_patches = extract_patches(gt_data, patch_shape, extraction_step)
    mask_patches = extract_patches(mask_data, patch_shape, extraction_step)

    # extract patches which has center pixel lying inside mask
    rows = []
    cols = []
    for i in range(0, mask_patches.shape[0]):
        for j in range(0, mask_patches.shape[1]):
            a1 = mask_patches.item(
                (i, j, int(patch_size / 2 - 1), int(patch_size / 2 - 1)))
            a2 = mask_patches.item(
                (i, j, int(patch_size / 2 - 1), int(patch_size / 2)))
            a3 = mask_patches.item(
                (i, j, int(patch_size / 2), int(patch_size / 2 - 1)))
            a4 = mask_patches.item(
                (i, j, int(patch_size / 2), int(patch_size / 2)))
            Sum = a1 + a2 + a3 + a4
            if Sum > 0:
                rows.append(i)
                cols.append(j)

    # number of non-zero patches
    N = len(rows)

    # select non-zero patch indices
    selected_img_patches = img_patches[rows, cols, :, :]
    selected_gt_patches = gt_patches[rows, cols, :, :]

    # update database
    imgs_patches_per_slice = np.append(imgs_patches_per_slice,
                                       selected_img_patches,
                                       axis=0)
    gt_patches_per_slice = np.append(gt_patches_per_slice,
                                     selected_gt_patches,
                                     axis=0)

    # extract more patches for CSF
    if is_extract_more_csf:
        # create CSF mask
        extraction_step_csf = extraction_step_csf_only
        img_patches_csf = extract_patches(img_data, patch_shape,
                                          extraction_step_csf)
        gt_patches_csf = extract_patches(gt_data, patch_shape,
                                         extraction_step_csf)

        # extract CSF patches with small step
        rows = []
        cols = []
        for i in range(0, gt_patches_csf.shape[0]):
            for j in range(0, gt_patches_csf.shape[1]):
                a1 = gt_patches_csf.item(
                    (i, j, int(patch_size / 2 - 1), int(patch_size / 2 - 1)))
                a2 = gt_patches_csf.item(
                    (i, j, int(patch_size / 2 - 1), int(patch_size / 2)))
                a3 = gt_patches_csf.item(
                    (i, j, int(patch_size / 2), int(patch_size / 2 - 1)))
                a4 = gt_patches_csf.item(
                    (i, j, int(patch_size / 2), int(patch_size / 2)))
                Sum = (a1 == 1 or a2 == 1 or a3 == 1 or a4 == 1)
                if Sum:
                    rows.append(i)
                    cols.append(j)

        N = len(rows)
        if N != 0:
            csf_more_img_patches = img_patches_csf[rows, cols, :, :]
            csf_more_gt_patches = gt_patches_csf[rows, cols, :, :]

            # update database
            imgs_patches_per_slice = np.append(imgs_patches_per_slice,
                                               csf_more_img_patches,
                                               axis=0)
            gt_patches_per_slice = np.append(gt_patches_per_slice,
                                             csf_more_gt_patches,
                                             axis=0)

    # convert to categorical
    gt_patches_per_slice = separate_labels(gt_patches_per_slice)
    return imgs_patches_per_slice, gt_patches_per_slice
Beispiel #49
0
def upsampler_3D(data,
                 variance=None,
                 block_size=(3, 3, 3),
                 block_up=(5, 5, 5),
                 mask=None,
                 dtype=np.float64,
                 ncores=-1,
                 params=None):

    factor = np.array(block_up) / np.array(block_size)[:-1]

    print(block_size, factor, block_up)

    if mask is None:
        mask = np.ones(data.shape[:3])

    if data.ndim == 4:
        overlap = (1, 1, 1, data.shape[-1])
        # overlap = tuple(block_size[:-1]) + (data.shape[-1],)
        # new_overlap = tuple(block_size[:-1]) + (data.shape[-1],)
        # new_overlap = tuple(block_up) + (data.shape[-1],)
        new_overlap = overlap
        # new_overlap = (2, 2, 2, data.shape[-1])

        # new_size = np.array(block_size[:-1]) * factor
        new_size = np.array(block_up, dtype=np.int16)
        new_shape = (np.array(data.shape[:-1]) * factor).astype(np.int16)
        # pad_shape = np.array(data.shape[:-1]) + (np.mod(data.shape[:-1], block_size[:-1]).astype(np.bool) * np.array(block_size[:-1])) - np.mod(data.shape[:-1], block_size[:-1])
        # new_shape = (factor * pad_shape).astype(np.int16)
        # new_shape = np.ceil(np.array(data.shape[:-1]) * new_size // np.array(block_size[:-1]) * new_size).astype(np.int16)
        # new_shape = (new_shape * new_size).astype(np.int16)
        # print(new_shape, data.shape[:-1], factor, 'shape')
        # new_shape = (int(data.shape[0] + 2*nup),
        #              int(data.shape[1] + 2*nup),
        #              int(data.shape[2] + 2*nup))
        # new_shape = np.array(new_shape)

        # new_overlap = np.array(overlap[:-1]) * factor
        # new_overlap = np.array(overlap[:-1])
        # new_overlap = np.array([2,2,2,data.shape[-1]])

        new_size = tuple(new_size) + (data.shape[-1], )
        new_shape = tuple(new_shape) + (data.shape[-1], )
        # new_overlap = tuple(new_overlap.astype(np.int16)) + (data.shape[-1],)
    else:
        overlap = (1, 1, 1)

        new_size = np.ceil(np.array(block_size) * factor).astype(np.int16)
        new_shape = np.ceil(np.array(data.shape) * factor).astype(np.int16)
        new_overlap = np.ceil(np.array(overlap) * factor).astype(np.int16)

    print(block_size, data.shape, overlap, factor)
    print(new_size, new_shape, new_overlap, factor)
    print('train 1', mask.shape, data.shape, new_size[:-1])
    # print(mask.flags)
    # 1/0
    # mask_col = extract_patches(np.broadcast_to(mask[..., None], data.shape), new_size, overlap)
    mask_col = extract_patches(mask, new_size[:-1], overlap[:-1])
    print('train 2', mask_col.shape)
    # 1/0
    dims = tuple(range(mask_col.ndim // 2, mask_col.ndim))
    shape = mask_col.shape[mask_col.ndim // 2:]
    train_idx = np.sum(mask_col, axis=dims) > (np.prod(shape) // 2)
    print(np.sum(mask_col, axis=dims).max(), np.prod(shape), shape)
    # 1/0
    print('train 3', train_idx.shape, train_idx.max(), train_idx.sum(),
          mask.sum(), mask_col.sum())
    trainer = extract_patches(data, new_size, overlap)
    X_upsampled_shape = np.prod(trainer.shape[:trainer.ndim // 2]), np.prod(
        trainer.shape[trainer.ndim // 2:])
    print(X_upsampled_shape, trainer.shape)
    # 1/0
    # trainer_grad = extract_patches(np.sum(np.gradient(data), axis=0), new_size, overlap)

    # train_data = np.concatenate((trainer[train_idx], trainer_grad[train_idx]))
    # train_data = trainer_grad[train_idx]
    train_data = trainer[train_idx]
    # train_idx = train_idx.ravel()
    # print(trainer.shape, train_idx.shape, trainer[train_idx].shape)
    # 1/0
    print(dims, train_data.shape)
    axis = tuple(range(1, train_data.ndim))
    # train_data -= train_data.mean(axis=axis, keepdims=True)

    print('train 4', train_data.shape, trainer.shape, data.shape, new_size)
    print('Constructing big D matrix')
    t = time()

    if variance is None:
        variance_large = None
    else:
        mask_large = extract_patches(mask, new_size[:-1], overlap[:-1])
        shape = mask_large.shape[mask_large.ndim // 2:]
        dims = tuple(range(mask_large.ndim // 2, mask_large.ndim))
        variance_mask = np.sum(mask_large, axis=dims) > (np.prod(shape) // 2)
        # print('variance shape', variance.shape, 0, variance_mask.shape)
        # print(extract_patches(variance, new_size[:-1], overlap[:-1]).shape)
        # print(new_overlap[:-1], new_size[:-1], overlap[:-1])
        # 1/0
        variance_large = extract_patches(variance, new_size[:-1],
                                         overlap[:-1])[variance_mask]
        # 1/0
        # variance_large = extract_patches(np.broadcast_to(variance[..., None], data.shape), new_size, overlap)[train_idx]
        print('variance shape', variance_large.shape, train_idx.shape, dims,
              data.shape)
        axis = tuple(range(1, variance_large.ndim))
        variance_large = np.median(variance_large, axis=axis)
        print('variance shape', variance_large.shape)

    # if False:#'D' in params:
    #     print('found big D, skipping')
    #     D = params['D'].copy()
    # else:
    n_atoms = int(np.prod(block_size) * 2)
    # n_atoms = int(np.prod(new_size) * 2)

    D = online_DL(train_data,
                  ncores=ncores,
                  positivity=True,
                  fit_intercept=True,
                  standardize=True,
                  nlambdas=100,
                  niter=150,
                  batchsize=256,
                  n_atoms=n_atoms,
                  variance=variance_large,
                  progressbar=True)

    print(D.shape)
    params['D'] = D

    print('mean D', np.abs(D).mean())
    print('The D is done, time {}'.format(time() - t), D.shape)

    # D_depimpe = depimp_mean(D, block_size, factor)
    # print(D_depimpe.shape, D.shape, block_size, factor, 'shape 1')

    D_depimpe = depimp_zoom(D, block_size, block_up)
    print(D_depimpe.shape, D.shape, block_size, block_up, factor, 'shape 1')
    del train_data, mask_col, shape

    t = time()
    # padding = tuple(np.array(block_up) - np.array(block_size[:-1])) + (0,)
    # padding = ((padding[0], padding[0]),
    #            (padding[1], padding[1]),
    #            (padding[2], padding[2]),
    #            (padding[3], padding[3]))
    # broad_mask = np.broadcast_to(mask[..., None], data.shape) #np.pad(np.broadcast_to(mask[..., None], data.shape), padding, mode='constant')
    # mask_small = extract_patches(broad_mask, block_size, new_overlap)
    mask_small = extract_patches(mask, block_size[:-1], new_overlap[:-1])
    dims = tuple(range(mask_small.ndim // 2, mask_small.ndim))
    shape = mask_small.shape[mask_small.ndim // 2:]

    # train_small = np.sum(mask_small, axis=dims) > (np.prod(shape)//2)
    train_small = np.sum(mask_small, axis=dims) > (np.prod(shape) // 2)
    # print(padding, data.shape)
    # data = data#np.pad(data, padding, mode='constant')
    # print(padding, data.shape)

    X_small_full = extract_patches(data, block_size, new_overlap)
    # X_small_shape = (np.prod(X_small_full.shape[:X_small_full.ndim//2]), np.prod(shape))
    X_small = X_small_full[train_small]

    # axis = tuple(range(1, X_small.ndim//2 + 1))
    # alpha = np.zeros((D.shape[1], X_small_full.shape[0]))
    # print(X_small_full.shape, D_depimpe.shape, D.shape, 'shape 2')
    # dims = tuple(range(X_small.ndim//2, X_small.ndim))
    axis = tuple(range(1, X_small.ndim))
    X_mean = X_small.mean(axis=axis)[:, None]
    # X_small -= X_mean
    # print(X_small.shape, X_mean.shape, axis, 'axis')
    # return 1
    # mkl.set_num_threads(1)
    if variance is not None:
        # broad_mask = mask #np.pad(mask, padding[:-1], mode='constant')
        # mask_small = extract_patches(broad_mask, block_size[:-1], new_overlap[:-1])
        mask_small = extract_patches(mask, block_size[:-1], new_overlap[:-1])
        shape = mask_small.shape[mask_small.ndim // 2:]
        dims = tuple(range(mask_small.ndim // 2, mask_small.ndim))
        print(mask.shape, dims, mask_small.shape)
        variance_mask = np.sum(mask_small, axis=dims) > (np.prod(shape) // 2)
        print('variance shape', variance.shape, 0)
        # broad_variance = variance #np.pad(variance, padding[:-1], mode='constant')
        variance_small = extract_patches(variance, block_size[:-1],
                                         new_overlap[:-1])[variance_mask]
        print('variance shape', variance_small.shape, variance_mask.shape,
              dims)
        axis = tuple(range(1, variance_small.ndim))
        variance_small = np.median(variance_small, axis=axis)
        print('variance shape', variance_small.shape)
    else:
        # no variance map supplied; solve_l1 below is called with variance=None
        variance_small = None

    X_small_denoised, alpha, intercept = solve_l1(X_small,
                                                  D_depimpe,
                                                  variance=variance_small,
                                                  return_all=True,
                                                  positivity=True,
                                                  nlambdas=100,
                                                  fit_intercept=True,
                                                  standardize=True,
                                                  progressbar=True)

    # reconstruct_by_indexes = False
    # if reconstruct_by_indexes:
    #     mask = train_small.ravel()
    #     return reconstruct_from_indexes(alpha, D, intercept, new_shape, mask, block_size, block_up)

    # X_small_denoised = np.zeros(10)
    # alpha = np.zeros((D.shape[1], X_small.shape[0]))
    # intercept = np.zeros((alpha.shape[1], 1))
    # mkl.set_num_threads(ncores)
    # Xhat, alpha, intercept
    print('total time : {}'.format(time() - t), train_small.shape, D.shape,
          alpha.shape)
    print('mean alpha', np.abs(alpha).mean())
    # print(X_small_denoised.shape, X_small.shape, X_upsampled_shape, D_depimpe.shape)
    # 1/0
    del X_small_denoised
    # upsampled = np.zeros((X_small_shape[0], D.shape[0]), dtype=np.float32)
    indexes = train_small.ravel()
    upsampled = np.zeros((indexes.shape[0], ) + new_size, dtype=np.float32)
    full_weights = np.ones(indexes.shape[0], dtype=np.float32)
    print(train_small.shape, indexes.shape)
    # weights = 1. / (1. + np.sum(alpha != 0, axis=0, dtype=np.float32))
    weights = 1.
    stuff = np.dot(D, alpha).T + intercept

    # fix offset to enforce original mean consistency
    # offset = X_mean - stuff.mean(axis=-1, keepdims=True)
    # stuff += offset

    print(train_small.shape, indexes.shape, new_shape)
    print(upsampled[indexes].shape, upsampled.shape)

    upsampled[indexes] = stuff.reshape((stuff.shape[0], ) + new_size)
    full_weights[indexes] = weights
    del stuff, weights, indexes

    output = reconstruct_from_blocks(upsampled,
                                     new_shape,
                                     block_size,
                                     block_up,
                                     new_overlap,
                                     weights=full_weights)
    return output
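# The ndim // 2 bookkeeping above relies on the layout of extract_patches output:
# for an N-d input it returns a 2N-d view whose first N axes index patch
# positions and whose last N axes index offsets inside each patch. A small
# sketch with synthetic data (old scikit-learn, before the 0.24 removal):
import numpy as np
from sklearn.feature_extraction.image import extract_patches

mask = np.ones((12, 12, 12))
blocks = extract_patches(mask, (3, 3, 3), extraction_step=(1, 1, 1))
print(blocks.shape)                                        # (10, 10, 10, 3, 3, 3)

block_dims = tuple(range(blocks.ndim // 2, blocks.ndim))   # within-block axes
block_shape = blocks.shape[blocks.ndim // 2:]
# keep blocks whose mask coverage exceeds half the block volume,
# mirroring the train_idx / train_small criterion above
keep = blocks.sum(axis=block_dims) > (np.prod(block_shape) // 2)
print(keep.shape, keep.all())                              # (10, 10, 10) True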
Beispiel #50
0
def test_extract_patches_deprecated():
    msg = ("The function feature_extraction.image.extract_patches has been "
           "deprecated in 0.22 and will be removed in 0.24.")
    with pytest.warns(FutureWarning, match=msg):
        extract_patches(downsampled_face)
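# Since extract_patches is removed in scikit-learn 0.24, the usual replacement
# for the dense, strided case is numpy.lib.stride_tricks.sliding_window_view
# (NumPy >= 1.20); extract_patches_2d remains available when a flat
# (n_patches, h, w) array with step 1 or random sampling is enough.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

img = np.random.rand(64, 64)
patch_size, step = 8, 4
patches = sliding_window_view(img, (patch_size, patch_size))[::step, ::step]
print(patches.shape)    # (15, 15, 8, 8), same grid+patch layout as extract_patches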
def slice_U_Patch_Preprocess_recon_2D(patch_size_x=5,patch_size_y=5,prefix='SdA',in_root='',out_root='',slice_num=1, data_augment=False):
    
    #Initialize user variables
    patch_size = patch_size_x
    patch_pixels = patch_size*patch_size
    pixel_offset = int(patch_size*0.5)
    padding = patch_size
    #threshold = patch_pixels*0.3
    recon_num = 4
    patches = np.zeros(patch_pixels*recon_num)
    ground_truth = np.zeros(1)
    
    #paths to images
    path = in_root
    
    Flair = []
    T1 = []
    T2 = []
    T_1c = []
    Truth = []
    Folder = []
    
    for subdir, dirs, files in os.walk(path):
        # if len(Flair) is 1:
        #     break
        for file1 in files:
            #print file1
            if file1[-3:]=='nii' and ('Flair' in file1):
                
                Flair.append(file1)
                Folder.append(subdir+'/')
            elif file1[-3:]=='nii' and ('T1' in file1 and 'T1c' not in file1):
                T1.append(file1)
            elif file1[-3:]=='nii' and ('T2' in file1):
                T2.append(file1)
            elif file1[-3:]=='nii' and ('T1c' in file1 or 'T_1c' in file1):
                T_1c.append(file1)
            elif file1[-3:]=='mha' and 'OT' in file1:
                Truth.append(file1)
                
    number_of_images = len(Flair)
    print 'Number of images : ', number_of_images
    
    
    for image_iterator in range(number_of_images):
        print 'Iteration : ',image_iterator+1
        print 'Folder : ', Folder[image_iterator]
        Flair_image = nib.load(Folder[image_iterator]+Flair[image_iterator])
        T1_image = nib.load(Folder[image_iterator]+T1[image_iterator])
        T2_image = nib.load(Folder[image_iterator]+T2[image_iterator])
        T_1c_image = nib.load(Folder[image_iterator]+T_1c[image_iterator])
        try:
            Truth_image = new(Folder[image_iterator]+Truth[image_iterator])
        except:
            Truth_image = new2(Folder[image_iterator]+Truth[image_iterator])
        Flair_image = Flair_image.get_data()
        T1_image = T1_image.get_data()
        T2_image = T2_image.get_data()
        T_1c_image = T_1c_image.get_data()
        Truth_image = Truth_image.data
        
        if slice_num ==2:
            Flair_image = np.swapaxes(Flair_image,0,1)
            Flair_image = np.swapaxes(Flair_image,1,2)
            T1_image = np.swapaxes(T1_image, 0,1)
            T1_image = np.swapaxes(T1_image, 1, 2)
            T2_image = np.swapaxes(T2_image, 0,1)
            T2_image = np.swapaxes(T2_image, 1, 2)
            T_1c_image = np.swapaxes(T_1c_image, 0,1)
            T_1c_image = np.swapaxes(T_1c_image, 1, 2)
            Truth_image = np.swapaxes(Truth_image,0,1)
            Truth_image = np.swapaxes(Truth_image,1,2)
        elif slice_num == 3:
            Flair_image = np.swapaxes(Flair_image,0,1)
            Flair_image = np.swapaxes(Flair_image,0,2)
            T1_image = np.swapaxes(T1_image, 0,1)
            T1_image = np.swapaxes(T1_image, 0, 2)
            T2_image = np.swapaxes(T2_image, 0,1)
            T2_image = np.swapaxes(T2_image, 0, 2)
            T_1c_image = np.swapaxes(T_1c_image, 0,1)
            T_1c_image = np.swapaxes(T_1c_image, 0, 2)
            Truth_image = np.swapaxes(Truth_image,0,1)
            Truth_image = np.swapaxes(Truth_image,0,2)


        x_span,y_span,z_span = np.where(Truth_image!=0)
        
        start_slice = min(z_span)
        stop_slice = max(z_span)
        print start_slice, stop_slice
        image_patch = np.zeros(patch_size*patch_size*recon_num)
        image_label = np.zeros(1)
        for i in range(start_slice, stop_slice+1):
            #print 'Slice : '+str(i)    
            Flair_slice = np.transpose(Flair_image[:,:,i])
            T1_slice = np.transpose(T1_image[:,:,i])
            
            T2_slice = np.transpose(T2_image[:,:,i])
            T_1c_slice = np.transpose(T_1c_image[:,:,i])    
            Truth_slice = np.transpose(Truth_image[:,:,i])
            
            x_dim,y_dim = np.size(Flair_slice,axis=0), np.size(Flair_slice, axis=1)
            
            x_span,y_span = np.where(Truth_slice!=0)
            if len(x_span)==0 or len(y_span)==0:
                continue
            x_start = np.min(x_span) - padding
            x_stop = np.max(x_span) + padding+1
            y_start = np.min(y_span) - padding
            y_stop = np.max(y_span) + padding+1
            #print 'X start, X stop ', x_start,x_stop
            #print 'Y start, Y stop ', y_start, y_stop
            Flair_patch = image.extract_patches(Flair_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T1_patch = image.extract_patches(T1_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T2_patch = image.extract_patches(T2_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            T_1c_patch = image.extract_patches(T_1c_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)     
            Truth_patch = image.extract_patches(Truth_slice[x_start:x_stop, y_start:y_stop], patch_size, extraction_step = pixel_offset)
            
            if data_augment == True:
                Fp = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1], patch_size_x, patch_size_y)
                T1p = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1], patch_size,patch_size)
                T2p = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1], patch_size,patch_size)  
                T1cp = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1], patch_size,patch_size)
                Truthp = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1], patch_size, patch_size)
                Truthp = Truthp[:,(patch_size-1)/2,(patch_size-1)/2]
                Truthp = np.array(Truthp)
                Truthp = Truthp.reshape(len(Truthp))

                indexx = np.where(Truthp!=0)
                # print indexx[0]
                indexx=indexx[0]

                Fp = Fp[indexx,:,:]
                T1p = T1p[indexx,:,:]
                T2p = T2p[indexx,:,:]
                T1cp = T1cp[indexx,:,:]
                Truthp = Truthp[indexx]

                for angle in range(3):
                    Fp = np.asarray([np.rot90(Fp[x,:,:]) for x in range(Fp.shape[0])])
                    T1p = np.asarray([np.rot90(T1p[x,:,:]) for x in range(T1p.shape[0])])
                    T2p = np.asarray([np.rot90(T2p[x,:,:]) for x in range(T2p.shape[0])])
                    T1cp = np.asarray([np.rot90(T1cp[x,:,:]) for x in range(T1cp.shape[0])])

                    Fp_n = np.asarray(Fp)
                    T1p_n = np.asarray(T1p)
                    T2p_n = np.asarray(T2p)
                    T1cp_n = np.asarray(T1cp)

                    Fp_n = Fp_n.reshape(Fp_n.shape[0], patch_size_x*patch_size_y)
                    T1p_n = T1p_n.reshape(T1p_n.shape[0], patch_size_x*patch_size_y)
                    T2p_n = T2p_n.reshape(T2p_n.shape[0], patch_size_x*patch_size_y)
                    T1cp_n = T1cp_n.reshape(T1cp_n.shape[0], patch_size_x*patch_size_y)
                    Truthp_n = Truthp.reshape(len(Truthp),1)
                    sp_n = np.concatenate([Fp_n, T1p_n, T2p_n, T1cp_n], axis=1)

                    patches = np.vstack([patches, sp_n])
                    ground_truth = np.vstack([ground_truth, Truthp_n])
                    # print 'Angle : '+str(90*(angle+1))
                    # print 'Augmented Data: '+str(Fp_n.shape[0])+' patches'

            # print Flair_patch.shape
            Flair_patch = Flair_patch.reshape(Flair_patch.shape[0]*Flair_patch.shape[1], patch_size*patch_size)
            T1_patch = T1_patch.reshape(T1_patch.shape[0]*T1_patch.shape[1], patch_size*patch_size)
            T2_patch = T2_patch.reshape(T2_patch.shape[0]*T2_patch.shape[1], patch_size*patch_size)  
            T_1c_patch = T_1c_patch.reshape(T_1c_patch.shape[0]*T_1c_patch.shape[1], patch_size*patch_size)      
            Truth_patch = Truth_patch.reshape(Truth_patch.shape[0]*Truth_patch.shape[1], patch_size, patch_size)
            
            #print '2. truth dimension :', Truth_patch.shape
            slice_patch = np.concatenate([Flair_patch, T1_patch, T2_patch, T_1c_patch], axis=1)
            Truth_patch = Truth_patch[:,(patch_size-1)/2,(patch_size-1)/2]
            Truth_patch = np.array(Truth_patch)
            Truth_patch = Truth_patch.reshape(len(Truth_patch),1)
            
            patches = np.vstack([patches,slice_patch])
            ground_truth = np.vstack([ground_truth, Truth_patch])
    print 'Number of non-zeros in ground truth : ', np.sum((ground_truth!=0).astype(int))
    print 'Number of zeros in ground truth : ', np.sum((ground_truth==0).astype(int))
    # ground_truth[np.where(ground_truth==3)]=1
    # ground_truth[np.where(ground_truth==4)]=1
    print
    print 'No. of 1 : ', np.sum((ground_truth==1).astype(int))
    print 'No. of 2 : ', np.sum((ground_truth==2).astype(int))
    print 'No. of 3 : ', np.sum((ground_truth==3).astype(int))
    print 'No. of 4 : ', np.sum((ground_truth==4).astype(int))
    
    ground_truth = ground_truth.reshape(len(ground_truth))
    print 'Shape of Un-balanced patches numpy array : ',patches.shape
    print 'Shape of Un-balanced ground truth : ',ground_truth.shape
    
    patches = np.float32(patches)
    ground_truth = np.float32(ground_truth)
    if 'training' in out_root:
        print'... Saving the 2D training patches'
        np.save(out_root+'u_trainpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_trainlabel_2D_'+prefix+'_.npy',ground_truth)
        
    elif 'validation' in out_root:
        print '... Saving the 2D validation patches'
        np.save(out_root+'u_validpatch_2D_'+prefix+'_.npy',patches)
        np.save(out_root+'u_validlabel_2D_'+prefix+'_.npy',ground_truth)
			y_start = min(y_span) - padding
			y_stop = max(y_span) + padding + 1

			if ((x_stop - x_start) * (y_stop - y_start)) < patch_size * patch_size:
				x_start = x_start - 20
				x_stop = x_stop + 20
				y_start = y_start - 20
				y_stop = y_stop + 20

			for j in xrange(start_slice, stop_slice):

				image_slice = img[:,:,j]
				truth_slice = truth[:,:,j]

				patch = image.extract_patches(image_slice[x_start:x_stop,y_start:y_stop], patch_size, extraction_step = 1)
				patch = patch.reshape(patch.shape[0]*patch.shape[1],patch_size,patch_size)

				truth_patch = image.extract_patches(truth_slice[x_start:x_stop,y_start:y_stop], patch_size, extraction_step = 1)
				truth_patch = truth_patch.reshape(truth_patch.shape[0]*truth_patch.shape[1],patch_size,patch_size)

				truth_values = truth_patch[:, (patch_size - 1)/2, (patch_size -1)/2]

				patches = np.append(patches,patch,axis=0)
				ground_truths = np.append(ground_truths,truth_values,axis=0)

			patches = patches[1:patches.shape[0]]
			ground_truths = ground_truths[1:ground_truths.shape[0]]

		except:
			print '==> Error in label: ', z