def test_scipy_filter_gaussian_laplace(self, width):
    """
    Test MexicanHat kernels against SciPy ndimage gaussian laplace filters.
    """
    mexican_kernel_1D = MexicanHat1DKernel(width)
    mexican_kernel_2D = MexicanHat2DKernel(width)

    astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D,
                          boundary='fill', normalize_kernel=False)
    astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D,
                          boundary='fill', normalize_kernel=False)

    with pytest.raises(Exception) as exc:
        astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D,
                              boundary='fill', normalize_kernel=True)
    assert 'sum is close to zero' in exc.value.args[0]

    with pytest.raises(Exception) as exc:
        astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D,
                              boundary='fill', normalize_kernel=True)
    assert 'sum is close to zero' in exc.value.args[0]

    # The Laplace of Gaussian filter is an inverted Mexican Hat
    # filter.
    scipy_1D = -filters.gaussian_laplace(delta_pulse_1D, width)
    scipy_2D = -filters.gaussian_laplace(delta_pulse_2D, width)

    # There is a slight deviation in the normalization. They differ by a
    # factor of ~1.0000284132604045. The reason is not known.
    assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
    assert_almost_equal(astropy_2D, scipy_2D, decimal=5)
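# A minimal sketch (not part of the test above) of why normalize_kernel=True
# raises: the Mexican Hat (Laplacian of Gaussian) kernel integrates to
# roughly zero, so dividing by its sum is ill-defined. Assumes an older
# astropy; since astropy 4.0 the class is named RickerWavelet2DKernel.
from astropy.convolution import MexicanHat2DKernel

kernel = MexicanHat2DKernel(2)
print(kernel.array.sum())  # very close to 0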
def loadImage(self, fname):
    self._imageFilename = fname
    self.templateBox((0, 0, 0, 0))
    self._boxes = []
    self._currentBox = None
    logging.info("Load image %s" % fname)
    self._image = array(Image.open(fname).convert('L'), dtype='f') / 255
    (h, w) = shape(self._image)
    self._imageSize = (w, h)
    im1 = filters.gaussian_filter(self._image, 2)
    im2 = filters.gaussian_filter(self._image, 3)
    im4 = filters.gaussian_filter(self._image, 4)
    im8 = filters.gaussian_filter(self._image, 5)
    features = []
    features.append(im1)
    features.append(im2)
    features.append(im4)
    features.append(im8)
    features.append(filters.gaussian_laplace(self._image, 2))
    features.append(filters.gaussian_laplace(self._image, 3))
    features.append(filters.gaussian_laplace(self._image, 4))
    features.append(filters.gaussian_laplace(self._image, 5))
    features.append(filters.sobel(im4, 0))
    features.append(filters.sobel(im8, 0))
    features.append(filters.sobel(im4, 1))
    features.append(filters.sobel(im8, 1))
    self._features = dstack(features)
def compute_hfen(prediction, target):
    """Calculates the high frequency error norm [1] between target and
    prediction. Implementation follows [2], who define a normalized version
    of HFEN.

    [1]: Ravishankar and Bresler: MR Image Reconstruction From Highly
         Undersampled k-Space Data by Dictionary Learning, 2011
    [2]: Han et al: Image Reconstruction Using Analysis Model Prior, 2016

    Parameters
    ----------
    prediction : torch.Tensor
        Predicted image
    target : torch.Tensor
        Target image
    """
    from scipy.ndimage.filters import gaussian_laplace

    # HFEN is defined to use a kernel of size 15x15. Kernel size is defined
    # as 2 * int(truncate * sigma + 0.5) + 1, so we have to use truncate=4.5
    pred_filtered = gaussian_laplace(prediction.data, truncate=4.5, sigma=1.5)
    target_filtered = gaussian_laplace(target.data, truncate=4.5, sigma=1.5)

    norm_diff = np.linalg.norm((pred_filtered - target_filtered).flatten())
    norm_target = np.linalg.norm(target_filtered.flatten())
    return norm_diff / norm_target
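# Illustrative usage of compute_hfen (shapes and values are assumptions, not
# from the original source): scipy converts CPU tensors to numpy arrays, and
# the truncate keyword is forwarded to the underlying Gaussian filter.
# Assumes numpy is imported as np at module scope and tensors live on CPU.
import torch

pred = torch.rand(64, 64)
tgt = torch.rand(64, 64)
print(compute_hfen(pred, tgt))  # scalar; 0 for identical images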
def HFEN(x, y, sigma=1.5, truncate=4.5):
    """High Frequency Error Norm

    Works for any dimension, but the input needs to be float.

    Gaussian kernel size is defined by 2*int(truncate*sigma + 0.5) + 1, so
    for a 15x15 Gauss kernel with sigma 1.5: truncate=4.5, sigma=1.5
    for a 13x13 Gauss kernel with sigma 1.5: truncate=4,   sigma=1.5

    Parameters
    ----------
    x : reference
    y : reconstruction

    Returns
    -------
    hfen : scalar
    """
    # Forward truncate so the kernel size actually follows the docstring
    # (the original never passed it on).
    x_log = gaussian_laplace(abs(x), sigma, truncate=truncate)
    y_log = gaussian_laplace(abs(y), sigma, truncate=truncate)
    return np.linalg.norm(x_log.reshape(-1) - y_log.reshape(-1)) / np.linalg.norm(x_log)
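# A quick arithmetic check (added for illustration) of the kernel-size
# formula quoted in the HFEN docstrings:
# size = 2 * int(truncate * sigma + 0.5) + 1
for truncate, sigma in [(4.5, 1.5), (4.0, 1.5)]:
    print(truncate, sigma, 2 * int(truncate * sigma + 0.5) + 1)
# -> 15 for truncate=4.5 and 13 for truncate=4, as stated above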
def harris(im):
    '''
    gauss1d_k = numpy.array(gen_gauss1d_k([-1, 0, 1], 1))
    res1 = numpy.zeros(im.shape, dtype=numpy.uint8)
    for i in range(im.shape[0]):
        for j in range(im.shape[1]):
            if j >= (len(gauss1d_k)//2) and j <= (im.shape[1] - len(gauss1d_k)//2 - 1):  # Skipping the border pixels
                res1[i][j] = int(sum(im[i][j-len(gauss1d_k)//2:j+1+len(gauss1d_k)//2]*gauss1d_k))
            else:  # copying the border pixels of the original image as it is
                res1[i][j] = im[i][j]
    '''
    #Gim = gaussian_filter(im, 1.5)
    #print Gim
    #log = create_log(2, 7)
    derx = numpy.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])  # derivative filter in X direction
    dery = derx.transpose()  # derivative filter on y component
    Ix = cv2.filter2D(im, -1, derx)  # derivative of X component of image
    Iy = cv2.filter2D(im, -1, dery)
    # Note: this recomputes Iy; the cross term Ix * Iy was likely intended.
    Ixy = cv2.filter2D(im, -1, dery)
    Lx = gaussian_laplace(Ix, 1.5)  #cv2.filter2D(Ix, -1, log)
    Ly = gaussian_laplace(Iy, 1.5)  #cv2.filter2D(Iy, -1, log)
    Lxy = gaussian_laplace(Ixy, 1.5)  #cv2.filter2D(Ixy, -1, log)
    alpha = 0.05
    R = numpy.empty(im.shape)
    res = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
    for i in range(im.shape[0]):
        for j in range(im.shape[1]):
            h2 = numpy.matrix([[Lx[i, j] * Lx[i, j], Lxy[i, j]],
                               [Lxy[i, j], Ly[i, j] * Ly[i, j]]])
            R[i, j] = numpy.linalg.det(h2) - (alpha * numpy.trace(h2))
    th = (numpy.max(numpy.absolute(R))) * 0.99
    for i in range(im.shape[0]):
        for j in range(im.shape[1]):
            if fabs(R[i, j]) > th:
                res[i, j] = [0, 0, 255]
    cv2.imshow("im", im)
    cv2.waitKey(0)
    cv2.imshow("Ix", Ix)
    cv2.waitKey(0)
    cv2.imshow("Iy", Iy)
    cv2.waitKey(0)
    cv2.imshow("Lx", Lx)
    cv2.waitKey(0)
    cv2.imshow("Ly", Ly)
    cv2.waitKey(0)
    cv2.imshow("Lxy", Lxy)
    cv2.waitKey(0)
    cv2.imshow("res", res)
    cv2.waitKey(0)
def edge_coordinates(image, alg='log', sigma=0.5):
    """Separates the edges of each area in the input image, returning the
    corresponding coordinates.

    Parameters
    ----------
    image : array
        Grayscale input image.
    alg : string, optional (default : 'log')
        Algorithm used to extract edges. Accepts the values 'log', for
        Laplacian of Gaussian, and 'canny', for Canny. Default is 'log'.
    sigma : float, optional (default : 0.5)
        Sigma value used on the algorithm.

    Returns
    -------
    regions : array
        Array containing the regions separated by labeled region.
    edges : list
        List containing the coordinates to the edge points, separated
        according to their region.
    """
    img_label, num_regions = measure.label(image, return_num=True)

    # Use callables so only the selected filter runs. Note: the filtered
    # edge image is currently unused; the edge coordinates below come from
    # measure.find_contours (in the original, this result was immediately
    # overwritten).
    alg2edge = {
        'log': lambda: gaussian_laplace(image, sigma),
        'canny': lambda: canny(image, sigma),
    }
    edge_img = alg2edge.get(alg, alg2edge['log'])()

    edges = [list() for _ in range(num_regions)]
    rows, cols = img_label.shape
    regions = np.empty((num_regions, rows, cols))

    # separating regions and their edges, according to measure.label()
    for num in range(1, num_regions + 1):
        regions[num - 1] = img_label == num
        contour = measure.find_contours(regions[num - 1], level=0)
        edges[num - 1] = contour[0].astype(int).tolist()

    # ordering edge pixels
    for edge in edges:
        # calculating centroid
        cntrd = (sum([pt[0] for pt in edge]) / len(edge),
                 sum([pt[1] for pt in edge]) / len(edge))
        # sorting by polar angle
        edge.sort(key=lambda pt: atan2(pt[1] - cntrd[1], pt[0] - cntrd[0]))

    return regions, edges
def laplace(data, sigmas):
    """Smooth data by convolving with the 2nd derivative of a Gaussian kernel."""
    assert len(data.shape) == len(sigmas)
    from scipy.ndimage.filters import gaussian_laplace
    return gaussian_laplace(data.astype(float), sigmas)
def prepare_images():
    '''Preprocess the images and store them in the `data` dictionary.'''
    data = {}
    data['original'] = []
    data['gaussian'] = []
    data['targets'] = []
    # for folder in FOLDERS:
    imgs = []
    targets = []
    gradient_images = []
    for folder in FOLDERS:
        directory = DIR + str(folder)
        for camera in CAMERAS:
            for number in range(10):
                path = join(directory, str(camera).join(K_FILENAME) + str(number) + '.bmp')
                image = imread(path, mode='L')
                for j in range(0, len(image[0]) - 1, C_PIXELS):
                    # Crop images to 28x28
                    img = image[7:35, j + 3:j + 31]
                    imgs.append(img)
                    targets.append(number)
                    # Get image gradient with Laplacian of the Gaussian
                    laplacian = gaussian_laplace(img, sigma=2)
                    gradient_images.append(laplacian)
    # Add images to data dictionary.
    data['original'] = imgs
    data['gaussian'] = gradient_images
    data['targets'] = targets
    return data
def laplacian(data, sigmas):
    """Apply Laplacian filter"""
    assert len(data.shape) == len(sigmas)
    from scipy.ndimage.filters import gaussian_laplace
    return gaussian_laplace(data.astype(float), sigmas)
def clean_frame(frame, median_radius=6, log_sigma=4):
    """
    Cleanup function to be run on SRAD output. Median filter for further
    denoising, followed by edge sharpening with a Laplacian of Gaussian
    (LoG) mask.

    Inputs: ndarray image, filter kernel settings
        median_radius: median filter radius; should be an odd integer
        log_sigma: LoG sigma; controls kernel size
    Output: cleaned; a processed ndarray
    """
    # TODO provide default for median_radius that is
    # sensitive to image dimensions
    norm_check(frame)
    # median filter
    cleaned = median_filter(frame, median_radius)
    # add LoG, protecting against overflow
    logmask = gaussian_laplace(cleaned, log_sigma)
    frame_ceil = np.finfo(frame.dtype).max
    logmask = frame_ceil - logmask
    np.putmask(cleaned, logmask < cleaned, logmask)
    cleaned += frame_ceil - logmask
    return cleaned
def blob_detector_downsample(image):
    threshold = 0.003
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = gray.astype(np.float32) / 255.
    h, w = gray.shape
    scale_space = np.zeros((h, w, level))
    for i in range(level):
        scale = k**i
        scale_gray = transform.resize(gray, (int(h / scale), int(w / scale)),
                                      mode='reflect')
        square_lg = gaussian_laplace(scale_gray, sigma=initial)**2
        scale_space[:, :, i] = transform.resize(square_lg, (h, w),
                                                mode='reflect')
    nms_3d = generic_filter(scale_space, nms, size=(3, 3, 3))
    # nms_3d = rank_nms(scale_space)
    # nms_3d = nms_3d/np.max(nms_3d)
    cx = []
    cy = []
    radius = []
    for i in range(level):
        sigma = initial * k**i
        cx.append(list(np.where(nms_3d[:, :, i] > threshold)[1]))
        cy.append(list(np.where(nms_3d[:, :, i] > threshold)[0]))
        radius.append([np.sqrt(2) * sigma] * len(cx[i]))
    cx = np.concatenate(cx)
    cy = np.concatenate(cy)
    radius = np.concatenate(radius)
    return gray, cx, cy, radius
def blob_detector_increase(image):
    threshold = 0.01
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = gray.astype(np.float32) / 255.
    h, w = gray.shape
    scale_space = np.zeros((h, w, level))
    # Laplacian of Gaussian filter
    for i in range(level):
        sigma = initial * k**i
        scale_normalize = sigma**2 * gaussian_laplace(gray, sigma=sigma)
        scale_space[:, :, i] = scale_normalize**2
    # generic filter
    nms_3d = generic_filter(scale_space, nms, size=(3, 3, 3))
    # nms_3d = rank_nms(scale_space)
    # nms_3d = nms_3d/np.max(nms_3d)
    cx = []
    cy = []
    radius = []
    for i in range(level):
        sigma = initial * k**i
        cx.append(list(np.where(nms_3d[:, :, i] > threshold)[1]))
        cy.append(list(np.where(nms_3d[:, :, i] > threshold)[0]))
        radius.append([np.sqrt(2) * sigma] * len(cx[i]))
    cx = np.concatenate(cx)
    cy = np.concatenate(cy)
    radius = np.concatenate(radius)
    return gray, cx, cy, radius
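# A standalone sanity check (added for illustration, not from the original
# detectors): for a bright disk of radius r, the scale-normalized response
# sigma**2 * gaussian_laplace peaks near sigma = r / sqrt(2), which is why
# the detectors above report radius = sqrt(2) * sigma for each blob.
import numpy as np
from scipy.ndimage import gaussian_laplace

r = 10
yy, xx = np.mgrid[-50:51, -50:51]
disk = (xx**2 + yy**2 <= r**2).astype(float)

sigmas = np.linspace(2, 14, 25)
responses = [-(s**2) * gaussian_laplace(disk, s)[50, 50] for s in sigmas]
print(sigmas[np.argmax(responses)])  # close to r / sqrt(2) ~ 7.07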
def do_si_kp_detection():
    gray = cv2.imread('./synthetic.png', 0)
    synthetic_img_copy = cv2.imread('./synthetic.png').copy()
    sigma_list = [3, 5, 9, 12, 25, 30, 35]
    img_result = []
    rows, columns = gray.shape
    result = np.zeros((rows, columns))
    for sigma in sigma_list:
        #gau_img = cv2.GaussianBlur(gray, (5, 5), 5)
        lap_img = gaussian_laplace(gray, sigma)
        nms_img = perform_non_maximal_suppression(lap_img, 50)
        lap_img[lap_img < 0.035] = 0
        lap_img[lap_img > 10] = 0
        img_result.append(nms_img)
    # find the maximum among the images that have the Laplacian of Gaussian applied
    for i in range(0, rows):
        for j in range(0, columns):
            max = 0
            for processed_img in img_result:
                temp = processed_img[i][j]
                if temp > max:
                    max = temp
            result[i][j] = max
    # draw circles on the original image to mark the interest points
    for s in range(0, result.shape[0]):
        for t in range(0, result.shape[1]):
            if not (result[s][t] == 0):
                cv2.circle(synthetic_img_copy, (t, s), 10, (255, 0, 0))
    cv2.imwrite("./si_kp_detection.jpg", synthetic_img_copy)
def pick_coords(self, arr, i_list, j_list):
    HYPNUM = 5
    gl = gaussian_laplace(arr, 1)
    # Divide the image into (BLK-1) x (BLK-1) blocks and record each
    # block's minimum.
    BLK = 5
    hbl = np.linspace(0, gl.shape[0], BLK, dtype=int)
    wbl = np.linspace(0, gl.shape[1], BLK, dtype=int)
    gl_min = []
    for (h1, h2) in zip(hbl[:-1], hbl[1:]):
        for (w1, w2) in zip(wbl[:-1], wbl[1:]):
            gl_min.append(gl[h1:h2, w1:w2].min())
    gl_min.sort()
    ji_list = []
    # plt.imshow(arr)
    # Pick the top HYPNUM minima from the blocks.
    j_all, i_all = [], []
    for ii in range(HYPNUM):
        a1, a2 = np.where(gl == gl_min[ii])
        # if len(a1) > 1:
        a1, a2 = a1[0], a2[0]
        j_all.append(j_list[a2])
        i_all.append(i_list[a1])
    return j_all, i_all
def count_maxima_laplace(par_obj, time_pt, fileno, reset_max=False):
    # count maxima won't work properly if a random set of Z has been selected
    min_d = par_obj.min_distance
    imfile = par_obj.filehandlers[fileno]
    #if par_obj.min_distance[2] == 0 or par_obj.max_z == 0:
    #    count_maxima_2d(par_obj, time_pt, fileno)
    #    return
    predMtx = np.zeros((par_obj.height, par_obj.width, imfile.max_z + 1))
    for i in range(imfile.max_z + 1):
        predMtx[:, :, i] = par_obj.data_store['pred_arr'][fileno][time_pt][i]

    laplace = -filters.gaussian_laplace(predMtx, min_d, mode='constant')

    # If not already set, create. This is then used for the entire image and
    # all subsequent training. A little hacky, but otherwise the
    # normalisation screws everything up.
    if not par_obj.max_det:
        par_obj.max_det = np.max(laplace)
    elif reset_max:
        par_obj.max_det = np.max(laplace)
    laplace = laplace / par_obj.max_det

    par_obj.data_store['maxi_arr'][fileno][time_pt] = {}
    for i in range(imfile.max_z + 1):
        par_obj.data_store['maxi_arr'][fileno][time_pt][i] = laplace[:, :, i]

    pts = peak_local_max(laplace, min_distance=min_d,
                         threshold_abs=par_obj.abs_thr)

    pts2keep = []
    for pt2d in pts:
        # determinants of submatrices
        pts2keep.append([pt2d[0], pt2d[1], pt2d[2], 1])
    pts = pts2keep
    par_obj.show_pts = 1

    # Filter out points which are not inside the region.
    if len(par_obj.data_store['roi_stkint_x'][fileno][time_pt]) > 0:
        pts2keep = []
        for i in par_obj.data_store['roi_stkint_x'][fileno][time_pt]:
            for pt2d in pts:
                if pt2d[2] == i:
                    # Find the region of interest.
                    ppt_x = par_obj.data_store['roi_stkint_x'][fileno][time_pt][i]
                    ppt_y = par_obj.data_store['roi_stkint_y'][fileno][time_pt][i]
                    # Reformat to make the path object.
                    pot = []
                    for b in range(0, len(ppt_x)):
                        pot.append([ppt_x[b], ppt_y[b]])
                    p = Path(pot)
                    if p.contains_point([pt2d[1], pt2d[0]]) is True:
                        pts2keep.append(pt2d)
        pts = pts2keep

    par_obj.data_store['pts'][fileno][time_pt] = pts
def test_scipy_filter_gaussian_laplace(self, width):
    """
    Test MexicanHat kernels against SciPy ndimage gaussian laplace filters.
    """
    mexican_kernel_1D = MexicanHat1DKernel(width)
    mexican_kernel_2D = MexicanHat2DKernel(width)
    astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill')
    astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill')
    scipy_1D = filters.gaussian_laplace(delta_pulse_1D, width)
    scipy_2D = filters.gaussian_laplace(delta_pulse_2D, width)
    # There is a slight deviation in the normalization. They differ by a
    # factor of ~1.0000284132604045. The reason is not known.
    assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
    assert_almost_equal(astropy_2D, scipy_2D, decimal=5)
def pick_coords(self, arr, i_list, j_list):
    # cut 10% due to the instability of GL at edges
    cri, crj = int(arr.shape[0] * 0.1), int(arr.shape[1] * 0.1)
    arr = arr[cri:-cri, crj:-crj]
    i_list, j_list = i_list[cri:-cri], j_list[crj:-crj]
    self.gl = gaussian_laplace(arr, 1)
    a1, a2 = np.where(self.gl == self.gl.min())
    return j_list[a2[0]], i_list[a1[0]]
def test_scipy_filter_gaussian_laplace(self, width):
    """
    Test MexicanHat kernels against SciPy ndimage gaussian laplace filters.
    """
    mexican_kernel_1D = MexicanHat1DKernel(width)
    mexican_kernel_2D = MexicanHat2DKernel(width)
    astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill')
    astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill')
    # The Laplace of Gaussian filter is an inverted Mexican Hat
    # filter.
    scipy_1D = -filters.gaussian_laplace(delta_pulse_1D, width)
    scipy_2D = -filters.gaussian_laplace(delta_pulse_2D, width)
    # There is a slight deviation in the normalization. They differ by a
    # factor of ~1.0000284132604045. The reason is not known.
    assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
    assert_almost_equal(astropy_2D, scipy_2D, decimal=5)
def apply_filter(similarity_matrix, filter_type='median', params=None):
    if filter_type == 'median':
        mask = np.ones(params[0])
        return median_filter(similarity_matrix, footprint=mask)
    if filter_type == 'gaussian_laplace':
        return gaussian_laplace(similarity_matrix, sigma=params[0])
    return None
def logSlice(image, sigma_list, threshold):
    gl_images = [-gaussian_laplace(image, s) * (s**2) for s in sigma_list]
    # get the mask
    seg = np.zeros_like(image)
    for zi in range(len(sigma_list)):
        seg = np.logical_or(seg, gl_images[zi] > threshold)
    return seg
def log_ndi(data, *args, **kwargs):
    """Apply laplacian of gaussian to each image in a stack of shape
    (..., I, J). Extra arguments are passed to
    scipy.ndimage.filters.gaussian_laplace.
    """
    from scipy.ndimage.filters import gaussian_laplace
    h, w = data.shape[-2:]
    arr = []
    for frame in data.reshape((-1, h, w)):
        arr += [gaussian_laplace(frame, *args, **kwargs)]
    return np.array(arr).reshape(data.shape)
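# Illustrative usage of log_ndi (the stack here is synthetic): apply a
# sigma-1.5 LoG to every 2D frame of a (T, I, J) array in one call, with the
# extra positional argument forwarded to gaussian_laplace as the sigma.
import numpy as np

stack = np.random.rand(4, 64, 64)
filtered = log_ndi(stack, 1.5)
assert filtered.shape == stack.shape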
def applyGaussionFilter():
    portrait = cv.imread("./portrait.jpg")
    portraitGray = cv.cvtColor(portrait, cv.COLOR_BGR2GRAY)
    # apply derivative of Gaussian filter
    result7_derivative = gaussian_filter(portraitGray, 3, order=[1, 1])
    cv.imwrite("./result7_derivative.jpg", result7_derivative)
    # apply Laplacian of Gaussian filter
    result7_lap = gaussian_laplace(portraitGray, sigma=3)
    cv.imwrite("./result7_lap.jpg", result7_lap)
def sphere_log(data, scales=range(5, 9, 1), anisotropy_factor=5.0):
    data = asarray(data)
    scales = asarray(scales)
    log = empty((len(scales),) + data.shape, dtype=data.dtype)
    for slog, scale in tzip(log, scales):
        slog[...] = scale**2 * gaussian_laplace(
            data, asarray([scale / anisotropy_factor, scale, scale]))
    peaks = local_minima(log)  # SZYX
    peaks_subset, peaks_list, threshold = get_peaks_subset(log, peaks, scales)
    return peaks_subset, peaks_list, log, peaks, threshold
def l_o_g(img, sigma):
    '''
    Laplacian of Gaussian filter (channel-wise)
    -> img: input image
    -> sigma: gaussian_laplace sigma
    <- filtered image
    '''
    while len(img.shape) < 3:
        img = img[..., np.newaxis]
    out = img.copy()
    for chan in range(img.shape[2]):
        out[..., chan] = filters.gaussian_laplace(img[..., chan], sigma)
    return out
def enhancedFeatureExtractor(datum):
    """
    Returns a feature vector of the image datum.

    Args:
        datum: 2-dimensional numpy.array representing a single image.

    Returns:
        A 1-dimensional numpy.array of features designed by you. The
        features can have any length.

    ## DESCRIBE YOUR ENHANCED FEATURES HERE...
    ##
    """
    features = basicFeatureExtractor(datum)

    symmetry = get_symmetry(datum).flatten()

    datum_2d = gaussian_filter(datum.reshape([28, 28]), 0.8)
    vert_convolve = signal.convolve2d(datum_2d, [[1, -1]])
    neighborhood = scipy.ndimage.morphology.generate_binary_structure(2, 2)
    maximum_filter(vert_convolve, footprint=np.ones((3, 3)))  # result unused
    # vert_convolve = np.asarray(vert_convolve)[vert_convolve == maximum_filter(vert_convolve, footprint=np.ones((3,3)))]
    vert_convolve = max_pooling(vert_convolve, 2).flatten()

    hor_convolve = signal.convolve2d(datum_2d, [[1], [-1]])
    # hor_convolve = hor_convolve[hor_convolve == maximum_filter(hor_convolve, footprint=np.ones((3,3)))]
    hor_convolve = max_pooling(hor_convolve, 2).flatten()

    log_convolve = gaussian_laplace(datum.reshape([28, 28]), 1.8).flatten()
    chamfer = chamferDist(datum).flatten()

    pixel_feats = [features, symmetry, vert_convolve, hor_convolve,
                   log_convolve, chamfer]
    features = np.concatenate(pixel_feats)

    num_empty_regions = num_empty(datum)
    num_empty_regions_arr = np.zeros((3,))
    if num_empty_regions < 3:
        num_empty_regions_arr[num_empty_regions] = 1
    num_full_regions = num_full(datum)
    num_full_regions_arr = np.zeros((3,))
    if num_full_regions < 3:
        num_full_regions_arr[num_full_regions] = 1

    features = np.append(features, np.array([num_empty_regions]))
    features = np.append(features, np.array([num_full_regions]))
    features = np.append(features, num_empty_regions_arr)
    features = np.append(features, num_full_regions_arr)

    return features
def generateDetector(image, edgeDetector, custom=0, only_x=1):
    if edgeDetector == 'prewitt':
        dx = filters.prewitt(image, 0)  # horizontal derivative
        dy = filters.prewitt(image, 1)  # vertical derivative
        mag = np.hypot(dx.astype('int32'), dy.astype('int32'))  # magnitude
        mag *= 255.0 / np.max(mag)  # normalize (Q&D)
        return mag
    elif edgeDetector == "sobel":
        if custom == 1:
            return sobel_filter(image)
        else:
            dx = filters.sobel(image, 0)  # horizontal derivative
            dy = filters.sobel(image, 1)  # vertical derivative
            mag = np.hypot(dx.astype('int32'), dy.astype('int32'))  # magnitude
            mag *= 255.0 / np.max(mag)  # normalize (Q&D)
            if only_x == 1:
                return dx
            return mag
    elif edgeDetector == "log":
        if custom == 0:
            print("No in-built function available for log edge filter. "
                  "Construct custom filter laplacian_of_gaussian and call "
                  "generateDetector(image, 'log', 1)")
            return filters.gaussian_laplace(image, 0.01, output=None,
                                            mode='reflect', cval=0.0)
        else:
            return laplcian_of_gaussian(image)
    elif edgeDetector == "roberts":
        if custom == 0:
            return roberts(image)
        if custom == 1:
            return roberts_filter(image)
    elif edgeDetector == "canny":
        # image = image.astype(np.uint8)
        from scipy import ndimage, misc
        misc.imsave('fileName.jpg', image)
        image = ndimage.imread('fileName.jpg', 0)
        return cv2.Canny(np.uint8(image), 250, 255, 3)  # ,L2gradient=False)
        # return feature.canny(np.uint8(image), sigma=100)
    else:
        # lower threshold- 25, upper threshold- 255
        return cv2.Canny(image, 25, 255)
def get_scale_space(image, init_sigma, levels, k, method='downsample'):
    h, w = image.shape[0], image.shape[1]
    sigma = np.zeros(levels)
    # sigma = np.asarray([0]*levels)
    sigma[0] = init_sigma
    scale_space = np.zeros((h, w, levels))
    ord = 3
    # Method 2. (faster version)
    start = time.time()
    # Ensure odd filter size.
    n = np.ceil(sigma * 9)
    filter_size = int(n[0])
    if filter_size % 2 != 0:
        filter_size = int(filter_size)
    else:
        filter_size = filter_size + 1
    # Initialize filter matrix with a centered unit impulse.
    gauss_filter = np.zeros((filter_size, filter_size))
    center = (1 + filter_size) // 2 - 1
    gauss_filter[center][center] = 1
    # Obtain the LoG filter by filtering the impulse (no normalization needed).
    LoG = gaussian_laplace(gauss_filter, init_sigma)
    # Scale the image.
    for i in range(0, levels, 1):
        # Down scale. (These sizes are unused; the rescale call below is what
        # is applied. The original used `1 // k`, which floors to 0 for k > 1.)
        scaled_h = int((1 / k)**i * h)
        scaled_w = int((1 / k)**i * w)
        # scaled_im = transform.resize(image, (scaled_h, scaled_w), order=3)
        scaled_im = transform.rescale(image, (1 / k)**i, order=ord)
        # Apply convolution without normalization, then square the response.
        im_tmp = convolve(scaled_im, LoG)**(ord - 1)
        # Upscale back to the original size.
        scale_space[:, :, i] = transform.resize(im_tmp, (h, w), order=ord)
        # Update sigma.
        if (i + 1 < levels):
            sigma[i + 1] = sigma[i] * k
    return scale_space, sigma
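# The impulse trick used above, shown in isolation (added for illustration):
# applying gaussian_laplace to a delta image yields a discrete LoG kernel of
# a chosen size, which can then be used with a plain convolution.
import numpy as np
from scipy.ndimage import gaussian_laplace

size, sigma = 15, 1.5
impulse = np.zeros((size, size))
impulse[size // 2, size // 2] = 1.0
log_kernel = gaussian_laplace(impulse, sigma)
print(log_kernel.shape, log_kernel.sum())  # (15, 15), sum near 0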
def detectBlobs(im, param=None):
    # Input:
    #   IM - input image
    #
    # Output:
    #   BLOBS - n x 5 array with a blob in each row as (x, y, radius, angle, score)
    #
    # Dummy - returns a blob at the center of the image
    im = rgb2gray(im)
    numLevels = 15
    k = 1.5
    sigma = 1.2
    threshold = 0.01
    blob = []
    scaleSigma = [(k**i) * sigma for i in range(numLevels)]
    #print(scaleSigma[14])
    scaleSpace = np.zeros((im.shape[0], im.shape[1], numLevels))
    for key, val in enumerate(scaleSigma):
        #print(val)
        #kernel = loG(3*val, sigma=val)
        #print(kernel.shape)
        #squaredResponse = convolve(im, kernel, mode='constant', cval=0.0)**2
        squaredResponse = ((val**2) * gaussian_laplace(im, sigma=val,
                                                       mode='constant',
                                                       cval=0.0))**2
        scaleSpace[:, :, key] = squaredResponse
    out = generic_filter(scaleSpace, func, footprint=np.ones((3, 3, 3)),
                         mode='constant', cval=0.0)
    max_ind = np.where(out)
    #print(max_ind[0].shape)
    #indexs = zip(max_ind[0], max_ind[1], max_ind[2])
    #print(indexs)
    temp = scaleSpace[max_ind] > threshold
    print(len(temp))
    #print(temp)
    for key, val in enumerate(temp):
        if val:
            x = max_ind[0][key]
            y = max_ind[1][key]
            n = max_ind[2][key]
            #print(n)
            blob.append((y, x, np.sqrt(2) * scaleSigma[n], 0,
                         scaleSpace[x, y, n]))
    return np.asarray(blob)
def detect_blobs_log(input_path, min_sigma=1, max_sigma=50, num_sigma=10,
                     overlap=0.5, output_path='centroids.csv',
                     print_level=False):
    # loading tiff stack
    stack_0 = sitk.ReadImage(input_path)
    stack_0 = sitk.GetArrayFromImage(stack_0)
    image_0 = stack_0 * np.float32(255.0 / stack_0.max())
    sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
    image = np.copy(image_0)

    # computing gaussian laplace
    # s**2 provides scale invariance
    log_stack = []
    for sigma in sigma_list:
        if print_level:
            print("Sigma: {}".format(sigma))
        # TODO: Do we need an in-house gaussian filter to control filter size?
        log = -gaussian_laplace(image, sigma) * sigma**2
        log_stack.append(log)
    gl_scale_space = np.array(log_stack)
    # gl_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list]
    # gl_scale_space = np.array(gl_images)

    gl_thresh = threshold_otsu(gl_scale_space)
    local_maxima_2 = peak_local_max(gl_scale_space, threshold_abs=gl_thresh,
                                    min_distance=10, threshold_rel=0.0,
                                    exclude_border=False)
    lm2 = local_maxima_2.astype(np.float64)
    # Convert the first index to its corresponding scale value
    lm2[:, 0] = sigma_list[local_maxima_2[:, 0]]
    local_maxima_2 = lm2
    # move the scale column to be the right-most column
    col_permutation = [1, 2, 3, 0]
    local_maxima_2 = local_maxima_2[:, col_permutation]
    output = _prune_blobs(local_maxima_2, overlap)
    write_csv(output[:, 0:3], output_path)
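# Hypothetical invocation of detect_blobs_log (the path and settings are
# illustrative, not from the original source): run the scale-normalized LoG
# detector over a TIFF stack and write the blob centroids to a CSV file.
detect_blobs_log('example_stack.tif', min_sigma=2, max_sigma=12,
                 num_sigma=8, output_path='centroids.csv',
                 print_level=True)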
def get_laplacian(image_gray):
    scales = np.linspace(2.5, 6.5, num=10)
    laplacians = []
    for scale in scales:
        img_laplacian = -1.0 * filters.gaussian_laplace(image_gray, scale)
        r = scale * math.sqrt(2)
        #print img_laplacian.max()
        #img_laplacian = 1.0*img_laplacian/img_laplacian.max()
        #img_laplacian_max = maximum(img_laplacian, disk(r))
        laplacians.append(img_laplacian)
    laplacians = np.array(laplacians)
    return laplacians
def blobLOG(data, scales=range(1, 10, 1), threshold=-30):
    """Find blobs. Returns [[scale, x, y, ...], ...]"""
    from numpy import empty, asarray

    data = asarray(data)
    scales = asarray(scales)

    log = empty((len(scales),) + data.shape, dtype=data.dtype)
    for slog, scale in zip(log, scales):
        slog[...] = scale ** 2 * gaussian_laplace(data, scale)

    peaks = localMinima(log, threshold=threshold)
    peaks[:, 0] = scales[peaks[:, 0]]
    return peaks
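# Illustrative usage of blobLOG (the image and threshold are made up, and
# the helper localMinima from the same module is assumed available): bright
# blobs produce *negative* LoG responses, which is why blobLOG keeps
# sigma**2 * gaussian_laplace un-negated and hunts for local minima below a
# negative threshold.
import numpy as np

img = np.zeros((64, 64))
img[30:34, 30:34] = 100.0  # a small bright square
peaks = blobLOG(img, scales=range(1, 6), threshold=-5)
print(peaks)  # one row per blob: scale first, then the spatial indices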
def get_laplacian(img_hr_rgb, n_features):
    image_gray = rgb2gray(img_hr_rgb)
    scales = np.linspace(2.5, 7.0, num=n_features)
    laplacians = []
    for scale in scales:
        img_laplacian = -1.0 * filters.gaussian_laplace(image_gray, scale)
        img_laplacian = 1.0 * img_laplacian / img_laplacian.max()
        laplacians.append(img_laplacian)
        #r = scale*math.sqrt(2)
        #print img_laplacian.max()
        #img_laplacian_max = maximum(img_laplacian, disk(r))
    laplacians = np.array(laplacians)
    return laplacians
def GaussianLaplaceFilter(data, sigma, verbose=False):
    # bx = image[:,1,1].size
    # by = image[1,:,1].size
    # pr_out = np.zeros((bx, by))
    result = np.zeros(data.shape)
    for layer in xrange(0, len(data)):
        image = data[layer]
        # pr_out = np.zeros(image.shape)
        # Write the filtered layer directly into result[layer] (the original
        # bound the return value to an unused variable).
        gaussian_laplace(image, sigma, result[layer], mode='constant', cval=0.0)
        result[layer] = (result[layer] + abs(result[layer].min())) * 100
    return result
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
             overlap=.5, log_scale=False):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    min_sigma : float, optional
        The minimum standard deviation for Gaussian Kernel. Keep this low to
        detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for Gaussian Kernel. Keep this high to
        detect larger blobs.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional.
        The absolute lower bound for scale space maxima. Local maxima smaller
        than thresh are ignored. Reduce this to detect blobs with less
        intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `threshold`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. If not, linear
        interpolation is used.

    Returns
    -------
    A : (n, 3) ndarray
        A 2d array with each row representing 3 values, ``(y,x,sigma)``
        where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
        standard deviation of the Gaussian kernel which detected the blob.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[113, 323,   1],
           [121, 272,  17],
           [124, 336,  11],
           [126,  46,  11],
           [126, 208,  11],
           [127, 102,  11],
           [128, 154,  11],
           [185, 344,  17],
           [194, 213,  17],
           [194, 276,  17],
           [197,  44,  11],
           [198, 103,  11],
           [198, 155,  11],
           [260, 174,  17],
           [263, 244,  17],
           [263, 302,  17],
           [266, 115,  11]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}sigma`.
    """
    assert_nD(image, 2)

    image = img_as_float(image)

    if log_scale:
        start, stop = log(min_sigma, 10), log(max_sigma, 10)
        sigma_list = np.logspace(start, stop, num_sigma)
    else:
        sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)

    # computing gaussian laplace
    # s**2 provides scale invariance
    gl_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list]
    image_cube = np.dstack(gl_images)

    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3, 3, 3)),
                                  threshold_rel=0.0,
                                  exclude_border=False)

    # Convert the last index to its corresponding scale value
    local_maxima[:, 2] = sigma_list[local_maxima[:, 2]]
    return _prune_blobs(local_maxima, overlap)
def gaussian_laplace_filter(grid, sigma=(1, 1, 1)):
    filtered = grid.copy()
    scifilt.gaussian_laplace(grid, sigma, filtered, mode='nearest')
    return filtered
from __future__ import division

import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_laplace


def phaserand(stim, lower, upper):
    Y, X = np.ogrid[-256:256, -256:256]
    radius = (X ** 2 + Y ** 2) ** .5
    image_fft = np.fft.fftshift(np.fft.fft2(stim))
    amp = np.abs(image_fft)
    phase = np.angle(image_fft)
    phase_shuffled = phase.copy()
    idx = (radius > lower) & (radius <= upper)
    phase_shuffled[idx] = np.random.random(np.sum(idx)) * 2 * np.pi - np.pi
    image_shuffled = amp * np.exp(1j * phase_shuffled)
    return np.real(np.fft.ifft2(np.fft.ifftshift(image_shuffled)))


stim_orig = plt.imread('../figures/che.png')
stim = -gaussian_laplace(stim_orig, 2)
stim -= stim.mean()
plt.imsave('../figures/che_coc.png', stim, vmin=-.2, vmax=.2, cmap='gray')

high_shuffled = phaserand(stim, 30, 512)
plt.imsave('../figures/che_high.png', high_shuffled, vmin=-.2, vmax=.2, cmap='gray')

low_shuffled = phaserand(stim, 0, 30)
plt.imsave('../figures/che_low.png', low_shuffled, vmin=-.2, vmax=.2, cmap='gray')

very_low_shuffled = phaserand(stim, 0, 6)
plt.imsave('../figures/che_verylow.png', very_low_shuffled, vmin=-.2, vmax=.2, cmap='gray')
def log_filtering(imdata, winSize, sigma, fg_thresh, option='proplog'):
    """Blob detection using a LoG filter; the image is cropped into local
    windows before filtering.

    Parameters
    ----------
    imdata : numpy array (2D)
        The image data.
    winSize : integer
        The size of the local window.
    sigma : float
        Parameter for the LoG.
    fg_thresh : float (0~1)
        The foreground extraction percentage level.
    """
    sample_rate = winSize
    nrow, ncol = imdata.shape
    rows = np.array(range(winSize, nrow, sample_rate))
    cols = np.array(range(winSize, ncol, sample_rate))
    rsu = np.maximum(rows - winSize, np.zeros(len(rows), dtype=int))
    rsd = np.minimum(rows + winSize, np.ones(len(rows), dtype=int) * (nrow - 1))
    csl = np.maximum(cols - winSize, np.zeros(len(cols), dtype=int))
    csr = np.minimum(cols + winSize, np.ones(len(cols), dtype=int) * (ncol - 1))

    log_response = np.zeros(imdata.shape, dtype=np.double)
    for rs in range(len(rows)):
        for cs in range(len(cols)):
            # extract data
            block = imdata[rsu[rs]:rsd[rs], csl[cs]:csr[cs]]
            # extract foreground
            estimated_bs = bs_Estimatimation(block, fg_thresh)
            fg = fg_thresholding(block, estimated_bs)
            # compute log response
            temp_response = np.zeros(block.shape, np.double)
            filters.gaussian_laplace(fg, sigma, output=temp_response,
                                     mode='reflect')
            # composite response (integer division keeps indices integral)
            ub = rsu[rs] + winSize // 2
            loc_ub = winSize // 2
            db = rsd[rs] - winSize // 2
            loc_db = 3 * winSize // 2
            lb = csl[cs] + winSize // 2
            loc_lb = winSize // 2
            rb = csr[cs] - winSize // 2
            loc_rb = 3 * winSize // 2
            # upper bound
            if rsu[rs] == 0:
                ub = 0
                loc_ub = 0
            # lower bound
            if rsd[rs] == nrow - 1:
                db = nrow - 1
                loc_db = temp_response.shape[0]
            # left bound
            if csl[cs] == 0:
                lb = 0
                loc_lb = 0
            # right bound
            if csr[cs] == ncol - 1:
                rb = ncol - 1
                loc_rb = temp_response.shape[1]
            log_response[ub:db, lb:rb] = temp_response[loc_ub:loc_db,
                                                       loc_lb:loc_rb]

    if option == 'log':
        return -log_response
    if option == 'proplog':
        log_rep_prop = 1000 * log_response / imdata
        return -log_rep_prop
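# Hypothetical invocation of log_filtering (the array and settings are
# illustrative; it also assumes the helpers bs_Estimatimation and
# fg_thresholding from the same module are importable): run the windowed
# LoG on a synthetic image, keeping 30% of pixels as foreground.
import numpy as np

imdata = np.random.rand(256, 256) * 100 + 1  # strictly positive for 'proplog'
response = log_filtering(imdata, winSize=64, sigma=2.0, fg_thresh=0.3)
print(response.shape)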
from PIL import Image
from scipy.ndimage import filters
from numpy import *
import matplotlib.pyplot as plt
import math

plt.gray()

# Import images and transfer them into 2D arrays
image1 = (Image.open("/Users/Maria/Documents/ITandcognition/bin/Images/Img001_diffuse_smallgray.png"))
image2 = (Image.open("/Users/Maria/Documents/ITandcognition/bin/Images/Img002_diffuse_smallgray.png"))
#image1 = array(Image.open("imagedata/Img001_diffuse_smallgray.png"))
# image2 = array(Image.open("imagedata/Img002_diffuse_smallgray.png"))

# Filter the images with a Laplacian-of-Gaussian filter
image_GF = filters.gaussian_laplace(image1, sigma=5)
image_GF2 = filters.gaussian_laplace(image2, sigma=5)

neighbor = 4  # the number of neighbors is 4


def detectinterest(image_GF):
    localmin = []
    localmax = []
    interest = []
    for x in range(1, image_GF.shape[0] - 1):
        for y in range(1, image_GF.shape[1] - 1):
            if (image_GF[x, y] < image_GF[x, y + 1] and image_GF[x, y] < image_GF[x, y - 1]
                    and image_GF[x, y] < image_GF[x - 1, y] and image_GF[x, y] < image_GF[x + 1, y]
                    and image_GF[x, y] < 5):
                localmin.append([x, y])
                interest.append([x, y])
            if (image_GF[x, y] > image_GF[x, y + 1] and image_GF[x, y] > image_GF[x, y - 1]
                    and image_GF[x, y] > image_GF[x - 1, y] and image_GF[x, y] > image_GF[x + 1, y]
                    and image_GF[x, y] > 250):
                localmax.append([x, y])
                interest.append([x, y])
# myimage1 restored from the commented-out line below (it was used but never
# defined in the active code), following the same path prefix as myimage2.
myimage1 = array(Image.open("/Users/Maria/Documents/ITandcognition/bin/Images/human.png"), dtype='float32')
myimage2 = array(Image.open("/Users/Maria/Documents/ITandcognition/bin/Images/building.png"), dtype='float32')
image1 = array(Image.open("/Users/Maria/Documents/ITandcognition/bin/Images/Img001_diffuse_smallgray.png"), dtype='float32')
image2 = array(Image.open("/Users/Maria/Documents/ITandcognition/bin/Images/Img002_diffuse_smallgray.png"), dtype='float32')
image9 = array(Image.open("/Users/Maria/Documents/ITandcognition/bin/Images/Img009_diffuse_smallgray.png"), dtype='float32')
#image1 = array(Image.open("Images/Img001_diffuse_smallgray.png"))
#image2 = array(Image.open("Images/Img002_diffuse_smallgray.png"))
#image9 = array(Image.open("Images/Img009_diffuse_smallgray.png"))
#myimage1 = array(Image.open("Images/human.png"),dtype='float32')
#myimage2 = array(Image.open("Images/building.png"),dtype='float32')

# Filter the images with a Laplacian-of-Gaussian filter
image_GF = filters.gaussian_laplace(image1, sigma=1.4)
image_GF2 = filters.gaussian_laplace(image2, sigma=1.4)
image_GF9 = filters.gaussian_laplace(image9, sigma=1.4)
myimage1_GF = filters.gaussian_laplace(myimage1, sigma=1.4)
myimage2_GF = filters.gaussian_laplace(myimage2, sigma=1.4)


def detect(image, threshold):
    extremas = []
    for x in range(1, image.shape[0] - 1):
        for y in range(1, image.shape[1] - 1):
            if (image[x, y] < image[x, y + 1] and image[x, y] < image[x, y - 1]
                    and image[x, y] < image[x - 1, y] and image[x, y] < image[x + 1, y]
                    and image[x, y] < -threshold):  # local minima
                extremas.append([x, y])
            if (image[x, y] > image[x, y + 1] and image[x, y] > image[x, y - 1]
                    and image[x, y] > image[x - 1, y] and image[x, y] > image[x + 1, y]
                    and image[x, y] > threshold):  # local maxima
                extremas.append([x, y])
def getBinaryImage(self):
    self.ploting = False
    HEDAB = rgb2hed(self.image)
    R = self.image[:, :, 0]
    G = self.image[:, :, 1]
    B = self.image[:, :, 2]
    H = HEDAB[:, :, 0]
    E = HEDAB[:, :, 1]
    DAB = HEDAB[:, :, 2]
    BR = B * 2 / ((1 + R + G) * (1 + B + R + G))  # Blue-ratio image
    V = self.getV()  # From HSV
    (L, L2) = self.getL()  # From CIELAB and CIELUV

    BRSmoothed = ndimage.gaussian_filter(BR, 1)
    LSmoothed = ndimage.gaussian_filter(L, 1)
    VSmoothed = ndimage.gaussian_filter(V, 1)
    HSmoothed = ndimage.gaussian_filter(H, 1)
    ESmoothed = ndimage.gaussian_filter(E, 1)
    RSmoothed = ndimage.gaussian_filter(R, 1)
    DABSmoothed = ndimage.gaussian_filter(DAB, 1)

    imLLog = self.filterImage(gaussian_laplace(LSmoothed, 9), 85) == False
    imVLog = self.filterImage(gaussian_laplace(VSmoothed, 9), 85) == False
    imELog = self.filterImage(gaussian_laplace(ESmoothed, 9), 84) == False
    imRLog = self.filterImage(gaussian_laplace(RSmoothed, 9), 84) == False
    imDABLog = self.filterImage(gaussian_laplace(DABSmoothed, 9), 50)
    imHLog = self.filterImage(gaussian_laplace(HSmoothed, 9), 8)
    imLog = self.filterImage(gaussian_laplace(BRSmoothed, 9), 9)

    imR = self.filterImage(R, 2.5)
    imB = self.filterImage(B, 10.5)
    imV = self.filterImage(V, 6.5)
    imL = self.filterImage(L, 2.5)
    imL2 = self.filterImage(L2, 2.5)
    imE = self.filterImage(E, 18)
    imH = self.filterImage(H, 95) == False
    imDAB = self.filterImage(DAB, 55) == False
    imBR = self.filterImage(BR, 63) == False

    binaryImg = (imR & imV & imB & imL & imL2 & imE & imH & imDAB & imLog &
                 imBR & imLLog & imVLog & imELog & imHLog & imRLog & imDABLog)
    openImg = ndimage.binary_opening(binaryImg, iterations=2)
    closedImg = ndimage.binary_closing(openImg, iterations=8)

    if self.ploting:
        plt.imshow(self.image)
        plt.show()
        plt.imshow(imR)
        plt.show()
        plt.imshow(imV)
        plt.show()
        plt.imshow(imB)
        plt.show()
        plt.imshow(imL)
        plt.show()
        plt.imshow(closedImg)
        plt.show()

    BRVL = np.zeros(self.image.shape)
    BRVL[:, :, 0] = BR
    BRVL[:, :, 1] = V
    BRVL[:, :, 2] = L / rangeL

    # Resize HEDAB, from 0 to 1.
    HEDAB[:, :, 0] = (H - minH) / rangeH
    HEDAB[:, :, 1] = (E - minE) / rangeE
    HEDAB[:, :, 2] = (DAB - minDAB) / rangeDAB

    return (BinaryImageWorker(closedImg, self.rows, self.columns),
            RGBImageWorker(HEDAB, self.rows, self.columns),
            RGBImageWorker(BRVL, self.rows, self.columns),
            BinaryImageWorker(binaryImg, self.rows, self.columns))
# #################################################################
plt.gray()

print "Importing pictures and converting them to arrays"

# Import images and transfer them into 2D float arrays
im1 = array(Image.open("imagedata/Img001_diffuse_smallgray.png"), dtype="float32")
im2 = array(Image.open("imagedata/Img002_diffuse_smallgray.png"), dtype="float32")
im3 = array(Image.open("imagedata/Img009_diffuse_smallgray.png"), dtype="float32")
imsq = Image.open("imagedata/squirrel.png").convert('L')
imot = Image.open("imagedata/otter.png").convert('L')
imsq = array(imsq, dtype="float32")
imot = array(imot, dtype="float32")

im1_gl = filters.gaussian_laplace(im1, sigma=1.4)
# I have no real argument for sigma 1.4 besides it yields good results
im2_gl = filters.gaussian_laplace(im2, sigma=1.4)
im3_gl = filters.gaussian_laplace(im3, sigma=1.4)
imsq_gl = filters.gaussian_laplace(imsq, sigma=1.4)
imot_gl = filters.gaussian_laplace(imot, sigma=1.4)

print "Done."

"""detect(image)
This function finds the local extrema in a picture. It runs through every
pixel.
"""


def detect(image):
    extrema = []
def transform_LoG(self, sigma):
    for i_rot in np.arange(self.stack_height):
        self.score_stack[:, :, i_rot] = -gaussian_laplace(
            self.score_stack[:, :, i_rot], sigma)
def init_data():
    # Training Data
    imagesDic = scipy.io.loadmat(file_name="labeled_images.mat")
    tr_images = imagesDic["tr_images"].astype(float)
    tr_identity = imagesDic["tr_identity"].astype(float)
    tr_labels = imagesDic["tr_labels"]
    tr_images_O = tr_images
    tr_identity_O = tr_identity
    tr_labels_O = tr_labels

    # Test Data
    imagesDic = scipy.io.loadmat(file_name="public_test_images.mat")
    test_images = imagesDic["public_test_images"].astype(float)
    test_images_O = test_images

    SHOW_TRANSFORM_COMPARISON = False
    if SHOW_TRANSFORM_COMPARISON:
        trtr = transform_(tr_images, 0, 4)
        plt.figure(1)
        plt.clf()
        plt.imshow(trtr[:, :, 0], cmap=plt.cm.gray)
        plt.show()
        plt.figure(2)
        plt.clf()
        plt.imshow(tr_images[:, :, 0], cmap=plt.cm.gray)
        plt.show()

    ADD_TRANSFORMED_DATA = False
    if ADD_TRANSFORMED_DATA:
        tr_images_0_1 = transform_(tr_images, 0, 1)
        tr_images_0_m1 = transform_(tr_images, 0, -1)
        tr_images_1_0 = transform_(tr_images, 1, 0)
        tr_images_m1_0 = transform_(tr_images, -1, 0)
        things_to_join = (tr_images, tr_images_0_1, tr_images_0_m1,
                          tr_images_1_0, tr_images_m1_0)
        tr_images = np.concatenate(things_to_join, axis=2)
        things_to_join = (tr_labels, tr_labels, tr_labels, tr_labels, tr_labels)
        tr_labels = np.concatenate(things_to_join)
        things_to_join = (tr_identity, tr_identity, tr_identity, tr_identity,
                          tr_identity)
        tr_identity = np.concatenate(things_to_join)

    ADD_TRANSFORMED_DATA_2 = False
    if ADD_TRANSFORMED_DATA_2:
        tr_images_0_1 = transform_(tr_images, 0, 1)
        tr_images_0_m1 = transform_(tr_images, 0, -1)
        tr_images_1_0 = transform_(tr_images, 1, 0)
        tr_images_m1_0 = transform_(tr_images, -1, 0)
        tr_images_0_2 = transform_(tr_images, 0, 2)
        tr_images_0_m2 = transform_(tr_images, 0, -2)
        tr_images_2_0 = transform_(tr_images, 2, 0)
        tr_images_m2_0 = transform_(tr_images, -2, 0)
        things_to_join = (tr_images, tr_images_0_1, tr_images_0_m1,
                          tr_images_1_0, tr_images_m1_0, tr_images_0_2,
                          tr_images_0_m2, tr_images_2_0, tr_images_m2_0)
        tr_images = np.concatenate(things_to_join, axis=2)
        things_to_join = (tr_labels, tr_labels, tr_labels, tr_labels,
                          tr_labels, tr_labels, tr_labels, tr_labels,
                          tr_labels)
        tr_labels = np.concatenate(things_to_join)
        things_to_join = (tr_identity, tr_identity, tr_identity, tr_identity,
                          tr_identity, tr_identity, tr_identity, tr_identity,
                          tr_identity)
        tr_identity = np.concatenate(things_to_join)

    # More processing
    if False:
        SHOW_FILTER = False
        if SHOW_FILTER:
            plt.figure(1)
            plt.clf()
            plt.imshow(tr_images[:, :, 0], cmap=plt.cm.gray)
            plt.show()

        #tr_images = np.array([exposure.equalize_hist(tr_images[:,:,i]) for i in xrange(tr_images.shape[2])])
        #test_images = np.array([exposure.equalize_hist(test_images[:,:,i]) for i in xrange(test_images.shape[2])])
        #tr_images = np.array([gaussian_filter(tr_images[:,:,i], sigma=0.5) for i in xrange(tr_images.shape[2])])
        #test_images = np.array([gaussian_filter(test_images[:,:,i], sigma=0.5) for i in xrange(test_images.shape[2])])
        tr_images = np.array([filters.gaussian_laplace(tr_images[:, :, i], sigma=0.4) for i in xrange(tr_images.shape[2])])
        test_images = np.array([filters.gaussian_laplace(test_images[:, :, i], sigma=0.4) for i in xrange(test_images.shape[2])])
        #tr_images = np.array([filter.edges.prewitt(tr_images[:,:,i]) for i in xrange(tr_images.shape[2])])
        #test_images = np.array([filter.edges.prewitt(test_images[:,:,i]) for i in xrange(test_images.shape[2])])
        #tr_images = np.array([filter.edges.sobel(tr_images[:,:,i]) for i in xrange(tr_images.shape[2])])
        #test_images = np.array([filter.edges.sobel(test_images[:,:,i]) for i in xrange(test_images.shape[2])])
        tr_images = np.rollaxis(tr_images, 0, 3)
        test_images = np.rollaxis(test_images, 0, 3)

        if SHOW_FILTER:
            plt.figure(2)
            plt.clf()
            plt.imshow(tr_images[:, :, 0], cmap=plt.cm.gray)
            plt.show()
            sys.exit()

    if False:
        ID = 1011
        tr_ID = tr_images[:, :, tr_identity.ravel() == ID]
        for i in range(tr_ID.shape[2]):
            plt.figure(i + 3)
            plt.clf()
            plt.imshow(tr_ID[:, :, i], cmap=plt.cm.gray)
            plt.show()
        sys.exit()

    # Preprocess the training set
    tr_images = np.array([tr_images[:, :, i].reshape(-1) for i in xrange(tr_images.shape[2])])
    tr_images = preprocessing.scale(tr_images, 1)
    tr_images_O = np.array([tr_images_O[:, :, i].reshape(-1) for i in xrange(tr_images_O.shape[2])])
    tr_images_O = preprocessing.scale(tr_images_O, 1)

    # Preprocess the test set
    test_images = np.array([test_images[:, :, i].reshape(-1) for i in xrange(test_images.shape[2])])
    test_images = preprocessing.scale(test_images, 1)
    test_images_O = np.array([test_images_O[:, :, i].reshape(-1) for i in xrange(test_images_O.shape[2])])
    test_images_O = preprocessing.scale(test_images_O, 1)

    # PCA reduction/projection
    if False:
        dim = 250
        pca = PCA(n_components=dim)
        tr_images = pca.fit_transform(tr_images)
        pca_ = PCA(n_components=dim)
        test_images = pca.fit_transform(test_images)
        print "PCA Total explained variance:", np.sum(pca.explained_variance_ratio_)

    return (tr_images, tr_labels, tr_identity, test_images,
            tr_images_O, tr_labels_O, tr_identity_O, test_images_O)