def lcn_mauch(X, kernel=None, rho=0):
    """Apply a version of local contrast normalization (LCN), inspired by
    Mauch, Dixon (2009), "Approximate Note Transcription...".

    Parameters
    ----------
    X : np.ndarray, ndim=2
        Input representation.
    kernel : np.ndarray
        Convolution kernel (should be roughly low-pass).
    rho : scalar
        Scalar applied to the final output for heuristic range control.

    Returns
    -------
    Z : np.ndarray
        The processed output.
    """
    if kernel is None:
        dim0, dim1 = 15, 37
        dim0_weights = np.hamming(dim0 * 2 + 1)[:dim0]
        dim1_weights = np.hamming(dim1)
        kernel = dim0_weights[:, np.newaxis] * dim1_weights[np.newaxis, :]

    kernel /= kernel.sum()
    Xh = convolve2d(X, kernel, mode='same', boundary='symm')
    V = hwr(X - Xh)
    S = np.sqrt(
        convolve2d(np.power(V, 2.0), kernel, mode='same', boundary='symm'))
    S2 = np.zeros(S.shape) + S.mean()
    S2[S > S.mean()] = S[S > S.mean()]
    if S2.sum() == 0.0:
        S2 += 1.0
    return V / S2**rho
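# Hedged usage sketch (added for illustration, not from the original source):
# exercises lcn_mauch on a synthetic time-frequency patch with its default
# Hamming-window kernel. It assumes numpy, scipy.signal.convolve2d and the
# `hwr` helper used above are importable in this module; the input array and
# the rho value are arbitrary choices.
def _example_lcn_mauch():
    import numpy as np
    X = np.abs(np.random.randn(100, 252))   # e.g. frames x CQT bins
    Z = lcn_mauch(X, kernel=None, rho=0.5)
    assert Z.shape == X.shape                # output keeps the input shape
    return Z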
def lcn_v2(X, kernel, mean_scalar=1.0):
    """Apply an alternative version of local contrast normalization (LCN) to
    an array.

    Parameters
    ----------
    X : np.ndarray, ndim=2
        Input representation.
    kernel : np.ndarray
        Convolution kernel (should be roughly low-pass).
    mean_scalar : float, default=1.0
        Currently unused.

    Returns
    -------
    Z : np.ndarray
        The processed output.
    """
    if X.ndim != 2:
        raise ValueError("Input must be a 2D matrix.")
    Xh = convolve2d(X, kernel, mode='same', boundary='symm')
    V = X - Xh
    S = np.sqrt(
        convolve2d(np.power(V, 2.0), kernel, mode='same', boundary='symm'))
    thresh = np.exp(np.log(S + np.power(2.0, -5)).mean(axis=-1))
    S = S * np.greater(S - thresh.reshape(-1, 1), 0)
    S += 1.0 * np.equal(S, 0.0)
    return V / S
def lcn(X, kernel):
    """Apply Local Contrast Normalization (LCN) to an array.

    Parameters
    ----------
    X : np.ndarray, ndim=2
        Input representation.
    kernel : np.ndarray
        Convolution kernel (should be roughly low-pass).

    Returns
    -------
    Z : np.ndarray
        The processed output.
    """
    if X.ndim != 2:
        raise ValueError("Input must be a 2D matrix.")
    Xh = convolve2d(X, kernel, mode='same', boundary='symm')
    V = X - Xh
    S = np.sqrt(
        convolve2d(np.power(V, 2.0), kernel, mode='same', boundary='symm'))
    S2 = np.zeros(S.shape) + S.mean()
    S2[S > S.mean()] = S[S > S.mean()]
    if S2.sum() == 0.0:
        S2 += 1.0
    return V / S2
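# Hedged usage sketch (added for illustration): builds the same normalized
# separable Hamming-window kernel that lcn_mauch uses by default and applies
# the basic lcn above to a synthetic spectrogram-like array. The input shape
# is an arbitrary choice.
def _example_lcn():
    import numpy as np
    dim0, dim1 = 15, 37
    dim0_weights = np.hamming(dim0 * 2 + 1)[:dim0]
    dim1_weights = np.hamming(dim1)
    kernel = dim0_weights[:, np.newaxis] * dim1_weights[np.newaxis, :]
    kernel /= kernel.sum()
    X = np.abs(np.random.randn(200, 144))
    Z = lcn(X, kernel)
    assert Z.shape == X.shape
    return Z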
def directionality(img):
    # V and H are module-level vertical/horizontal derivative kernels;
    # `pi` comes from math or numpy.
    newImg = img + 4
    convV = np.zeros(newImg.shape)
    convH = np.zeros(newImg.shape)
    for i in range(newImg.shape[2]):
        convV[:, :, i] = convolve2d(newImg[:, :, i], V, mode='same', fillvalue=1)
        convH[:, :, i] = convolve2d(newImg[:, :, i], H, mode='same', fillvalue=1)
    convV[convV == 0] = 0.1
    convH[convH == 0] = 0.1
    theta1 = pi / 2. + np.arctan(convV / convH)
    theta2 = pi / 2 + np.arctan(convH / convV)
    return [theta1.var(), theta2.var()]
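# Hedged sketch (added for illustration, not from the original source):
# directionality() above expects module-level kernels V and H plus `pi`.
# Prewitt-style derivative kernels are one common choice for this kind of
# directionality feature; the real module may define them differently.
def _example_directionality_kernels():
    import numpy as np
    H = np.array([[-1, 0, 1],
                  [-1, 0, 1],
                  [-1, 0, 1]], dtype=float)   # horizontal-gradient kernel
    V = H.T                                   # vertical-gradient kernel
    return V, H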
def physical_SED_model(self, bases_wave_rest, obs_wave, bases_flux, Av_star,
                       z_star, sigma_star, Rv_coeff=3.4):

    # Calculate wavelength at object z
    wave_z = bases_wave_rest * (1 + z_star)

    # Kernel matrix
    box = int(np.ceil(max(3 * sigma_star)))
    kernel_len = 2 * box + 1
    kernel_range = np.arange(0, 2 * box + 1)
    kernel = np.empty((1, kernel_len))

    # Filling gaussian values (the norm factor is the sum of the gaussian)
    kernel[0, :] = np.exp(-0.5 * (np.square((kernel_range - box) / sigma_star)))
    kernel /= sum(kernel[0, :])

    # Convolve bases with the kernel for the dispersion velocity calculation
    basesGridConvolved = convolve2d(bases_flux, kernel, mode='same',
                                    boundary='symm')

    # Interpolate bases to wavelength ranges
    basesGridInterp = (interp1d(wave_z, basesGridConvolved, axis=1,
                                bounds_error=True)(obs_wave)).T

    # Generate final flux model including reddening
    Av_vector = Av_star * np.ones(basesGridInterp.shape[1])
    obs_wave_resam_rest = obs_wave / (1 + z_star)
    Xx_redd = CCM89_Bal07(Rv_coeff, obs_wave_resam_rest)
    dust_attenuation = np.power(10, -0.4 * np.outer(Xx_redd, Av_vector))
    bases_grid_redd = basesGridInterp * dust_attenuation

    return bases_grid_redd
def plot_data(xa, ya, za, fig, nfig, colorflag=False, convolveflag=False,
              clim=None):
    cmap = pylab.cm.jet
    cmap.set_bad('w', 1.0)
    myfilter = N.array([[0.1, 0.2, 0.1],
                        [0.2, 0.8, 0.2],
                        [0.1, 0.2, 0.1]], 'd') / 2.0
    if convolveflag:
        zout = convolve2d(za, myfilter, mode='same')  # to convolve, or not to convolve...
    else:
        zout = za
    zima = ma.masked_where(N.isnan(zout), zout)
    ax = fig.add_subplot(2, 2, nfig)
    pc = ax.pcolormesh(xa, ya, zima, shading='interp', cmap=cmap)
    # pc = ax.imshow(zima, interpolation='bilinear', cmap=cmap)
    if clim is None:
        clim = zima.min(), zima.max()
    pc.set_clim(*clim)
    g = None
    if colorflag:
        g = pylab.colorbar(pc, ticks=N.arange(clim[0], clim[1], 100))
        print(g)
    return ax, g
def lcn(x):
    h, w = x.shape[:2]
    k = np.ones((9, 9))
    k /= 81
    meaned = convolve2d(x, k, mode='same')
    p = np.power(x, 2.0)
    s = convolve2d(p, np.ones((9, 9)), mode='same')
    s = np.sqrt(s)
    m = x - meaned
    lcned = m / s
    lcn_min = np.min(lcned)
    lcn_max = np.max(lcned)
    normed = (lcned - lcn_min) * (1 / (lcn_max - lcn_min))
    return normed
def plot_data(xa, ya, za, fig, nfig, colorflag=False):
    # `smooth`, `ctop` and `cstep` are expected as module-level settings.
    cmap = pylab.cm.jet
    cmap.set_bad('w', 1.0)
    myfilter = N.array([[0.1, 0.2, 0.1],
                        [0.2, 0.8, 0.2],
                        [0.1, 0.2, 0.1]], 'd') / 2.0
    if smooth:
        zout = convolve2d(za, myfilter, mode='same')
    else:
        zout = za
    zima = ma.masked_where(N.isnan(zout), zout)
    ax = fig.add_subplot(1, 1, nfig)
    pc = ax.pcolormesh(xa, ya, zima, shading='interp', cmap=cmap)
    # pc = ax.imshow(zima, interpolation='bilinear', cmap=cmap)
    pc.set_clim(0.0, ctop)
    g = None
    if colorflag:
        g = pylab.colorbar(pc, ticks=N.arange(0, ctop, cstep))
        print(g)
    return ax, g
def feedforward(self, inputs):
    """
    Calculate the output of this layer from the given input.

    :param inputs: 2D or 3D numpy array. If 3D, the first dimension indexes
        the previous layer's feature maps and the last two dimensions hold
        each map's image output; if 2D, it is treated as a single image.
    :return: 3D numpy array with one 2D output per feature map.
    """
    if len(np.shape(inputs)) == 2:
        inputs = np.array([inputs])
    self.inputs = np.copy(inputs)
    in_size = np.shape(self.inputs[0])
    out_shape = (in_size[0] - self.kernel_size + 1,
                 in_size[1] - self.kernel_size + 1)
    self.outputs = np.zeros((self.num_maps, out_shape[0], out_shape[1]))
    # go through all feature maps of this layer
    for fm_idx in range(self.num_maps):
        bias = self.biases[fm_idx]
        conv_out = np.zeros(out_shape)
        # convolve inputs with weights and sum the results
        for prev_fm_idx in range(self.num_prev_maps):
            kernel = self.weights[prev_fm_idx, fm_idx]
            prev_out = self.inputs[prev_fm_idx]
            conv_out += signal.convolve2d(prev_out, kernel, mode='valid')
        # add bias and apply activation function for final output
        self.outputs[fm_idx] = self.activation_func(conv_out + bias)
    if out_shape == (1, 1):
        return np.array([self.outputs[:, 0, 0]])
    return self.outputs
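# Hedged standalone sketch (added for illustration, not part of the original
# class): it mirrors the core of feedforward() above -- sum valid-mode 2D
# convolutions over the previous feature maps, add a bias, apply a
# nonlinearity -- without the class attributes. The array shapes and the tanh
# activation are assumptions.
def _example_conv_feedforward(inputs, weights, biases):
    import numpy as np
    from scipy import signal
    # inputs: (num_prev_maps, H, W); weights: (num_prev_maps, num_maps, k, k)
    num_prev_maps, num_maps = weights.shape[:2]
    k = weights.shape[2]
    h, w = inputs.shape[1] - k + 1, inputs.shape[2] - k + 1
    outputs = np.zeros((num_maps, h, w))
    for fm in range(num_maps):
        conv_out = np.zeros((h, w))
        for prev in range(num_prev_maps):
            conv_out += signal.convolve2d(inputs[prev], weights[prev, fm],
                                          mode='valid')
        outputs[fm] = np.tanh(conv_out + biases[fm])
    return outputs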
def plot_data(xa, ya, za, fig, nfig, colorflag=False, convolveflag=False):
    cmap = pylab.cm.jet
    cmap.set_bad("w", 1.0)
    myfilter = N.array([[0.1, 0.2, 0.1],
                        [0.2, 0.8, 0.2],
                        [0.1, 0.2, 0.1]], "d") / 2.0
    if convolveflag:
        zout = convolve2d(za, myfilter, mode="same")  # to convolve, or not to convolve...
    else:
        zout = za
    zima = ma.masked_where(N.isnan(zout), zout)
    ax = fig.add_subplot(1, 1, nfig)
    pc = ax.pcolormesh(xa, ya, zima, shading="interp", cmap=cmap)
    # pc = ax.imshow(zima, interpolation='bilinear', cmap=cmap)
    pmin = zima.min()
    pmax = zima.max()
    pc.set_clim(pmin, pmax)
    g = None
    if colorflag:
        g = pylab.colorbar(pc, ticks=N.arange(pmin, pmax, 100))
        print(g)
    return ax, g
def physical_SED_model(self, rest_wave, obs_wave, bases_flux, Av_star, z_star,
                       sigma_star, Rv_coeff=3.1):

    # Calculate wavelength at object z
    wave_z = rest_wave * (1 + z_star)

    # Compute reddening
    Av_vector = Av_star * ones(bases_flux.shape[0])
    Xx_redd = CCM89_Bal07(Rv_coeff, rest_wave)

    # Calculate stellar broadening kernel
    r_sigma = sigma_star / (wave_z[1] - wave_z[0])
    box = int(3 * r_sigma) if int(3 * r_sigma) < 3 else 3
    kernel_len = 2 * box + 1
    kernel = zeros((1, kernel_len))
    kernel_range = arange(0, 2 * box + 1)

    # Generating the kernel with sigma (the norm factor is the sum of the gaussian)
    kernel[0, :] = exp(-0.5 * ((square(kernel_range - box) / r_sigma)))
    norm = np_sum(kernel[0, :])
    kernel = kernel / norm

    # Convolve bases with the kernel for the dispersion velocity calculation
    bases_grid_convolve = convolve2d(bases_flux, kernel, mode='same',
                                     boundary='symm')

    print('wave_z')
    print(wave_z.shape)
    print('bases_grid_convolve')
    print(bases_flux.shape)

    # Interpolate bases to wavelength range
    bases_grid_interp = (interp1d(wave_z, bases_grid_convolve, axis=1,
                                  bounds_error=True)(obs_wave)).T

    # Generate final flux model including dust
    dust_attenuation = power(10, -0.4 * outer(Xx_redd, Av_vector))
    bases_grid_redd = bases_grid_interp * dust_attenuation

    return bases_grid_redd
def solve_snakes(combined):
    # Sea-monster pattern (15 '#' cells).
    snake = '''
                  # 
#    ##    ##    ###
 #  #  #  #  #  #   
'''.strip('\n')
    snake = grid_to_array(snake)
    filter = np.rot90(snake, 2)  # convolution filter is flipped
    snakelen = snake.sum()
    for i in range(NUM_TRANSFORMATIONS):
        d = transformations(combined, i)
        cv = convolve2d(d, filter, mode='valid')
        snakes = (cv == snakelen).sum()  # assumption: no overlaps
        if snakes:
            return d.sum() - snakes * snakelen
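# Hedged mini-demo (added for illustration): the matching trick used in
# solve_snakes() above -- convolve a binary grid with the 180-degree-rotated
# pattern and count positions where the response equals the number of set
# cells in the pattern. The grid and pattern here are made up.
def _example_pattern_count():
    import numpy as np
    from scipy.signal import convolve2d
    grid = np.zeros((6, 6), dtype=int)
    pattern = np.array([[1, 0],
                        [1, 1]])
    grid[1:3, 1:3] = pattern                  # plant one occurrence
    response = convolve2d(grid, np.rot90(pattern, 2), mode='valid')
    return int((response == pattern.sum()).sum())   # -> 1 match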
def patVcut(std=None, psf=None, phi=None, angle_rad=None, size=501, SNR=30):
    '''
    psf ... blur kernel (2d numpy array)
    phi ... rotation of v-cut (phi=0 -> v-cut opening at 3:00)
    angle_rad ... v-cut opening angle
    size ... output image size (x,y) [px]
    SNR ... signal to noise ratio
    '''
    assert std is not None or psf is not None, \
        "either std or psf has to be provided"
    if psf is None:
        # obtain Gaussian blur kernel (= point spread function [psf]) from the
        # standard deviation
        psf = std2PSF(std)
    if angle_rad is None:
        angle_rad = randAngle_rad()
    if phi is None:
        # random rotation of the v-cut angle
        phi = np.random.rand() * 2 * np.pi

    c = size / 2
    rel_noise = 1.0 / SNR
    s = (size, size)
    s2 = size * size
    img_masked = np.zeros(s)
    rad = 0.5 * angle_rad
    # points of the v-cut:
    p0 = c + np.sin(rad + phi) * 2 * size
    p1 = c + np.cos(rad + phi) * 2 * size
    p2 = c + np.sin(-rad + phi) * 2 * size
    p3 = c + np.cos(-rad + phi) * 2 * size
    # v-cut positions (triangle):
    pts = np.array(((c, c), (p0, p1), (p2, p3)), dtype=int)
    # draw v-cut:
    cv2.fillConvexPoly(img_masked, pts, color=1, lineType=0)
    # blur:
    img_masked = convolve2d(img_masked, psf, mode='same', boundary='symm')
    # add noise:
    img_masked += np.random.rand(s2).reshape(s) * rel_noise
    img_unmasked = np.ones(s) + np.random.rand(s2).reshape(s) * rel_noise
    # line indicating gap position:
    line = [c, c, c + np.sin(phi) * 0.5 * size, c + np.cos(phi) * 0.5 * size]
    line = resize(line, 1.2)
    return img_masked, img_unmasked, line
def highpass(X, kernel):
    """High-pass filter an input by subtracting its low-pass (kernel-smoothed)
    component.

    Parameters
    ----------
    X : np.ndarray, ndim=2
        Input representation.
    kernel : np.ndarray
        Convolution kernel (should be roughly low-pass).

    Returns
    -------
    Z : np.ndarray
        The processed output.
    """
    if X.ndim != 2:
        raise ValueError("Input must be a 2D matrix.")
    Xh = convolve2d(X, kernel, mode='same', boundary='symm')
    return X - Xh
def local_l2norm(X, kernel):
    """Apply local l2-normalization over an input with a given kernel.

    Parameters
    ----------
    X : np.ndarray, ndim=2
        Input representation.
    kernel : np.ndarray
        Convolution kernel (should be roughly low-pass).

    Returns
    -------
    Z : np.ndarray
        The processed output.
    """
    local_mag = np.sqrt(
        convolve2d(np.power(X, 2.0), kernel, mode='same', boundary='symm'))
    local_mag = local_mag + 1.0 * (local_mag == 0.0)
    return X / local_mag
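# Hedged usage sketch (added for illustration): chains highpass() and
# local_l2norm() above with a small normalized box kernel. The kernel size and
# the synthetic input are arbitrary choices.
def _example_local_l2norm():
    import numpy as np
    kernel = np.ones((5, 5)) / 25.0
    X = np.random.randn(64, 64)
    V = highpass(X, kernel)          # remove the local mean
    Z = local_l2norm(V, kernel)      # divide by the local l2 magnitude
    assert Z.shape == X.shape
    return Z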
def get_eye_mask(ball):
    # Get segments of white pixels
    white_pixels = (np.asarray(ball.convert("L")) == 255)
    white_segments, n_labels = ndimage.label(white_pixels)

    # Get border colors of each white segment
    eye_labels = []
    for label in range(1, n_labels + 1):
        segment = white_segments == label
        kernel = np.asarray([[0, 1, 0],
                             [1, 1, 1],
                             [0, 1, 0]])
        edges = convolve2d(segment.astype(int), kernel.astype(int),
                           mode='same').astype(bool)
        diff = edges ^ segment
        border_colors = np.unique(np.asarray(ball)[diff == 1], axis=0)
        # Check whether the only color bordering this white segment is black
        if len(border_colors) == 1 and (border_colors[0] == (0, 0, 0, 255)).all():
            # If so, then it is an eye
            eye_labels.append(label)

    return np.isin(white_segments, eye_labels)
def filterimg(arr, mask):
    return convolve2d(arr, mask)
def avg(img):
    # Note: the all-ones kernel yields a local sum; divide by 81 for a true mean.
    return convolve2d(img, np.ones((9, 9)), 'same')
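# Hedged sketch (added for illustration): a normalized variant of avg() above
# that returns a true 9x9 local mean rather than a local sum.
def _example_box_mean(img):
    import numpy as np
    from scipy.signal import convolve2d
    k = np.ones((9, 9)) / 81.0        # normalized box kernel
    return convolve2d(img, k, mode='same')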
def generate_synthObs(self, bases_wave, bases_flux, basesCoeff, Av_star,
                      z_star, sigma_star, resample_range=None, resample_int=1):
    '''basesWave: Bases wavelength must be at rest'''

    nbases = basesCoeff.shape[0]
    bases_wave_resam = arange(int(resample_range[0]), int(resample_range[-1]),
                              resample_int, dtype=float)
    npix_resample = len(bases_wave_resam)

    # Resampling the range
    bases_flux_resam = empty((nbases, npix_resample))
    for i in range(nbases):
        bases_flux_resam[i, :] = interp1d(bases_wave[i], bases_flux[i],
                                          bounds_error=True)(bases_wave_resam)

    # Display physical parameters
    synth_wave = bases_wave_resam * (1 + z_star)
    Av_vector = Av_star * ones(nbases)
    Xx_redd = CCM89_Bal07(3.4, bases_wave_resam)
    r_sigma = sigma_star / (synth_wave[1] - synth_wave[0])

    # Defining empty kernel
    box = int(3 * r_sigma) if int(3 * r_sigma) < 3 else 3
    kernel_len = 2 * box + 1
    kernel = zeros((1, kernel_len))
    kernel_range = arange(0, 2 * box + 1)

    # Generating the kernel with sigma (the norm factor is the sum of the gaussian)
    kernel[0, :] = exp(-0.5 * ((square(kernel_range - box) / r_sigma)))
    norm = np_sum(kernel[0, :])
    kernel = kernel / norm

    # Convolve bases with the kernel for the dispersion velocity calculation
    bases_grid_convolve = convolve2d(bases_flux_resam, kernel, mode='same',
                                     boundary='symm')

    # Interpolate bases to wavelength range
    interBases_matrix = (interp1d(bases_wave_resam, bases_grid_convolve, axis=1,
                                  bounds_error=True)(bases_wave_resam)).T

    # Generate final flux model including dust
    dust_attenuation = power(10, -0.4 * outer(Xx_redd, Av_vector))
    bases_grid_model = interBases_matrix * dust_attenuation

    # Generate combined flux
    synth_flux = np_sum(basesCoeff.T * bases_grid_model, axis=1)

    return synth_wave, synth_flux