def register_translation_batch(src_image, target_image, upsample_factor=1,
                               space="real"):
    # Assume complex data is already in Fourier space.
    if space.lower() == 'fourier':
        src_freq = src_image
        target_freq = target_image
    # Real data needs to be FFT'd.
    elif space.lower() == 'real':
        src_freq = cp.fft.fft2(src_image)
        target_freq = cp.fft.fft2(target_image)
    else:
        raise ValueError('space must be "real" or "fourier"')

    # Whole-pixel shift - compute cross-correlation by an IFFT.
    shape = src_freq.shape
    image_product = src_freq * target_freq.conj()
    cross_correlation = cp.fft.ifft2(image_product)
    A = cp.abs(cross_correlation)
    maxima = A.reshape(A.shape[0], -1).argmax(1)
    maxima = cp.column_stack(cp.unravel_index(maxima, A[0, :, :].shape))

    # shape entries are Python ints, so use np.fix here (cp.fix would
    # produce device arrays that cannot be gathered into a NumPy array).
    midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape[1:]])

    shifts = cp.array(maxima, dtype=cp.float64)
    ids = cp.where(shifts[:, 0] > midpoints[0])
    shifts[ids[0], 0] -= shape[1]
    ids = cp.where(shifts[:, 1] > midpoints[1])
    shifts[ids[0], 1] -= shape[2]

    if upsample_factor > 1:
        # Initial shift estimate in upsampled grid.
        shifts = np.round(shifts * upsample_factor) / upsample_factor
        upsampled_region_size = np.ceil(upsample_factor * 1.5)
        # Center of output array at dftshift + 1.
        dftshift = np.fix(upsampled_region_size / 2.0)
        normalization = src_freq[0].size * upsample_factor ** 2
        # Matrix-multiply DFT around the current shift estimate.
        sample_region_offset = dftshift - shifts * upsample_factor
        cross_correlation = _upsampled_dft_batch(
            image_product.conj(), upsampled_region_size, upsample_factor,
            sample_region_offset).conj()
        cross_correlation /= normalization
        # Locate maximum and map back to original pixel grid.
        A = cp.abs(cross_correlation)
        maxima = A.reshape(A.shape[0], -1).argmax(1)
        maxima = cp.column_stack(cp.unravel_index(maxima, A[0, :, :].shape))
        maxima = cp.array(maxima, dtype=cp.float64) - dftshift
        shifts = shifts + maxima / upsample_factor

    # If a dimension has extent 1, a shift along it has no effect; zero it.
    # (Skip the leading batch axis: shifts has one column per image axis.)
    for dim in range(1, src_freq.ndim):
        if shape[dim] == 1:
            shifts[:, dim - 1] = 0

    return shifts

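# A hedged usage sketch for register_translation_batch, assuming a CuPy-capable
# GPU and that _upsampled_dft_batch is in scope: recover a known whole-pixel
# shift for a batch of one image. Sign conventions for the returned shift
# differ between implementations, so the expected value may be negated.
import cupy as cp

cp.random.seed(0)
reference = cp.random.rand(1, 64, 64)                    # batch of one image
moving = cp.roll(reference, shift=(3, -5), axis=(1, 2))  # known offset
print(register_translation_batch(reference, moving, upsample_factor=10))
# expected to be close to [[3., -5.]] (or its negation)
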
def _log_polar_mapping(output_coords, k_angle, k_radius, center):
    """Inverse mapping function to convert from Cartesian to polar coordinates.

    Parameters
    ----------
    output_coords : ndarray
        `(M, 2)` array of `(col, row)` coordinates in the output image.
    k_angle : float
        Scaling factor that relates the intended number of rows in the
        output image to angle: ``k_angle = nrows / (2 * np.pi)``.
    k_radius : float
        Scaling factor that relates the radius of the circle bounding the
        area to be transformed to the intended number of columns in the
        output image: ``k_radius = width / math.log(radius)``.
    center : tuple (row, col)
        Coordinates that represent the center of the circle that bounds the
        area to be transformed in an input image.

    Returns
    -------
    coords : ndarray
        `(M, 2)` array of `(col, row)` coordinates in the input image that
        correspond to the `output_coords` given as input.
    """
    angle = output_coords[:, 1] / k_angle
    rr = (cp.exp(output_coords[:, 0] / k_radius)) * cp.sin(angle) + center[0]
    cc = (cp.exp(output_coords[:, 0] / k_radius)) * cp.cos(angle) + center[1]
    coords = cp.column_stack((cc, rr))
    return coords

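# A minimal sanity check of _log_polar_mapping (illustrative values, assuming
# CuPy): a point at angle 0 with log-radius log(10) should land 10 pixels to
# the right of the centre along the column axis.
import math
import cupy as cp

center = (100.0, 100.0)            # (row, col)
k_angle = 360 / (2 * math.pi)      # 360 output rows cover one full turn
k_radius = 200 / math.log(50)      # 200 output cols, bounding radius 50

out = cp.asarray([[k_radius * math.log(10.0), 0.0]])  # (col, row)
print(_log_polar_mapping(out, k_angle, k_radius, center))
# ~ [[110., 100.]]  ->  (col, row) = (center_col + 10, center_row)
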
def test_indices_with_labels(self):
    image = cp.asarray(np.random.uniform(size=(40, 60)))
    i, j = cp.mgrid[0:40, 0:60]
    labels = 1 + (i >= 20) + (j >= 30) * 2
    i, j = cp.mgrid[-3:4, -3:4]
    footprint = i * i + j * j <= 9
    expected = cp.zeros(image.shape, float)
    for imin, imax in ((0, 20), (20, 40)):
        for jmin, jmax in ((0, 30), (30, 60)):
            expected[imin:imax, jmin:jmax] = ndi.maximum_filter(
                image[imin:imax, jmin:jmax], footprint=footprint
            )
    expected = cp.column_stack(cp.nonzero(expected == image))
    expected = expected[cp.argsort(image[tuple(expected.T)])[::-1]]
    result = peak.peak_local_max(
        image,
        labels=labels,
        min_distance=1,
        threshold_rel=0,
        footprint=footprint,
        indices=True,
        exclude_border=False,
    )
    result = result[cp.argsort(image[tuple(result.T)])[::-1]]
    assert (result == expected).all()

def exactFilter(tilt_angles, tiltAngle, sX, sY, sliceWidth, arr=[]):
    """
    exactFilter: Generates the exact weighting function required for
    weighted backprojection - y-axis is tilt axis.

    Reference: Optik, "Exact filters for general geometry three dimensional
    reconstruction", vol. 73, 146, 1986.

    @param tilt_angles: list of all the tilt angles in one tilt series
    @param tiltAngle: tilt angle for which the exact weighting function is calculated
    @param sX: size of weighted image in X
    @param sY: size of weighted image in Y

    @return: filter volume
    """
    from cupy import array, sin, pi, arange, float32, column_stack, clip, \
        ones, ceil

    # Using Friedel symmetry in Fourier space.
    # sY = sY // 2 + 1

    # Calculate the relative angles in radians.
    diffAngles = (array(tilt_angles) - tiltAngle) * pi / 180.

    # The angle closest to tiltAngle (but not tiltAngle itself) sets the
    # maximal frequency of overlap (Crowther's frequency). Weights only need
    # to be calculated up to this frequency.
    sampling = min(abs(diffAngles)[abs(diffAngles) > 0.001])
    crowtherFreq = min(sX // 2, int(ceil(1 / sin(sampling))))
    arrCrowther = abs(arange(-crowtherFreq, min(sX // 2, crowtherFreq + 1)))

    # Calculate weights. CuPy has no ``matrix`` class, so the original
    # column-vector times row-vector matrix product is written as a
    # broadcast outer product instead.
    wfuncCrowther = 1. / clip(
        1 - (abs(sin(diffAngles))[:, None] * arrCrowther[None, :]) ** 2,
        0, 2).sum(axis=0)

    # Create the full filter volume from the weighting function.
    wfunc = ones((sX, sY, 1), dtype=float32)
    wfunc[sX // 2 - crowtherFreq:sX // 2 + min(sX // 2, crowtherFreq + 1), :, 0] = \
        column_stack([wfuncCrowther, ] * sY).astype(float32)
    return wfunc

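# A hedged usage sketch for exactFilter, assuming a CuPy device: weight one
# 0-degree projection of a +/-60 degree tilt series before backprojection.
# The projection is a random placeholder, and whether the centred filter
# needs an ifftshift (and which axis maps to X) depends on the FFT layout
# convention of the surrounding reconstruction code.
import cupy as cp

angles = list(range(-60, 61, 3))     # tilt angles in degrees
proj = cp.random.rand(256, 256)      # stand-in projection at 0 degrees

wfunc = exactFilter(angles, 0, 256, 256, 256)
weighted = cp.fft.ifft2(
    cp.fft.fft2(proj) * cp.fft.ifftshift(wfunc[:, :, 0])).real
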
def _get_high_intensity_peaks(image, mask, num_peaks):
    """
    Return the highest intensity peak coordinates.
    """
    # Get coordinates of peaks.
    coord = cp.nonzero(mask)
    intensities = image[coord]
    # Highest peak first.
    idx_maxsort = cp.argsort(-intensities)
    coord = cp.column_stack(coord)[idx_maxsort]
    # Select num_peaks peaks.
    if len(coord) > num_peaks:
        coord = coord[:num_peaks]
    return coord

def _get_high_intensity_peaks(image, mask, num_peaks, min_distance, p_norm):
    """
    Return the highest intensity peak coordinates.
    """
    # Get coordinates of peaks.
    coord = cp.nonzero(mask)
    intensities = image[coord]
    # Highest peak first.
    idx_maxsort = cp.argsort(-intensities)
    coord = cp.column_stack(coord)[idx_maxsort]
    # Enforce a minimum pairwise distance between the kept peaks.
    coord = ensure_spacing(coord, spacing=min_distance, p_norm=p_norm)
    if len(coord) > num_peaks:
        coord = coord[:num_peaks]
    return coord

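# ensure_spacing is imported from elsewhere in the library; its assumed
# contract is to keep peaks in the given (descending-intensity) order while
# rejecting any peak closer than `spacing` to one already kept. A minimal
# NumPy sketch of that contract, for illustration only:
import numpy as np

def ensure_spacing_sketch(coords, spacing, p_norm=np.inf):
    kept = []
    for c in coords:
        # Keep this peak only if it is far enough from all kept peaks.
        if all(np.linalg.norm(c - k, ord=p_norm) >= spacing for k in kept):
            kept.append(c)
    return np.asarray(kept).reshape(-1, coords.shape[1])
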
def test(self):
    test_images, test_labels = self.load_test()
    # Set test sample size.
    self.SAMPLES_TEST = test_labels.shape[0]
    # Bias
    # test_inputs = np.c_[test_images, np.ones(self.SAMPLES_TEST)]
    # NOTE: Need to add the bias with column_stack for CuPy.
    test_inputs = np.column_stack((test_images, np.ones(self.SAMPLES_TEST)))
    # Copies
    test_target_array = np.zeros(test_labels.shape)
    np.copyto(test_target_array, test_labels)
    # Init predictions array.
    test_predictions = np.zeros((self.SAMPLES_TEST, self.NEURONS))
    # Test samples, but this time only with forward prop.
    for N in range(self.SAMPLES_TEST):
        test_o, test_tar_k, test_prediction_n = self.forward(
            test_inputs[N], test_target_array[N])
        test_predictions[N] = test_prediction_n
    self.CORRECT_TEST_CONF, acc = self.get_confusion(test_target_array,
                                                     test_predictions)
    self.CORRECT_TEST.append(acc)

def get_tsg(self, link_matrix, val_matrix=None, include_neighbors=False):
    """Returns time series graph matrix.

    Constructs a matrix of shape (N*tau_max, N*tau_max) from link_matrix.
    This matrix can be used for plotting the time series graph and
    analyzing causal pathways.

    Parameters
    ----------
    link_matrix : bool array-like, optional (default: None)
        Matrix of significant links. Must be of same shape as val_matrix.
        Either sig_thres or link_matrix has to be provided.
    val_matrix : array_like
        Matrix of shape (N, N, tau_max+1) containing test statistic values.
    include_neighbors : bool, optional (default: False)
        Whether to include causal paths emanating from neighbors of i.

    Returns
    -------
    tsg : array of shape (N*tau_max, N*tau_max)
        Time series graph matrix.
    """
    N = len(link_matrix)
    max_lag = link_matrix.shape[2] + 1

    # Create TSG.
    tsg = np.zeros((N * max_lag, N * max_lag))
    for i, j, tau in np.column_stack(np.where(link_matrix)):
        if tau > 0 or include_neighbors:
            for t in range(max_lag):
                link_start = self.net_to_tsg(i, t - tau, max_lag)
                link_end = self.net_to_tsg(j, t, max_lag)
                if (0 <= link_start
                        and (link_start % max_lag) <= (link_end % max_lag)):
                    if val_matrix is not None:
                        tsg[link_start, link_end] = val_matrix[i, j, tau]
                    else:
                        tsg[link_start, link_end] = 1
    return tsg

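# get_tsg relies on a helper net_to_tsg(i, t, max_lag) that maps variable i
# at time slice t into the flattened (N * max_lag) index space. A plausible
# sketch of that mapping, assumed rather than taken from the library:
def net_to_tsg(i, t, max_lag):
    # Row-major layout: the max_lag time slices of variable i are contiguous.
    return i * max_lag + t
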
def Newtons_method_feasible_init_point(f, A, x_0, tol, tol_backtracking,
                                       x_ast=None, p_ast=None, maxiter=30,
                                       gf_symbolic=None, Hf_symbolic=None,
                                       Sigma=None):
    '''
    Newton's method to numerically approximate the solution of
    min f subject to Ax = b.

    IMPORTANT: this implementation requires that the initial point x_0
    satisfies Ax_0 = b.

    Args:
        f (fun): definition of function f as lambda expression or function
            definition.
        A (numpy ndarray): 2d numpy array of shape (m,n) that defines the
            system of constraints Ax = b.
        x_0 (numpy ndarray): initial point for Newton's method. Must
            satisfy Ax_0 = b.
        tol (float): tolerance that will halt the method. Controls the
            stopping criterion.
        tol_backtracking (float): tolerance that will halt the method.
            Controls the value of the line search by backtracking.
        x_ast (numpy ndarray): solution of min f; currently the user is
            required to know the solution.
        p_ast (float): value of f(x_ast); currently the user is required
            to know the solution.
        maxiter (int): maximum number of iterations.
        gf_symbolic (fun): definition of the gradient of f. If given, no
            approximation is performed via finite differences.
        Hf_symbolic (fun): definition of the Hessian of f. If given, no
            approximation is performed via finite differences.
        Sigma (ndarray): extra argument passed through to gf_symbolic and
            Hf_symbolic.

    Returns:
        x (numpy ndarray): approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): absolute error between p_ast and f(x),
            with x the approximation of x_ast. Useful for plotting.
        x_plot (numpy ndarray): array that contains the vectors of
            approximations as columns. The last column contains x, the
            approximation of the solution. Useful for plotting.
    '''
    iteration = 0
    x = x_0
    feval = f(x)

    if gf_symbolic:
        gfeval = gf_symbolic(x, Sigma)
    else:
        gfeval = gradient_approximation(f, x)

    if Hf_symbolic:
        Hfeval = Hf_symbolic(x, Sigma)
    else:
        Hfeval = Hessian_approximation(f, x)

    normgf = np.linalg.norm(gfeval)
    condHf = solver.utils.condicion_cupy(Hfeval)

    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration] = solver.utils.compute_error(p_ast, feval)

    Err = solver.utils.compute_error(x_ast, x)

    if A.ndim == 1:
        p = 1
        n = x.size
        zero_matrix = cp.zeros(p)
        first_stack = cp.column_stack((Hfeval, A.T))
        second_stack = cp.row_stack(
            (A.reshape(1, n).T, zero_matrix)).reshape(1, n + 1)[0]
    else:
        p, n = A.shape
        zero_matrix = cp.zeros((p, p))
        # Keep everything on device (the original mixed np and cp here).
        first_stack = cp.column_stack((Hfeval, A.T))
        second_stack = cp.column_stack((A, zero_matrix))

    x_plot = cp.zeros((n, maxiter))
    x_plot[:, iteration] = x

    # Assemble and solve the KKT system for the Newton step.
    system_matrix = cp.vstack((first_stack, second_stack))
    zero_vector = cp.zeros(p)
    rhs = cp.vstack((gfeval.reshape(n, 1), zero_vector.reshape(p, 1))).T[0]

    # Newton's direction and Newton's decrement.
    dir_desc = cp.linalg.solve(system_matrix, -rhs)
    dir_Newton = dir_desc[0:n]
    dec_Newton = -gfeval.dot(dir_Newton)
    w_dual_variable_estimation = dir_desc[n:(n + p)]

    print('I\tNormgf \tNewton Decrement\tError x_ast\tError p_ast\t'
          'line search\tCondHf')
    print('{}\t{}\t{}\t{}\t{}\t{}\t\t{}'.format(
        iteration, cp.around(normgf, 4), cp.around(dec_Newton, 4),
        cp.around(Err, 4), cp.around(Err_plot_aux[iteration], 4), "---",
        cp.around(condHf, 4)))

    stopping_criteria = dec_Newton / 2
    iteration += 1

    while stopping_criteria > tol and iteration < maxiter:
        der_direct = -dec_Newton
        t = solver.line_search.line_search_by_backtracking(
            f, dir_Newton, x, der_direct)

        x = x + t * dir_Newton
        feval = f(x)

        if gf_symbolic:
            gfeval = gf_symbolic(x, Sigma)
        else:
            gfeval = gradient_approximation(f, x)

        if Hf_symbolic:
            Hfeval = Hf_symbolic(x, Sigma)
        else:
            Hfeval = Hessian_approximation(f, x)

        # The original branched on A.ndim here, but both branches were
        # identical, so a single statement suffices.
        first_stack = cp.column_stack((Hfeval, A.T))

        system_matrix = cp.vstack((first_stack, second_stack))
        rhs = cp.vstack((gfeval.reshape(n, 1),
                         zero_vector.reshape(p, 1))).T[0]

        # Newton's direction and Newton's decrement.
        dir_desc = cp.linalg.solve(system_matrix, -rhs)
        dir_Newton = dir_desc[0:n]
        dec_Newton = -gfeval.dot(dir_Newton)
        w_dual_variable_estimation = dir_desc[n:(n + p)]

        Err_plot_aux[iteration] = solver.utils.compute_error(p_ast, feval)

        x_plot[:, iteration] = x
        Err = solver.utils.compute_error(x_ast, x)

        print('{}\t{}\t{}\t{}\t{}\t{}\t{}'.format(
            iteration, cp.around(normgf, 4), cp.around(dec_Newton, 4),
            cp.around(Err, 4), cp.around(Err_plot_aux[iteration], 4),
            cp.around(t, 4), cp.around(condHf, 4)))

        stopping_criteria = dec_Newton / 2
        if t < tol_backtracking:
            # If t is less than tol_backtracking then we need to check
            # the reason.
            iter_salida = iteration
            iteration = maxiter - 1
        iteration += 1

    print('{} {}'.format("Error of x with respect to x_ast:", Err))
    print('{} {}'.format("Approximate solution:", x))

    cond = Err_plot_aux > np.finfo(float).eps * 10 ** (-2)
    Err_plot = Err_plot_aux[cond]

    if iteration == maxiter and t < tol_backtracking:
        print("Backtracking value less than tol_backtracking, "
              "check approximation")
        iteration = iter_salida
    else:
        if iteration == maxiter:
            print("Reached maximum of iterations, check approximation")

    x_plot = x_plot[:, :iteration]
    return [x, iteration, Err_plot, x_plot]

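# A hedged usage sketch, assuming the surrounding `solver` package (utils,
# line_search) is importable and a CuPy device is available: minimum-variance
# problem min (1/2) x^T Sigma x subject to x_1 + x_2 = 1. For this Sigma the
# analytic solution is x_ast = [0.25, 0.75] with p_ast = 0.4375.
import cupy as cp

Sigma = cp.asarray([[2.0, 0.5], [0.5, 1.0]])
f = lambda x: 0.5 * x @ (Sigma @ x)
gf = lambda x, S: S @ x          # gradient of f
Hf = lambda x, S: S              # Hessian of f

A = cp.ones(2)                   # single constraint row: x_1 + x_2 = 1
x_0 = cp.asarray([0.5, 0.5])     # feasible start: A @ x_0 = 1

x, it, err, xs = Newtons_method_feasible_init_point(
    f, A, x_0, tol=1e-8, tol_backtracking=1e-14,
    x_ast=cp.asarray([0.25, 0.75]), p_ast=0.4375,
    gf_symbolic=gf, Hf_symbolic=Hf, Sigma=Sigma)
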
def test_column_stack_wrong_shape(self):
    a = cupy.zeros((3, 2))
    b = cupy.zeros((4, 3))
    with self.assertRaises(ValueError):
        cupy.column_stack((a, b))

def test_column_stack_wrong_ndim1(self):
    a = cupy.zeros(())
    b = cupy.zeros((3,))
    with self.assertRaises(ValueError):
        cupy.column_stack((a, b))

import random
import time

import cupy as cp

tempwt = []
tempval = []
for i in range(3000000):
    tempwt.append(random.randint(1, 100))
    tempval.append(i + 1)

wt = cp.array(tempwt)
val = cp.array(tempval)
capacity = 5000000

start = time.time()
cost = cp.floor_divide(val, wt)
iVal = cp.column_stack((wt, val, cost))

# Sort items by value/weight ratio (the cost column), highest first.
sorted_iVal = iVal[cp.argsort(-iVal[:, -1])]
iVal_cpu = cp.asnumpy(sorted_iVal)

# print(iVal)
# print(sorted_iVal)
# print("Finding Max")
maxValue = getMaxValue(iVal_cpu, capacity)
t = time.time() - start
print(t)
print("Maximum value in Knapsack =", maxValue)

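# getMaxValue is not defined in this snippet. Given the ratio sort above, a
# plausible fractional-knapsack greedy sketch is shown below; this is an
# assumption for illustration, not the original implementation.
def getMaxValue(items, capacity):
    total = 0.0
    for wt_i, val_i, _cost in items:   # rows are (weight, value, ratio)
        if capacity <= 0:
            break
        take = min(wt_i, capacity)     # take as much of this item as fits
        total += val_i * (take / wt_i)
        capacity -= take
    return total
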
def train(self):
    # Initialize previous-update arrays (for momentum).
    u_h = np.zeros(self.w_h.shape)
    u_o = np.zeros(self.w_o.shape)

    for e in range(self.EPOCHS):
        x, t = self.load()

        # Check that our set of training data is evenly distributed.
        # NOTE: check_balanced() won't work because CuPy doesn't have
        # random.get_state
        # while self.check_balanced(t) == False:
        #     x, t = self.load()

        # Add bias, copy arrays over.
        # inputs = np.c_[x, np.ones(self.SAMPLES)]
        inputs = np.column_stack((x, np.ones(self.SAMPLES)))
        target_array = np.zeros(t.shape)
        np.copyto(target_array, t)

        # Hold onto our predictions.
        predictions = np.zeros((self.SAMPLES, self.NEURONS))

        print("\n\nEpoch:%s lr:%s n:%s N:%s alpha:%s"
              % (e + 1, self.LR, self.HIDDEN, self.SAMPLES, self.ALPHA))

        for N in range(self.SAMPLES):
            # Forward prop.
            n_o, target_k, prediction_n = self.forward(inputs[N],
                                                       target_array[N])

            # Backprop: summations on weights use dots of hidden/output
            # errors.
            # Output deltas
            o_deltas = n_o * (1 - n_o) * (target_k - n_o)
            # Hidden deltas
            h_deltas = self.n_h * (1 - self.n_h) * np.dot(
                o_deltas, np.transpose(self.w_o))

            # Hidden-to-output weight update (with momentum).
            u_o = (self.LR * np.dot(
                np.transpose(np.array(self.n_h)[np.newaxis]),
                np.array(o_deltas)[np.newaxis])
                + self.ALPHA * u_o)
            self.w_o += u_o

            # Input-to-hidden weight update (with momentum).
            u_h = (self.LR * np.dot(
                np.transpose(inputs[N][np.newaxis]),
                np.array(h_deltas[:-1])[np.newaxis])
                + self.ALPHA * u_h)  # - self.LAMBDA*u_h
            self.w_h += u_h

            # Add predictions of sample to the predictions array to compute
            # the confusion matrix.
            predictions[N] = prediction_n

        # Append the returned accuracy to the correctness array.
        self.CORRECT_CONF, train_acc = self.get_confusion(target_array,
                                                          predictions)
        self.CORRECT.append(train_acc)
        # print(self.CORRECT)

        # Run tests after every epoch.
        print("\nRunning test")
        self.test()

def test_column_stack_wrong_ndim2(self):
    a = cupy.zeros((3, 2, 3))
    b = cupy.zeros((3, 2))
    with pytest.raises(ValueError):
        cupy.column_stack((a, b))

def moments_coords_central(coords, center=None, order=3):
    """Calculate all central image moments up to a certain order.

    The following properties can be calculated from raw image moments:
    * Area as: ``M[0, 0]``.
    * Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.

    Note that raw moments are neither translation, scale nor rotation
    invariant.

    Parameters
    ----------
    coords : (N, D) double or uint8 array
        Array of N points that describe an image of D dimensionality in
        Cartesian space. A tuple of coordinates as returned by
        ``cp.nonzero`` is also accepted as input.
    center : tuple of float, optional
        Coordinates of the image centroid. This will be computed if it is
        not provided.
    order : int, optional
        Maximum order of moments. Default is 3.

    Returns
    -------
    Mc : (``order + 1``, ``order + 1``, ...) array
        Central image moments. (D dimensions)

    References
    ----------
    .. [1] Johannes Kilian. Simple Image Analysis By Moments. Durham
           University, version 0.2, Durham, 2001.

    Examples
    --------
    >>> coords = cp.array([[row, col]
    ...                    for row in range(13, 17)
    ...                    for col in range(14, 18)])
    >>> moments_coords_central(coords)
    array([[16.,  0., 20.,  0.],
           [ 0.,  0.,  0.,  0.],
           [20.,  0., 25.,  0.],
           [ 0.,  0.,  0.,  0.]])

    As seen above, for symmetric objects, odd-order moments (columns 1 and
    3, rows 1 and 3) are zero when centered on the centroid, or center of
    mass, of the object (the default). If we break the symmetry by adding
    a new point, this no longer holds:

    >>> coords2 = cp.concatenate((coords, [[17, 17]]), axis=0)
    >>> cp.round(moments_coords_central(coords2),
    ...          decimals=2)  # doctest: +NORMALIZE_WHITESPACE
    array([[17.  ,  0.  , 22.12, -2.49],
           [ 0.  ,  3.53,  1.73,  7.4 ],
           [25.88,  6.02, 36.63,  8.83],
           [ 4.15, 19.17, 14.8 , 39.6 ]])

    Image moments and central image moments are equivalent (by definition)
    when the center is (0, 0):

    >>> cp.allclose(moments_coords(coords),
    ...             moments_coords_central(coords, (0, 0)))
    True
    """
    if isinstance(coords, tuple):
        # This format corresponds to coordinate tuples as returned by e.g.
        # cp.nonzero: (row_coords, column_coords). We represent them as an
        # npoints x ndim array.
        coords = cp.column_stack(coords)
    check_nD(coords, 2)
    ndim = coords.shape[1]

    if center is None:
        center = cp.mean(coords, axis=0)
    else:
        center = cp.asarray(center)

    # Center the coordinates.
    coords = coords.astype(float) - center

    # Note: for efficiency, sum over the last axis
    # (which is memory contiguous).
    # Generate all possible exponents for each axis in the given set of
    # points; produces a matrix of shape (order + 1, D, N).
    coords = coords.T
    powers = cp.arange(order + 1)[:, cp.newaxis, cp.newaxis]
    coords = coords[cp.newaxis, ...] ** powers

    # Add extra dimensions for proper broadcasting.
    coords = coords.reshape((1,) * (ndim - 1) + coords.shape)

    calc = cp.moveaxis(coords[..., 0, :], -2, 0)

    for axis in range(1, ndim):
        # Isolate each point's axis.
        isolated_axis = coords[..., axis, :]
        # Rotate orientation of matrix for proper broadcasting.
        isolated_axis = cp.moveaxis(isolated_axis, -2, axis)
        # Calculate the moments for each point, one axis at a time.
        calc = calc * isolated_axis

    # Sum all individual point moments to get our final answer.
    Mc = cp.sum(calc, axis=-1)

    return Mc

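# A hedged usage sketch, assuming moments_coords_central and its helpers are
# in scope with CuPy available: central moments of a binary blob straight
# from its nonzero coordinates (the tuple form the docstring accepts).
import cupy as cp

img = cp.zeros((32, 32), dtype=bool)
img[10:20, 12:18] = True                       # a 10 x 6 rectangle
mu = moments_coords_central(cp.nonzero(img))
print(mu[0, 0])                                # area of the blob: 60 points
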