def yangDistributionDifference(aNeg, bNeg, aPos, bPos, p=1):
    """
    Eq. (7) from:
    Yang, R., Jiang, Y., Mathews, S. et al.
    Data Min Knowl Disc (2019) 33: 995.
    https://doi.org/10.1007/s10618-019-00622-6
    """
    sampleSize = 1000
    negSample = xp.random.beta(aNeg, bNeg, sampleSize)
    posSample = xp.random.beta(aPos, bPos, sampleSize)

    negPDF_NEG, posPDF_NEG, pdfDiffPos_NEG, pdfDiffNeg_NEG, pdfMax_NEG = calcDifference(
        negSample, aNeg, bNeg, aPos, bPos)
    negPDF_POS, posPDF_POS, pdfDiffPos_POS, pdfDiffNeg_POS, pdfMax_POS = calcDifference(
        posSample, aNeg, bNeg, aPos, bPos)

    numerator1 = xp.mean(pdfDiffNeg_NEG / negPDF_NEG)
    numerator2 = xp.mean(pdfDiffPos_POS / posPDF_POS)
    sumVecs = xp.power(numerator1, xp.ones_like(numerator1) * p) + xp.power(
        numerator2, xp.ones_like(numerator2) * p)
    dPHat = xp.power(sumVecs, xp.ones_like(sumVecs) * (1 / p))

    dTermNeg = (posPDF_NEG * 0.5) + (negPDF_NEG * 0.5)
    dTermPos = (posPDF_POS * 0.5) + (negPDF_POS * 0.5)
    denominator = (xp.sum(pdfMax_NEG / dTermNeg) +
                   xp.sum(pdfMax_POS / dTermPos)) / (2 * sampleSize)
    return dPHat / denominator
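# A minimal usage sketch of the function above (my addition, not from the
# original source). It assumes `xp` is bound to numpy or cupy and that
# `calcDifference` (the companion helper that evaluates the two Beta pdfs,
# their positive/negative differences, and their pointwise maximum on a
# sample) is defined elsewhere in the same module.
import numpy as xp  # or: import cupy as xp

# distance between a negative class ~ Beta(2, 5) and a positive class ~ Beta(5, 2)
d = yangDistributionDifference(2.0, 5.0, 5.0, 2.0, p=2)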
def test_cross_correlate_masked_output_shape():
    """Masked normalized cross-correlation should return a shape
    of N + M - 1 for each transform axis."""
    shape1 = (15, 4, 5)
    shape2 = (6, 12, 7)
    expected_full_shape = tuple(np.array(shape1) + np.array(shape2) - 1)
    expected_same_shape = shape1

    arr1 = cp.zeros(shape1)
    arr2 = cp.zeros(shape2)
    # Trivial masks
    m1 = cp.ones_like(arr1)
    m2 = cp.ones_like(arr2)

    full_xcorr = cross_correlate_masked(
        arr1, arr2, m1, m2, axes=(0, 1, 2), mode="full")
    assert full_xcorr.dtype.kind != "c"  # grlee77: output should be real
    assert full_xcorr.shape == expected_full_shape

    same_xcorr = cross_correlate_masked(
        arr1, arr2, m1, m2, axes=(0, 1, 2), mode="same")
    assert same_xcorr.shape == expected_same_shape
def triple_phase_boundary(img):
    phases = cp.unique(cp.asarray(img))
    if len(phases) != 3:
        return None
    shape = img.shape
    dim = len(shape)
    ph_maps = []
    img = cp.pad(cp.asarray(img), 1, 'constant', constant_values=-1)
    if dim == 2:
        x, y = shape
        total_edges = (x - 1) * (y - 1)
        for ph in phases:
            ph_map = cp.zeros_like(img)
            ph_map_temp = cp.zeros_like(img)
            ph_map_temp[img == ph] = 1
            for i in [0, 1]:
                for j in [0, 1]:
                    ph_map += cp.roll(cp.roll(ph_map_temp, i, 0), j, 1)
            ph_maps.append(ph_map)
        tpb_map = cp.ones_like(img)
        for ph_map in ph_maps:
            tpb_map *= ph_map
        tpb_map[tpb_map > 1] = 1
        tpb_map = tpb_map[1:-1, 1:-1]
        tpb = np.sum(tpb_map)
    else:
        tpb = 0
        x, y, z = shape
        total_edges = (z * (x - 1) * (y - 1)
                       + x * (y - 1) * (z - 1)
                       + y * (x - 1) * (z - 1))
        for d in range(dim):
            ph_maps = []
            for ph in phases:
                ph_map = cp.zeros_like(img)
                ph_map_temp = cp.zeros_like(img)
                ph_map_temp[img == ph] = 1
                for i in [0, 1]:
                    for j in [0, 1]:
                        d1 = (d + 1) % 3
                        d2 = (d + 2) % 3
                        ph_map += cp.roll(cp.roll(ph_map_temp, i, d1), j, d2)
                ph_maps.append(ph_map)
            tpb_map = cp.ones_like(img)
            for ph_map in ph_maps:
                tpb_map *= ph_map
            tpb_map[tpb_map > 1] = 1
            tpb_map = tpb_map[1:-1, 1:-1, 1:-1]
            tpb += np.sum(tpb_map)
    return tpb / total_edges
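# Hypothetical usage sketch (not from the original source): estimate the
# triple-phase-boundary density of a random three-phase volume. The return
# value is the fraction of cell edges shared by all three phases; this assumes
# all three labels actually occur in the sample.
import cupy as cp

img = cp.random.randint(0, 3, size=(32, 32, 32))
tpb_density = triple_phase_boundary(img)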
def test_cross_correlate_masked_test_against_mismatched_dimensions():
    """Masked normalized cross-correlation should raise an error
    if array dimensions along non-transformation axes are mismatched."""
    shape1 = (23, 1, 1)
    shape2 = (6, 2, 2)

    arr1 = cp.zeros(shape1)
    arr2 = cp.zeros(shape2)

    # Trivial masks
    m1 = cp.ones_like(arr1)
    m2 = cp.ones_like(arr2)

    with pytest.raises(ValueError):
        cross_correlate_masked(arr1, arr2, m1, m2, axes=(1, 2))
def __init__(self, basis, lows, highs, resolutions, fine_all=False, linspace=False):
    # Grids
    self.x = Grid1D(low=lows[0], high=highs[0], res=resolutions[0],
                    basis=basis.basis_x, spectrum=True,
                    fine=fine_all, linspace=linspace)
    self.y = Grid1D(low=lows[1], high=highs[1], res=resolutions[1],
                    basis=basis.basis_y, spectrum=True,
                    fine=fine_all, linspace=linspace)

    # Resolutions (including ghost cells) and polynomial orders
    self.res_ghosts = [self.x.res_ghosts, self.y.res_ghosts]
    self.orders = [self.x.order, self.y.order]

    # Spectral radius squared (for the Laplacian)
    self.kr_sq = (outer2(self.x.d_wave_numbers,
                         cp.ones_like(self.y.d_wave_numbers)) ** 2.0 +
                  outer2(cp.ones_like(self.x.d_wave_numbers),
                         self.y.d_wave_numbers) ** 2.0)

    # Wave number vector
    self.wave_numbers = cp.array([self.x.d_wave_numbers, self.y.d_wave_numbers])
def liqlatt(self, sigma_A, gamma_A):
    '''Apply liquidization transform to reciprocal lattice'''
    if self.abc[0] == 0:
        msg = 'Provide rlatt_vox to apply liqlatt (recip. lattice voxel dimensions)'
        raise AttributeError(msg)

    s_sq = (2 * cp.pi * sigma_A * self.dgen.qrad)**2
    n_max = 0
    if self.slimits.max() > 2 * np.pi * sigma_A / self.res_max:
        n_max = np.where(
            self.slimits > 2. * np.pi * sigma_A / self.res_max)[0][0] + 1

    if n_max == 0:
        #bzone = cp.zeros(self.abc)
        #bzone[self.abc[0]//2, self.abc[1]//2, self.abc[2]//2] = 1
        #return cp.tile(bzone, ncells)
        print('Returning ones array')
        return cp.ones_like(s_sq)

    liq = cp.zeros_like(s_sq)
    for n in range(1, n_max):
        weight = cp.exp(-s_sq + n * cp.log(s_sq) - float(special.loggamma(n + 1)))
        factor = self.corrdisp_factor(gamma_A / n)
        liq += weight * factor
        sys.stderr.write('\rLiquidizing: %d/%d' % (n, n_max - 1))
    sys.stderr.write('\n')
    return liq
def stack_backward(gradient: Tensor, tensors: List[Tensor], axis: int = 0):
    _check_tensors(*tensors)
    engine = _get_engine(*tensors)

    grad_arrays = engine.split(gradient.data, len(tensors), axis=axis)
    for idx, tensor in enumerate(tensors):
        _set_grad(tensor, data=grad_arrays[idx] * engine.ones_like(tensor.data))
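# A numpy-only sketch of the split logic above (hypothetical, outside the
# Tensor framework): the gradient of a stack is undone by splitting the
# upstream gradient back along `axis`. Note that each slice keeps the stacked
# axis as a singleton dimension, which `engine.ones_like(tensor.data)` then
# broadcasts against.
import numpy as np

upstream = np.ones((3, 2, 2))            # gradient of stack([t0, t1, t2], axis=0)
pieces = np.split(upstream, 3, axis=0)   # one (1, 2, 2) slice per input tensor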
def test_prior_in_posteriors(self):
    for frame in self.data:
        frame["prior"] = 1
    like = HyperparameterLikelihood(posteriors=self.data, hyper_prior=self.model)
    self.assertTrue(
        xp.array_equal(like.sampling_prior, xp.ones_like(like.data["a"])))
def curvature_to_height(image, h2, iterations=2000):
    f = image[..., 0]
    A = image[..., 3]

    u = cup.ones_like(f) * 0.5

    k = 1
    t = cup.empty_like(u, dtype=cup.float32)

    # periodic Gauss-Seidel iteration
    for ic in range(iterations):
        if ic % 100 == 0:
            print(ic)

        # roll k, axis=0
        t[:-k, :] = u[k:, :]
        t[-k:, :] = u[:k, :]
        # roll -k, axis=0
        t[k:, :] += u[:-k, :]
        t[:k, :] += u[-k:, :]
        # roll k, axis=1
        t[:, :-k] += u[:, k:]
        t[:, -k:] += u[:, :k]
        # roll -k, axis=1
        t[:, k:] += u[:, :-k]
        t[:, :k] += u[:, -k:]

        t -= h2 * f
        t *= 0.25
        u = t * A

    u = -u
    u -= cup.min(u)
    u /= cup.max(u)

    return cup.dstack([u, u, u, image[..., 3]])
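# Hypothetical usage (assumes `cup` is the cupy module, as in the function
# above): recover a normalized height map from the curvature stored in the
# red channel of an RGBA image, weighted by its alpha channel.
import cupy as cup

rgba = cup.random.rand(64, 64, 4).astype(cup.float32)
height_rgba = curvature_to_height(rgba, h2=0.1, iterations=200)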
def forward(self, bottom, top):
    self.label = cp.asarray(copy.deepcopy(bottom[1].data), cp.uint8)
    prob = cp.asarray(copy.deepcopy(bottom[0].data), cp.float64)
    prob = cp.subtract(prob, cp.max(prob, axis=1)[:, cp.newaxis, ...])
    prob = cp.exp(prob)
    self.softmax = cp.divide(prob, cp.sum(prob, axis=1)[:, cp.newaxis, ...])

    ## mask
    self.weight_mask = cp.ones_like(self.label, cp.float64)
    for weight_id in self.weight_dic:
        self.weight_mask[self.label == weight_id] = self.weight_dic[weight_id]
    if self.has_ignore_label:
        self.weight_mask[self.label == self.ignore_label] = 0
    # self.weight_mask[self.label == 0] = 0.3
    # self.weight_mask[self.label == 1] = 0.25
    # self.weight_mask[self.label == 2] = 5
    # self.weight_mask[self.label == 4] = 2
    self.label[self.label == 3] = 2

    compute_count = self.weight_mask[self.weight_mask != 0].size

    ## normalize mask
    self.weight_mask = cp.divide(self.weight_mask,
                                 cp.divide(cp.sum(self.weight_mask), compute_count))

    ## compute loss
    prob_compute_matrix = copy.deepcopy(
        self.softmax[self.index_0, self.label, self.index_2, self.index_3])
    prob_compute_matrix[prob_compute_matrix < 1e-10] = 1e-10
    loss = -cp.divide(
        cp.sum(cp.multiply(cp.log(prob_compute_matrix), self.weight_mask)),
        compute_count)
    loss = cp.asnumpy(loss)
    top[0].data[...] = loss
def forward(self, x):
    self.mask = (cp.absolute(x) > 1)
    out = cp.ones_like(x, dtype=np.float32)
    out[x < 0.5] = 0
    out[x < -0.5] = -1
    return out
def _generate_init_mask(I, T):
    """
    Generate mask estimation based on SML.

    Parameters
    ----------
    I : list of np.ndarray
        List of original raw images.
    T : float
        Blur level criterion.
    """
    # temporary buffer for SML
    S = cp.array(I[0])

    nelem = S.size
    n_threads = 1024
    block_sz = (n_threads, )
    grid_sz = (int(ceil(nelem / n_threads)), )

    M, V = cp.ones_like(S, dtype=cp.int32), sml(S, T)
    for i, iI in enumerate(I[1:], 2):
        S = cp.array(iI)
        S = sml(S, T)
        keep_max_kernel(grid_sz, block_sz, (M, V, S, nelem, i))
    return cp.asnumpy(M)
def all_pair_dist_cuda(X1, X2, feat, metric='cosine'):
    if metric == 'cosine':
        norm1 = cp.einsum('ij, ij->i', X1, X1)
        norm1 = cp.sqrt(norm1, norm1).reshape(-1, 1)
        norm2 = cp.einsum('ij, ij->i', X2, X2)
        norm2 = cp.sqrt(norm2, norm2).reshape(-1, 1)
        # note: this branch returns the cosine similarity matrix, not a distance
        return cp.dot(X2 / norm2, (X1 / norm1).T)
    else:
        n1 = len(X1)
        n2 = len(X2)
        nf = len(feat)
        feat = cp.array(feat)
        X1 = cp.array(X1.reshape(1, n1, -1))
        X2 = cp.array(X2.reshape(1, n2, -1))
        mat1 = cp.repeat(X1, n2, axis=0)
        mat2 = cp.repeat(X2.reshape(n2, 1, nf), n1, axis=1)
        isbow = cp.repeat(
            cp.repeat((feat < 2100).reshape(1, 1, nf), n1, axis=1), n2, axis=0)
        count_mat = nf - cp.sum((mat1 == 0) & (mat2 == 0) & isbow, axis=2)
        zeros = (count_mat != 0)
        dist_mat = cp.ones_like(count_mat)
        # mean absolute difference over features that are not jointly zero
        dist_mat[zeros] = cp.sum(cp.abs(mat1 - mat2), axis=2)[zeros] / count_mat[zeros]
        return cp.asnumpy(dist_mat)
def test_transform_resnet50(self):
    """AdaLossScaled should preserve the forward result of ResNet50 and
    attach a loss scale to the input gradient."""
    cp.random.seed(0)
    cp.cuda.Device(0).use()

    with chainer.using_config('dtype', chainer.mixed16):
        net = ResNet50(arch='he')  # stride_first is True
        net.to_gpu()
        x = chainer.Variable(
            cp.random.normal(size=(1, 3, 224, 224)).astype('float16'))
        y1 = net(x)

        net_ = AdaLossScaled(net,
                             init_scale=16.,
                             transforms=[
                                 AdaLossTransformLinear(),
                                 AdaLossTransformBottleneck(),
                                 AdaLossTransformConv2DBNActiv(),
                             ],
                             verbose=False,
                             cfg=CFG)
        net_.to_gpu()
        y2 = net_(x)
        self.assertTrue(cp.allclose(y1.array, y2.array))

        y2.grad = cp.ones_like(y2.array, dtype='float16')
        y2.backward()
        self.assertTrue('loss_scale' in x.grad_var.__dict__)
        self.assertEqual(x.grad_var.__dict__['loss_scale'],
                         CFG['accum_upper_bound'])
def lrelu(x, alpha=0.01, derivative=False):
    res = x
    if derivative:
        dx = np.ones_like(res)
        dx[res < 0] = alpha
        return dx
    else:
        # note: the third argument is `out`, so the result is written into x in place
        return np.maximum(x, x * alpha, x)
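# Usage sketch (assumes numpy as np, matching the function above). The
# derivative is queried before the forward pass because the forward pass
# overwrites x in place.
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
dx = lrelu(x, derivative=True)   # array([0.01, 0.01, 1., 1.])
y = lrelu(x)                     # max(x, 0.01 * x), written back into x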
def test_background_one_region_center(self):
    x = cp.zeros((3, 3, 3), int)
    x[1, 1, 1] = 1

    lb = cp.ones_like(x) * BG
    lb[1, 1, 1] = 1

    assert_array_equal(label(x, connectivity=1, background=0), lb)
def prob(self, dataset, axis=None):
    self._parameter_conversion(dataset)  # Do parameter conversion regardless
    p = 1
    for i in [1, 2]:
        p *= bilby.core.prior.Uniform(
            name='magn', minimum=0, maximum=1).prob(dataset["a_{}".format(i)])
        p *= bilby.core.prior.Sine(name='tilt').prob(dataset["tilt_{}".format(i)])
        # Independent of how one defines the domain
        p *= xp.ones_like(dataset["a_{}".format(i)]) * 1. / (2 * np.pi)
    return xp.array(p)
def scaled_dot_product_attention(queries, keys, values, scale=1., mask=None):
    x1 = F.matmul(queries, keys, transb=True) * xp.array(scale, dtype=keys.dtype)
    x2 = F.where(mask, xp.ones_like(x1.array) * -xp.inf, x1) if mask is not None else x1
    x3 = F.softmax(x2, axis=-1)
    x4 = F.matmul(x3, values)
    return x4
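# Hypothetical usage (assumes `import chainer.functions as F` and `xp` bound
# to numpy or cupy, matching the surrounding module): single-head attention
# over a batch of sequences, with the usual 1/sqrt(d_k) scaling.
import numpy as xp

q = xp.random.rand(2, 5, 8).astype(xp.float32)  # (batch, n_queries, d_k)
k = xp.random.rand(2, 7, 8).astype(xp.float32)  # (batch, n_keys, d_k)
v = xp.random.rand(2, 7, 8).astype(xp.float32)  # (batch, n_keys, d_v)
attn = scaled_dot_product_attention(q, k, v, scale=8 ** -0.5)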
def generate_q_u_matrix(x_coordinate: cp.array, y_coordinate: cp.array) -> tuple:
    # note: relies on module-level globals h, w (frame size), coeff (the 4x4
    # spline basis matrix), frame_n and mag
    flatten_flag = x_coordinate.ndim > 1
    if flatten_flag:
        x_coordinate = x_coordinate.flatten()
        y_coordinate = y_coordinate.flatten()

    t, u = cp.modf(y_coordinate)
    u = u.astype(int)
    uy = cp.vstack([
        cp.minimum(cp.maximum(u - 1, 0), h - 1),
        cp.minimum(cp.maximum(u, 0), h - 1),
        cp.minimum(cp.maximum(u + 1, 0), h - 1),
        cp.minimum(cp.maximum(u + 2, 0), h - 1),
    ]).astype(int)
    Qy = cp.dot(
        coeff,
        cp.vstack([
            cp.ones_like(t, dtype=cp.float32), t, cp.power(t, 2), cp.power(t, 3)
        ]))

    t, u = cp.modf(x_coordinate)
    u = u.astype(int)
    ux = cp.vstack([
        cp.minimum(cp.maximum(u - 1, 0), w - 1),
        cp.minimum(cp.maximum(u, 0), w - 1),
        cp.minimum(cp.maximum(u + 1, 0), w - 1),
        cp.minimum(cp.maximum(u + 2, 0), w - 1),
    ])
    Qx = cp.dot(
        coeff,
        cp.vstack([
            cp.ones_like(t, dtype=cp.float32), t, cp.power(t, 2), cp.power(t, 3)
        ]))

    if flatten_flag:
        Qx = Qx.reshape(4, frame_n, int(w * mag)).transpose(1, 0, 2).copy()
        Qy = Qy.reshape(4, frame_n, int(h * mag)).transpose(1, 0, 2).copy()
        ux = ux.reshape(4, frame_n, int(w * mag)).transpose(1, 0, 2).copy()
        uy = uy.reshape(4, frame_n, int(h * mag)).transpose(1, 0, 2).copy()
    return Qx, Qy, ux, uy
def __init__(self, basis_number, extended_basis_number, t_start=0, t_end=1, mempool=None):
    self.basis_number = basis_number
    self.extended_basis_number = extended_basis_number
    self.basis_number_2D = (2*basis_number-1)*basis_number
    self.basis_number_2D_ravel = (2*basis_number*basis_number-2*basis_number+1)
    self.basis_number_2D_sym = (2*basis_number-1)*(2*basis_number-1)
    self.extended_basis_number_2D = (2*extended_basis_number-1)*extended_basis_number
    self.extended_basis_number_2D_sym = (2*extended_basis_number-1)*(2*extended_basis_number-1)
    self.t_end = t_end
    self.t_start = t_start
    self.verbose = True
    if mempool is None:
        mempool = cp.get_default_memory_pool()
        # pinned_mempool = cp.get_default_pinned_memory_pool()

    # self.ix = cp.zeros((2*self.basis_number-1, 2*self.basis_number-1), dtype=cp.int32)
    # self.iy = cp.zeros((2*self.basis_number-1, 2*self.basis_number-1), dtype=cp.int32)
    temp = cp.arange(-(self.basis_number-1), self.basis_number, dtype=cp.int32)
    # for i in range(2*self.basis_number-1):
    #     self.ix[i, :] = temp
    #     self.iy[:, i] = temp
    self.ix, self.iy = cp.meshgrid(temp, temp)
    if self.verbose:
        print("Used bytes so far, after creating ix and iy {}".format(mempool.used_bytes()))

    self.Ddiag = -(2*util.PI)**2*(self.ix.ravel(ORDER)**2 + self.iy.ravel(ORDER)**2)
    self.OnePerDdiag = 1/self.Ddiag
    self.Dmatrix = cpx.scipy.sparse.diags(self.Ddiag, dtype=cp.float32)
    if self.verbose:
        print("Used bytes so far, after creating Dmatrix {}".format(mempool.used_bytes()))

    # self.Imatrix = cp.eye((2*self.basis_number-1)**2, dtype=cp.int8)
    self.Imatrix = cpx.scipy.sparse.identity((2*self.basis_number-1)**2, dtype=cp.float32)
    self.Imatrix_dense = cp.eye((2*self.basis_number-1)**2, dtype=cp.float32)
    if self.verbose:
        print("Used bytes so far, after creating Imatrix {}".format(mempool.used_bytes()))

    # K matrix --> maps the 1D Fourier index to the 2D Fourier index
    ones_temp = cp.ones_like(temp)
    self.Kmatrix = cp.vstack((cp.kron(temp, ones_temp), cp.kron(ones_temp, temp)))
    if self.verbose:
        print("Used bytes so far, after creating Kmatrix {}".format(mempool.used_bytes()))

    # FFT plans are created lazily
    self._plan_fft2 = None
    # self._fft2_axes = (-2, -1)
    self._plan_ifft2 = None

    x = np.concatenate((np.arange(self.basis_number)+1, np.zeros(self.basis_number-1)))
    toep = sla.toeplitz(x)
    self._Umask = cp.asarray(np.kron(toep, toep), dtype=cp.int16)
def test_background_one_region_center(self):
    x = cp.zeros((3, 3, 3), int)
    x[1, 1, 1] = 1

    lb = cp.ones_like(x) * BG
    lb[1, 1, 1] = 1

    with expected_warnings(["use 'connectivity'"]):
        assert_array_equal(label(x, neighbors=4, background=0), lb)
    assert_array_equal(label(x, connectivity=1, background=0), lb)
def forward(self, bottom, top):
    self.label = cp.asarray(copy.deepcopy(bottom[1].data), cp.uint8)
    prob = cp.asarray(copy.deepcopy(bottom[0].data), cp.float64)
    prob = cp.subtract(prob, cp.max(prob, axis=1)[:, cp.newaxis, ...])
    prob = cp.exp(prob)
    self.softmax = cp.divide(prob, cp.sum(prob, axis=1)[:, cp.newaxis, ...])

    ## mask
    self.weight_mask = cp.ones_like(self.label, cp.float64)
    for weight_id in self.weight_dic:
        self.weight_mask[self.label == weight_id] = self.weight_dic[weight_id]
    if self.has_ignore_label:
        self.weight_mask[self.label == self.ignore_label] = 0

    # num_total = 15422668800
    # empty_num = 3679002314
    # road_num = 10565335603
    # ped_num = 99066996
    # car_num = 995347874
    # self.label[self.label == 3] = 2
    # w_empty = float((num_total-empty_num)/num_total)
    # w_road = float((num_total-road_num)/num_total)
    # w_ped = float((num_total-ped_num)/num_total)
    # w_car = float((num_total-car_num)/num_total)
    # print(w_empty)
    # print(w_road)
    # print(w_ped)
    # print(w_car)
    # empty: 0.3
    # road: 0.25
    self.weight_mask[self.label == 0] = 0.3
    self.weight_mask[self.label == 1] = 0.25
    self.weight_mask[self.label == 3] = 0.5
    # self.weight_mask[self.label == 2] = w_ped
    # self.weight_mask[self.label == 4] = w_car

    compute_count = self.weight_mask[self.weight_mask != 0].size

    ## normalize mask
    self.weight_mask = cp.divide(self.weight_mask,
                                 cp.divide(cp.sum(self.weight_mask), compute_count))

    ## compute loss
    prob_compute_matrix = copy.deepcopy(
        self.softmax[self.index_0, self.label, self.index_2, self.index_3])
    prob_compute_matrix[prob_compute_matrix < 1e-10] = 1e-10
    loss = -cp.divide(
        cp.sum(cp.multiply(cp.log(prob_compute_matrix), self.weight_mask)),
        compute_count)
    loss = cp.asnumpy(loss)
    top[0].data[...] = loss
def fft_zdist(Q, S, epsilon, alignment=10000, Kahan=0):
    """
    Rolling mean- and amplitude-adjusted Euclidean distance, using an FFT
    to run in loglinear time.

    Equation exploiting the cross-correlation (Fourier) theorem:

    d[k] = sum_i (f(Q[i]) - f(S[i+k]))**2
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*f(S[i+k]) + f(S[i+k])**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*(S[i+k]-mu[k]) + (S[i+k]-mu[k])**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + 2*f(Q[i])*mu[k] + (S[i+k]-mu[k])**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + (S[i+k]-mu[k])**2)
           since sum_i f(Q[i]) = 0 by definition
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + S[i+k]**2 - 2*S[i+k]*mu[k] + mu[k]**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + S[i+k]**2 - 2*|Q|*mu[k]*mu[k] + mu[k]**2)
         = sum_i f(Q[i])**2 - 2*correlation(k) + Y[k] - 2*X[k]**2/|Q| + X[k]**2/|Q|
         = sum_i f(Q[i])**2 - 2*correlation(k) + Y[k] - X[k]**2/|Q|
         = sum_i f(Q[i])**2 - 2*correlation(k) + |Q|*variance[k]

    Arguments:
    -------
    Q: cupy.core.core.ndarray
        the input query of length m to be aligned
    S: cupy.core.core.ndarray
        the input stream of length n >= m to be scanned
    epsilon: float
        positive number for regularizing zero stdev
    alignment: int
        the stream is zero-padded to a multiple of this length
    Kahan: int
        non-negative number of Kahan summation adjustment rounds

    Returns
    -------
    cupy.core.core.ndarray
        the computed distance array of length n-m+1
    """

    assert epsilon > 0

    m, Q = len(Q), znorm(Q, epsilon)
    n = (len(S)+alignment-1)//alignment*alignment
    iS = cp.zeros(n, dtype=S.dtype)
    iS[:len(S)] = S
    delta = n-len(S)

    X, Y = cumsum(iS, Kahan), cumsum(iS**2, Kahan)
    X = X[+m:]-X[:-m]
    Y = Y[+m:]-Y[:-m]
    Z = cp.sqrt(cp.maximum(Y/m-cp.square(X/m), 0))

    E = cp.zeros(n, dtype=Q.dtype)
    E[:m] = Q
    R = cp.fft.irfft(cp.fft.rfft(E).conj()*cp.fft.rfft(iS), n=n)

    F = cp.where(Z > 0, 2*(m-R[:-m+1]/Z), m*cp.ones_like(Z))
    return F[:len(S)-m+1]
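# Hypothetical usage (znorm and cumsum are helpers defined elsewhere in the
# same module): scan a long stream for a z-normalized query; the distance
# profile is close to zero where the query was cut out.
import cupy as cp

S = cp.random.rand(100000).astype(cp.float32)
Q = S[500:628].copy()                 # query of length m = 128
d = fft_zdist(Q, S, epsilon=1e-6)     # d[500] should be close to 0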
def backward(self, dout=1.0):
    dout *= np.ones_like(self.y)
    dout[self.mask_border == False] /= self.t[self.mask_border == False]

    error = (self.y - self.t) / self.t
    mask = (error == 0.0)
    error[mask] += 1e-7
    dout *= error / np.abs(error)

    dout[self.mask_border == True] = (
        self.y[self.mask_border == True] - self.t[self.mask_border == True]
    ) / self.t[self.mask_border == True]**2

    dout /= float(self.y.size)
    return dout
def __call__(self, data, iters=5, width=3.0, weights=None, mask=None, keepdims=False):
    data = cp.asarray(data, dtype=self.dtype)
    filt = cp.ones_like(data)
    if mask is not None:
        mask = cp.asarray(mask, dtype=self.dtype)
        elementwise_not(cp.broadcast_to(mask, data.shape), filt)
    if weights is not None:
        weights = cp.asarray(weights, dtype=self.dtype)
        try:
            filt *= weights
        except ValueError:
            if isinstance(self.axis, int):
                ndim = data.ndim
                axis = self.axis % ndim
                if weights.size == data.shape[axis]:
                    filt *= weights.reshape(
                        tuple(-1 if i == axis else 1 for i in range(ndim)))
                else:
                    raise ValueError(
                        'length of weights must be same as'
                        ' the length of data along specified axis.')
            else:
                raise ValueError(
                    'If weights and data are not broadcastable, '
                    'axis must be specified as int.')

    checkfinite(data, filt, filt)

    iterator = count() if (iters is None) else range(iters)
    csum = check_sum(filt, axis=self.axis)
    for _ in iterator:
        self.updatefilt(data, filt, width)
        tsum = check_sum(filt, axis=self.axis)
        if all_equal(csum, tsum):
            break
        else:
            csum = tsum

    if self.rtnmask:
        result = elementwise_not(filt, filt)
    else:
        result = self.reduce(data, filt, keepdims=keepdims)

    return result
def forward(self, x):
    # Support tensor inputs: flatten everything except the batch dimension
    self.original_x_shape = x.shape
    x = x.reshape(x.shape[0], -1)
    self.x = x

    # Binarize the weights to +1/-1 by sign
    bW = cp.ones_like(self.W)
    bW[self.W < 0] = -1
    self.bW = bW.T

    out = cp.dot(self.x, bW)  # + self.b
    return out
def test_cross_correlate_masked_side_effects():
    """Masked normalized cross-correlation should not modify the inputs."""
    shape1 = (2, 2, 2)
    shape2 = (2, 2, 2)

    arr1 = cp.zeros(shape1)
    arr2 = cp.zeros(shape2)

    # Trivial masks
    m1 = cp.ones_like(arr1)
    m2 = cp.ones_like(arr2)

    # for arr in (arr1, arr2, m1, m2):
    #     arr.setflags(write=False)
    arr1c, arr2c, m1c, m2c = [a.copy() for a in (arr1, arr2, m1, m2)]

    cross_correlate_masked(arr1, arr2, m1, m2)

    cp.testing.assert_array_equal(arr1, arr1c)
    cp.testing.assert_array_equal(arr2, arr2c)
    cp.testing.assert_array_equal(m1, m1c)
    cp.testing.assert_array_equal(m2, m2c)
def test_atomic_and(self, dtype):
    @jit.rawkernel()
    def f(x, out):
        tid = jit.blockDim.x * jit.blockIdx.x + jit.threadIdx.x
        if tid < x.size:
            jit.atomic_and(out, tid, x[tid])

    x = cupy.arange(1024, dtype=dtype)
    out = cupy.ones_like(x)
    f((32,), (32,), (x, out))

    # out[i] = 1 & i, i.e. the least significant bit of i
    expected = cupy.zeros_like(out)
    expected[1::2] = 1
    self._check(out, expected)
def test_vertical_mask_line(grad_func):
    """Vertical edge filters mask pixels surrounding input mask."""
    _, hgrad = cp.mgrid[:1:11j, :1:11j]  # horizontal gradient with spacing 0.1
    hgrad[:, 5] = 1  # bad vertical line

    mask = cp.ones_like(hgrad)
    mask[:, 5] = 0  # mask bad line

    expected = cp.zeros_like(hgrad)
    expected[1:-1, 1:-1] = 0.2  # constant gradient for most of image,
    expected[1:-1, 4:7] = 0  # but line and neighbors masked

    result = grad_func(hgrad, mask)
    assert_allclose(result, expected)
def precompute_cc_factors(ad, bd, radius, mode="constant"): # factors = cp.zeros((5,) + ad.shape, dtype=ad.dtype) factors = [None] * 5 sum_h = cp.ones((2 * radius + 1, ), dtype=ad.dtype) h_tuple = (sum_h, ) * ad.ndim kwargs = dict(mode=mode) sum_a = convolve_separable(ad, h_tuple, **kwargs) sum_b = convolve_separable(bd, h_tuple, **kwargs) sum_ab = convolve_separable(ad * bd, h_tuple, **kwargs) sum_aa = convolve_separable(ad * ad, h_tuple, **kwargs) sum_bb = convolve_separable(bd * bd, h_tuple, **kwargs) if mode != "constant": cnt = (2 * radius + 1)**ad.ndim else: cnt = convolve_separable(cp.ones_like(ad), (sum_h, ) * ad.ndim, **kwargs).astype(cp.int32) if True: factors[0] = cp.empty_like(ad) factors[1] = cp.empty_like(ad) factors[2] = cp.empty_like(ad) factors[3] = cp.empty_like(ad) factors[4] = cp.empty_like(ad) _cc_precompute( ad, bd, sum_a, sum_b, sum_ab, sum_aa, sum_bb, cnt, factors[0], factors[1], factors[2], factors[3], factors[4], ) else: a_mean = sum_a / cnt b_mean = sum_b / cnt factors[0] = ad - a_mean factors[1] = bd - b_mean factors[2] = sum_ab - b_mean * sum_a - a_mean * sum_b + sum_a * b_mean factors[3] = sum_aa - (a_mean + a_mean) * sum_a + sum_a * a_mean factors[4] = sum_bb - (b_mean + b_mean) * sum_b + sum_b * b_mean return factors
def ones_like(array, stream=None):
    """Creates a one-filled cupy.ndarray object like the given array.

    Args:
        array (cupy.ndarray or numpy.ndarray): Base array.
        stream (cupy.cuda.Stream): CUDA stream.

    Returns:
        cupy.ndarray: One-filled array.

    """
    warnings.warn(
        'chainer.cuda.ones_like is deprecated. Use cupy.ones_like instead.',
        DeprecationWarning)
    check_cuda_available()
    assert stream is None
    if isinstance(array, cupy.ndarray):
        return cupy.ones_like(array)
    return cupy.ones(array.shape, dtype=array.dtype)
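# Usage note: this shim only warns and forwards to cupy; new code should call
# cupy.ones_like directly. A minimal sketch of the numpy-input branch, assuming
# a CUDA device is available:
import numpy

a = numpy.zeros((2, 3), dtype=numpy.float32)
b = ones_like(a)  # emits DeprecationWarning; returns a one-filled cupy.ndarray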