Example 1
    def __init__(self,
                 EnAxis,
                 EnOutput,
                 TAxis,
                 TOutput,
                 num_electrons1,
                 num_electrons2,
                 dE=0,
                 dT=0):
        from streaking_cal.statistics import weighted_avg_and_std

        # Central energy and spectral width from the weighted spectrum.
        centralE, computed_dE = weighted_avg_and_std(
            EnAxis.get(), cp.square(cp.abs(EnOutput)).get())
        self.p0 = eV_in_au(centralE)

        # Only fall back to the computed widths when none were supplied.
        if dE == 0:
            dE = computed_dE
        if dT == 0:
            dT = weighted_avg_and_std(TAxis.get(),
                                      cp.square(cp.abs(TOutput)).get())[1]

        self.dE = dE
        self.dT = dT
        self.__spec = EnOutput
        self.__eAxis = EnAxis
        self.__temp = TOutput
        self.__tAxis = TAxis
        self.num_electrons1 = num_electrons1
        self.num_electrons2 = num_electrons2
        self.__is_low_res = False
        self.__streakspeed = 0
        # Drop the local references so the GPU arrays can be freed.
        del EnAxis, EnOutput, TAxis, TOutput
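The helper `weighted_avg_and_std` imported above (and again in Example 9) is not shown in this collection. A minimal sketch of the usual implementation, assuming `values` and `weights` are equal-length 1-D NumPy arrays:

import numpy as np

def weighted_avg_and_std(values, weights):
    # Weighted mean and the corresponding weighted standard deviation.
    average = np.average(values, weights=weights)
    variance = np.average((values - average) ** 2, weights=weights)
    return average, np.sqrt(variance)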
Example 2
    def __init__(self, IT, K_dB, wavelength, tx, ty, tz, rx, ry, rz):
        """
        Args:
            IT (int): the number of parallel channel matrices.
            K_dB (float): the Rician K-factor in dB.
            wavelength (float): the wavelength.
            tx (numpy.array): the x positions of transmit antenna elements.
            ty (numpy.array): the y positions of transmit antenna elements.
            tz (numpy.array): the z positions of transmit antenna elements.
            rx (numpy.array): the x positions of receive antenna elements.
            ry (numpy.array): the y positions of receive antenna elements.
            rz (numpy.array): the z positions of receive antenna elements.
        """
        self.IT = IT
        self.K = 10**(K_dB / 10.0)
        self.M = len(tx)  # the number of transmit antenna elements
        self.N = len(rx)  # the number of receive antenna elements

        r = xp.zeros((self.N, self.M), dtype=xp.complex128)
        for n in range(self.N):
            for m in range(self.M):
                r[n, m] = xp.sqrt(
                    xp.square(rx[n] - tx[m]) + xp.square(ry[n] - ty[m]) +
                    xp.square(rz[n] - tz[m]))

        anHLoS = xp.exp(-1j * 2.0 * xp.pi / wavelength * r)
        self.HLoS = xp.tile(anHLoS.T, IT).T  # IT \cdot N \times M
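A usage sketch for the constructor above; the class name `RicianLoSChannel` and the NumPy alias for `xp` are assumptions, not from the source:

import numpy as np

xp = np  # the source aliases NumPy or CuPy as xp

wavelength = 0.005
M, N = 4, 4
tx = xp.arange(M) * wavelength / 2   # half-wavelength-spaced transmit array
ty = xp.zeros(M)
tz = xp.zeros(M)
rx = xp.arange(N) * wavelength / 2   # matching receive array
ry = xp.full(N, 10.0)                # 10 m link distance
rz = xp.zeros(N)

channel = RicianLoSChannel(2, 10.0, wavelength, tx, ty, tz, rx, ry, rz)
print(channel.HLoS.shape)  # (IT * N, M)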
Example 3
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Returns the variance along an axis.

    Args:
        a (cupy.ndarray): Array to compute variance.
        axis (int): Along which axis to compute variance. The flattened array
            is used by default.
        dtype: Data type specifier.
        out (cupy.ndarray): Output array.
        ddof (int): Delta degrees of freedom; the divisor used is ``N - ddof``.
        keepdims (bool): If True, the reduced axes are retained as axes of
            size one.

    Returns:
        cupy.ndarray: The variance of the input array along the axis.

    .. seealso:: :func:`numpy.var`

    """
    if axis is None:
        axis = tuple(range(a.ndim))
    if not isinstance(axis, tuple):
        axis = (axis,)

    if dtype is None and issubclass(a.dtype.type,
                                    (numpy.integer, numpy.bool_)):
        dtype = numpy.dtype(numpy.float64)

    arrmean = mean(a, axis=axis, dtype=dtype, keepdims=True)

    x = cupy.subtract(a, arrmean, dtype=dtype)
    cupy.square(x, x)
    ret = cupy.sum(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    rcount = max(_count_reduce_items(a, axis) - ddof, 0)
    return cupy.multiply(ret, ret.dtype.type(1.0 / rcount), out=ret)
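A quick sanity check for the wrapper, assuming `mean` and `_count_reduce_items` are available in the surrounding module as in CuPy's statistics routines; the result should agree with `numpy.var`:

import cupy
import numpy

x = cupy.arange(12, dtype=cupy.float32).reshape(3, 4)
print(var(x, axis=0))                      # per-column variance on the GPU
print(numpy.var(cupy.asnumpy(x), axis=0))  # same values from NumPy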
Example 4
def _update_wavefront(data, varying_probe, scan, psi, op):

    # Compute the diffraction patterns for all of the probe modes at once.
    # We need access to all of the modes of a position to solve the phase
    # problem. The Ptycho operator doesn't do this natively, so it's messy.
    patches = cp.zeros(data.shape, dtype='complex64')
    patches = op.diffraction.patch.fwd(
        patches=patches,
        images=psi,
        positions=scan,
        patch_width=varying_probe.shape[-1],
    )
    patches = patches.reshape(*scan.shape[:-1], 1, 1, op.detector_shape,
                              op.detector_shape)

    nearplane = cp.tile(patches, reps=(1, 1, 1, varying_probe.shape[-3], 1, 1))
    pad, end = op.diffraction.pad, op.diffraction.end
    nearplane[..., pad:end, pad:end] *= varying_probe

    # Solve the farplane phase problem ----------------------------------------
    farplane = op.propagation.fwd(nearplane, overwrite=True)
    intensity = cp.sum(cp.square(cp.abs(farplane)), axis=(2, 3))
    cost = op.propagation.cost(data, intensity)
    logger.info('%10s cost is %+12.5e', 'farplane', cost)
    farplane -= 0.5 * op.propagation.grad(data, farplane, intensity)

    if __debug__:
        intensity = cp.sum(cp.square(cp.abs(farplane)), axis=(2, 3))
        cost = op.propagation.cost(data, intensity)
        logger.info('%10s cost is %+12.5e', 'farplane', cost)
        # TODO: Only compute cost every 20 iterations or on a log sampling?

    farplane = op.propagation.adj(farplane, overwrite=True)

    return farplane, cost
Example 5
    def update(self, layers):
        if len(self._cache_s) == 0 or len(self._cache_v) == 0:
            self._init_cache(layers)

        for idx, layer in enumerate(layers):
            weights, gradients = layer.get_weight(), layer.get_gradient()
            if weights is None or gradients is None:
                continue

            (w, b), (dw, db) = weights, gradients
            dw_key, db_key = Adam._get_cache_keys(idx)

            self._cache_v[dw_key] = self._beta1 * self._cache_v[dw_key] + (
                1 - self._beta1) * dw
            self._cache_v[db_key] = self._beta1 * self._cache_v[db_key] + (
                1 - self._beta1) * db

            self._cache_s[dw_key] = self._beta2 * self._cache_s[dw_key] + (
                1 - self._beta2) * np.square(dw)
            self._cache_s[db_key] = self._beta2 * self._cache_s[db_key] + (
                1 - self._beta2) * np.square(db)

            dw = self._cache_v[dw_key] / (np.sqrt(self._cache_s[dw_key]) +
                                          self._eps)
            db = self._cache_v[db_key] / (np.sqrt(self._cache_s[db_key]) +
                                          self._eps)

            layer.set_weight(w - self._lr * dw, b - self._lr * db)
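Note that this update omits Adam's bias-correction terms, so early steps are biased toward zero. A minimal standalone sketch of the corrected step (`t` is the 1-based step count; all names here are illustrative):

import numpy as np

def adam_step(w, dw, v, s, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # First- and second-moment estimates.
    v = beta1 * v + (1 - beta1) * dw
    s = beta2 * s + (1 - beta2) * np.square(dw)
    # Bias correction matters while beta**t is still far from zero.
    v_hat = v / (1 - beta1 ** t)
    s_hat = s / (1 - beta2 ** t)
    return w - lr * v_hat / (np.sqrt(s_hat) + eps), v, s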
Example 6
    def __get_streaked_spectra(self, streakspeed):
        if self.is_low_res():
            return None
        else:
            from cupy.fft import fft

            def fs_in_au(t):
                return 41.3414 * t  # from fs to a.u.

            # def eV_in_au(e): return 0.271106*np.sqrt(e)  # from eV to a.u.

            # E0 in V/m; the vector-potential shape fixes the scale:
            # 232000 V/m corresponds to a maximum streaking speed of 1 meV/fs.
            E0 = 232000 * streakspeed
            ff1 = cp.flip(
                fft(self.__temp * cp.exp(-1j * fs_in_au(self.__tAxis) * 0.5 *
                                         (self.p0 * E0 * p_times_A_vals_up +
                                          E0**2 * A_square_vals_up))))
            ff2 = cp.flip(
                fft(self.__temp * cp.exp(-1j * fs_in_au(self.__tAxis) * 0.5 *
                                         (self.p0 * E0 * p_times_A_vals_down +
                                          E0**2 * A_square_vals_down))))

            spectrum1 = cp.square(cp.abs(ff1))
            spectrum2 = cp.square(cp.abs(ff2))


            return spectrum1, spectrum2
Example 7
 def calc_loss(embeddings, target_embeddings):
     norm = cp.linalg.norm(embeddings, axis=1).reshape(
         (embeddings.shape[0], 1))
     norm_loss = float(cp.sum(cp.square(1 - norm) / norm))
     target_loss = float(
         cp.sum(cp.square(embeddings - target_embeddings) / 2))
     loss = norm_loss + target_loss
     return loss
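A small usage sketch with random data (the shapes are arbitrary assumptions):

import cupy as cp

embeddings = cp.random.randn(8, 16).astype(cp.float32)
target_embeddings = cp.random.randn(8, 16).astype(cp.float32)
print(calc_loss(embeddings, target_embeddings))  # a plain Python float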
Example 8
 def cp_trans_formula(self,
                      freqs: cp.ndarray,
                      freq: float = 1.) -> cp.ndarray:
     freqs = freqs / freq * self.peak_freq(freq)
     result = (self.c * cp.pi**(-1 / 4) *
               (cp.exp(-cp.square(self.sigma - freqs) / 2) -
                self.k * cp.exp(-cp.square(freqs) / 2)))
     return result
Example 9
    def get_spectra(self,
                    streakspeed_in_meV_per_fs,
                    keep_originals=False,
                    discretized=True):
        '''Returns the streaked spectra as a tuple, either discretized (sampled
        with num_electrons1/num_electrons2 simulated electrons) or as the raw,
        nondiscretized distributions.'''
        from streaking_cal.statistics import weighted_avg_and_std

        if not self.is_low_res():

            (streaked1, streaked2
             ) = self.__get_streaked_spectra(streakspeed_in_meV_per_fs)

            streaked1 = interp(tof_ens_gpu, self.__eAxis, streaked1)
            streaked2 = interp(tof_ens_gpu, self.__eAxis, streaked2)
            xuvonly = interp(tof_ens_gpu, self.__eAxis,
                             cp.square(cp.abs(self.__spec)))

            if not keep_originals:
                self.__eAxis = None
                self.__spec = None
                t_square = cp.square(cp.abs(self.__temp))
                t_mean, _ = weighted_avg_and_std(self.__tAxis.get(),
                                                 t_square.get())
                # Normalize on the GPU before moving the profile to the host.
                temp_interp = interp(cp.asarray(standard_full_time),
                                     self.__tAxis - t_mean, t_square)
                self.__temp = (temp_interp / cp.sum(temp_interp)).get()
                self.__tAxis = standard_full_time
                self.__is_low_res = True
                self.__streakedspectra = np.asarray(
                    (xuvonly.get(), streaked1.get(), streaked2.get()))
                self.__streakspeed = streakspeed_in_meV_per_fs

            if discretized:
                streaked1 = self.discretized_spectrum(streaked1,
                                                      self.num_electrons1)
                streaked2 = self.discretized_spectrum(streaked2,
                                                      self.num_electrons2)

            self.__streakspeed = streakspeed_in_meV_per_fs

            return cp.asnumpy(xuvonly), cp.asnumpy(streaked1), cp.asnumpy(
                streaked2)

        elif discretized:
            (xuvonly, streaked1, streaked2) = self.__streakedspectra
            streaked1 = self.discretized_spectrum(streaked1,
                                                  self.num_electrons1)
            streaked2 = self.discretized_spectrum(streaked2,
                                                  self.num_electrons2)

            return cp.asnumpy(xuvonly), cp.asnumpy(streaked1), cp.asnumpy(
                streaked2)

        else:
            return self.__streakedspectra.copy()
Example 10
 def _conv_syst(self, inputs, h_ij=None):
     '''
     Convolves the bandwidths with the resolutions scaled by the scale
     systematics. Resolutions are in the units of the scaled dimension.
     '''
     if h_ij is None:
         h_ij = self.h_ij
     scales = inputs[0:3 * len(self.observables.scales):3]
     resolutions = inputs[2:3 * len(self.observables.shifts):3]
     return cp.sqrt(cp.square(scales * h_ij) + cp.square(resolutions))
Example 11
 def _computenorm(self):
     """Compute norm of probability distribution
     Returns
     -------
     norm : float
     """
      w2 = cp.reshape(self.w, (self.n_features, self.d, self.D, self.D))
      tmp = cp.sum(cp.square(w2[0, :, 0, :]), 0)  # first tensor
      for i in range(1, self.n_features - 1):
          tmp = cp.dot(tmp, cp.sum(cp.square(w2[i, :, :, :]), 0))  # MPS contraction
      norm = cp.inner(tmp, cp.sum(cp.square(w2[self.n_features - 1, :, :, 0]), 0))
      return norm
     return norm
Example 12
def sobel(image):
    width_array, height_array = generate_arrays(3, 3)  # note: currently unused below
    vertical_filter = np.array(([-1, 0, 1], [-2, 0, 2], [-1, 0, 1]))
    horizontal_filter = np.flip(vertical_filter.T, axis=0)

    new_image_x = convolution_sobel(image, vertical_filter)
    new_image_y = convolution_sobel(image, horizontal_filter)

    # convolution_sobel may return host (NumPy) arrays; move them to the GPU.
    magnitud_gradiente = cp.sqrt(
        cp.square(cp.asarray(new_image_x)) + cp.square(cp.asarray(new_image_y)))
    magnitud_gradiente *= 255 / magnitud_gradiente.max()
    return cp.asnumpy(magnitud_gradiente)
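`convolution_sobel` is not shown in this collection; a minimal sketch using `cupyx.scipy.ndimage.convolve` (the helper's name and contract are assumptions):

import cupy as cp
from cupyx.scipy.ndimage import convolve

def convolution_sobel(image, kernel):
    # 2-D convolution on the GPU with reflected borders.
    return convolve(cp.asarray(image, dtype=cp.float32),
                    cp.asarray(kernel, dtype=cp.float32),
                    mode='reflect')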
Example 13
 def __call__(self, params, g_params):
     # AdaGrad update: accumulate squared gradients in v and scale each step.
     new_params, new_v = zip(
         *[(param - g_param * self.rate /
            cp.sqrt((v + cp.square(g_param) + self.eps).astype(cp.float32)),
            v + cp.square(g_param))
           for param, g_param, v in zip(params, g_params, self.v)])
     self.v = new_v
     return new_params
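A usage sketch, assuming the enclosing optimizer class is named `AdaGrad`, takes `rate` and `eps` in its constructor, and keeps the running squared-gradient sums in `self.v` (all assumptions):

import cupy as cp

params = [cp.zeros((3, 3), dtype=cp.float32)]
g_params = [cp.random.randn(3, 3).astype(cp.float32)]

opt = AdaGrad(rate=0.01, eps=1e-8)           # hypothetical constructor
opt.v = [cp.zeros_like(p) for p in params]   # accumulator state
params = opt(params, g_params)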
Example 14
 def do_rmsprop(self, X, Y, update, learning_rate, **kwargs):
     layers = len(self.structure) - 1
     grads = self.calculate_grads(X, Y, kwargs["l2_reg_param"])
     for ii in range(1, layers + 1):
         # Exponential moving average of the squared, batch-summed gradients.
         update["w" + str(ii)] = kwargs["beta"] * update.get(
             "w" + str(ii), 0) + (1 - kwargs["beta"]) * cp.square(
                 cp.sum(grads["w" + str(ii)], axis=0))
         update["b" + str(ii)] = kwargs["beta"] * update.get(
             "b" + str(ii), 0) + (1 - kwargs["beta"]) * cp.square(
                 cp.sum(grads["b" + str(ii)], axis=1).reshape(-1, 1))
         # RMSprop step: scale the gradient by the root of the running average.
         self.params["w" + str(ii)] -= cp.multiply(
             learning_rate / cp.sqrt(kwargs["epsilon"] + update["w" + str(ii)]),
             cp.sum(grads["w" + str(ii)], axis=0))
         self.params["b" + str(ii)] -= cp.multiply(
             learning_rate / cp.sqrt(kwargs["epsilon"] + update["b" + str(ii)]),
             cp.sum(grads["b" + str(ii)], axis=1).reshape(-1, 1))
     return update
Example 15
def spherical_cosmask(n,mask_radius, edge_width, origin=None):
    """mask = spherical_cosmask(n, mask_radius, edge_width, origin)
    """

    if type(n) is int:
        n = np.array([n])

    sz = np.array([1, 1, 1])
    sz[0:np.size(n)] = n[:]

    szl = -np.floor(sz/2)
    szh = szl + sz

    x,y,z = np.meshgrid( np.arange(szl[0],szh[0]), 
                         np.arange(szl[1],szh[1]), 
                         np.arange(szl[2],szh[2]), indexing='ij', sparse=True)

    r = np.sqrt(x*x + y*y + z*z)

    m = np.zeros(sz.tolist())

    # Cosine roll-off over the shell mask_radius <= r <= mask_radius + edge_width.
    edgezone = (r >= mask_radius) & (r <= mask_radius + edge_width)
    m[edgezone] = 0.5 + 0.5 * np.cos(2 * np.pi * (r[edgezone] - mask_radius) /
                                     (2 * edge_width))
    # The interior of the sphere is passed completely.
    m[r <= mask_radius] = 1

    return m
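Usage sketch: a 64x64x64 soft-edged spherical mask whose values fall from 1 inside the radius to 0 past the cosine edge:

import numpy as np

mask = spherical_cosmask(np.array([64, 64, 64]), mask_radius=20, edge_width=4)
print(mask.shape, float(mask.max()), float(mask.min()))  # (64, 64, 64) 1.0 0.0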
Example 16
def save_evaluations(user, population, optim):
    # define pearson correlation coefficient vector
    pearsons = cupy.zeros(population.shape[0], dtype=cupy.float64)
    # define mse vector
    mse = cupy.zeros(population.shape[0], dtype=cupy.float64)
    # define common gene counter vector
    common_genes = cupy.zeros(population.shape[0], dtype=cupy.int64)
    # define cosine similarity vector
    cosines = cupy.zeros(population.shape[0], dtype=cupy.float64)
    # compute vectors
    for i in range(population.shape[0]):
        pearsons[i] = cupy.corrcoef(population[i], optim)[0, 1]
        mse[i] = (cupy.square(optim - population[i])).mean(axis=None)
        common_genes[i] = evaluate_chromosome(population[i], optim)
        # cosine similarity computed on the GPU (SciPy cannot consume CuPy arrays)
        cosines[i] = cupy.dot(optim, population[i]) / (
            cupy.linalg.norm(optim) * cupy.linalg.norm(population[i]))
    # save vectors to txt files using the user id as prefix
    # (CuPy has no savetxt, so move the data to the host first)
    numpy.savetxt("user_" + str(user) + "-pearsons.txt",
                  cupy.asnumpy(pearsons),
                  delimiter="\t")
    numpy.savetxt("user_" + str(user) + "-mse.txt",
                  cupy.asnumpy(mse),
                  delimiter="\t")
    numpy.savetxt("user_" + str(user) + "-common_genes.txt",
                  cupy.asnumpy(common_genes),
                  fmt='%i',
                  delimiter="\t")
    numpy.savetxt("user_" + str(user) + "-cosines.txt",
                  cupy.asnumpy(cosines),
                  delimiter="\t")
    return
Example 17
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices.

    Performs no input validation.

    Parameters
    ----------
    X : array_like
        The input array
    squared : bool, optional (default = False)
        If True, return squared norms.

    Returns
    -------
    array_like
        The row-wise (squared) Euclidean norm of X.
    """
    if sparse.issparse(X):
        if isinstance(
                X, (sparse.csr_matrix, sparse.csc_matrix, sparse.coo_matrix)):
            X_copy = X.copy()
            X_copy.data = np.square(X_copy.data)
            norms = X_copy.sum(axis=1).squeeze()
        else:
            raise ValueError('Sparse matrix not compatible')
    else:
        norms = np.einsum('ij,ij->i', X, X)

    if not squared:
        np.sqrt(norms, norms)
    return norms
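A quick check against the dense definition (plain NumPy here; in the source, `np` and `sparse` may alias GPU equivalents):

import numpy as np

X = np.random.rand(5, 3)
print(row_norms(X))               # via the einsum path
print(np.linalg.norm(X, axis=1))  # same values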
Example 18
    def update(self, trainable_variables):
        self.iterations += 1
        if self.ms is None:
            #initialize
            self.ms = [
                cp.zeros_like(p.output_tensor) for p in trainable_variables
            ]
        if self.vs is None:
            #initialize
            self.vs = [
                cp.zeros_like(p.output_tensor) for p in trainable_variables
            ]

        for i, (v, m,
                var) in enumerate(zip(self.vs, self.ms, trainable_variables)):
            # NB: in this implementation `v` tracks the first moment and `m`
            # the squared-gradient second moment (reverse of the usual naming).
            v = self.beta1 * v + (1 - self.beta1) * var.grads
            m = self.beta2 * m + (1 - self.beta2) * cp.square(var.grads)
            v_correct = v / (1 - pow(self.beta1, self.iterations))
            m_correct = m / (1 - pow(self.beta2, self.iterations))
            var.output_tensor -= self.lr * (
                v_correct / (cp.sqrt(m_correct) + self.epsilon))

            self.ms[i] = m
            self.vs[i] = v

        super(Adam, self).update(trainable_variables)
Example 19
def sub_routine(vector_u, matrix_V, vector_train, bias, measure, topK=500, gpu=True):

    train_index = vector_train.nonzero()[1]
    if measure == "Cosine":
        vector_predict = matrix_V.dot(vector_u)
    else:
        if gpu:
            import cupy as cp
            vector_predict = -cp.sum(cp.square(matrix_V - vector_u), axis=1)
        else:
            vector_predict = -np.sum(np.square(matrix_V - vector_u), axis=1)
    if bias is not None:
        if gpu:
            import cupy as cp
            vector_predict = vector_predict + cp.array(bias)
        else:
            vector_predict = vector_predict + bias

    if gpu:
        import cupy as cp
        candidate_index = cp.argpartition(-vector_predict, topK+len(train_index))[:topK+len(train_index)]
        vector_predict = candidate_index[vector_predict[candidate_index].argsort()[::-1]]
        vector_predict = cp.asnumpy(vector_predict).astype(np.float32)  # note: ranked item indices returned as float32
    else:
        candidate_index = np.argpartition(-vector_predict, topK+len(train_index))[:topK+len(train_index)]
        vector_predict = candidate_index[vector_predict[candidate_index].argsort()[::-1]]
    vector_predict = np.delete(vector_predict, np.isin(vector_predict, train_index).nonzero()[0])

    return vector_predict[:topK]
Example 20
 def __call__(self, x):
     regularization = 0.
     if self.l1:
         regularization += cp.sum(self.l1 * cp.abs(x)) / x.shape[0]
     if self.l2:
         regularization += cp.sum(self.l2 * cp.square(x)) / (2 * x.shape[0])
     return regularization
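With the `self.l2` fix in place, a usage sketch; the class name `Regularizer` and its constructor signature are assumptions:

import cupy as cp

reg = Regularizer(l1=0.0, l2=1e-4)  # hypothetical constructor
x = cp.random.randn(32, 10).astype(cp.float32)
print(float(reg(x)))  # l2 penalty: sum of squared weights scaled by l2 / (2 * batch)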
Example 21
 def _lossA(self, A, P):
     inum = cupy.array(1.0j, cupy.complex64)
     loss = cupy.square(A['y'] - A['x']).mean()
     grad = self._overlapadd(
         cupy.fft.irfft(
             (A['x'] - A['y']) * cupy.exp(inum * P['x'])) * self.norm)
     return loss, grad
Example 22
def test_score(nrows, ncols, nclusters, random_state):

    X, y = make_blobs(int(nrows),
                      ncols,
                      nclusters,
                      cluster_std=1.0,
                      shuffle=False,
                      random_state=0)

    cuml_kmeans = cuml.KMeans(init="k-means||",
                              n_clusters=nclusters,
                              random_state=random_state,
                              output_type='numpy')

    cuml_kmeans.fit(X)

    actual_score = cuml_kmeans.score(X)
    predictions = cuml_kmeans.predict(X)

    centers = cuml_kmeans.cluster_centers_

    expected_score = 0.0
    for idx, label in enumerate(predictions):
        x = X[idx, :]
        center = cp.array(centers[label, :], dtype=cp.float32)

        sq_euc_dist = cp.sum(cp.square(x - center))
        expected_score += sq_euc_dist

    expected_score *= -1

    cp.testing.assert_allclose(actual_score,
                               expected_score,
                               atol=0.1,
                               rtol=1e-5)
Example 23
def phase_optimization(filename_psf_target,scale=1,step=1.0,iterations=100,filename_phi="phases/foo.npy"):
    psf_target = np.load(filename_psf_target)
    print("Performing phase optimization...")
    if scale == 1:
        Y = cp.asarray(psf_target)
    else:
        dim = (scale * psf_target.shape[0], scale * psf_target.shape[1])
        Y = cp.asarray(cv.resize(psf_target, dim, interpolation=cv.INTER_NEAREST))
    if len(Y.shape) == 2:
        Y = Y / cp.sum(Y)
    else:
        Y = Y / cp.reshape(cp.sum(Y, axis=(0, 1)), (1, 1) + Y.shape[2:])
    
    phi = cp.random.random_sample(Y.shape)*2.0*np.pi
    #phi = cp.fft.fftshift(cp.angle(cp.fft.fft2(cp.sqrt(Y),axes=(0, 1))),axes=(0,1))+2.0*np.pi
    losses = []
    #grads = []
    # `mempool` is presumably a module-level handle, e.g. cp.get_default_memory_pool()
    mempool.free_all_blocks()
    for i in range(iterations):
        
        h = cp.fft.ifft2(cp.fft.ifftshift(cp.exp(1.0j*phi),axes=(0, 1)),axes=(0, 1))
        psf_new = cp.square(cp.abs(h))
        if len(psf_new.shape)==2:
            norm = cp.sum(psf_new)
        else:
            norm = cp.reshape(cp.sum(psf_new,axis=(0,1)),(1,1)+psf_new.shape[2:])
        psf_new = psf_new/norm
        h = h/cp.sqrt(norm)
        
        D = (Y-psf_new)
        loss = cp.sum(cp.square(D))
        losses.append(loss)
        if i % 100 == 0:
            print("Iteration {} of {}, Loss: {}".format(i, iterations, loss))
        
        gradient = -4.0*cp.imag(cp.exp(-1.0j*phi)*cp.fft.fftshift(cp.fft.fft2((D*h),axes=(0, 1)),axes=(0,1)))
        phi = phi - step*gradient
        #grads.append(cp.sum(gradient**2))
        mempool.free_all_blocks()
        
    print("Phase Optimization process completed!")
    phi_np = cp.asnumpy(phi)
    np.save(filename_phi,phi_np)
Example 24
 def get_loss(self, X, Y, l2_reg_param=0, Y_pred=None):
     weight_sum = 0
     for ii in range(1, len(self.structure)):
         weight_sum += cp.sum(cp.square(self.params["w" + str(ii)]))
     if Y_pred is None:
         Y_pred = self.predict(X)
      # cp.choose selects the predicted probability of each true class label.
      return cp.asnumpy((cp.sum(-cp.log(cp.choose(Y, Y_pred))) +
                         (l2_reg_param / 2) * weight_sum) / len(Y))
Example 25
def mse(predictions, targets, cuda=False):
    if cuda:
        res = cp.square(cp.subtract(predictions, targets)).mean()
        cp.cuda.Stream.null.synchronize()
        return res
    else:
        # mean over all elements, matching the GPU branch
        return np.square(np.subtract(predictions, targets)).mean()
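With both branches reduced by `.mean()`, the CPU and GPU paths agree; a quick check:

import numpy as np
import cupy as cp

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.5, 2.0, 2.0])
print(mse(a, b))                                            # CPU path
print(float(mse(cp.asarray(a), cp.asarray(b), cuda=True)))  # GPU path, same value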
Example 26
def square(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.square <numpy.square>`.

    See its docstring for more information.
    """
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in square")
    return Array._new(np.square(x._array))
Example 27
def f(n):
    tot = 0
    num_range = np.arange(0, n, dtype=np.uint32)  # host-side range for itertools
    for p in ittr.product(num_range, repeat=6):
        k = cp.square(cp.array(p, dtype=cp.uint32))
        nn = int(n) ** 2  # assumption: the intended second gcd argument is n**2
        if gcd(int(k.sum()), nn) == 1:
            tot += 1
    return tot
Example 28
 def __stopping_rule(self) -> bool:
     """
     Morozov discrepancy stopping rule: stop once the RMS distance between
     the current solution and the observations falls below the estimated
     noise level.
     :return: True while the stop condition has not been reached, False once it has.
     """
     residual = cp.matmul(self.KHK, self.solution) - self.q_estimator
     residual = cp.sqrt(cp.sum(cp.square(residual)) / self.grid_size)
     return bool(residual > cp.sqrt(self.tau) * self.delta)
Example 29
 def rand(self, snr=1.):
     """Fill vector with random number (~U[1,-1]) with a given SNR"""
     rms = cp.sqrt(cp.mean(cp.square(self.getNdArray())))
     amp_noise = 1.0
     if rms != 0.:
         amp_noise = cp.sqrt(3. / snr) * rms  # sqrt(3*Power_signal/SNR)
     self.getNdArray()[:] = amp_noise * (
         2. * cp.random.random(self.getNdArray().shape) - 1.)
     return self
Example 30
 def log_prior(self, par, **args):
     # Spherical Gaussian log-prior with precision hyper['alpha'].
     for k, v in args.items():
         if k == 'y_train':
             y = v  # unused in this method
     K = 0
     for var in par.keys():
         dim = (cp.asarray(par[var])).size
         K += dim * 0.5 * cp.log(self.hyper['alpha'] / (2 * np.pi))
         K -= 0.5 * self.hyper['alpha'] * cp.sum(cp.square(par[var]))
     return K
Example 31
 def sample_function(x, y, z):
     return cupy.square(cupy.add(x, y))
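The function ignores `z` and simply returns (x + y) squared elementwise, e.g.:

import cupy

x = cupy.arange(3.0)
y = cupy.ones(3)
print(sample_function(x, y, None))  # [1. 4. 9.]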