Example #1
0
def not_equal(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.not_equal <numpy.not_equal>`.

    See its docstring for more information.
    """
    # Evaluated purely for its side effect: raises on dtype
    # combinations the Array API specification disallows.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.not_equal(a._array, b._array))
Example #2
0
    def one_hot(self, y):
        """
        One-hot encode class labels: +1 marks the true class ("hot"),
        -1 marks every other class ("cold").

        Parameters
        ----------
        y : array of integer class indices, shape (n_samples,)

        Returns
        -------
        Array of shape (n_samples, self.n_classes) with entries in
        {+1.0, -1.0} (floating dtype).
        """
        labels = y.reshape(-1, 1)
        # Broadcasting the (n, 1) labels against the (n_classes,) range
        # replaces the previous cp.tile construction, which materialized a
        # full (n, n_classes) index matrix before comparing.
        classes = cp.arange(self.n_classes)
        onehot = cp.where(cp.equal(labels, classes), 1.0, -1.0)
        return onehot
Example #3
0
def test_csr_norms(norm, ref_norm, dtype, seed, shape):
    """Check a GPU CSR norm against its CPU reference implementation."""
    rng = np.random.RandomState(seed)
    dense = rng.randn(*shape).astype(dtype)
    host_csr = sp.csr_matrix(dense)
    device_csr = cupyx.scipy.sparse.csr_matrix(host_csr)

    norm(device_csr)
    ref_norm(host_csr)

    # The normalized GPU matrix must differ from the untouched dense
    # input, proving the operation really happened in place.
    assert cp.any(cp.not_equal(device_csr.todense(), cp.array(dense)))

    # GPU and CPU results must agree element-wise.
    cp.testing.assert_array_almost_equal(device_csr.todense(),
                                         host_csr.todense())
Example #4
0
 def __ne__(self, other):
     """Element-wise ``!=`` comparison, delegated to :func:`cupy.not_equal`."""
     return cupy.not_equal(self, other)
Example #5
0
    def compute_loss(self, i, j, neg_i, neg_j):
        """
        Compute a margin-based ranking loss over positive pairs (i, j) and
        negative pairs (neg_i, neg_j).

        Pairwise distances come from ``wb.batch_W2`` — presumably a batched
        2-Wasserstein distance between Gaussians parameterized by
        (mean, covariance); confirm against the ``wb`` module.

        Side effects: caches many intermediates on ``self`` (``vi``, ``vj``,
        ``neg_vi``, ``neg_vj``, ``inv_ij``, ``mask``, ``loss``, ...) —
        presumably reused by a backward/gradient step elsewhere; verify
        before removing any of them.

        Returns the scalar summed loss over the batch.
        """

        xi = self.means[i]
        # lvi = self.vars[i] + self.lbda * cp.eye(self.n_dim).reshape(1, self.n_dim, self.n_dim).repeat(len(i), axis=0)
        lvi = self.vars[i]

        xj = self.c_means[j]
        # lvj = self.c_vars[j] + self.lbda * cp.eye(self.n_dim).reshape(1, self.n_dim, self.n_dim).repeat(len(j), axis=0)
        lvj = self.c_vars[j]

        # Full covariances, regularized with lbda * I per batch element.
        self.vi = wb.to_full(lvi) + self.lbda * cp.eye(self.n_dim).reshape(
            1, self.n_dim, self.n_dim).repeat(len(i), axis=0)
        # For vj the regularizer is masked out where j == 0 — index 0 is
        # presumably a padding/null context; confirm against the caller.
        self.vj = wb.to_full(lvj) + self.lbda * cp.eye(self.n_dim).reshape(
            1, self.n_dim, self.n_dim).repeat(len(j), axis=0) * cp.not_equal(
                j, 0).reshape(-1, 1, 1)

        # Same thing, with the negative batch
        neg_xi = self.means[neg_i]
        # lneg_vi = self.vars[neg_i] + self.lbda * cp.eye(self.n_dim).reshape(1, self.n_dim, self.n_dim).repeat(len(neg_i), axis=0)
        lneg_vi = self.vars[neg_i]

        neg_xj = self.c_means[neg_j]
        # lneg_vj = self.c_vars[neg_j] + self.lbda * cp.eye(self.n_dim).reshape(1, self.n_dim, self.n_dim).repeat(len(neg_j), axis=0)
        lneg_vj = self.c_vars[neg_j]

        self.neg_vi = wb.to_full(lneg_vi) + self.lbda * cp.eye(
            self.n_dim).reshape(1, self.n_dim, self.n_dim).repeat(len(neg_i),
                                                                  axis=0)
        self.neg_vj = wb.to_full(lneg_vj) + self.lbda * cp.eye(
            self.n_dim).reshape(1, self.n_dim, self.n_dim).repeat(len(neg_j),
                                                                  axis=0)

        # Negative-pair distances computed first so their matrix-sqrt
        # factors can seed the positive-pair solve below (sU / inv_sU).
        neg_wij, self.inv_n_ij, self.v_n_i_s, self.inv_v_n_i_s, self.mid_n_ij = wb.batch_W2(
            neg_xi,
            neg_xj,
            self.neg_vi,
            self.neg_vj,
            Cn=self.Cn,
            numIters=self.num_sqrt_iters,
            prod=True)

        # Positive pairs; every num_neg-th negative factor is reused as a
        # warm start (negatives are presumably grouped num_neg-per-positive
        # — confirm against the sampler).
        wij, self.inv_ij, self.v_i_s, self.inv_v_i_s, self.mid_ij = wb.batch_W2(
            xi,
            xj,
            self.vi,
            self.vj,
            Cn=self.Cn,
            numIters=self.num_sqrt_iters,
            sU=self.v_n_i_s[::self.num_neg],
            inv_sU=self.inv_v_n_i_s[::self.num_neg],
            prod=True)

        # Hinge loss: margin - d(pos) + mean(d(neg)), clipped at zero.
        losses = cp.maximum(
            self.margin - wij +
            neg_wij.reshape(-1, self.num_neg).sum(axis=1) / self.num_neg, 0.)
        # Mask of active (non-saturated) examples, kept for the backward pass.
        self.mask = cp.ones_like(losses)
        self.mask[cp.where(losses == 0.)] = 0.

        self.loss = losses.sum()

        return self.loss
Example #6
0
# One iteration of the refinement below used to run in this loop
# (kept for reference):
# for iter in tqdm(range(0,100)): 
    
#     dF_3D = cupy.multiply(cupy.subtract(cupy.divide(cupy.multiply(dn_3D,dn_3D),n_med2),1),-k2)
#     dF_3D = cupy.fft.fftn(dF_3D)
#     dF_3D[cupy.not_equal(dF_3D_2,0)] = dF_3D_2[cupy.not_equal(dF_3D_2,0)]
#     dF_3D   = cupy.fft.ifftn(dF_3D)    
#     dn_3D   = cupy.multiply(cupy.sqrt(cupy.add(cupy.divide(dF_3D,-k2), 1)), n_med)
#     #Positive constraint
#     dn_3D[cupy.less(cupy.real(dn_3D),n_med)] = cupy.real(n_med)+cupy.imag(dn_3D[cupy.less(cupy.real(dn_3D),n_med)])
#     dn_3D[cupy.less(cupy.imag(dn_3D),0)]     = cupy.real(dn_3D[cupy.less(cupy.imag(dn_3D), 0)])



# Single refinement step: map dn_3D to a scattering-potential-like field,
# re-impose the known (non-zero) Fourier samples from dF_3D_2, and map back.
# NOTE(review): looks like a Gerchberg–Papoulis-style missing-cone fill for
# diffraction tomography — confirm against the surrounding pipeline.
dF_3D = cupy.multiply(cupy.subtract(cupy.divide(cupy.multiply(dn_3D,dn_3D),n_med2),1),-k2)
dF_3D = cupy.fft.fftn(dF_3D)
# Overwrite only where measured spectrum dF_3D_2 is non-zero; the rest
# keeps the current estimate.
dF_3D[cupy.not_equal(dF_3D_2,0)] = dF_3D_2[cupy.not_equal(dF_3D_2,0)]
dF_3D   = cupy.fft.ifftn(dF_3D)    
dn_3D   = cupy.multiply(cupy.sqrt(cupy.add(cupy.divide(dF_3D,-k2), 1)), n_med)

dn_3D =  cupy.fft.fftshift(dn_3D);
# Positivity constraints: clamp the real part from below at n_med (keeping
# the imaginary part), and zero out negative imaginary parts.
dn_3D[cupy.less(cupy.real(dn_3D),n_med)] = n_med+1j*cupy.imag(dn_3D[cupy.less(cupy.real(dn_3D),n_med)])
dn_3D[cupy.less(cupy.imag(dn_3D),0)]     = cupy.real(dn_3D[cupy.less(cupy.imag(dn_3D), 0)])


##TVMIN + positiveconstrain + crop
# Total-variation denoising of the refined index volume.
tv = TvMin(dF_3D_2 = dF_3D_2 , lamb = 0.008, iteration = 100)
tv.setInputImage(dn_3D)
tv.minimize()
dn_3D = tv.getResultImage()