Example #1
def sortBatches2(ccb0):
    # takes as input a matrix of nBatches by nBatches containing
    # dissimilarities.
    # outputs a matrix of sorted batches, and the sorting order, such that
    # ccb1 = ccb0(isort, isort)

    # put this matrix on the GPU
    ccb0 = cp.asarray(ccb0, order='F')

    # compute its svd on the GPU (this might also be fast enough on CPU)
    u, s, v = svdecon(ccb0)
    # HACK: consistency with MATLAB
    u = u * cp.sign(u[0, 0])
    v = v * cp.sign(u[0, 0])

    # initialize the positions xs of the batch embeddings to be very small but proportional to
    # the first PC
    xs = .01 * u[:, 0] / cp.std(u[:, 0], ddof=1)

    # 200 iterations of gradient descent should be enough
    # TODO: move_to_config
    niB = 200

    # this learning rate should usually work fine, since it scales with the average gradient
    # and ccb0 is z-scored
    # TODO: move_to_config
    eta = 1
    for k in tqdm(range(niB), desc="Sorting %d batches" % ccb0.shape[0]):
        # euclidean distances between 1D embedding positions
        ds = (xs - xs[:, np.newaxis])**2
        # the transformed distances go through this function
        W = cp.log(1 + ds)

        # the error is the difference between ccb0 and W
        err = ccb0 - W

        # ignore the mean value of ccb0
        err = err - cp.mean(err, axis=0)

        # backpropagate the gradients
        err = err / (1 + ds)
        err2 = err * (xs[:, np.newaxis] - xs)
        D = cp.mean(
            err2, axis=1)  # one half of the gradients is along this direction
        E = cp.mean(err2, axis=0)  # the other half is along this direction
        # we don't need to worry about the gradients for the diagonal because those are 0

        # final gradients for the embedding variable
        dx = -D + E.T

        # take a gradient step
        xs = xs - eta * dx

    # sort the embedding positions xs
    isort = cp.argsort(xs, axis=0)

    # sort the matrix of dissimilarities
    ccb1 = ccb0[isort, :][:, isort]

    return ccb1, isort
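
A minimal usage sketch for sortBatches2, assuming cupy is installed and that svdecon, tqdm, and the module's other helpers are importable alongside it; cc below is just a stand-in for a z-scored batch dissimilarity matrix:

import numpy as np

nBatches = 32
cc = np.random.randn(nBatches, nBatches)
cc = 0.5 * (cc + cc.T)                 # symmetric, roughly z-scored dissimilarities
ccb1, isort = sortBatches2(cc)         # sorted matrix and sorting order (CuPy arrays)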
Example #2
def _svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.
    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.
    Parameters
    ----------
    u : cupy.ndarray
        u and v are the output of `cupy.linalg.svd`
    v : cupy.ndarray
        u and v are the output of `cupy.linalg.svd`
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping.
        Otherwise, use the rows of v. The choice of which variable to base the
        decision on is generally algorithm dependent.
    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # columns of u, rows of v
        max_abs_cols = cp.argmax(cp.abs(u), axis=0)
        signs = cp.sign(u[max_abs_cols, range(u.shape[1])])
        u *= signs
        v *= signs[:, cp.newaxis]
    else:
        # rows of v, columns of u
        max_abs_rows = cp.argmax(cp.abs(v), axis=1)
        signs = cp.sign(v[list(range(v.shape[0])), max_abs_rows])
        u *= signs
        v *= signs[:, cp.newaxis]
    return u, v
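
A quick check of the helper above; the sign flip is deterministic and leaves the reconstruction unchanged (only cupy is assumed):

import cupy as cp

A = cp.random.rand(5, 3)
u, s, vt = cp.linalg.svd(A, full_matrices=False)
u, vt = _svd_flip(u, vt)                # deterministic column/row signs
assert cp.allclose((u * s) @ vt, A)     # reconstruction is unaffected by the flip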
Example #3
 def iterative_least_likely(self, eps, alpha =1, n_iter = None, index = None):
     xp = AdvImage.xp
     
     if n_iter is None:
         n_iter = int(min(eps + 4, 1.25 * eps))
    
     if index is None:
         probs = AdvImage.model.predict([self.org_image], oversample=False).data[0]
         probs = cuda.to_cpu(probs)
         least_index = np.argmin(probs)
         t = xp.array([least_index], dtype=xp.int32)
     else:
         # assumed intent: use the supplied target class index directly
         t = xp.array([index], dtype=xp.int32)
     
     out_layer = AdvImage.last_layer
     target_org = self.target.data.copy()
     
     for _ in range(n_iter):
         x = AdvImage.model(self.target, layers=[out_layer])[out_layer]
         loss = F.softmax_cross_entropy(x, t)
         
         self.target.cleargrad()
         AdvImage.model.cleargrads()
         loss.backward()
         
         perturb = xp.sign(self.target.grad)
         updated_data = self.target.data - alpha * perturb
         clipped_data = xp.clip(updated_data, target_org - eps, target_org + eps)
         self.target = Variable(clipped_data)
     
     self.adv_image = self._restore_image(self.target)
Example #4
def signedpower(x, a):
    # treat inf and -inf as nan
    # x[cp.isinf(x)] = cp.nan
    # in testing, the where-based replacement below proved faster

    x = cp.where(cp.isinf(x), cp.nan, x)
    return cp.sign(x) * cp.abs(x)**a
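
A short usage sketch; inf values become nan before the signed power is taken:

import cupy as cp

x = cp.array([-8.0, -1.0, 0.0, 4.0, cp.inf])
print(signedpower(x, 0.5))    # roughly [-2.83, -1., 0., 2., nan]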
Example #5
def constrain_variable_probe(variable_probe, weights):
    """Add the following constraints to variable probe weights

    1. Remove outliers from weights
    2. Enforce orthogonality once per epoch

    """
    logger.info('Orthogonalize variable probes')
    variable_probe = tike.linalg.orthogonalize_gs(
        variable_probe,
        axis=(-3, -2, -1),
    )

    logger.info('Remove outliers from variable probe weights')
    aevol = cp.abs(weights)
    weights = cp.minimum(
        aevol,
        1.5 * cp.percentile(
            aevol,
            [95],
            axis=[-3],
            keepdims=True,
        ).astype(weights.dtype),
    ) * cp.sign(weights)

    # TODO: Smooth the weights as a function of the frame index.

    return variable_probe, weights
Example #6
def natural_compression(x):
    dim = x.shape[0]
    logx = xp.ma.log2(xp.abs(x)).filled(-15)
    logx_floor = xp.floor(logx)
    noise = xp.random.uniform(0.0, 1.0, dim)
    leftx = xp.exp2(logx_floor)
    rounded = xp.floor(xp.ma.log2(xp.abs(x) + leftx * noise).filled(-15))
    compressed = xp.sign(x) * xp.exp2(rounded)
    return compressed
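
A small usage sketch, assuming xp is bound to NumPy in this module (the masked-array routines xp.ma used above are a NumPy feature):

import numpy as xp

xp.random.seed(0)
x = xp.array([0.0, 3.0, -5.0, 0.3])
c = natural_compression(x)
# each nonzero entry of c is sign(x_i) times a power of two bracketing |x_i|
print(c)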
Example #7
def sign(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.sign <numpy.sign>`.

    See its docstring for more information.
    """
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in sign")
    return Array._new(np.sign(x._array))
Example #8
def random_quantization(x, s):
    dim = x.shape[0]
    xnorm = xp.linalg.norm(x)
    if s == 0 or xnorm == 0:
        return xp.zeros(dim, dtype=int)
    noise = xp.random.uniform(0, 1, dim)
    rounded = xp.floor(s * xp.abs(x) / xnorm + noise)
    compressed = (xnorm / s) * xp.sign(x) * rounded
    return compressed
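
A quick sketch under the same assumption that xp is NumPy; every output entry is an integer multiple of ||x|| / s carrying the sign of the input:

import numpy as xp

xp.random.seed(0)
x = xp.array([0.5, -1.2, 3.3, -0.1])
print(random_quantization(x, s=4))    # entries are (||x|| / 4) * sign(x_i) * integer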
Example #9
def tvl1(lp, init_recon, tomo0, num_iter, reg_par, gpu):
    """
    Reconstruction with the total variation method
    with the regularization parameter reg_par.
    Solving the problem ||R(recon)-tomo||_1 + reg_par*TV(recon) -> min
    """

    # choose device
    cp.cuda.Device(gpu).use()
    # Allocating necessary gpu arrays
    recon = cp.array(init_recon)
    tomo = cp.array(tomo0)
    g = tomo * 0
    p = recon * 0
    recon0 = recon
    prox0x = recon * 0
    prox0y = recon * 0
    div0 = recon * 0
    prox1 = tomo * 0

    lam = reg_par
    c = 0.35  # 1/power_method(lp,tomo,num_iter)

    # tv iterations
    for i in range(0, num_iter):
        # forward step
        # compute proximal prox0
        prox0x[:, :, :-1] += c * (recon[:, :, 1:] - recon[:, :, :-1])
        prox0y[:, :-1, :] += c * (recon[:, 1:, :] - recon[:, :-1, :])
        nprox = cp.array(
            cp.maximum(1, (cp.sqrt(prox0x * prox0x + prox0y * prox0y) / lam)))
        prox0x = prox0x / nprox
        prox0y = prox0y / nprox
        # compute proximal prox1
        lp.fwdp(g, recon, gpu)
        tmp = prox1 + c * g - c * tomo
        tmp2 = cp.abs(tmp) - 1
        tmp2[tmp2 < 0] = 0
        prox1 = tmp - tmp2 * cp.sign(tmp)
        # backward step
        recon = recon0
        div0[:, :, 1:] = (prox0x[:, :, 1:] - prox0x[:, :, :-1])
        div0[:, :, 0] = prox0x[:, :, 0]
        div0[:, 1:, :] += (prox0y[:, 1:, :] - prox0y[:, :-1, :])
        div0[:, 0, :] += prox0y[:, 0, :]
        lp.adjp(p, prox1, gpu)
        recon0 = recon0 - c * p + c * div0

        # update recon
        recon = 2 * recon0 - recon

    return recon.get()
Example #10
def FW(W, n):
    ## Initially the distances between all pairs are set to infinity
    d = cp.ones([n, n]) * myInf
    all_ones = cp.ones([n, n])
    for k in range(n):
        ## Calculating distance in iteration k
        dk = cp.log(
            cp.dot(cp.exp(W[:, k].reshape(n, 1)), cp.exp(W[k, :].reshape(1,
                                                                         n))))
        a = cp.sign(d - dk)
        where_to = cp.where(a > 0)
        ## Updating distances where necessary
        d[where_to] = dk[where_to]
    return d
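
A small usage sketch; myInf is assumed to be a module-level sentinel read by FW, so it is set explicitly here:

import cupy as cp

myInf = cp.inf    # assumed: the "infinite distance" sentinel FW reads from module scope
W = cp.array([[0., 1., 4.],
              [1., 0., 2.],
              [4., 2., 0.]])
d = FW(W, 3)      # d[i, j] = min over k of W[i, k] + W[k, j]
print(d[0, 2])    # ~3.0, routing through node 1 beats the direct cost of 4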
Example #11
 def fast_gradient(self, eps):
     xp = AdvImage.xp
     out_layer = AdvImage.last_layer
     x = AdvImage.model(self.target, layers=[out_layer])[out_layer]
     t = xp.array([self.index], dtype=xp.int32)
     loss = F.softmax_cross_entropy(x, t)
     
     self.target.cleargrad()
     AdvImage.model.cleargrads()
     loss.backward()
     
     perturb = xp.sign(self.target.grad)
     self.target = Variable(self.target.data + eps * perturb)
     self.adv_image = self._restore_image(self.target)
Example #12
def itkrm(data,K,S,maxitr,startD=np.array([1])):
    M, N = data.shape
    if startD.all()==1:
        D_init = np.random.randn(M, K)
    else:
        D_init = startD
        
    #Algorithm    
    GPU_D_old = cp.asarray(D_init)
       
    GPU_Y = cp.asarray(data)
    
    GPU_M = int(cp.asarray(M))
    
    GPU_N = int(cp.asarray(N))
    
    GPU_S = int(cp.asarray(S))
    
    GPU_maxitr = int(cp.asarray(maxitr))
    
    GPU_I_D = cp.zeros((S,N),dtype=cp.int32)
   
    for i in range(GPU_maxitr):
        start_time = N_timer.cont_timer(0,0)
        N_timer.Timer(i,maxitr)
        for n in range(GPU_N):
            GPU_I_D[:,n] = max_atoms(GPU_D_old,GPU_Y[:,n],GPU_S)

        GPU_D_new = cp.zeros((M,K))
        
        GPU_DtD = GPU_D_old.T @ GPU_D_old

        for n in range(GPU_N):
            GPU_DtY = GPU_D_old[:,GPU_I_D[:,n]].T @ GPU_Y[:,n]
            GPU_matproj = cp.repeat((GPU_D_old[:,GPU_I_D[:,n]] @ cp.linalg.inv(GPU_DtD[GPU_I_D[:,n,None], GPU_I_D[:,n]]) @ GPU_DtY)[:,None],GPU_S,axis=1)
            GPU_vecproj = GPU_D_old[:,GPU_I_D[:,n]] @ cp.diag(cp.diag( GPU_DtD[GPU_I_D[:,n,None], GPU_I_D[:,n]] )**-1*( GPU_DtY ))
            GPU_signer = cp.sign( GPU_DtY )
            GPU_D_new[:,GPU_I_D[:,n]] = GPU_D_new[:,GPU_I_D[:,n]] + (cp.repeat(GPU_Y[:,n,None], S, axis=1) - GPU_matproj + GPU_vecproj)*GPU_signer
            
            


        GPU_scale = cp.sum(GPU_D_new*GPU_D_new, axis=0)
        GPU_iszero = cp.where(GPU_scale < 0.00001)[0]
#        GPU_D_new[:,GPU_iszero] = np.random.randn(GPU_M, len(GPU_iszero))  # generate random with GPU
        GPU_D_new[:,GPU_iszero] = cp.asarray(np.random.randn(M, len(GPU_iszero)))  # generate random with CPU
    #end hugget
        GPU_D_new = normalize_mat_col(GPU_D_new)
        GPU_D_old = 1*GPU_D_new
    return cp.asnumpy(GPU_D_old)
Example #13
def extractPCfromSnippets(proc, probe=None, params=None, Nbatch=None):
    # extracts principal components for 1D snippets of spikes from all channels
    # loads a subset of batches to find these snippets

    NT = params.NT
    nPCs = params.nPCs
    Nchan = probe.Nchan

    batchstart = np.arange(0, NT * Nbatch + 1, NT).astype(np.int64)

    # extract the PCA projections
    # initialize the covariance of single-channel spike waveforms
    CC = cp.zeros(params.nt0, dtype=np.float32)

    # from every 100th batch
    for ibatch in range(0, Nbatch, 100):
        offset = Nchan * batchstart[ibatch]
        dat = proc.flat[offset:offset + NT * Nchan].reshape((-1, Nchan),
                                                            order='F')
        if dat.shape[0] == 0:
            continue

        # move data to GPU and scale it back to unit variance
        dataRAW = cp.asarray(dat, dtype=np.float32) / params.scaleproc

        # find isolated spikes from each batch
        row, col, mu = isolated_peaks_new(dataRAW, params)

        # for each peak, get the voltage snippet from that channel
        c = get_SpikeSample(dataRAW, row, col, params)

        # scale covariance down by 1,000 to maintain a good dynamic range
        CC = CC + cp.dot(c, c.T) / 1e3

    # the singular vectors of the covariance matrix are the PCs of the waveforms
    U, Sv, V = svdecon(CC)

    wPCA = U[:, :nPCs]  # take as many as needed

    # adjust the arbitrary sign of the first PC so its negativity is downward
    # TODO: unclear - is 20 here the index into the spike waveform? Should this be hardcoded?
    #               - should it be nt0min instead?
    wPCA[:, 0] = -wPCA[:, 0] * cp.sign(wPCA[20, 0])

    return wPCA
Example #14
def inverse(
    data,
    impulse_response=None,
    filter_params={},
    max_gain=2,
    predefined_filter=None,
):
    """Apply the filter in reverse to the given data.

    Parameters
    ----------
    data : (M,N) ndarray
        Input data.
    impulse_response : callable `f(r, c, **filter_params)`
        Impulse response of the filter.  See LPIFilter2D.__init__.
    filter_params : dict
        Additional keyword parameters to the impulse_response function.
    max_gain : float
        Limit the filter gain.  Often, the filter contains zeros, which would
        cause the inverse filter to have infinite gain.  High gain causes
        amplification of artefacts, so a conservative limit is recommended.

    Other Parameters
    ----------------
    predefined_filter : LPIFilter2D
        If you need to apply the same filter multiple times over different
        images, construct the LPIFilter2D and specify it here.

    """
    check_nD(data, 2, "data")
    if predefined_filter is None:
        filt = LPIFilter2D(impulse_response, **filter_params)
    else:
        filt = predefined_filter

    F, G = filt._prepare(data)
    _min_limit(F)

    F = 1 / F
    mask = cp.abs(F) > max_gain
    F[mask] = cp.sign(F[mask]) * max_gain

    return _centre(cp.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape)
Example #15
 def iterative_gradient(self, eps, alpha =1, n_iter = None):
     xp = AdvImage.xp
     
     if n_iter is None:
         n_iter = int(min(eps + 4, 1.25 * eps))
     
     t = xp.array([self.index], dtype=xp.int32)
     out_layer = AdvImage.last_layer
     target_org = self.target.data.copy()
     
     for _ in range(n_iter):
         x = AdvImage.model(self.target, layers=[out_layer])[out_layer]
         loss = F.softmax_cross_entropy(x, t)
         
         self.target.cleargrad()
         AdvImage.model.cleargrads()
         loss.backward()
         
         perturb = xp.sign(self.target.grad)
         updated_data = self.target.data + alpha * perturb
         clipped_data = xp.clip(updated_data, target_org - eps, target_org + eps)
         self.target = Variable(clipped_data)
     
     self.adv_image = self._restore_image(self.target)
Example #16
def FISTA(grad_f, x_0, L, LAMBDA, n_iters=100, eps=1e-10):
    '''FISTA'''
    r = xp.zeros(n_iters + 1)

    for t in range(1, n_iters + 1):
        r[t] = 0.5 + xp.sqrt(1 + 4 * r[t - 1]**2) / 2

    gamma = (1 - r[:n_iters]) / r[1:]

    x = x_0.copy()
    y = x_0.copy()

    for t in range(1, n_iters):

        _grad = grad_f(x)
        if xp.linalg.norm(_grad) < eps:
            break

        x -= _grad / L
        y_new = xp.sign(x) * xp.maximum(xp.abs(x) - LAMBDA / L, 0)
        x = (1 - gamma[t]) * y_new + gamma[t] * y
        y = y_new

    return y, t + 1
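
A minimal LASSO-style usage sketch for the FISTA routine above, assuming xp is NumPy in this module:

import numpy as xp

xp.random.seed(0)
A = xp.random.randn(20, 10)
x_true = xp.zeros(10)
x_true[:3] = [1.0, -2.0, 3.0]
b = A @ x_true

grad_f = lambda x: A.T @ (A @ x - b)     # gradient of the smooth part 0.5 * ||Ax - b||^2
L = xp.linalg.norm(A.T @ A, 2)           # Lipschitz constant of grad_f (spectral norm)
x_hat, n_used = FISTA(grad_f, xp.zeros(10), L, LAMBDA=0.1)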
Example #17
File: fix.py  Project: PepeJoseHU/Eclair
def fixpix(data, mask, out=None, dtype=None, fix_NaN=False):
    '''
    fill bad pixels with the mean of the surrounding pixels

    Parameters
    ----------
    data : ndarray
        An array of image data.
        If a 3D array containing multiple images,
        the images must be stacked along the 1st dimension (axis=0).
    mask : ndarray
        An array indicating bad pixel positions.
        The shape must be the same as the image.
        Bad pixels have nonzero values; all other pixels are 0.
        If all pixels are bad, a ValueError is raised.
    out : cupy.ndarray, default None
        Alternate output array in which to place the result. The default
        is ``None``; if provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.
    dtype : str or dtype, default 'float32'
        dtype of the array used internally.
        If None, this is usually "float32",
        but it can be changed with eclair.set_dtype.
        If the input dtype is different, a cast copy is used.
    fix_NaN : bool, default False
        If True, fix NaN pixels even if they are not flagged as bad in the mask.

    Returns
    -------
    fixed : ndarray
        An array of images with bad pixels fixed.

    Notes
    -----
    NaN is ignored in interpolation calculations,
    but is not fixed if fix_NaN is False.
    '''
    dtype = judge_dtype(dtype)
    if data.shape[-2:] != mask.shape[-2:]:
        raise ValueError('shape differs between data and mask')
    elif mask.all():
        raise ValueError('No available pixel')

    data = cp.array(data, dtype=dtype, copy=False, ndmin=3)
    mask = cp.array(mask, dtype=dtype, copy=False, ndmin=3)

    convolution = lambda data, out: conv_kernel(data, out)

    if out is None:
        out = cp.empty_like(data)
    cp.copyto(out, data)

    filt = mask2filter(mask)
    if fix_NaN:
        filt = checkfinite(data, filt)

    out *= filt

    dconv = cp.empty_like(out)
    nconv = cp.empty_like(filt)

    while not filt.all():
        convolution(out, dconv)
        convolution(filt, nconv)
        fix_kernel(out, filt, dconv, nconv, out)
        cp.sign(nconv, out=filt)

    return cp.squeeze(out)
Example #18
def gsgd(x, b):
    norm = xp.linalg.norm(x, axis=0)
    return norm / (2**(b - 1)) * xp.sign(x) * xp.floor(
        (2**(b - 1)) / norm * xp.abs(x) + xp.random.uniform(0, 1, x.shape))
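
A short sketch, again assuming xp is NumPy; the quantized entries are signed multiples of ||x|| / 2**(b - 1):

import numpy as xp

xp.random.seed(0)
x = xp.array([0.5, -1.2, 3.3, -0.1])
print(gsgd(x, b=4))    # each entry is a signed multiple of ||x|| / 8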
Example #19
def extractTemplatesfromSnippets(proc=None,
                                 probe=None,
                                 params=None,
                                 Nbatch=None,
                                 nPCs=None):
    # this function is very similar to extractPCfromSnippets.
    # outputs not just the PC waveforms, but also the template "prototype",
    # basically k-means clustering of 1D waveforms.

    NT = params.NT
    # skip every this many batches
    nskip = params.nskip
    nPCs = nPCs or params.nPCs
    nt0min = params.nt0min
    Nchan = probe.Nchan
    batchstart = np.arange(0, NT * Nbatch + 1, NT).astype(np.int64)

    k = 0
    # preallocate matrix to hold 1D spike snippets
    # dd = cp.zeros((params.nt0, int(5e4)), dtype=np.float32, order='F')
    dds = []

    for ibatch in tqdm(range(0, Nbatch, nskip), desc="Extracting templates"):
        offset = Nchan * batchstart[ibatch]
        dat = proc.flat[offset:offset + NT * Nchan].reshape((-1, Nchan),
                                                            order='F')

        # move data to GPU and scale it back to unit variance
        dataRAW = cp.asarray(dat, dtype=np.float32) / params.scaleproc

        # find isolated spikes from each batch
        row, col, mu = isolated_peaks_new(dataRAW, params)

        # for each peak, get the voltage snippet from that channel
        c = get_SpikeSample(dataRAW, row, col, params)

        # if k + c.shape[1] > dd.shape[1]:
        #     dd = cp.pad(dd, (0, dd.shape[1]), mode='constant')

        # dd[:, k:k + c.shape[1]] = c
        dds.append(c)
        k = k + c.shape[1]
        if k > 1e5:
            break

    # discard empty samples
    # dd = dd[:, :k]
    dd = cp.asfortranarray(cp.concatenate(dds, axis=1).astype(np.float32))

    # initialize the template clustering with random waveforms
    uu = np.random.permutation(dd.shape[1])[:nPCs]
    wTEMP = dd[:, uu]
    wTEMP = wTEMP / cp.sum(wTEMP**2, axis=0)**.5  # normalize them

    for i in range(10):
        # at each iteration, assign the waveform to its most correlated cluster
        cc = cp.dot(wTEMP.T, dd)
        imax = cp.argmax(cc, axis=0)
        amax = cc[imax, np.arange(cc.shape[1])]
        for j in range(nPCs):
            # weighted average to get new cluster means
            wTEMP[:, j] = cp.dot(dd[:, imax == j], amax[imax == j].T)
        wTEMP = wTEMP / cp.sum(wTEMP**2, axis=0)**.5  # unit normalize

    # the PCs are just the left singular vectors of the waveforms
    U, Sv, V = svdecon(dd)

    # take as many as needed
    wPCA = U[:, :nPCs]

    # adjust the arbitrary sign of the first PC so its negativity is downward
    wPCA[:, 0] = -wPCA[:, 0] * cp.sign(wPCA[nt0min, 0])

    return wTEMP, wPCA
Example #20
def _tvl1(
    reference_image,
    moving_image,
    flow0,
    attachment,
    tightness,
    num_warp,
    num_iter,
    tol,
    prefilter,
):
    """TV-L1 solver for optical flow estimation.

    Parameters
    ----------
    reference_image : ndarray, shape (M, N[, P[, ...]])
        The first gray scale image of the sequence.
    moving_image : ndarray, shape (M, N[, P[, ...]])
        The second gray scale image of the sequence.
    flow0 : ndarray, shape (image0.ndim, M, N[, P[, ...]])
        Initialization for the vector field.
    attachment : float
        Attachment parameter. The smaller this parameter is,
        the smoother the solution is.
    tightness : float
        Tightness parameter. It should have a small value in order to
        maintain the attachment and regularization parts in
        correspondence.
    num_warp : int
        Number of times moving_image is warped.
    num_iter : int
        Number of fixed point iterations.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : ndarray, shape (image0.ndim, M, N[, P[, ...]])
        The estimated optical flow components for each axis.

    """

    dtype = reference_image.dtype
    grid = cp.stack(
        cp.meshgrid(
            *[cp.arange(n, dtype=dtype) for n in reference_image.shape],
            indexing="ij",
        ),
        axis=0,
    )

    dt = 0.5 / reference_image.ndim
    reg_num_iter = 2
    f0 = attachment * tightness
    f1 = dt / tightness
    tol *= reference_image.size

    flow_current = flow_previous = flow0

    g = cp.zeros((reference_image.ndim, ) + reference_image.shape, dtype=dtype)
    proj = cp.zeros(
        (reference_image.ndim, reference_image.ndim) + reference_image.shape,
        dtype=dtype,
    )

    s_g = [slice(None)] * g.ndim
    s_p = [slice(None)] * proj.ndim
    s_d = [slice(None)] * (proj.ndim - 2)

    for _ in range(num_warp):
        if prefilter:
            flow_current = ndi.median_filter(flow_current,
                                             [1] + reference_image.ndim * [3])

        image1_warp = warp(moving_image, grid + flow_current, mode="nearest")
        grad = cp.stack(cnp.gradient(image1_warp))
        NI = (grad * grad).sum(0)
        NI[NI == 0] = 1

        rho_0 = image1_warp - reference_image - (grad * flow_current).sum(0)

        for _ in range(num_iter):

            # Data term

            rho = rho_0 + (grad * flow_current).sum(0)

            idx = abs(rho) <= f0 * NI

            flow_auxiliary = flow_current

            flow_auxiliary[:, idx] -= rho[idx] * grad[:, idx] / NI[idx]

            idx = ~idx
            srho = f0 * cp.sign(rho[idx])
            flow_auxiliary[:, idx] -= srho * grad[:, idx]

            # Regularization term
            flow_current = flow_auxiliary.copy()

            for idx in range(reference_image.ndim):
                s_p[0] = idx
                for _ in range(reg_num_iter):
                    for ax in range(reference_image.ndim):
                        s_g[0] = ax
                        s_g[ax + 1] = slice(0, -1)
                        g[tuple(s_g)] = cp.diff(flow_current[idx], axis=ax)
                        s_g[ax + 1] = slice(None)

                    norm = cp.sqrt((g * g).sum(0, keepdims=True))
                    norm *= f1
                    norm += 1.0
                    proj[idx] -= dt * g
                    proj[idx] /= norm

                    # d will be the (negative) divergence of proj[idx]
                    d = -proj[idx].sum(0)
                    for ax in range(reference_image.ndim):
                        s_p[1] = ax
                        s_p[ax + 2] = slice(0, -1)
                        s_d[ax] = slice(1, None)
                        d[tuple(s_d)] += proj[tuple(s_p)]
                        s_p[ax + 2] = slice(None)
                        s_d[ax] = slice(None)

                    flow_current[idx] = flow_auxiliary[idx] + d

        flow_previous -= flow_current  # The difference as stopping criteria
        if (flow_previous * flow_previous).sum() < tol:
            break

        flow_previous = flow_current

    return flow_current
Example #21
    def compute(self, ab, krv, cartesian=True, bohren=True):
        '''Returns the field scattered by the particle at each coordinate

        Parameters
        ----------
        ab : numpy.ndarray
            [2, norders] Mie scattering coefficients
        krv : numpy.ndarray
            Reduced vector displacements of particle from image coordinates
        cartesian : bool
            If set, return field projected onto Cartesian coordinates.
            Otherwise, return polar projection.
        bohren : bool
            If set, use sign convention from Bohren and Huffman.
            Otherwise, use opposite sign convention.
        Returns
        -------
        field : numpy.ndarray
            [3, npts] array of complex vector values of the
            scattered field at each coordinate.
        '''

        norders = ab.shape[0]  # number of partial waves in sum

        # GEOMETRY
        # 1. particle displacement [pixel]
        # Note: The sign convention used here is appropriate
        # for illumination propagating in the -z direction.
        # This means that a particle forming an image in the
        # focal plane (z = 0) is located at positive z.
        # Accounting for this by flipping the axial coordinate
        # is equivalent to using a mirrored (left-handed)
        # coordinate system.
        shape = krv.shape
        kx = krv[0, :]
        ky = krv[1, :]
        kz = -krv[2, :]

        # 2. geometric factors
        krho = cp.sqrt(kx**2 + ky**2)
        kr = cp.sqrt(krho**2 + kz**2)

        self.cosphi[...] = safe_division(kx, krho, 1.)
        self.sinphi[...] = safe_division(ky, krho, 0.)
        self.costheta[...] = safe_division(kz, kr, 1.)  # z convention
        self.sintheta[...] = safe_division(krho, kr, 0.)
        sinkr = cp.sin(kr)
        coskr = cp.cos(kr)

        # SPECIAL FUNCTIONS
        # starting points for recursive function evaluation ...
        # 1. Riccati-Bessel radial functions, page 478.
        # Particles above the focal plane create diverging waves
        # described by Eq. (4.13) for $h_n^{(1)}(kr)$. These have z > 0.
        # Those below the focal plane appear to be converging from the
        # perspective of the camera. They are described by Eq. (4.14)
        # for $h_n^{(2)}(kr)$, and have z < 0. We can select the
        # appropriate case by applying the correct sign of the imaginary
        # part of the starting functions...
        if bohren:
            factor = 1.j * cp.sign(kz)
        else:
            factor = -1.j * cp.sign(kz)

        xi_nm2 = coskr + factor * sinkr  # \xi_{-1}(kr)
        xi_nm1 = sinkr - factor * coskr  # \xi_0(kr)

        # 2. Angular functions (4.47), page 95
        pi_nm1 = 0.  # \pi_0(\cos\theta)
        pi_n = 1.  # \pi_1(\cos\theta)

        # 3. Vector spherical harmonics: [r,theta,phi]
        self.mo1n[0, :] = 0.j  # no radial component

        # storage for scattered field
        self.es.fill(0.j)

        # COMPUTE field by summing partial waves
        for n in range(1, norders):
            # upward recurrences ...
            # 4. Legendre factor (4.47)
            # Method described by Wiscombe (1980)

            swisc = pi_n * self.costheta
            twisc = swisc - pi_nm1
            tau_n = pi_nm1 - n * twisc  # -\tau_n(\cos\theta)

            # ... Riccati-Bessel function, page 478
            xi_n = (2. * n - 1.) * (xi_nm1 / kr) - xi_nm2  # \xi_n(kr)

            # ... Deirmendjian's derivative
            dn = (n * xi_n) / kr - xi_nm1

            # vector spherical harmonics (4.50)
            self.mo1n[1, :] = pi_n * xi_n  # ... divided by cosphi/kr
            self.mo1n[2, :] = tau_n * xi_n  # ... divided by sinphi/kr

            # ... divided by cosphi sintheta/kr^2
            self.ne1n[0, :] = n * (n + 1.) * pi_n * xi_n
            self.ne1n[1, :] = tau_n * dn  # ... divided by cosphi/kr
            self.ne1n[2, :] = pi_n * dn  # ... divided by sinphi/kr

            # prefactor, page 93
            en = 1.j**n * (2. * n + 1.) / n / (n + 1.)

            # the scattered field in spherical coordinates (4.45)
            self.es += (1.j * en * ab[n, 0]) * self.ne1n
            self.es -= (en * ab[n, 1]) * self.mo1n

            # upward recurrences ...
            # ... angular functions (4.47)
            # Method described by Wiscombe (1980)
            pi_nm1 = pi_n
            pi_n = swisc + ((n + 1.) / n) * twisc

            # ... Riccati-Bessel function
            xi_nm2 = xi_nm1
            xi_nm1 = xi_n
        # n: multipole sum

        # geometric factors were divided out of the vector
        # spherical harmonics for accuracy and efficiency ...
        # ... put them back at the end.
        radialfactor = 1. / kr
        self.es[0, :] *= self.cosphi * self.sintheta * radialfactor**2
        self.es[1, :] *= self.cosphi * radialfactor
        self.es[2, :] *= self.sinphi * radialfactor

        # By default, the scattered wave is returned in spherical
        # coordinates.  Project components onto Cartesian coordinates.
        # Assumes that the incident wave propagates along z and
        # is linearly polarized along x

        if cartesian:
            self.ec[0, :] = self.es[0, :] * self.sintheta * self.cosphi
            self.ec[0, :] += self.es[1, :] * self.costheta * self.cosphi
            self.ec[0, :] -= self.es[2, :] * self.sinphi

            self.ec[1, :] = self.es[0, :] * self.sintheta * self.sinphi
            self.ec[1, :] += self.es[1, :] * self.costheta * self.sinphi
            self.ec[1, :] += self.es[2, :] * self.cosphi
            self.ec[2, :] = (self.es[0, :] * self.costheta -
                             self.es[1, :] * self.sintheta)
            return self.ec
        else:
            return self.es
Example #22
def soft_py(x, tau):
    threshed = np.maximum(np.abs(x) - tau, 0)
    threshed = threshed * np.sign(x)
    return threshed
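
For reference, the soft-thresholding operator zeroes entries within tau of zero and shrinks the rest toward zero by tau:

import numpy as np

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
print(soft_py(x, 1.0))    # entries with |x| <= 1 collapse to 0; the others shrink by 1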
Example #23
def _min_limit(x, val=eps):
    mask = cp.abs(x) < val
    x[mask] = cp.sign(x[mask]) * val
Example #24
def T(x,d,dim=2):
    assert dim <= d
    assert dim >= 1
    assert dim == int(dim)
    return x + 2*cp.sign(x)*cp.array(dim*[1]+(d-dim)*[0])
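
A small usage sketch for T; the first dim coordinates are pushed away from zero by 2, the rest are left untouched:

import cupy as cp

x = cp.array([1.5, -2.0, 0.0, 3.0])
print(T(x, d=4, dim=2))    # [3.5, -4.0, 0.0, 3.0]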
Example #25
 def soft_thresh(self, x, tau):
     out = np.maximum(np.abs(x) - tau, 0)
     out = out * np.sign(x)
     return out
Example #26
def fixpix(data,mask,out=None,dtype=None,fix_NaN=False):
    '''
    fill bad pixels with linear interpolation using surrounding pixels

    Parameters
    ----------
    data : array-like
        An array of image data.
        If a 3D array containing multiple images,
        the images must be stacked along the 1st dimension (axis=0).
    mask : array-like
        An array indicating bad pixel positions.
        The shape must be the same as the image.
        Bad pixels have nonzero values; all other pixels are 0.
        If all pixels are bad, a ValueError is raised.
    out : cupy.ndarray, default None
        Alternate output array in which to place the result. The default
        is None; if provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.
    dtype : str or dtype, default None
        dtype of the ndarray used internally.
        If None, eclair.common.default_dtype is used.
        If the input dtype is different, a cast copy is used.
    fix_NaN : bool, default False
        If True, fix NaN pixels even if they are not specified
        as bad pixels in the mask.

    Returns
    -------
    fixed : ndarray
        An array of images with bad pixels fixed.

    Notes
    -----
    NaN is ignored in interpolation calculations,
    but is not fixed if fix_NaN is False.
    '''
    dtype = judge_dtype(dtype)

    data = cp.asarray(data,dtype=dtype)
    mask = cp.asarray(mask,dtype=dtype)

    dshape = data.shape
    imshape = data.shape[-2:]
    if imshape != mask.shape[-2:]:
        raise ValueError('shape differs between data and mask')
    if mask.all(axis=(-2,-1)).any():
        raise ValueError('No available pixel')

    if out is None:
        out = data.copy()
    else:
        cp.copyto(out,data)
    
    tout = cp.array(out,copy=False,ndmin=3)
    filt = cp.array(mask,ndmin=3)
    elementwise_not(filt,filt)

    ternary_operation(filt,tout,0,tout)

    dconv = cp.empty_like(tout)
    nconv = cp.empty_like(filt)

    for _ in range(max(imshape)):
        conv_kernel(tout,dconv)
        conv_kernel(filt,nconv)
        fix_core(filt,dconv,nconv,fix_NaN,tout)
        if nconv.all():
            break
        else:
            cp.sign(nconv,out=filt)
    
    return out
Example #27
 def sign(self):
     self.getNdArray()[:] = cp.sign(self.getNdArray())
     return self
Example #28
        variation = proc.calc_variations('EQUALIZED_SUM')
        np.save(video_folder / 'variation', variation)
        variation = cp.asarray(variation)
        hz = 1 / proc.fps
        yfft_eq_sum = fft.fft(variation)
        xfft_eq_sum = fft.fftfreq(variation.size, hz)[:variation.size // 2]
        sum_energy_eq_sum = cp.cumsum(
            2.0 / variation.size * cp.abs(yfft_eq_sum[0:variation.size // 2]))
        eq_sum_limiar_pc_energy = sum_energy_eq_sum.max() * ENERGY_LIMIT
        frequency = float(
            xfft_eq_sum[sum_energy_eq_sum <= eq_sum_limiar_pc_energy].max())
        filter_coef = firwin(NUM_TAPS, frequency, fs=2 / hz)
        result = lfilter(filter_coef, 1, cp.asnumpy(variation))
        np.save(video_folder / 'filtered', result)
        result = cp.asarray(result)
        subs = cp.diff(result)
        subs2 = cp.diff(subs)
        critical_points = (cp.sign(subs[1:]) != cp.sign(
            subs[:-1])).astype(bool)
        maxes = critical_points & (subs2 < 0).astype(bool)
        mins = critical_points & (subs2 > 0).astype(bool)
        cp.save(video_folder / 'maxes', maxes)
        cp.save(video_folder / 'mins', mins)
        cp.save(video_folder / 'diff', subs)
        cp.save(video_folder / 'diff2', subs2)
        with (video_folder / 'metadata.json').open('w') as fp:
            dump({
                'hz': hz,
                'frequency': frequency,
            }, fp)
Example #29
    def General_n_Balance_n_Collision_Eff(self,
                                      _new_path,
                                      length_only = True,
                                      GPU_accelerating = False,
                                      GPU_accelerating_data = None,
                                      matrix_data = None):
        ITC = {}
        max_ITC = 1
        min_ITC = sys.maxsize
        total_cost = 0
        max_order = 0
        total_order = 0

        standard_index = self.tools.GetWidth()**2 + self.tools.GetHeight()**2

        # Parallelization
        if GPU_accelerating and length_only:
            n_AGV, population_size = GPU_accelerating_data

            T_matrix, S_matrix = matrix_data

            T_matrix = cp.array(T_matrix)
            S_matrix = cp.array(np.array(S_matrix).astype(float))

            ITC_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[1],[1]])), (population_size, n_AGV))
            O_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[0],[1]])), (population_size, n_AGV))
            TC_matrix = cp.reshape(cp.dot(ITC_matrix, cp.ones((n_AGV, 1))), (population_size))
            TO_matrix = cp.reshape(cp.dot(O_matrix, cp.ones((n_AGV, 1))), (population_size))

            max_ITC_matrix = cp.amax(ITC_matrix, axis=1)
            min_ITC_matrix = cp.amin(ITC_matrix, axis=1)
            max_order_matrix = cp.amax(O_matrix, axis=1)
            
            _, n_order_points, _  = S_matrix.shape
            
            t_m = cp.reshape(cp.dot(S_matrix, cp.array([[[1],[0],[0],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            x_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[1],[0],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            y_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[1],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            l_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[0],[1],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            o_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[0],[0],[1]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            t_m_l = cp.reshape(cp.dot(S_matrix, cp.array([[[1],[0],[0],[0],[0]]])),
                               (population_size, n_order_points))

            t_m_diff = t_m - cp.transpose(t_m, (0, 2, 1))
            x_m_diff = x_m - cp.transpose(x_m, (0, 2, 1))
            y_m_diff = y_m - cp.transpose(y_m, (0, 2, 1))

            m_xy_diff = cp.absolute(x_m_diff) + cp.absolute(y_m_diff)

            m_diff = cp.absolute(t_m_diff) + m_xy_diff
            
            m_diff_l = m_diff - l_m * 2
            
            m_diff_l_sign = (cp.logical_xor(cp.sign(m_diff_l) + 1, True))

            m_diff_l_eff = cp.multiply(m_diff, m_diff_l_sign)

            m_diff_l_sign = cp.sign(m_diff_l_eff)

            m_diff_l_H = cp.multiply(cp.multiply(cp.reciprocal(m_diff_l_eff + m_diff_l_sign - 1), m_diff_l_sign),
                                     cp.log10(m_diff_l_eff + cp.absolute(m_diff_l_sign - 1)))
            
            d_m = cp.reciprocal(cp.sum(m_diff_l_H,
                                       (1,2)))

            # Occupancy test
            """
            t_m_o = t_m + o_m - 1
            m_diff_o = cp.absolute(t_m_o - cp.transpose(t_m_o, (0, 2, 1))) - o_m - 1
            m_occupancy = (cp.logical_xor(cp.sign(m_diff_o) + 1, True))
            
            m_idn = cp.identity(n_order_points)
            OT = cp.prod(cp.logical_or(m_xy_diff,
                                       cp.logical_not(m_occupancy - m_idn)),
                         (1,2))
            """
            
            G1 = max_order_matrix/max_ITC_matrix
            G2 = TO_matrix/TC_matrix
            BU = min_ITC_matrix/max_ITC_matrix
            CI = cp.multiply(d_m, BU) # d_m * 0.1
            
            E_matrix = G1 + G2 + BU + CI
            
            cp.cuda.Stream.null.synchronize()

            return (list(E_matrix), (list(max_ITC_matrix), list(TC_matrix), list(BU), list(CI)))

        # Non-parallelization
        else:
            print("[Scheduling] Must use GPU to calculate")
            
            for each_AGV_ID in _new_path.keys():
                each_AGV_len_schedule = 0
                each_AGV_num_orders = 0

                if length_only:
                    each_AGV_len_schedule, each_num_order, each_order_list = _new_path[each_AGV_ID]
                    each_AGV_num_orders = each_num_order
                else:
                    each_path = _new_path[each_AGV_ID]
                    for each_pos_path in each_path:
                        if len(each_pos_path) == 3:
                            each_AGV_num_orders += 1
                        each_AGV_len_schedule += 1
                    
                cost = each_AGV_len_schedule + each_AGV_num_orders
                ITC[each_AGV_ID] = cost
                
                if each_AGV_num_orders > max_order:
                    max_order = each_AGV_num_orders
                total_order += each_AGV_num_orders
        
            for _, each_value in ITC.items():
                
                if each_value > max_ITC:
                    max_ITC = each_value
                if each_value < min_ITC:
                    min_ITC = each_value
                total_cost += each_value

            TT = max_ITC
            TTC = total_cost
            BU = min_ITC / max_ITC
            CI = 0
            
            G1 = max_order/TT
            G2 = total_order/TTC
            
            value = G1 + G2 + BU + CI
            return (value, (TT, TTC, BU, CI))
Example #30
def compute_gradients_gpu(u, v, lr, batch_size, num_negs, dimensions, indices,
                          negatives):
    """
    This function computes and applies the gradient updates for the batch of samples, applying them inside
    the function and then returning the final values that the embeddings should be set to.

    :param u: the embedding values for the words we are computing gradients for (batch_size, dimensions)
    :param v: the embedding values for the positive sample + negatives samples (1 + num_negs, dimensions)
    :param lr: learning rate
    :param batch_size: size of the current batch; may be smaller than the regular batch_size for the last,
    partially filled batch of an epoch
    :param num_negs: number of negative samples for each positive sample
    :param dimensions: dimensions of the embedding
    :param indices: the indices of all the u values (batch_size)
    :param negatives: the indices of all the v values (batch_size, 1 + num_negs)
    :return: Returns the values that the embeddings of the samples and negatives should be set to, as well as the loss

    Once again, calculation was adapted from R. Řehůřek and P. Sojka
    RaRe-Technologies: Software Framework for Topic Modelling with Large Corpora
    https://github.com/RaRe-Technologies/gensim
    """
    u = cp.asarray(u)
    v = cp.asarray(v)
    u_orig = u
    v_orig = v
    u = u.T[cp.newaxis, :, :]
    v = cp.swapaxes(cp.swapaxes(v, 1, 2), 0, 2)
    norm_u = cp.linalg.norm(u, axis=1)
    norm_v = cp.linalg.norm(v, axis=1)
    distances = cp.linalg.norm(u - v, axis=1)
    alpha = 1 - norm_u**2
    beta = 1 - norm_v**2
    gamma = 1 + 2 * ((distances**2) / (alpha * beta))
    poincare_distances = cp.arccosh(gamma)
    exp_negative_distances = cp.exp(-poincare_distances)
    sum = cp.sum(exp_negative_distances, axis=0)

    distances_squared = distances**2
    c = (4 / (alpha * beta * cp.sqrt(gamma**2 - 1)))[:, cp.newaxis, :]
    u_coeff = ((distances_squared + alpha) / alpha)[:, cp.newaxis, :]
    distance_gradient_u = (u_coeff * u - v)
    distance_gradient_u *= c

    v_coeffs = ((distances_squared + beta) / beta)[:, cp.newaxis, :]
    distance_gradients_v = (v_coeffs * v - u)
    distance_gradients_v *= c

    gradients_v = -exp_negative_distances[:,
                                          cp.newaxis, :] * distance_gradients_v
    gradients_v /= sum
    gradients_v[0] += distance_gradients_v[0]
    gradients_v[0] += lr * 2 * v[0]

    gradients_u = -exp_negative_distances[:,
                                          cp.newaxis, :] * distance_gradient_u
    gradients_u /= sum
    gradient_u = cp.sum(gradients_u, axis=0)
    gradient_u += distance_gradient_u[0]

    u_update = (lr * (alpha**2) / 4 * gradient_u).T
    handle_duplicates(u_update, indices)
    v_updates = cp.swapaxes(
        cp.swapaxes((lr * (beta**2)[:, cp.newaxis, :] / 4 * gradients_v), 0,
                    2), 1, 2)
    v_updates = cp.reshape(v_updates,
                           (batch_size * (num_negs + 1), dimensions))
    handle_duplicates(v_updates, np.ravel(negatives))

    u_orig -= u_update
    v_orig = cp.reshape(v_orig, (batch_size * (num_negs + 1), dimensions))
    v_orig -= v_updates
    u_norms = cp.linalg.norm(u_orig, axis=1)
    v_norms = cp.linalg.norm(v_orig, axis=1)

    u_orig = (u_norms >= 1 - epsilon)[:, cp.newaxis] * (
        u_orig / u_norms[:, cp.newaxis] - cp.sign(u_orig) * 0.00001) + (
            u_norms < 1 - epsilon)[:, cp.newaxis] * u_orig

    v_orig = (v_norms >= 1 - epsilon)[:, cp.newaxis] * (
        v_orig / v_norms[:, cp.newaxis] - cp.sign(v_orig) * 0.00001) + (
            v_norms < 1 - epsilon)[:, cp.newaxis] * v_orig

    loss = cp.sum(-cp.log(exp_negative_distances[0] / sum), axis=0)
    return u_orig, v_orig, loss