Example #1
    def convolve(self, first, second):
        shape = first.shape[0] + second.shape[0] - 1
        best_shape = int(2**cp.ceil(cp.log2(shape)))

        first_f = cp.fft.rfft(first, best_shape)
        second_f = cp.fft.rfft(second, best_shape)
        return cp.fft.irfft(first_f * second_f, best_shape)[:shape]
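A minimal usage sketch for the FFT-based linear convolution above, assuming it is lifted out of its class as a free function (the method never touches self). Padding to the next power of two keeps the rfft/irfft fast; the result is truncated to the exact linear-convolution length len(first) + len(second) - 1.

import cupy as cp

def fft_convolve(first, second):
    # hypothetical free-function version of the method above
    shape = first.shape[0] + second.shape[0] - 1
    best_shape = int(2**cp.ceil(cp.log2(shape)))
    first_f = cp.fft.rfft(first, best_shape)
    second_f = cp.fft.rfft(second, best_shape)
    return cp.fft.irfft(first_f * second_f, best_shape)[:shape]

a = cp.array([1.0, 2.0, 3.0])
b = cp.array([0.0, 1.0, 0.5])
out = fft_convolve(a, b)
# should agree with direct convolution (cp.convolve ships with recent CuPy)
assert cp.allclose(out, cp.convolve(a, b))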
Example #2
 def cupy_place(arr, mask, vals):
     n = mask.sum()
     vals = vals.flatten()
     if len(vals) < n:
         reps = cupy.ceil(n / len(vals))
         vals = cupy.repeat(vals, int(reps), axis=0)
     arr[mask] = vals[:n]
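A quick usage sketch (assuming arr, mask, and vals are 1-D CuPy arrays). Note that cupy.repeat repeats each element in place, so the fill order differs from numpy.place, which cycles through vals ([10, 20, 10]).

import cupy

arr = cupy.arange(6)
mask = cupy.array([True, False, True, False, True, False])
cupy_place(arr, mask, cupy.array([10, 20]))
# n = 3 masked slots; vals is repeated elementwise to [10, 10, 20, 20],
# then truncated: arr is now [10, 1, 10, 3, 20, 5]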
Example #3
 def sample(self, epochs=1, burnin=1, batch_size=1, rng=None, **args):
     if rng is None:
         rng = cp.random.RandomState()
     X = args['X_train']
     y = args['y_train']
     if 'verbose' in args:
         verbose = args['verbose']
     else:
         verbose = None
     epochs = int(epochs)
     num_batches = cp.ceil(y[:].shape[0] / float(batch_size))
     q, p = self.start, {
         var: cp.zeros_like(self.start[var])
         for var in self.start.keys()
     }
     print('start burnin')
     for i in tqdm(range(int(burnin))):
         j = 0
         for X_batch, y_batch in self.iterate_minibatches(X, y, batch_size):
             kwargs = {
                 'X_train': X_batch,
                 'y_train': y_batch,
                 'verbose': verbose
             }
             q, p = self.step(q, p, rng, **kwargs)
             if verbose and (j % 100) == 0:
                 ll = cp.asnumpy(
                     self.model.negative_log_posterior(q, **kwargs))
                 print('burnin {0}, loss: {1:.4f}, mini-batch update : {2}'.
                       format(i, ll, j))
             j = j + 1
     logp_samples = np.zeros(epochs)
     posterior = {var: [] for var in self.start.keys()}
     print('start sampling')
     initial_step_size = self.step_size
     for i in tqdm(range(epochs)):
         j = 0
         for X_batch, y_batch in self.iterate_minibatches(X, y, batch_size):
             kwargs = {
                 'X_train': X_batch,
                 'y_train': y_batch,
                 'verbose': verbose
             }
             q, p = self.step(q, p, rng, **kwargs)
             self.step_size = self.lr_schedule(initial_step_size, j,
                                               num_batches)
             if verbose and (j % 100) == 0:
                 ll = cp.asnumpy(
                     self.model.negative_log_posterior(q, **kwargs))
                 print('epoch {0}, loss: {1:.4f}, mini-batch update : {2}'.
                       format(i, ll, j))
             j = j + 1
         #initial_step_size=self.step_size
         ll = cp.asnumpy(self.model.negative_log_posterior(q, **kwargs))
         logp_samples[i] = ll
         for var in self.start.keys():
             posterior[var].append(q[var])
     for var in self.start.keys():
         posterior[var] = cp.asarray(posterior[var])
     return posterior, logp_samples
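The sampler relies on a self.iterate_minibatches helper that is not shown. A minimal sketch consistent with its call sites (an assumption, not the author's code):

def iterate_minibatches(self, X, y, batch_size):
    # hypothetical helper: yield successive (X_batch, y_batch) pairs;
    # the last batch may be smaller than batch_size
    n = y.shape[0]
    for start in range(0, n, batch_size):
        yield X[start:start + batch_size], y[start:start + batch_size]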
Example #4
 def step(self, state, momentum, rng, **args):
     q = state.copy()
     p = self.draw_momentum(rng)
     q_new = deepcopy(q)
     p_new = deepcopy(p)
     positions, momentums = [cp.asnumpy(q)], [cp.asnumpy(p)]
     epsilon = self.step_size
     path_length = cp.ceil(2 * cp.random.rand() * self.path_length /
                           epsilon)
     grad_q = self.model.grad(q, **args)
     # leapfrog step
     for _ in cp.arange(path_length - 1):
         for var in self.start.keys():
             p_new[var] -= (0.5 * epsilon) * grad_q[var]
             q_new[var] += epsilon * p_new[var]
             grad_q = self.model.grad(q_new, **args)
             p_new[var] -= (0.5 * epsilon) * grad_q[var]  # second half-step of leapfrog
         #positions.append(deepcopy(q_new))
         #momentums.append(deepcopy(p_new))
     # negate momentum
     for var in self.start.keys():
         p_new[var] = -p_new[var]
     acceptprob = self.accept(q, q_new, p, p_new, **args)
     if cp.isfinite(acceptprob) and (cp.random.rand() < acceptprob):
         q = q_new.copy()
         p = p_new.copy()
     return q, p, positions, momentums, acceptprob
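self.accept is assumed but not shown. A plausible sketch of the standard Metropolis acceptance for HMC, assuming self.model.negative_log_posterior plays the role of the potential energy U(q) (an assumption about the interface, not the author's implementation):

import numpy as np
import cupy as cp

def accept(self, q, q_new, p, p_new, **args):
    # hypothetical: H(q, p) = U(q) + K(p) with K(p) = sum(p ** 2) / 2;
    # accept with probability min(1, exp(H_old - H_new))
    def hamiltonian(q, p):
        K = sum(float(cp.sum(p[var] ** 2)) for var in p) / 2.0
        return float(self.model.negative_log_posterior(q, **args)) + K
    return min(1.0, float(np.exp(hamiltonian(q, p) - hamiltonian(q_new, p_new))))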
Example #5
def quantile_bin_array(data, bins=6):
    """Returns symbolified array with equal-quantile binning.

    Parameters
    ----------
    data : array
        Data array of shape (time, variables).

    bins : int, optional (default: 6)
        Number of bins.

    Returns
    -------
    symb_array : array
        Converted data of integer type.
    """
    T, N = data.shape

    # get the bin quantile steps
    bin_edge = int(np.ceil(T / float(bins)))

    symb_array = np.zeros((T, N), dtype='int32')

    # get the lower edges of the bins for every time series
    edges = np.sort(data, axis=0)[::bin_edge, :].T
    bins = edges.shape[1]

    # This gives the symbolic time series
    symb_array = (data.reshape(T, N, 1) >= edges.reshape(1, N, bins)).sum(
        axis=2) - 1

    return symb_array.astype('int32')
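A short usage check for the binning: with equal-quantile binning each symbol 0..bins-1 should occur roughly T/bins times per variable (exact counts depend on ties).

import numpy as np

data = np.random.randn(1000, 2)             # (time, variables)
symb = quantile_bin_array(data, bins=4)
assert symb.shape == (1000, 2)
assert symb.min() >= 0 and symb.max() <= 3  # symbols lie in 0..bins-1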
Example #6
def exactFilter(tilt_angles, tiltAngle, sX, sY, sliceWidth, arr=[]):
    """
    exactFilter: Generates the exact weighting function required for weighted backprojection - y-axis is tilt axis
    Reference: Optik, "Exact filters for general geometry three dimensional reconstruction", vol. 73, 146, 1986.
    @param tilt_angles: list of all the tilt angles in one tilt series
    @param tiltAngle: tilt angle for which the exact weighting function is calculated
    @param sX: size of weighted image in X
    @param sY: size of weighted image in Y

    @return: filter volume

    """

    from cupy import array, matrix, sin, pi, arange, float32, column_stack, argmin, clip, ones, ceil

    # Using Friedel Symmetry in Fourier space.
    # sY = sY // 2 + 1

    # Calculate the relative angles in radians.
    diffAngles = (array(tilt_angles) - tiltAngle) * pi / 180.

    # Closest angle to tiltAngle (but not tiltAngle) sets the maximal frequency of overlap (Crowther's frequency).
    # Weights only need to be calculated up to this frequency.
    sampling = min(abs(diffAngles)[abs(diffAngles) > 0.001])
    crowtherFreq = min(sX // 2, int(ceil(1 / sin(sampling))))
    arrCrowther = matrix(abs(arange(-crowtherFreq, min(sX // 2, crowtherFreq + 1))))

    # Calculate weights
    wfuncCrowther = 1. / (clip(1 - array(matrix(abs(sin(diffAngles))).T * arrCrowther) ** 2, 0, 2)).sum(axis=0)

    # Fill the full volume with the weighting function
    wfunc = ones((sX, sY, 1), dtype=float32)
    wfunc[sX // 2 - crowtherFreq:sX // 2 + min(sX // 2, crowtherFreq + 1), :, 0] = column_stack(
        ([(wfuncCrowther), ] * (sY))).astype(float32)
    return wfunc
Example #7
def ceil(inp) -> 'Tensor':
    _check_tensors(inp)
    engine = _get_engine(inp)

    return _create_tensor(
        inp,
        data=engine.ceil(inp.data),
        func=wrapped_partial(ceil_backward, inp=inp)
    )
Example #8
def ceil(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.ceil <numpy.ceil>`.

    See its docstring for more information.
    """
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in ceil")
    if x.dtype in _integer_dtypes:
        # Note: The return dtype of ceil is the same as the input
        return x
    return Array._new(np.ceil(x._array))
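The wrapper encodes the Array API rule that ceil is the identity on integer dtypes. A usage sketch against the provisional numpy.array_api namespace (present in NumPy 1.22-1.26, later superseded by the array-api-strict package):

import numpy.array_api as xp  # warns on import: the module is experimental

a = xp.asarray([1.2, -0.7])
print(xp.ceil(a))              # [2., -0.]
i = xp.asarray([3, 4])
print(xp.ceil(i).dtype)        # int64: integer input passes through unchanged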
Example #9
    def _pyczt_cupy(self, x, k=None, w=None, a=None):
        olddim = x.ndim

        if olddim == 1:
            x = x[:, cp.newaxis]

        (m, n) = x.shape
        oldm = m

        if m == 1:
            x = x.transpose()
            (m, n) = x.shape

        if k is None:
            k = len(x)
        if w is None:
            w = cp.exp(-1j * 2 * pi / k)
        if a is None:
            a = 1.

        # %------- Length for power-of-two fft.

        nfft = int(2**cp.ceil(cp.log2(abs(m + k - 1))))

        # %------- Premultiply data.

        kk = cp.arange(-m + 1, max(k, m))[:, cp.newaxis]
        kk2 = (kk**2) / 2
        ww = w**kk2  # <----- Chirp filter is 1./ww
        nn = cp.arange(0, m)[:, cp.newaxis]
        aa = a**(-nn)
        aa = aa * ww[m + nn - 1, 0]
        y = (x * aa).astype(np.complex64)

        # %------- Fast convolution via FFT.

        fy = cp.fft.fft(y, nfft, axis=0)
        fv = cp.fft.fft(1 / ww[0:k - 1 + m], nfft,
                        axis=0)  # <----- Chirp filter.
        fy = fy * fv
        g = cp.fft.ifft(fy, axis=0)

        # %------- Final multiply.

        g = g[m - 1:m + k - 1, :] * ww[m - 1:m + k - 1]

        if oldm == 1:
            g = g.transpose()

        if olddim == 1:
            g = g.squeeze()

        return g
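With the defaults (k = len(x), w = exp(-2j*pi/k), a = 1) the chirp z-transform reduces to the plain DFT, which gives a cheap sanity check. Here czt stands for however _pyczt_cupy is exposed on its instance (a hypothetical handle); the complex64 cast inside limits precision, hence the loose tolerance.

import cupy as cp

x = cp.random.rand(8)
X = czt(x)  # hypothetical handle to the bound _pyczt_cupy method
assert cp.allclose(X, cp.fft.fft(x), atol=1e-4)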
Example #10
def main(tEnd):
    """ N-body simulation """

    # Simulation parameters
    N = 3  # Number of particles
    t = 0  # current time of the simulation
    # tEnd: time at which the simulation ends (function argument)
    dt = 0.0005  # timestep
    softening = 0.1  # softening length
    G = 1.0  # Newton's Gravitational Constant

    # Generate Initial Conditions
    cp.random.seed(17)  # set the random number generator seed

    mass = 20.0 * cp.ones((N, 1)) / N  # total mass of particles is 20
    pos = cp.random.randn(N, 3)  # randomly selected positions and velocities
    vel = cp.random.randn(N, 3)

    # Convert to Center-of-Mass frame
    vel -= cp.mean(mass * vel, 0) / cp.mean(mass)

    # calculate initial gravitational accelerations
    acc = get_acc(pos, mass, G, softening)

    # number of timesteps
    Nt = int(cp.ceil(tEnd / dt))

    # Simulation Main Loop
    for i in range(Nt):
        # (1/2) kick
        vel += acc * dt / 2.0

        # drift
        pos += vel * dt

        # update accelerations
        acc = get_acc(pos, mass, G, softening)

        # (1/2) kick
        vel += acc * dt / 2.0

        # update time
        t += dt

    # final partial step so the state lands exactly on tEnd
    dt = tEnd - Nt * dt
    vel += acc * dt / 2.0
    pos += vel * dt
    acc = get_acc(pos, mass, G, softening)
    vel += acc * dt / 2.0

    cp.cuda.Stream.null.synchronize()

    return 0
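get_acc is assumed but not shown. A sketch of the standard pairwise softened-gravity acceleration, consistent with how it is called here (pos is (N, 3), mass is (N, 1)); this is modeled on common N-body demos, not necessarily the author's helper:

import cupy as cp

def get_acc(pos, mass, G, softening):
    # pairwise separations: dx[i, j] = x[j] - x[i]
    x, y, z = pos[:, 0:1], pos[:, 1:2], pos[:, 2:3]
    dx, dy, dz = x.T - x, y.T - y, z.T - z
    # softened inverse-cube distances (finite on the diagonal)
    inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2) ** (-1.5)
    ax = G * (dx * inv_r3) @ mass
    ay = G * (dy * inv_r3) @ mass
    az = G * (dz * inv_r3) @ mass
    return cp.hstack((ax, ay, az))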
Example #11
def deskew(image, angle, dz, pixel_size):
    deskewed = deskewGPU(image, angle, dz, pixel_size)

    image_cp = cp.array(image)
    deskewed_cp = cp.array(deskewed)

    pages, col, row = image_cp.shape
    noise_size = int(cp.ceil(cp.max(cp.array([row, col])) * 0.1))
    image_noise_patch = image_cp[0:noise_size,
                                 col - (noise_size + 1):col - 1, :]
    image_noise_patch = image_noise_patch.flatten()

    fill_length = deskewed_cp.size - cp.count_nonzero(deskewed_cp)
    repeat_frequency = cp.ceil(fill_length / image_noise_patch.size)
    repeat_frequency = cp.asnumpy(repeat_frequency).flatten().astype(
        dtype=np.uint16)[0]
    noise = cp.tile(image_noise_patch, repeat_frequency + 1)
    noise = noise[0:fill_length]
    deskewed_cp[deskewed_cp == 0] = noise

    return cp.asnumpy(deskewed_cp)
Example #12
    def fit(self, X, w_init=None):
        """Fit the model to the data X, with parameters initialized at w_init
        Parameters
        ----------
        X : {numpy array, integer matrix} shape (n_samples, n_features)
            Training data.
        w_init : {numpy array, float or complex} shape (m_parameters,) (optional)
            Initial value of the parameters
        Returns
        -------
        self : TN
            The fitted model.
        """

        #       Some initial checks of the data, initialize random number generator
        X = check_array(X, dtype=np.int64)
        rng = check_random_state(self.random_state)

        #       Initialize parameters of MPS
        self.n_samples = X.shape[0]
        self.n_features = X.shape[1]
        self.d = np.max(X) + 1
        self.m_parameters = self.n_features * self.d * self.D * self.D
        if w_init is None:
            self._weightinitialization(rng)
        else:
            self.w = w_init
        self.norm = self._computenorm()
        self.history = []

        n_batches = int(np.ceil(float(self.n_samples) / self.batch_size))
        begin = time.time()
        for iteration in range(1, self.n_iter + 1):
            batch_slices = list(
                self._gen_even_slices(self.batch_size, n_batches,
                                      self.n_samples, rng))
            for batch_slice in batch_slices:
                self._fit(X[batch_slice])

            end = time.time()

            if self.verbose:
                train_likelihood = self.likelihood(X)
                print("Iteration %d, likelihood = %.3f,"
                      " time = %.2fs" %
                      (iteration, train_likelihood, end - begin))
                self.history.append(train_likelihood)
            begin = end

        return self
Example #13
def inter8_mat_flow5K(LF=None,
                      P_r=None,
                      P_1=None,
                      P_2=None,
                      U_r=None,
                      U_1=None,
                      U_2=None,
                      H_r=None,
                      H_1=None,
                      H_2=None):

    print("inter8_mat_flow5K")

    height = Params.HEIGHT
    width = Params.WIDTH

    P_1[P_r == 1] = 0
    P_2[P_r == 0] = 0  #print P_2

    U_1[U_r == 1] = 0
    U_2[U_r == 0] = 0  #print U_2

    H_1[H_r == 1] = 0
    H_2[H_r == 0] = 0  #print H_2

    # cast to int64 to avoid a dtype error
    P_1 = cp.array(P_1, dtype=cp.int64)
    P_2 = cp.array(P_2, dtype=cp.int64)
    U_1 = cp.array(U_1, dtype=cp.int64)
    U_2 = cp.array(U_2, dtype=cp.int64)
    H_1 = cp.array(H_1, dtype=cp.int64)
    H_2 = cp.array(H_2, dtype=cp.int64)


    FLOW_MAT = ((1.0 - P_r) * \
               ((1.0 - U_r) * ((1.0 - H_r) * LF[(P_1) * (height * width) + U_1 * height + H_1 + 1 -1] + \
                                       H_r  * LF[(P_1) * (height * width) + U_1 * height + H_2 + 1 -1]) + \
                     ((U_r) * ((1.0 - H_r) * LF[(P_1) * (height * width) + U_2 * height + H_1 + 1 -1] + \
                                       H_r  * LF[(P_1) * (height * width) + U_2 * height + H_2 + 1 -1])))) + \
                     ((P_r) * \
               ((1.0 - U_r) * ((1.0 - H_r) * LF[(P_2) * (height * width) + U_1 * height + H_1 + 1 -1] + \
                                       H_r  * LF[(P_2) * (height * width) + U_1 * height + H_2 + 1 -1]) + \
                     ((U_r) * ((1.0 - H_r) * LF[(P_2) * (height * width) + U_2 * height + H_1 + 1 -1] + \
                                       H_r  * LF[(P_2) * (height * width) + U_2 * height + H_2 + 1 -1]))))

    FLOW_MAT = cp.ceil((FLOW_MAT * 10000))
    FLOW_MAT = FLOW_MAT / 10000

    return FLOW_MAT
Example #14
    def step(self, model, step_num):
        alpha = self.get_alpha(model, step_num)
        # multiplication
        det_num = model.detector_signal.shape[0]
        for i in range(self.n_slices):
            # get slice
            # ############## may be speeded up if all slices are calc. once out of for cycle
            i1 = int(i * cp.ceil(det_num / self.n_slices))
            i2 = int(min((i + 1) * cp.ceil(det_num / self.n_slices), det_num))
            w_slice = model.detector_geometry[i1:i2]
            wi_slice = self.wi[i1:i2]
            y_slice = model.detector_signal[i1:i2]
            # calculating  correction
            p = signal.get_signal_gpu(model.solution, w_slice)
            dp = y_slice - p
            a = cp.divide(dp, wi_slice)
            a = cp.where(cp.isnan(a), 0, a)

            if self.iter_type == 1:  # SMART
                y_slice = cp.where(y_slice < 1E-20, 0, y_slice)
                a = cp.divide(a, cp.abs(y_slice))
                a = cp.where(cp.isnan(a), 0, a)
            correction = alpha / (i2 - i1) * cp.sum(cp.multiply(cp.moveaxis(w_slice, 0, -1), a), axis=-1)
            model._solution = model.solution + correction
Example #15
    def getProj(self, obs, center_pixel, rz, z, ry, rx):
        patch = self.getPatch(obs, center_pixel, torch.zeros_like(rz))
        patch = np.round(patch.cpu().numpy(), 5)
        patch = cp.array(patch)
        projections = []
        size = self.patch_size
        zs = cp.array(z.numpy()) + cp.array(
            [(-size / 2 + j) * self.heightmap_resolution for j in range(size)])
        zs = zs.reshape((zs.shape[0], 1, 1, zs.shape[1]))
        zs = zs.repeat(size, 1).repeat(size, 2)
        c = patch.reshape(patch.shape[0], self.patch_size, self.patch_size,
                          1).repeat(size, 3)
        ori_occupancy = c > zs
        # transform into points
        point_w_d = cp.argwhere(ori_occupancy)

        rz_id = (rz.expand(-1, self.num_rz) - self.rzs).abs().argmin(1)
        ry_id = (ry.expand(-1, self.num_ry) - self.rys).abs().argmin(1)
        rx_id = (rx.expand(-1, self.num_rx) - self.rxs).abs().argmin(1)

        dimension = point_w_d[:, 0]
        point = point_w_d[:, 1:4]

        rz_id = cp.array(rz_id)
        ry_id = cp.array(ry_id)
        rx_id = cp.array(rx_id)
        mapped_point = self.map[rz_id[dimension], ry_id[dimension],
                                rx_id[dimension], point[:, 0], point[:, 1],
                                point[:, 2]].T
        rotated_point = mapped_point.T[(cp.logical_and(
            0 < mapped_point.T, mapped_point.T < size)).all(1)]
        d = dimension[(cp.logical_and(
            0 < mapped_point.T, mapped_point.T < size)).all(1)].T.astype(int)

        for i in range(patch.shape[0]):
            point = rotated_point[d == i].T
            occupancy = cp.zeros((size, size, size))
            if point.shape[0] > 0:
                occupancy[point[0], point[1], point[2]] = 1

            occupancy = median_filter(occupancy, size=2)
            occupancy = cp.ceil(occupancy)

            projection = cp.stack(
                (occupancy.sum(0), occupancy.sum(1), occupancy.sum(2)))
            projections.append(projection)

        return torch.tensor(cp.stack(projections)).float().to(self.device)
Example #16
def bars(x, y, z, t):
    w = 1
    h = 1
    R = 3
    f = .125
    m = (x % R < w) & (y % 4 > R - h) & (z / 2 + 1 < cp.sin(
        f * cp.ceil(x / R) * cp.ceil(y / R) * t / 100))
    r = g = b = m  # the three channel masks are identical
    return r * 20, g * 3 - z / 10, b * 20
Example #17
def tournament_selection(population, optim):
    # initialize random sequence
    SEQUENCE = cupy.random.uniform(0, 1, population.shape[0])
    # get pearsons (CuPy has no fromiter, so collect the scores on the host)
    pearsons = cupy.asarray([evaluate_chromosome(population[i], optim)
                             for i in range(population.shape[0])],
                            dtype=cupy.int64)
    # get parents
    parents = cupy.zeros(population.shape[0], dtype=cupy.int64)
    for i in range(population.shape[0]):
        k = int(cupy.ceil(SEQUENCE[i] * 10))
        chromosome_pointers = cupy.random.choice(
            cupy.arange(population.shape[0]), k)
        evaluation = pearsons[chromosome_pointers].max()
        if len(cupy.where(pearsons == evaluation)[0]) > 1:
            parents[i] = cupy.where(pearsons == evaluation)[0][0]
        else:
            parents[i] = cupy.where(pearsons == evaluation)[0]
    return index_to_chromosome_decode(parents, population)
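index_to_chromosome_decode is assumed; given that parents holds row indices into population, a minimal sketch could be plain fancy indexing:

def index_to_chromosome_decode(parents, population):
    # hypothetical: map selected parent indices back to chromosome rows
    return population[parents]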
Example #18
def create_gl(N0, Nproj, Nslices, cor, interp_type):
    Nspan = 3
    beta = cp.pi / Nspan
    # size after zero padding in radial direction
    N = int(cp.ceil((N0 + abs(N0 / 2 - cor) * 2.0) / 16.0) * 16)

    # size after zero padding in the angle direction (for nondense sampling rate)
    osangles = int(max(round(3.0 * N / 2.0 / Nproj), 1))
    Nproj = osangles * Nproj
    # polar space

    proj = cp.arange(0, Nproj) * cp.pi / Nproj - beta / 2
    s = cp.linspace(-1, 1, N)
    # log-polar parameters
    (Nrho, Ntheta, dtheta, drho, aR, am,
     g) = getparameters(beta, proj[1] - proj[0], 2.0 / (N - 1), N, Nproj)
    # log-polar space
    thsp = (cp.arange(-Ntheta / 2, Ntheta / 2) *
            cp.float32(dtheta)).astype('float32')
    rhosp = (cp.arange(-Nrho, 0) * drho).astype('float32')
    erho = cp.tile(cp.exp(rhosp)[..., cp.newaxis], [1, Ntheta])
    # compensation for cubic interpolation
    B3th = splineB3(thsp, 1)
    B3th = cp.fft.fft(cp.fft.ifftshift(B3th))
    B3rho = splineB3(rhosp, 1)
    B3rho = (cp.fft.fft(cp.fft.ifftshift(B3rho)))
    B3com = cp.outer(B3rho, B3th)
    # struct with global parameters
    P = Pgl(Nspan, N, N0, Nproj, Nslices, Ntheta, Nrho, proj, s, thsp, rhosp,
            aR, beta, B3com, am, g, cor, osangles, interp_type)
    # represent as array
    parsi = cp.array([
        P.N, P.N0, P.Ntheta, P.Nrho, P.Nspan, P.Nproj, P.Nslices, P.cor,
        P.osangles, P.interp_type == 'cubic'
    ],
                     dtype='float32')
    params = cp.concatenate((parsi, erho.flatten())).get()
    return (P, params)
Example #19
def bilateral(img_in, sigma_s, sigma_v, eps=1e-8):
    # gaussian
    gsi = lambda r2, sigma: cup.exp(-0.5 * r2 / sigma**2)
    win_width = int(cup.ceil(3 * sigma_s))
    wgt_sum = cup.ones(img_in.shape) * eps
    result = img_in * eps
    off = np.empty_like(img_in, dtype=np.float32)

    assert off.dtype == img_in.dtype
    assert off.shape == img_in.shape

    for shft_x in range(-win_width, win_width + 1):
        for shft_y in range(-win_width, win_width + 1):
            aroll0(off, img_in, shft_y)
            aroll1(off, off, shft_x)

            w = gsi(shft_x**2 + shft_y**2, sigma_s)
            tw = w * gsi((off - img_in)**2, sigma_v)
            result += off * tw
            wgt_sum += tw

    # normalize the result and return
    return result / wgt_sum
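aroll0 and aroll1 are assumed shift helpers. Sketches consistent with their call sites (destination buffer first, shift along axis 0 or 1) could simply wrap cupy.roll; an assumption, since the originals may use custom kernels to avoid the temporary:

import cupy as cup  # the example aliases cupy as `cup`

def aroll0(dst, src, shift):
    # roll along axis 0 into a preallocated buffer
    dst[...] = cup.roll(src, shift, axis=0)

def aroll1(dst, src, shift):
    # roll along axis 1 into a preallocated buffer
    dst[...] = cup.roll(src, shift, axis=1)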
Example #20
 def step(self, state, momentum, rng, **args):
     q = state.copy()
     p = self.draw_momentum(rng)
     q_new = deepcopy(q)
     p_new = deepcopy(p)
     epsilon = self.step_size
     path_length = cp.ceil(2 * cp.random.rand() * self.path_length /
                           epsilon)
     grad_q = self.model.grad(q, **args)
     # SG-HMC leapfrog step
     for _ in cp.arange(path_length - 1):
         for var in self.start.keys():
             dim = (cp.array(q_new[var])).size
             rvar = rng.normal(0, 2 * epsilon, dim).reshape(q[var].shape)
             q_new[var] += epsilon * p_new[var]
             grad_q = self.model.grad(q_new, **args)
             p_new[var] = (
                 1 - epsilon) * p_new[var] + epsilon * grad_q[var] + rvar
     acceptprob = self.accept(q, q_new, p, p_new, **args)
     if cp.isfinite(acceptprob) and (cp.random.rand() < acceptprob):
         q = q_new.copy()
         p = p_new.copy()
     return q, p, acceptprob
Example #21
    def __init__(self, params):
        self.params = params

        if self.params.hiddenRatio is not None:
            self.params.n_hidden = int(
                cupy.ceil(self.params.n_visible * self.params.hiddenRatio))

        # for 0-1 normalization
        self.norm_max = cupy.ones((self.params.n_visible, )) * -cupy.inf
        self.norm_min = cupy.ones((self.params.n_visible, )) * cupy.inf
        self.n = 0

        self.rng = cupy.random.RandomState(1234)

        a = 1. / self.params.n_visible
        self.W = cupy.array(
            self.rng.uniform(  # initialize W uniformly
                low=-a,
                high=a,
                size=(self.params.n_visible, self.params.n_hidden)))

        self.hbias = cupy.zeros(self.params.n_hidden)  # initialize h bias 0
        self.vbias = cupy.zeros(self.params.n_visible)  # initialize v bias 0
        self.W_prime = self.W.T
Example #22
def splineB3(x2, r):
    sizex = len(x2)
    x2 = x2 - (x2[-1] + x2[0]) / 2
    stepx = x2[1] - x2[0]
    ri = int(cp.ceil(2 * r))
    r = r * stepx
    x2c = x2[int(cp.ceil((sizex + 1) / 2.0)) - 1]
    x = x2[int(cp.ceil((sizex + 1) / 2.0) - ri -
               1):int(cp.ceil((sizex + 1) / 2.0) + ri)]
    d = cp.abs(x - x2c) / r
    B3 = x * 0
    for ix in range(-ri, ri + 1):
        idx = ix + ri  # avoid shadowing the builtin `id`
        if d[idx] < 1:  # first polynomial piece
            B3[idx] = (3 * d[idx]**3 - 6 * d[idx]**2 + 4) / 6
        elif d[idx] < 2:  # second piece
            B3[idx] = (-d[idx]**3 + 6 * d[idx]**2 - 12 * d[idx] + 8) / 6
    B3f = x2 * 0
    B3f[int(cp.ceil((sizex + 1) / 2.0) - ri -
            1):int(cp.ceil((sizex + 1) / 2.0) + ri)] = B3
    return B3f
Example #23
def _quantile_unchecked(a, q, axis=None, out=None, interpolation='linear',
                        keepdims=False):
    if q.ndim == 0:
        q = q[None]
        zerod = True
    else:
        zerod = False
    if q.ndim > 1:
        raise ValueError('Expected q to have a dimension of 1.\n'
                         'Actual: {0} != 1'.format(q.ndim))
    if keepdims:
        if axis is None:
            keepdim = (1,) * a.ndim
        else:
            keepdim = list(a.shape)
            for ax in axis:
                keepdim[ax % a.ndim] = 1
            keepdim = tuple(keepdim)

    # Copy a since we need it sorted but without modifying the original array
    if isinstance(axis, int):
        axis = axis,
    if axis is None:
        ap = a.flatten()
        nkeep = 0
    else:
        # Reduce axes from a and put them last
        axis = tuple(ax % a.ndim for ax in axis)
        keep = set(range(a.ndim)) - set(axis)
        nkeep = len(keep)
        for i, s in enumerate(sorted(keep)):
            a = a.swapaxes(i, s)
        ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()

    axis = -1
    ap.sort(axis=axis)
    Nx = ap.shape[axis]
    indices = q * (Nx - 1.)

    if interpolation == 'lower':
        indices = cupy.floor(indices).astype(cupy.int32)
    elif interpolation == 'higher':
        indices = cupy.ceil(indices).astype(cupy.int32)
    elif interpolation == 'midpoint':
        indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))
    elif interpolation == 'nearest':
        # TODO(hvy): Implement nearest using around
        raise ValueError('\'nearest\' interpolation is not yet supported. '
                         'Please use any other interpolation method.')
    elif interpolation == 'linear':
        pass
    else:
        raise ValueError('Unexpected interpolation method.\n'
                         'Actual: \'{0}\' not in (\'linear\', \'lower\', '
                         '\'higher\', \'midpoint\')'.format(interpolation))

    if indices.dtype == cupy.int32:
        ret = cupy.rollaxis(ap, axis)
        ret = ret.take(indices, axis=0, out=out)
    else:
        if out is None:
            ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)
        else:
            ret = cupy.rollaxis(out, 0, out.ndim)

        cupy.ElementwiseKernel(
            'S idx, raw T a, raw int32 offset, raw int32 size', 'U ret',
            '''
            ptrdiff_t idx_below = floor(idx);
            U weight_above = idx - idx_below;

            ptrdiff_t max_idx = size - 1;
            ptrdiff_t offset_bottom = _ind.get()[0] * offset + idx_below;
            ptrdiff_t offset_top = min(offset_bottom + 1, max_idx);

            U diff = a[offset_top] - a[offset_bottom];

            if (weight_above < 0.5) {
                ret = a[offset_bottom] + diff * weight_above;
            } else {
                ret = a[offset_top] - diff * (1 - weight_above);
            }
            ''',
            'percentile_weightnening'
        )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ap.size, ret)
        ret = cupy.rollaxis(ret, -1)  # Roll q dimension back to first axis

    if zerod:
        ret = ret.squeeze(0)
    if keepdims:
        if q.size > 1:
            keepdim = (-1,) + keepdim
        ret = ret.reshape(keepdim)

    return _core._internal_ascontiguousarray(ret)
Example #24
def percentile(a, q, axis=None, out=None, interpolation='linear',
               keepdims=False):
    """Computes the q-th percentile of the data along the specified axis.

    Args:
        a (cupy.ndarray): Array for which to compute percentiles.
        q (float, tuple of floats or cupy.ndarray): Percentiles to compute
            in the range between 0 and 100 inclusive.
        axis (int or tuple of ints): Along which axis or axes to compute the
            percentiles. The flattened array is used by default.
        out (cupy.ndarray): Output array.
        interpolation (str): Interpolation method when a quantile lies between
            two data points. ``linear`` interpolation is used by default.
            Supported interpolations are ``lower``, ``higher``, ``midpoint``,
            ``nearest`` and ``linear``.
        keepdims (bool): If ``True``, the reduced axis is retained as an
            axis of size one.

    Returns:
        cupy.ndarray: The percentiles of ``a``, along the axis if specified.

    .. seealso:: :func:`numpy.percentile`

    """
    q = cupy.asarray(q, dtype=a.dtype)
    if q.ndim == 0:
        q = q[None]
        zerod = True
    else:
        zerod = False
    if q.ndim > 1:
        raise ValueError('Expected q to have a dimension of 1.\n'
                         'Actual: {0} != 1'.format(q.ndim))

    if keepdims:
        if axis is None:
            keepdim = (1,) * a.ndim
        else:
            keepdim = list(a.shape)
            for ax in axis:
                keepdim[ax % a.ndim] = 1
            keepdim = tuple(keepdim)

    # Copy a since we need it sorted but without modifying the original array
    if isinstance(axis, int):
        axis = axis,
    if axis is None:
        ap = a.flatten()
        nkeep = 0
    else:
        # Reduce axes from a and put them last
        axis = tuple(ax % a.ndim for ax in axis)
        keep = set(range(a.ndim)) - set(axis)
        nkeep = len(keep)
        for i, s in enumerate(sorted(keep)):
            a = a.swapaxes(i, s)
        ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()

    axis = -1
    ap.sort(axis=axis)
    Nx = ap.shape[axis]
    indices = q * 0.01 * (Nx - 1.)  # percents to decimals

    if interpolation == 'lower':
        indices = cupy.floor(indices).astype(cupy.int32)
    elif interpolation == 'higher':
        indices = cupy.ceil(indices).astype(cupy.int32)
    elif interpolation == 'midpoint':
        indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))
    elif interpolation == 'nearest':
        # TODO(hvy): Implement nearest using around
        raise ValueError("'nearest' interpolation is not yet supported. "
                         'Please use any other interpolation method.')
    elif interpolation == 'linear':
        pass
    else:
        raise ValueError('Unexpected interpolation method.\n'
                         "Actual: '{0}' not in ('linear', 'lower', 'higher', "
                         "'midpoint')".format(interpolation))

    if indices.dtype == cupy.int32:
        ret = cupy.rollaxis(ap, axis)
        ret = ret.take(indices, axis=0, out=out)
    else:
        if out is None:
            ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)
        else:
            ret = cupy.rollaxis(out, 0, out.ndim)

        cupy.ElementwiseKernel(
            'S idx, raw T a, raw int32 offset', 'U ret',
            '''
            ptrdiff_t idx_below = floor(idx);
            U weight_above = idx - idx_below;

            ptrdiff_t offset_i = _ind.get()[0] * offset;
            ret = a[offset_i + idx_below] * (1.0 - weight_above)
              + a[offset_i + idx_below + 1] * weight_above;
            ''',
            'percentile_weightnening'
        )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)
        ret = cupy.rollaxis(ret, -1)  # Roll q dimension back to first axis

    if zerod:
        ret = ret.squeeze(0)
    if keepdims:
        if q.size > 1:
            keepdim = (-1,) + keepdim
        ret = ret.reshape(keepdim)

    return cupy.ascontiguousarray(ret)
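Usage sketch (the values follow from the linear and lower interpolation rules):

import cupy

a = cupy.arange(10, dtype=cupy.float64)
print(cupy.percentile(a, 50))                               # 4.5 (linear)
print(cupy.percentile(a, [25, 75], interpolation='lower'))  # [2. 6.]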
Example #25
    def fit(self, X, y):
        """
        This should fit classifier.
        """
        assert (type(self.C) == float), "C parameter must be float"
        assert (type(self.eta) == float), "eta parameter must be float"
        assert (type(self.max_iter) == int and (self.max_iter >= 1 or self.max_iter == -1)), "max_iter parameter must be positive integer. -1 for no limit"
        assert (type(self.batch_size) == int and self.batch_size >= 1 and self.batch_size <= len(X)), "batch_size parameter must be positive integer"

        self.history_score = list() # for saving score of each epoch

        X = cp.asarray(X)
        y = cp.asarray(y)
        n = X.shape[0] # number of data points
        s = cp.arange(n)
        self.n_features = X.shape[1] # number of features
        self.classes, y_ = cp.unique(y, return_inverse=True)
        self.n_classes = len(self.classes) # number of classes
        y = self.one_hot(y_)

        # init parameter
        self.rgen = cp.random.RandomState(self.random_state) 
        W = self.rgen.normal(loc=0.0, scale=0.01, size=(self.n_features, self.n_classes)) # (784, 10)
        b = cp.ones((1, self.n_classes)) # (1, 10)
        W_ = W[:,:]
        b_ = b[:,:]

        # the best W and b that have the best accuracy
        self.best_W = W_[:]
        self.best_b = b_[:]

        mi = self.max_iter
        
        # SGD Algorithm with Weight Averaging
        for it in range(1, mi+1):
            n_batches = int(cp.ceil(len(y) / self.batch_size)) # number of batches

            # random sampling without replacement
            self.rgen.shuffle(s) # shuffle indices {0, 1, ..., n-1}

            valid_batch_idx = s[self.batch_size * (n_batches - 1) :]

            X_valid = X[valid_batch_idx]
            y_valid = y_[valid_batch_idx]
            # X_valid = cp.array(X_valid)

            for i in range(n_batches-1):
                # mini-batch
                batch_idx = s[self.batch_size * i : self.batch_size * (i + 1)]
                # gradient
                dw , db = self._SGD(X[batch_idx], y[batch_idx], W, b)

                # update (weight averaging)
                W = cp.subtract(W, cp.multiply(self.eta, dw)) # (784, 10)
                b = cp.subtract(b, cp.multiply(self.eta, db)) # (1, 10)
                W_ = cp.add(cp.multiply((it/(it+1)), W_), cp.multiply((1/(it+1)), W)) # running average: coefficients sum to 1
                b_ = cp.add(cp.multiply((it/(it+1)), b_), cp.multiply((1/(it+1)), b))


            # keep the best weight
            if self._check_score(X_valid, y_valid, self.best_W, self.best_b) < self._check_score(X_valid, y_valid, W_, b_):
                self.best_W = W_[:]
                self.best_b = b_[:]

            # if it % 100 == 0:
            #     print(f"Iteration {it} / {self.max_iter} \t", end='')
            #     print(f"train_accuracy {accuracy_score(cp.asnumpy(self.predict(X_valid)), cp.asnumpy(y_valid))}")

            # save accuracy score of each epoch
            self.history_score.append(self._score(X, y_))
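A quick host-side sanity check of the corrected running-average recursion used above: with coefficients it/(it+1) and 1/(it+1), and it incrementing by one per update, the iterate tracks the plain mean.

ws = [1.0, 2.0, 3.0, 4.0]
avg = ws[0]
for it, w in enumerate(ws[1:], start=1):
    avg = (it / (it + 1)) * avg + (1 / (it + 1)) * w
assert abs(avg - sum(ws) / len(ws)) < 1e-12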
Example #26
def smooth(data, smooth_width, kernel='gaussian', mask=None, residuals=False):
    """Returns either smoothed time series or its residuals.

    the difference between the original and the smoothed time series
    (=residuals) of a kernel smoothing with gaussian (smoothing kernel width =
    twice the sigma!) or heaviside window, equivalent to a running mean.

    Assumes data of shape (T, N) or (T,)
    :rtype: array
    :returns: smoothed/residual data

    Parameters
    ----------
    data : array
        Data array of shape (time, variables).

    smooth_width : float
        Window width of smoothing, 2*sigma for a gaussian.

    kernel : str, optional (default: 'gaussian')
        Smoothing kernel, 'gaussian' or 'heaviside' for a running mean.

    mask : bool array, optional (default: None)
        Data mask where True labels masked samples.

    residuals : bool, optional (default: False)
        True if residuals should be returned instead of smoothed data.

    Returns
    -------
    data : array-like
        Smoothed/residual data.
    """

    print("%s %s smoothing with " % ({
        True: "Take residuals of a ",
        False: ""
    }[residuals], kernel) + "window width %.2f (2*sigma for a gaussian!)" %
          (smooth_width))

    totaltime = len(data)
    if kernel == 'gaussian':
        window = np.exp(-(np.arange(totaltime).reshape(
            (1, totaltime)) - np.arange(totaltime).reshape(
                (totaltime, 1)))**2 / ((2. * smooth_width / 2.)**2))
    elif kernel == 'heaviside':
        import scipy.linalg
        wtmp = np.zeros(totaltime)
        wtmp[:int(np.ceil(smooth_width / 2.))] = 1
        window = scipy.linalg.toeplitz(wtmp)

    if mask is None:
        if np.ndim(data) == 1:
            smoothed_data = (data * window).sum(axis=1) / window.sum(axis=1)
        else:
            smoothed_data = np.zeros(data.shape)
            for i in range(data.shape[1]):
                smoothed_data[:, i] = (data[:, i] *
                                       window).sum(axis=1) / window.sum(axis=1)
    else:
        if np.ndim(data) == 1:
            smoothed_data = ((data * window * (mask == False)).sum(axis=1) /
                             (window * (mask == False)).sum(axis=1))
        else:
            smoothed_data = np.zeros(data.shape)
            for i in range(data.shape[1]):
                smoothed_data[:, i] = ((data[:, i] * window *
                                        (mask == False)[:, i]).sum(axis=1) /
                                       (window *
                                        (mask == False)[:, i]).sum(axis=1))

    if residuals:
        return data - smoothed_data
    else:
        return smoothed_data
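Usage sketch: the smoothed series plus the residuals reconstructs the input exactly, since the residuals are defined as data minus the smoothed series.

import numpy as np

T = 500
data = np.random.randn(T) + np.sin(np.arange(T) / 50.)
trend = smooth(data, smooth_width=20., kernel='gaussian')
resid = smooth(data, smooth_width=20., kernel='gaussian', residuals=True)
assert np.allclose(trend + resid, data)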
Example #27
def frac__(z):
    # fractional part of z; note this returns 1.0 (not 0.0) at exact
    # integers, since z + 1 - ceil(z) == 1 there
    return z + 1 - cp.ceil(z)
Example #28
def subfactorial__(n):
    return cp.ceil(cp.round_(factorial__(n)/cp.exp(1))) - 1.0
Example #29
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
    """
    Function to add random noise of various types to a floating-point image.

    Parameters
    ----------
    image : ndarray
        Input image data. Will be converted to float.
    mode : str, optional
        One of the following strings, selecting the type of noise to add:

        - 'gaussian'  Gaussian-distributed additive noise.
        - 'localvar'  Gaussian-distributed additive noise, with specified
                      local variance at each point of `image`.
        - 'poisson'   Poisson-distributed noise generated from the data.
        - 'salt'      Replaces random pixels with 1.
        - 'pepper'    Replaces random pixels with 0 (for unsigned images) or
                      -1 (for signed images).
        - 's&p'       Replaces random pixels with either 1 or `low_val`, where
                      `low_val` is 0 for unsigned images or -1 for signed
                      images.
        - 'speckle'   Multiplicative noise using out = image + n*image, where
                      n is Gaussian noise with specified mean & variance.
    seed : int, optional
        If provided, this will set the random seed before generating noise,
        for valid pseudo-random comparisons.
    clip : bool, optional
        If True (default), the output will be clipped after the noise is
        applied, for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
        needed to maintain the proper image data range. If False, clipping
        is not applied, and the output may extend beyond the range [-1, 1].
    mean : float, optional
        Mean of random distribution. Used in 'gaussian' and 'speckle'.
        Default : 0.
    var : float, optional
        Variance of random distribution. Used in 'gaussian' and 'speckle'.
        Note: variance = (standard deviation) ** 2. Default : 0.01
    local_vars : ndarray, optional
        Array of positive floats, same shape as `image`, defining the local
        variance at every image point. Used in 'localvar'.
    amount : float, optional
        Proportion of image pixels to replace with noise on range [0, 1].
        Used in 'salt', 'pepper', and 'salt & pepper'. Default : 0.05
    salt_vs_pepper : float, optional
        Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
        Higher values represent more salt. Default : 0.5 (equal amounts)

    Returns
    -------
    out : ndarray
        Output floating-point image data on range [0, 1] or [-1, 1] if the
        input `image` was unsigned or signed, respectively.

    Notes
    -----
    Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
    the valid image range. The default is to clip (not alias) these values,
    but they may be preserved by setting `clip=False`. Note that in this case
    the output may contain values outside the ranges [0, 1] or [-1, 1].
    Use this option with care.

    Because of the prevalence of exclusively positive floating-point images in
    intermediate calculations, it is not possible to intuit if an input is
    signed based on dtype alone. Instead, negative values are explicitly
    searched for. Only if found does this function assume signed input.
    Unexpected results only occur in rare, poorly exposed cases (e.g. if all
    values are above 50 percent gray in a signed `image`). In this event,
    manually scaling the input to the positive domain will solve the problem.

    The Poisson distribution is only defined for positive integers. To apply
    this noise type, the number of unique values in the image is found and
    the next round power of two is used to scale up the floating-point result,
    after which it is scaled back down to the floating-point image range.

    To generate Poisson noise against a signed image, the signed image is
    temporarily converted to an unsigned image in the floating point domain,
    Poisson noise is generated, then it is returned to the original range.

    """
    mode = mode.lower()

    # Detect if a signed image was input
    if image.min() < 0:
        low_clip = -1.0
    else:
        low_clip = 0.0

    image = img_as_float(image)
    if seed is not None:
        cp.random.seed(seed=seed)

    allowedtypes = {
        'gaussian': 'gaussian_values',
        'localvar': 'localvar_values',
        'poisson': 'poisson_values',
        'salt': 'sp_values',
        'pepper': 'sp_values',
        's&p': 's&p_values',
        'speckle': 'gaussian_values'
    }

    kwdefaults = {
        'mean': 0.0,
        'var': 0.01,
        'amount': 0.05,
        'salt_vs_pepper': 0.5,
        'local_vars': cp.zeros_like(image) + 0.01
    }

    allowedkwargs = {
        'gaussian_values': ['mean', 'var'],
        'localvar_values': ['local_vars'],
        'sp_values': ['amount'],
        's&p_values': ['amount', 'salt_vs_pepper'],
        'poisson_values': []
    }

    for key in kwargs:
        if key not in allowedkwargs[allowedtypes[mode]]:
            raise ValueError('%s keyword not in allowed keywords %s' %
                             (key, allowedkwargs[allowedtypes[mode]]))

    # Set kwarg defaults
    for kw in allowedkwargs[allowedtypes[mode]]:
        kwargs.setdefault(kw, kwdefaults[kw])

    if mode == 'gaussian':
        noise = cp.random.normal(kwargs['mean'], kwargs['var']**0.5,
                                 image.shape)
        out = image + noise

    elif mode == 'localvar':
        # Ensure local variance input is correct
        if (kwargs['local_vars'] <= 0).any():
            raise ValueError('All values of `local_vars` must be > 0.')

        # Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc

        # CuPy Backend: Must supply size argument to get around a CuPy bug
        #       https://github.com/cupy/cupy/pull/4457
        out = image + cp.random.normal(0, kwargs["local_vars"]**0.5,
                                       kwargs["local_vars"].shape)

    elif mode == 'poisson':
        # Determine unique values in image & calculate the next power of two
        vals = len(cp.unique(image))
        vals = 2**cp.ceil(cp.log2(vals))

        # Ensure image is exclusively positive
        if low_clip == -1.0:
            old_max = image.max()
            image = (image + 1.0) / (old_max + 1.0)

        # Generating noise for each unique value in image.
        out = cp.random.poisson(image * vals) / float(vals)

        # Return image to original range if input was signed
        if low_clip == -1.0:
            out = out * (old_max + 1.0) - 1.0

    elif mode == 'salt':
        # Re-call function with mode='s&p' and p=1 (all salt noise)
        out = random_noise(image,
                           mode='s&p',
                           seed=seed,
                           amount=kwargs['amount'],
                           salt_vs_pepper=1.)

    elif mode == 'pepper':
        # Re-call function with mode='s&p' and p=1 (all pepper noise)
        out = random_noise(image,
                           mode='s&p',
                           seed=seed,
                           amount=kwargs['amount'],
                           salt_vs_pepper=0.)

    elif mode == 's&p':
        out = image.copy()
        p = kwargs['amount']
        q = kwargs['salt_vs_pepper']
        flipped = cp.random.choice([True, False],
                                   size=image.shape,
                                   p=[p, 1 - p])
        salted = cp.random.choice([True, False],
                                  size=image.shape,
                                  p=[q, 1 - q])
        peppered = ~salted
        out[flipped & salted] = 1
        out[flipped & peppered] = low_clip

    elif mode == 'speckle':
        noise = cp.random.normal(kwargs['mean'], kwargs['var']**0.5,
                                 image.shape)
        out = image + image * noise

    # Clip back to original range, if necessary
    if clip:
        out = cp.clip(out, low_clip, 1.0)

    return out
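Usage sketch for the salt-and-pepper mode (assumes img_as_float is importable in this module, as random_noise itself relies on it). The flip counts are random; with amount=0.1 roughly 10% of pixels end up at 0 or 1.

import cupy as cp

img = cp.random.rand(64, 64)                 # unsigned float image in [0, 1)
noisy = random_noise(img, mode='s&p', seed=42, amount=0.1)
frac_flipped = float(((noisy == 0) | (noisy == 1)).mean())
print(frac_flipped)                          # ~0.1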
Example #30
def soft_mask(v, voxel_size, num_subunit_residues,
              helical_repeat_distance=None, repeats_to_include=0,
              filter_resolution=20, expansion_factor=1.2, 
              expansion_radius=0, print_progress=True, return_mask=False):

    full_expansion_radius = expansion_radius + filter_resolution/2

# avg AA mol wt. in g/mol, density in g/cm3
    avg_aa_molwt = 110
    protein_density = 1.4

# 2
    print(helical_repeat_distance)
    v_thresh = np.zeros(v.shape)
    v_thresh[:] = v[:]

    sz = np.array(v.shape).astype(int)

    total_molwt = num_subunit_residues*avg_aa_molwt/6.023e23
    if helical_repeat_distance is not None:
        total_molwt = total_molwt * sz[2]*voxel_size / helical_repeat_distance
    total_vol = np.prod(sz) * voxel_size**3                  # vol in A3
    mol_vol = total_molwt/protein_density / (1.0e-24)        # vol in A3
    mol_vol_frac = mol_vol/total_vol
    target_vol_frac = mol_vol_frac*expansion_factor

    thresh = find_binary_threshold(v_thresh, target_vol_frac)
    true_frac = (0.0 + np.sum(v_thresh >= thresh)) / v_thresh.size

    if repeats_to_include != 0:
        zdim = np.round(repeats_to_include * helical_repeat_distance/voxel_size)
    else:
        zdim = sz[2]

    if zdim > sz[2] - 4*np.ceil(filter_resolution/voxel_size):
        zdim = sz[2] - 4*np.ceil(filter_resolution/voxel_size)

    zdim = zdim.astype(int)

    v_thresh[:,:,0:np.floor(sz[2]/2).astype(int) - np.floor(zdim/2).astype(int)] = 0
    v_thresh[:,:,np.floor(sz[2]/2).astype(int) - np.floor(zdim/2).astype(int) + 1 + zdim - 1:] = 0
    v_thresh[v_thresh < thresh] = 0

    if print_progress:
        print('Target volume fraction: {}'.format(target_vol_frac))
        print('Achieved volume fraction: {}'.format(true_frac))
        print('Designated threshold: {}'.format(thresh))

    progress_bar = tqdm(total=5)

    v_thresh = fftpack.fftn(v_thresh)
    progress_bar.update(1)
# 3
    cosmask_filter = np.fft.fftshift(spherical_cosmask(sz, 0, np.ceil(filter_resolution/voxel_size)))
    cosmask_filter = fftpack.fftn(cosmask_filter) / np.sum(cosmask_filter)
    progress_bar.update(1)

    v_thresh = v_thresh * cosmask_filter
    v_thresh = fftpack.ifftn(v_thresh)
    progress_bar.update(1)
    v_thresh = np.real(v_thresh)
    v_thresh[np.abs(v_thresh) < 10*np.finfo(type(v_thresh.ravel()[0])).eps] = 0

    v_thresh[v_thresh != 0] = 1

# The extent of blurring is equal to the diameter of the cosmask sphere; 
#  if we want this to equal the expected falloff for filter_resolution, 
#  we therefore need to divide filter_res by 4 to get the 
#  desired radius for spherical_cosmask.

    v_thresh = fftpack.fftn(v_thresh)
    progress_bar.update(1)

    v_thresh = v_thresh * cosmask_filter
    v_thresh = fftpack.ifftn(v_thresh)
    progress_bar.update(1)
    v_thresh = np.real(v_thresh)
    v_thresh[np.abs(v_thresh) < 10*np.finfo(type(v_thresh.ravel()[0])).eps] = 0

    if return_mask:
        v[:,:,:] = v_thresh
    else:
        v *= v_thresh

    return v_thresh