Example #1
    def hit(self, r: RayList, t_min: float, t_max: Union[float, cp.ndarray]) \
            -> HitRecordList:
        if isinstance(t_max, (int, float, cp.floating)):
            t_max_list = cp.full(len(r), t_max, cp.float32)
        else:
            t_max_list = t_max

        oc: Vec3List = r.origin() - self.center
        a: cp.ndarray = r.direction().length_squared()
        half_b: cp.ndarray = oc @ r.direction()
        c: cp.ndarray = oc.length_squared() - self.radius**2
        discriminant_list: cp.ndarray = half_b**2 - a*c

        discriminant_condition = discriminant_list > 0
        if not discriminant_condition.any():
            return HitRecordList.new(len(r)).set_compress_info(None)

        # Calculate t
        positive_discriminant_list = (
            discriminant_list * discriminant_condition
        )
        root = cp.sqrt(positive_discriminant_list)
        # a == 0 means a zero-length ray direction; subtract 1 there to avoid
        # division by zero (those rays fail the discriminant check below anyway)
        non_zero_a = a - (a == 0)
        t_0 = (-half_b - root) / non_zero_a
        t_1 = (-half_b + root) / non_zero_a

        # Choose t
        t_0_condition = (
            (t_min < t_0) & (t_0 < t_max_list) & discriminant_condition
        )
        t_1_condition = (
            (t_min < t_1) & (t_1 < t_max_list)
            & (~t_0_condition) & discriminant_condition
        )
        t = cp.where(t_0_condition, t_0, 0)
        t = cp.where(t_1_condition, t_1, t)

        # Compression
        condition = t > 0
        full_rate = condition.sum() / len(t)
        if full_rate > 0.5:
            idx = None
        else:
            idx = cp.where(condition)[0]
            t = t[idx]
            r = RayList(
                Vec3List(r.orig.get_ndarray(idx)),
                Vec3List(r.dir.get_ndarray(idx))
            )

        # Wrap up result
        point = r.at(t)
        outward_normal = (point - self.center) / self.radius

        result = HitRecordList(
            point, t, cp.full(len(r), self.material.idx, dtype=cp.int32)
        ).set_face_normal(r, outward_normal).set_compress_info(idx)

        return result
Example #2
def fetch_optim(user, tabular_np, neighbors_indeces):
    # start from the items the user has not rated (zeros in the user's row)
    optim_indeces = cupy.where(tabular_np[user] == 0)[0]
    # add the items each neighbor has rated (positive entries), deduplicated
    for i in neighbors_indeces:
        optim_indeces = cupy.unique(
            cupy.concatenate((optim_indeces, cupy.where(tabular_np[i] > 0)[0]),
                             0))
    optim_values = cupy.take(tabular_np, optim_indeces, axis=1)
    return optim_values, optim_indeces
Example #3
def adex_spike(V, w, c):
    """ Check potential thresholds for new spikes """

    # A spike projects 0 mV or 1 mV (nothing or a unit value) to the network
    # under the current parameters
    spike = V > c['Vth']
    V = cp.where(spike, c['V_r'], V)
    w = cp.where(spike, w + c['b'], w)

    return V, w, spike
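A minimal check of the reset rule above; the parameter values in c are hypothetical, not taken from any particular model:

c = {'Vth': -50.0, 'V_r': -70.0, 'b': 0.1}
V = cp.array([-60.0, -40.0])
w = cp.array([0.0, 0.0])
V, w, spike = adex_spike(V, w, c)
# spike -> [False, True]; the spiking neuron is reset to V_r and gets w += b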
Example #4
    def estimate_size(self, masks):
        d = cp.sqrt(
            cp.sum(masks, axis=(1, 2, 3)) / (masks.shape[1] * masks.shape[2]))
        # big: 2, medium: 1, small: 0
        size = cp.zeros(d.shape)
        # cp.where returns a new array, so the result must be reassigned;
        # assign the medium class first so the big class can overwrite it
        size = cp.where(d >= self.size_degree[1], 1, size)
        size = cp.where(d >= self.size_degree[0], 2, size)

        print('estimate_size Done.')
        return size
Example #5
def voltMeterwStep(ne, ex_mat, step_arr=None, parser=None):
    '''
    
    Returns all measurements with this step_arr and ex_mat

    takes:

    ex_mat - array shape (n_source/sinks, 2) - excitation matrix with source and sink for each measurement
    step_arr - array shape (n_source/sinks) - step between measuring electrodes for each source/sink pair
    parser - string

    returns:

    pair_mat - array shape (n_measurements, 2) - matrix with all possible meas. electrode combinations
    ind_new - array shape (n_measurements) - helper array

    '''
    if step_arr is None:
        step_arr = 1 + cp.arange(ex_mat.shape[0]) % ne
    elif type(step_arr) is int:
        step_arr = step_arr * cp.ones(ex_mat.shape[0]) % ne

    drv_a = ex_mat[:, 0]
    drv_b = ex_mat[:, 1]
    i0 = drv_a if parser == 'fmmu' else 0
    A = cp.arange(ne)
    
    #M = cp.dot(cp.ones(ex_mat.shape[0])[:,None], A[None, :]) % self.ne
    #N = (M + step_arr[:, None]) % self.ne

    M = cp.arange(ex_mat.shape[0] * ne) % ne
    N = (M.reshape((ex_mat.shape[0], ne)) + step_arr[:, None]) % ne
    pair_mat = cp.stack((N.ravel(), M), axis=-1)

    #ind_new = cp.arange(pair_mat.shape[0]) % ex_mat.shape[0]
    ind_new = cp.arange(ex_mat.shape[0])        
    ind_new = cp.tile(ind_new, (ne, 1)).T.ravel()
    #print('before indtest', ind_new[20:70])
    nz2 = cp.where(pair_mat == drv_a[ind_new, None])
    nz3 = cp.where(pair_mat == drv_b[ind_new, None])
    #print(ind_new)
    ind_ = cp.arange(pair_mat.shape[0])
    ind_fin = cp.sum(ind_[:, None] == nz2[0][None], axis=1)
    ind_fin2 = cp.sum(ind_[:, None] == nz3[0][None], axis=1)

    ind_test = cp.less((ind_fin + ind_fin2), 0.5 * cp.ones(len(ind_fin)))

    pair_mat = pair_mat[ind_test, :]
    ind_new = ind_new[ind_test]
    sort_index = cp.argsort(ind_new)

    #print('after indtest', ind_new[20:70])
    #meas = cp.concatenate((ex_mat[ind_new], pair_mat), axis=1)
    #print(meas[20:70])
    return pair_mat, ex_mat[ind_new], ind_new
Example #6
def _run_cupy_bin(data, bins, new_values):
    # replace ±inf with nan so these values are not classified;
    # they are treated as outliers
    data = cupy.where(data == cupy.inf, cupy.nan, data)
    data = cupy.where(data == -cupy.inf, cupy.nan, data)

    bins_cupy = cupy.asarray(bins)
    new_values_cupy = cupy.asarray(new_values)
    out = cupy.empty(data.shape, dtype='f4')
    out[:] = cupy.nan
    griddim, blockdim = cuda_args(data.shape)
    _run_gpu_bin[griddim, blockdim](data, bins_cupy, new_values_cupy, out)
    return out
Example #7
def digitize(x, bins):
    # with right=False and bins in increasing order
    out = np.full(shape=x.shape, fill_value=0, dtype=np.int32)
    for i in range(1, len(bins)):
        bool_arr = np.logical_and(bins[i - 1] <= x, x < bins[i])
        matched = np.where(bool_arr)
        out[matched] = i

    bool_arr = x >= bins[-1]
    matched = np.where(bool_arr)
    out[matched] = len(bins)
    return out
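A quick check of the convention (made-up inputs): values below bins[0] map to 0 and values at or above bins[-1] map to len(bins); np here can be numpy or cupy.

x = np.array([0.5, 1.0, 2.5, 7.0])
bins = np.array([1.0, 2.0, 5.0])
print(digitize(x, bins))  # [0 1 2 3]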
Example #8
    def __init__(self, low, high, res, basis, spectrum=False, fine=False, linspace=False):
        self.low = low
        self.high = high
        self.res = int(res)  # somehow gets non-int...
        self.res_ghosts = int(res + 2)  # resolution including ghosts
        self.order = basis.order

        # domain and element widths
        self.length = self.high - self.low
        self.dx = self.length / self.res

        # element Jacobian
        self.J = 2.0 / self.dx

        # The grid does not have a basis but does have quad weights
        self.quad_weights = cp.tensordot(cp.ones(self.res), cp.asarray(basis.weights), axes=0)
        # arrays
        self.arr = np.zeros((self.res_ghosts, self.order))
        self.create_grid(basis.nodes)
        self.arr_cp = cp.asarray(self.arr)
        self.midpoints = np.array([(self.arr[i, -1] + self.arr[i, 0]) / 2.0 for i in range(1, self.res_ghosts - 1)])
        self.arr_max = np.amax(abs(self.arr))

        # velocity axis gets a positive/negative indexing slice
        self.one_negatives = cp.where(condition=self.arr_cp < 0, x=1, y=0)
        self.one_positives = cp.where(condition=self.arr_cp >= 0, x=1, y=0)

        # fine array
        if fine:
            fine_num = 25  # 200 for 1D poisson study
            self.arr_fine = np.array([np.linspace(self.arr[i, 0], self.arr[i, -1], num=fine_num)
                                      for i in range(self.res_ghosts)])

        if linspace:
            lin_num = 400
            self.arr_lin = np.linspace(self.low, self.high, num=lin_num)

        # spectral coefficients
        if spectrum:
            self.nyquist_number = 2.0 * self.res  # mode number of the Nyquist frequency
            # print(self.nyquist_number)
            self.k1 = 2.0 * np.pi / self.length  # fundamental mode
            self.wave_numbers = self.k1 * np.arange(1 - self.nyquist_number, self.nyquist_number)
            self.d_wave_numbers = cp.asarray(self.wave_numbers)
            self.grid_phases = cp.asarray(np.exp(1j * np.tensordot(self.wave_numbers, self.arr[1:-1, :], axes=0)))
            
            if linspace:
                self.lin_phases = cp.asarray(np.exp(1j * np.tensordot(self.wave_numbers, self.arr_lin, axes=0)))

            # Spectral matrices
            self.spectral_transform = basis.fourier_transform_array(self.midpoints, self.J, self.wave_numbers)
            self.inverse_transform = basis.inverse_transform_array(self.midpoints, self.J, self.wave_numbers)
Example #9
def create_fwd(P):
    # convolution function
    fZ = cp.fft.fftshift(fzeta_loop_weights(
        P.Ntheta, P.Nrho, 2*P.beta, P.g-cp.log(P.am), 0, 4))
    # (lp2C1,lp2C2), transformed log-polar to Cartesian coordinates
    tmp1 = cp.outer(cp.exp(cp.array(P.rhosp)), cp.cos(cp.array(P.thsp))).flatten()
    tmp2 = cp.outer(cp.exp(cp.array(P.rhosp)), cp.sin(cp.array(P.thsp))).flatten()
    lp2C1 = [None]*P.Nspan
    lp2C2 = [None]*P.Nspan
    for k in range(P.Nspan):
        lp2C1[k] = ((tmp1-(1-P.aR))*cp.cos(k*P.beta+P.beta/2) -
                    tmp2*cp.sin(k*P.beta+P.beta/2))/P.aR
        lp2C2[k] = ((tmp1-(1-P.aR))*cp.sin(k*P.beta+P.beta/2) +
                    tmp2*cp.cos(k*P.beta+P.beta/2))/P.aR
        lp2C2[k] *= (-1)  # adjust for Tomopy
        cids = cp.where((lp2C1[k]**2+lp2C2[k]**2) <= 1)[0]
        lp2C1[k] = lp2C1[k][cids]
        lp2C2[k] = lp2C2[k][cids]
    # pids, index in polar grids after splitting by spans
    pids = [None]*P.Nspan
    [s0, th0] = cp.meshgrid(P.s, P.proj)
    th0 = th0.flatten()
    s0 = s0.flatten()
    for k in range(0, P.Nspan):
        pids[k] = cp.where((th0 >= k*P.beta-P.beta/2) &
                           (th0 < k*P.beta+P.beta/2))[0]

    # (p2lp1,p2lp2), transformed polar to log-polar coordinates
    p2lp1 = [None]*P.Nspan
    p2lp2 = [None]*P.Nspan
    for k in range(P.Nspan):
        th00 = th0[pids[k]]-k*P.beta
        s00 = s0[pids[k]]
        p2lp1[k] = th00
        p2lp2[k] = np.log(s00*P.aR+(1-P.aR)*np.cos(th00))

    # adapt for gpu interp
    for k in range(0, P.Nspan):
        lp2C1[k] = (lp2C1[k]+1)/2*(P.N-1)
        lp2C2[k] = (lp2C2[k]+1)/2*(P.N-1)
        p2lp1[k] = (p2lp1[k]-P.thsp[0])/(P.thsp[-1]-P.thsp[0])*(P.Ntheta-1)
        p2lp2[k] = (p2lp2[k]-P.rhosp[0])/(P.rhosp[-1]-P.rhosp[0])*(P.Nrho-1)
    const = cp.sqrt(P.N*P.osangles/P.Nproj)*cp.pi/4 / \
        P.aR/cp.sqrt(2)  # adjust constant
    fZgpu = fZ[:, :P.Ntheta//2+1]*const
    if P.interp_type == 'cubic':
        fZgpu = fZgpu/(P.B3com[:, :P.Ntheta//2+1])

    Pfwd0 = Pfwd(fZgpu, lp2C1, lp2C2, p2lp1, p2lp2, cids, pids)
    # array representation
    parsi, parsf = savePfwdpars(Pfwd0)
    return Pfwd0, parsi, parsf
Example #10
def ts_max(x, window):
    if window > len(x):
        return cp.full(len(x), cp.nan)
    # fill missing values (nan/inf) with -inf
    x = cp.where(cp.isinf(x) | cp.isnan(x), -cp.inf, x)

    prefix = cp.full(window - 1, cp.nan)
    x_rolling_array = cp_rolling_window(x, window)
    result = cp.max(x_rolling_array, axis=1)
    # a -inf in the result means the whole window was missing; return nan
    result = result.astype(float)
    result = cp.where(cp.isinf(result), cp.nan, result)
    return cp.concatenate((prefix, result))
Example #11
def ts_argmin(x, window):
    if window > len(x):
        return cp.full(len(x), cp.nan)
    # fill nan and inf with +inf
    x = cp.where(cp.isinf(x) | cp.isnan(x), cp.inf, x)

    prefix = cp.full(window - 1, cp.nan)
    x_rolling_array = cp_rolling_window(x, window)
    result = cp.argmin(x_rolling_array, axis=1)

    # windows consisting entirely of inf were all missing; set their result to nan
    result = result.astype(float)
    result = cp.where(cp.isinf(x_rolling_array).all(axis=1), cp.nan, result)
    result += 1
    return cp.concatenate((prefix, result))
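Both ts_max and ts_argmin rely on a cp_rolling_window helper that is not shown. A minimal sketch under the assumption of a 1-D input, built on CuPy's stride tricks:

import cupy as cp

def cp_rolling_window(a, window):
    # zero-copy view of shape (len(a) - window + 1, window);
    # row i is the window a[i : i + window]
    shape = (a.shape[0] - window + 1, window)
    strides = (a.strides[0], a.strides[0])
    return cp.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)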
Example #12
def julia_set_cp(zs, phase):
    ns = cp.zeros_like(zs, dtype=cp.float32)  # per-point iteration counts
    for i in range(n_iteration):
        # cupy's where doesn't support complex dtypes, so decompose zs into
        # real and imaginary parts
        zs_real = cp.where(
            cp.abs(zs) < R, cp.real(zs**2 + 0.7885 * cp.exp(phase)),
            cp.real(zs))
        zs_imag = cp.where(
            cp.abs(zs) < R, cp.imag(zs**2 + 0.7885 * cp.exp(phase)),
            cp.imag(zs))
        zs = zs_real + 1j * zs_imag
        not_diverged = cp.abs(zs) < R
        ns = ns + not_diverged.astype(cp.float32)

    return ns, zs
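R and n_iteration are globals the snippet relies on; a hypothetical driver, with made-up grid and phase values:

R = 2.0
n_iteration = 50
x, y = cp.meshgrid(cp.linspace(-1.5, 1.5, 256), cp.linspace(-1.5, 1.5, 256))
zs0 = x + 1j * y
ns, zs = julia_set_cp(zs0, phase=cp.array(0.5j))  # ns counts iterations before divergence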
Example #13
def MWU_game_algorithm_experiment(payoff_mat, phi=1/2, steps_number=10000):
    payoff_mat = np.array(payoff_mat)
    rows_number = payoff_mat.shape[0]
    cols_number = payoff_mat.shape[1]
    p_0 = np.ones((1, rows_number))
    p_0 = p_0/rows_number
    p_t = p_0
    j_sumed = np.zeros((cols_number, 1))
    smallest_column_payoff = 1
    p_best = p_0
    p_t_sum = np.zeros((1, rows_number))

    start = time.time()
    row_row = []
    col_col = []
    row_col = []
    times = []
    curr_index = 125

    for i in range(1, steps_number + 1):
        payoffs = np.matmul(p_t, payoff_mat)
        j_best_response = np.argmax(payoffs)
        if payoffs[0, j_best_response] < smallest_column_payoff:
            smallest_column_payoff = payoffs[0, j_best_response]
            p_best = p_t
        j_sumed[j_best_response] += 1
        m_t = payoff_mat[:,j_best_response]
        m_t_negative = (m_t < 0)
        p_t_significant = (p_t > SIGNIFICANCE_CONST)
        to_update = np.logical_or(m_t_negative, p_t_significant[0])
        m_t_updating = np.where(to_update, m_t, 0)
        p_t_updating = np.where(to_update, p_t, 0)
        p_t = np.multiply((1 - phi * m_t_updating), p_t_updating)
        p_t = p_t/p_t.sum()
        p_t_sum = p_t_sum + p_t
        if i == curr_index:
            j_distribution = j_sumed / j_sumed.sum()
            print(i)
            now = time.time()
            times.append(now - start)
            row_row.append(max(epsilon_value(p_best, np.transpose(p_best), payoff_mat)))
            col_col.append(max(epsilon_value(np.transpose(j_distribution), j_distribution, payoff_mat)))
            row_col.append(max(epsilon_value(p_best, j_distribution, payoff_mat)))
            start -= (time.time() - now)
            curr_index *= 2
    # game_value = np.matmul(np.matmul(p_best, payoff_mat), j_distribution)[0][0]
    # print()
    return times, row_row, col_col, row_col
Example #14
def ray_color(r: RayList, world: HittableList, depth: int) \
        -> Tuple[Optional[RayList], Optional[Vec3List], Vec3List]:
    length = len(r)
    if not r.direction().e.any():
        return None, None, Vec3List.new_zero(length)

    # Calculate object hits
    rec_list: HitRecordList = world.hit(r, 0.001, cp.inf)

    # Useful empty arrays
    empty_vec3list = Vec3List.new_zero(length)
    empty_array_float = cp.zeros(length, cp.float32)
    empty_array_bool = cp.zeros(length, cp.bool_)
    empty_array_int = cp.zeros(length, cp.int32)

    # Background / Sky
    unit_direction = r.direction().unit_vector()
    sky_condition = Vec3List.from_array((unit_direction.length() > 0)
                                        & (rec_list.material == 0))
    t = (unit_direction.y() + 1) * 0.5
    blue_bg = (Vec3List.from_vec3(Color(1, 1, 1), length).mul_ndarray(1 - t) +
               Vec3List.from_vec3(Color(0.5, 0.7, 1), length).mul_ndarray(t))
    result_bg = Vec3List(cp.where(sky_condition.e, blue_bg.e,
                                  empty_vec3list.e))
    if depth <= 1:
        return None, None, result_bg

    # Material scatter calculations
    materials: Dict[int, Material] = world.get_materials()
    scattered_list = RayList.new_zero(length)
    attenuation_list = Vec3List.new_zero(length)
    for mat_idx in materials:
        mat_condition = (rec_list.material == mat_idx)
        mat_condition_3 = Vec3List.from_array(mat_condition)
        if not mat_condition.any():
            continue

        ray = RayList(
            Vec3List(cp.where(mat_condition_3.e, r.orig.e, empty_vec3list.e)),
            Vec3List(cp.where(mat_condition_3.e, r.dir.e, empty_vec3list.e)))
        rec = HitRecordList(
            Vec3List(
                cp.where(mat_condition_3.e, rec_list.p.e, empty_vec3list.e)),
            cp.where(mat_condition, rec_list.t, empty_array_float),
            cp.where(mat_condition, rec_list.material, empty_array_int),
            Vec3List(
                cp.where(mat_condition_3.e, rec_list.normal.e,
                         empty_vec3list.e)),
            cp.where(mat_condition, rec_list.front_face, empty_array_bool))
        ray, rec, idx_list = compress(ray, rec)

        scattered, attenuation = materials[mat_idx].scatter(ray, rec)
        scattered, attenuation = decompress(scattered, attenuation, idx_list,
                                            length)
        scattered_list += scattered
        attenuation_list += attenuation

    return scattered_list, attenuation_list, result_bg
Example #15
    def leaky_relu(self, Z):
        '''Leaky ReLU function'''

        A = cp.where(Z >= 0, Z, Z * 0.01)
        assert (A.shape == Z.shape)

        return A
Example #16
def get_text_predictions(df, max_features=25_000):
    model = TfidfVectorizer(stop_words='english',
                            binary=True,
                            max_features=max_features)
    text_embeddings = model.fit_transform(df['title']).toarray()

    print('Finding similar titles...')
    CHUNK = 1024 * 4
    CTS = len(df) // CHUNK
    if (len(df) % CHUNK) != 0:
        CTS += 1

    preds = []
    for j in range(CTS):
        a = j * CHUNK
        b = (j + 1) * CHUNK
        b = min(b, len(df))
        print('chunk', a, 'to', b)

        # COSINE SIMILARITY DISTANCE
        cts = cupy.matmul(text_embeddings, text_embeddings[a:b].T).T
        for k in range(b - a):
            IDX = cupy.where(cts[k, ] > 0.7)[0]
            o = df.iloc[cupy.asnumpy(IDX)].posting_id.values
            preds.append(o)

    del model, text_embeddings
    gc.collect()
    return preds
Example #17
def non_maximum_suppression(bbox, thresh, score=None, limit=None):
    """非极大值抑制"""
    bbox_y1 = bbox[:, 0]
    bbox_x1 = bbox[:, 1]
    bbox_y2 = bbox[:, 2]
    bbox_x2 = bbox[:, 3]

    area = (bbox_x2 - bbox_x1 + 1) * (bbox_y2 - bbox_y1 + 1)
    n_bbox = bbox.shape[0]

    if score is not None:
        order = score.argsort()[::-1].astype(np.int32)
    else:
        order = cp.arange(n_bbox, dtype=np.int32)
    keep = []

    # compare boxes pairwise; drop any box whose IoU with a kept box exceeds thresh
    while order.size > 0:
        i = int(order[0])  # plain int so the final cp.asnumpy(keep) works
        keep.append(i)

        xx1 = cp.maximum(bbox_x1[i], bbox_x1[order[1:]])
        yy1 = cp.maximum(bbox_y1[i], bbox_y1[order[1:]])
        xx2 = cp.minimum(bbox_x2[i], bbox_x2[order[1:]])
        yy2 = cp.minimum(bbox_y2[i], bbox_y2[order[1:]])

        width = cp.maximum(0., (xx2 - xx1 + 1))
        height = cp.maximum(0., (yy2 - yy1 + 1))
        inter = width * height
        iou = inter / (area[i] + area[order[1:]] - inter)
        index = cp.where(iou <= thresh)[0]
        order = order[(index + 1).tolist()]
    if limit is not None:
        keep = keep[:limit]
    return cp.asnumpy(keep)
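A hypothetical three-box example, with boxes as (y1, x1, y2, x2); the two heavily overlapping boxes collapse to the higher-scoring one:

bbox = cp.array([[0, 0, 10, 10], [1, 1, 11, 11], [20, 20, 30, 30]],
                dtype=cp.float32)
score = cp.array([0.9, 0.8, 0.7])
print(non_maximum_suppression(bbox, thresh=0.5, score=score))  # [0 2]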
Example #18
def test_lambdaop_misalign(cpu):
    size = 12
    df0 = pd.DataFrame({
        "a": np.arange(size),
        "b": np.random.choice(["apple", "banana", "orange"], size),
        "c": np.random.choice([0, 1], size),
    })

    ddf0 = dd.from_pandas(df0, npartitions=4)

    cont_names = ColumnGroup(["a"])
    cat_names = ColumnGroup(["b"])
    label = ColumnGroup(["c"])
    if cpu:
        label_feature = label >> (lambda col: np.where(col == 4, 0, 1))
    else:
        label_feature = label >> (lambda col: cp.where(col == 4, 0, 1))
    workflow = nvt.Workflow(cat_names + cont_names + label_feature)

    dataset = nvt.Dataset(ddf0, cpu=cpu)
    transformed = workflow.transform(dataset)
    assert_eq_dd(
        df0[["a", "b"]],
        transformed.to_ddf().compute()[["a", "b"]],
        check_index=False,
    )
Example #19
def signedpower(x, a):
    # treat inf and -inf as nan
    # x[cp.isinf(x)] = cp.nan
    # testing showed the where-based replacement is faster

    x = cp.where(cp.isinf(x), cp.nan, x)
    return cp.sign(x) * cp.abs(x)**a
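Quick check (made-up values): the sign is kept while the magnitude is raised to the power, and infinities come back as nan:

x = cp.array([-2.0, 0.0, 3.0, cp.inf])
print(signedpower(x, 2))  # [-4.  0.  9. nan]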
Example #20
    def registration_flow_batch(self, psi, g, mmin, mmax, flow=None, pars=[0.5, 3, 20, 16, 5, 1.1, 4]):
        """Find optical flow for all projections in parallel on CPU"""
        if (flow is None):
            flow = np.zeros([*psi.shape, 2], dtype='float32')
        total = 0
        for ids in chunk(range(self.ntheta), self.ptheta):
            flownew = flow[ids]
            with cf.ThreadPoolExecutor(16) as e:
                # update flow in place
                e.map(partial(self.registration_flow, psi[ids], g[ids], mmin[ids],
                              mmax[ids], flownew, pars), range(len(ids)))

            # sanity check on Farneback's flow (it may diverge for small window sizes)
            #err = np.linalg.norm(
            #    g[ids]-self.apply_flow_gpu_batch(psi[ids], flownew), axis=(1, 2))

            #err1 = np.linalg.norm(
            #    g[ids]-self.apply_flow_gpu_batch(psi[ids], flow[ids]), axis=(1, 2))

            #idsgood = np.where(err1 >= err)[0]

            g_gpu = cp.array(g[ids])
            psi_gpu = cp.array(psi[ids])
            flownew_gpu = cp.array(flownew)
            flow_gpu = cp.array(flow[ids])
            err = cp.linalg.norm(
                g_gpu-self.apply_flow_gpu(psi_gpu, flownew_gpu,0), axis=(1, 2))
            err1 = cp.linalg.norm(
                g_gpu-self.apply_flow_gpu(psi_gpu, flow_gpu, 0), axis=(1, 2))
 
            idsgood = cp.where(err1 >= err)[0].get()
            total += len(idsgood)
            flow[ids[idsgood]] = flownew[idsgood]
        # print('bad alignment for:', self.ntheta-total)
        return flow
Example #21
    def _rank(x):

        x = cp.where(cp.isinf(x) | cp.isnan(x), cp.inf, x)

        # this sorts only once, which is faster for long arrays
        temp = x.argsort()
        ranks = cp.empty_like(temp)
        ranks[temp] = cp.arange(len(x))
        ranks = ranks.astype(float)

        # restore the nan/inf positions to nan so they are excluded from the rank

        ranks = cp.where(cp.isinf(x), cp.nan, ranks)
        #ranks[cp.isinf(x)] = cp.nan
        # return the rank (1-based) of the last element of x
        return (ranks + 1)[-1]
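For example (hypothetical input, calling the helper directly), with nan excluded from the ranking:

x = cp.array([3.0, cp.nan, 1.0, 2.0])
print(_rank(x))  # 2.0 -- the last value is the 2nd smallest finite entry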
Example #22
def CheckIfEscaped(photon_state, tau_atm, escaped_mu):
    ######################################################################################
    # This checks if a photon has escaped, calculates the momentum that photon takes with it
    # then removes that photon from the list.
    #
    # photon_state -- A CuPy array consisting of [x,y,z,phi,theta,lambda] for each photon.
    # tau_atm -- The depth of the atmosphere.
    ######################################################################################

    # Looks for all the photons above the atmosphere.
    escaped_photons = cp.where(photon_state[:, 2] >= tau_atm)[0]
    momentum_transfer = 0

    if len(escaped_photons) > 0:
        # Find the angles the particles escaped at
        escaped_mu += cp.cos(photon_state[escaped_photons, 4]).tolist()

        # Calculates the momentum transferred, this is the difference between the initial z momentum and the final z momentum
        momentum_transfer = float(
            cp.sum(h * (-cp.cos(photon_state[escaped_photons, 4])) /
                   photon_state[escaped_photons, 5]))

        # Remove the escaped photons
        photon_state = RemovePhotons(photon_state, escaped_photons)

    return photon_state, momentum_transfer, escaped_mu
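RemovePhotons is not shown; a minimal sketch consistent with the usage above, assuming it simply drops the given rows:

def RemovePhotons(photon_state, indices):
    # keep every photon except the ones at the given row indices
    keep = cp.ones(photon_state.shape[0], dtype=cp.bool_)
    keep[indices] = False
    return photon_state[keep]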
Example #23
def preprocess(img, pos, half_size=15, device=0):
    with cp.cuda.Device(device):
        pos_x_left, pos_x_right = pos[:, 0] - half_size, pos[:, 0] + half_size
        pos_y_left, pos_y_right = pos[:, 1] - half_size, pos[:, 1] + half_size

        # TODO: currently using the filtering option; in the near future, change to the padding option
        SELECT_MAP = (pos_x_left >= 0) * (pos_y_left >= 0) * (
            pos_x_right < 2048) * (pos_y_right < 2048)
        SELECT_INDEX = cp.where(SELECT_MAP > 0)[0]

        pos_x_left, pos_x_right, pos_y_left, pos_y_right = pos_x_left[
            SELECT_INDEX], pos_x_right[SELECT_INDEX], pos_y_left[
                SELECT_INDEX], pos_y_right[SELECT_INDEX]

        pos = pos[SELECT_INDEX]

        # shape should be dx * dy * dz
        pos_x = pos_x_left
        pos_x = cp.expand_dims(pos_x, axis=0)
        adding = cp.expand_dims(cp.arange(2 * half_size), 1)
        pos_x = pos_x + adding

        pos_y = pos_y_left
        pos_y = cp.expand_dims(pos_y, axis=0)
        adding = cp.expand_dims(cp.arange(2 * half_size), 1)
        pos_y = pos_y + adding

        # x * y * N
        _x = img[pos_x[:, cp.newaxis, :], pos_y]

    return _x.get(), pos.get()  #, groups_start, groups_end
Example #24
    def add_water(self, atom_histograms_gpu):
        # voxels where no atom landed in any element's histogram
        vacs = cp.prod(cp.where(atom_histograms_gpu == 0, True, False), axis=0)
        # average number of water molecules in a voxel
        vox_wat_num = water_num_dens * self.d1 * self.d2 * self.dz
        box = (self.n_slice, self.n1, self.n2)

        oxygens = cp.where(vacs, cp.random.poisson(vox_wat_num, box),
                           0).astype(int)
        hydrogens = cp.where(vacs, cp.random.poisson(vox_wat_num * 2, box),
                             0).astype(int)

        unique_elements_list = list(self.unique_elements)
        for z, hist in [(1, hydrogens), (8, oxygens)]:
            idx = unique_elements_list.index(z)
            atom_histograms_gpu[idx] += hist
        return atom_histograms_gpu
Example #25
def find_similar_titles():
    preds = []
    CHUNK = 1024 * 4

    print('Finding similar titles...')
    CTS = len(test) // CHUNK
    if len(test) % CHUNK != 0: CTS += 1
    for j in range(CTS):

        a = j * CHUNK
        b = (j + 1) * CHUNK
        b = min(b, len(test))
        print('chunk', a, 'to', b)

        # COSINE SIMILARITY DISTANCE
        cts = cupy.matmul(text_embeddings, text_embeddings[a:b].T).T

        for k in range(b - a):
            IDX = cupy.where(cts[k,] > 0.7)[0]
            o = test.iloc[cupy.asnumpy(IDX)].posting_id.values
            preds.append(o)

    del model, text_embeddings
    _ = gc.collect()

    test['preds'] = preds
    test.head()
Example #26
    def remove_outliers(self, data):
        """Remove outliers"""
        if int(self.dezinger) > 0:
            r = int(self.dezinger)
            fdata = ndimage.median_filter(data, [1, r, r])
            ids = cp.where(cp.abs(fdata - data) > 0.5 * cp.abs(fdata))
            data[ids] = fdata[ids]
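The ndimage used here is presumably CuPy's GPU port of SciPy's module; a self-contained sketch of the same dezinger step under that assumption, on made-up data:

import cupy as cp
from cupyx.scipy import ndimage

data = cp.random.rand(4, 64, 64).astype(cp.float32)
data[0, 10, 10] = 100.0  # plant a "zinger" (isolated outlier)
fdata = ndimage.median_filter(data, [1, 3, 3])
ids = cp.where(cp.abs(fdata - data) > 0.5 * cp.abs(fdata))
data[ids] = fdata[ids]  # the outlier is replaced by the local median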
Example #27
    def filter_data_by_class_ovo(x, y, classes, prevent_relabel=False):
        x = SVM._select_classes(x, y, classes)
        y = SVM._select_classes(y, y, classes)
        if not prevent_relabel:
            y = xp.where(y == classes[0], 1, -1)

        return x, y
Example #28
    def _validate_n_bins(self, n_features):
        """Returns n_bins_, the number of bins per feature.
        """
        orig_bins = self.n_bins
        if isinstance(orig_bins, numbers.Number):
            if not isinstance(orig_bins, numbers.Integral):
                raise ValueError("{} received an invalid n_bins type. "
                                 "Received {}, expected int."
                                 .format(KBinsDiscretizer.__name__,
                                         type(orig_bins).__name__))
            if orig_bins < 2:
                raise ValueError("{} received an invalid number "
                                 "of bins. Received {}, expected at least 2."
                                 .format(KBinsDiscretizer.__name__, orig_bins))
            return np.full(n_features, orig_bins, dtype=int)

        n_bins = check_array(orig_bins, dtype=int, copy=True,
                             ensure_2d=False)

        if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
            raise ValueError("n_bins must be a scalar or array "
                             "of shape (n_features,).")

        bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)

        violating_indices = np.where(bad_nbins_value)[0]
        if violating_indices.shape[0] > 0:
            indices = ", ".join(str(i) for i in violating_indices)
            raise ValueError("{} received an invalid number "
                             "of bins at indices {}. Number of bins "
                             "must be at least 2, and must be an int."
                             .format(KBinsDiscretizer.__name__, indices))
        return n_bins
Example #29
    def calc_neighbourhood_stats(self, distances, radius):
        neighbours = distances < radius
        cp.fill_diagonal(neighbours, False)

        neighbours_states_sum = (self.state[cp.newaxis, :, :] *
                                 neighbours[:, :, cp.newaxis]).sum(axis=1)

        neighbours_num = neighbours.sum(axis=1)
        has_neighbours = neighbours_num > 0

        neighbours_num = neighbours_num[cp.where(has_neighbours)[0],
                                        cp.newaxis]
        neighbours_states_sum = neighbours_states_sum[
            cp.where(has_neighbours)[0], :]

        return has_neighbours, neighbours_num, neighbours_states_sum
Example #30
    def _fast_gradient_descent(self):
        eta_init = self._optimal_eta_init()

        alpha = xp.zeros(self._n)
        theta = xp.zeros(self._n)
        eta = eta_init
        grad_theta = self._compute_gradient(theta)
        objective_val_size = int(
            self._max_iter // 10) + (1 if self._max_iter % 10 == 0 else 0)
        objective_vals = xp.ones(objective_val_size)
        misclassification_error_per_iter = xp.ones(objective_val_size)
        iter = 0
        while iter < self._max_iter:
            eta = self._backtracking_line_search(theta, eta=eta)
            alpha_new = theta - eta * grad_theta
            theta = alpha_new + iter / (iter + 3) * (alpha_new - alpha)
            grad_theta = self._compute_gradient(theta)
            alpha = alpha_new
            iter += 1
            if self._display_plots and iter % 10 == 0:
                objective_vals[int(iter / 10)] = self._objective(alpha)
                self._coef_matrix = alpha
                misclassification_error_per_iter[int(
                    iter / 10)] = self.compute_misclassification_error(
                        self._x,
                        xp.where(self._y > 0, self._primary_class,
                                 self._secondary_class))

        return alpha, objective_vals, misclassification_error_per_iter
Example #31
    def interval(self, mx, size):
        """Generate multiple integers independently sampled uniformly from ``[0, mx]``.

        Args:
            mx (int): Upper bound of the interval
            size (None or int or tuple): Shape of the array or the scalar
                returned.
        Returns:
            int or cupy.ndarray: If ``None``, an :class:`cupy.ndarray` with
            shape ``()`` is returned.
            If ``int``, a 1-D array of length ``size`` is returned.
            If ``tuple``, multi-dimensional array with shape
            ``size`` is returned.
            Currently, each element of the array is ``numpy.int32``.
        """
        dtype = numpy.int32
        if size is None:
            return self.interval(mx, 1).reshape(())
        elif isinstance(size, int):
            size = (size, )

        if mx == 0:
            return cupy.zeros(size, dtype=dtype)

        mask = (1 << mx.bit_length()) - 1
        mask = cupy.array(mask, dtype=dtype)

        ret = cupy.zeros(size, dtype=dtype)
        sample = cupy.zeros(size, dtype=dtype)
        done = cupy.zeros(size, dtype=numpy.bool_)
        while True:
            curand.generate(
                self._generator, sample.data.ptr, sample.size)
            sample &= mask
            success = sample <= mx
            ret = cupy.where(success, sample, ret)
            done |= success
            if done.all():
                return ret
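The same power-of-two mask-and-reject idea, as a standalone sketch with CuPy's public Generator API instead of the raw curand calls (a hypothetical helper, not CuPy's actual implementation):

import cupy

def interval_rejection(mx, size, seed=0):
    rng = cupy.random.default_rng(seed)
    mask = (1 << int(mx).bit_length()) - 1  # smallest 2**k - 1 covering mx
    ret = cupy.zeros(size, dtype=cupy.int32)
    done = cupy.zeros(size, dtype=cupy.bool_)
    while not bool(done.all()):
        # draw uniformly from [0, mask] and accept only values <= mx
        sample = rng.integers(0, mask + 1, size=size, dtype=cupy.int32)
        success = sample <= mx
        ret = cupy.where(success & ~done, sample, ret)
        done |= success
    return ret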
Example #32
    def toi(x):
        return cupy.where(x, 1, 0)
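A one-line demo, assuming toi is reachable as a plain function:

print(toi(cupy.array([True, False, True])))  # [1 0 1]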
Example #33
  def testInhibition(self):
    """
    Test if the firing number of coincidences after inhibition
    equals spatial pooler numActiveColumnsPerInhArea.
    """
    # Miscellaneous variables:
    # n, w:                 n, w of encoders
    # inputLen:             Length of binary input
    # synPermConnected:     Spatial pooler synPermConnected
    # synPermActiveInc:     Spatial pooler synPermActiveInc
    # connectPct:           Initial connect percentage of permanences
    # columnDimensions:     Number of spatial pooler coincidences
    # numActiveColumnsPerInhArea:  Spatial pooler numActiveColumnsPerInhArea
    # stimulusThreshold:    Spatial pooler stimulusThreshold
    # spSeed:               Spatial pooler for initial permanences
    # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
    # kDutyCycleFactor:     kDutyCycleFactor for dutyCycleTieBreaker in
    #                       Inhibition
    # spVerbosity:          Verbosity to print other sp initial parameters
    # testIter:             Testing iterations
    n = 100
    w = 15
    inputLen = 300
    columnDimensions = 2048
    numActiveColumnsPerInhArea = 40
    stimulusThreshold = 0
    spSeed = 1956
    stimulusThresholdInh = 0.00001
    kDutyCycleFactor = 0.01
    spVerbosity = 0
    testIter = 100

    spTest = SpatialPooler(
                           columnDimensions=(columnDimensions, 1),
                           inputDimensions=(1, inputLen),
                           potentialRadius=inputLen / 2,
                           numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
                           spVerbosity=spVerbosity,
                           stimulusThreshold=stimulusThreshold,
                           seed=spSeed
                           )
    initialPermanence = spTest._initialPermanence()
    spTest._masterPotentialM, spTest._masterPermanenceM = (
        spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                       spTest._coincRFShape,
                                       spTest.potentialPct,
                                       initialPermanence,
                                       spTest.random))

    spTest._updateInhibitionObj()
    boostFactors = cupy.ones(columnDimensions)

    for i in range(testIter):
      spTest._iterNum = i
      # random binary input
      input_ = cupy.zeros((1, inputLen))
      nonzero = numpy.random.random(inputLen)
      input_[0][cupy.where(nonzero < float(w)/float(n))] = 1

      # overlap step
      spTest._computeOverlapsFP(input_,
                                stimulusThreshold=spTest.stimulusThreshold)
      spTest._overlaps *= boostFactors
      onCellIndices = cupy.where(spTest._overlaps > 0)
      spTest._onCells.fill(0)
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells

      # update _dutyCycleBeforeInh
      spTest.dutyCyclePeriod = min(i + 1, 1000)
      spTest._dutyCycleBeforeInh = (
          (spTest.dutyCyclePeriod - 1) *
          spTest._dutyCycleBeforeInh + denseOn) / spTest.dutyCyclePeriod
      dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
      dutyCycleTieBreaker *= kDutyCycleFactor

      # inhibition step
      numOn = spTest._inhibitionObj.compute(
          spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices,
          stimulusThresholdInh,  # stimulusThresholdInh
          max(spTest._overlaps)/1000,  # addToWinners
      )
      # update _dutyCycleAfterInh
      spTest._onCells.fill(0)
      onCellIndices = spTest._onCellIndices[0:numOn]
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells
      spTest._dutyCycleAfterInh = (((spTest.dutyCyclePeriod-1) *
                                    spTest._dutyCycleAfterInh + denseOn) /
                                   spTest.dutyCyclePeriod)

      # learning step
      spTest._adaptSynapses(onCellIndices, [], input_)

      # update boostFactor
      spTest._updateBoostFactors()
      boostFactors = spTest._firingBoostFactors

      # update dutyCycle and boost
      if ((spTest._iterNum+1) % 50) == 0:
        spTest._updateInhibitionObj()
        spTest._updateMinDutyCycles(
            spTest._dutyCycleBeforeInh,
            spTest.minPctDutyCycleBeforeInh,
            spTest._minDutyCycleBeforeInh)
        spTest._updateMinDutyCycles(
            spTest._dutyCycleAfterInh,
            spTest.minPctDutyCycleAfterInh,
            spTest._minDutyCycleAfterInh)

      # test numOn and spTest.numActiveColumnsPerInhArea
      self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea,
                       "Error at input %s, actual numOn is: %i, "
                       "numActiveColumnsPerInhArea is: %s" % (
                           i, numOn, numActiveColumnsPerInhArea))