Code example #1
    def train(self, x_train, y_train, epochs, batch_size):
        for e in range(epochs):
            start = time.time()
            y_hat = np.zeros_like(y_train)
            for idx in range(int(x_train.shape[0] / batch_size)):
                # Slice the idx-th mini-batch. The original started the range at
                # `idx` instead of `idx * batch_size`, so consecutive batches
                # overlapped almost entirely.
                lo = idx * batch_size
                hi = min(lo + batch_size, x_train.shape[0])
                x_train_batch = x_train.take(indices=range(lo, hi), axis=0)
                y_train_batch = y_train.take(indices=range(lo, hi), axis=0)

                y_hat_batch = self.forward(x_train_batch)
                # gradient of softmax cross-entropy w.r.t. the logits
                loss_batch = y_hat_batch - y_train_batch
                self.backward(loss_batch)
                self.update()

                y_hat[lo:lo + y_hat_batch.shape[0], :] = y_hat_batch

            self.train_accuracy.append(
                float(np.mean(np.argmax(y_hat, axis=1) == np.argmax(y_train, axis=1))))
            self.train_loss.append(
                (-np.sum(y_train * np.log(np.clip(y_hat, 1e-20, 1.))) / y_hat.shape[0]).tolist())
            h, r = divmod(time.time() - start, 3600)
            m, s = divmod(r, 60)
            time_per_epoch = "{:0>2}:{:0>2}:{:05.2f}".format(int(h), int(m), s)
            print("iter: {:05} | train loss: {:.5f} | train accuracy: {:.5f} | time: {}"
                  .format(e + 1, self.train_loss[e], self.train_accuracy[e], time_per_epoch))
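A quick sanity check on the slicing above: a minimal, self-contained NumPy sketch (toy shapes chosen purely for illustration) showing that `lo = idx * batch_size` partitions the data into disjoint mini-batches. Note that, as in the loop above, the `int(N / batch_size)` bound silently drops a trailing partial batch.

import numpy as np

x = np.arange(10).reshape(10, 1)  # toy dataset of 10 rows
batch_size = 4
for idx in range(int(x.shape[0] / batch_size)):
    lo = idx * batch_size
    hi = min(lo + batch_size, x.shape[0])
    batch = x.take(indices=range(lo, hi), axis=0)
    print(idx, batch.ravel())  # 0 [0 1 2 3], then 1 [4 5 6 7]; rows 8-9 are skipped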
Code example #2
def test(model, test_inputs, test_labels):
    num_of_sample = test_inputs.shape[0]
    cnt_correct, cnt_tot = 0, 0
    # Pick the array module once instead of duplicating the whole loop body.
    xp = np
    if model.gpu_backend:
        xp = cp
        test_inputs = cp.array(test_inputs)
        test_labels = cp.array(test_labels)
    for i in range(num_of_sample):
        test_input = test_inputs[i:i + 1]
        test_label = test_labels[i]
        res = model.forward_prop(test_input)
        if xp.argmax(res) == xp.argmax(test_label):
            cnt_correct += 1
        cnt_tot += 1

    acc = cnt_correct / cnt_tot
    print('[ accuracy ]: ', acc * 100)
    return acc
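The branch on `model.gpu_backend` is an instance of the usual NumPy/CuPy "xp" dispatch idiom; a minimal sketch (assuming CuPy may or may not be installed):

import numpy as np
try:
    import cupy as cp
    xp = cp  # run on the GPU when CuPy is available
except ImportError:
    xp = np  # otherwise fall back to NumPy

a = xp.asarray([0.1, 0.7, 0.2])
print(int(xp.argmax(a)))  # -> 1 on either backend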
Code example #3
def test_countvectorizer_max_features_counts():
    JUNK_FOOD_DOCS_GPU = Series(JUNK_FOOD_DOCS)

    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)

    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS_GPU).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS_GPU).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS_GPU).sum(axis=0)

    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()

    # The most common feature is "the", with frequency 7.
    assert 7 == counts_1.max()
    assert 7 == counts_3.max()
    assert 7 == counts_None.max()

    # The most common feature should be the same
    def as_index(x):
        return x.astype(cp.int32).item()
    assert "the" == features_1[as_index(cp.argmax(counts_1))]
    assert "the" == features_3[as_index(cp.argmax(counts_3))]
    assert "the" == features_None[as_index(cp.argmax(counts_None))]
Code example #4
File: multi_layer_net.py  Project: mbenzaki/Katakana1
    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        if t.ndim != 1: t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy
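Code examples #4, #5, #8, #17, #18, and #20 all share the same pattern: reduce one-hot labels and class scores to label indices with `argmax`, then compare. A toy walkthrough:

import numpy as np

y = np.array([[0.1, 0.8, 0.1],   # predicted class 1
              [0.6, 0.3, 0.1]])  # predicted class 0
t = np.array([[0, 1, 0],         # true class 1
              [0, 0, 1]])        # true class 2
acc = np.sum(np.argmax(y, axis=1) == np.argmax(t, axis=1)) / float(y.shape[0])
print(acc)  # 0.5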
Code example #5
    def accuracy(self, x, t):
        y = self.predict(x)
        y = cp.argmax(y, axis=1)
        t = cp.argmax(t, axis=1)

        accuracy = cp.sum(y == t) / float(x.shape[0])
        return accuracy
Code example #6
 def confusion(self, training_data, training_labels):
     """
     Confusion matrix
     Evaluation :
         True Positive  | False Negative
         False Positive | True Negative
     Positive is index 0 of the output
     :param training_data: list of test samples
     :param training_labels: expected results for the tests
     :return: confusion matrix, accuracy, precision, recall and F1 score
     """
     evaluation = np.zeros((2, 2))
     n = len(training_data)
     for i in range(n):
         result = int(np.argmax(self.forward_propagation(training_data[i])))
         expected = int(np.argmax(training_labels[i]))
         if result == 0:
             if result == expected:
                 evaluation[0, 0] += 1  # TP
             else:
                 evaluation[1, 0] += 1  # FP
         else:
             if result == expected:
                 evaluation[1, 1] += 1  # TN
             else:
                 evaluation[0, 1] += 1  # FN
     accuracy = float(evaluation[0, 0] + evaluation[1, 1]) / n
     precision = float(
         evaluation[0, 0]) / float(evaluation[0, 0] + evaluation[1, 0])
     recall = float(
         evaluation[0, 0]) / float(evaluation[0, 0] + evaluation[0, 1])
     f1_score = float(2 * (recall * precision) / (recall + precision))
     return evaluation, accuracy, precision, recall, f1_score
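To make the metric definitions concrete, a small worked example with made-up counts:

import numpy as np

evaluation = np.array([[40., 10.],   # TP | FN
                       [ 5., 45.]])  # FP | TN
n = evaluation.sum()                                                  # 100
accuracy = (evaluation[0, 0] + evaluation[1, 1]) / n                  # 0.85
precision = evaluation[0, 0] / (evaluation[0, 0] + evaluation[1, 0])  # 40/45
recall = evaluation[0, 0] / (evaluation[0, 0] + evaluation[0, 1])     # 40/50
f1_score = 2 * (recall * precision) / (recall + precision)            # ~0.842
print(accuracy, precision, recall, f1_score)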
Code example #7
File: incremental_pca.py  Project: risingsudhir/cuml
def _svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.
    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.
    Parameters
    ----------
    u : cupy.ndarray
        u and v are the output of `cupy.linalg.svd`
    v : cupy.ndarray
        u and v are the output of `cupy.linalg.svd`
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping.
        Otherwise, use the rows of v. The choice of which variable to base the
        decision on is generally algorithm dependent.
    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # columns of u, rows of v
        max_abs_cols = cp.argmax(cp.abs(u), axis=0)
        signs = cp.sign(u[max_abs_cols, range(u.shape[1])])
        u *= signs
        v *= signs[:, cp.newaxis]
    else:
        # rows of v, columns of u
        max_abs_rows = cp.argmax(cp.abs(v), axis=1)
        signs = cp.sign(v[list(range(v.shape[0])), max_abs_rows])
        u *= signs
        v *= signs[:, cp.newaxis]
    return u, v
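A hedged usage sketch (assuming CuPy is installed): since an SVD is only determined up to per-component signs, flipping every sign of u and v yields an equally valid decomposition, and `_svd_flip` maps both back to the same canonical form:

import cupy as cp

a = cp.random.rand(6, 4)
u, s, v = cp.linalg.svd(a, full_matrices=False)
u1, v1 = _svd_flip(u.copy(), v.copy())  # copies: _svd_flip mutates its inputs
u2, v2 = _svd_flip(-u, -v)              # a sign-flipped but equally valid SVD
print(cp.allclose(u1, u2), cp.allclose(v1, v2))  # True True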
Code example #8
    def accuracy(self, X, T):
        Y = self.predict(X, train_flg=False)
        Y = np.argmax(Y, axis=1)
        if T.ndim != 1: T = np.argmax(T, axis=1)

        accuracy = np.sum(Y == T) / float(X.shape[0])
        return accuracy
Code example #9
def optimize(formula: cupy.ndarray, maximum_number_of_generations: int,
             population_size: int, selection_ratio: float,
             mutation_probability: float) -> cupy.ndarray:
    """
    Finds the best solution to the input formula within the number of generation given as input.

    The optimizer stops, either if it finds the perfect individual, or if the number of generation equals the maximum
    number of generations

    :param formula: The formula to solve
    :param maximum_number_of_generations: The maximum of number of generations before the stopping of the optimizer
    :param population_size: The number of individuals in each generation
    :param selection_ratio: Between 0 and 1. The surviving ratio of a generation, that is then bred
    :param mutation_probability: Between 0 and 1. The probability that has an individual to mutate
    :return: The best individual found
    """
    population = generate_random_first_generation(population_size,
                                                  formula.shape[1])
    population_fitness = evaluate_population(population, formula)
    best_fitness = cupy.max(population_fitness)
    best_individual = population[cupy.argmax(population_fitness)]
    logging.info(
        f'In the first generation the best fitness was {best_fitness * 100}%')

    if best_fitness == 1.:
        logging.info('Lucky ! The first generation contains the solution')
        return best_individual

    generation = 0
    for generation in range(maximum_number_of_generations):
        breeding_population = select_individuals(population,
                                                 population_fitness,
                                                 selection_ratio)
        population = population_mutation(
            population_binary_crossover(breeding_population, population_size),
            mutation_probability)
        population_fitness = evaluate_population(population, formula)

        if cupy.max(population_fitness) > best_fitness:
            best_fitness = cupy.max(population_fitness)
            best_individual = population[cupy.argmax(population_fitness)]
            logging.info(
                f'In generation {generation + 1} a new best individual has been found, '
                f'and it satisfies {best_fitness * 100}% of the formula')

            if best_fitness == 1.:
                break
        else:
            if generation % 1000 == 0:
                logging.info(
                    f'After {generation + 1} generations, the best individual satisfies '
                    f'{best_fitness * 100}% of the formula')

    logging.info('Optimization ended.')
    logging.info(
        f'The best individual was found after {generation + 1} generations, '
        f'and it satisfies {best_fitness * 100}% of the formula')

    return best_individual
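A hedged usage sketch, assuming the helpers referenced above (generate_random_first_generation, evaluate_population, select_individuals, population_binary_crossover, population_mutation) live in the same module, and that a formula is encoded as a CuPy matrix whose column count is the number of variables (the exact encoding is whatever evaluate_population expects):

import logging
import cupy

logging.basicConfig(level=logging.INFO)

# hypothetical 50-clause, 20-variable formula
formula = cupy.random.randint(-1, 2, size=(50, 20))

best = optimize(formula,
                maximum_number_of_generations=10000,
                population_size=200,
                selection_ratio=0.3,
                mutation_probability=0.01)
print(best)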
Code example #10
 def predict(self, dialect, standard):
     # `standard` is unused here; prediction is made from the dialect embedding alone
     h_emb_d = dialect
     h2, _, _ = self.lstm(None, None, h_emb_d)
     h3 = F.relu(h2[0])
     # np.argmax takes no dtype argument, so cast afterwards instead
     pred_categ = np.argmax(np.array(self.categ(h3).data), axis=1).astype(np.int32)
     pred_area = np.argmax(np.array(self.area(h3).data), axis=1).astype(np.int32)

     return pred_categ, pred_area
Code example #11
 def eval_acc(best_preds, y, problem_type):
     if problem_type == ProblemType.CLASS_SINGLE:
         return cp.mean(abs(y - best_preds) < .5)
     elif problem_type == ProblemType.CLASS_ONEHOT:
         return cp.mean(
             cp.argmax(best_preds, axis=1) == cp.argmax(y, axis=1))
     elif problem_type == ProblemType.REGRESS:
         return cp.mean(abs(y - best_preds) < .5)
Code example #12
File: mmr.py  Project: VibhuJawa/rapids-examples
def mmr(
    doc_embedding,
    word_embeddings,
    words,
    top_n=5,
    diversity=0.8,
):
    """
    Calculate Maximal Marginal Relevance (MMR)
    between candidate keywords and the document.
    MMR considers the similarity of keywords/keyphrases with the
    document, along with the similarity of already selected
    keywords and keyphrases. This results in a selection of keywords
    that stay relevant to the document while being diverse among themselves.
    Arguments:
        doc_embedding: The document embeddings
        word_embeddings: The embeddings of the selected candidate keywords/phrases
        words: The selected candidate keywords/keyphrases
        top_n: The number of keywords/keyphrases to return
        diversity: How diverse the selected keywords/keyphrases are.
                   Values between 0 and 1 with 0 being not diverse at all
                   and 1 being most diverse.
    Returns:
         List[str]: The selected keywords/keyphrases
    """

    # Extract similarity within words, and between words and the document
    word_doc_similarity = 1 - pairwise_distances(
        word_embeddings, doc_embedding, metric="cosine")
    word_similarity = 1 - pairwise_distances(word_embeddings, metric="cosine")

    # Initialize candidates and pick the best keyword/keyphrase first
    keywords_idx = cp.argmax(word_doc_similarity)
    target = cp.take(keywords_idx, 0)
    candidates_idx = [i for i in range(len(words)) if i != target]
    for i in range(top_n - 1):
        candidate_similarities = word_doc_similarity[candidates_idx, :]
        if i == 0:
            first_row = cp.reshape(
                word_similarity[candidates_idx][:, keywords_idx],
                (word_similarity[candidates_idx][:, keywords_idx].shape[0], 1))
            target_similarities = cp.max(first_row, axis=1)
        else:
            target_similarities = cp.max(
                word_similarity[candidates_idx][:, keywords_idx], axis=1)
        # Calculate MMR
        mmr = (
            1 - diversity
        ) * candidate_similarities - diversity * target_similarities.reshape(
            -1, 1)

        mmr_idx = cp.take(cp.array(candidates_idx), cp.argmax(mmr))

        # Update keywords & candidates
        keywords_idx = cp.append(keywords_idx, mmr_idx)
        candidates_idx.remove(mmr_idx)

    return [words[idx] for idx in keywords_idx.get()]
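A hedged usage sketch, assuming `pairwise_distances` is imported from cuml.metrics (as in the rapids-examples project) and substituting random embeddings for a real encoder:

import cupy as cp
from cuml.metrics import pairwise_distances

words = ["gpu", "cuda", "kernel", "memory", "thread", "warp"]
rs = cp.random.RandomState(0)
doc_embedding = rs.rand(1, 8)
word_embeddings = rs.rand(len(words), 8)

print(mmr(doc_embedding, word_embeddings, words, top_n=3, diversity=0.5))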
Code example #13
def train_accuracy():
    test_y = tr_lab
    test_count = 0
    for i in range(160):
        result = training(i, test=False)
        if cp.argmax(test_y[i]) == cp.argmax(result):
            test_count += 1
    # Compute the percentage once after the loop, so it is defined even
    # when no prediction is correct.
    accuracy = test_count / 160 * 100
    print('train accuracy:', accuracy)
    return accuracy
Code example #14
File: main.py  Project: shi27feng/dlADMM
def test_accuracy(W1, b1, W2, b2, W3, b3, images, labels):
    nums = labels.shape[1]
    z1 = np.matmul(W1, images) + b1
    a1 = common.relu(z1)
    z2 = np.matmul(W2, a1) + b2
    a2 = common.relu(z2)
    z3 = np.matmul(W3, a2) + b3
    cost = common.cross_entropy_with_softmax(labels, z3) / nums
    pred = np.argmax(z3, axis=0)       # predicted class per column
    label = np.argmax(labels, axis=0)  # true class per column
    return (np.sum(np.equal(pred, label)) / nums, cost)
Code example #15
File: utils.py  Project: nasiryahm/GAIT-prop
def performance(net, input_data, target_data):
    output = net.forward_pass(input_data)
    preds = output[-1][:, :10]
    if is_cupy:
        correct_mask = xp.asnumpy(xp.argmax(preds, axis=1)) == xp.asnumpy(xp.argmax(target_data, axis=1))
    else:
        correct_mask = xp.argmax(preds, axis=1) == xp.argmax(target_data, axis=1)
    # Assuming MSE loss; the two original branches disagreed with each other
    # and both misplaced the parentheses around the elementwise error.
    loss = float(xp.mean((preds - target_data) ** 2))
    accuracy = np.sum(correct_mask) / np.size(correct_mask)
    return accuracy, loss
Code example #16
def test_accuracy(W1, W2, W3, images, labels):
    nums = labels.shape[1]
    z1 = np.matmul(W1, images)
    a1 = para_func.relu(z1)
    z2 = np.matmul(W2, a1)
    a2 = para_func.relu(z2)
    z3 = np.matmul(W3, a2)
    # print("output shape", z3.shape, labels.shape)
    cost = para_func.cross_entropy_with_softmax(labels, z3) / nums
    pred = np.argmax(z3, axis=0)       # predicted class per column
    label = np.argmax(labels, axis=0)  # true class per column
    return (100.0 * np.sum(np.equal(pred, label)) / nums, cost)
Code example #17
File: train.py  Project: mbenzaki/Katakana1
    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1: t = np.argmax(t, axis=1)

        acc = 0.0

        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i * batch_size:(i + 1) * batch_size]
            tt = t[i * batch_size:(i + 1) * batch_size]
            y = self.predict(tx, train_flg=False)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)

        return acc / x.shape[0]
Code example #18
def accuracy_score_(yt, y, top=1):
    if yt.ndim != 1:
        yt = np.argmax(yt, axis=1)
    if top == 1:
        y = np.argmax(y, axis=1)
        acc = np.array(y == yt).mean()
    else:
        y = np.argsort(y, axis=1)[:, -top:]
        lst = []
        for i in range(len(yt)):
            lst.append(yt[i] in y[i, :])
        acc = np.array(lst).mean()
    return acc
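The `top > 1` branch computes top-k accuracy: `np.argsort(y, axis=1)[:, -top:]` keeps the indices of the `top` highest-scoring classes per row. A toy check of the index arithmetic:

import numpy as np

y = np.array([[0.1, 0.3, 0.6],
              [0.5, 0.4, 0.1]])
print(np.argsort(y, axis=1)[:, -2:])  # two best classes per row: [[1 2], [1 0]]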
Code example #19
def validation_accuracy(gd_truth_n_prediction):
    global epoch
    test_y = te_lab
    test_count = 0
    for i in range(160):
        result = training(i, test=True)
        gd_truth_n_prediction[epoch][0][i] = cp.argmax(test_y[i])
        gd_truth_n_prediction[epoch][1][i] = cp.argmax(result)
        if cp.argmax(test_y[i]) == cp.argmax(result):
            test_count += 1
    # As in code example #13, compute the percentage after the loop so it
    # is defined even when no prediction is correct.
    accuracy = test_count / 160 * 100
    print('test accuracy:', accuracy)
    return accuracy
Code example #20
 def accuracy_score(self, x, yt, top=1):
     if yt.ndim != 1:
         yt = np.argmax(yt, axis=1)
     y = self.predict(x)
     if top == 1:
         y = np.argmax(y, axis=1)
         acc = np.array(y == yt).mean()
     else:
         y = np.argsort(y, axis=1)[:,-top:]
         lst = []
         for i in range(len(yt)):
             lst.append(yt[i] in y[i,:])
         acc = np.array(lst).mean()
     return cp2np(acc)
Code example #21
def accuracy(output, target, mask, inc_fix=False):
    """ Calculate accuracy from output, target, and mask for the networks """
    output = output.astype(cp.float32)
    target = target.astype(cp.float32)
    mask = mask.astype(cp.float32)

    arg_output = cp.argmax(output, -1)
    arg_target = cp.argmax(target, -1)
    mask = mask if inc_fix else mask * (arg_target != 0)

    acc = cp.sum(mask * (arg_output == arg_target), axis=(0, 2)) / cp.sum(
        mask, axis=(0, 2))

    return acc.astype(cp.float32)
Code example #22
File: cluster.py  Project: rajatsaxena/pykilosort
def initializeWdata2(call, uprojDAT, Nchan, nPCs, Nfilt, iC):
    # this function initializes cluster means for the fast kmeans per batch
    # call are time indices for the spikes
    # uprojDAT are features projections (Nfeatures by Nspikes)
    # some more parameters need to be passed in from the main workspace

    # pick random spikes from the sample
    # WARNING: replace ceil by floor because this is a random index, and 0/1 indexing
    # discrepancy between Python and MATLAB.
    irand = np.floor(np.random.rand(Nfilt) * uprojDAT.shape[1]).astype(
        np.int32)

    W = cp.zeros((nPCs, Nchan, Nfilt), dtype=np.float32)

    for t in range(Nfilt):
        ich = iC[:, call[irand[t]]]  # the channels on which this spike lives
        # for each selected spike, get its features
        W[:, ich, t] = uprojDAT[:, irand[t]].reshape(W[:, ich, t].shape,
                                                     order='F')

    W = W.reshape((-1, Nfilt), order='F')  # HERE
    # add small amount of noise in case we accidentally picked the same spike twice
    W = W + .001 * cp.random.normal(size=W.shape).astype(np.float32)
    mu = cp.sqrt(cp.sum(W**2, axis=0))  # get the norm of each template
    W = W / (1e-5 + mu)  # and normalize the template
    W = W.reshape((nPCs, Nchan, Nfilt), order='F')  # HERE
    nW = (W[0, ...]**2)  # squared amplitude of the first PC feature
    W = W.reshape((nPCs * Nchan, Nfilt), order='F')  # HERE
    # determine biggest channel according to the amplitude of the first PC
    Wheights = cp.argmax(nW, axis=0)

    return W, mu, Wheights, irand
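The repeated `order='F'` reshapes preserve the column-major (MATLAB-style) memory layout of the original Kilosort code. A small NumPy illustration of what the flag changes:

import numpy as np

a = np.arange(6)
print(a.reshape((2, 3), order='C'))  # rows filled first:    [[0 1 2], [3 4 5]]
print(a.reshape((2, 3), order='F'))  # columns filled first: [[0 2 4], [1 3 5]]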
Code example #23
    def predict(self, X):
        for column in self.feature:
            if self.discrete_column[column]:
                new_data = []
                for value in X[column]:
                    idx = list(self.inmapping[column].keys())[list(
                        self.inmapping[column].values()).index(column + "::" +
                                                               value)]
                    new_data.append(self.class_array[column][idx])
                X.drop(columns=column, inplace=True)
                NewData = pd.DataFrame(new_data,
                                       columns=[column],
                                       index=X.index)
                X = pd.concat([X, NewData], axis=1)

        output = self.feedforward(X)

        result = []
        for i in range(len(X)):
            row_result = []
            for key, column in enumerate(self.target):
                idx = cp.argmax(1 - cp.absolute(self.class_array[column] -
                                                output[i, key]))
                row_result.append(
                    self.outmapping[column][int(idx)].split('::')[1])
            result.append(row_result)

        return result
Code example #24
File: naive_bayes.py  Project: tylerjthomas9/cuml
    def predict(self, X) -> CumlArray:
        """
        Perform classification on an array of test vectors X.

        """
        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.common.import_utils import dummy_function_always_false \
                as scipy_sparse_isspmatrix

        # todo: use a sparse CumlArray style approach when ready
        # https://github.com/rapidsai/cuml/issues/2216
        if scipy_sparse_isspmatrix(X) or cupyx.scipy.sparse.isspmatrix(X):
            X = X.tocoo()
            rows = cp.asarray(X.row, dtype=X.row.dtype)
            cols = cp.asarray(X.col, dtype=X.col.dtype)
            data = cp.asarray(X.data, dtype=X.data.dtype)
            X = cupyx.scipy.sparse.coo_matrix((data, (rows, cols)),
                                              shape=X.shape)
        else:
            X = input_to_cupy_array(X, order='K').array

        jll = self._joint_log_likelihood(X)
        indices = cp.argmax(jll, axis=1).astype(self.classes_.dtype)

        y_hat = invert_labels(indices, classes=self.classes_)
        return y_hat
Code example #25
File: xgb_clf.py  Project: daxiongshu/riiid-rapids
 def predict(self, X):
     yp = super().predict(X)
     if len(yp.shape) == 2:
         # multi-class: one probability column per class -> take the argmax
         yp = cp.argmax(yp, axis=1)
     else:
         # binary: a single probability vector -> threshold at 0.5
         yp = yp > 0.5
     return yp
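A toy illustration of the two prediction shapes this method handles (assuming CuPy):

import cupy as cp

proba_multi = cp.asarray([[0.2, 0.7, 0.1],
                          [0.6, 0.3, 0.1]])
print(cp.argmax(proba_multi, axis=1))  # [1 0]

proba_binary = cp.asarray([0.9, 0.2])
print(proba_binary > 0.5)              # [ True False]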
Code example #26
def compute_csls_accuracy(x_src,
                          x_tgt,
                          lexicon,
                          lexicon_size=-1,
                          k=10,
                          bsz=1024):
    if lexicon_size < 0:
        lexicon_size = len(lexicon)
    idx_src = list(lexicon.keys())

    x_src /= np.linalg.norm(x_src, axis=1)[:, np.newaxis] + 1e-8
    x_tgt /= np.linalg.norm(x_tgt, axis=1)[:, np.newaxis] + 1e-8

    sr = x_src[list(idx_src)]
    sc = np.dot(sr, x_tgt.T)
    similarities = 2 * sc
    sc2 = np.zeros(x_tgt.shape[0])
    for i in range(0, x_tgt.shape[0], bsz):
        j = min(i + bsz, x_tgt.shape[0])
        sc_batch = np.dot(x_tgt[i:j, :], x_src.T)
        dotprod = np.partition(sc_batch, -k, axis=1)[:, -k:]
        sc2[i:j] = np.mean(dotprod, axis=1)
    similarities -= sc2[np.newaxis, :]

    nn = np.argmax(similarities, axis=1).tolist()
    correct = 0.0
    # use a fresh loop variable: `k` is already the CSLS neighborhood size
    for j in range(0, len(lexicon)):
        if nn[j] in lexicon[idx_src[j]]:
            correct += 1.0
    return correct / lexicon_size
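A hedged usage sketch with random embeddings and a toy lexicon mapping each source index to its set of acceptable target indices (plain NumPy works here, as would CuPy aliased to np):

import numpy as np

rng = np.random.default_rng(0)
x_src = rng.standard_normal((100, 16)).astype(np.float32)
x_tgt = rng.standard_normal((120, 16)).astype(np.float32)
lexicon = {i: {i} for i in range(50)}  # pretend word i translates to word i

print(compute_csls_accuracy(x_src, x_tgt, lexicon, k=10, bsz=32))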
Code example #27
File: train.py  Project: mbenzaki/Katakana1
    def accuracy_and_loss(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        acc = 0.0
        loss = 0.0

        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i * batch_size:(i + 1) * batch_size]
            tt = t[i * batch_size:(i + 1) * batch_size]
            y = self.predict(tx, train_flg=False)
            loss += self.last_layer.forward(y, tt)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)

        return acc / x.shape[0], loss / (int(x.shape[0] / batch_size))
Code example #28
    def infer(self):
        info = EvaluationMetrics(['Time/Step', 'Time/Item', 'Loss', 'Top1'])
        for idx, (data, label) in enumerate(self.val_loader):
            st = time.time()
            self.model.clear()

            data = cp.asarray(data)
            label = cp.asarray(label)
            output = self.model(data)

            loss = self.criterion(output, label)
            free_memory()

            elapsed = time.time() - st
            info.update('Time/Step', elapsed)
            info.update('Time/Item', elapsed / data.shape[0])
            info.update('Loss', cp.asnumpy(loss))

            output = output.reshape(np.prod(label.shape), -1)
            pred = cp.argmax(output, axis=-1).reshape(*label.shape)
            top1 = cp.mean(label == pred)
            info.update('Top1', cp.asnumpy(top1))

        if self.logger is not None:
            self.logger.scalar_summary(info.avg, self.step, self.name)
Code example #29
    def inverse_transform(self, y, threshold=None) -> CumlArray:
        """
        Transform binary labels back to original multi-class labels

        Parameters
        ----------

        y : array of shape [n_samples, n_classes]
        threshold : float
            This value is currently ignored.

        Returns
        -------

        arr : array with original labels
        """

        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.common.import_utils import dummy_function_always_false \
                    as scipy_sparse_isspmatrix

        # If we are already given multi-class, just return it.
        if cupyx.scipy.sparse.isspmatrix(y):
            y_mapped = y.tocsr().indices.astype(self.classes_.dtype)
        elif scipy_sparse_isspmatrix(y):
            y = y.tocsr()
            y_mapped = cp.array(y.indices, dtype=y.indices.dtype)
        else:
            y_mapped = cp.argmax(cp.asarray(y, dtype=y.dtype),
                                 axis=1).astype(y.dtype)

        return invert_labels(y_mapped, self.classes_)
Code example #30
    def calculate_drift_offset(image_mat: cp.array):
        frame_n, h, w = image_mat.shape[:3]
        x = np.arange(w)
        y = np.arange(h)
        bounds = [(0, h), (0, w)]

        def find_max_loc_in_map(cr_map: np.array, rough_yx: cp.array):
            # use SciPy interpolation and optimization to find the sub-pixel maximum
            precise_max_loc = np.empty((2, frame_n), dtype=cp.float32)
            for i in range(frame_n):
                np_cr_map = cr_map[i]
                F2 = interpolate.interp2d(x, y, -np_cr_map, kind="cubic")
                X0 = rough_yx[i]
                precise_max_loc[:, i] = optimize.minimize(lambda arg: F2(*arg),
                                                          X0,
                                                          bounds=bounds).x
            precise_max_loc[0, :] -= w // 2  # X
            precise_max_loc[1, :] -= h // 2  # Y
            return precise_max_loc

        # `base_frame_fft` and `shape` come from the enclosing scope
        result = cp.abs(
            cp.fft.fftshift(
                cp.fft.ifft2(cp.fft.fft2(image_mat) * base_frame_fft)))
        rough_max_loc = np.array(
            cp.unravel_index(cp.argmax(result.reshape(frame_n, -1), axis=1),
                             shape)).T
        result = find_max_loc_in_map(cp.asnumpy(result), rough_max_loc)
        avg_offset = cp.average(cp.array(result), axis=0)
        print("average offset: x:", avg_offset[0], ", y:", avg_offset[1])
        return result
Code example #31
File: generator.py  Project: hitottiez/chainer
    def choice(self, a, size=None, replace=True, p=None):
        """Returns an array of random values from a given 1-D array.

        .. seealso::
            :func:`cupy.random.choice` for full document,
            :meth:`numpy.random.choice`

        """
        if a is None:
            raise ValueError('a must be 1-dimensional or an integer')
        if isinstance(a, cupy.ndarray) and a.ndim == 0:
            raise NotImplementedError
        if isinstance(a, six.integer_types):
            a_size = a
            if a_size <= 0:
                raise ValueError('a must be greater than 0')
        else:
            a = cupy.array(a, copy=False)
            if a.ndim != 1:
                raise ValueError('a must be 1-dimensional or an integer')
            else:
                a_size = len(a)
                if a_size == 0:
                    raise ValueError('a must be non-empty')

        if p is not None:
            p = cupy.array(p)
            if p.ndim != 1:
                raise ValueError('p must be 1-dimensional')
            if len(p) != a_size:
                raise ValueError('a and p must have same size')
            if not (p >= 0).all():
                raise ValueError('probabilities are not non-negative')
            p_sum = cupy.sum(p).get()
            if not numpy.allclose(p_sum, 1):
                raise ValueError('probabilities do not sum to 1')

        if not replace:
            raise NotImplementedError

        if size is None:
            raise NotImplementedError
        shape = size
        size = numpy.prod(shape)

        if p is not None:
            p = cupy.broadcast_to(p, (size, a_size))
            index = cupy.argmax(cupy.log(p) -
                                cupy.random.gumbel(size=(size, a_size)),
                                axis=1)
            if not isinstance(shape, six.integer_types):
                index = cupy.reshape(index, shape)
        else:
            index = cupy.random.randint(0, a_size, size=shape)
            # Align the dtype with NumPy
            index = index.astype(cupy.int64, copy=False)

        if isinstance(a, six.integer_types):
            return index

        if index.ndim == 0:
            return cupy.array(a[index], dtype=a.dtype)

        return a[index]
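The weighted branch (`p is not None`) samples via a noisy argmax instead of building a cumulative distribution; this is the Gumbel-max trick. A minimal NumPy check, written in the trick's conventional `log p + g` form, showing the empirical frequencies approach p:

import numpy as np

rng = np.random.default_rng(0)
p = np.array([0.2, 0.3, 0.5])
g = rng.gumbel(size=(100000, p.size))
samples = np.argmax(np.log(p) + g, axis=1)
print(np.bincount(samples) / samples.size)  # roughly [0.2 0.3 0.5]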