Code example #1
from numpy import arange as np_arange, argmax as np_argmax, bincount as np_bincount

def _modeCheck(Ser1):
    c = np_bincount(Ser1)
    # i is the mode: bincount is indexed by value, so argmax returns the most frequent value
    i = np_argmax(c)
    # flag entries more than 2 away from the mode and return their indices
    rule = (i - 2 > Ser1) | (i + 2 < Ser1)
    index = np_arange(Ser1.shape[0])[rule]
    return index
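
A quick sanity check of the helper, assuming the np_-prefixed names are the usual from numpy import ... as np_... aliases (added above):

from numpy import array

vals = array([5, 5, 5, 6, 4, 9, 1])  # mode is 5
print(_modeCheck(vals))              # [5 6] -> indices of 9 and 1, more than 2 from the mode
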
Code example #2
def merge_overlapped(list_comp, overlap_thres=0.6):
    logging_info("Merging complexes...")

    fin_list = list(list_comp)

    n = len(fin_list)
    if n <= 1:
        return fin_list

    n_changes = 1
    while n_changes != 0:
        if len(fin_list) == 1:
            logging_debug("only one complex")
            break
        n_changes = 0
        ind = 0
        while ind < n:
            if len(fin_list) == 1:
                logging_debug("only one complex")
                break
            else:
                comp = fin_list[ind]
                temp_list = list(fin_list)
                temp_list.remove(comp)
                OS_comp = [
                    jaccard_coeff(set(comp), set(comp2)) for comp2 in temp_list
                ]

                OS_max_ind = int(np_argmax(OS_comp))
                OS_max = OS_comp[OS_max_ind]
                max_over_comp = temp_list[OS_max_ind]
                OS_max_ind_fin = fin_list.index(max_over_comp)

                if OS_max >= overlap_thres:
                    n_changes += 1
                    # Merge the two complexes and keep the result as a new complex
                    # (plain list concatenation, so shared members appear twice)
                    merge_comp = comp + max_over_comp

                    fin_list.append(merge_comp)
                    fin_list.remove(comp)
                    fin_list.remove(max_over_comp)
                    n -= 1
                    if OS_max_ind_fin <= ind:
                        ind -= 1
                else:
                    ind += 1
        logging_info("No. of changes = %s", str(n_changes))

    logging_info("Finished filtering complexes.")

    return fin_list
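
A minimal sketch of running merge_overlapped. jaccard_coeff and the logging_*/np_ aliases are project helpers not shown above, so hypothetical stand-ins are defined here:

import logging
from numpy import argmax as np_argmax

logging_info = logging.info
logging_debug = logging.debug

def jaccard_coeff(s1, s2):
    # |intersection| / |union| of two node sets
    return len(s1 & s2) / len(s1 | s2)

complexes = [["a", "b", "c"], ["b", "c", "d"], ["x", "y"]]
print(merge_overlapped(complexes, overlap_thres=0.4))
# [['x', 'y'], ['a', 'b', 'c', 'b', 'c', 'd']] -- the two overlapping complexes
# collapse into one; the concatenation keeps duplicate members
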
Code example #3
    def step(self, timestep):
        super(DefeatRoachesAgent001, self).step(timestep)
        if self.functions.Attack_screen.id in timestep.observation.available_actions:
            player_relative = timestep.observation.feature_screen.player_relative
            hostiles_y, hostiles_x = (player_relative == self.player_hostile).nonzero()
            if not hostiles_y.any():
                return actions.FunctionCall(self.functions.no_op.id, [])
            # attack the hostile lowest on the screen (largest y coordinate)
            index = np_argmax(hostiles_y)
            target_unit = [hostiles_x[index], hostiles_y[index]]
            return actions.FunctionCall(self.functions.Attack_screen.id, [self.not_queued, target_unit])
        elif self.functions.select_army.id in timestep.observation.available_actions:
            return actions.FunctionCall(self.functions.select_army.id, [self.select_all])
        return actions.FunctionCall(self.functions.no_op.id, [])
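
The unit-picking logic is plain NumPy and can be illustrated outside pysc2. A toy sketch, assuming self.player_hostile holds 4 (the value pysc2 uses for enemy units in the player_relative layer):

import numpy as np

player_relative = np.zeros((5, 5), dtype=int)
player_relative[1, 2] = 4  # a hostile at x=2, y=1
player_relative[3, 4] = 4  # a hostile at x=4, y=3

hostiles_y, hostiles_x = (player_relative == 4).nonzero()
index = np.argmax(hostiles_y)                    # the hostile lowest on screen
print(hostiles_x[index], hostiles_y[index])      # 4 3
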
Code example #4
def get_score(g1, model, scaler, mod_type):
    feats = create_feat_mat_1(g1)

    if mod_type == "NN":
        feats = scaler.transform(feats)

        preds = model.predict(feats)
        pred = preds[0]
        comp_bool = np_argmax(pred)  # predicted class index
        score_curr = pred[1]  # probability of the positive (complex) class

    else:  # mod_type == "tpot"
        # g1 yields a single feature row, so take scalars rather than 1-element arrays
        comp_bool = model.predict(feats)[0]
        score_curr = model.predict_proba(feats)[0, 1]

    return float(score_curr), comp_bool
Code example #5
File: utils.py Project: qhduan/MachineLearning
from numpy import add, array, concatenate, argmax as np_argmax
from torch.utils.data import DataLoader

def predict_with_augmentations(model, test_dataset, transform, nr_augments=4, batch_size=64, device="cuda"):
    """
    Uses a provided PyTorch model to calculate predictions for the samples in a passed-in
    torch.utils.data.Dataset using test-time augmentation. With test-time augmentation, random
    transformations are applied to a predefined number (=nr_augments) of copies of each sample before
    the predictions are made. The obtained class probabilities are then summed, and the final predicted
    class is the argmax of these summed probabilities.
    :param model: (torch.nn.Module) the model to be used to create the predictions
    :param test_dataset: (torch.utils.data.Dataset) provides the data whose classes should be
                         predicted
    :param transform: (PyTorch transform) the augmentations to apply to the samples before predicting
    :param nr_augments: (int, default=4) the number of times each single sample should be augmented
    :param batch_size: (int, default=64) the batch size
    :param device: (optional, "cpu" or "cuda" etc., default="cuda") which device to use for inference
    :return: (numpy.ndarray) the predicted class indices
    """
    if nr_augments < 1:
        raise ValueError("nr_augments must be at least 1.")

    old_transform = test_dataset.transform
    test_dataset.transform = transform

    # assumes the model is already on `device` and in eval mode
    augmented_dl = DataLoader(test_dataset, batch_size=batch_size)

    prediction_sum = 0

    for i in range(nr_augments):
        if test_dataset.train:
            predictions = array([model(inputs.to(device)).data.cpu().numpy() for inputs, _ in augmented_dl])
        else:
            predictions = array([model(inputs.to(device)).data.cpu().numpy() for inputs in augmented_dl])

        if i == 0:
            prediction_sum = predictions
        else:
            prediction_sum = add(prediction_sum, predictions)

    test_dataset.transform = old_transform

    prediction_result = concatenate([[np_argmax(prediction) for prediction in batch_predictions]
                                     for batch_predictions in prediction_sum])

    return prediction_result
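
The summation-then-argmax at the heart of the routine, in miniature with toy probabilities (two samples, three classes):

import numpy as np

probs_aug1 = np.array([[0.5, 0.3, 0.2], [0.1, 0.6, 0.3]])
probs_aug2 = np.array([[0.2, 0.5, 0.3], [0.2, 0.5, 0.3]])

summed = probs_aug1 + probs_aug2   # pool evidence across augmentations
print(summed.argmax(axis=1))       # [1 1]
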
Code example #6
from numpy import abs as np_abs, arange, argmax as np_argmax, eye, ix_, outer, tril, triu

def lutx(A):
    #
    # LU factorization with partial pivoting (a NumPy translation of MATLAB's lutx).
    # Get dimensions and create permutation tracker
    n = A.shape[0]
    p = arange(n)
    #
    # Iterate over n-1 rows...k = 0, 1, ..., n-2
    for k in range(n - 1):
        #
        # Find the largest element on or below the diagonal
        # in the kth column (offset by k to index into A)
        m = np_argmax(np_abs(A[k:n, k]))
        m = m + k
        #
        # Skip elimination if the column is zero
        if A[m, k] != 0:
            #
            # Swap pivot row
            if m != k:
                A[[k, m], :] = A[[m, k], :]
                p[[k, m]] = p[[m, k]]
            #
            # Compute multipliers
            i = arange(k + 1, n)
            A[i, k] = A[i, k] / A[k, k]
            #
            # Update the remainder of the matrix with a rank-one update.
            # Paired fancy indexing A[i, j] would select element pairs only,
            # so build the submatrix index with ix_ and use an outer product.
            j = arange(k + 1, n)
            A[ix_(i, j)] = A[ix_(i, j)] - outer(A[i, k], A[k, j])
    #
    # Separate result
    L = tril(A, -1) + eye(n)
    U = triu(A)
    #
    print(L)
    print(U)
    #
    return L, U, p
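
A quick correctness check: with partial pivoting, L @ U should reproduce the input with its rows permuted by p (a standard property of the factorization, tested here on a small matrix):

from numpy import allclose, array

A = array([[2., 1., 1.],
           [4., 3., 3.],
           [8., 7., 9.]])
L, U, p = lutx(A.copy())      # lutx overwrites its argument, so pass a copy
print(allclose(A[p], L @ U))  # True
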
Code example #7
File: cluster.py Project: jnesme/GroopM-1
    def findArrayCenter(self, vals):
        """Find the center of the numpy array vals, return the index of the center"""
        # parameters
        delta = 0
        bounce_amount = 0.1
        height = 0
        last_val = 0

        working = np_array([])
        final_index = -1

        # sort and normalise between 0 -> 1
        sorted_indices = np_argsort(vals)
        vals_sorted = np_array(vals)[sorted_indices].astype(float)  # as an array so -= and /= below work
        vals_sorted -= vals_sorted[0]
        if vals_sorted[-1] != 0:
            vals_sorted /= vals_sorted[-1]


        # run through in one direction
        for val in vals_sorted:
            # calculate delta
            delta = val - last_val
            # reduce the current value according to the delta value
            height = self.reduceViaDelta(height, bounce_amount, delta)
            # bounce the ball up
            height += bounce_amount

            # store the height
            working = np_append(working, height)
            final_index += 1

            # save the last val
            last_val = val

        height = 0
        last_val = 0

        # print "===W==="
        # print working
        # print "===E==="

        # run through in the reverse direction
        vals_sorted = vals_sorted[::-1]
        for val in vals_sorted:
            if last_val == 0:
                delta = 0
            else:
                delta = last_val - val
            height = self.reduceViaDelta(height, bounce_amount, delta)
            height += bounce_amount
            # add to the old heights
            working[final_index] += height
            final_index -= 1
            last_val = val


        # find the original index!
        return sorted_indices[np_argmax(working)]
Code example #8
File: cluster.py Project: jnesme/GroopM-1
    def findNewClusterCenters(self, ss=0):
        """Find a putative cluster"""

        inRange = lambda x, l, u: l <= x < u

        # we work from the top view as this has the base clustering
        max_index = np_argmax(self.blurredMaps[0])
        max_value = self.blurredMaps[0].ravel()[max_index]

        max_x = int(max_index // self.PM.scaleFactor)
        max_y = max_index - self.PM.scaleFactor * max_x
        max_z = -1

        ret_values = [max_value, max_x, max_y]

        start_span = int(1.5 * self.span)
        span_len = 2 * start_span + 1

        if self.debugPlots:
            self.plotRegion(max_x, max_y, max_z, fileName="Image_" + str(self.imageCounter), tag="column", column=True)
            self.imageCounter += 1

        # make a 3d grid to hold the values
        working_block = np_zeros((span_len, span_len, self.PM.scaleFactor))

        # go through the entire column
        (x_lower, x_upper) = self.makeCoordRanges(max_x, start_span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, start_span)
        super_putative_row_indices = []
        for p in self.im2RowIndicies:
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper):
                for row_index in self.im2RowIndicies[p]:
                    # check that the point is real and that it has not yet been binned
                    if row_index not in self.PM.binnedRowIndicies and row_index not in self.PM.restrictedRowIndicies:
                        # this is an unassigned point.
                        multiplier = np_log10(self.PM.contigLengths[row_index])
                        self.incrementAboutPoint3D(
                            working_block, p[0] - x_lower, p[1] - y_lower, p[2], multiplier=multiplier
                        )
                        super_putative_row_indices.append(row_index)

        # blur and find the highest value
        bwb = ndi.gaussian_filter(working_block, 8)  # hard-coded sigma; originally self.blurRadius
        densest_index = np_unravel_index(np_argmax(bwb), np_shape(bwb))
        max_x = densest_index[0] + x_lower
        max_y = densest_index[1] + y_lower
        max_z = densest_index[2]

        # now get the basic color of this dense point
        putative_center_row_indices = []

        (x_lower, x_upper) = self.makeCoordRanges(max_x, self.span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, self.span)
        (z_lower, z_upper) = self.makeCoordRanges(max_z, 2 * self.span)

        for row_index in super_putative_row_indices:
            p = np_around(self.PM.transformedCP[row_index])
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper) and inRange(p[2], z_lower, z_upper):
                # we are within the range!
                putative_center_row_indices.append(row_index)

        # make sure we have something to go on here
        if np_size(putative_center_row_indices) == 0:
            # it's all over!
            return None

        if np_size(putative_center_row_indices) == 1:
            # get out of here but keep trying
            # the calling function may restrict these indices
            return [[np_array(putative_center_row_indices)], ret_values]
        else:
            total_BP = sum([self.PM.contigLengths[i] for i in putative_center_row_indices])
            if not self.isGoodBin(total_BP, len(putative_center_row_indices), ms=5):  # Can we trust very small bins?
                # get out of here but keep trying
                # the calling function should restrict these indices
                return [[np_array(putative_center_row_indices)], ret_values]
            else:
                # we've got a few good guys here, partition them up!
                # shift these guys around a bit
                center_k_vals = np_array([self.PM.kmerVals[i] for i in putative_center_row_indices])
                k_partitions = self.partitionVals(center_k_vals)

                if len(k_partitions) == 0:
                    return None
                else:
                    center_c_vals = np_array([self.PM.transformedCP[i][-1] for i in putative_center_row_indices])
                    # center_c_vals = np_array([self.PM.averageCoverages[i] for i in putative_center_row_indices])
                    center_c_vals -= np_min(center_c_vals)
                    c_max = np_max(center_c_vals)
                    if c_max != 0:
                        center_c_vals /= c_max
                    c_partitions = self.partitionVals(center_c_vals)

                    # take the intersection of the two partitions
                    tmp_partition_hash_1 = {}
                    id = 1
                    for p in k_partitions:
                        for i in p:
                            tmp_partition_hash_1[i] = id
                        id += 1

                    tmp_partition_hash_2 = {}
                    id = 1
                    for p in c_partitions:
                        for i in p:
                            try:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], id)].append(i)
                            except KeyError:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], id)] = [i]
                        id += 1

                    partitions = [
                        np_array([putative_center_row_indices[i] for i in tmp_partition_hash_2[key]])
                        for key in tmp_partition_hash_2.keys()
                    ]

                    # pcs = [[self.PM.averageCoverages[i] for i in p] for p in partitions]
                    # print pcs
                    return [partitions, ret_values]
Code example #9
    def deleteWorstElement(self):
        # np_argmax also accepts a plain Python list; ties resolve to the first maximum
        del self.__list[np_argmax(self.__list)]
        self.size -= 1
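
Worth noting: on ties, np_argmax returns the index of the first maximum, so exactly one "worst" element is removed per call. A standalone sketch:

from numpy import argmax as np_argmax

scores = [3, 9, 1, 9]
del scores[np_argmax(scores)]
print(scores)  # [3, 1, 9] -- only the first maximum is removed
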
Code example #10
    def try_vag(self, entry_portal: dict,
                exit_portal: dict) -> VagSearchResult:
        """
        Tries to setpos to the correct location to do a VAG with the specified portals.

        Teleports the player at the entry portal using setpos. Suppose that places the player in front of the entry
        portal. The next iteration will change the setpos command in the smallest significant way possible by only
        moving along the axis for which the entry portal normal has the greatest magnitude. In this example, on the next
        iteration the setpos command will teleport the player further into the portal. This will repeat until either the
        player is not in either of the portal bubbles (which implies an AG has happened), or the player gets
        teleported to the other portal, which implies that an AG is not possible in the spot where the setpos command is
        being tried. The opposite is done if the first setpos command places the player near the exit portal.

        The example above, a bit more visually:
        suppose the right portal is the entry portal facing right.
                    V - first setpos command places player here
        <--|      |-->
        The player is closer (and close) to the entry portal, so the next iteration will setpos the player further into
        the hole of the entry portal.


        This approach has a few shortcomings:

        - If the distance between the portals is small, the algorithm could get confused about which portal the player
        teleported to. TODO - try comparing the player distance to where the AG will teleport them instead?

        - Since this script is fantastic at finding VAGs, it is equally fantastic at crashing your game.

        - It seems that in some cases it is possible for a VAG to work in only some parts of a portal. This script
        does not take that into account - it only tries to teleport the player center to the portal center.

        :param entry_portal: A dict obtained with y_spt_ipc_ent for the entry portal.
        :param exit_portal: A dict obtained with y_spt_ipc_ent for the exit portal.
        :return: (VagSearchResult) the outcome of the search.
        """
        class SearchResult(Enum):
            NEXT_TO_ENTRY = 1
            NEXT_TO_EXIT = 2
            BEHIND_ENTRY_PLANE = 3

        entry_xyz = self.get_vec_as_arr(entry_portal["entity"], "m_vecOrigin")
        player = self.send_cmd_and_get_response(
            "y_spt_ipc_properties 0 m_fFlags m_bAnimatedEveryTick", False)[0]
        is_crouched = player["entity"]["m_fFlags"] & 2 != 0
        if not is_crouched:
            print(
                "Warning: player is fully crouched, probably won't work for non-vertical entry portals"
            )
        if player["entity"]["m_bAnimatedEveryTick"] != 0:
            print("Warning: player is probably not noclipping")
        it = 0
        player_setpos = np_array(entry_xyz)
        # change z pos so player center is where the portal center is
        player_half_height = 18 if is_crouched else 36
        player_setpos[2] -= player_half_height
        entry_norm = angles_to_vec(
            self.get_vec_as_arr(entry_portal["entity"], "m_angRotation"))
        # keep only the index of the normal's largest-magnitude component; we'll move along that axis
        no_idx = np_argmax(np_abs(entry_norm))
        first_result = None
        while True:
            print('iteration %i' % (it + 1))
            setpos_command = "setpos %f %f %f" % tuple(player_setpos)
            print("trying: " + setpos_command)
            # set expected response to false since the nudge message appears after magic
            self.send_cmd_and_get_response(setpos_command, False)
            if any('spt: nudging entity' in line
                   for line in self.read_lines_from_log_file()):
                print(
                    'this vag would normally cause a crash, not possible here')
                return VagSearcher.VagSearchResult.WOULD_CAUSE_CRASH
            # the player position is wacky - it doesn't seem to be valid right away; sleep
            sleep(0.02)
            p_info = self.send_cmd_and_get_response(
                "y_spt_ipc_properties 0 m_vecOrigin m_hPortalEnvironment",
                False)[0]
            new_player_pos = self.get_vec_as_arr(p_info["entity"],
                                                 "m_vecOrigin")
            new_player_pos[2] += player_half_height
            print("player pos: %s %s %s" %
                  (str(new_player_pos[0]), str(
                      new_player_pos[1]), str(new_player_pos[2])))
            dist_to_entry = np_linalg.norm(new_player_pos - entry_xyz)

            player_portal_idx = h_to_i(
                p_info["entity"]["m_hPortalEnvironment"])

            if player_portal_idx == entry_portal["index"]:
                result = SearchResult.NEXT_TO_ENTRY
            elif player_portal_idx == exit_portal["index"]:
                result = SearchResult.NEXT_TO_EXIT
            elif dist_to_entry < 1:
                result = SearchResult.BEHIND_ENTRY_PLANE  # behind portal but didn't teleport
            else:
                print("vag probably worked: " + setpos_command)
                return VagSearcher.VagSearchResult.SUCCESS

            if first_result is None and result != SearchResult.BEHIND_ENTRY_PLANE:
                first_result = result

            if result == SearchResult.NEXT_TO_ENTRY:
                if first_result == SearchResult.NEXT_TO_EXIT:
                    print("no vag found")
                    return VagSearcher.VagSearchResult.FAIL
                print("trying setpos closer to portal")
                player_setpos[no_idx] = np_nextafter(player_setpos[no_idx],
                                                     entry_norm[no_idx] * -inf,
                                                     dtype=np_float32)
            elif result == SearchResult.NEXT_TO_EXIT:
                if first_result == SearchResult.NEXT_TO_ENTRY:
                    print("no vag found")
                    return VagSearcher.VagSearchResult.FAIL
                print("trying setpos further from portal")
                player_setpos[no_idx] = np_nextafter(player_setpos[no_idx],
                                                     entry_norm[no_idx] * inf,
                                                     dtype=np_float32)
            else:
                print("behind portal plane, trying setpos further from portal")
                player_setpos[no_idx] = np_nextafter(player_setpos[no_idx],
                                                     entry_norm[no_idx] * inf,
                                                     dtype=np_float32)

            it += 1
            if it >= 35:
                print("Maximum iterations reached")
                return VagSearcher.VagSearchResult.MAX_ITERATIONS
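
The nudge at the core of the search is numpy's nextafter, which moves a coordinate by a single float32 ulp toward +/-inf along the dominant axis of the portal normal. A standalone sketch with toy values:

from math import inf
from numpy import abs as np_abs, argmax as np_argmax, array as np_array
from numpy import float32 as np_float32, nextafter as np_nextafter

entry_norm = np_array([0.9, 0.1, 0.4])
no_idx = np_argmax(np_abs(entry_norm))  # dominant axis: x
pos = np_array([100.0, 5.0, 20.0], dtype=np_float32)
# step one ulp "into" the portal (toward -inf, since the normal component is positive)
pos[no_idx] = np_nextafter(pos[no_idx], entry_norm[no_idx] * -inf, dtype=np_float32)
print(pos[no_idx])  # 99.99999
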
Code example #11
    def evaluation(self):
        self.__predictions = self.__classifier.predict(
            np_array(self.__features_test),
            batch_size=self.__batch_size,
            verbose=1)
        # collapse per-class probabilities to predicted label indices
        self.__predictions = np_argmax(self.__predictions, axis=1)
Code example #12
def test_classi(model, scaler, inputs, X_pos_test, X_neg_test,
                test_complex_graphs, X_test, y_test):
    logging_info("Evaluating test complexes...")
    model_type = inputs['model_type']
    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']
    res = None
    if model_type == "tpot":
        res_pos = model.predict(X_pos_test)
        res = model.predict(X_neg_test)

        if hasattr(model, 'decision_function'):
            score = model.decision_function(X_pos_test)
            np_savetxt(out_comp_nm + '_test_pos_score.out', score)
            # print("Scores for positive complexes are",score)
            score = model.decision_function(X_neg_test)
            np_savetxt(out_comp_nm + '_test_neg_score.out', score)

            # Write the else case

    elif model_type == "NN":

        X_pos_test = scaler.transform(X_pos_test)

        preds = model.predict(X_pos_test)
        res_pos = [np_argmax(pred) for pred in preds]
        score = np_array([pred[1] for pred in preds])
        np_savetxt(out_comp_nm + '_test_pos_score.out', score)

        X_neg_test = scaler.transform(X_neg_test)
        preds = model.predict(X_neg_test)
        res = [np_argmax(pred) for pred in preds]
        # Score of being negative !!
        score = np_array([pred[0] for pred in preds])
        np_savetxt(out_comp_nm + '_test_neg_score.out', score)
        # print("Scores for negative complexes are",score)

    n_pos = len(test_complex_graphs)
    n_neg = len(X_neg_test)

    analyze_sizewise_accuracies(X_pos_test, res_pos, X_neg_test, res,
                                out_comp_nm + '_size_wise_accuracies_test.png')

    acc, acc_neg, Recall, Precision, F1_score = calc_metrics(
        res, res_pos, n_neg, n_pos)

    if model_type == "tpot":
        test_fit_probs = model.predict_proba(X_test)[:, 1]

    elif model_type == "NN":
        X_test = scaler.transform(X_test)
        preds = model.predict(X_test)
        test_fit_probs = np_array([pred[1] for pred in preds])

    test_aps = sklearn_metrics_average_precision_score(y_test, test_fit_probs)
    with open(out_comp_nm + '_metrics.out', "a") as fid:
        print("Test set average precision score = %.3f" % test_aps, file=fid)

    test_p, test_r, _ = sklearn_metrics_precision_recall_curve(
        y_test, test_fit_probs)
    plot_pr_curve(test_p, test_r, test_aps, out_comp_nm)

    with open(out_comp_nm + '_metrics.out', "a") as fid:
        print("Accuracy for test positive complexes = %.3f" % acc, file=fid)
        print(
            "Accuracy for test negative complexes = %.3f" % acc_neg,
            file=fid)  # Really just tells you complex or not for random graphs
        print("Test Precision = %.3f" % Precision, file=fid)
        print("Test Recall = %.3f" % Recall, file=fid)
        print("Test F1 score = %.3f" % F1_score, file=fid)

    logging_info("Finished evaluating test complexes.")
Code example #13
def one2one_matches(known_complex_nodes_list, fin_list_graphs, N_pred_comp,
                    N_test_comp, out_comp_nm, suffix, dir_nm):

    Metric = np_zeros((N_test_comp, N_pred_comp))
    Common_nodes = np_zeros((N_test_comp, N_pred_comp))
    known_comp_lens = np_zeros((N_test_comp, 1))
    pred_comp_lens = np_zeros((1, N_pred_comp))

    fl = 1  # first-pass flag: record predicted-complex sizes only once

    for i, test_complex in enumerate(known_complex_nodes_list):
        T = set(test_complex)
        known_comp_lens[i, 0] = len(T)

        for j, pred_complex in enumerate(fin_list_graphs):
            P = pred_complex[0]

            F1_score, C = f1_similarity(P, T)
            Common_nodes[i, j] = C

            Metric[i, j] = F1_score

            if fl == 1:
                pred_comp_lens[0, j] = len(P)
        fl = 0

    max_indices_i_common = np_argmax(Common_nodes, axis=0)
    ppv_list = [
        float(Common_nodes[i, j]) / pred_comp_lens[0, j]
        for j, i in enumerate(max_indices_i_common)
    ]
    PPV = sum(ppv_list) / len(ppv_list)

    max_indices_j_common = np_argmax(Common_nodes, axis=1)
    sn_list = [
        float(Common_nodes[i, j]) / known_comp_lens[i, 0]
        for i, j in enumerate(max_indices_j_common)
    ]
    Sn = sum(sn_list) / len(sn_list)

    acc_unbiased = np_sqrt(PPV * Sn)

    max_indices_i = np_argmax(Metric, axis=0)
    best_matches_4predicted = [
        (fin_list_graphs[j][0], known_complex_nodes_list[i], Metric[i, j],
         fin_list_graphs[j][1]) for j, i in enumerate(max_indices_i)
    ]

    max_indices_j = np_argmax(Metric, axis=1)
    best_matches_4known = [(fin_list_graphs[j][0], known_complex_nodes_list[i],
                            Metric[i, j], fin_list_graphs[j][1])
                           for i, j in enumerate(max_indices_j)]

    avged_f1_score4known = plot_f1_scores(
        best_matches_4known, out_comp_nm, '_best4known' + suffix,
        'Best predicted match for known complexes - ')
    avged_f1_score4pred = plot_f1_scores(
        best_matches_4predicted, out_comp_nm, '_best4predicted' + suffix,
        'Best known match for predicted complexes - ')

    avg_f1_score = (avged_f1_score4known + avged_f1_score4pred) / 2
    net_f1_score = 2 * avged_f1_score4known * avged_f1_score4pred / (
        avged_f1_score4known + avged_f1_score4pred)

    write_best_matches(best_matches_4known, out_comp_nm, dir_nm,
                       '_best4known' + suffix)
    write_best_matches(best_matches_4predicted, out_comp_nm, dir_nm,
                       '_best4predicted' + suffix)

    prec_MMR, recall_MMR, f1_MMR, max_matching_edges = f1_mmr(Metric)

    plot_pr_curve_mmr(Metric, fin_list_graphs, out_comp_nm + suffix)

    n_matches = int(len(max_matching_edges) / 2)

    return avg_f1_score, net_f1_score, PPV, Sn, acc_unbiased, prec_MMR, recall_MMR, f1_MMR, n_matches
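
The PPV/Sn bookkeeping above in miniature: rows of Common_nodes are known complexes, columns are predicted ones, and entries count shared nodes (toy numbers):

import numpy as np

Common_nodes = np.array([[3, 0],
                         [1, 2]])
pred_comp_lens = np.array([4, 2])   # sizes of the predicted complexes
known_comp_lens = np.array([3, 4])  # sizes of the known complexes

best_i = Common_nodes.argmax(axis=0)  # best known match per predicted complex
PPV = np.mean([Common_nodes[i, j] / pred_comp_lens[j] for j, i in enumerate(best_i)])

best_j = Common_nodes.argmax(axis=1)  # best predicted match per known complex
Sn = np.mean([Common_nodes[i, j] / known_comp_lens[i] for i, j in enumerate(best_j)])

print(PPV, Sn, np.sqrt(PPV * Sn))  # 0.875 0.75 0.81...
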