Example #1
 def intersection(self, *others):
     """
     returns the intersection of cells between the this cell array and the
     given arrays
     """
     others = [self.cells] + [c.cells for c in others]
     return SudokuCellArray(intersection(*others))
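The intersection helper these snippets rely on is not shown on this page. A minimal list-based sketch that supports both call styles used below (two lists, or an unpacked sequence of iterables); whether duplicates from the first argument are kept is an assumption:

def intersection(*iterables):
    """Return the items of the first iterable that appear in all others."""
    first, *rest = iterables
    rest_sets = [set(r) for r in rest]
    return [x for x in first if all(x in s for s in rest_sets)]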
Example #2
def get_same_rated_movies(X, Y):
    """
    La méthode list(dict) retoune sous forme de listes les clés du dictionnaires dict. Ici, il s'agit donc d'identifiants de films.
    """
    X_ids_movies = list(X)
    Y_ids_movies = list(Y)

    inters = utils.intersection(X_ids_movies, Y_ids_movies)

    return inters
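For instance, with two rating dictionaries keyed by (hypothetical) movie ids, and utils.intersection behaving like the sketch under Example #1:

X = {101: 4.0, 102: 3.5, 103: 5.0}
Y = {102: 2.0, 104: 4.5}
print(get_same_rated_movies(X, Y))  # [102]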
Example #3
def add_grid_obstacles(grid, walls, grid_size, width, height):
    # For each wall (two endpoints), find every grid cell the wall passes
    # through at the given resolution and mark it: grid[i][j].obstacle = True.
    rows = len(grid)
    columns = len(grid[0])
    obstacle_cells = []
    non_obstacle_cells = []
    beacons = 0
    beacon_cells = []
    obstacles = 0
    for i, j in itertools.product(range(rows), range(columns)):
        for wall in walls:
            # Skip walls whose bounding box (padded by one cell) cannot
            # touch this cell.
            if ((i + 1) * grid_size < min(wall[0][1], wall[1][1])
                    or (j + 1) * grid_size < min(wall[0][0], wall[1][0])
                    or (i - 1) * grid_size > max(wall[0][1], wall[1][1])
                    or (j - 1) * grid_size > max(wall[0][0], wall[1][0])):
                continue
            if not grid[i][j].obstacle:
                cell = grid[i][j]
                wall_line = np.array([wall[0], wall[1]])
                # Mark the cell as an obstacle if the wall crosses any of
                # its four edges.
                if (utils.intersection(cell.left_line, wall_line)
                        or utils.intersection(cell.right_line, wall_line)
                        or utils.intersection(cell.top_line, wall_line)
                        or utils.intersection(cell.bottom_line, wall_line)):
                    cell.obstacle = True
                    obstacles += 1
                    obstacle_cells.append((i, j))
                beacons_temp, beacon_cells_temp = quick_add_grid_beacons_wall(
                    grid, wall, grid_size, width, height, rows, columns)
                beacons += beacons_temp
                beacon_cells.extend(beacon_cells_temp)
            else:
                break
        if not grid[i][j].obstacle:
            non_obstacle_cells.append((i, j))
    return obstacles, obstacle_cells, non_obstacle_cells, beacons, beacon_cells
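Here utils.intersection is geometric rather than set-based: it receives two 2x2 arrays of segment endpoints and returns their crossing point, or a falsy value when the segments miss (Example #8 uses the same convention). A plausible sketch using the standard parametric segment test; the exact return convention is an assumption:

import numpy as np

def intersection(seg_a, seg_b):
    """Return the crossing point of two segments, or None if they miss."""
    p, q = np.asarray(seg_a, dtype=float)
    r, s = np.asarray(seg_b, dtype=float)
    d1, d2 = q - p, s - r
    denom = d1[0] * d2[1] - d1[1] * d2[0]
    if denom == 0:  # parallel or collinear segments never cross here
        return None
    t = ((r - p)[0] * d2[1] - (r - p)[1] * d2[0]) / denom
    u = ((r - p)[0] * d1[1] - (r - p)[1] * d1[0]) / denom
    if 0 <= t <= 1 and 0 <= u <= 1:
        return tuple(p + t * d1)
    return None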
Example #4
def process_go_num(main_dict, expanded_dataframe, godag, hashed_query_dict,
                   slice_go, go):

    return_df_dict = collections.defaultdict(list)
    go_family = [go]
    if go in godag:
        # list concatenation
        go_family.extend(list(godag[go].get_all_children()))

    iteration = 0
    queries = []
    if len(slice_go) > 0:
        queries = list(set(slice_go['query']))
        for query in queries:
            if iteration % 1000 == 0:
                print(f'{go}: {iteration} / {len(queries)}')
            iteration += 1

            query_rows = hashed_query_dict[query]
            # pull the slice of main_dict for this query's rows
            current_slice = utils.slice_dict(main_dict, query_rows)
            # slice2 = dataframe2.iloc[hashed_query_dict[query]]

            # fl = utils.slice_dict(expanded_dict, hashed_query_dict[query])
            # Select the 'go' values recorded for this query that fall
            # within go_family.
            fl = expanded_dataframe.loc[
                (expanded_dataframe['query'] == query)
                & (expanded_dataframe['go'].isin(go_family)), 'go']

            # find the intersection between the two lists
            query_list = utils.intersection(fl, go_family)
            # query_list=np.intersect1d(fl,go_family)
            idx = 0
            # define new dict
            temp_dict = {
                'GO_term': go,
                'query': query,
                'organism': current_slice['organism'][idx],
                'associated_GO_terms': ";".join(query_list),
                'multi_taxids_confidence':
                current_slice['multi_taxids_confidence'],
                'taxid': current_slice['taxid'][idx],
                'gene_name': current_slice['gene_name'][idx],
                'uniprot': current_slice['uniprot'][idx],
                'uniprot evalue': current_slice['uniprot evalue'][idx]
            }

            # append key values to list of values within each dict key
            for k, v in temp_dict.items():
                return_df_dict[k].append(v)
    print(f'{go}: {iteration} / {len(queries)} COMPLETE')

    return return_df_dict
Example #5
def enum_possible_group(compat_fire):
    possible_group = []
    # For each light, build candidate groups by intersecting rows of the
    # compatibility matrix. Candidates are compared and stored in sorted
    # order so the same group is not recorded twice in different orders.
    for i in range(1, 12):
        group = [i]
        for j in compat_fire[i - 1]:
            group.append(j)
            cand = sorted(group)
            if cand not in possible_group and group.count(j) < 2:
                possible_group.append(utils.Cloning(cand))
            for k in utils.intersection(compat_fire[i - 1], compat_fire[j - 1]):
                group.append(k)
                cand = sorted(group)
                if cand not in possible_group and group.count(j) < 2:
                    possible_group.append(utils.Cloning(cand))
                for l in utils.intersection(
                        utils.intersection(compat_fire[i - 1],
                                           compat_fire[j - 1]),
                        compat_fire[k - 1]):
                    group.append(l)
                    cand = sorted(group)
                    if cand not in possible_group and group.count(j) < 2:
                        possible_group.append(utils.Cloning(cand))
                    group.remove(l)
                group.remove(k)
            group.remove(j)
    return possible_group
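utils.Cloning only needs to return an independent copy of the candidate list, so that later in-place edits to group do not mutate the stored entries; a sketch (the name is the project's, the body an assumption):

def Cloning(li):
    # A shallow copy suffices for a flat list of ints.
    return list(li)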
Example #6
    def __vanish_points(self):
        lines = self.clear_lines
        combs = combinations(range(len(lines)), 2)
        d_intersections = dict()
        for comb in combs:
            l1 = lines[comb[0]]
            l2 = lines[comb[1]]

            p_intersect = intersection(l1, l2)
            if p_intersect:
                d_intersections[p_intersect] = (l1, l2)

        max_x, max_y = -float("inf"), -float("inf")
        min_x, min_y = float("inf"), float("inf")
        vp_bottom = []
        vp_top = []
        vp_left = []
        vp_right = []
        for point in d_intersections.keys():

            # print("point intersept", point)

            x, y = point
            if x < min_x:
                vp_left = point
                min_x = x
            if x > max_x:
                vp_right = point
                max_x = x
            if y < min_y:
                vp_bottom = point
                min_y = y
            if y > max_y:
                vp_top = point
                max_y = y
        # print("durty vps", vp_bottom, vp_left, vp_top, vp_right)

        vps = [vp_bottom, vp_left, vp_top, vp_right]
        combs = combinations(range(4), 2)
        min_norm = float("inf")
        vp_loser_i = 0
        for comb in combs:
            vp1 = np.array(vps[comb[0]])
            vp2 = np.array(vps[comb[1]])

            if norm(vp1 - vp2) < min_norm:
                vp_loser_i = comb[1]
                min_norm = norm(vp1 - vp2)

        del vps[vp_loser_i]
        return vps
Example #7
def get_possibilities(matrix, position):
    # Candidate digits 1..N for the column, row and square constraints.
    v_possibilities = [str(i) for i in range(1, len(matrix) + 1)]
    h_possibilities = [str(i) for i in range(1, len(matrix) + 1)]
    sq_possibilities = [str(i) for i in range(1, len(matrix) + 1)]

    for i in range(0, len(matrix)):
        value = matrix[position.x][i]
        if value != '.':
            h_possibilities.remove(value)

        value = matrix[i][position.y]
        if value != '.':
            v_possibilities.remove(value)

    dim = int(math.sqrt(len(matrix)))

    offset_x = 0
    offset_y = 0
    if dim == 2:
        if position.x % 2 != 0:
            offset_x = -1
        if position.y % 2 != 0:
            offset_y = -1
    else:
        offset_x = -(position.x % 3)
        offset_y = -(position.y % 3)

    new_position_x = position.x + offset_x
    new_position_y = position.y + offset_y

    for i in range(new_position_x, new_position_x + dim):
        for j in range(new_position_y, new_position_y + dim):
            value = matrix[i][j]
            if value != '.':
                sq_possibilities.remove(value)

    return intersection(v_possibilities,
                        intersection(h_possibilities, sq_possibilities))
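A quick check of the row/column/square logic on a 4x4 puzzle; Position is a hypothetical stand-in for the snippet's coordinate type, and intersection is the list-based helper sketched under Example #1:

import math
from collections import namedtuple

Position = namedtuple('Position', ['x', 'y'])

puzzle = [['1', '.', '.', '4'],
          ['.', '.', '2', '.'],
          ['.', '3', '.', '.'],
          ['2', '.', '.', '.']]

print(get_possibilities(puzzle, Position(1, 1)))  # ['4']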
Example #8
 def adjust_sensors(self, Walls):
     """
     Adjust the sensors attached to the robot by computing their
     intersections with the given walls.
     :param Walls: the walls to compute intersections with
     :return:
     """
     for sensor in self.sensors:
         for wall in Walls:
             sensor_line = np.array([self.position, sensor.line_end])
             wall_line = np.array([wall[0], wall[1]])
             # print("lines", sensor_line, wall_line)
             intersec_point = utils.intersection(sensor_line, wall_line)
             if (intersec_point):
                 sensor.update_sensor(self.position, 0, intersec_point)
Example #9
    def iter_related(self, inclusive=False, **kwargs):
        """
        returns all the cells that are shared amongst all the cells in this
        collection
        inclusive determines if the cell itself is included
        """
        if not len(self):
            return SudokuCellArray()

        cells = [c.iter_related() for c in self.cells]
        related = SudokuCellArray(intersection(*cells))
        if not inclusive:
            related -= c
        return SudokuCellArray(related).filter(**kwargs)
Example #10
File: morpho.py  Project: tcantenot/DIP
def connected_extraction(input, structure):

    tmp = np.copy(input)

    output = np.zeros(input.shape, input.dtype)

    connections = []

    it = 0  # connected-component counter
    while np.count_nonzero(tmp) > 0:

        # Find the beginning of the next connected component
        X0 = np.zeros(input.shape, input.dtype)
        for (x, y), p in np.ndenumerate(tmp):
            if p == 1:
                X0[x, y] = 1
                break

        # Extract the connected component
        X1 = None
        while True:
            #X1 = intersection(dilation(X0, structure), input)
            X1 = intersection(ndimage.binary_dilation(X0, structure), input)
            if np.array_equal(X0, X1): break
            X0 = X1

        # Remove connected component from the copy of the input image
        tmp[X0 == 1] = 0

        # Reconstruct input image
        output[X0 == 1] = 1

        # Count pixels of connected component
        connections.append(np.count_nonzero(X0))

        print "Connected component {}: {} pixel{}".format(
            iter, connections[-1], 's' if connections[-1] > 1 else ''
        )

        iter += 1

    return output, connections
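In this snippet intersection operates on binary images rather than lists: the intersection of two masks is their element-wise logical AND. A sketch of the version assumed here:

import numpy as np

def intersection(a, b):
    # Element-wise AND of two binary images, kept in the first input's dtype.
    return np.logical_and(a, b).astype(a.dtype)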
Example #11
def greedy_multiskill_selection(week, B, list_of_ri, stud_alpha, avg_deltas,
                                skill_betas, win_params, att_params, q, q_mat,
                                inv_q_mat, train_items_per_skill):
    """
    Select *combination of skills* with largest conditional marginal learning gain (cf. Hunziker et al., 2018).
    Return selected item id.

    Multiskill version of the greedy_skill_selection function above.
    """
    cumulative_gains = []
    # Loop over all the skills that are available to review.
    for b in range(week):
        probas_recall_no_review = []  # No review
        probas_recall_review_1 = []  # Review + outcome = 1
        probas_recall_review_0 = []  # Review + outcome = 0
        # Copy queues
        q_no_review = deepcopy(q)
        q_review_1 = deepcopy(q)
        q_review_0 = deepcopy(q)
        # Simulate an attempt on *that* skill on that week (and, for the
        # successful-review branch, a win as well).
        q_review_1[(b, "attempts")].push(week * 3600 * 24 * 7)
        q_review_0[(b, "attempts")].push(week * 3600 * 24 * 7)
        q_review_1[(b, "wins")].push(week * 3600 * 24 * 7)
        # Generate outcome for expectation on y_t
        win_counters = q_no_review[(b, "wins")].get_counters(
            week * 3600 * 24 * 7)
        attempt_counters = q_no_review[(b, "attempts")].get_counters(
            week * 3600 * 24 * 7)
        proba_outcome_1 = gen_outcome(stud_alpha, avg_deltas, skill_betas[b],
                                      win_counters, attempt_counters,
                                      win_params[b], att_params[b])
        # We only count over the retention period.
        for t in B - 1 + np.array(list_of_ri):
            # Attempt counters
            attempt_counters_no_review = q_no_review[
                (b, "attempts")].get_counters(t * 3600 * 24 * 7)
            attempt_counters_review_1 = q_review_1[
                (b, "attempts")].get_counters(t * 3600 * 24 * 7)
            attempt_counters_review_0 = q_review_0[
                (b, "attempts")].get_counters(t * 3600 * 24 * 7)
            # Win counters
            win_counters_no_review = q_no_review[(b, "wins")].get_counters(
                t * 3600 * 24 * 7)
            win_counters_review_1 = q_review_1[(b, "wins")].get_counters(
                t * 3600 * 24 * 7)
            win_counters_review_0 = q_review_0[(b, "wins")].get_counters(
                t * 3600 * 24 * 7)
            # Proba of recall
            probas_recall_no_review.append(
                gen_outcome(stud_alpha, avg_deltas, skill_betas[b],
                            win_counters_no_review, attempt_counters_no_review,
                            win_params[b], att_params[b]))
            probas_recall_review_1.append(
                gen_outcome(stud_alpha, avg_deltas, skill_betas[b],
                            win_counters_review_1, attempt_counters_review_1,
                            win_params[b], att_params[b]))
            probas_recall_review_0.append(
                gen_outcome(stud_alpha, avg_deltas, skill_betas[b],
                            win_counters_review_0, attempt_counters_review_0,
                            win_params[b], att_params[b]))
        cumulative_gains.append(
            proba_outcome_1 * np.mean(probas_recall_review_1)
            + (1 - proba_outcome_1) * np.mean(probas_recall_review_0)
            - np.mean(probas_recall_no_review))

    selected_skills = []
    selected_skills.append(np.argmax(cumulative_gains))
    # Then, selection of the other skills
    # We need to loop over all different possible skills, in the decreasing order of expected gain
    decreasing_order = np.argsort(cumulative_gains)[::-1]
    acceptable_items = [
        item_id for item_id in inv_q_mat[selected_skills[0]]
        if item_id < week * train_items_per_skill
    ]
    for k in range(1, len(cumulative_gains)):
        intersec_items = intersection(acceptable_items,
                                      inv_q_mat[decreasing_order[k]])
        if len(intersec_items) == 0:
            continue
        else:
            acceptable_items = intersec_items.copy()
    # Choose item
    # There is at least 1 item in acceptable_items
    selected_item = np.random.choice(acceptable_items)
    return selected_item
Example #12
 def cands_intersection(self):
     cands = [c.cands for c in self.cells]
     return intersection(*cands)
Example #13
    with codecs.open(fname, "w+", "utf-8") as f:
        json.dump(plotly_obj, f)
            
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="evaluation")
    parser.add_argument("files", nargs='+')
    parser.add_argument("-t", "--tags", default="most_freq")
    parser.add_argument("-o", "--output", required=True)
    parser.add_argument("-m", "--metric", required=True)
    args = parser.parse_args()
    results, years = read_results(args.files)
    if args.tags == "most_freq":
        tags = [u'MD', u'VAN', u'VBP', u'Q', u'C', u'VB', u'NS', u'VBD', u'FW', u'PRO$',
                u'NPR', u'ADV', u'.', u'ADJ', u',', u'CONJ', u'D', u'PRO', u'P', u'N']       
    else:
        tags = intersection(*[r[0] for r in results])
    metrics = {"f1": f1, "precision": precision, "recall": recall}
    to_plotly_json(results, years, metrics[args.metric], tags, args.output)
    

# from penn_data import pos_from_range
# counts = dict()
# test = (1400, 1500, 2000)
# test_counts = counts_per_tag(pos_from_range(*test))
# for start in range(1400, 1850, 50):
#     train = (start, start + 100, 10000)
#     sents = pos_from_range(*train)
#     train_counts = counts_per_tag(sents)
#     counts[start] = train_counts

# todo: measure correlation between per tag accuracy and counts over time
Example #14
def boolean_search(client_request,
                   inverted_index,
                   collection_path,
                   n_results=10):
    '''
    Return up to n_results document IDs matching the user's query under
    the boolean search model.
    '''
    time_start = time.perf_counter()
    all_doc_id = get_all_doc_id(collection_path)
    request = tokenize(client_request)
    final_docs = []
    documents = []
    if (len(request) == 1):
        final_docs = inverted_index[request[0]]
    # handling and elements first
    while 'and' in request:
        index_treated = []
        forbidden_doc = []
        i = request.index('and')
        if i >= 2 and request[i - 2] == 'not':
            index_treated = [i - 2, i - 1, i]
            forbidden_doc.extend(element
                                 for element in inverted_index[request[i - 1]]
                                 if element not in forbidden_doc)
        else:
            if type(request[i - 1]) == str:  # mot and ..
                doc_treated = inverted_index[request[i - 1]]
            else:  # [ liste doc ] and ..
                doc_treated = request[i - 1]
            documents.extend(element for element in doc_treated
                             if element not in documents)
            index_treated = [i - 1, i]
        if (request[i + 1] == 'not'):
            index_treated.extend((i + 1, i + 2))
            forbidden_doc.extend(element
                                 for element in inverted_index[request[i + 2]]
                                 if element not in forbidden_doc)
        elif (request[i + 1] != 'not'):
            index_treated.append(i + 1)
            if type(request[i + 1]) == str:
                doc_treated = inverted_index[request[i + 1]]
            else:
                doc_treated = request[i + 1]
            if (documents != []):
                documents = intersection(documents, doc_treated)
            else:
                documents.extend(element for element in doc_treated
                                 if element not in documents)
        if len(forbidden_doc) > 0:  # Removing forbidden docs from results
            for docId in documents.copy():
                if docId in forbidden_doc:
                    documents.remove(docId)
        request.insert(index_treated[0], documents)
        for j in range(len(index_treated)):
            request.pop(index_treated[1])
        final_docs = documents
    # Handling or elements
    while 'or' in request:
        index_treated = []
        i = request.index('or')
        if i >= 2 and request[i - 2] == 'not':
            index_treated = [i - 2, i - 1, i]
            all_docs = all_doc_id.copy()
            for docId in inverted_index[request[i - 1]]:
                all_docs.remove(docId)
            documents.extend(element for element in all_docs
                             if element not in documents)
        else:
            if type(request[i - 1]) == str:
                doc_treated = inverted_index[request[i - 1]]
            else:
                doc_treated = request[i - 1]
            documents.extend(element for element in doc_treated
                             if element not in documents)
            index_treated = [i - 1, i]
        if (request[i + 1] == 'not'):
            index_treated.extend((i + 1, i + 2))
            all_doc = all_doc_id.copy()
            for docId in inverted_index[request[i + 2]]:
                all_doc.remove(docId)
            documents.extend(element for element in all_doc
                             if element not in documents)
        else:
            index_treated.append(i + 1)
            if type(request[i + 1]) == str:
                doc_treated = inverted_index[request[i + 1]]
            else:
                doc_treated = request[i + 1]
            documents.extend(element for element in doc_treated
                             if element not in documents)
        request.insert(index_treated[0], documents)
        for j in range(len(index_treated)):
            request.pop(index_treated[1])
        final_docs = documents
        time_elapsed = time.perf_counter() - time_start
        print('boolean search took {:.2f} s to run the query'.format(
            time_elapsed))
    return final_docs[:n_results]
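A minimal way to exercise the function, with tokenize and get_all_doc_id stubbed out (both stubs are assumptions standing in for the project's own helpers):

def tokenize(query):
    return query.lower().split()

def get_all_doc_id(collection_path):
    return [1, 2, 3, 4]

inverted_index = {'cat': [1, 2], 'dog': [2, 3], 'fox': [3, 4]}
print(boolean_search('cat and not dog', inverted_index, '.'))  # [1]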
Example #15
    def choose_skill(self, week):
        """
        Choose skill to review on a given week.

        Input:
            * week: int, current week number. Starts in the beginning at 0.
        """
        if self.review_strat == "random_review":
            selected_block = np.random.choice(week, 1)[0]
        elif self.review_strat == "mu_back":  # mu must be > 0
            selected_block = max(week - self.param_review, 0)
        elif self.review_strat.startswith("theta_thres"):
            # Output for uniskill strats = skill id; for multiskill strats = item id
            probas_recall = []
            for j in range(week):
                win_counters = self.q[(j, "wins")].get_counters(week * 3600 *
                                                                24 * 7)
                attempt_counters = self.q[(j, "attempts")].get_counters(
                    week * 3600 * 24 * 7)
                if self.review_strat in [
                        "theta_thres", "theta_thres_multiskill"
                ]:
                    probas_recall.append(
                        gen_outcome(self.stud_alpha, self.item_deltas.mean(),
                                    self.skill_betas[j], win_counters,
                                    attempt_counters,
                                    self.list_of_win_params[j],
                                    self.list_of_att_params[j]))
                elif self.review_strat in [
                        "theta_thres_est", "theta_thres_multiskill_est"
                ]:
                    # We put 0 as the mean ability for the students
                    probas_recall.append(
                        gen_outcome(0, self.est_deltas.mean(),
                                    self.est_betas[j], win_counters,
                                    attempt_counters, self.est_win_params[j],
                                    self.est_att_params[j]))
            if self.review_strat in ["theta_thres", "theta_thres_est"]:
                selected_block = np.argmin(
                    np.absolute(np.array(probas_recall) - self.param_review))
            elif self.review_strat in [
                    "theta_thres_multiskill", "theta_thres_multiskill_est"
            ]:
                selected_skills = []
                selected_skills.append(
                    np.argmin(
                        np.absolute(
                            np.array(probas_recall) - self.param_review)))
                # Then, selection of the other skills
                # We need to loop over all possible skills, in the decreasing order of proximity with theta
                increasing_order = np.argsort(
                    np.absolute(np.array(probas_recall) - self.param_review))
                # We need to define a list of acceptable items that do *not* involve skills seen after the current week (included)
                acceptable_items = [
                    item_id for item_id in self.inv_q_mat[selected_skills[0]]
                    if item_id < week * self.items_per_skill
                ]
                for k in range(1, len(probas_recall)):
                    intersec_items = intersection(
                        acceptable_items, self.inv_q_mat[increasing_order[k]])
                    if len(intersec_items) == 0:
                        # Goes to next skill if there is no other item that meets the skills criterion
                        continue
                    else:
                        acceptable_items = intersec_items.copy()
                # Choose item
                # There will be at least 1 item inside acceptable_items
                selected_block = np.random.choice(
                    acceptable_items
                )  # We call it "block" but in fact it's the item index
        elif self.review_strat in ["greedy", "greedy_est"]:
            # Output for uniskill strats = skill id
            if self.review_strat == "greedy":
                selected_block = greedy_skill_selection(
                    week, self.B, self.list_of_ri, self.stud_alpha,
                    self.item_deltas.mean(), self.skill_betas,
                    self.list_of_win_params, self.list_of_att_params, self.q)
            elif self.review_strat == "greedy_est":
                selected_block = greedy_skill_selection(
                    week, self.B, self.list_of_ri, 0, self.est_deltas.mean(),
                    self.est_betas, self.est_win_params, self.est_att_params,
                    self.q)
        elif self.review_strat in [
                "greedy_multiskill", "greedy_multiskill_est"
        ]:
            # Output for multiskill strats = item id
            if self.review_strat == "greedy_multiskill":
                selected_block = greedy_multiskill_selection(
                    week, self.B, self.list_of_ri, self.stud_alpha,
                    self.item_deltas.mean(), self.skill_betas,
                    self.list_of_win_params, self.list_of_att_params, self.q,
                    self.qmat, self.inv_q_mat, self.items_per_skill)
            elif self.review_strat == "greedy_multiskill_est":
                selected_block = greedy_multiskill_selection(
                    week, self.B, self.list_of_ri, 0, self.est_deltas.mean(),
                    self.est_betas, self.est_win_params, self.est_att_params,
                    self.q, self.qmat, self.inv_q_mat, self.items_per_skill)
        return selected_block
Example #16
    def __init__(self, labels, sent_id, sent_id_dict, n_batch, n_cls, n_shot, n_query, test=False):
        '''
        Args:
            labels: size=(dataset_size), label indices of instances in a data set
            sent_id: sentence id of each instance
            sent_id_dict: maps a sentence id to the indices of its instances
            n_batch: int, number of batches for episode training
            n_cls: int, number of sampled classes
            n_shot: int, number of support instances per class
            n_query: int, number of query instances per class
        '''
        self.n_batch = n_batch
        self.n_cls = n_cls
        self.n_ins = n_shot + n_query
        self.n_shot = n_shot

        self.classes = list(set(labels))
        self.sent_id = sent_id
        
        labels = np.array(labels)
        self.cls_indices = {}
        self.cls_indices_shot = {}
        self.cls_indices_query = {}
        self.max_ins = -1

        self.max_sent_id = -1
        for c in self.classes:
            if c == 0:
                continue
            indices = np.argwhere(labels == c).reshape(-1)
            #print(c)
            #print(indices)
            #print(len(indices))
            self.max_sent_id = max(self.max_sent_id, sent_id[indices[int(len(indices)/2)]])
        
        for c in self.classes:
            #print(c)
            #print(self.max_sent_id)
            indices = np.argwhere(labels == c).reshape(-1)
            indices_query = np.argwhere(np.array(self.sent_id) <= self.max_sent_id).reshape(-1)
            indices_shot = np.argwhere(np.array(self.sent_id) > self.max_sent_id).reshape(-1)
            #print(len(indices_query))
            #print(len(indices_shot))
            
            #if c != 0:
            #    self.max_ins = max(self.max_ins, len(indices))
            self.cls_indices[c] = indices
            #intersection of the indices and indices_shot
            self.cls_indices_shot[c] = intersection(indices, indices_shot)
            #####self.cls_indices_query[c] = intersection(indices, indices_query)
            #self.cls_indices_query[c] = indices_query
            #print(c)
            #print(indices_query)
            cls_indices_sent = np.arange(self.sent_id[indices_query[-1]])
            #print(cls_indices_sent)
            #print(len(cls_indices_sent))
            #print(cls_indices_sent)
            #print(sent_id_dict)
            self.cls_indices_query[c] = []
            for query in cls_indices_sent:
                #print(query)
                self.cls_indices_query[c].append(sent_id_dict[query][0])
                #print(sent_id_dict[self.cls_indices_query])
            #print(self.cls_indices_query)
            #exit()

            '''if c != 0:
                print(indices_query)
                print(len(indices_query))
                print(indices)
                print(len(indices))
                print(self.cls_indices_query[c])
                print(len(self.cls_indices_query[c]))
                print(len(self.cls_indices_query[0]))
                exit()'''
            #print(self.cls_indices_shot)
            #print(self.cls_indices_query)

            # if c != 0:
            #self.max_query_ins = max(self.max_ins, len(self.cls_indices_query))                  
            
        self.test = test
        self.max_ins = min(self.max_ins, 300)
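Since indices and indices_shot are NumPy index arrays, np.intersect1d is the standard equivalent of the list-based helper used for cls_indices_shot:

import numpy as np

a = np.array([0, 3, 5, 9])
b = np.array([3, 4, 5])
print(np.intersect1d(a, b))  # [3 5]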
Example #17
    def calculateCosts(self,
                       output: str = None,
                       quiet: bool = False,
                       policy: Policy = None):
        """Model the usability costs needed to reach found communities."""
        if not self.clusters:
            raise ValueError("Clusters for a graph must be computed "
                             "before calculating its cost.")

        msg = ""
        appStore = ApplicationStore.get()

        crossing = self.clusters.crossing()
        grantingCost = 0
        isolationCost = 0
        splittingCost = 0
        for (index, x) in enumerate(crossing):
            if not x:
                continue

            edge = self.g.es[index]
            source = self.g.vs[edge.source]
            target = self.g.vs[edge.target]
            sourceType = source.attributes()['type']
            targetType = target.attributes()['type']
            sourceName = source.attributes()['name']
            targetName = target.attributes()['name']

            # Case where a file-file node was removed. Should normally not
            # happen so we will not write support for it yet.
            if sourceType == "file":
                if targetType == "app":
                    grantingCost += 1
                    if policy:
                        app = appStore.lookupUid(targetName)
                        policy.incrementScore('graphGrantingCost', None, app)
                else:
                    # Check if an app co-accessed the files. If so, increase the
                    # cost of splitting that app instance into two.
                    sAccessors = []
                    for n in source.neighbors():
                        if n.attributes()['type'] == 'app':
                            sAccessors.append(n)
                    tAccessors = []
                    for n in target.neighbors():
                        if n.attributes()['type'] == 'app':
                            tAccessors.append(n)

                    inter = intersection(sAccessors, tAccessors)

                    for i in inter:
                        splittingCost += 1
                        if policy:
                            app = appStore.lookupUid(sourceName)
                            policy.incrementScore('graphSplittingCost', None,
                                                  app)
                    if not inter:
                        print(
                            "Warning: file-file node removed by graph "
                            "community finding algorithm. Not supported.",
                            file=sys.stderr)
                        print(source, target)
                        raise NotImplementedError
            elif targetType == "file":  # sourceType in "app", "appstate"
                grantingCost += 1
                if sourceType == "app" and policy:
                    app = appStore.lookupUid(sourceName)
                    policy.incrementScore('graphGrantingCost', None, app)
                elif policy:
                    policy.incrementScore('graphGranting', None, None)
            else:
                # app-app links are just noise in the UnifiedGraph
                if sourceType != "app" and targetType == "app":
                    isolationCost += 1
                    if policy:
                        app = appStore.lookupUid(targetName)
                        policy.incrementScore('graphIsolationCost', None, app)
                elif sourceType == "app" and targetType != "app":
                    isolationCost += 1
                    if policy:
                        app = appStore.lookupUid(sourceName)
                        policy.incrementScore('graphIsolationCost', None, app)

        editCount = grantingCost + isolationCost + splittingCost
        msg += ("%d edits performed: %d apps isolated, %d apps split and "
                "%d accesses revoked.\n" %
                (editCount, isolationCost, splittingCost, grantingCost))

        if not quiet:
            tprnt(msg)

        if output:
            path = self.outputDir + "/" + output + ".graphstats.txt"
            os.makedirs(File.getParentNameFromName(path), exist_ok=True)
            with open(path, "w") as f:
                print(msg, file=f)

        self.editCount = editCount
Example #18
def main():
    operations = [addr, addi, mulr, muli, banr, bani, borr, bori, setr, seti, gtir, gtri, gtrr, eqir, eqri, eqrr]

    puzzle = load_input('input.txt')

    before = None
    instruction = None
    part1_count = 0
    possible_int = {}

    for (k,line) in enumerate(puzzle):
        if k % 4 == 0:
            before = get_numbers(line)
        if k % 4 == 1:
            instruction = get_numbers(line)
        if k % 4 == 2:
            after = get_numbers(line)
            opcode = instruction[0]

            # get the operations that this instruction could correspond to
            possible = check(before,after,instruction[1:], operations)

            # count the number of samples that behave like three or more opcodes
            if len(possible) >= 3:
                part1_count += 1

            # we keep intersecting the list of possible operations based on the result of each input
            if opcode not in possible_int:
                possible_int[opcode] = possible
            else:
                possible_int[opcode] = intersection(possible_int[opcode],possible)

    # for part 1, count how many sample in our input behave like three or more opcodes
    print('Part 1:', part1_count)

    secured = []
    # for part 2, we need to figure out which opcode corresponds to which operation
    # at each stage, there is always (at least) one opcode that we can deduce
    # Once 15 of the 16 opcodes are secured, the last one is determined too.
    while len(secured) < 15:
        for key in possible_int:
            # if we've already figured this one out, skip it
            if key in secured:
                continue

            # if there's only 1 possibility for what this opcode could be, then we're done!
            if len(possible_int[key]) == 1:
                secured.append(key)
                new_possible_int = possible_int.copy()
                # remove this operation as a possibility for all other opcodes
                for k in possible_int:
                    if k == key:
                        continue
                    if possible_int[key][0] in new_possible_int[k]:
                        new_possible_int[k].remove(possible_int[key][0])
                possible_int = new_possible_int
                break

    # apply the operations to our puzzle input
    registers = [0,0,0,0]
    for line in load_input('input2.txt'):
        op, a, b, c = get_numbers(line)
        registers = operations[possible_int[op][0]](registers,a,b,c)

    print('Part 2:', registers[0])
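The check helper is not shown; a plausible sketch consistent with how it is called above, assuming each operation takes a register list plus a, b, c and returns the new registers:

def check(before, after, args, operations):
    # Indices of the operations whose result on `before` matches `after`.
    return [idx for idx, op in enumerate(operations)
            if op(list(before), *args) == after]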