Example #1
def main():
    with open("15/input.txt", encoding="UTF-8") as file:
        content = file.read()

    matrix = get_matrix(content)
    searcher = Searcher(matrix)
    path = searcher.get_shortest_path()

    print(path)
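
Neither get_matrix nor Searcher is shown here. For context, a minimal sketch of what get_matrix could look like, assuming input.txt holds one row of single-digit values per line (an assumption, not the example's actual code):

def get_matrix(content):
    # Hypothetical parser: one grid row per non-empty line, one digit per cell.
    return [[int(ch) for ch in line] for line in content.splitlines() if line]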
Example #2
    def __init__(self, p, q, matrix_name, dataset):
        '''
        Initializer.
        - Param(s):
            p, q : neighborhood parameters
            matrix_name : name of the similarity matrix to use
            dataset : a sequence list created by function gen_data in utils.py
        '''
        self.p = p
        self.q = q
        self.lettre_list = 'ARNDCQEGHILKMFPSTWYVBZX*'
        self.matrix = utils.get_matrix(matrix_name)
        self.dataset = dataset

        self.debug = False
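
The alphabet string 'ARNDCQEGHILKMFPSTWYVBZX*' is the standard amino-acid ordering of NCBI substitution matrices, so utils.get_matrix presumably loads a scoring matrix such as BLOSUM62. A minimal sketch of such a loader, here backed by Biopython (an assumption; the original utils module is not shown):

from Bio.Align import substitution_matrices

def get_matrix(matrix_name):
    # Hypothetical loader: fetch a named substitution matrix, e.g. "BLOSUM62".
    return substitution_matrices.load(matrix_name)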
Example #3
import numpy as np
from collections import OrderedDict


def textrank_keywords(processed_sentences, window_size, top_num):
    """
    Inspired by PageRank, TextRank treats each word as a node, weights each
    edge by counting co-occurring word pairs within a sliding window, and
    ranks words by their resulting scores.
    :param processed_sentences: preprocessed sentences (at minimum, with stopwords removed).
    :param window_size: the number of following words paired with each word.
    :param top_num: the number of top words to return.
    :return: a list of the top top_num (word, score) tuples.
    """
    vocab = get_vocab(processed_sentences)
    token_pairs = get_token_pairs(window_size, processed_sentences)
    # Get normalized matrix
    g = get_matrix(vocab, token_pairs)
    # Initialization of the weights (PageRank values)
    pr = np.ones(len(vocab))
    d = 0.85  # damping coefficient, usually is .85
    min_diff = 1e-5  # convergence threshold
    steps = 10
    node_weight = None  # will map each keyword to its weight
    # Iteration
    previous_pr = 0
    for epoch in range(steps):
        pr = (1 - d) + d * np.dot(g, pr)
        if abs(previous_pr - sum(pr)) < min_diff:
            break
        else:
            previous_pr = sum(pr)
    # Get weight for each node
    node_weight = dict()
    for word, index in vocab.items():
        node_weight[word] = pr[index]
    # Print Top Keywords
    node_weight = OrderedDict(
        sorted(node_weight.items(), key=lambda t: t[1], reverse=True))
    keywords = []
    for i, (key, value) in enumerate(node_weight.items()):
        keywords.append((key, value))
        if i >= top_num - 1:  # collected top_num words
            break
    return keywords
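
get_matrix(vocab, token_pairs) is not shown; given the "Get normalized matrix" comment and the update rule above, it plausibly builds a column-normalized co-occurrence matrix. A sketch under those assumptions (vocab maps word to index, token_pairs is an iterable of word pairs):

def get_matrix(vocab, token_pairs):
    # Hypothetical helper: symmetric co-occurrence counts over the vocabulary,
    # column-normalized so np.dot(g, pr) behaves like one PageRank step.
    size = len(vocab)
    g = np.zeros((size, size))
    for w1, w2 in token_pairs:
        i, j = vocab[w1], vocab[w2]
        g[i, j] += 1
        g[j, i] += 1
    col_sums = g.sum(axis=0)
    col_sums[col_sums == 0] = 1  # leave isolated words' columns untouched
    return g / col_sums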
Example #4
    def convert_global_transform_to_actor_frame(self, actor=None, transform=None):

        if actor is None or transform is None:
            print("Input is None. Please check.")
            return None
        else:
            # Invert the actor's actor-to-world matrix to get world-to-actor.
            actor_to_world_transform = actor.get_transform()
            R_actor_to_world = get_matrix(actor_to_world_transform)
            R_world_to_actor = np.linalg.inv(R_actor_to_world)

            # Express the transform's location in homogeneous coordinates.
            transform_coords = np.zeros((4, 1))
            transform_coords[0] = transform.location.x
            transform_coords[1] = transform.location.y
            transform_coords[2] = transform.location.z
            transform_coords[3] = 1

            transform_position_as_seen_from_actor = np.dot(R_world_to_actor, transform_coords)

            return transform_position_as_seen_from_actor
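
get_matrix(actor_to_world_transform) must return an invertible 4x4 homogeneous matrix for np.linalg.inv and the 4x1 coordinate vector above to work. A sketch mirroring the helper in CARLA's bounding-box client example (an assumption; this repository's version is not shown):

import numpy as np

def get_matrix(transform):
    # Build the 4x4 actor-to-world matrix from a carla.Transform.
    # CARLA rotation angles are given in degrees.
    rotation, location = transform.rotation, transform.location
    c_y, s_y = np.cos(np.radians(rotation.yaw)), np.sin(np.radians(rotation.yaw))
    c_r, s_r = np.cos(np.radians(rotation.roll)), np.sin(np.radians(rotation.roll))
    c_p, s_p = np.cos(np.radians(rotation.pitch)), np.sin(np.radians(rotation.pitch))
    matrix = np.identity(4)
    matrix[0, 3], matrix[1, 3], matrix[2, 3] = location.x, location.y, location.z
    matrix[0, 0] = c_p * c_y
    matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
    matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
    matrix[1, 0] = s_y * c_p
    matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
    matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
    matrix[2, 0] = s_p
    matrix[2, 1] = -c_p * s_r
    matrix[2, 2] = c_p * c_r
    return matrix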
Example #5
spawn_point.location.z = spawn_point.location.z + 2
vehicle_front_2, vehicle_front_2_id = carla_handler_1.spawn_vehicle(spawn_point=spawn_point)

spawn_point = filtered_waypoints[50].transform
spawn_point.location.z = spawn_point.location.z + 2
vehicle_back, vehicle_back_id = carla_handler_1.spawn_vehicle(spawn_point=spawn_point)

spawn_point = filtered_waypoints[0].transform
spawn_point.location.z = spawn_point.location.z + 2
vehicle_back_2, vehicle_back_2_id = carla_handler_1.spawn_vehicle(spawn_point=spawn_point)

time.sleep(3)

# Get the rotation matrix from the ego vehicle's location and rotation vectors (i.e. its transform)
ego_vehicle_to_world_transform = ego_vehicle.get_transform()
R_ego_vehicle_to_world = get_matrix(ego_vehicle_to_world_transform)
R_world_to_ego_vehicle = np.linalg.inv(R_ego_vehicle_to_world)


while True:
	# Get all actors
	all_actors = carla_handler_1.world.get_actors()

	vehicle_in_front = None
	vehicle_in_rear = None

	actors_in_current_lane = []
	actors_in_other_lane = []
	for actor in all_actors.filter('vehicle.*'):
		if actor.id != ego_vehicle.id:
			actor_nearest_waypoint = carla_handler_1.world_map.get_waypoint(actor.get_location(), project_to_road=True)
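
get_matrix here plays the same role as in Example #4: it turns the ego vehicle's carla.Transform into a 4x4 actor-to-world matrix, whose inverse maps world coordinates into the ego frame (see the sketch under Example #4).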
Example #6
        MATCH_DATA, CORNER_DATA = get_homographies_contour(
            FRAME, REF_IMAGES, MATCH_DATA, CORNER_DATA)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

        if MATCH_DATA[0] is not None and MATCH_DATA[1] is not None:

            HOMOGRAPHY1 = MATCH_DATA[0]
            HOMOGRAPHY2 = MATCH_DATA[1]

            CORNER1 = CORNER_DATA[0]
            CORNER2 = CORNER_DATA[1]

            PROJ_MAT1, R, T = get_matrix(CAM_MAT, HOMOGRAPHY1)

            DIST = calculate_dist_corners(CORNER1, CORNER2)
            DIST_X = DIST[0]
            DIST_Y = DIST[1]

            STEP_X = DIST_X / 40
            STEP_Y = DIST_Y / 40

            if abs(REACHED_X) >= abs(DIST_X) or abs(REACHED_Y) >= abs(DIST_Y):
                REACHED_X = 0
                REACHED_Y = 0
            else:
                REACHED_X += STEP_X
                REACHED_Y += STEP_Y
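
get_matrix(CAM_MAT, HOMOGRAPHY1) returns a projection matrix plus R and T; a plausible implementation is the standard planar-homography decomposition (a sketch under that assumption, not the repository's actual code):

import numpy as np

def get_matrix(camera_matrix, homography):
    # Hypothetical decomposition: map the homography into camera space,
    # recover two rotation columns and the translation up to a common scale,
    # complete the rotation with a cross product, and reproject.
    hom = np.dot(np.linalg.inv(camera_matrix), homography)
    col1, col2, col3 = hom[:, 0], hom[:, 1], hom[:, 2]
    scale = np.sqrt(np.linalg.norm(col1) * np.linalg.norm(col2))
    rot1, rot2 = col1 / scale, col2 / scale
    translation = col3 / scale
    rot3 = np.cross(rot1, rot2)
    rotation = np.stack([rot1, rot2, rot3], axis=1)
    projection = np.dot(camera_matrix, np.column_stack((rotation, translation)))
    return projection, rotation, translation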
Example #7
)
parser_grn.add_argument(
    '--gene-attribute',
    type=str,
    default='Gene',
    dest="gene_attribute",
    help=
    'The name of the row attribute that specifies the gene symbols in the loom file.'
)

args = parser_grn.parse_args()

# Do stuff

ex_matrix_df = utils.get_matrix(loom_file_path=args.expression_mtx_fname.name,
                                gene_attribute=args.gene_attribute,
                                cell_id_attribute=args.cell_id_attribute)
signatures = utils.read_signatures_from_tsv_dir(
    dpath=args.signatures_fname,
    noweights=False,
    weight_threshold=args.min_regulon_gene_occurrence,
    min_genes=args.min_genes)

if len(signatures) == 0:
    raise Exception(
        f"No signatures passed filtering. Please consider adapting 'min_genes_regulon = {args.min_genes_regulon}' and 'min_regulon_gene_occurrence = {args.min_regulon_gene_occurrence}' (see params.sc.scenic.aucell). Make sure these settings are smaller than numRuns (params.sc.scenic)."
    )

auc_threshold = args.auc_threshold

if args.percentile_threshold is not None:
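
utils.get_matrix presumably reads the expression matrix out of the loom file; a minimal sketch with loompy, returning a cells-by-genes DataFrame (an assumption; the pipeline's real helper is not shown):

import loompy
import pandas as pd

def get_matrix(loom_file_path, gene_attribute, cell_id_attribute):
    # Hypothetical reader: loom stores genes as rows and cells as columns,
    # so transpose into the cells-by-genes layout expected downstream.
    with loompy.connect(loom_file_path, validate=False) as ds:
        return pd.DataFrame(ds[:, :].T,
                            index=ds.ca[cell_id_attribute],
                            columns=ds.ra[gene_attribute])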
Example #8
        RET, FRAME = VID_FEED.read()
        if not RET:
            print("Unable to capture video")
            sys.exit()

        if DRAW_HARRIS:
            FRAME = draw_harris_kps(FRAME)

        # MATCH_DATA = find_homographies(REF_DSC, FRAME)
        MATCH_DATA, _ = get_homographies_contour(FRAME, REF_IMAGES, MATCH_DATA,
                                                 None)

        for ind, homography in enumerate(MATCH_DATA):
            # homography = match_tuple[0]
            if homography is not None:
                projection_matrix, R, T = get_matrix(CAM_PARAMS, homography)
                print(R, T)
                # FRAME = draw_corners(FRAME, homography)
                FRAME = render(FRAME, OBJ, projection_matrix, REF_IMAGES[ind],
                               False)
                if RECTANGLE:
                    FRAME, _ = draw_rectangle(homography, REF_IMAGES[ind],
                                              FRAME)

        cv2.imshow('frame', FRAME)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    VID_FEED.release()
    cv2.destroyAllWindows()
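
get_matrix(CAM_PARAMS, homography) has the same shape as in Example #6 (it returns a projection matrix plus R and T), so the homography-decomposition sketch given there applies here as well.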
Example #9
surfs.append(xyz[:, :,  0, :])
surfs.append(xyz[:, :, -1, :])
surfs.append(xyz[:,  0, :, :])
surfs.append(xyz[:, -1, :, :])
surfs.append(xyz[ 0, :, :, :])
surfs.append(xyz[-1, :, :, :])
tecwrite.write_surf_multi('output/surf.dat', surfs)

writeBDF('output/mesh.bdf', nodes, bars + 1)

mat, Kmat = get_matrix(EA, nodes, bars, constrained)

rhs = numpy.zeros(mat.shape[0])
rhs[:3*len(forces)] = forces.flatten()

sol = solve(mat, rhs)[:3*len(forces)]
sol_surf = sol.reshape(xyz.shape)
xyz += sol_surf

surfs = []
surfs.append(xyz[:, :,  0, :])
surfs.append(xyz[:, :, -1, :])
surfs.append(xyz[:,  0, :, :])
surfs.append(xyz[:, -1, :, :])
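
get_matrix(EA, nodes, bars, constrained) is not shown. Since the caller solves mat but keeps only the first 3*len(forces) entries of the solution, mat is plausibly the global truss stiffness matrix augmented with constraint rows (Lagrange multipliers). A sketch under those assumptions (nodes an (n, 3) array, bars an array of node-index pairs, constrained a list of fixed DOF indices):

import numpy

def get_matrix(EA, nodes, bars, constrained):
    # Hypothetical assembly: 3 DOF per node, axial stiffness EA/L per bar,
    # plus one Lagrange-multiplier row/column per constrained DOF.
    n = len(nodes)
    K = numpy.zeros((3 * n, 3 * n))
    for i, j in bars:
        d = nodes[j] - nodes[i]
        L = numpy.linalg.norm(d)
        u = d / L
        k = EA / L * numpy.outer(u, u)  # 3x3 axial stiffness block
        for a, b, sign in ((i, i, 1), (j, j, 1), (i, j, -1), (j, i, -1)):
            K[3*a:3*a+3, 3*b:3*b+3] += sign * k
    m = len(constrained)
    mat = numpy.zeros((3 * n + m, 3 * n + m))
    mat[:3*n, :3*n] = K
    for row, dof in enumerate(constrained):
        mat[3*n + row, dof] = 1.0
        mat[dof, 3*n + row] = 1.0
    return mat, K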