Code Example #1
File: exp.py  Project: yunshengb/GraphSim
def exp5():
    """ Query visualization. """
    dataset = 'imdbmulti'
    model = 'astar'
    concise = True
    norms = [True, False]
    dir = get_result_path() + '/{}/query_vis/{}'.format(dataset, model)
    create_dir_if_not_exists(dir)
    info_dict = {
        # draw node config
        'draw_node_size': 150 if dataset != 'linux' else 10,
        'draw_node_label_enable': True,
        'node_label_name': None if dataset == 'linux' else 'type',
        'draw_node_label_font_size': 6,
        'draw_node_color_map': TYPE_COLOR_MAP,
        # draw edge config
        'draw_edge_label_enable': False,
        'edge_label_name': 'valence',
        'draw_edge_label_font_size': 6,
        # graph text info config
        'each_graph_text_list': [],
        'each_graph_text_font_size': 8,
        'each_graph_text_pos': [0.5, 1.05],
        # graph padding: value range: [0, 1]
        'top_space': 0.20 if concise else 0.26,  # out of whole graph
        'bottom_space': 0.05,
        'hbetween_space': 0.6 if concise else 1,  # out of the subgraph
        'wbetween_space': 0,
        # plot config
        'plot_dpi': 200,
        'plot_save_path_eps': '',
        'plot_save_path_png': ''
    }
    train_data = load_data(dataset, train=True)
    test_data = load_data(dataset, train=False)
    row_graphs = test_data.graphs
    col_graphs = train_data.graphs
    r = load_result(dataset, model, row_graphs=row_graphs, col_graphs=col_graphs)
    tr = load_result(dataset, TRUE_MODEL, row_graphs=row_graphs, col_graphs=col_graphs)
    for norm in norms:
        ids = r.get_sort_id_mat(norm)
        m, n = r.m_n()
        num_vis = 10
        for i in range(num_vis):
            q = test_data.graphs[i]
            gids = np.concatenate([ids[i][:3], [ids[i][int(n / 2)]], ids[i][-3:]])
            gs = [train_data.graphs[j] for j in gids]
            info_dict['each_graph_text_list'] = \
                [get_text_label(dataset, r, tr, i, i, q, model, norm, True, concise)] + \
                [get_text_label(dataset, r, tr, i, j,
                                train_data.graphs[j], model, norm, False, concise) \
                 for j in gids]
            # print(info_dict['each_graph_text_list'])
            info_dict['plot_save_path_png'] = '{}/query_vis_{}_{}_{}{}.{}'.format(
                dir, dataset, model, i, get_norm_str(norm), 'png')
            info_dict['plot_save_path_eps'] = '{}/query_vis_{}_{}_{}{}.{}'.format(
                dir, dataset, model, i, get_norm_str(norm), 'eps')
            vis(q, gs, info_dict)
Code Example #2
def pics(ticker):
	rv = cache.get(ticker)
	if rv is not None:
		print('cached')
	else:
		rv = BytesIO(vis(ticker))
		cache.set(ticker, rv, timeout=5 * 60)
	rv.seek(0)  # rewind in case the cached buffer was left at end-of-file
	return send_file(rv, mimetype='image/png')
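This view assumes a surrounding Flask app, a cache object exposing get()/set(key, value, timeout=...), and a vis(ticker) helper that returns raw PNG bytes. A minimal sketch of that wiring, where the names app and cache and the route are assumptions rather than part of the original project:

from cachelib import SimpleCache
from flask import Flask

app = Flask(__name__)
cache = SimpleCache()  # any cache with get()/set(key, value, timeout=...) would do

# expose the view above at /pics/<ticker>
app.add_url_rule('/pics/<ticker>', view_func=pics)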
Code Example #3
File: exp_ged.py  Project: sherwin29/GraphEmbedding
def exp10():
    # Query visualization.
    dataset = 'aids10k'
    model = 'beam80'
    k = 5
    info_dict = {
        'draw_node_size': 10,
        'draw_node_label_enable': True,
        'draw_node_label_font_size': 8,
        'draw_node_color_map': {
            'C': 'red',
            'O': 'blue',
            'N': 'green'
        },
        'draw_edge_label_enable': True,
        'draw_edge_label_font_size': 6,
        'each_graph_text_list': [],
        'each_graph_font_size': 10,
        'plot_dpi': 200,
        'plot_save_path': ''
    }
    r = load_result(dataset, model)
    ged_mat = r.ged_mat()
    time_mat = r.time_mat()
    ids = r.ged_sort_id_mat()
    m, n = ged_mat.shape
    train_data = load_data(dataset, train=True)
    test_data = load_data(dataset, train=False)
    for i in range(m):
        q = test_data.graphs[i]
        gids = ids[i][:k]
        gs = [train_data.graphs[j] for j in gids]
        info_dict['each_graph_text_list'] = \
            ['query id: {}'.format(q.graph['gid'])] + \
            [get_text_label(ged_mat, time_mat, i, j,
                            train_data.graphs[j]) for j in gids]
        info_dict['plot_save_path'] = \
            get_root_path() + \
            '/files/{}/query_vis/{}/query_vis_{}_{}_{}.png'.format(
                dataset, model, dataset, model, i)
        vis(q, gs, info_dict)
Code Example #4
File: main.py  Project: royxue/self-driving
def process(image):
    global left_line, right_line, detected

    undist = cal_undistort(image, objp, imgp)

    thresh_s = (170, 255)
    thresh_sx = (50, 255)
    thresh_sy = (50, 255)
    thresh_mag = (50, 255)
    thresh_gdir = (0.7, 1.3)
    com = cal_thresh_img(undist, thresh_s, thresh_sx, thresh_sy, thresh_mag, thresh_gdir)
    warped = warper(com, src, dst)

    if not detected:
        fit = sliding_window_search(warped)
        if fit is not None:
            left_line.add_fit(fit[0])
            right_line.add_fit(fit[1])
            detected = True
    else:
        fit = sliding_window_search_with_known(warped, left_line.get_fit(), right_line.get_fit())
        if fit is not None:
            left_line.add_fit(fit[0])
            right_line.add_fit(fit[1])
        else:
            detected = False

    left_fit, right_fit = left_line.get_fit(), right_line.get_fit()
    res = vis(undist, warped, left_fit, right_fit, src, dst)

    left_c, right_c = cal_curvature(warped, left_fit, right_fit)
    offset = cal_offset(undist, left_fit, right_fit)

    avg_c = (left_c + right_c)/2
    label_c = 'Radius of curvature: %.2f m' % (avg_c)
    offset_c = 'Offset from the center of lane: %.2f m' % (offset)

    cv2.putText(res, label_c, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 2)
    cv2.putText(res, offset_c, (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 2)

    cv2.imwrite('./res.jpg', res)

    return res
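process() takes a single frame and returns the annotated frame, so it plugs directly into a frame-by-frame video pipeline. A sketch of driving it over a whole clip, assuming moviepy 1.x is available (the library choice and the video file names are assumptions, not taken from the project):

from moviepy.editor import VideoFileClip

clip = VideoFileClip('project_video.mp4')
annotated = clip.fl_image(process)  # process() is called once per frame
annotated.write_videofile('output_video.mp4', audio=False)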
Code Example #5
File: main.py  Project: royxue/self-driving
def output_each_step():
    images = glob.glob('./test_images/*.jpg')
    objp, imgp = cal_calib_points('./camera_cal/calibration*.jpg')
    output_path = './output_images/'

    for img in images:
        print('Processing ' + img)
        img_name = img.split('/')[-1]
        image = cv2.imread(img)

        undist = cal_undistort(image, objp, imgp)
        cv2.imwrite(output_path+'undist_'+img_name, undist)

        thresh_s = (170, 255)
        thresh_sx = (30, 255)
        thresh_sy = (30, 255)
        thresh_mag = (50, 255)
        thresh_gdir = (0.7, 1.3)
        com = cal_thresh_img(undist, thresh_s, thresh_sx, thresh_sy, thresh_mag, thresh_gdir)
        cv2.imwrite(output_path+'thresh_'+img_name, com*255)

        src = np.float32([[200, 720], [1100, 720], [595, 450], [685, 450]])
        dst = np.float32([[300, 720], [980, 720], [300, 0], [980, 0]])
        warped = warper(com, src, dst)
        cv2.imwrite(output_path+'warped_'+img_name, warped*255)

        fit_name = output_path+'fit_'+img_name
        left_fit, right_fit = sliding_window_search(warped, vis=True, file_name=fit_name)

        # Note: out_img and the fitted x-values below are computed but not used afterwards.
        out_img = np.dstack((warped, warped, warped))*255
        ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

        res = vis(undist, warped, left_fit, right_fit, src, dst)
        cv2.imwrite(output_path+'final_'+img_name,res)
Code Example #6
    print('total time: {0}'.format((end - start).total_seconds()))

    robot_path = dict()
    robot_pre_suf_time = dict()
    for type_robot in robot_path_pre.keys():
        robot_path[type_robot] = robot_path_pre[type_robot] + robot_path_suf[type_robot][1:]
        if len(robot_path_suf[type_robot]) == 1:
            robot_pre_suf_time[type_robot] = [robot_time_pre[type_robot][-1], robot_time_pre[type_robot][-1]]
        else:
            robot_pre_suf_time[type_robot] = [robot_time_suf[type_robot][1], robot_time_suf[type_robot][-1]]
    # only display robots that are assigned tasks
    for robot, time in list(robot_pre_suf_time.items()):
        if time[-1] == 0:
            del robot_path[robot]
            del robot_pre_suf_time[robot]

    vis(workspace, robot_path, robot_pre_suf_time, task.ap)

    for type_robot, waypoint in robot_waypoint_pre.items():
        print(type_robot, " : ", waypoint)
        print(type_robot, " : ", robot_time_pre[type_robot])
        print(type_robot, " : ", robot_path_pre[type_robot])
        print(type_robot, " : ", list(range(round(robot_time_pre[type_robot][-1])+1)))

    for type_robot, waypoint in robot_waypoint_plus_suf.items():
        print(type_robot, " : ", waypoint)
        print(type_robot, " : ", robot_time_suf[type_robot])
        print(type_robot, " : ", robot_path_suf[type_robot])
        print(type_robot, " : ", list(range(round(robot_time_suf[type_robot][0]),
                                            round(robot_time_suf[type_robot][-1]) + 1)))
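The splice robot_path_pre[type_robot] + robot_path_suf[type_robot][1:] above drops the first suffix waypoint because it repeats the last prefix waypoint. A toy illustration of the same idea, with made-up waypoints:

# prefix runs from the start to the loop entry; suffix is the repeated loop
path_pre = [(0, 0), (1, 0), (2, 1)]
path_suf = [(2, 1), (3, 1), (2, 1)]
full_path = path_pre + path_suf[1:]  # drop the duplicated entry waypoint
print(full_path)                     # [(0, 0), (1, 0), (2, 1), (3, 1), (2, 1)]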
Code Example #7
                      help="""Write test data to OUTPUT_FILE
                      (defaults to PSYSH_DIR/test/fixtures)""")
    args = argp.parse_args()

    cp_range = RANGES['bmp'] if args.all else RANGES[args.range]
    indent = 2 if args.format_output else None
    if args.output_file:
        OUTPUT_FILE = abspath(expanduser(args.output_file))

    fixtures = []

    # use SMALL_RANGE by default; it should be enough.
    # use BMP_RANGE for a more complete smoke test
    for codepoint in cp_range:
        char = chr(codepoint)
        encoded = vis(char, VIS_WHITE)
        decoded = unvis(encoded)
        fixtures.append((encoded, decoded))

    # Add our own custom fixtures at the end,
    # since they would fail anyway if one of the previous did.
    for fixture in CUSTOM_FIXTURES:
        encoded = vis(fixture, VIS_WHITE)
        decoded = unvis(encoded)
        fixtures.append((encoded, decoded))

    with open(OUTPUT_FILE, 'w') as fp:
        # dump as JSON to avoid the backslashing and quoting nightmare
        # between PHP and Python
        json.dump(fixtures, fp, indent=indent)
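Since each fixture is an (encoded, decoded) pair produced by vis()/unvis(), a quick round-trip check over the written file can catch regressions before the PHP side consumes it. This is only a sketch, not part of the original script:

# optional sanity check: re-read the fixtures and verify the round trip
with open(OUTPUT_FILE) as fp:
    for encoded, decoded in json.load(fp):
        assert unvis(encoded) == decoded, 'round-trip mismatch for %r' % encoded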
Code Example #8
File: vis_test.py  Project: yunshengb/SimGNN
    0,
    'hbetween_space':
    0.4,  # out of the subgraph
    'wbetween_space':
    0.01,

    # plot config
    'plot_dpi':
    200,
    'plot_save_path':
    get_root_path() + '/temp/test_vis.png'
}

test_data = load_data('aids10k', train=False)
train_data = load_data('aids10k', train=True)
q = test_data.graphs[0]

gs = []
for i in range(1, 5):
    print(i)
    gs.append(train_data.graphs[i])

vis(q, gs, info_dict)
'''
TODO:
1. node color [done]
2. support graph-level text [done]
3. plt.save in vis [done]
4. edge color
'''
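For the remaining TODO item (edge color), one option, assuming vis() draws with networkx as the node options above suggest, is to pass per-edge colors to draw_networkx_edges. Everything below is a sketch and not the project's actual vis() code:

import matplotlib.pyplot as plt
import networkx as nx

g = train_data.graphs[0]
pos = nx.spring_layout(g)
# color edges by an attribute, falling back to gray when it is missing
edge_colors = ['red' if d.get('valence') == 2 else 'gray'
               for _, _, d in g.edges(data=True)]
nx.draw_networkx_nodes(g, pos, node_size=10)
nx.draw_networkx_edges(g, pos, edge_color=edge_colors)
plt.savefig(get_root_path() + '/temp/test_vis_edge_color.png', dpi=200)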