Example No. 1
import os

import torch


# Project helpers (is_main_process, synchronize, get_test_loader, dist_tqdm,
# generate_lines) are assumed to be imported from the surrounding repository.
def run_test(net,
             data_root,
             exp_name,
             work_dir,
             griding_num,
             use_aux,
             distributed,
             batch_size=8):
    # torch.backends.cudnn.benchmark = True
    output_path = os.path.join(work_dir, exp_name)
    # Only the main process creates the output directory; the barrier keeps
    # the other ranks from racing ahead before it exists.
    if not os.path.exists(output_path) and is_main_process():
        os.mkdir(output_path)
    synchronize()
    loader = get_test_loader(batch_size, data_root, 'CULane', distributed)
    for i, data in enumerate(dist_tqdm(loader)):
        imgs, names = data
        imgs = imgs.cuda()
        with torch.no_grad():  # inference only, no gradients needed
            out = net(imgs)
        # With the auxiliary segmentation head enabled, the network returns
        # (classification, segmentation); only the classification is used here.
        if len(out) == 2 and use_aux:
            out, seg_out = out

        generate_lines(out,
                       imgs[0, 0].shape,
                       names,
                       output_path,
                       griding_num,
                       localization_type='rel',
                       flip_updown=True)
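
A minimal invocation sketch; the dataset path, the griding_num value, and the eval() call are assumptions for illustration, not values taken from the example above.

net.eval()  # assumption: switch the model to inference mode before testing
run_test(net,
         data_root='/data/CULane',  # hypothetical dataset root
         exp_name='culane_eval',
         work_dir='./tmp',
         griding_num=200,           # assumed grid resolution
         use_aux=True,
         distributed=False,
         batch_size=8)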
Example No. 2
import json
import os

import torch


# Project helpers (get_rank, get_test_loader, dist_tqdm,
# generate_tusimple_lines) are assumed to be imported from the repository.
def run_test_tusimple(net,
                      data_root,
                      work_dir,
                      exp_name,
                      griding_num,
                      use_aux,
                      distributed,
                      batch_size=8):
    # Each distributed rank writes its own shard of the prediction file.
    output_path = os.path.join(work_dir, exp_name + '.%d.txt' % get_rank())
    loader = get_test_loader(batch_size, data_root, 'Tusimple', distributed)
    with open(output_path, 'w') as fp:
        for i, data in enumerate(dist_tqdm(loader)):
            imgs, names = data
            imgs = imgs.cuda()
            with torch.no_grad():
                out = net(imgs)
            # With the auxiliary segmentation head, keep only the
            # classification branch.
            if len(out) == 2 and use_aux:
                out = out[0]
            # Use a distinct loop variable here: the original code shadowed
            # the outer batch index `i`.
            for j, name in enumerate(names):
                tmp_dict = {}
                tmp_dict['lanes'] = generate_tusimple_lines(
                    out[j], imgs[0, 0].shape, griding_num)
                # Fixed TuSimple row anchors: 160, 170, ..., 710.
                tmp_dict['h_samples'] = list(range(160, 720, 10))
                tmp_dict['raw_file'] = name
                # Placeholder per-image run time required by the TuSimple
                # submission format.
                tmp_dict['run_time'] = 10
                fp.write(json.dumps(tmp_dict) + '\n')
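
Since every rank writes its own exp_name.%d.txt shard, the shards usually need to be concatenated into a single JSON-lines file before evaluation. A minimal merge sketch, assuming the naming scheme above and that the caller knows the world size (merge_rank_outputs is a hypothetical helper, not part of the example):

import os

def merge_rank_outputs(work_dir, exp_name, world_size):
    # Concatenate per-rank shards 'exp_name.<rank>.txt' into one file.
    merged_path = os.path.join(work_dir, exp_name + '.txt')
    with open(merged_path, 'w') as merged:
        for rank in range(world_size):
            shard = os.path.join(work_dir, exp_name + '.%d.txt' % rank)
            with open(shard) as f:
                merged.write(f.read())
    return merged_path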
Example No. 3
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
    w, h = cfg['dataset']['w'], cfg['dataset']['h']
    net = parsingNet(network=cfg['network'], datasets=cfg['dataset']).cuda()
    if distributed:
        # One GPU per process; DDP synchronizes gradients across ranks.
        net = torch.nn.parallel.DistributedDataParallel(
            net, device_ids=[args.local_rank])
    # try:
    #     from thop import profile
    #     macs, params = profile(net, inputs=(torch.zeros(1, 3, h, w).to(device),))
    #     ms = 'FLOPs: %.2f GFLOPS, Params: %.2f M' % (macs / 1E9, params / 1E6)
    # except Exception:
    #     ms = 'Model profile error'
    # logger.log(ms)
    train_loader = get_train_loader(cfg['dataset'], args.local_rank)
    test_loader = get_test_loader(cfg['dataset'], args.local_rank)
    optimizer = get_optimizer(net, cfg['train'])

    if cfg['finetune'] is not None:
        state_all = torch.load(cfg['finetune'])['model']
        # Keep only the backbone parameters (keys containing 'model') and
        # load them non-strictly so the new head keeps its initialization.
        state_clip = {}
        for k, v in state_all.items():
            if 'model' in k:
                state_clip[k] = v
        net.load_state_dict(state_clip, strict=False)
    if cfg['resume'] is not None:
        logger.log('==> Resume model from ' + cfg['resume'])
        resume_dict = torch.load(cfg['resume'], map_location='cpu')
        net.load_state_dict(resume_dict['model'])
        # Restore the optimizer state when the checkpoint includes it.
        if 'optimizer' in resume_dict:
            optimizer.load_state_dict(resume_dict['optimizer'])
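
The resume branch expects a checkpoint dict holding the weights under 'model' and, optionally, the optimizer state under 'optimizer'. A minimal save sketch matching that layout (the function name and file name are assumptions):

import torch

def save_checkpoint(net, optimizer, path='checkpoint.pth'):
    # Keys mirror what the resume code above reads back.
    torch.save({'model': net.state_dict(),
                'optimizer': optimizer.state_dict()}, path)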
Example No. 4
import os

import numpy as np
import torch
import torch.nn.functional as F
from sklearn.linear_model import RANSACRegressor


# Project helpers (is_main_process, synchronize, get_test_loader, dist_tqdm)
# are assumed to be imported from the surrounding repository.
def run_test(net,
             data_root,
             exp_name,
             work_dir,
             distributed,
             cfg,
             batch_size=1):
    # torch.backends.cudnn.benchmark = True
    output_path = os.path.join(work_dir, exp_name)
    if not os.path.exists(output_path) and is_main_process():
        os.mkdir(output_path)
    synchronize()

    # 128 row anchors in the 256-row model space, later mapped to the
    # 1640x590 CULane image; 256 column bins span the image width.
    row_anchor = np.linspace(90, 255, 128).tolist()
    col_sample = np.linspace(0, 1640 - 1, 256)
    col_sample_w = col_sample[1] - col_sample[0]

    loader = get_test_loader(batch_size, data_root, 'CULane', distributed)

    def filter_f(x):
        return int(np.round(x))

    for i, data in enumerate(dist_tqdm(loader)):
        imgs, names = data
        imgs = imgs.cuda()
        with torch.no_grad():
            out = net(imgs)

        for j, name in enumerate(names):
            line_save_path = os.path.join(output_path, name[:-3] + 'lines.txt')
            save_dir, _ = os.path.split(line_save_path)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            with open(line_save_path, 'w') as writer:
                # Lane-existence logits -> per-lane boolean flags.
                lane_exit_out = out["lane_exit_out"].sigmoid()
                lane_exit_out = lane_exit_out > cfg.thresh_lc

                for lane_index in range(lane_exit_out.size(1)):
                    if not lane_exit_out[0, lane_index]:
                        continue
                    x_list = []
                    y_list = []
                    # Per-row confidence that this lane has a vertex.
                    vertex_wise_confidence_out = out[
                        "vertex_wise_confidence_out_%d" % (lane_index + 1)].sigmoid()
                    vertex_wise_confidence_out = (
                        vertex_wise_confidence_out > cfg.thresh_vc)

                    # Pick the most likely column bin per row; log_softmax is
                    # monotone, so it does not change the argmax.
                    row_wise_vertex_location_out = F.log_softmax(
                        out["row_wise_vertex_location_out_%d" % (lane_index + 1)],
                        dim=0)
                    row_wise_vertex_location_out = torch.argmax(
                        row_wise_vertex_location_out, dim=0)
                    # 256 is a sentinel for rows without a confident vertex
                    # (valid column bins are 0..255).
                    row_wise_vertex_location_out[~vertex_wise_confidence_out] = 256

                    row_wise_vertex_location_out = (
                        row_wise_vertex_location_out.detach().cpu().numpy())

                    # Robust line fit: RANSAC discards outlier vertices.
                    estimator = RANSACRegressor(random_state=42,
                                                min_samples=2,
                                                residual_threshold=10.0)
                    # Optional: quadratic fit via
                    # make_pipeline(PolynomialFeatures(2), estimator).

                    for k in range(row_wise_vertex_location_out.shape[0]):
                        if row_wise_vertex_location_out[k] != 256:
                            # Map grid coordinates to CULane image pixels.
                            x = row_wise_vertex_location_out[k] * col_sample_w
                            y = row_anchor[k] / 256 * 590
                            x_list.append(x)
                            y_list.append(y)

                    if len(x_list) <= 1:
                        continue
                    X = np.array(x_list)
                    y = np.array(y_list)
                    y = y[:, np.newaxis]
                    # Fit x as a function of y, then sample 50 evenly spaced
                    # rows (np.linspace's default count) for output.
                    y_plot = np.linspace(y.min(), y.max())
                    estimator.fit(y, X)
                    x_plot = estimator.predict(y_plot[:, np.newaxis])

                    for x, y in zip(x_plot, y_plot):
                        writer.write('%d %d ' % (filter_f(x), filter_f(y)))
                    writer.write('\n')
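
Each output line is a flat sequence of alternating integer x y coordinates for one lane. A small parser sketch for reading a lines.txt file back into per-lane point lists (the function name is hypothetical):

def read_culane_lines(path):
    # One non-empty line per lane: 'x1 y1 x2 y2 ...'.
    lanes = []
    with open(path) as f:
        for line in f:
            vals = line.split()
            pts = [(int(vals[i]), int(vals[i + 1]))
                   for i in range(0, len(vals) - 1, 2)]
            if pts:
                lanes.append(pts)
    return lanes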