Example #1
def eval_lane(net, dataset, data_root, work_dir, griding_num, use_aux, distributed):
    net.eval()
    if dataset == 'CULane':
        run_test(net, data_root, 'culane_eval_tmp', work_dir, griding_num, use_aux, distributed)
        synchronize()   # wait for all results
        if is_main_process():
            res = call_culane_eval(data_root, 'culane_eval_tmp', work_dir)
            TP,FP,FN = 0,0,0
            for k, v in res.items():
                val = float(v['Fmeasure']) if 'nan' not in v['Fmeasure'] else 0
                val_tp,val_fp,val_fn = int(v['tp']),int(v['fp']),int(v['fn'])
                TP += val_tp
                FP += val_fp
                FN += val_fn
                dist_print(k, val)
            # Overall precision / recall / F-measure across all splits
            # (assumes at least one prediction and one ground-truth lane).
            P = TP * 1.0 / (TP + FP)
            R = TP * 1.0 / (TP + FN)
            F = 2 * P * R / (P + R)
            dist_print(F)
        synchronize()

    elif dataset == 'Tusimple':
        exp_name = 'tusimple_eval_tmp'
        run_test_tusimple(net, data_root, work_dir, exp_name, griding_num, use_aux, distributed)
        synchronize()  # wait for all results
        if is_main_process():
            combine_tusimple_test(work_dir, exp_name)
            res = LaneEval.bench_one_submit(os.path.join(work_dir, exp_name + '.txt'),
                                            os.path.join(data_root, 'test_label.json'))
            res = json.loads(res)
            for r in res:
                dist_print(r['name'], r['value'])
        synchronize()
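Note that the precision/recall step above divides by TP + FP and TP + FN directly, so it assumes the evaluator found at least one prediction and one ground-truth lane. A guarded variant of the aggregation (a minimal sketch, not part of the original code; res is assumed to have the same shape as call_culane_eval's output) could look like:

def summarize_culane(res):
    # Sum per-split counts; each value is assumed to carry 'tp'/'fp'/'fn'.
    TP = sum(int(v['tp']) for v in res.values())
    FP = sum(int(v['fp']) for v in res.values())
    FN = sum(int(v['fn']) for v in res.values())
    P = TP / (TP + FP) if TP + FP else 0.0     # precision, guarded against zero predictions
    R = TP / (TP + FN) if TP + FN else 0.0     # recall, guarded against zero ground truth
    F = 2 * P * R / (P + R) if P + R else 0.0  # F-measure
    return P, R, F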
Example #2
def run_test(net,
             data_root,
             exp_name,
             work_dir,
             griding_num,
             use_aux,
             distributed,
             batch_size=8):
    # torch.backends.cudnn.benchmark = True
    output_path = os.path.join(work_dir, exp_name)
    if not os.path.exists(output_path) and is_main_process():
        os.mkdir(output_path)
    synchronize()
    loader = get_test_loader(batch_size, data_root, 'CULane', distributed)
    for i, data in enumerate(dist_tqdm(loader)):
        imgs, names = data
        imgs = imgs.cuda()
        with torch.no_grad():
            out = net(imgs)
        if len(out) == 2 and use_aux:
            # Discard the auxiliary segmentation head; only the main branch is used at test time.
            out, seg_out = out

        generate_lines(out,
                       imgs[0, 0].shape,
                       names,
                       output_path,
                       griding_num,
                       localization_type='rel',
                       flip_updown=True)
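For context, a hypothetical invocation of this helper (the model factory and paths below are placeholders, not from the original project; griding_num=200 is the value commonly used for CULane in this codebase):

net = build_model().cuda()  # build_model is a hypothetical stand-in for the project's model factory
run_test(net, '/data/CULane', 'culane_eval_tmp', './work_dir',
         griding_num=200, use_aux=False, distributed=False)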
Example #3
def cp_projects(auto_backup, to_path):
    if is_main_process() and auto_backup:
        with open('./.gitignore', 'r') as fp:
            ign = fp.read()
        ign += '\n.git'
        spec = pathspec.PathSpec.from_lines(
            pathspec.patterns.GitWildMatchPattern, ign.splitlines())
        all_files = {
            os.path.join(root, name)
            for root, dirs, files in os.walk('./') for name in files
        }
        matches = spec.match_files(all_files)
        matches = set(matches)
        to_cp_files = all_files - matches
        dist_print('Copying projects to ' + to_path + ' for backup')
        t0 = time.time()
        warning_flag = True
        for f in to_cp_files:
            dirs = os.path.join(to_path, 'code', os.path.split(f[2:])[0])
            if not os.path.exists(dirs):
                os.makedirs(dirs)
            os.system('cp %s %s' % (f, os.path.join(to_path, 'code', f[2:])))
            elapsed_time = time.time() - t0
            if elapsed_time > 5 and warning_flag:
                dist_print(
                    'If the program appears stuck, it may be copying large files in this directory. '
                    'Either do not set --auto_backup, or keep the working directory clean, i.e. do not '
                    'place large files such as datasets or logs under it.'
                )
                warning_flag = False
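Shelling out to cp is POSIX-only and silently ignores failures. A portable alternative for the copy step (a sketch under the same assumptions about the file set) would use shutil:

import os
import shutil

def copy_into_backup(f, to_path):
    # Mirror f (a './'-relative path) under <to_path>/code/, preserving metadata.
    dst = os.path.join(to_path, 'code', f[2:])
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    shutil.copy2(f, dst)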
Example #4
def get_logger(work_dir, cfg):
    logger = DistSummaryWriter(work_dir)
    config_txt = os.path.join(work_dir, 'cfg.txt')
    if is_main_process():
        with open(config_txt, 'w') as fp:
            fp.write(str(cfg))

    return logger
Example #5
def save_model(net, optimizer, epoch, save_path, distributed):
    if is_main_process():
        model_state_dict = net.state_dict()
        state = {'model': model_state_dict, 'optimizer': optimizer.state_dict()}
        assert os.path.exists(save_path)
        model_path = os.path.join(save_path, 'ep%03d.pth' % epoch)
        torch.save(state, model_path)
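The matching restore step (a minimal sketch; the 'model' and 'optimizer' keys follow the save format above, and map_location lets CPU-only machines load GPU checkpoints):

import torch

def load_model(net, optimizer, model_path):
    # Restore the weights and optimizer state written by save_model().
    state = torch.load(model_path, map_location='cpu')
    net.load_state_dict(state['model'])
    optimizer.load_state_dict(state['optimizer'])

If the network was saved while wrapped in DistributedDataParallel, the state-dict keys carry a module. prefix and need stripping before load_state_dict.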
Example #6
def cp_projects(to_path):
    if is_main_process():
        with open('./.gitignore','r') as fp:
            ign = fp.read()
        ign += '\n.git'
        spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, ign.splitlines())
        all_files = {os.path.join(root,name) for root,dirs,files in os.walk('./') for name in files}
        matches = spec.match_files(all_files)
        matches = set(matches)
        to_cp_files = all_files - matches
        for f in to_cp_files:
            dirs = os.path.join(to_path,'code',os.path.split(f[2:])[0])
            if not os.path.exists(dirs):
                os.makedirs(dirs)
            os.system('cp %s %s'%(f,os.path.join(to_path,'code',f[2:])))
Example #7
def eval_lane(net, dataset, data_root, work_dir, distributed, cfg):
    net.eval()
    run_test(net, data_root, 'culane_eval_tmp', work_dir, distributed, cfg)
    synchronize()  # wait for all results
    if is_main_process():
        res = call_culane_eval(data_root, 'culane_eval_tmp', work_dir)
        TP, FP, FN = 0, 0, 0
        for k, v in res.items():
            val = float(v['Fmeasure']) if 'nan' not in v['Fmeasure'] else 0
            val_tp, val_fp, val_fn = int(v['tp']), int(v['fp']), int(v['fn'])
            TP += val_tp
            FP += val_fp
            FN += val_fn
            dist_print(k, val)
        P = TP * 1.0 / (TP + FP)
        R = TP * 1.0 / (TP + FN)
        F = 2 * P * R / (P + R)
        dist_print(F)
    synchronize()
Example #8
    if val_first:
        dist_print("initially validating with {} cls data...".format(
            len(val_loader)))
        val(net, val_loader, loss_dict, scheduler, logger, resume_epoch - 1,
            metric_dict, cfg)
    for epoch in range(resume_epoch, cfg.epoch):
        dist_print("epoch:", epoch)
        dist_print("training with {} cls data...".format(len(train_loader)))

        train(net, train_loader, loss_dict, optimizer, scheduler, logger,
              epoch, metric_dict, cfg)
        if cfg.val:
            dist_print("validating with {} cls data...".format(
                len(val_loader)))
            val(net, val_loader, loss_dict, scheduler, logger, epoch,
                metric_dict, cfg)

        save_model(net, optimizer, epoch, work_dir, distributed)
    if cfg.val:
        txt = "\nbest metric was achieved at epoch {}\n".format(cfg.best_epoch)
        for me_name, me_val in metric_dict["best_metric"].items():
            txt += me_name + ":" + str(me_val) + "\n"

        dist_print(txt)
        config_txt = os.path.join(work_dir, 'cfg.txt')
        if is_main_process():
            with open(config_txt, 'a') as fp:
                fp.write(txt)

    logger.close()
Example #9
def run_test(net,
             data_root,
             exp_name,
             work_dir,
             distributed,
             cfg,
             batch_size=1):
    # torch.backends.cudnn.benchmark = True
    output_path = os.path.join(work_dir, exp_name)
    if not os.path.exists(output_path) and is_main_process():
        os.mkdir(output_path)
    synchronize()

    row_anchor = np.linspace(90, 255, 128).tolist()
    col_sample = np.linspace(0, 1640 - 1, 256)
    col_sample_w = col_sample[1] - col_sample[0]

    loader = get_test_loader(batch_size, data_root, 'CULane', distributed)

    def filter_f(x):
        # Round a continuous coordinate to the nearest integer pixel.
        return int(np.round(x))

    for i, data in enumerate(dist_tqdm(loader)):
        imgs, names = data
        imgs = imgs.cuda()
        with torch.no_grad():
            out = net(imgs)

        for j in range(len(names)):
            name = names[j]

            line_save_path = os.path.join(output_path, name[:-3] + 'lines.txt')
            save_dir, _ = os.path.split(line_save_path)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            with open(line_save_path, 'w') as writer:
                lane_exit_out = out["lane_exit_out"].sigmoid()
                lane_exit_out = lane_exit_out > cfg.thresh_lc

                for lane_index in range(lane_exit_out.size(1)):
                    if lane_exit_out[0][lane_index]:
                        x_list = []
                        y_list = []
                        vertex_wise_confidence_out = out[
                            "vertex_wise_confidence_out_" +
                            str(lane_index + 1)].sigmoid()
                        vertex_wise_confidence_out = vertex_wise_confidence_out > cfg.thresh_vc

                        row_wise_vertex_location_out = F.log_softmax(
                            out["row_wise_vertex_location_out_" +
                                str(lane_index + 1)],
                            dim=0)
                        row_wise_vertex_location_out = torch.argmax(
                            row_wise_vertex_location_out, dim=0)
                        row_wise_vertex_location_out[
                            ~vertex_wise_confidence_out] = 256

                        row_wise_vertex_location_out = row_wise_vertex_location_out.detach().cpu().numpy()

                        estimator = RANSACRegressor(random_state=42,
                                                    min_samples=2,
                                                    residual_threshold=10.0)

                        for k in range(row_wise_vertex_location_out.shape[0]):
                            if row_wise_vertex_location_out[k] != 256:
                                x = row_wise_vertex_location_out[
                                    k] * col_sample_w
                                y = row_anchor[k] / 256 * 590
                                x_list.append(x)
                                y_list.append(y)

                        if len(x_list) <= 1:
                            continue
                        X = np.array(x_list)
                        y = np.array(y_list)
                        y = y[:, np.newaxis]
                        y_plot = np.linspace(y.min(), y.max())
                        estimator.fit(y, X)
                        x_plot = estimator.predict(y_plot[:, np.newaxis])

                        for x, y in zip(x_plot, y_plot):
                            writer.write('%d %d ' % (filter_f(x), filter_f(y)))
                        writer.write('\n')
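The RANSACRegressor here regresses x on y and then resamples the lane on an even y grid, which both rejects outlier vertices and smooths jitter. A self-contained illustration of that idea with the same estimator settings (synthetic points, not data from the model):

import numpy as np
from sklearn.linear_model import RANSACRegressor

# Synthetic lane points: x is roughly linear in y, with one gross outlier at y=250.
y = np.array([100., 150., 200., 250., 300.])[:, np.newaxis]
x = np.array([400., 420., 441., 900., 480.])

estimator = RANSACRegressor(random_state=42, min_samples=2, residual_threshold=10.0)
estimator.fit(y, x)                                # fit x = f(y); the outlier is rejected
y_plot = np.linspace(y.min(), y.max(), 50)
x_plot = estimator.predict(y_plot[:, np.newaxis])  # evenly resampled lane points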