Example #1
def run_test_tusimple(net,
                      data_root,
                      work_dir,
                      exp_name,
                      griding_num,
                      use_aux,
                      distributed,
                      batch_size=8):
    output_path = os.path.join(work_dir, exp_name + '.%d.txt' % get_rank())
    fp = open(output_path, 'w')
    loader = get_test_loader(batch_size, data_root, 'Tusimple', distributed)
    for i, data in enumerate(dist_tqdm(loader)):
        imgs, names = data
        imgs = imgs.cuda()
        with torch.no_grad():
            out = net(imgs)
        # with the aux segmentation head enabled the network returns (cls_out, seg_out);
        # only the classification branch is needed at test time
        if len(out) == 2 and use_aux:
            out = out[0]
        for b, name in enumerate(names):
            tmp_dict = {}
            tmp_dict['lanes'] = generate_tusimple_lines(
                out[b], imgs[0, 0].shape, griding_num)
            tmp_dict['h_samples'] = [
                160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270,
                280, 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390,
                400, 410, 420, 430, 440, 450, 460, 470, 480, 490, 500, 510,
                520, 530, 540, 550, 560, 570, 580, 590, 600, 610, 620, 630,
                640, 650, 660, 670, 680, 690, 700, 710
            ]
            tmp_dict['raw_file'] = name
            tmp_dict['run_time'] = 10  # dummy per-frame runtime (ms) expected by the TuSimple submission format
            json_str = json.dumps(tmp_dict)

            fp.write(json_str + '\n')
    fp.close()
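For reference, every line written by run_test_tusimple is one standalone JSON record in the TuSimple submission format. A minimal sketch of a single record with made-up placeholder values (the real h_samples list runs from 160 to 710 in steps of 10, and each lane holds one x coordinate, or -2 for "no point", per h_sample):

import json

# Hypothetical record purely for illustration; not real predictions.
record = {
    'lanes': [[-2, -2, 620, 633, 645, 658]],
    'h_samples': [160, 170, 180, 190, 200, 210],
    'raw_file': 'clips/some_clip/20.jpg',  # placeholder path
    'run_time': 10,
}
print(json.dumps(record))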
Example #2
def run_test(net,
             data_root,
             exp_name,
             work_dir,
             griding_num,
             use_aux,
             distributed,
             batch_size=8):
    # torch.backends.cudnn.benchmark = True
    output_path = os.path.join(work_dir, exp_name)
    if not os.path.exists(output_path) and is_main_process():
        os.mkdir(output_path)
    synchronize()
    loader = get_test_loader(batch_size, data_root, 'CULane', distributed)
    # import pdb;pdb.set_trace()
    for i, data in enumerate(dist_tqdm(loader)):
        imgs, names = data
        imgs = imgs.cuda()
        with torch.no_grad():
            out = net(imgs)
        if len(out) == 2 and use_aux:
            out, seg_out = out

        generate_lines(out,
                       imgs[0, 0].shape,
                       names,
                       output_path,
                       griding_num,
                       localization_type='rel',
                       flip_updown=True)
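The is_main_process() / synchronize() pair above is the usual single-writer rendezvous for distributed test runs: only the main rank creates the output directory and every rank waits at a barrier before writing into it. A minimal sketch of the same pattern using plain torch.distributed (the helpers in the example are repo utilities; this sketch only assumes the process group has been initialized when running distributed):

import os
import torch.distributed as dist

def make_output_dir(path):
    # Only rank 0 (or a single-process run) creates the directory.
    if not dist.is_initialized() or dist.get_rank() == 0:
        os.makedirs(path, exist_ok=True)
    # Everyone waits here so no rank writes into a directory that does not exist yet.
    if dist.is_initialized():
        dist.barrier()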
Example #3
def train(net, data_loader, loss_dict, optimizer, scheduler, logger, epoch, metric_dict, use_aux):
    net.train()
    progress_bar = dist_tqdm(data_loader)
    t_data_0 = time.time()
    for b_idx, data_label in enumerate(progress_bar):
        t_data_1 = time.time()
        reset_metrics(metric_dict)
        global_step = epoch * len(data_loader) + b_idx

        t_net_0 = time.time()
        results = inference(net, data_label, use_aux)

        loss = calc_loss(loss_dict, results, logger, global_step)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step(global_step)
        t_net_1 = time.time()

        results = resolve_val_data(results, use_aux)

        update_metrics(metric_dict, results)
        if global_step % 20 == 0:
            for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
                logger.add_scalar('metric/' + me_name, me_op.get(), global_step=global_step)
        logger.add_scalar('meta/lr', optimizer.param_groups[0]['lr'], global_step=global_step)

        if hasattr(progress_bar, 'set_postfix'):
            kwargs = {me_name: '%.3f' % me_op.get() for me_name, me_op in zip(metric_dict['name'], metric_dict['op'])}
            progress_bar.set_postfix(loss='%.3f' % float(loss),
                                     data_time='%.3f' % float(t_data_1 - t_data_0),
                                     net_time='%.3f' % float(t_net_1 - t_net_0),
                                     **kwargs)
        t_data_0 = time.time()
Example #4
def train(net, data_loader, loss_dict, optimizer, scheduler, logger, epoch,
          metric_dict, use_aux, local_rank):
    net.train()
    if local_rank != -1:
        data_loader.sampler.set_epoch(epoch)
    progress_bar = dist_tqdm(data_loader)
    t_data_0 = time.time()
    for b_idx, data_label in enumerate(progress_bar):
        t_data_1 = time.time()
        reset_metrics(metric_dict)
        global_step = epoch * len(data_loader) + b_idx

        t_net_0 = time.time()
        results = inference(net, data_label, use_aux)

        loss = calc_loss(loss_dict, results, logger, global_step)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step(global_step)
        t_net_1 = time.time()

        results = resolve_val_data(results, use_aux)

        update_metrics(metric_dict, results)
        if global_step % 20 == 0:
            for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
                logger.scalar_summary('metric/' + me_name, 'train',
                                      me_op.get(), global_step)
        logger.scalar_summary('meta/lr', 'train',
                              optimizer.param_groups[0]['lr'], global_step)

        if hasattr(progress_bar, 'set_postfix'):
            kwargs = {
                me_name: '%.3f' % me_op.get()
                for me_name, me_op in zip(metric_dict['name'],
                                          metric_dict['op'])
            }
            log_msg = 'Epoch{}/{}|Iter{}'.format(epoch, scheduler.total_epoch,
                                                 b_idx)
            # log_msg = 'Epoch{}/{}|Iter{} '.format(epoch, scheduler.total_epoch,
            #         global_step, b_idx, len(data_loader), optimizer.param_groups[0]['lr'])
            progress_bar.set_description(log_msg)
            progress_bar.set_postfix(loss='%.3f' % float(loss), **kwargs)
        t_data_0 = time.time()
Example #5
def train(net, train_loader, criterion, optimizer, scheduler, logger, epoch,
          device):
    net.train()
    progress_bar = dist_tqdm(train_loader)
    t_data_0 = time.time()
    total_loss = 0
    for b_idx, (images, labels) in enumerate(progress_bar):
        t_data_1 = time.time()

        t_net_0 = time.time()

        preds = net(images.to(device))

        t_net_1 = time.time()

        loss = criterion(preds, labels.to(device))  # labels must be on the same device as preds

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()

        if hasattr(progress_bar, 'set_postfix'):
            progress_bar.set_postfix(
                loss='%.3f' % float(loss),
                data_time='%.3f' % float(t_data_1 - t_data_0),
                net_time='%.3f' % float(t_net_1 - t_net_0))

        total_loss += loss.item()

    logger.add_scalar('metric/loss',
                      total_loss / len(train_loader),
                      global_step=epoch)
    logger.add_scalar('meta/lr',
                      optimizer.param_groups[0]['lr'],
                      global_step=epoch)
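A hedged sketch of how this generic loop might be driven end to end; the toy dataset, model, and hyperparameters below are placeholders chosen only so the snippet runs, and it assumes the repo's dist_tqdm helper is importable where train is defined:

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard import SummaryWriter

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Toy classification data standing in for the real lane dataset.
dataset = TensorDataset(torch.randn(64, 16), torch.randint(0, 4, (64,)))
train_loader = DataLoader(dataset, batch_size=8, shuffle=True)
net = nn.Linear(16, 4).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
logger = SummaryWriter()

for epoch in range(3):
    train(net, train_loader, criterion, optimizer, scheduler, logger, epoch, device)
logger.close()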
Example #6
def test(net, data_loader, dataset, work_dir, logger, use_aux=True):
    output_path = os.path.join(work_dir, 'culane_eval_tmp')
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    net.eval()
    if dataset['name'] == 'CULane':
        for i, data in enumerate(dist_tqdm(data_loader)):
            imgs, names = data
            imgs = imgs.cuda()
            with torch.no_grad():
                out = net(imgs)
            if len(out) == 2 and use_aux:
                out, seg_out = out

            generate_lines(out,
                           imgs[0, 0].shape,
                           names,
                           output_path,
                           dataset['griding_num'],
                           localization_type='rel',
                           flip_updown=True)
        res = call_culane_eval(dataset['data_root'], 'culane_eval_tmp',
                               work_dir)
        TP, FP, FN = 0, 0, 0  # aggregate counts across every CULane scenario split
        for k, v in res.items():
            val = float(v['Fmeasure']) if 'nan' not in v['Fmeasure'] else 0
            val_tp, val_fp, val_fn = int(v['tp']), int(v['fp']), int(v['fn'])
            TP += val_tp
            FP += val_fp
            FN += val_fn
            logger.log('k:{} val{}'.format(k, val))
        P = TP * 1.0 / (TP + FP)
        R = TP * 1.0 / (TP + FN)
        F = 2 * P * R / (P + R)
        logger.log('F:{}'.format(F))
        return F
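As a quick sanity check on the aggregation at the end: precision, recall, and F-measure come straight from the summed counts. A toy calculation with made-up counts:

# Toy counts, not real CULane results.
TP, FP, FN = 800, 200, 300
P = TP / (TP + FP)       # 0.800
R = TP / (TP + FN)       # ~0.727
F = 2 * P * R / (P + R)  # ~0.762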
Example #7
def train(net, data_loader, loss_dict, optimizer, scheduler, logger, epoch,
          metric_dict, cfg):
    net.train()
    progress_bar = dist_tqdm(data_loader)
    t_data_0 = time.time()
    # Pyten-20201019-FixBug
    reset_metrics(metric_dict)
    total_loss = 0
    for b_idx, data_label in enumerate(progress_bar):
        t_data_1 = time.time()
        global_step = epoch * len(data_loader) + b_idx

        t_net_0 = time.time()
        results = inference(net, data_label, cfg.use_aux)

        loss = calc_loss(loss_dict, results, logger, global_step, "train")
        optimizer.zero_grad()
        loss.backward()
        # Pyten-20210201-ClipGrad
        clip_grad_norm_(net.parameters(), max_norm=10.0)
        optimizer.step()
        total_loss = total_loss + loss.detach()
        scheduler.step(global_step)
        t_net_1 = time.time()

        results = resolve_val_data(results, cfg.use_aux)
        update_metrics(metric_dict, results)
        if global_step % 20 == 0:
            # Pyten-20210201-TransformImg
            img = img_detrans(data_label[0][0])
            logger.add_image("train_image/org", img, global_step=global_step)
            logger.add_image("train_image/std",
                             data_label[0][0],
                             global_step=global_step)
            if cfg.use_aux:
                seg_color_out = decode_seg_color_map(results["seg_out"][0])
                seg_color_label = decode_seg_color_map(data_label[2][0])
                logger.add_image("train_seg/predict",
                                 seg_color_out,
                                 global_step=global_step,
                                 dataformats='HWC')
                logger.add_image("train_seg/label",
                                 seg_color_label,
                                 global_step=global_step,
                                 dataformats='HWC')
            cls_color_out = decode_cls_color_map(data_label[0][0],
                                                 results["cls_out"][0], cfg)
            cls_color_label = decode_cls_color_map(data_label[0][0],
                                                   data_label[1][0], cfg)
            logger.add_image("train_cls/predict",
                             cls_color_out,
                             global_step=global_step,
                             dataformats='HWC')
            logger.add_image("train_cls/label",
                             cls_color_label,
                             global_step=global_step,
                             dataformats='HWC')

            for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
                logger.add_scalar('train_metric/' + me_name,
                                  me_op.get(),
                                  global_step=global_step)
        logger.add_scalar('train/meta/lr',
                          optimizer.param_groups[0]['lr'],
                          global_step=global_step)

        if hasattr(progress_bar, 'set_postfix'):
            kwargs = {
                me_name: '%.4f' % me_op.get()
                for me_name, me_op in zip(metric_dict['name'],
                                          metric_dict['op'])
            }
            progress_bar.set_postfix(
                loss='%.3f' % float(loss),
                avg_loss='%.3f' % float(total_loss / (b_idx + 1)),
                #data_time = '%.3f' % float(t_data_1 - t_data_0),
                net_time='%.3f' % float(t_net_1 - t_net_0),
                **kwargs)
        t_data_0 = time.time()

    dist_print("avg_loss_over_epoch", total_loss / len(data_loader))
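The clip_grad_norm_ call above is the in-place gradient clipping utility from torch.nn.utils; a minimal sketch of the pattern in isolation (the tiny module and the max_norm value are placeholders):

import torch
from torch.nn.utils import clip_grad_norm_

net = torch.nn.Linear(4, 2)
loss = net(torch.randn(3, 4)).sum()
loss.backward()
# Rescales all gradients so that their global L2 norm does not exceed max_norm.
clip_grad_norm_(net.parameters(), max_norm=10.0)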
Example #8
def val(net, data_loader, loss_dict, scheduler, logger, epoch, metric_dict,
        cfg):
    net.eval()
    progress_bar = dist_tqdm(data_loader)
    t_data_0 = time.time()
    reset_metrics(metric_dict)
    total_loss = 0
    with torch.no_grad():
        for b_idx, data_label in enumerate(progress_bar):
            t_data_1 = time.time()
            # reset_metrics(metric_dict)
            global_step = epoch * len(data_loader) + b_idx

            t_net_0 = time.time()
            # pdb.set_trace()
            results = inference(net, data_label, cfg.use_aux)
            loss = calc_loss(loss_dict, results, logger, global_step, "val")
            total_loss = total_loss + loss.detach()

            t_net_1 = time.time()

            results = resolve_val_data(results, cfg.use_aux)

            update_metrics(metric_dict, results)
            if global_step % 20 == 0:
                # Pyten-20210201-TransformImg
                img = img_detrans(data_label[0][0])
                logger.add_image("val_image/org", img, global_step=global_step)
                logger.add_image("val_image/std",
                                 data_label[0][0],
                                 global_step=global_step)
                if cfg.use_aux:
                    # import pdb; pdb.set_trace()
                    seg_color_out = decode_seg_color_map(results["seg_out"][0])
                    seg_color_label = decode_seg_color_map(data_label[2][0])
                    logger.add_image("val_seg/predict",
                                     seg_color_out,
                                     global_step=global_step,
                                     dataformats='HWC')
                    logger.add_image("val_seg/label",
                                     seg_color_label,
                                     global_step=global_step,
                                     dataformats='HWC')

                cls_color_out = decode_cls_color_map(data_label[0][0],
                                                     results["cls_out"][0],
                                                     cfg)
                cls_color_label = decode_cls_color_map(data_label[0][0],
                                                       data_label[1][0], cfg)
                logger.add_image("val_cls/predict",
                                 cls_color_out,
                                 global_step=global_step,
                                 dataformats='HWC')
                logger.add_image("val_cls/label",
                                 cls_color_label,
                                 global_step=global_step,
                                 dataformats='HWC')

            if hasattr(progress_bar, 'set_postfix'):
                kwargs = {
                    me_name: '%.4f' % me_op.get()
                    for me_name, me_op in zip(metric_dict['name'],
                                              metric_dict['op'])
                }
                progress_bar.set_postfix(
                    loss='%.3f' % float(loss),
                    avg_loss='%.3f' % float(total_loss / (b_idx + 1)),
                    # data_time = '%.3f' % float(t_data_1 - t_data_0),
                    net_time='%.3f' % float(t_net_1 - t_net_0),
                    **kwargs)
            t_data_0 = time.time()

    dist_print("avg_loss_over_epoch", total_loss / len(data_loader))
    for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
        logger.add_scalar('val_metric/' + me_name,
                          me_op.get(),
                          global_step=epoch)
    # Pyten-20201019-SaveBestMetric
    update_best_metric = True
    for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
        if me_name == "iou":
            continue
        cur_metric = me_op.get()
        if cur_metric < metric_dict["best_metric"][me_name]:
            update_best_metric = False
    if update_best_metric:
        for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
            metric_dict["best_metric"][me_name] = me_op.get()
        cfg.best_epoch = epoch
        dist_print("best metric updated!(epoch%d)" % epoch)
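The best-metric gate at the end accepts an epoch only when no monitored metric except iou has regressed, and then refreshes every stored value. A toy sketch of that rule with plain dictionaries (metric names and numbers are made up):

best = {'top1': 0.91, 'top2': 0.95, 'iou': 0.62}
current = {'top1': 0.92, 'top2': 0.96, 'iou': 0.60}

# Accept the epoch only if nothing outside 'iou' got worse.
if all(current[name] >= best[name] for name in best if name != 'iou'):
    best = dict(current)  # all stored metrics are refreshed on accept
    print('best metric updated!')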
Example #9
def run_test(net,
             data_root,
             exp_name,
             work_dir,
             distributed,
             cfg,
             batch_size=1):
    # torch.backends.cudnn.benchmark = True
    output_path = os.path.join(work_dir, exp_name)
    if not os.path.exists(output_path) and is_main_process():
        os.mkdir(output_path)
    synchronize()

    row_anchor = np.linspace(90, 255, 128).tolist()
    col_sample = np.linspace(0, 1640 - 1, 256)
    col_sample_w = col_sample[1] - col_sample[0]

    loader = get_test_loader(batch_size, data_root, 'CULane', distributed)

    filter_f = lambda x: int(np.round(x))

    # import pdb;pdb.set_trace()
    for i, data in enumerate(dist_tqdm(loader)):
        imgs, names = data
        imgs = imgs.cuda()
        with torch.no_grad():
            out = net(imgs)

        for j in range(len(names)):
            name = names[j]

            line_save_path = os.path.join(output_path, name[:-3] + 'lines.txt')
            save_dir, _ = os.path.split(line_save_path)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            with open(line_save_path, 'w') as writer:
                lane_exit_out = out["lane_exit_out"].sigmoid()
                lane_exit_out = lane_exit_out > cfg.thresh_lc

                for lane_index in range(lane_exit_out.size(1)):
                    if lane_exit_out[0][lane_index]:
                        x_list = []
                        y_list = []
                        vertex_wise_confidence_out = out[
                            "vertex_wise_confidence_out_" +
                            str(lane_index + 1)].sigmoid()
                        vertex_wise_confidence_out = vertex_wise_confidence_out > cfg.thresh_vc

                        row_wise_vertex_location_out = F.log_softmax(
                            out["row_wise_vertex_location_out_" +
                                str(lane_index + 1)],
                            dim=0)
                        row_wise_vertex_location_out = torch.argmax(
                            row_wise_vertex_location_out, dim=0)
                        # rows whose confidence fell below the threshold get the
                        # out-of-range sentinel 256 and are skipped below
                        row_wise_vertex_location_out[
                            ~vertex_wise_confidence_out] = 256

                        row_wise_vertex_location_out = row_wise_vertex_location_out.detach().cpu().numpy()

                        estimator = RANSACRegressor(random_state=42,
                                                    min_samples=2,
                                                    residual_threshold=10.0)
                        ##model = make_pipeline(PolynomialFeatures(2), estimator)

                        for k in range(row_wise_vertex_location_out.shape[0]):
                            if row_wise_vertex_location_out[k] != 256:
                                x = row_wise_vertex_location_out[
                                    k] * col_sample_w
                                y = row_anchor[k] / 256 * 590
                                x_list.append(x)
                                y_list.append(y)
                                #writer.write('%d %d ' % (filter_f(row_wise_vertex_location_out[k] * col_sample_w), filter_f(row_anchor[k] / 256 * 590)))
                        #writer.write('\n')

                        if len(x_list) <= 1:
                            continue
                        X = np.array(x_list)
                        y = np.array(y_list)
                        y = y[:, np.newaxis]
                        y_plot = np.linspace(y.min(), y.max())
                        estimator.fit(y, X)
                        x_plot = estimator.predict(y_plot[:, np.newaxis])

                        for x, y in zip(x_plot, y_plot):
                            writer.write('%d %d ' % (filter_f(x), filter_f(y)))
                        writer.write('\n')
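Example #9 fits x as a function of y with scikit-learn's RANSACRegressor and then resamples the fitted line along y before writing it out. A self-contained sketch of just that fitting step on synthetic points (all values and thresholds are arbitrary):

import numpy as np
from sklearn.linear_model import RANSACRegressor

# Synthetic lane points: x roughly linear in y, with one obvious outlier.
y = np.array([300., 350., 400., 450., 500.])[:, np.newaxis]
x = np.array([820., 838., 861., 1200., 899.])  # 1200 is the outlier

estimator = RANSACRegressor(random_state=42, min_samples=2, residual_threshold=10.0)
estimator.fit(y, x)  # regress x on y; the outlier is excluded from the consensus set

y_plot = np.linspace(y.min(), y.max(), num=18)
x_plot = estimator.predict(y_plot[:, np.newaxis])
print(np.round(np.c_[x_plot, y_plot]).astype(int))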