Example #1
def deal_eval_ctw1500(debug=False):
    # compute DetEval
    eval_dir = os.path.join(cfg.output_dir, "Analysis", "output_eval")
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)

    print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))
    subprocess.call([
        'python', 'dataset/ctw1500/Evaluation_Protocol/ctw1500_eval.py',
        cfg.exp_name
    ])

    if debug:
        source_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))
        outpt_dir_base = os.path.join(cfg.output_dir, "Analysis", "eval_view",
                                      "ctw1500")
        if not os.path.exists(outpt_dir_base):
            mkdirs(outpt_dir_base)

        outpt_dir = os.path.join(
            outpt_dir_base,
            "{}_{}_{}".format(cfg.test_size[0], cfg.test_size[1],
                              cfg.checkepoch))
        osmkdir(outpt_dir)
        fid_path1 = '{}/Eval_ctw1500_{}.txt'.format(eval_dir, 0.5)

        analysize_result(source_dir, fid_path1, outpt_dir, "ctw1500")

    print('End.')
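Note: mkdirs and osmkdir are repository utilities that are not shown here; a minimal sketch of the assumed behavior (mkdirs creates a path recursively, osmkdir recreates it from scratch so stale results are cleared):

import os
import shutil


def mkdirs(path):
    # Create the directory (and any missing parents) if it does not exist yet.
    if not os.path.exists(path):
        os.makedirs(path)


def osmkdir(path):
    # Recreate the directory so results from a previous run are removed.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)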
Example #2
def deal_eval_total_text(debug=False):
    # compute DetEval
    eval_dir = os.path.join(cfg.output_dir, "Analysis", "output_eval")
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)

    print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))
    subprocess.call(
        ['python', 'dataset/total_text/Evaluation_Protocol/Python_scripts/Deteval.py', cfg.exp_name, '--tr', '0.7',
         '--tp', '0.6'])
    subprocess.call(
        ['python', 'dataset/total_text/Evaluation_Protocol/Python_scripts/Deteval.py', cfg.exp_name, '--tr', '0.8',
         '--tp', '0.4'])

    if debug:
        source_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))
        outpt_dir_base = os.path.join(cfg.output_dir, "Analysis", "eval_view", "total_text")
        if not os.path.exists(outpt_dir_base):
            mkdirs(outpt_dir_base)

        outpt_dir1 = os.path.join(outpt_dir_base, "{}_{}_{}_{}_{}"
                                  .format(cfg.test_size[0], cfg.test_size[1], cfg.checkepoch, 0.7, 0.6))
        osmkdir(outpt_dir1)
        fid_path1 = '{}/Eval_TotalText_{}_{}.txt'.format(eval_dir, 0.7, 0.6)

        analysize_result(source_dir, fid_path1, outpt_dir1, "totalText")

        outpt_dir2 = os.path.join(outpt_dir_base, "{}_{}_{}_{}_{}"
                                  .format(cfg.test_size[0], cfg.test_size[1], cfg.checkepoch, 0.8, 0.4))
        osmkdir(outpt_dir2)
        fid_path2 = '{}/Eval_TotalText_{}_{}.txt'.format(eval_dir, 0.8, 0.4)

        analysize_result(source_dir, fid_path2, outpt_dir2, "totalText")

    print('End.')
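Note: the two Deteval.py calls differ only in their (tr, tp) thresholds; a small refactor sketch, assuming the same script interface, loops over the operating points instead of duplicating the call:

import subprocess

DETEVAL = 'dataset/total_text/Evaluation_Protocol/Python_scripts/Deteval.py'


def run_deteval(exp_name, settings=((0.7, 0.6), (0.8, 0.4))):
    # Invoke the DetEval protocol once per (tr, tp) operating point.
    for tr, tp in settings:
        subprocess.call(['python', DETEVAL, exp_name,
                         '--tr', str(tr), '--tp', str(tp)])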
Example #3
def main():

    global lr

    if cfg.dataset == 'total-text':

        trainset = TotalText(data_root='data/total-text',
                             ignore_list=None,
                             is_training=True,
                             transform=Augmentation(size=cfg.input_size,
                                                    mean=cfg.means,
                                                    std=cfg.stds))

        valset = TotalText(data_root='data/total-text',
                           ignore_list=None,
                           is_training=False,
                           transform=BaseTransform(size=cfg.input_size,
                                                   mean=cfg.means,
                                                   std=cfg.stds))
    else:
        # Only 'total-text' is handled here; any other cfg.dataset value
        # leaves trainset/valset undefined (see the sketch after this example).
        pass

    train_loader = data.DataLoader(trainset,
                                   batch_size=cfg.batch_size,
                                   shuffle=True,
                                   num_workers=cfg.num_workers)
    val_loader = data.DataLoader(valset,
                                 batch_size=cfg.batch_size,
                                 shuffle=False,
                                 num_workers=cfg.num_workers)

    log_dir = os.path.join(
        cfg.log_dir,
        datetime.now().strftime('%b%d_%H-%M-%S_') + cfg.exp_name)
    mkdirs(log_dir)
    logger = LogSummary(log_dir)

    # Model
    model = TextNet()
    if cfg.mgpu:
        model = nn.DataParallel(model, device_ids=cfg.gpu_ids)

    model = model.to(cfg.device)
    if cfg.cuda:
        cudnn.benchmark = True

    criterion = TextLoss()
    lr = cfg.lr
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)

    print('Start training TextSnake.')

    for epoch in range(cfg.start_epoch, cfg.max_epoch):
        train(model, train_loader, criterion, scheduler, optimizer, epoch,
              logger)
        with torch.no_grad():
            validation(model, val_loader, criterion, epoch, logger)

    print('End.')
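Note: a defensive sketch of the dataset construction above (not the project's code) that fails fast on unsupported dataset names instead of falling through with trainset/valset undefined:

def build_datasets(cfg):
    # Sketch: build train/val datasets, raising on unknown dataset names.
    if cfg.dataset == 'total-text':
        trainset = TotalText(data_root='data/total-text', ignore_list=None,
                             is_training=True,
                             transform=Augmentation(size=cfg.input_size,
                                                    mean=cfg.means, std=cfg.stds))
        valset = TotalText(data_root='data/total-text', ignore_list=None,
                           is_training=False,
                           transform=BaseTransform(size=cfg.input_size,
                                                   mean=cfg.means, std=cfg.stds))
    else:
        raise ValueError('Unsupported dataset: {}'.format(cfg.dataset))
    return trainset, valset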
Example #4
def deal_eval_TD500(debug=False):
    # compute DetEval
    eval_dir = os.path.join(cfg.output_dir, "Analysis", "output_eval")
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)

    input_dir = 'output/{}'.format(cfg.exp_name)
    father_path = os.path.abspath(input_dir)
    print(father_path)
    print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))
    subprocess.call(['sh', 'dataset/TD500/eval.sh', father_path])

    if debug:
        source_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))
        outpt_dir_base = os.path.join(cfg.output_dir, "Analysis", "eval_view",
                                      "TD500")
        if not os.path.exists(outpt_dir_base):
            mkdirs(outpt_dir_base)

        outpt_dir = os.path.join(
            outpt_dir_base,
            "{}_{}_{}".format(cfg.test_size[0], cfg.test_size[1],
                              cfg.checkepoch))
        osmkdir(outpt_dir)
        fid_path1 = '{}/Eval_TD500.txt'.format(eval_dir)

        analysize_result(source_dir, fid_path1, outpt_dir, "TD500")

    print('End.')
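Note: subprocess.call returns the script's exit code, which is ignored above; a defensive variant (a sketch) surfaces evaluation failures instead of continuing silently:

ret = subprocess.call(['sh', 'dataset/TD500/eval.sh', father_path])
if ret != 0:
    raise RuntimeError('TD500 eval.sh exited with code {}'.format(ret))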
Example #5
def inference(detector, test_loader, output_dir):

    total_time = 0.
    if cfg.exp_name != "MLT2017":
        osmkdir(output_dir)
    else:
        if not os.path.exists(output_dir):
            mkdirs(output_dir)
    for i, (image, meta) in enumerate(test_loader):

        image = to_device(image)
        torch.cuda.synchronize()
        idx = 0  # test mode can only run with batch_size == 1

        # visualization
        img_show = image[idx].permute(1, 2, 0).cpu().numpy()
        img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)
        img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)

        # get detection result
        contours, output = detector.detect(image, img_show)
        tr_pred, tcl_pred = output['tr'], output['tcl']

        torch.cuda.synchronize()
        print('detect {} / {} images: {}.'.format(i + 1, len(test_loader),
                                                  meta['image_id'][idx]))

        pred_vis = visualize_detection(img_show, contours, tr_pred[1],
                                       tcl_pred[1])

        path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name),
                            meta['image_id'][idx])
        cv2.imwrite(path, pred_vis)

        H, W = meta['Height'][idx].item(), meta['Width'][idx].item()
        img_show, contours = rescale_result(img_show, contours, H, W)

        # write to file
        if cfg.exp_name == "Icdar2015":
            fname = "res_" + meta['image_id'][idx].replace('jpg', 'txt')
            contours = data_transfer_ICDAR(contours)
            write_to_file(contours, os.path.join(output_dir, fname))

        elif cfg.exp_name == "TD500":
            fname = "res_" + meta['image_id'][idx].replace('JPG', 'txt')
            im_show = data_transfer_TD500(contours,
                                          os.path.join(output_dir, fname),
                                          img_show)
            id_img = meta['image_id'][idx].replace("img_",
                                                   "").replace("JPG", "jpg")
            path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name),
                                id_img)
            cv2.imwrite(path, im_show)

        else:
            fname = meta['image_id'][idx].replace('jpg', 'txt')
            write_to_file(contours, os.path.join(output_dir, fname))
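Note: write_to_file is a repository helper; a minimal sketch under the assumption that results are stored as one polygon per line with flattened, comma-separated x,y coordinates:

import numpy as np


def write_to_file(contours, file_path):
    # Assumed format: one polygon per line, flattened as x1,y1,x2,y2,...
    with open(file_path, 'w') as f:
        for cont in contours:
            pts = np.asarray(cont).reshape(-1)
            f.write(','.join(str(int(v)) for v in pts) + '\n')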
Example #6
def save_model(model, epoch, lr):

    save_dir = os.path.join(cfg.save_dir, cfg.exp_name)
    if not os.path.exists(save_dir):
        mkdirs(save_dir)

    save_path = os.path.join(
        save_dir, 'textsnake_{}_{}.pth'.format(model.backbone_name, epoch))
    print('Saving to {}.'.format(save_path))
    state_dict = {'lr': lr, 'epoch': epoch, 'model': model.state_dict()}
    torch.save(state_dict, save_path)
Example #7
def inference(detector, test_loader, output_dir):

    total_time = 0.

    for i, (image, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map,
            meta) in enumerate(test_loader):

        image, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map = to_device(
            image, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)

        torch.cuda.synchronize()
        start = time.time()

        idx = 0  # test mode can only run with batch_size == 1

        # get detection result
        contours, output = detector.detect(image)

        torch.cuda.synchronize()
        end = time.time()
        total_time += end - start
        fps = (i + 1) / total_time
        print('detect {} / {} images: {}. ({:.2f} fps)'.format(
            i + 1, len(test_loader), meta['image_id'][idx], fps))

        # visualization
        tr_pred, tcl_pred = output['tr'], output['tcl']
        img_show = image[idx].permute(1, 2, 0).cpu().numpy()
        img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)

        pred_vis = visualize_detection(img_show, contours, tr_pred[1],
                                       tcl_pred[1])
        gt_contour = []
        for annot, n_annot in zip(meta['annotation'][idx],
                                  meta['n_annotation'][idx]):
            if n_annot.item() > 0:
                gt_contour.append(annot[:n_annot].int().cpu().numpy())
        gt_vis = visualize_detection(img_show, gt_contour,
                                     tr_mask[idx].cpu().numpy(),
                                     tcl_mask[idx].cpu().numpy())
        im_vis = np.concatenate([pred_vis, gt_vis], axis=0)
        path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name),
                            meta['image_id'][idx])
        cv2.imwrite(path, im_vis)

        H, W = meta['Height'][idx].item(), meta['Width'][idx].item()
        img_show, contours = rescale_result(img_show, contours, H, W)

        # write to file
        mkdirs(output_dir)
        write_to_file(
            contours,
            os.path.join(output_dir,
                         meta['image_id'][idx].replace('jpg', 'txt')))
Example #8
def save_model(model, epoch, lr, optimizer, accuracy):
    save_dir = os.path.join(cfg.save_dir, cfg.exp_name)
    if not os.path.exists(save_dir):
        mkdirs(save_dir)
    save_path = os.path.join(save_dir, 'sleep_class_{}-{:.3f}.pth'.format(epoch, accuracy))
    print('Saving to {}.'.format(save_path))
    state_dict = {
        'lr': lr,
        'epoch': epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    torch.save(state_dict, save_path)
Example #9
def save_model(model, optimizer, scheduler, epoch):
    save_dir = os.path.join(cfg.save_dir, cfg.exp_name)
    if not os.path.exists(save_dir):
        mkdirs(save_dir)

    save_path = os.path.join(
        save_dir, 'textsnake_{}_{}.pth'.format(model.backbone_name, epoch))
    print('Saving to {}.'.format(save_path))
    state_dict = {
        'epoch': epoch,
        'model': model.state_dict(),
        'optim': optimizer.state_dict()
        # 'scheduler': scheduler.state_dict()
    }
    torch.save(state_dict, save_path)
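Note: a checkpoint saved with the dictionary above can be restored symmetrically; a sketch that reuses the same key names ('model', 'optim', 'epoch'):

import torch


def load_model(model, optimizer, checkpoint_path, device='cpu'):
    # Restore the state written by save_model above and return the saved epoch.
    state = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(state['model'])
    optimizer.load_state_dict(state['optim'])
    return state['epoch']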
Example #10
def save_model(model, epoch, lr, optimizer):

    save_dir = os.path.join(cfg.save_dir, cfg.exp_name)
    if not os.path.exists(save_dir):
        mkdirs(save_dir)

    save_path = os.path.join(
        save_dir, 'textgraph_{}_{}.pth'.format(model.backbone_name, epoch))
    print('Saving to {}.'.format(save_path))
    state_dict = {
        'lr': lr,
        'epoch': epoch,
        'model': model.state_dict() if not cfg.mgpu else model.module.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    torch.save(state_dict, save_path)
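Note: this variant unwraps nn.DataParallel via model.module before saving; for checkpoints saved without that unwrapping, a common workaround when loading into a single-GPU model (a sketch) strips the 'module.' key prefix:

from collections import OrderedDict


def strip_module_prefix(state_dict):
    # Drop the 'module.' prefix that nn.DataParallel adds to parameter names.
    return OrderedDict(
        (k[len('module.'):] if k.startswith('module.') else k, v)
        for k, v in state_dict.items())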
Example #11
def inference(detector, test_loader, output_dir):

    total_time = 0.0

    for i, (image, reg_mask, meta) in enumerate(test_loader):

        image, reg_mask = to_device(image, reg_mask)

        torch.cuda.synchronize()
        start = time.time()

        index = 0
        contours, aster_text, output = detector.detect(image)

        torch.cuda.synchronize()
        end = time.time()
        total_time += end - start
        fps = (i + 1) / total_time
        print('detect {} | {} images: {}. ({:.2f} fps)'.format(i + 1, len(test_loader), meta['image_id'][index], fps))

        # visualization
        pred_mask = output['reg']
        img_show = image[index].permute(1, 2, 0).cpu().numpy()
        img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)

        if cfg.spotter:
            pred_vis = visualize_detection_end_to_end(img_show, contours, aster_text, pred_mask)
        else:
            pred_vis = visualize_detection(img_show, contours, pred_mask)
        gt_contour = []
        for annot, n_annot in zip(meta['annotation'][index], meta['n_annotation'][index]):
            if n_annot.item() > 0:
                gt_contour.append(annot[:n_annot].int().cpu().numpy())
        gt_vis = visualize_detection(img_show, gt_contour, reg_mask[index].cpu().numpy())
        im_vis = np.concatenate([pred_vis, gt_vis], axis=0)
        path = os.path.join(cfg.visualization_directory, '{0}_{1}_test'.format(cfg.dataset_name, cfg.backbone), meta['image_id'][index])
        cv2.imwrite(path.replace('.gif', '.png'), im_vis)

        H, W = meta['Height'][index].item(), meta['Width'][index].item()
        img_show, contours = rescale_result(img_show, contours, H, W)

        mkdirs(output_dir)
        write_to_file(contours, aster_text,
                      os.path.join(output_dir, meta['image_id'][index].replace('ts_', '')
                                   .replace('.jpg', '.txt').replace('.JPG', '.txt').replace('.png', '.txt').replace('.gif', '.txt')))
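Note: the chained .replace() calls above normalize the image id into a .txt result name; an equivalent sketch using os.path.splitext covers any image extension in one step:

import os


def result_filename(image_id):
    # Strip the 'ts_' prefix and swap whatever extension remains for '.txt'.
    base = image_id.replace('ts_', '')
    return os.path.splitext(base)[0] + '.txt'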
Example #12
def inference(model, detector, test_loader, output_dir):

    model.eval()

    for i, (img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, meta) in enumerate(test_loader):

        img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map = to_device(
            img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)
        # inference
        output = model(img)

        for idx in range(img.size(0)):
            print('detect {} / {} images: {}.'.format(i + 1, len(test_loader), meta['image_id'][idx]))

            tr_pred = output[idx, 0:2].softmax(dim=0).data.cpu().numpy()
            tcl_pred = output[idx, 2:4].softmax(dim=0).data.cpu().numpy()
            sin_pred = output[idx, 4].data.cpu().numpy()
            cos_pred = output[idx, 5].data.cpu().numpy()
            radii_pred = output[idx, 6].data.cpu().numpy()

            # get model output
            det_result, tcl_contour = detector.detect(tr_pred, tcl_pred, sin_pred, cos_pred, radii_pred)  # (n_tcl, 3)

            # visualization
            img_show = img[idx].permute(1, 2, 0).cpu().numpy()
            img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)
            contours = result2polygon(img_show, det_result, tcl_contour)

            pred_vis = visualize_detection(img_show, tr_pred[1], tcl_pred[1], contours)
            gt_contour = []
            for annot, n_annot in zip(meta['annotation'][idx], meta['n_annotation'][idx]):
                if n_annot.item() > 0:
                    gt_contour.append(annot[:n_annot].int().cpu().numpy())
            gt_vis = visualize_detection(img_show, tr_mask[idx].cpu().numpy(), tcl_mask[idx].cpu().numpy(), gt_contour)
            im_vis = np.concatenate([pred_vis, gt_vis], axis=0)
            path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name), meta['image_id'][idx])
            cv2.imwrite(path, im_vis)

            H, W = meta['Height'][idx].item(), meta['Width'][idx].item()
            img_show, contours = rescale_result(img_show, contours, H, W)

            # write to file
            mkdirs(output_dir)
            write_to_file(contours, os.path.join(output_dir, meta['image_id'][idx].replace('jpg', 'txt')))
Example #13
def inference(detector, test_loader, output_dir):

    total_time = 0.

    for i, (image, meta) in enumerate(test_loader):
        # print (image)
        image = to_device(image)

        torch.cuda.synchronize()
        start = time.time()

        idx = 0  # test mode can only run with batch_size == 1

        # get detection result
        contours, output = detector.detect(image)

        torch.cuda.synchronize()
        end = time.time()
        total_time += end - start
        fps = (i + 1) / total_time
        print('detect {} / {} images: {}. ({:.2f} fps)'.format(
            i + 1, len(test_loader), meta['image_id'][idx], fps))

        # visualization
        tr_pred, tcl_pred = output['tr'], output['tcl']
        img_show = image[idx].permute(1, 2, 0).cpu().numpy()
        img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)
        # print (meta)
        H, W = meta['Height'][idx].item(), meta['Width'][idx].item()
        img_show, contours = rescale_result(img_show, contours, H, W)
        # print (contours)
        pred_vis = visualize_detection(img_show, contours)
        path = os.path.join(cfg.vis_dir, '{}_deploy'.format(cfg.exp_name),
                            meta['image_id'][idx])
        cv2.imwrite(path, pred_vis)

        # write to file
        mkdirs(output_dir)
        write_to_file(
            contours,
            os.path.join(output_dir,
                         meta['image_id'][idx].replace('jpg', 'txt')))
Example #14
def save_model(model, epoch, lr, optimizer):

    exp_name = cfg.dataset_name + '_' + cfg.backbone
    save_dir = os.path.join(cfg.save_directory, exp_name)
    if not os.path.exists(save_dir):
        mkdirs(save_dir)

    save_path = os.path.join(save_dir, 'model_{}.pth'.format(epoch))
    print('Saving to {}.'.format(save_path))
    state_dict = {
        'lr': lr,
        'epoch': epoch,
        'model': model.state_dict() if not cfg.multi_gpu else model.module.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    torch.save(state_dict, save_path)
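Note: a hypothetical call site for save_model inside a training loop; cfg.save_freq is an assumption, while the train() signature follows Example #3:

for epoch in range(cfg.start_epoch, cfg.max_epoch):
    train(model, train_loader, criterion, scheduler, optimizer, epoch, logger)
    if epoch % cfg.save_freq == 0:
        current_lr = optimizer.param_groups[0]['lr']
        save_model(model, epoch, current_lr, optimizer)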
Example #15
    output_dir = os.path.join(cfg.output_dir, cfg.exp_name)
    inference(detector, test_loader, output_dir)

    # compute DetEval
    print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))
    subprocess.call([
        'python',
        'dataset/total_text/Evaluation_Protocol/Python_scripts/Deteval.py',
        args.exp_name, '--tr', '0.7', '--tp', '0.6'
    ])
    subprocess.call([
        'python',
        'dataset/total_text/Evaluation_Protocol/Python_scripts/Deteval.py',
        args.exp_name, '--tr', '0.8', '--tp', '0.4'
    ])
    print('End.')


if __name__ == "__main__":
    # parse arguments
    option = BaseOptions()
    args = option.initialize()

    update_config(cfg, args)
    print_config(cfg)

    vis_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))
    if not os.path.exists(vis_dir):
        mkdirs(vis_dir)
    # main
    main()
Example #16
def inference(detector, test_loader, output_dir):

    total_time = 0.
    post_all_time = 0.
    net_all_time = 0.
    backbone_all_time = 0.
    IM_all_time = 0.
    detach_all_time = 0.
    if cfg.exp_name != "MLT2017":
        osmkdir(output_dir)
    else:
        if not os.path.exists(output_dir):
            mkdirs(output_dir)
    for i, (image, train_mask, tr_mask, meta) in enumerate(test_loader):

        image, train_mask, tr_mask = to_device(image, train_mask, tr_mask)

        torch.cuda.synchronize()
        idx = 0  # test mode can only run with batch_size == 1

        # visualization
        img_show = image[idx].permute(1, 2, 0).cpu().numpy()
        img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)

        # compute time
        start = time.time()
        # get detection result
        contours, output, net_time, post_time = detector.detect(image, img_show)
        end = time.time()
        # total_time += end - start
        total_time += (net_time + post_time)
        post_all_time += post_time
        net_all_time += net_time
        backbone_all_time += output["backbone_time"]
        IM_all_time += output["IM_time"]
        detach_all_time += output["detach_time"]
        fps = (i + 1) / total_time
        print('detect {} / {} images: {}. ({:.2f} fps); '
              'backbone-time:{:.2f}, IM-time:{:.2f}, post-time:{:.2f}, '
              'Transfer-time:{:.2f}'.format(
                  i + 1, len(test_loader), meta['image_id'][idx], fps,
                  backbone_all_time * 1000 / (i + 1),
                  IM_all_time * 1000 / (i + 1),
                  post_all_time * 1000 / (i + 1),
                  detach_all_time * 1000 / (i + 1)))

        if cfg.exp_name == "Icdar2015" or cfg.exp_name == "MLT2017" or cfg.exp_name == "TD500":
            pred_vis = visualize_detection(img_show, output['bbox'], output['tr'])
        else:
            pred_vis = visualize_detection(img_show, contours, output['tr'])

        gt_contour = []
        for annot, n_annot in zip(meta['annotation'][idx], meta['n_annotation'][idx]):
            if n_annot.item() > 0:
                gt_contour.append(annot[:n_annot].int().cpu().numpy())
        gt_vis = visualize_gt(img_show, gt_contour, tr_mask[idx].cpu().numpy())
        im_vis = np.concatenate([pred_vis, gt_vis], axis=0)

        path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name), meta['image_id'][idx].split(".")[0]+".jpg")
        cv2.imwrite(path, im_vis)

        H, W = meta['Height'][idx].item(), meta['Width'][idx].item()
        img_show, contours = rescale_result(img_show, contours, H, W)

        # write to file
        if cfg.exp_name == "Icdar2015":
            fname = "res_" + meta['image_id'][idx].replace('jpg', 'txt')
            contours = data_transfer_ICDAR(contours)
            write_to_file(contours, os.path.join(output_dir, fname))
        elif cfg.exp_name == "MLT2017":
            out_dir = os.path.join(output_dir, str(cfg.checkepoch))
            if not os.path.exists(out_dir):
                mkdirs(out_dir)
            fname = meta['image_id'][idx].split("/")[-1].replace('ts', 'res')
            fname = fname.split(".")[0] + ".txt"
            data_transfer_MLT2017(contours, os.path.join(out_dir, fname))
        elif cfg.exp_name == "TD500":
            fname = "res_" + meta['image_id'][idx].split(".")[0]+".txt"
            data_transfer_TD500(contours, os.path.join(output_dir, fname))

        else:
            fname = meta['image_id'][idx].replace('jpg', 'txt')
            write_to_file(contours, os.path.join(output_dir, fname))
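Note: the running per-stage averages printed above can be factored into a small accumulator; a sketch that assumes detect() keeps returning per-stage times in seconds:

from collections import defaultdict


class StageTimer:
    # Accumulate per-stage times and report average milliseconds per image.
    def __init__(self):
        self.totals = defaultdict(float)
        self.count = 0

    def update(self, **stage_seconds):
        for name, seconds in stage_seconds.items():
            self.totals[name] += seconds
        self.count += 1

    def report(self):
        return {name: 1000.0 * total / self.count
                for name, total in self.totals.items()}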
Example #17
def inference(detector, test_loader, output_dir):

    total_time = 0.
    if cfg.exp_name != "MLT2017":
        osmkdir(output_dir)
    else:
        if not os.path.exists(output_dir):
            mkdirs(output_dir)
    for i, (image, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, meta) in enumerate(test_loader):

        image, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map = to_device(
            image, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)

        torch.cuda.synchronize()
        start = time.time()

        idx = 0 # test mode can only run with batch_size == 1

        # visualization
        img_show = image[idx].permute(1, 2, 0).cpu().numpy()
        img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)

        # get detection result
        contours, output = detector.detect(image, img_show)
        tr_pred, tcl_pred = output['tr'], output['tcl']

        torch.cuda.synchronize()
        end = time.time()
        total_time += end - start
        fps = (i + 1) / total_time
        print('detect {} / {} images: {}. ({:.2f} fps)'.format(i + 1, len(test_loader), meta['image_id'][idx], fps))

        pred_vis = visualize_detection(img_show, contours, tr_pred[1], tcl_pred[1])

        gt_contour = []
        for annot, n_annot in zip(meta['annotation'][idx], meta['n_annotation'][idx]):
            if n_annot.item() > 0:
                gt_contour.append(annot[:n_annot].int().cpu().numpy())
        gt_vis = visualize_gt(img_show, gt_contour,
                              tr_mask[idx].cpu().numpy(), tcl_mask[idx, :, :, 0].cpu().numpy())
        im_vis = np.concatenate([pred_vis, gt_vis], axis=0)
        # path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name), meta['image_id'][idx])
        # cv2.imwrite(path, im_vis)

        H, W = meta['Height'][idx].item(), meta['Width'][idx].item()
        img_show, contours = rescale_result(img_show, contours, H, W)

        # write to file
        if cfg.exp_name == "Icdar2015":
            fname = "res_" + meta['image_id'][idx].replace('jpg', 'txt')
            contours = data_transfer_ICDAR(contours)
            write_to_file(contours, os.path.join(output_dir, fname))
        elif cfg.exp_name == "MLT2017":
            path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name),
                                meta['image_id'][idx].split("/")[-1])
            cv2.imwrite(path, im_vis)

            out_dir = os.path.join(output_dir, str(cfg.checkepoch))
            if not os.path.exists(out_dir):
                mkdirs(out_dir)
            fname = meta['image_id'][idx].split("/")[-1].replace('ts', 'res')
            fname = fname.split(".")[0] + ".txt"
            data_transfer_MLT2017(contours, os.path.join(out_dir, fname))
        elif cfg.exp_name == "TD500":
            fname = "res_img_" + meta['image_id'][idx].replace('jpg', 'txt')
            data_transfer_TD500(contours, os.path.join(output_dir, fname))

        else:
            fname = meta['image_id'][idx].replace('jpg', 'txt')
            write_to_file(contours, os.path.join(output_dir, fname))
Example #18
    def __init__(self, log_path):

        mkdirs(log_path)
        self.writer = SummaryWriter(log_path)
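Note: assuming this is the __init__ of the LogSummary class used in Example #3 and that it keeps the SummaryWriter on self.writer, scalars can be logged as follows (tags and values are hypothetical):

logger = LogSummary('logs/demo_run')
logger.writer.add_scalar('train/loss', 0.42, global_step=100)
logger.writer.flush()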