Example #1
def main():

    # seed for the random number generators
    global seed
    #seed = np.random.randint(0, 10000)
    seed = 9345
    print(seed)

    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    # deterministic cudnn
    print('Additional cudnn determinism')
    torch.backends.cudnn.deterministic = True

    # get the working directory
    root = os.getcwd()

    out_path = os.path.join(root, 'masks')
    if not os.path.isdir(out_path):
        os.mkdir(out_path)

    # Load dataset
    trainset = custom_dset(root, 'train')
    valset = custom_dset(root, 'validation')
    train_loader = DataLoader(trainset,
                              batch_size=28,
                              shuffle=True,
                              collate_fn=collate_fn,
                              num_workers=4)

    # due to a technical issue, validation uses shuffle=True and a small batch size
    val_loader = DataLoader(valset,
                            batch_size=4,
                            shuffle=True,
                            collate_fn=collate_fn,
                            num_workers=4)

    # global variables
    global val_len
    global train_len
    val_len = len(valset)
    train_len = len(trainset)

    # the ShapeRecognizer model counts three types of shapes
    net = ShapeRecognizer()
    net = net.cuda()
    # optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)

    train(epochs=11,
          net=net,
          train_loader=train_loader,
          val_loader=val_loader,
          optimizer=optimizer,
          save_step=2,
          out_path=out_path)
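
Note: setting torch.backends.cudnn.deterministic = True on its own does not guarantee reproducible runs; cuDNN's autotuner should be disabled as well. A minimal sketch of a fuller seeding helper (the name set_reproducible is hypothetical):

import random

import numpy as np
import torch

def set_reproducible(seed=9345):
    # seed every RNG the training code touches
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers multi-GPU setups
    # force deterministic cuDNN kernels and stop the autotuner from
    # picking a faster, possibly non-deterministic algorithm
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False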
Example #2
def main():
    root_path = '/home/mathu/Documents/express_recognition/data/telephone_txt/result/'
    train_img = root_path + 'print_pic'
    train_txt = root_path + 'print_txt'
    # root_path = '/home/mathu/Documents/express_recognition/data/icdar2015/'
    # train_img = root_path + 'train2015'
    # train_txt = root_path + 'train_label'

    trainset = custom_dset(train_img, train_txt)
    trainloader = DataLoader(trainset,
                             batch_size=16,
                             shuffle=True,
                             collate_fn=collate_fn,
                             num_workers=4)
    model = East()
    model = model.cuda()
    model.load_state_dict(torch.load('./checkpoints_total/model_1440.pth'))

    crit = LossFunc()
    weight_decay = 0
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    # optionally: torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)

    train(epochs=1500,
          model=model,
          trainloader=trainloader,
          crit=crit,
          optimizer=optimizer,
          scheduler=scheduler,
          save_step=20,
          weight_decay=weight_decay)

    write.close()
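
Note: write is not defined in this snippet; it is presumably a module-level log writer that train() records into and main() closes. A minimal sketch under that assumption, using TensorBoard's SummaryWriter (the log_iteration helper is hypothetical):

from torch.utils.tensorboard import SummaryWriter

# module-level writer, assumed to be shared with train()
write = SummaryWriter(log_dir='./runs/east')

def log_iteration(step, loss):
    # record the training loss so it appears in TensorBoard
    write.add_scalar('train/loss', loss, global_step=step)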
Example #3
def main():
    global args
    args = parser.parse_args()
    init_log('global', logging.INFO)
    logger = logging.getLogger('global')

    train_data = custom_dset(args.data_img, args.data_txt)
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
                              collate_fn=collate_fn, num_workers=args.workers)
    logger.info("==============Build Dataset Done==============")

    model = East(args.pretrained)
    logger.info("==============Build Model Done================")
    logger.info(model)

    model = torch.nn.DataParallel(model).cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            pretrained_dict = torch.load(args.resume)
            model.load_state_dict(pretrained_dict, strict=True)
            logger.info("=> loaded checkpoint '{}'".format(args.resume))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    crit = LossFunc()

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)

    train(epochs=args.epochs, model=model, train_loader=train_loader,
          crit=crit, optimizer=optimizer, scheduler=scheduler,
          save_step=args.save_freq, weight_decay=args.weight_decay)
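
The parser here is defined at module level and is not shown. A sketch of what it presumably accepts, reconstructed only from the attributes the snippet reads (defaults are guesses borrowed from the other examples):

import argparse

parser = argparse.ArgumentParser(description='EAST training')
parser.add_argument('--data-img', dest='data_img', required=True)
parser.add_argument('--data-txt', dest='data_txt', required=True)
parser.add_argument('--batch-size', dest='batch_size', type=int, default=16)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--pretrained', default=None)
parser.add_argument('--resume', default='')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--epochs', type=int, default=1500)
parser.add_argument('--save-freq', dest='save_freq', type=int, default=20)
parser.add_argument('--weight-decay', dest='weight_decay', type=float, default=0.0)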
Example #4
def main():
    root_path = './dataset/'
    train_img = root_path + 'train2015/'
    train_txt = root_path + 'train_label/'

    trainset = custom_dset(train_img, train_txt)
    print(trainset)
    trainloader = DataLoader(trainset,
                             batch_size=16,
                             shuffle=True,
                             collate_fn=collate_fn,
                             num_workers=4)
    model = East()
    model = model.cuda()

    crit = LossFunc()
    weight_decay = 0
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    # optionally: torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)

    train(epochs=1500,
          model=model,
          trainloader=trainloader,
          crit=crit,
          optimizer=optimizer,
          scheduler=scheduler,
          save_step=20,
          weight_decay=weight_decay)

    write.close()
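
custom_dset and collate_fn are imported from elsewhere in these repositories and are not shown. A minimal sketch of what the collate function might look like, assuming each sample is an (img, score_map, geo_map, training_mask) tuple of equal-sized tensors; the real implementation may differ (Example #8's variant also yields coord_ids):

import torch

def collate_fn(batch):
    # stack per-sample tensors into batched tensors; the real
    # implementation may pad or resize before stacking
    imgs, score_maps, geo_maps, masks = zip(*batch)
    return (torch.stack(imgs, 0),
            torch.stack(score_maps, 0),
            torch.stack(geo_maps, 0),
            torch.stack(masks, 0))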
Example #5
def model_init(config):
    train_root_path = os.path.abspath(os.path.join(config["dataroot"],
                                                   'train'))
    train_img = os.path.join(train_root_path, 'img')
    train_gt = os.path.join(train_root_path, 'gt')

    trainset = custom_dset(train_img, train_gt)
    train_loader = DataLoader(trainset,
                              batch_size=config["train_batch_size_per_gpu"] *
                              config["gpu"],
                              shuffle=True,
                              collate_fn=collate_fn,
                              num_workers=config["num_workers"])

    logging.debug('Data loader created: Batch_size:{}, GPU {}:({})'.format(
        config["train_batch_size_per_gpu"] * config["gpu"], config["gpu"],
        config["gpu_ids"]))

    # Model
    model = East()
    model = nn.DataParallel(model, device_ids=config["gpu_ids"])
    model = model.cuda()
    init_weights(model, init_type=config["init_type"])
    logging.debug("Model initiated, init type: {}".format(config["init_type"]))

    cudnn.benchmark = True
    criterion = LossFunc()
    optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)

    # init or resume
    if config["resume"] and os.path.isfile(config["checkpoint"]):
        start_epoch = load_checkpoint(config, model, optimizer)
    else:
        start_epoch = 0
    logging.debug("Model is running...")
    return model, criterion, optimizer, scheduler, train_loader, start_epoch
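
load_checkpoint is not shown. A sketch consistent with the checkpoint layout used in Example #6 ('epoch', 'state_dict', 'optimizer' keys); the real helper may differ:

import logging
import os

import torch

def load_checkpoint(config, model, optimizer):
    # restore model/optimizer state and return the epoch to resume from
    checkpoint = torch.load(os.path.abspath(config["checkpoint"]))
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    logging.debug("Resumed from epoch {}".format(checkpoint['epoch']))
    return checkpoint['epoch']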
Example #6
def main():
    hmean = 0.0
    is_best = False

    warnings.simplefilter('ignore', np.RankWarning)
    # Prepare for dataset
    print('EAST <==> Prepare <==> DataLoader <==> Begin')
    # train_root_path = os.path.abspath(os.path.join('./dataset/', 'train'))
    train_root_path = cfg.dataroot
    train_img = os.path.join(train_root_path, 'img')
    train_gt = os.path.join(train_root_path, 'gt')

    trainset = custom_dset(train_img, train_gt)
    train_loader = DataLoader(trainset,
                              batch_size=cfg.train_batch_size_per_gpu *
                              cfg.gpu,
                              shuffle=True,
                              collate_fn=collate_fn,
                              num_workers=cfg.num_workers)
    print('EAST <==> Prepare <==> Batch_size:{} <==> Begin'.format(
        cfg.train_batch_size_per_gpu * cfg.gpu))
    print('EAST <==> Prepare <==> DataLoader <==> Done')

    # test dataloader
    """
    for i in range(100000):
        for j, (a,b,c,d) in enumerate(train_loader):
            print(i, j,'/',len(train_loader))
    """

    # Model
    print('EAST <==> Prepare <==> Network <==> Begin')
    model = East()
    model = nn.DataParallel(model, device_ids=cfg.gpu_ids)
    model = model.cuda()
    init_weights(model, init_type=cfg.init_type)
    cudnn.benchmark = True

    criterion = LossFunc()
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)

    # init or resume
    if cfg.resume and os.path.isfile(cfg.checkpoint):
        weightpath = os.path.abspath(cfg.checkpoint)
        print(
            "EAST <==> Prepare <==> Loading checkpoint '{}' <==> Begin".format(
                weightpath))
        checkpoint = torch.load(weightpath)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print(
            "EAST <==> Prepare <==> Loading checkpoint '{}' <==> Done".format(
                weightpath))
    else:
        start_epoch = 0
    print('EAST <==> Prepare <==> Network <==> Done')

    for epoch in range(start_epoch, cfg.max_epochs):

        train(train_loader, model, criterion, scheduler, optimizer, epoch)

        if epoch % cfg.eval_iteration == 0:

            # create res_file and img_with_box
            output_txt_dir_path = predict(model, criterion, epoch)

            # Zip file
            submit_path = MyZip(output_txt_dir_path, epoch)

            # submit and compute Hmean
            hmean_ = compute_hmean(submit_path)

            if hmean_ > hmean:
                hmean = hmean_
                is_best = True
            else:
                is_best = False

            state = {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'is_best': is_best,
            }
            save_checkpoint(state, epoch)
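
save_checkpoint is not shown. A sketch that persists the state dict built above; the path layout and best-model copy are assumptions, only the keys of state come from the snippet:

import os
import shutil

import torch

def save_checkpoint(state, epoch, save_dir='./checkpoints'):
    os.makedirs(save_dir, exist_ok=True)
    path = os.path.join(save_dir, 'checkpoint_{}.pth'.format(epoch))
    torch.save(state, path)
    if state.get('is_best'):
        # keep a stable copy of the best-scoring weights
        shutil.copyfile(path, os.path.join(save_dir, 'model_best.pth'))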
Example #7
def main():
    warnings.simplefilter('ignore', np.RankWarning)
    # save train config
    save_config = "cp ./*.py %s" % cfg.log_path
    os.system(save_config)
    # Prepare for dataset
    train_path = os.path.join(cfg.dataroot, 'train')
    train_img = os.path.join(train_path, 'img')
    train_gt = os.path.join(train_path, 'gt')

    trainset = custom_dset(train_img, train_gt)
    train_loader = DataLoader(trainset,
                              batch_size=cfg.train_batch_size,
                              shuffle=True,
                              collate_fn=collate_fn,
                              num_workers=cfg.num_workers)
    print('load img from {}'.format(train_img))
    print('load gt from {}'.format(train_gt))
    print('Data loader Done')

    # Model
    model = East()
    model = nn.DataParallel(model, device_ids=cfg.gpu_ids)
    model = model.cuda()
    init_weights(model, init_type=cfg.init_type)
    cudnn.benchmark = True
    print('Model initialization Done')

    criterion = LossFunc()
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)
    print('Criterion and optimizer Done')

    # init or resume (the optimizer must exist before its state is restored)
    if cfg.resume and os.path.isfile(cfg.checkpoint):
        weightpath = os.path.abspath(cfg.checkpoint)
        print("=> loading checkpoint '{}'".format(weightpath))
        checkpoint = torch.load(weightpath)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            weightpath, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(cfg.checkpoint))
        start_epoch = 0

    for epoch in range(start_epoch, cfg.max_epochs):

        train(train_loader, model, criterion, scheduler, optimizer, epoch)

        if (epoch + 1) > 100 and (epoch + 1) % cfg.save_iteration == 0:
            state = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            save_checkpoint(state, epoch + 1)
            print('epoch {} saved'.format(epoch + 1))

        if (epoch + 1) > 400 and (epoch + 1) % cfg.eval_iteration == 0:

            predict(model, epoch + 1)
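
With max_epochs around 1500 but step_size=10000, the StepLR only ever fires if it is advanced once per batch rather than once per epoch. A sketch of the loop shape under that assumption; the criterion's argument order and the EAST outputs are guesses, only train()'s signature comes from the snippet:

def train(train_loader, model, criterion, scheduler, optimizer, epoch):
    model.train()
    for img, score_map, geo_map, training_mask in train_loader:
        f_score, f_geometry = model(img.cuda())  # assumed EAST outputs
        loss = criterion(score_map.cuda(), geo_map.cuda(),
                         f_score, f_geometry, training_mask.cuda())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()  # per-iteration step, so step_size=10000 is reachable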
Example #8
def main():
    warnings.simplefilter('ignore', np.RankWarning)
    # collect the training video folders
    video_root_path = os.path.abspath('./dataset/train/')
    video_name_list = sorted(
        [p for p in os.listdir(video_root_path) if p.split('_')[0] == 'Video'])
    #print('video_name_list', video_name_list)
    print('EAST <==> Prepare <==> Network <==> Begin')
    model = East()
    AGD_model = AGD()
    model = nn.DataParallel(model, device_ids=cfg.gpu_ids)
    #AGD_model = nn.DataParallel(AGD_model, device_ids=cfg.gpu_ids)
    model = model.cuda()
    AGD_model = AGD_model.cuda()
    init_weights(model, init_type=cfg.init_type)
    cudnn.benchmark = True

    criterion1 = LossFunc()
    criterion2 = Ass_loss()

    optimizer1 = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    optimizer2 = torch.optim.Adam(AGD_model.parameters(), lr=cfg.lr)
    scheduler = lr_scheduler.StepLR(optimizer1, step_size=10000, gamma=0.94)

    # init or resume
    if cfg.resume and os.path.isfile(cfg.checkpoint):
        weightpath = os.path.abspath(cfg.checkpoint)
        print(
            "EAST <==> Prepare <==> Loading checkpoint '{}' <==> Begin".format(
                weightpath))
        checkpoint = torch.load(weightpath)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        #AGD_model.load_state_dict(checkpoint['model2.state_dict'])
        optimizer1.load_state_dict(checkpoint['optimizer'])
        #optimizer2.load_state_dict(checkpoint['optimizer2'])
        print(
            "EAST <==> Prepare <==> Loading checkpoint '{}' <==> Done".format(
                weightpath))
    else:
        start_epoch = 0
    print('EAST <==> Prepare <==> Network <==> Done')

    for epoch in range(start_epoch + 1, cfg.max_epochs):
        for video_name in video_name_list:
            print(
                'EAST <==> epoch:{} <==> Prepare <==> DataLoader <==>{} Begin'.
                format(epoch, video_name))
            trainset = custom_dset(os.path.join(video_root_path, video_name))
            #sampler = sampler_for_video_clip(len(trainset))
            train_loader = DataLoader(trainset,
                                      batch_size=cfg.train_batch_size_per_gpu *
                                      cfg.gpu,
                                      shuffle=False,
                                      collate_fn=collate_fn,
                                      num_workers=cfg.num_workers,
                                      drop_last=True)
            print('EAST <==> Prepare <==> Batch_size:{} <==> Begin'.format(
                cfg.train_batch_size_per_gpu * cfg.gpu))
            print(
                'EAST <==> epoch:{} <==> Prepare <==> DataLoader <==>{} Done'.
                format(epoch, video_name))

            train(train_loader, model, AGD_model, criterion1, criterion2,
                  scheduler, optimizer1, optimizer2, epoch)
            '''
            for i, (img, score_map, geo_map, training_mask, coord_ids) in enumerate(train_loader):
                print('i{} img.shape:{} geo_map.shape{} training_mask.shape{} coord_ids.len{}'.format(i, score_map.shape, geo_map.shape, training_mask.shape, len(coord_ids)))
            '''

        if epoch % cfg.eval_iteration == 0:
            state = {
                'epoch': epoch,
                'model1.state_dict': model.state_dict(),
                'model2.state_dict': AGD_model.state_dict(),
                'optimizer1': optimizer1.state_dict(),
                'optimizer2': optimizer2.state_dict()
            }
            save_checkpoint(state, epoch)
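
Note that the resume branch above loads checkpoint['state_dict'] and checkpoint['optimizer'], while the save path writes model1.state_dict/optimizer1 keys, so checkpoints written by this script cannot be resumed by it as-is (perhaps the initial checkpoint came from a single-model run). A sketch of a resume block matching the layout actually saved here:

checkpoint = torch.load(os.path.abspath(cfg.checkpoint))
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model1.state_dict'])
AGD_model.load_state_dict(checkpoint['model2.state_dict'])
optimizer1.load_state_dict(checkpoint['optimizer1'])
optimizer2.load_state_dict(checkpoint['optimizer2'])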