Example #1
import math
import os

import torch

# FOTSModel is the project's text detection/recognition model class.

def restore_checkpoint(folder, resume):
    """Build the model, optimizer and LR scheduler, restoring their state from
    the saved checkpoint when `resume` is True and the file exists."""
    model = FOTSModel().to(torch.device("cuda"))
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001,
                                 weight_decay=1e-5)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.5,
        patience=32,
        verbose=True,
        threshold=0.05,
        threshold_mode='rel')

    checkpoint_name = os.path.join(folder, 'epoch_8_checkpoint.pt')
    if os.path.isfile(checkpoint_name) and resume:
        checkpoint = torch.load(checkpoint_name)
        model.load_state_dict(checkpoint['model_state_dict'])
        epoch = checkpoint['epoch'] + 1
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
        best_score = checkpoint['best_score']
        return epoch, model, optimizer, lr_scheduler, best_score
    else:
        # No checkpoint to resume from: start at epoch 0 with an "infinite"
        # best score (lower is better).
        return 0, model, optimizer, lr_scheduler, +math.inf
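For context, the checkpoint file read above must contain the keys the function looks up: 'epoch', 'model_state_dict', 'optimizer_state_dict', 'lr_scheduler_state_dict' and 'best_score'. Below is a minimal sketch of the saving side; the save_checkpoint name and the file-naming scheme are hypothetical, only the dictionary keys come from the example itself.

def save_checkpoint(folder, epoch, model, optimizer, lr_scheduler, best_score):
    # Hypothetical counterpart to restore_checkpoint: it writes exactly the
    # keys that restore_checkpoint reads back.
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'lr_scheduler_state_dict': lr_scheduler.state_dict(),
        'best_score': best_score,
    }, os.path.join(folder, 'epoch_{}_checkpoint.pt'.format(epoch)))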
Example #2
import math
import os

import torch

def restore_checkpoint(folder, resume):
    """Build the model, optimizer and LR scheduler, restoring their state from
    'last_checkpoint.pt' when `resume` is True and the file exists."""
    model = FOTSModel().to(torch.device("cuda"))
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=1e-5)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=25,
        verbose=True, threshold=0.0001, threshold_mode='rel')
    # Alternative schedule:
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, 8, 14])

    checkpoint_name = os.path.join(folder, 'last_checkpoint.pt')
    if os.path.isfile(checkpoint_name) and resume:
        checkpoint = torch.load(checkpoint_name)
        epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
        best_score = checkpoint['best_score']
        return epoch, model, optimizer, lr_scheduler, best_score
    else:
        return 0, model, optimizer, lr_scheduler, +math.inf
Example #3
import torch

# `FOTSModel` and `DEVICE` (a torch.device) are defined elsewhere in the module.
def _load_model(model_path):
    """Load model from given path to available device."""
    model = FOTSModel()
    model.to(DEVICE)
    # map_location lets a state_dict saved on GPU load onto the current device.
    model.load_state_dict(torch.load(model_path, map_location=DEVICE))
    return model
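Note that, unlike the other examples, this helper expects the file at model_path to hold only the model's state_dict (it is passed straight to load_state_dict), not a full checkpoint dictionary. A minimal usage sketch, assuming a CPU/GPU-agnostic DEVICE; the checkpoint path is hypothetical:

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the weights and switch to inference mode before running predictions.
model = _load_model('data/model_checkpoint/best_model.pt')  # hypothetical path
model.eval()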
Example #4
import argparse
import os

import torch

# `FOTSModel` and `test` are provided by the surrounding project.

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--images-folder',
                        type=str,
                        default='data/ICDAR2015/ch4_test_images',
                        help='path to the folder with test images')
    parser.add_argument('--output-folder',
                        type=str,
                        default='fots_test_results',
                        help='path to the output folder with result labels')
    parser.add_argument('--checkpoint',
                        type=str,
                        default='data/model_checkpoint/epoch_276_checkpoint.pt',
                        help='path to the checkpoint to test')
    parser.add_argument('--height-size',
                        type=int,
                        default=1260,
                        help='height to which input images are resized')
    args = parser.parse_args()

    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)

    # Restore the trained weights and run the test pass on the GPU without
    # gradient tracking.
    net = FOTSModel()
    checkpoint = torch.load(args.checkpoint)
    print('Epoch ', checkpoint['epoch'])
    net.load_state_dict(checkpoint['model_state_dict'])
    net = net.eval().cuda()
    with torch.no_grad():
        test(net, args.images_folder, args.output_folder, args.height_size)