Example #1
import math
import os

import torch

# FOTSModel is defined elsewhere in the surrounding project.


def restore_checkpoint(folder, resume):
    """Build the model, optimizer and scheduler, restoring saved state when `resume` is set."""
    model = FOTSModel().to(torch.device("cuda"))
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001,
                                 weight_decay=1e-5)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.5,
        patience=32,
        verbose=True,
        threshold=0.05,
        threshold_mode='rel')

    checkpoint_name = os.path.join(folder, 'epoch_8_checkpoint.pt')
    if os.path.isfile(checkpoint_name) and resume:
        checkpoint = torch.load(checkpoint_name)
        model.load_state_dict(checkpoint['model_state_dict'])
        epoch = checkpoint['epoch'] + 1
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
        best_score = checkpoint['best_score']
        return epoch, model, optimizer, lr_scheduler, best_score
    else:
        return 0, model, optimizer, lr_scheduler, math.inf
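For reference, a checkpoint that restore_checkpoint() can consume has to be written with matching keys. A minimal saving sketch, assuming the same model, optimizer and lr_scheduler objects; save_checkpoint is a hypothetical helper, not part of the original code:

def save_checkpoint(folder, epoch, model, optimizer, lr_scheduler, best_score):
    # Keys mirror exactly what restore_checkpoint() reads back.
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'lr_scheduler_state_dict': lr_scheduler.state_dict(),
        'best_score': best_score,
    }, os.path.join(folder, 'epoch_{}_checkpoint.pt'.format(epoch)))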
Example #2
def main(config):
    """Main entry point of train module."""
    # Initialize the dataset
    # Full dataset
    # dataset = ICDARDataset('/content/ch4_training_images', '/content/ch4_training_localization_transcription_gt')
    data_df = pd.read_csv(f"{config['data_base_dir']}/train.csv")
    dataset = Synth800kPreprocessedDataset(config["data_base_dir"], data_df)

    # Train test split
    val_size = config["val_fraction"]
    val_len = int(val_size * len(dataset))
    train_len = len(dataset) - val_len
    icdar_train_dataset, icdar_val_dataset = torch.utils.data.random_split(
        dataset, [train_len, val_len])

    icdar_train_data_loader = DataLoader(icdar_train_dataset,
                                         pin_memory=True,
                                         **config["dataset_config"],
                                         worker_init_fn=seed_worker
                                         # collate_fn=icdar_collate
                                         )

    icdar_val_data_loader = DataLoader(icdar_val_dataset,
                                       **config["dataset_config"],
                                       pin_memory=True,
                                       worker_init_fn=seed_worker
                                       # collate_fn=icdar_collate
                                       )

    # Initialize the model
    model = FOTSModel()

    # Count trainable parameters
    print(f'The model has {count_parameters(model):,} trainable parameters.')

    loss = FOTSLoss(config)
    optimizer = model.get_optimizer(config["optimizer"],
                                    config["optimizer_config"])

    lr_scheduler = getattr(optim.lr_scheduler, config["lr_scheduler"],
                           optim.lr_scheduler.ReduceLROnPlateau)(
                               optimizer, **config["lr_scheduler_config"])

    trainer = Train(model, icdar_train_data_loader, icdar_val_data_loader,
                    loss, fots_metric, optimizer, lr_scheduler, config)

    trainer.train()
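main() pulls every setting from a single config mapping. A minimal sketch of the keys it touches (the values are illustrative assumptions, not the project's actual settings; FOTSLoss and get_optimizer may read further keys that are omitted here):

config = {
    'data_base_dir': '/content/synth800k',   # directory containing train.csv
    'val_fraction': 0.1,                     # fraction of samples held out for validation
    'dataset_config': {'batch_size': 8, 'num_workers': 4},
    'optimizer': 'Adam',
    'optimizer_config': {'lr': 0.001, 'weight_decay': 1e-5},
    'lr_scheduler': 'ReduceLROnPlateau',
    'lr_scheduler_config': {'mode': 'min', 'factor': 0.5, 'patience': 25},
}

The worker_init_fn=seed_worker hook passed to both DataLoaders is not shown in this example; a common implementation, assuming the project follows the standard PyTorch reproducibility recipe, looks like this:

import random

import numpy as np
import torch


def seed_worker(worker_id):
    # Derive a per-worker seed from the DataLoader's base seed so that
    # NumPy and random draws differ across workers but stay reproducible.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)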
Example #3
def restore_checkpoint(folder, resume):
    model = FOTSModel().to(torch.device("cuda"))
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.005,
                                 weight_decay=1e-5)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.5,
        patience=25,
        verbose=True,
        threshold=0.0001,
        threshold_mode='rel')
    # Alternative: decay the learning rate at fixed epochs instead.
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, 8, 14])

    checkpoint_path = os.path.join(folder, 'last_checkpoint.pt')
    if os.path.isfile(checkpoint_path) and resume:
        checkpoint = torch.load(checkpoint_path)
        epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
        best_score = checkpoint['best_score']
        return epoch, model, optimizer, lr_scheduler, best_score
    else:
        return 0, model, optimizer, lr_scheduler, math.inf
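Both restore_checkpoint() variants return the same five-tuple, so a training loop can resume transparently. A hypothetical call site (train_one_epoch and max_epochs are assumptions, not part of the original code):

epoch, model, optimizer, lr_scheduler, best_score = restore_checkpoint('checkpoints', True)
for e in range(epoch, max_epochs):
    val_loss = train_one_epoch(model, optimizer)
    lr_scheduler.step(val_loss)  # ReduceLROnPlateau steps on the monitored metric
    best_score = min(best_score, val_loss)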
Example #4
def _load_model(model_path):
    """Load model from given path to available device."""
    model = FOTSModel()
    model.to(DEVICE)
    model.load_state_dict(torch.load(model_path, map_location=DEVICE))
    return model
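_load_model() returns the network already moved to DEVICE, but callers still have to disable training-mode layers before inference. A short usage sketch (the checkpoint path and input tensor are illustrative):

model = _load_model('data/model_checkpoint/best_model.pt')
model.eval()
with torch.no_grad():
    predictions = model(images.to(DEVICE))  # images: a preprocessed batch tensor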
Example #5
import argparse
import os

import torch

# FOTSModel and test() come from the surrounding project.

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--images-folder',
                        type=str,
                        default='data/ICDAR2015/ch4_test_images',
                        help='path to the folder with test images')
    parser.add_argument('--output-folder',
                        type=str,
                        default='fots_test_results',
                        help='path to the output folder with result labels')
    parser.add_argument(
        '--checkpoint',
        type=str,
        default='data/model_checkpoint/epoch_276_checkpoint.pt',
        help='path to the checkpoint to test')
    parser.add_argument('--height-size',
                        type=int,
                        default=1260,
                        help='height size to resize input image')
    args = parser.parse_args()

    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)

    net = FOTSModel()
    checkpoint = torch.load(args.checkpoint)
    print('Epoch ', checkpoint['epoch'])
    net.load_state_dict(checkpoint['model_state_dict'])
    net = net.eval().cuda()
    with torch.no_grad():
        test(net, args.images_folder, args.output_folder, args.height_size)
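One caveat: torch.load() without map_location fails on a CPU-only machine when the checkpoint holds CUDA tensors. A defensive variant of the loading step above (same checkpoint keys, device picked at runtime):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load(args.checkpoint, map_location=device)
net = FOTSModel()
net.load_state_dict(checkpoint['model_state_dict'])
net = net.eval().to(device)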