Example #1
def _build_trainer(config_file: Dict):
    """Build train/val DataLoaders from the parsed config, run training, and return the Trainer."""
    train = DataLoader(
        PairedDataset.from_config(config_file["train"]),
        config_file["batch_size"],
        shuffle=True,
        drop_last=False,
    )

    val = DataLoader(
        PairedDataset.from_config(config_file["val"]),
        config_file["batch_size"],
        shuffle=True,
        drop_last=False,
    )

    trainer = Trainer(config_file, train=train, val=val)
    trainer.train()
    return trainer
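For context, a hypothetical call site for _build_trainer. Only the keys the function actually reads appear below; the batch size and the sub-config contents handed to PairedDataset.from_config are placeholders, not part of the original example.

config_file = {
    "batch_size": 16,  # placeholder value
    "train": {},       # whatever PairedDataset.from_config expects for the train split
    "val": {},         # likewise for the validation split
}
trainer = _build_trainer(config_file)  # builds both loaders and calls trainer.train()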
Example #2
def main(_):
    with open(FLAGS.config, "r") as f:
        config = yaml.safe_load(f)

    validate_config(config)

    train = DataLoader(
        PairedDataset.from_config(config["train"]),
        config["batch_size"],
        shuffle=True,
        drop_last=True,
    )

    val = DataLoader(
        PairedDataset.from_config(config["val"]),
        config["batch_size"],
        shuffle=True,
        drop_last=True,
    )

    Trainer(config, train=train, val=val).train()
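Example #2 reads the config path from an absl-style FLAGS object and takes an unused positional argument, so the surrounding module presumably defines the flag and hands main to app.run. A minimal sketch of that wiring, assuming absl-py and PyYAML (the flag's default and help text are guesses):

import yaml
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string("config", None, "Path to the YAML training config.")  # assumed definition
flags.mark_flag_as_required("config")

if __name__ == "__main__":
    app.run(main)  # absl passes leftover argv to main, hence the unused `_` parameter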
Example #3
train_transform = get_transforms(config["train"]["transform"])
val_transform = get_transforms(config["val"]["transform"])

train_ds = PascalCRFSegmentationDataset(
    config["train"]["path"],
    transform=train_transform,
    image_set="train",
    masks_folder=config["train"]["masks"],
    scale_factor=config["crf"]["scale_factor"],
)
val_ds = PascalCRFSegmentationDataset(
    config["val"]["path"],
    transform=val_transform,
    image_set="validation",
    masks_folder=config["val"]["masks"],
    scale_factor=config["crf"]["scale_factor"],
)

train_dl = torch.utils.data.DataLoader(train_ds,
                                       batch_size=config["batch_size"],
                                       shuffle=True,
                                       num_workers=12)
val_dl = torch.utils.data.DataLoader(val_ds,
                                     batch_size=config["batch_size"],
                                     shuffle=True,
                                     num_workers=12)

trainer = Trainer(config, train_dl, val_dl)
trainer.train()
Example #4
def _get_model(config):
    model_config = config['model']
    if model_config['name'] == 'ssd':
        model = build_ssd(model_config)
    elif model_config['name'] == 'retina_net':
        model = build_retinanet(model_config)
    else:
        raise ValueError("Model [%s] not recognized." % model_config['name'])
    return model


if __name__ == '__main__':
    config = get_config("config/train.yaml")

    batch_size = config.pop('batch_size')
    get_dataloader = partial(DataLoader,
                             batch_size=batch_size,
                             num_workers=cpu_count(),
                             shuffle=True,
                             drop_last=True,
                             collate_fn=detection_collate,
                             pin_memory=True)

    # Pop the 'train'/'val' sub-configs, build a dataset from each,
    # then wrap both in DataLoaders via the partially-applied settings above.
    datasets = map(config.pop, ('train', 'val'))
    datasets = map(get_dataset, datasets)
    train, val = map(get_dataloader, datasets)

    trainer = Trainer(_get_model(config).cuda(), config, train=train, val=val)
    trainer.train()
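Example #4 relies on a handful of names defined above the snippet. Only the standard-library and PyTorch imports can be stated with confidence; the project-specific helpers (get_config, get_dataset, detection_collate, build_ssd, build_retinanet, Trainer) live in the project's own modules and are not reproduced here:

from functools import partial            # used to pre-bind the DataLoader arguments
from multiprocessing import cpu_count    # num_workers=cpu_count(); os.cpu_count() would work as well
from torch.utils.data import DataLoader  # the loader class being partially applied
# get_config, get_dataset, detection_collate, build_ssd, build_retinanet and Trainer
# come from the project itself and are assumed to be imported alongside these.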