Ejemplo n.º 1
0
def test_wandb_pickle(tmpdir):
    """Check that a trainer carrying the wandb logger can be pickled."""
    tutils.reset_seed()

    # Imported lazily so the test is skipped cleanly when wandb is absent.
    from pytorch_lightning.logging import WandbLogger

    # tmpdir is a py.path.local fixture; WandbLogger wants a plain string.
    save_path = str(tmpdir)
    logger = WandbLogger(save_dir=save_path, anonymous=True)
Ejemplo n.º 2
0
def test_wandb_logger(tmpdir):
    """Smoke-test that the wandb logger can be constructed anonymously."""
    tutils.reset_seed()

    # Imported lazily so the test is skipped cleanly when wandb is absent.
    from pytorch_lightning.logging import WandbLogger

    save_path = os.path.join(tmpdir, "wandb")
    logger = WandbLogger(save_dir=save_path, anonymous=True)
Ejemplo n.º 3
0
def main():
    """Load the YAML experiment config, build the pipeline, and train it."""
    args = get_args()

    # yaml.safe_load is shorthand for yaml.load(f, Loader=yaml.SafeLoader).
    with open(args.config_path) as f:
        hparams = yaml.safe_load(f)

    pipeline = FaceMask(hparams)

    # Ensure the checkpoint directory exists before the trainer writes to it.
    checkpoint_dir = Path(hparams["checkpoint_callback"]["filepath"])
    checkpoint_dir.mkdir(exist_ok=True, parents=True)

    experiment_logger = WandbLogger(hparams["experiment_name"])
    trainer = object_from_dict(
        hparams["trainer"],
        logger=experiment_logger,
        checkpoint_callback=object_from_dict(hparams["checkpoint_callback"]),
    )

    trainer.fit(pipeline)
Ejemplo n.º 4
0
def _experiment_name(hparams):
    """Compose a human-readable experiment name from the active options.

    The name starts with ``loss_strategy+architecture``, appends a short tag
    for each enabled feature, and ends with the dataset in parentheses.
    """
    name = hparams.loss_strategy + "+" + hparams.architecture
    if hparams.gradient_penalty_strategy != "none":
        name += "+" + hparams.gradient_penalty_strategy
    if hparams.multi_scale_gradient:
        name += "+msg"
    if hparams.instance_noise:
        name += "+in"
    if hparams.spectral_normalization:
        name += "+sn"
    if hparams.equalized_learning_rate:
        name += "+eqlr"
    return name + " (" + hparams.dataset + ")"


def _build_logger(hparams, experiment_name, model):
    """Create the experiment logger selected by ``hparams.logger``.

    Returns ``False`` when logging is disabled (``"none"``).
    Raises ``ValueError`` for an unrecognized logger name.
    """
    if hparams.logger == "none":
        return False
    if hparams.logger == "comet.ml":
        return CometLogger(
            api_key=os.environ["COMET_KEY"],
            workspace=os.environ["COMET_WORKSPACE"],  # Optional
            project_name="gan-research-project",  # Optional
            rest_api_key=os.environ["COMET_REST_KEY"],  # Optional
            experiment_name=experiment_name)
    if hparams.logger == "wandb":
        logger = WandbLogger(project="gan-research-project",
                             name=experiment_name)
        # Attach wandb to the model so it tracks gradients/parameters.
        logger.watch(model)
        return logger
    if hparams.logger == "tensorboard":
        return TensorBoardLogger(save_dir=os.getcwd() + "/lightning_logs")
    # Fixed typo in the original message ("Must specific a logger").
    raise ValueError("Must specify a logger")


def _build_checkpoint_callback(hparams):
    """Create the ModelCheckpoint callback, or ``False`` when disabled.

    The checkpoint filename embeds the loss strategy, gradient-penalty
    strategy, dataset, and the monitored metrics.
    """
    if not hparams.save_checkpoints:
        return False
    return ModelCheckpoint(
        filepath=os.getcwd() + "/checkpoints/{epoch}-" +
        hparams.loss_strategy + "+" + hparams.gradient_penalty_strategy +
        "+" + hparams.dataset +
        "-{discriminator_loss:.5f}-{ic_score_mean:.5f}",
        monitor="discriminator_loss",
        mode="max",
        save_top_k=1,
        period=1)


def main(hparams):
    """Build the GAN (generator + discriminator), configure logging and
    checkpointing from ``hparams``, and run training.

    Args:
        hparams: parsed hyper-parameter namespace; must expose the attributes
            read below (loss_strategy, architecture, logger, gpus, ...).

    Raises:
        ValueError: if ``hparams.logger`` names an unknown logger backend.
    """
    generator = Generator(hparams)
    discriminator = Discriminator(hparams)

    # scorer = models.mobilenet_v2(pretrained=True)
    model = GAN(hparams, generator, discriminator)

    experiment_name = _experiment_name(hparams)
    logger = _build_logger(hparams, experiment_name, model)
    checkpoint_callback = _build_checkpoint_callback(hparams)

    trainer = Trainer(min_epochs=hparams.min_epochs,
                      max_epochs=hparams.max_epochs,
                      gpus=hparams.gpus,
                      nb_gpu_nodes=hparams.nodes,
                      accumulate_grad_batches=hparams.accumulate_grad_batches,
                      progress_bar_refresh_rate=20,
                      early_stop_callback=False,
                      checkpoint_callback=checkpoint_callback,
                      logger=logger,
                      fast_dev_run=False,
                      num_sanity_val_steps=0,
                      distributed_backend="dp",
                      weights_summary=None)

    trainer.fit(model)