# Example 1
def main(params):
    """Parse command-line params, build the signal-game model, and train it.

    Args:
        params: list of command-line argument strings forwarded to argparse
            (as accepted by ``ArgumentParser.parse_args``).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser = populate_experiment_params(arg_parser)
    arg_parser = populate_common_params(arg_parser)
    opts = arg_parser.parse_args(params)

    # fix seed for reproducibility across runs
    pl.seed_everything(opts.random_seed)

    signal_game = get_model(opts)

    experiment_name = 'signal-game'
    model_name = '%s/%s' % (experiment_name, opts.mode)
    other_info = [
        "lr-{}".format(opts.lr),
    ]

    other_info.append("entrcoeff-{}".format(opts.entropy_coeff))

    # Record mode-specific hyperparameters in the run name so TensorBoard
    # runs are distinguishable.
    if opts.mode == "gs":
        if opts.straight_through:
            other_info.append("straight_through")
        other_info.append("decay-{}".format(opts.temperature_decay))
        other_info.append("updatefreq-{}".format(opts.temperature_update_freq))
    elif opts.mode == 'sfe':
        other_info.append("baseline-{}".format(opts.baseline_type))
    elif opts.mode == 'vimco':
        other_info.append("k-{}".format(opts.vimco_k))
    elif opts.mode == "marg":
        other_info.append("norm-{}".format(opts.normalizer))
    elif opts.mode == 'sumsample':
        other_info.append("k-{}".format(opts.topk))
        other_info.append("baseline-{}".format(opts.baseline_type))

    model_name = '%s/%s' % (model_name, '_'.join(other_info))

    tb_logger = pl_loggers.TensorBoardLogger('logs/', name=model_name)

    tb_logger.log_hyperparams(opts, metrics=None)

    trainer = pl.Trainer(
        progress_bar_refresh_rate=20,
        logger=tb_logger,
        callbacks=[CheckpointEveryNSteps(opts.batches_per_epoch)],
        max_steps=opts.batches_per_epoch * opts.n_epochs,
        # BUG FIX: use floor division so Lightning receives an int batch
        # count. True division produced a float, which Lightning interprets
        # as a *fraction* of batches and rejects when > 1.0 — the test-batch
        # line below already used `//` correctly.
        limit_val_batches=1024 // opts.batch_size,
        limit_test_batches=10000 // opts.batch_size,
        val_check_interval=opts.batches_per_epoch,
        weights_save_path='checkpoints/',
        weights_summary='full',
        gpus=1 if torch.cuda.is_available() else 0,
        resume_from_checkpoint=opts.load_from_checkpoint,
        deterministic=True)
    trainer.fit(signal_game)
# Example 2
def main(params):
    """Train the bit-vector VAE configured by *params* (CLI-style args)."""
    parser = populate_common_params(
        populate_experiment_params(argparse.ArgumentParser()))
    cfg = parser.parse_args(params)

    # Make runs reproducible.
    pl.seed_everything(cfg.random_seed)

    # Ensure the data directory exists before training starts.
    pathlib.Path('data/bit_vector-vae/fmnist_data/').mkdir(parents=True,
                                                           exist_ok=True)

    model = get_model(cfg)

    # Assemble a descriptive run name: <experiment>/<mode>/<tag>_<tag>_...
    tags = [f"lr-{cfg.lr}", f"latent_size-{cfg.latent_size}"]
    mode = cfg.mode
    if mode == "sparsemap":
        if cfg.budget > 0:
            tags.append(f"b{cfg.budget}")
        if cfg.noinit:
            tags.append("noinit")
    elif mode == "gs":
        if cfg.straight_through:
            tags.append("straight_through")
        tags.append(f"decay-{cfg.temperature_decay}")
        tags.append(f"updatefreq-{cfg.temperature_update_freq}")
    elif mode == 'sfe':
        tags.append(f"baseline-{cfg.baseline_type}")
    elif mode == 'vimco':
        tags.append(f"k-{cfg.vimco_k}")
    elif mode == "topksparse":
        tags.append(f"k-{cfg.topksparse}")

    run_name = f"bit-vector/{mode}/{'_'.join(tags)}"

    logger = pl_loggers.TensorBoardLogger('logs/', name=run_name)
    logger.log_hyperparams(cfg, metrics=None)

    trainer = pl.Trainer(progress_bar_refresh_rate=20,
                         logger=logger,
                         max_epochs=cfg.n_epochs,
                         weights_save_path='checkpoints/',
                         weights_summary='full',
                         gpus=1 if torch.cuda.is_available() else 0,
                         resume_from_checkpoint=cfg.load_from_checkpoint,
                         deterministic=True)

    trainer.fit(model)
# Example 3
def main(params):
    """Train the semi-supervised VAE specified by *params* (CLI-style args)."""
    parser = populate_common_params(
        populate_experiment_params(argparse.ArgumentParser()))
    cfg = parser.parse_args(params)

    # Make runs reproducible.
    pl.seed_everything(cfg.random_seed)

    net = get_model(cfg)

    # The run-name root depends on whether we warm-start on labeled data only.
    if cfg.labeled_only:
        base = f"ssvae/warm_start/{cfg.normalizer}"
    else:
        base = f"ssvae/{cfg.mode}"

    # Mode-specific hyperparameter tags appended to the run name.
    tags = [f"lr-{cfg.lr}"]
    if cfg.mode == "gs":
        if cfg.straight_through:
            tags.append("straight_through")
        tags.append(f"decay-{cfg.temperature_decay}")
        tags.append(f"updatefreq-{cfg.temperature_update_freq}")
    elif cfg.mode == 'sfe':
        tags.append(f"baseline-{cfg.baseline_type}")
    elif cfg.mode == 'vimco':
        tags.append(f"k-{cfg.vimco_k}")
    elif cfg.mode == "marg":
        tags.append(f"norm-{cfg.normalizer}")
    elif cfg.mode == 'sumsample':
        tags.append(f"k-{cfg.topk}")
        tags.append(f"baseline-{cfg.baseline_type}")

    run_name = f"{base}/{'_'.join(tags)}"

    logger = pl_loggers.TensorBoardLogger('logs/', name=run_name)
    logger.log_hyperparams(cfg, metrics=None)

    trainer = pl.Trainer(progress_bar_refresh_rate=20,
                         logger=logger,
                         max_epochs=cfg.n_epochs,
                         weights_save_path='checkpoints/',
                         weights_summary='full',
                         gpus=1 if torch.cuda.is_available() else 0,
                         resume_from_checkpoint=cfg.load_from_checkpoint,
                         deterministic=True)
    trainer.fit(net)