Example #1
def train_manifold_flow_sequential(args, dataset, model, simulator):
    """ MFMF-A training """

    assert not args.specified

    trainer = ManifoldFlowTrainer(model) if simulator.parameter_dim() is None else ConditionalManifoldFlowTrainer(model)

    common_kwargs = {
        "dataset": dataset,
        "batch_size": args.batchsize,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR,
        "clip_gradient": args.clip,
    }
    if args.weightdecay is not None:
        common_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    logger.info("Starting training MF, phase 1: manifold training")
    learning_curves = trainer.train(
        loss_functions=[losses.mse],
        loss_labels=["MSE"],
        loss_weights=[args.msefactor],
        epochs=args.epochs // 2,
        parameters=(list(model.outer_transform.parameters()) + list(model.encoder.parameters())
                    if args.algorithm == "emf" else model.outer_transform.parameters()),
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_A{}.pt")
        ],
        forward_kwargs={"mode": "projection"},
        **common_kwargs,
    )
    learning_curves = np.vstack(learning_curves).T

    logger.info("Starting training MF, phase 2: density training")
    learning_curves_ = trainer.train(
        loss_functions=[losses.nll],
        loss_labels=["NLL"],
        loss_weights=[args.nllfactor],
        epochs=args.epochs - (args.epochs // 2),
        parameters=model.inner_transform.parameters(),
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_B{}.pt")
        ],
        forward_kwargs={"mode": "mf-fixed-manifold"},
        **common_kwargs,
    )
    learning_curves_ = np.vstack(learning_curves_).T
    learning_curves = learning_curves_ if learning_curves is None else np.vstack(
        (learning_curves, learning_curves_))

    return learning_curves
Example #2
def train_dough(args, dataset, model, simulator):
    """ PIE with variable epsilons training """

    trainer = VariableDimensionManifoldFlowTrainer(model) if simulator.parameter_dim() is None else ConditionalVariableDimensionManifoldFlowTrainer(model)
    common_kwargs = {
        "dataset": dataset,
        "batch_size": args.batchsize,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR,
        "clip_gradient": args.clip,
    }
    if args.weightdecay is not None:
        common_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    logger.info(
        "Starting training dough, phase 1: NLL without latent regularization")
    learning_curves = trainer.train(
        loss_functions=[losses.nll],
        loss_labels=["NLL"],
        loss_weights=[args.nllfactor],
        epochs=args.epochs,
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_{}.pt")
        ],
        l1=args.doughl1reg,
        **common_kwargs,
    )
    learning_curves = np.vstack(learning_curves).T
    return learning_curves
Example #3
def train_pie(args, dataset, model, simulator):
    """ PIE training """

    trainer = ManifoldFlowTrainer(model) if simulator.parameter_dim() is None else ConditionalManifoldFlowTrainer(model)
    logger.info("Starting training PIE on NLL")
    common_kwargs = {
        "dataset": dataset,
        "batch_size": args.batchsize,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR,
        "clip_gradient": args.clip,
    }
    if args.weightdecay is not None:
        common_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    learning_curves = trainer.train(
        loss_functions=[losses.nll],
        loss_labels=["NLL"],
        loss_weights=[args.nllfactor],
        epochs=args.epochs,
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_{}.pt")
        ],
        forward_kwargs={"mode": "pie"},
        **common_kwargs,
    )
    learning_curves = np.vstack(learning_curves).T
    return learning_curves
Example #4
def train_manifold_flow_alternating(args, dataset, model, simulator):
    """ MFMF-A training """

    assert not args.specified

    trainer = ManifoldFlowTrainer(model) if simulator.parameter_dim() is None else ConditionalManifoldFlowTrainer(model)
    metatrainer = AlternatingTrainer(model, trainer, trainer)

    meta_kwargs = {
        "dataset": dataset,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR
    }
    if args.weightdecay is not None:
        meta_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    phase1_kwargs = {
        "forward_kwargs": {
            "mode": "projection"
        },
        "clip_gradient": args.clip
    }
    phase2_kwargs = {
        "forward_kwargs": {
            "mode": "mf-fixed-manifold"
        },
        "clip_gradient": args.clip
    }

    phase1_parameters = (list(model.outer_transform.parameters()) +
                         list(model.encoder.parameters()) if args.algorithm
                         == "emf" else model.outer_transform.parameters())
    phase2_parameters = model.inner_transform.parameters()

    logger.info(
        "Starting training MF, alternating between reconstruction error and log likelihood"
    )
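    # The metatrainer alternates between the two losses: MSE updates the phase-1 (outer/manifold) parameters, NLL updates the phase-2 (inner/density) parameters.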
    learning_curves_ = metatrainer.train(
        loss_functions=[losses.mse, losses.nll],
        loss_function_trainers=[0, 1],
        loss_labels=["MSE", "NLL"],
        loss_weights=[args.msefactor, args.nllfactor],
        epochs=args.epochs // 2,
        subsets=args.subsets,
        batch_sizes=[args.batchsize, args.batchsize],
        parameters=[phase1_parameters, phase2_parameters],
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_{}.pt")
        ],
        trainer_kwargs=[phase1_kwargs, phase2_kwargs],
        **meta_kwargs,
    )
    learning_curves = np.vstack(learning_curves_).T

    return learning_curves
Example #5
def train_generative_adversarial_manifold_flow_alternating(
        args, dataset, model, simulator):
    """ MFMF-OTA training """

    assert not args.specified

    gen_trainer = GenerativeTrainer(model) if simulator.parameter_dim() is None else ConditionalGenerativeTrainer(model)
    likelihood_trainer = ManifoldFlowTrainer(model) if simulator.parameter_dim() is None else ConditionalManifoldFlowTrainer(model)
    metatrainer = AlternatingTrainer(model, gen_trainer, likelihood_trainer)

    meta_kwargs = {
        "dataset": dataset,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR
    }
    if args.weightdecay is not None:
        meta_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    phase1_kwargs = {"clip_gradient": args.clip}
    phase2_kwargs = {
        "forward_kwargs": {
            "mode": "mf-fixed-manifold"
        },
        "clip_gradient": args.clip
    }

    phase1_parameters = model.parameters()
    phase2_parameters = model.inner_transform.parameters()

    logger.info(
        "Starting training GAMF, alternating between Sinkhorn divergence and log likelihood"
    )
    learning_curves_ = metatrainer.train(
        loss_functions=[losses.make_sinkhorn_divergence(), losses.nll],
        loss_function_trainers=[0, 1],
        loss_labels=["GED", "NLL"],
        loss_weights=[args.sinkhornfactor, args.nllfactor],
        batch_sizes=[args.genbatchsize, args.batchsize],
        epochs=args.epochs // 2,
        parameters=[phase1_parameters, phase2_parameters],
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_{}.pt")
        ],
        trainer_kwargs=[phase1_kwargs, phase2_kwargs],
        subsets=args.subsets,
        subset_callbacks=[callbacks.print_mf_weight_statistics()]
        if args.debug else None,
        **meta_kwargs,
    )
    learning_curves = np.vstack(learning_curves_).T

    return learning_curves
Example #6
def sample_from_model(args, model, simulator):
    """ Generate samples from model and store """

    logger.info("Sampling from model")
    if simulator.parameter_dim() is None:
        x_gen = model.sample(n=args.generate).detach().numpy()
    else:
        params = simulator.default_parameters(true_param_id=args.trueparam)
        params = np.asarray([params for _ in range(args.generate)])
        params = torch.tensor(params, dtype=torch.float)
        x_gen = model.sample(n=args.generate, context=params).detach().numpy()
    np.save(create_filename("results", "samples", args), x_gen)
    return x_gen
Example #7
    def objective(trial):
        global counter

        counter += 1

        # Hyperparameters
        margs = pick_parameters(args, trial, counter)

        logger.info("Starting training for the following hyperparameters:")
        for k, v in margs.__dict__.items():
            logger.info("  %s: %s", k, v)

        # Bug fix related to some num_workers > 1 and CUDA. Bad things happen otherwise!
        torch.multiprocessing.set_start_method("spawn", force=True)

        # Load data
        simulator = load_simulator(margs)
        dataset = load_training_dataset(simulator, margs)

        # Create model
        model = create_model(margs, simulator)

        # Train
        _ = train.train_model(margs, dataset, model, simulator)

        # Save
        torch.save(model.state_dict(), create_filename("model", None, margs))

        # Evaluate
        model.eval()

        # Evaluate test samples
        log_likelihood_test, reconstruction_error_test, _ = evaluate.evaluate_test_samples(
            margs, simulator, model, paramscan=True)
        mean_log_likelihood_test = np.mean(log_likelihood_test)
        mean_reco_error_test = np.mean(reconstruction_error_test)

        # Generate samples
        x_gen = evaluate.sample_from_model(margs, model, simulator)
        distances_gen = simulator.distance_from_manifold(x_gen)
        mean_gen_distance = np.mean(distances_gen)

        # Report results
        logger.info("Results:")
        logger.info("  test log p:    %s", mean_log_likelihood_test)
        logger.info("  test reco err: %s", mean_reco_error_test)
        logger.info("  gen distance:  %s", mean_gen_distance)

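        # The study minimizes this objective, so the test log likelihood enters with a negative sign, while reconstruction error and distance from the manifold are penalized directly, each scaled by its metric factor.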
        return (-1.0 * margs.metricnllfactor * mean_log_likelihood_test +
                margs.metricrecoerrorfactor * mean_reco_error_test +
                margs.metricdistancefactor * mean_gen_distance)
Example #8
def evaluate_model_samples(args, simulator, x_gen):
    """ Evaluate model samples and save results """

    logger.info("Calculating likelihood of generated samples")

    try:
        if simulator.parameter_dim() is None:
            log_likelihood_gen = simulator.log_density(x_gen)
        else:
            params = simulator.default_parameters(true_param_id=args.trueparam)
            params = np.asarray([params for _ in range(args.generate)])
            log_likelihood_gen = simulator.log_density(x_gen, parameters=params)
        log_likelihood_gen[np.isnan(log_likelihood_gen)] = -1.0e-12
        np.save(create_filename("results", "samples_likelihood", args), log_likelihood_gen)
    except IntractableLikelihoodError:
        logger.info("True simulator likelihood is intractable for dataset %s", args.dataset)

    # Distance from manifold
    try:
        logger.info("Calculating distance from manifold of generated samples")
        distances_gen = simulator.distance_from_manifold(x_gen)
        np.save(create_filename("results", "samples_manifold_distance", args), distances_gen)
    except NotImplementedError:
        logger.info("Cannot calculate distance from manifold for dataset %s", args.dataset)
Example #9
def timing(args):
    logger.info(
        "Timing algorithm %s with %s outer layers with transformation %s and %s inner layers with transformation %s",
        args.algorithm,
        args.outerlayers,
        args.outertransform,
        args.innerlayers,
        args.innertransform,
    )

    # Bug fix related to some num_workers > 1 and CUDA. Bad things happen otherwise!
    torch.multiprocessing.set_start_method("spawn", force=True)

    if torch.cuda.is_available():
        torch.set_default_tensor_type("torch.cuda.DoubleTensor")

    # Loop over data dims
    all_times = []
    for datadim in args.datadims:
        logger.info("Starting timing for %s-dimensional data", datadim)
        args.datadim = datadim

        # Data
        data = torch.randn(args.batchsize, datadim)
        data.requires_grad = True

        # Model
        model = create_model(args, context_features=None)
        if torch.cuda.is_available():
            model = model.to(torch.device("cuda"))

        # Time forward pass
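        # Note: time.time() wraps the whole forward call; with CUDA, kernel launches are asynchronous, so adding torch.cuda.synchronize() before reading the clock would presumably give tighter per-pass timings.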
        times = []
        for _ in range(args.repeats):
            time_before = time.time()
            _ = model(data)
            times.append(time.time() - time_before)

        logger.info("Mean time: %s s", np.mean(times))

        all_times.append(times)

    # Save results
    logger.info("Saving results")
    np.save(create_filename("timing", None, args), all_times)
Example #10
def train_generative_adversarial_manifold_flow(args, dataset, model,
                                               simulator):
    """ MFMF-OT training """

    gen_trainer = GenerativeTrainer(model) if simulator.parameter_dim() is None else ConditionalGenerativeTrainer(model)
    common_kwargs = {
        "dataset": dataset,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR,
        "clip_gradient": args.clip
    }
    if args.weightdecay is not None:
        common_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    logger.info("Starting training GAMF: Sinkhorn-GAN")

    callbacks_ = [
        callbacks.save_model_after_every_epoch(
            create_filename("checkpoint", None, args)[:-3] + "_epoch_{}.pt")
    ]
    if args.debug:
        callbacks_.append(callbacks.print_mf_weight_statistics())

    learning_curves_ = gen_trainer.train(
        loss_functions=[losses.make_sinkhorn_divergence()],
        loss_labels=["GED"],
        loss_weights=[args.sinkhornfactor],
        epochs=args.epochs,
        callbacks=callbacks_,
        batch_size=args.genbatchsize,
        compute_loss_variance=True,
        **common_kwargs,
    )

    learning_curves = np.vstack(learning_curves_).T
    return learning_curves
Example #11
    # Simulator
    simulator = load_simulator(args)

    # Parameters?
    conditional = simulator.parameter_dim() is not None

    parameters_train = simulator.sample_from_prior(args.train) if conditional else None

    # Sample
    if args.train > 0:
        logger.info("Generating %s training samples at parameters %s",
                    args.train, parameters_train)
        x_train = simulator.sample(args.train, parameters=parameters_train)
        np.save(create_filename("sample", "x_train", args), x_train)
        if conditional:
            np.save(create_filename("sample", "parameters_train", args),
                    parameters_train)

    if args.paramscan > 0:
        parameters_val = np.array([simulator.default_parameters() for _ in range(args.paramscan)]).reshape((args.paramscan, -1)) if conditional else None
        logger.info("Generating %s param-scan samples at parameters %s",
                    args.paramscan, parameters_val)
        x_val = simulator.sample(args.paramscan, parameters=parameters_val)
        np.save(create_filename("sample", "x_paramscan", args), x_val)
        if conditional:
            np.save(create_filename("sample", "parameters_paramscan", args),
Example #12
def train_slice_of_pie(args, dataset, model, simulator):
    """ SLICE training """

    trainer = ManifoldFlowTrainer(model) if simulator.parameter_dim() is None else ConditionalManifoldFlowTrainer(model)
    common_kwargs = {
        "dataset": dataset,
        "batch_size": args.batchsize,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR,
        "clip_gradient": args.clip,
    }
    if args.weightdecay is not None:
        common_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    if args.nopretraining or args.epochs // 3 < 1:
        logger.info("Skipping pretraining phase")
        learning_curves = np.zeros((0, 2))
    else:
        logger.info(
            "Starting training slice of PIE, phase 1: pretraining on reconstruction error"
        )
        learning_curves = trainer.train(
            loss_functions=[losses.mse],
            loss_labels=["MSE"],
            loss_weights=[args.initialmsefactor],
            epochs=args.epochs // 3,
            callbacks=[
                callbacks.save_model_after_every_epoch(
                    create_filename("checkpoint", None, args)[:-3] +
                    "_epoch_A{}.pt")
            ],
            forward_kwargs={"mode": "projection"},
            **common_kwargs,
        )
        learning_curves = np.vstack(learning_curves).T

    logger.info("Starting training slice of PIE, phase 2: mixed training")
    learning_curves_ = trainer.train(
        loss_functions=[losses.mse, losses.nll],
        loss_labels=["MSE", "NLL"],
        loss_weights=[args.initialmsefactor, args.initialnllfactor],
        epochs=args.epochs - (1 if args.nopretraining else 2) *
        (args.epochs // 3),
        parameters=model.inner_transform.parameters(),
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_B{}.pt")
        ],
        forward_kwargs={"mode": "slice"},
        **common_kwargs,
    )
    learning_curves_ = np.vstack(learning_curves_).T
    learning_curves = np.vstack((learning_curves, learning_curves_))

    logger.info(
        "Starting training slice of PIE, phase 3: training only inner flow on NLL"
    )
    learning_curves_ = trainer.train(
        loss_functions=[losses.mse, losses.nll],
        loss_labels=["MSE", "NLL"],
        loss_weights=[args.msefactor, args.nllfactor],
        epochs=args.epochs // 3,
        parameters=model.inner_transform.parameters(),
        callbacks=[
            callbacks.save_model_after_every_epoch(
                create_filename("checkpoint", None, args)[:-3] +
                "_epoch_C{}.pt")
        ],
        forward_kwargs={"mode": "slice"},
        **common_kwargs,
    )
    learning_curves_ = np.vstack(learning_curves_).T
    learning_curves = np.vstack((learning_curves, learning_curves_))

    return learning_curves
Example #13
def train_manifold_flow(args, dataset, model, simulator):
    """ MFMF-S training """

    trainer = ManifoldFlowTrainer(model) if simulator.parameter_dim() is None else ConditionalManifoldFlowTrainer(model)
    common_kwargs = {
        "dataset": dataset,
        "batch_size": args.batchsize,
        "initial_lr": args.lr,
        "scheduler": optim.lr_scheduler.CosineAnnealingLR,
        "clip_gradient": args.clip,
    }
    if args.weightdecay is not None:
        common_kwargs["optimizer_kwargs"] = {
            "weight_decay": float(args.weightdecay)
        }

    if args.specified:
        logger.info("Starting training MF with specified manifold on NLL")
        learning_curves = trainer.train(
            loss_functions=[losses.mse, losses.nll],
            loss_labels=["MSE", "NLL"],
            loss_weights=[0.0, args.nllfactor],
            epochs=args.epochs,
            callbacks=[
                callbacks.save_model_after_every_epoch(
                    create_filename("checkpoint", None, args)[:-3] +
                    "_epoch_{}.pt")
            ],
            forward_kwargs={"mode": "mf"},
            **common_kwargs,
        )
        learning_curves = np.vstack(learning_curves).T
    else:
        if args.nopretraining or args.epochs // args.prepostfraction < 1:
            logger.info("Skipping pretraining phase")
            learning_curves = None
        elif args.prepie:
            logger.info(
                "Starting training MF, phase 1: pretraining on PIE likelihood")
            learning_curves = trainer.train(
                loss_functions=[losses.nll],
                loss_labels=["NLL"],
                loss_weights=[args.nllfactor],
                epochs=args.epochs // args.prepostfraction,
                callbacks=[
                    callbacks.save_model_after_every_epoch(
                        create_filename("checkpoint", None, args)[:-3] +
                        "_epoch_A{}.pt")
                ],
                forward_kwargs={"mode": "pie"},
                **common_kwargs,
            )
            learning_curves = np.vstack(learning_curves).T
        else:
            logger.info(
                "Starting training MF, phase 1: pretraining on reconstruction error"
            )
            learning_curves = trainer.train(
                loss_functions=[losses.mse],
                loss_labels=["MSE"],
                loss_weights=[args.msefactor],
                epochs=args.epochs // args.prepostfraction,
                callbacks=[
                    callbacks.save_model_after_every_epoch(
                        create_filename("checkpoint", None, args)[:-3] +
                        "_epoch_A{}.pt")
                ],
                forward_kwargs={"mode": "projection"},
                **common_kwargs,
            )
            learning_curves = np.vstack(learning_curves).T

        logger.info("Starting training MF, phase 2: mixed training")
        learning_curves_ = trainer.train(
            loss_functions=[losses.mse, losses.nll],
            loss_labels=["MSE", "NLL"],
            loss_weights=[args.msefactor, args.addnllfactor],
            epochs=args.epochs -
            (2 - int(args.nopretraining) - int(args.noposttraining)) *
            (args.epochs // args.prepostfraction),
            parameters=model.parameters(),
            callbacks=[
                callbacks.save_model_after_every_epoch(
                    create_filename("checkpoint", None, args)[:-3] +
                    "_epoch_B{}.pt")
            ],
            forward_kwargs={"mode": "mf"},
            **common_kwargs,
        )
        learning_curves_ = np.vstack(learning_curves_).T
        learning_curves = learning_curves_ if learning_curves is None else np.vstack(
            (learning_curves, learning_curves_))

        if args.nopretraining or args.epochs // args.prepostfraction < 1:
            logger.info("Skipping inner flow phase")
        else:
            logger.info(
                "Starting training MF, phase 3: training only inner flow on NLL"
            )
            learning_curves_ = trainer.train(
                loss_functions=[losses.mse, losses.nll],
                loss_labels=["MSE", "NLL"],
                loss_weights=[0.0, args.nllfactor],
                epochs=args.epochs // args.prepostfraction,
                parameters=model.inner_transform.parameters(),
                callbacks=[
                    callbacks.save_model_after_every_epoch(
                        create_filename("checkpoint", None, args)[:-3] +
                        "_epoch_C{}.pt")
                ],
                forward_kwargs={"mode": "mf-fixed-manifold"},
                **common_kwargs,
            )
            learning_curves_ = np.vstack(learning_curves_).T
            learning_curves = np.vstack((learning_curves, learning_curves_))

    return learning_curves
Example #14
    # Bug fix related to some num_workers > 1 and CUDA. Bad things happen otherwise!
    torch.multiprocessing.set_start_method("spawn", force=True)

    # Data
    simulator = load_simulator(args)
    dataset = load_training_dataset(simulator, args)

    # Model
    model = create_model(args, simulator)

    # Maybe load pretrained model
    if args.load is not None:
        args_ = copy.deepcopy(args)
        args_.modelname = args.load
        if args_.i > 0:
            args_.modelname += "_run{}".format(args_.i)
        logger.info("Loading model %s", args_.modelname)
        model.load_state_dict(
            torch.load(create_filename("model", None, args_),
                       map_location=torch.device("cpu")))

    # Train and save
    learning_curves = train_model(args, dataset, model, simulator)

    # Save
    logger.info("Saving model")
    torch.save(model.state_dict(), create_filename("model", None, args))
    np.save(create_filename("learning_curve", None, args), learning_curves)

    logger.info("All done! Have a nice day!")
Example #15
        distances_gen = simulator.distance_from_manifold(x_gen)
        mean_gen_distance = np.mean(distances_gen)

        # Report results
        logger.info("Results:")
        logger.info("  test log p:    %s", mean_log_likelihood_test)
        logger.info("  test reco err: %s", mean_reco_error_test)
        logger.info("  gen distance:  %s", mean_gen_distance)

        return (-1.0 * margs.metricnllfactor * mean_log_likelihood_test +
                margs.metricrecoerrorfactor * mean_reco_error_test +
                margs.metricdistancefactor * mean_gen_distance)

    # Load saved study object
    if args.resumestudy:
        filename = create_filename("paramscan", None, args)
        logger.info("Loading parameter scan from %s", filename)

        with open(filename, "rb") as file:
            study = pickle.load(file)

    else:
        # Create a fresh study if not resuming
        study = optuna.create_study(study_name=args.paramscanstudyname,
                                    direction="minimize")

    # Optimize!
    try:
        study.optimize(objective, n_trials=args.trials)
    except (KeyboardInterrupt, SystemExit):
        logger.warning("Optimization interrupted!")

    # Report best results
    logger.info("Best parameters:")
Example #16
        create_modelname(args)
        logger.info("Evaluating simulator truth")
    else:
        create_modelname(args)
        logger.info("Evaluating model %s", args.modelname)

    # Bug fix related to some num_workers > 1 and CUDA. Bad things happen otherwise!
    torch.multiprocessing.set_start_method("spawn", force=True)

    # Data set
    simulator = load_simulator(args)

    # Load model
    if not args.truth:
        model = create_model(args, simulator=simulator)
        model.load_state_dict(torch.load(create_filename("model", None, args), map_location=torch.device("cpu")))
        model.eval()
    else:
        model = None

    # Evaluate generative performance
    if args.skipgeneration:
        logger.info("Skipping generative evaluation as per request.")
    elif not args.truth:
        x_gen = sample_from_model(args, model, simulator)
        evaluate_model_samples(args, simulator, x_gen)

    if args.skipinference:
        logger.info("Skipping all inference tasks as per request. Have a nice day!")
        exit()