Example No. 1
import numpy as np

# solve_pareto_front_representation is provided by desdeo-mcdm
# (import path as of desdeo-mcdm 1.x; create_problem and save are project-local helpers from modules/)
from desdeo_mcdm.utilities.solvers import solve_pareto_front_representation

# Note: constraint violations are handled with a penalty, so the results may still contain solutions that break the constraints
constraints = np.array([
    [10, 100],  #  10 < weight < 100
    [15, None],  # stress > 15
    [None, 100],  # buckling < 100
    [None, None],  # deflection: no constraint
])
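
# Hedged illustration (an assumption about what create_problem does internally,
# not part of this project's code): a [lower, upper] row could be turned into a
# DESDEO constraint via desdeo_problem's ScalarConstraint, whose evaluator
# returns a positive value when the constraint is satisfied.
# The exact import path may differ between desdeo_problem versions.
from desdeo_problem import ScalarConstraint

def bound_to_constraint(name, obj_index, lower, upper, n_vars, n_objs):
    def evaluator(xs, fs):
        # fs: objective vectors, shape (n_points, n_objs)
        slack = np.full(fs.shape[0], np.inf)
        if lower is not None:
            slack = np.minimum(slack, fs[:, obj_index] - lower)  # > 0 iff value > lower
        if upper is not None:
            slack = np.minimum(slack, upper - fs[:, obj_index])  # > 0 iff value < upper
        return slack
    return ScalarConstraint(name, n_vars, n_objs, evaluator)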

# To create the problem we call the create_problem helper with the parameters defined earlier.
# It returns an MOProblem and a ScalarMethod instance, which can be passed to various DESDEO objects
problem, method = create_problem(load, obj, constraints)

# Example of solving a representation of the Pareto front. This might take some time, so feel free to comment it out.

# We use the solve_pareto_front_representation method here, but another solver could be substituted.
# The method takes the problem instance and a step-size array.

# The method generates reference points from the nadir to the ideal point using these step sizes:
# larger step sizes => fewer solutions but faster computation.
# The create_problem call above prints approximate values of the nadir and ideal points,
# which can help you choose step sizes that fit the problem.
step_sizes = np.array([100, 177, 100, 4])[obj]
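
# Rough sanity check before solving (illustrative; assumes problem.nadir and
# problem.ideal are numpy arrays over the selected objectives, as used in the
# save call below): the number of reference points grows multiplicatively with
# (nadir - ideal) / step, so doubling one step size roughly halves the work
# along that objective.
approx_points = np.prod(np.ceil(np.abs(problem.nadir - problem.ideal) / step_sizes))
print("Approximate number of reference points:", int(approx_points))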

# The method returns the decision vectors and the corresponding objective vectors.
# (Stored as obj_vals here to avoid shadowing the objective-selection mask `obj` above.)
var, obj_vals = solve_pareto_front_representation(problem, step_sizes)

# Save the solution if you wish; change the name so you don't accidentally overwrite an existing solution.
# Saved solutions can be loaded later for visualization.
# The solution will be saved to modules/DataAndVisualization/'name'
save("tbExample", obj_vals, var, problem.nadir, problem.ideal)
Example No. 2
import os
import pickle
import time

from torch.utils.tensorboard import SummaryWriter

# utils, build_optimizer, build_scheduler, run_partition, and
# make_markdown_table_from_dict are project-local helpers

def train(
    model,
    train_loader,
    val_loader,
    epochs,
    save_iter=10,
    vis_iter=4,
    optimization_args=None,
    log_dir=None,
    args_to_log=None,
    stopping_param=50,
    metrics=None,
):
    """ Trains the model. Validation loader can be none.
    Assumptions:
    1. loaders return (batch_inputs, batch_labels), where both can be lists or torch.Tensors
    """

    # print the model architecture; this helps catch mistakes early
    print(model)

    # if log_dir is not given, logs go to a new numbered directory inside 'logs/'
    if log_dir is None:
        log_root = "logs/"
        utils.make_path(log_root)
        last_run = max([0] +
                       [int(k) for k in os.listdir(log_root) if k.isdigit()])
        log_dir = os.path.join(log_root, "{0:04d}".format(last_run + 1))
        utils.make_path(log_dir)
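        # e.g. if "logs/0001" and "logs/0002" already exist, this run logs to "logs/0003"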

    tensorboard = SummaryWriter(log_dir)
    print("Visualize logs using: tensorboard --logdir={0}".format(log_dir))

    # add args_to_log to tensorboard, but also store it separately for easier access
    # (guarded, since args_to_log defaults to None and vars(None) would raise)
    if args_to_log is not None:
        tensorboard.add_text("script arguments", repr(args_to_log))
        tensorboard.add_text("script arguments table",
                             make_markdown_table_from_dict(vars(args_to_log)))
        with open(os.path.join(log_dir, "args.pkl"), "wb") as f:
            pickle.dump(args_to_log, f)

    optimizer = build_optimizer(model.named_parameters(), optimization_args)
    scheduler = build_scheduler(optimizer, optimization_args)

    # default metrics to an empty list; it must be a list or tuple
    if metrics is None:
        metrics = []
    assert isinstance(metrics, (list, tuple))

    # used to stop training early if the validation result stops improving
    last_best_epoch = 0
    for epoch in range(epochs):
        t0 = time.time()

        model.train()
        train_losses = run_partition(
            model=model,
            epoch=epoch,
            tensorboard=tensorboard,
            optimizer=optimizer,
            loader=train_loader,
            partition="train",
            training=True,
            metrics=metrics,
        )

        val_losses = {}
        if val_loader is not None:
            model.eval()
            val_losses = run_partition(
                model=model,
                epoch=epoch,
                tensorboard=tensorboard,
                optimizer=optimizer,
                loader=val_loader,
                partition="val",
                training=False,
                metrics=metrics,
            )

        # log some statistics
        t = time.time()
        log_string = "Epoch: {}/{}".format(epoch, epochs)
        for k, v in list(train_losses.items()) + list(val_losses.items()):
            log_string += ", {}: {:0.6f}".format(k, v)
        log_string += ", Time: {:0.1f}s".format(t - t0)
        print(log_string)

        # add visualizations
        if (epoch + 1) % vis_iter == 0 and hasattr(model, "visualize"):
            visualizations = model.visualize(train_loader,
                                             val_loader,
                                             tensorboard=tensorboard,
                                             epoch=epoch)
            # visualizations is a dict mapping names to matplotlib figures;
            # they are created with matplotlib and then logged to tensorboard here
            for (name, fig) in visualizations.items():
                tensorboard.add_figure(name, fig, epoch)

        # save the model according to our schedule
        if (epoch + 1) % save_iter == 0:
            utils.save(
                model=model,
                optimizer=optimizer,
                scheduler=scheduler,
                path=os.path.join(log_dir, "checkpoints",
                                  "epoch{}.mdl".format(epoch)),
            )

        # save the model if it gives the best validation score and stop the training if needed
        if hasattr(model, "is_best_val_result"):
            is_best_val, best_val_result = model.is_best_val_result()
            if is_best_val:
                last_best_epoch = epoch
                print(
                    "This is the best validation result so far. Saving the model ..."
                )
                utils.save(
                    model=model,
                    optimizer=optimizer,
                    scheduler=scheduler,
                    path=os.path.join(log_dir, "checkpoints", "best_val.mdl"),
                )

                # save the validation result for doing model selection later
                with open(os.path.join(log_dir, "best_val_result.txt"),
                          "w") as f:
                    f.write("{}\n".format(best_val_result))

            # stop the training if the best result was not updated in the last `stopping_param` epochs
            if epoch - last_best_epoch >= stopping_param:
                break

        # update the learning rate
        scheduler.step()

    # switch the model to evaluation mode
    model.eval()

    # save the final version of the network
    utils.save(
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        path=os.path.join(log_dir, "checkpoints", "final.mdl"),
    )
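
# Hedged usage sketch: every name below is a placeholder, and the exact schema of
# optimization_args is an assumption (it is consumed by the project-local
# build_optimizer/build_scheduler helpers, whose config format is not shown here).
#   model = MyModel()  # any nn.Module; may optionally define visualize()
#   train(model, train_loader, val_loader, epochs=100,
#         optimization_args={"optimizer": "adam", "lr": 1e-3},
#         args_to_log=parsed_args)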