Example #1
def objective(trial):
    # Build the model with a trial-suggested number of hidden neurons.
    h_net = define_model(neurons=trial.suggest_int("n_neurons", 8, 128))
    parameters = h_net.parameters()
    # Sample the learning rate log-uniformly (suggest_loguniform is deprecated
    # in recent Optuna releases in favor of suggest_float with log=True).
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
    optimizer = torch.optim.Adam(parameters, lr=lr)

    startEpoches = time.time()
    # main training loop
    for i in range(int(richards_celia.EPOCHS)):
        optimizer.zero_grad()  # clear gradients for next train
        loss = loss_func(h_net, tz_pairs)
        if i % richards_celia.LOGSTEPS == 0:
            print(f"step {i}: loss = {loss.item():.6e}")
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
        # check whether the loss has converged and stop early
        with torch.no_grad():
            if i % 10 == 0 and len(h_net.losses) >= 11:
                last_loss_changes = [
                    torch.abs(a_i - b_i)
                    for a_i, b_i in zip(h_net.losses[-10:], h_net.losses[-11:-1])
                ]
                # converged if no step-to-step change over the last 10 recorded
                # losses exceeds machine epsilon (max() would work equally well)
                if all(llc <= torch.finfo(richards_celia.DTYPE).eps for llc in last_loss_changes):
                    break
    endEpoches = time.time()

    # Objective value: mean of the last 100 recorded losses.
    last_losses = h_net.losses[-100:]
    mean_loss = sum(last_losses) / len(last_losses)

    return mean_loss
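
This objective is written for Optuna (hyperparameters are sampled through the trial object). Below is a minimal sketch of how it might be driven; it assumes optuna is available and that a lower averaged loss is better. The study direction and trial count are assumptions, not taken from the snippet above.

import optuna

# Minimize the mean loss returned by objective().
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=50)  # trial budget is an arbitrary choice

print("best value:", study.best_value)
print("best hyperparameters:", study.best_params)
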
Example #2
def train():
    # Build the model with the configured number of hidden layers and neurons.
    h_net = define_model(n_hidden=richards_celia.HIDDEN,
                         n_neurons=richards_celia.NEURONS)
    parameters = h_net.parameters()
    optimizer = torch.optim.Adam(parameters, lr=richards_celia.LR)

    startEpoches = time.time()
    # main training loop
    for i in range(int(richards_celia.EPOCHS)):
        optimizer.zero_grad()  # clear gradients for next train
        loss = loss_func(h_net=h_net, input=tz_pairs)
        if i % richards_celia.LOGSTEPS == 0:
            print(f"step {i}: loss = {loss.item():.6e}")
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
        # check whether the loss has converged and stop early
        with torch.no_grad():
            if i % 10 == 0 and len(h_net.losses) >= 11:
                last_loss_changes = [
                    torch.abs(a_i - b_i)
                    for a_i, b_i in zip(h_net.losses[-10:], h_net.losses[-11:-1])
                ]
                # converged if no step-to-step change over the last 10 recorded
                # losses exceeds machine epsilon (max() would work equally well)
                if all(llc <= torch.finfo(richards_celia.DTYPE).eps for llc in last_loss_changes):
                    break
    endEpoches = time.time()

    torch.save(h_net, "./h_model.pt")

    print("Runtime of training loop: " + str(endEpoches - startEpoches) + " s")

    # Plot the history of the total loss and of each loss component.
    plt.plot(h_net.losses, label="total loss")
    plt.plot(h_net.pde_loss, label="pde loss")
    plt.plot(h_net.bc_initial_losses, label="bc initial loss")
    plt.plot(h_net.bc_top_losses, label="bc top loss")
    plt.plot(h_net.bc_bottom_losses, label="bc bottom loss")
    plt.legend()
    plt.savefig('loss.pdf')
    plt.savefig('loss.png')
    plt.show()
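
Since train() pickles the whole model with torch.save, a later script could reload and evaluate it roughly as sketched below. This assumes the model class (and define_model) is importable when unpickling and that the network is called on (t, z) input pairs like the tz_pairs used during training; newer PyTorch releases may additionally require weights_only=False in torch.load for a fully pickled model.

import torch

# Load the pickled model; the defining class must be importable here.
h_net = torch.load("./h_model.pt")
h_net.eval()

# Run the trained network without tracking gradients.
with torch.no_grad():
    h_pred = h_net(tz_pairs)
print(h_pred.shape)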