Example #1
        def __init__(self, truth_problem, **kwargs):
            # Call to parent
            EllipticCoerciveReducedProblem_DerivedClass.__init__(
                self, truth_problem, **kwargs)

            # Copy of greedy snapshots. Unlike greedy_selected_parameters in the
            # reduction method, this list also stores the initial parameter.
            self.snapshots_mu = GreedySelectedParametersList()
            self.snapshots = SnapshotsMatrix(truth_problem.V)

            # Extend the allowed keyword arguments of solve
            self._online_solve_default_kwargs["online_rectification"] = True
            self.OnlineSolveKwargs = OnlineSolveKwargsGenerator(
                **self._online_solve_default_kwargs)

            # Generate all combinations of allowed keyword arguments in solve
            online_solve_kwargs_with_rectification = list()
            online_solve_kwargs_without_rectification = list()
            for other_args in cartesian_product(
                    (True, False), repeat=len(self._online_solve_default_kwargs) - 1):
                args_with_rectification = self.OnlineSolveKwargs(*(other_args + (True, )))
                args_without_rectification = self.OnlineSolveKwargs(*(other_args + (False, )))
                online_solve_kwargs_with_rectification.append(args_with_rectification)
                online_solve_kwargs_without_rectification.append(args_without_rectification)
            self.online_solve_kwargs_with_rectification = online_solve_kwargs_with_rectification
            self.online_solve_kwargs_without_rectification = online_solve_kwargs_without_rectification

            # Flag to disable error estimation after rectification has been set up
            self._disable_error_estimation = False
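The loop above enumerates every combination of the boolean online solve options, pairing each setting of the remaining flags once with rectification enabled and once with it disabled. Below is a minimal standalone sketch of that pattern, assuming cartesian_product is itertools.product and using a made-up dict of default kwargs and a dict-based stand-in for OnlineSolveKwargsGenerator:

from itertools import product as cartesian_product

# Made-up stand-in for the reduced problem's defaults and for
# OnlineSolveKwargsGenerator: it simply zips flag names with values.
online_solve_default_kwargs = {"online_stabilization": False, "online_rectification": True}
flag_names = list(online_solve_default_kwargs.keys())

def online_solve_kwargs(*flags):
    return dict(zip(flag_names, flags))

kwargs_with_rectification = []
kwargs_without_rectification = []
for other_args in cartesian_product((True, False), repeat=len(online_solve_default_kwargs) - 1):
    kwargs_with_rectification.append(online_solve_kwargs(*(other_args + (True, ))))
    kwargs_without_rectification.append(online_solve_kwargs(*(other_args + (False, ))))

print(kwargs_with_rectification)
# [{'online_stabilization': True, 'online_rectification': True},
#  {'online_stabilization': False, 'online_rectification': True}]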
    def _POD_greedy_orthogonalize_snapshot(self, snapshot_over_time):
        # Orthogonalize a time trajectory of snapshots with respect to the
        # current reduced basis before it is compressed further.
        if self.reduced_problem.N > 0:
            basis_functions = self.reduced_problem.basis_functions
            projected_snapshot_N_over_time = self.reduced_problem.project(
                snapshot_over_time, on_dirichlet_bc=False)
            orthogonal_snapshot_over_time = SnapshotsMatrix(self.truth_problem.V)
            for (snapshot, projected_snapshot_N) in zip(snapshot_over_time, projected_snapshot_N_over_time):
                # Subtract the component of the snapshot already represented by the basis
                orthogonal_snapshot_over_time.enrich(snapshot - basis_functions * projected_snapshot_N)
            return orthogonal_snapshot_over_time
        else:
            return snapshot_over_time
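In the method above, each snapshot in the trajectory is stripped of its component in the span of the current basis before being stored. A small self-contained sketch of the same idea with plain NumPy arrays (the basis, inner product matrix, and helper name are made up for illustration, and the basis columns are assumed orthonormal in the chosen inner product):

import numpy as np

def orthogonalize_against_basis(snapshots_over_time, basis, inner_product):
    # basis: (n_dofs, N) with columns orthonormal in the inner_product matrix.
    # For each snapshot, subtract its projection onto span(basis) and keep the
    # orthogonal remainder, which is what the next compression step sees.
    residuals = []
    for snapshot in snapshots_over_time:
        coefficients = basis.T @ (inner_product @ snapshot)  # reduced coordinates
        residuals.append(snapshot - basis @ coefficients)
    return residuals

# Tiny illustration: one basis vector, identity (L2-like) inner product.
basis = np.array([[1.0], [0.0]])
X = np.eye(2)
print(orthogonalize_against_basis([np.array([3.0, 4.0])], basis, X))  # [array([0., 4.])]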
Example #3
import os
import pickle

import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset, random_split

# Problem-specific helpers (read_mesh, generate_block_function_space,
# read_basis_functions, get_inner_products, L2ProjectionSolver, SnapshotsMatrix,
# Network, ...) as well as the globals components and mu_range are assumed to be
# provided by the surrounding project.


def train_data_driven(N):
    (mesh, _, _, restrictions) = read_mesh()
    W = generate_block_function_space(mesh, restrictions)

    # L2 projection object
    basis_functions = read_basis_functions(W, N)
    X = get_inner_products(W, "L2 projection")
    l2_projection = {
        c: L2ProjectionSolver(X[c], basis_functions[c], N)
        for c in components
    }

    # Solution storage
    solution = BlockFunction(W)

    # Training set
    training_set = get_set("training_set")
    mu_len = len(training_set[0])

    # Read in snapshots
    snapshots_matrix = SnapshotsMatrix(W)
    for i, mu in enumerate(training_set):
        print("Appending solution for mu =", mu, "to snapshots matrix")
        read_solution(mu, "truth_solve", solution)
        snapshots_matrix.enrich(solution)

        filename = os.path.join("dis_x", "dis_x_" + str(i))
        write_file = open(filename, 'wb')
        pickle.dump(snapshots_matrix[-1][0].vector()[::3], write_file)
        write_file.close()

        filename = os.path.join("dis_y", "dis_y_" + str(i))
        write_file = open(filename, 'wb')
        pickle.dump(snapshots_matrix[-1][0].vector()[1::3], write_file)
        write_file.close()

        filename = os.path.join("dis_z", "dis_z_" + str(i))
        write_file = open(filename, 'wb')
        pickle.dump(snapshots_matrix[-1][0].vector()[2::3], write_file)
        write_file.close()

    # Data driven training component by component
    normalize_inputs = NormalizeInputs(mu_range)
    for c in components:
        projected_snapshots = [
            l2_projection[c].solve(mu, c, snapshots_matrix[i])
            for i, mu in enumerate(training_set)
        ]
        inputs = torch.unsqueeze(torch.FloatTensor(training_set._list),
                                 dim=mu_len)
        inputs = normalize_inputs(inputs)
        outputs = torch.stack([
            torch.from_numpy(projected_snapshot)
            for projected_snapshot in projected_snapshots
        ])
        with open(
                os.path.join("networks",
                             "output_normalization_" + c + "_" + str(N)),
                "w") as file_:
            file_.write(str(torch.min(outputs).detach().numpy()) + "\n")
            file_.write(str(torch.max(outputs).detach().numpy()) + "\n")
        normalize_outputs = NormalizeOutputs(
            os.path.join("networks",
                         "output_normalization_" + c + "_" + str(N)))
        outputs = normalize_outputs(outputs)
        network = Network(mu_len, c, N)
        network.apply(init_weights)

        criterion = nn.MSELoss()
        learning_rate = 0.3
        optimizer = optim.Adam(network.parameters(),
                               lr=learning_rate,
                               eps=1.e-08)
        torch_dataset = TensorDataset(inputs.float(), outputs.float())

        n_snapshots = len(training_set)
        n_training = 4 * int(n_snapshots / 6)
        n_validation = n_snapshots - n_training
        batch_size_training = int(round(np.sqrt(n_snapshots)))
        batch_size_validation = int(round(np.sqrt(n_snapshots)))
        epochs = 10000
        n_epochs_stop = epochs

        training_dataset, validation_dataset = random_split(
            torch_dataset, [n_training, n_validation])
        training_loader = DataLoader(dataset=training_dataset,
                                     batch_size=batch_size_training)
        validation_loader = DataLoader(dataset=validation_dataset,
                                       batch_size=batch_size_validation)

        training_losses = [None] * epochs
        validation_losses = [None] * epochs
        min_validation_loss = np.inf
        epochs_no_improvement = 0

        for epoch in range(epochs):
            for param_group in optimizer.param_groups:
                param_group["lr"] = learning_rate / (1 + np.sqrt(epoch))

            total_training_loss = 0.0
            for batch_x, batch_y in training_loader:  # for each training step
                network.train()
                optimizer.zero_grad()
                batch_x_normalized = batch_x.squeeze(1)
                prediction = network(batch_x_normalized)
                loss = criterion(prediction, batch_y)
                loss.backward()
                optimizer.step()
                total_training_loss += loss.item()
            training_losses[epoch] = total_training_loss / len(training_loader)
            print("[%d] Training loss: %.10f" %
                  (epoch + 1, training_losses[epoch]))

            network.eval()

            total_validation_loss = 0.0
            with torch.no_grad():
                for validation_x, validation_y in validation_loader:
                    validation_x_normalized = validation_x.squeeze(1)
                    network_y = network(validation_x_normalized)
                    loss = criterion(network_y, validation_y)
                    total_validation_loss += loss.item()
            validation_losses[epoch] = total_validation_loss / len(
                validation_loader)
            print("[%d] Validation loss: %.10f" %
                  (epoch + 1, validation_losses[epoch]))

            # Checkpoint the network whenever the validation loss matches or improves on the best seen so far
            if validation_losses[epoch] <= min_validation_loss:
                epochs_no_improvement = 0
                min_validation_loss = validation_losses[epoch]
                torch.save(
                    network.state_dict(),
                    os.path.join("networks", "network_" + c + "_" + str(N)))
            else:
                epochs_no_improvement += 1

            if epochs_no_improvement == n_epochs_stop:
                print("Early stopping!")
                break
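The training loop in this example pairs an inverse-square-root learning-rate decay with validation-based checkpointing and (nominally) early stopping; since n_epochs_stop equals epochs, the stopping branch cannot fire before the final epoch. A compact sketch of the same pattern in isolation, using standard PyTorch pieces and a made-up linear model and random data:

import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

model = nn.Linear(2, 1)  # made-up stand-in for Network
loader = DataLoader(TensorDataset(torch.randn(32, 2), torch.randn(32, 1)), batch_size=8)
criterion = nn.MSELoss()
base_lr = 0.3
optimizer = optim.Adam(model.parameters(), lr=base_lr)

best_loss, epochs_no_improvement, patience = np.inf, 0, 50
for epoch in range(10000):
    # Same schedule as above: lr decays like 1 / (1 + sqrt(epoch))
    for param_group in optimizer.param_groups:
        param_group["lr"] = base_lr / (1 + np.sqrt(epoch))
    epoch_loss = 0.0
    for x, y in loader:
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    epoch_loss /= len(loader)
    if epoch_loss <= best_loss:  # here the training loss stands in for a validation loss
        best_loss, epochs_no_improvement = epoch_loss, 0
        torch.save(model.state_dict(), "best_model.pt")
    else:
        epochs_no_improvement += 1
    if epochs_no_improvement >= patience:
        print("Early stopping!")
        break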