# Example no. 1 (score: 0)
    def idn_init(self, t, x, u, u_layers, pde_layers):
        """Build the identification graph: one u-network per dataset plus a PDE network.

        Args:
            t, x, u: training data arrays (time, space, observed solution).
            u_layers: list of layer-size lists, one per u-network
                (presumably one network per dataset — TODO confirm with idn_net).
            pde_layers: layer sizes for the PDE network; pde_layers[0] is the
                number of candidate PDE terms.
        """
        # Training data for identification.
        self.t = t
        self.x = x
        self.u = u

        # Layer specifications.
        self.u_layers = u_layers
        self.pde_layers = pde_layers

        # One (weights, biases) pair per u-network, one shared PDE network.
        self.u_params = list(map(nn.initialize_nn, self.u_layers))
        self.pde_weights, self.pde_biases = nn.initialize_nn(self.pde_layers)

        # BUG FIX: the original used `[tf.placeholder(...)] * len(u_layers)`,
        # which evaluates tf.placeholder ONCE and repeats the same placeholder
        # object, so every network would alias a single feed. Create a
        # distinct placeholder per network instead.
        self.t_phs = [tf.placeholder(tf.float32, [None, 1]) for _ in u_layers]
        self.u_phs = [tf.placeholder(tf.float32, [None, 1]) for _ in u_layers]
        self.x_phs = [tf.placeholder(tf.float32, [None, 1]) for _ in u_layers]
        self.terms_phs = tf.placeholder(tf.float32, [None, pde_layers[0]])

        # TF graphs.
        self.u_preds = self.idn_net(self.t_phs, self.x_phs)
        self.pde_pred = self.pde_net(self.terms_phs)
        self.f_pred = self.identifier_f(self.t_phs, self.x_phs)

        # Data-fit loss: sum of squared residuals over all networks.
        self.u_loss = tf.reduce_sum(
            sum(tf.square(pred - target)
                for pred, target in zip(self.u_preds, self.u_phs)))
        # PDE-residual loss.
        self.f_loss = tf.reduce_sum(sum(tf.square(f) for f in self.f_pred))

        # Flatten [(weights, biases), ...] into a single variable list.
        unnested = []
        for params in self.u_params:
            for group in params:  # group is the weights list, then the biases list
                unnested += group

        # Scipy L-BFGS-B optimizers (ftol at machine epsilon: run to convergence).
        self.scipy_u_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.u_loss,
            var_list=unnested,
            method="L-BFGS-B",
            options={
                "maxiter": 100000,
                "maxfun": 100000,
                "maxcor": 50,
                "maxls": 50,
                "ftol": 1.0 * np.finfo(float).eps
            })
        self.scipy_f_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.f_loss,
            var_list=self.pde_weights + self.pde_biases,
            method="L-BFGS-B",
            options={
                "maxiter": 100000,
                "maxfun": 100000,
                "maxcor": 50,
                "maxls": 50,
                "ftol": 1.0 * np.finfo(float).eps
            })

        # Adam optimizers (one per loss so moment estimates are independent).
        self.adam_u_optimizer = tf.train.AdamOptimizer()
        self.adam_f_optimizer = tf.train.AdamOptimizer()
        self.adam_u_optimizer_train = self.adam_u_optimizer.minimize(
            self.u_loss, var_list=unnested)
        self.adam_f_optimizer_train = self.adam_f_optimizer.minimize(
            self.f_loss, var_list=self.pde_weights + self.pde_biases)
# Example no. 2 (score: 0)
    def sol_init(self, x0, u0, tb, X_f, layers):
        """Set up the solver graph: data, placeholders, losses and optimizers."""
        # Assemble (t, x) point sets: initial slice at t = 0, and the two
        # spatial boundaries swept over the boundary times tb.
        initial_pts = np.concatenate((np.zeros_like(x0), x0), 1)
        lower_pts = np.concatenate((tb, 0 * tb + self.sol_lb[1]), 1)
        upper_pts = np.concatenate((tb, 0 * tb + self.sol_ub[1]), 1)

        self.X_f = X_f
        self.t0 = initial_pts[:, 0:1]  # Initial Data (time)
        self.x0 = initial_pts[:, 1:2]  # Initial Data (space)
        self.t_lb = lower_pts[:, 0:1]  # Lower Boundary Data (time)
        self.t_ub = upper_pts[:, 0:1]  # Upper Boundary Data (time)
        self.x_lb = lower_pts[:, 1:2]  # Lower Boundary Data (space)
        self.x_ub = upper_pts[:, 1:2]  # Upper Boundary Data (space)
        self.t_f = X_f[:, 0:1]  # Collocation Points (time)
        self.x_f = X_f[:, 1:2]  # Collocation Points (space)
        self.u0 = u0  # Boundary Data

        # Layer sizes for the solution network.
        self.layers = layers

        # Initialize the solution network parameters.
        self.weights, self.biases = nn.initialize_nn(layers)

        # Placeholders: every feed is a float32 column of shape [batch, 1].
        def _column_ph():
            return tf.placeholder(tf.float32, [None, 1])

        self.t0_placeholder = _column_ph()
        self.x0_placeholder = _column_ph()
        self.u0_placeholder = _column_ph()
        self.t_lb_placeholder = _column_ph()
        self.x_lb_placeholder = _column_ph()
        self.t_ub_placeholder = _column_ph()
        self.x_ub_placeholder = _column_ph()
        self.t_f_placeholder = _column_ph()
        self.x_f_placeholder = _column_ph()

        # Graph outputs: predictions at the initial slice, both boundaries
        # (value and spatial derivative), and the PDE residual at collocation
        # points.
        self.u0_pred, _ = self.solver_net_u(self.t0_placeholder,
                                            self.x0_placeholder)
        self.u_lb_pred, self.u_x_lb_pred = self.solver_net_u(
            self.t_lb_placeholder, self.x_lb_placeholder)
        self.u_ub_pred, self.u_x_ub_pred = self.solver_net_u(
            self.t_ub_placeholder, self.x_ub_placeholder)
        self.solver_f_pred = self.solver_net_f(self.t_f_placeholder,
                                               self.x_f_placeholder)

        # Total loss = initial-condition fit + periodic matching of boundary
        # values and derivatives + PDE residual.
        self.solver_loss = (
            tf.reduce_sum(tf.square(self.u0_placeholder - self.u0_pred))
            + tf.reduce_sum(tf.square(self.u_lb_pred - self.u_ub_pred))
            + tf.reduce_sum(tf.square(self.u_x_lb_pred - self.u_x_ub_pred))
            + tf.reduce_sum(tf.square(self.solver_f_pred)))

        # L-BFGS-B refinement (ftol at machine epsilon: run to convergence).
        self.scipy_solver_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.solver_loss,
            var_list=self.weights + self.biases,
            method="L-BFGS-B",
            options={
                "maxiter": 50000,
                "maxfun": 50000,
                "maxcor": 50,
                "maxls": 50,
                "ftol": 1.0 * np.finfo(float).eps
            })

        # Adam warm-up optimizer.
        self.adam_solver_optimizer = tf.train.AdamOptimizer()
        self.sol_train_op_Adam = self.adam_solver_optimizer.minimize(
            self.solver_loss, var_list=self.weights + self.biases)
# Example no. 3 (score: 0)
    def identifier_init(self, t, x, u, u_layers, pde_layers):
        """Set up the identification graph: u-network, PDE network, losses, optimizers."""
        # Training data for identification.
        self.t = t
        self.x = x
        self.u = u

        # Layer sizes for both networks.
        self.u_layers = u_layers
        self.pde_layers = pde_layers

        # Network parameters: one u-network, one PDE network.
        self.u_weights, self.u_biases = nn.initialize_nn(u_layers)
        self.pde_weights, self.pde_biases = nn.initialize_nn(pde_layers)

        # Placeholders: float32 columns, plus one [batch, n_terms] feed where
        # pde_layers[0] is the number of candidate PDE terms.
        self.t_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.u_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.x_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.terms_placeholder = tf.placeholder(tf.float32,
                                                [None, pde_layers[0]])

        # Graph outputs.
        self.u_pred = self.identifier_net(self.t_placeholder,
                                          self.x_placeholder)
        self.pde_pred = self.pde_net(self.terms_placeholder)
        self.f_pred = self.identifier_f(self.t_placeholder, self.x_placeholder)

        # Losses: u_loss couples the data fit with the PDE residual; f_loss
        # is the residual alone.
        data_residual = tf.square(self.u_pred - self.u_placeholder)
        self.u_loss = tf.reduce_sum(data_residual + tf.square(self.f_pred))
        self.f_loss = tf.reduce_sum(tf.square(self.f_pred))

        # Variable groups shared by the optimizers below.
        joint_vars = (self.u_weights + self.u_biases + self.pde_weights +
                      self.pde_biases)
        pde_vars = self.pde_weights + self.pde_biases

        def _lbfgs_options():
            # Fresh dict per optimizer; ftol at machine epsilon means
            # "iterate until the line search can no longer improve".
            return {
                "maxiter": 50000,
                "maxfun": 50000,
                "maxcor": 50,
                "maxls": 50,
                "ftol": 1.0 * np.finfo(float).eps
            }

        # Scipy L-BFGS-B refinement optimizers.
        self.scipy_u_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.u_loss,
            var_list=joint_vars,
            method="L-BFGS-B",
            options=_lbfgs_options())
        self.scipy_f_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.f_loss,
            var_list=pde_vars,
            method="L-BFGS-B",
            options=_lbfgs_options())

        # Adam warm-up optimizers (one per loss, independent moment estimates).
        self.adam_u_optimizer = tf.train.AdamOptimizer()
        self.adam_f_optimizer = tf.train.AdamOptimizer()
        self.adam_u_optimizer_train = self.adam_u_optimizer.minimize(
            self.u_loss, var_list=joint_vars)
        self.adam_f_optimizer_train = self.adam_f_optimizer.minimize(
            self.f_loss, var_list=pde_vars)