Example #1
0
    def load_model(self, location, scale_file="", logp=True):
        """Load a trained autoencoder from disk into a fresh TF graph/session.

        Two load paths, selected by the module-level LOAD_PROTOBUF flag:
        a frozen GraphDef protobuf, or a meta-graph plus checkpoint.

        Parameters
        ----------
        location : str
            Directory containing 'final_variables_frz' (frozen) or
            'final_variables.meta' + checkpoint files.
        scale_file : str
            Optional CSV (one header row, comma-delimited) holding the
            scaling constants used to condition the training data.
            When empty, outputs are left in the network's native space.
        logp : bool
            If True, column 1 of the decoded output is treated as
            log-pressure and exponentiated when unscaling.
            NOTE(review): column semantics (T, p, rho, rho_h?) inferred
            from build_flux/build_dae usage — confirm against the trainer.

        Side effects: sets self._graph, self._sess, the tensor hooks
        (i_q, o_x, i_x, o_q), self.scale, self.o_s, the Jacobian hooks
        (o_dxdq, o_dsdq), and self.dtype.
        """
        self._graph = tf.Graph()
        self._sess = tf.Session(graph=self._graph)
        if LOAD_PROTOBUF:
            # Frozen-graph path: weights are baked into the GraphDef,
            # so no checkpoint restore is needed.
            with tf.gfile.GFile(location + '/final_variables_frz', 'rb') as f:
                gdef = tf.GraphDef()
                gdef.ParseFromString(f.read())
            with self._graph.as_default():
                tf.import_graph_def(gdef)
            # import_graph_def prefixes every op name with 'import/'.
            pfx = 'import/'
        else:
            # Meta-graph path: rebuild the graph structure, then restore
            # the variable values from the checkpoint.
            with self._graph.as_default():
                saver = tf.train.import_meta_graph(location +
                                                   '/final_variables.meta')
            saver.restore(self._sess, location + '/final_variables')
            pfx = ''
        # hooks to the decoder
        self.i_q = self._graph.get_tensor_by_name(pfx + 'i_q:0')
        self.o_x = self._graph.get_tensor_by_name(pfx + 'decode:0')
        # hooks to the encoder
        self.i_x = self._graph.get_tensor_by_name(pfx + 'i_x:0')
        self.o_q = self._graph.get_tensor_by_name(pfx + 'encode:0')
        # The simulation inputs and outputs in the unscaled space
        # (the prior conditioning of the data put it on [-1,1])
        if scale_file:
            self.logp = logp
            raw_scale = np.loadtxt(scale_file, skiprows=1, delimiter=',')
            # Keep only the first four columns of scaling constants.
            self.scale = raw_scale[:, 0:4]
            # Undo the [-1,1] conditioning applied during training.
            unshifted = unshift(self.o_x, self.scale[:, 0:4])
            if logp:
                # Column 1 was trained as log(p); exponentiate it back
                # while leaving the other columns untouched.
                self.o_s = tf.concat([
                    tf.expand_dims(unshifted[:, 0], -1),
                    tf.expand_dims(tf.math.exp(unshifted[:, 1]), -1),
                    unshifted[:, 2:]
                ],
                                     axis=1)
            else:
                self.o_s = unshifted
        else:
            # No scale file: unscaled output is just the raw decoder output.
            self.scale = None
            self.o_s = self.o_x

        # Jacobians of the decoder outputs w.r.t. the latent coordinates,
        # in both the network space (x) and the unscaled space (s).
        self.o_dxdq = atu.vector_gradient_dep(self.o_x, self.i_q)
        self.o_dsdq = atu.vector_gradient_dep(self.o_s, self.i_q)

        self.dtype = self.i_x.dtype
Example #2
0
 def make_goal(self, data):
     """Build the training objective for the autoencoder.

     Parameters
     ----------
     data : tf.Tensor
         Batch of training samples in the (scaled) input space.

     Returns
     -------
     tf.Tensor
         Scalar loss: MSE reconstruction error, plus (when
         self.cae_lambda != 0) a contractive-autoencoder penalty on the
         encoder Jacobian norm.
     """
     q = self.encode(data)
     pred = self.decode(q)
     # Reconstruction term: mean squared error in the input space.
     loss = tf.losses.mean_squared_error(data, pred)
     if self.cae_lambda != 0:
         # Contractive penalty: Frobenius norm of dq/dx per sample,
         # weighted by cae_lambda.
         dqdx = atu.vector_gradient_dep(q, data)
         cae = tf.constant(self.cae_lambda, dtype=self.dtype) \
                 * tf.norm(dqdx, axis=(1, 2))
         # BUGFIX: the original used tf.metrics.mean(cae)[0], which is a
         # *streaming* metric — its value tensor reads local variables
         # that stay at zero until the (never-run) update op executes,
         # and it blocks gradient flow. tf.reduce_mean is the correct
         # batch average for a differentiable loss term.
         return loss + tf.reduce_mean(cae)
     else:
         return loss
Example #3
0
    def build_flux(self):
        """Build the pairwise flux operator between two cell states.

        Creates placeholders for the two cell positions (i_XA, i_XB) and a
        second latent input (i_q2), replicates the unscaled-output subgraph
        so the same decoder can be evaluated at both latent states, and
        wires up the flux tensor o_F plus its Jacobian o_KF with respect
        to both latent inputs.

        This should really go into the child LatentFlow.

        Side effects: sets self.i_XA, self.i_XB, self.i_q2, self.o_s2,
        self.o_F, self.o_KF and runs the initializer for self._vars.
        """
        with self._graph.as_default():
            # Positions of the two cells; shape (batch, 2) in 2D.
            self.i_XA = tf.placeholder(name='i_XA',
                                       shape=(None, 2),
                                       dtype=self.dtype)
            self.i_XB = tf.placeholder(name='i_XB',
                                       shape=(None, 2),
                                       dtype=self.dtype)
            # Second latent coordinate (state of cell B).
            self.i_q2 = tf.placeholder(name='i_q2',
                                       shape=(None, 2),
                                       dtype=self.dtype)
            # Clone the decode-and-unscale subgraph with i_q swapped for
            # i_q2, so o_s2 is the unscaled state decoded from i_q2.
            self.o_s2 = atu.replicate_subgraph(self.o_s, {self.i_q: self.i_q2})
            # Unpack the four physical fields for each cell.
            # NOTE(review): field order (T, p, rho, rho_h) assumed from
            # the names here and in build_dae — confirm against training.
            TA, pA, rhoA, rho_hA = tf.split(self.o_s, 4, axis=-1)
            TB, pB, rhoB, rho_hB = tf.split(self.o_s2, 4, axis=-1)
            self.o_F = self.flux(TA, pA, rhoA, rho_hA, TB, pB, rhoB, rho_hB,
                                 self.i_XA, self.i_XB)

            # Jacobian of the flux w.r.t. both latent states, concatenated
            # along the last axis: [dF/dq, dF/dq2].
            self.o_KF = tf.concat([
                atu.vector_gradient_dep(self.o_F, self.i_q),
                atu.vector_gradient_dep(self.o_F, self.i_q2)
            ],
                                  axis=-1)
        # Initialize the registered parameter variables in this session.
        self._sess.run(tf.variables_initializer(self._vars.values()))
Example #4
0
    def build_dae(self, method='BWEuler'):
        """Builds the differential algebraic equation and sets up the system
        lhs(q)=rhs
        K = d lhs / dq

        Parameters
        ----------
        method : str
            Time-integration scheme: 'BWEuler' (backward Euler, aii=1.0)
            or 'Trap' (trapezoidal rule, aii=0.5). Any other value raises
            KeyError.

        Side effects: sets self.m, self.r, self.lhs, self.rhs, self.K_lhs
        and initializes the registered variables (aii, Dt, ...) in the
        session.
        """
        # Implicitness coefficient of the one-step theta scheme.
        aii = {'BWEuler': 1.0, 'Trap': 0.5}[method]
        with self._graph.as_default():
            # Register aii and the timestep Dt as graph variables so they
            # can be adjusted at run time without rebuilding the graph.
            aii = self.regvar('aii', aii)
            Dt = self.regvar('Dt', 1.0)

            # Unpack the unscaled decoder output into the physical fields.
            T, p, rho, rho_h = tf.split(self.o_s, 4, axis=-1)
            # m: conserved quantities ("mass" terms); r: their rates.
            # NOTE(review): exact semantics live in self.m_and_r — confirm.
            m, r = self.m_and_r(T, p, rho, rho_h)
            self.m = m
            self.r = r
            # Theta-scheme residual split: solve lhs(q_new) = rhs(q_old).
            self.lhs = m - Dt * aii * r
            self.rhs = m + (1.0 - aii) * Dt * r
            # Jacobian of the implicit side w.r.t. the latent coordinates,
            # used as the Newton system matrix K.
            self.K_lhs = atu.vector_gradient_dep(self.lhs, self.i_q)

            # Initialize parameters
            ini = tf.variables_initializer(self._vars.values())
        self._sess.run(ini)