Code example #1
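These snippets appear to be methods of a FixedPointFinder-style class that locates fixed points of TensorFlow RNNs. They presume surrounding imports (presumably numpy as np, numpy.random as npr, tensorflow as tf, absl, a tf_utils helper module, and tensorflow.python.ops.parallel_for as pfor) and class attributes such as self.rnn_cell, self.session, self.is_lstm, and self.inputs.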
    def _grab_RNN(self, initial_states):
        '''Creates objects for interfacing with the RNN.

        These objects include 1) the optimization variables (initialized to
        the user-specified initial_states) which will, after optimization,
        contain fixed points of the RNN, and 2) hooks into those optimization
        variables that are required for building the TF graph.

        Args:
            initial_states: Either an [n_inits x n_dims] numpy array or an
            LSTMStateTuple with initial_states.c and initial_states.h as
            [n_inits x n_dims/2] numpy arrays. These data specify the initial
            states of the RNN, from which the optimization will search for
            fixed points. The choice of type must be consistent with the
            state type of rnn_cell.

        Returns:
            x: An [n_inits x n_dims] tf.Variable (the optimization variable)
            representing RNN states, initialized to the values in
            initial_states. If the RNN is an LSTM, n_dims represents the
            concatenated hidden and cell states.

            F: An [n_inits x n_dims] tf op representing the state transition
            function of the RNN applied to x.

            states: Contains the same data as x, but formatted to interface
            with self.rnn_cell (e.g., formatted as an LSTMStateTuple if
            rnn_cell is an LSTMCell).

            new_states: Contains the same data as F, but formatted to
            interface with self.rnn_cell.
        '''
        if self.is_lstm:
            # [n_inits x n_dims]
            c_h_init = tf_utils.convert_from_LSTMStateTuple(initial_states)

            # [n_inits x n_dims]
            x = tf.Variable(c_h_init, dtype=tf.float32)

            states = tf_utils.convert_to_LSTMStateTuple(x)
        else:
            x = tf.Variable(initial_states, dtype=tf.float32)
            states = x

        n_inits = x.shape[0]
        tiled_inputs = np.tile(self.inputs, [n_inits, 1])
        inputs_tf = tf.constant(tiled_inputs, dtype=tf.float32)

        output, new_states = self.rnn_cell(inputs_tf, states)

        if self.is_lstm:
            # [n_inits x n_dims]
            F = tf_utils.convert_from_LSTMStateTuple(new_states)
        else:
            F = new_states

        init = tf.variables_initializer(var_list=[x])
        self.session.run(init)

        return x, F, states, new_states
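A minimal sketch of how a caller might use these returns, as it could appear inside another method of the same class: it assumes the standard squared-residual fixed point objective, and the Adam optimizer, learning rate, and iteration count are illustrative assumptions, not the source's actual optimization loop.

        # Hypothetical continuation (assumed, not from the source): minimize
        # q_i = 0.5 * ||F(x_i) - x_i||^2 over the optimization variable x.
        x, F, states, new_states = self._grab_RNN(initial_states)
        q = 0.5 * tf.reduce_sum(tf.square(F - x), axis=1)  # [n_inits]
        train_op = tf.train.AdamOptimizer(1e-2).minimize(
            tf.reduce_mean(q), var_list=[x])
        self.session.run(tf.global_variables_initializer())
        for _ in range(1000):
            self.session.run(train_op)
        xstar, qstar = self.session.run([x, q])  # candidate fixed points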
Code example #2
    def _compute_multiple_jacobians_np(self, fps):
        '''Computes the Jacobian of the RNN state transition function.

        Args:
            fps: A FixedPoints object containing the RNN states (fps.xstar)
            and inputs (fps.inputs) at which to compute the Jacobians.

        Returns:
            J_np: An [n x n_states x n_states] numpy array containing the
            Jacobian of the RNN state transition function at the states
            specified in fps, given the inputs in fps.

        '''
        inputs_np = fps.inputs

        if self.is_lstm:
            states_np = tf_utils.convert_to_LSTMStateTuple(fps.xstar)
        else:
            states_np = fps.xstar

        x_tf, F_tf = self._grab_RNN(states_np, inputs_np)
        try:
            J_tf = pfor.batch_jacobian(F_tf, x_tf)
        except absl.flags._exceptions.UnparsedFlagAccessError:
            J_tf = pfor.batch_jacobian(F_tf, x_tf, use_pfor=False)

        J_np = self.session.run(J_tf)

        return J_np
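Though not shown in the source, a common follow-up is linear stability analysis: a fixed point of a discrete-time system is locally stable exactly when all eigenvalues of its Jacobian lie inside the unit circle. A hedged sketch:

        # Hypothetical post-processing (assumed): classify each fixed point
        # by the spectrum of its Jacobian.
        J_np = self._compute_multiple_jacobians_np(fps)  # [n x n_states x n_states]
        eigvals = np.linalg.eigvals(J_np)                # [n x n_states], complex
        is_stable = np.all(np.abs(eigvals) < 1.0, axis=1)
        print('%d of %d fixed points are locally stable.'
              % (is_stable.sum(), len(is_stable)))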
Code example #3
    def sample_states(self,
                      state_traj,
                      n_inits,
                      noise_scale=0.0,
                      rng=npr.RandomState(0)):
        '''Draws random samples from trajectories of the RNN state. Samples
        can optionally be corrupted by independent and identically distributed
        (IID) Gaussian noise. These samples are intended to be used as initial
        states for fixed point optimizations.

        Args:
            state_traj: [n_batch x n_time x n_states] numpy array or
            LSTMStateTuple with .c and .h as [n_batch x n_time x n_states]
            numpy arrays. Contains example trajectories of the RNN state.

            n_inits: int specifying the number of sampled states to return.

            noise_scale (optional): non-negative float specifying the standard
            deviation of the IID Gaussian noise added to the sampled states.
            Default: 0.0.

            rng (optional): numpy.random.RandomState object used for sampling
            and for generating noise. Default: npr.RandomState(0).

        Returns:
            initial_states: Sampled RNN states as a [n_inits x n_states] numpy
            array or as an LSTMStateTuple with .c and .h as [n_inits x
            n_states] numpy arrays (type matches that of state_traj).

        Raises:
            ValueError if noise_scale is negative.
        '''
        if self.is_lstm:
            state_traj_bxtxd = tf_utils.convert_from_LSTMStateTuple(state_traj)
        else:
            state_traj_bxtxd = state_traj

        [n_batch, n_time, n_states] = state_traj_bxtxd.shape

        # Draw random samples from state trajectories
        states = np.zeros([n_inits, n_states])
        for init_idx in range(n_inits):
            trial_idx = rng.randint(n_batch)
            time_idx = rng.randint(n_time)
            states[init_idx, :] = state_traj_bxtxd[trial_idx, time_idx, :]

        # Validate noise_scale, then add IID Gaussian noise to the samples
        if noise_scale < 0.0:
            raise ValueError('noise_scale must be non-negative,'
                             ' but was %f' % noise_scale)

        if noise_scale > 0.0:
            states += noise_scale * rng.randn(n_inits, n_states)

        if self.is_lstm:
            return tf_utils.convert_to_LSTMStateTuple(states)
        else:
            return states
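A hedged usage sketch (finder and state_traj are placeholder names). Note that the default rng is created once when the function is defined, so repeated calls with the default share and advance the same RandomState; pass an explicit RandomState for reproducible, independent draws.

    # Hypothetical usage (placeholder names): draw 256 initial states, each
    # perturbed by zero-mean Gaussian noise with standard deviation 0.5.
    initial_states = finder.sample_states(state_traj,
                                          n_inits=256,
                                          noise_scale=0.5,
                                          rng=npr.RandomState(42))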
Code example #4
    def _identify_unique_fixed_points(self):
        '''Identifies the unique fixed points found after optimizing from all
        initial_states.

        After running, the following class variables contain the data
        corresponding to the unique fixed points (see find_fixed_points for
        detailed descriptions):

        unique_xstar, unique_F_xstar, unique_qstar, unique_dq, unique_n_iters

        Args:
            None.

        Returns:
            None.

        '''
        def unique_rows(x, approx_tol):
            # Quick and dirty: round to d = -log10(approx_tol) decimals, then
            # take unique rows. Can update using pdist if necessary.
            d = int(np.round(-np.log10(approx_tol)))
            ux, idx = np.unique(x.round(decimals=d), axis=0, return_index=True)
            return ux, idx

        self.unique_xstar, idx = unique_rows(self.xstar, self.tol_unique)

        self.n_unique = len(idx)
        self.unique_F_xstar = self.F_xstar[idx]

        if self.is_lstm:
            self.unique_states = \
                tf_utils.convert_to_LSTMStateTuple(self.unique_xstar)
            self.unique_new_states = \
                tf_utils.convert_to_LSTMStateTuple(self.unique_F_xstar)
        else:
            self.unique_states = self.unique_xstar
            self.unique_new_states = self.unique_F_xstar

        self.unique_qstar = self.qstar[idx]
        self.unique_dq = self.dq[idx]
        self.unique_n_iters = self.n_iters[idx]
        '''In normal operation, Jacobians haven't yet been computed, so don't
        ...'''
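A small numeric illustration of the rounding trick in unique_rows: with tol_unique = 1e-3, rows are rounded to d = 3 decimals before np.unique, so rows that agree to that precision collapse to one fixed point. Because this is rounding rather than a true pairwise-distance test, two points within 1e-3 of each other can still straddle a rounding boundary and remain distinct, hence the "quick and dirty" caveat.

    # Illustration (not from the source) of unique_rows with approx_tol = 1e-3.
    x = np.array([[0.12341, 0.56784],
                  [0.12339, 0.56776],   # rounds to the same row as above
                  [0.90000, 0.10000]])
    ux, idx = np.unique(x.round(decimals=3), axis=0, return_index=True)
    # ux -> [[0.123, 0.568], [0.9, 0.1]]; idx -> [0, 2]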
Code example #5
    def _grab_RNN(self, initial_states, inputs):
        '''Creates objects for interfacing with the RNN.

        These objects include 1) the optimization variables (initialized to
        the user-specified initial_states) which will, after optimization,
        contain fixed points of the RNN, and 2) hooks into those optimization
        variables that are required for building the TF graph.

        Args:
            initial_states: Either an [n x n_states] numpy array or an
            LSTMStateTuple with initial_states.c and initial_states.h as
            [n x n_states/2] numpy arrays. These data specify the initial
            states of the RNN, from which the optimization will search for
            fixed points. The choice of type must be consistent with the
            state type of rnn_cell.

            inputs: A [n x n_inputs] numpy array specifying the inputs to the
            RNN for this fixed point optimization.

        Returns:
            x: An [n x n_states] tf.Variable (the optimization variable)
            representing RNN states, initialized to the values in
            initial_states. If the RNN is an LSTM, n_states represents the
            concatenated hidden and cell states.

            F: An [n x n_states] tf op representing the state transition
            function of the RNN applied to x.
        '''

        if self.is_lstm:
            c_h_init = tf_utils.convert_from_LSTMStateTuple(initial_states)
            x = tf.Variable(c_h_init, dtype=self.tf_dtype)
            x_rnncell = tf_utils.convert_to_LSTMStateTuple(x)
        else:
            x = tf.Variable(initial_states, dtype=self.tf_dtype)
            x_rnncell = x

        n = x.shape[0]
        inputs_tf = tf.constant(inputs, dtype=self.tf_dtype)

        output, F_rnncell = self.rnn_cell(inputs_tf, x_rnncell)

        if self.is_lstm:
            F = tf_utils.convert_from_LSTMStateTuple(F_rnncell)
        else:
            F = F_rnncell

        init = tf.variables_initializer(var_list=[x])
        self.session.run(init)

        return x, F
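Relative to example #1, this revision accepts the inputs as an explicit [n x n_inputs] argument instead of tiling self.inputs internally, honors a configurable self.tf_dtype, and returns only (x, F). A hedged sketch of the caller-side preparation this implies (single_input and n are placeholder names):

        # Hypothetical caller-side preparation (assumed): broadcast one input
        # vector to one row per initial state before calling _grab_RNN.
        inputs = np.tile(single_input[np.newaxis, :], [n, 1])  # [n x n_inputs]
        x_tf, F_tf = self._grab_RNN(initial_states, inputs)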
Code example #6
    def _get_rnncell_compatible_states(self, states):
        '''Converts RNN states if necessary to be compatible with
        self.rnn_cell.

        Args:
            states:
                Either a numpy array or LSTMStateTuple.

        Returns:
            A representation of states that is compatible with self.rnn_cell.
            If self.rnn_cell is an LSTMCell, the representation is as an
            LSTMStateTuple. Otherwise, the representation is a numpy array.
        '''
        if self.is_lstm:
            return tf_utils.convert_to_LSTMStateTuple(states)
        else:
            return states
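Design note: this small wrapper centralizes the if self.is_lstm branching that examples #1, #4, and #7 each write out inline, letting callers stay agnostic to whether rnn_cell carries its state as a plain array or as an LSTMStateTuple.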
Code example #7
    def _run_additional_iterations_on_outliers(self):
        '''Detects outlier states with respect to the q function and runs
        additional optimization iterations on those states, using the (slow)
        sequential optimization procedure. This should only be used after
        calling either _run_joint_optimization or
        _run_sequential_optimizations. Updates class variables containing
        optimization results.

        Args:
            None.

        Returns:
            None.
        '''
        outlier_min_q = np.median(self.qstar) * self.outlier_q_scale
        is_outlier = self.qstar > outlier_min_q
        idx_outliers = np.where(is_outlier)[0]
        n_outliers = len(idx_outliers)

        print('Detected %d "outliers."' % n_outliers)

        if n_outliers == 0:
            return

        print('Performing additional optimization iterations.')
        for counter, idx in enumerate(idx_outliers):
            print('\n\tOutlier %d of %d (q=%.3e).' %
                  (counter + 1, n_outliers, self.qstar[idx]))

            initial_state = np.expand_dims(self.xstar[idx], axis=0)
            if self.is_lstm:
                initial_state = tf_utils.convert_to_LSTMStateTuple(
                    initial_state)
            xstar, F_xstar, qstar, dq, n_iters = self._run_single_optimization(
                initial_state)
            self.xstar[idx] = xstar
            self.F_xstar[idx] = F_xstar
            self.qstar[idx] = qstar
            self.dq[idx] = dq
            self.n_iters[idx] += n_iters
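A numeric illustration of the outlier criterion (the values are invented): with outlier_q_scale = 10, any state whose q exceeds 10 times the median q is flagged and re-optimized individually.

    # Illustration (not from the source) of the outlier threshold.
    qstar = np.array([1e-9, 2e-9, 5e-9, 3e-4])
    outlier_min_q = np.median(qstar) * 10.0            # 3.5e-8
    idx_outliers = np.where(qstar > outlier_min_q)[0]  # -> array([3])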