Example #1
    def likelihood_loss(self):
        if self.output_layer.nonlinearity == tf.nn.softmax:
            logits = self.output_layer.get_logits_for(
                L.get_output(self.layers[-2]))
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logits, labels=tf.squeeze(self.target_var))
            )

        elif self.output_layer.nonlinearity == tf.identity:
            outputs = self.output_layer.get_output_for(
                L.get_output(self._layers[-2]))
            loss = tf.reduce_mean(
                0.5 * tf.square(outputs - self.target_var), name='like_loss'
            )

        elif self.output_layer.nonlinearity == tf.nn.sigmoid:

            logits = self.output_layer.get_logits_for(
                L.get_output(self.layers[-2]))
            sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits, labels=tf.squeeze(self.target_var))

            if sigmoid_loss.get_shape().ndims == 2:
                loss = tf.reduce_mean(
                    tf.reduce_sum(sigmoid_loss, reduction_indices=1)
                )
            else:
                loss = tf.reduce_mean(sigmoid_loss)

        return loss
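
For reference only, a minimal NumPy sketch of the three losses selected above (softmax cross-entropy, half squared error for the identity output, and elementwise sigmoid cross-entropy). This is an illustration of the math, not part of the original class:

import numpy as np

def softmax_xent(logits, onehot_targets):
    # cross-entropy of one-hot targets against softmax(logits), averaged over the batch
    z = logits - logits.max(axis=1, keepdims=True)
    log_probs = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return -np.mean((onehot_targets * log_probs).sum(axis=1))

def half_squared_error(outputs, targets):
    # matches the tf.identity branch: mean of 0.5 * (output - target)^2
    return np.mean(0.5 * np.square(outputs - targets))

def sigmoid_xent(logits, targets):
    # numerically stable elementwise sigmoid cross-entropy
    return np.maximum(logits, 0) - logits * targets + np.log1p(np.exp(-np.abs(logits)))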
 def log_likelihood_sym(self, x_var, y_var):
     if config.TF_NN_SETTRACE:
         ipdb.set_trace()
     normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
     prob = L.get_output(self.l_prob,
                         {self.prob_network.input_layer: normalized_xs_var})
     return self._dist.log_likelihood_sym(y_var, dict(prob=prob))
 def dist_info_sym(self, x_var):
     if config.TF_NN_SETTRACE:
         ipdb.set_trace()
     normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
     prob = L.get_output(self.l_prob,
                         {self.prob_network.input_layer: normalized_xs_var})
     return dict(prob=prob)
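
The Categorical distribution's log_likelihood_sym is defined elsewhere in the codebase; as a sketch of the quantity it computes, for one-hot targets y and predicted class probabilities prob the per-sample log-likelihood is sum_i y_i * log(prob_i). A NumPy illustration (the epsilon is an assumption added for numerical safety):

import numpy as np

def categorical_log_likelihood(y_onehot, prob, eps=1e-8):
    # per-sample log-likelihood of one-hot labels under a categorical distribution
    return np.sum(y_onehot * np.log(prob + eps), axis=-1)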
Example #4
 def likelihood_loss(self):
     logits = self.output_layer.get_logits_for(
         L.get_output(self.layers[-2]))
     #logits = L.get_output(self.layers[-1])
     loss = tf.nn.sigmoid_cross_entropy_with_logits(
         logits=logits, labels=self.target_var)
     #ent_B = tfutil.logit_bernoulli_entropy(logits)
     #self.obj = tf.reduce_sum(loss_B - self.ent_reg_weight * ent_B)
     return tf.reduce_sum(loss)
Example #5
    def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity,
                 output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer, batch_size=None,
                 input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False,
                 ):
        Serializable.quick_init(self, locals())
        self.name = name

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(
                    shape=(batch_size,) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                ls = L.batch_norm(l_hid)
                l_hid = ls[-1]
                self._layers += ls
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(
                    l_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization
                )
                if batch_normalization:
                    ls = L.batch_norm(l_hid)
                    l_hid = ls[-1]
                    self._layers += ls
                self._layers.append(l_hid)
            l_out = L.DenseLayer(
                l_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization
            )
            if batch_normalization:
                ls = L.batch_norm(l_out)
                l_out = ls[-1]
                self._layers += ls
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            self._l_tar = L.InputLayer(
                shape=(batch_size,) + (output_dim,), input_var=input_var, name="target")

            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
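
The default hidden_W_init above, L.XavierUniformInitializer(), follows the Glorot/Xavier uniform scheme. A minimal NumPy sketch of that initialization (illustrative only, not the layer library's implementation):

import numpy as np

def xavier_uniform(fan_in, fan_out, rng=np.random):
    # sample uniformly from [-limit, limit] with limit = sqrt(6 / (fan_in + fan_out))
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    return rng.uniform(-limit, limit, size=(fan_in, fan_out)).astype(np.float32)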
Example #6
 def compute_score(self, X):
     """
     predict logits ...
     """
     logits = self.output_layer.get_logits_for(
         L.get_output(self.layers[-2]))
     #logits = self.output
     Y_p = self._predict(logits, X)
     return Y_p
    def log_likelihood_sym(self, x_var, y_var):
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var

        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})

        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)

        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))
 def dist_info_sym(self, obs_var, state_info_vars=None):
     mean_var, std_param_var = L.get_output([self._l_mean, self._l_std_param], obs_var)
     if self.min_std_param is not None:
         std_param_var = tf.maximum(std_param_var, self.min_std_param)
     if self.std_parametrization == 'exp':
         log_std_var = std_param_var
     elif self.std_parametrization == 'softplus':
         log_std_var = tf.log(tf.log(1. + tf.exp(std_param_var)))
     else:
         raise NotImplementedError
     return dict(mean=mean_var, log_std=log_std_var)
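
The two std parametrizations map the raw std_param to a log standard deviation differently: 'exp' stores log_std directly, while 'softplus' stores std = log(1 + exp(param)), which is why the code above takes a log of a log. A NumPy sketch of the mapping:

import numpy as np

def log_std_from_param(std_param, parametrization):
    if parametrization == 'exp':
        # the parameter already is the log standard deviation
        return std_param
    elif parametrization == 'softplus':
        # std = softplus(param) = log(1 + exp(param)), so log_std = log(softplus(param))
        return np.log(np.log1p(np.exp(std_param)))
    raise NotImplementedError(parametrization)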
Example #9
 def compute_score(self, X):
     """
     predict logits ...
     """
     if config.TF_NN_SETTRACE:
         ipdb.set_trace()
     logits = self.output_layer.get_logits_for(L.get_output(
         self.layers[-2]))
     #logits = self.output
     Y_p = self._predict(logits, X)
     return Y_p
 def dist_info_sym(self, obs_var, state_info_vars):
     n_batches = tf.shape(obs_var)[0]
     n_steps = tf.shape(obs_var)[1]
     obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, -1]))
     if self.state_include_action:
         prev_action_var = state_info_vars["prev_action"]
         all_input_var = tf.concat(2, [obs_var, prev_action_var])
     else:
         all_input_var = obs_var
     if self.feature_network is None:
         means, log_stds = L.get_output(
             [self.mean_network.output_layer, self.l_log_std],
             {self.l_input: all_input_var})
     else:
         flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
         means, log_stds = L.get_output(
             [self.mean_network.output_layer, self.l_log_std], {
                 self.l_input: all_input_var,
                 self.feature_network.input_layer: flat_input_var
             })
     return dict(mean=means, log_std=log_stds)
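
When a feature_network is present, the (batch, steps, dim) observations are flattened to (batch * steps, dim) for the feature MLP while the recurrent network consumes the reshaped result. A shape-only NumPy sketch of that round trip:

import numpy as np

obs = np.zeros((4, 7, 10), dtype=np.float32)    # (n_batches, n_steps, input_dim)
flat = obs.reshape(-1, obs.shape[-1])           # (n_batches * n_steps, input_dim) for the feature MLP
restored = flat.reshape(obs.shape[0], obs.shape[1], -1)
assert restored.shape == obs.shape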
Example #11
    def complexity_loss(self, reg, cmx):
        """
        Compute penalties for model complexity (e.g., l2 regularization, or kl penalties for vae and bnn).
        """
        # loss coming from weight regularization
        loss = reg * tf.reduce_sum(
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        # loss coming from data-dependent regularization
        for layer in self.layers:
            if layer.penalize_complexity:
                z_mu, z_sig = layer.get_dparams_for(
                    L.get_output(layer.input_layer))
                d_loss = layer.bayesreg.activ_kl(z_mu, z_sig)

                loss += cmx * d_loss

        # note: the weight-regularization term above already carries the `reg` factor
        return loss
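
The layer-level activ_kl term depends on each layer's Bayesian regularizer and is not shown here. Purely as an illustration of the kind of penalty involved, the closed-form KL between a diagonal Gaussian posterior N(mu, sig^2) and a standard normal prior (the usual VAE/BNN term) is:

import numpy as np

def kl_diag_gaussian_vs_std_normal(z_mu, z_sig):
    # KL( N(mu, diag(sig^2)) || N(0, I) ), summed over latent dimensions
    return 0.5 * np.sum(np.square(z_sig) + np.square(z_mu) - 1.0 - 2.0 * np.log(z_sig), axis=-1)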
    def __init__(
        self,
        name,
        env_spec,
        hidden_dim=32,
        feature_network=None,
        state_include_action=True,
        hidden_nonlinearity=tf.tanh,
        gru_layer_cls=L.GRULayer,
        learn_std=True,
        init_std=1.0,
        output_nonlinearity=None,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            Serializable.quick_init(self, locals())
            super(GaussianGRUPolicy, self).__init__(env_spec)

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim

            l_input = L.InputLayer(shape=(None, None, input_dim), name="input")

            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.pack([
                            tf.shape(input)[0],
                            tf.shape(input)[1], feature_dim
                        ])),
                    shape_op=lambda _, input_shape:
                    (input_shape[0], input_shape[1], feature_dim))

            mean_network = GRUNetwork(input_shape=(feature_dim, ),
                                      input_layer=l_feature,
                                      output_dim=action_dim,
                                      hidden_dim=hidden_dim,
                                      hidden_nonlinearity=hidden_nonlinearity,
                                      output_nonlinearity=output_nonlinearity,
                                      gru_layer_cls=gru_layer_cls,
                                      name="mean_network")

            l_log_std = L.ParamLayer(
                mean_network.input_layer,
                num_units=action_dim,
                param=tf.constant_initializer(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )

            l_step_log_std = L.ParamLayer(
                mean_network.step_input_layer,
                num_units=action_dim,
                param=l_log_std.param,
                name="step_output_log_std",
                trainable=learn_std,
            )

            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action

            flat_input_var = tf.placeholder(dtype=tf.float32,
                                            shape=(None, input_dim),
                                            name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(
                    l_flat_feature,
                    {feature_network.input_layer: flat_input_var})

            self.f_step_mean_std = tensor_utils.compile_function(
                [
                    flat_input_var,
                    #mean_network.step_prev_hidden_layer.input_var,
                    mean_network.step_prev_state_layer.input_var
                ],
                L.get_output([
                    mean_network.step_output_layer,
                    l_step_log_std,
                    mean_network.step_hidden_layer,
                ], {mean_network.step_input_layer: feature_var}))

            self.l_log_std = l_log_std

            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim

            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentDiagonalGaussian(action_dim)

            out_layers = [mean_network.output_layer, l_log_std, l_step_log_std]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)

            LayersPowered.__init__(self, out_layers)
 def dist_info_sym(self, x_var):
     normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
     prob = L.get_output(self.l_prob,
                         {self.prob_network.input_layer: normalized_xs_var})
     return dict(prob=prob)
 def log_likelihood_sym(self, x_var, y_var):
     normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
     prob = L.get_output(self.l_prob,
                         {self.prob_network.input_layer: normalized_xs_var})
     return self._dist.log_likelihood_sym(y_var, dict(prob=prob))
    def __init__(
        self,
        name,
        input_shape,
        output_dim,
        prob_network=None,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=tf.nn.tanh,
        optimizer=None,
        tr_optimizer=None,
        use_trust_region=True,
        step_size=0.01,
        normalize_inputs=True,
        no_initial_trust_region=True,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if optimizer is None:
                optimizer = LbfgsOptimizer(name="optimizer")
            if tr_optimizer is None:
                tr_optimizer = ConjugateGradientOptimizer()

            self.output_dim = output_dim
            self.optimizer = optimizer
            self.tr_optimizer = tr_optimizer

            if prob_network is None:
                prob_network = MLP(input_shape=input_shape,
                                   output_dim=output_dim,
                                   hidden_sizes=hidden_sizes,
                                   hidden_nonlinearity=hidden_nonlinearity,
                                   output_nonlinearity=tf.nn.softmax,
                                   name="prob_network")

            l_prob = prob_network.output_layer

            LayersPowered.__init__(self, [l_prob])

            xs_var = prob_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32,
                                    shape=[None, output_dim],
                                    name="ys")
            old_prob_var = tf.placeholder(dtype=tf.float32,
                                          shape=[None, output_dim],
                                          name="old_prob")

            x_mean_var = tf.get_variable(name="x_mean",
                                         shape=(1, ) + input_shape,
                                         initializer=tf.constant_initializer(
                                             0., dtype=tf.float32))
            x_std_var = tf.get_variable(name="x_std",
                                        shape=(1, ) + input_shape,
                                        initializer=tf.constant_initializer(
                                            1., dtype=tf.float32))

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var

            prob_var = L.get_output(
                l_prob, {prob_network.input_layer: normalized_xs_var})

            old_info_vars = dict(prob=old_prob_var)
            info_vars = dict(prob=prob_var)

            dist = self._dist = Categorical(output_dim)

            mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))

            loss = -tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars))

            predicted = tensor_utils.to_onehot_sym(
                tf.argmax(prob_var, dimension=1), output_dim)

            self.prob_network = prob_network
            self.f_predict = tensor_utils.compile_function([xs_var], predicted)
            self.f_prob = tensor_utils.compile_function([xs_var], prob_var)
            self.l_prob = l_prob

            self.optimizer.update_opt(loss=loss,
                                      target=self,
                                      network_outputs=[prob_var],
                                      inputs=[xs_var, ys_var])
            self.tr_optimizer.update_opt(loss=loss,
                                         target=self,
                                         network_outputs=[prob_var],
                                         inputs=[xs_var, ys_var, old_prob_var],
                                         leq_constraint=(mean_kl, step_size))

            self.use_trust_region = use_trust_region
            self.name = name

            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var
            self.first_optimized = not no_initial_trust_region
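
The mean_kl constraint above is the categorical KL divergence between the old and new output distributions, averaged over the batch. A NumPy sketch of the per-sample quantity (illustrative, not the Categorical class itself):

import numpy as np

def categorical_kl(old_prob, new_prob, eps=1e-8):
    # KL(old || new) per sample, summed over the output classes
    return np.sum(old_prob * (np.log(old_prob + eps) - np.log(new_prob + eps)), axis=-1)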
Example #16
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 mean_network=None,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 optimizer=None,
                 use_trust_region=True,
                 step_size=0.01,
                 learn_std=True,
                 init_std=1.0,
                 adaptive_std=False,
                 std_share_network=False,
                 std_hidden_sizes=(32, 32),
                 std_nonlinearity=None,
                 normalize_inputs=True,
                 normalize_outputs=True,
                 subsample_factor=1.0):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If
        adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
        `std_share_network` is False. It defaults to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`
        is False. It defaults to the same non-linearity as the mean.
        """
        if config.TF_NN_SETTRACE:
            ipdb.set_trace()
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):

            if optimizer is None:
                if use_trust_region:
                    optimizer = PenaltyLbfgsOptimizer("optimizer")
                else:
                    optimizer = LbfgsOptimizer("optimizer")

            self._optimizer = optimizer
            self._subsample_factor = subsample_factor

            if mean_network is None:
                mean_network = MLP(
                    name="mean_network",
                    input_shape=input_shape,
                    output_dim=output_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=None,
                )

            l_mean = mean_network.output_layer

            if adaptive_std:
                l_log_std = MLP(
                    name="log_std_network",
                    input_shape=input_shape,
                    input_var=mean_network.input_layer.input_var,
                    output_dim=output_dim,
                    hidden_sizes=std_hidden_sizes,
                    hidden_nonlinearity=std_nonlinearity,
                    output_nonlinearity=None,
                ).output_layer
            else:
                l_log_std = L.ParamLayer(
                    mean_network.input_layer,
                    num_units=output_dim,
                    param=tf.constant_initializer(np.log(init_std)),
                    name="output_log_std",
                    trainable=learn_std,
                )

            LayersPowered.__init__(self, [l_mean, l_log_std])

            xs_var = mean_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32,
                                    name="ys",
                                    shape=(None, output_dim))
            old_means_var = tf.placeholder(dtype=tf.float32,
                                           name="ys",
                                           shape=(None, output_dim))
            old_log_stds_var = tf.placeholder(dtype=tf.float32,
                                              name="old_log_stds",
                                              shape=(None, output_dim))

            x_mean_var = tf.Variable(
                np.zeros((1, ) + input_shape, dtype=np.float32),
                name="x_mean",
            )
            x_std_var = tf.Variable(
                np.ones((1, ) + input_shape, dtype=np.float32),
                name="x_std",
            )
            y_mean_var = tf.Variable(
                np.zeros((1, output_dim), dtype=np.float32),
                name="y_mean",
            )
            y_std_var = tf.Variable(
                np.ones((1, output_dim), dtype=np.float32),
                name="y_std",
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var
            normalized_ys_var = (ys_var - y_mean_var) / y_std_var

            normalized_means_var = L.get_output(
                l_mean, {mean_network.input_layer: normalized_xs_var})
            normalized_log_stds_var = L.get_output(
                l_log_std, {mean_network.input_layer: normalized_xs_var})

            means_var = normalized_means_var * y_std_var + y_mean_var
            log_stds_var = normalized_log_stds_var + tf.log(y_std_var)

            normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
            normalized_old_log_stds_var = old_log_stds_var - tf.log(y_std_var)

            dist = self._dist = DiagonalGaussian(output_dim)

            normalized_dist_info_vars = dict(mean=normalized_means_var,
                                             log_std=normalized_log_stds_var)

            mean_kl = tf.reduce_mean(
                dist.kl_sym(
                    dict(mean=normalized_old_means_var,
                         log_std=normalized_old_log_stds_var),
                    normalized_dist_info_vars,
                ))

            loss = - \
                tf.reduce_mean(dist.log_likelihood_sym(
                    normalized_ys_var, normalized_dist_info_vars))

            self._f_predict = tensor_utils.compile_function([xs_var],
                                                            means_var)
            self._f_pdists = tensor_utils.compile_function(
                [xs_var], [means_var, log_stds_var])
            self._l_mean = l_mean
            self._l_log_std = l_log_std

            optimizer_args = dict(
                loss=loss,
                target=self,
                network_outputs=[
                    normalized_means_var, normalized_log_stds_var
                ],
            )

            if use_trust_region:
                optimizer_args["leq_constraint"] = (mean_kl, step_size)
                optimizer_args["inputs"] = [
                    xs_var, ys_var, old_means_var, old_log_stds_var
                ]
            else:
                optimizer_args["inputs"] = [xs_var, ys_var]

            self._optimizer.update_opt(**optimizer_args)

            self._use_trust_region = use_trust_region
            self._name = name

            self._normalize_inputs = normalize_inputs
            self._normalize_outputs = normalize_outputs
            self._mean_network = mean_network
            self._x_mean_var = x_mean_var
            self._x_std_var = x_std_var
            self._y_mean_var = y_mean_var
            self._y_std_var = y_std_var
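
The regressor above fits the Gaussian in normalized target space and maps back with means = normalized_means * y_std + y_mean and log_stds = normalized_log_stds + log(y_std). A small NumPy check of the scale part of that identity, assuming the same normalization convention:

import numpy as np

y_mean, y_std = 3.0, 2.0
normalized_mean, normalized_log_std = 0.5, -1.0

mean = normalized_mean * y_std + y_mean        # back to original units
log_std = normalized_log_std + np.log(y_std)   # scaling a Gaussian scales its std by the same factor

assert np.isclose(np.exp(log_std), np.exp(normalized_log_std) * y_std)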
    def __init__(
            self,
            input_shape,
            output_dim,
            name,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            optimizer=None,
            tr_optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            normalize_inputs=True,
            no_initial_trust_region=True,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        """
        if config.TF_NN_SETTRACE:
            ipdb.set_trace()
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):

            if optimizer is None:
                optimizer = LbfgsOptimizer(name="optimizer")
            if tr_optimizer is None:
                tr_optimizer = ConjugateGradientOptimizer()

            self.output_dim = output_dim
            self.optimizer = optimizer
            self.tr_optimizer = tr_optimizer

            p_network = MLP(
                input_shape=input_shape,
                output_dim=output_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.sigmoid,
                name="p_network"
            )

            l_p = p_network.output_layer

            LayersPowered.__init__(self, [l_p])

            xs_var = p_network.input_layer.input_var
            ys_var = tf.placeholder(
                dtype=tf.float32, shape=(None, output_dim), name="ys")
            old_p_var = tf.placeholder(
                dtype=tf.float32, shape=(None, output_dim), name="old_p")

            x_mean_var = tf.get_variable(
                name="x_mean", initializer=tf.zeros_initializer, shape=(1,) + input_shape)
            x_std_var = tf.get_variable(
                name="x_std", initializer=tf.ones_initializer, shape=(1,) + input_shape)

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var

            p_var = L.get_output(
                l_p, {p_network.input_layer: normalized_xs_var})

            old_info_vars = dict(p=old_p_var)
            info_vars = dict(p=p_var)

            dist = self._dist = Bernoulli(output_dim)

            mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))

            loss = - tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars))

            predicted = p_var >= 0.5

            self.f_predict = tensor_utils.compile_function([xs_var], predicted)
            self.f_p = tensor_utils.compile_function([xs_var], p_var)
            self.l_p = l_p

            self.optimizer.update_opt(loss=loss, target=self, network_outputs=[
                                      p_var], inputs=[xs_var, ys_var])
            self.tr_optimizer.update_opt(loss=loss, target=self, network_outputs=[p_var],
                                         inputs=[xs_var, ys_var, old_p_var],
                                         leq_constraint=(mean_kl, step_size)
                                         )

            self.use_trust_region = use_trust_region
            self.name = name

            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var
            self.first_optimized = not no_initial_trust_region
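
The Bernoulli log_likelihood_sym used in the loss above corresponds, per sample, to sum_i [ y_i * log(p_i) + (1 - y_i) * log(1 - p_i) ] over the output_dim independent bits. A NumPy sketch:

import numpy as np

def bernoulli_log_likelihood(y, p, eps=1e-8):
    # per-sample log-likelihood, summed over independent Bernoulli outputs
    return np.sum(y * np.log(p + eps) + (1.0 - y) * np.log(1.0 - p + eps), axis=-1)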
    def __init__(
            self,
            name,
            input_shape,
            output_dim,
            network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
            optimizer=None,
            normalize_inputs=True,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        """
        if config.TF_NN_SETTRACE:
            ipdb.set_trace()
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):

            if optimizer is None:
                optimizer = LbfgsOptimizer(name="optimizer")

            self.output_dim = output_dim
            self.optimizer = optimizer

            if network is None:
                network = MLP(
                    input_shape=input_shape,
                    output_dim=output_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    name="network"
                )

            l_out = network.output_layer

            LayersPowered.__init__(self, [l_out])

            xs_var = network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, shape=[
                                    None, output_dim], name="ys")

            x_mean_var = tf.get_variable(
                name="x_mean",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(0., dtype=tf.float32)
            )
            x_std_var = tf.get_variable(
                name="x_std",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(1., dtype=tf.float32)
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var

            fit_ys_var = L.get_output(
                l_out, {network.input_layer: normalized_xs_var})

            # mean squared error; the original negated this term, which would maximize the error when minimized
            loss = tf.reduce_mean(tf.square(fit_ys_var - ys_var))

            self.f_predict = tensor_utils.compile_function(
                [xs_var], fit_ys_var)

            optimizer_args = dict(
                loss=loss,
                target=self,
                network_outputs=[fit_ys_var],
            )

            optimizer_args["inputs"] = [xs_var, ys_var]

            self.optimizer.update_opt(**optimizer_args)

            self.name = name
            self.l_out = l_out

            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var
 def predict_sym(self, xs):
     if config.TF_NN_SETTRACE:
         ipdb.set_trace()
     return L.get_output(self.l_out, xs)
Example #20
 def predict_sym(self, xs):
     return L.get_output(self.l_out, xs)
Example #21
    def __init__(
        self,
        name,
        input_shape,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        output_nonlinearity,
        z_dim,
        z_idx,
        z_hidden_sizes,
        merge="mul",
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer,
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer,
        batch_size=None,
        input_var=None,
        input_layer=None,
        batch_normalization=False,
        weight_normalization=False,
    ):

        Serializable.quick_init(self, locals())
        self.name = name

        total_dim = np.prod(input_shape)

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(batch_size, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]

            # slice off features / observation
            l_feat = L.SliceLayer(l_in,
                                  indices=slice(0, total_dim - z_dim),
                                  name="l_feat")

            # slice off z "style" variable
            l_z = L.SliceLayer(l_in,
                               indices=slice(total_dim - z_dim, total_dim),
                               name="l_z")

            l_pre = feedforward(l_feat,
                                hidden_sizes[:z_idx],
                                hidden_nonlinearity,
                                linear_output=True)
            with tf.variable_scope("z"):
                # if merging mul, ensure dimensionalities match.
                if merge == "mul":
                    _head = [total_dim] + hidden_sizes
                    _head = [_head[z_idx]]
                elif merge == "concat":
                    _head = []
                l_z = feedforward(l_z,
                                  z_hidden_sizes + _head,
                                  hidden_nonlinearity,
                                  linear_output=True)

            # merge latent code with features
            if merge == "mul":
                l_merge = L.ElemwiseMulLayer([l_pre, l_z])
            elif merge == "concat":
                l_merge = L.ConcatLayer([l_pre, l_z], axis=1)
            else:
                raise NotImplementedError

            if z_idx > 0:
                l_merge = L.NonlinearityLayer(l_merge, hidden_nonlinearity)
            l_hid = feedforward(l_merge,
                                hidden_sizes[z_idx:],
                                hidden_nonlinearity,
                                start_idx=z_idx)

            l_out = L.DenseLayer(l_hid,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)
            #if batch_normalization:
            #    ls = L.batch_norm(l_out)
            #    l_out = ls[-1]
            #    self._layers += ls
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            self._l_tar = L.InputLayer(shape=(batch_size, ) + (output_dim, ),
                                       input_var=input_var,
                                       name="target")

            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
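
The two merge modes combine the feature branch with the z ("style") branch either by elementwise multiplication, which requires matching widths (hence the extra head appended to z_hidden_sizes), or by concatenation along the feature axis. A NumPy sketch:

import numpy as np

def merge_features(feat, z, merge="mul"):
    if merge == "mul":
        # elementwise gating: feat and z must have the same width
        return feat * z
    elif merge == "concat":
        # stack the two representations along the feature axis
        return np.concatenate([feat, z], axis=1)
    raise NotImplementedError(merge)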
Example #22
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 z_dim,
                 pre_hidden_sizes,
                 post_hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer,
                 batch_size=None,
                 input_var=None,
                 input_layer=None,
                 weight_normalization=False):

        Serializable.quick_init(self, locals())
        self.name = name

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(batch_size, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]

            # construct graph
            l_hid = feedforward(l_in,
                                pre_hidden_sizes,
                                hidden_nonlinearity,
                                hidden_W_init=hidden_W_init,
                                hidden_b_init=hidden_b_init,
                                weight_normalization=weight_normalization,
                                start_idx=0)
            l_lat = L.LatentLayer(l_hid, z_dim)
            l_hid = feedforward(l_lat,
                                post_hidden_sizes,
                                hidden_nonlinearity,
                                hidden_W_init=hidden_W_init,
                                hidden_b_init=hidden_b_init,
                                weight_normalization=weight_normalization,
                                start_idx=len(pre_hidden_sizes))

            # create output layer
            l_out = L.DenseLayer(l_hid,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)

            self._layers.append(l_out)
            self._l_lat = l_lat
            self._z_dim = z_dim
            self._l_in = l_in
            self._l_out = l_out
            self._l_tar = L.InputLayer(shape=(batch_size, ) + (output_dim, ),
                                       input_var=input_var,
                                       name="target")

            # complexity loss for variational posterior
            z_mu, z_sig = self._l_lat.get_dparams_for(
                L.get_output(self._l_lat.input_layer))
            self.kl_cost = kl_from_prior(z_mu, z_sig, self._z_dim)

            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
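
L.LatentLayer and kl_from_prior are defined elsewhere in this codebase. Assuming the latent layer is the usual variational bottleneck with a diagonal Gaussian posterior N(z_mu, z_sig^2), a common pattern is the reparametrized sample sketched below in NumPy; this is an assumption about what the layer does, not its actual implementation:

import numpy as np

def reparametrized_sample(z_mu, z_sig, rng=np.random):
    # draw z ~ N(z_mu, diag(z_sig^2)) via the reparametrization trick
    eps = rng.standard_normal(size=np.shape(z_mu))
    return z_mu + z_sig * eps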