Ejemplo n.º 1
0
    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu,
                 lstm_layer_cls=L.LSTMLayer,
                 output_nonlinearity=None, input_var=None, input_layer=None, forget_bias=1.0, use_peepholes=False,
                 layer_args=None):
        """Build an LSTM network plus a single-step variant for rollouts.

        Two computation paths are constructed under one variable scope: a
        full-sequence path (input -> LSTM -> dense output) and a single-step
        path that consumes one input plus the previous (hidden, cell) state
        and shares the output projection weights with the sequence path.

        Args:
            name: variable-scope name wrapping all layers.
            input_shape: per-timestep input shape (tuple).
            output_dim: number of output units.
            hidden_dim: number of LSTM units.
            hidden_nonlinearity: nonlinearity used inside the LSTM.
            lstm_layer_cls: layer class used to build the recurrent core.
            output_nonlinearity: nonlinearity of the output projection.
            input_var: optional externally created input tensor.
            input_layer: optional externally created input layer.
            forget_bias: bias added to the LSTM forget gate.
            use_peepholes: whether the LSTM uses peephole connections.
            layer_args: extra kwargs forwarded to ``lstm_layer_cls``.
        """
        with tf.variable_scope(name):
            if input_layer is None:
                # (batch, time) + input_shape; both leading dims are dynamic.
                l_in = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            # Single-timestep input for the step path (no time dimension).
            l_step_input = L.InputLayer(shape=(None,) + input_shape, name="step_input")
            # contains previous hidden and cell state
            l_step_prev_state = L.InputLayer(shape=(None, hidden_dim * 2), name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_lstm = lstm_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                                    hidden_init_trainable=False, name="lstm", forget_bias=forget_bias,
                                    cell_init_trainable=False, use_peepholes=use_peepholes, **layer_args)
            # Collapse (batch, time) so one dense layer projects every step.
            l_lstm_flat = L.ReshapeLayer(
                l_lstm, shape=(-1, hidden_dim),
                name="lstm_flat"
            )
            l_output_flat = L.DenseLayer(
                l_lstm_flat,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output_flat"
            )
            # Restore the (batch, time, output_dim) shape from the flat output.
            l_output = L.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input:
                tf.reshape(flat_output, tf.stack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output"
            )
            # Step path: one LSTM step from (input, prev state) to new state.
            l_step_state = l_lstm.get_step_layer(l_step_input, l_step_prev_state, name="step_state")
            # The step state packs [hidden | cell]; slice the halves apart.
            l_step_hidden = L.SliceLayer(l_step_state, indices=slice(hidden_dim), name="step_hidden")
            l_step_cell = L.SliceLayer(l_step_state, indices=slice(hidden_dim, None), name="step_cell")
            # Share W/b with the sequence projection so both paths compute
            # the same output function.
            l_step_output = L.DenseLayer(
                l_step_hidden,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                W=l_output_flat.W,
                b=l_output_flat.b,
                name="step_output"
            )

            self._l_in = l_in
            self._hid_init_param = l_lstm.h0
            self._cell_init_param = l_lstm.c0
            self._l_lstm = l_lstm
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_cell = l_step_cell
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim
Ejemplo n.º 2
0
    def __init__(self,
                 env_spec,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=-2,
                 output_nonlinearity=None,
                 bn=False,
                 dropout=.05):
        """Q-network over (observation, action) pairs with dropout.

        The action input is concatenated into the hidden stack at
        ``action_merge_layer`` (negative indices count from the output).
        Two compiled forward passes are kept on ``self``: ``_f_qval``
        (deterministic, dropout off) and ``_f_qval_drop`` (dropout on).
        """
        Serializable.quick_init(self, locals())

        obs_in = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim),
                              name="obs")
        act_in = L.InputLayer(shape=(None, env_spec.action_space.flat_dim),
                              name="actions")

        # Hidden layers plus the final output layer.
        depth = len(hidden_sizes) + 1

        # Normalize a possibly-negative merge index into [0, depth);
        # degenerate single-layer networks always merge at 1.
        if depth > 1:
            action_merge_layer = \
                (action_merge_layer % depth + depth) % depth
        else:
            action_merge_layer = 1

        net = obs_in
        for idx, size in enumerate(hidden_sizes):
            if bn:
                net = batch_norm(net)

            if idx == action_merge_layer:
                net = L.ConcatLayer([net, act_in])

            net = L.DenseLayer(net,
                               num_units=size,
                               nonlinearity=hidden_nonlinearity,
                               name="h%d" % (idx + 1))
            # Dropout after every hidden layer; disabled when the graph is
            # evaluated deterministically.
            net = L.DropoutLayer(net, dropout)

        # Merge directly before the output layer when requested.
        if action_merge_layer == depth:
            net = L.ConcatLayer([net, act_in])

        q_out = L.DenseLayer(net,
                             num_units=1,
                             nonlinearity=output_nonlinearity,
                             name="output")

        det_var = L.get_output(q_out, deterministic=True)
        drop_var = L.get_output(q_out, deterministic=False)

        self._f_qval = tensor_utils.compile_function(
            [obs_in.input_var, act_in.input_var], det_var)
        self._f_qval_drop = tensor_utils.compile_function(
            [obs_in.input_var, act_in.input_var], drop_var)
        self._output_layer = q_out
        self._obs_layer = obs_in
        self._action_layer = act_in
        self._output_nonlinearity = output_nonlinearity

        LayersPowered.__init__(self, [q_out])
Ejemplo n.º 3
0
    def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        output_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
    ):
        """Plain feed-forward MLP under variable scope ``name``.

        Keeps every layer in ``self._layers``, the input/output layers in
        ``self._l_in``/``self._l_out``, and the symbolic output in
        ``self._output``.
        """

        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            # Reuse a caller-supplied input layer, otherwise create one.
            if input_layer is not None:
                l_in = input_layer
            else:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            self._layers = [l_in]
            current = l_in
            if batch_normalization:
                current = L.batch_norm(current)
            for idx, width in enumerate(hidden_sizes):
                current = L.DenseLayer(current,
                                       num_units=width,
                                       nonlinearity=hidden_nonlinearity,
                                       name="hidden_%d" % idx,
                                       W=hidden_W_init,
                                       b=hidden_b_init,
                                       weight_normalization=weight_normalization)
                if batch_normalization:
                    current = L.batch_norm(current)
                self._layers.append(current)
            l_out = L.DenseLayer(current,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)
            if batch_normalization:
                l_out = L.batch_norm(l_out)
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Ejemplo n.º 4
0
    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu,
                 gru_layer_cls=L.GRULayer,
                 output_nonlinearity=None, input_var=None, input_layer=None, layer_args=None):
        """Build a GRU network plus a single-step variant for rollouts.

        Mirrors the LSTM construction but with a single hidden-state vector:
        a full-sequence path (input -> GRU -> dense output) and a
        single-step path sharing the output projection weights.

        Args:
            name: variable-scope name wrapping all layers.
            input_shape: per-timestep input shape (tuple).
            output_dim: number of output units.
            hidden_dim: number of GRU units.
            hidden_nonlinearity: nonlinearity used inside the GRU.
            gru_layer_cls: layer class used to build the recurrent core.
            output_nonlinearity: nonlinearity of the output projection.
            input_var: optional externally created input tensor.
            input_layer: optional externally created input layer.
            layer_args: extra kwargs forwarded to ``gru_layer_cls``.
        """
        with tf.variable_scope(name):
            if input_layer is None:
                # (batch, time) + input_shape; both leading dims are dynamic.
                l_in = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            # Single-timestep input and previous hidden state for stepping.
            l_step_input = L.InputLayer(shape=(None,) + input_shape, name="step_input")
            l_step_prev_state = L.InputLayer(shape=(None, hidden_dim), name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_gru = gru_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                                  hidden_init_trainable=False, name="gru", **layer_args)
            # Collapse (batch, time) so one dense layer projects every step.
            l_gru_flat = L.ReshapeLayer(
                l_gru, shape=(-1, hidden_dim),
                name="gru_flat"
            )
            l_output_flat = L.DenseLayer(
                l_gru_flat,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output_flat"
            )
            # Restore the (batch, time, output_dim) shape from the flat output.
            l_output = L.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input:
                tf.reshape(flat_output, tf.stack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output"
            )
            # GRU state is a single vector, so the step state IS the hidden.
            l_step_state = l_gru.get_step_layer(l_step_input, l_step_prev_state, name="step_state")
            l_step_hidden = l_step_state
            # Share W/b with the sequence projection so both paths compute
            # the same output function.
            l_step_output = L.DenseLayer(
                l_step_hidden,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                W=l_output_flat.W,
                b=l_output_flat.b,
                name="step_output"
            )

            self._l_in = l_in
            self._hid_init_param = l_gru.h0
            self._l_gru = l_gru
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim
Ejemplo n.º 5
0
 def create_MLP(
     self,
     name,
     output_dim,
     hidden_sizes,
     hidden_nonlinearity,
     output_nonlinearity,
     hidden_W_init=L.XavierUniformInitializer(),
     hidden_b_init=tf.zeros_initializer,
     output_W_init=L.XavierUniformInitializer(),
     output_b_init=tf.zeros_initializer,
     input_var=None,
     input_layer=None,
     input_shape=None,
     batch_normalization=False,
     weight_normalization=False,
 ):
     """Construct a plain MLP under variable scope ``name``.

     Returns:
         (all_layers, input_layer, output_layer, input_var, output)

     NOTE(review): the bias defaults pass ``tf.zeros_initializer`` itself,
     not an instance (no parentheses) — confirm L.DenseLayer accepts the
     class as an initializer spec.
     """
     with tf.variable_scope(name):
         # Reuse a caller-supplied input layer, otherwise create one.
         if input_layer is not None:
             l_in = input_layer
         else:
             l_in = L.InputLayer(shape=(None, ) + input_shape,
                                 input_var=input_var,
                                 name="input")
         layers = [l_in]
         net = l_in
         if batch_normalization:
             net = L.batch_norm(net)
         for idx, width in enumerate(hidden_sizes):
             net = L.DenseLayer(net,
                                num_units=width,
                                nonlinearity=hidden_nonlinearity,
                                name="hidden_%d" % idx,
                                W=hidden_W_init,
                                b=hidden_b_init,
                                weight_normalization=weight_normalization)
             if batch_normalization:
                 net = L.batch_norm(net)
             layers.append(net)
         l_out = L.DenseLayer(net,
                              num_units=output_dim,
                              nonlinearity=output_nonlinearity,
                              name="output",
                              W=output_W_init,
                              b=output_b_init,
                              weight_normalization=weight_normalization)
         if batch_normalization:
             l_out = L.batch_norm(l_out)
         layers.append(l_out)
         # returns layers(), input_layer, output_layer, input_var, output
         return layers, l_in, l_out, l_in.input_var, L.get_output(l_out)
Ejemplo n.º 6
0
    def __init__(self,
                 env_spec,
                 name='QuadraticPhinet',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 vs_form=None,
                 bn=False,
                 A=None,
                 init_a=1.0,
                 a_parameterization='exp'):
        """Quadratic phi-value network over continuous actions.

        Builds an ``fs`` feature network over observations (final layer
        sized action_dim) plus a diagonal A-matrix parameter layer, then
        compiles ``self._f_phival`` evaluating ``get_phival_sym`` on
        (obs, action) batches.

        NOTE(review): several branches below look suspect — see inline
        notes.
        """
        Serializable.quick_init(self, locals())

        # Only continuous action spaces are supported.
        assert not env_spec.action_space.is_discrete
        self._env_spec = env_spec
        self.vs_form = vs_form
        with tf.variable_scope(name):
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            l_act = L.InputLayer(shape=(None, action_dim), name="action")
            action_var = l_act.input_var
            l_obs = L.InputLayer(shape=(None, obs_dim), name="obs")

            # Running statistics used by the normalization layer below.
            self.obs_rms = RunningMeanStd(shape=(obs_dim, ))

            # NOTE(review): obz is constructed but never used — l_hidden
            # starts from the raw l_obs, so observations are NOT normalized.
            # Confirm whether l_hidden should start from obz instead.
            obz = L.NormalizeLayer(l_obs, rms=self.obs_rms)
            l_hidden = l_obs
            # Final hidden layer is sized action_dim so fs matches actions.
            hidden_sizes += (action_dim, )

            for idx, size in enumerate(hidden_sizes):
                if bn:
                    l_hidden = batch_norm(l_hidden)

                l_hidden = L.DenseLayer(l_hidden,
                                        num_units=size,
                                        nonlinearity=hidden_nonlinearity,
                                        name="h%d" % (idx + 1))

            obs_var = l_obs.input_var
            fs = l_hidden  # fs_network.output_layer

            if A is not None:
                # NOTE(review): in this branch l_log_A is never assigned, yet
                # it is read unconditionally below -> NameError. Verify
                # whether l_A_param was meant to be used as l_log_A.
                l_A_param = A.output_layer
            else:
                if a_parameterization == 'exp':
                    init_a_param = np.log(init_a) - .5
                elif a_parameterization == 'softplus':
                    # Inverse softplus so softplus(param) == init_a.
                    init_a_param = np.log(np.exp(init_a) - 1)
                else:
                    raise NotImplementedError

                l_log_A = L.ParamLayer(
                    l_obs,
                    num_units=action_dim,
                    param=tf.constant_initializer(init_a_param),
                    name="diagonal_a_matrix",
                    trainable=True)
            if vs_form is not None:
                raise NotImplementedError

            self._l_log_A = l_log_A
            self.a_parameterization = a_parameterization
            self.fs = fs

            # NOTE(review): dead code — vs_form is not None raises above,
            # and ``vs`` is undefined here.
            if vs_form is not None:
                self._output_vs = vs
                LayersPowered.__init__(
                    self, [self.fs, self._l_log_A, self._output_vs])
            else:
                LayersPowered.__init__(self, [self.fs, self._l_log_A])

            output_var = self.get_phival_sym(obs_var, action_var)

            self._f_phival = tensor_utils.compile_function(
                inputs=[obs_var, action_var], outputs=output_var)
    def make_network_image(self,
                           dim_input,
                           dim_output,
                           nn_input=None,
                           target=None):
        """Build a conv + fc discriminator over image inputs.

        Args:
            dim_input: 2d tuple (num_frames x image dims); only a single
                frame (dim_input[0] == 1) is supported.
            dim_output: Dimensionality of the output.
            nn_input: optional pre-built input placeholder (created if None).
            target: optional pre-built target placeholder (created if None).

        Side effects: stores the input/target placeholders, logits, loss,
        optimizer and accuracy/precision/recall metrics on ``self``.
        """
        if dim_input[0] != 1:
            raise Exception(
                "Currently don't support concatenating timesteps for images")

        #TODO: don't do this grossness
        if nn_input is None:
            nn_input, _ = self.get_input_layer_image(dim_input[1], dim_output)

        if target is None:
            _, target = self.get_input_layer_image(dim_input[1], dim_output)

        # Fixed architecture: two 5-filter 3x3 convs with 2x2 max pooling,
        # then two 100-unit fc layers and a linear output.
        conv_filters = [5, 5]
        conv_filter_sizes = [3, 3]
        conv_pads = ['SAME', 'SAME']
        max_pool_sizes = [2, 2]
        conv_strides = [1, 1]
        hidden_sizes = [100, 100]
        hidden_nonlinearity = tf.nn.relu
        output_nonlinearity = None

        l_in = L.InputLayer(shape=tuple(nn_input.get_shape().as_list()),
                            input_var=nn_input)

        # Unflatten to (batch,) + image dims for the conv stack.
        l_hid = L.reshape(l_in, ([0], ) + dim_input[1], name="reshape_input")

        for idx, conv_filter, filter_size, stride, pad, max_pool_size in zip(
                range(len(conv_filters)), conv_filters, conv_filter_sizes,
                conv_strides, conv_pads, max_pool_sizes):
            l_hid = L.Conv2DLayer(
                l_hid,
                num_filters=conv_filter,
                filter_size=filter_size,
                stride=(stride, stride),
                pad=pad,
                nonlinearity=hidden_nonlinearity,
                name="conv_hidden_%d" % idx,
            )
            if max_pool_size is not None:
                l_hid = L.Pool2DLayer(l_hid, max_pool_size, pad="SAME")

        l_hid = L.flatten(l_hid, name="conv_flatten")

        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="hidden_%d" % idx,
            )

        fc_output = L.get_output(
            L.DenseLayer(
                l_hid,
                num_units=dim_output,
                nonlinearity=output_nonlinearity,
                name="output",
            ))

        loss, optimizer = self.get_loss_layer(pred=fc_output,
                                              target_output=target)

        self.class_target = target
        self.nn_input = nn_input
        self.discrimination_logits = fc_output
        self.optimizer = optimizer
        self.loss = loss

        # Fraction of rounded sigmoid predictions matching rounded targets.
        label_accuracy = tf.equal(
            tf.round(tf.nn.sigmoid(self.discrimination_logits)),
            tf.round(self.class_target))
        self.label_accuracy = tf.reduce_mean(
            tf.cast(label_accuracy, tf.float32))
        # NOTE(review): tf.nn.l2_loss is 0.5 * sum of squares, so despite the
        # name this is not a true mean squared error — confirm intended.
        self.mse = tf.reduce_mean(
            tf.nn.l2_loss(
                tf.nn.sigmoid(self.discrimination_logits) - self.class_target))
        ones = tf.ones_like(self.class_target)

        true_positives = tf.round(tf.nn.sigmoid(
            self.discrimination_logits)) * tf.round(self.class_target)
        predicted_positives = tf.round(
            tf.nn.sigmoid(self.discrimination_logits))

        # BUG FIX: the original computed NOT(pred==1 XOR target==1), which is
        # agreement (TP + TN), not false negatives. A false negative is
        # prediction 0 with target 1.
        false_negatives = tf.logical_and(
            tf.equal(tf.round(tf.nn.sigmoid(self.discrimination_logits)),
                     tf.zeros_like(self.class_target)),
            tf.equal(tf.round(self.class_target), ones))

        self.label_precision = tf.reduce_sum(
            tf.cast(true_positives, tf.float32)) / tf.reduce_sum(
                tf.cast(predicted_positives, tf.float32))
        self.label_recall = tf.reduce_sum(tf.cast(
            true_positives, tf.float32)) / (
                tf.reduce_sum(tf.cast(true_positives, tf.float32)) +
                tf.reduce_sum(tf.cast(false_negatives, tf.float32)))
Ejemplo n.º 8
0
    def __init__(self, name, input_shape, output_dim,
                 conv_filters, conv_filter_sizes, conv_strides, conv_pads,
                 hidden_sizes, hidden_nonlinearity, output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(),
                 input_var=None, input_layer=None, batch_normalization=False, weight_normalization=False):
        """
        A network composed of several convolution layers followed by some fc layers.
        input_shape: (width,height,channel)
            HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.
        conv_filters: a list of numbers of convolution kernel
        conv_filter_sizes: a list of sizes (int) of the convolution kernels
        conv_strides: a list of strides (int) of the conv kernels
        conv_pads: a list of pad formats (either 'SAME' or 'VALID')
        hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers
        hidden_sizes: a list of numbers of hidden units for all fc layers
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            # Inputs arrive flattened; unflatten to (batch,) + input_shape
            # before the conv stack.
            if input_layer is not None:
                l_in = input_layer
                l_hid = l_in
            elif len(input_shape) == 3:
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name="input")
                l_hid = L.reshape(l_in, ([0],) + input_shape, name="reshape_input")
            elif len(input_shape) == 2:
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name="input")
                # Treat a 2-d input as a single-channel image.
                input_shape = (1,) + input_shape
                l_hid = L.reshape(l_in, ([0],) + input_shape, name="reshape_input")
            else:
                l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var, name="input")
                l_hid = l_in

            if batch_normalization:
                l_hid = L.batch_norm(l_hid)

            for idx, conv_filter, filter_size, stride, pad in zip(range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_hid = L.Conv2DLayer(
                    l_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="SL_conv_hidden_%d" % idx,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)

            l_hid = L.flatten(l_hid, name="conv_flatten")
            # Linear (no nonlinearity) feature layer shared by both heads.
            critical_layer = L.DenseLayer(
                    l_hid,
                    num_units=hidden_sizes[0],
                    nonlinearity=None,
                    name="SL_fc",
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization,
                )

            self.actValues = L.get_output(critical_layer)

            ##### Forward-pass (policy-gradient) head ####################
            with tf.variable_scope("PG"):
                fc_1 = L.DenseLayer(
                    critical_layer,
                    num_units=hidden_sizes[1],
                    nonlinearity=hidden_nonlinearity,
                    name="pgLayer_1",
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization,
                )

                fc_2 = L.DenseLayer(
                    fc_1,
                    num_units=hidden_sizes[2],
                    nonlinearity=hidden_nonlinearity,
                    name="pgLayer_2",
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    # BUG FIX: the original normalized an undefined name
                    # (``fcFor``, present only in commented-out code), which
                    # raised NameError whenever batch_normalization was on.
                    fc_2 = L.batch_norm(fc_2)

                fcOut = L.DenseLayer(
                    fc_2,
                    num_units=output_dim,
                    nonlinearity=output_nonlinearity,
                    name="output",
                    W=output_W_init,
                    b=output_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    fcOut = L.batch_norm(fcOut)
            ###################################################

            # Trainable activation buffer pushed through the PG head to
            # obtain a "backward" output path.
            # NOTE(review): the (10000, 32) shape appears hard-coded to
            # match hidden_sizes[0] == 32 — confirm against callers.
            self.actVariable = tf.Variable(initial_value=tf.zeros([10000, 32], dtype=tf.float32), name="act_var1", trainable=True)

            bcOut = fcOut.get_output_for(fc_2.get_output_for(fc_1.get_output_for(self.actVariable)))
            self.bcOut = bcOut

            # Wrap the backward output tensor as a layer; shape is
            # (actVariable[0], output_dim).
            backOutLayer = L.InputLayer(shape=(), input_var=bcOut, name="OutputLayer")

            self._l_in = l_in
            self.forwardOutLayer = fcOut
            self.backOutLayer = backOutLayer

            outLayers = [fcOut, backOutLayer]
            # self._input_var = l_in.input_var

        LayersPowered.__init__(self, outLayers)
Ejemplo n.º 9
0
    def __init__(
            self,
            env_spec,
            name='qnet',
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            action_merge_layer=-2,
            output_nonlinearity=None,
            hidden_W_init=L.XavierUniformInitializer(),
            hidden_b_init=tf.zeros_initializer,
            output_W_init=L.XavierUniformInitializer(),
            output_b_init=tf.zeros_initializer,
            bn=False):
        """Continuous Q-network Q(s, a) with the action concatenated into
        the hidden stack at ``action_merge_layer`` (negative indices count
        from the output).

        Compiles ``self._f_qval`` mapping (obs, action) batches to a flat
        vector of Q-values.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
            l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")

            # Hidden layers plus the final output layer.
            n_layers = len(hidden_sizes) + 1

            if n_layers > 1:
                # Normalize a possibly-negative index into [0, n_layers).
                action_merge_layer = \
                    (action_merge_layer % n_layers + n_layers) % n_layers
            else:
                action_merge_layer = 1

            l_hidden = l_obs

            for idx, size in enumerate(hidden_sizes):
                if bn:
                    l_hidden = L.batch_norm(l_hidden)

                if idx == action_merge_layer:
                    l_hidden = L.ConcatLayer([l_hidden, l_action])

                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=size,
                    W=hidden_W_init,
                    b=hidden_b_init,
                    nonlinearity=hidden_nonlinearity,
                    name="h%d" % (idx + 1)
                )

            # Merge directly before the output layer when requested.
            if action_merge_layer == n_layers:
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_output = L.DenseLayer(
                l_hidden,
                num_units=1,
                W=output_W_init,
                b=output_b_init,
                nonlinearity=output_nonlinearity,
                name="output"
            )

            #output_var = L.get_output(l_output, deterministic=True).flatten()
            # Flatten (batch, 1) -> (batch,) for the compiled function.
            output_var = tf.reshape(L.get_output(l_output, deterministic=True),(-1,))

            self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], output_var)
            self._output_layer = l_output
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._output_nonlinearity = output_nonlinearity

            LayersPowered.__init__(self, [l_output])
Ejemplo n.º 10
0
    def __init__(self,
                 env_spec,
                 name='nafqnet',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=0,
                 output_nonlinearity=None,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=L.ZerosInitializer(),
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=L.ZerosInitializer(),
                 bn=False):
        """NAF-style Q-network for continuous actions.

        Builds a shared trunk over observations up to ``action_merge_layer``,
        then three separate heads from that point: L (action_dim**2 entries
        turned into a matrix via ``get_L_sym``), V (scalar state value) and
        mu (tanh-bounded greedy action). Q is recovered as A + V where the
        advantage A is quadratic in (action - mu); an expected-Q variant
        under a Gaussian policy is also compiled.

        Compiles ``self._f_qval`` on (obs, action) and ``self._f_e_qval``
        on (obs, policy_mu, policy_sigma).
        """
        Serializable.quick_init(self, locals())

        # Only continuous action spaces are supported.
        assert not env_spec.action_space.is_discrete

        action_dim = env_spec.action_space.flat_dim
        self._action_dim = action_dim
        self._env_spec = env_spec

        n_layers = len(hidden_sizes)
        # Normalize a possibly-negative merge index into [0, n_layers).
        action_merge_layer = \
            (action_merge_layer % n_layers + n_layers) % n_layers

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None,
                                        env_spec.observation_space.flat_dim),
                                 name="obs")
            l_action = L.InputLayer(shape=(None,
                                           env_spec.action_space.flat_dim),
                                    name="actions")
            # Gaussian policy parameters used for the expected-Q head.
            l_policy_mu = L.InputLayer(shape=(None, action_dim),
                                       name="policy_mu")
            l_policy_sigma = L.InputLayer(shape=(None, action_dim, action_dim),
                                          name="policy_sigma")

            l_hidden = l_obs
            idx = 0

            l_hidden_kwargs = dict(
                W=hidden_W_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
            )

            l_output_kwargs = dict(
                W=output_W_init,
                b=output_b_init,
            )

            # Shared trunk: layers before the merge point are common to all
            # three heads.
            while idx < action_merge_layer:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1

            # Remember the branch point so each head restarts from here.
            _idx = idx
            _l_hidden = l_hidden

            # compute L network
            while idx < n_layers:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="L_h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1
            # Linear output of action_dim**2 entries for the L matrix.
            l_L = L.DenseLayer(
                l_hidden,
                num_units=action_dim**2,
                nonlinearity=None,
                name="L_h%d" % (idx + 1),
                **l_output_kwargs,
            )

            # compute V network
            idx = _idx
            l_hidden = _l_hidden
            while idx < n_layers:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="V_h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1
            l_V = L.DenseLayer(
                l_hidden,
                num_units=1,
                nonlinearity=None,
                name="V_h%d" % (idx + 1),
                **l_output_kwargs,
            )

            # compute mu network
            idx = _idx
            l_hidden = _l_hidden
            while idx < n_layers:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="mu_h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1
            if bn: l_hidden = L.batch_norm(l_hidden)
            # tanh keeps the greedy action bounded.
            l_mu = L.DenseLayer(
                l_hidden,
                num_units=action_dim,
                nonlinearity=tf.nn.tanh,
                name="mu_h%d" % (idx + 1),
                **l_output_kwargs,
            )

            L_var, V_var, mu_var = L.get_output([l_L, l_V, l_mu],
                                                deterministic=True)
            # Flatten (batch, 1) -> (batch,).
            V_var = tf.reshape(V_var, (-1, ))

            # compute advantage
            L_mat_var = self.get_L_sym(L_var)
            P_var = self.get_P_sym(L_mat_var)
            A_var = self.get_A_sym(P_var, mu_var, l_action.input_var)

            # compute Q
            Q_var = A_var + V_var

            # compute expected Q under Gaussian policy
            e_A_var = self.get_e_A_sym(P_var, mu_var, l_policy_mu.input_var,
                                       l_policy_sigma.input_var)
            e_Q_var = e_A_var + V_var

            self._f_qval = tensor_utils.compile_function(
                [l_obs.input_var, l_action.input_var], Q_var)
            self._f_e_qval = tensor_utils.compile_function([
                l_obs.input_var, l_policy_mu.input_var,
                l_policy_sigma.input_var
            ], e_Q_var)
            self._L_layer = l_L
            self._V_layer = l_V
            self._mu_layer = l_mu
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._policy_mu_layer = l_policy_mu
            self._policy_sigma_layer = l_policy_sigma
            self._output_nonlinearity = output_nonlinearity

            self.init_policy()

            LayersPowered.__init__(self, [l_L, l_V, l_mu])
Ejemplo n.º 11
0
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 conv_filters,
                 conv_filter_sizes,
                 conv_strides,
                 conv_pads,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer(),
                 input_var=None,
                 input_layer=None,
                 batch_normalization=False,
                 weight_normalization=False):
        """
        A network composed of several convolution layers followed by some fc layers.
        input_shape: (width,height,channel)
            HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.
        conv_filters: a list of numbers of convolution kernel
        conv_filter_sizes: a list of sizes (int) of the convolution kernels
        conv_strides: a list of strides (int) of the conv kernels
        conv_pads: a list of pad formats (either 'SAME' or 'VALID')
        hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers
        hidden_sizes: a list of numbers of hidden units for all fc layers
        """
        # BUG FIX: the docstring above was previously placed *after*
        # Serializable.quick_init, making it a discarded string expression
        # (so __doc__ was never set). quick_init still captures the same
        # argument locals().
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if input_layer is not None:
                # Caller supplied an input layer; use it as-is (no reshape).
                l_in = input_layer
                l_hid = l_in
            elif len(input_shape) == 3:
                # Flattened (W, H, C) input: unflatten back into an image batch.
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                    input_var=input_var,
                                    name="input")
                l_hid = L.reshape(l_in, ([0], ) + input_shape,
                                  name="reshape_input")
            elif len(input_shape) == 2:
                # 2-D input: promote to a single-channel image before conv.
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                    input_var=input_var,
                                    name="input")
                input_shape = (1, ) + input_shape
                l_hid = L.reshape(l_in, ([0], ) + input_shape,
                                  name="reshape_input")
            else:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
                l_hid = l_in

            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            # Convolutional trunk: one Conv2DLayer per entry of conv_filters.
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_hid = L.Conv2DLayer(
                    l_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="conv_hidden_%d" % idx,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)

            if output_nonlinearity == L.spatial_expected_softmax:
                # Spatial-softmax head: operates directly on the last conv
                # feature map (no fc layers); yields 2 coordinates per filter,
                # hence the output_dim == 2 * num_filters assertion.
                assert len(hidden_sizes) == 0
                assert output_dim == conv_filters[-1] * 2
                l_hid.nonlinearity = tf.identity
                l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
            else:
                # Standard head: flatten conv features, then fc stack + output.
                l_hid = L.flatten(l_hid, name="conv_flatten")
                for idx, hidden_size in enumerate(hidden_sizes):
                    l_hid = L.DenseLayer(
                        l_hid,
                        num_units=hidden_size,
                        nonlinearity=hidden_nonlinearity,
                        name="hidden_%d" % idx,
                        W=hidden_W_init,
                        b=hidden_b_init,
                        weight_normalization=weight_normalization,
                    )
                    if batch_normalization:
                        l_hid = L.batch_norm(l_hid)
                l_out = L.DenseLayer(
                    l_hid,
                    num_units=output_dim,
                    nonlinearity=output_nonlinearity,
                    name="output",
                    W=output_W_init,
                    b=output_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_out = L.batch_norm(l_out)
            self._l_in = l_in
            self._l_out = l_out

        LayersPowered.__init__(self, l_out)
Ejemplo n.º 12
0
    def __init__(self,
                 name,
                 output_dim,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer(),
                 input_var=None,
                 input_layer=None,
                 input_shape=None,
                 batch_normalization=False,
                 weight_normalization=False,
                 latent_dim=0,
                 latent_shape=None,
                 obs_shape=None):
        """
        MLP whose flat input is split into an observation part and a trailing
        latent part (the last `latent_dim` features).

        The observation slice goes through the fc stack; the latent slice is
        embedded by a single dense layer and summed elementwise into the last
        hidden layer before the output layer. Requires `hidden_sizes` to be
        non-empty so the sum has a matching width.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            # Split the flat input: last latent_dim features form the latent,
            # the remainder is the observation.
            latent_in = L.SliceLayer(l_in,
                                     slice(-latent_dim, None, None),
                                     axis=-1)
            obs_in = L.SliceLayer(l_in, slice(0, -latent_dim, None), axis=-1)
            self._layers = [obs_in]
            l_hid = obs_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%d" % idx,
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)
            # Embed the latent to the width of the last hidden layer so the
            # elementwise sum below is well-defined.
            # BUG FIX: previously read the leaked loop variable `hidden_size`
            # (a NameError for empty hidden_sizes); hidden_sizes[-1] is the
            # same value whenever the loop ran and states the intent.
            l_latent_out = L.DenseLayer(
                latent_in,
                num_units=hidden_sizes[-1],
                nonlinearity=hidden_nonlinearity,
                name="hidden_latent_0",
                W=hidden_W_init,
                b=hidden_b_init,
                weight_normalization=weight_normalization)
            if batch_normalization:
                l_latent_out = L.batch_norm(l_latent_out)
            self._layers.append(l_latent_out)

            # Merge observation features and latent embedding by addition.
            l_hid = L.ElemwiseSumLayer([l_hid, l_latent_out])

            l_out = L.DenseLayer(l_hid,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)
            if batch_normalization:
                l_out = L.batch_norm(l_out)
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # Cache the symbolic output of the network.
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Ejemplo n.º 13
0
    def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
    ):
        """
        Feed-forward network whose raw scores are passed through a masked
        softmax; the mask is read from the first `output_dim` features of the
        network input itself.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            # Resolve the input layer, building one from input_shape if the
            # caller did not provide a layer.
            if input_layer is not None:
                l_in = input_layer
            else:
                assert input_shape is not None, \
                    "input_layer or input_shape must be supplied"
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            self._layers = [l_in]

            cur = L.batch_norm(l_in) if batch_normalization else l_in
            # Fully-connected trunk.
            for idx, width in enumerate(hidden_sizes):
                cur = L.DenseLayer(cur,
                                   num_units=width,
                                   nonlinearity=hidden_nonlinearity,
                                   name="hidden_%d" % idx,
                                   W=hidden_W_init,
                                   b=hidden_b_init,
                                   weight_normalization=weight_normalization)
                if batch_normalization:
                    cur = L.batch_norm(cur)
                self._layers.append(cur)

            # Raw (pre-softmax) output scores.
            l_out_raw = L.DenseLayer(cur,
                                     num_units=output_dim,
                                     name="output",
                                     W=output_W_init,
                                     b=output_b_init,
                                     weight_normalization=weight_normalization)
            if batch_normalization:
                l_out_raw = L.batch_norm(l_out_raw)
            self._layers.append(l_out_raw)

            # Mask assumed to occupy the first output_dim input features.
            mask = L.OpLayer(l_in,
                             lambda X: X[..., :output_dim],
                             shape_op=lambda old_shape:
                             old_shape[:-1] + (output_dim, ))
            self._layers.append(mask)

            # Final output: softmax over the raw scores restricted by the mask.
            l_out = L.OpLayer(l_out_raw, masked_softmax, extras=[mask])
            self._layers.append(l_out)

            self._l_in = l_in
            self._l_out = l_out
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Ejemplo n.º 14
0
    def __init__(self,
                 env_spec,
                 name='Phinet',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=-2,
                 output_nonlinearity=None,
                 bn=False):
        """
        Phi network for a continuous action space.

        Observation and action are each embedded into hidden_sizes[0] units,
        merged by elementwise addition, passed through the remaining fc
        layers, and reduced to one scalar per sample.

        env_spec: environment spec providing observation/action flat dims
        hidden_sizes: fc layer widths; the first is the merge width
        action_merge_layer: kept for interface compatibility -- the merge is
            hard-wired right after the first layer (see below)
        bn: if True, apply batch normalization before each inner fc layer
        """
        Serializable.quick_init(self, locals())

        assert not env_spec.action_space.is_discrete
        self._env_spec = env_spec

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None,
                                        env_spec.observation_space.flat_dim),
                                 name="obs")
            l_action = L.InputLayer(shape=(None,
                                           env_spec.action_space.flat_dim),
                                    name="action")

            # Vestigial: the normalized merge index is computed but no longer
            # consulted; kept so the attribute-free contract is unchanged.
            n_layers = len(hidden_sizes) + 1
            if n_layers > 1:
                action_merge_layer = \
                    (action_merge_layer % n_layers + n_layers) % n_layers
            else:
                action_merge_layer = 1

            obz = l_obs
            obs_hidden = L.DenseLayer(obz,
                                      num_units=hidden_sizes[0],
                                      nonlinearity=hidden_nonlinearity,
                                      name="obs_h%d" % (0))

            act_hidden = L.DenseLayer(l_action,
                                      num_units=hidden_sizes[0],
                                      nonlinearity=hidden_nonlinearity,
                                      name="act_h%d" % (0))

            # Merge observation and action embeddings by elementwise sum.
            merge_hidden = L.OpLayer(obs_hidden,
                                     op=lambda x, y: x + y,
                                     shape_op=lambda x, y: x,
                                     extras=[act_hidden])

            l_hidden = merge_hidden

            for idx, size in enumerate(hidden_sizes[1:]):
                if bn:
                    # BUG FIX: was a bare `batch_norm(...)` (a NameError when
                    # bn=True unless separately imported); every other network
                    # in this file uses L.batch_norm.
                    l_hidden = L.batch_norm(l_hidden)

                l_hidden = L.DenseLayer(l_hidden,
                                        num_units=size,
                                        nonlinearity=hidden_nonlinearity,
                                        name="h%d" % (idx + 1))

            l_output = L.DenseLayer(l_hidden,
                                    num_units=1,
                                    nonlinearity=output_nonlinearity,
                                    name="output")

            # Deterministic forward pass, flattened to shape (batch,).
            output_var = L.get_output(l_output, deterministic=True)
            output_var = tf.reshape(output_var, (-1, ))

            self._f_phival = tensor_utils.compile_function(
                [l_obs.input_var, l_action.input_var], output_var)
            self._output_layer = l_output
            self._obs_layer = l_obs
            self._action_layer = l_action
            self.output_nonlinearity = output_nonlinearity

            LayersPowered.__init__(self, [l_output])
Ejemplo n.º 15
0
    def __init__(self,
                 name,
                 input_dim,
                 output_dim,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 vocab_size,
                 embedding_size,
                 hidden_W_init=L.xavier_init,
                 hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.xavier_init,
                 output_b_init=tf.zeros_initializer,
                 has_other_input=True,
                 input_var=None,
                 input_layer=None,
                 **kwargs):
        """MLP over a flat input whose trailing `vocab_size` features are a
        bag-of-words; those are mean-pooled through an embedding layer and
        concatenated with the remaining features before the fc stack.

        NOTE(review): `hidden_b_init`/`output_b_init` default to the
        uncalled `tf.zeros_initializer` (no parentheses), unlike the other
        networks in this file -- presumably this layer API instantiates the
        initializer itself; verify against the layer implementation.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                input_layer = L.InputLayer(shape=(None, input_dim),
                                           input_var=input_var,
                                           name="input")
            l_in = input_layer

            if has_other_input:
                # Slice apart
                # NOTE(review): this SliceLayer takes the layer name as the
                # second positional argument, unlike the SliceLayer(l_in,
                # slice, axis=...) calls elsewhere in this file -- presumably
                # a different SliceLayer class; confirm.
                l_other_in = L.SliceLayer(l_in,
                                          "slice_other",
                                          slice(0, input_dim - vocab_size),
                                          axis=-1)
                l_emb_in = L.SliceLayer(l_in,
                                        "slice_emb",
                                        slice(input_dim - vocab_size,
                                              input_dim),
                                        axis=-1)

                # HACK: This is cheap with small embedding matrices but will not scale well..
                # Find a better way to lookup from this representation + mean-pool
                l_embs = MeanPoolEmbeddingLayer(l_emb_in, "embeddings",
                                                embedding_size)

                # Concatenate non-vocab features with the pooled embedding.
                l_hidden_input = L.ConcatLayer([l_other_in, l_embs], "merge")
            else:
                l_hidden_input = l_in

            # Fully-connected trunk; hidden_layers[-1] is always the newest.
            hidden_layers = [l_hidden_input]
            for i, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(hidden_layers[-1],
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%i" % i,
                                     W=hidden_W_init,
                                     b=hidden_b_init)
                hidden_layers.append(l_hid)

            l_out = L.DenseLayer(hidden_layers[-1],
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init)

            self.input_layer = l_in
            self.input_var = l_in.input_var
            self.output_layer = l_out

            LayersPowered.__init__(self, l_out)
Ejemplo n.º 16
0
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 hidden_dims,
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 input_var=None,
                 input_layer=None):
        """
        Stacked (multi-layer) GRU network.

        Builds both the unrolled network over a (batch, time) + input_shape
        input and a single-step version that shares the same weights, for use
        at rollout time.

        hidden_dims: hidden sizes of the stacked GRU layers, bottom-up
        """
        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, None) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer

            l_step_input = L.InputLayer(shape=(None, ) + input_shape,
                                        name="step_input")
            # One previous-hidden input per stacked GRU layer.
            l_step_prev_hiddens = [
                L.InputLayer(shape=(None, hidden_dim),
                             name="step_prev_hidden%i" % i)
                for i, hidden_dim in enumerate(hidden_dims)
            ]

            # Build the unrolled GRU network, which operates laterally, then
            # vertically
            below = l_in
            l_grus = []
            for i, hidden_dim in enumerate(hidden_dims):
                l_gru = L.GRULayer(below,
                                   num_units=hidden_dim,
                                   hidden_nonlinearity=hidden_nonlinearity,
                                   hidden_init_trainable=False,
                                   name="gru%i" % i)
                l_grus.append(l_gru)
                below = l_gru

            # Convert final hidden layer to flat representation
            l_gru_flat = L.ReshapeLayer(l_grus[-1],
                                        shape=(-1, hidden_dims[-1]),
                                        name="gru_flat")
            l_output_flat = L.DenseLayer(l_gru_flat,
                                         num_units=output_dim,
                                         nonlinearity=output_nonlinearity,
                                         name="output_flat")
            l_output = L.OpLayer(
                l_output_flat,
                # BUG FIX: tf.pack was renamed tf.stack in TF 1.0; the LSTM
                # network earlier in this file already uses tf.stack.
                op=lambda flat_output, l_input: tf.reshape(
                    flat_output,
                    tf.stack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output")

            # Build a single step of the GRU network, which operates vertically
            # and is replicated laterally
            below = l_step_input
            l_step_hiddens = []
            for i, (l_gru,
                    prev_hidden) in enumerate(zip(l_grus,
                                                  l_step_prev_hiddens)):
                l_step_hidden = L.GRUStepLayer([below, prev_hidden],
                                               "step_hidden%i" % i, l_gru)
                l_step_hiddens.append(l_step_hidden)
                below = l_step_hidden

            # Step output shares W and b with the unrolled output layer.
            l_step_output = L.DenseLayer(l_step_hiddens[-1],
                                         num_units=output_dim,
                                         nonlinearity=output_nonlinearity,
                                         W=l_output_flat.W,
                                         b=l_output_flat.b,
                                         name="step_output")

            self._l_in = l_in
            self._hid_inits = [l_gru.h0 for l_gru in l_grus]
            self._l_grus = l_grus
            self._l_out = l_output

            self._l_step_input = l_step_input
            self._l_step_prev_hiddens = l_step_prev_hiddens
            self._l_step_hiddens = l_step_hiddens
            self._l_step_output = l_step_output
    def __init__(
            self,
            env_spec,
            name='qnet',
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            action_merge_layer=-2,
            output_nonlinearity=None,
            # hidden_W_init=L.XavierUniformInitializer(),
            # hidden_b_init=L.ZerosInitializer(),
            # output_W_init=L.XavierUniformInitializer(),
            # output_b_init=L.ZerosInitializer(),
            c=1.0,  # temperature variable for stochastic policy
            bn=False):
        """Q-network over a hard-coded two-action space: an MLP on the
        observation emits a 2-vector of Q-values; the Q-value of a chosen
        action is the dot product with its one-hot action input.
        """
        Serializable.quick_init(self, locals())

        # assert env_spec.action_space.is_discrete
        # Number of actions is fixed at 2 (see the output layer below).
        self._n = 2
        self._c = c
        self._env_spec = env_spec

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None,
                                        env_spec.observation_space.flat_dim),
                                 name="obs")
            # One-hot action selector; uint8 since it only carries 0/1.
            l_action = L.InputLayer(shape=(None, 2),
                                    var_type=tf.uint8,
                                    name="actions")

            # NOTE(review): n_layers (and action_merge_layer) are unused here;
            # presumably leftovers from a sibling implementation.
            n_layers = len(hidden_sizes) + 1

            l_hidden = l_obs

            # Fully-connected trunk over the observation.
            for idx, size in enumerate(hidden_sizes):
                if bn:
                    l_hidden = L.batch_norm(l_hidden)

                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=size,
                    # W=hidden_W_init,
                    # b=hidden_b_init,
                    nonlinearity=hidden_nonlinearity,
                    name="h%d" % (idx + 1))

            # Per-action Q-values (one unit per action).
            l_output_vec = L.DenseLayer(
                l_hidden,
                num_units=2,
                # W=output_W_init,
                # b=output_b_init,
                nonlinearity=output_nonlinearity,
                name="output")

            output_vec_var = L.get_output(l_output_vec, deterministic=True)

            # Select the Q-value of the chosen action via the one-hot input.
            output_var = tf.reduce_sum(
                output_vec_var * tf.to_float(l_action.input_var), 1)

            # f_qval(obs, one_hot_action) -> Q(s, a); f_qval_vec(obs) -> all Q.
            self._f_qval = tensor_utils.compile_function(
                [l_obs.input_var, l_action.input_var], output_var)
            self._f_qval_vec = tensor_utils.compile_function([l_obs.input_var],
                                                             output_vec_var)
            self._output_vec_layer = l_output_vec
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._output_nonlinearity = output_nonlinearity

            self.init_policy()

            LayersPowered.__init__(self, [l_output_vec])
Ejemplo n.º 18
0
    def __init__(self,
                 name,
                 input_shape,
                 extra_input_shape,
                 output_dim,
                 hidden_sizes,
                 conv_filters,
                 conv_filter_sizes,
                 conv_strides,
                 conv_pads,
                 extra_hidden_sizes=None,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer(),
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 input_var=None,
                 input_layer=None):
        """Convolutional network with an extra non-image input branch.

        The flat input holds the flattened image first, then the flattened
        extra features. The image slice goes through conv layers, the extra
        slice through its own fc layers; both are concatenated and passed
        through joint fc layers to the output.
        """
        Serializable.quick_init(self, locals())

        if extra_hidden_sizes is None:
            extra_hidden_sizes = []

        with tf.variable_scope(name):

            input_flat_dim = np.prod(input_shape)
            extra_input_flat_dim = np.prod(extra_input_shape)
            total_input_flat_dim = input_flat_dim + extra_input_flat_dim

            if input_layer is None:
                l_in = L.InputLayer(shape=(None, total_input_flat_dim),
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer

            # First input_flat_dim features: the image, unflattened for conv.
            l_conv_in = L.reshape(L.SliceLayer(l_in,
                                               indices=slice(input_flat_dim),
                                               name="conv_slice"),
                                  ([0], ) + input_shape,
                                  name="conv_reshaped")
            # Remaining features: the extra input, unflattened to its shape.
            l_extra_in = L.reshape(L.SliceLayer(l_in,
                                                indices=slice(
                                                    input_flat_dim, None),
                                                name="extra_slice"),
                                   ([0], ) + extra_input_shape,
                                   name="extra_reshaped")

            # Convolutional branch over the image slice.
            l_conv_hid = l_conv_in
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_conv_hid = L.Conv2DLayer(
                    l_conv_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="conv_hidden_%d" % idx,
                )

            # Fully-connected branch over the extra input.
            l_extra_hid = l_extra_in
            for idx, hidden_size in enumerate(extra_hidden_sizes):
                l_extra_hid = L.DenseLayer(
                    l_extra_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="extra_hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                )

            # Join the two branches: flattened conv features ++ extra features.
            l_joint_hid = L.concat(
                [L.flatten(l_conv_hid, name="conv_hidden_flat"), l_extra_hid],
                name="joint_hidden")

            for idx, hidden_size in enumerate(hidden_sizes):
                l_joint_hid = L.DenseLayer(
                    l_joint_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="joint_hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                )
            l_out = L.DenseLayer(
                l_joint_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
            )
            self._l_in = l_in
            self._l_out = l_out

            LayersPowered.__init__(self, [l_out], input_layers=[l_in])
Ejemplo n.º 19
0
    def __init__(self,
                 env_spec,
                 name='MLPPhinet',
                 hidden_sizes=(100, 100),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=-2,
                 output_nonlinearity=None,
                 vs_form=None,
                 bn=False):
        """MLP phi-network over (observation, action) pairs.

        Observations are whitened with a running mean/std, passed through a
        first dense layer, and merged (by elementwise addition) with a first
        dense layer over the actions; the sum is fed through the remaining
        hidden layers to a scalar output.  Optionally an observation-only
        term ``vs`` (linear or a small MLP) is added to that output.

        Args:
            env_spec: Environment spec; its action space must be continuous.
            name: TF variable scope wrapping all layers.
            hidden_sizes: Hidden-layer widths; the first entry is used for
                both the observation and the action branch.
            hidden_nonlinearity: Nonlinearity for all hidden layers.
            action_merge_layer: Unused here; kept for interface
                compatibility with sibling networks that merge the action
                input at a configurable depth.
            output_nonlinearity: Nonlinearity of the scalar output layer.
            vs_form: None, 'linear' or 'mlp' — form of the optional
                observation-only additive term.
            bn: If True, apply batch normalization before each hidden layer
                after the merge.

        Raises:
            NotImplementedError: If ``vs_form`` is neither None, 'linear',
                nor 'mlp'.
        """
        Serializable.quick_init(self, locals())

        assert not env_spec.action_space.is_discrete
        self._env_spec = env_spec
        self.vs_form = vs_form
        with tf.variable_scope(name):
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            l_obs = L.InputLayer(shape=(None, obs_dim), name="obs")
            l_action = L.InputLayer(shape=(None, action_dim), name="action")

            # Running statistics used to whiten observations online.
            self.obs_rms = RunningMeanStd(shape=(obs_dim, ))

            obz = L.NormalizeLayer(l_obs,
                                   rms=self.obs_rms,
                                   clip_min=-5.,
                                   clip_max=5.)

            # Separate first dense layers for observations and actions ...
            obs_hidden = L.DenseLayer(obz,
                                      num_units=hidden_sizes[0],
                                      nonlinearity=hidden_nonlinearity,
                                      name="obs_h%d" % (0))
            act_hidden = L.DenseLayer(l_action,
                                      num_units=hidden_sizes[0],
                                      nonlinearity=hidden_nonlinearity,
                                      name="act_h%d" % (0))
            # ... merged by elementwise addition (both branches have
            # hidden_sizes[0] units, so the shapes match).
            merge_hidden = L.OpLayer(obs_hidden,
                                     op=lambda x, y: x + y,
                                     shape_op=lambda x, y: y,
                                     extras=[act_hidden])

            l_hidden = merge_hidden

            for idx, size in enumerate(hidden_sizes[1:]):
                if bn:
                    l_hidden = batch_norm(l_hidden)

                l_hidden = L.DenseLayer(l_hidden,
                                        num_units=size,
                                        nonlinearity=hidden_nonlinearity,
                                        name="h%d" % (idx + 1))

            l_output = L.DenseLayer(l_hidden,
                                    num_units=1,
                                    nonlinearity=output_nonlinearity,
                                    name="output")

            if vs_form is not None:
                # Observation-only additive term.
                if vs_form == 'linear':
                    vs = L.DenseLayer(l_obs,
                                      num_units=1,
                                      nonlinearity=None,
                                      name='vs')
                elif vs_form == 'mlp':
                    vs = L.DenseLayer(l_obs,
                                      num_units=64,
                                      nonlinearity=tf.nn.relu,
                                      name='hidden_vs')
                    vs = L.DenseLayer(vs,
                                      num_units=1,
                                      nonlinearity=None,
                                      name='vs')
                else:
                    raise NotImplementedError

                output_var = L.get_output(l_output, deterministic=True) + \
                    L.get_output(vs, deterministic=True)
            else:
                output_var = L.get_output(l_output, deterministic=True)
            # Flatten the (N, 1) network output to a vector of N values.
            output_var = tf.reshape(output_var, (-1, ))

            self._f_phival = tensor_utils.compile_function(
                inputs=[l_obs.input_var, l_action.input_var],
                outputs=output_var)
            self._output_layer = l_output
            self._obs_layer = l_obs
            self._action_layer = l_action
            self.output_nonlinearity = output_nonlinearity

            if vs_form is not None:
                self._output_vs = vs
                LayersPowered.__init__(self, [l_output, self._output_vs])
            else:
                LayersPowered.__init__(self, [l_output])
Ejemplo n.º 20
0
    def __init__(self,
                 env_spec,
                 name='qnet',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=-2,
                 output_nonlinearity=None,
                 eqf_use_full_qf=False,
                 eqf_sample_size=1,
                 mqprop=False,
                 bn=False):
        """Continuous-action Q-network.

        Builds an MLP over observations and concatenates the action input at
        the layer selected by ``action_merge_layer`` (negative indices count
        from the output side), producing a scalar Q-value per sample.

        Args:
            env_spec: Environment spec; the action space must be continuous.
            name: TF variable scope wrapping all layers.
            hidden_sizes: Widths of the hidden layers.
            hidden_nonlinearity: Nonlinearity for the hidden layers.
            action_merge_layer: Index of the layer at which actions are
                concatenated in (normalized into [0, len(hidden_sizes)]).
            output_nonlinearity: Nonlinearity of the scalar output layer.
            eqf_use_full_qf / eqf_sample_size / mqprop: Flags stored for use
                by the surrounding algorithm.
            bn: If True, batch-normalize before each hidden layer.
        """
        Serializable.quick_init(self, locals())

        assert not env_spec.action_space.is_discrete
        self._env_spec = env_spec

        with tf.variable_scope(name):
            obs_dim = env_spec.observation_space.flat_dim
            act_dim = env_spec.action_space.flat_dim

            l_obs = L.InputLayer(shape=(None, obs_dim), name="obs")
            l_action = L.InputLayer(shape=(None, act_dim), name="actions")

            n_layers = len(hidden_sizes) + 1
            # Normalize the (possibly negative) merge index into
            # [0, n_layers); with no hidden layers, merge at the output.
            merge_at = action_merge_layer % n_layers if n_layers > 1 else 1

            layer = l_obs
            for i, width in enumerate(hidden_sizes):
                if bn:
                    layer = batch_norm(layer)
                if i == merge_at:
                    layer = L.ConcatLayer([layer, l_action])
                layer = L.DenseLayer(layer,
                                     num_units=width,
                                     nonlinearity=hidden_nonlinearity,
                                     name="h%d" % (i + 1))

            if merge_at == n_layers:
                layer = L.ConcatLayer([layer, l_action])

            l_output = L.DenseLayer(layer,
                                    num_units=1,
                                    nonlinearity=output_nonlinearity,
                                    name="output")

            qval = tf.reshape(
                L.get_output(l_output, deterministic=True), (-1, ))

            self._f_qval = tensor_utils.compile_function(
                [l_obs.input_var, l_action.input_var], qval)
            self._output_layer = l_output
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._output_nonlinearity = output_nonlinearity

            self.eqf_use_full_qf = eqf_use_full_qf
            self.eqf_sample_size = eqf_sample_size
            self.mqprop = mqprop

            LayersPowered.__init__(self, [l_output])
    def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        dropout_prob,
        output_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
    ):
        """Feed-forward MLP with dropout on the input and before the output.

        Dropout (with probability ``dropout_prob``) is applied once to the
        raw input and once to the last hidden layer, just before the output
        projection.

        Args:
            name: TF variable scope wrapping all layers.
            output_dim: Number of output units.
            hidden_sizes: Iterable of hidden-layer widths.
            hidden_nonlinearity: Nonlinearity for the hidden layers.
            dropout_prob: Dropout probability shared by both dropout layers.
            output_nonlinearity: Nonlinearity of the output layer (or None).
            hidden_W_init / hidden_b_init: Hidden-layer initializers.
            output_W_init / output_b_init: Output-layer initializers.
            input_var: Optional existing input tensor.
            input_layer: Optional existing input layer; when None a new one
                is built from ``input_shape``.
            input_shape: Shape of one input sample (required when
                ``input_layer`` is None).
            batch_normalization: If True, batch-normalize every dense layer.
            weight_normalization: If True, weight-normalize every dense
                layer.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]

            # Dropout on the raw input before any hidden layer.
            l_hid = L.DropoutLayer(l_in, p=dropout_prob)

            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%d" % idx,
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)

            # Dropout again on the last hidden layer, just before the
            # output projection.
            l_hid_dropout = L.DropoutLayer(l_hid, p=dropout_prob)

            l_out = L.DenseLayer(l_hid_dropout,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)

            if batch_normalization:
                l_out = L.batch_norm(l_out)

            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            self._output = L.get_output(l_out)

            # Pass a list of output layers, consistent with the sibling
            # networks in this file (get_all_layers accepts either form).
            LayersPowered.__init__(self, [l_out])