Example #1
    def __init__(self,
                 name,
                 input_shape,
                 extra_input_shape,
                 output_dim,
                 hidden_sizes,
                 conv_filters,
                 conv_filter_sizes,
                 conv_strides,
                 conv_pads,
                 extra_hidden_sizes=None,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer,
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 input_var=None,
                 input_layer=None):
        Serializable.quick_init(self, locals())

        if extra_hidden_sizes is None:
            extra_hidden_sizes = []

        with tf.variable_scope(name):

            input_flat_dim = np.prod(input_shape)
            extra_input_flat_dim = np.prod(extra_input_shape)
            total_input_flat_dim = input_flat_dim + extra_input_flat_dim

            if input_layer is None:
                l_in = L.InputLayer(shape=(None, total_input_flat_dim),
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer

            l_conv_in = L.reshape(L.SliceLayer(l_in,
                                               indices=slice(input_flat_dim),
                                               name="conv_slice"),
                                  ([0], ) + input_shape,
                                  name="conv_reshaped")
            l_extra_in = L.reshape(L.SliceLayer(l_in,
                                                indices=slice(
                                                    input_flat_dim, None),
                                                name="extra_slice"),
                                   ([0], ) + extra_input_shape,
                                   name="extra_reshaped")

            l_conv_hid = l_conv_in
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_conv_hid = L.Conv2DLayer(
                    l_conv_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="conv_hidden_%d" % idx,
                )

            l_extra_hid = l_extra_in
            for idx, hidden_size in enumerate(extra_hidden_sizes):
                l_extra_hid = L.DenseLayer(
                    l_extra_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="extra_hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                )

            l_joint_hid = L.concat(
                [L.flatten(l_conv_hid, name="conv_hidden_flat"), l_extra_hid],
                name="joint_hidden")

            for idx, hidden_size in enumerate(hidden_sizes):
                l_joint_hid = L.DenseLayer(
                    l_joint_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="joint_hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                )
            l_out = L.DenseLayer(
                l_joint_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
            )
            self._l_in = l_in
            self._l_out = l_out

            LayersPowered.__init__(self, [l_out], input_layers=[l_in])
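
The constructor above slices one flat input row into an image part (the first np.prod(input_shape) entries, reshaped and fed through the conv stack) and an extra part (the remaining entries, fed through dense layers). A minimal sketch of how such a flat row could be packed, using purely illustrative shapes that are not taken from the snippet:

import numpy as np

# Illustrative shapes only (not from the snippet above).
input_shape = (32, 32, 3)         # image part, consumed by the conv branch
extra_input_shape = (5,)          # extra non-image part, consumed by the dense branch

img = np.zeros(input_shape, dtype=np.float32)
extra = np.zeros(extra_input_shape, dtype=np.float32)
flat_row = np.concatenate([img.reshape(-1), extra.reshape(-1)])
assert flat_row.shape == (np.prod(input_shape) + np.prod(extra_input_shape),)
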
Example #2
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 conv_filters,
                 conv_filter_sizes,
                 conv_strides,
                 conv_pads,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer,
                 input_var=None,
                 input_layer=None,
                 batch_normalization=False,
                 weight_normalization=False):
        """
        A network composed of several convolutional layers followed by fully
        connected layers.

        input_shape: (width, height, channel). Note that the network inputs
            are assumed to be flattened; the network first unflattens them and
            then applies the convolutions.
        conv_filters: a list with the number of filters for each conv layer
        conv_filter_sizes: a list of kernel sizes (int) for the conv layers
        conv_strides: a list of strides (int) for the conv layers
        conv_pads: a list of padding modes (either 'SAME' or 'VALID')
        hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and
            fc layers
        hidden_sizes: a list with the number of hidden units for each fc layer
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            if input_layer is not None:
                l_in = input_layer
                l_hid = l_in
            elif len(input_shape) == 3:
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                    input_var=input_var,
                                    name="input")
                l_hid = L.reshape(l_in, ([0], ) + input_shape,
                                  name="reshape_input")
            elif len(input_shape) == 2:
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                    input_var=input_var,
                                    name="input")
                input_shape = (1, ) + input_shape
                l_hid = L.reshape(l_in, ([0], ) + input_shape,
                                  name="reshape_input")
            else:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
                l_hid = l_in

            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                # print("debug123",conv_filter,filter_size,stride,pad,hidden_nonlinearity,idx,weight_normalization)
                l_hid = L.Conv2DLayer(
                    l_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="conv_hidden_%d" % idx,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)

            if output_nonlinearity == L.spatial_expected_softmax:
                assert len(hidden_sizes) == 0
                assert output_dim == conv_filters[-1] * 2
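                # Spatial expected softmax emits an (x, y) expected coordinate per
                # feature map, hence the 2 * conv_filters[-1] outputs asserted above.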
                l_hid.nonlinearity = tf.identity
                l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
            else:
                l_hid = L.flatten(l_hid, name="conv_flatten")
                for idx, hidden_size in enumerate(hidden_sizes):
                    l_hid = L.DenseLayer(
                        l_hid,
                        num_units=hidden_size,
                        nonlinearity=hidden_nonlinearity,
                        name="hidden_%d" % idx,
                        W=hidden_W_init,
                        b=hidden_b_init,
                        weight_normalization=weight_normalization,
                    )
                    if batch_normalization:
                        l_hid = L.batch_norm(l_hid)
                l_out = L.DenseLayer(
                    l_hid,
                    num_units=output_dim,
                    nonlinearity=output_nonlinearity,
                    name="output",
                    W=output_W_init,
                    b=output_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_out = L.batch_norm(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var

        LayersPowered.__init__(self, l_out)
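
A minimal usage sketch for the constructor above, assuming it belongs to a ConvNetwork-style class (the class name is not shown in this snippet) and using purely illustrative argument values:

import numpy as np
import tensorflow as tf

# Hypothetical class name and argument values, for illustration only.
net = ConvNetwork(
    name="cnn",
    input_shape=(84, 84, 3),        # (width, height, channel)
    output_dim=4,
    conv_filters=(16, 32),          # number of filters per conv layer
    conv_filter_sizes=(8, 4),
    conv_strides=(4, 2),
    conv_pads=('SAME', 'SAME'),
    hidden_sizes=(256,),
    hidden_nonlinearity=tf.nn.relu,
    output_nonlinearity=tf.nn.softmax,
)
# Observations are fed to the network as flat rows of length np.prod(input_shape).
flat_obs = np.zeros((1, 84 * 84 * 3), dtype=np.float32)
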
Example #3
    def __init__(
        self,
        name,
        output_dim,
        output_dim_binary,
        hidden_sizes,
        hidden_nonlinearity,
        output_nonlinearity,
        output_nonlinearity_binary,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
    ):

        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):

                l_hid = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%d" % idx,
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)

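            # Two heads branch off the last shared hidden layer: a binary output
            # head (l_out_binary) and the main output head (l_out).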
            l_hid_binary = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="hidden_binary",
                W=hidden_W_init,
                b=hidden_b_init,
                weight_normalization=weight_normalization)

            l_out_binary = L.DenseLayer(
                l_hid_binary,
                num_units=output_dim_binary,
                nonlinearity=output_nonlinearity_binary,
                name="output_binary",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization)

            self._layers.append(l_out_binary)

            l_hid_out = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_final",
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)

            l_out = L.DenseLayer(l_hid_out,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)

            if batch_normalization:
                l_out = L.batch_norm(l_out)

            self._layers.append(l_out)
            self._l_in = l_in

            self._l_out = l_out

            self._l_out_binary = l_out_binary

            self._output_binary = L.get_output(l_out_binary)
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, [l_out, l_out_binary])
Example #4
    def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
    ):

        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                assert input_shape is not None, \
                    "input_layer or input_shape must be supplied"
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%d" % idx,
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)
            l_out_raw = L.DenseLayer(l_hid,
                                     num_units=output_dim,
                                     name="output",
                                     W=output_W_init,
                                     b=output_b_init,
                                     weight_normalization=weight_normalization)
            if batch_normalization:
                l_out_raw = L.batch_norm(l_out_raw)
            self._layers.append(l_out_raw)

            # mask assumed to occupy first output_dim elements
            def mask_op(X):
                return X[..., :output_dim]

            def mask_shape_op(old_shape):
                return old_shape[:-1] + (output_dim, )

            mask = L.OpLayer(l_in, mask_op, shape_op=mask_shape_op)
            self._layers.append(mask)
            l_out = L.OpLayer(l_out_raw, masked_softmax, extras=[mask])
            self._layers.append(l_out)

            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
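
masked_softmax is used by the final OpLayer above but is not defined in this snippet. A plausible sketch of such a helper, written purely as an assumption about what it computes (a softmax restricted to the unmasked entries), is:

import tensorflow as tf

def masked_softmax(logits, mask):
    # Hypothetical helper: softmax over entries where mask == 1, zero elsewhere.
    # Subtracting the per-row max keeps the exponentials numerically stable.
    exp = tf.exp(logits - tf.reduce_max(logits, axis=-1, keepdims=True)) * mask
    return exp / (tf.reduce_sum(exp, axis=-1, keepdims=True) + 1e-8)
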
Example #5
    def _make_weights(self, old_prop_weights=None, old_act_weights=None):
        # prop_weights[i] is a dictionary mapping predicate names to weights
        # for modules in the i-th proposition layer
        self.prop_weights = []
        self.act_weights = []
        self.all_weights = []

        for hid_idx, hid_sizes in enumerate(self.hidden_sizes):
            act_size, prop_size = hid_sizes

            # make action layer weights
            act_dict = {}
            for unbound_act in self.dom_meta.unbound_acts:
                preds = self.dom_meta.rel_pred_names(unbound_act)
                if not hid_idx:
                    # first layer, so our input is actually a binary vector
                    # giving a truth value for each proposition
                    in_size = len(preds) * 2 + self.extra_dim
                else:
                    in_size = len(preds) * self.hidden_sizes[hid_idx - 1][1]

                name_pfx = 'hid_%d_act_%s' % (hid_idx, unbound_act.schema_name)
                # TODO: doing "if old_act_weights" check each time is silly.
                # Should store parameters *purely* by name, and have code
                # responsible for automatically re-instantiating old weights if
                # they exist.
                if old_act_weights is not None:
                    W_init, b_init = map(L.const,
                                         old_act_weights[hid_idx][unbound_act])
                else:
                    W_init = L.XavierUniformInitializer()
                    b_init = tf.zeros_initializer()
                act_W = L.create_param(
                    W_init, shape=(1, in_size, act_size), name=name_pfx + '/W')
                act_b = L.create_param(
                    b_init, shape=(act_size, ), name=name_pfx + '/b')
                act_dict[unbound_act] = (act_W, act_b)
                self.all_weights.extend([act_W, act_b])

            self.act_weights.append(act_dict)

            # make hidden proposition layer weights
            pred_dict = {}
            for pred_name in self.dom_meta.pred_names:
                rel_acts = self.dom_meta.rel_acts(pred_name)
                # We should never end up with NO relevant actions for a
                # predicate. Why bother including the predicate?
                assert len(rel_acts) > 0, \
                    "no relevant actions for proposition %s" % pred_name

                in_size = len(rel_acts) * act_size
                name_pfx = 'hid_%d_prop_%s' % (hid_idx, pred_name)
                if old_prop_weights is not None:
                    W_init, b_init = map(L.const,
                                         old_prop_weights[hid_idx][pred_name])
                else:
                    W_init = L.XavierUniformInitializer()
                    b_init = tf.zeros_initializer()
                prop_W = L.create_param(
                    W_init,
                    shape=(1, in_size, prop_size),
                    name=name_pfx + '/W')
                prop_b = L.create_param(
                    b_init, shape=(prop_size, ), name=name_pfx + '/b')
                pred_dict[pred_name] = (prop_W, prop_b)
                self.all_weights.extend([prop_W, prop_b])

            self.prop_weights.append(pred_dict)

        # make final layer weights (action)
        final_act_dict = {}
        for unbound_act in self.dom_meta.unbound_acts:
            preds = self.dom_meta.rel_pred_names(unbound_act)
            if not self.hidden_sizes:
                in_size = len(preds) * 2 + self.extra_dim
            else:
                in_size = len(preds) * self.hidden_sizes[-1][1]

            name_pfx = 'final_act_%s' % unbound_act.schema_name
            if old_act_weights is not None:
                W_init, b_init = map(L.const, old_act_weights[-1][unbound_act])
            else:
                W_init = L.XavierUniformInitializer()
                b_init = tf.zeros_initializer()
            final_act_W = L.create_param(
                W_init, shape=(1, in_size, 1), name=name_pfx + '/W')
            final_act_b = L.create_param(
                b_init, shape=(1, ), name=name_pfx + '/b')
            final_act_dict[unbound_act] = (final_act_W, final_act_b)
            self.all_weights.extend([final_act_W, final_act_b])

        self.act_weights.append(final_act_dict)
Example #6
    def __init__(self,
                 env_spec,
                 name='qnet',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=-2,
                 output_nonlinearity=None,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer(),
                 bn=False):
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None,
                                        env_spec.observation_space.flat_dim),
                                 name="obs")
            l_action = L.InputLayer(shape=(None,
                                           env_spec.action_space.flat_dim),
                                    name="actions")

            n_layers = len(hidden_sizes) + 1

            if n_layers > 1:
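                # Wrap a possibly negative index (e.g. the default -2) into [0, n_layers).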
                action_merge_layer = \
                    (action_merge_layer % n_layers + n_layers) % n_layers
            else:
                action_merge_layer = 1

            l_hidden = l_obs

            for idx, size in enumerate(hidden_sizes):
                if bn:
                    l_hidden = L.batch_norm(l_hidden)

                if idx == action_merge_layer:
                    l_hidden = L.ConcatLayer([l_hidden, l_action])

                l_hidden = L.DenseLayer(l_hidden,
                                        num_units=size,
                                        W=hidden_W_init,
                                        b=hidden_b_init,
                                        nonlinearity=hidden_nonlinearity,
                                        name="h%d" % (idx + 1))

            if action_merge_layer == n_layers:
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_output = L.DenseLayer(l_hidden,
                                    num_units=1,
                                    W=output_W_init,
                                    b=output_b_init,
                                    nonlinearity=output_nonlinearity,
                                    name="output")

            #output_var = L.get_output(l_output, deterministic=True).flatten()
            output_var = tf.reshape(L.get_output(l_output, deterministic=True),
                                    (-1, ))

            self._f_qval = tensor_utils.compile_function(
                [l_obs.input_var, l_action.input_var], output_var)
            self._output_layer = l_output
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._output_nonlinearity = output_nonlinearity

            LayersPowered.__init__(self, [l_output])
Example #7
    def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        output_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
        # added arguments
        w_auxiliary=False,
        auxliary_classes=0.,
    ):

        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%d" % idx,
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)
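            # Optional auxiliary classification head that shares the trunk with the main output.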
            if w_auxiliary:
                assert auxliary_classes > 0
                l_hid_aux = L.DenseLayer(
                    l_hid,
                    num_units=64,
                    nonlinearity=hidden_nonlinearity,
                    name="auxiliary_hidden_0",
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid_aux = L.batch_norm(l_hid_aux)
                self._layers.append(l_hid_aux)
                l_aux = L.DenseLayer(l_hid_aux,
                                     num_units=auxliary_classes,
                                     nonlinearity=output_nonlinearity,
                                     name="aux_output",
                                     W=output_W_init,
                                     b=output_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_aux = L.batch_norm(l_aux)
                self._layers.append(l_aux)
                self._l_aux = l_aux

            l_out = L.DenseLayer(l_hid,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)
            if batch_normalization:
                l_out = L.batch_norm(l_out)
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Example #8
    def __init__(self,
                 env_spec,
                 name='nafqnet',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=0,
                 output_nonlinearity=None,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=L.ZerosInitializer(),
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=L.ZerosInitializer(),
                 bn=False):
        Serializable.quick_init(self, locals())

        assert not env_spec.action_space.is_discrete

        action_dim = env_spec.action_space.flat_dim
        self._action_dim = action_dim
        self._env_spec = env_spec

        n_layers = len(hidden_sizes)
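        # Wrap a possibly negative action_merge_layer index into [0, n_layers).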
        action_merge_layer = \
            (action_merge_layer % n_layers + n_layers) % n_layers

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None,
                                        env_spec.observation_space.flat_dim),
                                 name="obs")
            l_action = L.InputLayer(shape=(None,
                                           env_spec.action_space.flat_dim),
                                    name="actions")
            l_policy_mu = L.InputLayer(shape=(None, action_dim),
                                       name="policy_mu")
            l_policy_sigma = L.InputLayer(shape=(None, action_dim, action_dim),
                                          name="policy_sigma")

            l_hidden = l_obs
            idx = 0

            l_hidden_kwargs = dict(
                W=hidden_W_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
            )

            l_output_kwargs = dict(
                W=output_W_init,
                b=output_b_init,
            )

            while idx < action_merge_layer:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1

            _idx = idx
            _l_hidden = l_hidden

            # compute L network
            while idx < n_layers:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="L_h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1
            l_L = L.DenseLayer(
                l_hidden,
                num_units=action_dim**2,
                nonlinearity=None,
                name="L_h%d" % (idx + 1),
                **l_output_kwargs,
            )

            # compute V network
            idx = _idx
            l_hidden = _l_hidden
            while idx < n_layers:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="V_h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1
            l_V = L.DenseLayer(
                l_hidden,
                num_units=1,
                nonlinearity=None,
                name="V_h%d" % (idx + 1),
                **l_output_kwargs,
            )

            # compute mu network
            idx = _idx
            l_hidden = _l_hidden
            while idx < n_layers:
                if bn: l_hidden = L.batch_norm(l_hidden)
                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=hidden_sizes[idx],
                    name="mu_h%d" % (idx + 1),
                    **l_hidden_kwargs,
                )
                idx += 1
            if bn: l_hidden = L.batch_norm(l_hidden)
            l_mu = L.DenseLayer(
                l_hidden,
                num_units=action_dim,
                nonlinearity=tf.nn.tanh,
                name="mu_h%d" % (idx + 1),
                **l_output_kwargs,
            )

            L_var, V_var, mu_var = L.get_output([l_L, l_V, l_mu],
                                                deterministic=True)
            V_var = tf.reshape(V_var, (-1, ))

            # compute advantage
            L_mat_var = self.get_L_sym(L_var)
            P_var = self.get_P_sym(L_mat_var)
            A_var = self.get_A_sym(P_var, mu_var, l_action.input_var)

            # compute Q
            Q_var = A_var + V_var

            # compute expected Q under Gaussian policy
            e_A_var = self.get_e_A_sym(P_var, mu_var, l_policy_mu.input_var,
                                       l_policy_sigma.input_var)
            e_Q_var = e_A_var + V_var

            self._f_qval = tensor_utils.compile_function(
                [l_obs.input_var, l_action.input_var], Q_var)
            self._f_e_qval = tensor_utils.compile_function([
                l_obs.input_var, l_policy_mu.input_var,
                l_policy_sigma.input_var
            ], e_Q_var)
            self._L_layer = l_L
            self._V_layer = l_V
            self._mu_layer = l_mu
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._policy_mu_layer = l_policy_mu
            self._policy_sigma_layer = l_policy_sigma
            self._output_nonlinearity = output_nonlinearity

            self.init_policy()

            LayersPowered.__init__(self, [l_L, l_V, l_mu])
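
The helpers get_L_sym, get_P_sym and get_A_sym are defined elsewhere in the class and are not shown in this snippet. A minimal sketch of the standard NAF formulation they presumably implement, written here as free functions for brevity (an assumption; real implementations often also exponentiate the diagonal of L, which this sketch omits):

import tensorflow as tf

def get_L_sym(L_vec, action_dim):
    # Hypothetical: reshape the flat (action_dim ** 2,) output into a
    # lower-triangular matrix per sample.
    L_mat = tf.reshape(L_vec, (-1, action_dim, action_dim))
    return tf.matrix_band_part(L_mat, -1, 0)

def get_P_sym(L_mat):
    # P = L L^T is positive semi-definite by construction.
    return tf.matmul(L_mat, tf.matrix_transpose(L_mat))

def get_A_sym(P, mu, action):
    # NAF advantage: A(s, a) = -1/2 (a - mu)^T P (a - mu), so that Q = A + V.
    delta = tf.expand_dims(action - mu, -1)
    quad = tf.matmul(delta, tf.matmul(P, delta), transpose_a=True)
    return -0.5 * tf.reshape(quad, (-1,))
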
Example #9
    def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        dropout_prob,
        output_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
    ):

        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]

            ##applying dropout on all layers?
            l_hid_dropout_input = L.DropoutLayer(l_in, p=dropout_prob)
            l_hid = l_hid_dropout_input

            # l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%d" % idx,
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)

            ###applying dropout to the last hidden layer?
            l_hid_dropout = L.DropoutLayer(l_hid, p=dropout_prob)

            l_out = L.DenseLayer(l_hid_dropout,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)

            # l_out = L.DenseLayer(
            #     l_hid,
            #     num_units=output_dim,
            #     nonlinearity=output_nonlinearity,
            #     name="output",
            #     W=output_W_init,
            #     b=output_b_init,
            #     weight_normalization=weight_normalization
            # )

            #Alternative, making output layer the dropout layer
            # l_out = L.DropoutLayer(l_hid, p=dropout_prob)

            if batch_normalization:
                l_out = L.batch_norm(l_out)

            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var
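            # L.get_output(l_out) below builds the dropout-active output; a deterministic
            # pass would presumably use L.get_output(l_out, deterministic=True), following
            # the Lasagne convention (an assumption about this layers module).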
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Example #10
    def __init__(
            self,
            env_spec,
            name='qnet',
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            action_merge_layer=-2,
            output_nonlinearity=None,
            hidden_W_init=L.XavierUniformInitializer(),
            hidden_b_init=L.ZerosInitializer(),
            output_W_init=L.XavierUniformInitializer(),
            output_b_init=L.ZerosInitializer(),
            c=1.0,  # temperature variable for stochastic policy
            bn=False):
        Serializable.quick_init(self, locals())

        assert env_spec.action_space.is_discrete
        self._n = env_spec.action_space.n
        self._c = c
        self._env_spec = env_spec

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None,
                                        env_spec.observation_space.flat_dim),
                                 name="obs")
            l_action = L.InputLayer(shape=(None, env_spec.action_space.n),
                                    var_type=tf.uint8,
                                    name="actions")

            n_layers = len(hidden_sizes) + 1

            l_hidden = l_obs

            for idx, size in enumerate(hidden_sizes):
                if bn:
                    l_hidden = L.batch_norm(l_hidden)

                l_hidden = L.DenseLayer(l_hidden,
                                        num_units=size,
                                        W=hidden_W_init,
                                        b=hidden_b_init,
                                        nonlinearity=hidden_nonlinearity,
                                        name="h%d" % (idx + 1))

            l_output_vec = L.DenseLayer(l_hidden,
                                        num_units=env_spec.action_space.n,
                                        W=output_W_init,
                                        b=output_b_init,
                                        nonlinearity=output_nonlinearity,
                                        name="output")

            output_vec_var = L.get_output(l_output_vec, deterministic=True)

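            # The one-hot action input selects the Q-value of the chosen action in each row.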
            output_var = tf.reduce_sum(
                output_vec_var * tf.to_float(l_action.input_var), 1)

            self._f_qval = tensor_utils.compile_function(
                [l_obs.input_var, l_action.input_var], output_var)
            self._f_qval_vec = tensor_utils.compile_function([l_obs.input_var],
                                                             output_vec_var)
            self._output_vec_layer = l_output_vec
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._output_nonlinearity = output_nonlinearity

            self.init_policy()

            LayersPowered.__init__(self, [l_output_vec])