Example No. 1
# NOTE: assumed imports/aliases for this snippet
import lasagne.layers as L
import lasagne.init as LI
from lasagne.nonlinearities import rectify as relu

def build_cnn(config, use_noise=True, use_bn=True):
    
    # NOTE: Neither Conv2DDNNLayer nor Conv2DMMLayer will work
    # with the T.Rop operation, which is used for the Fisher-vector product.
    
    l_input = L.InputLayer((None, 1, config['height'], config['width']))

    l_out = L.Conv2DLayer(l_input,
        num_filters=config['cnn_f1'], filter_size=(6,6), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    # https://arxiv.org/pdf/1602.01407v2.pdf
    # QUOTE: KFC-pre and BN can be combined synergistically.
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)

    l_out = L.Conv2DLayer(l_out,
        num_filters=config['cnn_f2'], filter_size=(4,4), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)
    if use_noise: l_out = L.dropout(l_out)
    
    l_out = L.Conv2DLayer(l_out,
        num_filters=config['cnn_f3'], filter_size=(4,4), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)
    if use_noise: l_out = L.dropout(l_out)
    
    return l_input, l_out
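A minimal usage sketch (the config values and the zero direction vectors are illustrative, not from the original source). It shows why the plain Conv2DLayer matters here: the resulting graph stays compatible with T.Rop, the operation the NOTE above refers to for the Fisher-vector product.

import theano.tensor as T
import lasagne.layers as L

config = {'height': 84, 'width': 84, 'cnn_f1': 16, 'cnn_f2': 32, 'cnn_f3': 32}
l_in, l_out = build_cnn(config, use_noise=False, use_bn=True)

out = L.get_output(l_out, deterministic=True)
params = L.get_all_params(l_out, trainable=True)
# Placeholder direction vectors; in practice these come from the optimizer.
vs = [T.zeros_like(p) for p in params]
# Jacobian-vector product via the R-operator, the building block of the
# Fisher-vector product mentioned above.
Jv = T.Rop(out, params, vs)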
Example No. 2
    def __init__(self, incomings, num_steps, num_units, att_units, num_actns=7,
                 zero_trainable=True, zero_init=LI.Constant(0.),
                 W_init=LI.HeUniform(), b_init=LI.Constant(0.), s_init=LI.Constant(1.)):

        super(GRULayer, self).__init__(incomings, name=None)

        # Output of CNN has shape [batch * step, channel, height, width]
        cnn_shape = self.input_shapes[1]
        num_chann = cnn_shape[1]
        num_pixel = cnn_shape[2] * cnn_shape[3]
        
        # Weights for the initial hidden state and attention map
        self.a0 = self.add_param(zero_init, (1, num_pixel), name="a0",
                                 trainable=zero_trainable,
                                 regularizable=False)
        self.h01 = self.add_param(zero_init, (1, num_units), name="h01",
                                 trainable=zero_trainable,
                                 regularizable=False)
        self.h02 = self.add_param(zero_init, (1, num_units), name="h02",
                                 trainable=zero_trainable,
                                 regularizable=False)
        
        # Weights for the attention gate
        self.W_ha = self.add_param(W_init, (num_units, att_units), name="W_ha")
        self.W_aa = self.add_param(W_init, (num_pixel, att_units), name="W_aa")
        self.W_ua = self.add_param(W_init, (num_actns, att_units), name="W_ua")
        self.W_xa = self.add_param(W_init, (num_chann, att_units), name="W_xa")
        self.b_p = self.add_param(b_init, (att_units,), name="b_p", regularizable=False)
        self.W_pa = self.add_param(W_init, (att_units,1), name="W_pa")
        self.b_a = self.add_param(b_init, (1,), name="b_a", regularizable=False)  
        
        # Weights for the reset/update gate of layer 1
        self.W_xg = self.add_param(W_init, (num_chann, num_units*2), name="W_xg")
        self.W_hg = self.add_param(W_init, (num_units, num_units*2), name="W_hg")
        self.b_g = self.add_param(b_init, (num_units*2,), name="b_g", regularizable=False)
        # Weights for the cell gate of layer 1
        self.W_xc = self.add_param(W_init, (num_chann, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)      
        
        # Weights for the reset/update gate of layer 2
        self.W2_xg = self.add_param(W_init, (num_units, num_units*2), name="W2_xg")
        self.W2_hg = self.add_param(W_init, (num_units, num_units*2), name="W2_hg")
        self.b2_g = self.add_param(b_init, (num_units*2,), name="b2_g", regularizable=False)
        # Weights for the cell gate of layer 2
        self.W2_xc = self.add_param(W_init, (num_units, num_units), name="W2_xc")
        self.W2_hc = self.add_param(W_init, (num_units, num_units), name="W2_hc")
        self.b2_c = self.add_param(b_init, (num_units,), name="b2_c", regularizable=False)
        
        self.attention = None
        self.hidden1 = None
        
        self.num_units = num_units
        self.num_steps = num_steps
        self.num_pixel = num_pixel
        self.num_chann = num_chann
        self.step_shape = (-1, num_chann, num_pixel)
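The constructor above only declares parameters; the step function is not part of this snippet. As orientation, a standard GRU step using the layer-1 weight shapes declared above could look like the following numpy sketch (an assumption, not the layer's actual get_output_for).

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gru_step(x, h, W_xg, W_hg, b_g, W_xc, W_hc, b_c, num_units):
    # Joint reset/update pre-activation; the 2*num_units columns are split
    # into the reset gate r and the update gate u.
    g = sigmoid(x.dot(W_xg) + h.dot(W_hg) + b_g)
    r, u = g[:, :num_units], g[:, num_units:]
    # Candidate state, computed from the reset-gated previous hidden state.
    c = np.tanh(x.dot(W_xc) + (r * h).dot(W_hc) + b_c)
    # Interpolate between the previous state and the candidate.
    return (1.0 - u) * h + u * c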
Example No. 3
    def __init__(self,
                 env_spec,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=NL.rectify,
                 hidden_w_init=LI.HeUniform(),
                 hidden_b_init=LI.Constant(0.),
                 output_nonlinearity=NL.tanh,
                 output_w_init=LI.Uniform(-3e-3, 3e-3),
                 output_b_init=LI.Uniform(-3e-3, 3e-3),
                 bn=False):

        assert isinstance(env_spec.action_space, Box)

        Serializable.quick_init(self, locals())

        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim))

        l_hidden = l_obs
        if bn:
            l_hidden = batch_norm(l_hidden)

        for idx, size in enumerate(hidden_sizes):
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                W=hidden_w_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
                name="h%d" % idx)
            if bn:
                l_hidden = batch_norm(l_hidden)

        l_output = L.DenseLayer(
            l_hidden,
            num_units=env_spec.action_space.flat_dim,
            W=output_w_init,
            b=output_b_init,
            nonlinearity=output_nonlinearity,
            name="output")

        # Note the deterministic=True argument. It makes sure that when getting
        # actions from single observations, we do not update params in the
        # batch normalization layers.

        action_var = L.get_output(l_output, deterministic=True)
        self._output_layer = l_output

        self._f_actions = tensor_utils.compile_function([l_obs.input_var],
                                                        action_var)

        super(DeterministicMLPPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [l_output])
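The deterministic=True comment refers to standard Lasagne behaviour: with deterministic=True, batch-normalization layers use their stored running statistics (and do not update them), and dropout layers are disabled. A small self-contained illustration, unrelated to the policy's own layers:

import numpy as np
import theano
import lasagne.layers as L

l_in = L.InputLayer((None, 4))
l_bn = L.batch_norm(L.DenseLayer(l_in, num_units=8))

train_out = L.get_output(l_bn)                      # uses batch statistics
eval_out = L.get_output(l_bn, deterministic=True)   # uses running averages

f_eval = theano.function([l_in.input_var], eval_out)
print(f_eval(np.zeros((1, 4), dtype=theano.config.floatX)).shape)  # (1, 8)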
Example No. 4
    def __init__(self,
                 incoming,
                 num_units,
                 hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid,
                 name=None,
                 W_init=LI.HeUniform(),
                 b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.),
                 hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units, ),
                                 name="h0",
                                 trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units, ),
                                  name="b_r",
                                  regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units, ),
                                  name="b_u",
                                  regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units, ),
                                  name="b_c",
                                  regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
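A minimal instantiation sketch (illustrative shapes and aliases): the layer expects a 3-D input of shape (batch, time, feature) and flattens everything after the first two axes into input_dim.

import lasagne.layers as L
import lasagne.nonlinearities as LN

# (batch, time, feature): input_shape[2:] gives (16,), so input_dim = 16
l_in = L.InputLayer((None, 20, 16))
l_gru = GRULayer(l_in, num_units=32, hidden_nonlinearity=LN.tanh)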
Example No. 5
    def run(self, shape):
        return LI.HeUniform(gain=self.gain).sample(shape)
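For reference, HeUniform can also be sampled directly (standard lasagne.init API; the shape is illustrative). It draws from a uniform distribution with bound gain * sqrt(3 / fan_in).

import lasagne.init as LI

# For a 2-D shape, fan_in is the first axis (here 128).
W = LI.HeUniform(gain=1.0).sample((128, 64))   # numpy array of shape (128, 64)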