Example #1
    def _set_network(self):
        """Set up network.

        Args:
            void

        Returns:
            input (): Placeholder for network input.
            output (): Output operation.
        """

        # input
        input = tf.placeholder(shape=self.s_dim, dtype=tf.float32)

        # initializers for fc layers
        unif_init = unif_initializer(-0.003, 0.003)

        # flatten
        net = tf.contrib.layers.flatten(input)

        # fc layers
        net = tf.layers.dense(net, 400, kernel_initializer=xav_init())
        net = tf.nn.relu(net)
        net = tf.layers.dense(net, 300, kernel_initializer=xav_init())
        net = tf.nn.relu(net)
        net = tf.layers.dense(net, self.a_dim[1], kernel_initializer=unif_init)
        output = tf.nn.tanh(net)

        return input, output
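The helpers xav_init and unif_initializer are not shown in the snippet. Below is a minimal sketch of what they most likely alias in TF 1.x; the bindings are an assumption, not the original definitions.

import tensorflow as tf

def xav_init():
    # Xavier/Glorot initializer, as shipped with tf.contrib in TF 1.x (assumed binding)
    return tf.contrib.layers.xavier_initializer()

def unif_initializer(minval, maxval):
    # small uniform initializer, commonly used for the last layer of DDPG-style networks
    return tf.random_uniform_initializer(minval=minval, maxval=maxval)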
Example #2
    def _set_network(self):
        """Set up network.

        Args:
            void

        Returns:
            input (): Placeholder for network input.
            actions (): Placeholder for action inputs.
            output (): Output operation.
        """

        # regularizer
        regularizer = l2_regularizer(scale=0.01)

        # inputs
        input = tf.placeholder(shape=self.s_dim, dtype=tf.float32)
        actions = tf.placeholder(shape=self.a_dim, dtype=tf.float32)

        # initializers for fc layers
        unif_init = unif_initializer(-0.003, 0.003)

        # flatten
        net = tf.contrib.layers.flatten(input)

        # fc layers
        net = tf.layers.dense(net,
                              400,
                              kernel_initializer=xav_init(),
                              kernel_regularizer=regularizer)
        net = tf.nn.relu(net)
        net = tf.layers.dense(net,
                              300,
                              kernel_initializer=xav_init(),
                              kernel_regularizer=regularizer,
                              use_bias=False)
        action_net = tf.layers.dense(actions,
                                     300,
                                     kernel_initializer=xav_init(),
                                     kernel_regularizer=regularizer,
                                     use_bias=True)
        net += action_net
        net = tf.nn.relu(net)
        output = tf.layers.dense(net,
                                 1,
                                 kernel_initializer=unif_init,
                                 kernel_regularizer=regularizer)

        return input, actions, output
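This variant adds an action placeholder and merges the action stream into the second hidden layer before a single linear output, i.e. a Q-value head. A hedged sketch of how the returned tensors could be wired into a mean-squared loss; the critic object, target placeholder shape, and learning rate are illustrative, not from the example.

state_ph, action_ph, q_value = critic._set_network()            # 'critic' is hypothetical

q_target_ph = tf.placeholder(shape=[None, 1], dtype=tf.float32)  # assumed target shape
loss = tf.reduce_mean(tf.square(q_target_ph - q_value))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)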
Example #3
def _vgg16(inputs, training=True, embedding_size=64, 
           dropout_keep_prob=0.5,
           middleRepr=False,
           scope='vgg_16'):
    """ From https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/nets/vgg.py
    
Oxford Net VGG 16-Layers version D Example without fcn/convolutional layers at end

  """
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
        
        net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
        
        net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
        
        net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
        
        net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
        
        net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')

        if middleRepr:
            return net

        net = tf.layers.dense(net, embedding_size,
                              activation=tf.nn.relu,
                              kernel_initializer=xav_init(),
                              name='fc8')

        return net
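A short usage sketch, assuming a standard 224x224 RGB input and that the contrib aliases used inside the function (layers, layers_lib, variable_scope) are imported; the placeholder name is illustrative.

images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='images')
embedding = _vgg16(images, training=False, embedding_size=64)
# with 224x224 inputs the result has shape [None, 1, 1, 64]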
    def _embeddings(self, pretrained=False, scope_name=None):
        """Compute word embeddings for sentence.

        Parameters
        ----------
        pretrained: bool, default False
            Whether to use pretrained embeddings
        scope_name: str, default None
            Variable scope
        """
        if not scope_name:
            scope_name = "Embedding"

        self.sentence_ph = tf.placeholder(dtype=tf.int32,
                                          shape=[None, self.time_steps + 1],
                                          name="Sentence_placeholder")

        with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
            self.embedding_matrix = tf.get_variable(
                name="embedding_matrix",
                shape=[self.len_corpus, self.embedding_size],
                initializer=xav_init()
            )

            if pretrained:
                print("Loading pretrained embeddings...")
                load_embedding(session=self.session,
                               vocab=self.dataset.word_to_idx,
                               emb=self.embedding_matrix,
                               path=self.dataset.embedding_file,
                               vocab_size=self.len_corpus,
                               dim_embedding=self.embedding_size)

            self.word_embeddings = tf.nn.embedding_lookup(self.embedding_matrix,
                                                          self.sentence_ph)
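A sketch of how the resulting placeholder and lookup would be fed at run time; the batch of word indices, the batch size, and the model handle are illustrative, not part of the example.

import numpy as np

batch = np.random.randint(0, model.len_corpus,
                          size=(32, model.time_steps + 1))       # 32 is an arbitrary batch size
vectors = model.session.run(model.word_embeddings,
                            feed_dict={model.sentence_ph: batch})
# vectors has shape (32, time_steps + 1, embedding_size)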
    def _output_layer(self, scope_name=None):
        """Self explanatory."""
        if scope_name is None:
            scope_name = "Output_layer"
        if self.project:
            shape = [self.project_size, self.len_corpus]
        else:
            shape = [self.lstm_hidden_size, self.len_corpus]

        with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
            self.output_layer = dict()
            self.output_layer['weights'] = tf.get_variable(
                name="weights", shape=shape, dtype=tf.float32,
                initializer=xav_init())

            self.output_layer['bias'] = tf.get_variable(
                name='bias', shape=[self.len_corpus], dtype=tf.float32,
                initializer=xav_init())
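These variables would typically map a hidden (or projected) state to per-word logits. A sketch meant to live inside the same class; hidden_state is a hypothetical tensor whose last dimension matches shape[0] above.

logits = tf.matmul(hidden_state, self.output_layer['weights']) + self.output_layer['bias']
# logits: [batch, len_corpus], e.g. for tf.nn.sparse_softmax_cross_entropy_with_logits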
    def _projection_layer(self, scope_name=None):
        """Creates the weight matrix for projection, when a larger LSTM is used."""
        if scope_name is None:
            scope_name = "Projection"

        with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
            self.project_W = tf.get_variable(
                name="proj_weights",
                shape=[self.lstm_hidden_size, self.project_size],
                dtype=tf.float32,
                initializer=xav_init())
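The projection matrix is meant to sit between the LSTM output and the output layer, matching the self.project branch of _output_layer above. A sketch meant to live inside the same class; lstm_output is a hypothetical tensor of shape [batch, lstm_hidden_size].

projected = tf.matmul(lstm_output, self.project_W)                # [batch, project_size]
logits = tf.matmul(projected, self.output_layer['weights']) + self.output_layer['bias']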