def setup_network(self):
        # Setup character embedding
        (embedded_encoder_input, embedded_decoder_input,
         embed_func) = self.setup_character_embedding()

        # Output projection
        with tf.variable_scope('alphabet_projection') as scope:
            self.projection_W, self.projection_b = intialize_projections(
                # 4 * num_units because we use a bidirectional encoder
                input_size=4 * self.config.num_units,
                output_size=self.config.alphabet_size,
                scope=scope)

        # Encoder
        with tf.variable_scope('encoder') as scope:
            # Normalize batch
            embedded_encoder_input = tf.layers.batch_normalization(
                inputs=embedded_encoder_input,
                center=True,
                scale=True,
                training=not self.prediction_mode,
                trainable=True,
            )

            enc_outputs, enc_final_state = self.encoder.encode(
                inputs=embedded_encoder_input,
                seq_lengths=self.encoder_sequence_length,
                scope=scope)

        # Predict question categories
        with tf.variable_scope('question') as scope:
            # Convert StateTuple to vector
            state_vector = tf.concat(flatten(enc_final_state),
                                     axis=1,
                                     name='combined-state-vec')

            # Add dense layer
            layer = projection(
                x=state_vector,
                input_size=4 * self.config.num_units * self.config.num_cells,
                output_size=128,
                nonlinearity=tf.nn.relu)
            if self.add_dropout:
                layer = tf.nn.dropout(x=layer, keep_prob=self.keep_prob_ph)

            class_logits = projection(x=layer,
                                      input_size=128,
                                      output_size=self.config.num_classes)

        W_penalty = 0.0

        # Define loss
        self.setup_losses(class_logits=class_logits,
                          class_idx=self.class_idx,
                          W_penalty=W_penalty)
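
The `projection` and `intialize_projections` helpers used throughout these snippets are project-specific and not shown. Below is a minimal sketch of the interface implied by the call sites, assuming a plain dense projection with optionally pre-created variables; the names, scoping, and initializers here are guesses, not the original implementation.

def intialize_projections(input_size, output_size, scope=None):
    # Create the weight matrix and bias vector for a dense projection.
    with tf.variable_scope(scope, default_name='projection'):
        W = tf.get_variable('W', shape=[input_size, output_size],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b', shape=[output_size],
                            initializer=tf.zeros_initializer())
    return W, b


def projection(x, input_size=None, output_size=None, W=None, b=None,
               nonlinearity=None, name=None):
    # Apply x @ W + b, creating fresh variables if none were passed in.
    if W is None or b is None:
        W, b = intialize_projections(input_size, output_size)
    y = tf.add(tf.matmul(x, W), b, name=name)
    return nonlinearity(y) if nonlinearity is not None else y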
    def __init__(self,
                 num_actions,
                 state,
                 action=None,
                 target=None,
                 learning_rate=None,
                 scope='DQN'):
        """
        Args:
            num_actions: Number of actions (size of the Q-value output layer).
            state: Input state to pass through the network.
            action: Action for which the Q-value should be predicted (only required for training).
            target: Target Q-value (only required for training).
            learning_rate: If given, an RMSProp optimizer is created for training.
            scope: Variable scope that holds the network parameters.
        """
        self.input = state
        self.action = action
        self.target = target
        self.num_actions = num_actions
        self.scope = scope
        if learning_rate is not None:
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate,
                                                       momentum=0.95,
                                                       epsilon=0.01)

        with tf.variable_scope(self.scope):

            with tf.variable_scope('input_layers'):
                self.input_float = tf.to_float(self.input)
                self.input_norm = tf.divide(self.input_float, 255.0)

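            # Three convolutional layers matching the DeepMind Atari DQN
            # (assuming the conv2d arguments are kernel size, number of
            # filters, stride): 8x8/4 with 32 filters, 4x4/2 with 64,
            # and 3x3/1 with 64, followed by a 512-unit dense layer.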
            self.conv1 = conv2d(self.input_norm,
                                8,
                                32,
                                4,
                                tf.nn.relu,
                                scope='conv1')
            self.conv2 = conv2d(self.conv1,
                                4,
                                64,
                                2,
                                tf.nn.relu,
                                scope='conv2')
            self.conv3 = conv2d(self.conv2,
                                3,
                                64,
                                1,
                                tf.nn.relu,
                                scope='conv3')
            self.flatten = flatten(self.conv3, scope='flatten')
            self.dense = dense(self.flatten, 512, tf.nn.relu, scope='dense')
            self.output = dense(self.dense, self.num_actions, scope='output')

        self.network_params = tf.trainable_variables(scope=self.scope)
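
A hypothetical usage of this class, showing the common DQN pattern of an online and a target network whose variables are synchronized through the scoped `network_params` list. The placeholder shapes, names, and hyperparameters below are illustrative assumptions; the loss and training op are not part of the shown constructor.

state_ph = tf.placeholder(tf.uint8, [None, 84, 84, 4], name='state')
action_ph = tf.placeholder(tf.int32, [None], name='action')
target_ph = tf.placeholder(tf.float32, [None], name='target')

online_net = DQN(num_actions=4, state=state_ph, action=action_ph,
                 target=target_ph, learning_rate=2.5e-4, scope='DQN_online')
target_net = DQN(num_actions=4, state=state_ph, scope='DQN_target')

# Copy the online parameters into the target network.
update_target_op = tf.group(*[
    target_var.assign(online_var)
    for online_var, target_var in zip(online_net.network_params,
                                      target_net.network_params)
])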
Example #3
    def build_discriminator(self,
                            input,
                            channels=3,
                            ndf=64,
                            norm_type='batch',
                            init_type='normal',
                            init_gain=0.02,
                            is_training=True):
        """
        SRGAN Discriminator
        """
        conv_block1 = ops.conv(input,
                               in_channels=channels,
                               out_channels=ndf,
                               filter_size=3,
                               stride=1,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=None,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv1',
                               reuse=self.reuse)

        conv_block2 = ops.conv(conv_block1,
                               in_channels=ndf,
                               out_channels=ndf,
                               filter_size=3,
                               stride=2,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=norm_type,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv2',
                               reuse=self.reuse)

        conv_block3 = ops.conv(conv_block2,
                               in_channels=ndf,
                               out_channels=2 * ndf,
                               filter_size=3,
                               stride=1,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=norm_type,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv3',
                               reuse=self.reuse)

        conv_block4 = ops.conv(conv_block3,
                               in_channels=2 * ndf,
                               out_channels=2 * ndf,
                               filter_size=3,
                               stride=2,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=norm_type,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv4',
                               reuse=self.reuse)

        conv_block5 = ops.conv(conv_block4,
                               in_channels=2 * ndf,
                               out_channels=4 * ndf,
                               filter_size=3,
                               stride=1,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=norm_type,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv5',
                               reuse=self.reuse)

        conv_block6 = ops.conv(conv_block5,
                               in_channels=4 * ndf,
                               out_channels=4 * ndf,
                               filter_size=3,
                               stride=2,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=norm_type,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv6',
                               reuse=self.reuse)

        conv_block7 = ops.conv(conv_block6,
                               in_channels=4 * ndf,
                               out_channels=8 * ndf,
                               filter_size=3,
                               stride=1,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=norm_type,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv7',
                               reuse=self.reuse)

        conv_block8 = ops.conv(conv_block7,
                               in_channels=8 * ndf,
                               out_channels=8 * ndf,
                               filter_size=3,
                               stride=2,
                               weight_init_type=init_type,
                               weight_init_gain=init_gain,
                               norm_type=norm_type,
                               activation_type='LeakyReLU',
                               is_training=is_training,
                               scope='conv8',
                               reuse=self.reuse)

        x = ops.flatten(conv_block8)

        dense = ops.dense(x,
                          in_size=x.get_shape().as_list()[1],
                          out_size=1024,
                          weight_init_type=init_type,
                          weight_init_gain=init_gain,
                          norm_type=None,
                          activation_type='LeakyReLU',
                          is_training=is_training,
                          scope='dense',
                          reuse=self.reuse)

        output = ops.dense(dense,
                           in_size=1024,
                           out_size=1,
                           weight_init_type=init_type,
                           weight_init_gain=init_gain,
                           norm_type=None,
                           activation_type='sigmoid',
                           is_training=is_training,
                           scope='output',
                           reuse=self.reuse)

        return output
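
A typical way this discriminator is wired into the GAN losses, assuming it is called from a method of the same class, that `self.reuse` starts out False, and that flipping it makes the second call share variables with the first. The image tensor names and the loss formulation are assumptions, not part of the snippet.

d_real = self.build_discriminator(real_hr_images, is_training=True)
self.reuse = True
d_fake = self.build_discriminator(generated_hr_images, is_training=True)

# Standard sigmoid-output GAN discriminator loss.
eps = 1e-8
d_loss = -tf.reduce_mean(tf.log(d_real + eps) + tf.log(1.0 - d_fake + eps))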
    def setup_network(self):
        # Setup character embedding
        (embedded_encoder_input, embedded_decoder_input,
         embed_func) = self.setup_character_embedding()

        # Output projection
        with tf.variable_scope('alphabet_projection') as scope:
            self.projection_W, self.projection_b = intialize_projections(
                # 4 * num_units because we use a bidirectional encoder
                input_size=4 * self.config.num_units,
                output_size=self.config.alphabet_size,
                scope=scope)

        # Encoder
        with tf.variable_scope('encoder') as scope:
            # Normalize batch
            # TODO: What axis should it be?
            embedded_encoder_input = tf.layers.batch_normalization(
                inputs=embedded_encoder_input,
                center=True,
                scale=True,
                training=not self.prediction_mode,
                trainable=True,
            )

            enc_outputs, enc_final_state = self.encoder.encode(
                inputs=embedded_encoder_input,
                seq_lengths=self.encoder_sequence_length,
                enc_word_indices=self.enc_word_indices,
                word_seq_lengths=self.word_seq_lengths,
                max_words=self.config.max_words,
                scope=scope)

        # Predict question categories
        with tf.variable_scope('question') as scope:

            # Convert StateTuple to vector
            state_vector = tf.concat(flatten(enc_final_state),
                                     axis=1,
                                     name='combined-state-vec')

            # Add dense layer
            W, b = intialize_projections(
                input_size=4 * self.config.num_units * self.config.num_cells,
                output_size=128)
            layer = tf.nn.relu(tf.matmul(state_vector, W) + b)
            if self.add_dropout:
                layer = tf.nn.dropout(x=layer, keep_prob=self.keep_prob_ph)

            # Compute L2 weight decay on the dense-layer weights
            W_penalty = tf.contrib.layers.apply_regularization(
                regularizer=tf.contrib.layers.l2_regularizer(
                    scale=self.config.W_lambda),
                weights_list=[W])

            class_logits = projection(x=layer,
                                      input_size=128,
                                      output_size=self.config.num_classes)

        # Define loss
        self.setup_losses(class_logits=class_logits,
                          class_idx=self.class_idx,
                          W_penalty=W_penalty)
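
For reference, the `tf.contrib.layers` weight-decay term above equals the scale times `tf.nn.l2_loss` of the weight matrix, so a contrib-free equivalent (same value for the single matrix `W` used here) is simply:

# l2_regularizer(scale)(W) == scale * tf.nn.l2_loss(W) == scale * 0.5 * sum(W ** 2)
W_penalty = self.config.W_lambda * tf.nn.l2_loss(W)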
    def setup_network(self):
        # Setup character embedding
        (embedded_encoder_input, embedded_decoder_input,
         embed_func) = self.setup_character_embedding()

        # Output projection
        with tf.variable_scope('alphabet_projection') as scope:
            self.projection_W, self.projection_b = intialize_projections(
                input_size=4 * self.config.num_units,
                output_size=self.config.alphabet_size,
                scope=scope)

            # Define alphabet projection function
            def project_func(output):
                return projection(output,
                                  W=self.projection_W,
                                  b=self.projection_b)

        # Encoder
        with tf.variable_scope('encoder') as scope:
            # Normalize batch
            embedded_encoder_input = tf.layers.batch_normalization(
                inputs=embedded_encoder_input,
                center=True,
                scale=True,
                # training=not self.prediction_mode,
                # This should always be True, since both training and
                # inference see the entire question text.
                training=True,
                trainable=True,
            )

            enc_outputs, enc_final_state = self.encoder.encode(
                inputs=embedded_encoder_input,
                seq_lengths=self.encoder_sequence_length,
                enc_word_indices=self.enc_word_indices,
                word_seq_lengths=self.word_seq_lengths,
                max_words=self.config.max_words,
                scope=scope)

        # Predict question categories
        with tf.variable_scope('question') as scope:
            # Convert StateTuple to vector
            state_vector = tf.concat(flatten(enc_final_state),
                                     axis=1,
                                     name='combined-state-vec')

            # Add dense layer
            W, b = intialize_projections(
                input_size=4 * self.config.num_units * self.config.num_cells,
                output_size=128)
            layer = tf.nn.relu(tf.matmul(state_vector, W) + b)
            if self.add_dropout:
                layer = tf.nn.dropout(x=layer, keep_prob=self.keep_prob_ph)

            # Compute L2 weight decay on the dense-layer weights
            W_penalty = tf.contrib.layers.apply_regularization(
                regularizer=tf.contrib.layers.l2_regularizer(
                    scale=self.config.W_lambda),
                weights_list=[W])

            class_logits = projection(x=layer,
                                      input_size=128,
                                      output_size=self.config.num_classes)

        # Set decoder initial state and encoder outputs based on the binary
        # mode input value
        # - If `self.is_lm_mode=0` Use the passed initial state from encoder
        # - If `self.is_lm_mode=1` Use the zero vector
        self.enc_outputs, enc_final_state = select_decoder_inputs(
            is_lm_mode=self.is_lm_mode,
            enc_outputs=enc_outputs,
            initial_state=enc_final_state,
        )
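        # `select_decoder_inputs` is a project helper that is not shown here.
        # A plausible sketch of what it does (an assumption, not the original
        # implementation): gate the encoder context off when running as a
        # pure language model, e.g.
        #   use_encoder = tf.equal(is_lm_mode, 0)
        #   enc_outputs = tf.where(use_encoder, enc_outputs,
        #                          tf.zeros_like(enc_outputs))
        # and likewise zero every tensor in the initial state tuple.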

        # If an observation has a class -> Pass the true class as 1-hot-encoded
        # vector to the decoder input.
        # If an observation doesn't have a class -> Pass the class logits for
        # the given observation to the decoder input.
        class_is_known = tf.greater_equal(self.class_idx, 0)

        # Create one-hot-encoded vectors
        class_one_hot = tf.one_hot(indices=self.class_idx,
                                   depth=self.config.num_classes,
                                   on_value=1.0,
                                   off_value=0.0,
                                   axis=-1,
                                   dtype=tf.float32,
                                   name='class-one-hot-encoded')

        # Compute class probabilities
        class_probs = tf.nn.softmax(class_logits)

        # Select what to pass on
        self.class_info_vec = tf.where(condition=class_is_known,
                                       x=class_one_hot,
                                       y=class_probs)

        # Concatenate class info vector with decoder input
        _class_info_vec = tf.expand_dims(self.class_info_vec, axis=1)
        _class_info_vec = tf.tile(
            _class_info_vec,
            multiples=[1, self.config.max_dec_seq_length + 1, 1])
        decoder_input = tf.concat([embedded_decoder_input, _class_info_vec],
                                  axis=2)

        # Pack state to tensor
        self.enc_final_state_tensor = pack_state_tuple(enc_final_state)

        # Initialize decoder attention function using encoder outputs
        self.decoder.initialize_attention_func(
            input_size=decoder_input.get_shape().as_list()[-1],
            attention_states=self.enc_outputs)

        # Define decoder
        with tf.variable_scope('decoder'):
            dec_outputs, dec_final_state = self.decoder.decode(
                inputs=decoder_input,
                initial_state=enc_final_state,
                seq_length=self.decoder_sequence_length,
                embed_func=embed_func,
                project_func=project_func)

            # Project output to alphabet size and reshape
            dec_outputs = tf.reshape(dec_outputs,
                                     [-1, 4 * self.config.num_units])
            dec_outputs = projection(dec_outputs,
                                     W=self.projection_W,
                                     b=self.projection_b)
            dec_outputs = tf.reshape(dec_outputs, [
                -1, self.config.max_dec_seq_length + 1,
                self.config.alphabet_size
            ])

        if self.prediction_mode:
            dec_outputs = self.decoder_logits

        # Define loss
        self.setup_losses(dec_outputs=dec_outputs,
                          target_chars=self.target_chars,
                          decoder_sequence_length=self.decoder_sequence_length,
                          class_probs=class_probs,
                          class_idx=self.class_idx,
                          class_is_known=class_is_known,
                          class_one_hot=class_one_hot,
                          W_penalty=W_penalty)

        if self.prediction_mode:
            # Define initial attention tensor
            self.initial_attention = self.decoder.attention_func(
                enc_final_state)

            # Look up inputs
            decoder_inputs_embedded = tf.nn.embedding_lookup(
                self.embedding_matrix,
                self.decoder_inputs,
                name='decoder_input')
            is_lm_mode_tensor = tf.to_float(
                tf.expand_dims(self.is_lm_mode, axis=1))
            decoder_inputs = tf.concat(
                [decoder_inputs_embedded, is_lm_mode_tensor], axis=1)

            # Concatenate class info vector
            decoder_inputs = tf.concat([decoder_inputs, self.class_info_vec],
                                       axis=1)

            # Unpack state
            initial_state = unpack_state_tensor(self.decoder_state)

            with tf.variable_scope('decoder', reuse=True):
                decoder_output, decoder_final_state, self.decoder_new_attention = self.decoder.predict(
                    inputs=decoder_inputs,
                    initial_state=initial_state,
                    attention_states=self.decoder_attention)

            # Project output to alphabet size
            self.decoder_output = projection(decoder_output,
                                             W=self.projection_W,
                                             b=self.projection_b,
                                             name='decoder_output')

            # Compute decayed logits
            self.decoder_probs_decayed = compute_decayed_probs(
                logits=self.decoder_output,
                decay_parameter_ph=self.probs_decay_parameter)

            # Pack state to tensor
            self.decoder_final_state = pack_state_tuple(
                decoder_final_state, name='decoder_final_state')
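
`pack_state_tuple` and `unpack_state_tensor` are project helpers not shown in these snippets. A rough sketch of the idea, assuming the encoder/decoder state is a tuple of `LSTMStateTuple`s; the original signatures may differ (for instance, the call above passes only the tensor, so the layer and unit counts are presumably taken from the config).

def pack_state_tuple(state, name=None):
    # Flatten a tuple of LSTMStateTuple(c, h) into one
    # [batch, num_layers * 2 * num_units] tensor.
    parts = []
    for layer_state in state:
        parts.extend([layer_state.c, layer_state.h])
    return tf.concat(parts, axis=1, name=name)


def unpack_state_tensor(state_tensor, num_layers, num_units):
    # Inverse of pack_state_tuple: rebuild the per-layer LSTMStateTuples.
    parts = tf.split(state_tensor, num_layers * 2, axis=1)
    return tuple(
        tf.nn.rnn_cell.LSTMStateTuple(parts[2 * i], parts[2 * i + 1])
        for i in range(num_layers))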
Example #6
    def forward_pass(self,
                     state_in,
                     reshape=True,
                     sigmoid_out=False,
                     reuse=None):
        self.state_in = state_in

        shape_in = self.state_in.get_shape().as_list()

        # Get number of input channels for weight/bias init
        channels_in = shape_in[-1]

        with tf.variable_scope(self.scope, reuse=reuse):

            if reshape:
                # Reshape [batch_size, traj_len, H, W, C] into [batch_size*traj_len, H, W, C]
                self.state_in = tf.reshape(
                    self.state_in, [-1, shape_in[2], shape_in[3], shape_in[4]])

            def uniform_init(fan_in):
                # Uniform initializer on [-1/sqrt(fan_in), 1/sqrt(fan_in)].
                bound = 1.0 / tf.sqrt(float(fan_in))
                return tf.random_uniform_initializer(-bound, bound)

            self.conv1 = conv2d(self.state_in,
                                self.num_filters,
                                self.kernels[0],
                                self.strides[0],
                                kernel_init=uniform_init(
                                    channels_in * self.kernels[0] * self.kernels[0]),
                                bias_init=uniform_init(
                                    channels_in * self.kernels[0] * self.kernels[0]),
                                scope='conv1')

            self.conv1 = lrelu(self.conv1, self.lrelu_alpha, scope='conv1')

            self.conv2 = conv2d(self.conv1,
                                self.num_filters,
                                self.kernels[1],
                                self.strides[1],
                                kernel_init=uniform_init(
                                    self.num_filters * self.kernels[1] * self.kernels[1]),
                                bias_init=uniform_init(
                                    self.num_filters * self.kernels[1] * self.kernels[1]),
                                scope='conv2')

            self.conv2 = lrelu(self.conv2, self.lrelu_alpha, scope='conv2')

            self.conv3 = conv2d(self.conv2,
                                self.num_filters,
                                self.kernels[2],
                                self.strides[2],
                                kernel_init=uniform_init(
                                    self.num_filters * self.kernels[2] * self.kernels[2]),
                                bias_init=uniform_init(
                                    self.num_filters * self.kernels[2] * self.kernels[2]),
                                scope='conv3')

            self.conv3 = lrelu(self.conv3, self.lrelu_alpha, scope='conv3')

            self.conv4 = conv2d(self.conv3,
                                self.num_filters,
                                self.kernels[3],
                                self.strides[3],
                                kernel_init=uniform_init(
                                    self.num_filters * self.kernels[3] * self.kernels[3]),
                                bias_init=uniform_init(
                                    self.num_filters * self.kernels[3] * self.kernels[3]),
                                scope='conv4')

            self.conv4 = lrelu(self.conv4, self.lrelu_alpha, scope='conv4')

            self.flatten = flatten(self.conv4)

            self.dense = dense(self.flatten,
                               self.dense_size,
                               kernel_init=uniform_init(self.num_filters),
                               bias_init=uniform_init(self.num_filters))

            self.output = dense(self.dense,
                                1,
                                kernel_init=uniform_init(self.dense_size),
                                bias_init=uniform_init(self.dense_size),
                                scope='output')

            if sigmoid_out:
                self.output = tf.nn.sigmoid(self.output)

            if reshape:
                # Reshape 1d reward output [batch_size*traj_len] into batches [batch_size, traj_len]
                self.output = tf.reshape(self.output, [-1, shape_in[1]])

            self.network_params = tf.trainable_variables(scope=self.scope)

        return self.output
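
A hypothetical call illustrating the trajectory handling in this forward pass (the instance name, placeholder shape, and trajectory length are assumptions): with `reshape=True` the [batch, traj_len, H, W, C] input is flattened to [batch * traj_len, H, W, C] for the convolutions, and the scalar per-frame outputs are folded back into [batch, traj_len].

traj_ph = tf.placeholder(tf.float32, [None, 50, 84, 84, 4], name='trajectories')
per_step_reward = reward_net.forward_pass(traj_ph, reshape=True, sigmoid_out=True)
predicted_return = tf.reduce_sum(per_step_reward, axis=1)  # one value per trajectory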