Example #1
File: LeNet.py Project: iiharu/NN
    def build(self, input_shape=(28, 28, 1), classes=10):
        inputs = keras.Input(shape=input_shape)

        outputs = conv2d(filters=6, kernel_size=(6, 6))(inputs)
        outputs = max_pooling2d(pool_size=(2, 2), strides=(2, 2))(outputs)
        outputs = sigmoid()(outputs)

        outputs = conv2d(filters=16, kernel_size=(6, 6))(outputs)
        outputs = max_pooling2d(pool_size=(2, 2), strides=(2, 2))(outputs)
        outputs = sigmoid()(outputs)

        outputs = flatten()(outputs)

        outputs = dense(120)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(64)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        model = keras.Model(inputs, outputs)

        model.summary()

        return model
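The build() above returns a plain keras.Model, so it can be trained with the standard Keras API. A minimal usage sketch (the LeNet class name, import path and training hyperparameters are assumptions, not taken from the project):

import keras
from LeNet import LeNet  # hypothetical import; assumes the enclosing class is named LeNet

model = LeNet().build(input_shape=(28, 28, 1), classes=10)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Illustrative training run on MNIST-shaped data
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
model.fit(x_train, y_train, batch_size=128, epochs=5, validation_split=0.1)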
Example #2
    def encoder(self, x, training=True, reuse=None, name=None):

        # [None, 28, 28, 1]  -->  [None, 14, 14, 64]
        h = conv2d(x, 64, kernel_size=4, strides=2, activation=tf.nn.leaky_relu, reuse=reuse, name='e_conv_1')

        # [None, 14, 14, 64] -->  [None, 7, 7, 128]
        h = conv2d(h, 128, kernel_size=4, strides=2, reuse=reuse, name='e_conv_2')
        h = batch_norm(h, training=training, reuse=reuse, name='e_bn_1')
        h = tf.nn.leaky_relu(h)

        # [None, 7, 7, 128]  -->  [None, 7*7*128]
        h = tf.reshape(h, [-1, 7*7*128])

        # [None, 7*7*128] -->  [None, 1024]
        h = dense(h, 1024, reuse=reuse, name='e_dense_1')
        h = batch_norm(h, training=training, reuse=reuse, name='e_bn_2')
        h = tf.nn.leaky_relu(h)

        # [None, 1024] -->  [None, 2*self.z_dim]
        h = dense(h, 2*self.z_dim, reuse=reuse, name='e_dense_2')

        # Assign names to final outputs
        mean = tf.identity(h[:,:self.z_dim], name=name+"_mean")
        log_sigma = tf.identity(h[:,self.z_dim:], name=name+"_log_sigma")
        return mean, log_sigma
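The encoder returns the mean and log standard deviation of a diagonal Gaussian; a VAE typically draws the latent code from these with the reparameterization trick. A minimal sketch of that step (not part of the project's code; names follow the snippet above):

# z = mean + sigma * eps with eps ~ N(0, I); gradients flow through mean and log_sigma
eps = tf.random.normal(tf.shape(mean))   # tf.random_normal in older TF 1.x
z = mean + tf.exp(log_sigma) * eps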
Example #3
    def decoder(state, training=True):
        x = tf.reshape(state, [batch_size, -1])
        x = lyr.dense('decoder.dense1.matrix', 'decoder.dense1.bias',
                      'decoder', latent_dim, 512, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm1.offset',
                          'decoder.batchnorm1.scale', 'decoder')

        x = lyr.dense('decoder.dense2.matrix', 'decoder.dense2.bias',
                      'decoder', 512, 512, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm2.offset',
                          'decoder.batchnorm2.scale', 'decoder')

        x = lyr.dense('decoder.dense3.matrix', 'decoder.dense3.bias',
                      'decoder', 512, 256, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm3.offset',
                          'decoder.batchnorm3.scale', 'decoder')

        x = lyr.dense('decoder.dense4.matrix', 'decoder.dense4.bias',
                      'decoder', 256, max_size * encode_length, x)
        x = tf.reshape(x, [batch_size, max_size, encode_length])
        x = lyr.batchnorm(x, 'decoder.batchnorm4.offset',
                          'decoder.batchnorm4.scale', 'decoder')

        return x
Example #4
 def _stochastic(self, x, dim, scope, ema):
     b_init_var = tf.constant_initializer(0. if self.is_log_var else 1.)
     x = self.activation(x)
     if isinstance(dim, int):
         flatten = tf.contrib.layers.flatten(x)
         mean = dense(flatten,
                      dim,
                      scope=scope + "_mean",
                      training=self.ph_is_training,
                      ema=ema,
                      init=self.init)
         var = dense(flatten,
                     dim,
                     scope=scope + "_var",
                     bias_initializer=b_init_var,
                     training=self.ph_is_training,
                     ema=ema,
                     init=self.init)
     else:
         mean = conv2d(x,
                       dim,
                       scope=scope + "_mean",
                       training=self.ph_is_training,
                       ema=ema,
                       init=self.init)
         var = conv2d(x,
                      dim,
                      scope=scope + "_var",
                      bias_initializer=b_init_var,
                      training=self.ph_is_training,
                      ema=ema,
                      init=self.init)
     var = tf.nn.softplus(var) + self.eps
     z = stochastic_gaussian(mean, var, is_log_var=self.is_log_var)
     return z, mean, var
Example #5
def network(x, weights, biases):
    x = tf.reshape(x, shape=[-1, input_size_h, input_size_w, num_channels])
    x = tf.subtract(x, mean)
    x = tf.divide(x, std)
    x = tf.expand_dims(x, axis=1)
    x = tf.transpose(x, perm=[0, 4, 2, 3, 1])

    conv0 = tf.nn.conv3d(x, weights["wc0"], strides=[1, 1, 1, 1, 1], padding="SAME")
    conv0 = tf.nn.bias_add(conv0, biases["bc0"])
    conv0 = tf.nn.relu(conv0)

    conv0 = tf.transpose(conv0, perm=[0, 1, 4, 2, 3])
    conv0 = tf.reshape(conv0, shape=[-1, 12, 192, 256])
    conv0 = tf.transpose(conv0, perm=[0, 2, 3, 1])

    conv1 = conv2d(conv0, weights["wc1"], biases["bc1"], strides=2)
    conv2 = conv2d(conv1, weights["wc2"], biases["bc2"], strides=2)
    conv3 = conv2d(conv2, weights["wc3"], biases["bc3"], strides=2)
    conv4 = conv2d(conv3, weights["wc4"], biases["bc4"], strides=2)
    conv5 = conv2d(conv4, weights["wc5"], biases["bc5"], strides=2)
    conv6 = conv2d(conv5, weights["wc6"], biases["bc6"], strides=2)

    fc1 = flatten(conv6)

    fc1 = dense(fc1, weights["wd1"], biases["bd1"])
    fc2 = dense(fc1, weights["wd2"], biases["bd2"])
    fc3 = dense(fc2, weights["wd3"], biases["bd3"])

    out = tf.add(tf.matmul(fc3, weights["out"]), biases["bias_out"])

    return out
Example #6
    def __init__(self, sess, input_shape, num_actions, reuse=False, is_training=True, name='train'):
        super().__init__(sess, reuse)
        self.initial_state = []
        with tf.name_scope(name + "policy_input"):
            self.X_input = tf.placeholder(tf.uint8, input_shape)
        with tf.variable_scope("policy", reuse=reuse):
            conv1 = conv2d('conv1', tf.cast(self.X_input, tf.float32) / 255., num_filters=32, kernel_size=(8, 8),
                           padding='VALID', stride=(4, 4),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv2 = conv2d('conv2', conv1, num_filters=64, kernel_size=(4, 4), padding='VALID', stride=(2, 2),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv3 = conv2d('conv3', conv2, num_filters=64, kernel_size=(3, 3), padding='VALID', stride=(1, 1),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv3_flattened = flatten(conv3)

            fc4 = dense('fc4', conv3_flattened, output_dim=512, initializer=orthogonal_initializer(np.sqrt(2)),
                        activation=tf.nn.relu, is_training=is_training)

            self.policy_logits = dense('policy_logits', fc4, output_dim=num_actions,
                                       initializer=orthogonal_initializer(np.sqrt(1.0)), is_training=is_training)

            self.value_function = dense('value_function', fc4, output_dim=1,
                                        initializer=orthogonal_initializer(np.sqrt(1.0)), is_training=is_training)

            with tf.name_scope('value'):
                self.value_s = self.value_function[:, 0]

            with tf.name_scope('action'):
                self.action_s = noise_and_argmax(self.policy_logits)
Example #7
    def decoder(self, z, training=True, reuse=None, name=None):

        # [None, z_dim]  -->  [None, 1024]
        h = dense(z, 1024, reuse=reuse, name='d_dense_1')
        h = batch_norm(h, training=training, reuse=reuse, name='d_bn_1')
        h = tf.nn.relu(h)
        
        # [None, 1024]  -->  [None, 7*7*128]
        h = dense(h, self.min_res*self.min_res*self.min_chans, reuse=reuse, name='d_dense_2')
        h = batch_norm(h, training=training, reuse=reuse, name='d_bn_2')
        h = tf.nn.relu(h)

        # [None, 7*7*128]  -->  [None, 7, 7, 128]
        h = tf.reshape(h, [-1, self.min_res, self.min_res, self.min_chans])

        # [None, 7, 7, 128]  -->  [None, 14, 14, 64]
        h = conv2d_transpose(h, 64, kernel_size=4, strides=2, reuse=reuse, name='d_tconv_1')
        h = batch_norm(h, training=training, reuse=reuse, name='d_bn_3')
        h = tf.nn.relu(h)
                        
        # [None, 14, 14, 64]  -->  [None, 28, 28, 1]
        h = conv2d_transpose(h, 1, kernel_size=4, strides=2, activation=tf.nn.sigmoid, reuse=reuse, name='d_tconv_2')
                        
        # Assign name to final output
        return tf.identity(h, name=name)
Example #8
def build_model(t_params, n_dim_img, n_dim_txt, n_dim_enc, n_dim_dec, n_dim_vocab, optimizer):
    '''
    Build the whole model for training
    '''
    x = tensor.tensor3('x', config.floatX)
    mask_x = tensor.matrix('mask_x', 'int8')
    # Encoder(s) and initialization of hidden layer
    enc = gru(mask_x, dropout(x), t_params, n_dim_img, n_dim_enc, 'enc')[-1]
    init_h = tensor.tanh(dense(enc, t_params, n_dim_enc, n_dim_dec, 'init_h'))

    y = tensor.matrix('y', 'int32')
    mask_y = tensor.matrix('mask_y', 'int8')
    n_steps, n_samples = y.shape
    # Word embedding
    emb = embedding(y, t_params, n_dim_vocab, n_dim_txt, 'emb').reshape((n_steps, n_samples, n_dim_txt))[: -1]
    emb = tensor.concatenate([tensor.zeros((1, n_samples, n_dim_txt), config.floatX), emb])
    # Decoder(s)
    dec = gru(mask_y, emb, t_params, n_dim_txt, n_dim_dec, 'dec', init_h=init_h)
    # Fully-connected layer
    fc = dense(dropout(dec), t_params, n_dim_dec, n_dim_vocab, 'fc')
    # Classifier
    prob = tensor.nnet.softmax(fc.reshape((n_steps * n_samples, n_dim_vocab)))
    # Cost function
    cost = prob[tensor.arange(n_steps * n_samples), y.flatten()].reshape((n_steps, n_samples))
    cost = ((-tensor.log(cost + 1e-6) * mask_y).sum(0) / mask_y.astype(config.floatX).sum(0)).mean()
    grads = tensor.grad(cost, list(t_params.values()))
    f_cost, f_update = optimizer(tensor.scalar('lr'), t_params, grads, [x, mask_x, y, mask_y], cost)

    return f_cost, f_update
Example #9
    def __call__(self, x, reuse=True):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            _x = dense(x, 500, activation_='lrelu')
            _x = dense(_x, 500, activation_='lrelu')
            _x = dense(_x, 1, activation_=None)
            return _x
Example #10
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            with tf.variable_scope('Encoder'):
                _x = conv_block(x,
                                filters=16,
                                sampling='same',
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=16,
                                sampling='down',
                                **self.conv_block_params)

                _x = conv_block(_x,
                                filters=32,
                                sampling='same',
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=32,
                                sampling='down',
                                **self.conv_block_params)

                current_shape = _x.get_shape().as_list()[1:]
                _x = flatten(_x)
                _x = dense(_x, 512, activation_='lrelu')
                encoded = dense(_x, self.latent_dim)

            with tf.variable_scope('Decoder'):
                _x = dense(encoded, 512, activation_='lrelu')
                _x = dense(_x,
                           current_shape[0] * current_shape[1] *
                           current_shape[2],
                           activation_='lrelu')
                _x = reshape(_x, current_shape)

                _x = conv_block(_x,
                                filters=32,
                                sampling=self.upsampling,
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=16,
                                sampling='same',
                                **self.conv_block_params)

                _x = conv_block(_x,
                                filters=16,
                                sampling=self.upsampling,
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=self.channel,
                                sampling='same',
                                **self.last_conv_block_params)

            return encoded, _x
Example #11
    def add_prediction_op(self):
        """Applies a GRU RNN over the input data, then an affine layer projection. Steps to complete
        in this function:

        - Roll over inputs_placeholder with GRUCell, producing a Tensor of shape [batch_s, max_timestep,
          hidden_size].
        - Apply a W * f + b transformation over the data, where f is each hidden layer feature. This
          should produce a Tensor of shape [batch_s, max_timesteps, num_classes]. Set this result to
          "logits".

        Remember:
            * Use the xavier initialization for matrices (W, but not b).
            * W should be shape [hidden_size, num_classes].
        """
        # Non-recurrent hidden layers
        inputs = self.inputs_placeholder
        for i in range(self.config.num_hidden_layers):
            with tf.variable_scope('hidden%d' % (i + 1)) as vs:
                inputs = layers.dense(inputs=inputs,
                                      output_size=self.config.hidden_size,
                                      activation=tf.nn.relu)

        # Construct forward and backward cells of bidirectional RNN
        fwdcell = layers.FactorizedLSTMCell(
            self.config.hidden_size,
            num_proj=self.config.svd_rank,
            activation=self.config.activation_func,
        )
        bckcell = layers.FactorizedLSTMCell(
            self.config.hidden_size,
            num_proj=self.config.svd_rank,
            activation=self.config.activation_func,
        )
        # TODO: look into non-zero initial hidden states?
        rnn_outputs, rnn_last_states = tf.nn.bidirectional_dynamic_rnn(
            fwdcell,
            bckcell,
            inputs=inputs,
            dtype=tf.float32,
            sequence_length=self.seq_lens_placeholder)

        # Reuse projection matrices
        with tf.variable_scope('final'):
            with tf.variable_scope('fw'):
                fw_logits = layers.dense(
                    inputs=rnn_outputs[0],
                    output_size=self.config.num_classes,
                    bias=True,
                )
            with tf.variable_scope('bw'):
                bw_logits = layers.dense(
                    inputs=rnn_outputs[1],
                    output_size=self.config.num_classes,
                    bias=False,
                )
            self.logits = fw_logits + bw_logits
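The "W * f + b" projection described in the docstring can also be written directly against raw TF 1.x, independent of the project's layers module. A standalone sketch with an Xavier-initialized W and a zero-initialized bias (shapes follow the docstring):

import tensorflow as tf

def affine_projection(features, num_classes):
    # features: [batch_s, max_timestep, hidden_size] -> logits: [batch_s, max_timestep, num_classes]
    hidden_size = features.get_shape().as_list()[-1]
    W = tf.get_variable('W', [hidden_size, num_classes],
                        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.get_variable('b', [num_classes], initializer=tf.zeros_initializer())
    return tf.einsum('bth,hc->btc', features, W) + b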
Example #12
    def __call__(self, x):
        x = tf.cast(x, dtype=tf.float32)
        self.conv1 = ll.conv2dx(x, self.model_weights[0], 1)  # 1st Layer
        self.conv1 = ll.conv2dx(self.conv1, self.model_weights[1], 1)
        self.conv1 = ll.conv2dx(self.conv1, self.model_weights[2], 1)
        self.pool1 = ll.maxpool(self.conv1, 2, 2)

        self.conv2 = ll.conv2dx(self.pool1, self.model_weights[3],
                                1)  # 2nd Layer
        self.conv2 = ll.conv2dx(self.conv2, self.model_weights[4], 1)
        self.conv2 = ll.conv2dx(self.conv2, self.model_weights[5], 1)
        self.pool2 = ll.maxpool(self.conv2, 2, 2)

        self.conv3 = ll.conv2dx(self.pool2, self.model_weights[6],
                                1)  # 3rd Layer
        self.conv3 = ll.conv2dx(self.conv3, self.model_weights[7], 1)
        self.conv3 = ll.conv2dx(self.conv3, self.model_weights[8], 1)
        self.pool3 = ll.maxpool(self.conv3, 2, 2)

        self.conv4 = ll.conv2dx(self.pool3, self.model_weights[9],
                                1)  # 4th Layer
        self.conv4 = ll.conv2dx(self.conv4, self.model_weights[10], 1)
        self.conv4 = ll.conv2dx(self.conv4, self.model_weights[11], 1)
        self.pool4 = ll.maxpool(self.conv4, 2, 2)

        self.conv5 = ll.conv2dx(self.pool4, self.model_weights[12],
                                1)  # 5th Layer
        self.conv5 = ll.conv2dx(self.conv5, self.model_weights[13], 1)
        self.conv5 = ll.conv2dx(self.conv5, self.model_weights[14], 1)
        self.pool5 = ll.maxpool(self.conv5, 2, 2)

        self.conv6 = ll.conv2dx(self.pool5, self.model_weights[15],
                                1)  # 6th Layer
        self.conv6 = ll.conv2dx(self.conv6, self.model_weights[16], 1)
        self.conv6 = ll.conv2dx(self.conv6, self.model_weights[17], 1)
        self.pool6 = ll.maxpool(self.conv6, 2, 2)

        self.flatten_layer = tf.reshape(self.pool6,
                                        shape=(tf.shape(self.pool6)[0],
                                               -1))  # flatten

        self.dense1 = ll.dense(self.flatten_layer, self.model_weights[18],
                               self.dropout_rate)
        self.dense2 = ll.dense(self.dense1, self.model_weights[19],
                               self.dropout_rate)
        self.dense3 = ll.dense(self.dense2, self.model_weights[20],
                               self.dropout_rate)
        self.dense4 = ll.dense(self.dense3, self.model_weights[21],
                               self.dropout_rate)
        self.dense5 = ll.dense(self.dense4, self.model_weights[22],
                               self.dropout_rate)
        self.dense6 = tf.matmul(self.dense5, self.model_weights[23])

        return tf.nn.softmax(self.dense6)
Example #13
    def __init__(self):
        self._epochs = 20
        self._learning_rate = 0.01
        self._batch_size = 20
        self._data = self.getData()
        self._model = layers.Model(lr=self._learning_rate,
                                   blr=self._learning_rate)

        self._model.add_layer(
            layers.conv(ems=1,
                        nodes=20,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(
            layers.conv(ems=20,
                        nodes=20,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(layers.max_pool(kernel_size=2))

        self._model.add_layer(
            layers.conv(ems=20,
                        nodes=12,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(
            layers.conv(ems=12,
                        nodes=12,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(layers.max_pool(kernel_size=2))

        self._model.add_layer(
            layers.conv(ems=12,
                        nodes=6,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(
            layers.conv(ems=6,
                        nodes=6,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(layers.max_pool(kernel_size=2))

        self._model.add_layer(layers.dense(eis=96, nodes=48, act_func="tanh"))
        self._model.add_layer(layers.dense(eis=48, nodes=3, act_func="none"))
Example #14
    def add_prediction_op(self):
        """Applies a GRU RNN over the input data, then an affine layer projection. Steps to complete
        in this function:

        - Roll over inputs_placeholder with GRUCell, producing a Tensor of shape [batch_s, max_timestep,
          hidden_size].
        - Apply a W * f + b transformation over the data, where f is each hidden layer feature. This
          should produce a Tensor of shape [batch_s, max_timesteps, num_classes]. Set this result to
          "logits".

        Remember:
            * Use the xavier initialization for matrices (W, but not b).
            * W should be shape [hidden_size, num_classes].
        """
        # Non-recurrent hidden layers
        inputs = self.inputs_placeholder
        for i in range(self.config.num_hidden_layers):
            with tf.variable_scope('hidden%d' % (i + 1)) as vs:
                inputs = layers.dense(inputs=inputs,
                                      output_size=self.config.hidden_size,
                                      activation=tf.nn.relu)

        # Construct forward and backward cells of bidirectional RNN
        construct_cell = getattr(tf.contrib.rnn, self.config.cell_type)
        fwdcell = construct_cell(
            self.config.hidden_size,
            activation=self.config.activation_func,
        )
        bckcell = construct_cell(
            self.config.hidden_size,
            activation=self.config.activation_func,
        )
        # TODO: look into non-zero initial hidden states?
        rnn_outputs, rnn_last_states = tf.nn.bidirectional_dynamic_rnn(
            fwdcell,
            bckcell,
            inputs=inputs,
            dtype=tf.float32,
            sequence_length=self.seq_lens_placeholder)

        # Concatenate the forward and backward hidden states together for the scores
        # scores.shape = [batch_s, max_timestep, 2*num_hidden]
        scores = tf.concat([rnn_outputs[0], rnn_outputs[1]],
                           axis=2,
                           name='scores')

        # Push the scores through an affine layer
        # logits.shape = [batch_s, max_timestep, num_classes]
        with tf.variable_scope('final') as vs:
            self.logits = layers.dense(inputs=scores,
                                       output_size=self.config.num_classes)
Example #15
 def _build(self):
     
     layer = self.input_state
     init_b = tf.constant_initializer(0.01)
     
     for i, num_unit in enumerate(self.hidden_layers):
         if i != 1:
             layer = dense(layer, num_unit, init_b=init_b, name='hidden_layer_{}'.format(i))
         else:
             layer = tf.concat([layer, self.input_action], axis=1, name='concat_action')
             layer = dense(layer, num_unit, init_b=init_b, name='hidden_layer_{}'.format(i))
     
     output = dense(layer, 1, activation=None, init_b=init_b, name='output')
     return tf.reshape(output, shape=(-1,))
Example #16
    def __call__(self, x, is_training=True, reuse=False, *args, **kwargs):
        with tf.variable_scope(self.__class__.__name__) as vs:
            if reuse:
                vs.reuse_variables()
            conv_params = {'is_training': is_training, 'activation_': 'relu'}

            x = conv_block(x, 16, **conv_params)
            x = conv_block(x, 16, **conv_params, sampling='pool')
            x = conv_block(x, 32, **conv_params)
            x = conv_block(x, 32, **conv_params, sampling='pool')

            x = flatten(x)
            x = dense(x, 512, activation_='relu')
            x = dense(x, self.nb_classes)
            return x
Example #17
def encoder(current, reuse_variables=False):
    """
    Creates encoder network.
    
    @param current: tensor of size 96x96x3
    
    @return: tensor of size config.num_z_channels
    """
    if reuse_variables:
        tf.get_variable_scope().reuse_variables()

    with tf.variable_scope("encoder") as scope:

        # -- convolutional layers 1-4
        for index, num_filters in enumerate([64, 128, 256, 512]):
            name = 'E_conv' + str(index)
            current = conv2d(current,
                             num_filters,
                             name=name,
                             reuse=reuse_variables)
            current = tf.nn.relu(current)

        # reshape
        current = tf.reshape(current, [size_batch, -1])

        # -- fc layer
        name = 'E_fc'
        current = dense(current,
                        num_z_channels,
                        name=name,
                        reuse=reuse_variables)
        return tf.nn.tanh(current)
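A hypothetical call site for this encoder, assuming TF 1.x placeholders and the module-level size_batch used inside the snippet; it illustrates why the reuse_variables flag is needed when the network is applied more than once:

images = tf.placeholder(tf.float32, [size_batch, 96, 96, 3])
z_first = encoder(images)                           # creates the E_conv*/E_fc variables
z_shared = encoder(images, reuse_variables=True)    # second call reuses the same weights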
Example #18
def build_dec(t_params, n_dim_txt, n_dim_enc, n_dim_dec, n_dim_vocab, beam_size):
    '''
    Build the decoder for texts
    '''
    def _step(_prob):
        _y = _prob.argmax(-1)
        _log_prob = tensor.log(_prob[tensor.arange(_y.shape[0]), _y] + 1e-6)
        tensor.set_subtensor(_prob[tensor.arange(_y.shape[0]), _y], 0)

        return _y, _log_prob

    y = tensor.vector('y', 'int32')
    init_h = tensor.matrix('init_h', config.floatX)
    n_samples = y.shape[0]
    # Word embedding
    emb = tensor.switch(y[:, None] < 0, tensor.zeros((n_samples, n_dim_txt), config.floatX), embedding(y, t_params, n_dim_vocab, n_dim_txt, 'emb'))
    # Decoder(s) - Initialization of hidden layer in the next step
    next_h = gru(tensor.ones_like(y, 'int8'), emb, t_params, n_dim_txt, n_dim_dec, 'dec', True, init_h)
    # Fully-connected layer
    fc = dense(0.5 * next_h, t_params, n_dim_dec, n_dim_vocab, 'fc')
    # Classifier
    prob = tensor.nnet.softmax(fc)
    # Hypo words
    [next_y, next_log_prob], _ = theano.scan(_step, non_sequences=prob, n_steps=beam_size)

    return theano.function([y, init_h], [next_y, next_log_prob, next_h], name='f_dec')
Example #19
def build_dec(t_params, n_dim_txt, n_dim_enc, n_dim_dec, n_dim_vocab,
              beam_size):
    '''
    Build the decoder for texts
    '''
    def _step(_prob):
        _y = _prob.argmax(-1)
        _log_prob = tensor.log(_prob[tensor.arange(_y.shape[0]), _y] + 1e-6)
        tensor.set_subtensor(_prob[tensor.arange(_y.shape[0]), _y], 0)

        return _y, _log_prob

    y = tensor.vector('y', 'int32')
    init_h = tensor.matrix('init_h', config.floatX)
    n_samples = y.shape[0]
    # Word embedding
    emb = tensor.switch(y[:, None] < 0,
                        tensor.zeros((n_samples, n_dim_txt), config.floatX),
                        embedding(y, t_params, n_dim_vocab, n_dim_txt, 'emb'))
    # Decoder(s) - Initialization of hidden layer in the next step
    next_h = gru(tensor.ones_like(y, 'int8'), emb, t_params, n_dim_txt,
                 n_dim_dec, 'dec', True, init_h)
    # Fully-connected layer
    fc = dense(0.5 * next_h, t_params, n_dim_dec, n_dim_vocab, 'fc')
    # Classifier
    prob = tensor.nnet.softmax(fc)
    # Hypo words
    [next_y, next_log_prob], _ = theano.scan(_step,
                                             non_sequences=prob,
                                             n_steps=beam_size)

    return theano.function([y, init_h], [next_y, next_log_prob, next_h],
                           name='f_dec')
Example #20
    def __call__(self, x, reuse=True, is_feature=False, is_training=True):
        nb_downsampling = int(np.log2(self.input_shape[0] // 4))
        with tf.variable_scope(self.name, reuse=reuse) as vs:
            if reuse:
                vs.reuse_variables()
            _x = x
            first_filters = 32
            for i in range(nb_downsampling):
                filters = first_filters * (2**i)
                _x = conv_block(_x,
                                is_training=is_training,
                                filters=filters,
                                activation_='lrelu',
                                sampling='down',
                                normalization=self.normalization)
            _x = flatten(_x)

            if self.normalization == 'spectral':
                _x = sn_dense(_x,
                              is_training=is_training,
                              units=1,
                              activation_=None)
            else:
                _x = dense(_x, units=1, activation_=None)
            return _x
Example #21
def first_block(x,
                target_size,
                noise_dim,
                upsampling='deconv',
                normalization='batch',
                is_training=True):
    if upsampling == 'deconv':
        _x = reshape(x, (1, 1, noise_dim))
        _x = conv2d_transpose(_x,
                              1024,
                              target_size,
                              strides=(1, 1),
                              padding='valid')
    elif upsampling == 'dense':
        _x = dense(x, target_size[0] * target_size[1] * 1024)
        _x = reshape(_x, (target_size[1], target_size[0], 1024))
    else:
        raise ValueError

    if normalization == 'batch':
        _x = batch_norm(_x, is_training=is_training)
    elif normalization == 'layer':
        _x = layer_norm(_x, is_training=is_training)
    elif normalization is None:
        pass
    else:
        raise ValueError
    _x = activation(_x, 'relu')
    return _x
Example #22
 def discriminator(self, img, const_init=False, trainable=True, reuse=False):
     # (n, 1, 28, 28)
     h0 = layers.conv2d(
         img,
         64,
         5,
         name="d_conv1",
         const_init=const_init,
         trainable=trainable,
         reuse=reuse,
     )
     h0 = flow.nn.leaky_relu(h0, 0.3)
     h0 = flow.nn.dropout(h0, rate=0.3)
     # (n, 64, 14, 14)
     h1 = layers.conv2d(
         h0,
         128,
         5,
         name="d_conv2",
         const_init=const_init,
         trainable=trainable,
         reuse=reuse,
     )
     h1 = flow.nn.leaky_relu(h1, 0.3)
     h1 = flow.nn.dropout(h1, rate=0.3)
     # (n, 128 * 7 * 7)
     out = flow.reshape(h1, (self.batch_size, -1))
     # (n, 1)
     out = layers.dense(
         out, 1, name="d_fc", const_init=const_init, trainable=trainable, reuse=reuse
     )
     return out
Example #23
    def get_opt_dict(self):
        x = tf.placeholder(tf.float32, shape=[None, self.hw, self.hw])
        y = tf.placeholder(tf.float32, shape=[1, ])
        is_training = tf.placeholder(tf.bool, shape=[])

        embedding = self.encoder(tf.expand_dims(x,-1))
        attn = None
        if self.use_attn:
            embedding, attn = self.attention(embedding)
        else:
            embedding = tf.reduce_mean(
                tf.expand_dims(embedding, axis=0), axis=1)

        logits = dense(embedding, 1)
        logits = tf.reshape(logits, [-1])

        loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))

        var_total = tf.trainable_variables()
        decay_var = [v for v in var_total if 'kernel' in v.name]
        optimizer = tf.contrib.opt.AdamWOptimizer(
            weight_decay=self.wd, learning_rate=self.lr)
        train_op = optimizer.minimize(loss=loss, var_list=var_total, 
                                      decay_var_list=decay_var)

        return EasyDict(
            x=x, y=y, is_training=is_training, attn=attn,
            logits=logits, loss=loss, train_op=train_op)    
Example #24
def discriminator_img(current, valence, arousal, reuse_variables=False):
    """
    Creates discriminator network on generated image + desired emotion.

    @param current: tensor of size 96x96x3
    @param valence: tensor of size 1
    @param arousal: tensor of size 1

    @return:  sigmoid(output), output
              (output tensor is of size 1)
    """
    if reuse_variables:
        tf.get_variable_scope().reuse_variables()

    with tf.variable_scope("discriminator_img") as scope:

        # -- convolutional blocks (= convolution+batch_norm+relu) 1-4
        for index, num_filters in enumerate([16, 32, 64, 128]):

            # convolution
            name = 'D_img_conv' + str(index + 1)
            current = conv2d(current,
                             num_filters,
                             name=name,
                             reuse=reuse_variables)

            # batch normalization
            name = 'D_img_bn' + str(index + 1)
            current = batch_norm(current, name, reuse=reuse_variables)
            # relu activation
            current = tf.nn.relu(current)

            if index == 0:
                current = concat_label(current, valence, 16)
                current = concat_label(current, arousal, 16)

        # reshape
        current = tf.reshape(current, [size_batch, -1])

        # -- fc 1
        name = 'D_img_fc1'
        current = lrelu(dense(current, 1024, name=name, reuse=reuse_variables))

        # -- fc 2
        name = 'D_img_fc2'
        current = dense(current, 1, name=name, reuse=reuse_variables)
        return tf.nn.sigmoid(current), current
Example #25
    def _build(self):

        layer = self.x
        init_b = tf.constant_initializer(0.01)

        for i, num_unit in enumerate(self.hidden_layers):
            layer = dense(layer,
                          num_unit,
                          init_b=init_b,
                          name='hidden_layer_{}'.format(i))

        output = dense(layer,
                       self.output_dim,
                       activation=self.activation,
                       init_b=init_b,
                       name='output')
        return output
Example #26
 def encoder(self, x):
     out = conv2d(x, 20, 5, activation=tf.nn.relu)
     out = max_pool(out, 2, 2)
     out = conv2d(out, 50, 5, activation=tf.nn.relu)
     out = max_pool(out, 2, 2)
     out = tf.layers.flatten(out)
     out = dense(out, 500, activation=tf.nn.relu)
     return out
Example #27
    def __init__(self):

        conv_stride = [4, 4]
        self.conv1 = layers.conv2d(filters=96, kernel=[11, 11], padding='SAME', name='conv1', activation='relu', normalization='local_response_normalization', stride=conv_stride)
        self.conv2 = layers.conv2d(filters=256, kernel=[5, 5], padding='SAME', name='conv2', activation='relu', normalization="local_response_normalization", stride=[1, 1])
        self.conv3 = layers.conv2d(filters=384, kernel=[3, 3], padding='SAME', name='conv3', activation='relu', stride=[1, 1])
        self.conv4 = layers.conv2d(filters=384, kernel=[3, 3], padding='SAME', name='conv4', activation='relu', stride=[1, 1])
        self.conv5 = layers.conv2d(filters=256, kernel=[3, 3], padding='SAME', name='conv5', activation='relu', stride=[1, 1])

        self.fc6 = layers.dense(4096, activation='relu', dropout=True, name='fc6')
        self.fc7 = layers.dense(4096, activation='relu', dropout=True, name='fc7')
        self.fc8 = layers.dense(1000, activation='relu', name='fc8')

        self.max_pool1 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
        self.max_pool2 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
        self.max_pool5 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
Example #28
File: LeNet.py Project: iiharu/NN
    def build(self, input_shape=(28, 28, 1), classes=10):
        inputs = keras.Input(shape=input_shape)

        outputs = flatten()(inputs)
        outputs = dense(300)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(100)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(10)(outputs)
        outputs = softmax()(outputs)

        model = keras.Model(inputs, outputs)

        model.summary()

        return model
Example #29
    def classifier_aux(self, inputs, classes):
        filters = 128
        outputs = average_pooling2d(pool_size=(5, 5),
                                    strides=3,
                                    padding='valid')(inputs)
        outputs = conv2d(filters=filters,
                         kernel_size=(1, 1),
                         strides=1,
                         padding='same')(outputs)
        outputs = flatten()(outputs)
        outputs = relu()(outputs)
        outputs = dense(1024)(outputs)
        outputs = relu()(outputs)
        outputs = dropout(0.7)(outputs)
        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        return outputs
Example #30
    def __call__(self, x, reuse=True, is_feature=False):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            x = conv_block(x,
                           32,
                           kernel_size=(4, 4),
                           sampling='down',
                           **self.conv_kwargs)

            for i in range(2):
                x = discriminator_block(x, 32, **self.conv_kwargs)
            x = conv_block(x,
                           64,
                           kernel_size=(4, 4),
                           sampling='down',
                           **self.conv_kwargs)

            for i in range(4):
                x = discriminator_block(x, 64, **self.conv_kwargs)
            x = conv_block(x,
                           128,
                           kernel_size=(4, 4),
                           sampling='down',
                           **self.conv_kwargs)

            for i in range(4):
                x = discriminator_block(x, 128, **self.conv_kwargs)
            x = conv_block(x,
                           256,
                           kernel_size=(4, 4),
                           sampling='down',
                           **self.conv_kwargs)

            for i in range(4):
                x = discriminator_block(x, 256, **self.conv_kwargs)
            x = conv_block(x,
                           512,
                           kernel_size=(4, 4),
                           sampling='down',
                           **self.conv_kwargs)

            for i in range(4):
                x = discriminator_block(x, 512, **self.conv_kwargs)
            x = conv_block(x,
                           1024,
                           kernel_size=(4, 4),
                           sampling='down',
                           **self.conv_kwargs)

            if is_feature:
                return x

            x = global_average_pool2d(x)
            x = dense(x, units=1, activation_=None)
            return x
Example #31
    def __call__(self, x,
                 is_training=True,
                 reuse=False,
                 *args,
                 **kwargs):
        with tf.variable_scope(self.__class__.__name__) as vs:
            if reuse:
                vs.reuse_variables()
            conv_params = {'is_training': is_training,
                           'activation_': 'relu',
                           'normalization': 'batch'}
            x = conv_block(x, 64, **conv_params, dropout_rate=0.3)
            x = conv_block(x, 64, **conv_params, dropout_rate=0.3)

            x = conv_block(x, 128, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 128, **conv_params, dropout_rate=0.4)

            x = conv_block(x, 256, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 256, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 256, **conv_params)
            l1 = x
            x = max_pool2d(x)

            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params)
            l2 = x
            x = max_pool2d(x)

            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params)
            l3 = x
            x = max_pool2d(x)

            x = conv_block(x, 512, **conv_params, sampling='pool')
            x = conv_block(x, 512, **conv_params, sampling='pool')

            x = flatten(x)
            g = dense(x, 512, activation_='relu')

            x, attentions = attention_module([l1, l2, l3], g)
            x = dense(x, self.nb_classes)
            return x, attentions
Example #32
    def classifier_main(self, inputs, classes):
        outputs = average_pooling2d(pool_size=(7, 7),
                                    strides=1,
                                    padding='valid')(inputs)
        outputs = flatten()(outputs)
        outputs = dropout(0.4)(outputs)
        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        return outputs
Example #33
def build_enc(t_params, n_dim_img, n_dim_enc, n_dim_dec):
    '''
    Build the encoder for images
    '''
    x = tensor.tensor3('x', config.floatX)
    mask_x = tensor.matrix('mask_x', 'int8')
    # Encoder(s) and initialization of hidden layer
    enc = gru(mask_x, 0.5 * x, t_params, n_dim_img, n_dim_enc, 'enc')[-1]
    init_h = tensor.tanh(dense(enc, t_params, n_dim_enc, n_dim_dec, 'init_h'))

    return theano.function([x, mask_x], [init_h], name='f_enc')
Example #34
def build_dec(t_params, n_dim_txt, n_dim_enc, n_dim_dec, n_dim_vocab):
    '''
    Build the decoder for texts
    '''
    y = tensor.vector('y', 'int32')
    prev_h = tensor.matrix('init_h', config.floatX)
    n_samples = y.shape[0]
    # Word embedding
    emb = tensor.switch(y[:, None] < 0, tensor.zeros((n_samples, n_dim_txt), config.floatX), embedding(y, t_params, n_dim_vocab, n_dim_txt, 'emb'))
    # Decoder(s) - Initialization of hidden layer in the next step
    next_h = gru(tensor.ones_like(y, 'int8'), emb, t_params, n_dim_txt, n_dim_dec, 'dec', True, prev_h)
    # Fully-connected layer
    fc = dense(0.5 * next_h, t_params, n_dim_dec, n_dim_vocab, 'fc')
    # Classifier
    prob = tensor.nnet.softmax(fc)

    return theano.function([y, prev_h], [prob.argmax(-1), next_h], name='f_dec')
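Combined with the f_enc returned by build_enc above, this f_dec supports simple step-by-step greedy decoding. A hypothetical loop (the batch variables, max_len and the int32 cast are illustrative, not from the project):

import numpy

h = f_enc(x_batch, mask_x_batch)[0]               # initial decoder state from the image encoder
y = -numpy.ones((h.shape[0],), dtype='int32')     # negative ids select the zero <bos> embedding
hypothesis = []
for _ in range(max_len):
    y, h = f_dec(y, h)                            # most likely word ids and the next hidden state
    y = y.astype('int32')                         # argmax yields int64, but the input expects int32
    hypothesis.append(y)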