Example No. 1
    def model(self):
        block0 = layers.gate_block(
            inputs=self.embed, k_size=3, filters=100, scope_name='block0')

        pool0 = layers.one_maxpool(
            inputs=block0, padding='VALID', scope_name='pool0')

        flatten0 = layers.flatten(pool0, scope_name='flatten0')

        block1 = layers.gate_block(
            inputs=self.embed, k_size=4, filters=100, scope_name='block1')

        pool1 = layers.one_maxpool(
            inputs=block1, padding='VALID', scope_name='pool1')

        flatten1 = layers.flatten(pool1, scope_name='flatten1')

        block2 = layers.gate_block(
            inputs=self.embed, k_size=5, filters=100, scope_name='block2')

        pool2 = layers.one_maxpool(
            inputs=block2, padding='VALID', scope_name='pool2')

        flatten2 = layers.flatten(pool2, scope_name='flatten2')

        concat0 = layers.concatinate(
            inputs=[flatten0, flatten1, flatten2], scope_name='concat0')

        dropout0 = layers.Dropout(
            inputs=concat0, rate=1 - self.keep_prob, scope_name='dropout0')

        self.logits = layers.fully_connected(
            inputs=dropout0, out_dim=self.n_classes, scope_name='fc0')
Example No. 2
    def model(self):

        conv0 = layers.conv1d(
            inputs=self.embed,
            filters=100,
            k_size=3,
            stride=1,
            padding="SAME",
            scope_name="conv0",
        )
        relu0 = layers.relu(inputs=conv0, scope_name="relu0")
        pool0 = layers.one_maxpool(inputs=relu0,
                                   padding="VALID",
                                   scope_name="pool0")

        flatten0 = layers.flatten(inputs=pool0, scope_name="flatten0")

        conv1 = layers.conv1d(
            inputs=self.embed,
            filters=100,
            k_size=4,
            stride=1,
            padding="SAME",
            scope_name="conv1",
        )
        relu1 = layers.relu(inputs=conv1, scope_name="relu1")
        pool1 = layers.one_maxpool(inputs=relu1,
                                   padding="VALID",
                                   scope_name="pool1")

        flatten1 = layers.flatten(inputs=pool1, scope_name="flatten1")

        conv2 = layers.conv1d(
            inputs=self.embed,
            filters=100,
            k_size=5,
            stride=1,
            padding="SAME",
            scope_name="conv2",
        )
        relu2 = layers.relu(inputs=conv2, scope_name="relu2")
        pool2 = layers.one_maxpool(inputs=relu2,
                                   padding="VALID",
                                   scope_name="pool2")

        flatten2 = layers.flatten(inputs=pool2, scope_name="flatten2")

        concat0 = layers.concatinate([flatten0, flatten1, flatten2],
                                     scope_name="concat0")

        dropout0 = layers.Dropout(inputs=concat0,
                                  rate=1 - self.keep_prob,
                                  scope_name="dropout0")

        self.logits = layers.fully_connected(inputs=dropout0,
                                             out_dim=self.n_classes,
                                             scope_name="fc0")
Example No. 3
    def _build_encoder(self, vd):
        with tf.variable_scope(self.name):
            if self.arch == 'FC':
                layer_i = layers.flatten(self.input_ph)
                for i, layer_size in enumerate(self.fc_layer_sizes):
                    layer_i = layers.fc('fc{}'.format(i+1), layer_i, layer_size, activation=self.activation)[-1]
                self.ox = layer_i
            elif self.arch == 'ATARI-TRPO':
                self.w1, self.b1, self.o1 = layers.conv2d('conv1', self.input_ph, 16, 4, self.input_channels, 2, activation=self.activation)
                self.w2, self.b2, self.o2 = layers.conv2d('conv2', self.o1, 16, 4, 16, 2, activation=self.activation)
                self.w3, self.b3, self.o3 = layers.fc('fc3', layers.flatten(self.o2), 20, activation=self.activation)
                self.ox = self.o3
            elif self.arch == 'NIPS':
                self.w1, self.b1, self.o1 = layers.conv2d('conv1', vd, self.input_ph, 16, 8, self.input_channels, 4, activation=self.activation)
                self.w2, self.b2, self.o2 = layers.conv2d('conv2', vd, self.o1, 32, 4, 16, 2, activation=self.activation)
                self.w3, self.b3, self.o3 = layers.fc('fc3', vd, layers.flatten(self.o2), 256, activation=self.activation)
                self.ox = self.o3
            elif self.arch == 'NATURE':
                self.w1, self.b1, self.o1 = layers.conv2d('conv1', self.input_ph, 32, 8, self.input_channels, 4, activation=self.activation)
                self.w2, self.b2, self.o2 = layers.conv2d('conv2', self.o1, 64, 4, 32, 2, activation=self.activation)
                self.w3, self.b3, self.o3 = layers.conv2d('conv3', self.o2, 64, 3, 64, 1, activation=self.activation)
                self.w4, self.b4, self.o4 = layers.fc('fc4', layers.flatten(self.o3), 512, activation=self.activation)
                self.ox = self.o4
            else:
                raise Exception('Invalid architecture `{}`'.format(self.arch))

            if self.use_recurrent:
                with tf.variable_scope('lstm_layer') as vs:
                    self.lstm_cell = tf.contrib.rnn.BasicLSTMCell(
                        self.hidden_state_size, state_is_tuple=True, forget_bias=1.0)
                    
                    batch_size = tf.shape(self.step_size)[0]
                    self.ox_reshaped = tf.reshape(self.ox,
                        [batch_size, -1, self.ox.get_shape().as_list()[-1]])
                    state_tuple = tf.contrib.rnn.LSTMStateTuple(
                        *tf.split(self.initial_lstm_state, 2, 1))

                    self.lstm_outputs, self.lstm_state = tf.nn.dynamic_rnn(
                        self.lstm_cell,
                        self.ox_reshaped,
                        initial_state=state_tuple,
                        sequence_length=self.step_size,
                        time_major=False)

                    self.lstm_state = tf.concat(self.lstm_state, 1)
                    self.ox = tf.reshape(self.lstm_outputs, [-1,self.hidden_state_size], name='reshaped_lstm_outputs')

                    # Get all LSTM trainable params
                    self.lstm_trainable_variables = [v for v in 
                        tf.trainable_variables() if v.name.startswith(vs.name)]

            return self.ox
Example No. 4
def graph_forward():
    model = graph.Graph()
    model.add(
        layers.conv(layers.xaxier_initilizer, layers.zero_initilizer, 1, 1, 32,
                    4, 3, 3))
    #model.add(layers.Relu())
    model.add(
        layers.conv(layers.xaxier_initilizer, layers.zero_initilizer, 1, 2, 16,
                    32, 3, 3))
    #model.add(layers.Relu())
    model.add(
        layers.max_pooling(layers.xaxier_initilizer, layers.zero_initilizer, 0,
                           2, 2, 2))
    model.add(layers.flatten())
    model.add(
        layers.FullConn(layers.xaxier_initilizer, layers.zero_initilizer,
                        (1024, 10)))
    #model.add(layers.Relu())
    crit = layers.softmax_with_loss()
    y = np.array([1, 2, 3])

    def foo(x):
        logits = model.forward(x)
        prob = crit.forward(logits)
        dy, loss = crit.backward(logits, y)
        dx = model.backward(x, dy)
        return loss, dx

    return foo
Example No. 5
    def __init__(self, sess, input_shape, num_actions, reuse=False, is_training=True, name='train'):
        super().__init__(sess, reuse)
        self.initial_state = []
        with tf.name_scope(name + "policy_input"):
            self.X_input = tf.placeholder(tf.uint8, input_shape)
        with tf.variable_scope("policy", reuse=reuse):
            conv1 = conv2d('conv1', tf.cast(self.X_input, tf.float32) / 255., num_filters=32, kernel_size=(8, 8),
                           padding='VALID', stride=(4, 4),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv2 = conv2d('conv2', conv1, num_filters=64, kernel_size=(4, 4), padding='VALID', stride=(2, 2),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv3 = conv2d('conv3', conv2, num_filters=64, kernel_size=(3, 3), padding='VALID', stride=(1, 1),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv3_flattened = flatten(conv3)

            fc4 = dense('fc4', conv3_flattened, output_dim=512, initializer=orthogonal_initializer(np.sqrt(2)),
                        activation=tf.nn.relu, is_training=is_training)

            self.policy_logits = dense('policy_logits', fc4, output_dim=num_actions,
                                       initializer=orthogonal_initializer(np.sqrt(1.0)), is_training=is_training)

            self.value_function = dense('value_function', fc4, output_dim=1,
                                        initializer=orthogonal_initializer(np.sqrt(1.0)), is_training=is_training)

            with tf.name_scope('value'):
                self.value_s = self.value_function[:, 0]

            with tf.name_scope('action'):
                self.action_s = noise_and_argmax(self.policy_logits)
Example No. 6
    def __call__(self, x, reuse=True, is_feature=False, is_training=True):
        nb_downsampling = int(np.log2(self.input_shape[0] // 4))
        with tf.variable_scope(self.name, reuse=reuse) as vs:
            if reuse:
                vs.reuse_variables()
            _x = x
            first_filters = 32
            for i in range(nb_downsampling):
                filters = first_filters * (2**i)
                _x = conv_block(_x,
                                is_training=is_training,
                                filters=filters,
                                activation_='lrelu',
                                sampling='down',
                                normalization=self.normalization)
            _x = flatten(_x)

            if self.normalization == 'spectral':
                _x = sn_dense(_x,
                              is_training=is_training,
                              units=1,
                              activation_=None)
            else:
                _x = dense(_x, units=1, activation_=None)
            return _x
Example No. 7
def network(x, weights, biases):
    x = tf.reshape(x, shape=[-1, input_size_h, input_size_w, num_channels])
    x = tf.subtract(x, mean)
    x = tf.divide(x, std)
    x = tf.expand_dims(x, axis=1)
    x = tf.transpose(x, perm=[0, 4, 2, 3, 1])

    conv0 = tf.nn.conv3d(x, weights["wc0"], strides=[1, 1, 1, 1, 1], padding="SAME")
    conv0 = tf.nn.bias_add(conv0, biases["bc0"])
    conv0 = tf.nn.relu(conv0)

    conv0 = tf.transpose(conv0, perm=[0, 1, 4, 2, 3])
    conv0 = tf.reshape(conv0, shape=[-1, 12, 192, 256])
    conv0 = tf.transpose(conv0, perm=[0, 2, 3, 1])

    conv1 = conv2d(conv0, weights["wc1"], biases["bc1"], strides=2)
    conv2 = conv2d(conv1, weights["wc2"], biases["bc2"], strides=2)
    conv3 = conv2d(conv2, weights["wc3"], biases["bc3"], strides=2)
    conv4 = conv2d(conv3, weights["wc4"], biases["bc4"], strides=2)
    conv5 = conv2d(conv4, weights["wc5"], biases["bc5"], strides=2)
    conv6 = conv2d(conv5, weights["wc6"], biases["bc6"], strides=2)

    fc1 = flatten(conv6)

    fc1 = dense(fc1, weights["wd1"], biases["bd1"])
    fc2 = dense(fc1, weights["wd2"], biases["bd2"])
    fc3 = dense(fc2, weights["wd3"], biases["bd3"])

    out = tf.add(tf.matmul(fc3, weights["out"]), biases["bias_out"])

    return out
Example No. 8
File: LeNet.py Project: iiharu/NN
    def build(self, input_shape=(28, 28, 1), classes=10):
        inputs = keras.Input(shape=input_shape)

        outputs = conv2d(filters=6, kernel_size=(6, 6))(inputs)
        outputs = max_pooling2d(pool_size=(2, 2), strides=(2, 2))(outputs)
        outputs = sigmoid()(outputs)

        outputs = conv2d(filters=16, kernel_size=(6, 6))(outputs)
        outputs = max_pooling2d(pool_size=(2, 2), strides=(2, 2))(outputs)
        outputs = sigmoid()(outputs)

        outputs = flatten()(outputs)

        outputs = dense(120)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(64)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        model = keras.Model(inputs, outputs)

        model.summary()

        return model
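build() returns a plain keras.Model, so it can be trained with the usual compile/fit calls. A hypothetical training sketch follows; the LeNet class name and the MNIST preprocessing are assumptions for illustration, only build()'s signature comes from the example.

from tensorflow import keras

(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train[..., None].astype("float32") / 255.0  # (N, 28, 28, 1)
y_train = keras.utils.to_categorical(y_train, 10)

model = LeNet().build(input_shape=(28, 28, 1), classes=10)  # hypothetical class name
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=128, epochs=1, validation_split=0.1)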
Example No. 9
def discriminator(inp, reuse=False):
    with tf.variable_scope('Encoder', reuse=reuse):
        # 32
        inp = gaussnoise(inp, std=0.05)
        conv1 = conv2d(inp, 96, kernel=3, strides=1, name=dname + 'conv1')
        conv1 = lrelu(conv1, 0.2)

        conv1b = conv2d(conv1, 96, kernel=3, strides=2, name=dname + 'conv1b')
        conv1b = batchnorm(conv1b, is_training=is_train, name=dname + 'bn1b')
        conv1b = lrelu(conv1b, 0.2)
        conv1b = tf.nn.dropout(conv1b, keep_prob)
        # 16
        conv2 = conv2d(conv1b, 192, kernel=3, strides=1, name=dname + 'conv2')
        conv2 = batchnorm(conv2, is_training=is_train, name=dname + 'bn2')
        conv2 = lrelu(conv2, 0.2)

        conv2b = conv2d(conv2, 192, kernel=3, strides=2, name=dname + 'conv2b')
        conv2b = batchnorm(conv2b, is_training=is_train, name=dname + 'bn2b')
        conv2b = lrelu(conv2b, 0.2)
        conv2b = tf.nn.dropout(conv2b, keep_prob)
        # 8
        conv3 = conv2d(conv2b, 256, kernel=3, strides=1, name=dname + 'conv3')
        conv3 = batchnorm(conv3, is_training=is_train, name=dname + 'bn3')
        conv3 = lrelu(conv3, 0.2)

        conv3b = conv2d(conv3, 256, kernel=1, strides=1, name=dname + 'conv3b')
        conv3b = batchnorm(conv3b, is_training=is_train, name=dname + 'bn3b')
        conv3b = lrelu(conv3b, 0.2)

        conv4 = conv2d(conv3b, 512, kernel=1, strides=1, name=dname + 'conv4')
        conv4 = batchnorm(conv4, is_training=is_train, name=dname + 'bn4')
        conv4 = lrelu(conv4, 0.2)

        flat = flatten(conv4)
        # Classifier
        clspred = linear(flat, n_classes, name=dname + 'cpred')
        # Decoder
        g2 = conv2d(conv4, nout=256, kernel=3, name=dname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=dname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=dname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g3b = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3b')
        g3b = batchnorm(g3b,
                        is_training=tf.constant(True),
                        name=dname + 'bn3bg')
        g3b = lrelu(g3b, 0.2)

        g4 = nnupsampling(g3b, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=dname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=dname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=3, kernel=3, name=dname + 'deconv4b')
        g4b = tf.nn.tanh(g4b)
        return clspred, g4b
Example No. 10
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        with tf.name_scope('Preprocessing'):
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            preprocessed_input = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)
        x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                       stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                       activation=tf.nn.relu, padding='VALID')
        padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')

        logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                    kernel_size=(1, 1),
                                    l2_strength=self.args.l2_strength,
                                    bias=self.args.bias,
                                    is_training=self.is_training)
        self.logits = flatten(logits_unflattened)

        self.__init_output()
Example No. 11
def discriminator(inp, reuse=False):
    with tf.variable_scope('Encoder', reuse=reuse):
        # 64
        inp = gaussnoise(inp, std=0.05)
        conv1 = conv2d(inp, 128, kernel=3, strides=2, name=dname + 'conv1')
        conv1 = lrelu(conv1, 0.2)
        # 32
        conv2 = tf.nn.dropout(conv1, keep_prob)
        conv2 = conv2d(conv2, 256, kernel=3, strides=2, name=dname + 'conv2')
        conv2 = batchnorm(conv2, is_training=is_train, name=dname + 'bn2')
        conv2 = lrelu(conv2, 0.2)
        # 16
        conv3 = tf.nn.dropout(conv2, keep_prob)
        conv3 = conv2d(conv3, 512, kernel=3, strides=2, name=dname + 'conv3')
        conv3 = batchnorm(conv3, is_training=is_train, name=dname + 'bn3')
        conv3 = lrelu(conv3, 0.2)
        # 8
        conv3b = conv2d(conv3, 512, kernel=3, strides=1, name=dname + 'conv3b')
        conv3b = batchnorm(conv3b, is_training=is_train, name=dname + 'bn3b')
        conv3b = lrelu(conv3b, 0.2)

        conv4 = tf.nn.dropout(conv3b, keep_prob)
        conv4 = conv2d(conv4, 1024, kernel=3, strides=2, name=dname + 'conv4')
        conv4 = batchnorm(conv4, is_training=is_train, name=dname + 'bn4')
        conv4 = lrelu(conv4, 0.2)
        # 4

        flat = flatten(conv4)
        # Classifier
        clspred = linear(flat, n_classes, name=dname + 'cpred')
        # Decoder
        g1 = conv2d(conv4, nout=512, kernel=3, name=dname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=dname + 'bn1g')
        g1 = lrelu(g1, 0.2)

        g2 = nnupsampling(g1, [8, 8])
        g2 = conv2d(g2, nout=256, kernel=3, name=dname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=dname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=dname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g4 = nnupsampling(g3, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=dname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=dname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g5 = nnupsampling(g4, [64, 64])
        g5 = conv2d(g5, nout=32, kernel=3, name=dname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=dname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=3, kernel=3, name=dname + 'deconv5b')
        g5b = tf.nn.tanh(g5b)
        return clspred, g5b
Example No. 12
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            with tf.variable_scope('Encoder'):
                _x = conv_block(x,
                                filters=16,
                                sampling='same',
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=16,
                                sampling='down',
                                **self.conv_block_params)

                _x = conv_block(_x,
                                filters=32,
                                sampling='same',
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=32,
                                sampling='down',
                                **self.conv_block_params)

                current_shape = _x.get_shape().as_list()[1:]
                _x = flatten(_x)
                _x = dense(_x, 512, activation_='lrelu')
                encoded = dense(_x, self.latent_dim)

            with tf.variable_scope('Decoder'):
                _x = dense(encoded, 512, activation_='lrelu')
                _x = dense(_x,
                           current_shape[0] * current_shape[1] *
                           current_shape[2],
                           activation_='lrelu')
                _x = reshape(_x, current_shape)

                _x = conv_block(_x,
                                filters=32,
                                sampling=self.upsampling,
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=16,
                                sampling='same',
                                **self.conv_block_params)

                _x = conv_block(_x,
                                filters=16,
                                sampling=self.upsampling,
                                **self.conv_block_params)
                _x = conv_block(_x,
                                filters=self.channel,
                                sampling='same',
                                **self.last_conv_block_params)

            return encoded, _x
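This autoencoder downsamples with conv blocks, flattens into a dense bottleneck of width latent_dim, then mirrors the path with a dense layer, a reshape back to the pre-flatten shape, and upsampling conv blocks. Below is a self-contained tf.keras sketch of the same shape flow; the filter counts and the 512-wide dense layers mirror the snippet, while the 32x32x3 input, leaky-ReLU activations and transposed-conv upsampling are assumptions.

import numpy as np
import tensorflow as tf


def build_autoencoder(input_shape=(32, 32, 3), latent_dim=64):
    x = tf.keras.Input(shape=input_shape)

    # Encoder: same/down convolution pairs, then flatten into a dense bottleneck.
    h = tf.keras.layers.Conv2D(16, 3, padding="same", activation=tf.nn.leaky_relu)(x)
    h = tf.keras.layers.Conv2D(16, 3, strides=2, padding="same", activation=tf.nn.leaky_relu)(h)
    h = tf.keras.layers.Conv2D(32, 3, padding="same", activation=tf.nn.leaky_relu)(h)
    h = tf.keras.layers.Conv2D(32, 3, strides=2, padding="same", activation=tf.nn.leaky_relu)(h)

    pre_flatten_shape = tuple(h.shape[1:])  # e.g. (8, 8, 32) for a 32x32 input
    h = tf.keras.layers.Flatten()(h)
    h = tf.keras.layers.Dense(512, activation=tf.nn.leaky_relu)(h)
    encoded = tf.keras.layers.Dense(latent_dim)(h)

    # Decoder: dense back to the pre-flatten size, reshape, then upsample.
    h = tf.keras.layers.Dense(512, activation=tf.nn.leaky_relu)(encoded)
    h = tf.keras.layers.Dense(int(np.prod(pre_flatten_shape)), activation=tf.nn.leaky_relu)(h)
    h = tf.keras.layers.Reshape(pre_flatten_shape)(h)
    h = tf.keras.layers.Conv2DTranspose(32, 3, strides=2, padding="same", activation=tf.nn.leaky_relu)(h)
    h = tf.keras.layers.Conv2D(16, 3, padding="same", activation=tf.nn.leaky_relu)(h)
    h = tf.keras.layers.Conv2DTranspose(16, 3, strides=2, padding="same", activation=tf.nn.leaky_relu)(h)
    decoded = tf.keras.layers.Conv2D(input_shape[-1], 3, padding="same", activation="tanh")(h)

    return tf.keras.Model(x, [encoded, decoded])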
Example No. 13
    def classifier_main(self, inputs, classes):
        outputs = average_pooling2d(pool_size=(7, 7),
                                    strides=1,
                                    padding='valid')(inputs)
        outputs = flatten()(outputs)
        outputs = dropout(0.4)(outputs)
        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        return outputs
Example No. 14
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        with tf.name_scope('Preprocessing'):
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            preprocessed_input = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)
        x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]],
                          "CONSTANT")
        conv1 = conv2d('conv1',
                       x=x_padded,
                       w=None,
                       num_filters=self.output_channels['conv1'],
                       kernel_size=(3, 3),
                       stride=(2, 2),
                       l2_strength=self.args.l2_strength,
                       bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled,
                       is_training=self.is_training,
                       activation=tf.nn.relu,
                       padding='VALID')
        padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(padded,
                               size=(3, 3),
                               stride=(2, 2),
                               name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4,
                                  size=(7, 7),
                                  stride=(1, 1),
                                  name='global_pool',
                                  padding='VALID')

        logits_unflattened = conv2d('fc',
                                    global_pool,
                                    w=None,
                                    num_filters=self.args.num_classes,
                                    kernel_size=(1, 1),
                                    l2_strength=self.args.l2_strength,
                                    bias=self.args.bias,
                                    is_training=self.is_training)
        self.logits = flatten(logits_unflattened)

        self.__init_output()
Example No. 15
def attention_estimator(x, g):
    with tf.variable_scope(None, 'attention_estimator'):
        _, height, width, x_dim = x.get_shape().as_list()
        g_dim = g.get_shape().as_list()[-1]

        if not x_dim == g_dim:
            x = dense(x, g_dim, use_bias=False)

        c = tf.reduce_sum(x * tf.expand_dims(tf.expand_dims(g, 1), 1), axis=-1)
        a = tf.nn.softmax(flatten(c))
        a = tf.reshape(a, (-1, height, width))
        g_out = x * tf.expand_dims(a, -1)
        g_out = tf.reduce_sum(g_out, axis=[1, 2])
        return g_out, a
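attention_estimator scores each spatial position of x against the global feature g, softmaxes the scores over all positions, and returns the attention-weighted sum of x together with the attention map. A NumPy sketch of the same computation, assuming x has already been projected to g's dimensionality (i.e. the dense branch above is not needed):

import numpy as np


def attention_estimator_np(x, g):
    # x: (B, H, W, C) local features, g: (B, C) global feature
    c = np.sum(x * g[:, None, None, :], axis=-1)             # compatibility scores, (B, H, W)
    c_flat = c.reshape(c.shape[0], -1)
    e = np.exp(c_flat - c_flat.max(axis=1, keepdims=True))   # softmax over all spatial positions
    a = (e / e.sum(axis=1, keepdims=True)).reshape(c.shape)  # attention map, (B, H, W)
    g_out = np.sum(x * a[..., None], axis=(1, 2))            # attention-weighted sum, (B, C)
    return g_out, a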
Example No. 16
    def __call__(self, x, reuse=True, is_feature=False, is_training=True):
        nb_downsampling = int(np.log2(self.input_shape[0] // 4))
        nb_blocks = [2]
        if nb_downsampling - 2 > 0:
            nb_blocks += [4] * (nb_downsampling - 2)

        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            first_conv_channel = 32
            _x = conv_block(x,
                            is_training=is_training,
                            filters=first_conv_channel,
                            activation_='lrelu',
                            kernel_size=(4, 4),
                            sampling='down',
                            normalization=self.normalization)

            for index, nb_block in enumerate(nb_blocks):
                for i in range(nb_block):
                    _x = discriminator_block(_x,
                                             is_training=is_training,
                                             filters=first_conv_channel *
                                             (2**index),
                                             activation_='lrelu',
                                             kernel_size=(3, 3),
                                             normalization=self.normalization,
                                             residual=True)
                _x = conv_block(_x,
                                is_training=is_training,
                                filters=first_conv_channel * (2**(index + 1)),
                                activation_='lrelu',
                                kernel_size=(4, 4) if index < 2 else (3, 3),
                                sampling='down',
                                normalization=self.normalization)

            if is_feature:
                return _x

            _x = flatten(_x)

            if self.normalization == 'spectral':
                _x = sn_dense(_x,
                              is_training=is_training,
                              units=1,
                              activation_=None)
            else:
                _x = dense(_x, units=1, activation_=None)
            return _x
Example No. 17
    def __call__(self, x, is_training=True, reuse=False, *args, **kwargs):
        with tf.variable_scope(self.__class__.__name__) as vs:
            if reuse:
                vs.reuse_variables()
            conv_params = {'is_training': is_training, 'activation_': 'relu'}

            x = conv_block(x, 16, **conv_params)
            x = conv_block(x, 16, **conv_params, sampling='pool')
            x = conv_block(x, 32, **conv_params)
            x = conv_block(x, 32, **conv_params, sampling='pool')

            x = flatten(x)
            x = dense(x, 512, activation_='relu')
            x = dense(x, self.nb_classes)
            return x
Example No. 18
    def inference(self):
        print('input_shape:', self.X.shape.as_list())
        conv1 = conv3d('conv1',
                       x=self.X,
                       w=None,
                       num_filters=self.output_channels['conv1'],
                       kernel_size=(3, 3, 3),
                       stride=(1, 2, 2),
                       l2_strength=L2_DECAY,
                       bias=0.0,
                       batchnorm_enabled=True,
                       is_training=self.is_training,
                       activation=tf.nn.relu,
                       padding='SAME')
        print('conv1_shape:', conv1.shape.as_list())
        max_pool = max_pool_3d(conv1,
                               size=(3, 3, 3),
                               stride=(2, 2, 2),
                               name='max_pool')
        print('max3d_shape:', max_pool.shape.as_list())
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        print('stag2_shape:', stage2.shape.as_list())
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        print('stag3_shape:', stage3.shape.as_list())
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        print('stag4_shape:', stage4.shape.as_list())
        global_pool = avg_pool_3d(stage4,
                                  size=(1, 5, 5),
                                  stride=(1, 1, 1),
                                  name='global_pool',
                                  padding='VALID')
        print('avg3d_shape:', global_pool.shape.as_list())

        drop_out = dropout(global_pool,
                           is_training=self.is_training,
                           keep_prob=0.5)
        logits_unflattened = conv3d('fc',
                                    drop_out,
                                    w=None,
                                    num_filters=NUM_CLASS,
                                    kernel_size=(1, 1, 1),
                                    l2_strength=L2_DECAY,
                                    bias=0.0,
                                    is_training=self.is_training)
        print('convn_shape:', logits_unflattened.shape.as_list())
        logits = flatten(logits_unflattened)
        print('fc_re_shape:', logits.shape.as_list())
        return logits
Example No. 19
    def discriminator(self, input, reuse=True):
        depth = [64, 128, 256, 512, 1]
        with tf.variable_scope("Discriminator", reuse=reuse):
            with tf.variable_scope("d_1", reuse=reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(input, depth[0], 'd_w1')))
            with tf.variable_scope("d_2", reuse=reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(net, depth[1], 'd_w2')))
            with tf.variable_scope("d_3", reuse=reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(net, depth[2], 'd_w3')))
            with tf.variable_scope("d_4", reuse=reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(net, depth[3], 'd_w4')))
            with tf.variable_scope("d_5", reuse=reuse):
                net = layers.flatten(net)
                net = layers.dc_dense(net, 1, name="d_fc")

        return net
Example No. 20
    def classifier_aux(self, inputs, classes):
        filters = 128
        outputs = average_pooling2d(pool_size=(5, 5),
                                    strides=3,
                                    padding='valid')(inputs)
        outputs = conv2d(filters=filters,
                         kernel_size=(1, 1),
                         strides=1,
                         padding='same')(outputs)
        outputs = flatten()(outputs)
        outputs = relu()(outputs)
        outputs = dense(1024)(outputs)
        outputs = relu()(outputs)
        outputs = dropout(0.7)(outputs)
        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        return outputs
Example No. 21
File: LeNet.py Project: iiharu/NN
    def build(self, input_shape=(28, 28, 1), classes=10):
        inputs = keras.Input(shape=input_shape)

        outputs = flatten()(inputs)
        outputs = dense(300)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(100)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(10)(outputs)
        outputs = softmax()(outputs)

        model = keras.Model(inputs, outputs)

        model.summary()

        return model
Example No. 22
    def __call__(self, x,
                 is_training=True,
                 reuse=False,
                 *args,
                 **kwargs):
        with tf.variable_scope(self.__class__.__name__) as vs:
            if reuse:
                vs.reuse_variables()
            conv_params = {'is_training': is_training,
                           'activation_': 'relu',
                           'normalization': 'batch'}
            x = conv_block(x, 64, **conv_params, dropout_rate=0.3)
            x = conv_block(x, 64, **conv_params, dropout_rate=0.3)

            x = conv_block(x, 128, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 128, **conv_params, dropout_rate=0.4)

            x = conv_block(x, 256, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 256, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 256, **conv_params)
            l1 = x
            x = max_pool2d(x)

            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params)
            l2 = x
            x = max_pool2d(x)

            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
            x = conv_block(x, 512, **conv_params)
            l3 = x
            x = max_pool2d(x)

            x = conv_block(x, 512, **conv_params, sampling='pool')
            x = conv_block(x, 512, **conv_params, sampling='pool')

            x = flatten(x)
            g = dense(x, 512, activation_='relu')

            x, attentions = attention_module([l1, l2, l3], g)
            x = dense(x, self.nb_classes)
            return x, attentions
Example No. 23
File: ResNet.py Project: iiharu/NN
 def __init__(
         self,
         blocks,
         filters,
         bottleneck=False,
         input_layers=[
             batch_normalization(),
             relu(),
             conv2d(filters=64, kernel_size=(7, 7), strides=2),
         ],
         output_layers=[average_pooling2d(pool_size=(2, 2)),
                        flatten()]):
     self.blocks = blocks
     self.filters = filters
     self.bottleneck = bottleneck
      self.bn_axis = -1 if keras.backend.image_data_format() == 'channels_last' else 1
     self.input_layers = input_layers
     self.output_layer = output_layers
Example No. 24
 def __build(self):
     self.__init_global_epoch()
     self.__init_global_step()
     self.__init_input()
     # 0. Image preprocessing: subtract the channel mean and multiply by the normalizer
     with tf.name_scope('Preprocessing'):
         # Split into the three channels
         red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
         # For each channel: subtract the mean, multiply by the normalizer, then concat/merge the channels back together
         preprocessed_input = tf.concat([
             tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
             tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
             tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
         ], 3)
     # 1. conv1: 3*3*3*24 convolution, stride 2, BN, ReLU
     # Pad the borders
     x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
     # conv
     conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                    stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                    batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                    activation=tf.nn.relu, padding='VALID')
     # 2. Max pooling: 3*3 kernel, stride 2
     padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
     max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
     # 3. One stride-2 module (non-grouped pointwise conv, channel concat), then 3 stride-1 modules with channel-wise addition
     stage2 = self.__stage(max_pool, stage=2, repeat=3)
     # 4. One stride-2 module (grouped pointwise conv, channel concat), then 7 stride-1 modules with channel-wise addition
     stage3 = self.__stage(stage2, stage=3, repeat=7)
     # 5. One stride-2 module (grouped pointwise conv, channel concat), then 3 stride-1 modules with channel-wise addition
     stage4 = self.__stage(stage3, stage=4, repeat=3)
     # 6. Global average pooling: 7*7 kernel, stride 1
     global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')
     # 7. 1*1 pointwise convolution producing as many feature maps as there are classes
     logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                 kernel_size=(1, 1),  # 1*1 pointwise convolution
                                 l2_strength=self.args.l2_strength,
                                 bias=self.args.bias,
                                 is_training=self.is_training)
     # 8. Flatten to a 1-D vector
     self.logits = flatten(logits_unflattened)
     # 9. Compute the loss and outputs
     self.__init_output()
Example No. 25
def ConvNet(x,
            input_shape,
            filters_out=64,
            n_classes=10,
            non_linearity='relu'):
    # Basic CNN from Cleverhans MNIST tutorial:
    # https://github.com/mmarius/cleverhans/blob/master/cleverhans_tutorials/tutorial_models.py#L155
    h = x
    input_shape = list(input_shape)
    h, output_shape = l.conv2d(h,
                               kernel_size=8,
                               stride=2,
                               filters_in=input_shape[-1],
                               filters_out=filters_out,
                               padding='SAME',
                               name='conv1')
    h = l.non_linearity(h, name=non_linearity)
    h, output_shape = l.conv2d(h,
                               kernel_size=6,
                               stride=2,
                               filters_in=output_shape[-1],
                               filters_out=filters_out * 2,
                               padding='VALID',
                               name='conv2')
    h = l.non_linearity(h, name=non_linearity)
    h, output_shape = l.conv2d(h,
                               kernel_size=5,
                               stride=1,
                               filters_in=output_shape[-1],
                               filters_out=filters_out * 2,
                               padding='VALID',
                               name='conv3')
    h = l.non_linearity(h, name=non_linearity)

    h, output_shape = l.flatten(input_shape=output_shape, x=h)
    logits, output_shape = l.linear(input_shape=output_shape,
                                    n_hidden=n_classes,
                                    x=h,
                                    name='output-layer')
    utils.variable_summaries(logits, name='unscaled-logits-output-layer')

    return logits
Example No. 26
    def build_network(self, scope):
        with tf.variable_scope(scope):
            X = tf.placeholder(
                shape=[None, cfg.height, cfg.width, cfg.state_length],
                dtype=tf.float32)

            conv1 = conv(X, [8, 8, cfg.state_length, 32], [1, 4, 4, 1],
                         activation_fn=tf.nn.relu,
                         scope="conv1")
            conv2 = conv(conv1, [4, 4, 32, 64], [1, 2, 2, 1],
                         activation_fn=tf.nn.relu,
                         scope="conv2")
            conv3 = conv(conv2, [3, 3, 64, 64], [1, 1, 1, 1],
                         activation_fn=tf.nn.relu,
                         scope="conv3")
            flt, dim = flatten(conv3)
            fc1 = fc(flt, dim, 512, activation_fn=tf.nn.relu, scope="fc1")
            output = fc(fc1, 512, self.action_n, scope="output")

        return X, output
Example No. 27
def vgg_7(x, layers=3, base_channel=32, input_channel=2):
    output_channel = base_channel

    for i in range(layers):
        x = conv2d(x, [3, 3, input_channel, output_channel], 'conv_0_'+str(i))
        input_channel = output_channel

        x = conv2d(x, [3, 3, input_channel, output_channel], 'conv_1_'+str(i))
        input_channel = output_channel
        output_channel *= 2

        x = maxpooling(x)

    x = flatten(x)
    x = fully_connnected(x, 256, 'fc_1')
    x = fully_connnected(x, 1, 'fc_2', activation='no')

    logits = tf.squeeze(x)

    return logits
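A hypothetical call site for vgg_7 in TF1-style graph mode; the 64x64, 2-channel input shape is an assumption, and the project-specific conv2d/maxpooling/fully_connnected helpers must already be in scope.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
x = tf.placeholder(tf.float32, [None, 64, 64, 2])  # assumed input shape
logits = vgg_7(x, layers=3, base_channel=32, input_channel=2)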
Example No. 28
 def _model(self, X, reuse=False):
     X = tf.cast(X, tf.float32)
     
     with tf.variable_scope(self.scope, reuse=reuse):
         """
         conv1 = conv(X, [9, 9, cfg.state_length, 256], [1, 1, 1, 1], activation_fn=tf.nn.relu)
         primaryCaps = capsule(conv1, num_outputs=32, vec_len=8, kernel=9, strides=2)
         digitCaps = capsule(primaryCaps, num_outputs=self.a, vec_len=16)
         v_length = tf.sqrt(tf.reduce_sum(tf.square(digitCaps), axis=2, keepdims=True))
         output = tf.squeeze(tf.nn.softmax(v_length, axis=1), axis=[2, 3])
         """
         
         conv1 = conv(X, [8, 8, cfg.state_length, 32], [1, 4, 4, 1], activation_fn=tf.nn.relu, scope="conv1")
         conv2 = conv(conv1, [4, 4, 32, 64], [1, 2, 2, 1], activation_fn=tf.nn.relu, scope="conv2")
          conv3 = conv(conv2, [3, 3, 64, 64], [1, 1, 1, 1], activation_fn=tf.nn.relu, scope="conv3")
         flt, dim = flatten(conv3)
         fc1 = fc(flt, dim, 512, activation_fn=tf.nn.relu, scope="fc1")
         output = fc(fc1, 512, self.a, scope="output")
     
         return output
Example No. 29
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        x_resized = self.__resize(self.X)
        conv1 = conv2d('conv1', x=x_resized, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                       stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training)
        conv1_padded = tf.pad(conv1, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(conv1_padded, size=(3, 3), stride=(2, 2), name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool')
        flattened = flatten(global_pool)

        self.logits = dense('fc', flattened, w=None, output_dim=self.args.num_classes,
                            l2_strength=self.args.l2_strength,
                            bias=self.args.bias,
                            is_training=self.is_training)
        self.__init_output()
Example No. 30
    def __init__(self,
                 input_layers=None,
                 outputs_layers=None,
                 inception_layers=None):
        self.inception_version = 1
        if input_layers is None:
            self.input_layers = [
                conv2d(filters=64,
                       kernel_size=(7, 7),
                       strides=2,
                       padding='same'),
                max_pooling2d(pool_size=(3, 3), strides=2, padding='same'),
                batch_normalization(),
                conv2d(filters=192,
                       kernel_size=(1, 1),
                       strides=1,
                       padding='valid'),
                conv2d(filters=192,
                       kernel_size=(3, 3),
                       strides=1,
                       padding='same'),
                batch_normalization(),
                max_pooling2d(pool_size=(3, 3), strides=2, padding='same')
            ]
        else:
            self.input_layers = input_layers

        if outputs_layers is None:
            self.output_layers = [
                average_pooling2d(pool_size=(7, 7), strides=1,
                                  padding='valid'),
                flatten(),
                dropout(0.4),
                dense(1000),
                softmax()
            ]
        else:
            self.output_layers = outputs_layers
Example No. 31
def SimpleNet1(x,
               input_shape,
               neurons=1024,
               n_classes=10,
               non_linearity='relu',
               create_summaries=True):
    h = x
    h, output_shape = l.flatten(input_shape, h)
    h, output_shape = l.linear(output_shape, neurons, h, name='linear1')
    if create_summaries:
        utils.variable_summaries(h, name='linear-comb-hidden-layer')

    h = l.non_linearity(h, name=non_linearity)
    if create_summaries:
        utils.variable_summaries(h, name='activation-hidden-layer')
        sparsity = tf.nn.zero_fraction(h,
                                       name='activation-hidden-layer-sparsity')
        tf.summary.scalar(sparsity.op.name, sparsity)

    logits, output_shape = l.linear(output_shape, n_classes, h, name='output')
    if create_summaries:
        utils.variable_summaries(logits, name='unscaled-logits-output-layer')

    return logits