Code example #1
    def _build_model(self, x_input, **kwargs):
        # Two conv + max-pool blocks, three stacked conv layers, then two
        # dropout-regularized fully connected layers and a linear output layer.
        conv1 = conv_layer(x_input, self.weights[0], self.bias[0])
        pool1 = maxpool_layer(conv1, poolSize=3, stride=2)

        conv2 = conv_layer(pool1, self.weights[1], self.bias[1])
        pool2 = maxpool_layer(conv2, poolSize=3, stride=2)

        conv3 = conv_layer(pool2, self.weights[2], self.bias[2])
        conv4 = conv_layer(conv3, self.weights[3], self.bias[3])
        conv5 = conv_layer(conv4, self.weights[4], self.bias[4])

        # Flatten the final conv feature map for the fully connected layers.
        flat1 = tf.reshape(conv5, [-1, conv5.shape[1] * conv5.shape[2] * conv5.shape[3]])

        fully1 = tf.nn.relu(fc_layer(flat1, self.weights[5], self.bias[5]))
        fully1_dropout = tf.nn.dropout(fully1, rate=self.dropout)

        fully2 = tf.nn.relu(fc_layer(fully1_dropout, self.weights[6], self.bias[6]))
        fully2_dropout = tf.nn.dropout(fully2, rate=self.dropout)

        y_predict = fc_layer(fully2_dropout, self.weights[7], self.bias[7])

        # print(conv1.shape, pool1.shape, conv2.shape, pool2.shape, flat1.shape, fully1.shape, y_predict.shape)

        return y_predict
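The snippet relies on conv_layer, maxpool_layer, and fc_layer helpers defined elsewhere in the project. A minimal sketch of what they could look like, assuming TF2-style ops and weight/bias variables built by the caller (the signatures and defaults are guesses, not the project's actual code):

import tensorflow as tf

def conv_layer(x, w, b, stride=1, padding='SAME'):
    # Hypothetical helper: conv2d + bias + ReLU, matching the
    # (input, weights, bias) call pattern used above.
    y = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=padding)
    return tf.nn.relu(tf.nn.bias_add(y, b))

def maxpool_layer(x, poolSize=2, stride=2, padding='SAME'):
    return tf.nn.max_pool2d(x, ksize=poolSize, strides=stride, padding=padding)

def fc_layer(x, w, b):
    # Linear layer only; the caller applies the nonlinearity.
    return tf.nn.bias_add(tf.matmul(x, w), b)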
Code example #2
File: dtn.py  Project: tbornt/UCDIG_tensorflow
    def f(self, image, reuse=False):
        """
        image:
            32 x 32 x 3

        network:
            1. conv2d, 64 filters, maxpool, relu
            2. conv2d, 128 filters, maxpool, relu
            3. conv2d, 256 filters, maxpool, relu
            4. conv2d, 128 filters, maxpool, relu
        """
        if reuse:
            tf.get_variable_scope().reuse_variables()

        conv1 = conv_layer(image, 64, scope='f_conv1')
        conv1 = maxpool_layer(conv1)  # 16 x 16 x 64, assuming stride-1 convs and stride-2 pooling

        conv2 = conv_layer(conv1, 128, scope='f_conv2')
        conv2 = maxpool_layer(conv2)  # 8 x 8 x 128

        conv3 = conv_layer(conv2, 256, scope='f_conv3')
        conv3 = maxpool_layer(conv3)  # 4 x 4 x 256

        conv4 = conv_layer(conv3, 128, scope='f_conv4')
        conv4 = maxpool_layer(conv4)  # 2 x 2 x 128

        return conv4
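Since f guards variable creation with a reuse flag, it is presumably called more than once on different inputs with shared weights. An illustrative call pattern under that assumption (model, source_images, and target_images are placeholders for this sketch, not names from the project):

feat_source = model.f(source_images)              # first call creates the f_conv* variables
feat_target = model.f(target_images, reuse=True)  # second call reuses the same weights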
Code example #3
File: dtn.py  Project: tbornt/UCDIG_tensorflow
    def discriminator(self, image, reuse=False):
        """
        image:
            32 x 32 x 3

        network:
            four convolution layers with ReLU (batch-normalized after the first),
            followed by a 3-way fully connected softmax classifier
        """
        if reuse:
            tf.get_variable_scope().reuse_variables()

        conv0 = tf.nn.relu(conv_layer(image, 64, scope='d_conv0'))
        conv1 = tf.nn.relu(self.d_bn1(conv_layer(conv0, 128, scope='d_conv1')))
        conv2 = tf.nn.relu(self.d_bn2(conv_layer(conv1, 256, scope='d_conv2')))
        conv3 = tf.nn.relu(self.d_bn3(conv_layer(conv2, 512, scope='d_conv3')))
        fc4 = fc_layer(tf.reshape(conv3, [self.batch_size, -1]),
                       3,
                       scope='d_fc1')
        return tf.nn.softmax(fc4), fc4
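Returning the raw fc4 logits alongside the softmax lets a loss be built on the numerically stable *_with_logits ops instead of the softmax output. A hedged sketch of such a loss, assuming integer class labels in {0, 1, 2} (a three-way split is typical of DTN-style discriminators, but the exact label semantics here are an assumption):

# Hypothetical loss wiring; class indices and tensor names are assumptions.
probs, logits = model.discriminator(batch_images)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=batch_labels,
                                                   logits=logits))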
Code example #4
    def _build_model(self, **kwargs):
        """
        Build model.
        :param kwargs: dict, extra arguments for building AlexNet.
            - image_mean: np.ndarray, mean image for each input channel, shape: (C,).
            - dropout_prob: float, the probability of dropping out each unit in FC layer.
        :return d: dict, containing outputs on each layer.
        """
        d = dict()  # Dictionary to save intermediate values returned from each layer.
        x_mean = kwargs.pop('image_mean', 0.0)
        dropout_prob = kwargs.pop('dropout_prob', 0.0)
        num_classes = int(self.y.get_shape()[-1])

        # The probability of keeping each unit for dropout layers
        keep_prob = tf.cond(pred=self.is_train,
                            true_fn=lambda: 1. - dropout_prob,
                            false_fn=lambda: 1.)

        # input
        x_input = self.x - x_mean  # perform mean subtraction

        # First convolution block
        # conv1 - relu1 - pool1 - drop1
        d['conv1'] = conv_layer(x_input,
                                3,
                                1,
                                64,
                                padding='SAME',
                                weights_stddev=0.01,
                                biases_value=1.0)
        print('conv1.shape', d['conv1'].get_shape().as_list())
        d['relu1'] = tf.nn.relu(d['conv1'])
        # max_pool(x, side_l, stride, padding='SAME')
        d['pool1'] = max_pool(d['relu1'], 2, 1, padding='SAME')
        d['drop1'] = tf.nn.dropout(d['pool1'], rate=1. - keep_prob)
        print('pool1.shape', d['pool1'].get_shape().as_list())

        # Second convolution block
        # conv2 - relu2 - pool2 - drop2
        d['conv2'] = conv_layer(d['drop1'],
                                3,
                                1,
                                128,
                                padding='SAME',
                                weights_stddev=0.01,
                                biases_value=1.0)
        print('conv2.shape', d['conv2'].get_shape().as_list())
        d['relu2'] = tf.nn.relu(d['conv2'])
        d['pool2'] = max_pool(d['relu2'], 2, 1, padding='SAME')
        d['drop2'] = tf.nn.dropout(d['pool2'], rate=1. - keep_prob)
        print('pool2.shape', d['pool2'].get_shape().as_list())

        # Third convolution block
        # conv3 - relu3 - pool3 - drop3
        d['conv3'] = conv_layer(d['drop2'],
                                3,
                                1,
                                256,
                                padding='SAME',
                                weights_stddev=0.01,
                                biases_value=1.0)
        print('conv3.shape', d['conv3'].get_shape().as_list())
        d['relu3'] = tf.nn.relu(d['conv3'])
        d['pool3'] = max_pool(d['relu3'], 2, 1, padding='SAME')
        d['drop3'] = tf.nn.dropout(d['pool3'], rate=1. - keep_prob)
        print('pool3.shape', d['pool3'].get_shape().as_list())

        # Flatten feature maps
        f_dim = int(np.prod(d['drop3'].get_shape()[1:]))
        f_emb = tf.reshape(d['drop3'], [-1, f_dim])

        # fc4
        d['fc4'] = fc_layer(f_emb,
                            1024,
                            weights_stddev=0.005,
                            biases_value=0.1)
        d['relu4'] = tf.nn.relu(d['fc4'])
        print('fc4.shape', d['relu4'].get_shape().as_list())

        # fc5
        d['fc5'] = fc_layer(d['relu4'],
                            1024,
                            weights_stddev=0.005,
                            biases_value=0.1)
        d['relu5'] = tf.nn.relu(d['fc5'])
        print('fc5.shape', d['relu5'].get_shape().as_list())
        d['logits'] = fc_layer(d['relu5'],
                               num_classes,
                               weights_stddev=0.01,
                               biases_value=0.0)
        print('logits.shape', d['logits'].get_shape().as_list())

        # softmax
        d['pred'] = tf.nn.softmax(d['logits'])

        return d
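The call sites above pin down the helper signatures as conv_layer(x, side_l, stride, out_depth, padding, weights_stddev, biases_value), max_pool(x, side_l, stride, padding), and fc_layer(x, out_dim, weights_stddev, biases_value). A minimal sketch consistent with those signatures, assuming truncated-normal weight initialization (the project's actual helpers may differ):

import tensorflow as tf

def conv_layer(x, side_l, stride, out_depth, padding='SAME',
               weights_stddev=0.01, biases_value=0.0):
    # Hypothetical helper matching the call sites above.
    in_depth = int(x.get_shape()[-1])
    w = tf.Variable(tf.random.truncated_normal(
        [side_l, side_l, in_depth, out_depth], stddev=weights_stddev))
    b = tf.Variable(tf.constant(biases_value, shape=[out_depth]))
    return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=padding) + b

def max_pool(x, side_l, stride, padding='SAME'):
    return tf.nn.max_pool2d(x, ksize=side_l, strides=stride, padding=padding)

def fc_layer(x, out_dim, weights_stddev=0.01, biases_value=0.0):
    in_dim = int(x.get_shape()[-1])
    w = tf.Variable(tf.random.truncated_normal([in_dim, out_dim],
                                               stddev=weights_stddev))
    b = tf.Variable(tf.constant(biases_value, shape=[out_dim]))
    return tf.matmul(x, w) + b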