Example #1
    def create(self):
        self.X = tf.reshape(
            self.X,
            shape=[-1, self.IMAGE_SIZE, self.IMAGE_SIZE, self.CHANEELS])
        conv1 = util.conv_layer(self.X,
                                ksize=[11, 11, self.CHANEELS, 96],
                                strides=[1, 4, 4, 1],
                                name='conv1')
        pool1 = util.max_pool_layer(conv1,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    name='pool1')

        conv2 = util.conv_layer(pool1,
                                ksize=[5, 5, 96, 256],
                                strides=[1, 1, 1, 1],
                                name='conv2',
                                padding='SAME',
                                group=2)
        pool2 = util.max_pool_layer(conv2,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    name='pool2')

        conv3 = util.conv_layer(pool2,
                                ksize=[3, 3, 256, 384],
                                strides=[1, 1, 1, 1],
                                name='conv3',
                                padding='SAME')

        conv4 = util.conv_layer(conv3,
                                ksize=[3, 3, 384, 384],
                                strides=[1, 1, 1, 1],
                                name='conv4',
                                padding='SAME',
                                group=2)

        conv5 = util.conv_layer(conv4,
                                ksize=[3, 3, 384, 256],
                                strides=[1, 1, 1, 1],
                                name='conv5',
                                padding='SAME',
                                group=2)
        pool5 = util.max_pool_layer(conv5,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    name='pool5')

        fc6 = util.full_connected_layer(pool5, 4096, name='fc6')
        fc6_dropout = util.dropout(fc6, self.KEEP_PROB)

        fc7 = util.full_connected_layer(fc6_dropout, 4096, name='fc7')
        fc7_dropout = util.dropout(fc7, self.KEEP_PROB)

        self.fc8 = util.full_connected_layer(fc7_dropout,
                                             self.NUM_CLASSES,
                                             name='fc8',
                                             relu=False)
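The util helpers referenced throughout these examples are defined elsewhere in the project. A minimal sketch of what conv_layer and max_pool_layer might look like, assuming ksize follows tf.nn.conv2d's [height, width, in_channels, out_channels] filter layout and that the helpers apply a ReLU by default (the relu=False flag on fc8 suggests as much for the fully connected version); the real implementations, in particular the group argument for AlexNet-style grouped convolutions, may differ:

import tensorflow as tf

def conv_layer(x, ksize, strides, name, padding='VALID', group=1, relu=True):
    # Sketch only: ignores group > 1 (AlexNet-style split convolutions).
    with tf.variable_scope(name):
        w = tf.get_variable('weights', shape=ksize,
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('biases', shape=[ksize[-1]],
                            initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(tf.nn.conv2d(x, w, strides=strides, padding=padding), b)
        return tf.nn.relu(out) if relu else out

def max_pool_layer(x, ksize, strides, name):
    return tf.nn.max_pool(x, ksize=ksize, strides=strides,
                          padding='VALID', name=name)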
Example #2
    def __init__(self, optimizer, activation):
        super().__init__(optimizer, activation)

        ############################################################################
        #                             Define the graph                             #
        ############################################################################
        # It turns out that the network from ex03 is already capable of memorizing
        # the entire training or validation set, so we need to tweak generalization,
        # not capacity.
        # To speed up convergence, we added batch normalization.
        # Our best result was the Adam optimizer with bs=32, lr=0.001 (only
        # feasible because of the normalization).
        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name='input')
        y_ = tf.placeholder(dtype=tf.int32, shape=(None,), name='labels')

        self.x = x
        self.y_ = y_

        kernel_shape1 = (5, 5, 1, 8)
        activation1 = conv_layer(x, kernel_shape1, activation=activation)

        normalize1 = batch_norm_layer(activation1)

        pool1 = weighted_pool_layer(
            normalize1, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)
        )

        kernel_shape2 = (3, 3, 8, 10)
        activation2 = conv_layer(pool1, kernel_shape2, activation=activation)

        normalize2 = batch_norm_layer(activation2)

        pool2 = weighted_pool_layer(
            normalize2, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)
        )

        pool2_reshaped = tf.reshape(pool2, (-1, 8*8*10), name='reshaped1')
        fc1 = fully_connected(pool2_reshaped, 512, with_activation=True,
                              activation=activation)

        fc2_logit = fully_connected(fc1, 10, activation=activation)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=fc2_logit, labels=y_)
        mean_cross_entropy = tf.reduce_mean(cross_entropy)
        self.mean_cross_entropy = mean_cross_entropy
        train_step = optimizer.minimize(mean_cross_entropy)
        self.train_step = train_step
        self.prediction = tf.cast(tf.argmax(fc2_logit, 1), tf.int32)

        # check whether the strongest-firing neuron matches the true label
        correct_prediction = tf.equal(self.prediction, y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.accuracy = accuracy
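A hypothetical way to drive this class, using the standard TF1 feed-dict loop. Model, next_batch, and the batch arrays below are placeholder names, not part of the original project; the optimizer and batch size follow the comment above (Adam, bs=32, lr=0.001):

# Usage sketch (assumed names, not from the original project)
model = Model(tf.train.AdamOptimizer(learning_rate=0.001), tf.nn.relu)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = next_batch(32)  # (N, 32, 32, 1) images, (N,) int labels
        _, loss = sess.run([model.train_step, model.mean_cross_entropy],
                           feed_dict={model.x: batch_x, model.y_: batch_y})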
Example #3
File: lenet.py  Project: heiheiya/LeNet-5
    def create(self):
        self.X = tf.reshape(self.X, shape=[-1, self.IMAGE_SIZE, self.IMAGE_SIZE, self.CHANEELS])
        conv1 = util.conv_layer(self.X, ksize=[3, 3, 1, 32], strides=[1, 1, 1, 1], name='conv1')
        pool1 = util.max_pool_layer(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], name='pool1')
        conv2 = util.conv_layer(pool1, ksize=[3, 3, 32, 64], strides=[1, 1, 1, 1], name='conv2')
        pool2 = util.max_pool_layer(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], name='pool2')
        conv3 = util.conv_layer(pool2, ksize=[3, 3, 64, 128], strides=[1, 1, 1, 1], name='conv3')
        pool3 = util.max_pool_layer(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], name='pool3')
        fc4 = util.full_connected_layer(pool3, 625, name='fc4', keep_prob=self.KEEP_PROB)
        fc5 = util.full_connected_layer(fc4, 256, name='fc5', keep_prob=self.KEEP_PROB)
        self.fc6 = util.full_connected_layer(fc5, 10, name='fc6')
Example #4
    def __init__(self, optimizer, activation):
        super().__init__(optimizer, activation)

        ############################################################################
        #                             Define the graph                             #
        ############################################################################
        # It turns out that the network from ex03 is already capable of memorizing
        # the entire training or validation set, so we need to tweak generalization,
        # not capacity.
        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name='input')
        y_ = tf.placeholder(dtype=tf.int32, shape=(None, ), name='labels')

        self.x = x
        self.y_ = y_

        kernel_shape1 = (5, 5, 1, 16)
        activation1 = conv_layer(x, kernel_shape1, activation=activation)

        pool1 = max_pool_layer(activation1,
                               ksize=(1, 2, 2, 1),
                               strides=(1, 2, 2, 1))

        kernel_shape2 = (3, 3, 16, 32)
        activation2 = conv_layer(pool1, kernel_shape2, activation=activation)

        pool2 = max_pool_layer(activation2,
                               ksize=(1, 2, 2, 1),
                               strides=(1, 2, 2, 1))

        pool2_reshaped = tf.reshape(pool2, (-1, 2048), name='reshaped1')
        fc1 = fully_connected(pool2_reshaped,
                              512,
                              with_activation=True,
                              activation=activation)

        fc2_logit = fully_connected(fc1, 10, activation=activation)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=fc2_logit, labels=y_)
        mean_cross_entropy = tf.reduce_mean(cross_entropy)
        self.mean_cross_entropy = mean_cross_entropy
        train_step = optimizer.minimize(mean_cross_entropy)
        self.train_step = train_step

        # check whether the strongest-firing neuron matches the true label
        correct_prediction = tf.equal(
            tf.argmax(fc2_logit, 1, output_type=tf.int32), y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.accuracy = accuracy
Example #5
    def set_model(self, figs, is_training, reuse=False):
        # return only logits

        h = figs

        # convolution
        with tf.variable_scope(self.name_scope_conv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):
                if i == 0:
                    conved = conv_layer(inputs=h,
                                        out_num=out_chan,
                                        filter_width=5,
                                        filter_hight=5,
                                        stride=1,
                                        l_id=i)

                    h = tf.nn.relu(conved)
                    #h = lrelu(conved)
                else:
                    conved = conv_layer(inputs=h,
                                        out_num=out_chan,
                                        filter_width=5,
                                        filter_hight=5,
                                        stride=2,
                                        l_id=i)

                    bn_conved = batch_norm(conved, i, is_training)
                    h = tf.nn.relu(bn_conved)
                    #h = lrelu(bn_conved)

        feature_image = h

        # full connect
        dim = get_dim(h)
        h = tf.reshape(h, [-1, dim])

        with tf.variable_scope(self.name_scope_fc, reuse=reuse):
            weights = get_weights('fc', [dim, self.fc_dim], 0.02)
            biases = get_biases('fc', [self.fc_dim], 0.0)
            h = tf.matmul(h, weights) + biases
            h = batch_norm(h, 'fc', is_training)
            h = tf.nn.relu(h)

            weights = get_weights('fc2', [self.fc_dim, 1], 0.02)
            biases = get_biases('fc2', [1], 0.0)
            h = tf.matmul(h, weights) + biases

        return h, feature_image
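batch_norm here is another project-level helper that takes the tensor, an identifier, and the training flag. A minimal sketch, assuming it wraps TF's built-in batch normalization and keys the variables by that identifier; the project's own implementation may differ:

def batch_norm(x, ident, is_training):
    # Assumed wrapper; `ident` may be a layer index or a string like 'fc'.
    return tf.layers.batch_normalization(x, training=is_training,
                                         name='bn_{}'.format(ident))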
Example #6
    def build_model(self, image, reuse=False):
        with tf.variable_scope('is_training', reuse=True):
            is_training = tf.get_variable('is_training', dtype=tf.bool)

        with tf.variable_scope("D_" + self.signature) as scope:
            if reuse:
                scope.reuse_variables()

            conv_num = self.conv_infos['conv_layer_number']
            conv_filter = self.conv_infos['filter']
            conv_stride = self.conv_infos['stride']

            prev = image

            for i in range(conv_num):
                if i == 0 or i == conv_num - 1:
                    prev = conv_layer(prev, conv_filter[i], "d_conv_{}".format(i),
                                      activation=lrelu, batch_norm=None, reuse=reuse)
                else:
                    bn = batch_norm(name="d_bn_{}".format(i))
                    prev = conv_layer(prev, conv_filter[i], "d_conv_{}".format(i),
                                      activation=lrelu, batch_norm=bn, reuse=reuse)

                setattr(self, "conv_{}".format(i), prev)
            return tf.sigmoid(prev)
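conv_infos is a caller-supplied configuration dict whose shape can be read off the lookups above; note that conv_stride is fetched but never used in the loop shown. A purely illustrative value, assuming each filter entry is the layer's output-channel count (a guess; it could equally be a full filter-shape list, depending on this project's conv_layer):

conv_infos = {
    'conv_layer_number': 4,
    'filter': [64, 128, 256, 1],  # hypothetical; one entry per conv layer
    'stride': [2, 2, 2, 1],       # read above but unused in this snippet
}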
Example #7
File: encoder.py  Project: takat0m0/VAE_GAN
    def set_model(self, figs, is_training, reuse=False):
        '''
        Return only logits, not sigmoid(logits).
        '''

        h = figs

        # convolution
        with tf.variable_scope(self.name_scope_conv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):

                conved = conv_layer(inputs=h,
                                    out_num=out_chan,
                                    filter_width=5,
                                    filter_hight=5,
                                    stride=2,
                                    l_id=i)

                if i == 0:
                    h = tf.nn.relu(conved)
                    #h = lrelu(conved)
                else:
                    bn_conved = batch_norm(conved, i, is_training)
                    h = tf.nn.relu(bn_conved)
                    #h = lrelu(bn_conved)
        # full connect
        dim = get_dim(h)
        h = tf.reshape(h, [-1, dim])

        with tf.variable_scope(self.name_scope_fc, reuse=reuse):
            weights = get_weights('fc', [dim, self.fc_dim], 0.02)
            biases = get_biases('fc', [self.fc_dim], 0.0)
            h = tf.matmul(h, weights) + biases
            h = batch_norm(h, 'en_fc_bn', is_training)
            h = tf.nn.relu(h)

            weights = get_weights('mu', [self.fc_dim, self.z_dim], 0.02)
            biases = get_biases('mu', [self.z_dim], 0.0)
            mu = tf.matmul(h, weights) + biases

            weights = get_weights('sigma', [self.fc_dim, self.z_dim], 0.02)
            biases = get_biases('sigma', [self.z_dim], 0.0)
            log_sigma = tf.matmul(h, weights) + biases

        return mu, log_sigma
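The encoder returns mu and log_sigma for the latent distribution; sampling from it with the reparameterization trick is the usual next step in a VAE. A standard sketch, not taken from this project, assuming log_sigma is log(sigma) (if it is log-variance, use tf.exp(0.5 * log_sigma) instead):

def sample_z(mu, log_sigma):
    # z = mu + sigma * eps with eps ~ N(0, I); keeps the sample
    # differentiable with respect to mu and log_sigma.
    eps = tf.random_normal(tf.shape(mu))
    return mu + tf.exp(log_sigma) * eps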
Example #8
    def set_model(self, figs, labels, is_training, reuse=False):
        fig_shape = figs.get_shape().as_list()
        height, width = fig_shape[1:3]
        class_num = get_dim(labels)
        with tf.variable_scope(self.name_scope_label, reuse=reuse):
            tmp = linear_layer(labels, class_num, height * width, 'reshape')
            tmp = tf.reshape(tmp, [-1, height, width, 1])
        h = tf.concat((figs, tmp), 3)

        # convolution
        with tf.variable_scope(self.name_scope_conv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):

                conved = conv_layer(inputs=h,
                                    out_num=out_chan,
                                    filter_width=5,
                                    filter_hight=5,
                                    stride=2,
                                    l_id=i)

                if i == 0:
                    h = tf.nn.relu(conved)
                    #h = lrelu(conved)
                else:
                    bn_conved = batch_norm(conved, i, is_training)
                    h = tf.nn.relu(bn_conved)
                    #h = lrelu(bn_conved)
        # full connect
        dim = get_dim(h)
        h = tf.reshape(h, [-1, dim])

        with tf.variable_scope(self.name_scope_fc, reuse=reuse):
            h = linear_layer(h, dim, self.fc_dim, 'fc')
            h = batch_norm(h, 'en_fc_bn', is_training)
            h = tf.nn.relu(h)

            mu = linear_layer(h, self.fc_dim, self.z_dim, 'mu')
            log_sigma = linear_layer(h, self.fc_dim, self.z_dim, 'sigma')

        return mu, log_sigma
Example #9
File: vggnet16.py  Project: heiheiya/VGGNet
    def create(self):
        self.p = []
        self.X = tf.reshape(
            self.X,
            shape=[-1, self.IMAGE_SIZE, self.IMAGE_SIZE, self.CHANEELS])
        conv1_1 = util.conv_layer(self.X,
                                  kh=3,
                                  kw=3,
                                  n_out=64,
                                  sh=1,
                                  sw=1,
                                  name='conv1_1',
                                  p=self.p,
                                  padding='SAME')
        conv1_2 = util.conv_layer(conv1_1,
                                  kh=3,
                                  kw=3,
                                  n_out=64,
                                  sh=1,
                                  sw=1,
                                  name='conv1_2',
                                  p=self.p,
                                  padding='SAME')
        pool1 = util.max_pool_layer(conv1_2,
                                    kh=2,
                                    kw=2,
                                    sh=2,
                                    sw=2,
                                    name='pool1')

        conv2_1 = util.conv_layer(pool1,
                                  kh=3,
                                  kw=3,
                                  n_out=128,
                                  sh=1,
                                  sw=1,
                                  name='conv2_1',
                                  p=self.p,
                                  padding='SAME')
        conv2_2 = util.conv_layer(conv2_1,
                                  kh=3,
                                  kw=3,
                                  n_out=128,
                                  sh=1,
                                  sw=1,
                                  name='conv2_2',
                                  p=self.p,
                                  padding='SAME')
        pool2 = util.max_pool_layer(conv2_2,
                                    kh=2,
                                    kw=2,
                                    sh=2,
                                    sw=2,
                                    name='pool2')

        conv3_1 = util.conv_layer(pool2,
                                  kh=3,
                                  kw=3,
                                  n_out=256,
                                  sh=1,
                                  sw=1,
                                  name='conv3_1',
                                  p=self.p,
                                  padding='SAME')
        conv3_2 = util.conv_layer(conv3_1,
                                  kh=3,
                                  kw=3,
                                  n_out=256,
                                  sh=1,
                                  sw=1,
                                  name='conv3_2',
                                  p=self.p,
                                  padding='SAME')
        conv3_3 = util.conv_layer(conv3_2,
                                  kh=3,
                                  kw=3,
                                  n_out=256,
                                  sh=1,
                                  sw=1,
                                  name='conv3_3',
                                  p=self.p,
                                  padding='SAME')
        pool3 = util.max_pool_layer(conv3_3,
                                    kh=2,
                                    kw=2,
                                    sh=2,
                                    sw=2,
                                    name='pool3')

        conv4_1 = util.conv_layer(pool3,
                                  kh=3,
                                  kw=3,
                                  n_out=512,
                                  sh=1,
                                  sw=1,
                                  name='conv4_1',
                                  p=self.p,
                                  padding='SAME')
        conv4_2 = util.conv_layer(conv4_1,
                                  kh=3,
                                  kw=3,
                                  n_out=512,
                                  sh=1,
                                  sw=1,
                                  name='conv4_2',
                                  p=self.p,
                                  padding='SAME')
        conv4_3 = util.conv_layer(conv4_2,
                                  kh=3,
                                  kw=3,
                                  n_out=512,
                                  sh=1,
                                  sw=1,
                                  name='conv4_3',
                                  p=self.p,
                                  padding='SAME')
        pool4 = util.max_pool_layer(conv4_3,
                                    kh=2,
                                    kw=2,
                                    sh=2,
                                    sw=2,
                                    name='pool4')

        conv5_1 = util.conv_layer(pool4,
                                  kh=3,
                                  kw=3,
                                  n_out=512,
                                  sh=1,
                                  sw=1,
                                  name='conv5_1',
                                  p=self.p,
                                  padding='SAME')
        conv5_2 = util.conv_layer(conv5_1,
                                  kh=3,
                                  kw=3,
                                  n_out=512,
                                  sh=1,
                                  sw=1,
                                  name='conv5_2',
                                  p=self.p,
                                  padding='SAME')
        conv5_3 = util.conv_layer(conv5_2,
                                  kh=3,
                                  kw=3,
                                  n_out=512,
                                  sh=1,
                                  sw=1,
                                  name='conv5_3',
                                  p=self.p,
                                  padding='SAME')
        pool5 = util.max_pool_layer(conv5_3,
                                    kh=2,
                                    kw=2,
                                    sh=2,
                                    sw=2,
                                    name='pool5')

        fc6 = util.full_connected_layer(pool5,
                                        n_out=4096,
                                        name='fc6',
                                        p=self.p)
        fc6_dropout = util.dropout(fc6, self.KEEP_PROB, name='fc6_drop')

        fc7 = util.full_connected_layer(fc6_dropout,
                                        n_out=4096,
                                        name='fc7',
                                        p=self.p)
        fc7_dropout = util.dropout(fc7, self.KEEP_PROB, name='fc7_drop')

        self.fc8 = util.full_connected_layer(fc7_dropout,
                                             self.NUM_CLASSES,
                                             name='fc8',
                                             p=self.p)
        self.softmax = tf.nn.softmax(self.fc8)
        self.predictions = tf.argmax(self.softmax, 1)
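The p list that every layer receives presumably accumulates each layer's weight and bias variables, a common pattern in VGG ports for bulk-loading pretrained weights. A hypothetical loader built on that assumption (np is numpy; the .npz key ordering matching self.p is an assumption, not something this snippet guarantees):

    def load_weights(self, weight_file, sess):
        # Hypothetical: assumes the sorted keys of the .npz file line up
        # one-to-one with the variables collected in self.p.
        weights = np.load(weight_file)
        for i, k in enumerate(sorted(weights.keys())):
            sess.run(self.p[i].assign(weights[k]))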
Example #10
    def __init__(self, optimizer, activation):
        super().__init__(optimizer, activation)

        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name='input')
        y_ = tf.placeholder(dtype=tf.int32, shape=(None, ), name='labels')

        self.x = x
        self.y_ = y_
        # logits = (LinearWrap(image)
        #               .Conv2D('conv1', 24, 5, padding='VALID')
        #               .MaxPooling('pool1', 2, padding='SAME')
        #               .Conv2D('conv2', 32, 3, padding='VALID')
        #               .Conv2D('conv3', 32, 3, padding='VALID')
        #               .MaxPooling('pool2', 2, padding='SAME')
        #               .Conv2D('conv4', 64, 3, padding='VALID')
        #               .Dropout('drop', 0.5)
        #               .FullyConnected('fc0', 512,
        #                               b_init=tf.constant_initializer(0.1), nl=tf.nn.relu)
        #               .FullyConnected('linear', out_dim=10, nl=tf.identity)())
        # tf.nn.softmax(logits, name='output')

        l1 = conv_layer(x, (5, 5, 1, 24),
                        activation=None,
                        padding='VALID',
                        use_bias=False)
        l2 = tf.nn.relu(batch_norm_layer(l1))

        l3 = tf.nn.max_pool(l2, (1, 2, 2, 1), (1, 2, 2, 1), padding='SAME')

        l4 = conv_layer(l3, (3, 3, 24, 32),
                        padding='VALID',
                        activation=None,
                        use_bias=False)
        l5 = tf.nn.relu(batch_norm_layer(l4))

        l6 = conv_layer(l5, (3, 3, 32, 32),
                        padding='VALID',
                        activation=None,
                        use_bias=False)
        l7 = tf.nn.relu(batch_norm_layer(l6))

        l8 = tf.nn.max_pool(l7, (1, 2, 2, 1), (1, 2, 2, 1), padding='SAME')

        l8_ = conv_layer(l8, (3, 3, 32, 64),
                         padding='VALID',
                         activation=None,
                         use_bias=False)
        l9 = tf.nn.relu(batch_norm_layer(l8_))

        # NOTE: keep_prob is hard-coded to 0.5, so dropout stays active at
        # evaluation time as well (see the placeholder sketch below).
        l10 = tf.nn.dropout(l9, 0.5)

        l11 = tf.reshape(l10, (-1, 3 * 3 * 64), name='reshaped1')

        l12 = fully_connected(l11,
                              512,
                              with_activation=True,
                              activation=tf.nn.relu)

        l13 = fully_connected(l12, 10, with_activation=False, use_bias=False)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=l13, labels=y_)
        mean_cross_entropy = tf.reduce_mean(cross_entropy)
        self.mean_cross_entropy = mean_cross_entropy
        train_step = optimizer.minimize(mean_cross_entropy)
        self.train_step = train_step
        self.prediction = tf.cast(tf.argmax(l13, 1), tf.int32)

        # check whether the strongest-firing neuron matches the true label
        correct_prediction = tf.equal(self.prediction, y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.accuracy = accuracy
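One caveat in this last example: tf.nn.dropout runs with keep_prob hard-coded to 0.5, so dropout stays active during evaluation as well. The usual TF1 fix is a placeholder with a default, sketched here (not part of the original code):

keep_prob = tf.placeholder_with_default(1.0, shape=(), name='keep_prob')
l10 = tf.nn.dropout(l9, keep_prob)
# training: feed {keep_prob: 0.5}; evaluation: leave the default of 1.0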