Example #1
    def _inference(self, X, keep_prob, is_train):
        dropout_rate = [0.9, 0.8, 0.7, 0.6, 0.5]
        layers = [64, 128, 256, 512, 512]
        iters = [2, 2, 3, 3]
        h = X

        # VGG-style blocks: iters[i] conv layers per block, then max pooling
        for i in range(4):
            for j in range(iters[i]):
                with tf.variable_scope('layers%s_%s' % (i, j)) as scope:
                    h = F.conv(h, layers[i])
                    h = F.batch_norm(h, is_train)
                    h = F.activation(h)
                    h = F.dropout(h, dropout_rate[i], is_train)
            h = F.max_pool(h)

        # Fully Connected Layer
        with tf.variable_scope('fully_connected_layer') as scope:
            h = F.dense(h, layers[-1])
            h = F.batch_norm(h, is_train)
            h = F.activation(h)
            h = F.dropout(h, dropout_rate[-1], is_train)

        # Softmax Layer
        with tf.variable_scope('softmax_layer') as scope:
            h = F.dense(h, self._num_classes)

        return h
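None of these examples ship the `F` helper module itself. As a rough sketch of what such TensorFlow 1.x wrappers could look like, following the two-argument `batch_norm` form used in Examples #1 and #3 (every name and signature below is inferred from the call sites, not confirmed by the source):

import tensorflow as tf

# Hypothetical reconstruction of the F helpers; signatures are guesses.
def conv(x, out_channels, strides=1, ksize=3):
    with tf.variable_scope(None, default_name='conv'):
        in_channels = x.get_shape().as_list()[-1]
        W = tf.get_variable('W', [ksize, ksize, in_channels, out_channels],
                            initializer=tf.variance_scaling_initializer())
        return tf.nn.conv2d(x, W, [1, strides, strides, 1], padding='SAME')

def batch_norm(x, is_train):
    return tf.layers.batch_normalization(x, training=is_train)

def activation(x):
    return tf.nn.relu(x)

def dropout(x, keep_prob, is_train):
    # keep units with probability keep_prob at training time, identity otherwise
    return tf.cond(is_train,
                   lambda: tf.nn.dropout(x, keep_prob=keep_prob),
                   lambda: x)

def dense(x, units):
    if len(x.get_shape()) > 2:
        x = tf.layers.flatten(x)
    return tf.layers.dense(x, units)

def max_pool(x, pool=2):
    return tf.nn.max_pool(x, ksize=[1, pool, pool, 1],
                          strides=[1, pool, pool, 1], padding='SAME')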
Example #2
def _residual(self, h, channels, strides, keep_prob, is_train):
    h0 = h
    h1 = F.conv(F.activation(F.batch_norm(self, 'bn1', h0, is_train)), channels, strides)
    h1 = F.dropout(h1, keep_prob, is_train)
    h2 = F.conv(F.activation(F.batch_norm(self, 'bn2', h1, is_train)), channels)
    if F.volume(h0) == F.volume(h2):
        # identity shortcut when input and output shapes match
        h = h0 + h2
    else:
        # otherwise project the shortcut with a strided convolution
        h4 = F.conv(h0, channels, strides)
        h = h2 + h4
    return h
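Blocks like `_residual` are typically stacked into stages of a (wide) ResNet. A hedged usage sketch reusing the signature above; the stage depth and the downsampling stride of 2 in the first block are illustrative assumptions, not taken from the example:

# Illustrative only: chaining the residual block into one network stage.
def _stage(self, h, channels, num_blocks, keep_prob, is_train):
    # first block downsamples and widens; the rest keep the shape
    h = self._residual(h, channels, 2, keep_prob, is_train)
    for _ in range(num_blocks - 1):
        h = self._residual(h, channels, 1, keep_prob, is_train)
    return h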
Example #3
def _residual(self, h, channels, strides, keep_prob, is_train):
    h0 = h
    with tf.variable_scope('residual_first'):
        h1 = F.conv(F.activation(F.batch_norm(h0, is_train)), channels, strides)
        h1 = F.dropout(h1, keep_prob, is_train)
    with tf.variable_scope('residual_second'):
        h2 = F.conv(F.activation(F.batch_norm(h1, is_train)), channels)
    if F.volume(h0) == F.volume(h2):
        h = h0 + h2
    else:
        h4 = F.conv(h0, channels, strides)
        h = h2 + h4
    return h
Example #4
def _residual(self, h, channels, strides, keep_prob):
    h0 = h
    h1 = F.dropout(
        F.conv(F.activation(F.batch_normalization(h0)), channels, strides),
        keep_prob)
    h2 = F.conv(F.activation(F.batch_normalization(h1)), channels)
    # c.f. http://gitxiv.com/comments/7rffyqcPLirEEsmpX
    if F.volume(h0) == F.volume(h2):
        h = h2 + h0
    else:
        h4 = F.conv(h0, channels, strides)
        h = h2 + h4
    return h
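`F.volume` is not defined in any of these snippets; judging by how the blocks compare `h0` against `h2` to choose between an identity and a projected shortcut, one plausible reading is the element count of the per-example static shape:

import numpy as np

def volume(x):
    # Hypothetical reconstruction of F.volume: the number of elements in a
    # single example, so differing channel counts or strides force the
    # projection branch.
    return int(np.prod(x.get_shape().as_list()[1:]))  # drop the batch dim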
Example #5
    def _inference(self, X, keep_prob, is_train):
        # Conv_layer 1
        conv = F.conv(X, 192)
        batch_norm = F._batch_norm(self, 'bn1', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn2', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        max_pool = F.max_pool(dropout) # 16 x 16

        # Conv_layer 2
        conv = F.conv(max_pool, 192)
        batch_norm = F._batch_norm(self, 'bn3', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn4', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        max_pool = F.max_pool(dropout) # 8 x 8

        # Conv_layer 3
        conv = F.conv(max_pool, 256)
        batch_norm = F._batch_norm(self, 'bn5', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn6', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn7', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        max_pool = F.max_pool(dropout) # 4 x 4

        # Conv_layer 4
        conv = F.conv(max_pool, 512)
        batch_norm = F._batch_norm(self, 'bn8', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)
        batch_norm = F._batch_norm(self, 'bn9', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)
        batch_norm = F._batch_norm(self, 'bn10', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        max_pool = F.max_pool(dropout) # 2 x 2

        # Fully Connected Layer
        h = tf.reduce_mean(max_pool, axis=[1, 2])  # global average pooling
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, 512)
        h = F._batch_norm(self, 'bn11', h, is_train)
        h = F.activation(h)
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, self._num_classes)

        return h
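For context, `_inference` graphs like this one are usually wired into a loss and a training op. A minimal TF 1.x sketch, assuming a `model` object that exposes the method above; the placeholder names and the optimizer are illustrative, not taken from the example:

X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_train = tf.placeholder(tf.bool)

logits = model._inference(X, keep_prob=0.5, is_train=is_train)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
# batch-norm moving averages must be updated alongside the weights
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.MomentumOptimizer(0.1, 0.9).minimize(loss)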
Example #6
alpha = 1  # passes over the data per epoch; at 1, the whole dataset is learned once

input_train, input_test, correct_train, correct_test = cifar10_call(N)

n_train = input_train.shape[0]
n_test = input_test.shape[0]

img_h = 32
img_w = 32
img_ch = 3

# -- Initialize each layer --
cl1 = ConvLayer(img_ch, img_h, img_w, 30, 3, 3, stride=1, pad=1)  # first 3 args: input (ch, h, w); next 3: filters (n, h, w)
cl2 = ConvLayer(cl1.y_ch, cl1.y_h, cl1.y_w, 30, 3, 3, stride=1, pad=1)
pl1 = PoolingLayer(cl2.y_ch, cl2.y_h, cl2.y_w, pool=2, pad=0)  # pool: pooling size (2*2), pad: padding width
c_dr1 = fn.dropout(0.25)

cl3 = ConvLayer(pl1.y_ch, pl1.y_h, pl1.y_w, 60, 3, 3, stride=1, pad=1)
pl2 = PoolingLayer(cl3.y_ch, cl3.y_h, cl3.y_w, pool=2, pad=0)
c_dr2 = fn.dropout(0.25)

cl4 = ConvLayer(pl2.y_ch, pl2.y_h, pl2.y_w, 120, 3, 3, stride=1, pad=1)
pl3 = PoolingLayer(cl4.y_ch, cl4.y_h, cl4.y_w, pool=2, pad=0)

n_fc_in = pl3.y_ch * pl3.y_h * pl3.y_w
ml1 = MiddleLayer(n_fc_in, 500)
dr1 = fn.dropout(0.5)
ml2 = MiddleLayer(500, 500)
dr2 = fn.dropout(0.5)
ol1 = OutputLayer(500, 10)
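These layer objects are presumably chained into a forward pass. A sketch under the assumption that each layer exposes a `forward` method and that the dropout layers take a training flag; this interface is a guess, not shown by the source:

def forward_propagation(x, is_train):
    # conv/pool feature extractor
    y = cl1.forward(x)
    y = cl2.forward(y)
    y = pl1.forward(y)
    y = c_dr1.forward(y, is_train)
    y = cl3.forward(y)
    y = pl2.forward(y)
    y = c_dr2.forward(y, is_train)
    y = cl4.forward(y)
    y = pl3.forward(y)
    # flatten and classify
    y = y.reshape(-1, n_fc_in)
    y = ml1.forward(y)
    y = dr1.forward(y, is_train)
    y = ml2.forward(y)
    y = dr2.forward(y, is_train)
    return ol1.forward(y)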