Example #1
def resnet(n=18, nr_classes=10):
    input = Input(shape=(img_channels, img_rows, img_cols))

    conv1 = conv_bn_relu(
        input,
        nb_filter=16,
        nb_row=3,
        nb_col=3,
        W_regularizer=l2(weight_decay))  # 16 filters, 3x3 kernel

    # Build residual blocks.
    block_fn = _bottleneck
    block1 = residual_block(conv1,
                            block_fn,
                            nb_filters=16,
                            repetations=n,
                            is_first_layer=True)
    block2 = residual_block(block1, block_fn, nb_filters=32, repetations=n)
    block3 = residual_block(block2,
                            block_fn,
                            nb_filters=64,
                            repetations=n,
                            subsample=True)

    # Classifier block
    pool2 = AveragePooling2D(pool_size=(8, 8))(block3)
    flatten1 = Flatten()(pool2)
    final = Dense(units=nr_classes,
                  kernel_initializer="he_normal",
                  activation="softmax",
                  kernel_regularizer=l2(weight_decay))(flatten1)

    model = Model(inputs=input, outputs=final)
    return model
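The helpers conv_bn_relu, residual_block, and _bottleneck are defined elsewhere in the source repository and not shown here. As a rough sketch of what the first two plausibly look like for this Keras functional-API snippet (names, signatures, and the block_fn contract are assumptions, not the original code):

from keras.layers import Conv2D, BatchNormalization, Activation

def conv_bn_relu(x, nb_filter, nb_row, nb_col, W_regularizer=None):
    # Convolution -> batch norm -> ReLU, as the call site suggests.
    x = Conv2D(nb_filter, (nb_row, nb_col), padding="same",
               kernel_regularizer=W_regularizer)(x)
    x = BatchNormalization()(x)
    return Activation("relu")(x)

def residual_block(x, block_fn, nb_filters, repetations,
                   is_first_layer=False, subsample=False):
    # Stack `repetations` blocks; block_fn (e.g. _bottleneck) is assumed to
    # build one residual unit, including its shortcut connection.
    for i in range(repetations):
        x = block_fn(x, nb_filters, subsample=(subsample and i == 0))
    return x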
Example #2
def resnet_backbone(no_classes=3422,
                    no_channels=3,
                    start_neurons=32,
                    dropout_rate=0.1):
    input_layer = layers.Input(name='input_image',
                               shape=(IMG_SIZE_CLASSIFY, IMG_SIZE_CLASSIFY,
                                      no_channels),
                               dtype='float32')

    inner = input_layer
    for mult in [1, 2, 2, 4, 8]:
        inner = layers.Conv2D(start_neurons * mult, (3, 3),
                              activation=None,
                              padding="same")(inner)
        inner = residual_block(inner, start_neurons * mult)
        inner = residual_block(inner, start_neurons * mult, True)

        # Downsample after every stage except the widest (mult == 8).
        if mult <= 4:
            inner = layers.MaxPooling2D((2, 2))(inner)

        if dropout_rate:
            inner = layers.Dropout(dropout_rate)(inner)

    print(inner.get_shape())
    inner = layers.Flatten()(inner)
    inner = layers.Dense(no_classes, activation="softmax")(inner)
    net = models.Model(inputs=[input_layer], outputs=inner)
    return net
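resnet_backbone calls residual_block(x, filters) and residual_block(x, filters, True), a signature common in Kaggle U-Net kernels (residual_block(block_input, num_filters, batch_activate=False)); the real helper lives elsewhere in the repo. A plausible sketch under that assumption, with layers being tensorflow.keras.layers as in the snippet above:

def bn_relu(x):
    x = layers.BatchNormalization()(x)
    return layers.Activation('relu')(x)

def residual_block(block_input, num_filters=16, batch_activate=False):
    # Pre-activation residual unit: (BN-ReLU-Conv) x 2 plus identity shortcut.
    x = bn_relu(block_input)
    x = layers.Conv2D(num_filters, (3, 3), padding='same')(x)
    x = bn_relu(x)
    x = layers.Conv2D(num_filters, (3, 3), padding='same')(x)
    x = layers.Add()([x, block_input])
    if batch_activate:
        x = bn_relu(x)
    return x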
Example #3
def resnet(inpt, n):
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = (n - 20) // 12 + 1
    layers = []

    num_residual = 16
    chOut = 0

    with tf.variable_scope('conv_start'):
        conv00 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv00)

    # Six iterations (num_residual // 2 - 2), each adding 168 channels:
    # 16 + 6 * 168 = 1024, which matches the assert below.
    for lv in range(num_residual // 2 - 2):
        with tf.variable_scope('conv_%d' % lv):
            chOut += 168
            print(layers[-1].get_shape().as_list()[1:])
            conv1_1_x = residual_block(layers[-1], 16 + chOut, True)
            print(conv1_1_x.get_shape().as_list()[1:])
            conv1_1 = residual_block(conv1_1_x, 16 + chOut, False)

            layers.append(conv1_1_x)
            layers.append(conv1_1)

    # print(layers[-1].get_shape().as_list()[1:])
    assert layers[-1].get_shape().as_list()[1:] == [7, 7, 1024]

    return layers[-1]
Example #4
def test_residual_block_downsample(self):
    outputs = residual_block(filters=self.filters,
                             kernel_size=(3, 3),
                             downsample=True,
                             padding='same',
                             activation='relu')(self.inputs)
    self.assertListEqual(outputs.shape.as_list(),
                         [None, self.height // 2, self.width // 2, self.filters])
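The test refers to self.inputs, self.filters, self.height, and self.width, which would come from the test case's setUp. A hypothetical fixture that makes it runnable (class name and values are assumptions):

import tensorflow as tf

class ResidualBlockTest(tf.test.TestCase):
    def setUp(self):
        super().setUp()
        self.height, self.width, self.filters = 32, 32, 16
        # Symbolic Keras input; the batch dimension stays None.
        self.inputs = tf.keras.Input(shape=(self.height, self.width, self.filters))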
Example #5
def resnet():
    input = Input(shape=(img_channels, img_rows, img_cols))

    conv1 = conv_bn_relu(input, nb_filter=16, nb_row=3, nb_col=3, W_regularizer=l2(weight_decay))

    # Build residual blocks.
    block_fn = _bottleneck
    block1 = residual_block(conv1, block_fn, nb_filters=16, repetations=18, is_first_layer=True)
    block2 = residual_block(block1, block_fn, nb_filters=32, repetations=18)
    block3 = residual_block(block2, block_fn, nb_filters=64, repetations=18, subsample=True)
    
    # Classifier block
    pool2 = AveragePooling2D(pool_size=(8, 8))(block3)
    flatten1 = Flatten()(pool2)
    final = Dense(units=10, kernel_initializer="he_normal", activation="softmax",
                  kernel_regularizer=l2(weight_decay))(flatten1)

    model = Model(inputs=input, outputs=final)
    return model
Example #6
def resnet_blocks(x, ch, name, resnet_step=0):
    # `self.is_training` is captured from the enclosing model class.
    if resnet_step > 0:
        for i in range(resnet_step):
            x = residual_block(x, channels=ch, is_training=self.is_training,
                               downsample=False,
                               scope=name + "_residual_block_decode_%d" % i)

        x = batch_norm_resnet(x, is_training=self.is_training,
                              scope=name + "_decode_0_batch_norm")
        x = tf.nn.relu(x)
    return x
Example #7
def resnet(inpt, n, is_training=False):
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = int((n - 20) / 12 + 1)
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer_res(inpt, [3, 3, 3, 16], 1, is_training)
        layers.append(conv1)

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1],
                                     16,
                                     False,
                                     is_training=is_training)
            conv2 = residual_block(conv2_x, 16, False, is_training=is_training)
            layers.append(conv2_x)
            layers.append(conv2)
            print("conv2 shape: {}".format(conv2.shape))

        assert conv2.get_shape().as_list()[1:] == [75, 75, 16]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1],
                                     32,
                                     down_sample,
                                     is_training=is_training)
            conv3 = residual_block(conv3_x, 32, False, is_training=is_training)
            layers.append(conv3_x)
            layers.append(conv3)

        assert conv3.get_shape().as_list()[1:] == [38, 38, 32]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1],
                                     64,
                                     down_sample,
                                     is_training=is_training)
            conv4 = residual_block(conv4_x, 64, False, is_training=is_training)
            layers.append(conv4_x)
            layers.append(conv4)

        assert conv4.get_shape().as_list()[1:] == [19, 19, 64]

    with tf.variable_scope('fc'):
        global_pool = tf.reduce_mean(layers[-1], [1, 2])
        assert global_pool.get_shape().as_list()[1:] == [64]

        out = softmax_layer(global_pool, [64, 2])
        layers.append(out)

    return layers[-1]
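Most TensorFlow examples on this page assume three helpers in the style of https://github.com/xuyuwei/resnet-tf (cited in Example #13): conv_layer, residual_block, and softmax_layer. A condensed sketch of that style; initializers and batch norm are simplified assumptions, and signatures vary across examples (Example #8's variant also returns the created weights):

import tensorflow as tf

def conv_layer(inpt, filter_shape, stride):
    # filter_shape = [k, k, in_channels, out_channels]; batch norm omitted.
    filt = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
    conv = tf.nn.conv2d(inpt, filt, strides=[1, stride, stride, 1],
                        padding='SAME')
    return tf.nn.relu(conv)

def residual_block(inpt, output_depth, down_sample, is_training=False):
    # is_training would gate batch norm; unused in this simplified sketch.
    input_depth = inpt.get_shape().as_list()[3]
    if down_sample:
        inpt = tf.nn.max_pool(inpt, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')
    conv1 = conv_layer(inpt, [3, 3, input_depth, output_depth], 1)
    conv2 = conv_layer(conv1, [3, 3, output_depth, output_depth], 1)
    if input_depth != output_depth:
        # Zero-pad channels so the identity shortcut matches the block output.
        inpt = tf.pad(inpt, [[0, 0], [0, 0], [0, 0],
                             [0, output_depth - input_depth]])
    return conv2 + inpt

def softmax_layer(inpt, shape):
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    b = tf.Variable(tf.zeros([shape[1]]))
    return tf.nn.softmax(tf.matmul(inpt, w) + b)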
Example #8
def resnet(inpt, n):
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = (n - 20) // 12 + 1
    layers = {}
    layer_list = []

    with tf.variable_scope('conv1'):
        conv1, conv1_w = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers['conv1'] = conv1

        for weights in conv1_w:
            layer_list.append(weights)

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            # Chain from the previous conv2 block, falling back to conv1 on
            # the first iteration, so repeated blocks stay connected.
            conv2_x, conv2_x_w = residual_block(
                layers.get('conv2', layers['conv1']), 16, False)
            conv2, conv2_w = residual_block(conv2_x, 16, False)
            layers['conv2_x'] = conv2_x
            layers['conv2'] = conv2

            for weights in conv2_x_w:
                layer_list.append(weights)
            for weights in conv2_w:
                layer_list.append(weights)

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv3_%d' % (i + 1)):
            # Chain from the previous conv3 block, starting from conv2.
            conv3_x, conv3_x_w = residual_block(
                layers.get('conv3', layers['conv2']), 32, down_sample)
            conv3, conv3_w = residual_block(conv3_x, 32, False)
            layers['conv3_x'] = conv3_x
            layers['conv3'] = conv3

            for weights in conv3_x_w:
                layer_list.append(weights)
            for weights in conv3_w:
                layer_list.append(weights)

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv4_%d' % (i + 1)):
            # Chain from the previous conv4 block, starting from conv3.
            conv4_x, conv4_x_w = residual_block(
                layers.get('conv4', layers['conv3']), 64, down_sample)
            conv4, conv4_w = residual_block(conv4_x, 64, False)
            layers['conv4_x'] = conv4_x
            layers['conv4'] = conv4

            for weights in conv4_x_w:
                layer_list.append(weights)
            for weights in conv4_w:
                layer_list.append(weights)

    return layers, layer_list
Example #9
def resnet(inpt, n, seperate, data):  # seperate: bool tensor; data: features for the 100-way head
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = int((n - 20) / 12 + 1)
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)
#     tf.summary.image("conv1", conv1, max_outputs=6)
    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

        assert conv2.get_shape().as_list()[1:] == [32, 32, 16]
#     tf.summary.image("conv2", conv2, max_outputs=6)
    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

        assert conv3.get_shape().as_list()[1:] == [16, 16, 32]


#     tf.summary.image("conv3", conv3, max_outputs=6)
    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

        assert conv4.get_shape().as_list()[1:] == [8, 8, 64]

    with tf.variable_scope('fc'):
        # seperate == False: classify the conv features with the 20-way head;
        # seperate == True: classify `data` with the 100-way head instead.
        to_fc = tf.cond(tf.equal(seperate, tf.constant(False)),
                        lambda: layers[-1], lambda: data)
        global_pool = tf.reduce_mean(to_fc, [1, 2])
        assert global_pool.get_shape().as_list()[1:] == [64]
        sm = softmax_layer(global_pool, [64, 20])
        sm2 = softmax_layer(global_pool, [64, 100])
        out = tf.cond(tf.equal(seperate, tf.constant(False)), lambda: sm,
                      lambda: sm2)
        #         out = softmax_layer(global_pool, [64, 20])
        layers.append(out)

    return layers[-1], layers[-2]
Example #10
def resnet(inpt, n):
    print(n)
    n = int(n)
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = int((n - 20) / 12 + 1)
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

        # assert conv2.get_shape().as_list()[1:] == [32, 32, 16]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

        # assert conv3.get_shape().as_list()[1:] == [16, 16, 32]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

        # assert conv4.get_shape().as_list()[1:] == [8, 8, 64]

    with tf.variable_scope('fc'):
        global_pool = tf.reduce_mean(layers[-1], [1, 2])
        # assert global_pool.get_shape().as_list()[1:] == [64]

        out = softmax_layer(global_pool, [64, 100])
        layers.append(out)

    return layers[-1]
Example #11
def resnet(inpt, n):
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = (n - 20) // 12 + 1
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i+1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

        assert conv2.get_shape().as_list()[1:] == [32, 32, 16]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv3_%d' % (i+1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

        assert conv3.get_shape().as_list()[1:] == [16, 16, 32]
    
    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv4_%d' % (i+1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

        assert conv4.get_shape().as_list()[1:] == [8, 8, 64]

    with tf.variable_scope('fc'):
        global_pool = tf.reduce_mean(layers[-1], [1, 2])
        assert global_pool.get_shape().as_list()[1:] == [64]
        
        out = softmax_layer(global_pool, [64, 10])
        layers.append(out)

    return layers[-1]
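A usage sketch for this CIFAR-10 variant in TF1 graph mode; the placeholder shape matches the [32, 32, 16] assertion above (the placeholder itself is illustrative, not part of the original):

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
probs = resnet(images, n=20)  # any n with n >= 20 and (n - 20) % 12 == 0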
Example #12
def resnet(inpt, n):
    num_conv = 1
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i+1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv3_%d' % (i+1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv4_%d' % (i+1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

    with tf.variable_scope('fc'):
        global_pool = tf.reduce_mean(layers[-1], [1, 2])
        out = softmax_layer(global_pool, [64, 10])
        layers.append(out)

    return layers[-1]
Example #13
def inference(inpt):
  """Build the CIFAR-10 model.

  Args:
    inpt: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # ResNet architecture implementation from https://github.com/xuyuwei/resnet-tf

  n = 44
  n_dict = {20:1, 32:2, 44:3, 56:4}
  # ResNet architectures used for CIFAR-10
  
  if n < 20 or (n - 20) % 12 != 0:
    print("ResNet depth invalid.")
    return

  num_conv = n_dict[n]
  layers = []

  with tf.variable_scope('conv1'):
    conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
    layers.append(conv1)

  for i in range(num_conv):
    with tf.variable_scope('conv2_%d' % (i+1)):
      conv2_x = residual_block(layers[-1], 16, False)
      conv2 = residual_block(conv2_x, 16, False)
      layers.append(conv2_x)
      layers.append(conv2)

      assert conv2.get_shape().as_list()[1:] == [32, 32, 16]

  for i in range(num_conv):
    down_sample = True if i == 0 else False
    with tf.variable_scope('conv3_%d' % (i+1)):
      conv3_x = residual_block(layers[-1], 32, down_sample)
      conv3 = residual_block(conv3_x, 32, False)
      layers.append(conv3_x)
      layers.append(conv3)

      assert conv3.get_shape().as_list()[1:] == [16, 16, 32]
  
  for i in range(num_conv):
    down_sample = True if i == 0 else False
    with tf.variable_scope('conv4_%d' % (i+1)):
      conv4_x = residual_block(layers[-1], 64, down_sample)
      conv4 = residual_block(conv4_x, 64, False)
      layers.append(conv4_x)
      layers.append(conv4)

      assert conv4.get_shape().as_list()[1:] == [8, 8, 64]

  with tf.variable_scope('fc'):
    global_pool = tf.reduce_mean(layers[-1], [1, 2])
    assert global_pool.get_shape().as_list()[1:] == [64]
    
    out = softmax_layer(global_pool, [64, 10])
    layers.append(out)

  return layers[-1]  
Example #14
def resnet_unet(img_size=(512, 512),
                no_channels=3,
                start_neurons=32,
                dropout_rate=0.25):

    # input
    input_layer = layers.Input(name='the_input',
                               shape=(*img_size, no_channels),  # noqa
                               dtype='float32')

    # down 1
    conv1 = layers.Conv2D(start_neurons * 1, (3, 3),
                          activation=None, padding="same")(input_layer)
    conv1 = residual_block(conv1, start_neurons * 1)
    conv1 = residual_block(conv1, start_neurons * 1, True)
    pool1 = layers.MaxPooling2D((2, 2))(conv1)
    pool1 = layers.Dropout(dropout_rate)(pool1)

    # down 2
    conv2 = layers.Conv2D(start_neurons * 2, (3, 3),
                          activation=None, padding="same")(pool1)
    conv2 = residual_block(conv2, start_neurons * 2)
    conv2 = residual_block(conv2, start_neurons * 2, True)
    pool2 = layers.MaxPooling2D((2, 2))(conv2)
    pool2 = layers.Dropout(dropout_rate)(pool2)

    # down 3
    conv3 = layers.Conv2D(start_neurons * 4, (3, 3),
                          activation=None, padding="same")(pool2)
    conv3 = residual_block(conv3, start_neurons * 4)
    conv3 = residual_block(conv3, start_neurons * 4, True)
    pool3 = layers.MaxPooling2D((2, 2))(conv3)
    pool3 = layers.Dropout(dropout_rate)(pool3)

    # middle
    middle = layers.Conv2D(start_neurons * 8, (3, 3),
                           activation=None, padding="same")(pool3)
    middle = residual_block(middle, start_neurons * 8)
    middle = residual_block(middle, start_neurons * 8, True)

    # up 1
    deconv3 = layers.Conv2DTranspose(
        start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(middle)
    uconv3 = layers.concatenate([deconv3, conv3])
    uconv3 = layers.Dropout(dropout_rate)(uconv3)

    uconv3 = layers.Conv2D(start_neurons * 4, (3, 3),
                           activation=None, padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 4)
    uconv3 = residual_block(uconv3, start_neurons * 4, True)

    # up 2
    deconv2 = layers.Conv2DTranspose(
        start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
    uconv2 = layers.concatenate([deconv2, conv2])
    uconv2 = layers.Dropout(dropout_rate)(uconv2)

    uconv2 = layers.Conv2D(start_neurons * 2, (3, 3),
                           activation=None, padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 2)
    uconv2 = residual_block(uconv2, start_neurons * 2, True)

    # up 3
    deconv1 = layers.Conv2DTranspose(
        start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
    uconv1 = layers.concatenate([deconv1, conv1])
    uconv1 = layers.Dropout(dropout_rate)(uconv1)

    uconv1 = layers.Conv2D(start_neurons * 1, (3, 3),
                           activation=None, padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 1)
    uconv1 = residual_block(uconv1, start_neurons * 1, True)

    # output mask
    output_layer = layers.Conv2D(
        2, (1, 1), padding="same", activation=None)(uconv1)

    # 2 classes: character mask & center point mask
    output_layer = layers.Activation('sigmoid')(output_layer)

    model = models.Model(inputs=[input_layer], outputs=output_layer)
    return model
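A usage sketch, assuming layers and models are tensorflow.keras.layers and tensorflow.keras.models and that residual_block (see the sketch under Example #2) is in scope; the 1x1 head produces a (512, 512, 2) sigmoid output, one channel per mask:

model = resnet_unet(img_size=(512, 512), no_channels=3)
model.summary()
# e.g. model.compile(optimizer='adam', loss='binary_crossentropy')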
Example #15
def resnet(inpt, n):
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = (n - 20) // 12 + 1
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)
        print("conv1 shape:")
        print(conv1.get_shape())

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

            print("conv2 shape:")
        print(conv2.get_shape())
        assert conv2.get_shape().as_list()[1:] == [448, 448, 16]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

        assert conv3.get_shape().as_list()[1:] == [224, 224, 32]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

        assert conv4.get_shape().as_list()[1:] == [112, 112, 64]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv5_%d' % (i + 1)):
            conv5_x = residual_block(layers[-1], 128, down_sample)
            conv5 = residual_block(conv5_x, 128, False)
            layers.append(conv5_x)
            layers.append(conv5)

        assert conv5.get_shape().as_list()[1:] == [56, 56, 128]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv6_%d' % (i + 1)):
            conv6_x = residual_block(layers[-1], 256, down_sample)
            conv6 = residual_block(conv6_x, 256, False)
            layers.append(conv6_x)
            layers.append(conv6)

        assert conv6.get_shape().as_list()[1:] == [28, 28, 256]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv7_%d' % (i + 1)):
            conv7_x = residual_block(layers[-1], 512, down_sample)
            conv7 = residual_block(conv7_x, 512, False)
            layers.append(conv7_x)
            layers.append(conv7)

        assert conv7.get_shape().as_list()[1:] == [14, 14, 512]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.variable_scope('conv8_%d' % (i + 1)):
            conv8_x = residual_block(layers[-1], 1024, down_sample)
            conv8 = residual_block(conv8_x, 1024, False)
            layers.append(conv8_x)
            layers.append(conv8)

        assert conv8.get_shape().as_list()[1:] == [7, 7, 1024]

    return layers[-1]
Example #16
def resnet(inpt, num_conv, batch_size, keep_prob):
    layers = []

    with tf.name_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 64], 1)
        layers.append(conv1)

    for i in range(num_conv):
        with tf.name_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 64, False)
            conv2 = residual_block(conv2_x, 64, False)
            layers.append(conv2_x)
            layers.append(conv2)

            assert conv2.get_shape().as_list()[1:] == [224, 224, 64]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.name_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 128, down_sample)
            conv3 = residual_block(conv3_x, 128, False)
            layers.append(conv3_x)
            layers.append(conv3)

            assert conv3.get_shape().as_list()[1:] == [112, 112, 128]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.name_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 256, down_sample)
            conv4 = residual_block(conv4_x, 256, False)
            layers.append(conv4_x)
            layers.append(conv4)

            assert conv4.get_shape().as_list()[1:] == [56, 56, 256]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.name_scope('conv5_%d' % (i + 1)):
            conv5_x = residual_block(layers[-1], 512, down_sample)
            conv5 = residual_block(conv5_x, 512, False)
            layers.append(conv5_x)
            layers.append(conv5)

            assert conv5.get_shape().as_list()[1:] == [28, 28, 512]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.name_scope('conv6_%d' % (i + 1)):
            conv6_x = residual_block(layers[-1], 512, down_sample)
            conv6 = residual_block(conv6_x, 512, False)
            layers.append(conv6_x)
            layers.append(conv6)

            assert conv6.get_shape().as_list()[1:] == [14, 14, 512]

    for i in range(num_conv):
        down_sample = True if i == 0 else False
        with tf.name_scope('conv7_%d' % (i + 1)):
            conv7_x = residual_block(layers[-1], 512, down_sample)
            conv7 = residual_block(conv7_x, 512, False)
            layers.append(conv7_x)
            layers.append(conv7)

            assert conv7.get_shape().as_list()[1:] == [7, 7, 512]

    with tf.name_scope('fc'):
        # global_pool = tf.reduce_mean(layers[-1], [1, 2])
        global_conv = conv_layer(layers[-1], [7, 7, 512, 4096], 1)
        print(global_conv.get_shape().as_list())
        global_conv_flatten = tf.reshape(global_conv, [batch_size, -1])

        fc1 = relu_layer(global_conv_flatten, (4096, 4096))

        fc1 = tf.nn.dropout(fc1, keep_prob)

        fc2 = relu_layer(fc1, (4096, 4096))

        out = softmax_layer(fc2, [4096, 30])
        layers.append(out)

    return layers[-1]
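A usage sketch for this last variant; the [224, 224, 64] assertion implies 224x224 RGB inputs, and batch_size must be static because of the reshape in the head (all values here are illustrative):

batch_size = 8
images = tf.placeholder(tf.float32, [batch_size, 224, 224, 3])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
logits = resnet(images, num_conv=2, batch_size=batch_size, keep_prob=keep_prob)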