예제 #1
0
def resnet(inpt, n):
    """Build a residual network on top of ``inpt``.

    Args:
        inpt: Input image tensor (NHWC, 3 channels).
        n: Requested depth; must be >= 20 and satisfy (n - 20) % 12 == 0.

    Returns:
        The final feature tensor, or ``None`` when ``n`` is invalid.
    """
    if n < 20 or (n - 20) % 12 != 0:
        # Python 3 print call (was a Python 2 print statement).
        print("ResNet depth invalid.")
        return

    # Floor division keeps num_conv an int under Python 3.
    num_conv = (n - 20) // 12 + 1  # NOTE(review): currently unused below.
    layers = []

    num_residual = 16
    ch_out = 0

    with tf.variable_scope('conv_start'):
        conv00 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv00)

    # num_residual // 2 - 2 == 6 stages; each widens channels by 168, so the
    # last stage has 16 + 6 * 168 = 1024 channels (matches the final assert).
    # Floor division is required: Python 3 '/' would give range() a float.
    for lv in range(num_residual // 2 - 2):
        with tf.variable_scope('conv_%d' % lv):
            ch_out += 168
            print(layers[-1].get_shape().as_list()[1:])
            conv1_1_x = residual_block(layers[-1], 16 + ch_out, True)
            print(conv1_1_x.get_shape().as_list()[1:])
            conv1_1 = residual_block(conv1_1_x, 16 + ch_out, False)

            layers.append(conv1_1_x)
            layers.append(conv1_1)

    assert layers[-1].get_shape().as_list()[1:] == [7, 7, 1024]

    return layers[-1]
예제 #2
0
def resnet(inpt, n):
    """Build a residual network and collect its weight tensors.

    Args:
        inpt: Input image tensor (NHWC, 3 channels).
        n: Requested depth; must be >= 20 with (n - 20) % 12 == 0.

    Returns:
        ``(layers, layer_list)``: a dict of named output tensors and a flat
        list of every weight tensor created, or ``None`` when ``n`` is
        invalid.
    """
    if n < 20 or (n - 20) % 12 != 0:
        # Python 3 print call (was a Python 2 print statement).
        print("ResNet depth invalid.")
        return

    # Floor division keeps num_conv an int for range() under Python 3.
    num_conv = (n - 20) // 12 + 1
    layers = {}
    layer_list = []

    with tf.variable_scope('conv1'):
        conv1, conv1_w = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers['conv1'] = conv1
        layer_list.extend(conv1_w)

    # NOTE(review): each stage below re-reads the PREVIOUS stage's dict entry
    # on every iteration, so when num_conv > 1 the repeated blocks are not
    # chained and earlier outputs are overwritten — confirm this is intended.
    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x, conv2_x_w = residual_block(layers['conv1'], 16, False)
            conv2, conv2_w = residual_block(conv2_x, 16, False)
            layers['conv2_x'] = conv2_x
            layers['conv2'] = conv2
            layer_list.extend(conv2_x_w)
            layer_list.extend(conv2_w)

    for i in range(num_conv):
        down_sample = i == 0  # only the first block of a stage downsamples
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x, conv3_x_w = residual_block(layers['conv2'], 32,
                                                down_sample)
            conv3, conv3_w = residual_block(conv3_x, 32, False)
            layers['conv3_x'] = conv3_x
            layers['conv3'] = conv3
            layer_list.extend(conv3_x_w)
            layer_list.extend(conv3_w)

    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x, conv4_x_w = residual_block(layers['conv3'], 64,
                                                down_sample)
            conv4, conv4_w = residual_block(conv4_x, 64, False)
            layers['conv4_x'] = conv4_x
            layers['conv4'] = conv4
            layer_list.extend(conv4_x_w)
            layer_list.extend(conv4_w)

    return layers, layer_list
예제 #3
0
def resnet(inpt, n):
    """Build a residual network of depth ``n`` with a switchable FC head.

    Returns ``(head_output, last_conv_feature)``, or ``None`` when ``n``
    is invalid.  ``seperate`` and ``data`` are read from enclosing scope.
    """
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = int((n - 20) / 12 + 1)
    layers = []

    with tf.variable_scope('conv1'):
        layers.append(conv_layer(inpt, [3, 3, 3, 16], 1))

    # (scope format, channel width, downsample first block?, expected shape)
    stage_cfg = [
        ('conv2_%d', 16, False, [32, 32, 16]),
        ('conv3_%d', 32, True, [16, 16, 32]),
        ('conv4_%d', 64, True, [8, 8, 64]),
    ]
    for fmt, width, shrink, expect in stage_cfg:
        for idx in range(num_conv):
            with tf.variable_scope(fmt % (idx + 1)):
                first = residual_block(layers[-1], width, shrink and idx == 0)
                second = residual_block(first, width, False)
                layers.append(first)
                layers.append(second)

            assert second.get_shape().as_list()[1:] == expect

    with tf.variable_scope('fc'):
        # Pick the pooling input depending on the `seperate` flag.
        to_fc = tf.cond(tf.equal(seperate, tf.constant(False)),
                        lambda: layers[-1], lambda: data)
        global_pool = tf.reduce_mean(to_fc, [1, 2])
        assert global_pool.get_shape().as_list()[1:] == [64]
        sm = softmax_layer(global_pool, [64, 20])
        sm2 = softmax_layer(global_pool, [64, 100])
        # 20-way head by default; 100-way head when `seperate` is set.
        out = tf.cond(tf.equal(seperate, tf.constant(False)), lambda: sm,
                      lambda: sm2)
        layers.append(out)

    return layers[-1], layers[-2]
예제 #4
0
파일: models.py 프로젝트: dkout/Miniplaces
def resnet(inpt, n):
    """Build a residual network of depth ``n`` ending in a 100-way softmax.

    Returns the output tensor, or ``None`` when ``n`` is invalid.
    """
    print(n)
    n = int(n)
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    num_conv = int((n - 20) / 12 + 1)
    layers = []

    with tf.variable_scope('conv1'):
        layers.append(conv_layer(inpt, [3, 3, 3, 16], 1))

    def _stack(scope_fmt, width, shrink_first):
        # Append num_conv pairs of residual blocks under scope_fmt scopes;
        # only the very first block of a shrinking stage downsamples.
        for idx in range(num_conv):
            with tf.variable_scope(scope_fmt % (idx + 1)):
                half = residual_block(layers[-1], width,
                                      shrink_first and idx == 0)
                whole = residual_block(half, width, False)
                layers.append(half)
                layers.append(whole)

    _stack('conv2_%d', 16, False)
    _stack('conv3_%d', 32, True)
    _stack('conv4_%d', 64, True)

    with tf.variable_scope('fc'):
        pooled = tf.reduce_mean(layers[-1], [1, 2])
        layers.append(softmax_layer(pooled, [64, 100]))

    return layers[-1]
예제 #5
0
def resnet(inpt, n):
    """Build a CIFAR-style residual network of depth ``n``.

    Args:
        inpt: Input image tensor (NHWC, 3 channels, 32x32 per the asserts).
        n: Requested depth; must be >= 20 with (n - 20) % 12 == 0.

    Returns:
        Softmax output over 10 classes, or ``None`` when ``n`` is invalid.
    """
    if n < 20 or (n - 20) % 12 != 0:
        # Python 3 print call (was a Python 2 print statement).
        print("ResNet depth invalid.")
        return

    # Floor division keeps num_conv an int for range() under Python 3.
    num_conv = (n - 20) // 12 + 1
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

        assert conv2.get_shape().as_list()[1:] == [32, 32, 16]

    for i in range(num_conv):
        down_sample = i == 0  # only the first block of a stage downsamples
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

        assert conv3.get_shape().as_list()[1:] == [16, 16, 32]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

        assert conv4.get_shape().as_list()[1:] == [8, 8, 64]

    with tf.variable_scope('fc'):
        global_pool = tf.reduce_mean(layers[-1], [1, 2])
        assert global_pool.get_shape().as_list()[1:] == [64]

        out = softmax_layer(global_pool, [64, 10])
        layers.append(out)

    return layers[-1]
예제 #6
0
파일: models.py 프로젝트: at553/tfdepth
def resnet(inpt, n):
    """Build a fixed-size residual network (one block pair per stage).

    ``n`` is accepted for interface compatibility but not used: the stage
    count is hard-wired via ``num_conv = 1``.
    """
    num_conv = 1
    layers = []

    with tf.variable_scope('conv1'):
        layers.append(conv_layer(inpt, [3, 3, 3, 16], 1))

    # (scope format, channel width, downsample first block?)
    stages = (
        ('conv2_%d', 16, False),
        ('conv3_%d', 32, True),
        ('conv4_%d', 64, True),
    )
    for fmt, width, shrink_first in stages:
        for idx in range(num_conv):
            with tf.variable_scope(fmt % (idx + 1)):
                blk_a = residual_block(layers[-1], width,
                                       shrink_first and idx == 0)
                blk_b = residual_block(blk_a, width, False)
                layers.append(blk_a)
                layers.append(blk_b)

    with tf.variable_scope('fc'):
        pooled = tf.reduce_mean(layers[-1], [1, 2])
        layers.append(softmax_layer(pooled, [64, 10]))

    return layers[-1]
예제 #7
0
def inference(inpt):
  """Build the CIFAR-10 model.

  Args:
    inpt: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # Resnet architecture implementation from
  # https://github.com/xuyuwei/resnet-tf
  n = 44
  n_dict = {20: 1, 32: 2, 44: 3, 56: 4}  # depth -> blocks per stage

  if n < 20 or (n - 20) % 12 != 0:
    print("ResNet depth invalid.")
    return

  num_conv = n_dict[n]
  layers = []

  with tf.variable_scope('conv1'):
    layers.append(conv_layer(inpt, [3, 3, 3, 16], 1))

  # (scope format, channel width, downsample first block?, expected shape)
  stage_specs = [
      ('conv2_%d', 16, False, [32, 32, 16]),
      ('conv3_%d', 32, True, [16, 16, 32]),
      ('conv4_%d', 64, True, [8, 8, 64]),
  ]
  for fmt, width, shrink, want in stage_specs:
    for idx in range(num_conv):
      with tf.variable_scope(fmt % (idx + 1)):
        half = residual_block(layers[-1], width, shrink and idx == 0)
        whole = residual_block(half, width, False)
        layers.append(half)
        layers.append(whole)

        assert whole.get_shape().as_list()[1:] == want

  with tf.variable_scope('fc'):
    pooled = tf.reduce_mean(layers[-1], [1, 2])
    assert pooled.get_shape().as_list()[1:] == [64]

    layers.append(softmax_layer(pooled, [64, 10]))

  return layers[-1]
예제 #8
0
def resnet(inpt, n):
    """Build a deep residual network (8 stages, up to 1024 channels).

    Args:
        inpt: Input image tensor (NHWC, 3 channels, 448x448 per the asserts).
        n: Requested depth; must be >= 20 with (n - 20) % 12 == 0.

    Returns:
        Final 7x7x1024 feature tensor, or ``None`` when ``n`` is invalid.
    """
    if n < 20 or (n - 20) % 12 != 0:
        # Python 3 print call (was a Python 2 print statement).
        print("ResNet depth invalid.")
        return

    # Floor division keeps num_conv an int for range() under Python 3.
    num_conv = (n - 20) // 12 + 1
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)
        print("conv1 shape:")
        print(conv1.get_shape())

    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

        # Debug prints grouped at one indent level (they straddled the
        # `with` block before).
        print("conv2 shape:")
        print(conv2.get_shape())
        assert conv2.get_shape().as_list()[1:] == [448, 448, 16]

    for i in range(num_conv):
        down_sample = i == 0  # only the first block of a stage downsamples
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

        assert conv3.get_shape().as_list()[1:] == [224, 224, 32]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

        assert conv4.get_shape().as_list()[1:] == [112, 112, 64]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv5_%d' % (i + 1)):
            conv5_x = residual_block(layers[-1], 128, down_sample)
            conv5 = residual_block(conv5_x, 128, False)
            layers.append(conv5_x)
            layers.append(conv5)

        assert conv5.get_shape().as_list()[1:] == [56, 56, 128]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv6_%d' % (i + 1)):
            conv6_x = residual_block(layers[-1], 256, down_sample)
            conv6 = residual_block(conv6_x, 256, False)
            layers.append(conv6_x)
            layers.append(conv6)

        assert conv6.get_shape().as_list()[1:] == [28, 28, 256]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv7_%d' % (i + 1)):
            conv7_x = residual_block(layers[-1], 512, down_sample)
            conv7 = residual_block(conv7_x, 512, False)
            layers.append(conv7_x)
            layers.append(conv7)

        assert conv7.get_shape().as_list()[1:] == [14, 14, 512]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv8_%d' % (i + 1)):
            conv8_x = residual_block(layers[-1], 1024, down_sample)
            conv8 = residual_block(conv8_x, 1024, False)
            layers.append(conv8_x)
            layers.append(conv8)

        assert conv8.get_shape().as_list()[1:] == [7, 7, 1024]

    return layers[-1]
예제 #9
0
def resnet(inpt, num_conv, batch_size, keep_prob):
    """Build a residual network with a fully-connected head.

    Args:
        inpt: Input image tensor; assumed NHWC with 3 channels at 224x224
            (inferred from the per-stage shape asserts) — TODO confirm.
        num_conv: Number of residual block pairs per stage.
        batch_size: Static batch size used to flatten the conv output.
        keep_prob: Dropout keep probability applied after the first FC layer.

    Returns:
        Softmax output over 30 classes.
    """
    layers = []

    with tf.name_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 64], 1)
        layers.append(conv1)

    for i in range(num_conv):
        with tf.name_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 64, False)
            conv2 = residual_block(conv2_x, 64, False)
            layers.append(conv2_x)
            layers.append(conv2)

            assert conv2.get_shape().as_list()[1:] == [224, 224, 64]

    for i in range(num_conv):
        down_sample = i == 0  # only the first block of a stage downsamples
        with tf.name_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 128, down_sample)
            conv3 = residual_block(conv3_x, 128, False)
            layers.append(conv3_x)
            layers.append(conv3)

            assert conv3.get_shape().as_list()[1:] == [112, 112, 128]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 256, down_sample)
            conv4 = residual_block(conv4_x, 256, False)
            layers.append(conv4_x)
            layers.append(conv4)

            assert conv4.get_shape().as_list()[1:] == [56, 56, 256]

    # BUG FIX: stages 5-7 previously reused the 'conv4_%d' scope string and
    # the conv4* variable names; each stage now has its own scope and names.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv5_%d' % (i + 1)):
            conv5_x = residual_block(layers[-1], 512, down_sample)
            conv5 = residual_block(conv5_x, 512, False)
            layers.append(conv5_x)
            layers.append(conv5)

            assert conv5.get_shape().as_list()[1:] == [28, 28, 512]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv6_%d' % (i + 1)):
            conv6_x = residual_block(layers[-1], 512, down_sample)
            conv6 = residual_block(conv6_x, 512, False)
            layers.append(conv6_x)
            layers.append(conv6)

            assert conv6.get_shape().as_list()[1:] == [14, 14, 512]

    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv7_%d' % (i + 1)):
            conv7_x = residual_block(layers[-1], 512, down_sample)
            conv7 = residual_block(conv7_x, 512, False)
            layers.append(conv7_x)
            layers.append(conv7)

            assert conv7.get_shape().as_list()[1:] == [7, 7, 512]

    with tf.name_scope('fc'):
        # BUG FIX: the 7x7x512 -> 4096 convolution must consume the last
        # feature map, not the raw network input (which has only 3 channels
        # and could never match a [7, 7, 512, 4096] filter).
        # NOTE(review): flattening to a 4096-wide FC assumes conv_layer uses
        # VALID padding here (7x7 kernel on 7x7 input -> 1x1x4096) — confirm.
        global_conv = conv_layer(layers[-1], [7, 7, 512, 4096], 1)
        print(global_conv.get_shape().as_list())
        global_conv_flatten = tf.reshape(global_conv, [batch_size, -1])

        fc1 = relu_layer(global_conv_flatten, (4096, 4096))
        fc1 = tf.nn.dropout(fc1, keep_prob)

        fc2 = relu_layer(fc1, (4096, 4096))

        out = softmax_layer(fc2, [4096, 30])
        layers.append(out)

    return layers[-1]