コード例 #1
0
def resnet(inpt, n):
    """Build a CIFAR-style ResNet graph of depth ``n`` (valid: n = 12k + 20).

    The classifier head is selected at graph-execution time by the
    module-level ``seperate`` flag: when it is False the network's own
    features feed a 20-way softmax, otherwise the external ``data`` tensor
    feeds a 100-way softmax.  (NOTE(review): ``seperate`` and ``data`` are
    free names resolved at module level — confirm they exist at call time.)

    Returns:
        A pair ``(out, last_conv)``: the selected softmax output and the
        final conv-stage feature map; returns None (after printing a
        message) when the depth is invalid.
    """
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    blocks_per_stage = int((n - 20) / 12 + 1)
    layers = []

    with tf.variable_scope('conv1'):
        layers.append(conv_layer(inpt, [3, 3, 3, 16], 1))

    # Stage 2: 16 filters, spatial size unchanged (asserted 32x32 below).
    for idx in range(blocks_per_stage):
        with tf.variable_scope('conv2_%d' % (idx + 1)):
            first = residual_block(layers[-1], 16, False)
            second = residual_block(first, 16, False)
            layers.extend([first, second])

        assert second.get_shape().as_list()[1:] == [32, 32, 16]

    # Stage 3: 32 filters; the first block of the stage down-samples.
    for idx in range(blocks_per_stage):
        with tf.variable_scope('conv3_%d' % (idx + 1)):
            first = residual_block(layers[-1], 32, idx == 0)
            second = residual_block(first, 32, False)
            layers.extend([first, second])

        assert second.get_shape().as_list()[1:] == [16, 16, 32]

    # Stage 4: 64 filters; the first block of the stage down-samples.
    for idx in range(blocks_per_stage):
        with tf.variable_scope('conv4_%d' % (idx + 1)):
            first = residual_block(layers[-1], 64, idx == 0)
            second = residual_block(first, 64, False)
            layers.extend([first, second])

        assert second.get_shape().as_list()[1:] == [8, 8, 64]

    with tf.variable_scope('fc'):
        # Pool either our own features or the external `data` tensor,
        # depending on the runtime `seperate` flag.
        to_fc = tf.cond(tf.equal(seperate, tf.constant(False)),
                        lambda: layers[-1], lambda: data)
        global_pool = tf.reduce_mean(to_fc, [1, 2])
        assert global_pool.get_shape().as_list()[1:] == [64]
        sm = softmax_layer(global_pool, [64, 20])
        sm2 = softmax_layer(global_pool, [64, 100])
        out = tf.cond(tf.equal(seperate, tf.constant(False)), lambda: sm,
                      lambda: sm2)
        layers.append(out)

    return layers[-1], layers[-2]
コード例 #2
0
ファイル: model.py プロジェクト: codepike/iceberg
def resnet(inpt, n, is_training=False):
    """Build a ResNet of depth ``n`` (valid: n = 12k + 20) with a 2-way head.

    Spatial sizes asserted below (75 -> 38 -> 19) imply a 75x75 3-channel
    input — TODO confirm against the caller.

    Args:
        inpt: input image tensor.
        n: total network depth.
        is_training: forwarded to every conv / residual block (presumably
            batch-norm mode — verify in conv_layer_res / residual_block).

    Returns:
        The [batch, 2] softmax output, or None (after printing a message)
        when the depth is invalid.
    """
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    blocks_per_stage = int((n - 20) / 12 + 1)
    outputs = []

    with tf.variable_scope('conv1'):
        outputs.append(conv_layer_res(inpt, [3, 3, 3, 16], 1, is_training))

    # Stage 2: 16 filters, resolution unchanged.
    for idx in range(blocks_per_stage):
        with tf.variable_scope('conv2_%d' % (idx + 1)):
            block_a = residual_block(outputs[-1],
                                     16,
                                     False,
                                     is_training=is_training)
            block_b = residual_block(block_a, 16, False,
                                     is_training=is_training)
            outputs.extend([block_a, block_b])
            print("conv2 shape: {}".format(block_b.shape))

        assert block_b.get_shape().as_list()[1:] == [75, 75, 16]

    # Stage 3: 32 filters; first block of the stage down-samples.
    for idx in range(blocks_per_stage):
        with tf.variable_scope('conv3_%d' % (idx + 1)):
            block_a = residual_block(outputs[-1],
                                     32,
                                     idx == 0,
                                     is_training=is_training)
            block_b = residual_block(block_a, 32, False,
                                     is_training=is_training)
            outputs.extend([block_a, block_b])

        assert block_b.get_shape().as_list()[1:] == [38, 38, 32]

    # Stage 4: 64 filters; first block of the stage down-samples.
    for idx in range(blocks_per_stage):
        with tf.variable_scope('conv4_%d' % (idx + 1)):
            block_a = residual_block(outputs[-1],
                                     64,
                                     idx == 0,
                                     is_training=is_training)
            block_b = residual_block(block_a, 64, False,
                                     is_training=is_training)
            outputs.extend([block_a, block_b])

        assert block_b.get_shape().as_list()[1:] == [19, 19, 64]

    with tf.variable_scope('fc'):
        pooled = tf.reduce_mean(outputs[-1], [1, 2])
        assert pooled.get_shape().as_list()[1:] == [64]

        outputs.append(softmax_layer(pooled, [64, 2]))

    return outputs[-1]
コード例 #3
0
ファイル: models.py プロジェクト: dkout/Miniplaces
def resnet(inpt, n):
    """Build a ResNet of depth ``n`` (valid: n = 12k + 20) with a 100-way head.

    ``n`` is echoed to stdout and coerced to int before validation.
    Returns the softmax output tensor, or None (after printing a message)
    when the depth is invalid.
    """
    print(n)
    n = int(n)
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    pairs_per_stage = int((n - 20) / 12 + 1)
    graph = []

    with tf.variable_scope('conv1'):
        graph.append(conv_layer(inpt, [3, 3, 3, 16], 1))

    # Stage 2: 16 filters, resolution unchanged.
    for k in range(pairs_per_stage):
        with tf.variable_scope('conv2_%d' % (k + 1)):
            block_a = residual_block(graph[-1], 16, False)
            block_b = residual_block(block_a, 16, False)
            graph += [block_a, block_b]

    # Stage 3: 32 filters; first block of the stage down-samples.
    for k in range(pairs_per_stage):
        with tf.variable_scope('conv3_%d' % (k + 1)):
            block_a = residual_block(graph[-1], 32, k == 0)
            block_b = residual_block(block_a, 32, False)
            graph += [block_a, block_b]

    # Stage 4: 64 filters; first block of the stage down-samples.
    for k in range(pairs_per_stage):
        with tf.variable_scope('conv4_%d' % (k + 1)):
            block_a = residual_block(graph[-1], 64, k == 0)
            block_b = residual_block(block_a, 64, False)
            graph += [block_a, block_b]

    with tf.variable_scope('fc'):
        pooled = tf.reduce_mean(graph[-1], [1, 2])
        graph.append(softmax_layer(pooled, [64, 100]))

    return graph[-1]
コード例 #4
0
ファイル: models.py プロジェクト: RobSalzwedel/resnet-tf
def resnet(inpt, n):
    """Build a CIFAR-10 ResNet of depth ``n`` (valid: n = 12k + 20).

    Ported from Python 2 to Python 3: the ``print`` statement became the
    ``print()`` function, and ``//`` preserves the original integer-division
    semantics of ``/`` under Python 2 — consistent with the other (Python 3)
    definitions in this file.

    Args:
        inpt: input image tensor; the stage asserts below imply a
            [batch, 32, 32, 3] input — TODO confirm against the caller.
        n: total network depth.

    Returns:
        The [batch, 10] softmax output tensor, or None (after printing a
        message) when the depth is invalid.
    """
    if n < 20 or (n - 20) % 12 != 0:
        print("ResNet depth invalid.")
        return

    # Floor division keeps num_conv an int (py2 `/` on ints truncated).
    num_conv = (n - 20) // 12 + 1
    layers = []

    with tf.variable_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
        layers.append(conv1)

    # Stage 2: 16 filters, no down-sampling (32x32 preserved).
    for i in range(num_conv):
        with tf.variable_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 16, False)
            conv2 = residual_block(conv2_x, 16, False)
            layers.append(conv2_x)
            layers.append(conv2)

        assert conv2.get_shape().as_list()[1:] == [32, 32, 16]

    # Stage 3: 32 filters; the first block of the stage down-samples.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 32, down_sample)
            conv3 = residual_block(conv3_x, 32, False)
            layers.append(conv3_x)
            layers.append(conv3)

        assert conv3.get_shape().as_list()[1:] == [16, 16, 32]

    # Stage 4: 64 filters; the first block of the stage down-samples.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.variable_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 64, down_sample)
            conv4 = residual_block(conv4_x, 64, False)
            layers.append(conv4_x)
            layers.append(conv4)

        assert conv4.get_shape().as_list()[1:] == [8, 8, 64]

    with tf.variable_scope('fc'):
        global_pool = tf.reduce_mean(layers[-1], [1, 2])
        assert global_pool.get_shape().as_list()[1:] == [64]

        out = softmax_layer(global_pool, [64, 10])
        layers.append(out)

    return layers[-1]
コード例 #5
0
ファイル: models.py プロジェクト: at553/tfdepth
def resnet(inpt, n):
    """Build a minimal three-stage ResNet with one block pair per stage.

    NOTE(review): the ``n`` argument is accepted but never used — depth is
    hard-coded to one residual pair per stage; confirm this is intentional.

    Returns:
        The [batch, 10] softmax output tensor.
    """
    pairs_per_stage = 1
    nodes = []

    with tf.variable_scope('conv1'):
        nodes.append(conv_layer(inpt, [3, 3, 3, 16], 1))

    # Stage 2: 16 filters, no down-sampling.
    for k in range(pairs_per_stage):
        with tf.variable_scope('conv2_%d' % (k + 1)):
            block_a = residual_block(nodes[-1], 16, False)
            block_b = residual_block(block_a, 16, False)
            nodes += [block_a, block_b]

    # Stage 3: 32 filters; first block of the stage down-samples.
    for k in range(pairs_per_stage):
        with tf.variable_scope('conv3_%d' % (k + 1)):
            block_a = residual_block(nodes[-1], 32, k == 0)
            block_b = residual_block(block_a, 32, False)
            nodes += [block_a, block_b]

    # Stage 4: 64 filters; first block of the stage down-samples.
    for k in range(pairs_per_stage):
        with tf.variable_scope('conv4_%d' % (k + 1)):
            block_a = residual_block(nodes[-1], 64, k == 0)
            block_b = residual_block(block_a, 64, False)
            nodes += [block_a, block_b]

    with tf.variable_scope('fc'):
        pooled = tf.reduce_mean(nodes[-1], [1, 2])
        nodes.append(softmax_layer(pooled, [64, 10]))

    return nodes[-1]
コード例 #6
0
ファイル: cifar10.py プロジェクト: phrayezzen/COMP540
def inference(inpt, n=44):
  """Build the CIFAR-10 ResNet model.

  Args:
    inpt: images returned from distorted_inputs() or inputs().
    n: total ResNet depth; one of 20, 32, 44, 56.  Defaults to 44, the
      previously hard-coded value, so existing callers are unaffected.

  Returns:
    Logits.
  """
  # Resnet architecture implementation from https://github.com/xuyuwei/resnet-tf
  # Supported CIFAR-10 depths -> residual-block pairs per stage.
  n_dict = {20: 1, 32: 2, 44: 3, 56: 4}

  # Membership check subsumes the old arithmetic test AND guards the dict
  # lookup (e.g. n=68 passed `(n-20) % 12 == 0` but raised KeyError).
  if n not in n_dict:
    print("ResNet depth invalid.")
    return

  num_conv = n_dict[n]
  layers = []

  with tf.variable_scope('conv1'):
    conv1 = conv_layer(inpt, [3, 3, 3, 16], 1)
    layers.append(conv1)

  # Stage 2: 16 filters, no down-sampling (32x32 preserved).
  for i in range(num_conv):
    with tf.variable_scope('conv2_%d' % (i + 1)):
      conv2_x = residual_block(layers[-1], 16, False)
      conv2 = residual_block(conv2_x, 16, False)
      layers.append(conv2_x)
      layers.append(conv2)

      assert conv2.get_shape().as_list()[1:] == [32, 32, 16]

  # Stage 3: 32 filters; the first block of the stage down-samples.
  for i in range(num_conv):
    down_sample = i == 0
    with tf.variable_scope('conv3_%d' % (i + 1)):
      conv3_x = residual_block(layers[-1], 32, down_sample)
      conv3 = residual_block(conv3_x, 32, False)
      layers.append(conv3_x)
      layers.append(conv3)

      assert conv3.get_shape().as_list()[1:] == [16, 16, 32]

  # Stage 4: 64 filters; the first block of the stage down-samples.
  for i in range(num_conv):
    down_sample = i == 0
    with tf.variable_scope('conv4_%d' % (i + 1)):
      conv4_x = residual_block(layers[-1], 64, down_sample)
      conv4 = residual_block(conv4_x, 64, False)
      layers.append(conv4_x)
      layers.append(conv4)

      assert conv4.get_shape().as_list()[1:] == [8, 8, 64]

  with tf.variable_scope('fc'):
    global_pool = tf.reduce_mean(layers[-1], [1, 2])
    assert global_pool.get_shape().as_list()[1:] == [64]

    out = softmax_layer(global_pool, [64, 10])
    layers.append(out)

  return layers[-1]
コード例 #7
0
def resnet(inpt, num_conv, batch_size, keep_prob):
    """Build a six-stage residual network with a VGG-style FC head.

    Args:
        inpt: input image tensor; the conv1 filter ([3, 3, 3, 64]) and the
            stage asserts imply a [batch, 224, 224, 3] input — TODO confirm.
        num_conv: number of residual-block pairs per stage.
        batch_size: static batch size used to flatten the final conv output.
        keep_prob: dropout keep probability applied after the first FC layer.

    Returns:
        The [batch_size, 30] softmax output tensor.
    """
    layers = []

    with tf.name_scope('conv1'):
        conv1 = conv_layer(inpt, [3, 3, 3, 64], 1)
        layers.append(conv1)

    # Stage 2: 64 filters, resolution unchanged.
    for i in range(num_conv):
        with tf.name_scope('conv2_%d' % (i + 1)):
            conv2_x = residual_block(layers[-1], 64, False)
            conv2 = residual_block(conv2_x, 64, False)
            layers.append(conv2_x)
            layers.append(conv2)

            assert conv2.get_shape().as_list()[1:] == [224, 224, 64]

    # Stage 3: 128 filters; first block of the stage down-samples.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv3_%d' % (i + 1)):
            conv3_x = residual_block(layers[-1], 128, down_sample)
            conv3 = residual_block(conv3_x, 128, False)
            layers.append(conv3_x)
            layers.append(conv3)

            assert conv3.get_shape().as_list()[1:] == [112, 112, 128]

    # Stage 4: 256 filters; first block of the stage down-samples.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv4_%d' % (i + 1)):
            conv4_x = residual_block(layers[-1], 256, down_sample)
            conv4 = residual_block(conv4_x, 256, False)
            layers.append(conv4_x)
            layers.append(conv4)

            assert conv4.get_shape().as_list()[1:] == [56, 56, 256]

    # Stage 5: 512 filters.  FIX: the last three stages were copy-pasted
    # with duplicate 'conv4_%d' scope names and conv4 variables; renamed to
    # conv5/conv6/conv7 so graph node names reflect the actual stage.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv5_%d' % (i + 1)):
            conv5_x = residual_block(layers[-1], 512, down_sample)
            conv5 = residual_block(conv5_x, 512, False)
            layers.append(conv5_x)
            layers.append(conv5)

            assert conv5.get_shape().as_list()[1:] == [28, 28, 512]

    # Stage 6: 512 filters; first block down-samples to 14x14.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv6_%d' % (i + 1)):
            conv6_x = residual_block(layers[-1], 512, down_sample)
            conv6 = residual_block(conv6_x, 512, False)
            layers.append(conv6_x)
            layers.append(conv6)

            assert conv6.get_shape().as_list()[1:] == [14, 14, 512]

    # Stage 7: 512 filters; first block down-samples to 7x7.
    for i in range(num_conv):
        down_sample = i == 0
        with tf.name_scope('conv7_%d' % (i + 1)):
            conv7_x = residual_block(layers[-1], 512, down_sample)
            conv7 = residual_block(conv7_x, 512, False)
            layers.append(conv7_x)
            layers.append(conv7)

            assert conv7.get_shape().as_list()[1:] == [7, 7, 512]

    with tf.name_scope('fc'):
        # BUG FIX: the 7x7x512 -> 4096 conv must consume the final feature
        # map (layers[-1], asserted [7, 7, 512] above).  The original passed
        # the raw 3-channel `inpt`, which cannot match a 512-input-channel
        # filter.
        global_conv = conv_layer(layers[-1], [7, 7, 512, 4096], 1)
        print(global_conv.get_shape().as_list())
        # NOTE(review): relu_layer expects a 4096-wide input; verify that
        # conv_layer's padding yields a flattened width of 4096 here.
        global_conv_flatten = tf.reshape(global_conv, [batch_size, -1])

        fc1 = relu_layer(global_conv_flatten, (4096, 4096))
        fc1 = tf.nn.dropout(fc1, keep_prob)

        fc2 = relu_layer(fc1, (4096, 4096))

        out = softmax_layer(fc2, [4096, 30])
        layers.append(out)

    return layers[-1]