Code Example #1
def vgg_net(weights, image, is_train):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name='%s_w' % name)
            current = utils.conv2d_basic(current, kernels, None)
            current = utils.batch_norm(current,
                                       kernels.get_shape()[3], is_train,
                                       '%s_bn' % name)
        elif kind == 'relu':
            current = tf.nn.relu(current, name='%s' % name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
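
For context, the weights argument used throughout these examples is the layer array from the MatConvNet VGG-19 checkpoint. Below is a minimal sketch of how it is typically prepared before calling vgg_net, assuming the standard imagenet-vgg-verydeep-19.mat file loaded with scipy.io; the path, placeholder shapes, and variable names are illustrative, not part of the example above.

import numpy as np
import scipy.io
import tensorflow as tf

MODEL_PATH = "imagenet-vgg-verydeep-19.mat"  # assumed local path to the MatConvNet checkpoint

model_data = scipy.io.loadmat(MODEL_PATH)
# Mean pixel used to preprocess the input (stored under 'normalization' in the .mat file).
mean = model_data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
# One entry per VGG-19 layer; squeezing strips the MATLAB cell-array wrappers,
# which is why the loop above indexes weights[i][0][0][0][0].
weights = np.squeeze(model_data['layers'])

image = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="input_image")
is_train = tf.placeholder(tf.bool, name="is_train")  # consumed by utils.batch_norm in this variant
processed_image = image - mean_pixel
image_net = vgg_net(weights, processed_image, is_train)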
Code Example #2
def vgg_net(weights, image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        if name in [
                'conv3_4', 'relu3_4', 'conv4_4', 'relu4_4', 'conv5_4',
                'relu5_4'
        ]:
            continue
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #3
File: FCN.py Project: jhchoi21/DL_Book
def vgg_net(weights, image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        # Convolution layer case
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            # Convert the matrix order of the MATLAB file to the tensorflow order.
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        # Activation layer case
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
        # Pooling layer case
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #4
File: FCN.py Project: vicchu/-Rank09-
def vgg_net(weights, image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]  # layer type: conv, relu, or pool
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)

        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #5
def vgg_net(weights, image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels,
            # out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            # if FLAGS.debug:
            # util.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
        # added to make resuming training from checkpoints easier
    global_iter_counter = tf.Variable(0, name='global_step', trainable=False)
    net['global_step'] = global_iter_counter

    return net
Code Example #6
def vgg_net(weights, image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    assert len(net) == len(layers)
    return net
Code Example #7
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3'  # 'conv3_4', 'relu3_4', 'pool3',

        # 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        # 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        #
        # 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        # 'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    assert len(net) == len(layers)
    return net
Code Example #8
    def vgg_net(self, weights, image):
        layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
                  'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
                  'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
                  'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1',
                  'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3',
                  'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
                  'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4',
                  'relu5_4')

        self.net = {}
        self.var_net = {}
        current = image
        for i, name in enumerate(layers):
            kind = name[:4]
            if kind == 'conv':
                kernels, bias = weights[i][0][0][0][0]
                # matconvnet: weights are [width, height, in_channels, out_channels]
                # tensorflow: weights are [height, width, in_channels, out_channels]
                kernels = utils.get_variable(np.transpose(
                    kernels, (1, 0, 2, 3)),
                                             name=name + "_w")
                bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
                current = utils.conv2d_basic(current, kernels, bias)
                self.var_net[name] = kernels
                self.var_net[name + 'b'] = bias
            elif kind == 'relu':
                current = tf.nn.relu(current, name=name)

            elif kind == 'pool':
                current = utils.avg_pool_2x2(current)
            self.net[name] = current

        return self.net, self.var_net
Code Example #9
def vgg_net(weights, image):
    layers = (
        'conv1_1',
        'relu1_1',
        'conv1_2',
        'relu1_2',
        'pool1',  # output: 112*112*64
        'conv2_1',
        'relu2_1',
        'conv2_2',
        'relu2_2',
        'pool2',  # output: 56*56*128
        'conv3_1',
        'relu3_1',
        'conv3_2',
        'relu3_2',
        'conv3_3',
        'relu3_3',
        'conv3_4',
        'relu3_4',
        'pool3',  # output: 28*28*256
        'conv4_1',
        'relu4_1',
        'conv4_2',
        'relu4_2',
        'conv4_3',
        'relu4_3',
        'conv4_4',
        'relu4_4',
        'pool4',  # output: 14*14*512
        'conv5_1',
        'relu5_1',
        'conv5_2',
        'relu5_2',
        'conv5_3',
        'relu5_3',
        'conv5_4',
        'relu5_4')  # output: 14*14*512; followed by three fully connected layers in the original VGG-19

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #10
File: FCN.py Project: yaoyeyaoye/myvggFCN
def vgg_net(weights, image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')
    '''
    weights[i][0][0][0][0]:

    <tf.Variable 'inference/conv1_1_w:0' shape=(3, 3, 3, 64) dtype=float32_ref>
    <tf.Variable 'inference/conv1_1_b:0' shape=(64,) dtype=float32_ref>
    <tf.Variable 'inference/conv1_2_w:0' shape=(3, 3, 64, 64) dtype=float32_ref>
    <tf.Variable 'inference/conv1_2_b:0' shape=(64,) dtype=float32_ref>

    <tf.Variable 'inference/conv2_1_w:0' shape=(3, 3, 64, 128) dtype=float32_ref>
    <tf.Variable 'inference/conv2_1_b:0' shape=(128,) dtype=float32_ref>
    <tf.Variable 'inference/conv2_2_w:0' shape=(3, 3, 128, 128) dtype=float32_ref>
    <tf.Variable 'inference/conv2_2_b:0' shape=(128,) dtype=float32_ref>

    '''

    net = {}
    current = image
    # For an iterable object (such as a list or string), enumerate builds an
    # indexed sequence, so both the index and the value are available in the loop.
    for i, name in enumerate(layers):
        kind = name[:4]
        num = name[4:]
        if kind == 'conv' and num == '1_1':
            W = utils.weight_variable(
                [3, 3, 4, 64],
                name=name + "_w")  # [patch 7*7,insize 512, outsize 4096]
            b = utils.bias_variable([64], name=name + "_b")
            current = utils.conv2d_basic(current, W, b)

        elif kind == 'conv' and num != '1_1':
            kernels, bias = weights[i][0][0][0][0]
            # print("kernels:",i,kernels)
            # print kernels
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            # print(kernels)
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            # print(bias)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #11
def vgg_net(weights, image):
    # def vgg_net(image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image # [n,224,224,3]
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
            # Disabled alternative kept from the original: build the convolutions from
            # scratch with tf.layers.conv2d(current, n_filters, 3, padding='same')
            # instead of loading the pretrained MatConvNet weights
            # (64/128/256/512 filters for the conv1/conv2/conv3/conv4-conv5 blocks).
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net # [n,14,14,512]
Code Example #12
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',


        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels,
            # out_channels]
            kernels = Utils.get_variable(np.transpose(
                kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = Utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = Utils.conv2d_basic(current, kernels, bias)  # forward-pass output stored in current
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)        
            # if FLAGS.debug:
            # util.add_activation_summary(current)
        elif kind == 'pool':
            # Each of VGG's five pooling stages uses stride 2, so the feature map is halved each time.
            # The first four pools are handled here with average pooling;
            # the fifth pool is applied outside this function with max pooling.
            # pool1 shrinks the size 2x, pool2 4x, pool3 8x, pool4 16x.
            current = Utils.avg_pool_2x2(current)
        net[name] = current
        # added to make resuming training from checkpoints easier
    global_iter_counter = tf.Variable(0, name='global_step', trainable=False)
    net['global_step'] = global_iter_counter    # net maps each layer name to its forward-pass output (a dict)

    return net
Code Example #13
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            if (name == 'conv1_1'):
                kernel_shape = kernels.shape[:2] + (4, ) + kernels.shape[3:]
            else:
                kernel_shape = kernels.shape
            
            bias_shape = bias.shape
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            new_kernel = np.zeros(kernel_shape)
            new_kernel_shape = np.transpose(new_kernel, (1, 0, 2, 3)).shape
            # print(f"new kernel shape: {new_kernel_shape}")
            new_bias = np.zeros(bias_shape)
            new_bias_shape = new_bias.reshape(-1).shape[0]
            # print(f"new bias shape: {new_bias_shape}")

            kernels = utils.weight_variable(shape=new_kernel_shape, name=name + "_w" )
            bias = utils.bias_variable(shape=[new_bias_shape], name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
        # print(f"VGG-19 {name} layer: {current.shape}")
    return net
Code Example #14
def vgg_net(weights, image):
    layers = ('relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1',
              'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2',
              'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
              'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3',
              'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2',
              'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    #grayiterstart = 0

    if GRAY_MODE:
        i = 0
        name = 'conv1_1'  # layers[0]
        kind = name[:4]  #=conv
        bias = weights[0][0][0][0][0][1]
        kernels = weights[0][0][0][0][0][0]
        #put the in_channels first, select 1st channel
        #[w,h,in_channels,out_channels]
        kernelstrans = np.array([np.transpose(kernels, (2, 0, 1, 3))[0]])
        #[in_channels,w,h,out_channels]
        kernels = np.transpose(kernelstrans, (2, 1, 0, 3))
        #after line above : [h,w,in_channels,out_channels]
        kernels = utils.get_variable(kernels, name=name + "_w")
        bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
        current = utils.conv2d_basic(current, kernels, bias)
        #

    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i + 1][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #15
def inference_fully_convolutional(dataset):
    '''
    Fully convolutional inference on notMNIST dataset
    :param dataset: [batch_size, 28*28*1] tensor
    :return: logits
    '''
    dataset_reshaped = tf.reshape(dataset, [-1, 28, 28, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable_xavier_initialized([3, 3, 1, 32],
                                                           name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        h_conv1 = tf.nn.relu(
            utils.conv2d_strided(dataset_reshaped, W_conv1, b_conv1))

    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64],
                                                           name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))

    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128],
                                                           name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))

    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable_xavier_initialized([3, 3, 128, 256],
                                                           name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        h_conv4 = tf.nn.relu(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))

    with tf.name_scope("conv5") as scope:
        # W_conv5 = utils.weight_variable_xavier_initialized([2, 2, 256, 512], name="W_conv5")
        # b_conv5 = utils.bias_variable([512], name="b_conv5")
        # h_conv5 = tf.nn.relu(utils.conv2d_strided(h_conv4, W_conv5, b_conv5))
        h_conv5 = utils.avg_pool_2x2(h_conv4)

    with tf.name_scope("conv6") as scope:
        W_conv6 = utils.weight_variable_xavier_initialized([1, 1, 256, 10],
                                                           name="W_conv6")
        b_conv6 = utils.bias_variable([10], name="b_conv6")
        logits = tf.nn.relu(utils.conv2d_basic(h_conv5, W_conv6, b_conv6))
        print(logits.get_shape())
        logits = tf.reshape(logits, [-1, 10])
    return logits
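
A short usage sketch for the function above; the placeholder shape follows the docstring ([batch_size, 28*28*1] flattened notMNIST images), while the placeholder and prediction names are illustrative.

dataset = tf.placeholder(tf.float32, shape=[None, 28 * 28], name="notmnist_input")
logits = inference_fully_convolutional(dataset)   # shape [batch_size, 10] after the final reshape
predictions = tf.argmax(logits, axis=1, name="predicted_class")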
Code Example #16
def vgg_dilate_net(weights, image):
    layers = ('convDilate1_1', 'reluDilate1_1', 'convDilate1_2',
              'reluDilate1_2', 'poolDilate1', 'convDilate2_1', 'reluDilate2_1',
              'convDilate2_2', 'reluDilate2_2', 'poolDilate2', 'convDilate3_1',
              'reluDilate3_1', 'convDilate3_2', 'reluDilate3_2',
              'convDilate3_3', 'reluDilate3_3', 'convDilate3_4',
              'reluDilate3_4', 'convDilate4_1', 'reluDilate4_1',
              'convDilate4_2', 'reluDilate4_2', 'convDilate4_3',
              'reluDilate4_3', 'convDilate4_4', 'reluDilate4_4',
              'convDilate5_1', 'reluDilate5_1', 'convDilate5_2',
              'reluDilate5_2', 'convDilate5_3', 'reluDilate5_3',
              'convDilate5_4', 'reluDilate5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            rate = int(name[10:11])
            if rate <= 2:
                kernels, bias = weights[i][0][0][0][0]
            elif rate > 2:
                kernels, bias = weights[7][0][0][0][0]
                if rate > 4:
                    rate = 3
                    if int(name[12:13]) == 4:
                        rate = 2
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.weight_variable(np.shape(
                np.transpose(kernels, (1, 0, 2, 3))),
                                            name=name + "_wDilate")
            bias = utils.bias_variable(np.shape(bias.reshape(-1)),
                                       name=name + "_bDilate")
            current = utils.conv2d_dilate(current, 2**(rate - 1), kernels,
                                          bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #17
def inference_fully_convolutional(dataset):
    '''
    Fully convolutional inference on notMNIST dataset
    :param dataset: [batch_size, 28*28*1] tensor
    :return: logits
    '''
    dataset_reshaped = tf.reshape(dataset, [-1, 28, 28, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable_xavier_initialized([3, 3, 1, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        h_conv1 = tf.nn.relu(utils.conv2d_strided(dataset_reshaped, W_conv1, b_conv1))

    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))

    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))

    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable_xavier_initialized([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        h_conv4 = tf.nn.relu(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))

    with tf.name_scope("conv5") as scope:
        # W_conv5 = utils.weight_variable_xavier_initialized([2, 2, 256, 512], name="W_conv5")
        # b_conv5 = utils.bias_variable([512], name="b_conv5")
        # h_conv5 = tf.nn.relu(utils.conv2d_strided(h_conv4, W_conv5, b_conv5))
        h_conv5 = utils.avg_pool_2x2(h_conv4)

    with tf.name_scope("conv6") as scope:
        W_conv6 = utils.weight_variable_xavier_initialized([1, 1, 256, 10], name="W_conv6")
        b_conv6 = utils.bias_variable([10], name="b_conv6")
        logits = tf.nn.relu(utils.conv2d_basic(h_conv5, W_conv6, b_conv6))
        print(logits.get_shape())
        logits = tf.reshape(logits, [-1, 10])
    return logits
Code Example #18
def vgg_net(weights, image, debug):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    restore_vars = []
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            # Utilizing stored weights of ImageNet pretrained network to provide the correct shapes
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.weight_variable(np.transpose(kernels, (1, 0, 2, 3)).shape, name=name + "_w")
            bias = utils.bias_variable(bias.reshape(-1).shape, name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
            restore_vars += [kernels, bias]
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net, restore_vars
Code Example #19
File: Fcn CRF.py Project: zjuHong/segmentation
def vgg_net(weights, image):
    # The first five blocks of the VGG network
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image  # input image to run inference on
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")  # conv1_1_w
            bias = utils.get_variable(bias.reshape(-1),
                                      name=name + "_b")  # conv1_1_b
            current = utils.conv2d_basic(current, kernels,
                                         bias)  # forward-pass output stored in current
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)  # relu1_1
            if FLAGS.debug:  # whether debug mode is enabled (True / False)
                utils.add_activation_summary(current)  # log activation summaries for TensorBoard
        elif kind == 'pool':
            # Each of VGG's five pooling stages uses stride 2, so the feature map is halved each time.
            # The first four pools are handled here with average pooling;
            # the fifth pool is applied outside this function with max pooling.
            # pool1 shrinks the size 2x, pool2 4x, pool3 8x, pool4 16x.
            current = utils.avg_pool_2x2(current)
        net[name] = current  # net maps each layer name to its forward-pass output (a dict)

    return net
Code Example #20
File: FCN.py Project: Selimam/AutoPortraitMatting
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        if name in ['conv3_4', 'relu3_4', 'conv4_4', 'relu4_4', 'conv5_4', 'relu5_4']:
            continue
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Code Example #21
def vgg_net(weights, image):
    """
    :param weights: np matrix
    :param image: tf placeholder, fed with np arrays
    :return: a dict mapping each layer name to the corresponding operation node in the tf graph.
    """
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            # weight tf var
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            # bias tf var
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            # the output tf layer node
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            # note: average pooling is used here, although standard VGG uses max pooling
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
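
Because vgg_net returns a dict of layer outputs, downstream FCN code usually pulls specific feature maps out of it to build the decoder. A minimal sketch follows, assuming weights and processed_image have been prepared as in the sketch after Code Example #1; the chosen layers illustrate typical skip connections and are not prescribed by the function itself.

with tf.variable_scope("inference"):
    image_net = vgg_net(weights, processed_image)

# Feature maps commonly reused by FCN-style decoders:
pool4 = image_net['pool4']               # 1/16 of the input resolution
conv_final_layer = image_net['conv5_3']  # deepest convolutional feature map kept here
print(pool4.get_shape(), conv_final_layer.get_shape())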
Code Example #22
def vgg_net(weights, image):

    ## the first five stages of the FCN encoder are simply the VGG network
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
              'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
              'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
              'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')

    net = {}  # dict
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            ## Each of VGG's five pooling stages uses stride 2, so the feature map is halved each time.
            ## The first four pools are handled here with average pooling;
            ## the fifth pool is applied outside this function with max pooling.
            ## pool1 shrinks the size 2x, pool2 4x, pool3 8x, pool4 16x.
            current = utils.avg_pool_2x2(current)  ## average pooling
        net[name] = current

    return net  ## the output of every vgg layer is stored in net
Code Example #23
File: FCN3.py Project: tuggeluk/FCNtensorflow
def compute_energy(image, keep_prob_conv, input_channels, output_channels,
                   scope):

    with tf.variable_scope(scope):

        #########################
        # downsample using VGG  #
        #########################

        conv1_1 = conv_layer(image, 3, input_channels, 64, "1_1")
        conv1_2 = conv_layer(conv1_1, 3, 64, 64, "1_2")
        pool1 = utils.max_pool_2x2(conv1_2)
        drop1 = tf.nn.dropout(pool1, keep_prob=keep_prob_conv)

        conv2_1 = conv_layer(drop1, 3, 64, 128, "2_1")
        conv2_2 = conv_layer(conv2_1, 3, 128, 128, "2_2")
        pool2 = utils.max_pool_2x2(conv2_2)
        drop2 = tf.nn.dropout(pool2, keep_prob=keep_prob_conv)

        conv3_1 = conv_layer(drop2, 3, 128, 256, "3_1")
        conv3_2 = conv_layer(conv3_1, 3, 256, 256, "3_2")
        conv3_3 = conv_layer(conv3_2, 3, 256, 256, "3_3")
        pool3 = utils.avg_pool_2x2(conv3_3)
        drop3 = tf.nn.dropout(pool3, keep_prob=keep_prob_conv)

        conv4_1 = conv_layer(drop3, 3, 256, 512, "4_1")
        conv4_2 = conv_layer(conv4_1, 3, 512, 512, "4_2")
        conv4_3 = conv_layer(conv4_2, 3, 512, 512, "4_3")
        pool4 = utils.avg_pool_2x2(conv4_3)
        drop4 = tf.nn.dropout(pool4, keep_prob=keep_prob_conv)

        conv5_1 = conv_layer(drop4, 3, 512, 512, "5_1")
        conv5_2 = conv_layer(conv5_1, 3, 512, 512, "5_2")
        conv5_3 = conv_layer(conv5_2, 3, 512, 512, "5_3")

        #######################
        #   Upsample
        #######################

        # fcn conv5 5x5, 1x1, 1x1
        fcn5_1 = conv_layer(conv5_3, 5, 512, 512, "f5_1")
        fcn5_2 = conv_layer(fcn5_1, 1, 512, 512, "f5_2")
        fcn5_3 = conv_layer(fcn5_2, 1, 512, 512, "f5_3")

        # fcn conv 4 5x5, 1x1, 1x1
        fcn4_1 = conv_layer(drop4, 5, 512, 512, "f4_1")
        fcn4_2 = conv_layer(fcn4_1, 1, 512, 512, "f4_2")
        fcn4_3 = conv_layer(fcn4_2, 1, 512, 512, "f4_3")

        # fcn conv 3 5x5, 1x1, 1x1
        fcn3_1 = conv_layer(drop3, 5, 256, 256, "f3_1")
        fcn3_2 = conv_layer(fcn3_1, 1, 256, 256, "f3_2")
        fcn3_3 = conv_layer(fcn3_2, 1, 256, 256, "f3_3")

        # upsample conv5
        shape_fc5_1 = tf.shape(fcn5_3)
        deconv_shape5_1 = tf.stack(
            [shape_fc5_1[0], shape_fc5_1[1] * 2, shape_fc5_1[2] * 2, 256])
        deconv5_1 = deconv_layer(fcn5_3, 4, 512, 256, deconv_shape5_1, "d5_1")

        # upsample conv4
        shape_fc4_3 = tf.shape(fcn4_3)
        deconv_shape4_1 = tf.stack(
            [shape_fc4_3[0], shape_fc4_3[1] * 2, shape_fc4_3[2] * 2, 256])
        deconv4_1 = deconv_layer(fcn4_3, 4, 512, 256, deconv_shape4_1, "d4_1")

        # stack
        stacked = tf.concat([fcn3_3, deconv4_1, deconv5_1], -1)

        # three 1by1 fuses
        fuse1 = conv_layer(stacked, 1, 3 * 256, 2 * 256, "fuse1")
        fuse2 = conv_layer(fuse1, 1, 2 * 256, 256, "fuse2")
        fuse3 = conv_layer(fuse2, 1, 256, 256, "fuse3")

        # final upsampling
        shape_final1 = tf.shape(fuse3)
        deconv_shape_final1 = tf.stack([
            shape_final1[0], shape_final1[1] * 2, shape_final1[2] * 2,
            output_channels
        ])
        deconv_final1 = deconv_layer(fuse3, 4, 256, output_channels,
                                     deconv_shape_final1, "d_final1")

        shape_final2 = tf.shape(deconv_final1)
        deconv_shape_final2 = tf.stack([
            shape_final2[0], shape_final2[1] * 2, shape_final2[2] * 2,
            output_channels
        ])
        deconv_final2 = deconv_layer(deconv_final1, 4, output_channels,
                                     output_channels, deconv_shape_final2,
                                     "d_final2")

        shape_final3 = tf.shape(deconv_final2)
        deconv_shape_final3 = tf.stack([
            shape_final3[0], shape_final3[1] * 2, shape_final3[2] * 2,
            output_channels
        ])
        deconv_final3 = deconv_layer(deconv_final2, 4, output_channels,
                                     output_channels, deconv_shape_final3,
                                     "d_final3")

        shape_final4 = tf.shape(deconv_final3)
        deconv_shape_final4 = tf.stack([
            shape_final4[0], shape_final4[1] * 2, shape_final4[2] * 2,
            output_channels
        ])
        deconv_final4 = deconv_layer(deconv_final3, 4, output_channels,
                                     output_channels, deconv_shape_final4,
                                     "d_final4")

    annotation_pred = tf.argmax(deconv_final3, dimension=3, name="prediction")
    return tf.expand_dims(annotation_pred, dim=3), deconv_final3
Code Example #24
def vgg_net_rgb(weights, image, debug, keep_prob):
    layers = (
        'rgb_conv1_1', 'rgb_relu1_1', 'rgb_conv1_2', 'rgb_relu1_2', 'rgb_pool1',

        'rgb_conv2_1', 'rgb_relu2_1', 'rgb_conv2_2', 'rgb_relu2_2', 'rgb_pool2',

        'rgb_conv3_1', 'rgb_relu3_1', 'rgb_conv3_2', 'rgb_relu3_2', 'rgb_conv3_3',
        'rgb_relu3_3', 'rgb_pool3',

        'rgb_conv4_1', 'rgb_relu4_1', 'rgb_conv4_2', 'rgb_relu4_2', 'rgb_conv4_3',
        'rgb_relu4_3'

        # 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        # 'relu5_3'
    )

    # output of retrained layer for vgg
    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[4:8]
        if kind == 'conv':
            if name == 'rgb_conv4_1':
                # Modify the seventh conv layer
                # pdb.set_trace()
                kernels, bias = weights[i][0][0][0][0]
                kernels = np.transpose(kernels, (1, 0, 2, 3))
                sample_index = random.sample(range(512), 256)
                kernels = kernels[:, :, :, sample_index]
                bias = bias[:, sample_index]
                kernels = utils.get_variable(kernels, name=name + "_w")
                bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
                current = utils.conv2d_basic(current, kernels, bias, keep_prob)
            elif name == 'rgb_conv4_2':
                # Modify the eighth conv layer
                # pdb.set_trace()
                kernels, bias = weights[i][0][0][0][0]
                kernels = np.transpose(kernels, (1, 0, 2, 3))
                sample_index_1 = random.sample(range(512), 256)
                sample_index_2 = random.sample(range(512), 256)
                kernels = kernels[:, :, sample_index_1, :]
                kernels = kernels[:, :, :, sample_index_2]
                bias = bias[:, sample_index_2]
                kernels = utils.get_variable(kernels, name=name + "_w")
                bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
                current = utils.conv2d_basic(current, kernels, bias, keep_prob)
            elif name == 'rgb_conv4_3':
                # pdb.set_trace()
                # Modify the ninth conv layer
                kernels, bias = weights[i][0][0][0][0]
                kernels = np.transpose(kernels, (1, 0, 2, 3))
                sample_index_1 = random.sample(range(512), 256)
                sample_index_2 = random.sample(range(512), 256)
                kernels = kernels[:, :, sample_index_1, :]
                kernels = kernels[:, :, :, sample_index_2]
                bias = bias[:, sample_index_2]
                kernels = utils.get_variable(kernels, name=name + "_w")
                bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
                current = utils.conv2d_basic(current, kernels, bias, keep_prob)
            else:
                kernels, bias = weights[i][0][0][0][0]
                # matconvnet: weights are [width, height, in_channels, out_channels]
                # tensorflow: weights are [height, width, in_channels, out_channels]
                kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
                bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
                current = utils.conv2d_basic(current, kernels, bias, keep_prob)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if debug: 
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
    # pdb.set_trace()

    return net