Example #1
def compact_base(image, phase):
    net = {}
    net['layer1'] = batch_norm_conv('conv1',
                                    image, [3, 3],
                                    num_output_layers=32,
                                    phase=phase)
    net['layer2'] = batch_norm_conv('conv2',
                                    net['layer1'], [3, 3],
                                    num_output_layers=32,
                                    phase=phase)
    net['layer2_p'] = avg_pool_2x2(net['layer2'])
    net['layer3'] = batch_norm_conv('conv3',
                                    net['layer2_p'], [3, 3],
                                    num_output_layers=32,
                                    phase=phase)
    net['layer4'] = batch_norm_conv('conv4',
                                    net['layer3'], [3, 3],
                                    num_output_layers=32,
                                    phase=phase)
    net['layer4_p'] = avg_pool_2x2(net['layer4'])
    net['layer5'] = batch_norm_conv('conv5',
                                    net['layer4_p'], [3, 3],
                                    num_output_layers=32,
                                    phase=phase)
    net['layer6'] = batch_norm_conv('conv6',
                                    net['layer5'], [3, 3],
                                    num_output_layers=32,
                                    phase=phase)
    net['layer6_p'] = avg_pool_2x2(net['layer6'])

    return net
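
A note on the helpers used above: avg_pool_2x2 (which appears in every example on this page) and batch_norm_conv come from the snippet's own codebase and their definitions are not shown. Below is a minimal sketch of what such helpers typically wrap, assuming TF1-style NHWC tensors and a boolean phase training flag; the bodies are illustrative assumptions, not the original implementations.

import tensorflow as tf

def avg_pool_2x2(x, name=None):
    # 2x2 average pooling with stride 2 over an NHWC tensor,
    # halving the spatial height and width.
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME', name=name)

def batch_norm_conv(name, x, kernel_size, num_output_layers, phase):
    # Convolution + batch normalization + ReLU; `phase` switches batch
    # norm between training and inference statistics.
    with tf.variable_scope(name):
        h = tf.layers.conv2d(x, num_output_layers, kernel_size,
                             padding='same', use_bias=False)
        h = tf.layers.batch_normalization(h, training=phase)
        return tf.nn.relu(h)
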
Example #2
def _vgg_net(weights, image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
              'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
              'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
              'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1',
              'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3',
              'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4',
              'relu5_4')
    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            kernels = utils.get_variable(np.transpose(kernels, (0, 1, 2, 3)),
                                         name=name + '_w')
            bias = utils.get_variable(bias.reshape(-1),
                                      name=name + '_b')
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
    return net
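
The weights[i][0][0][0][0] indexing in this and the following examples matches the nested cell-array layout that scipy produces when loading the MatConvNet VGG-19 release. The sketch below shows one common way such a weights array is obtained before calling _vgg_net; the file name and input shape are assumptions, not part of the example above.

import numpy as np
import scipy.io
import tensorflow as tf

# 'layers' holds one MatConvNet cell per layer, hence the repeated [0]
# unwrapping inside _vgg_net.
model_data = scipy.io.loadmat('imagenet-vgg-verydeep-19.mat')
weights = np.squeeze(model_data['layers'])

image = tf.placeholder(tf.float32, [None, 224, 224, 3])  # placeholder input
net = _vgg_net(weights, image)
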
Example #3
def _vgg_net(weights, image):
    print('setting up vgg model initialized params --> extractor2')
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
              'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
              'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
              'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1',
              'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3',
              'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4',
              'relu5_4')
    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: kernels are [width, height, in_channels, out_channels]
            # tensorflow: kernels are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)),
                                         name=name + '_w')
            bias = utils.get_variable(bias.reshape(-1),
                                      name=name + '_b')
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
    return net
Example #4
    def vgg_net(self, weights, image):
        layers = (
            'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

            'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

            'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
            'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

            'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
            'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

            'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
            'relu5_3', 'conv5_4', 'relu5_4'
        )
        net = {}
        current = image
        for i, name in enumerate(layers):
            kind = name[:4]
            if kind == 'conv':
                kernels, bias = weights[i][0][0][0][0]
                # matconvnet: weights are [width, height, in_channels, out_channels]
                # tensorflow: weights are [height, width, in_channels, out_channels]
                kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
                bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
                current = utils.conv2d_basic(current, kernels, bias)
            elif kind == 'relu':
                current = tf.nn.relu(current, name=name)
            elif kind == 'pool':
                current = utils.avg_pool_2x2(current)
            net[name] = current
        return net
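
Examples #2-#4 also rely on utils.get_variable and utils.conv2d_basic, which are not shown. A minimal sketch of what such helpers typically do in this style of TF1 codebase follows; it is illustrative, not the original utils module.

import tensorflow as tf

def get_variable(weights, name):
    # Wrap a pre-trained numpy array as a TF variable initialized
    # with those values.
    init = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=init, shape=weights.shape)

def conv2d_basic(x, W, bias):
    # Stride-1 'SAME' convolution plus bias add, so the spatial size is
    # preserved; downsampling is left to the pooling layers.
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.bias_add(conv, bias)
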
Example #5
def vggnet(X, trainlayers=(), weights_file=None):

    # Keep the VGG layer order; a tuple preserves it (a set would not).
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
        'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
        'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
        'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
        'conv5_2', 'relu5_2', 'conv5_3'
    )

    current = X
    if weights_file:
        # The file is expected to hold a pickled dict keyed by layer name.
        weights = np.load(weights_file, allow_pickle=True).item()
    for layer in layers:
        if 'conv' in layer:
            # Only the layers listed in trainlayers stay trainable.
            trainable = layer in trainlayers
            kernels = utils.get_variable(weights[layer]['weights'],
                                         name=layer + '/weights',
                                         trainable=trainable)
            biases = utils.get_variable(weights[layer]['biases'],
                                        name=layer + '/biases',
                                        trainable=trainable)
            current = utils.conv2d(current, kernels, biases, name=layer)
        elif 'relu' in layer:
            current = tf.nn.relu(current, name=layer)
        elif 'pool' in layer:
            current = utils.avg_pool_2x2(current, name=layer)

    return current
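
The load path above assumes weights_file is a .npy file containing a pickled dict keyed by layer name, so that weights[layer]['weights'] and weights[layer]['biases'] resolve. A sketch of producing and reloading such a file; the file name and shapes are illustrative only.

import numpy as np

vgg_params = {
    'conv1_1': {'weights': np.zeros((3, 3, 3, 64), np.float32),
                'biases': np.zeros(64, np.float32)},
    # ... one entry per conv layer ...
}
np.save('vgg19_weights.npy', vgg_params)

# np.save pickles the dict into a 0-d object array; .item() unwraps it.
weights = np.load('vgg19_weights.npy', allow_pickle=True).item()
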
Example #6
    def _discriminator_encoder(self, image):
        with tf.name_scope('discriminator_encoder'):
            # image is 32 32 3 OR 64 64 3 (if whole image)
            h_d1 = activation_function(
                conv2d(image, self._W_discr1, stride=1, is_training=self.phase)
                + self._b_discr1)
            h_dpool1 = avg_pool_2x2(h_d1)

            # 16 16 128 OR 32 32 128 (if whole image)
            h_d2 = activation_function(
                conv2d(
                    h_dpool1, self._W_discr2, stride=1, is_training=self.phase)
                + self._b_discr2)
            h_dpool2 = avg_pool_2x2(h_d2)

            # 8 8 256 OR 16 16 256 (if whole image)
            h_d3 = activation_function(
                conv2d(
                    h_dpool2, self._W_discr3, stride=1, is_training=self.phase)
                + self._b_discr3)
            h_dpool3 = avg_pool_2x2(h_d3)

            if self.discr_whole_image:
                # 8 8 512 (if whole image)
                h_d4 = activation_function(
                    conv2d(h_dpool3,
                           self._W_discr4,
                           stride=1,
                           is_training=self.phase) + self._b_discr4)
                h_dfinal = avg_pool_2x2(h_d4)
            else:
                h_dfinal = h_dpool3

            # 4 4 512
            h_dfinal_flat = tf.reshape(h_dfinal,
                                       [self.batch_size, 4 * 4 * 512])
            if self.use_dropout:
                keep_prob = tf.cond(self.phase, lambda: tf.constant(.5),
                                    lambda: tf.constant(1.))
                h_dfinal_drop = tf.nn.dropout(h_dfinal_flat, keep_prob)
            else:
                h_dfinal_drop = h_dfinal_flat
            discr = tf.matmul(h_dfinal_drop, self._W_dfc) + self._b_dfc
            return discr
Example #7
def vgg_net(weights, image):
    """
    首先通过vgg模型初始化权值
    Parameters
    ----------
        weights: vgg模型的权重
        image: 训练的样本图片
    Returns
    -------
        net: vgg模型初始化之后的模型
    """
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            # TensorFlow and MatConvNet use different kernel layouts, so a transpose is needed
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
    return net
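
Example #7 additionally checks a FLAGS.debug switch and calls utils.add_activation_summary. Below is a sketch of how these are commonly defined in TF1 code; the flag default and the summary body are assumptions.

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean('debug', False,
                            'log activation summaries for each ReLU layer')

def add_activation_summary(var):
    # Histogram of the activations plus the fraction of zeros after ReLU.
    tf.summary.histogram(var.op.name + '/activation', var)
    tf.summary.scalar(var.op.name + '/sparsity', tf.nn.zero_fraction(var))
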
Example #8
    def _encode(self):
        with tf.name_scope("encode"):
            with tf.name_scope('weights'):
                self._W_conv1 = weight_variable([5, 5, 3, 128])
                self._W_conv2 = weight_variable([5, 5, 128, 256])
                self._W_conv3 = weight_variable([5, 5, 256, 512])
                self._W_conv4 = weight_variable([5, 5, 512, 512])
                self._W_conv5 = weight_variable([3, 3, 512, 512])
                variable_summaries(self._W_conv1)
                variable_summaries(self._W_conv2)
                variable_summaries(self._W_conv3)
                variable_summaries(self._W_conv4)
                variable_summaries(self._W_conv5)
            with tf.name_scope('biases'):
                self._b_conv1 = bias_variable([128])
                self._b_conv2 = bias_variable([256])
                self._b_conv3 = bias_variable([512])
                self._b_conv4 = bias_variable([512])
                self._b_conv5 = bias_variable([512])
                variable_summaries(self._b_conv1)
                variable_summaries(self._b_conv2)
                variable_summaries(self._b_conv3)
                variable_summaries(self._b_conv4)
                variable_summaries(self._b_conv5)

            # 64 64 3
            self.h_conv1 = activation_function(
                conv2d(self.x_masked,
                       self._W_conv1,
                       stride=1,
                       is_training=self.phase) + self._b_conv1)
            self.h_pool1 = avg_pool_2x2(self.h_conv1)

            # 32 32 128
            self.h_conv2 = activation_function(
                conv2d(self.h_pool1,
                       self._W_conv2,
                       stride=1,
                       is_training=self.phase) + self._b_conv2)
            self.h_pool2 = avg_pool_2x2(self.h_conv2)

            # 16 16 256
            self.h_conv3 = activation_function(
                conv2d(self.h_pool2,
                       self._W_conv3,
                       stride=1,
                       is_training=self.phase) + self._b_conv3)
            self.h_pool3 = avg_pool_2x2(self.h_conv3)

            # 8 8 512
            self.h_conv4 = activation_function(
                conv2d(self.h_pool3,
                       self._W_conv4,
                       stride=1,
                       is_training=self.phase) + self._b_conv4)
            self.h_pool4 = avg_pool_2x2(self.h_conv4)

            # 4 4 512
            self.h_conv5 = activation_function(
                conv2d(self.h_pool4,
                       self._W_conv5,
                       stride=1,
                       is_training=self.phase) + self._b_conv5)

            # 4 4 512
            if self.use_dropout:
                keep_prob = tf.cond(self.phase, lambda: tf.constant(.5),
                                    lambda: tf.constant(1.))
                self.h_conv5_drop = tf.nn.dropout(self.h_conv5, keep_prob)
            else:
                self.h_conv5_drop = self.h_conv5
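
Examples #6 and #8 build their kernels and biases with weight_variable and bias_variable helpers that are not shown. A minimal sketch in the classic TF1 tutorial style; the initializer choices are assumptions.

import tensorflow as tf

def weight_variable(shape, name=None):
    # Small random initialization for convolution kernels.
    initial = tf.truncated_normal(shape, stddev=0.02)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name=None):
    # Constant zero initialization for biases.
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial, name=name)
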