# Code example #1
def vgg_net(weights, image):
    """Build the VGG feature-extraction graph from pre-trained weights.

    Args:
        weights: per-layer parameter array in MatConvNet export layout;
            weights[i][0][0][0][0] holds (kernels, bias) for conv layer i.
        image: input image tensor fed to the first conv layer.

    Returns:
        dict mapping each layer name to its output tensor.
    """
    layer_names = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3',
        'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3',
        'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
        'conv5_4', 'relu5_4',
    )

    outputs = {}
    tensor = image
    for idx, layer_name in enumerate(layer_names):
        prefix = layer_name[:4]
        if prefix == 'conv':
            # Load the pretrained parameters for this conv layer.
            kernels, bias = weights[idx][0][0][0][0]
            # Swap the first two kernel axes into TensorFlow's
            # (height, width, in_channels, out_channels) order.
            kernels = utils.get_variable(
                np.transpose(kernels, (1, 0, 2, 3)), name=layer_name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=layer_name + "_b")
            # Apply the convolution, then add the bias.
            tensor = tf.nn.bias_add(
                tf.nn.conv2d(tensor,
                             kernels,
                             strides=[1, 1, 1, 1],
                             padding="SAME"),
                bias)
        elif prefix == 'relu':
            tensor = tf.nn.relu(tensor, name=layer_name)
        elif prefix == 'pool':
            # NOTE: average pooling here, not VGG's original max pooling —
            # presumably intentional (common in FCN-style uses); confirm.
            tensor = tf.nn.avg_pool(tensor,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding="SAME")
        outputs[layer_name] = tensor

    return outputs
# Code example #2
    def get_output_step(self, cell_output):
        """Map one recurrent-cell output to (mean, var, sampled z).

        Args:
            cell_output: output tensor of the recurrent cell for this step.

        Returns:
            Tuple ``(mean, var, current_z)`` where ``current_z`` is a
            reparameterized sample ``mean + sqrt(var) * eps`` with
            ``eps ~ N(0, I)``.
        """
        # Mean head: a single dense layer over the cell output.
        with tf.variable_scope('mean', reuse=tf.AUTO_REUSE):
            mean = DenseNet(input_=cell_output,
                            hidden_dim=-1,
                            output_dim=self.output_dim,
                            num_layers=1,
                            transfer_fct=None,
                            act_out=self.act_out_mean,
                            reuse=tf.AUTO_REUSE,
                            kinit=self.kinit,
                            bias_init=self.bias_init).output

        with tf.variable_scope('var', reuse=tf.AUTO_REUSE):
            if self.var_shared:
                # Shared variance: one learned vector tiled over the batch.
                var = utils.get_variable(self.output_dim, 'var')
                var = tf.tile(var, [self.batch_size, 1])  # [batch_size, var.dim]
            else:
                # Per-step variance head, mirroring the mean head.
                var = DenseNet(input_=cell_output,
                               hidden_dim=-1,
                               output_dim=self.output_dim,
                               num_layers=1,
                               transfer_fct=None,
                               act_out=self.act_out_var,
                               reuse=tf.AUTO_REUSE,
                               kinit=self.kinit,
                               bias_init=self.bias_init).output

        # Reparameterization trick: z = mean + sqrt(var) * eps.
        eps = tf.random_normal((self.batch_size, self.output_dim), 0, 1,
                               dtype=tf.float32)
        current_z = tf.add(mean, tf.multiply(tf.sqrt(var), eps))
        return mean, var, current_z