Example #1
def evaluate_task(inputs):
    train_inputs, train_outputs, test_inputs, test_outputs = inputs
    with tf.variable_scope('shared_features'):
        # extract features from train and test data
        features_train = feature_extractor_fn(images=train_inputs,
                                              output_size=args.d_theta,
                                              use_batch_norm=True,
                                              dropout_keep_prob=dropout_keep_prob)
        features_test = feature_extractor_fn(images=test_inputs,
                                             output_size=args.d_theta,
                                             use_batch_norm=True,
                                             dropout_keep_prob=dropout_keep_prob)
    # Infer classification layer from q
    with tf.variable_scope('classifier'):
        classifier = infer_classifier(features_train, train_outputs, args.d_theta, args.way)

    # Local reparameterization trick
    # Compute parameters of q distribution over logits
    weight_mean, bias_mean = classifier['weight_mean'], classifier['bias_mean']
    weight_log_variance, bias_log_variance = classifier['weight_log_variance'], classifier['bias_log_variance']
    logits_mean_test = tf.matmul(features_test, weight_mean) + bias_mean
    logits_log_var_test = \
        tf.log(tf.matmul(features_test ** 2, tf.exp(weight_log_variance)) + tf.exp(bias_log_variance))
    logits_sample_test = sample_normal(logits_mean_test, logits_log_var_test, args.samples)
    test_labels_tiled = tf.tile(tf.expand_dims(test_outputs, 0), [args.samples, 1, 1])
    task_log_py = multinoulli_log_density(inputs=test_labels_tiled, logits=logits_sample_test)
    # Average over the Monte Carlo samples in log space (log-mean-exp over axis 0).
    log_num_samples = tf.log(tf.cast(args.samples, tf.float32))
    averaged_predictions = tf.reduce_logsumexp(logits_sample_test, axis=0) - log_num_samples
    task_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(test_outputs, axis=-1),
                                                    tf.argmax(averaged_predictions, axis=-1)), tf.float32))
    task_score = tf.reduce_logsumexp(task_log_py, axis=0) - log_num_samples
    task_loss = -tf.reduce_mean(task_score, axis=0)

    return [task_loss, task_accuracy]
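All of the examples on this page call a helper sample_normal(mean, log_variance, num_samples) whose body is not shown. Judging from how it is used, a minimal sketch could look like the following; the implementation details (reparameterization trick, shape handling) are an assumption, not the project's actual code:

import tensorflow as tf

def sample_normal(mu, log_variance, num_samples):
    # Assumed sketch: draw num_samples reparameterized samples from N(mu, exp(log_variance)).
    # mu and log_variance have shape (batch, d); the result has shape (num_samples, batch, d).
    shape = tf.concat([[num_samples], tf.shape(mu)], axis=0)
    eps = tf.random_normal(shape)                 # standard normal noise
    return mu + eps * tf.exp(0.5 * log_variance)  # mu + sigma * eps

Because the noise enters through a deterministic scale-and-shift, gradients can flow back into mu and log_variance, which is what lets the callers above train the distribution parameters directly.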
Example #2
def shapenet_inference(image_features, angles, d_theta, d_psi, num_samples):
    """
    Perform inference for the view reconstruction task
    :param image_features: tensor (N x d) of training image features.
    :param angles: tensor (N x d_angle) of training image angles.
    :param d_theta: integer dimensionality of the features.
    :param d_psi: integer dimensionality of adaptation parameters.
    :param num_samples: number of samples to generate from the distribution.
    :return: dictionary containing distribution parameters and samples.
    """
    # Concatenate features and angles
    h = tf.concat([image_features, angles], axis=-1)

    # dense layers before pooling
    h = dense_layer(inputs=h, output_size=d_theta, activation=tf.nn.elu, use_bias=True, name='pre_process_dense_1')
    h = dense_layer(inputs=h, output_size=d_theta, activation=tf.nn.elu, use_bias=True, name='pre_process_dense_2')

    # Pool across the instance dimension (average over the N training examples)
    nu = tf.expand_dims(tf.reduce_mean(h, axis=0), axis=0)
    post_processed = _post_process(nu, d_psi)

    # Compute means and log variances for the parameter
    psi = {
        'mu': dense_layer(inputs=post_processed, output_size=d_psi, activation=None, use_bias=True, name='psi_mean'),
        'log_variance': dense_layer(inputs=post_processed, output_size=d_psi, activation=None, use_bias=True,
                                    name='psi_log_var')
    }

    psi['psi_samples'] = sample_normal(psi['mu'], psi['log_variance'], num_samples)
    return psi
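For context, the returned dictionary might be consumed as follows. This is a hypothetical usage sketch: train_features, train_angles, and the dimension values are placeholders chosen purely for illustration.

# Hypothetical usage: infer stochastic adaptation parameters from N training views.
psi = shapenet_inference(image_features=train_features,  # (N, d) image features
                         angles=train_angles,            # (N, d_angle) viewing angles
                         d_theta=256, d_psi=256, num_samples=10)
z = psi['psi_samples']  # one set of adaptation parameters per Monte Carlo sample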
Example #3
File: CVAE.py  Project: Yingjun-Du/VID
def vae_prior(noise, is_training):
    # Note: num_feature, KernelSize, and FLAGS are module-level settings defined
    # elsewhere in CVAE.py; they are not shown in this excerpt.
    regularizer = tf.contrib.layers.l2_regularizer(scale=1e-10)
    initializer = tf.contrib.layers.xavier_initializer()

    with tf.variable_scope("prior", reuse=None):
        #  layer 1
        with tf.variable_scope('layer_1'):
            output = tf.layers.conv2d(noise, filters=16, kernel_size=3, padding='same',
                                      kernel_initializer=initializer,
                                      kernel_regularizer=regularizer, name='conv_1')
            output = tf.layers.batch_normalization(output, training=is_training, name='bn_1')
            output_shortcut = tf.nn.leaky_relu(output, name='relu_1')

        for i in range(3):
            with tf.variable_scope('layer_%d' % (i * 2 + 2)):
                output = tf.layers.conv2d(output_shortcut, num_feature, KernelSize, padding='same',
                                          kernel_initializer=initializer,
                                          kernel_regularizer=regularizer, name=('conv_%d' % (i * 2 + 2)))
                output = tf.layers.batch_normalization(output, training=is_training, name=('bn_%d' % (i * 2 + 2)))
                output = tf.nn.leaky_relu(output, name=('relu_%d' % (i * 2 + 2)))

            with tf.variable_scope('layer_%d' % (i * 2 + 3)):
                output = tf.layers.conv2d(output, num_feature, KernelSize, padding='same',
                                          kernel_initializer=initializer,
                                          kernel_regularizer=regularizer, name=('conv_%d' % (i * 2 + 3)))
                output = tf.layers.batch_normalization(output, training=is_training, name=('bn_%d' % (i * 2 + 3)))
                output = tf.nn.leaky_relu(output, name=('relu_%d' % (i * 2 + 3)))

            output_shortcut = tf.add(output_shortcut, output)  # shortcut

        with tf.variable_scope('layer_final'):
            output = tf.layers.conv2d(output_shortcut, 1, KernelSize, padding='same',
                                      kernel_initializer=initializer,
                                      kernel_regularizer=regularizer, name='conv_26')
            output = tf.layers.batch_normalization(output, training=is_training, name='bn_26')

        # Heads producing the prior's distribution parameters. sample_normal treats its
        # second argument as a log-variance (see the examples above), so 'sd' here is a
        # log-variance rather than a standard deviation, despite its name.
        mn = tf.layers.conv2d(output, filters=1, kernel_size=3, strides=1, padding="SAME", activation=None)
        sd = tf.layers.conv2d(output, filters=1, kernel_size=3, strides=1, padding="SAME", activation=None)

        z = sample_normal(mn, sd, FLAGS.num_samples)
        return z, mn, sd
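In a conditional VAE, the mn and sd returned here (with sd read as a log-variance) would typically enter a closed-form Gaussian KL term against the encoder's posterior. The sketch below is the standard diagonal-Gaussian KL formula for illustration; it is not code from the VID project.

def gaussian_kl(mu_q, logvar_q, mu_p, logvar_p):
    # KL( N(mu_q, exp(logvar_q)) || N(mu_p, exp(logvar_p)) ), summed over all dimensions.
    return 0.5 * tf.reduce_sum(
        logvar_p - logvar_q
        + (tf.exp(logvar_q) + tf.square(mu_q - mu_p)) / tf.exp(logvar_p)
        - 1.0)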