Code example #1
 def xform_dynamic(x):
     return tf1.placeholder_with_default(x, shape=None)
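
All of these snippets revolve around tf1.placeholder_with_default: a tensor that evaluates to a fixed default when nothing is fed, can be overridden through feed_dict like an ordinary placeholder, and whose static shape can be hidden by passing shape=None. A minimal self-contained sketch of that behavior (assuming TensorFlow 2.x with the v1 compatibility API; not taken from any of the projects below):

import numpy as np
import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()

default = np.array([1., 2., 3.], dtype=np.float32)
# shape=None hides the static shape from downstream shape inference;
# pass shape=default.shape instead to keep it fully static.
x = tf1.placeholder_with_default(default, shape=None)
y = x * 2.

with tf1.Session() as sess:
    print(sess.run(y))                        # default used: [2. 4. 6.]
    print(sess.run(y, feed_dict={x: [10.]}))  # fed value wins: [20.]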
Code example #2
 def dynamic_run(fun, x_value, **kwargs):
     # Defined inside a tf.test.TestCase method, so self below refers to
     # the enclosing test case (hence self.evaluate).
     x_value = np.array(x_value, dtype=np.float32)
     placeholder = tf1.placeholder_with_default(x_value, shape=None)
     return self.evaluate(fun(placeholder, **kwargs))
Code example #3
 def _build_tensor(self, ndarray):
     if self.use_static_batch_size:
         shape = ndarray.shape
     else:
         shape = [None] + list(ndarray.shape[1:])
     return tf1.placeholder_with_default(input=ndarray, shape=shape)
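
Example #3 hides only the leading batch dimension and keeps the rest static, a common middle ground between fully static and fully dynamic tests. A short sketch of what that buys shape inference (graph mode assumed):

import numpy as np
import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()

ndarray = np.zeros([8, 28, 28], dtype=np.float32)
fully_dynamic = tf1.placeholder_with_default(ndarray, shape=None)
batch_dynamic = tf1.placeholder_with_default(
    ndarray, shape=[None] + list(ndarray.shape[1:]))

print(fully_dynamic.shape)  # <unknown>: even the rank is hidden
print(batch_dynamic.shape)  # (None, 28, 28): rank and trailing dims survive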
Code example #4
def execute(configs):
    tf.reset_default_graph()
    random.seed(configs["random_state"])
    nprand.seed(configs["random_state"])

    DECAY_FACTOR = 0.80
    decay_steps = 1000
    latent_dim = configs["latent_dim"]
    som_dim = [configs["som_dim"], configs["som_dim"]]
    num_classes = 10
    global_step = tf.Variable(0, trainable=False, name="global_step")

    embeddings = tf.get_variable("embeddings", som_dim + [latent_dim],
                                 initializer=tf.truncated_normal_initializer(stddev=0.05))

    x_patient = tf.placeholder(tf.float32, shape=[None, 98])
    y = tf.placeholder(tf.int32, shape=[None])
    train = tf.placeholder(tf.bool, name="train")
    batch_size = tf.shape(x_patient)[0]

    with tf.variable_scope("encoder"):
        dense_1 = tf.keras.layers.Dense(configs["conv_size"])(x_patient)
        dense_2 = tf.keras.layers.Dense(configs["conv_size"])(dense_1)
        z_e = tf.keras.layers.Dense(latent_dim)(dense_2)

    # Squared distance from each encoding to every SOM embedding; the
    # nearest embedding becomes the quantized code z_q.
    z_dist = tf.squared_difference(tf.expand_dims(tf.expand_dims(z_e, 1), 1), tf.expand_dims(embeddings, 0))
    z_dist_red = tf.reduce_sum(z_dist, axis=-1)
    z_dist_flat = tf.reshape(z_dist_red, [batch_size, -1])
    k = tf.argmin(z_dist_flat, axis=-1)
    k_1 = k // som_dim[1]
    k_2 = k % som_dim[1]
    k_stacked = tf.stack([k_1, k_2], axis=1)
    z_q = tf.gather_nd(embeddings, k_stacked)

    def decoder(z_tensor):
        with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
            dec_dense_1 = tf.keras.layers.Dense(configs["conv_size"])(z_tensor)
            dec_dense_2 = tf.keras.layers.Dense(configs["conv_size"])(dec_dense_1)
            flat_dec = tf.keras.layers.Dense(98)(dec_dense_2)
            x_hat = flat_dec
            return x_hat

    x_hat = decoder(z_q)

    beta = 0.25
    loss_rec_mse = tf.losses.mean_squared_error(x_patient, x_hat)
    loss_vq = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(z_e), z_q))
    loss_commit = tf.reduce_mean(tf.squared_difference(z_e, tf.stop_gradient(z_q)))
    loss = loss_rec_mse + loss_vq + beta*loss_commit

    learning_rate = tf.placeholder_with_default(0.001, [])
    lr_decay = tf.train.exponential_decay(learning_rate, global_step, decay_steps, DECAY_FACTOR, staircase=True)

    # VQ-VAE gradient wiring: the decoder trains on the full loss, the
    # encoder receives the reconstruction gradient routed through z_q plus
    # the commitment term, and the embeddings train on the VQ loss alone.
    decoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "decoder")
    decoder_grads = list(zip(tf.gradients(loss, decoder_vars), decoder_vars))
    encoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoder")
    grad_z = tf.gradients(loss_rec_mse, z_q)

    encoder_grads = [(tf.gradients(z_e, var, grad_z)[0] + beta * tf.gradients(loss_commit, var)[0], var)
                     for var in encoder_vars]
    embed_grads = list(zip(tf.gradients(loss_vq, embeddings), [embeddings]))

    optimizer = tf.train.AdamOptimizer(lr_decay)
    train_step = optimizer.apply_gradients(decoder_grads+encoder_grads+embed_grads)

    BATCH_SIZE = configs["batch_size"]
    EPOCHS = configs["n_epochs"]
    NUM_TESTS = 1 

    if configs["benchmark"]:
        times_per_epoch=[]

    for data_set in configs["DATASETS"]:

        if not configs["debug_mode"]:
            with open("../results/vqvae_{}_{}.tsv".format(data_set,configs["random_state"]),'w') as fp:
                csv_fp=csv.writer(fp,delimiter='\t')
                csv_fp.writerow(["model","task","nmi"])

        if data_set=="eicu":
            data_train, data_test, labels_train, labels_test = get_data(test=True, train_ratio=configs["train_ratio"])

        for _ in range(NUM_TESTS):

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                indices_unsup = np.arange(data_train.shape[0])
                with tqdm(total=EPOCHS*(data_train.shape[0]//BATCH_SIZE)) as pbar:
                    for epoch in range(EPOCHS):
                        if configs["benchmark"]:
                            t_begin=timeit.default_timer()
                        np.random.shuffle(indices_unsup)
                        test_mse = sess.run(loss_rec_mse, feed_dict={x_patient: data_test[:100], train: False})
                        for i in range(indices_unsup.shape[0]//BATCH_SIZE):
                            batch_data = data_train[indices_unsup[BATCH_SIZE*i:BATCH_SIZE*(i+1)]]
                            if i%100 == 0:
                                train_mse, train_commit, train_loss = sess.run([loss_rec_mse, loss_commit, loss],
                                                                               feed_dict={x_patient: batch_data, train: False})
                            train_step.run(feed_dict={x_patient: batch_data, train: True})
                            pbar.set_postfix(epoch=epoch, train_mse=train_mse, train_commit=train_commit,
                                                 test_mse=test_mse, refresh=False)
                            pbar.update(1)

                        if configs["benchmark"]:
                            t_end=timeit.default_timer()
                            times_per_epoch.append(t_end-t_begin)

                if configs["benchmark"]:
                    print("Times per epoch: {:.3f}".format(np.mean(times_per_epoch)))
                    sys.exit(0)

                test_k_all = []
                test_x_hat_all = []
                for i in trange(data_test.shape[0]//100):
                    batch_data = data_test[100*i:100*(i+1)]
                    test_k_all.extend(sess.run(k, feed_dict={x_patient: batch_data, train: False}))
                    test_x_hat_all.extend(sess.run(x_hat, feed_dict={x_patient: batch_data, train: False}))
                test_x_hat_all = np.array(test_x_hat_all)
                test_k_all = np.array(test_k_all)

            # Trim to the samples actually evaluated (the loop drops the remainder).
            data_test = data_test[:test_x_hat_all.shape[0]]

            for task_desc, task_idx in [("apache_0", 0), ("apache_6", 1), ("apache_12", 2), ("apache_24", 3)]:
                labels_test_task = labels_test[:, task_idx]
                aggregated_mses = []
                aggregated_NMIs = []
                aggregated_purities = []
                aggregated_mses.append(mean_squared_error(data_test, test_x_hat_all))
                aggregated_NMIs.append(normalized_mutual_info_score(test_k_all, labels_test_task[:len(test_k_all)]))
                aggregated_purities.append(cluster_purity(test_k_all, labels_test_task[:len(test_k_all)]))

                print("Results for {} on task: {}".format(data_set,task_desc))
                print("Test MSE: {} +- {}\nTest NMI: {} +- {}\nTest purity: {} +- {}".format(np.mean(aggregated_mses),
                            np.std(aggregated_mses)/np.sqrt(NUM_TESTS), np.mean(aggregated_NMIs), np.std(aggregated_NMIs)/
                            np.sqrt(NUM_TESTS), np.mean(aggregated_purities), np.std(aggregated_purities)/np.sqrt(NUM_TESTS)))

                if not configs["debug_mode"]:
                    with open("../results/vqvae_{}_{}.tsv".format(data_set,configs["random_state"]),'a') as fp:
                        csv_fp=csv.writer(fp,delimiter='\t')
                        csv_fp.writerow(["vqvae",task_desc,str(aggregated_NMIs[0])])
Code example #5
    def testShapes(self):
        # We'll use a batch shape of [2, 3, 5, 7, 11]

        # 5x5 grid of index points in R^2 and flatten to 25x2
        index_points = np.linspace(-4., 4., 5, dtype=np.float64)
        index_points = np.stack(np.meshgrid(index_points, index_points),
                                axis=-1)
        index_points = np.reshape(index_points, [-1, 2])
        # ==> shape = [25, 2]
        batched_index_points = np.reshape(index_points, [1, 1, 25, 2])
        batched_index_points = np.stack([batched_index_points] * 5)
        # ==> shape = [5, 1, 1, 25, 2]

        # Kernel with batch_shape [2, 3, 1, 1, 1]
        amplitude = np.array([1., 2.], np.float64).reshape([2, 1, 1, 1, 1])
        length_scale = np.array([.1, .2, .3],
                                np.float64).reshape([1, 3, 1, 1, 1])
        observation_noise_variance = np.array([1e-9], np.float64).reshape(
            [1, 1, 1, 1, 1])

        jitter = np.float64(1e-6)
        observation_index_points = (np.random.uniform(
            -1., 1., (7, 1, 7, 2)).astype(np.float64))
        observations = np.random.uniform(-1., 1., (11, 7)).astype(np.float64)

        def cholesky_fn(x):
            return tf.linalg.cholesky(
                tf.linalg.set_diag(x,
                                   tf.linalg.diag_part(x) + 1.))

        if not self.is_static:
            amplitude = tf1.placeholder_with_default(amplitude, shape=None)
            length_scale = tf1.placeholder_with_default(length_scale,
                                                        shape=None)
            batched_index_points = tf1.placeholder_with_default(
                batched_index_points, shape=None)

            observation_index_points = tf1.placeholder_with_default(
                observation_index_points, shape=None)
            observations = tf1.placeholder_with_default(observations,
                                                        shape=None)

        kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)

        gprm = tfd.GaussianProcessRegressionModel(kernel,
                                                  batched_index_points,
                                                  observation_index_points,
                                                  observations,
                                                  observation_noise_variance,
                                                  cholesky_fn=cholesky_fn,
                                                  jitter=jitter,
                                                  validate_args=True)

        batch_shape = [2, 3, 5, 7, 11]
        event_shape = [25]
        sample_shape = [9, 3]

        samples = gprm.sample(sample_shape, seed=test_util.test_seed())

        self.assertIs(cholesky_fn, gprm.cholesky_fn)

        if self.is_static or tf.executing_eagerly():
            self.assertAllEqual(gprm.batch_shape_tensor(), batch_shape)
            self.assertAllEqual(gprm.event_shape_tensor(), event_shape)
            self.assertAllEqual(samples.shape,
                                sample_shape + batch_shape + event_shape)
            self.assertAllEqual(gprm.batch_shape, batch_shape)
            self.assertAllEqual(gprm.event_shape, event_shape)
            self.assertAllEqual(samples.shape,
                                sample_shape + batch_shape + event_shape)
        else:
            self.assertAllEqual(self.evaluate(gprm.batch_shape_tensor()),
                                batch_shape)
            self.assertAllEqual(self.evaluate(gprm.event_shape_tensor()),
                                event_shape)
            self.assertAllEqual(
                self.evaluate(samples).shape,
                sample_shape + batch_shape + event_shape)
            self.assertIsNone(tensorshape_util.rank(samples.shape))
            self.assertIsNone(tensorshape_util.rank(gprm.batch_shape))
            self.assertEqual(tensorshape_util.rank(gprm.event_shape), 1)
            self.assertIsNone(
                tf.compat.dimension_value(
                    tensorshape_util.dims(gprm.event_shape)[0]))
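
The expected batch shape [2, 3, 5, 7, 11] in example #5 is simply the broadcast of the component batch shapes once the example and feature dimensions are stripped. NumPy can check the arithmetic (np.broadcast_shapes requires NumPy 1.20+):

import numpy as np

# amplitude [2,1,1,1,1], length_scale [1,3,1,1,1], index points [5,1,1],
# observation index points [7,1], observations [11]:
print(np.broadcast_shapes((2, 1, 1, 1, 1), (1, 3, 1, 1, 1),
                          (5, 1, 1), (7, 1), (11,)))
# -> (2, 3, 5, 7, 11)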
Code example #6

# get dataset
dataset = BinaryDbReader(mode='training',
                         batch_size=8,
                         shuffle=True,
                         use_wrist_coord=False,
                         hand_crop=True,
                         coord_uv_noise=False,
                         crop_center_noise=False)

# build network graph
data = dataset.get()

# build network
evaluation = tf.placeholder_with_default(True, shape=())
net = ColorHandPose3DNetwork()
keypoints_scoremap = net.inference_pose2d(data['image_crop'], train=True)
s = data['scoremap'].get_shape().as_list()
keypoints_scoremap = [
    tf.image.resize_images(x, (s[1], s[2])) for x in keypoints_scoremap
]

# Start TF
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
tf.train.start_queue_runners(sess=sess)

# Loss
loss = 0.0
s = data['scoremap'].get_shape().as_list()
Code example #7
 def _make_placeholder(self, x):
     return tf1.placeholder_with_default(
         x, shape=(x.shape if self.use_static_shape else None))
Code example #8
 def _input(self, value):
     """Helper to create inputs with varied dtypes an static shapes."""
     value = tf.cast(value, self.dtype)
     return tf1.placeholder_with_default(
         value, shape=value.shape if self.use_static_shape else None)
Code example #9
def make_tensor_hiding_attributes(value, hide_shape, hide_value=True):
    if not hide_value:
        return tf.convert_to_tensor(value)

    shape = None if hide_shape else getattr(value, 'shape', None)
    return tf1.placeholder_with_default(value, shape=shape)
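
Example #9 folds both test knobs into one helper: hide_value swaps a plain constant for a placeholder, and hide_shape additionally erases the static shape. Illustrative calls (graph mode assumed):

import numpy as np

value = np.ones([3, 2], dtype=np.float32)
t1 = make_tensor_hiding_attributes(value, hide_shape=False)  # static [3, 2]
t2 = make_tensor_hiding_attributes(value, hide_shape=True)   # unknown shape
t3 = make_tensor_hiding_attributes(value, hide_shape=True, hide_value=False)
# t3 ignores hide_shape: a constant keeps both its value and shape static.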
Code example #10
    def _init_placeholders(self):
        self.input_ids_ph = tf.placeholder(shape=(None, None), dtype=tf.int32, name='ids_ph')
        self.input_masks_ph = tf.placeholder(shape=(None, None), dtype=tf.int32, name='masks_ph')
        self.token_types_ph = tf.placeholder(shape=(None, None), dtype=tf.int32, name='token_types_ph')

        self.is_train_ph = tf.placeholder_with_default(False, shape=[], name='is_train_ph')
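
A flag like is_train_ph is typically consumed by tf.cond so that one graph serves both phases, with inference as the unfed default. A minimal sketch of that wiring (hypothetical tensors, graph mode):

import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()

is_train = tf1.placeholder_with_default(False, shape=[], name='is_train')
h = tf1.ones([4, 8])
out = tf1.cond(is_train,
               lambda: tf1.nn.dropout(h, rate=0.5),  # fed True: training path
               lambda: h)                            # default: inference path

with tf1.Session() as sess:
    sess.run(out)                              # inference, no feed needed
    sess.run(out, feed_dict={is_train: True})  # training behavior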
Code example #11
    def testShapes(self):
        # 5x5 grid of index points in R^2 and flatten to 25x2
        index_points = np.linspace(-4., 4., 5, dtype=np.float32)
        index_points = np.stack(np.meshgrid(index_points, index_points),
                                axis=-1)
        index_points = np.reshape(index_points, [-1, 2])
        # ==> shape = [25, 2]

        # Kernel with batch_shape [2, 4, 3, 1]
        amplitude = np.array([1., 2.], np.float32).reshape([2, 1, 1, 1])
        length_scale = np.array([1., 2., 3., 4.],
                                np.float32).reshape([1, 4, 1, 1])
        observation_noise_variance = np.array([1e-5, 1e-6, 1e-5],
                                              np.float32).reshape([1, 1, 3, 1])
        batched_index_points = np.stack([index_points] * 6)
        # ==> shape = [6, 25, 2]
        if not self.is_static:
            amplitude = tf1.placeholder_with_default(amplitude, shape=None)
            length_scale = tf1.placeholder_with_default(length_scale,
                                                        shape=None)
            batched_index_points = tf1.placeholder_with_default(
                batched_index_points, shape=None)
        kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
        gp = tfd.GaussianProcess(
            kernel,
            batched_index_points,
            observation_noise_variance=observation_noise_variance,
            jitter=1e-5)

        batch_shape = [2, 4, 3, 6]
        event_shape = [25]
        sample_shape = [5, 3]

        samples = gp.sample(sample_shape)

        if self.is_static or tf.executing_eagerly():
            self.assertAllEqual(gp.batch_shape_tensor(), batch_shape)
            self.assertAllEqual(gp.event_shape_tensor(), event_shape)
            self.assertAllEqual(samples.shape,
                                sample_shape + batch_shape + event_shape)
            self.assertAllEqual(gp.batch_shape, batch_shape)
            self.assertAllEqual(gp.event_shape, event_shape)
            self.assertAllEqual(samples.shape,
                                sample_shape + batch_shape + event_shape)
            self.assertAllEqual(gp.mean().shape, batch_shape + event_shape)
            self.assertAllEqual(gp.variance().shape, batch_shape + event_shape)
        else:
            self.assertAllEqual(self.evaluate(gp.batch_shape_tensor()),
                                batch_shape)
            self.assertAllEqual(self.evaluate(gp.event_shape_tensor()),
                                event_shape)
            self.assertAllEqual(
                self.evaluate(samples).shape,
                sample_shape + batch_shape + event_shape)
            self.assertIsNone(tensorshape_util.rank(samples.shape))
            self.assertIsNone(tensorshape_util.rank(gp.batch_shape))
            self.assertEqual(tensorshape_util.rank(gp.event_shape), 1)
            self.assertIsNone(
                tf.compat.dimension_value(
                    tensorshape_util.dims(gp.event_shape)[0]))
            self.assertAllEqual(self.evaluate(tf.shape(input=gp.mean())),
                                batch_shape + event_shape)
            self.assertAllEqual(self.evaluate(tf.shape(input=gp.variance())),
                                batch_shape + event_shape)
Code example #12
    def test_variable_shapes(self, normalize_weights, share_combining_weights,
                             data_format):
        spatial_rank = 2
        kernel_size = 3
        filters = 16
        if data_format == 'channels_last':
            input_shape = [2, 32, 32, 3]
        elif data_format == 'channels_first':
            input_shape = [2, 3, 32, 32]

        images = tf.constant(np.random.randn(*tuple(input_shape)),
                             dtype=tf.float32)

        # Test handling variable input size if weights shared across one dimension.
        if share_combining_weights == (True, False):
            input_shape_ = input_shape[:]
            if data_format == 'channels_last':
                input_shape_[2] = None
            elif data_format == 'channels_first':
                input_shape_[3] = None
            images = tf.placeholder_with_default(images,
                                                 shape=tuple(input_shape_))

        elif share_combining_weights == (False, True):
            input_shape_ = input_shape[:]
            if data_format == 'channels_last':
                input_shape_[1] = None
            elif data_format == 'channels_first':
                input_shape_[2] = None
            images = tf.placeholder_with_default(images,
                                                 shape=tuple(input_shape_))

        layer = layers.LowRankLocallyConnected2D(
            filters=filters,
            kernel_size=(kernel_size, kernel_size),
            strides=(1, 1),
            spatial_rank=spatial_rank,
            normalize_weights=normalize_weights,
            share_row_combining_weights=share_combining_weights[0],
            share_col_combining_weights=share_combining_weights[1],
            data_format=data_format,
            input_dependent=False)
        output = layer(images)

        var_dict = {v.op.name: v for v in tf.global_variables()}

        # Make sure all generated weights are tracked in layer.weights.
        self.assertLen(var_dict, len(layer.weights))

        # Make sure the number of weights generated is correct.
        if share_combining_weights[0] and share_combining_weights[1]:
            # weights rows, weights cols, bias (rows, cols, channels), kernel bases
            self.assertLen(var_dict, 6)
        else:
            self.assertLen(var_dict, 4)

        self.evaluate(tf.global_variables_initializer())
        combining_weights = self.evaluate(layer.combining_weights)
        if data_format == 'channels_last':
            self.assertEqual(
                self.evaluate(output).shape,
                (input_shape[0], input_shape[1] - kernel_size + 1,
                 input_shape[2] - kernel_size + 1, filters))
        elif data_format == 'channels_first':
            self.assertEqual(
                self.evaluate(output).shape, (
                    input_shape[0],
                    filters,
                    input_shape[2] - kernel_size + 1,
                    input_shape[3] - kernel_size + 1,
                ))
        if normalize_weights == 'softmax':
            self.assertNDArrayNear(np.sum(combining_weights, axis=-1),
                                   np.ones(combining_weights.shape[:-1],
                                           dtype=np.float32),
                                   err=1e-5)
        elif normalize_weights == 'norm':
            self.assertNDArrayNear(np.sqrt(
                np.sum(combining_weights**2, axis=-1)),
                                   np.ones(combining_weights.shape[:-1],
                                           dtype=np.float32),
                                   err=1e-5)
Code example #13
def main(latent_dim, som_dim, learning_rate, decay_factor, alpha, beta, gamma,
         theta, ex_name, more_runs, data_set, dropout, prior_var, convolution,
         prior, validation, epochs_pretrain, num_epochs, batch_size):
    """Main method to build a model, train it and evaluate it.
    Returns:
        dict: Results of the evaluation (NMI, Purity).
    """

    if not os.path.exists('../models'):
        os.mkdir('../models')

    # Dimensions for MNIST-like data
    input_length = 28
    input_channels = 28
    lr_val = tf.placeholder_with_default(learning_rate, [])

    model = DPSOM(latent_dim=latent_dim,
                  som_dim=som_dim,
                  learning_rate=lr_val,
                  alpha=alpha,
                  decay_factor=decay_factor,
                  input_length=input_length,
                  input_channels=input_channels,
                  beta=beta,
                  theta=theta,
                  gamma=gamma,
                  convolution=convolution,
                  dropout=dropout,
                  prior_var=prior_var,
                  prior=prior)

    if data_set == "MNIST":
        mnist = tf.keras.datasets.mnist.load_data(path='mnist.npz')
        data_total = np.reshape(mnist[0][0], [-1, 28 * 28])
        maxx = np.reshape(np.amax(data_total, axis=-1), [-1, 1])
        data_total = np.reshape(data_total / maxx, [-1, 28, 28, 1])
        labels_total = mnist[0][1]
        data_test = np.reshape(mnist[1][0], [-1, 28 * 28])
        maxx = np.reshape(np.amax(data_test, axis=-1), [-1, 1])
        data_test = np.reshape(data_test / maxx, [-1, 28, 28, 1])
        labels_test = mnist[1][1]
        data_train, data_val, labels_train, labels_val = train_test_split(
            data_total, labels_total, test_size=0.15, random_state=42)

    else:
        ((data_total, labels_total),
         (data_test,
          labels_test)) = tf.keras.datasets.fashion_mnist.load_data()
        data_total = np.reshape(data_total, [-1, 28 * 28])
        maxx = np.reshape(np.amax(data_total, axis=-1), [-1, 1])
        data_total = np.reshape(data_total / maxx, [-1, 28, 28, 1])
        data_test = np.reshape(data_test, [-1, 28 * 28])
        maxx = np.reshape(np.amax(data_test, axis=-1), [-1, 1])
        data_test = np.reshape(data_test / maxx, [-1, 28, 28, 1])
        data_train, data_val, labels_train, labels_val = train_test_split(
            data_total, labels_total, test_size=0.15, random_state=42)
    data_generator = get_data_generator(data_train, data_val, labels_train,
                                        labels_val, data_test, labels_test)
    if not validation:
        data_val = data_test

    if more_runs:
        NMI = []
        PUR = []
        for i in range(10):
            results = train_model(model, data_train, data_val, data_generator,
                                  lr_val)
            NMI.append(results["NMI"])
            PUR.append(results["Purity"])
        NMI_mean = np.mean(NMI)
        NMI_sd = np.std(NMI) / np.sqrt(10)
        PUR_mean = np.mean(PUR)
        PUR_sd = np.std(PUR) / np.sqrt(10)
        print("\nRESULTS NMI: %f +- %f, PUR: %f +- %f.  Name: %r. \n" %
              (NMI_mean, NMI_sd, PUR_mean, PUR_sd, ex_name))
        if data_set == "MNIST":
            f = open("evaluation_MNIST.txt", "a+")
        else:
            f = open("evaluation_fMNIST.txt", "a+")
        f.write(
            "som_dim=[%d,%d], latent_dim= %d, batch_size= %d, learning_rate= %f, theta= %f, "
            "dropout=%f, prior=%f, gamma=%d, beta%f, epochs_pretrain=%d, epochs= %d"
            %
            (som_dim[0], som_dim[1], latent_dim, batch_size, learning_rate,
             theta, dropout, prior, gamma, beta, epochs_pretrain, num_epochs))

        f.write(", RESULTS NMI: %f + %f, PUR: %f + %f.  Name: %r \n" %
                (NMI_mean, NMI_sd, PUR_mean, PUR_sd, ex_name))
        f.close()
    else:
        results = train_model(model, data_train, data_val, data_generator,
                              lr_val)
        print("\n NMI: {}, AMI: {}, PUR: {}.  Name: %r.\n".format(
            results["NMI"], results["AMI"], results["Purity"], ex_name))
    return results
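
The lr_val placeholder in example #13 lets the training loop decay the learning rate by feeding a new value each epoch, while unfed runs fall back to the configured default. A stripped-down sketch of the pattern (toy objective, not the DPSOM trainer):

import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()

lr = tf1.placeholder_with_default(0.001, shape=[])
w = tf1.get_variable('w', shape=[], initializer=tf1.zeros_initializer())
loss = (w - 1.0) ** 2
train_op = tf1.train.GradientDescentOptimizer(lr).minimize(loss)

with tf1.Session() as sess:
    sess.run(tf1.global_variables_initializer())
    sess.run(train_op)                        # uses the default rate
    sess.run(train_op, feed_dict={lr: 1e-4})  # decayed rate fed explicitly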
Code example #14
File: blockwise_test.py  Project: ic/probability
    def testExplicitBlocks(self, dynamic_shape, batch_shape):
        block_sizes = tf.convert_to_tensor(value=[2, 1, 3])
        block_sizes = tf1.placeholder_with_default(
            block_sizes, shape=None if dynamic_shape else block_sizes.shape)
        exp = tfb.Exp()
        sp = tfb.Softplus()
        aff = tfb.Affine(scale_diag=[2., 3., 4.])
        blockwise = tfb.Blockwise(bijectors=[exp, sp, aff],
                                  block_sizes=block_sizes,
                                  maybe_changes_size=False)

        x = tf.cast([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=tf.float32)
        for s in batch_shape:
            x = tf.expand_dims(x, 0)
            x = tf.tile(x, [s] + [1] * (tensorshape_util.rank(x.shape) - 1))
        x = tf1.placeholder_with_default(
            x, shape=None if dynamic_shape else x.shape)

        # Identity to break the caching.
        blockwise_y = tf.identity(blockwise.forward(x))
        blockwise_fldj = blockwise.forward_log_det_jacobian(x, event_ndims=1)
        blockwise_x = blockwise.inverse(blockwise_y)
        blockwise_ildj = blockwise.inverse_log_det_jacobian(blockwise_y,
                                                            event_ndims=1)

        if not dynamic_shape:
            self.assertEqual(blockwise_y.shape, batch_shape + [6])
            self.assertEqual(blockwise_fldj.shape, batch_shape + [])
            self.assertEqual(blockwise_x.shape, batch_shape + [6])
            self.assertEqual(blockwise_ildj.shape, batch_shape + [])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_y)),
                            batch_shape + [6])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_fldj)),
                            batch_shape + [])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_x)),
                            batch_shape + [6])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_ildj)),
                            batch_shape + [])

        expl_y = tf.concat([
            exp.forward(x[..., :2]),
            sp.forward(x[..., 2:3]),
            aff.forward(x[..., 3:]),
        ],
                           axis=-1)
        expl_fldj = sum([
            exp.forward_log_det_jacobian(x[..., :2], event_ndims=1),
            sp.forward_log_det_jacobian(x[..., 2:3], event_ndims=1),
            aff.forward_log_det_jacobian(x[..., 3:], event_ndims=1)
        ])
        expl_x = tf.concat([
            exp.inverse(expl_y[..., :2]),
            sp.inverse(expl_y[..., 2:3]),
            aff.inverse(expl_y[..., 3:])
        ],
                           axis=-1)
        expl_ildj = sum([
            exp.inverse_log_det_jacobian(expl_y[..., :2], event_ndims=1),
            sp.inverse_log_det_jacobian(expl_y[..., 2:3], event_ndims=1),
            aff.inverse_log_det_jacobian(expl_y[..., 3:], event_ndims=1)
        ])

        self.assertAllClose(self.evaluate(expl_y), self.evaluate(blockwise_y))
        self.assertAllClose(self.evaluate(expl_fldj),
                            self.evaluate(blockwise_fldj))
        self.assertAllClose(self.evaluate(expl_x), self.evaluate(blockwise_x))
        self.assertAllClose(self.evaluate(expl_ildj),
                            self.evaluate(blockwise_ildj))
Code example #15
    def _testMVN(self,
                 base_distribution_class,
                 base_distribution_kwargs,
                 batch_shape=(),
                 event_shape=(),
                 not_implemented_message=None):
        # Overriding shapes must be compatible w/bijector; most bijectors are
        # batch_shape agnostic and only care about event_ndims.
        # In the case of `Affine`, if we got it wrong then it would fire an
        # exception due to incompatible dimensions.
        batch_shape_pl = tf1.placeholder_with_default(
            input=np.int32(batch_shape),
            shape=None,
            name='dynamic_batch_shape')
        event_shape_pl = tf1.placeholder_with_default(
            input=np.int32(event_shape),
            shape=None,
            name='dynamic_event_shape')
        fake_mvn_dynamic = self._cls()(
            distribution=base_distribution_class(validate_args=True,
                                                 **base_distribution_kwargs),
            bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=batch_shape_pl,
            event_shape=event_shape_pl,
            validate_args=True)

        fake_mvn_static = self._cls()(
            distribution=base_distribution_class(validate_args=True,
                                                 **base_distribution_kwargs),
            bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=batch_shape,
            event_shape=event_shape,
            validate_args=True)

        actual_mean = np.tile(self._shift, [2, 1])  # Affine elided this tile.
        actual_cov = np.matmul(self._tril, np.transpose(self._tril, [0, 2, 1]))

        def actual_mvn_log_prob(x):
            return np.concatenate([
                [  # pylint: disable=g-complex-comprehension
                    stats.multivariate_normal(actual_mean[i],
                                              actual_cov[i]).logpdf(x[:, i, :])
                ] for i in range(len(actual_cov))
            ]).T

        actual_mvn_entropy = np.concatenate([[
            stats.multivariate_normal(actual_mean[i], actual_cov[i]).entropy()
        ] for i in range(len(actual_cov))])

        self.assertAllEqual([3], fake_mvn_static.event_shape)
        self.assertAllEqual([2], fake_mvn_static.batch_shape)

        if not tf.executing_eagerly():
            self.assertAllEqual(tf.TensorShape(None),
                                fake_mvn_dynamic.event_shape)
            self.assertAllEqual(tf.TensorShape(None),
                                fake_mvn_dynamic.batch_shape)

        x = self.evaluate(fake_mvn_static.sample(5,
                                                 seed=test_util.test_seed()))
        for unsupported_fn in (fake_mvn_static.log_cdf, fake_mvn_static.cdf,
                               fake_mvn_static.survival_function,
                               fake_mvn_static.log_survival_function):
            with self.assertRaisesRegexp(NotImplementedError,
                                         not_implemented_message):
                unsupported_fn(x)

        num_samples = 7e3
        for fake_mvn in [fake_mvn_static, fake_mvn_dynamic]:
            # Ensure sample works by checking first, second moments.
            y = fake_mvn.sample(int(num_samples), seed=test_util.test_seed())
            x = y[0:5, ...]
            sample_mean = tf.reduce_mean(input_tensor=y, axis=0)
            centered_y = tf.transpose(a=y - sample_mean, perm=[1, 2, 0])
            sample_cov = tf.matmul(centered_y, centered_y,
                                   transpose_b=True) / num_samples
            [
                sample_mean_,
                sample_cov_,
                x_,
                fake_event_shape_,
                fake_batch_shape_,
                fake_log_prob_,
                fake_prob_,
                fake_mean_,
                fake_entropy_,
            ] = self.evaluate([
                sample_mean,
                sample_cov,
                x,
                fake_mvn.event_shape_tensor(),
                fake_mvn.batch_shape_tensor(),
                fake_mvn.log_prob(x),
                fake_mvn.prob(x),
                fake_mvn.mean(),
                fake_mvn.entropy(),
            ])

            self.assertAllClose(actual_mean, sample_mean_, atol=0.1, rtol=0.1)
            self.assertAllClose(actual_cov, sample_cov_, atol=0., rtol=0.1)

            # Ensure all other functions work as intended.
            self.assertAllEqual([5, 2, 3], x_.shape)
            self.assertAllEqual([3], fake_event_shape_)
            self.assertAllEqual([2], fake_batch_shape_)
            self.assertAllClose(actual_mvn_log_prob(x_),
                                fake_log_prob_,
                                atol=0.,
                                rtol=1e-6)
            self.assertAllClose(np.exp(actual_mvn_log_prob(x_)),
                                fake_prob_,
                                atol=0.,
                                rtol=1e-5)
            self.assertAllClose(actual_mean, fake_mean_, atol=0., rtol=1e-6)
            self.assertAllClose(actual_mvn_entropy,
                                fake_entropy_,
                                atol=0.,
                                rtol=1e-6)
Code example #16
 def test_num_paddings_dynamic(self):
     n = tf1.placeholder_with_default(2, shape=None)
     x = ps.pad([2, 3], paddings=[[0, n]], constant_values=1)
     if not ps.is_numpy(x):
         x = self.evaluate(x)
     self.assertAllEqual([2, 3, 1, 1], x)
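
Example #16 exercises TFP's prefer_static utilities: with fully static inputs ps.pad evaluates immediately to a NumPy array, but the dynamic n above forces a graph tensor, hence the ps.is_numpy check before self.evaluate. The static counterpart, for contrast (prefer_static is an internal TFP module, so the import path may change between releases):

from tensorflow_probability.python.internal import prefer_static as ps

x = ps.pad([2, 3], paddings=[[0, 2]], constant_values=1)
# All inputs are static, so x is already a NumPy array: [2, 3, 1, 1]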
Code example #17
    def testMatrixEvent(self):
        batch_shape = [2]
        event_shape = [2, 3, 3]
        batch_shape_pl = tf1.placeholder_with_default(
            input=np.int32(batch_shape),
            shape=None,
            name='dynamic_batch_shape')
        event_shape_pl = tf1.placeholder_with_default(
            input=np.int32(event_shape),
            shape=None,
            name='dynamic_event_shape')

        scale = 2.
        loc = 0.
        fake_mvn_dynamic = self._cls()(distribution=tfd.Normal(loc=loc,
                                                               scale=scale),
                                       bijector=DummyMatrixTransform(),
                                       batch_shape=batch_shape_pl,
                                       event_shape=event_shape_pl,
                                       validate_args=True)

        fake_mvn_static = self._cls()(distribution=tfd.Normal(loc=loc,
                                                              scale=scale),
                                      bijector=DummyMatrixTransform(),
                                      batch_shape=batch_shape,
                                      event_shape=event_shape,
                                      validate_args=True)

        def actual_mvn_log_prob(x):
            # This distribution is the normal PDF, reduced over the
            # last 3 dimensions + a jacobian term which corresponds
            # to the determinant of x.
            return (
                np.sum(stats.norm(loc, scale).logpdf(x), axis=(-1, -2, -3)) +
                np.sum(np.linalg.det(x), axis=-1))

        self.assertAllEqual([2, 3, 3], fake_mvn_static.event_shape)
        self.assertAllEqual([2], fake_mvn_static.batch_shape)

        if not tf.executing_eagerly():
            self.assertAllEqual(tf.TensorShape(None),
                                fake_mvn_dynamic.event_shape)
            self.assertAllEqual(tf.TensorShape(None),
                                fake_mvn_dynamic.batch_shape)

        num_samples = 5e3
        for fake_mvn in [fake_mvn_static, fake_mvn_dynamic]:
            # Ensure sample works by checking first, second moments.
            y = fake_mvn.sample(int(num_samples), seed=test_util.test_seed())
            x = y[0:5, ...]
            [
                x_,
                fake_event_shape_,
                fake_batch_shape_,
                fake_log_prob_,
                fake_prob_,
            ] = self.evaluate([
                x,
                fake_mvn.event_shape_tensor(),
                fake_mvn.batch_shape_tensor(),
                fake_mvn.log_prob(x),
                fake_mvn.prob(x),
            ])

            # Ensure all other functions work as intended.
            self.assertAllEqual([5, 2, 2, 3, 3], x_.shape)
            self.assertAllEqual([2, 3, 3], fake_event_shape_)
            self.assertAllEqual([2], fake_batch_shape_)
            self.assertAllClose(actual_mvn_log_prob(x_),
                                fake_log_prob_,
                                atol=0.,
                                rtol=1e-6)
            # With this many dimensions and samples, the direct space probability
            # may underflow.
            self.assertAllClose(np.exp(actual_mvn_log_prob(x_)),
                                fake_prob_,
                                atol=1e-12,
                                rtol=1e-5)
Code example #18
 def test_ones_like(self):
     x = tf1.placeholder_with_default(tf.ones([2], dtype=tf.float32),
                                      shape=None)
     self.assertEqual(dtype_util.convert_to_dtype(ps.ones_like(x)),
                      tf.float32)
Code example #19
def construct_model(input_tensors, encoder_w0, prefix=''):
  """Construct model."""
  facto = tf.placeholder_with_default(1.0, ())
  context_xs = input_tensors['inputa']
  context_ys = input_tensors['labela']
  target_xs = input_tensors['inputb']
  target_ys = input_tensors['labelb']

  # sample ws ~ w|(x_all,a), rs = T(ws, ys), r = mean(rs), z = T(r)
  # x_all = tf.concat([context_xs, target_xs], axis=1) #n_task * 20 * (128*128)
  # y_all = tf.concat([context_ys, target_ys], axis=1)

  x_all = context_xs
  y_all = context_ys

  # n_task * [n_im] * d_z
  if 'train' in prefix:
    z_samples, mu_w_all, sigma_w_all = xy_to_z(x_all, y_all, encoder_w0)
    z_samples = z_samples * facto
  else:
    z_samples, _, _ = xy_to_z(context_xs, context_ys, encoder_w0)
    z_samples = z_samples * facto

  target_ws, _, _ = encoder_w(target_xs, encoder_w0)
  input_zxs = tf.concat([z_samples, target_ws], axis=-1)

  # sample y_hat ~  y|(w,z)
  target_yhat_mu = decoder_g(input_zxs)  # n_task * n_im * dim_y

  # when var of  p(y | x,z) is fixed, neg-loglik <=> MSE
  mse_loss = mse(target_yhat_mu, target_ys)

  tf.summary.scalar(prefix + 'mse', mse_loss)
  optimizer1 = tf.train.AdamOptimizer(FLAGS.update_lr)
  optimizer2 = tf.train.AdamOptimizer(0.001)

  if 'train' in prefix:
    # mu_w_all is n_task * n_im * dim_w
    # target_yhat_mu is n_task * n_im * dim_w
    kl_ib = kl_qp_gaussian(mu_w_all, sigma_w_all, tf.zeros(tf.shape(mu_w_all)),
                           tf.ones(tf.shape(mu_w_all)))

    THETA = (  # pylint: disable=invalid-name
        tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='decoder')
        + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder_w'))
    all_var = tf.trainable_variables()
    PHI = [v for v in all_var if v not in THETA]  # pylint: disable=invalid-name

    loss = mse_loss + FLAGS.beta * kl_ib

    gvs_theta = optimizer1.compute_gradients(loss, THETA)
    train_theta_op = optimizer1.apply_gradients(gvs_theta)

    gvs_phi = optimizer2.compute_gradients(loss, PHI)
    train_phi_op = optimizer2.apply_gradients(gvs_phi)

    with tf.control_dependencies([train_theta_op, train_phi_op]):
      train_op = tf.no_op()

    tf.summary.scalar(prefix + 'kl', kl_ib)
    tf.summary.scalar(prefix + 'full_loss', loss)
    return mse_loss, kl_ib, train_op, facto
  else:
    return mse_loss
Code example #20
 def testDynamicBatchShape(self, params, shape):
     tensor_params = tf1.placeholder_with_default(params, shape=None)
     k = TestKernel(tensor_params)
     self.assertAllEqual(shape, self.evaluate(k.batch_shape_tensor()))
Code example #21
 def make_tensor(self, x):
     x = tf.cast(x, self.dtype)
     return tf1.placeholder_with_default(
         input=x, shape=x.shape if self.use_static_shape else None)
Code example #22
 def test_dynamic(self):
     if tf.executing_eagerly(): return
     x = tf1.placeholder_with_default(tf.random.normal([3, 4, 5]),
                                      shape=None)
     self.assertAllEqual(3 * 4 * 5, self.evaluate(prefer_static.size(x)))
Code example #23
    def testForwardInverse(self, input_shape, event_dims, training):
        """Tests forward and backward passes with different event shapes and axes.

    Args:
      input_shape: Tuple of shapes for input tensor.
      event_dims: Tuple of dimension indices that will be normalized.
      training: Boolean of whether bijector runs in training or inference mode.
    """
        x_ = np.arange(5 * 4 * 2).astype(np.float32).reshape(input_shape)
        x = tf1.placeholder_with_default(
            x_, input_shape if 0 in event_dims else (None, ) + input_shape[1:])
        # When training, memorize the exact mean of the last
        # minibatch that it normalized (instead of moving average assignment).
        layer = tf.keras.layers.BatchNormalization(axis=event_dims,
                                                   momentum=0.,
                                                   epsilon=0.)
        batch_norm = tfb.BatchNormalization(batchnorm_layer=layer,
                                            training=training)
        # Minibatch statistics are saved only after norm_x has been computed.
        norm_x = batch_norm.inverse(x)
        with tf.control_dependencies(batch_norm.batchnorm.updates):
            moving_mean = tf.identity(batch_norm.batchnorm.moving_mean)
            moving_var = tf.identity(batch_norm.batchnorm.moving_variance)
            denorm_x = batch_norm.forward(tf.identity(norm_x))
            fldj = batch_norm.forward_log_det_jacobian(
                x, event_ndims=len(event_dims))
            # Use identity to invalidate cache.
            ildj = batch_norm.inverse_log_det_jacobian(
                tf.identity(denorm_x), event_ndims=len(event_dims))
        self.evaluate(tf1.global_variables_initializer())
        # Update variables.
        norm_x_ = self.evaluate(norm_x)
        [
            norm_x_,
            moving_mean_,
            moving_var_,
            denorm_x_,
            ildj_,
            fldj_,
        ] = self.evaluate([
            norm_x,
            moving_mean,
            moving_var,
            denorm_x,
            ildj,
            fldj,
        ])
        self.assertStartsWith(batch_norm.name, "batch_normalization")

        reduction_axes = self._reduction_axes(input_shape, event_dims)
        keepdims = len(event_dims) > 1

        expected_batch_mean = np.mean(x_,
                                      axis=reduction_axes,
                                      keepdims=keepdims)
        expected_batch_var = np.var(x_, axis=reduction_axes, keepdims=keepdims)

        if training:
            # When training=True, values become normalized across batch dim and
            # original values are recovered after de-normalizing.
            zeros = np.zeros_like(norm_x_)
            self.assertAllClose(np.mean(zeros, axis=reduction_axes),
                                np.mean(norm_x_, axis=reduction_axes))

            self.assertAllClose(expected_batch_mean, moving_mean_)
            self.assertAllClose(expected_batch_var, moving_var_)
            self.assertAllClose(x_, denorm_x_, atol=1e-5)
            # Since moving statistics are set to batch statistics after
            # normalization, ildj and -fldj should match.
            self.assertAllClose(ildj_, -fldj_)
            # ildj is computed with minibatch statistics.
            expected_ildj = np.sum(
                np.log(1.) -
                .5 * np.log(expected_batch_var + batch_norm.batchnorm.epsilon))
            self.assertAllClose(expected_ildj, np.squeeze(ildj_))
        else:
            # When training=False, moving_mean, moving_var remain at their
            # initialized values (0., 1.), resulting in no scale/shift (a small
            # shift occurs if epsilon > 0.)
            self.assertAllClose(x_, norm_x_)
            self.assertAllClose(x_, denorm_x_, atol=1e-5)
            # ildj is computed with saved statistics.
            expected_ildj = np.sum(
                np.log(1.) - .5 * np.log(1. + batch_norm.batchnorm.epsilon))
            self.assertAllClose(expected_ildj, np.squeeze(ildj_))
Code example #24
def run_training(adj,
                 features,
                 y_train,
                 y_val,
                 y_test,
                 train_mask,
                 val_mask,
                 test_mask,
                 model_type,
                 model=None):
    # Set random seed
    seed = 123
    np.random.seed(seed)
    tf.random.set_random_seed(seed)

    # Settings
    try:
        flags = tf.app.flags
        FLAGS = flags.FLAGS
        flags.DEFINE_string('f', '', 'kernel')
        flags.DEFINE_string('model', 'gcn',
                            'Model string.')  # 'gcn', 'gcn_cheby', 'dense'
        flags.DEFINE_float('learning_rate', 0.000001, 'Initial learning rate.')
        flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.')
        flags.DEFINE_integer('hidden1', 16,
                             'Number of units in hidden layer 1.')
        flags.DEFINE_float('dropout', 0.5,
                           'Dropout rate (1 - keep probability).')
        flags.DEFINE_float('weight_decay', 5e-4,
                           'Weight for L2 loss on embedding matrix.')
        flags.DEFINE_integer('early_stopping', 10,
                             'Tolerance for early stopping (# of epochs).')
        flags.DEFINE_integer('max_degree', 3,
                             'Maximum Chebyshev polynomial degree.')
        FLAGS = flags.FLAGS
    except Exception:
        # Flags may already be defined when run_training is called more than
        # once in the same process (e.g. from a notebook); ignore.
        pass

    # Load data
    # adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)
    # print(type(adj), adj.shape)
    # print(type(features), features.shape)
    # print(type(y_train), y_train.shape)
    # print(type(y_val), y_val.shape)
    # print(type(y_test), y_test.shape)
    # print(type(train_mask), train_mask.shape)
    # print(type(val_mask), val_mask.shape)
    # print(type(test_mask), test_mask.shape)

    # Some preprocessing
    features = preprocess_features(features)
    if model_type == 'gcn':
        support = [preprocess_adj(adj)]
        num_supports = 1
        model_func = GCN
    elif model_type == 'gcn_cheby':
        support = chebyshev_polynomials(adj, FLAGS.max_degree)
        num_supports = 1 + FLAGS.max_degree
        model_func = GCN
    elif model_type == 'dense':
        support = [preprocess_adj(adj)]  # Not used
        num_supports = 1
        model_func = MLP
    else:
        raise ValueError('Invalid argument for model: ' + str(FLAGS.model))

    # Define placeholders
    placeholders = {
        'support':
        [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
        'features':
        tf.sparse_placeholder(tf.float32,
                              shape=tf.constant(features[2], dtype=tf.int64)),
        'labels':
        tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
        'labels_mask':
        tf.placeholder(tf.int32),
        'dropout':
        tf.placeholder_with_default(0., shape=()),
        'num_features_nonzero':
        tf.placeholder(tf.int32)  # helper variable for sparse dropout
    }

    # Create model
    if model is None:
        model = model_func(placeholders,
                           input_dim=features[2][1],
                           logging=True)
    else:
        model.placeholders = placeholders
        model.inputs = placeholders['features']
        model.output_dim = placeholders['labels'].get_shape().as_list()[1]
        model.input_dim = features[2][1]

    # Initialize session
    sess = tf.Session()

    # Define model evaluation function
    def evaluate(features, support, labels, mask, placeholders):
        t_test = time.time()
        feed_dict_val = construct_feed_dict(features, support, labels, mask,
                                            placeholders)
        outs_val = sess.run([model.loss, model.accuracy, model.outputs],
                            feed_dict=feed_dict_val)
        return outs_val[0], outs_val[1], outs_val[2], (time.time() - t_test)

    # Init variables
    sess.run(tf.global_variables_initializer())

    cost_val = []

    # Train model
    for epoch in range(FLAGS.epochs):

        t = time.time()
        # Construct feed dictionary
        feed_dict = construct_feed_dict(features, support, y_train, train_mask,
                                        placeholders)
        feed_dict.update({placeholders['dropout']: FLAGS.dropout})

        # Training step
        outs = sess.run([model.opt_op, model.loss, model.accuracy],
                        feed_dict=feed_dict)

        # Validation
        cost, acc, _, duration = evaluate(features, support, y_val, val_mask,
                                          placeholders)
        cost_val.append(cost)

        # Print results
        print("Epoch:", '%04d' % (epoch + 1), "train_loss=",
              "{:.5f}".format(outs[1]), "train_acc=", "{:.5f}".format(outs[2]),
              "val_loss=", "{:.5f}".format(cost), "val_acc=",
              "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t))

        if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(
                cost_val[-(FLAGS.early_stopping + 1):-1]):
            print("Early stopping...")
            break

    # print("Optimization Finished!")

    # Testing
    test_cost, test_acc, test_outputs, test_duration = evaluate(
        features, support, y_test, test_mask, placeholders)
    print("Training results:", "cost=", "{:.5f}".format(test_cost), "time=",
          "{:.5f}".format(
              test_duration))  #"accuracy=", "{:.5f}".format(test_acc)
    return model, test_outputs
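
Note how the 'dropout' entry in example #24 uses placeholder_with_default(0., shape=()) so that evaluation runs can simply omit the feed and get a no-op dropout, while training feeds FLAGS.dropout explicitly. The idiom in isolation (toy tensors, graph mode):

import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()

dropout = tf1.placeholder_with_default(0., shape=())
h = tf1.nn.dropout(tf1.ones([4, 8]), rate=dropout)

with tf1.Session() as sess:
    sess.run(h)                            # evaluation: rate defaults to 0.
    sess.run(h, feed_dict={dropout: 0.5})  # training: real dropout rate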
Code example #25
    def testCopy(self):
        # 5 random index points in R^2
        index_points_1 = np.random.uniform(-4., 4., (5, 2)).astype(np.float32)
        # 10 random index points in R^2
        index_points_2 = np.random.uniform(-4., 4., (10, 2)).astype(np.float32)

        observation_index_points_1 = (np.random.uniform(
            -4., 4., (7, 2)).astype(np.float32))
        observation_index_points_2 = (np.random.uniform(
            -4., 4., (9, 2)).astype(np.float32))

        observations_1 = np.random.uniform(-1., 1., 7).astype(np.float32)
        observations_2 = np.random.uniform(-1., 1., 9).astype(np.float32)

        if not self.is_static:
            index_points_1 = tf1.placeholder_with_default(index_points_1,
                                                          shape=None)
            index_points_2 = tf1.placeholder_with_default(index_points_2,
                                                          shape=None)
            observation_index_points_1 = tf1.placeholder_with_default(
                observation_index_points_1, shape=None)
            observation_index_points_2 = tf1.placeholder_with_default(
                observation_index_points_2, shape=None)
            observations_1 = tf1.placeholder_with_default(observations_1,
                                                          shape=None)
            observations_2 = tf1.placeholder_with_default(observations_2,
                                                          shape=None)

        mean_fn = lambda x: np.array([0.], np.float32)
        kernel_1 = psd_kernels.ExponentiatedQuadratic()
        kernel_2 = psd_kernels.ExpSinSquared()

        gprm1 = tfd.GaussianProcessRegressionModel(
            kernel=kernel_1,
            index_points=index_points_1,
            observation_index_points=observation_index_points_1,
            observations=observations_1,
            mean_fn=mean_fn,
            jitter=1e-5,
            validate_args=True)
        gprm2 = gprm1.copy(kernel=kernel_2,
                           index_points=index_points_2,
                           observation_index_points=observation_index_points_2,
                           observations=observations_2)

        precomputed_gprm1 = (
            tfd.GaussianProcessRegressionModel.precompute_regression_model(
                kernel=kernel_1,
                index_points=index_points_1,
                observation_index_points=observation_index_points_1,
                observations=observations_1,
                mean_fn=mean_fn,
                jitter=1e-5,
                validate_args=True))
        precomputed_gprm2 = precomputed_gprm1.copy(index_points=index_points_2)
        self.assertIs(precomputed_gprm1.mean_fn, precomputed_gprm2.mean_fn)
        self.assertIs(precomputed_gprm1.kernel, precomputed_gprm2.kernel)

        event_shape_1 = [5]
        event_shape_2 = [10]

        self.assertIsInstance(gprm1.kernel.base_kernel,
                              psd_kernels.ExponentiatedQuadratic)
        self.assertIsInstance(gprm2.kernel.base_kernel,
                              psd_kernels.ExpSinSquared)

        if self.is_static or tf.executing_eagerly():
            self.assertAllEqual(gprm1.batch_shape, gprm2.batch_shape)
            self.assertAllEqual(gprm1.event_shape, event_shape_1)
            self.assertAllEqual(gprm2.event_shape, event_shape_2)
            self.assertAllEqual(gprm1.index_points, index_points_1)
            self.assertAllEqual(gprm2.index_points, index_points_2)
            self.assertAllEqual(tf.get_static_value(gprm1.jitter),
                                tf.get_static_value(gprm2.jitter))
        else:
            self.assertAllEqual(self.evaluate(gprm1.batch_shape_tensor()),
                                self.evaluate(gprm2.batch_shape_tensor()))
            self.assertAllEqual(self.evaluate(gprm1.event_shape_tensor()),
                                event_shape_1)
            self.assertAllEqual(self.evaluate(gprm2.event_shape_tensor()),
                                event_shape_2)
            self.assertEqual(self.evaluate(gprm1.jitter),
                             self.evaluate(gprm2.jitter))
            self.assertAllEqual(self.evaluate(gprm1.index_points),
                                index_points_1)
            self.assertAllEqual(self.evaluate(gprm2.index_points),
                                index_points_2)
Code example #26
File: main.py  Project: scroix/TecoGAN
            else:  # The first 5 frames are hard-coded symmetric padding: skipped, but their time is still counted.
                print("Warming up %d" % (5 - i))
    print("total time " + str(srtime) + ", frame number " + str(max_iter))

# The training mode
elif FLAGS.mode == 'train':
    # hard coded save
    filelist = [
        'main.py', 'lib/Teco.py', 'lib/frvsr.py', 'lib/dataloader.py',
        'lib/ops.py'
    ]
    for filename in filelist:
        shutil.copyfile('./' + filename,
                        FLAGS.summary_dir + filename.replace("/", "_"))

    useValidat = tf.placeholder_with_default(tf.constant(False, dtype=tf.bool),
                                             shape=())
    rdata = frvsr_gpu_data_loader(FLAGS, useValidat)
    # Data = collections.namedtuple('Data', 'paths_HR, s_inputs, s_targets, image_count, steps_per_epoch')
    print('tData count = %d, steps per epoch %d' %
          (rdata.image_count, rdata.steps_per_epoch))
    if FLAGS.ratio > 0:
        Net = TecoGAN(rdata.s_inputs, rdata.s_targets, FLAGS)
    else:
        Net = FRVSR(rdata.s_inputs, rdata.s_targets, FLAGS)
    # Network = collections.namedtuple('Network', 'gen_output, train, learning_rate, update_list, '
    #                                     'update_list_name, update_list_avg, image_summary')

    # Add scalar summary
    tf.summary.scalar('learning_rate', Net.learning_rate)
    train_summary = []
    for key, value in zip(Net.update_list_name, Net.update_list_avg):
Code example #27
def _build_tensor(ndarray, dtype, use_static_shape):
    # Enforce parameterized dtype and static/dynamic testing.
    ndarray = np.asarray(ndarray).astype(dtype)
    return tf1.placeholder_with_default(
        input=ndarray, shape=ndarray.shape if use_static_shape else None)
Code example #28
File: normal_test.py  Project: xzxzmmnn/probability
 def testIncompatibleArgShapesGraph(self):
   if tf.executing_eagerly(): return
   scale = tf1.placeholder_with_default(tf.ones([4, 1]), shape=None)
   with self.assertRaisesRegexp(tf.errors.OpError, r'Incompatible shapes'):
     d = tfd.Normal(loc=tf.zeros([2, 3]), scale=scale, validate_args=True)
     self.evaluate(d.mean())
Code example #29
    def testWithCallable(self, use_init_state, use_bijector,
                         use_dynamic_shape):
        if (JAX_MODE or tf.executing_eagerly()) and use_dynamic_shape:
            self.skipTest('Dynamic shape test.')

        def target_log_prob_fn(x, y):
            return tf.reduce_sum(-x**2, -1) - y**2

        dtype = {
            'x': self.dtype,
            'y': self.dtype,
        }
        if use_init_state:
            if use_dynamic_shape:
                kwargs = dict(
                    init_state={
                        'x': tf.zeros((64, 2), dtype=self.dtype),
                        'y': tf.zeros(64, dtype=self.dtype),
                    })
            else:
                kwargs = dict(
                    init_state={
                        'x':
                        tf1.placeholder_with_default(tf.zeros(
                            (64, 2), dtype=self.dtype),
                                                     shape=[None, None]),
                        'y':
                        tf1.placeholder_with_default(
                            tf.zeros(64, dtype=self.dtype), shape=[None]),
                    })
        else:
            if use_dynamic_shape:
                kwargs = dict(
                    event_dtype=dtype,
                    event_shape={
                        'x':
                        tf1.placeholder_with_default(tf.constant(
                            [2], dtype=tf.int32),
                                                     shape=[1]),
                        'y':
                        tf1.placeholder_with_default(tf.constant(
                            [], dtype=tf.int32),
                                                     shape=[0]),
                    },
                )
            else:
                kwargs = dict(
                    event_dtype=dtype,
                    event_shape={
                        'x': [2],
                        'y': [],
                    },
                )
        if use_bijector:
            kwargs.update(event_space_bijector={
                'x': tfb.Exp(),
                'y': tfb.Identity()
            })

        seed = test_util.test_seed(sampler_type='stateless')
        results = tfp.experimental.mcmc.sample_snaper_hmc(target_log_prob_fn,
                                                          2,
                                                          num_burnin_steps=2,
                                                          seed=seed,
                                                          **kwargs)
        trace = self.evaluate(results.trace)
        states = trace[0]
        self.assertAllAssertsNested(self.assertAllNotNan, states)
        self.assertEqual(dtype, tf.nest.map_structure(lambda x: x.dtype,
                                                      states))
        self.assertTrue(
            hasattr(results.final_kernel_results, 'target_accept_prob'))
Code example #30
 def _build_placeholder(self, ndarray):
   ndarray = np.asarray(ndarray).astype(self.dtype)
   return tf1.placeholder_with_default(
       input=ndarray, shape=ndarray.shape if self.use_static_shape else None)
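
One subtlety runs through all of these tests: under eager execution there is no feed mechanism, so tf1.placeholder_with_default simply evaluates to its default and every "dynamic shape" branch silently becomes static. That is why examples #5, #11, and #25 guard their shape assertions with self.is_static or tf.executing_eagerly(). A sketch of the effect:

import tensorflow as tf
import tensorflow.compat.v1 as tf1

x = tf1.placeholder_with_default(tf.ones([2, 3]), shape=None)
if tf.executing_eagerly():
    # Eager tensors always carry a concrete shape, so shape=None has no
    # effect and the dynamic-shape code paths are never exercised.
    assert x.shape == [2, 3]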