Example #1
def train_classifier(loop):
    df = bernoulli_flow(
        x_train, config.batch_size, shuffle=False, skip_incomplete=False)
    with loop.timeit('cls_train_time'):
        [c_pred] = collect_outputs(
            outputs=[q_y_given_x],
            inputs=[input_x],
            data_flow=df,
        )
        c_classifier.fit(c_pred, y_train)
        print(c_classifier.describe())
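This fragment depends on module-level context (it is the classifier-training hook from Example #5 below, where `x_train`, `y_train`, `input_x`, `q_y_given_x`, `config`, and `c_classifier` are defined). For orientation, `collect_outputs` gathers per-batch `session.run` results over a data flow and concatenates them; a minimal sketch of that idea, where the name `collect_outputs_sketch` and the explicit `session` argument are illustrative rather than TFSnippet's actual signature:

import numpy as np

def collect_outputs_sketch(outputs, inputs, data_flow, session):
    # Run `outputs` on every batch of `data_flow`, feeding the batch
    # arrays into the `inputs` placeholders, then concatenate the
    # per-batch results along the first axis.
    collected = [[] for _ in outputs]
    for batch in data_flow:
        batch_results = session.run(
            outputs, feed_dict=dict(zip(inputs, batch)))
        for buf, result in zip(collected, batch_results):
            buf.append(result)
    return [np.concatenate(buf, axis=0) for buf in collected]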
Example #2
def main():
    # parse the arguments
    arg_parser = ArgumentParser()
    spt.register_config_arguments(config, arg_parser)
    arg_parser.parse_args(sys.argv[1:])

    # print the config
    print_with_title('Configurations', pformat(config.to_dict()), after='\n')

    # open the result object and prepare the result directories
    results = MLResults(config.result_dir)
    results.save_config(config)  # save experiment settings for review
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)

    # input placeholders
    input_x = tf.placeholder(dtype=tf.int32,
                             shape=(None, config.x_dim),
                             name='input_x')
    learning_rate = spt.AnnealingVariable('learning_rate', config.initial_lr,
                                          config.lr_anneal_factor)

    # build the posterior flow
    with tf.variable_scope('posterior_flow'):
        flows = []
        for i in range(config.n_flows):
            flows.append(spt.layers.ActNorm())
            flows.append(spt.layers.CouplingLayer(
                tf.make_template('coupling',
                                 coupling_layer_shift_and_scale,
                                 create_scope_now_=True),
                scale_type='exp'))
            flows.append(spt.layers.InvertibleDense())
        posterior_flow = spt.layers.SequentialFlow(flows=flows)

    # derive the initialization op
    with tf.name_scope('initialization'), \
            arg_scope([spt.layers.act_norm], initializing=True):
        init_q_net = q_net(input_x, posterior_flow)
        init_chain = init_q_net.chain(p_net,
                                      latent_axis=0,
                                      observed={'x': input_x})
        init_loss = tf.reduce_mean(init_chain.vi.training.sgvb())

    # derive the loss and lower-bound for training
    with tf.name_scope('training'):
        train_q_net = q_net(input_x, posterior_flow)
        train_chain = train_q_net.chain(p_net,
                                        latent_axis=0,
                                        observed={'x': input_x})

        vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
        loss = vae_loss + tf.losses.get_regularization_loss()

    # derive the nll and lower-bound for testing
    with tf.name_scope('testing'):
        test_q_net = q_net(input_x, posterior_flow, n_z=config.test_n_z)
        test_chain = test_q_net.chain(p_net,
                                      latent_axis=0,
                                      observed={'x': input_x})
        test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
        test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())

    # derive the optimizer
    with tf.name_scope('optimizing'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        params = tf.trainable_variables()
        grads = optimizer.compute_gradients(loss, var_list=params)
        with tf.control_dependencies(tf.get_collection(
                tf.GraphKeys.UPDATE_OPS)):
            train_op = optimizer.apply_gradients(grads)

    # derive the plotting function
    with tf.name_scope('plotting'):
        plot_p_net = p_net(n_z=100)
        x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))

    def plot_samples(loop):
        with loop.timeit('plot_time'):
            images = session.run(x_plots)
            save_images_collection(images=images,
                                   filename='plotting/{}.png'.format(
                                       loop.epoch),
                                   grid_size=(10, 10))

    # prepare the training and testing data
    (x_train, y_train), (x_test, y_test) = spt.datasets.load_mnist()
    train_flow = bernoulli_flow(x_train,
                                config.batch_size,
                                shuffle=True,
                                skip_incomplete=True)
    test_flow = bernoulli_flow(x_test, config.test_batch_size, sample_now=True)

    with spt.utils.create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        # initialize the network
        spt.utils.ensure_variables_initialized()
        for [batch_x] in train_flow:
            print('Network initialization loss: {:.6g}'.format(
                session.run(init_loss, {input_x: batch_x})))
            print('')
            break

        # train the network
        with spt.TrainLoop(params,
                           var_groups=['p_net', 'q_net', 'posterior_flow'],
                           max_epoch=config.max_epoch,
                           max_step=config.max_step,
                           summary_dir=(results.system_path('train_summary')
                                        if config.write_summary else None),
                           summary_graph=tf.get_default_graph(),
                           early_stopping=False) as loop:
            trainer = spt.Trainer(loop,
                                  train_op, [input_x],
                                  train_flow,
                                  metrics={'loss': loss})
            trainer.anneal_after(learning_rate,
                                 epochs=config.lr_anneal_epoch_freq,
                                 steps=config.lr_anneal_step_freq)
            evaluator = spt.Evaluator(loop,
                                      metrics={
                                          'test_nll': test_nll,
                                          'test_lb': test_lb
                                      },
                                      inputs=[input_x],
                                      data_flow=test_flow,
                                      time_metric_name='test_time')
            evaluator.after_run.add_hook(
                lambda: results.update_metrics(evaluator.last_metrics_dict))
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.log_after_epochs(freq=1)
            trainer.run()

    # print the final metrics and close the results object
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()
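The script assumes module-level definitions of `q_net`, `p_net`, and `coupling_layer_shift_and_scale` that are not shown here. As a rough orientation, a compatible `q_net` might look like the sketch below; it mirrors the structure of TFSnippet's bundled MNIST examples, but the layer sizes and `config.z_dim` are assumptions and the exact signatures should be treated as approximate:

import tensorflow as tf
import tfsnippet as spt

@spt.global_reuse
def q_net(x, posterior_flow, n_z=None):
    net = spt.BayesianNet()
    # two dense hidden layers on the (float-cast) binarized input
    h_x = tf.to_float(x)
    h_x = spt.layers.dense(h_x, 500, activation_fn=tf.nn.leaky_relu)
    h_x = spt.layers.dense(h_x, 500, activation_fn=tf.nn.leaky_relu)
    # diagonal Gaussian posterior, pushed through the invertible flow
    z_mean = spt.layers.dense(h_x, config.z_dim, name='z_mean')
    z_logstd = spt.layers.dense(h_x, config.z_dim, name='z_logstd')
    net.add('z', spt.Normal(mean=z_mean, logstd=z_logstd),
            n_samples=n_z, flow=posterior_flow)
    return net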
Example #3
def main():
    # parse the arguments
    arg_parser = ArgumentParser()
    spt.register_config_arguments(config, arg_parser, title='Model options')
    spt.register_config_arguments(spt.settings,
                                  arg_parser,
                                  prefix='tfsnippet',
                                  title='TFSnippet options')
    arg_parser.parse_args(sys.argv[1:])

    # print the config
    print_with_title('Configurations', pformat(config.to_dict()), after='\n')

    # open the result object and prepare the result directories
    results = MLResults(config.result_dir)
    results.save_config(config)  # save experiment settings for review
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)

    # input placeholders
    input_x = tf.placeholder(dtype=tf.int32,
                             shape=(None, config.x_dim),
                             name='input_x')
    learning_rate = spt.AnnealingVariable('learning_rate', config.initial_lr,
                                          config.lr_anneal_factor)

    # derive the output for initialization
    with tf.name_scope('initialization'), \
            spt.utils.scoped_set_config(spt.settings, auto_histogram=False):
        init_q_net = q_net(input_x, is_initializing=True)
        init_chain = init_q_net.chain(p_net,
                                      observed={'x': input_x},
                                      is_initializing=True)
        init_lb = tf.reduce_mean(init_chain.vi.lower_bound.elbo())

    # derive the loss and lower-bound for training
    with tf.name_scope('training'):
        train_q_net = q_net(input_x)
        train_chain = train_q_net.chain(p_net, observed={'x': input_x})
        vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
        loss = vae_loss + tf.losses.get_regularization_loss()

    # derive the nll and lower-bound for testing
    with tf.name_scope('testing'):
        test_q_net = q_net(input_x, n_z=config.test_n_z)
        test_chain = test_q_net.chain(p_net,
                                      latent_axis=0,
                                      observed={'x': input_x})
        test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
        test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())

    # derive the optimizer
    with tf.name_scope('optimizing'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        params = tf.trainable_variables()
        grads = optimizer.compute_gradients(loss, var_list=params)
        with tf.control_dependencies(tf.get_collection(
                tf.GraphKeys.UPDATE_OPS)):
            train_op = optimizer.apply_gradients(grads)

    # derive the plotting function
    with tf.name_scope('plotting'):
        plot_p_net = p_net(n_z=100)
        x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))

    def plot_samples(loop):
        with loop.timeit('plot_time'):
            images = session.run(x_plots)
            save_images_collection(images=images,
                                   filename='plotting/{}.png'.format(
                                       loop.epoch),
                                   grid_size=(10, 10),
                                   results=results)

    # prepare the training and testing data
    (x_train, y_train), (x_test, y_test) = \
        spt.datasets.load_mnist(x_shape=[784])
    train_flow = bernoulli_flow(x_train,
                                config.batch_size,
                                shuffle=True,
                                skip_incomplete=True)
    test_flow = bernoulli_flow(x_test, config.test_batch_size, sample_now=True)

    with spt.utils.create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        spt.utils.ensure_variables_initialized()

        # initialize the network
        for [x] in train_flow:
            print('Network initialized, first-batch loss is {:.6g}.\n'.format(
                session.run(init_lb, feed_dict={input_x: x})))
            break

        # train the network
        with spt.TrainLoop(params,
                           var_groups=['q_net', 'p_net'],
                           max_epoch=config.max_epoch,
                           max_step=config.max_step,
                           summary_dir=(results.system_path('train_summary')
                                        if config.write_summary else None),
                           summary_graph=tf.get_default_graph(),
                           early_stopping=False) as loop:
            trainer = spt.Trainer(loop,
                                  train_op, [input_x],
                                  train_flow,
                                  metrics={'loss': loss},
                                  summaries=tf.summary.merge_all(
                                      spt.GraphKeys.AUTO_HISTOGRAM))
            trainer.anneal_after(learning_rate,
                                 epochs=config.lr_anneal_epoch_freq,
                                 steps=config.lr_anneal_step_freq)
            evaluator = spt.Evaluator(loop,
                                      metrics={
                                          'test_nll': test_nll,
                                          'test_lb': test_lb
                                      },
                                      inputs=[input_x],
                                      data_flow=test_flow,
                                      time_metric_name='test_time')
            evaluator.events.on(
                spt.EventKeys.AFTER_EXECUTION,
                lambda e: results.update_metrics(evaluator.last_metrics_dict))
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.log_after_epochs(freq=1)
            trainer.run()

    # print the final metrics and close the results object
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()
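Both pipelines rely on `bernoulli_flow`, which treats each grayscale pixel as a Bernoulli probability and draws binary samples from it (freshly per batch during training; once up front for the test set, via `sample_now=True`). A NumPy illustration of one epoch of that behavior, simplified from the real threaded DataFlow and not its actual API:

import numpy as np

def bernoulli_epoch_sketch(x, batch_size, shuffle=True, skip_incomplete=True):
    # Scale uint8 pixels to [0, 1] success probabilities.
    p = x.astype(np.float64) / 255.
    indices = np.random.permutation(len(p)) if shuffle else np.arange(len(p))
    for start in range(0, len(p), batch_size):
        batch = indices[start:start + batch_size]
        if skip_incomplete and len(batch) < batch_size:
            break  # drop the trailing incomplete batch
        # Draw a fresh 0/1 sample per pixel; yield a one-element batch
        # tuple, matching the `for [x] in train_flow` destructuring above.
        yield [np.random.binomial(1, p[batch]).astype(np.int32)]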
Example #4
def main():
    # parse the arguments
    arg_parser = ArgumentParser()
    spt.register_config_arguments(config, arg_parser)
    arg_parser.parse_args(sys.argv[1:])

    # print the config
    print_with_title('Configurations', pformat(config.to_dict()), after='\n')

    # open the result object and prepare the result directories
    results = MLResults(config.result_dir)
    results.save_config(config)  # save experiment settings for review
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)

    # input placeholders
    input_x = tf.placeholder(dtype=tf.int32,
                             shape=(None, config.x_dim),
                             name='input_x')
    is_training = tf.placeholder(dtype=tf.bool, shape=(), name='is_training')
    learning_rate = spt.AnnealingVariable('learning_rate', config.initial_lr,
                                          config.lr_anneal_factor)
    multi_gpu = MultiGPU(disable_prebuild=False)

    # build the model
    grads = []
    losses = []
    test_nlls = []
    test_lbs = []
    batch_size = spt.utils.get_batch_size(input_x)
    params = None
    optimizer = tf.train.AdamOptimizer(learning_rate)

    for dev, pre_build, [dev_input_x] in \
            multi_gpu.data_parallel(batch_size, [input_x]):
        with tf.device(dev), multi_gpu.maybe_name_scope(dev):
            if pre_build:
                with arg_scope([p_net, q_net],
                               is_training=is_training,
                               channels_last=True):
                    _ = q_net(dev_input_x).chain(p_net,
                                                 observed={'x': dev_input_x})

            else:
                with arg_scope([p_net, q_net],
                               is_training=is_training,
                               channels_last=multi_gpu.channels_last(dev)):
                    # derive the loss and lower-bound for training
                    with tf.name_scope('training'):
                        train_q_net = q_net(dev_input_x)
                        train_chain = train_q_net.chain(
                            p_net, latent_axis=0, observed={'x': dev_input_x})

                        dev_vae_loss = tf.reduce_mean(
                            train_chain.vi.training.sgvb())
                        dev_loss = dev_vae_loss + \
                            tf.losses.get_regularization_loss()
                        losses.append(dev_loss)

                    # derive the nll and lower-bound for testing
                    with tf.name_scope('testing'):
                        test_q_net = q_net(dev_input_x, n_z=config.test_n_z)
                        test_chain = test_q_net.chain(
                            p_net, latent_axis=0, observed={'x': dev_input_x})
                        dev_test_nll = -tf.reduce_mean(
                            test_chain.vi.evaluation.is_loglikelihood())
                        dev_test_lb = tf.reduce_mean(
                            test_chain.vi.lower_bound.elbo())
                        test_nlls.append(dev_test_nll)
                        test_lbs.append(dev_test_lb)

                    # derive the optimizer
                    with tf.name_scope('optimizing'):
                        params = tf.trainable_variables()
                        grads.append(
                            optimizer.compute_gradients(dev_loss,
                                                        var_list=params))

    # merge multi-gpu outputs and operations
    with tf.name_scope('optimizing'):
        [loss, test_lb, test_nll] = \
            multi_gpu.average([losses, test_lbs, test_nlls], batch_size)
        train_op = multi_gpu.apply_grads(grads=multi_gpu.average_grads(grads),
                                         optimizer=optimizer,
                                         control_inputs=tf.get_collection(
                                             tf.GraphKeys.UPDATE_OPS))

    # derive the plotting function
    work_dev = multi_gpu.work_devices[0]
    with tf.device(work_dev), tf.name_scope('plotting'):
        plot_p_net = p_net(n_z=100,
                           is_training=is_training,
                           channels_last=multi_gpu.channels_last(work_dev))
        x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))

    def plot_samples(loop):
        with loop.timeit('plot_time'):
            images = session.run(x_plots, feed_dict={is_training: False})
            save_images_collection(images=images,
                                   filename='plotting/{}.png'.format(
                                       loop.epoch),
                                   grid_size=(10, 10),
                                   results=results)

    # prepare the training and testing data
    (x_train, y_train), (x_test, y_test) = spt.datasets.load_mnist()
    train_flow = bernoulli_flow(x_train,
                                config.batch_size,
                                shuffle=True,
                                skip_incomplete=True)
    test_flow = bernoulli_flow(x_test, config.test_batch_size, sample_now=True)

    with spt.utils.create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        # train the network
        with spt.TrainLoop(params,
                           var_groups=['q_net', 'p_net'],
                           max_epoch=config.max_epoch,
                           max_step=config.max_step,
                           summary_dir=(results.system_path('train_summary')
                                        if config.write_summary else None),
                           summary_graph=tf.get_default_graph(),
                           early_stopping=False) as loop:
            trainer = spt.Trainer(loop,
                                  train_op, [input_x],
                                  train_flow,
                                  feed_dict={is_training: True},
                                  metrics={'loss': loss})
            trainer.anneal_after(learning_rate,
                                 epochs=config.lr_anneal_epoch_freq,
                                 steps=config.lr_anneal_step_freq)
            evaluator = spt.Evaluator(loop,
                                      metrics={
                                          'test_nll': test_nll,
                                          'test_lb': test_lb
                                      },
                                      inputs=[input_x],
                                      data_flow=test_flow,
                                      feed_dict={is_training: False},
                                      time_metric_name='test_time')
            evaluator.after_run.add_hook(
                lambda: results.update_metrics(evaluator.last_metrics_dict))
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.log_after_epochs(freq=1)
            trainer.run()

    # print the final metrics and close the results object
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()
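`multi_gpu.average_grads` merges the per-tower gradients before `apply_grads` applies them under the `UPDATE_OPS` control dependencies. The classic data-parallel averaging it performs looks roughly like the sketch below (an illustration, not the `MultiGPU` implementation):

import tensorflow as tf

def average_grads_sketch(tower_grads):
    # `tower_grads` holds one list of (gradient, variable) pairs per
    # device, as returned by optimizer.compute_gradients on each tower.
    averaged = []
    for grads_and_vars in zip(*tower_grads):
        grads = [g for g, _ in grads_and_vars if g is not None]
        if not grads:
            continue  # variable received no gradient on any device
        _, var = grads_and_vars[0]
        # Element-wise mean of the per-device gradients.
        averaged.append(
            (tf.reduce_mean(tf.stack(grads, axis=0), axis=0), var))
    return averaged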
Example #5
def main():
    # parse the arguments
    arg_parser = ArgumentParser()
    spt.register_config_arguments(config, arg_parser, title='Model options')
    spt.register_config_arguments(spt.settings,
                                  arg_parser,
                                  prefix='tfsnippet',
                                  title='TFSnippet options')
    arg_parser.parse_args(sys.argv[1:])

    # print the config
    print_with_title('Configurations', pformat(config.to_dict()), after='\n')

    # open the result object and prepare the result directories
    results = MLResults(config.result_dir)
    results.save_config(config)  # save experiment settings for review
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)

    # input placeholders
    input_x = tf.placeholder(dtype=tf.int32,
                             shape=(None, config.x_dim),
                             name='input_x')
    learning_rate = spt.AnnealingVariable('learning_rate', config.initial_lr,
                                          config.lr_anneal_factor)

    # derive the loss and lower-bound for training
    with tf.name_scope('training'):
        train_q_net = q_net(input_x, n_samples=config.train_n_samples)
        train_chain = train_q_net.chain(p_net,
                                        latent_axis=0,
                                        observed={'x': input_x})

        if config.vi_algorithm == 'reinforce':
            baseline = reinforce_baseline_net(input_x)
            vae_loss = tf.reduce_mean(
                train_chain.vi.training.reinforce(baseline=baseline))
        else:
            assert config.vi_algorithm == 'vimco'
            vae_loss = tf.reduce_mean(train_chain.vi.training.vimco())
        loss = vae_loss + tf.losses.get_regularization_loss()

    # derive the nll and logits output for testing
    with tf.name_scope('testing'):
        test_q_net = q_net(input_x, n_samples=config.test_n_samples)
        test_chain = test_q_net.chain(p_net,
                                      latent_axis=0,
                                      observed={'x': input_x})
        test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())

        # derive the classifier via q(y|x)
        q_y_given_x = tf.argmax(test_q_net['y'].distribution.logits,
                                axis=-1,
                                name='q_y_given_x')

    # derive the optimizer
    with tf.name_scope('optimizing'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        params = tf.trainable_variables()
        grads = optimizer.compute_gradients(loss, var_list=params)
        with tf.control_dependencies(tf.get_collection(
                tf.GraphKeys.UPDATE_OPS)):
            train_op = optimizer.apply_gradients(grads)

    # derive the plotting function
    with tf.name_scope('plotting'):
        plot_p_net = p_net(
            observed={'y': tf.range(config.n_clusters, dtype=tf.int32)},
            n_z=10)
        x_plots = tf.reshape(
            tf.transpose(bernoulli_as_pixel(plot_p_net['x']), (1, 0, 2)),
            (-1, 28, 28))

    def plot_samples(loop):
        with loop.timeit('plot_time'):
            images = session.run(x_plots)
            save_images_collection(images=images,
                                   filename='plotting/{}.png'.format(
                                       loop.epoch),
                                   grid_size=(config.n_clusters, 10),
                                   results=results)

    # derive the final un-supervised classifier
    c_classifier = ClusteringClassifier(config.n_clusters, 10)

    def train_classifier(loop):
        df = bernoulli_flow(x_train,
                            config.batch_size,
                            shuffle=False,
                            skip_incomplete=False)
        with loop.timeit('cls_train_time'):
            [c_pred] = collect_outputs(
                outputs=[q_y_given_x],
                inputs=[input_x],
                data_flow=df,
            )
            c_classifier.fit(c_pred, y_train)
            print(c_classifier.describe())

    def evaluate_classifier(loop):
        with loop.timeit('cls_test_time'):
            [c_pred] = collect_outputs(
                outputs=[q_y_given_x],
                inputs=[input_x],
                data_flow=test_flow,
            )
            y_pred = c_classifier.predict(c_pred)
            cls_metrics = {'test_acc': accuracy_score(y_test, y_pred)}
            loop.collect_metrics(cls_metrics)
            results.update_metrics(cls_metrics)

    # prepare the training and testing data
    (x_train, y_train), (x_test, y_test) = \
        spt.datasets.load_mnist(x_shape=[784])
    train_flow = bernoulli_flow(x_train,
                                config.batch_size,
                                shuffle=True,
                                skip_incomplete=True)
    test_flow = bernoulli_flow(x_test, config.test_batch_size, sample_now=True)

    with spt.utils.create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        # train the network
        with spt.TrainLoop(
                params,
                var_groups=['p_net', 'q_net', 'gaussian_mixture_prior'],
                max_epoch=config.max_epoch,
                max_step=config.max_step,
                summary_dir=(results.system_path('train_summary')
                             if config.write_summary else None),
                summary_graph=tf.get_default_graph(),
                early_stopping=False) as loop:
            trainer = spt.Trainer(loop,
                                  train_op, [input_x],
                                  train_flow,
                                  metrics={'loss': loss},
                                  summaries=tf.summary.merge_all(
                                      spt.GraphKeys.AUTO_HISTOGRAM))
            trainer.anneal_after(learning_rate,
                                 epochs=config.lr_anneal_epoch_freq,
                                 steps=config.lr_anneal_step_freq)
            evaluator = spt.Evaluator(loop,
                                      metrics={'test_nll': test_nll},
                                      inputs=[input_x],
                                      data_flow=test_flow,
                                      time_metric_name='test_time')
            evaluator.events.on(
                spt.EventKeys.AFTER_EXECUTION,
                lambda e: results.update_metrics(evaluator.last_metrics_dict))
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(train_classifier, loop), freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(evaluate_classifier, loop), freq=10)

            trainer.log_after_epochs(freq=1)
            trainer.run()

    # print the final metrics and close the results object
    with codecs.open('cluster_classifier.txt', 'wb', 'utf-8') as f:
        f.write(c_classifier.describe())
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()
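`ClusteringClassifier(config.n_clusters, 10)` turns the unsupervised cluster assignments from `q(y|x)` into digit predictions. A plausible majority-vote stand-in is sketched below; it is hypothetical, and the real class additionally implements the `describe()` report written out above:

import numpy as np

class ClusteringClassifierSketch(object):
    """Hypothetical stand-in: label each cluster by majority vote."""

    def __init__(self, n_clusters, n_classes):
        self.n_classes = n_classes
        self.mapping = np.zeros(n_clusters, dtype=np.int32)

    def fit(self, c_pred, y_true):
        # For every cluster, remember the most frequent true label.
        for c in range(len(self.mapping)):
            labels = y_true[c_pred == c]
            if len(labels):
                self.mapping[c] = np.bincount(
                    labels, minlength=self.n_classes).argmax()

    def predict(self, c_pred):
        # Classify each sample by looking up its cluster's label.
        return self.mapping[c_pred]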