Code example #1
def main(argv):
    del argv  # unused arg
    np.random.seed(FLAGS.seed)
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)
    tf1.disable_v2_behavior()

    session = tf1.Session()
    with session.as_default():
        x_train, y_train, x_test, y_test = datasets.load(session)
        n_train = x_train.shape[0]

        num_classes = int(np.amax(y_train)) + 1
        if not FLAGS.resnet:
            model = lenet5(n_train, x_train.shape[1:], num_classes)
        else:
            datagen = tf.keras.preprocessing.image.ImageDataGenerator(
                rotation_range=90,
                width_shift_range=0.1,
                height_shift_range=0.1,
                horizontal_flip=True)
            datagen.fit(x_train)
            model = res_net(n_train,
                            x_train.shape[1:],
                            num_classes,
                            batchnorm=FLAGS.batchnorm,
                            variational='hybrid' if FLAGS.hybrid else 'full')

            def schedule_fn(epoch):
                """Learning rate schedule function."""
                rate = FLAGS.learning_rate
                if epoch > 180:
                    rate *= 0.5e-3
                elif epoch > 160:
                    rate *= 1e-3
                elif epoch > 120:
                    rate *= 1e-2
                elif epoch > 80:
                    rate *= 1e-1
                return float(rate)

            lr_callback = tf.keras.callbacks.LearningRateScheduler(schedule_fn)

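        # Per-layer scalars that will accumulate the KL costs introduced by
        # auxiliary sampling (see the assign_add calls in the refined-VI step below).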
        for l in model.layers:
            l.kl_cost_weight = l.add_weight(
                name='kl_cost_weight',
                shape=(),
                initializer=tf.constant_initializer(0.),
                trainable=False)
            l.kl_cost_bias = l.add_weight(
                name='kl_cost_bias',
                shape=(),
                initializer=tf.constant_initializer(0.),
                trainable=False)

        [negative_log_likelihood, accuracy, log_likelihood, kl,
         elbo] = get_losses_and_metrics(model, n_train)

        metrics = [elbo, log_likelihood, kl, accuracy]

        tensorboard = tf1.keras.callbacks.TensorBoard(
            log_dir=FLAGS.output_dir,
            update_freq=FLAGS.batch_size * FLAGS.validation_freq)
        if FLAGS.resnet:
            callbacks = [tensorboard, lr_callback]
        else:
            callbacks = [tensorboard]

        if not FLAGS.resnet or not FLAGS.data_augmentation:

            def fit_fn(model,
                       steps,
                       initial_epoch=0,
                       with_lr_schedule=FLAGS.resnet):
                return model.fit(
                    x=x_train,
                    y=y_train,
                    batch_size=FLAGS.batch_size,
                    epochs=initial_epoch +
                    (FLAGS.batch_size * steps) // n_train,
                    initial_epoch=initial_epoch,
                    validation_data=(x_test, y_test),
                    validation_freq=max(
                        (FLAGS.validation_freq * FLAGS.batch_size) // n_train,
                        1),
                    verbose=1,
                    callbacks=callbacks if with_lr_schedule else [tensorboard])
        else:

            def fit_fn(model,
                       steps,
                       initial_epoch=0,
                       with_lr_schedule=FLAGS.resnet):
                return model.fit_generator(
                    datagen.flow(x_train, y_train,
                                 batch_size=FLAGS.batch_size),
                    epochs=initial_epoch +
                    (FLAGS.batch_size * steps) // n_train,
                    initial_epoch=initial_epoch,
                    steps_per_epoch=n_train // FLAGS.batch_size,
                    validation_data=(x_test, y_test),
                    validation_freq=max(
                        (FLAGS.validation_freq * FLAGS.batch_size) // n_train,
                        1),
                    verbose=1,
                    callbacks=callbacks if with_lr_schedule else [tensorboard])

        model.compile(
            optimizer=tf.keras.optimizers.Adam(lr=float(FLAGS.learning_rate)),
            loss=negative_log_likelihood,
            metrics=metrics)
        session.run(tf1.global_variables_initializer())

        train_epochs = (FLAGS.training_steps * FLAGS.batch_size) // n_train
        fit_fn(model, FLAGS.training_steps)

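        # Backend function returning per-example log-likelihoods and logits;
        # it is passed to ensemble_metrics to evaluate saved weight snapshots.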
        labels = tf.keras.layers.Input(shape=y_train.shape[1:])
        ll = tf.keras.backend.function([model.input, labels], [
            model.output.distribution.log_prob(tf.squeeze(labels)),
            model.output.distribution.logits
        ])

        base_metrics = [
            ensemble_metrics(x_train, y_train, model, ll),
            ensemble_metrics(x_test, y_test, model, ll)
        ]
        model_dir = os.path.join(FLAGS.output_dir, 'models')
        tf.io.gfile.makedirs(model_dir)
        base_model_filename = os.path.join(model_dir, 'base_model.weights')
        model.save_weights(base_model_filename)

        # Train base model further for comparison.
        fit_fn(model,
               FLAGS.n_auxiliary_variables *
               FLAGS.auxiliary_sampling_frequency * FLAGS.ensemble_size,
               initial_epoch=train_epochs)

        overtrained_metrics = [
            ensemble_metrics(x_train, y_train, model, ll),
            ensemble_metrics(x_test, y_test, model, ll)
        ]

        # Perform refined VI.
        sample_op = []
        for l in model.layers:
            if isinstance(l, (tfp.layers.DenseLocalReparameterization,
                              tfp.layers.Convolution2DFlipout)):
                weight_op, weight_cost = sample_auxiliary_op(
                    l.kernel_prior.distribution,
                    l.kernel_posterior.distribution,
                    FLAGS.auxiliary_variance_ratio)
                sample_op.append(weight_op)
                sample_op.append(l.kl_cost_weight.assign_add(weight_cost))
                # Fix the variance of the prior
                session.run(l.kernel_prior.distribution.istrainable.assign(0.))
                if hasattr(l.bias_prior, 'distribution'):
                    bias_op, bias_cost = sample_auxiliary_op(
                        l.bias_prior.distribution,
                        l.bias_posterior.distribution,
                        FLAGS.auxiliary_variance_ratio)
                    sample_op.append(bias_op)
                    sample_op.append(l.kl_cost_bias.assign_add(bias_cost))
                    # Fix the variance of the prior
                    session.run(
                        l.bias_prior.distribution.istrainable.assign(0.))

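        # Build the ensemble: each member restarts from the base model and
        # alternates auxiliary sampling with short fine-tuning runs at a
        # decayed learning rate.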
        ensemble_filenames = []
        for i in range(FLAGS.ensemble_size):
            model.load_weights(base_model_filename)
            for j in range(FLAGS.n_auxiliary_variables):
                session.run(sample_op)
                model.compile(
                    optimizer=tf.keras.optimizers.Adam(
                        # The learning rate is proportional to the scale of the prior.
                        lr=float(FLAGS.learning_rate_for_sampling *
                                 np.sqrt(1. -
                                         FLAGS.auxiliary_variance_ratio)**j)),
                    loss=negative_log_likelihood,
                    metrics=metrics)
                fit_fn(model,
                       FLAGS.auxiliary_sampling_frequency,
                       initial_epoch=train_epochs,
                       with_lr_schedule=False)
            ensemble_filename = os.path.join(
                model_dir, 'ensemble_component_' + str(i) + '.weights')
            ensemble_filenames.append(ensemble_filename)
            model.save_weights(ensemble_filename)

        auxiliary_metrics = [
            ensemble_metrics(x_train,
                             y_train,
                             model,
                             ll,
                             weight_files=ensemble_filenames),
            ensemble_metrics(x_test,
                             y_test,
                             model,
                             ll,
                             weight_files=ensemble_filenames)
        ]

        for metrics, name in [(base_metrics, 'Base model'),
                              (overtrained_metrics, 'Overtrained model'),
                              (auxiliary_metrics, 'Auxiliary sampling')]:
            logging.info(name)
            for metrics_dict, split in [(metrics[0], 'Training'),
                                        (metrics[1], 'Testing')]:
                logging.info(split)
                for metric_name in metrics_dict:
                    logging.info('%s: %s', metric_name,
                                 metrics_dict[metric_name])
Code example #2
def main(argv):
  del argv  # unused arg
  np.random.seed(FLAGS.seed)
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)
  tf1.disable_v2_behavior()

  session = tf1.Session()
  x_train, y_train, x_test, y_test = datasets.load(session)
  n_train = x_train.shape[0]
  num_classes = int(np.amax(y_train)) + 1

  ensemble_filenames = []
  for i in range(FLAGS.ensemble_size):
    # TODO(trandustin): We re-build the graph for each ensemble member. This
    # is due to an unknown bug where the variables are otherwise not
    # re-initialized to be random. While this is inefficient in graph mode, I'm
    # keeping this for now as we'd like to move to eager mode anyways.
    if not FLAGS.resnet:
      model = lenet5(x_train.shape[1:], num_classes)
    else:
      model = res_net(
          n_train,
          x_train.shape[1:],
          num_classes,
          batchnorm=FLAGS.batchnorm,
          variational=False)

      def schedule_fn(epoch):
        """Learning rate schedule function."""
        rate = FLAGS.learning_rate
        if epoch > 180:
          rate *= 0.5e-3
        elif epoch > 160:
          rate *= 1e-3
        elif epoch > 120:
          rate *= 1e-2
        elif epoch > 80:
          rate *= 1e-1
        return rate

      lr_callback = tf.keras.callbacks.LearningRateScheduler(schedule_fn)

    def negative_log_likelihood(y, rv_y):
      del rv_y  # unused arg
      return -model.output.distribution.log_prob(tf.squeeze(y))  # pylint: disable=cell-var-from-loop

    def accuracy(y_true, y_sample):
      del y_sample  # unused arg
      return tf.equal(
          tf.argmax(input=model.output.distribution.logits, axis=1),  # pylint: disable=cell-var-from-loop
          tf.cast(tf.squeeze(y_true), tf.int64))

    def log_likelihood(y_true, y_sample):
      del y_sample  # unused arg
      return model.output.distribution.log_prob(tf.squeeze(y_true))  # pylint: disable=cell-var-from-loop

    model.compile(
        optimizer=tf.keras.optimizers.Adam(lr=FLAGS.learning_rate),
        loss=negative_log_likelihood,
        metrics=[log_likelihood, accuracy])
    member_dir = os.path.join(FLAGS.output_dir, 'member_' + str(i))
    tensorboard = tf1.keras.callbacks.TensorBoard(
        log_dir=member_dir,
        update_freq=FLAGS.batch_size * FLAGS.validation_freq)

    if FLAGS.bootstrap:
      inds = np.random.choice(n_train, n_train, replace=True)
      x_sampled = x_train[inds]
      y_sampled = y_train[inds]

    model.fit(
        x=x_train if not FLAGS.bootstrap else x_sampled,
        y=y_train if not FLAGS.bootstrap else y_sampled,
        batch_size=FLAGS.batch_size,
        epochs=(FLAGS.batch_size * FLAGS.training_steps) // n_train,
        validation_data=(x_test, y_test),
        validation_freq=max(
            (FLAGS.validation_freq * FLAGS.batch_size) // n_train, 1),
        verbose=1,
        callbacks=[tensorboard]
        if not FLAGS.resnet else [tensorboard, lr_callback])

    member_filename = os.path.join(member_dir, 'model.weights')
    ensemble_filenames.append(member_filename)
    model.save_weights(member_filename)

  labels = tf.keras.layers.Input(shape=y_train.shape[1:])
  ll = tf.keras.backend.function([model.input, labels], [
      model.output.distribution.log_prob(tf.squeeze(labels)),
      model.output.distribution.logits,
  ])

  ensemble_metrics_vals = {
      'train': ensemble_metrics(
          x_train, y_train, model, ll, weight_files=ensemble_filenames),
      'test': ensemble_metrics(
          x_test, y_test, model, ll, weight_files=ensemble_filenames),
  }

  for split, metrics in ensemble_metrics_vals.items():
    logging.info(split)
    for metric_name in metrics:
      logging.info('%s: %s', metric_name, metrics[metric_name])
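
The ensemble_metrics helper is defined elsewhere in the repository. As a rough, hypothetical illustration of the kind of evaluation it performs with the `ll` backend function above (not the actual implementation), the sketch below loads each saved member, collects per-example log-likelihoods and logits, and averages the members' predictive probabilities.

import numpy as np
import scipy.special

def sketch_ensemble_eval(x, y, model, ll, weight_files):
  """Hypothetical ensemble evaluation: average predictions over saved members."""
  member_probs = []
  member_log_likelihoods = []
  for filename in weight_files:
    model.load_weights(filename)
    log_probs, logits = ll([x, y])  # per-example log p(y|x) and class logits
    member_log_likelihoods.append(np.mean(log_probs))
    member_probs.append(scipy.special.softmax(logits, axis=-1))
  mean_probs = np.mean(member_probs, axis=0)  # ensemble predictive distribution
  ensemble_accuracy = np.mean(np.argmax(mean_probs, axis=-1) == np.squeeze(y))
  return {'ensemble_accuracy': ensemble_accuracy,
          'mean_member_log_likelihood': np.mean(member_log_likelihoods)}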