Example #1
  def test_cosine_lr(self):
    cosine_schedule = utils.WarmupLearningRateSchedule(
        1.0, total_steps=10, lr_decay_type='cosine', warmup_epochs=None)

    lr = cosine_schedule(4)
    self.assertAllClose(lr, 0.654508)

    lr = cosine_schedule(5)
    self.assertAllClose(lr, 0.5)

    lr = cosine_schedule(6)
    self.assertAllClose(lr, 0.345491)
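
For reference, the expected values match a standard half-cosine decay, lr(t) = 0.5 * lr_base * (1 + cos(pi * t / total_steps)). A minimal standalone sketch under that assumption (cosine_lr is illustrative, not part of utils):

import math

def cosine_lr(lr_base, step, total_steps):
  """Half-cosine decay from lr_base toward 0 over total_steps."""
  return 0.5 * lr_base * (1 + math.cos(math.pi * step / total_steps))

# Reproduces the asserted values above.
assert abs(cosine_lr(1.0, 4, 10) - 0.654508) < 1e-5
assert abs(cosine_lr(1.0, 5, 10) - 0.5) < 1e-6
assert abs(cosine_lr(1.0, 6, 10) - 0.345491) < 1e-5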
Example #2
  def test_linear_lr(self):
    linear_schedule = utils.WarmupLearningRateSchedule(
        1.0, total_steps=10, lr_decay_type='linear', warmup_epochs=None)

    lr = linear_schedule(0)
    self.assertAllClose(lr, 1.0)

    lr = linear_schedule(5)
    self.assertAllClose(lr, 0.5)

    lr = linear_schedule(10)
    self.assertAllClose(lr, 0.0)
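
These values correspond to a straight-line ramp from lr_base down to 0, lr(t) = lr_base * (1 - t / total_steps). A sketch under that assumption (linear_lr is illustrative):

def linear_lr(lr_base, step, total_steps):
  """Linear decay from lr_base at step 0 to 0 at total_steps."""
  return lr_base * (1 - step / total_steps)

assert linear_lr(1.0, 0, 10) == 1.0
assert linear_lr(1.0, 5, 10) == 0.5
assert linear_lr(1.0, 10, 10) == 0.0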
Example #3
  def test_warmup(self):
    warmup_schedule = utils.WarmupLearningRateSchedule(
        1.0,
        total_steps=100,
        steps_per_epoch=10,
        warmup_epochs=2,
        lr_decay_type='constant')

    lr = warmup_schedule(5)
    self.assertAllClose(lr, 0.25)

    lr = warmup_schedule(35)
    self.assertAllClose(lr, 1.0)
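
With steps_per_epoch=10 and warmup_epochs=2, warmup covers the first 20 steps, and the asserted values fit a linear ramp lr(t) = lr_base * t / warmup_steps followed by the constant schedule. A sketch under that assumption (warmup_lr is illustrative):

def warmup_lr(lr_base, step, steps_per_epoch, warmup_epochs):
  """Linear warmup to lr_base, then constant thereafter."""
  warmup_steps = steps_per_epoch * warmup_epochs
  return lr_base * min(step / warmup_steps, 1.0)

assert warmup_lr(1.0, 5, 10, 2) == 0.25  # 5 / 20
assert warmup_lr(1.0, 35, 10, 2) == 1.0  # past warmup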
Example #4
  def test_exponential_lr(self):
    exponential_schedule = utils.WarmupLearningRateSchedule(
        1.0,
        total_steps=100,
        steps_per_epoch=10,
        decay_epochs=2,
        decay_factor=0.5,
        lr_decay_type='exponential',
        warmup_epochs=None)

    lr = exponential_schedule(5)
    self.assertAllClose(lr, 1.0)

    lr = exponential_schedule(25)
    self.assertAllClose(lr, 0.5)

    lr = exponential_schedule(70)
    self.assertAllClose(lr, 0.125)
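
Here decay_epochs=2 and steps_per_epoch=10 give a decay period of 20 steps, and the asserted values fit a staircase decay lr(t) = lr_base * decay_factor ** floor(t / decay_steps). A sketch under that assumption (exponential_lr is illustrative):

def exponential_lr(lr_base, step, steps_per_epoch, decay_epochs, decay_factor):
  """Staircase decay: multiply by decay_factor once per decay period."""
  decay_steps = steps_per_epoch * decay_epochs
  return lr_base * decay_factor**(step // decay_steps)

assert exponential_lr(1.0, 5, 10, 2, 0.5) == 1.0  # floor(5 / 20) == 0
assert exponential_lr(1.0, 25, 10, 2, 0.5) == 0.5  # floor(25 / 20) == 1
assert exponential_lr(1.0, 70, 10, 2, 0.5) == 0.125  # floor(70 / 20) == 3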
Example #5
def main(_) -> None:
    config = copy.deepcopy(hparams.base_config)
    config.override(effnetv2_configs.get_model_config(FLAGS.model_name))
    config.override(datasets.get_dataset_config(FLAGS.dataset_cfg))
    config.override(FLAGS.hparam_str)
    config.model.num_classes = config.data.num_classes
    strategy = config.runtime.strategy
    if strategy == 'tpu' and not config.model.bn_type:
        config.model.bn_type = 'tpu_bn'

    # log and save config.
    logging.info('config=%s', str(config))
    if 'train' in FLAGS.mode:
        if not tf.io.gfile.exists(FLAGS.model_dir):
            tf.io.gfile.makedirs(FLAGS.model_dir)
        config.save_to_yaml(os.path.join(FLAGS.model_dir, 'config.yaml'))

    if strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.MirroredStrategy(['GPU:0'])
        else:
            ds_strategy = tf.distribute.MirroredStrategy(['CPU:0'])

    with ds_strategy.scope():
        train_split = config.train.split or 'train'
        eval_split = config.eval.split or 'eval'
        num_train_images = config.data.splits[train_split].num_images
        num_eval_images = config.data.splits[eval_split].num_images

        train_size = config.train.isize
        eval_size = config.eval.isize
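        # A value <= 16 is treated as a fraction of eval_size (judging by the
        # arithmetic below), rounded down to a multiple of 16.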
        if train_size <= 16.:
            train_size = int(eval_size * train_size) // 16 * 16

        image_dtype = None
        if config.runtime.mixed_precision:
            image_dtype = 'bfloat16' if strategy == 'tpu' else 'float16'
            precision = 'mixed_bfloat16' if strategy == 'tpu' else 'mixed_float16'
            policy = tf.keras.mixed_precision.Policy(precision)
            tf.keras.mixed_precision.set_global_policy(policy)

        model = TrainableModel(config.model.model_name,
                               config.model,
                               weight_decay=config.train.weight_decay)

        if config.train.ft_init_ckpt:  # load pretrained ckpt for finetuning.
            model(tf.ones([1, 224, 224, 3]))
            ckpt = config.train.ft_init_ckpt
            utils.restore_tf2_ckpt(model,
                                   ckpt,
                                   exclude_layers=('_head', 'optimizer'))

        steps_per_epoch = num_train_images // config.train.batch_size
        total_steps = steps_per_epoch * config.train.epochs

        scaled_lr = config.train.lr_base * (config.train.batch_size / 256.0)
        scaled_lr_min = config.train.lr_min * (config.train.batch_size / 256.0)
        learning_rate = utils.WarmupLearningRateSchedule(
            scaled_lr,
            steps_per_epoch=steps_per_epoch,
            decay_epochs=config.train.lr_decay_epoch,
            warmup_epochs=config.train.lr_warmup_epoch,
            decay_factor=config.train.lr_decay_factor,
            lr_decay_type=config.train.lr_sched,
            total_steps=total_steps,
            minimal_lr=scaled_lr_min)

        optimizer = build_tf2_optimizer(learning_rate,
                                        optimizer_name=config.train.optimizer)

        model.compile(
            optimizer=optimizer,
            loss=tf.keras.losses.CategoricalCrossentropy(
                label_smoothing=config.train.label_smoothing,
                from_logits=True),
            metrics=[
                tf.keras.metrics.TopKCategoricalAccuracy(k=1, name='acc_top1'),
                tf.keras.metrics.TopKCategoricalAccuracy(k=5, name='acc_top5')
            ],
        )

        ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
            os.path.join(FLAGS.model_dir, 'ckpt-{epoch:d}'),
            verbose=1,
            save_weights_only=True)
        tb_callback = tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir,
                                                     update_freq=100)
        rstr_callback = tf.keras.callbacks.experimental.BackupAndRestore(
            backup_dir=FLAGS.model_dir)

        def get_dataset(training):
            """A shared utility to get input dataset."""
            if training:
                return ds_strategy.distribute_datasets_from_function(
                    datasets.build_dataset_input(
                        True, train_size, image_dtype, FLAGS.data_dir,
                        train_split, config.data).distribute_dataset_fn(
                            config.train.batch_size))
            else:
                return ds_strategy.distribute_datasets_from_function(
                    datasets.build_dataset_input(
                        False, eval_size, image_dtype, FLAGS.data_dir,
                        eval_split, config.data).distribute_dataset_fn(
                            config.eval.batch_size))

        if FLAGS.mode == 'traineval':
            model.fit(
                get_dataset(training=True),
                epochs=config.train.epochs,
                steps_per_epoch=steps_per_epoch,
                validation_data=get_dataset(training=False),
                validation_steps=num_eval_images // config.eval.batch_size,
                callbacks=[ckpt_callback, tb_callback, rstr_callback],
                # Avoid log spam when running on TPUs.
                verbose=2 if strategy == 'tpu' else 1,
            )
        elif FLAGS.mode == 'train':
            model.fit(
                get_dataset(training=True),
                epochs=config.train.epochs,
                steps_per_epoch=steps_per_epoch,
                callbacks=[ckpt_callback, tb_callback, rstr_callback],
                verbose=2 if strategy == 'tpu' else 1,
            )
        elif FLAGS.mode == 'eval':
            for ckpt in tf.train.checkpoints_iterator(FLAGS.model_dir,
                                                      timeout=60 * 60 * 24):
                model.load_weights(ckpt)
                eval_results = model.evaluate(
                    get_dataset(training=False),
                    batch_size=config.eval.batch_size,
                    steps=num_eval_images // config.eval.batch_size,
                    callbacks=[tb_callback, rstr_callback],
                    verbose=2 if strategy == 'tpu' else 1,
                )

                try:
                    current_epoch = int(os.path.basename(ckpt).split('-')[1])
                except IndexError:
                    logging.info('%s has no epoch info: stop!', ckpt)
                    break

                logging.info('Epoch: %d, total %d', current_epoch,
                             config.train.epochs)
                if current_epoch >= config.train.epochs:
                    break
        else:
            raise ValueError(f'Invalid mode {FLAGS.mode}')
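
Note the linear learning-rate scaling applied to scaled_lr and scaled_lr_min: the base rates are defined for a reference batch size of 256 and scaled proportionally. A minimal sketch of that rule (scale_lr is illustrative, not from the repo):

def scale_lr(lr_base, batch_size, base_batch_size=256):
    """Linear scaling rule: the learning rate grows with batch size."""
    return lr_base * (batch_size / base_batch_size)

assert scale_lr(0.016, 4096) == 0.256  # 16x the batch -> 16x the rate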
Example #6
def model_fn(features, labels, mode, params):
    """The model_fn to be used with TPUEstimator.

  Args:
    features: A dict of `Tensor` of batched images and other features.
    labels: a Tensor or a dict of Tensor representing the batched labels.
    mode: one of `tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}`
    params: `dict` of parameters passed to the model from the TPUEstimator,
      `params['batch_size']` is always provided and should be used as the
      effective batch size.

  Returns:
    A `TPUEstimatorSpec` for the model
  """
    logging.info('params=%s', params)
    images = features['image'] if isinstance(features, dict) else features
    labels = labels['label'] if isinstance(labels, dict) else labels
    config = params['config']
    image_size = params['image_size']
    utils.scalar('model/resolution', image_size)

    if config.model.data_format == 'channels_first':
        images = tf.transpose(images, [0, 3, 1, 2])

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    has_moving_average_decay = (config.train.ema_decay > 0)
    if FLAGS.use_tpu and not config.model.bn_type:
        config.model.bn_type = 'tpu_bn'
    # This is essential when using a Keras-derived model.
    tf.keras.backend.set_learning_phase(is_training)

    def build_model(in_images):
        """Build model using the model_name given through the command line."""
        config.model.num_classes = config.data.num_classes
        model = effnetv2_model.EffNetV2Model(config.model.model_name,
                                             config.model)
        logits = model(in_images, training=is_training)[0]
        return logits

    pre_num_params, pre_num_flops = utils.num_params_flops(
        readable_format=True)

    if config.runtime.mixed_precision:
        precision = 'mixed_bfloat16' if FLAGS.use_tpu else 'mixed_float16'
        logits = utils.build_model_with_precision(precision, build_model,
                                                  images, is_training)
        logits = tf.cast(logits, tf.float32)
    else:
        logits = build_model(images)

    num_params, num_flops = utils.num_params_flops(readable_format=True)
    num_params = num_params - pre_num_params
    num_flops = (num_flops - pre_num_flops) / params['batch_size']
    logging.info('backbone params/flops = %.4f M / %.4f B', num_params,
                 num_flops)
    utils.scalar('model/params', num_params)
    utils.scalar('model/flops', num_flops)

    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    if config.train.loss_type == 'sigmoid':
        cross_entropy = tf.losses.sigmoid_cross_entropy(
            multi_class_labels=tf.cast(labels, dtype=logits.dtype),
            logits=logits,
            label_smoothing=config.train.label_smoothing)
    elif config.train.loss_type == 'custom':
        xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(
            labels, dtype=logits.dtype),
                                                       logits=logits)
        cross_entropy = tf.reduce_mean(tf.reduce_sum(xent, axis=-1))
    else:
        if config.data.multiclass:
            logging.info('use multi-class loss: %s', config.data.multiclass)
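            # Normalize multi-hot labels into a probability distribution.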
            labels /= tf.reshape(tf.reduce_sum(labels, axis=1), (-1, 1))
        cross_entropy = tf.losses.softmax_cross_entropy(
            onehot_labels=labels,
            logits=logits,
            label_smoothing=config.train.label_smoothing)

    train_steps = max(config.train.min_steps,
                      config.train.epochs * params['steps_per_epoch'])
    global_step = tf.train.get_global_step()
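    # Optionally ramp weight decay during training: it grows linearly from the
    # base value to (1 + weight_decay_inc) * base by the final step.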
    weight_decay_inc = config.train.weight_decay_inc * (
        tf.cast(global_step, tf.float32) / tf.cast(train_steps, tf.float32))
    weight_decay = (1 + weight_decay_inc) * config.train.weight_decay
    utils.scalar('train/weight_decay', weight_decay)
    # Add weight decay to the loss for non-batch-normalization variables.
    matcher = re.compile(config.train.weight_decay_exclude)
    l2loss = weight_decay * tf.add_n([
        tf.nn.l2_loss(v)
        for v in tf.trainable_variables() if not matcher.match(v.name)
    ])
    loss = cross_entropy + l2loss
    utils.scalar('loss/l2reg', l2loss)
    utils.scalar('loss/xent', cross_entropy)

    if has_moving_average_decay:
        ema = tf.train.ExponentialMovingAverage(decay=config.train.ema_decay,
                                                num_updates=global_step)
        ema_vars = utils.get_ema_vars()

    host_call = None
    restore_vars_dict = None
    if is_training:
        # Compute the current epoch and associated learning rate from global_step.
        current_epoch = (tf.cast(global_step, tf.float32) /
                         params['steps_per_epoch'])
        utils.scalar('train/epoch', current_epoch)

        scaled_lr = config.train.lr_base * (config.train.batch_size / 256.0)
        scaled_lr_min = config.train.lr_min * (config.train.batch_size / 256.0)
        learning_rate = utils.WarmupLearningRateSchedule(
            scaled_lr,
            steps_per_epoch=params['steps_per_epoch'],
            decay_epochs=config.train.lr_decay_epoch,
            warmup_epochs=config.train.lr_warmup_epoch,
            decay_factor=config.train.lr_decay_factor,
            lr_decay_type=config.train.lr_sched,
            total_steps=train_steps,
            minimal_lr=scaled_lr_min)(global_step)
        utils.scalar('train/lr', learning_rate)
        optimizer = utils.build_optimizer(
            learning_rate, optimizer_name=config.train.optimizer)
        if FLAGS.use_tpu:
            # When using TPU, wrap the optimizer with CrossShardOptimizer which
            # handles synchronization details between different TPU cores. To the
            # user, this should look like regular synchronous training.
            optimizer = tf.tpu.CrossShardOptimizer(optimizer)

        # filter trainable variables if needed.
        var_list = tf.trainable_variables()
        if config.train.varsexp:
            vars2 = [
                v for v in var_list if re.match(config.train.varsexp, v.name)
            ]
            if len(vars2) == len(var_list):
                logging.warning('%s matched all variables; nothing filtered.',
                                config.train.varsexp)
            logging.info('Filter variables: orig=%d, final=%d, delta=%d',
                         len(var_list), len(vars2),
                         len(var_list) - len(vars2))
            var_list = vars2

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if config.train.gclip and is_training:
            logging.info('clip gradients norm by %f', config.train.gclip)
            grads_and_vars = optimizer.compute_gradients(loss, var_list)
            with tf.name_scope('gclip'):
                grads = [gv[0] for gv in grads_and_vars]
                tvars = [gv[1] for gv in grads_and_vars]
                utils.scalar('train/gnorm', tf.linalg.global_norm(grads))
                utils.scalar('train/gnormmax',
                             tf.math.reduce_max([tf.norm(g) for g in grads]))
                # First clip each variable's norm, then clip global norm.
                clip_norm = abs(config.train.gclip)
                clipped_grads = [
                    tf.clip_by_norm(g, clip_norm) if g is not None else None
                    for g in grads
                ]
                clipped_grads, _ = tf.clip_by_global_norm(
                    clipped_grads, clip_norm)
                grads_and_vars = list(zip(clipped_grads, tvars))

            with tf.control_dependencies(update_ops):
                train_op = optimizer.apply_gradients(grads_and_vars,
                                                     global_step)
        else:
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(loss,
                                              global_step,
                                              var_list=var_list)

        if has_moving_average_decay:
            with tf.control_dependencies([train_op]):
                train_op = ema.apply(ema_vars)

        if not config.runtime.skip_host_call:
            host_call = utils.get_tpu_host_call(
                global_step, FLAGS.model_dir,
                config.runtime.iterations_per_loop)
    else:
        train_op = None
        if has_moving_average_decay:
            # Load moving average variables for eval.
            restore_vars_dict = ema.variables_to_restore(ema_vars)

    eval_metrics = None
    if mode == tf.estimator.ModeKeys.EVAL:

        def metric_fn(labels, logits):
            """Evaluation metric function.

      Evaluates accuracy.

      This function is executed on the CPU and should not directly reference
      any Tensors in the rest of the `model_fn`. To pass Tensors from the model
      to the `metric_fn`, provide as part of the `eval_metrics`. See
      https://www.tensorflow.org/api_docs/python/tf/estimator/tpu/TPUEstimatorSpec
      for more information.

      Arguments should match the list of `Tensor` objects passed as the second
      element in the tuple passed to `eval_metrics`.

      Args:
        labels: `Tensor` with shape `[batch, num_classes]`.
        logits: `Tensor` with shape `[batch, num_classes]`.

      Returns:
        A dict of the metrics to return from evaluation.
      """
            metrics = {}
            if config.data.multiclass:
                metrics['eval/global_ap'] = tf.metrics.auc(
                    labels,
                    tf.nn.sigmoid(logits),
                    curve='PR',
                    num_thresholds=200,
                    summation_method='careful_interpolation',
                    name='global_ap')

                # Convert labels to set: be careful, tf.metrics.xx_at_k are horrible.
                labels = tf.cast(labels, dtype=tf.int64)
                label_to_repeat = tf.expand_dims(tf.argmax(labels, axis=-1),
                                                 axis=-1)
                all_labels_set = tf.range(0, labels.shape[-1], dtype=tf.int64)
                all_labels_set = tf.expand_dims(all_labels_set, axis=0)
                labels_set = labels * all_labels_set + (
                    1 - labels) * label_to_repeat

                metrics['eval/precision@1'] = tf.metrics.precision_at_k(
                    labels_set, logits, k=1)
                metrics['eval/recall@1'] = tf.metrics.recall_at_k(labels_set,
                                                                  logits,
                                                                  k=1)
                metrics['eval/precision@5'] = tf.metrics.precision_at_k(
                    labels_set, logits, k=5)
                metrics['eval/recall@5'] = tf.metrics.recall_at_k(labels_set,
                                                                  logits,
                                                                  k=5)

            # always add accuracy.
            labels = tf.argmax(labels, axis=1)
            predictions = tf.argmax(logits, axis=1)
            metrics['eval/acc_top1'] = tf.metrics.accuracy(labels, predictions)
            in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
            metrics['eval/acc_top5'] = tf.metrics.mean(in_top_5)
            metrics['model/resolution'] = tf.metrics.mean(image_size)
            metrics['model/flops'] = tf.metrics.mean(num_flops)
            metrics['model/params'] = tf.metrics.mean(num_params)
            return metrics

        eval_metrics = (metric_fn, [labels, logits])

    if has_moving_average_decay and not is_training:

        def scaffold_fn():  # read ema for eval jobs.
            saver = tf.train.Saver(restore_vars_dict)
            return tf.train.Scaffold(saver=saver)
    elif config.train.ft_init_ckpt and is_training:

        def scaffold_fn():
            logging.info('restore variables from %s',
                         config.train.ft_init_ckpt)
            var_map = utils.get_ckpt_var_map(
                ckpt_path=config.train.ft_init_ckpt,
                skip_mismatch=True,
                init_ema=config.train.ft_init_ema)
            tf.train.init_from_checkpoint(config.train.ft_init_ckpt, var_map)
            return tf.train.Scaffold()
    else:
        scaffold_fn = None

    return tf.estimator.tpu.TPUEstimatorSpec(mode=mode,
                                             loss=loss,
                                             train_op=train_op,
                                             host_call=host_call,
                                             eval_metrics=eval_metrics,
                                             scaffold_fn=scaffold_fn)
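
The gradient-clipping block above is two-stage: each gradient is first clipped to clip_norm individually, then the whole set is clipped by global norm with the same threshold. A standalone TF2-style sketch of the same idea (two_stage_clip is illustrative, not the estimator code):

import tensorflow as tf

def two_stage_clip(grads, clip_norm):
    """Clip each gradient's own norm, then clip the global norm of the set."""
    per_var = [tf.clip_by_norm(g, clip_norm) if g is not None else None
               for g in grads]
    clipped, _ = tf.clip_by_global_norm(per_var, clip_norm)
    return clipped

grads = [tf.constant([3.0, 4.0]), tf.constant([6.0, 8.0])]
clipped = two_stage_clip(grads, 5.0)  # per-variable and global norms <= 5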
Example #7
  def test_constant_lr(self):
    constant_schedule = utils.WarmupLearningRateSchedule(
        1.0, lr_decay_type='constant', warmup_epochs=None)

    lr = constant_schedule(10)
    self.assertAllClose(lr, 1.0)
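
Because the tests call the schedule directly with a step number, WarmupLearningRateSchedule appears to follow the tf.keras.optimizers.schedules.LearningRateSchedule interface, so it can presumably be passed straight to a Keras optimizer. A hedged usage sketch:

schedule = utils.WarmupLearningRateSchedule(
    1.0, total_steps=10000, lr_decay_type='cosine', warmup_epochs=None)
optimizer = tf.keras.optimizers.SGD(learning_rate=schedule, momentum=0.9)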