Example #1
    def __init__(self, min_level, max_level, num_scales, aspect_ratios,
                 anchor_scale, image_size):
        """Constructs multiscale anchors.

    Args:
      min_level: integer number of minimum level of the output feature pyramid.
      max_level: integer number of maximum level of the output feature pyramid.
      num_scales: integer number representing intermediate scales added
        on each level. For instances, num_scales=2 adds two additional
        anchor scales [2^0, 2^0.5] on each level.
      aspect_ratios: list of representing the aspect ratio anchors added
        on each level. For instances, aspect_ratios = [1.0, 2.0, 0..5]
        adds three anchors on each level.
      anchor_scale: float number representing the scale of size of the base
        anchor to the feature stride 2^level. Or a list, one value per layer.
      image_size: integer number or tuple of integer number of input image size.
    """
        self.min_level = min_level
        self.max_level = max_level
        self.num_scales = num_scales
        self.aspect_ratios = aspect_ratios
        if isinstance(anchor_scale, (list, tuple)):
            assert len(anchor_scale) == max_level - min_level + 1
            self.anchor_scales = anchor_scale
        else:
            self.anchor_scales = [anchor_scale] * (max_level - min_level + 1)
        self.image_size = utils.parse_image_size(image_size)
        self.feat_sizes = utils.get_feat_sizes(image_size, max_level)
        self.config = self._generate_configs()
        self.boxes = self._generate_boxes()
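A minimal usage sketch, assuming this __init__ belongs to the Anchors class in anchors.py; the argument values below are common defaults chosen for illustration, not taken from this snippet:

anchors = Anchors(
    min_level=3,                    # pyramid levels P3..P7
    max_level=7,
    num_scales=3,                   # scales 2^0, 2^(1/3), 2^(2/3) per level
    aspect_ratios=[1.0, 2.0, 0.5],  # three aspect ratios per scale
    anchor_scale=4.0,               # base anchor is 4x the stride 2^level
    image_size=640)
# anchors.boxes now holds the generated anchor boxes for every level.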
Example #2
def main(_):
    if FLAGS.strategy == 'tpu':
        tf.disable_eager_execution()
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tpu_grpc_url = tpu_cluster_resolver.get_master()
        tf.Session.reset(tpu_grpc_url)
    else:
        tpu_cluster_resolver = None

    # Check data path
    if FLAGS.mode in ('train', 'train_and_eval'):
        if FLAGS.train_file_pattern is None:
            raise RuntimeError('Must specify --train_file_pattern for train.')
    if FLAGS.mode in ('eval', 'train_and_eval'):
        if FLAGS.val_file_pattern is None:
            raise RuntimeError('Must specify --val_file_pattern for eval.')

    # Parse and override hparams
    config = hparams_config.get_detection_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
        config.num_epochs = FLAGS.num_epochs

    # Parse image size in case it is in string format.
    config.image_size = utils.parse_image_size(config.image_size)

    # The following is for spatial partitioning. `features` has one tensor,
    # while `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The
    # input partition is performed on `features` and all partitionable tensors
    # of `labels`; see the partition logic below.
    # In the TPUEstimator context, `shard` and `replica` mean the same thing;
    # following the API, both terms are used here.
    if FLAGS.use_spatial_partition:
        # Checks input_partition_dims agrees with num_cores_per_replica.
        if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
            raise RuntimeError(
                '--num_cores_per_replica must be the product of the '
                'elements in --input_partition_dims.')

        labels_partition_dims = {
            'mean_num_positives': None,
            'source_ids': None,
            'groundtruth_data': None,
            'image_scales': None,
            'image_masks': None,
        }
        # The Input Partition Logic: We partition only the partition-able tensors.
        feat_sizes = utils.get_feat_sizes(config.get('image_size'),
                                          config.get('max_level'))
        for level in range(config.get('min_level'),
                           config.get('max_level') + 1):

            def _can_partition(spatial_dim):
                partitionable_index = np.where(
                    spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
                return len(partitionable_index[0]) == len(
                    FLAGS.input_partition_dims)

            spatial_dim = feat_sizes[level]
            if _can_partition(spatial_dim['height']) and _can_partition(
                    spatial_dim['width']):
                labels_partition_dims['box_targets_%d' %
                                      level] = FLAGS.input_partition_dims
                labels_partition_dims['cls_targets_%d' %
                                      level] = FLAGS.input_partition_dims
            else:
                labels_partition_dims['box_targets_%d' % level] = None
                labels_partition_dims['cls_targets_%d' % level] = None
        num_cores_per_replica = FLAGS.num_cores_per_replica
        input_partition_dims = [
            FLAGS.input_partition_dims, labels_partition_dims
        ]
        num_shards = FLAGS.num_cores // num_cores_per_replica
    else:
        num_cores_per_replica = None
        input_partition_dims = None
        num_shards = FLAGS.num_cores
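    # Worked sketch of the partition logic above (assumed values: a 640x640
    # input and --input_partition_dims=[1, 2]). Feature sizes halve per level,
    # so levels 3..7 are 80, 40, 20, 10, 5. A level is partitionable only if
    # every partition dim divides its spatial dims evenly:
    #   levels 3-6: 80, 40, 20, 10 all satisfy size % 2 == 0 -> partitioned
    #   level 7:    5 % 2 != 0 -> box/cls targets keep partition dims of None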

    params = dict(config.as_dict(),
                  model_name=FLAGS.model_name,
                  iterations_per_loop=FLAGS.iterations_per_loop,
                  model_dir=FLAGS.model_dir,
                  num_shards=num_shards,
                  num_examples_per_epoch=FLAGS.num_examples_per_epoch,
                  strategy=FLAGS.strategy,
                  backbone_ckpt=FLAGS.backbone_ckpt,
                  ckpt=FLAGS.ckpt,
                  val_json_file=FLAGS.val_json_file,
                  testdev_dir=FLAGS.testdev_dir,
                  profile=FLAGS.profile,
                  mode=FLAGS.mode)
    config_proto = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False)
    if FLAGS.strategy != 'tpu':
        if FLAGS.use_xla:
            config_proto.graph_options.optimizer_options.global_jit_level = (
                tf.OptimizerOptions.ON_1)
            config_proto.gpu_options.allow_growth = True

    model_dir = FLAGS.model_dir
    model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
    max_instances_per_image = config.max_instances_per_image
    if FLAGS.eval_samples:
        eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
                         FLAGS.eval_batch_size)
    else:
        eval_steps = None
    total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
    train_steps = total_examples // FLAGS.train_batch_size
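    # Note: eval_steps uses ceiling division, so a partial final batch is
    # still evaluated (e.g. 5000 samples / batch 64 -> 79 steps), while
    # train_steps uses floor division and drops any partial final batch.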
    logging.info(params)

    if not tf.io.gfile.exists(model_dir):
        tf.io.gfile.makedirs(model_dir)

    config_file = os.path.join(model_dir, 'config.yaml')
    if not tf.io.gfile.exists(config_file):
        tf.io.gfile.GFile(config_file, 'w').write(str(config))

    train_input_fn = dataloader.InputReader(
        FLAGS.train_file_pattern,
        is_training=True,
        use_fake_data=FLAGS.use_fake_data,
        max_instances_per_image=max_instances_per_image)
    eval_input_fn = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=FLAGS.use_fake_data,
        max_instances_per_image=max_instances_per_image)

    if FLAGS.strategy == 'tpu':
        tpu_config = tf.estimator.tpu.TPUConfig(
            FLAGS.iterations_per_loop,
            num_cores_per_replica=num_cores_per_replica,
            input_partition_dims=input_partition_dims,
            per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig.
            PER_HOST_V2)
        run_config = tf.estimator.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
            model_dir=model_dir,
            log_step_count_steps=FLAGS.iterations_per_loop,
            session_config=config_proto,
            tpu_config=tpu_config,
            save_checkpoints_steps=FLAGS.save_checkpoints_steps,
            tf_random_seed=FLAGS.tf_random_seed,
        )
        # TPUEstimator can do both train and eval.
        train_est = tf.estimator.tpu.TPUEstimator(
            model_fn=model_fn_instance,
            train_batch_size=FLAGS.train_batch_size,
            eval_batch_size=FLAGS.eval_batch_size,
            config=run_config,
            params=params)
        eval_est = train_est
    else:
        strategy = None
        if FLAGS.strategy == 'gpus':
            strategy = tf.distribute.MirroredStrategy()
        run_config = tf.estimator.RunConfig(
            model_dir=model_dir,
            train_distribute=strategy,
            log_step_count_steps=FLAGS.iterations_per_loop,
            session_config=config_proto,
            save_checkpoints_steps=FLAGS.save_checkpoints_steps,
            tf_random_seed=FLAGS.tf_random_seed,
        )

        def get_estimator(global_batch_size):
            params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
            params['batch_size'] = global_batch_size // params['num_shards']
            return tf.estimator.Estimator(model_fn=model_fn_instance,
                                          config=run_config,
                                          params=params)

        # train and eval need different estimators due to different batch sizes.
        train_est = get_estimator(FLAGS.train_batch_size)
        eval_est = get_estimator(FLAGS.eval_batch_size)
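        # Sketch of the batch split above (assumed numbers): with a global
        # train batch size of 64 and MirroredStrategy over 8 GPUs,
        # num_shards == 8 and params['batch_size'] == 64 // 8 == 8 per replica.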

    # start train/eval flow.
    if FLAGS.mode == 'train':
        train_est.train(input_fn=train_input_fn, max_steps=train_steps)
        if FLAGS.eval_after_train:
            eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    elif FLAGS.mode == 'eval':
        # Run evaluation when there's a new checkpoint
        for ckpt in tf.train.checkpoints_iterator(
                FLAGS.model_dir,
                min_interval_secs=FLAGS.min_eval_interval,
                timeout=FLAGS.eval_timeout):

            logging.info('Starting to evaluate.')
            try:
                eval_results = eval_est.evaluate(eval_input_fn,
                                                 steps=eval_steps)
                # Terminate eval job when final checkpoint is reached.
                try:
                    current_step = int(os.path.basename(ckpt).split('-')[1])
                except IndexError:
                    logging.info('%s has no global step info: stop!', ckpt)
                    break

                utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
                if current_step >= train_steps:
                    logging.info('Eval finished step %d/%d', current_step,
                                 train_steps)
                    break

            except tf.errors.NotFoundError:
                # The checkpoint might have been deleted by the time eval
                # finished; simply skip such cases.
                logging.info('Checkpoint %s no longer exists, skipping.', ckpt)

    elif FLAGS.mode == 'train_and_eval':
        ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
        try:
            step = int(os.path.basename(ckpt).split('-')[1])
            current_epoch = (step * FLAGS.train_batch_size //
                             FLAGS.num_examples_per_epoch)
            logging.info('found ckpt at step %d (epoch %d)', step,
                         current_epoch)
        except (IndexError, TypeError):
            logging.info('Folder %s has no ckpt with valid step.',
                         FLAGS.model_dir)
            current_epoch = 0

        def run_train_and_eval(e):
            print('\n   =====> Starting training, epoch: %d.' % e)
            train_est.train(input_fn=train_input_fn,
                            max_steps=e * FLAGS.num_examples_per_epoch //
                            FLAGS.train_batch_size)
            print('\n   =====> Starting evaluation, epoch: %d.' % e)
            eval_results = eval_est.evaluate(input_fn=eval_input_fn,
                                             steps=eval_steps)
            ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
            utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

        epochs_per_cycle = 1  # a higher number reduces graph construction overhead.
        for e in range(current_epoch + 1, config.num_epochs + 1,
                       epochs_per_cycle):
            if FLAGS.run_epoch_in_child_process:
                p = multiprocessing.Process(target=run_train_and_eval,
                                            args=(e, ))
                p.start()
                p.join()
                if p.exitcode != 0:
                    return p.exitcode
            else:
                tf.compat.v1.reset_default_graph()
                run_train_and_eval(e)

    else:
        logging.info('Invalid mode: %s', FLAGS.mode)
Example #3
def build_feature_network(features, config):
    """Build FPN input features.

  Args:
   features: input tensor.
   config: a dict-like config, including all parameters.

  Returns:
    A dict from levels to the feature maps processed after feature network.
  """
    feat_sizes = utils.get_feat_sizes(config.image_size, config.max_level)
    feats = []
    if config.min_level not in features.keys():
        raise ValueError(
            'features.keys ({}) should include min_level ({})'.format(
                features.keys(), config.min_level))

    # Build additional input features that are not from backbone.
    for level in range(config.min_level, config.max_level + 1):
        if level in features.keys():
            feats.append(features[level])
        else:
            if config.data_format == 'channels_first':
                h_id, w_id = 2, 3
            else:
                h_id, w_id = 1, 2
            # Adds a coarser level by downsampling the last feature map
            # (ceiling halving; see the sketch after this function).
            feats.append(
                resample_feature_map(
                    feats[-1],
                    name='p%d' % level,
                    target_height=(feats[-1].shape[h_id] - 1) // 2 + 1,
                    target_width=(feats[-1].shape[w_id] - 1) // 2 + 1,
                    target_num_channels=config.fpn_num_filters,
                    apply_bn=config.apply_bn_for_resampling,
                    is_training=config.is_training_bn,
                    conv_after_downsample=config.conv_after_downsample,
                    strategy=config.strategy,
                    data_format=config.data_format,
                    batch_norm_trainable=config.batch_norm_trainable))

    utils.verify_feats_size(feats,
                            feat_sizes=feat_sizes,
                            min_level=config.min_level,
                            max_level=config.max_level,
                            data_format=config.data_format)

    with tf.variable_scope('fpn_cells'):
        for rep in range(config.fpn_cell_repeats):
            with tf.variable_scope('cell_{}'.format(rep)):
                logging.info('building cell %d', rep)
                new_feats = build_bifpn_layer(feats, feat_sizes, config)

                feats = [
                    new_feats[level]
                    for level in range(config.min_level, config.max_level + 1)
                ]

                utils.verify_feats_size(feats,
                                        feat_sizes=feat_sizes,
                                        min_level=config.min_level,
                                        max_level=config.max_level,
                                        data_format=config.data_format)

    return new_feats
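The (dim - 1) // 2 + 1 arithmetic used when adding coarser levels is ceiling division by two; a small sketch with assumed sizes for a 640 input:

def half_size(dim):
    # Ceiling halving, matching target_height/target_width above.
    return (dim - 1) // 2 + 1

assert half_size(20) == 10  # P5 at 20x20 -> P6 at 10x10
assert half_size(5) == 3    # odd sizes round up (e.g. 5 -> 3)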