Code example #1
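All of the examples below are TF 1.x scripts from yekeren's WSOD / WSSGG repositories. They depend on project-local modules (builder, reader, train_utils, the FLAGS definitions, and field classes such as GAPPredictions), so none of them runs in isolation. The following is a minimal sketch of the preamble they assume; the module paths, flag names, and the proto message name are guesses, not the repositories' actual layout.

# Hedged sketch of the shared preamble; module paths and the proto message
# name are assumptions, not the repositories' actual layout.
import json
import os

import numpy as np
import tensorflow as tf  # TF 1.x API: tf.Session, tf.placeholder, tf.logging, ...

from google.protobuf import text_format
from protos import pipeline_pb2  # hypothetical: pipeline configuration proto
from models import builder       # hypothetical: builds a model from its proto config

flags = tf.app.flags
flags.DEFINE_string('pipeline_proto', '', 'Path to a text-format pipeline proto.')
flags.DEFINE_string('model_dir', '', 'Optional checkpoint directory override.')
FLAGS = flags.FLAGS


def _load_pipeline_proto(filename):
  """Loads a text-format pipeline proto; the message name is an assumption."""
  proto = pipeline_pb2.Pipeline()
  with tf.gfile.GFile(filename, 'r') as fp:
    text_format.Merge(fp.read(), proto)
  return proto

Scripts of this style are typically launched through tf.app.run(), which parses the flags and then calls main.
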
def main(_):
  pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)

  if FLAGS.model_dir:
    pipeline_proto.model_dir = FLAGS.model_dir
    tf.logging.info("Override model checkpoint dir: %s", FLAGS.model_dir)

  tf.logging.info("Pipeline configure: %s", '=' * 128)
  tf.logging.info(pipeline_proto)

  g = tf.Graph()
  with g.as_default():
    model = builder.build(pipeline_proto.model, is_training=False)
    predictions = model.build_prediction(
        examples={}, prediction_task=GAPPredictionTasks.word_saliency)
    saver = tf.train.Saver()
    invalid_variable_names = tf.report_uninitialized_variables()

  with tf.Session(graph=g) as sess:
    sess.run(tf.tables_initializer())

    checkpoint_path = tf.train.latest_checkpoint(pipeline_proto.model_dir)
    assert checkpoint_path is not None

    saver.restore(sess, checkpoint_path)
    assert len(sess.run(invalid_variable_names)) == 0

    predictions = sess.run(predictions)

  (vocabulary, word_saliency,
   word_embedding) = (predictions[GAPPredictions.vocabulary],
                      predictions[GAPPredictions.word_saliency],
                      predictions[GAPPredictions.word_embedding])
  vocabulary = [word.decode('UTF8') for word in vocabulary.tolist()]

  # Read the MSCOCO vocabulary.

  name_to_class_id = {}
  with open(FLAGS.name_to_class_id_file, 'r') as fid:
    for line in fid.readlines():
      name, class_id = line.strip('\n').split('\t')
      name_to_class_id[name] = class_id
  queries = list(name_to_class_id)
  query_embedding = word_embedding[[
      vocabulary.index(query) for query in queries
  ]]

  # Rank the words by importance.

  similarity_to_coco = np.matmul(word_embedding, query_embedding.transpose())
  most_similar_coco_word_index = similarity_to_coco.argmax(axis=-1)

  indices = np.argsort(word_saliency)[::-1]
  for i in indices[:1000]:
    word, score = vocabulary[i], word_saliency[i]
    coco_i = most_similar_coco_word_index[i]
    print('%s,%.4lf,%s,%.4lf' % (word, score, queries[coco_i],
                                 similarity_to_coco[i][coco_i]))

  tf.logging.info('Done')
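
The name_to_class_id_file read above is a plain tab-separated file with one name<TAB>class_id pair per line, matching the write format used in code example #12 below. An illustrative way to produce such a file (the names and ids here are made up):

# Illustration only: write a file in the name<TAB>class_id format parsed above.
rows = [('person', '1'), ('car', '3'), ('dog', '18')]  # made-up entries
with open('name_to_class_id.txt', 'w') as fid:
  for name, class_id in rows:
    fid.write('%s\t%s\n' % (name, class_id))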
Code example #2
def main(_):
    pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)
    tf.logging.info("Pipeline configure: %s", '=' * 128)
    tf.logging.info(pipeline_proto)

    g = tf.Graph()
    with g.as_default():

        # Infer saliency.

        image = tf.placeholder(tf.uint8, shape=[None, None, 3])
        image_resized = tf.image.resize_images(image, [
            pipeline_proto.eval_reader.image_height,
            pipeline_proto.eval_reader.image_width
        ])

        model = builder.build(pipeline_proto.model, is_training=False)
        predictions = model.build_prediction(
            examples={InputDataFields.image: tf.expand_dims(image_resized, 0)},
            prediction_task=GAPPredictionTasks.image_saliency)

        height, width = tf.shape(image)[0], tf.shape(image)[1]
        saliency = tf.image.resize_images(
            tf.expand_dims(predictions[GAPPredictions.image_saliency],
                           axis=-1), [height, width],
            tf.image.ResizeMethod.NEAREST_NEIGHBOR)[:, :, :, 0]

        saver = tf.train.Saver()
        invalid_variable_names = tf.report_uninitialized_variables()

    with tf.Session(graph=g) as sess:

        # Load the latest checkpoint.

        checkpoint_path = tf.train.latest_checkpoint(pipeline_proto.model_dir)
        assert checkpoint_path is not None

        saver.restore(sess, checkpoint_path)
        assert len(sess.run(invalid_variable_names)) == 0

        # Iterate the testdir to generate the demo results.

        saliency_func = lambda x: sess.run(saliency[0], feed_dict={image: x})

        for filename in os.listdir(FLAGS.image_path):
            tf.logging.info('On processing %s', filename)

            _get_saliency(input_path=os.path.join(FLAGS.image_path, filename),
                          output_path=os.path.join(FLAGS.demo_path, filename),
                          saliency_func=saliency_func)

    tf.logging.info('Done')
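
_get_saliency itself is not shown on this page. As an illustration only (not the repository's implementation), a helper with the same interface could read the image as a uint8 [height, width, 3] array, run saliency_func, and save the normalized map:

# Illustration only; not the repository's _get_saliency.
import numpy as np
from PIL import Image

def demo_get_saliency(input_path, output_path, saliency_func):
  image = np.array(Image.open(input_path).convert('RGB'), dtype=np.uint8)
  saliency = saliency_func(image)  # [height, width] float map
  saliency = (saliency - saliency.min()) / max(saliency.max() - saliency.min(), 1e-8)
  Image.fromarray((saliency * 255).astype(np.uint8)).save(output_path)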
Code example #3
File: word_importance.py  Project: yekeren/WSOD
def main(_):
  pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)
  tf.logging.info("Pipeline configure: %s", '=' * 128)
  tf.logging.info(pipeline_proto)

  g = tf.Graph()
  with g.as_default():

    # Infer saliency.

    model = builder.build(pipeline_proto.model, is_training=False)
    predictions = model.build_prediction(examples={}, 
        prediction_task=GAPPredictionTasks.word_saliency)

    vocabulary = predictions[GAPPredictions.vocabulary]
    saliency = predictions[GAPPredictions.word_saliency]

    saver = tf.train.Saver()
    invalid_variable_names = tf.report_uninitialized_variables()

  with tf.Session(graph=g) as sess:

    sess.run(tf.tables_initializer())

    # Load the latest checkpoint.

    checkpoint_path = tf.train.latest_checkpoint(pipeline_proto.model_dir)
    assert checkpoint_path is not None

    saver.restore(sess, checkpoint_path)
    assert len(sess.run(invalid_variable_names)) == 0

    # Print word importance.

    vocabulary, saliency = sess.run([vocabulary, saliency])
    if FLAGS.ascending_order:
      indices = np.argsort(saliency)
    else:
      indices = np.argsort(saliency)[::-1]

    for i in indices[:FLAGS.top_k]:
      tf.logging.info("%12s: %.4lf", vocabulary[i].decode('UTF-8'), saliency[i])
      print('%s\t%.4lf' % (vocabulary[i].decode('UTF-8'), saliency[i]))

  tf.logging.info('Done')
Code example #4
File: trainer.py  Project: yekeren/WSOD
  def _model_fn(features, labels, mode, params):
    """
    Args:
      features: a dict mapping from names to tensors, denoting the features.
      labels: a dict mapping from names to tensors, denoting the labels.
      mode: mode parameter required by the estimator.
      params: additional parameters used for creating the model.

    Returns:
      an instance of EstimatorSpec.
    """
    is_training = (tf.estimator.ModeKeys.TRAIN == mode)
    tf.logging.info("Current mode is %s, is_training=%s", mode, is_training)

    model = builder.build(pipeline_proto.model, is_training)
    predictions = model.build_prediction(features)

    # Get scaffold and variables_to_train.

    scaffold = model.get_scaffold()
    variables_to_train = model.get_variables_to_train()

    # Compute losses. Note: variables created in build_loss are not trainable.

    losses = model.build_loss(predictions, examples=features)
    for name, loss in losses.items():
      tf.losses.add_loss(loss)
      tf.summary.scalar('loss/' + name, loss)
    for loss in tf.losses.get_regularization_losses():
      tf.summary.scalar(
          "loss/regularization/" + '/'.join(loss.op.name.split('/')[:2]), loss)
    total_loss = tf.losses.get_total_loss(add_regularization_losses=True)

    train_op = None
    eval_metric_ops = None
    training_hooks = []

    if tf.estimator.ModeKeys.TRAIN == mode:

      train_config = pipeline_proto.train_config

      # Create the optimizer.

      learning_rate = train_config.learning_rate
      global_step = tf.train.get_or_create_global_step()

      if train_config.HasField('learning_rate_decay'):
        learning_rate = tf.train.exponential_decay(
            learning_rate,
            global_step,
            train_config.learning_rate_decay.decay_steps,
            train_config.learning_rate_decay.decay_rate,
            staircase=train_config.learning_rate_decay.staircase)
      tf.summary.scalar('loss/learning_rate', learning_rate)

      optimizer = training_utils.build_optimizer(
          train_config.optimizer, learning_rate=learning_rate)

      # Setup the replicas_hook for the SyncReplicasOptimizer.

      if train_config.sync_replicas:
        optimizer = tf.train.SyncReplicasOptimizer(
            optimizer, replicas_to_aggregate=4)
        sync_replicas_hook = optimizer.make_session_run_hook(is_chief)
        training_hooks.append(sync_replicas_hook)

      # Enable MovingAverageOptimizer if specified.

      if train_config.HasField('moving_average_decay'):
        optimizer = tf.contrib.opt.MovingAverageOptimizer(
            optimizer, average_decay=train_config.moving_average_decay)

      # Apply gradient multipliers.

      gradient_multipliers = {}
      for var in variables_to_train:
        for multiplier in train_config.gradient_multiplier:
          if var.op.name.startswith(multiplier.scope):
            if var.op.name in gradient_multipliers:
              tf.logging.warn('Override gradient multiplier: %s', var.op.name)
            gradient_multipliers[var.op.name] = multiplier.multiplier
            tf.logging.info('Set gradient multiplier for %s', var.op.name)
        tf.logging.info('Variable to train: %s, %s', var.op.name,
                        var.get_shape())
      tf.logging.info('Apply gradient multipliers: \n%s',
                      json.dumps(gradient_multipliers, indent=2))

      def transform_grads_fn(grads):
        if not gradient_multipliers: 
          return grads
        return tf.contrib.training.multiply_gradients(grads,
                                                      gradient_multipliers)

      # The train_op is required for mode `TRAIN`.

      train_op = tf.contrib.training.create_train_op(
          total_loss,
          optimizer,
          variables_to_train=variables_to_train,
          transform_grads_fn=transform_grads_fn,
          summarize_gradients=True)

      if train_config.HasField('moving_average_decay'):
        scaffold = tf.train.Scaffold(
            saver=optimizer.swapping_saver(), copy_from_scaffold=scaffold)

    elif tf.estimator.ModeKeys.EVAL == mode:

      # The eval_metric_ops is optional for mode `EVAL`.

      eval_metric_ops = model.build_evaluation(predictions, examples=features)

    elif tf.estimator.ModeKeys.PREDICT == mode:

      # The predictions is required for mode `PREDICT`.

      predictions.update(features)
      predictions.update({'summary': tf.summary.merge_all()})

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=total_loss,
        train_op=train_op,
        training_hooks=training_hooks,
        eval_metric_ops=eval_metric_ops,
        scaffold=scaffold)
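
For context, a model_fn like the one above is normally handed to a tf.estimator.Estimator. The sketch below shows the standard TF 1.x wiring; the reader calls mirror code example #5's reader.get_input_fn usage, while the directory, step counts, and timing values are placeholders rather than the repository's actual settings.

# Hedged sketch of standard tf.estimator wiring for a model_fn like the above.
# Directory names, step counts, and timing values are placeholders.
train_input_fn = reader.get_input_fn(pipeline_proto.train_reader, is_training=True)
eval_input_fn = reader.get_input_fn(pipeline_proto.eval_reader, is_training=False)

run_config = tf.estimator.RunConfig(
    model_dir='/tmp/model_dir',
    save_checkpoints_steps=1000,
    keep_checkpoint_max=5)

estimator = tf.estimator.Estimator(model_fn=_model_fn, config=run_config)

train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=100000)
eval_spec = tf.estimator.EvalSpec(
    input_fn=eval_input_fn, steps=None, start_delay_secs=120, throttle_secs=600)

tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)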
Code example #5
def main(_):
    logging.set_verbosity(logging.DEBUG)

    for gpu in tf.config.experimental.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)

    pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)
    vocab = _load_vocab_file(FLAGS.vocab_file)

    # Get the `next_examples_ts` tensor.
    if 'train' in FLAGS.output_jsonl_file:
        input_fn = reader.get_input_fn(pipeline_proto.train_reader,
                                       is_training=False)
    else:
        input_fn = reader.get_input_fn(pipeline_proto.eval_reader,
                                       is_training=False)

    iterator = input_fn().make_initializable_iterator()
    next_examples_ts = iterator.get_next()

    # Build model that takes placeholder as inputs, and predicts the logits.
    frcnn_dims = pipeline_proto.eval_reader.vcr_text_frcnn_reader.frcnn_feature_dims
    (label_pl, choices_pl, choices_tag_pl,
     choices_len_pl) = (tf.placeholder(tf.int32, [1]),
                        tf.placeholder(tf.int32, [1, NUM_CHOICES, None]),
                        tf.placeholder(tf.int32, [1, NUM_CHOICES, None]),
                        tf.placeholder(tf.int32, [1, NUM_CHOICES]))
    (num_detections_pl, detection_boxes_pl, detection_classes_pl,
     detection_scores_pl,
     detection_features_pl) = (tf.placeholder(tf.int32, [1]),
                               tf.placeholder(tf.float32, [1, None, 4]),
                               tf.placeholder(tf.int32, [1, None]),
                               tf.placeholder(tf.float32, [1, None]),
                               tf.placeholder(tf.float32,
                                              [1, None, frcnn_dims]))

    model = builder.build(pipeline_proto.model, is_training=False)
    logits_ts = model.predict({
        InputFields.num_detections: num_detections_pl,
        InputFields.detection_boxes: detection_boxes_pl,
        InputFields.detection_classes: detection_classes_pl,
        InputFields.detection_scores: detection_scores_pl,
        InputFields.detection_features: detection_features_pl,
        model._field_choices: choices_pl,
        model._field_choices_tag: choices_tag_pl,
        model._field_choices_len: choices_len_pl,
    })[FIELD_ANSWER_PREDICTION]

    losses_ts = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_ts,
                                                        labels=tf.one_hot(
                                                            label_pl,
                                                            depth=NUM_CHOICES))
    saver = tf.train.Saver()

    # Find the latest checkpoint file.
    ckpt_path = tf.train.latest_checkpoint(FLAGS.model_dir)
    assert ckpt_path is not None

    def _calc_score_and_loss(choices, choices_tag, choices_len, label,
                             num_detections, detection_boxes, detection_classes,
                             detection_scores, detection_features):
        """Get the VCR matching scores and losses."""
        (scores, losses) = sess.run(
            [logits_ts, losses_ts],
            feed_dict={
                label_pl: np.expand_dims(label, 0),
                choices_pl: np.expand_dims(choices, 0),
                choices_tag_pl: np.expand_dims(choices_tag, 0),
                choices_len_pl: np.expand_dims(choices_len, 0),
                num_detections_pl: np.expand_dims(num_detections, 0),
                detection_boxes_pl: np.expand_dims(detection_boxes, 0),
                detection_classes_pl: np.expand_dims(detection_classes, 0),
                detection_scores_pl: np.expand_dims(detection_scores, 0),
                detection_features_pl: np.expand_dims(detection_features, 0),
            })
        return scores[0], losses[0]

    # Run inference using the pretrained Bert model.
    with tf.Session() as sess, open(FLAGS.output_jsonl_file, 'w') as output_fp:
        sess.run(iterator.initializer)
        sess.run(tf.tables_initializer())
        saver.restore(sess, ckpt_path)
        logging.info('Restore from %s.', ckpt_path)

        batch_id = 0
        while True:
            batch_id += 1
            try:
                inputs_batched = sess.run(next_examples_ts)
                batch_size = len(inputs_batched[InputFields.annot_id])

                masks = np.array([[MASK_ID], [MASK_ID], [MASK_ID], [MASK_ID]])
                ones = np.array([[1], [1], [1], [1]])

                for example_id in range(batch_size):

                    (annot_id, choices, choices_tag, choices_len, label) = (
                        inputs_batched[
                            InputFields.annot_id][example_id].decode('utf8'),
                        inputs_batched[model._field_choices][example_id],
                        inputs_batched[model._field_choices_tag][example_id],
                        inputs_batched[model._field_choices_len][example_id],
                        inputs_batched[model._field_label][example_id])
                    (num_detections, detection_boxes, detection_classes,
                     detection_scores, detection_features) = (
                         inputs_batched[InputFields.num_detections]
                         [example_id], inputs_batched[
                             InputFields.detection_boxes][example_id],
                         inputs_batched[InputFields.detection_classes]
                         [example_id], inputs_batched[
                             InputFields.detection_scores][example_id],
                         inputs_batched[
                             InputFields.detection_features][example_id])

                    # Scores of the original choices.
                    orig_scores, orig_losses = _calc_score_and_loss(
                        choices, choices_tag, choices_len, label,
                        num_detections, detection_boxes, detection_classes,
                        detection_scores, detection_features)

                    # Adversarial attacking.
                    max_losses = np.zeros(NUM_CHOICES)
                    max_losses_choices = choices

                    if FLAGS.rationale:
                        sep_pos = np.where(choices == SEP_ID)[1].take(
                            [1, 3, 5, 7])
                    else:
                        sep_pos = np.where(choices == SEP_ID)[1]

                    result_losses = [[] for _ in range(4)]
                    result_tokens = [[] for _ in range(4)]

                    for pos_id in range(sep_pos.min() + 1, choices_len.max()):
                        # Compute the new losses.
                        new_choices = np.concatenate([
                            choices[:, :pos_id], masks, choices[:, pos_id + 1:]
                        ], -1)
                        new_choices_tag = np.concatenate([
                            choices_tag[:, :pos_id], -ones,
                            choices_tag[:, pos_id + 1:]
                        ], -1)
                        scores, losses = _calc_score_and_loss(
                            new_choices, new_choices_tag, choices_len, label,
                            num_detections, detection_boxes, detection_classes,
                            detection_scores, detection_features)

                        # Update the maximum values.
                        token_id = choices[:, pos_id]
                        is_valid = np.logical_not(
                            np.logical_or(
                                token_id == PAD_ID,
                                np.logical_or(token_id == CLS_ID,
                                              token_id == SEP_ID)))

                        for choice_id in range(4):
                            if is_valid[choice_id]:
                                result_losses[choice_id].append(
                                    round(float(losses[choice_id]), 4))
                                result_tokens[choice_id].append(
                                    vocab[choices[choice_id][pos_id]])

                        # Maximize loss.
                        adversarial_select_cond = np.logical_and(
                            losses > max_losses, is_valid)
                        max_losses_choices = np.where(
                            np.expand_dims(adversarial_select_cond, -1),
                            new_choices, max_losses_choices)
                        max_losses = np.maximum(max_losses, losses)

                    #END: for pos_id in range(sep_pos.min() + 1, choices_len.max()):

                    choices = pack_tensor_values(choices, choices_len, vocab)
                    adversarial_choices = pack_tensor_values(
                        max_losses_choices, choices_len, vocab)

                    output_annot = {
                        'annot_id': annot_id,
                        'label': int(label),
                        'choices': choices,
                        'adversarial_choices': adversarial_choices,
                        'result_losses': result_losses,
                        'result_tokens': result_tokens,
                    }
                    # print(label)
                    # for i in range(4):
                    #   print(choices[i])
                    #   print(adversarial_choices[i])
                    output_fp.write(json.dumps(output_annot) + '\n')

                if batch_id % 10 == 0:
                    logging.info('batch_id=%i', batch_id)

            except tf.errors.OutOfRangeError as ex:
                logging.info('Done!')
                break

    output_fp.close()
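
The core of the adversarial loop above is the per-position masking: column pos_id of every choice is replaced by MASK_ID before the losses are recomputed. A small self-contained NumPy illustration (the token ids and the MASK_ID value are made up):

import numpy as np

MASK_ID = 103      # made-up id standing in for the [MASK] token
NUM_CHOICES = 4

choices = np.arange(20).reshape(NUM_CHOICES, 5)   # four choices, five tokens each
masks = np.full((NUM_CHOICES, 1), MASK_ID)

pos_id = 2
new_choices = np.concatenate(
    [choices[:, :pos_id], masks, choices[:, pos_id + 1:]], -1)

# Column pos_id is now MASK_ID for every choice; everything else is unchanged.
assert (new_choices[:, pos_id] == MASK_ID).all()
assert new_choices.shape == choices.shape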
Code example #6
def main(_):
    logging.set_verbosity(tf.logging.INFO)

    assert os.path.isfile(FLAGS.pipeline_proto)

    g = tf.Graph()
    with g.as_default():
        pipeline_proto = load_pipeline_proto(FLAGS.pipeline_proto)
        logging.info("Pipeline configure: %s", '=' * 128)
        logging.info(pipeline_proto)

        # Get examples from reader.
        examples = ads_examples.get_examples(pipeline_proto.example_reader)

        # Build model for evaluation.
        global_step = slim.get_or_create_global_step()

        model = builder.build(pipeline_proto.model, is_training=False)
        predictions = model.build_inference(examples)
        loss_dict = model.build_loss(predictions)

        uninitialized_variable_names = tf.report_uninitialized_variables()
        saver = tf.train.Saver()

        init_op = tf.group(tf.local_variables_initializer(),
                           tf.global_variables_initializer())

    session_config = train_utils.default_session_config(
        FLAGS.per_process_gpu_memory_fraction)

    # Start session.
    logging.info('=' * 128)
    eval_config = pipeline_proto.eval_config

    model_path = train_utils.get_latest_model(FLAGS.saved_ckpts_dir)
    if model_path is not None:
        with tf.Session(graph=g, config=session_config) as sess:

            # Initialize model.
            sess.run(init_op)
            saver.restore(sess, model_path)
            logging.info('Restore from %s.', model_path)

            warn_names = sess.run(uninitialized_variable_names)
            assert len(warn_names) == 0

            # Evaluation loop.
            step = sess.run(global_step)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            evaluate_once(sess,
                          None,
                          step,
                          examples['video_id'],
                          predictions=predictions)

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)

    logging.info('Done')
Code example #7
File: trainer.py  Project: yekeren/WSSGG
    def _model_fn(features, labels, mode, params):
        """Creates the model.

    Args:
      features: A dict mapping from names to tensors, denoting the features.
      labels: A dict mapping from names to tensors, denoting the labels.
      mode: Mode parameter required by the estimator.
      params: Additional parameters used for creating the model.

    Returns:
      An instance of EstimatorSpec.
    """
        is_training = (tf.estimator.ModeKeys.TRAIN == mode)
        logging.info("Current mode is %s, is_training=%s", mode, is_training)

        model = builder.build(pipeline_proto.model, is_training)

        # Predict results.
        predictions = model.predict(features)

        # Compute losses. Note: variables created in build_loss are not trainable.
        losses = model.build_losses(features, predictions)
        for name, loss in losses.items():
            tf.compat.v1.summary.scalar('losses/' + name, loss)
            tf.losses.add_loss(loss)
        for loss in tf.compat.v1.losses.get_regularization_losses():
            tf.summary.scalar(
                "regularization/" + '/'.join(loss.op.name.split('/')[:2]),
                loss)
        total_loss = tf.compat.v1.losses.get_total_loss(
            add_regularization_losses=True)

        # Get variables_to_train.
        variables_to_train = model.get_variables_to_train()
        scaffold = model.get_scaffold()

        train_op = None
        eval_metric_ops = None

        if tf.estimator.ModeKeys.TRAIN == mode:
            _summarize_variables(tf.compat.v1.global_variables())
            global_step = tf.compat.v1.train.get_global_step()

            # Set learning rate.
            train_config = pipeline_proto.train_config
            lr_schedule_fn = learning_rate_schedule.create_learning_rate_schedule(
                train_config.learning_rate_schedule)
            learning_rate = lr_schedule_fn(global_step)
            tf.compat.v1.summary.scalar('losses/learning_rate', learning_rate)

            # Use optimizer to minimize loss.
            optimizer = optimization.create_optimizer(
                train_config.optimizer, learning_rate=learning_rate)

            def transform_grads_fn(grads):
                if train_config.HasField('max_gradient_norm'):
                    grads = tf.contrib.training.clip_gradient_norms(
                        grads, max_norm=train_config.max_gradient_norm)
                return grads

            train_op = tf.contrib.training.create_train_op(
                total_loss,
                optimizer,
                variables_to_train=variables_to_train,
                transform_grads_fn=transform_grads_fn,
                summarize_gradients=True)

        elif tf.estimator.ModeKeys.EVAL == mode:

            eval_metric_ops = model.build_metrics(features, predictions)
            for name, loss in losses.items():
                loss_metric = tf.keras.metrics.Mean()
                loss_metric.update_state(loss)
                eval_metric_ops['losses/' + name] = loss_metric

        elif tf.estimator.ModeKeys.PREDICT == mode:

            # Add input tensors to the predictions.
            predictions.update(features)

            # Create additional tensors if specified.
            create_additional_predictions = params.get(
                'create_additional_predictions', None)

            if create_additional_predictions:
                assert callable(create_additional_predictions)

                predictions.update(
                    create_additional_predictions(tf.get_default_graph()))

        # Merge summaries.
        summary_saver_hook = tf.estimator.SummarySaverHook(
            summary_op=tf.compat.v1.summary.merge_all(),
            save_steps=pipeline_proto.train_config.save_summary_steps)

        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=predictions,
                                          loss=total_loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_metric_ops,
                                          training_hooks=[summary_saver_hook],
                                          scaffold=scaffold)
Code example #8
def main(_):
  pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)

  if FLAGS.model_dir:
    pipeline_proto.model_dir = FLAGS.model_dir
    tf.logging.info("Override model checkpoint dir: %s", FLAGS.model_dir)

  tf.logging.info("Pipeline configure: %s", '=' * 128)
  tf.logging.info(pipeline_proto)

  categories = [
      'bear', 'beach', 'car', 'motorcycle', 'light', 'donut', 'plate', 'person',
      'skateboard'
  ]

  g = tf.Graph()
  with g.as_default():

    image = tf.placeholder(tf.uint8, shape=[None, None, 3])
    image_resized = tf.image.resize_images(image, [
        pipeline_proto.eval_reader.image_height,
        pipeline_proto.eval_reader.image_width
    ])

    model = builder.build(pipeline_proto.model, is_training=False)
    prediction_dict = model.build_prediction(
        examples={
            InputDataFields.image: tf.expand_dims(image_resized, 0),
            InputDataFields.category_strings: tf.constant(categories)
        },
        prediction_task=GAPPredictionTasks.image_score_map)

    # height, width = tf.shape(image)[0], tf.shape(image)[1]

    (saliency_map,
     score_maps) = (prediction_dict[GAPPredictions.image_saliency],
                    prediction_dict[GAPPredictions.image_score_map])

    score_map_list = [tf.squeeze(saliency_map, axis=-1)] + tf.unstack(
        score_maps, axis=-1)

    tf.logging.info("score map list size: %d", len(score_map_list))
    for i, x in enumerate(score_map_list):
      tf.logging.info("%d: shape: %s", i, x.get_shape().as_list())

    saver = tf.train.Saver()
    invalid_variable_names = tf.report_uninitialized_variables()

  with tf.Session(graph=g) as sess:

    sess.run(tf.tables_initializer())

    # Load the latest checkpoint.

    checkpoint_path = tf.train.latest_checkpoint(pipeline_proto.model_dir)
    assert checkpoint_path is not None

    saver.restore(sess, checkpoint_path)
    assert len(sess.run(invalid_variable_names)) == 0

    # Iterate the testdir to generate the demo results.

    score_map_func = lambda x: sess.run(score_map_list, feed_dict={image: x})

    for filename in os.listdir(FLAGS.image_path):
      tf.logging.info('On processing %s', filename)

      _get_score_map(
          input_path=os.path.join(FLAGS.image_path, filename),
          output_path=os.path.join(FLAGS.demo_path, filename),
          names=['saliency'] + categories,
          score_map_func=score_map_func,
          shape=(pipeline_proto.eval_reader.image_height,
                 pipeline_proto.eval_reader.image_width))

  tf.logging.info('Done')
Code example #9
def main(_):
    logging.set_verbosity(tf.logging.INFO)

    assert os.path.isfile(FLAGS.pipeline_proto)
    assert os.path.isfile(FLAGS.action_reason_annot_path)

    pipeline_proto = load_pipeline_proto(FLAGS.pipeline_proto)
    logging.info("Pipeline configure: %s", '=' * 128)
    logging.info(pipeline_proto)

    groundtruths = load_action_reason_annots(FLAGS.action_reason_annot_path)

    g = tf.Graph()
    with g.as_default():
        # Get examples from reader.
        split = 'valid'
        if not FLAGS.continuous_evaluation:
            split = 'test'

        examples, feed_init_fn = ads_mem_examples.get_examples(
            pipeline_proto.example_reader, split)

        # Build model for training.
        global_step = slim.get_or_create_global_step()

        model = builder.build(pipeline_proto.model, is_training=False)
        predictions = model.build_evaluation_graph(examples)

        init_fn = model.get_init_fn()
        uninitialized_variable_names = tf.report_uninitialized_variables()

        saver = tf.train.Saver()
        init_op = tf.group(tf.local_variables_initializer(),
                           tf.global_variables_initializer())

    session_config = train_utils.default_session_config(
        FLAGS.per_process_gpu_memory_fraction)

    # evaluation on test set.
    logging.info('Start evaluating.')
    eval_config = pipeline_proto.eval_config

    # One time evaluation and inference.
    if not FLAGS.continuous_evaluation:
        model_path = train_utils.get_latest_model(FLAGS.saved_ckpt_dir)
        with tf.Session(graph=g, config=session_config) as sess:
            feed_init_fn(sess)
            sess.run(init_op)
            saver.restore(sess, model_path)
            logging.info('Restore model from %s.', model_path)
            warn_names = sess.run(uninitialized_variable_names)
            assert len(warn_names) == 0

            # Evaluate the best model in terms of recall@3.
            step = sess.run(global_step)
            evaluate_once(sess, None, step, predictions, groundtruths)

        logging.info('Done')

        exit(0)

    # Continuous evaluation on valid set.
    writer = tf.summary.FileWriter(FLAGS.eval_log_dir, g)
    step = prev_step = -1
    while True:
        start = time.time()

        try:
            model_path = tf.train.latest_checkpoint(FLAGS.train_log_dir)

            if model_path is not None:
                with tf.Session(graph=g, config=session_config) as sess:
                    # Restore model.
                    feed_init_fn(sess)
                    sess.run(init_op)
                    saver.restore(sess, model_path)
                    logging.info('Restore model from %s.', model_path)

                    warn_names = sess.run(uninitialized_variable_names)
                    assert len(warn_names) == 0

                    step = sess.run(global_step)
                    if step != prev_step and step > eval_config.eval_min_global_steps:
                        # Evaluate the latest model.
                        prev_step = step
                        metric = evaluate_once(sess, writer, step, predictions,
                                               groundtruths)

                        step_best, metric_best = train_utils.save_model_if_it_is_better(
                            step,
                            metric,
                            model_path,
                            FLAGS.saved_ckpt_dir,
                            reverse=False)

                        if step_best == step:
                            summary = tf.Summary()
                            summary.value.add(tag='metrics/model_metric',
                                              simple_value=metric_best)
                            writer.add_summary(summary, global_step=step)
                        writer.flush()

                # with tf.Session
            # if model_path is not None
        except Exception as ex:
            pass

        if step >= eval_config.number_of_steps:
            break

        sleep_secs = eval_config.eval_interval_secs - (time.time() - start)
        if sleep_secs > 0:
            logging.info('Now sleep for %.2lf secs.', sleep_secs)
            time.sleep(sleep_secs)

    writer.close()
    logging.info('Done')
Code example #10
def main(_):
  logging.set_verbosity(tf.logging.INFO)

  assert os.path.isfile(FLAGS.pipeline_proto)
  assert os.path.isfile(FLAGS.sentiment_vocab_path)
  assert os.path.isfile(FLAGS.sentiment_anno_vocab)
  assert os.path.isfile(FLAGS.sentiment_raw_annot_path)
  assert os.path.isfile(FLAGS.sentiment_clean_annot_path)

  sentiment_raw_annot = load_raw_annot(FLAGS.sentiment_raw_annot_path)
  sentiment_clean_annot = eval_utils.load_clean_annot(FLAGS.sentiment_clean_annot_path)
  topic_clean_annot = load_clean_annot(FLAGS.topic_clean_annot_path)

  g = tf.Graph()
  with g.as_default():
    pipeline_proto = load_pipeline_proto(FLAGS.pipeline_proto)
    logging.info("Pipeline configure: %s", '=' * 128)
    logging.info(pipeline_proto)

    # Get examples from reader.
    examples = ads_examples.get_examples(pipeline_proto.example_reader)

    # Build model for evaluation.
    global_step = slim.get_or_create_global_step()

    model = builder.build(pipeline_proto.model, is_training=False)
    predictions = model.build_inference(examples)
    loss_dict = model.build_loss(predictions)

    uninitialized_variable_names = tf.report_uninitialized_variables()
    saver = tf.train.Saver()
    
    init_op = tf.group(tf.local_variables_initializer(),
        tf.global_variables_initializer())

  session_config = train_utils.default_session_config( 
      FLAGS.per_process_gpu_memory_fraction)

  # Start session.
  logging.info('=' * 128)
  eval_config = pipeline_proto.eval_config
  writer = tf.summary.FileWriter(FLAGS.eval_log_dir, g)

  prev_step = -1
  while True:
    start = time.time()

    try:
      model_path = tf.train.latest_checkpoint(FLAGS.train_log_dir)

      if model_path is not None:
        with tf.Session(graph=g, config=session_config) as sess:

          # Initialize model.
          sess.run(init_op)
          saver.restore(sess, model_path)
          logging.info('Restore from %s.', model_path)

          warn_names = sess.run(uninitialized_variable_names)
          assert len(warn_names) == 0

          step = sess.run(global_step)
          if step != prev_step and step > eval_config.eval_min_global_steps:
            prev_step = step

            # Evaluation loop.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            model_metric = evaluate_sentiment_once(sess, 
                writer, 
                step, 
                examples['video_id'],
                predictions=predictions,
                clean_annot=sentiment_clean_annot, 
                raw_annot=sentiment_raw_annot,
                use_sigmoid=True)

            step_best, metric_best = train_utils.save_model_if_it_is_better(
                step, model_metric, model_path, FLAGS.saved_ckpts_dir,
                reverse=False)

            # We improved the model, record it.
            if step_best == step:
              summary = tf.Summary()
              summary.value.add(
                  tag='metrics/metric_best', simple_value=metric_best)
              writer.add_summary(summary, global_step=step)
              writer.flush()

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)

          if step >= eval_config.number_of_steps:
            break
    except Exception as ex:
      pass

    # Sleep a while.
    sleep_secs = eval_config.eval_interval_secs - (time.time() - start)
    if sleep_secs > 0:
      logging.info('Now sleep for %.2lf secs.', sleep_secs)
      time.sleep(sleep_secs)

  writer.close()
  logging.info('Done')
Code example #11
def main(_):
    logging.set_verbosity(tf.logging.INFO)

    assert os.path.isfile(FLAGS.pipeline_proto)

    g = tf.Graph()
    with g.as_default():
        pipeline_proto = load_pipeline_proto(FLAGS.pipeline_proto)
        logging.info("Pipeline configure: %s", '=' * 128)
        logging.info(pipeline_proto)

        train_config = pipeline_proto.train_config

        # Get examples from reader.
        examples, feed_init_fn = ads_mem_examples.get_examples(
            pipeline_proto.example_reader, split='train')

        # Build model for training.
        model = builder.build(pipeline_proto.model, is_training=True)
        predictions = model.build_inference_graph(examples)
        loss_dict = model.build_loss(predictions)

        model_init_fn = model.get_init_fn()
        uninitialized_variable_names = tf.report_uninitialized_variables()

        if FLAGS.restore_from:
            variables_to_restore = slim.get_variables_to_restore(
                exclude=[name for name in train_config.exclude_variable])
            restore_init_fn = slim.assign_from_checkpoint_fn(
                FLAGS.restore_from, variables_to_restore)

        def init_fn(sess):
            model_init_fn(sess)
            if FLAGS.restore_from:
                restore_init_fn(sess)

        # Loss and optimizer.
        for loss_name, loss_tensor in loss_dict.items():
            tf.losses.add_loss(loss_tensor)
            tf.summary.scalar('losses/{}'.format(loss_name), loss_tensor)
        total_loss = tf.losses.get_total_loss()
        tf.summary.scalar('losses/total_loss', total_loss)

        for reg_loss in tf.losses.get_regularization_losses():
            name = 'losses/reg_loss_{}'.format(reg_loss.op.name.split('/')[0])
            tf.summary.scalar(name, reg_loss)

        optimizer = train_utils.build_optimizer(train_config)
        if train_config.moving_average:
            optimizer = tf.contrib.opt.MovingAverageOptimizer(
                optimizer, average_decay=0.99)

        gradient_multipliers = train_utils.build_multipler(
            train_config.gradient_multiplier)

        variables_to_train = model.get_variables_to_train()
        logging.info('=' * 128)
        for var in variables_to_train:
            logging.info(var)
        train_op = slim.learning.create_train_op(
            total_loss,
            variables_to_train=variables_to_train,
            clip_gradient_norm=0.0,
            gradient_multipliers=gradient_multipliers,
            summarize_gradients=True,
            optimizer=optimizer)

        saver = None
        if train_config.moving_average:
            saver = optimizer.swapping_saver()

    # Start checking.
    logging.info('Start checking...')
    session_config = train_utils.default_session_config(
        FLAGS.per_process_gpu_memory_fraction)

    def _session_wrapper_fn(sess):
        feed_init_fn(sess)
        return sess

    slim.learning.train(train_op,
                        logdir=FLAGS.train_log_dir,
                        graph=g,
                        master='',
                        is_chief=True,
                        number_of_steps=train_config.number_of_steps,
                        log_every_n_steps=train_config.log_every_n_steps,
                        save_interval_secs=train_config.save_interval_secs,
                        save_summaries_secs=train_config.save_summaries_secs,
                        session_config=session_config,
                        session_wrapper=_session_wrapper_fn,
                        init_fn=init_fn,
                        saver=saver)

    logging.info('Done')
Code example #12
File: knn_words.py  Project: yekeren/WSOD
def main(_):
  pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)
  tf.logging.info("Pipeline configure: %s", '=' * 128)
  tf.logging.info(pipeline_proto)

  g = tf.Graph()
  with g.as_default():

    # Infer saliency.

    model = builder.build(pipeline_proto.model, is_training=False)
    predictions = model.build_prediction(
        examples={}, prediction_task=GAPPredictionTasks.word_saliency)

    saver = tf.train.Saver()
    invalid_variable_names = tf.report_uninitialized_variables()

  with tf.Session(graph=g) as sess:

    sess.run(tf.tables_initializer())

    # Load the latest checkpoint.

    checkpoint_path = tf.train.latest_checkpoint(pipeline_proto.model_dir)
    assert checkpoint_path is not None

    saver.restore(sess, checkpoint_path)
    assert len(sess.run(invalid_variable_names)) == 0

    predictions = sess.run(predictions)

  # Process kNN retrieval.

  name_to_class_id = {}
  with open(FLAGS.name_to_class_id_file, 'r') as fid:
    for line in fid.readlines():
      name, class_id = line.strip('\n').split('\t')
      name_to_class_id[name] = class_id

  (vocabulary, word_saliency,
   word_embedding) = (predictions[GAPPredictions.vocabulary],
                      predictions[GAPPredictions.word_saliency],
                      predictions[GAPPredictions.word_embedding])

  queries = list(name_to_class_id)
  synonyms_list = _knn_retrieval(queries, vocabulary, word_embedding,
                                 word_saliency)

  # Print to the terminal.

  expanded_name_to_class_id = []
  for query, synonyms in zip(queries, synonyms_list):
    elems = []
    for synonym in synonyms:
      if synonym['saliency'] < FLAGS.saliency_threshold:
        continue
      if synonym['similarity'] < FLAGS.similarity_threshold:
        continue
      elems.append(synonym['word'])
      expanded_name_to_class_id.append((synonym['word'],
                                        name_to_class_id[query]))
    print('%s\t%s' % (query, ','.join(elems)))

  # Write to output file.

  with open(FLAGS.expanded_name_to_class_id_file, 'w') as fid:
    for word, class_id in expanded_name_to_class_id:
      fid.write('%s\t%s\n' % (word, class_id))

  tf.logging.info('Done')
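
_knn_retrieval itself is not shown on this page. As an illustration only (not the repository's implementation), a cosine-similarity nearest-neighbour lookup over the word embeddings, in the spirit of the matmul ranking in code example #1 and assuming vocabulary has already been decoded to a Python list of strings, could look like this:

# Illustration only; not the repository's _knn_retrieval.
import numpy as np

def knn_words(queries, vocabulary, word_embedding, k=5):
  """Returns, for each query word, the k most cosine-similar vocabulary words."""
  normalized = word_embedding / np.linalg.norm(word_embedding, axis=-1, keepdims=True)
  query_indices = [vocabulary.index(query) for query in queries]
  similarity = np.matmul(normalized[query_indices], normalized.T)  # [num_queries, vocab_size]
  results = []
  for row in similarity:
    top = np.argsort(row)[::-1][:k]
    results.append([(vocabulary[i], float(row[i])) for i in top])
  return results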
Code example #13
def main(_):
  logging.set_verbosity(tf.logging.INFO)

  assert os.path.isfile(FLAGS.pipeline_proto), FLAGS.pipeline_proto

  g = tf.Graph()
  with g.as_default():
    pipeline_proto = load_pipeline_proto(FLAGS.pipeline_proto)
    logging.info("Pipeline configure: %s", '=' * 128)
    logging.info(pipeline_proto)

    train_config = pipeline_proto.train_config

    # Get examples from reader.
    examples = ads_examples.get_examples(pipeline_proto.example_reader)

    # Build model for training.
    global_step = slim.get_or_create_global_step()

    model = builder.build(pipeline_proto.model, is_training=True)
    predictions = model.build_inference(examples)
    loss_dict = model.build_loss(predictions)

    init_fn = model.get_init_fn()
    uninitialized_variable_names = tf.report_uninitialized_variables()

    # Loss and optimizer.
    for loss_name, loss_tensor in loss_dict.items():
      tf.losses.add_loss(loss_tensor)
      tf.summary.scalar('losses/{}'.format(loss_name), loss_tensor)
    total_loss = tf.losses.get_total_loss()
    tf.summary.scalar('losses/total_loss', total_loss)

    optimizer = train_utils.build_optimizer(train_config)
    if train_config.moving_average:
      optimizer = tf.contrib.opt.MovingAverageOptimizer(optimizer,
          average_decay=0.99)

    gradient_multipliers = train_utils.build_multipler(
        train_config.gradient_multiplier)

    variables_to_train = model.get_variables_to_train()
    for var in variables_to_train:
      logging.info(var)

    train_op = slim.learning.create_train_op(total_loss,
        variables_to_train=variables_to_train, 
        clip_gradient_norm=5.0,
        gradient_multipliers=gradient_multipliers,
        summarize_gradients=True,
        optimizer=optimizer)

    saver = None
    if train_config.moving_average:
      saver = optimizer.swapping_saver()

  # Starts training.
  logging.info('Start training.')

  session_config = train_utils.default_session_config( 
      FLAGS.per_process_gpu_memory_fraction)
  slim.learning.train(train_op, 
      logdir=FLAGS.train_log_dir,
      graph=g,
      master='',
      is_chief=True,
      number_of_steps=train_config.number_of_steps,
      log_every_n_steps=train_config.log_every_n_steps,
      save_interval_secs=train_config.save_interval_secs,
      save_summaries_secs=train_config.save_summaries_secs,
      session_config=session_config,
      init_fn=init_fn,
      saver=saver)

  logging.info('Done')