Example #1
def EmbedQueryTargetData(query_records, target_records, config):
    """Embeds the full set of query_records and target_records.

    Args:
      query_records: List of Strings, paths to tfrecord datasets to use as
        queries.
      target_records: List of Strings, paths to tfrecord datasets to use as
        targets.
      config: A T object describing training config.

    Returns:
      query_sequences_to_data: A dict holding 'embeddings' and 'images'.
      target_sequences_to_data: A dict holding 'embeddings' and 'images'.
    """
    batch_size = config.data.embed_batch_size

    # Choose an estimator based on training strategy.
    estimator = get_estimator(config, FLAGS.checkpointdir)

    # Choose a checkpoint path to restore.
    checkpointdir = FLAGS.checkpointdir
    checkpoint_path = os.path.join(checkpointdir,
                                   'model.ckpt-%s' % FLAGS.checkpoint_iter)

    # Embed num_sequences query sequences, store embeddings and image strings in
    # query_sequences_to_data.
    num_query_sequences = FLAGS.num_query_sequences
    num_target_sequences = FLAGS.num_target_sequences
    query_sequences_to_data = {}
    for (view_embeddings, view_raw_image_strings,
         seqname) in estimator.inference(query_records,
                                         checkpoint_path,
                                         batch_size,
                                         num_sequences=num_query_sequences):
        query_sequences_to_data[seqname] = {
            'embeddings': view_embeddings,
            'images': view_raw_image_strings,
        }

    if (query_records == target_records) and (num_query_sequences
                                              == num_target_sequences):
        target_sequences_to_data = query_sequences_to_data
    else:
        # Embed num_sequences target sequences, store embeddings and image strings
        # in sequences_to_data.
        target_sequences_to_data = {}
        for (view_embeddings, view_raw_image_strings,
             seqname) in estimator.inference(
                 target_records,
                 checkpoint_path,
                 batch_size,
                 num_sequences=num_target_sequences):
            target_sequences_to_data[seqname] = {
                'embeddings': view_embeddings,
                'images': view_raw_image_strings,
            }
    return query_sequences_to_data, target_sequences_to_data
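All of these examples read command-line options through a module-level FLAGS object. As a point of reference only, here is a minimal sketch of the flag definitions this example assumes, using the TF 1.x tf.app.flags API; the defaults and help strings are illustrative, not the project's actual values.

import tensorflow as tf

tf.app.flags.DEFINE_string('checkpointdir', '/tmp/tcn', 'Checkpoint directory.')
tf.app.flags.DEFINE_string('checkpoint_iter', '', 'Checkpoint iteration to restore.')
tf.app.flags.DEFINE_integer('num_query_sequences', 16, 'Query sequences to embed.')
tf.app.flags.DEFINE_integer('num_target_sequences', 16, 'Target sequences to embed.')
FLAGS = tf.app.flags.FLAGS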
Example #2
File: train.py Project: ALISCIFP/models
def main(_):
  """Runs main training loop."""
  # Parse config dict from yaml config files / command line flags.
  config = util.ParseConfigsToLuaTable(
      FLAGS.config_paths, FLAGS.model_params, save=True, logdir=FLAGS.logdir)

  # Choose an estimator based on training strategy.
  estimator = get_estimator(config, FLAGS.logdir)

  # Run training
  estimator.train()
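main(_) takes an unused argv argument because these scripts are launched through tf.app.run(), which parses FLAGS and then invokes main. A minimal sketch of the entry point these examples assume:

if __name__ == '__main__':
  tf.app.run()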
Example #3
def main(_):
    """Runs main labeled eval loop."""
    # Parse config dict from yaml config files / command line flags.
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)

    # Choose an estimator based on training strategy.
    checkpointdir = FLAGS.checkpointdir
    estimator = get_estimator(config, checkpointdir)

    # Get data configs.
    image_attr_keys = config.data.labeled.image_attr_keys
    label_attr_keys = config.data.labeled.label_attr_keys
    embedding_size = config.embedding_size
    num_views = config.data.num_views
    k_list = config.val.recall_at_k_list
    batch_size = config.data.batch_size

    # Get either labeled validation or test tables.
    labeled_tables = get_labeled_tables(config)

    def input_fn_by_view(view_index):
        """Returns an input_fn for use with a tf.Estimator by view."""
        def input_fn():
            # Get raw labeled images.
            (preprocessed_images, labels,
             tasks) = data_providers.labeled_data_provider(
                 labeled_tables,
                 estimator.preprocess_data,
                 view_index,
                 image_attr_keys,
                 label_attr_keys,
                 batch_size=batch_size)
            return {
                'batch_preprocessed': preprocessed_images,
                'tasks': tasks,
                'classification_labels': labels,
            }, None

        return input_fn

    # If evaluating a specific checkpoint, do that.
    if FLAGS.checkpoint_iter:
        checkpoint_path = os.path.join(
            checkpointdir, 'model.ckpt-%s' % FLAGS.checkpoint_iter)
        evaluate_once(estimator, input_fn_by_view, batch_size, checkpoint_path,
                      label_attr_keys, embedding_size, num_views, k_list)
    else:
        for checkpoint_path in tf.contrib.training.checkpoints_iterator(
                checkpointdir):
            evaluate_once(estimator, input_fn_by_view, batch_size,
                          checkpoint_path, label_attr_keys, embedding_size,
                          num_views, k_list)
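input_fn_by_view is a closure factory: each call binds one camera view index into the zero-argument input_fn signature that tf.estimator expects. evaluate_once's internals are not shown in these examples; purely as an illustration of how such a closure is consumed (tf_estimator below is a hypothetical tf.estimator.Estimator instance, not a name from the project):

for view_index in range(num_views):
    view_input_fn = input_fn_by_view(view_index)
    # predict() calls the zero-argument input_fn to build the input pipeline.
    for prediction in tf_estimator.predict(input_fn=view_input_fn):
        pass  # accumulate per-view embeddings / labels here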
示例#6
0
def main(_):
  """Runs main labeled eval loop."""
  # Parse config dict from yaml config files / command line flags.
  config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params)

  # Choose an estimator based on training strategy.
  checkpointdir = FLAGS.checkpointdir
  estimator = get_estimator(config, checkpointdir)

  # Get data configs.
  image_attr_keys = config.data.labeled.image_attr_keys
  label_attr_keys = config.data.labeled.label_attr_keys
  embedding_size = config.embedding_size
  num_views = config.data.num_views
  k_list = config.val.recall_at_k_list
  batch_size = config.data.batch_size

  # Get either labeled validation or test tables.
  labeled_tables = get_labeled_tables(config)

  def input_fn_by_view(view_index):
    """Returns an input_fn for use with a tf.Estimator by view."""
    def input_fn():
      # Get raw labeled images.
      (preprocessed_images, labels,
       tasks) = data_providers.labeled_data_provider(
           labeled_tables,
           estimator.preprocess_data, view_index, image_attr_keys,
           label_attr_keys, batch_size=batch_size)
      return {
          'batch_preprocessed': preprocessed_images,
          'tasks': tasks,
          'classification_labels': labels,
      }, None
    return input_fn

  # If evaluating a specific checkpoint, do that.
  if FLAGS.checkpoint_iter:
    checkpoint_path = os.path.join(
        '%s/model.ckpt-%s' % (checkpointdir, FLAGS.checkpoint_iter))
    evaluate_once(
        estimator, input_fn_by_view, batch_size, checkpoint_path,
        label_attr_keys, embedding_size, num_views, k_list)
  else:
    for checkpoint_path in tf.contrib.training.checkpoints_iterator(
        checkpointdir):
      evaluate_once(
          estimator, input_fn_by_view, batch_size, checkpoint_path,
          label_attr_keys, embedding_size, num_views, k_list)
Example #4
File: eval.py Project: ALISCIFP/models
def main(_):
  """Runs main eval loop."""
  # Parse config dict from yaml config files / command line flags.
  logdir = FLAGS.logdir
  config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params)

  # Choose an estimator based on training strategy.
  estimator = get_estimator(config, logdir)

  # Wait for the first checkpoint file to be written.
  while not tf.train.latest_checkpoint(logdir):
    tf.logging.info('Waiting for a checkpoint file...')
    time.sleep(10)

  # Run validation.
  while True:
    estimator.evaluate()
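The sleep loop above polls until the first checkpoint file appears. TF 1.x also ships a contrib helper that performs the same wait; shown only as an equivalent alternative, not what this script uses:

# Blocks until a checkpoint newer than `last_checkpoint` (here: any) exists.
first_checkpoint = tf.contrib.training.wait_for_new_checkpoint(
    logdir, last_checkpoint=None, seconds_to_sleep=10)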
Example #5
def SameSequenceVideos(query_records, config, height, width):
    """Generate same sequence, cross-view imitation videos."""
    batch_size = config.data.embed_batch_size

    # Choose an estimator based on training strategy.
    estimator = get_estimator(config, FLAGS.checkpointdir)

    # Choose a checkpoint path to restore.
    checkpointdir = FLAGS.checkpointdir
    checkpoint_path = os.path.join(checkpointdir,
                                   'model.ckpt-%s' % FLAGS.checkpoint_iter)

    # Embed num_sequences query sequences, store embeddings and image strings in
    # query_sequences_to_data.
    sequences_to_data = {}
    for (view_embeddings, view_raw_image_strings,
         seqname) in estimator.inference(
             query_records,
             checkpoint_path,
             batch_size,
             num_sequences=FLAGS.num_query_sequences):
        sequences_to_data[seqname] = {
            'embeddings': view_embeddings,
            'images': view_raw_image_strings,
        }

    # Loop over query videos.
    qview = FLAGS.query_view
    tview = FLAGS.target_view
    for task_i, data_i in sequences_to_data.items():
        ims = data_i['images']
        embs = data_i['embeddings']
        query_embs = SmoothEmbeddings(embs[qview])
        query_ims = ims[qview]

        target_embs = SmoothEmbeddings(embs[tview])
        target_ims = ims[tview]

        tf.logging.info('Generating %s imitating %s video.' % (task_i, task_i))
        vid_name = 'q%sv%s_im%sv%s' % (task_i, qview, task_i, tview)
        vid_name = vid_name.replace('/', '_')
        GenerateImitationVideo(vid_name, query_ims, query_embs, target_ims,
                               target_embs, height, width)
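SmoothEmbeddings is used above but not defined in these examples. As a hedged sketch only, one plausible implementation is a temporal moving average over each embedding dimension; the window size is an assumption:

import numpy as np

def SmoothEmbeddings(embeddings, window=5):
    """Smooths a [num_frames, embedding_dim] array with a moving average."""
    kernel = np.ones(window) / window
    return np.stack(
        [np.convolve(embeddings[:, d], kernel, mode='same')
         for d in range(embeddings.shape[1])],
        axis=1)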
Example #6
def evaluate_once(
    config, checkpointdir, validation_records, checkpoint_path, batch_size,
    num_views):
  """Evaluates and reports the validation alignment."""
  # Choose an estimator based on training strategy.
  estimator = get_estimator(config, checkpointdir)

  # Embed all validation sequences.
  seqname_to_embeddings = {}
  for (view_embeddings, _, seqname) in estimator.inference(
      validation_records, checkpoint_path, batch_size):
    seqname_to_embeddings[seqname] = view_embeddings

  # Compute and report alignment statistics.
  ckpt_step = int(checkpoint_path.split('-')[-1])
  summary_dir = os.path.join(FLAGS.outdir, 'alignment_summaries')
  summary_writer = tf.summary.FileWriter(summary_dir)
  compute_average_alignment(
      seqname_to_embeddings, num_views, summary_writer, ckpt_step)
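compute_average_alignment is not shown in these examples; whatever statistic it computes, reporting it through the summary_writer created above would follow the standard TF 1.x summary-proto pattern. A sketch with an illustrative tag, where alignment_score is a placeholder for the computed scalar:

summary = tf.Summary(value=[
    tf.Summary.Value(tag='validation/alignment', simple_value=alignment_score)])
summary_writer.add_summary(summary, global_step=ckpt_step)
summary_writer.flush()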
Example #7
def main(_):
    """Runs main eval loop."""
    # Parse config dict from yaml config files / command line flags.
    logdir = FLAGS.logdir
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)

    # Choose an estimator based on training strategy.
    estimator = get_estimator(config, logdir)

    # Wait for the first checkpoint file to be written.
    while not tf.train.latest_checkpoint(logdir):
        tf.logging.info('Waiting for a checkpoint file...')
        time.sleep(10)

    ckpt = tf.train.get_checkpoint_state(logdir)
    # Pull a single batch of inference outputs (embeddings, raw image
    # strings, sequence name), then stop.
    for val1, val2, val3 in estimator.inference(FLAGS.tfrecords,
                                                ckpt.model_checkpoint_path,
                                                FLAGS.batch_size,
                                                model_name=FLAGS.model_name):
        break
Example #8
def main(_):
  """Runs main labeled eval loop."""
  # Parse config dict from yaml config files / command line flags.
  config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params)

  # Choose an estimator based on training strategy.
  checkpointdir = FLAGS.checkpointdir
  checkpoint_path = os.path.join(checkpointdir,
                                 'model.ckpt-%s' % FLAGS.checkpoint_iter)
  estimator = get_estimator(config, checkpointdir)

  # Get records to embed.
  validation_dir = FLAGS.embedding_records
  validation_records = util.GetFilesRecursively(validation_dir)

  sequences_to_data = {}
  for (view_embeddings, view_raw_image_strings, seqname) in estimator.inference(
      validation_records, checkpoint_path, config.data.embed_batch_size,
      num_sequences=FLAGS.num_sequences):
    sequences_to_data[seqname] = {
        'embeddings': view_embeddings,
        'images': view_raw_image_strings,
    }

  all_embeddings = np.zeros((0, config.embedding_size))
  all_ims = []
  all_seqnames = []

  num_embeddings = FLAGS.num_embed
  # Concatenate all views from all sequences into a big flat list.
  for seqname, data in sequences_to_data.items():
    embs = data['embeddings']
    ims = data['images']
    for v in range(config.data.num_views):
      for (emb, im) in zip(embs[v], ims[v]):
        all_embeddings = np.append(all_embeddings, [emb], axis=0)
        all_ims.append(im)
        all_seqnames.append(seqname)

  # Choose N indices uniformly at random from all images.
  random_indices = list(range(all_embeddings.shape[0]))
  random.shuffle(random_indices)
  viz_indices = random_indices[:num_embeddings]

  # Extract embs.
  viz_embs = np.array(all_embeddings[viz_indices])

  # Extract and decode ims.
  viz_ims = list(np.array(all_ims)[viz_indices])
  decoded_ims = []

  sprite_dim = FLAGS.sprite_dim
  for i, im in enumerate(viz_ims):
    if i % 100 == 0:
      print('Decoding image %d/%d.' % (i, num_embeddings))
    nparr_i = np.frombuffer(im, np.uint8)
    img_np = cv2.imdecode(nparr_i, 1)
    img_np = img_np[..., [2, 1, 0]]

    img_np = imresize(img_np, [sprite_dim, sprite_dim, 3])
    decoded_ims.append(img_np)
  decoded_ims = np.array(decoded_ims)

  # Extract sequence names.
  outdir = FLAGS.outdir

  # The embedding variable, which needs to be stored.
  # Note: this must be a tf.Variable, not a Tensor!
  embedding_var = tf.Variable(viz_embs, name='viz_embs')

  with tf.Session() as sess:
    sess.run(embedding_var.initializer)
    summary_writer = tf.summary.FileWriter(outdir)
    # Use a separate name so the projector config doesn't shadow the outer
    # training config.
    projector_config = projector.ProjectorConfig()
    embedding = projector_config.embeddings.add()
    embedding.tensor_name = embedding_var.name

    # Comment out if you don't want sprites
    embedding.sprite.image_path = os.path.join(outdir, 'sprite.png')
    embedding.sprite.single_image_dim.extend(
        [decoded_ims.shape[1], decoded_ims.shape[1]])

    projector.visualize_embeddings(summary_writer, projector_config)
    saver = tf.train.Saver([embedding_var])
    saver.save(sess, os.path.join(outdir, 'model2.ckpt'), 1)

  sprite = images_to_sprite(decoded_ims)
  imsave(os.path.join(outdir, 'sprite.png'), sprite)
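images_to_sprite is referenced above but not defined in these examples. A common minimal version (assumptions: a uint8 [N, H, W, 3] array with uniform height and width) tiles the batch into one square atlas, which is the layout the projector's sprite.image_path expects:

import numpy as np

def images_to_sprite(images):
  """Tiles a [N, H, W, 3] uint8 batch into one square sprite image."""
  n = int(np.ceil(np.sqrt(images.shape[0])))
  # Pad with blank images so the batch fills an n x n grid.
  padding = ((0, n ** 2 - images.shape[0]), (0, 0), (0, 0), (0, 0))
  images = np.pad(images, padding, mode='constant')
  _, h, w, c = images.shape
  images = images.reshape((n, n, h, w, c))
  images = images.transpose((0, 2, 1, 3, 4)).reshape((n * h, n * w, c))
  return images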