Example #1
def main(_):
    # Parse config dict from yaml config files / command line flags.
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)

    # Get tables to embed.
    query_records_dir = FLAGS.query_records_dir
    query_records = util.GetFilesRecursively(query_records_dir)

    target_records_dir = FLAGS.target_records_dir
    target_records = util.GetFilesRecursively(target_records_dir)

    height = config.data.raw_height
    width = config.data.raw_width
    mode = FLAGS.mode
    if mode == 'multi':
        # Generate videos where target set is composed of multiple videos.
        MultiImitationVideos(query_records, target_records, config, height,
                             width)
    elif mode == 'single':
        # Generate videos where target set is a single video.
        SingleImitationVideos(query_records, target_records, config, height,
                              width)
    elif mode == 'same':
        # Generate videos where the target set is the same sequence as the
        # query, but seen from a different view.
        SameSequenceVideos(query_records, config, height, width)
    else:
        raise ValueError('Unknown mode %s' % mode)
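
These entry points all follow the TF 1.x tf.app convention: flags declared at
module level, then tf.app.run() parses them and calls main(_). A minimal
sketch of the wiring the snippet above assumes (flag names are taken from the
code; the defaults and help strings here are illustrative):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('config_paths', '',
                           'Comma-separated list of yaml config files.')
tf.app.flags.DEFINE_string('model_params', '',
                           'Comma-separated config overrides.')
tf.app.flags.DEFINE_string('mode', 'multi',
                           'One of "multi", "single", or "same".')

if __name__ == '__main__':
    tf.app.run()  # Parses flags, then calls main(_) with leftover argv.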
Example #2
def main(_):
  """Runs main training loop."""
  # Parse config dict from yaml config files / command line flags.
  config = util.ParseConfigsToLuaTable(
      FLAGS.config_paths, FLAGS.model_params, save=True, logdir=FLAGS.logdir)

  # Choose an estimator based on training strategy.
  estimator = get_estimator(config, FLAGS.logdir)

  # Run training.
  estimator.train()
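
get_estimator is defined elsewhere in this codebase and is not shown in these
snippets. A purely hypothetical sketch of the dispatch it performs, assuming a
training-strategy field in the config and estimator classes invented here for
illustration:

def get_estimator(config, logdir):
    # Hypothetical: the real field name and estimator classes differ.
    strategy = config.training_strategy
    if strategy == 'tcn':
        return TCNEstimator(config, logdir)
    if strategy == 'svtcn':
        return SVTCNEstimator(config, logdir)
    raise ValueError('Unknown training strategy: %s' % strategy)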
Example #3
def main(_):
    """Runs main labeled eval loop."""
    # Parse config dict from yaml config files / command line flags.
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)

    # Choose an estimator based on training strategy.
    checkpointdir = FLAGS.checkpointdir
    estimator = get_estimator(config, checkpointdir)

    # Get data configs.
    image_attr_keys = config.data.labeled.image_attr_keys
    label_attr_keys = config.data.labeled.label_attr_keys
    embedding_size = config.embedding_size
    num_views = config.data.num_views
    k_list = config.val.recall_at_k_list
    batch_size = config.data.batch_size

    # Get either labeled validation or test tables.
    labeled_tables = get_labeled_tables(config)

    def input_fn_by_view(view_index):
        """Returns an input_fn for use with a tf.Estimator by view."""
        def input_fn():
            # Get raw labeled images.
            (preprocessed_images, labels,
             tasks) = data_providers.labeled_data_provider(
                 labeled_tables,
                 estimator.preprocess_data,
                 view_index,
                 image_attr_keys,
                 label_attr_keys,
                 batch_size=batch_size)
            return {
                'batch_preprocessed': preprocessed_images,
                'tasks': tasks,
                'classification_labels': labels,
            }, None

        return input_fn

    # If evaluating a specific checkpoint, do that.
    if FLAGS.checkpoint_iter:
        checkpoint_path = os.path.join(
            checkpointdir, 'model.ckpt-%s' % FLAGS.checkpoint_iter)
        evaluate_once(estimator, input_fn_by_view, batch_size, checkpoint_path,
                      label_attr_keys, embedding_size, num_views, k_list)
    else:
        for checkpoint_path in tf.contrib.training.checkpoints_iterator(
                checkpointdir):
            evaluate_once(estimator, input_fn_by_view, batch_size,
                          checkpoint_path, label_attr_keys, embedding_size,
                          num_views, k_list)
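
tf.contrib.training.checkpoints_iterator blocks until a checkpoint newer than
the last one it yielded appears in the directory, then yields its path, so the
else branch above evaluates every checkpoint as training writes them. A
standalone sketch of the pattern (min_interval_secs is illustrative):

import tensorflow as tf

def watch_and_evaluate(checkpoint_dir, eval_fn):
    # Yields each new checkpoint path; an optional timeout kwarg stops the
    # iteration if no new checkpoint appears in time.
    for checkpoint_path in tf.contrib.training.checkpoints_iterator(
            checkpoint_dir, min_interval_secs=60):
        eval_fn(checkpoint_path)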
Example #4
def main(_):
    """Runs main eval loop."""
    # Parse config dict from yaml config files / command line flags.
    logdir = FLAGS.logdir
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)

    # Choose an estimator based on training strategy.
    estimator = get_estimator(config, logdir)

    # Wait for the first checkpoint file to be written.
    while not tf.train.latest_checkpoint(logdir):
        tf.logging.info('Waiting for a checkpoint file...')
        time.sleep(10)

    # Run validation.
    while True:
        estimator.evaluate()
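
The polling loop above spins forever if training never produces a checkpoint.
A variant with a deadline, using the same tf.train.latest_checkpoint call (the
timeout values are illustrative):

import time
import tensorflow as tf

def wait_for_first_checkpoint(logdir, timeout_secs=3600, poll_secs=10):
    deadline = time.time() + timeout_secs
    while not tf.train.latest_checkpoint(logdir):
        if time.time() > deadline:
            raise RuntimeError('No checkpoint in %s after %d seconds.' %
                               (logdir, timeout_secs))
        tf.logging.info('Waiting for a checkpoint file...')
        time.sleep(poll_secs)
    return tf.train.latest_checkpoint(logdir)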
Example #5
def main(_):
    """Runs main eval loop."""
    # Parse config dict from yaml config files / command line flags.
    logdir = FLAGS.logdir
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)

    # Choose an estimator based on training strategy.
    estimator = get_estimator(config, logdir)

    # Wait for the first checkpoint file to be written.
    while not tf.train.latest_checkpoint(logdir):
        tf.logging.info('Waiting for a checkpoint file...')
        time.sleep(10)

    ckpt = tf.train.get_checkpoint_state(logdir)
    # estimator.inference is a generator; only its first yielded batch of
    # (embeddings, raw image strings, sequence name) is consumed here.
    for embeddings, raw_image_strings, seqname in estimator.inference(
            FLAGS.tfrecords,
            ckpt.model_checkpoint_path,
            FLAGS.batch_size,
            model_name=FLAGS.model_name):
        break
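
The break above keeps only the first batch the inference generator yields. To
embed everything, the generator can simply be drained, mirroring how Example
#7 below consumes it:

# Sketch: collect every batch instead of stopping after the first one.
results = []
for embeddings, raw_image_strings, seqname in estimator.inference(
        FLAGS.tfrecords, ckpt.model_checkpoint_path, FLAGS.batch_size,
        model_name=FLAGS.model_name):
    results.append((seqname, embeddings, raw_image_strings))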
Example #6
def main(_):
    # Parse config dict from yaml config files / command line flags.
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)
    num_views = config.data.num_views

    validation_records = util.GetFilesRecursively(config.data.validation)
    batch_size = config.data.batch_size

    checkpointdir = FLAGS.checkpointdir

    # If evaluating a specific checkpoint, do that.
    if FLAGS.checkpoint_iter:
        checkpoint_path = os.path.join(
            checkpointdir, 'model.ckpt-%s' % FLAGS.checkpoint_iter)
        evaluate_once(config, checkpointdir, validation_records,
                      checkpoint_path, batch_size, num_views)
    else:
        for checkpoint_path in tf.contrib.training.checkpoints_iterator(
                checkpointdir):
            evaluate_once(config, checkpointdir, validation_records,
                          checkpoint_path, batch_size, num_views)
Example #7
def main(_):
    """Runs main labeled eval loop."""
    # Parse config dict from yaml config files / command line flags.
    config = util.ParseConfigsToLuaTable(FLAGS.config_paths,
                                         FLAGS.model_params)

    # Choose an estimator based on training strategy.
    checkpointdir = FLAGS.checkpointdir
    checkpoint_path = os.path.join(
        checkpointdir, 'model.ckpt-%s' % FLAGS.checkpoint_iter)
    estimator = get_estimator(config, checkpointdir)

    # Get records to embed.
    validation_dir = FLAGS.embedding_records
    validation_records = util.GetFilesRecursively(validation_dir)

    sequences_to_data = {}
    for (view_embeddings, view_raw_image_strings,
         seqname) in estimator.inference(validation_records,
                                         checkpoint_path,
                                         config.data.embed_batch_size,
                                         num_sequences=FLAGS.num_sequences):
        sequences_to_data[seqname] = {
            'embeddings': view_embeddings,
            'images': view_raw_image_strings,
        }

    all_embeddings = np.zeros((0, config.embedding_size))
    all_ims = []
    all_seqnames = []

    num_embeddings = FLAGS.num_embed
    # Concatenate all views from all sequences into a big flat list.
    for seqname, data in sequences_to_data.items():
        embs = data['embeddings']
        ims = data['images']
        for v in range(config.data.num_views):
            for (emb, im) in zip(embs[v], ims[v]):
                all_embeddings = np.append(all_embeddings, [emb], axis=0)
                all_ims.append(im)
                all_seqnames.append(seqname)

    # Choose N indices uniformly from all images.
    random_indices = list(range(all_embeddings.shape[0]))
    random.shuffle(random_indices)
    viz_indices = random_indices[:num_embeddings]

    # Extract embs.
    viz_embs = np.array(all_embeddings[viz_indices])

    # Extract and decode ims.
    viz_ims = list(np.array(all_ims)[viz_indices])
    decoded_ims = []

    sprite_dim = FLAGS.sprite_dim
    for i, im in enumerate(viz_ims):
        if i % 100 == 0:
            print('Decoding image %d/%d.' % (i, num_embeddings))
        nparr_i = np.frombuffer(im, np.uint8)
        img_np = cv2.imdecode(nparr_i, 1)
        # OpenCV decodes to BGR; reorder channels to RGB.
        img_np = img_np[..., [2, 1, 0]]

        # cv2.resize replaces the deprecated scipy.misc.imresize here.
        img_np = cv2.resize(img_np, (sprite_dim, sprite_dim))
        decoded_ims.append(img_np)
    decoded_ims = np.array(decoded_ims)

    # Output directory for the projector checkpoint and sprite.
    outdir = FLAGS.outdir

    # The embedding variable, which needs to be stored.
    # Note: this must be a tf.Variable, not a Tensor!
    embedding_var = tf.Variable(viz_embs, name='viz_embs')

    with tf.Session() as sess:
        sess.run(embedding_var.initializer)
        summary_writer = tf.summary.FileWriter(outdir)
        # Use a fresh name to avoid shadowing the model config above.
        projector_config = projector.ProjectorConfig()
        embedding = projector_config.embeddings.add()
        embedding.tensor_name = embedding_var.name

        # Comment out if you don't want sprites
        embedding.sprite.image_path = os.path.join(outdir, 'sprite.png')
        embedding.sprite.single_image_dim.extend(
            [decoded_ims.shape[1], decoded_ims.shape[1]])

        projector.visualize_embeddings(summary_writer, projector_config)
        saver = tf.train.Saver([embedding_var])
        saver.save(sess, os.path.join(outdir, 'model2.ckpt'), 1)

    sprite = images_to_sprite(decoded_ims)
    imsave(os.path.join(outdir, 'sprite.png'), sprite)
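
images_to_sprite is referenced above but not defined in these snippets. A
minimal numpy sketch of what a helper with that name conventionally does for
the TensorBoard projector: tile equally sized images into one near-square
grid, padding the remainder with blank tiles.

import math
import numpy as np

def images_to_sprite(images):
    """images: (n, h, w, c) array; returns one (grid*h, grid*w, c) image."""
    n, h, w, c = images.shape
    grid = int(math.ceil(math.sqrt(n)))
    padded = np.zeros((grid * grid, h, w, c), dtype=images.dtype)
    padded[:n] = images
    # (grid*grid, h, w, c) -> (grid, grid, h, w, c) -> (grid*h, grid*w, c).
    return (padded.reshape(grid, grid, h, w, c)
            .transpose(0, 2, 1, 3, 4)
            .reshape(grid * h, grid * w, c))

A metadata TSV with one row per embedding (for example, the sequence names
collected in all_seqnames) could additionally be written and referenced via
embedding.metadata_path so the projector labels each point.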