Code example #1
  def sample_step(self, sess,
                  single_observation, internal_state, single_action,
                  greedy=False):
    """Sample batch of steps from policy."""
    if greedy:
      outputs = [self.greedy_next_internal_state, self.greedy_sampled_actions]
    else:
      outputs = [self.next_internal_state, self.sampled_actions]

    feed_dict = {self.internal_state: internal_state}
    for action_place, action in zip(self.single_action, single_action):
      feed_dict[action_place] = action
    for obs_place, obs in zip(self.single_observation, single_observation):
      feed_dict[obs_place] = obs

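    # Request a full trace so run_metadata records every op executed by this step.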
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    results = sess.run(outputs, feed_dict=feed_dict, options=options,
                       run_metadata=run_metadata)
    cg = CompGraph(FLAGS.model_name, run_metadata, tf.get_default_graph())

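    # Fetch a symbolic shape for every traced tensor, evaluate the shapes in a
    # second run, and dump the per-op analysis to a pickle named after the model.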
    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
      cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   '{}.pickle'.format(FLAGS.model_name))

    exit(0)

    return results
Code example #2
File: names.py Project: miglopst/models
def namignize(names, checkpoint_path, config, model_size):
    """Recognizes names and prints the Perplexity of the model for each names
    in the list

    Args:
        names: a list of names in the model format
        checkpoint_path: the path to restore the trained model from, should not
            include the model name, just the path to
        config: one of the above configs that specify the model and how it
            should be run and trained
    Returns:
        None
    """
    with tf.Graph().as_default(), tf.Session() as session:

        with tf.variable_scope("model"):
            m = NamignizerModel(is_training=False, config=config)

        m.saver.restore(session, checkpoint_path)

        for name in names:
            x, y = data_utils.name_to_batch(name, m.batch_size, m.num_steps)

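            # Weight the real characters with ones and pad the rest of the batch with zeros.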
            feed_dict = {
                m.input_data: x,
                m.targets: y,
                m.weights: np.concatenate(
                    (np.ones(len(name)),
                     np.zeros(m.batch_size * m.num_steps - len(name))))
            }

            model_name = 'namignize_{}'.format(model_size)

            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            cost, loss, _ = session.run(
                [m.cost, m.loss, tf.no_op()],
                feed_dict=feed_dict,
                options=options,
                run_metadata=run_metadata)
            cg = CompGraph(model_name, run_metadata, tf.get_default_graph())

            cg_tensor_dict = cg.get_tensors()
            cg_sorted_keys = sorted(cg_tensor_dict.keys())
            cg_sorted_items = []
            for cg_key in cg_sorted_keys:
                cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

            cg_sorted_shape = session.run(cg_sorted_items, feed_dict=feed_dict)
            cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                           '{}.pickle'.format(model_name))

            exit(0)

            print("Name {} gives us a perplexity of {}".format(
                name, np.exp(cost)))
Code example #3
File: inference_wrapper.py Project: miglopst/models
  def inference_step(self, sess, input_feed, state_feed):

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    softmax_output, state_output = sess.run(
        fetches=["softmax:0", "lstm/state:0"],
        feed_dict={
            "input_feed:0": input_feed,
            "lstm/state_feed:0": state_feed,
        }, options=options, run_metadata=run_metadata)
    cg = CompGraph('im2txt_infer_step', run_metadata, tf.get_default_graph())

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
      cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items,
                               feed_dict={"input_feed:0": input_feed,
                                          "lstm/state_feed:0": state_feed})
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   'im2txt_infer_step.pickle')
    exit(0)

    return softmax_output, state_output, None
Code example #4
def main(_, run_eval_loop=True):
  # Fetch real images.
  with tf.name_scope('inputs'):
    real_images, _, _ = data_provider.provide_data(
        'train', FLAGS.num_images_generated, FLAGS.dataset_dir)

  image_write_ops = None
  if FLAGS.eval_real_images:
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(real_images, FLAGS.classifier_filename))
  else:
    # In order for variables to load, use the same variable scope as in the
    # train job.
    with tf.variable_scope('Generator'):
      images = networks.unconditional_generator(
          tf.random_normal([FLAGS.num_images_generated, FLAGS.noise_dims]))

      sess = tf.Session()
      saver = tf.train.Saver()
      if not restore_from_checkpoint(sess, saver):
        raise NotImplementedError

      options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
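      # Generate one batch of images under full tracing to collect run metadata for profiling.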
      sess.run(images, options=options, run_metadata=run_metadata)
      cg = CompGraph('gan_mnist', run_metadata, tf.get_default_graph())

      cg_tensor_dict = cg.get_tensors()
      cg_sorted_keys = sorted(cg_tensor_dict.keys())
      cg_sorted_items = []
      for cg_key in cg_sorted_keys:
        cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

      cg_sorted_shape = sess.run(cg_sorted_items)
      cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                     'gan_mnist.pickle')

      exit(0)
    #tf.summary.scalar('MNIST_Frechet_distance',
    #                  util.mnist_frechet_distance(
    #                      real_images, images, FLAGS.classifier_filename))
    #tf.summary.scalar('MNIST_Classifier_score',
    #                  util.mnist_score(images, FLAGS.classifier_filename))
    if FLAGS.num_images_generated >= 100:
      reshaped_images = tfgan.eval.image_reshaper(
          images[:100, ...], num_cols=10)
      uint8_images = data_provider.float_image_to_uint8(reshaped_images)
      image_write_ops = tf.write_file(
          '%s/%s'% (FLAGS.eval_dir, 'unconditional_gan.png'),
          tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return
  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Code example #5
def _EvalModel(dataset):
    """Evaluate model perplexity using provided dataset.

  Args:
    dataset: LM1BDataset object.
  """
    sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt)

    current_step = t['global_step'].eval(session=sess)
    sys.stderr.write('Loaded step %d.\n' % current_step)

    data_gen = dataset.get_batch(BATCH_SIZE, NUM_TIMESTEPS, forever=False)
    sum_num = 0.0
    sum_den = 0.0
    perplexity = 0.0
    for i, (inputs, char_inputs, _, targets, weights) in enumerate(data_gen):
        input_dict = {
            t['inputs_in']: inputs,
            t['targets_in']: targets,
            t['target_weights_in']: weights
        }
        if 'char_inputs_in' in t:
            input_dict[t['char_inputs_in']] = char_inputs

        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        log_perp = sess.run(t['log_perplexity_out'],
                            feed_dict=input_dict,
                            options=options,
                            run_metadata=run_metadata)

        cg = CompGraph('lm_1b', run_metadata, t['log_perplexity_out'].graph)

        cg_tensor_dict = cg.get_tensors()
        cg_sorted_keys = sorted(cg_tensor_dict.keys())
        cg_sorted_items = []
        for cg_key in cg_sorted_keys:
            cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

        cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=input_dict)
        cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                       'lm_1b.pickle')

        if np.isnan(log_perp):
            sys.stderr.write('log_perplexity is Nan.\n')
        else:
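            # Accumulate log-perplexity weighted by the mean target weight; the running
            # perplexity is the exponential of the weighted average.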
            sum_num += log_perp * weights.mean()
            sum_den += weights.mean()
        if sum_den > 0:
            perplexity = np.exp(sum_num / sum_den)

        sys.stderr.write('Eval Step: %d, Average Perplexity: %f.\n' %
                         (i, perplexity))

        if i > FLAGS.max_eval_steps:
            break
Code example #6
File: demo_inference.py Project: miglopst/models
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
    images_placeholder, endpoints = create_model(batch_size, dataset_name)
    images_data = load_images(image_path_pattern, batch_size, dataset_name)
    #session_creator = monitored_session.ChiefSessionCreator(
    #  checkpoint_filename_with_path=checkpoint)
    #with monitored_session.MonitoredSession(
    #    session_creator=session_creator) as sess:

    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint)

    sess.run([
        tf.local_variables_initializer(),
        tf.global_variables_initializer(),
        tf.tables_initializer()
    ])

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    print('Start profiling')
    predictions = sess.run(endpoints.predicted_text,
                           feed_dict={images_placeholder: images_data},
                           options=options,
                           run_metadata=run_metadata)
    cg = CompGraph('attention_ocr', run_metadata, tf.get_default_graph())
    print('Profiling finished')

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
        cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items,
                               feed_dict={images_placeholder: images_data},
                               options=options,
                               run_metadata=run_metadata)
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   'attention_ocr.pickle')

    exit(0)

    return predictions.tolist()
Code example #7
    def run_eval_step(self, sess, article_batch, abstract_batch, targets,
                      article_lens, abstract_lens, loss_weights):

        feed_dict = {
            self._articles: article_batch,
            self._abstracts: abstract_batch,
            self._targets: targets,
            self._article_lens: article_lens,
            self._abstract_lens: abstract_lens,
            self._loss_weights: loss_weights
        }

        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        results = sess.run(self._loss,
                           feed_dict=feed_dict,
                           options=options,
                           run_metadata=run_metadata)
        cg = CompGraph('textsum', run_metadata, tf.get_default_graph())

        cg_tensor_dict = cg.get_tensors()
        cg_sorted_keys = sorted(cg_tensor_dict.keys())
        cg_sorted_items = []
        for cg_key in cg_sorted_keys:
            cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

        cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
        cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                       'textsum.pickle')

        print('Finished evaluation')
        exit(0)

        to_return = [self._summaries, self._loss, self.global_step]
        return sess.run(to_return,
                        feed_dict={
                            self._articles: article_batch,
                            self._abstracts: abstract_batch,
                            self._targets: targets,
                            self._article_lens: article_lens,
                            self._abstract_lens: abstract_lens,
                            self._loss_weights: loss_weights
                        })
Code example #8
File: inference_wrapper.py Project: miglopst/models
  def feed_image(self, sess, encoded_image):
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    initial_state = sess.run(fetches="lstm/initial_state:0",
                             feed_dict={"image_feed:0": encoded_image},
                             options=options, run_metadata=run_metadata)
    cg = CompGraph('im2txt_feed_image', run_metadata, tf.get_default_graph())

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
      cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items,
                               feed_dict={"image_feed:0": encoded_image})
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   'im2txt_feed_image.pickle')

    return initial_state
Code example #9
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()

    ######################
    # Select the dataset #
    ######################
    dataset = dataset_factory.get_dataset(
        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)

    ####################
    # Select the model #
    ####################
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=False)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [image, label] = provider.get(['image', 'label'])
    label -= FLAGS.labels_offset

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        preprocessing_name,
        is_training=False)

    eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size

    image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

    images, labels = tf.train.batch(
        [image, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################
    logits, _ = network_fn(images)

    if FLAGS.moving_average_decay:
      variable_averages = tf.train.ExponentialMovingAverage(
          FLAGS.moving_average_decay, tf_global_step)
      variables_to_restore = variable_averages.variables_to_restore(
          slim.get_model_variables())
      variables_to_restore[tf_global_step.op.name] = tf_global_step
    else:
      variables_to_restore = slim.get_variables_to_restore()

    predictions = tf.argmax(logits, 1)
    labels = tf.squeeze(labels)

    # Define the metrics:
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        #'Recall_5': slim.metrics.streaming_recall_at_k(
        #    logits, labels, 5),
    })

    # Print the summaries to screen.
    for name, value in list(names_to_values.items()):
      summary_name = 'eval/%s' % name
      op = tf.summary.scalar(summary_name, value, collections=[])
      op = tf.Print(op, [value], summary_name)
      tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

    # TODO(sguada) use num_epochs=1
    if FLAGS.max_num_batches:
      num_batches = FLAGS.max_num_batches
    else:
      # This ensures that we make a single pass over all of the data.
      num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
      checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
      checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)

    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_path)

    tf.train.start_queue_runners(sess=sess)

    init = tf.global_variables_initializer()
    sess.run(init)
    init = tf.local_variables_initializer()
    sess.run(init)

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

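    # Run the streaming metric update ops once under full tracing to capture profiling metadata.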
    sess.run(list(names_to_updates.values()), options=options,
             run_metadata=run_metadata)
    model_name = 'slim_{}'.format(FLAGS.model_name)
    cg = CompGraph(model_name, run_metadata, tf.get_default_graph())

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
      cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items)
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   '{}.pickle'.format(model_name))

    exit(0)
Code example #10
def train_step_custom_online_sampling(sess, train_op, global_step,
                                      train_step_kwargs, mode='train',
                                      model_name='cognitive_mapping_and_planning'):
  m          = train_step_kwargs['m']
  obj        = train_step_kwargs['obj']
  rng_data   = train_step_kwargs['rng_data']
  rng_action = train_step_kwargs['rng_action']
  writer     = train_step_kwargs['writer']
  iters      = train_step_kwargs['iters']
  num_steps  = train_step_kwargs['num_steps']
  logdir     = train_step_kwargs['logdir']
  dagger_sample_bn_false = train_step_kwargs['dagger_sample_bn_false']
  train_display_interval = train_step_kwargs['train_display_interval']
  if 'outputs' not in m.train_ops:
    m.train_ops['outputs'] = []

  s_ops = m.summary_ops[mode]
  val_additional_ops = []

  # Print all variables here.
  if False:
    v = tf.get_collection(tf.GraphKeys.VARIABLES)
    v_op = [_.value() for _ in v]
    v_op_value = sess.run(v_op)

    filter = lambda x, y: 'Adam' in x.name
    # filter = lambda x, y: np.is_any_nan(y)
    ind = [i for i, (_, __) in enumerate(zip(v, v_op_value)) if filter(_, __)]
    v = [v[i] for i in ind]
    v_op_value = [v_op_value[i] for i in ind]

    for i in range(len(v)):
      logging.info('XXXX: variable: %30s, is_any_nan: %5s, norm: %f.',
                   v[i].name, np.any(np.isnan(v_op_value[i])),
                   np.linalg.norm(v_op_value[i]))

  tt = utils.Timer()
  for i in range(iters):
    tt.tic()
    # Sample a room.
    e = obj.sample_env(rng_data)

    # Initialize the agent.
    init_env_state = e.reset(rng_data)

    # Get and process the common data.
    input = e.get_common_data()
    input = e.pre_common_data(input)
    feed_dict  = prepare_feed_dict(m.input_tensors['common'], input)
    if dagger_sample_bn_false:
      feed_dict[m.train_ops['batch_norm_is_training_op']] = False
    common_data = sess.run(m.train_ops['common'], feed_dict=feed_dict)
    print('Checkpoint 1')

    states = []
    state_features = []
    state_targets = []
    net_state_to_input = []
    step_data_cache = []
    executed_actions = []
    rewards = []
    action_sample_wts = []
    states.append(init_env_state)

    net_state = sess.run(m.train_ops['init_state'], feed_dict=feed_dict)
    net_state = dict(list(zip(m.train_ops['state_names'], net_state)))
    net_state_to_input.append(net_state)
    print('Checkpoint 2')
    for j in range(num_steps):
      #f = e.get_features(states[j], j)
      #f = e.pre_features(f)
      #f.update(net_state)
      #f['step_number'] = np.ones((1,1,1), dtype=np.int32)*j
      #state_features.append(f)
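      # The real feature extraction above is commented out; a placeholder dict keeps the loop running.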
      state_features.append({1.0: 1.0})

      feed_dict = prepare_feed_dict(m.input_tensors['step'], state_features[-1])
      #optimal_action = e.get_optimal_action(states[j], j)
      for x, v in zip(m.train_ops['common'], common_data):
        feed_dict[x] = v
      if dagger_sample_bn_false:
        feed_dict[m.train_ops['batch_norm_is_training_op']] = False

      options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()

      outs = sess.run([m.train_ops['step'], m.sample_gt_prob_op,
                       m.train_ops['step_data_cache'],
                       m.train_ops['updated_state'],
                       m.train_ops['outputs']], feed_dict=feed_dict,
                      options=options, run_metadata=run_metadata)
      cg = CompGraph(model_name, run_metadata, tf.get_default_graph())

      cg_tensor_dict = cg.get_tensors()
      cg_sorted_keys = sorted(cg_tensor_dict.keys())
      cg_sorted_items = []
      for cg_key in cg_sorted_keys:
        cg_sorted_items.append(cg_tensor_dict[cg_key].shape)
          #cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

      #cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
      cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_items)),
                     '{}.pickle'.format(model_name))
      print('After sess run')

      exit(0)

      action_probs = outs[0]
      sample_gt_prob = outs[1]
      step_data_cache.append(dict(list(zip(m.train_ops['step_data_cache'], outs[2]))))
      net_state = outs[3]
      if hasattr(e, 'update_state'):
        outputs = outs[4]
        outputs = dict(list(zip(m.train_ops['output_names'], outputs)))
        e.update_state(outputs, j)
      state_targets.append(e.get_targets(states[j], j))

      if j < num_steps-1:
        # Sample from action_probs and optimal action.
        action, action_sample_wt = sample_action(
            rng_action, action_probs, optimal_action, sample_gt_prob,
            m.sample_action_type, m.sample_action_combine_type)
        next_state, reward = e.take_action(states[j], action, j)
        executed_actions.append(action)
        states.append(next_state)
        rewards.append(reward)
        action_sample_wts.append(action_sample_wt)
        net_state = dict(list(zip(m.train_ops['state_names'], net_state)))
        net_state_to_input.append(net_state)

    # Concatenate things together for training.
    rewards = np.array(rewards).T
    action_sample_wts = np.array(action_sample_wts).T
    executed_actions = np.array(executed_actions).T
    all_state_targets = concat_state_x(state_targets, e.get_targets_name())
    all_state_features = concat_state_x(state_features,
                                        e.get_features_name()+['step_number'])
    # all_state_net = concat_state_x(net_state_to_input,
    # m.train_ops['state_names'])
    all_step_data_cache = concat_state_x(step_data_cache,
                                         m.train_ops['step_data_cache'])

    dict_train = dict(input)
    dict_train.update(all_state_features)
    dict_train.update(all_state_targets)
    # dict_train.update(all_state_net)
    dict_train.update(net_state_to_input[0])
    dict_train.update(all_step_data_cache)
    dict_train.update({'rewards': rewards,
                       'action_sample_wts': action_sample_wts,
                       'executed_actions': executed_actions})
    feed_dict = prepare_feed_dict(m.input_tensors['train'], dict_train)
    for x in m.train_ops['step_data_cache']:
      feed_dict[x] = all_step_data_cache[x]
    if mode == 'train':
      n_step = sess.run(global_step)

      if np.mod(n_step, train_display_interval) == 0:
        total_loss, np_global_step, summary, print_summary = sess.run(
            [train_op, global_step, s_ops.summary_ops, s_ops.print_summary_ops],
            feed_dict=feed_dict)
        logging.error("")
      else:
        total_loss, np_global_step, summary = sess.run(
            [train_op, global_step, s_ops.summary_ops], feed_dict=feed_dict)

      if writer is not None and summary is not None:
        writer.add_summary(summary, np_global_step)

      should_stop = sess.run(m.should_stop_op)

    if mode != 'train':
      arop = [[] for j in range(len(s_ops.additional_return_ops))]
      for j in range(len(s_ops.additional_return_ops)):
        if s_ops.arop_summary_iters[j] < 0 or i < s_ops.arop_summary_iters[j]:
          arop[j] = s_ops.additional_return_ops[j]
      val = sess.run(arop, feed_dict=feed_dict)
      val_additional_ops.append(val)
      tt.toc(log_at=60, log_str='val timer {:d} / {:d}: '.format(i, iters),
             type='time')

  if mode != 'train':
    # Write the default val summaries.
    summary, print_summary, np_global_step = sess.run(
        [s_ops.summary_ops, s_ops.print_summary_ops, global_step])
    if writer is not None and summary is not None:
      writer.add_summary(summary, np_global_step)

    # write custom validation ops
    val_summarys = []
    val_additional_ops = list(zip(*val_additional_ops))
    if len(s_ops.arop_eval_fns) > 0:
      val_metric_summary = tf.summary.Summary()
      for i in range(len(s_ops.arop_eval_fns)):
        val_summary = None
        if s_ops.arop_eval_fns[i] is not None:
          val_summary = s_ops.arop_eval_fns[i](val_additional_ops[i],
                                               np_global_step, logdir,
                                               val_metric_summary,
                                               s_ops.arop_summary_iters[i])
        val_summarys.append(val_summary)
      if writer is not None:
        writer.add_summary(val_metric_summary, np_global_step)

    # Return the additional val_ops
    total_loss = (val_additional_ops, val_summarys)
    should_stop = None

  return total_loss, should_stop
Code example #11
def doeval(s, ac, n, itercount):
    """Evaluate the current network on n batches of random examples.

  Args:
    s:  The current TensorFlow session
    ac: an instance of the AdversarialCrypto class
    n:  The number of iterations to run.
    itercount: Iteration count label for logging.

  Returns:
    Bob and Eve's loss, as a percent of bits incorrect.
  """

    bob_loss_accum = 0
    eve_loss_accum = 0
    for _ in xrange(n):
        bl, el = s.run([ac.bob_reconstruction_loss, ac.eve_loss])
        bob_loss_accum += bl
        eve_loss_accum += el
    bob_loss_percent = bob_loss_accum / (n * FLAGS.batch_size)
    eve_loss_percent = eve_loss_accum / (n * FLAGS.batch_size)
    print('%10d\t%20.2f\t%20.2f' %
          (itercount, bob_loss_percent, eve_loss_percent))
    sys.stdout.flush()

    if (itercount >= 200):

        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        # profile the alice model.
        alice = s.run(ac.encrypted, options=options, run_metadata=run_metadata)
        cg = CompGraph('adversarial_crypto_alice', run_metadata,
                       tf.get_default_graph())

        cg_tensor_dict = cg.get_tensors()
        cg_sorted_keys = sorted(cg_tensor_dict.keys())
        cg_sorted_items = []
        for cg_key in cg_sorted_keys:
            cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

        cg_sorted_shape = s.run(cg_sorted_items)
        cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                       'adversarial_crypto_alice.pickle')

        # profile the bob model
        bob = s.run(ac.decrypted, options=options, run_metadata=run_metadata)
        cg = CompGraph('adversarial_crypto_bob', run_metadata,
                       tf.get_default_graph())

        cg_tensor_dict = cg.get_tensors()
        cg_sorted_keys = sorted(cg_tensor_dict.keys())
        cg_sorted_items = []
        for cg_key in cg_sorted_keys:
            cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

        cg_sorted_shape = s.run(cg_sorted_items)
        cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                       'adversarial_crypto_bob.pickle')

        # profile the eve model
        eve = s.run(ac.eve_out, options=options, run_metadata=run_metadata)
        cg = CompGraph('adversarial_crypto_eve', run_metadata,
                       tf.get_default_graph())

        cg_tensor_dict = cg.get_tensors()
        cg_sorted_keys = sorted(cg_tensor_dict.keys())
        cg_sorted_items = []
        for cg_key in cg_sorted_keys:
            cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

        cg_sorted_shape = s.run(cg_sorted_items)
        cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                       'adversarial_crypto_eve.pickle')

        exit(0)

    return bob_loss_percent, eve_loss_percent
Code example #12
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)

    # Read list of images.
    tf.logging.info('Reading list of images...')
    image_paths = _ReadImageList(cmd_args.list_images_path)
    num_images = len(image_paths)
    tf.logging.info('done! Found %d images', num_images)

    # Parse DelfConfig proto.
    config = delf_config_pb2.DelfConfig()
    with tf.gfile.FastGFile(cmd_args.config_path, 'rb') as f:
        text_format.Merge(f.read(), config)

    # Create output directory if necessary.
    if not os.path.exists(cmd_args.output_dir):
        os.makedirs(cmd_args.output_dir)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Reading list of images.
        filename_queue = tf.train.string_input_producer(image_paths,
                                                        shuffle=False)
        reader = tf.WholeFileReader()
        _, value = reader.read(filename_queue)
        image_tf = tf.image.decode_jpeg(value, channels=3)

        with tf.Session() as sess:
            # Initialize variables.
            init_op = tf.global_variables_initializer()
            sess.run(init_op)

            # Loading model that will be used.
            tf.saved_model.loader.load(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       config.model_path)
            graph = tf.get_default_graph()
            input_image = graph.get_tensor_by_name('input_image:0')
            input_score_threshold = graph.get_tensor_by_name(
                'input_abs_thres:0')
            input_image_scales = graph.get_tensor_by_name('input_scales:0')
            input_max_feature_num = graph.get_tensor_by_name(
                'input_max_feature_num:0')
            boxes = graph.get_tensor_by_name('boxes:0')
            raw_descriptors = graph.get_tensor_by_name('features:0')
            feature_scales = graph.get_tensor_by_name('scales:0')
            attention_with_extra_dim = graph.get_tensor_by_name('scores:0')
            attention = tf.reshape(attention_with_extra_dim,
                                   [tf.shape(attention_with_extra_dim)[0]])

            locations, descriptors = feature_extractor.DelfFeaturePostProcessing(
                boxes, raw_descriptors, config)

            # Start input enqueue threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            start = time.clock()
            for i in range(num_images):
                # Write to log-info once in a while.
                if i == 0:
                    tf.logging.info(
                        'Starting to extract DELF features from images...')
                elif i % _STATUS_CHECK_ITERATIONS == 0:
                    elapsed = (time.clock() - start)
                    tf.logging.info(
                        'Processing image %d out of %d, last %d '
                        'images took %f seconds', i, num_images,
                        _STATUS_CHECK_ITERATIONS, elapsed)
                    start = time.clock()

                # # Get next image.
                im = sess.run(image_tf)

                # If descriptor already exists, skip its computation.
                out_desc_filename = os.path.splitext(
                    os.path.basename(image_paths[i]))[0] + _DELF_EXT
                out_desc_fullpath = os.path.join(cmd_args.output_dir,
                                                 out_desc_filename)
                if tf.gfile.Exists(out_desc_fullpath):
                    tf.logging.info('Skipping %s', image_paths[i])
                    continue

                feed_dict = {
                    input_image: im,
                    input_score_threshold:
                    config.delf_local_config.score_threshold,
                    input_image_scales: list(config.image_scales),
                    input_max_feature_num:
                    config.delf_local_config.max_feature_num
                }

                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                # Extract and save features.
                (locations_out, descriptors_out, feature_scales_out,
                 attention_out) = sess.run(
                     [locations, descriptors, feature_scales, attention],
                     feed_dict=feed_dict,
                     options=options,
                     run_metadata=run_metadata)
                cg = CompGraph('delf_extract_features', run_metadata,
                               tf.get_default_graph())

                cg_tensor_dict = cg.get_tensors()
                cg_sorted_keys = sorted(cg_tensor_dict.keys())
                cg_sorted_items = []
                for cg_key in cg_sorted_keys:
                    cg_sorted_items.append(cg_tensor_dict[cg_key].shape)

                #cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
                cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_items)),
                               'delf_extract_features.pickle')

                serialized_desc = feature_io.WriteToFile(
                    out_desc_fullpath, locations_out, feature_scales_out,
                    descriptors_out, attention_out)

            # Finalize enqueue threads.
            coord.request_stop()
            coord.join(threads)
Code example #13
    vggish_slim.define_vggish_slim()
    vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)

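    # Look up the VGGish input and embedding output tensors by name.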
    features_tensor = sess.graph.get_tensor_by_name(
        vggish_params.INPUT_TENSOR_NAME)
    embedding_tensor = sess.graph.get_tensor_by_name(
        vggish_params.OUTPUT_TENSOR_NAME)

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    [embedding_batch] = sess.run([embedding_tensor],
                                 feed_dict={features_tensor: input_batch},
                                 options=options,
                                 run_metadata=run_metadata)
    cg = CompGraph('audioset', run_metadata, tf.get_default_graph())

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
        cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items,
                               feed_dict={features_tensor: input_batch})
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   'audioset.pickle')

    print('VGGish embedding: ', embedding_batch[0])
    expected_embedding_mean = 0.131
    expected_embedding_std = 0.238
Code example #14
File: model.py Project: miglopst/models
    def episode_predict(self, sess, x, y, clear_memory=False):
        """Predict the labels on an episode of examples.

    Args:
      sess: A Tensorflow Session.
      x: A list of batches of images.
      y: A list of labels for the images in x.
        This allows for updating the memory.
      clear_memory: Whether to clear the memory before the episode.

    Returns:
      List of predicted y.
    """

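        # Snapshot the external memory so it can be restored after the episode.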
        cur_memory = sess.run([self.mem_keys, self.mem_vals, self.mem_age])

        if clear_memory:
            self.clear_memory(sess)

        outputs = [self.y_preds]
        y_preds = []
        for xx, yy in zip(x, y):

            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            out = sess.run(outputs,
                           feed_dict={
                               self.x: xx,
                               self.y: yy
                           },
                           options=options,
                           run_metadata=run_metadata)
            cg = CompGraph('learning_to_remember_rare_events', run_metadata,
                           tf.get_default_graph())

            cg_tensor_dict = cg.get_tensors()
            cg_sorted_keys = sorted(cg_tensor_dict.keys())
            cg_sorted_items = []
            for cg_key in cg_sorted_keys:
                cg_sorted_items.append(cg_tensor_dict[cg_key].shape)

            #cg_sorted_shape = sess.run(cg_sorted_items,
            #                           feed_dict={self.x: xx, self.y: yy})
            cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_items)),
                           'learning_to_remember_rare_events.pickle')

            exit(0)

            y_pred = out[0]
            y_preds.append(y_pred)

        sess.run(
            [self.mem_reset_op],
            feed_dict={
                self.mem_keys_reset: cur_memory[0],
                self.mem_vals_reset: cur_memory[1],
                self.mem_age_reset: cur_memory[2]
            })

        return y_preds
Code example #15
File: skip_thoughts_encoder.py Project: GD06/models
    def encode(self,
               sess,
               data,
               use_norm=True,
               verbose=True,
               batch_size=128,
               use_eos=False):
        """Encodes a sequence of sentences as skip-thought vectors.

    Args:
      sess: TensorFlow Session.
      data: A list of input strings.
      use_norm: Whether to normalize skip-thought vectors to unit L2 norm.
      verbose: Whether to log every batch.
      batch_size: Batch size for the encoder.
      use_eos: Whether to append the end-of-sentence word to each input
        sentence.

    Returns:
      thought_vectors: A list of numpy arrays corresponding to the skip-thought
        encodings of sentences in 'data'.
    """
        print("Current model: ", self._model_name)

        data = self._preprocess(data, use_eos)
        thought_vectors = []

        batch_indices = np.arange(0, len(data), batch_size)
        for batch, start_index in enumerate(batch_indices):
            if verbose:
                tf.logging.info("Batch %d / %d.", batch, len(batch_indices))

            embeddings, mask = _batch_and_pad(data[start_index:start_index +
                                                   batch_size])
            feed_dict = {
                "encode_emb:0": embeddings,
                "encode_mask:0": mask,
            }

            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            results = sess.run("encoder/thought_vectors:0",
                               feed_dict=feed_dict,
                               options=options,
                               run_metadata=run_metadata)
            cg = CompGraph(self._model_name, run_metadata, sess.graph)

            cg_tensor_dict = cg.get_tensors()
            cg_sorted_keys = sorted(cg_tensor_dict.keys())
            cg_sorted_items = []
            for cg_key in cg_sorted_keys:
                cg_sorted_items.append(cg_tensor_dict[cg_key].shape)

            #cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
            cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_items)),
                           '{}.pickle'.format(self._model_name))

            exit(0)
            #thought_vectors.extend(
            #    sess.run("encoder/thought_vectors:0", feed_dict=feed_dict))

        if use_norm:
            thought_vectors = [v / np.linalg.norm(v) for v in thought_vectors]

        return thought_vectors
Code example #16
File: run_infer.py Project: GD06/models
def main():

    parser = argparse.ArgumentParser(
        description="run inference by using specified model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('model_name', help="specify the model name")
    parser.add_argument('work_dir', help="specify the work space directory")
    parser.add_argument('--model_dir',
                        default=None,
                        help="specify the dir storing models.")

    args = parser.parse_args()

    model_dir = args.model_dir
    if model_dir is None:
        assert os.getenv('MODEL_INPUT_DIR') is not None
        model_dir = os.path.join(os.getenv('MODEL_INPUT_DIR'),
                                 'object_detection')

    model_name = args.model_name
    model_file = model_name + '.tar.gz'
    tar_file = tarfile.open(os.path.join(model_dir, model_file))
    recorded_name = model_name
    for file in tar_file.getmembers():
        file_name = os.path.basename(file.name)
        if 'frozen_inference_graph.pb' in file_name:
            recorded_name = file.name
            tar_file.extract(file, args.work_dir)

    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
    PATH_TO_CKPT = os.path.join(args.work_dir, recorded_name)
    NUM_CLASSES = 90

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name=model_name)

    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    PATH_TO_TEST_IMAGES_DIR = 'test_images'
    TEST_IMAGE_PATHS = [
        os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i))
        for i in range(1, 2)
    ]

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:

            image_tensor = detection_graph.get_tensor_by_name(
                '{}/image_tensor:0'.format(model_name))
            detection_boxes = detection_graph.get_tensor_by_name(
                '{}/detection_boxes:0'.format(model_name))
            detection_scores = detection_graph.get_tensor_by_name(
                '{}/detection_scores:0'.format(model_name))
            detection_classes = detection_graph.get_tensor_by_name(
                '{}/detection_classes:0'.format(model_name))
            num_detections = detection_graph.get_tensor_by_name(
                '{}/num_detections:0'.format(model_name))

            for image_path in TEST_IMAGE_PATHS:
                image = Image.open(image_path)
                image_np = load_image_into_numpy_array(image)
                image_np_expanded = np.expand_dims(image_np, axis=0)

                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                results = sess.run([
                    detection_boxes, detection_scores, detection_classes,
                    num_detections
                ],
                                   feed_dict={image_tensor: image_np_expanded},
                                   options=options,
                                   run_metadata=run_metadata)
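                # keyword_filter="while" appears to restrict the analysis to ops inside the
                # detection while-loops (assumption; CompGraph is a project-specific helper).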
                cg = CompGraph(model_name,
                               run_metadata,
                               detection_graph,
                               keyword_filter="while")

                cg_tensor_dict = cg.get_tensors()
                cg_sorted_keys = sorted(cg_tensor_dict.keys())
                #cg_sorted_shape = []
                #for cg_key in cg_sorted_keys:
                #    print(cg_key)
                #    t = tf.shape(cg_tensor_dict[cg_key])
                #    cg_sorted_shape.append(t.eval(feed_dict={image_tensor: image_np_expanded},
                #                                  session=sess))

                cg_sorted_items = []
                for cg_key in cg_sorted_keys:
                    cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

                cg_sorted_shape = sess.run(
                    cg_sorted_items,
                    feed_dict={image_tensor: image_np_expanded})
                cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                               '{}.pickle'.format(model_name))

                print('Image: {}, number of detected: {}'.format(
                    image_path, len(results[3])))
Code example #17
File: base_estimator.py Project: GD06/models
    def _tfrecord_inference(self,
                            records,
                            checkpoint_path,
                            batch_size,
                            num_sequences=-1,
                            reuse=False,
                            model_name="tcn"):
        """Mode 2: TFRecord inference.

    Args:
      records: List of strings, paths to TFRecords.
      checkpoint_path: String, path to a specific checkpoint to restore.
      batch_size: Int, size of inference batch.
      num_sequences: Int, number of sequences to embed. If -1,
        embed everything.
      reuse: Boolean, whether or not to reuse embedder weights.
    Yields:
      (embeddings, raw_image_strings, sequence_name):
        embeddings is a 2-D float32 numpy array holding
        [sequence_size, embedding_size] image embeddings.
        raw_image_strings is a 1-D string numpy array holding
        [sequence_size] jpeg-encoded image strings.
        sequence_name is a string holding the name of the embedded sequence.
    """
        print('model name: {}'.format(model_name))
        print('tfrecords path: {}'.format(records))
        tf.reset_default_graph()
        if not isinstance(records, list):
            records = [records]
        print('tfrecords list: {}'.format(records))

        # Map the list of tfrecords to a dataset of preprocessed images.
        num_views = self._config.data.num_views
        (views, task,
         seq_len) = data_providers.full_sequence_provider(records, num_views)
        tensor_dict = {
            'raw_image_strings': views,
            'task': task,
            'seq_len': seq_len
        }

        # Create a preprocess function over raw image string placeholders.
        image_str_placeholder = tf.placeholder(tf.string, shape=[None])
        decoded = preprocessing.decode_images(image_str_placeholder)
        decoded.set_shape([batch_size, None, None, 3])
        preprocessed = self.preprocess_data(decoded, is_training=False)

        # Create an inference graph over preprocessed images.
        embeddings = self.forward(preprocessed, is_training=False, reuse=reuse)

        # Create a saver to restore model variables.
        tf.train.get_or_create_global_step()
        saver = tf.train.Saver(tf.all_variables())

        # Create a session and restore model variables.
        with tf.Session() as sess:
            #with tf.train.MonitoredSession() as sess:
            saver.restore(sess, checkpoint_path)
            cnt = 0
            # If num_sequences is specified, embed that many sequences, else embed
            # everything.
            try:
                while cnt < num_sequences if num_sequences != -1 else True:
                    # Get a preprocessed image sequence.
                    np_data = sess.run(tensor_dict)
                    np_raw_images = np_data['raw_image_strings']
                    np_seq_len = np_data['seq_len']
                    np_task = np_data['task']

                    # Embed each view.
                    embedding_size = self._config.embedding_size
                    view_embeddings = [
                        np.zeros((0, embedding_size)) for _ in range(num_views)
                    ]
                    for view_index in range(num_views):
                        view_raw = np_raw_images[view_index]
                        # Embed the full sequence.
                        t = 0
                        while t < np_seq_len:
                            # Decode and preprocess the batch of image strings.

                            options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()

                            embeddings_np = sess.run(embeddings,
                                                     feed_dict={
                                                         image_str_placeholder:
                                                         view_raw[t:t +
                                                                  batch_size]
                                                     },
                                                     options=options,
                                                     run_metadata=run_metadata)
                            cg = CompGraph(model_name, run_metadata,
                                           sess.graph)

                            cg_tensor_dict = cg.get_tensors()
                            cg_sorted_keys = sorted(cg_tensor_dict.keys())
                            cg_sorted_items = []
                            for cg_key in cg_sorted_keys:
                                cg_sorted_items.append(
                                    cg_tensor_dict[cg_key].shape)

                            #cg_sorted_shape = sess.run(cg_sorted_items,
                            #      feed_dict={image_str_placeholder: view_raw[t:t+batch_size]})
                            cg.op_analysis(
                                dict(zip(cg_sorted_keys, cg_sorted_items)),
                                '{}.pickle'.format(model_name))
                            exit(0)

                            view_embeddings[view_index] = np.append(
                                view_embeddings[view_index],
                                embeddings_np,
                                axis=0)
                            tf.logging.info('Embedded %d images for task %s' %
                                            (t, np_task))
                            t += batch_size

                    # Done embedding for all views.
                    view_raw_images = np_data['raw_image_strings']
                    yield (view_embeddings, view_raw_images, np_task)
                    cnt += 1
            except tf.errors.OutOfRangeError:
                tf.logging.info('Done embedding entire dataset.')
Code example #18
def main(_, run_eval_loop=True):

  if FLAGS.conditional_eval:
    FLAGS.checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, 'conditional')
  else:
    FLAGS.checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, 'unconditional')

  print('Checkpoint dir: {}'.format(FLAGS.checkpoint_dir))

  # Fetch and generate images to run through Inception.
  with tf.name_scope('inputs'):
    real_data, num_classes = _get_real_data(
        FLAGS.num_images_generated, FLAGS.dataset_dir)
    generated_data = _get_generated_data(
        FLAGS.num_images_generated, FLAGS.conditional_eval, num_classes)

  model_name = 'gan_cifar_'
  if FLAGS.conditional_eval:
    model_name = model_name + 'cond'
  else:
    model_name = model_name + 'uncond'

  sess = tf.Session()
  saver = tf.train.Saver()
  if not restore_from_checkpoint(sess, saver):
    raise NotImplementedError

  options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_metadata = tf.RunMetadata()
  sess.run(generated_data, options=options, run_metadata=run_metadata)
  cg = CompGraph(model_name, run_metadata, tf.get_default_graph())

  cg_tensor_dict = cg.get_tensors()
  cg_sorted_keys = sorted(cg_tensor_dict.keys())
  cg_sorted_items = []
  for cg_key in cg_sorted_keys:
    cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

  cg_sorted_shape = sess.run(cg_sorted_items)
  cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                 '{}.pickle'.format(model_name))

  exit(0)
  # Compute Frechet Inception Distance.
  #if FLAGS.eval_frechet_inception_distance:
  #  fid = util.get_frechet_inception_distance(
  #      real_data, generated_data, FLAGS.num_images_generated,
  #      FLAGS.num_inception_images)
  #  tf.summary.scalar('frechet_inception_distance', fid)

  # Compute normal Inception scores.
  #if FLAGS.eval_real_images:
  #  inc_score = util.get_inception_scores(
  #      real_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  #else:
  #  inc_score = util.get_inception_scores(
  #      generated_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  #tf.summary.scalar('inception_score', inc_score)

  # If conditional, display an image grid of difference classes.
  #if FLAGS.conditional_eval and not FLAGS.eval_real_images:
  #  reshaped_imgs = util.get_image_grid(
  #      generated_data, FLAGS.num_images_generated, num_classes,
  #      FLAGS.num_images_per_class)
  #  tf.summary.image('generated_data', reshaped_imgs, max_outputs=1)

  # Create ops that write images to disk.
  image_write_ops = None
  if FLAGS.conditional_eval:
    reshaped_imgs = util.get_image_grid(
        generated_data, FLAGS.num_images_generated, num_classes,
        FLAGS.num_images_per_class)
    uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
    image_write_ops = tf.write_file(
        '%s/%s'% (FLAGS.eval_dir, 'conditional_cifar10.png'),
        tf.image.encode_png(uint8_images[0]))
  else:
    if FLAGS.num_images_generated >= 100:
      reshaped_imgs = tfgan.eval.image_reshaper(
          generated_data[:100], num_cols=FLAGS.num_images_per_class)
      uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
      image_write_ops = tf.write_file(
          '%s/%s'% (FLAGS.eval_dir, 'unconditional_cifar10.png'),
          tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return
  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      master=FLAGS.master,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Code example #19
def main(_):
    if (FLAGS.input_codes is None or FLAGS.output_directory is None
            or FLAGS.model is None):
        print('\nUsage: python decoder.py --input_codes=output_codes.pkl '
              '--iteration=15 --output_directory=/tmp/compression_output/ '
              '--model=residual_gru.pb\n\n')
        return

    if FLAGS.iteration < -1 or FLAGS.iteration > 15:
        print(
            '\n--iteration must be between 0 and 15 inclusive, or -1 to infer '
            'from file.\n')
        return
    iteration = FLAGS.iteration

    if not tf.gfile.Exists(FLAGS.output_directory):
        tf.gfile.MkDir(FLAGS.output_directory)

    if not tf.gfile.Exists(FLAGS.input_codes):
        print('\nInput codes not found.\n')
        return

    contents = ''
    with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:
        contents = code_file.read()
        loaded_codes = np.load(io.BytesIO(contents))
        assert ['codes', 'shape'] not in loaded_codes.files
        loaded_shape = loaded_codes['shape']
        loaded_array = loaded_codes['codes']

        # Unpack and recover code shapes.
        unpacked_codes = np.reshape(
            np.unpackbits(loaded_array)[:np.prod(loaded_shape)], loaded_shape)

        numpy_int_codes = np.split(unpacked_codes, len(unpacked_codes))
        if iteration == -1:
            iteration = len(unpacked_codes) - 1
        # Convert back to float and recover scale.
        numpy_codes = [
            np.squeeze(x.astype(np.float32), 0) * 2 - 1
            for x in numpy_int_codes
        ]

    with tf.Graph().as_default() as graph:
        # Load the inference model for decoding.
        with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(model_file.read())
        _ = tf.import_graph_def(graph_def, name='')

        # For encoding the tensors into PNGs.
        input_image = tf.placeholder(tf.uint8)
        encoded_image = tf.image.encode_png(input_image)

        input_tensors = [
            graph.get_tensor_by_name(name)
            for name in get_input_tensor_names()
        ][0:iteration + 1]
        outputs = [
            graph.get_tensor_by_name(name)
            for name in get_output_tensor_names()
        ][0:iteration + 1]

    feed_dict = {
        key: value
        for (key, value) in zip(input_tensors, numpy_codes)
    }

    with tf.Session(graph=graph) as sess:

        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        results = sess.run(outputs,
                           feed_dict=feed_dict,
                           options=options,
                           run_metadata=run_metadata)
        cg = CompGraph('image_decoder', run_metadata, tf.get_default_graph())

        cg_tensor_dict = cg.get_tensors()
        cg_sorted_keys = sorted(cg_tensor_dict.keys())
        cg_sorted_items = []
        for cg_key in cg_sorted_keys:
            cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

        cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
        cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                       'image_decoder.pickle')

        for index, result in enumerate(results):
            img = np.uint8(np.clip(result + 0.5, 0, 255))
            img = img.squeeze()
            png_img = sess.run(encoded_image, feed_dict={input_image: img})

            with tf.gfile.FastGFile(
                    os.path.join(FLAGS.output_directory,
                                 'image_{0:02d}.png'.format(index)),
                    'w') as output_image:
                output_image.write(png_img)
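The decoder above recovers the per-iteration codes with np.unpackbits and rescales them from {0, 1} back to {-1, 1}; the encoder in a later example does the inverse with np.packbits. A minimal, self-contained sketch of that round trip, using a small made-up code array instead of real model outputs:

import numpy as np

# Hypothetical codes in {-1, 1}, shaped (iterations, height, width, depth).
codes = np.random.choice([-1, 1], size=(2, 4, 4, 8)).astype(np.int8)

# Encoder side: map {-1, 1} -> {0, 1} and pack eight bits per byte.
packed = np.packbits(((codes + 1) // 2).reshape(-1))

# Decoder side: unpack, drop the padding bits, restore the shape and scale.
unpacked = np.unpackbits(packed)[:np.prod(codes.shape)].reshape(codes.shape)
recovered = unpacked.astype(np.float32) * 2 - 1

assert np.array_equal(recovered, codes.astype(np.float32))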
Code example #20
0
File: dp_mnist.py Project: miglopst/models
def Eval(mnist_data_file,
         network_parameters,
         num_testing_images,
         randomize,
         load_path,
         save_mistakes=False):
    """Evaluate MNIST for a number of steps.

  Args:
    mnist_data_file: Path of a file containing the MNIST images to process.
    network_parameters: parameters for defining and training the network.
    num_testing_images: the number of images we will evaluate on.
    randomize: if True, read the testing images in random order; otherwise,
      read them sequentially.
    load_path: path where to load trained parameters from.
    save_mistakes: save the mistakes if True.

  Returns:
    A tuple of the evaluation accuracy (a float) and, if save_mistakes is True,
    the list of mistakes; otherwise the second element is None.
  """
    batch_size = 100
    # Like for training, we need a session for executing the TensorFlow graph.
    with tf.Graph().as_default(), tf.Session() as sess:
        # Create the basic Mnist model.
        images, labels = MnistInput(mnist_data_file, batch_size, randomize)
        logits, _, _ = utils.BuildNetwork(images, network_parameters)
        softmax = tf.nn.softmax(logits)

        # Load the variables.
        ckpt_state = tf.train.get_checkpoint_state(load_path)
        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            raise ValueError("No model checkpoint to eval at %s\n" % load_path)

        saver = tf.train.Saver()
        saver.restore(sess, ckpt_state.model_checkpoint_path)
        coord = tf.train.Coordinator()
        _ = tf.train.start_queue_runners(sess=sess, coord=coord)

        total_examples = 0
        correct_predictions = 0
        image_index = 0
        mistakes = []
        for _ in range((num_testing_images + batch_size - 1) // batch_size):

            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            predictions, label_values = sess.run([softmax, labels],
                                                 options=options,
                                                 run_metadata=run_metadata)
            cg = CompGraph('differential_privacy_sgd', run_metadata,
                           tf.get_default_graph())

            cg_tensor_dict = cg.get_tensors()
            cg_sorted_keys = sorted(cg_tensor_dict.keys())
            cg_sorted_items = []
            for cg_key in cg_sorted_keys:
                cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

            cg_sorted_shape = sess.run(cg_sorted_items)
            cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                           'differential_privacy_sgd.pickle')

            exit(0)

            # Count how many were predicted correctly.
            for prediction, label_value in zip(predictions, label_values):
                total_examples += 1
                if np.argmax(prediction) == label_value:
                    correct_predictions += 1
                elif save_mistakes:
                    mistakes.append({
                        "index": image_index,
                        "label": label_value,
                        "pred": np.argmax(prediction)
                    })
                image_index += 1

    return (correct_predictions / total_examples,
            mistakes if save_mistakes else None)
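Every example in this collection repeats the same profiling boilerplate: run the target ops under a FULL_TRACE RunOptions, hand the RunMetadata to CompGraph, evaluate the shapes of the traced tensors, and dump everything with op_analysis. Below is a sketch of a helper that factors this out; it assumes only the CompGraph interface already used above, and the name profile_ops is ours, not from the original code:

import tensorflow as tf

def profile_ops(sess, fetches, model_name, feed_dict=None):
    """Runs `fetches` once under full tracing and pickles a CompGraph analysis."""
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    results = sess.run(fetches, feed_dict=feed_dict,
                       options=options, run_metadata=run_metadata)
    # CompGraph is the profiling class imported by the examples above.
    cg = CompGraph(model_name, run_metadata, tf.get_default_graph())

    # Evaluate the runtime shape of every traced tensor, then dump the analysis.
    tensor_dict = cg.get_tensors()
    sorted_keys = sorted(tensor_dict.keys())
    shape_ops = [tf.shape(tensor_dict[key]) for key in sorted_keys]
    shapes = sess.run(shape_ops, feed_dict=feed_dict)
    cg.op_analysis(dict(zip(sorted_keys, shapes)),
                   '{}.pickle'.format(model_name))
    return results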
Code example #21
0
def run_eval(eval_ops, summary_writer, saver):
    """Runs evaluation over FLAGS.num_examples examples.

  Args:
    eval_ops: dict<metric name, tuple(value, update_op)>
    summary_writer: Summary writer.
    saver: Saver.

  Returns:
    dict<metric name, value>, with value being the average over all examples.
  """

    sess = tf.Session()
    if not restore_from_checkpoint(sess, saver):
        return
    tf.logging.info('Checkpoint restored')

    tf.train.start_queue_runners(sess=sess)

    init = tf.global_variables_initializer()
    sess.run(init)
    init = tf.local_variables_initializer()
    sess.run(init)

    metric_names, ops = list(zip(*list(eval_ops.items())))
    value_ops, update_ops = list(zip(*ops))

    value_ops_dict = dict(list(zip(metric_names, value_ops)))

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    tf.logging.info('Start profiling')
    sess.run(update_ops, options=options, run_metadata=run_metadata)
    cg = CompGraph('adversarial_text', run_metadata, tf.get_default_graph())
    tf.logging.info('Profiling finished')

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
        cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items)
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   'adversarial_text.pickle')
    exit(0)

    sv = tf.train.Supervisor(logdir=FLAGS.eval_dir,
                             saver=None,
                             summary_op=None)
    with sv.managed_session(master=FLAGS.master,
                            start_standard_services=False) as sess:
        if not restore_from_checkpoint(sess, saver):
            return
        sv.start_queue_runners(sess)

        # Run update ops
        num_batches = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
        tf.logging.info('Running %d batches for evaluation.', num_batches)
        for i in range(num_batches):
            if (i + 1) % 10 == 0:
                tf.logging.info('Running batch %d/%d...', i + 1, num_batches)
                _log_values(sess, value_ops_dict)
            sess.run(update_ops)

        _log_values(sess, value_ops_dict, summary_writer=summary_writer)
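run_eval relies on the TF1 streaming-metric convention: each entry of eval_ops is a (value, update_op) pair, where repeated runs of update_op accumulate statistics in local variables and value reads the running result. A minimal sketch of that convention with tf.metrics.mean and made-up values:

import tensorflow as tf

values = tf.placeholder(tf.float32)
mean_value, update_op = tf.metrics.mean(values)

with tf.Session() as sess:
    # Streaming metric state lives in local variables.
    sess.run(tf.local_variables_initializer())
    for batch in ([1.0, 2.0], [3.0, 4.0]):
        sess.run(update_op, feed_dict={values: batch})
    print(sess.run(mean_value))  # 2.5, the mean over both batches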
Code example #22
0
def main(_):
    if (FLAGS.input_image is None or FLAGS.output_codes is None
            or FLAGS.model is None):
        print('\nUsage: python encoder.py --input_image=/your/image/here.png '
              '--output_codes=output_codes.pkl --iteration=15 '
              '--model=residual_gru.pb\n\n')
        return

    if FLAGS.iteration < 0 or FLAGS.iteration > 15:
        print('\n--iteration must be between 0 and 15 inclusive.\n')
        return

    with tf.gfile.FastGFile(FLAGS.input_image, mode='rb') as input_image:
        input_image_str = input_image.read()

    with tf.Graph().as_default() as graph:
        # Load the inference model for encoding.
        with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(model_file.read())
        _ = tf.import_graph_def(graph_def, name='')

        input_tensor = graph.get_tensor_by_name('Placeholder:0')
        outputs = [
            graph.get_tensor_by_name(name)
            for name in get_output_tensor_names()
        ]

        input_image = tf.placeholder(tf.string)
        _, ext = os.path.splitext(FLAGS.input_image)
        if ext == '.png':
            decoded_image = tf.image.decode_png(input_image, channels=3)
        elif ext == '.jpeg' or ext == '.jpg':
            decoded_image = tf.image.decode_jpeg(input_image, channels=3)
        else:
            assert False, 'Unsupported file format {}'.format(ext)
        decoded_image = tf.expand_dims(decoded_image, 0)

    with tf.Session(graph=graph) as sess:
        img_array = sess.run(decoded_image,
                             feed_dict={input_image: input_image_str})

        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        results = sess.run(outputs,
                           feed_dict={input_tensor: img_array},
                           options=options,
                           run_metadata=run_metadata)
        cg = CompGraph('image_encoder', run_metadata, tf.get_default_graph())

        cg_tensor_dict = cg.get_tensors()
        cg_sorted_keys = sorted(cg_tensor_dict.keys())
        cg_sorted_items = []
        for cg_key in cg_sorted_keys:
            cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

        cg_sorted_shape = sess.run(cg_sorted_items,
                                   feed_dict={input_tensor: img_array})
        cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                       'image_encoder.pickle')

    results = results[0:FLAGS.iteration + 1]
    int_codes = np.asarray([x.astype(np.int8) for x in results])

    # Convert int codes to binary.
    int_codes = (int_codes + 1) // 2
    export = np.packbits(int_codes.reshape(-1))

    output = io.BytesIO()
    np.savez_compressed(output, shape=int_codes.shape, codes=export)
    with tf.gfile.FastGFile(FLAGS.output_codes, 'w') as code_file:
        code_file.write(output.getvalue())
Code example #23
0
File: prediction_train.py Project: GD06/models
def main(unused_argv):

    print('Constructing models and inputs.')
    with tf.variable_scope('model', reuse=None) as training_scope:
        images, actions, states = build_tfrecord_input(training=True)
        model = Model(images,
                      actions,
                      states,
                      FLAGS.sequence_length,
                      prefix='train')

    with tf.variable_scope('val_model', reuse=None):
        val_images, val_actions, val_states = build_tfrecord_input(
            training=False)
        val_model = Model(val_images,
                          val_actions,
                          val_states,
                          FLAGS.sequence_length,
                          training_scope,
                          prefix='val')

    print('Constructing saver.')
    # Make saver.
    saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES),
                           max_to_keep=0)

    # Make training session.
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    summary_writer = tf.summary.FileWriter(FLAGS.event_log_dir,
                                           graph=sess.graph,
                                           flush_secs=10)

    if FLAGS.pretrained_model:
        saver.restore(sess, FLAGS.pretrained_model)

    tf.train.start_queue_runners(sess)

    tf.logging.info('iteration number, cost')

    # Run training.
    for itr in range(FLAGS.num_iterations):
        # Generate new batch of data.
        feed_dict = {
            model.iter_num: np.float32(itr),
            model.lr: FLAGS.learning_rate
        }
        cost, _, summary_str = sess.run(
            [model.loss, model.train_op, model.summ_op], feed_dict)

        # Print info: iteration #, cost.
        tf.logging.info(str(itr) + ' ' + str(cost))

        if (itr) % VAL_INTERVAL == 2:
            # Run through validation set.
            feed_dict = {
                val_model.lr: 0.0,
                val_model.iter_num: np.float32(itr)
            }

            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            model_name = 'video_prediction_{}'.format(FLAGS.model.lower())

            eval_loss = sess.run(val_model.loss,
                                 feed_dict=feed_dict,
                                 options=options,
                                 run_metadata=run_metadata)
            cg = CompGraph(model_name, run_metadata, tf.get_default_graph())

            cg_tensor_dict = cg.get_tensors()
            cg_sorted_keys = sorted(cg_tensor_dict.keys())
            cg_sorted_items = []
            for cg_key in cg_sorted_keys:
                cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

            cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
            cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                           '{}.pickle'.format(model_name))

            #_, val_summary_str = sess.run([val_model.train_op, val_model.summ_op],
            #                               feed_dict)

            print('Evaluation finished')
            exit(0)

            summary_writer.add_summary(val_summary_str, itr)

        if (itr) % SAVE_INTERVAL == 2:
            tf.logging.info('Saving model.')
            saver.save(sess, FLAGS.output_dir + '/model' + str(itr))

        if (itr) % SUMMARY_INTERVAL:
            summary_writer.add_summary(summary_str, itr)

    tf.logging.info('Saving model.')
    saver.save(sess, FLAGS.output_dir + '/model')
    tf.logging.info('Training complete')
    tf.logging.flush()
Code example #24
0
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = tf.train.get_or_create_global_step()

    ###################
    # Prepare dataset #
    ###################
    dataset = imagenet.get_split(FLAGS.split_name, FLAGS.dataset_dir)
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [dataset_image, label] = provider.get(['image', 'label'])
    dataset_image = preprocess_for_eval(dataset_image, IMAGE_SIZE, IMAGE_SIZE)
    dataset_images, labels = tf.train.batch(
        [dataset_image, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)

    ########################################
    # Define the model and input examples #
    ########################################
    create_model(tf.placeholder(tf.float32, shape=dataset_images.shape))
    input_images = get_input_images(dataset_images)
    logits, _ = create_model(input_images, reuse=True)

    if FLAGS.moving_average_decay is not None:
      variable_averages = tf.train.ExponentialMovingAverage(
          FLAGS.moving_average_decay, tf_global_step)
      variables_to_restore = variable_averages.variables_to_restore(
          slim.get_model_variables())
      variables_to_restore[tf_global_step.op.name] = tf_global_step
    else:
      variables_to_restore = slim.get_variables_to_restore()

    ######################
    # Define the metrics #
    ######################
    predictions = tf.argmax(logits, 1)
    labels = tf.squeeze(labels)
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        'Recall_5': slim.metrics.streaming_sparse_recall_at_k(
            logits, tf.reshape(labels, [-1, 1]), 5),
    })

    ######################
    # Run evaluation     #
    ######################
    if FLAGS.max_num_batches:
      num_batches = FLAGS.max_num_batches
    else:
      # This ensures that we make a single pass over all of the data.
      num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
      checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
      checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)

    sess = tf.Session()
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint_path)

    tf.logging.info('Checkpoint restored')

    tf.train.start_queue_runners(sess=sess)

    init = tf.global_variables_initializer()
    sess.run(init)
    init = tf.local_variables_initializer()
    sess.run(init)

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    tf.logging.info('Start Profiling')
    eval_results = sess.run(list(names_to_updates.values()),
                            options=options, run_metadata=run_metadata)
    cg = CompGraph('adv_imagenet_models', run_metadata, tf.get_default_graph())
    tf.logging.info('Profiling finished')

    cg_tensor_dict = cg.get_tensors()
    cg_sorted_keys = sorted(cg_tensor_dict.keys())
    cg_sorted_items = []
    for cg_key in cg_sorted_keys:
      cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

    cg_sorted_shape = sess.run(cg_sorted_items)
    cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                   'adv_imagenet_models.pickle')

    top1_accuracy = eval_results[0]
    top5_accuracy = eval_results[1]

    #top1_accuracy, top5_accuracy = slim.evaluation.evaluate_once(
    #    master=FLAGS.master,
    #    checkpoint_path=checkpoint_path,
    #    logdir=None,
    #    summary_op=None,
    #    num_evals=num_batches,
    #    eval_op=list(names_to_updates.values()),
    #    final_op=[names_to_values['Accuracy'], names_to_values['Recall_5']],
    #    variables_to_restore=variables_to_restore)

    print('Top1 Accuracy: ', top1_accuracy)
    print('Top5 Accuracy: ', top5_accuracy)
Code example #25
0
File: swivel.py Project: GD06/models
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    # If we have ps_hosts, then we'll assume that this is going to be a
    # distributed training run.  Configure the cluster appropriately.  Otherwise,
    # we just do everything in-process.
    if FLAGS.ps_hosts:
        cluster = tf.train.ClusterSpec({
            'ps': FLAGS.ps_hosts.split(','),
            'worker': FLAGS.worker_hosts.split(','),
        })

        if FLAGS.job_name == 'ps':
            # Ignore the GPU if we're the parameter server. This lets the PS run on
            # the same machine as a worker.
            config = tf.ConfigProto(device_count={'GPU': 0})
        elif FLAGS.job_name == 'worker':
            config = tf.ConfigProto(gpu_options=tf.GPUOptions(
                visible_device_list='%d' %
                FLAGS.gpu_device, allow_growth=True))
        else:
            raise ValueError('unknown job name "%s"' % FLAGS.job_name)

        server = tf.train.Server(cluster,
                                 job_name=FLAGS.job_name,
                                 task_index=FLAGS.task_index,
                                 config=config)

        if FLAGS.job_name == 'ps':
            return server.join()

        device_setter = tf.train.replica_device_setter(
            worker_device='/job:worker/task:%d' % FLAGS.task_index,
            cluster=cluster)

    else:
        server = None
        device_setter = tf.train.replica_device_setter(0)

    # Build the graph.
    with tf.Graph().as_default():
        with tf.device(device_setter):
            model = Model(FLAGS.input_base_path, FLAGS)

            # If an eval path is present, then create eval operators and set up scalar
            # summaries to report on the results.  Run the evals on the CPU since
            # the analogy eval requires a fairly enormous tensor to be allocated to
            # do the nearest neighbor search.
            if FLAGS.eval_base_path:
                wordsim_filenames = glob.glob(
                    os.path.join(FLAGS.eval_base_path, '*.ws.tab'))

                for filename in wordsim_filenames:
                    name = os.path.basename(filename).split('.')[0]
                    with tf.device(tf.DeviceSpec(device_type='CPU')):
                        op = model.wordsim_eval_op(filename)
                        tf.summary.scalar(name, op)

                analogy_filenames = glob.glob(
                    os.path.join(FLAGS.eval_base_path, '*.an.tab'))

                for filename in analogy_filenames:
                    name = os.path.basename(filename).split('.')[0]
                    with tf.device(tf.DeviceSpec(device_type='CPU')):
                        op = model.analogy_eval_op(filename)
                        tf.summary.scalar(name, op)

            tf.summary.scalar('loss', model.loss_op)

        # Train on, soldier.
        #supervisor = tf.train.Supervisor(
        #    logdir=FLAGS.output_base_path,
        #    is_chief=(FLAGS.task_index == 0),
        #    save_summaries_secs=60,
        #    recovery_wait_secs=5)

        max_step = FLAGS.num_epochs * model.steps_per_epoch
        master = server.target if server else ''
        #with supervisor.managed_session(master) as session:
        with tf.Session() as sess:

            print('Is finalized? ', tf.get_default_graph().finalized)
            tf.train.start_queue_runners(sess=sess)
            init = tf.global_variables_initializer()
            sess.run(init)
            init = tf.local_variables_initializer()
            sess.run(init)

            local_step = 0
            global_step = sess.run(model.global_step)
            #while not supervisor.should_stop() and global_step < max_step:
            while global_step < max_step:
                global_step, loss, _ = sess.run(
                    [model.global_step, model.loss_op, model.train_op])

                if not np.isfinite(loss):
                    raise ValueError('non-finite cost at step %d' %
                                     global_step)

                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                loss = sess.run(model.loss_op,
                                options=options,
                                run_metadata=run_metadata)
                cg = CompGraph('swivel', run_metadata, tf.get_default_graph())

                cg_tensor_dict = cg.get_tensors()
                cg_sorted_keys = sorted(cg_tensor_dict.keys())
                cg_sorted_items = []
                for cg_key in cg_sorted_keys:
                    cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

                cg_sorted_shape = sess.run(cg_sorted_items)
                cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                               'swivel.pickle')
                exit(0)

                local_step += 1
                if local_step % 10 == 0:
                    tf.logging.info(
                        'local_step=%d global_step=%d loss=%.1f, %.1f%% complete',
                        local_step, global_step, loss,
                        100.0 * global_step / max_step)

            if FLAGS.task_index == 0:
                #supervisor.saver.save(
                #    session, supervisor.save_path, global_step=global_step)
                #model.write_embeddings(FLAGS, session)
                model.write_embeddings(FLAGS, sess)
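The ps_hosts/worker_hosts branch above is the standard TF1 between-graph replication setup: parameter servers block in server.join(), while each worker builds the graph under a replica_device_setter so variables land on the PS tasks. A sketch with a made-up single-machine cluster (hosts and ports are not from the source) to make the device placement concrete:

import tensorflow as tf

# Hypothetical one-PS, two-worker cluster.
cluster = tf.train.ClusterSpec({
    'ps': ['localhost:2222'],
    'worker': ['localhost:2223', 'localhost:2224'],
})

# Worker task 0 pins its ops to itself and its variables to the PS job,
# mirroring what main() does with FLAGS.task_index.
device_setter = tf.train.replica_device_setter(
    worker_device='/job:worker/task:0', cluster=cluster)

with tf.device(device_setter):
    w = tf.get_variable('w', shape=[10, 10])    # placed on /job:ps/task:0
    y = tf.matmul(tf.ones([1, 10]), w)          # placed on /job:worker/task:0

print(w.device, y.device)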
Code example #26
0
File: eval_rotator.py Project: GD06/models
def main(argv=()):
    del argv  # Unused.
    #eval_dir = os.path.join(FLAGS.checkpoint_dir,
    #                        FLAGS.model_name, 'train')
    #log_dir = os.path.join(FLAGS.checkpoint_dir,
    #                       FLAGS.model_name, 'eval')

    #if not os.path.exists(eval_dir):
    #  os.makedirs(eval_dir)
    #if not os.path.exists(log_dir):
    #  os.makedirs(log_dir)

    if FLAGS.step_size < FLAGS.num_views:
        raise ValueError(
            'step_size must not be less than num_views.')

    g = tf.Graph()
    with g.as_default():
        ##########
        ## data ##
        ##########
        val_data = model.get_inputs(FLAGS.inp_dir,
                                    FLAGS.dataset_name,
                                    'val',
                                    FLAGS.batch_size,
                                    FLAGS.image_size,
                                    is_training=False)
        inputs = model.preprocess(val_data, FLAGS.step_size)
        ###########
        ## model ##
        ###########
        model_fn = model.get_model_fn(FLAGS, is_training=False)
        outputs = model_fn(inputs)
        #############
        ## metrics ##
        #############
        names_to_values, names_to_updates = model.get_metrics(
            inputs, outputs, FLAGS)
        del names_to_values
        ################
        ## evaluation ##
        ################
        num_batches = int(val_data['num_samples'] / FLAGS.batch_size)

        sess = tf.Session()
        tf.train.start_queue_runners(sess=sess)
        saver = tf.train.Saver()

        def restore_from_checkpoint(sess, saver):
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if not ckpt or not ckpt.model_checkpoint_path:
                return False

            saver.restore(sess, ckpt.model_checkpoint_path)
            return True

        if not restore_from_checkpoint(sess, saver):
            raise NotImplementedError

        init = tf.global_variables_initializer()
        sess.run(init)
        init = tf.local_variables_initializer()
        sess.run(init)

        for i in range(num_batches):
            print('Running {} batch out of {} batches.'.format(i, num_batches))

            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            sess.run(list(names_to_updates.values()),
                     options=options,
                     run_metadata=run_metadata)
            cg = CompGraph('ptn_rotator', run_metadata, tf.get_default_graph())

            cg_tensor_dict = cg.get_tensors()
            cg_sorted_keys = sorted(cg_tensor_dict.keys())
            cg_sorted_items = []
            for cg_key in cg_sorted_keys:
                cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

            cg_sorted_shape = sess.run(cg_sorted_items)
            cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                           'ptn_rotator.pickle')
            exit(0)
Code example #27
0
def main(_):
    if (FLAGS.input_codes is None or FLAGS.model is None):
        print('\nUsage: python entropy_coder_single.py --model=progressive '
              '--model_config=model_config.json '
              '--iteration=15\n\n')
        return

    #if FLAGS.iteration < -1 or FLAGS.iteration > 15:
    #  print ('\n--iteration must be between 0 and 15 inclusive, or -1 to infer '
    #         'from file.\n')
    #  return
    #iteration = FLAGS.iteration

    if not tf.gfile.Exists(FLAGS.input_codes):
        print('\nInput codes not found.\n')
        return

    with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:
        contents = code_file.read()
        loaded_codes = np.load(io.BytesIO(contents))
        assert 'codes' in loaded_codes.files and 'shape' in loaded_codes.files
        loaded_shape = loaded_codes['shape']
        loaded_array = loaded_codes['codes']

        # Unpack and recover code shapes.
        unpacked_codes = np.reshape(
            np.unpackbits(loaded_array)[:np.prod(loaded_shape)], loaded_shape)

        numpy_int_codes = unpacked_codes.transpose([1, 2, 3, 0, 4])
        numpy_int_codes = numpy_int_codes.reshape([
            numpy_int_codes.shape[0], numpy_int_codes.shape[1],
            numpy_int_codes.shape[2], -1
        ])
        numpy_codes = numpy_int_codes.astype(np.float32) * 2.0 - 1.0

    with tf.Graph().as_default() as graph:
        # TF tensor to hold the binary codes to losslessly compress.
        batch_size = 1
        codes = tf.placeholder(tf.float32, shape=numpy_codes.shape)

        # Create the entropy coder model.
        global_step = None
        optimizer = None
        model = model_factory.GetModelRegistry().CreateModel(FLAGS.model)
        model_config_string = config_helper.GetConfigString(FLAGS.model_config)
        model.Initialize(global_step, optimizer, model_config_string)
        model.BuildGraph(codes)

        saver = tf.train.Saver(sharded=True,
                               keep_checkpoint_every_n_hours=12.0)

        with tf.Session(graph=graph) as sess:
            # Initialize local variables.
            sess.run(tf.local_variables_initializer())

            # Restore model variables.
            saver.restore(sess, FLAGS.checkpoint)

            tf_tensors = {'code_length': model.average_code_length}
            feed_dict = {codes: numpy_codes}

            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            np_tensors = sess.run(tf_tensors,
                                  feed_dict=feed_dict,
                                  options=options,
                                  run_metadata=run_metadata)
            cg = CompGraph('entropy_coder', run_metadata,
                           tf.get_default_graph())

            cg_tensor_dict = cg.get_tensors()
            cg_sorted_keys = sorted(cg_tensor_dict.keys())
            cg_sorted_items = []
            for cg_key in cg_sorted_keys:
                cg_sorted_items.append(tf.shape(cg_tensor_dict[cg_key]))

            cg_sorted_shape = sess.run(cg_sorted_items, feed_dict=feed_dict)
            cg.op_analysis(dict(zip(cg_sorted_keys, cg_sorted_shape)),
                           'entropy_coder.pickle')

            print('Additional compression ratio: {}'.format(
                np_tensors['code_length']))