Example #1
import tensorflow as tf
from tensorflow import app
from tensorflow.contrib import slim

# Project-local modules from the example this snippet was taken from;
# they are assumed to be importable from the same directory.
import common_flags
import data_provider

FLAGS = tf.app.flags.FLAGS


def main(_):
    # Create the eval log directory if it does not exist yet.
    if not tf.gfile.Exists(FLAGS.eval_log_dir):
        tf.gfile.MakeDirs(FLAGS.eval_log_dir)

    dataset = common_flags.create_dataset(FLAGS.dataset_name,
                                          FLAGS.dataset_split_name)
    model = common_flags.create_model(num_classes=FLAGS.num_classes)
    data = data_provider.get_data(dataset,
                                  FLAGS.model_name,
                                  FLAGS.batch_size,
                                  is_training=False,
                                  height=FLAGS.height,
                                  width=FLAGS.width)
    logits, endpoints = model.create_model(data.images,
                                           num_classes=FLAGS.num_classes,
                                           is_training=False)
    eval_ops = model.create_summary(data, logits, is_training=False)
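    # evaluation_loop restores the global step from the checkpoint, so the
    # variable has to exist in this graph.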
    slim.get_or_create_global_step()
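    # Let GPU memory grow on demand instead of reserving it all up front.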
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        checkpoint_dir=FLAGS.train_dir,
        logdir=FLAGS.eval_log_dir,
        eval_op=eval_ops,
        num_evals=FLAGS.num_evals,
        eval_interval_secs=FLAGS.eval_interval_secs,
        max_number_of_evaluations=FLAGS.number_of_steps,
        session_config=session_config)

if __name__ == '__main__':
    app.run()
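Everything this script does is driven by command-line flags. The real
definitions live in the project's common_flags module, so the names and
defaults below are assumptions; a minimal sketch using TF1's flags API
would look like this:

import tensorflow as tf

flags = tf.app.flags

# Hypothetical defaults; the real values come from common_flags.
flags.DEFINE_string('master', '',
                    'BNS name of the TensorFlow master to use.')
flags.DEFINE_string('train_dir', '/tmp/train',
                    'Directory with checkpoints produced during training.')
flags.DEFINE_string('eval_log_dir', '/tmp/eval',
                    'Directory where evaluation summaries are written.')
flags.DEFINE_integer('num_evals', 1000,
                     'Number of batches to evaluate per run.')
flags.DEFINE_integer('eval_interval_secs', 60,
                     'Seconds to wait between evaluation runs.')

FLAGS = flags.FLAGS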
Example #2
    embedding_tensor = sess.graph.get_tensor_by_name(
        vggish_params.OUTPUT_TENSOR_NAME)

    # Run inference to get the raw VGGish embeddings, then apply the same
    # postprocessing (PCA and quantization) used for the training data.
    [embedding_batch] = sess.run([embedding_tensor],
                                 feed_dict={features_tensor: examples_batch})
    postprocessed_batch = pproc.postprocess(embedding_batch)
    num_frames_batch_val = np.array([postprocessed_batch.shape[0]],
                                    dtype=np.int32)

    # Pad the clip to the fixed 300-frame input that the YouTube-8M models
    # expect, dequantizing the uint8 features back to floats in [-2, 2].
    video_batch_val = np.zeros((1, 300, 128), dtype=np.float32)
    video_batch_val[0, 0:postprocessed_batch.shape[0], :] = utils.Dequantize(
        postprocessed_batch.astype(float), 2, -2)

    predicted_class = inference(video_batch_val, num_frames_batch_val,
                                checkpoint_file, train_dir, output_file)
    # Reset the default graph so repeated calls start from a clean slate.
    tf.reset_default_graph()
    return predicted_class


def main(unused_argv):
    predicted_class = extract_n_predict(FLAGS.input_wav_file, FLAGS.pca_params,
                                        FLAGS.checkpoint, FLAGS.checkpoint_file,
                                        FLAGS.train_dir, FLAGS.output_file)
    print(predicted_class)


if __name__ == '__main__':
    app.run(main)
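utils.Dequantize above maps the postprocessor's uint8 output back into the
float range the models were trained on. A sketch along the lines of the
YouTube-8M starter-code helper (treat the exact constants as assumptions):

def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
    """Maps uint8 values in [0, 255] back to floats in
    [min_quantized_value, max_quantized_value]."""
    assert max_quantized_value > min_quantized_value
    quantized_range = max_quantized_value - min_quantized_value
    scalar = quantized_range / 255.0
    bias = (quantized_range / 512.0) + min_quantized_value
    return feat_vector * scalar + bias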
Example #3
    return example

def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if not FLAGS.output_dir:
    raise ValueError("'output_dir' was not specified. "
                     "Unable to continue with inference.")

  if not FLAGS.input_data_pattern:
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.model_checkpoint_path, FLAGS.input_data_pattern,
      FLAGS.output_dir, FLAGS.batch_size, FLAGS.top_k)


if __name__ == "__main__":
  app.run()
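utils.GetListOfFeatureNamesAndSizes splits the comma-separated flag values
into parallel lists. Roughly, following the YouTube-8M starter-code helper
(a sketch, not the verbatim implementation):

def GetListOfFeatureNamesAndSizes(feature_names, feature_sizes):
  """Turns 'rgb,audio' / '1024,128' style strings into parallel lists."""
  list_of_feature_names = [name.strip() for name in feature_names.split(',')]
  list_of_feature_sizes = [int(size) for size in feature_sizes.split(',')]
  if len(list_of_feature_names) != len(list_of_feature_sizes):
    raise ValueError('length of feature_names (=%d) != length of '
                     'feature_sizes (=%d)' % (len(list_of_feature_names),
                                              len(list_of_feature_sizes)))
  return list_of_feature_names, list_of_feature_sizes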
Example #4
        FLAGS.video_file_feature_key: _bytes_feature(_make_bytes(
            map(ord, video_file))),
        'mean_' + FLAGS.image_feature_key: tf.train.Feature(
            float_list=tf.train.FloatList(value=mean_rgb_features)),
    }

    if FLAGS.insert_zero_audio_features:
      # Emit an all-zero 128-D audio embedding per frame so models trained
      # with audio features still receive the input shape they expect.
      zero_vec = [0] * 128
      feature_list['audio'] = tf.train.FeatureList(
          feature=[_bytes_feature(_make_bytes(zero_vec))] * len(rgb_features))
      context_features['mean_audio'] = tf.train.Feature(
          float_list=tf.train.FloatList(value=zero_vec))

    if FLAGS.skip_frame_level_features:
      example = tf.train.SequenceExample(
          context=tf.train.Features(feature=context_features))
    else:
      example = tf.train.SequenceExample(
          context=tf.train.Features(feature=context_features),
          feature_lists=tf.train.FeatureLists(feature_list=feature_list))
    writer.write(example.SerializeToString())
    total_written += 1

  writer.close()
  print('Successfully encoded %i out of %i videos' % (
      total_written, total_written + total_error))


if __name__ == '__main__':
  app.run(main)
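The _bytes_feature and _make_bytes helpers referenced above are small
wrappers around the tf.train proto types. A sketch of how they are commonly
written in the YouTube-8M feature-extractor code (assume this shape, not the
verbatim source):

import tensorflow as tf

def _make_bytes(int_array):
  """Packs a list of ints in [0, 255] into a bytes object (Py2/Py3 safe)."""
  if bytes == str:  # Python 2: bytes is an alias for str
    return ''.join(map(chr, int_array))
  else:
    return bytes(int_array)

def _bytes_feature(value):
  """Wraps a raw bytes value in a tf.train.Feature."""
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))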