Example #1
def evaluate():
    tf.set_random_seed(0)  # for reproducibility

    # Write json of flags
    model_flags_path = os.path.join(FLAGS.train_dir, "model_flags.json")
    if not os.path.exists(model_flags_path):
        raise IOError(("Cannot find file %s. Did you run train.py on the same "
                       "--train_dir?") % model_flags_path)
    flags_dict = json.loads(open(model_flags_path).read())

    with tf.Graph().as_default():
        # convert feature_names and feature_sizes to lists of values
        feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
            flags_dict["feature_names"], flags_dict["feature_sizes"])

        if flags_dict["frame_features"]:
            reader = readers.YT8MFrameFeatureReader(
                feature_names=feature_names, feature_sizes=feature_sizes)
        else:
            reader = readers.YT8MAggregatedFeatureReader(
                feature_names=feature_names, feature_sizes=feature_sizes)

        model = find_class_by_name(flags_dict["model"],
                                   [frame_level_models, video_level_models])()
        label_loss_fn = find_class_by_name(flags_dict["label_loss"],
                                           [losses])()

        if FLAGS.eval_data_pattern == "":
            raise IOError("'eval_data_pattern' was not specified. " +
                          "Nothing to evaluate.")

        build_graph(reader=reader,
                    model=model,
                    eval_data_pattern=FLAGS.eval_data_pattern,
                    label_loss_fn=label_loss_fn,
                    num_readers=FLAGS.num_readers,
                    batch_size=FLAGS.batch_size)
        logging.info("built evaluation graph")
        video_id_batch = tf.get_collection("video_id_batch")[0]
        prediction_batch = tf.get_collection("predictions")[0]
        label_batch = tf.get_collection("labels")[0]
        loss = tf.get_collection("loss")[0]
        summary_op = tf.get_collection("summary_op")[0]

        saver = tf.train.Saver(tf.global_variables())
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=tf.get_default_graph())

        evl_metrics = eval_util.EvaluationMetrics(reader.num_classes,
                                                  FLAGS.top_k)

        last_global_step_val = -1
        with tf.device("/gpu:0"):
            while True:
                last_global_step_val = evaluation_loop(
                    video_id_batch, prediction_batch, label_batch, loss,
                    summary_op, saver, summary_writer, evl_metrics,
                    last_global_step_val)
                if FLAGS.run_once:
                    break
Example #2
def evaluate():
    tf.set_random_seed(0)  # for reproducibility
    with tf.Graph().as_default():
        # convert feature_names and feature_sizes to lists of values
        feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
            FLAGS.feature_names, FLAGS.feature_sizes)

        # prepare a reader for each single model prediction result
        all_readers = []

        all_patterns = [
            x.strip()
            for x in FLAGS.eval_data_patterns.strip().strip(",").split(",")
        ]
        for i in xrange(len(all_patterns)):
            reader = readers.EnsembleReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
            all_readers.append(reader)

        input_reader = None
        input_data_pattern = None
        if FLAGS.input_data_pattern is not None:
            input_reader = readers.EnsembleReader(feature_names=["input"],
                                                  feature_sizes=[1024 + 128])
            input_data_pattern = FLAGS.input_data_pattern

        # find the model
        model = find_class_by_name(FLAGS.model, [ensemble_level_models])()
        label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()

        if FLAGS.eval_data_patterns == "":
            raise IOError("'eval_data_patterns' was not specified. " +
                          "Nothing to evaluate.")

        build_graph(all_readers=all_readers,
                    input_reader=input_reader,
                    all_eval_data_patterns=all_patterns,
                    input_data_pattern=input_data_pattern,
                    model=model,
                    label_loss_fn=label_loss_fn,
                    batch_size=FLAGS.batch_size)
        logging.info("built evaluation graph")
        video_id_batch = tf.get_collection("video_id_batch")[0]
        prediction_batch = tf.get_collection("predictions")[0]
        label_batch = tf.get_collection("labels")[0]
        loss = tf.get_collection("loss")[0]
        summary_op = tf.get_collection("summary_op")[0]

        saver = tf.train.Saver(tf.global_variables())
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=tf.get_default_graph())

        evl_metrics = eval_util.EvaluationMetrics(FLAGS.num_classes,
                                                  FLAGS.top_k)

        last_global_step_val = -1
        last_global_step_val = evaluation_loop(video_id_batch,
                                               prediction_batch, label_batch,
                                               loss, summary_op, saver,
                                               summary_writer, evl_metrics,
                                               last_global_step_val)
Example #3
def evaluate():
    tf.set_random_seed(0)  # for reproducibility
    with tf.Graph().as_default():
        # convert feature_names and feature_sizes to lists of values
        feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
            FLAGS.feature_names, FLAGS.feature_sizes)

        if FLAGS.frame_features:
            reader = readers.YT8MFrameFeatureReader(
                feature_names=feature_names, feature_sizes=feature_sizes)
        else:
            reader = readers.YT8MAggregatedFeatureReader(
                feature_names=feature_names, feature_sizes=feature_sizes)

        model = find_class_by_name(FLAGS.model,
                                   [frame_level_models, video_level_models])()
        label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()

        if FLAGS.eval_data_pattern == "":
            raise IOError("'eval_data_pattern' was not specified. " +
                          "Nothing to evaluate.")

        build_graph(reader=reader,
                    model=model,
                    eval_data_pattern=FLAGS.eval_data_pattern,
                    label_loss_fn=label_loss_fn,
                    num_readers=FLAGS.num_readers,
                    batch_size=FLAGS.batch_size)
        logging.info("built evaluation graph")
        video_id_batch = tf.get_collection("video_id_batch")[0]
        prediction_batch = tf.get_collection("predictions")[0]
        ### Newly
        # coarse_prediction_batch = tf.get_collection("coarse_predictions")[0]
        # coarse_label_batch = tf.get_collection("coarse_labels")[0]
        # coarse_loss = tf.get_collection("coarse_loss")[0]
        ###
        label_batch = tf.get_collection("labels")[0]
        loss = tf.get_collection("loss")[0]
        summary_op = tf.get_collection("summary_op")[0]

        saver = tf.train.Saver(tf.global_variables())
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=tf.get_default_graph())

        evl_metrics = eval_util.EvaluationMetrics(reader.num_classes,
                                                  FLAGS.top_k)

        last_global_step_val = -1
        while True:
            last_global_step_val = evaluation_loop(video_id_batch,
                                                   prediction_batch,
                                                   label_batch, loss,
                                                   summary_op, saver,
                                                   summary_writer, evl_metrics,
                                                   last_global_step_val)
            ### Newly
            # coarse_prediction_batch, coarse_label_batch, coarse_loss)
            ###
            if FLAGS.run_once:
                break
Example #4
def evaluate(preds, labels, loss):
    evl_metrics = eval_util.EvaluationMetrics(3862, 20)

    evl_metrics.clear()
    iteration_info_dict = evl_metrics.accumulate(preds, labels, loss)
    # calculate the metrics for the entire epoch
    epoch_info_dict = evl_metrics.get()
    return epoch_info_dict['gap']
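
# A minimal usage sketch (not part of the original example): preds and labels
# are assumed to be dense numpy arrays of shape [batch_size, 3862], and the
# per-example loss can simply be zeros, as in the ensemble example further below.
import numpy as np

batch_size = 8
preds = np.random.rand(batch_size, 3862).astype(np.float32)
labels = np.zeros((batch_size, 3862), dtype=int)
labels[np.arange(batch_size), np.random.randint(0, 3862, size=batch_size)] = 1
print("GAP@20: " + str(evaluate(preds, labels, np.zeros(batch_size))))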
Example #5
File: valid.py  Project: ytann/yt8m
    def cal_gap(self):
        self.predict()
        evl_metrics = eval_util.EvaluationMetrics(4716, self.top_k)
        predictions_val = ensemble(self.pred_list, self.weights)

        print(predictions_val.shape)
        iteration_info_dict = evl_metrics.accumulate(
            predictions_val, self.labels_val, np.zeros(predictions_val.shape[0]))
        epoch_info_dict = evl_metrics.get()
        print(("GAP@%d:" % self.top_k) + str(epoch_info_dict['gap']))
Example #6
def evaluate():
    tf.set_random_seed(0)  # for reproducibility
    with tf.Graph().as_default():
        if FLAGS.use_mnist:
            reader = readers.MnistReader()
        else:
            reader = readers.FaceReader()

        generator_model = find_class_by_name(FLAGS.generator_model, [models])()
        discriminator_model = find_class_by_name(FLAGS.discriminator_model,
                                                 [models])()
        label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()

        if FLAGS.eval_data_pattern == "":
            raise IOError("'eval_data_pattern' was not specified. " +
                          "Nothing to evaluate.")

        build_graph(reader=reader,
                    generator_model=generator_model,
                    discriminator_model=discriminator_model,
                    eval_data_pattern=FLAGS.eval_data_pattern,
                    label_loss_fn=label_loss_fn,
                    num_readers=FLAGS.num_readers,
                    batch_size=FLAGS.batch_size)
        logging.info("built evaluation graph")
        p_fake_batch = tf.get_collection("p_for_fake")[0]
        p_real_batch = tf.get_collection("p_for_data")[0]
        G_loss = tf.get_collection("G_loss")[0]
        D_loss = tf.get_collection("D_loss")[0]
        noise_input = tf.get_collection("noise_input_placeholder")[0]
        summary_op = tf.get_collection("summary_op")[0]

        saver = tf.train.Saver(tf.global_variables())
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=tf.get_default_graph())

        evl_metrics = eval_util.EvaluationMetrics()

        last_global_step_val = -1
        while True:
            last_global_step_val = evaluation_loop(p_fake_batch, p_real_batch,
                                                   G_loss, D_loss, noise_input,
                                                   summary_op, saver,
                                                   summary_writer, evl_metrics,
                                                   last_global_step_val,
                                                   FLAGS.batch_size)
            if FLAGS.run_once:
                break
Example #7
def evaluate():
    tf.set_random_seed(0)  # for reproducibility

    # Write json of flags
    model_flags_path = os.path.join(FLAGS.train_dir, "model_flags.json")
    if not file_io.file_exists(model_flags_path):
        raise IOError(("Cannot find file %s. Did you run train.py on the same "
                       "--train_dir?") % model_flags_path)
    flags_dict = json.loads(file_io.FileIO(model_flags_path, mode="r").read())

    with tf.Graph().as_default():
        # convert feature_names and feature_sizes to lists of values
        feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
            flags_dict["feature_names"], flags_dict["feature_sizes"])

        if flags_dict["frame_features"]:
            reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                                    feature_sizes=feature_sizes)
        else:
            reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                         feature_sizes=feature_sizes)

        model = find_class_by_name(flags_dict["model"],
                                   [frame_level_models, video_level_models])()
        label_loss_fn = find_class_by_name(flags_dict["label_loss"], [losses])()

        if FLAGS.eval_data_pattern == "":
            raise IOError("'eval_data_pattern' was not specified. " +
                          "Nothing to evaluate.")

        build_graph(
            reader=reader,
            model=model,
            eval_data_pattern=FLAGS.eval_data_pattern,
            label_loss_fn=label_loss_fn,
            num_readers=FLAGS.num_readers,
            batch_size=FLAGS.batch_size)
        logging.info("built evaluation graph")

        summary_writer = tf.summary.FileWriter(
            FLAGS.train_dir, graph=tf.get_default_graph())

        evl_metrics = eval_util.EvaluationMetrics(reader.num_classes, FLAGS.top_k)
Example #8
def evaluate():
    tf.set_random_seed(0)  # for reproducibility
    with tf.Graph().as_default():
        reader = readers.CatsVsDogsFeatureReader()

        model = find_class_by_name(FLAGS.model, [cvd_models])()
        label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()

        if FLAGS.eval_data_pattern == "":
            raise IOError("'eval_data_pattern' was not specified. " +
                          "Nothing to evaluate.")

        build_graph(reader=reader,
                    model=model,
                    eval_data_pattern=FLAGS.eval_data_pattern,
                    label_loss_fn=label_loss_fn,
                    num_readers=FLAGS.num_readers,
                    batch_size=FLAGS.batch_size)
        logging.info("built evaluation graph")
        image_id_batch = tf.get_collection("id_batch")[0]
        prediction_batch = tf.get_collection("predictions")[0]
        label_batch = tf.get_collection("labels")[0]
        loss = tf.get_collection("loss")[0]
        summary_op = tf.get_collection("summary_op")[0]

        saver = tf.train.Saver(tf.global_variables())
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=tf.get_default_graph())

        evl_metrics = eval_util.EvaluationMetrics(reader.num_classes, 2)

        last_global_step_val = -1
        while True:
            last_global_step_val = evaluation_loop(image_id_batch,
                                                   prediction_batch,
                                                   label_batch, loss,
                                                   summary_op, saver,
                                                   summary_writer, evl_metrics,
                                                   last_global_step_val)
            if FLAGS.run_once:
                break
Example #9
def evaluate():
    if os.path.exists(FLAGS.Ensemble_Models + FLAGS.ensemble_output_path):
        shutil.rmtree(FLAGS.Ensemble_Models + FLAGS.ensemble_output_path)
    subdirs = [
        os.path.join(FLAGS.Ensemble_Models, x)
        for x in FLAGS.ensemble_model_path.split(',')
    ]
    flags_dict = []
    save_flag = True
    for subdir in subdirs:
        model_flags_path = os.path.join(
            os.path.join(FLAGS.Ensemble_Models, subdir), "model_flags.json")
        print "Load model from " + model_flags_path + "\n"
        flags_dict.append(
            json.loads(file_io.FileIO(model_flags_path, mode="r").read()))
        # save model_flags.json to the inference directory
        if save_flag:
            if not os.path.exists(FLAGS.Ensemble_Models +
                                  FLAGS.ensemble_output_path):
                os.mkdir(FLAGS.Ensemble_Models + FLAGS.ensemble_output_path)
            shutil.copyfile(
                model_flags_path,
                os.path.join(
                    FLAGS.Ensemble_Models + FLAGS.ensemble_output_path,
                    "model_flags.json"))
            save_flag = False

    g = tf.Graph()
    with g.as_default():
        models = []
        label_loss_fns = []
        readers = []
        model_nums = len(subdirs)
        for m in range(model_nums):
            model, label_loss_fn, reader = get_params(flags_dict[m])
            models.append(model)
            label_loss_fns.append(label_loss_fn)
            readers.append(reader)
        # start build graph
        build_graph(
            reader=readers[0],  # any one of the readers works here
            models=models,
            eval_data_pattern=FLAGS.eval_data_pattern,
            label_loss_fns=label_loss_fns,
            batch_size=FLAGS.batch_size,
            num_readers=FLAGS.num_readers)

        logging.info("built evaluation graph")
        video_id_batch = tf.get_collection("video_id_batch")[0]
        prediction_batch = tf.get_collection("predictions")[0]
        label_batch = tf.get_collection("labels")[0]
        loss = tf.get_collection("loss")[0]
        summary_op = tf.get_collection("summary_op")[0]

        all_vars = tf.global_variables()
        # remove global_step, otherwise inference can't find the uninitialized parameter
        all_vars = [v for v in all_vars if "global_step" not in v.op.name]
        saver = tf.train.Saver(all_vars)

        evl_metrics = eval_util.EvaluationMetrics(readers[0].num_classes,
                                                  FLAGS.top_k)
        summary_writer = tf.summary.FileWriter(FLAGS.Ensemble_Models,
                                               graph=tf.get_default_graph())

        last_global_step_val = -1
        last_global_step_val = evaluation_loop(model_nums, subdirs,
                                               video_id_batch,
                                               prediction_batch, label_batch,
                                               loss, summary_op, saver,
                                               summary_writer, evl_metrics)
Example #10
def evaluate(model, checkpoint_name, k_frame):
    tf.set_random_seed(0)  # for reproducibility

    # Write json of flags
    model_flags_path = os.path.join(FLAGS.train_dir, "model_flags.json")
    if not os.path.exists(model_flags_path):
        raise IOError(("Cannot find file %s. Did you run train.py on the same "
                       "--train_dir?") % model_flags_path)
    flags_dict = json.loads(open(model_flags_path).read())

    with tf.Graph().as_default() as graph:
        # scope_name
        # if FLAGS.model_type=='KD':
        #     scope_name='student'
        # with tf.variable_scope(, reuse=False):
        # convert feature_names and feature_sizes to lists of values
        feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
            flags_dict["feature_names"], flags_dict["feature_sizes"])

        if flags_dict["frame_features"]:
            reader = readers.YT8MFrameFeatureReader(
                feature_names=feature_names, feature_sizes=feature_sizes)
        else:
            reader = readers.YT8MAggregatedFeatureReader(
                feature_names=feature_names, feature_sizes=feature_sizes)

        label_loss_fn = find_class_by_name(flags_dict["label_loss"],
                                           [losses])()

        if FLAGS.eval_data_pattern == "":
            raise IOError("'eval_data_pattern' was not specified. " +
                          "Nothing to evaluate.")

        scope_name = 'teacher'
        if k_frame != 300:
            scope_name = 'student'
        with tf.variable_scope(scope_name, reuse=None):
            build_graph(reader=reader,
                        model=model,
                        eval_data_pattern=FLAGS.eval_data_pattern,
                        label_loss_fn=label_loss_fn,
                        num_readers=FLAGS.num_readers,
                        batch_size=FLAGS.batch_size,
                        k_frame=k_frame)

        # print("See variables in student in EVAL")
        # for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='student'):
        #     print(i)  # i.name if you want just a name

        logging.info("built evaluation graph")
        video_id_batch = tf.get_collection("video_id_batch")[0]
        prediction_batch = tf.get_collection("predictions")[0]
        label_batch = tf.get_collection("labels")[0]
        loss = tf.get_collection("loss")[0]
        summary_op = tf.get_collection("summary_op")[0]

        saver = tf.train.Saver(tf.global_variables())

        if k_frame != 300:
            student_variables = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES, scope='student')
            saver = tf.train.Saver(var_list=student_variables)

        # student_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='student')
        # print("See variables in student")
        # for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='student'):
        #     print(i) # i.name if you want just a name
        # student_saver = tf.train.Saver(var_list=student_variables)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=tf.get_default_graph())

        evl_metrics = eval_util.EvaluationMetrics(reader.num_classes,
                                                  FLAGS.top_k)

        last_global_step_val = -1
        while True:
            last_global_step_val, EvalGAP = evaluation_loop(
                video_id_batch, prediction_batch, label_batch, loss,
                summary_op, saver, summary_writer, evl_metrics,
                last_global_step_val, checkpoint_name)
            if FLAGS.run_once:
                break
        return EvalGAP
Example #11
ground-truth labels, hence for train and validate data sets.
'''


def pairwise(t):
    it = iter(t)
    return izip(it, it)


records = glob.glob("/media/6TB/video/yt8m-v2/video/validate???5.tfrecord")
csv = "val_gatednetvladLF-256k-1024-80-0002-300iter-norelu-basic-gatedmoe-ens2.csv"

num_classes = 3862
batch_size = 1
top_k = 20
evl_metrics = eval_util.EvaluationMetrics(num_classes, top_k)

all_labels = dict()
for record in records:
    for example in tf.python_io.tf_record_iterator(record):
        tf_example = tf.train.Example.FromString(example)
        vid_id = tf_example.features.feature['id'].bytes_list.value[0].decode(
            encoding='UTF-8')
        labels = tf_example.features.feature['labels'].int64_list.value
        all_labels[vid_id] = labels

evl_metrics.clear()
with open(csv, 'r') as fid:
    next(fid)
    for line in fid:
        vid, label_scores = line.split(',')
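        # Hedged continuation sketch (not part of the original snippet; assumes
        # numpy is imported as np): convert the "label score label score ..."
        # string into a dense prediction row via the pairwise() helper above,
        # build the matching dense label row, and accumulate both.
        scores = np.zeros((1, num_classes), dtype=np.float32)
        for label, score in pairwise(label_scores.split()):
            scores[0, int(label)] = float(score)
        dense_labels = np.zeros((1, num_classes), dtype=int)
        dense_labels[0, list(all_labels[vid])] = 1
        evl_metrics.accumulate(scores, dense_labels, np.zeros(1))

print("GAP@%d: %s" % (top_k, evl_metrics.get()['gap']))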
Example #12
File: eval.py  Project: ZouJoshua/cv
def evaluate():

  ema_tensors = None

  if FLAGS.use_EMA:
    latest_checkpoint = get_latest_checkpoint()
    assert latest_checkpoint, "No checkpoint found"

    with tf.device("/cpu:0"):
        saver = tf.train.import_meta_graph(latest_checkpoint + ".meta", clear_devices=True)
        # saver.restore(sess, "../trained_models/attention_frames_v0_EMA/model.ckpt-15512")
    xvars = tf.get_collection("ema_vars")
    assert len(xvars) > 0, "No EMA shadow variables found. Did you train with EMA?"
    ema_tensors = list(set([x.name for x in xvars]))
    tf.reset_default_graph()

  tf.set_random_seed(0)  # for reproducibility

  # Write json of flags
  model_flags_path = os.path.join(FLAGS.train_dir, "model_flags.json")
  if not os.path.exists(model_flags_path):
    raise IOError(("Cannot find file %s. Did you run train.py on the same "
                   "--train_dir?") % model_flags_path)
  flags_dict = json.loads(open(model_flags_path).read())

  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        flags_dict["feature_names"], flags_dict["feature_sizes"])

    if flags_dict["frame_features"]:
      reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                              feature_sizes=feature_sizes)
    else:
      reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                   feature_sizes=feature_sizes)

    model = find_class_by_name(flags_dict["model"],
        [frame_level_models, video_level_models])()
    label_loss_fn = find_class_by_name(flags_dict["label_loss"], [losses])()

    if FLAGS.eval_data_pattern == "":
      raise IOError("'eval_data_pattern' was not specified. " +
                     "Nothing to evaluate.")

    build_graph(
        reader=reader,
        model=model,
        eval_data_pattern=FLAGS.eval_data_pattern,
        label_loss_fn=label_loss_fn,
        num_readers=FLAGS.num_readers,
        batch_size=FLAGS.batch_size)
    logging.info("built evaluation graph")
    video_id_batch = tf.get_collection("video_id_batch")[0]
    prediction_batch = tf.get_collection("predictions")[0]
    label_batch = tf.get_collection("labels")[0]
    loss = tf.get_collection("loss")[0]
    summary_op = tf.get_collection("summary_op")[0]

    saver = tf.train.Saver(tf.global_variables())
    summary_writer = tf.summary.FileWriter(
        FLAGS.train_dir, graph=tf.get_default_graph())

    evl_metrics = eval_util.EvaluationMetrics(reader.num_classes, FLAGS.top_k)

    last_global_step_val = -1
    while True:
      last_global_step_val = evaluation_loop(video_id_batch, prediction_batch,
                                             label_batch, loss, summary_op,
                                             saver, summary_writer, evl_metrics,
                                             last_global_step_val, ema_tensors)
      if FLAGS.run_once:
        break
Example #13
    def run(self):

        tf.set_random_seed(0)  # for reproducibility

        # Setup logging & log the version.
        tf.logging.set_verbosity(logging.INFO)
        logging.info("Tensorflow version: {}.".format(tf.__version__))

        if os.environ.get('CUDA_VISIBLE_DEVICES') is None:
            if FLAGS.eval_num_gpu == 0:
                os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
            else:
                os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
                    map(str, range(FLAGS.eval_num_gpu)))

        # self.train_dir = join(FLAGS.path, FLAGS.train_dir)
        self.train_dir = FLAGS.train_dir

        pp = pprint.PrettyPrinter(indent=2, compact=True)
        logging.info(pp.pformat(FLAGS.values()))

        with tf.Graph().as_default():
            if FLAGS.eval_num_gpu:
                self.batch_size = \
                    FLAGS.eval_batch_size * FLAGS.eval_num_gpu
            else:
                self.batch_size = FLAGS.eval_batch_size

            self.reader = find_class_by_name(FLAGS.reader,
                                             [readers])(self.batch_size,
                                                        is_training=False)
            self.model = find_class_by_name(FLAGS.model, [models])()
            self.loss_fn = find_class_by_name(FLAGS.loss, [losses])()

            data_pattern = FLAGS.data_pattern
            if data_pattern == "":
                raise IOError("'data_pattern' was not specified. "
                              "Nothing to evaluate.")

            self.build_graph()
            logging.info("Built evaluation graph")

            self.saver = tf.train.Saver(tf.global_variables())
            filename_suffix = "_{}_{}".format(
                "eval",
                re.findall("[a-z0-9]+", data_pattern.lower())[0])
            self.summary_writer = tf.summary.FileWriter(
                self.train_dir,
                filename_suffix=filename_suffix,
                graph=tf.get_default_graph())

            evl_metrics = eval_util.EvaluationMetrics(self.reader.n_classes,
                                                      20)

            self.counter = 0
            last_global_step_val = 0
            while self.counter < FLAGS.stopped_at_n:
                last_global_step_val = self.eval_loop(last_global_step_val,
                                                      evl_metrics)
            logging.info("Done evaluation -- number of eval reached.")
Example #14
def evaluate():
    tf.compat.v1.set_random_seed(0)  # for reproducibility

    # Write json of flags
    model_flags_path = os.path.join(FLAGS.train_dir, "model_flags.json")
    if not file_io.file_exists(model_flags_path):
        raise IOError(("Cannot find file %s. Did you run train.py on the same "
                       "--train_dir?") % model_flags_path)
    flags_dict = json.loads(open(model_flags_path).read())

    with tf.Graph().as_default():
        # convert feature_names and feature_sizes to lists of values
        feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
            flags_dict["feature_names"], flags_dict["feature_sizes"])

        if flags_dict["frame_features"]:
            reader = readers.YT8MFrameFeatureReader(
                feature_names=feature_names,
                feature_sizes=feature_sizes,
                segment_labels=FLAGS.segment_labels,
            )
        else:
            reader = readers.YT8MAggregatedFeatureReader(
                feature_names=feature_names, feature_sizes=feature_sizes)

        model = find_class_by_name(
            flags_dict["model"],
            [frame_level_models, video_level_models, nextvlad])()
        label_loss_fn = find_class_by_name(flags_dict["label_loss"],
                                           [losses])()

        if not FLAGS.eval_data_pattern:
            raise IOError("'eval_data_pattern' was not specified. " +
                          "Nothing to evaluate.")

        build_graph(
            reader=reader,
            model=model,
            eval_data_pattern=FLAGS.eval_data_pattern,
            label_loss_fn=label_loss_fn,
            num_readers=FLAGS.num_readers,
            batch_size=FLAGS.batch_size,
        )
        logging.info("built evaluation graph")
        # xxx 2018
        # video_id_batch = tf.compat.v1.get_collection("video_id_batch")[0]
        # prediction_batch = tf.compat.v1.get_collection("predictions")[0]
        # label_batch = tf.compat.v1.get_collection("labels")[0]
        # loss = tf.compat.v1.get_collection("loss")[0]
        # summary_op = tf.compat.v1.get_collection("summary_op")[0]
        # A dict of tensors to be run in Session.
        fetches = {
            "video_id": tf.compat.v1.get_collection("video_id_batch")[0],
            "predictions": tf.compat.v1.get_collection("predictions")[0],
            "labels": tf.compat.v1.get_collection("labels")[0],
            "loss": tf.compat.v1.get_collection("loss")[0],
            "summary": tf.compat.v1.get_collection("summary_op")[0],
        }
        if FLAGS.segment_labels:
            fetches["label_weights"] = tf.compat.v1.get_collection(
                "label_weights")[0]

        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
        summary_writer = tf.compat.v1.summary.FileWriter(
            os.path.join(FLAGS.train_dir, "eval"),
            graph=tf.compat.v1.get_default_graph(),
        )

        evl_metrics = eval_util.EvaluationMetrics(reader.num_classes,
                                                  FLAGS.top_k, None)

        last_global_step_val = -1
        while True:
            last_global_step_val = evaluation_loop(fetches, saver,
                                                   summary_writer, evl_metrics,
                                                   last_global_step_val)
            if FLAGS.run_once:
                break
Example #15
File: test.py  Project: Ravoxsg/DL2W
    return dense_predictions


def sparse_labels_to_dense(sparse_labels, num_classes):
    dense_labels = np.zeros([num_classes], dtype=int)

    for label in sparse_labels:
        dense_labels[label] = 1

    return dense_labels


with open(labels_file) as f:
    labels = json.load(f)

eval = eval_util.EvaluationMetrics(4716, 20)

count = 0
batch_num = 4096
batch_predictions = []
batch_labels = []
with open(predictions_file) as f:
    reader = csv.DictReader(f)
    for row in reader:
        sparse_predictions = row['LabelConfidencePairs'].split(' ')
        sparse_labels = labels[row['VideoId']]

        dense_predictions = sparse_predictions_to_dense(
            sparse_predictions, 4716)
        dense_labels = sparse_labels_to_dense(sparse_labels, 4716)
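        # Hedged continuation sketch (not part of the original file; assumes
        # numpy is imported as np): gather the dense rows into batches of
        # batch_num examples and flush each batch through the metrics object.
        batch_predictions.append(dense_predictions)
        batch_labels.append(dense_labels)
        count += 1
        if count % batch_num == 0:
            eval.accumulate(np.array(batch_predictions),
                            np.array(batch_labels),
                            np.zeros(len(batch_predictions)))
            batch_predictions, batch_labels = [], []

if batch_predictions:  # flush the final partial batch
    eval.accumulate(np.array(batch_predictions),
                    np.array(batch_labels),
                    np.zeros(len(batch_predictions)))
print("GAP@20: " + str(eval.get()['gap']))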
Example #16
def evaluate():
  tf.set_random_seed(0)  # for reproducibility

  # Write json of flags
  # model_flags_path = os.path.join(FLAGS.train_dir, "model_flags.json")
  # if not file_io.file_exists(model_flags_path):
  #   raise IOError(("Cannot find file %s. Did you run train.py on the same "
  #                  "--train_dir?") % model_flags_path)
  # flags_dict = json.loads(file_io.FileIO(model_flags_path, mode="r").read())
  all_eval_data_patterns = []
  with open(FLAGS.eval_data_config) as f:
    all_eval_data_patterns = f.read().splitlines()

  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    # feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
    #     flags_dict["feature_names"], flags_dict["feature_sizes"])

    # prepare a reader for each single model prediction result
    all_readers = []

    for i in xrange(len(all_eval_data_patterns)):
      reader = readers.EnsembleReader(
          feature_names=[FLAGS.feature_names], feature_sizes=[FLAGS.feature_sizes])
      all_readers.append(reader)

    input_reader = None
    input_data_pattern = None
    
    # model = find_class_by_name(flags_dict["model"],
    #     [frame_level_models, video_level_models])()
    model = ensemble_model.MeanModel()
    label_loss_fn = find_class_by_name("CrossEntropyLoss", [losses])()

    build_graph(
        all_readers=all_readers,
        all_eval_data_patterns=all_eval_data_patterns,
        model=model,
        label_loss_fn=label_loss_fn,
        num_readers=FLAGS.num_readers,
        batch_size=FLAGS.batch_size)
    
    logging.info("built evaluation graph")
    video_id_batch = tf.get_collection("video_id_batch")[0]
    prediction_batch = tf.get_collection("predictions")[0]
    label_batch = tf.get_collection("labels")[0]
    loss = tf.get_collection("loss")[0]
    summary_op = tf.get_collection("summary_op")[0]

    saver = tf.train.Saver(tf.global_variables())
    summary_writer = tf.summary.FileWriter(
        FLAGS.train_dir, graph=tf.get_default_graph())

    evl_metrics = eval_util.EvaluationMetrics(reader.num_classes, FLAGS.top_k)

    last_global_step_val = -1
    while True:
      last_global_step_val = evaluation_loop(video_id_batch, prediction_batch,
                                             label_batch, loss, summary_op,
                                             saver, summary_writer, evl_metrics,
                                             last_global_step_val)
      if FLAGS.run_once:
        break