Code Example #1
File: eval_show.py  Project: fword/hasky
def deal_eval_generated_texts_results(results):
    # Copy so the padding below does not mutate the caller's list.
    ori_results = list(results)

    # TODO: handle the 9-element case (no negative examples) more cleanly.
    # Pad to the 12 values unpacked below; the negative fields stay None.
    if len(ori_results) == 9:
        ori_results += [None] * 3

    _, \
    evaluate_image_name, \
    evaluate_text_str, \
    evaluate_text, \
    generated_texts, \
    generated_texts_beam, \
    generated_texts_score, \
    generated_texts_score_beam, \
    pos_scores, \
    neg_scores, \
    evaluate_neg_text_str, \
    evaluate_neg_text = ori_results

    for i in range(len(evaluate_image_name)):
        if neg_scores is not None:
            evaluator.print_img_text_negscore_generatedtext(
                evaluate_image_name[i], i, evaluate_text_str[i], pos_scores[i],
                evaluate_text[i], generated_texts[i], generated_texts_score[i],
                generated_texts_beam[i], generated_texts_score_beam[i],
                evaluate_neg_text_str[i], neg_scores[i], evaluate_neg_text[i])
        else:
            evaluator.print_img_text_negscore_generatedtext(
                evaluate_image_name[i], i, evaluate_text_str[i], pos_scores[i],
                evaluate_text[i], generated_texts[i], generated_texts_score[i],
                generated_texts_beam[i], generated_texts_score_beam[i])
    melt.print_results(results, ['loss'])
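
The length check above hard-codes one arity. A more general padding helper, shown here as a sketch (pad_results is not part of hasky), makes the same idea reusable:

def pad_results(results, expected_len):
    # Pad with None up to a fixed arity so the tuple unpack below
    # cannot fail; generalizes the len == 9 special case above.
    results = list(results)
    if len(results) < expected_len:
        results += [None] * (expected_len - len(results))
    return results

With it, deal_eval_generated_texts_results could simply start with ori_results = pad_results(results, 12).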
Code Example #2
File: eval_show.py  Project: fword/hasky
def deal_eval_results(results):
    # results[0] is eval_loss; the leading underscore below skips it.
    _, \
    eval_max_score, \
    eval_max_index, \
    eval_word_max_score, \
    eval_word_max_index, \
    evaluate_image_name, \
    evaluate_text_str, \
    evaluate_text, \
    pos_scores, \
    neg_scores, \
    evaluate_neg_text_str, \
    evaluate_neg_text = results

    enumerate_list = enumerate(
        zip(evaluate_image_name, evaluate_text_str, pos_scores, evaluate_text,
            evaluate_neg_text_str, neg_scores, evaluate_neg_text))

    for i, (img, text, pos_score, text_ids, neg_text, neg_score,
            neg_text_ids) in enumerate_list:
        evaluator.print_img_text_negscore(img, i, text, pos_score, text_ids,
                                          neg_text, neg_score, neg_text_ids)
        evaluator.print_neareast_texts_from_sorted(eval_max_score[i],
                                                   eval_max_index[i], img)
        evaluator.print_neareast_words_from_sorted(eval_word_max_score[i],
                                                   eval_word_max_index[i])

    melt.print_results(results, ['eval_loss'])
Code Example #3
File: sort_number.py  Project: tangqiqi123/hasky
def deal_eval_results(results):
  melt.print_results(results, eval_names)
  correct_predict_ratio, predicts, targets = results[-3], results[-2], results[-1]
  num_show = 2
  for i, (predict, target) in enumerate(zip(predicts, targets)):
    if i < num_show:
      print('label--:', target)
      print('predict:', predict)
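
melt.print_results appears in every example here, but its body is not shown. A minimal stand-in consistent with these call sites might look like the following; this is an inferred sketch, not the real melt API:

def print_results(results, names=None):
    # Pair each result with its name (falling back to the index)
    # and print everything on one line.
    names = names or []
    parts = []
    for i, value in enumerate(results):
        label = names[i] if i < len(names) else str(i)
        parts.append('{}:{}'.format(label, value))
    print(' '.join(parts))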
Code Example #4
File: train.py  Project: buptpriswang/hasky
    def _deal_results(results):
        melt.print_results(results, ['loss'])

        if deal_debug_results is not None:
            # Strip the trailing feed-op outputs before handing off the debug results.
            debug_results = results[:-len(feed_ops)] if feed_ops else results
            deal_debug_results(debug_results)

        if feed_ops:
            # Stash the feed-op outputs so the next step's feed dict can reuse them.
            global feed_results
            feed_results = results[-len(feed_ops):]
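
A plausible counterpart to the feed_results global above, sketched with a hypothetical feed_placeholders list (not shown in the original), would turn one step's feed-op outputs into the next step's feed dict:

feed_results = None  # populated by _deal_results after each training step

def gen_feed_dict():
    # feed_placeholders is assumed: the placeholders matching feed_ops.
    if feed_results is None:
        return {}
    return dict(zip(feed_placeholders, feed_results))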
Code Example #5
def gen_validate(input_app, input_results, trainer, predictor):
    eval_ops = None
    train_with_validation = input_results[
        input_app.input_valid_name] is not None
    deal_eval_results = None
    if train_with_validation and not FLAGS.train_only:
        eval_image_name, eval_image_feature, eval_text, eval_text_str, eval_input_text, eval_input_text_str = \
         input_results[input_app.input_valid_name]

        eval_loss = trainer.build_train_graph(eval_image_feature,
                                              eval_input_text, eval_text)
        eval_scores = tf.get_collection('scores')[-1]
        print('gen_validate-------------------------',
              tf.get_collection('scores'))
        eval_ops = [eval_loss]

        if FLAGS.show_eval and (predictor is not None):
            eval_ops, deal_eval_results = \
              gen_evalulate(
                  input_app,
                  input_results,
                  predictor,
                  eval_ops,
                  eval_scores)
        else:
            deal_eval_results = lambda x: melt.print_results(
                x, ['eval_batch_loss'])

    return eval_ops, None, deal_eval_results
Code Example #6
def evaluate_score():
    text_max_words = evaluator.all_distinct_texts.shape[1]
    print('text_max_words:', text_max_words)
    predictor = melt.Predictor(FLAGS.model_dir)
    timer = gezi.Timer()
    start = 0
    while start < FLAGS.num_examples:
        end = start + FLAGS.batch_size
        if end > FLAGS.num_examples:
            end = FLAGS.num_examples
        print('predicts start:', start, 'end:', end, file=sys.stderr)
        predicts(predictor, start, end)
        start = end

    melt.print_results(rank_metrics.get_metrics(), rank_metrics.get_names())
    print('predict using time:', timer.elapsed())
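
The start/end sweep above is a common fixed-size batching pattern; factored out as a sketch (batch_ranges is not in the original code):

def batch_ranges(num_examples, batch_size):
    # Yield (start, end) index pairs covering num_examples,
    # with a short final batch, exactly as the while loop above.
    start = 0
    while start < num_examples:
        end = min(start + batch_size, num_examples)
        yield start, end
        start = end

The loop body then reduces to: for start, end in batch_ranges(FLAGS.num_examples, FLAGS.batch_size): predicts(predictor, start, end).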
Code Example #7
def gen_validate(input_app, input_results, trainer, predictor):
    gen_eval_feed_dict = input_app.gen_eval_feed_dict
    eval_ops = None
    train_with_validation = input_results[
        input_app.input_valid_name] is not None
    deal_eval_results = None
    if train_with_validation and not FLAGS.train_only:
        eval_image_name, eval_image_feature, eval_text, eval_text_str = input_results[
            input_app.input_valid_name]
        if input_app.input_valid_neg_name in input_results:
            eval_neg_text, eval_neg_text_str = input_results[
                input_app.input_valid_neg_name]
        else:
            eval_neg_text, eval_neg_text_str = None, None

        eval_loss = trainer.build_train_graph(eval_image_feature, eval_text,
                                              eval_neg_text)
        eval_scores = tf.get_collection('scores')[-1]
        eval_ops = [eval_loss]

        if FLAGS.show_eval and (predictor is not None):
            eval_ops, deal_eval_results = \
              gen_evalulate(
                  input_app,
                  input_results,
                  predictor,
                  eval_ops,
                  eval_scores,
                  eval_neg_text,
                  eval_neg_text_str)
        else:
            deal_eval_results = lambda x: melt.print_results(x, ['loss'])

    return eval_ops, gen_eval_feed_dict, deal_eval_results
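
The triple returned here lines up with the eval parameters of train_once in Code Example #14. A sketch of the wiring (sess, step, and the training ops are assumed context, not shown in the original):

eval_ops, gen_eval_feed_dict, deal_eval_results = gen_validate(
    input_app, input_results, trainer, predictor)
stop = train_once(sess, step, ops,
                  eval_ops=eval_ops,
                  gen_eval_feed_dict=gen_eval_feed_dict,
                  deal_eval_results=deal_eval_results,
                  eval_interval_steps=FLAGS.eval_interval_steps)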
Code Example #8
File: eval_show.py  Project: fword/hasky
def deal_eval_generated_texts_results(results):
    _, \
    evaluate_image_name, \
    evaluate_input_text_str, \
    evaluate_input_text, \
    evaluate_text_str, \
    evaluate_text, \
    generated_texts, \
    generated_texts_beam, \
    generated_texts_score, \
    generated_texts_score_beam, \
    pos_scores = results

    for i in range(len(evaluate_image_name)):
        #print(generated_texts_score_beam[i])
        #print(generated_texts_beam[i])
        evaluator.print_img_text_generatedtext_score(
            evaluate_image_name[i], i, evaluate_input_text_str[i],
            evaluate_input_text[i], evaluate_text_str[i], pos_scores[i],
            evaluate_text[i], generated_texts[i], generated_texts_score[i],
            generated_texts_beam[i], generated_texts_score_beam[i])
    melt.print_results(results, ['loss'])
Code Example #9
File: train.py  Project: buptpriswang/hasky
def gen_validate(input_app, input_results, trainer, predictor):
    gen_eval_feed_dict = None
    eval_ops = None
    train_with_validation = input_results[
        input_app.input_valid_name] is not None
    deal_eval_results = None
    if train_with_validation and not FLAGS.train_only:
        eval_image_name, eval_image_feature, eval_text, eval_text_str = input_results[
            input_app.input_valid_name]
        if input_results[input_app.input_valid_neg_name]:
            eval_neg_image_name, eval_neg_image_feature, eval_neg_text, eval_neg_text_str = input_results[
                input_app.input_valid_neg_name]

        if not FLAGS.neg_left:
            # The original assigned an unused eval_neg_image here; the negative
            # image feature is presumably what was meant to be dropped.
            eval_neg_image_feature = None
        eval_neg_text_ = eval_neg_text
        if not FLAGS.neg_right:
            eval_neg_text_ = None
        if algos_factory.is_generative(FLAGS.algo):
            eval_neg_image_feature = None
            eval_neg_text_ = None

        eval_loss = trainer.build_train_graph(eval_image_feature, eval_text,
                                              eval_neg_image_feature,
                                              eval_neg_text_)
        eval_scores = tf.get_collection('scores')[-1]
        eval_ops = [eval_loss]

        if algos_factory.is_generative(FLAGS.algo):
            eval_neg_text = None
            eval_neg_text_str = None

        if FLAGS.show_eval and (predictor is not None):
            eval_ops, deal_eval_results = \
              gen_evalulate(
                  input_app,
                  input_results,
                  predictor,
                  eval_ops,
                  eval_scores,
                  eval_neg_text,
                  eval_neg_text_str)
        else:
            deal_eval_results = lambda x: melt.print_results(
                x, ['eval_batch_loss'])

    return eval_ops, gen_eval_feed_dict, deal_eval_results
Code Example #10
def train():
    trainset = FLAGS.train_files_pattern
    trainset = sys.argv[1]  # overrides the flag above; the first assignment is dead
    print('trainset', trainset)
    inputs = melt.shuffle_then_decode.inputs
    X, y = inputs(trainset,
                  decode=decode,
                  batch_size=FLAGS.batch_size,
                  num_epochs=FLAGS.num_epochs,
                  num_preprocess_threads=FLAGS.num_preprocess_threads,
                  batch_join=FLAGS.batch_join,
                  shuffle=FLAGS.shuffle)

    train_with_validation = bool(FLAGS.valid_files_pattern)
    if train_with_validation:
        validset = FLAGS.valid_files_pattern
        eval_X, eval_y = inputs(
            validset,
            decode=decode,
            batch_size=FLAGS.batch_size * 10,
            num_preprocess_threads=FLAGS.num_preprocess_threads,
            batch_join=FLAGS.batch_join,
            shuffle=FLAGS.shuffle)

    loss, accuracy = model.build_graph(X, y)
    train_op = melt.gen_train_op(loss, FLAGS.learning_rate)
    if train_with_validation:
        tf.get_variable_scope().reuse_variables()
        eval_loss, eval_accuracy = model.build_graph(eval_X, eval_y)
        tf.scalar_summary('loss_eval', eval_loss)
        eval_ops = [eval_loss, eval_accuracy]
    else:
        eval_ops = None

    train_flow(
        [train_op, loss, accuracy],
        deal_results=melt.show_precision_at_k,
        #deal_results=None,
        eval_ops=eval_ops,
        deal_eval_results=lambda x: melt.print_results(x,
                                                       names=['precision@1']),
        print_avg_loss=True,
        eval_interval_steps=FLAGS.eval_interval_steps)
Code Example #11
File: train.py  Project: yang9112/tensorflow-example
def train():
  assert FLAGS.num_classes > 0 and FLAGS.num_features > 0, 'you must pass num_classes and num_features according to your data'
  print('num_features:', FLAGS.num_features, 'num_classes:', FLAGS.num_classes)
  model.set_input_info(num_features=FLAGS.num_features, num_classes=FLAGS.num_classes)

  trainset = sys.argv[1]
  inputs = melt.shuffle_then_decode.inputs
  X, y = inputs(
    trainset, 
    decode=decode,
    batch_size=FLAGS.batch_size,
    num_epochs=FLAGS.num_epochs, 
    num_threads=FLAGS.num_preprocess_threads,
    batch_join=FLAGS.batch_join,
    shuffle=FLAGS.shuffle)
  
  train_with_validation = len(sys.argv) > 2
  if train_with_validation:
    validset = sys.argv[2]
    eval_X, eval_y = inputs(
      validset, 
      decode=decode,
      batch_size=FLAGS.batch_size * 10,
      num_threads=FLAGS.num_preprocess_threads,
      batch_join=FLAGS.batch_join,
      shuffle=FLAGS.shuffle)
  
  with tf.variable_scope('main') as scope:
    loss, accuracy = model.build_graph(X, y)
    scope.reuse_variables()
    if train_with_validation:
      eval_loss, eval_accuracy = model.build_graph(eval_X, eval_y)
      eval_ops = [eval_loss, eval_accuracy]
    else:
      eval_ops = None

  melt.apps.train_flow(
             [loss, accuracy], 
             deal_results=melt.show_precision_at_k,
             eval_ops=eval_ops,
             deal_eval_results= lambda x: melt.print_results(x, names=['precision@1']),
             model_dir=FLAGS.model_dir
             )
Code Example #12
def train():
    trainset = sys.argv[1]
    inputs = melt.shuffle_then_decode.inputs
    X, y = inputs(trainset,
                  decode=decode,
                  batch_size=FLAGS.batch_size,
                  num_epochs=FLAGS.num_epochs,
                  num_threads=FLAGS.num_preprocess_threads,
                  batch_join=FLAGS.batch_join,
                  shuffle=FLAGS.shuffle)

    train_with_validation = len(sys.argv) > 2
    if train_with_validation:
        validset = sys.argv[2]
        eval_X, eval_y = inputs(validset,
                                decode=decode,
                                batch_size=FLAGS.batch_size * 10,
                                num_threads=FLAGS.num_preprocess_threads,
                                batch_join=FLAGS.batch_join,
                                shuffle=FLAGS.shuffle)

    loss, accuracy = model.build_graph(X, y)
    if train_with_validation:
        tf.get_variable_scope().reuse_variables()
        eval_loss, eval_accuracy = model.build_graph(eval_X, eval_y)
        eval_ops = [eval_loss, eval_accuracy]
    else:
        eval_ops = None

    melt.apps.train_flow(
        [loss, accuracy],
        deal_results=melt.show_precision_at_k,
        eval_ops=eval_ops,
        deal_eval_results=lambda x: melt.print_results(x,
                                                       names=['precision@1']),
    )
Code Example #13
  def _deal_results(results):
    melt.print_results(results, ['batch_loss'])

    if deal_debug_results is not None:
      debug_results = results
      deal_debug_results(debug_results)
Code Example #14
def train_once(sess,
               step,
               ops,
               names=None,
               gen_feed_dict=None,
               deal_results=melt.print_results,
               interval_steps=100,
               eval_ops=None,
               eval_names=None,
               gen_eval_feed_dict=None,
               deal_eval_results=melt.print_results,
               eval_interval_steps=100,
               print_time=True,
               print_avg_loss=True,
               model_dir=None,
               log_dir=None,
               is_start=False,
               num_steps_per_epoch=None,
               metric_eval_function=None,
               metric_eval_interval_steps=0):

    timer = gezi.Timer()
    if print_time:
        if not hasattr(train_once, 'timer'):
            train_once.timer = Timer()
            train_once.eval_timer = Timer()
            train_once.metric_eval_timer = Timer()

    melt.set_global('step', step)
    epoch = step / num_steps_per_epoch if num_steps_per_epoch else -1
    epoch_str = 'epoch:%.4f' % (epoch) if num_steps_per_epoch else ''
    melt.set_global('epoch', '%.4f' % (epoch))

    info = BytesIO()  # Python 2 era: accepts str; under Python 3 this would be io.StringIO
    stop = False

    if ops is not None:
        if deal_results is None and names is not None:
            deal_results = lambda x: melt.print_results(x, names)
        if deal_eval_results is None and eval_names is not None:
            deal_eval_results = lambda x: melt.print_results(x, eval_names)

        if eval_names is None:
            eval_names = names

        feed_dict = {} if gen_feed_dict is None else gen_feed_dict()

        results = sess.run(ops, feed_dict=feed_dict)

        # #--------trace debug
        # if step == 210:
        #   run_metadata = tf.RunMetadata()
        #   results = sess.run(
        #         ops,
        #         feed_dict=feed_dict,
        #         options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
        #         run_metadata=run_metadata)
        #   from tensorflow.python.client import timeline
        #   trace = timeline.Timeline(step_stats=run_metadata.step_stats)

        #   trace_file = open('timeline.ctf.json', 'w')
        #   trace_file.write(trace.generate_chrome_trace_format())

        # results[0] is assumed to be the train_op's return value; drop it.
        results = results[1:]

        # TODO: support averaged loss and other averaged evaluations, as for test.
        if print_avg_loss:
            if not hasattr(train_once, 'avg_loss'):
                train_once.avg_loss = AvgScore()
                if interval_steps != eval_interval_steps:
                    train_once.avg_loss2 = AvgScore()
            # After dropping the train_op result, collect all scalar results as losses.
            loss = gezi.get_singles(results)
            train_once.avg_loss.add(loss)
            if interval_steps != eval_interval_steps:
                train_once.avg_loss2.add(loss)

        if is_start or (interval_steps and step % interval_steps == 0):
            train_average_loss = train_once.avg_loss.avg_score()
            if print_time:
                duration = timer.elapsed()
                duration_str = 'duration:{:.3f} '.format(duration)
                melt.set_global('duration', '%.3f' % duration)
                info.write(duration_str)
                elapsed = train_once.timer.elapsed()
                steps_per_second = interval_steps / elapsed
                batch_size = melt.batch_size()
                num_gpus = melt.num_gpus()
                instances_per_second = interval_steps * batch_size * num_gpus / elapsed
                if num_gpus == 1:
                    info.write(
                        'elapsed:[{:.3f}] batch_size:[{}] batches/s:[{:.2f}] insts/s:[{:.2f}] '
                        .format(elapsed, batch_size, steps_per_second,
                                instances_per_second))
                else:
                    info.write(
                        'elapsed:[{:.3f}] batch_size:[{}] gpus:[{}], batches/s:[{:.2f}] insts/s:[{:.2f}] '
                        .format(elapsed, batch_size, num_gpus,
                                steps_per_second, instances_per_second))

            if print_avg_loss:
                #info.write('train_avg_metrics:{} '.format(melt.value_name_list_str(train_average_loss, names)))
                names_ = melt.adjust_names(train_average_loss, names)
                info.write('train_avg_metrics:{} '.format(
                    melt.parse_results(train_average_loss, names_)))
                #info.write('train_avg_loss: {} '.format(train_average_loss))

            #print(gezi.now_time(), epoch_str, 'train_step:%d'%step, info.getvalue(), end=' ')
            logging.info2('{} {} {}'.format(epoch_str, 'train_step:%d' % step,
                                            info.getvalue()))

            if deal_results is not None:
                stop = deal_results(results)

    metric_evaluate = False
    # if metric_eval_function is not None \
    #   and ( (is_start and (step or ops is None))\
    #     or (step and ((num_steps_per_epoch and step % num_steps_per_epoch == 0) \
    #            or (metric_eval_interval_steps \
    #                and step % metric_eval_interval_steps == 0)))):
    #     metric_evaluate = True
    if metric_eval_function is not None \
      and (is_start \
        or (num_steps_per_epoch and step % num_steps_per_epoch == 0) \
             or (metric_eval_interval_steps \
                 and step % metric_eval_interval_steps == 0)):
        metric_evaluate = True

    if metric_evaluate:
        evaluate_results, evaluate_names = metric_eval_function()

    if is_start or (eval_interval_steps and step % eval_interval_steps == 0):
        if ops is not None:
            if interval_steps != eval_interval_steps:
                train_average_loss = train_once.avg_loss2.avg_score()

            info = BytesIO()

            names_ = melt.adjust_names(results, names)

            train_average_loss_str = ''
            if print_avg_loss and interval_steps != eval_interval_steps:
                train_average_loss_str = melt.value_name_list_str(
                    train_average_loss, names_)
                melt.set_global('train_loss', train_average_loss_str)
                train_average_loss_str = 'train_avg_loss:{} '.format(
                    train_average_loss_str)

            if interval_steps != eval_interval_steps:
                #end = '' if eval_ops is None else '\n'
                #print(gezi.now_time(), epoch_str, 'eval_step: %d'%step, train_average_loss_str, end=end)
                logging.info2('{} eval_step: {} {}'.format(
                    epoch_str, step, train_average_loss_str))

        if eval_ops is not None:
            eval_feed_dict = {} if gen_eval_feed_dict is None else gen_eval_feed_dict()
            #eval_feed_dict.update(feed_dict)

            # ------ example: how to profile a sess.run call for perf debugging
            ##timer_ = gezi.Timer('sess run generate')
            ##sess.run(eval_ops[-2], feed_dict=None)
            ##timer_.print()

            timer_ = gezi.Timer('sess run eval_ops')
            eval_results = sess.run(eval_ops, feed_dict=eval_feed_dict)
            timer_.print()
            eval_stop = False  # stays False if no deal_eval_results is given
            if deal_eval_results is not None:
                # TODO: should user prints be required to go through logging as well?
                #print(gezi.now_time(), epoch_str, 'eval_step: %d'%step, 'eval_metrics:', end='')
                logging.info2('{} eval_step: {} eval_metrics:'.format(
                    epoch_str, step))
                eval_stop = deal_eval_results(eval_results)

            eval_loss = gezi.get_singles(eval_results)
            assert len(eval_loss) > 0
            if eval_stop is True: stop = True
            eval_names_ = melt.adjust_names(eval_loss, eval_names)

            melt.set_global('eval_loss',
                            melt.parse_results(eval_loss, eval_names_))
        elif interval_steps != eval_interval_steps:
            #print()
            pass

        if log_dir:
            #timer_ = gezi.Timer('writing log')

            if not hasattr(train_once, 'summary_op'):
                try:
                    train_once.summary_op = tf.summary.merge_all()
                except Exception:
                    train_once.summary_op = tf.merge_all_summaries()

                melt.print_summary_ops()

                try:
                    train_once.summary_train_op = tf.summary.merge_all(
                        key=melt.MonitorKeys.TRAIN)
                    train_once.summary_writer = tf.summary.FileWriter(
                        log_dir, sess.graph)
                except Exception:
                    train_once.summary_train_op = tf.merge_all_summaries(
                        key=melt.MonitorKeys.TRAIN)
                    train_once.summary_writer = tf.train.SummaryWriter(
                        log_dir, sess.graph)

                tf.contrib.tensorboard.plugins.projector.visualize_embeddings(
                    train_once.summary_writer, projector_config)

            summary = tf.Summary()
            # Strategy: on eval_interval_steps, if an eval dataset exists, TensorBoard
            # evaluates on it; otherwise it evaluates on the train set. When an eval
            # dataset exists, train loss is still monitored as well.
            if train_once.summary_train_op is not None:
                summary_str = sess.run(train_once.summary_train_op,
                                       feed_dict=feed_dict)
                train_once.summary_writer.add_summary(summary_str, step)

            if eval_ops is None:
                # Get the train loss for every train batch.
                if train_once.summary_op is not None:
                    #timer2 = gezi.Timer('sess run')
                    summary_str = sess.run(train_once.summary_op,
                                           feed_dict=feed_dict)
                    #timer2.print()
                    train_once.summary_writer.add_summary(summary_str, step)
            else:
                # Get the eval loss for every eval batch, then add the averaged train loss for this eval step.
                summary_str = sess.run(
                    train_once.summary_op, feed_dict=eval_feed_dict
                ) if train_once.summary_op is not None else ''
                # All single-value results are added to the summary here, rather than via tf.scalar_summary.
                summary.ParseFromString(summary_str)
                melt.add_summarys(summary,
                                  eval_results,
                                  eval_names_,
                                  suffix='eval')

            melt.add_summarys(summary,
                              train_average_loss,
                              names_,
                              suffix='train_avg%dsteps' % eval_interval_steps)

            if metric_evaluate:
                melt.add_summarys(summary,
                                  evaluate_results,
                                  evaluate_names,
                                  prefix='evaluate')

            train_once.summary_writer.add_summary(summary, step)
            train_once.summary_writer.flush()

            #timer_.print()

        if print_time:
            full_duration = train_once.eval_timer.elapsed()
            if metric_evaluate:
                metric_full_duration = train_once.metric_eval_timer.elapsed()
            full_duration_str = 'elapsed:{:.3f} '.format(full_duration)
            #info.write('duration:{:.3f} '.format(timer.elapsed()))
            duration = timer.elapsed()
            info.write('duration:{:.3f} '.format(duration))
            info.write(full_duration_str)
            info.write('eval_time_ratio:{:.3f} '.format(duration /
                                                        full_duration))
            if metric_evaluate:
                info.write('metric_time_ratio:{:.3f} '.format(
                    duration / metric_full_duration))
        #print(gezi.now_time(), epoch_str, 'eval_step: %d'%step, info.getvalue())
        logging.info2('{} {} {}'.format(epoch_str, 'eval_step: %d' % step,
                                        info.getvalue()))

    # Propagate any stop signal from deal_results / deal_eval_results.
    return stop
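
For context, a sketch of how train_once might be driven from a training loop; sess, train_op, loss, eval_ops, and num_steps_per_epoch are assumed from the surrounding setup rather than taken from the original:

step = 0
while True:
    stop = train_once(sess, step, [train_op, loss],
                      names=['loss'],
                      eval_ops=eval_ops,
                      eval_names=['eval_loss'],
                      is_start=(step == 0),
                      num_steps_per_epoch=num_steps_per_epoch)
    if stop:
        break
    step += 1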