# Ejemplo n.º 1 (0)
def log_outputs(opts, step, train_cost, test_cost):
    """Post-process predictions and append this step's metrics to loss_f.csv.

    Args:
        opts: dict-like options; opts["flags"].out_dir is the output root.
        step: current iteration number (first CSV column).
        train_cost: training loss at this step.
        test_cost: test loss at this step.
    """
    # Apply post processing (hungarian matching and create cleaned outputs).
    predict_dir = os.path.join(opts["flags"].out_dir, "predictions", "train")
    train_dicts = post_processing.process_outputs(predict_dir, "")

    predict_dir = os.path.join(opts["flags"].out_dir, "predictions", "test")
    test_dicts = post_processing.process_outputs(predict_dir, "")

    # Score the cleaned outputs (overall f1 plus per-label scores).
    trainf, trainf_scores = compute_tpfp(opts, train_dicts)
    testf, testf_scores = compute_tpfp(opts, test_dicts)

    loss_f = os.path.join(opts["flags"].out_dir, "plots", "loss_f.csv")
    # Write the CSV header only once, when the file does not exist yet.
    if not os.path.isfile(loss_f):
        with open(loss_f, "w") as f:
            f.write(("iteration,training loss,test loss,train f1,test f1,"
                     "train lift,train hand,train grab,train supinate,"
                     "train mouth,train chew,"
                     "test lift,test hand,test grab,test supinate,"
                     "test mouth,test chew\n"))
    with open(loss_f, "a") as outfile:
        # 17 columns: step, 2 losses, 2 overall f1s, then 6 train-label
        # scores and 6 test-label scores (assumes *_scores have 6 entries
        # each, matching the header -- TODO confirm against compute_tpfp).
        format_str = ("%d,%f,%f,%f,%f,"
                      "%f,%f,%f,%f,"
                      "%f,%f,"
                      "%f,%f,%f,%f,"
                      "%f,%f\n")
        output_data = tuple(
            [step, train_cost, test_cost, trainf, testf]
            + trainf_scores + testf_scores)
        outfile.write(format_str % output_data)
    print("\tupdated...")
def _log_outputs(opts, step, network, label_weight):
    """Log the outputs for the network.

    Runs the network over the full train/test sequences, post-processes the
    predictions, and appends losses plus scores to plots/loss_f.csv.

    Args:
        opts: dict-like options; opts["flags"].out_dir is the output root.
        step: current iteration number (first CSV column).
        network: network to evaluate.
        label_weight: label weighting passed through to the sequence runner.

    NOTE(review): train_data and test_data are read from module scope here
    (they are not parameters) -- verify they are defined in the full file.
    """
    # Run the network on all the training and testing examples.
    # Creates a graph for each video.
    train_cost, train_scores = _process_full_sequences(
        opts, step, network, train_data, "train", label_weight)
    test_cost, test_scores = _process_full_sequences(
        opts, step, network, test_data, "test", label_weight)

    # Apply post processing (hungarian matching and create cleaned outputs).
    predict_dir = os.path.join(opts["flags"].out_dir,
                               "predictions", "train")
    train_dicts = post_processing.process_outputs(
        predict_dir, "")
    predict_dir = os.path.join(opts["flags"].out_dir,
                               "predictions", "test")
    test_dicts = post_processing.process_outputs(
        predict_dir, "")

    # Score the cleaned outputs (overall f1 plus per-label scores).
    trainf, trainf_scores = compute_tpfp(train_dicts)
    testf, testf_scores = compute_tpfp(test_dicts)

    loss_f = os.path.join(opts["flags"].out_dir, "plots", "loss_f.csv")
    # Write the CSV header only once, when the file does not exist yet.
    if not os.path.isfile(loss_f):
        with open(loss_f, "w") as f:
            f.write(("iteration,training loss,test loss,train f1,test f1,"
                     "train tp,train fp,train fn,train perframe,"
                     "test tp,test fp,test fn,test perframe,"
                     "train lift,train hand,train grab,train supinate,"
                     "train mouth,train chew,"
                     "test lift,test hand,test grab,test supinate,"
                     "test mouth,test chew\n"))
    with open(loss_f, "a") as outfile:
        # 25 columns: step, 2 losses, 2 overall f1s, 4 train + 4 test
        # sequence scores, then 6 train-label and 6 test-label scores
        # (assumes the score lists have those lengths -- TODO confirm).
        format_str = ("%d,%f,%f,%f,%f,"
                      "%f,%f,%f,%f,"
                      "%f,%f,%f,%f,"
                      "%f,%f,%f,%f,"
                      "%f,%f,"
                      "%f,%f,%f,%f,"
                      "%f,%f\n")
        output_data = tuple(
            [step, train_cost, test_cost, trainf, testf]
            + train_scores + test_scores
            + trainf_scores + testf_scores)
        outfile.write(format_str % output_data)
    print("\tupdated...")
# Ejemplo n.º 3 (0)
# gflags.DEFINE_boolean("help", False, "Help")
gflags.ADOPT_module_key_flags(post_processing)
gflags.MarkFlagAsRequired("input_dir")

if __name__ == "__main__":
    FLAGS = gflags.FLAGS
    FLAGS(sys.argv)

    if FLAGS.help:
        print(FLAGS)
        exit()

    print(FLAGS.input_dir)
    labels = ['lift', 'hand', 'grab', 'supinate', 'mouth', 'chew']
    label_dicts = post_processing.process_outputs(FLAGS.input_dir, "", labels)

    mean_f = 0
    for label_dict in label_dicts:
        tp = float(len(label_dict['dists']))
        fp = float(label_dict['fp'])
        fn = float(label_dict['fn'])
        # Small epsilon avoids division by zero when a label has no
        # detections (tp + fp == 0) or no ground truth (tp + fn == 0).
        precision = tp / (tp + fp + 0.0001)
        recall = tp / (tp + fn + 0.0001)
        f1_score = 2 * (precision * recall) / (precision + recall + 0.0001)
        print("label: %s" % label_dict['label'])
        print("tp: %d, fp: %d, fn: %d\n" % (tp, fp, fn))
        print("\tprecision: %f" % precision)
        print("\trecall: %f" % recall)
        print("\tfscore: %f" % f1_score)
        mean_f += f1_score
    # BUG FIX: mean_f was accumulated but never reported.
    if label_dicts:
        print("mean fscore: %f" % (mean_f / len(label_dicts)))
if __name__ == "__main__":
    FLAGS = gflags.FLAGS

    FLAGS(sys.argv)

    if FLAGS.help is True:
        print(FLAGS)
        exit()

    print(FLAGS.input_dir)
    thresholds = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75]
    all_labels = []
    for i in thresholds:
        print(i)
        temp = post_processing.process_outputs(
            FLAGS.input_dir,
            "", frame_thresh=i)
        all_labels.append(temp)

    filename = os.path.join(FLAGS.input_dir, "temp.csv")
    with open(filename, "w") as out_file:
        out_file.write(
            "threshold,lift,hand,grab,supinate,mouth,chew\n"
        )
        for j in range(len(all_labels)):
            label_dicts = all_labels[j]

            out_file.write("%f" % thresholds[j])
            for i in range(0, len(label_dicts)):
                tp = float(len(label_dicts[i]['dists']))
                fp = float(label_dicts[i]['fp'])