Example #1
def analyze_outs(predict, labels, label_names, frame_thresh=0.7, dist_threshold=10):
    # apply non max suppression to each label channel, then match the
    # suppressed ground-truth and prediction peaks with hungarian matching.
    num_labels = labels.shape[1]

    all_matches = []
    for i in range(num_labels):
        ground_truth = labels[:, i]
        gt_sup, gt_idx = post_processing.nonmax_suppress(
            ground_truth, frame_thresh)
        predict_sup, predict_idx = post_processing.nonmax_suppress(
            predict[:, i], frame_thresh)
        match_dict, dist_mat = hungarian_matching.apply_hungarian(
            gt_idx, predict_idx, dist_threshold=dist_threshold
        )
        match_dict["tps"] = len(match_dict["tps"])
        match_dict["fps"] = len(match_dict["fps"])

        all_matches.append(match_dict)
    return all_matches
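A minimal usage sketch (not part of the original source), assuming predict and labels are aligned (num_frames, num_behaviors) score arrays in [0, 1], that the post_processing and hungarian_matching modules are importable, and taking the label names from Example #2:

import numpy

# hypothetical shapes: 1000 frames, 6 behavior channels.
num_frames = 1000
label_names = ["lift", "hand", "grab", "supinate", "mouth", "chew"]
predict = numpy.random.rand(num_frames, len(label_names))
labels = numpy.zeros((num_frames, len(label_names)))
labels[numpy.random.choice(num_frames, 20, replace=False), 0] = 1.0

matches = analyze_outs(predict, labels, label_names,
                       frame_thresh=0.7, dist_threshold=10)
for name, match in zip(label_names, matches):
    print(name, "tps:", match["tps"], "fps:", match["fps"])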
Example #2
def analyze_outs(predict, labels, label_names, frame_thresh=0.7):
    # apply non max suppression to each label channel.
    num_labels = labels.shape[1]

    # one list of frame offsets per behavior label.
    all_dists = {name: [] for name in label_names}
    dist_keys = label_names
    for i in range(num_labels):
        ground_truth = labels[:, i]
        gt_sup, gt_idx = post_processing.nonmax_suppress(
            ground_truth, frame_thresh)
        predict_sup, predict_idx = post_processing.nonmax_suppress(
            predict[:, i], frame_thresh)
        match_dict, dist_mat = hungarian_matching.apply_hungarian(
            gt_idx, predict_idx
        )
        # analyze only the unmatched predictions (false positives).
        predict_idx = match_dict["fps"]
        # setup "greedy matching" to make a list of distances for each prediction.
        dist_mat = numpy.zeros((len(gt_idx), len(predict_idx)))
        abs_dist_mat = numpy.zeros((len(gt_idx), len(predict_idx)))
        for j in range(len(gt_idx)):
            for k in range(len(predict_idx)):
                dist_mat[j, k] = predict_idx[k] - gt_idx[j]
                abs_dist_mat[j, k] = abs(predict_idx[k] - gt_idx[j])
        # get the min idx for each column (each prediction).
        min_dists = []
        if len(gt_idx) > 0 and len(predict_idx) > 0:
            min_idx = numpy.argmin(abs_dist_mat, axis=0)

            for j in range(len(predict_idx)):
                min_dists.append(dist_mat[min_idx[j], j])
            all_dists[dist_keys[i]] += min_dists
    return all_dists
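The double loop above that builds the signed and absolute frame-distance matrices can also be written with numpy broadcasting. A sketch under the same assumptions, with gt_idx and predict_idx being the 1-D frame-index sequences from the loop:

import numpy

gt = numpy.asarray(gt_idx, dtype=float)          # ground-truth peak frames
pred = numpy.asarray(predict_idx, dtype=float)   # unmatched prediction frames

# signed offsets, shape (num_gt, num_pred): pred[k] - gt[j]
dist_mat = pred[numpy.newaxis, :] - gt[:, numpy.newaxis]
abs_dist_mat = numpy.abs(dist_mat)

# nearest ground truth for each prediction, as in the greedy step above.
if gt.size and pred.size:
    min_idx = numpy.argmin(abs_dist_mat, axis=0)
    min_dists = dist_mat[min_idx, numpy.arange(pred.size)]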
Example #3
def eval_scores(scores, score_thresh=0.7, dist_thresh=30.0):
    gt_sup, gt_idx = post_processing.nonmax_suppress(scores[:, 1],
                                                     score_thresh)
    predict_sup, predict_idx = post_processing.nonmax_suppress(
        scores[:, 0], score_thresh)

    # go through each prediction and see if it is a false positive or true
    # positive.
    predicts = predict_sup[predict_idx]
    is_tp = numpy.zeros(predicts.shape)
    is_gt = numpy.zeros(predicts.shape)
    match_dict, dist_mat = hungarian_matching.apply_hungarian(
        gt_idx,
        predict_idx,
        val_threshold=score_thresh,
        dist_threshold=dist_thresh)

    for j in range(len(predict_idx)):
        for i in range(len(gt_idx)):
            # dist_mat is padded with dummy nodes, so prediction j lives in
            # column j + len(gt_idx). first check that some gt is in range.
            if numpy.min(dist_mat[:, j + len(gt_idx)]) < dist_thresh:
                # is this prediction the nearest column for this ground truth?
                dists = dist_mat[i, :]
                if numpy.argmin(dists) == j + len(gt_idx):
                    is_tp[j] = dists[j + len(gt_idx)]
                    is_gt[j] = True
                    # matched; no reason to check other ground truths.
                    break

    return predicts, is_tp, is_gt, len(gt_idx)
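The returned values are enough to compute detection precision and recall. A sketch (not from the original source), treating is_gt as the per-prediction match indicator and the returned count as the number of ground-truth events:

import numpy

# `scores` comes from the caller: an (num_frames, 2) array with predictions
# in column 0 and ground truth in column 1, matching what eval_scores expects.
predicts, is_tp, is_gt, num_gt = eval_scores(scores)

num_matched = int(numpy.sum(is_gt))              # predictions matched to a gt peak
precision = num_matched / max(len(predicts), 1)  # guard empty predictions
recall = num_matched / max(num_gt, 1)            # guard empty ground truth
print("precision %.3f, recall %.3f" % (precision, recall))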
Example #4
def analyze_outs(csv_dir, predict, labels, label_names, frame_thresh=0.1):
    # apply non max suppression to each label channel.
    num_labels = labels.shape[1]

    # create a mask for the network predictions: an argmax marks, per frame,
    # the behavior the network was most confident had occurred.
    predict_mask = numpy.zeros(predict.shape)
    predict_max_idx = numpy.argmax(predict, axis=1)
    for i in range(len(predict_max_idx)):
        # "i" should be the row.
        j = predict_max_idx[i]
        predict_mask[i, j] = 1

    # next convert the mask into start predictions using the rules described
    # in the StartNet/ODAS paper:
    #   c_t = argmax is an action (not background),
    #   c_t != c_{t-1},
    #   as_t^{c_t} exceeds a threshold (not available for ODAS).
    # initialize the previous behavior to the background class (index 6).
    c_tminus1 = 6
    predict_starts = numpy.zeros(predict.shape)
    for i in range(predict_mask.shape[0]):
        c_t = predict_max_idx[i]
        if c_t != 6 and c_t != c_tminus1:
            predict_starts[i, c_t] = 1
        c_tminus1 = c_t

    # write predictions back to disk
    # copy the templates
    for label_name in label_names:
        csv_name = "odas_" + label_name + ".csv"
        base_out = csv_dir
        sequences_helper.create_html_file(base_out, csv_name, "movie_comb.avi",
                                          30)
    # write the predictions.
    exp_name = os.path.basename(base_out)
    write_csvs(base_out, exp_name, label_names, labels, predict_starts)

    all_matches = []
    for i in range(num_labels):
        ground_truth = labels[:, i]
        gt_sup, gt_idx = post_processing.nonmax_suppress(
            ground_truth, frame_thresh)
        predict_sup, predict_idx = post_processing.nonmax_suppress(
            predict[:, i], frame_thresh)
        match_dict, dist_mat = hungarian_matching.apply_hungarian(
            gt_idx, predict_idx)
        match_dict["tps"] = len(match_dict["tps"])
        match_dict["fps"] = len(match_dict["fps"])
        all_matches.append(match_dict)
    return all_matches
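The frame loop in the function above encodes the StartNet/ODAS start rule: the argmax class is an action and differs from the previous frame's class. The same rule can be written without an explicit loop; a sketch under the source's assumption that class index 6 is background:

import numpy

def starts_from_scores(predict, background=6):
    """Mark a start wherever the argmax class changes to a non-background class."""
    c = numpy.argmax(predict, axis=1)                  # per-frame argmax class
    prev = numpy.concatenate(([background], c[:-1]))   # c_{t-1}; background before frame 0
    starts = numpy.zeros_like(predict)
    hit = (c != background) & (c != prev)              # c_t is an action and changed
    starts[numpy.nonzero(hit)[0], c[hit]] = 1
    return starts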
Example #5
def eval_scores(scores, score_thresh=0.7, dist_thresh=30.0):
    gt_sup, gt_idx = post_processing.nonmax_suppress(scores[:, 1],
                                                     score_thresh)
    predict_sup, predict_idx = post_processing.nonmax_suppress(
        scores[:, 0], score_thresh)

    # go through each prediction and see if it is a false positive or true
    # positive.
    predicts = predict_sup[predict_idx]
    is_tp = numpy.zeros(predicts.shape)
    is_gt = numpy.zeros(predicts.shape)
    match_dict, dist_mat = hungarian_matching.apply_hungarian(
        gt_idx,
        predict_idx,
        val_threshold=score_thresh,
        dist_threshold=dist_thresh)

    for j in range(len(predict_idx)):
        for i in range(len(gt_idx)):
            # dist_mat is padded with dummy nodes, so prediction j lives in
            # column j + len(gt_idx). first check that some gt is in range.
            if numpy.min(dist_mat[:, j + len(gt_idx)]) < dist_thresh:
                # is this prediction the nearest column for this ground truth?
                dists = dist_mat[i, :]
                if numpy.argmin(dists) == j + len(gt_idx):
                    is_tp[j] = dists[j + len(gt_idx)]
                    is_gt[j] = True
                    # matched; no reason to check other ground truths.
                    break
    return predicts, is_tp, is_gt, len(gt_idx)
Example #6
def analyze_outs(out_dir, exp_names, predict, labels, frame_thresh=0.7):
    # apply non max suppression to each label channel.
    labels = labels[0]
    num_labels = labels.shape[2]

    all_matches = []
    for i in range(num_labels):
        ground_truth = labels[:, 0, i]
        gt_sup, gt_idx = post_processing.nonmax_suppress(
            ground_truth, frame_thresh)
        predict_sup, predict_idx = post_processing.nonmax_suppress(
            predict[:, 0, i], frame_thresh)
        match_dict, dist_mat = hungarian_matching.apply_hungarian(
            gt_idx, predict_idx)

        # write processed file
        output_name = os.path.join(out_dir, exp_names[0].decode("utf-8"),
                                   'processed_%s.csv' % g_label_names[i])
        create_proc_file(output_name, gt_sup, predict_sup, match_dict)
        all_matches.append(match_dict)
    return all_matches
def proc_prediction_file(predict_file):
    """proc_prediction

    Process a single prediction file. Given a csv output of the form
    frame num, prediction, ground truth.
    """
    # Post processing flow:
    #  nonmax suppress
    #  Apply hungarian matching
    #  Compute stats.
    csv_data = post_processing.load_predict_csv(predict_file)
    # always assume there is a ground truth key name, and skip anything called
    # frames
    key_names = csv_data.keys()

    # get the values out of the dictionary
    ground_truth = []
    predict = []
    for key in key_names:
        if "ground truth" in key:
            ground_truth = csv_data[key]
        elif "frame" not in key:
            predict = csv_data[key]

    # next apply non max suppression
    gt_sup, gt_idx = post_processing.nonmax_suppress(ground_truth, 0.7)
    predict_sup, predict_idx = post_processing.nonmax_suppress(predict, 0.7)

    # hungarian matching
    match_dict, dist_mat = hungarian_matching.apply_hungarian(
        gt_idx, predict_idx
    )

    # create the post processed file
    output_name = predict_file.replace("predict_", "processed_")
    create_proc_file(output_name, gt_sup, predict_sup, match_dict)

    return match_dict
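None of the examples define post_processing.nonmax_suppress itself; from the call sites it takes a 1-D score trace and a threshold and returns a same-length suppressed trace plus the peak frame indices. A minimal sketch consistent with that signature (an assumption about the real implementation, not a copy of it):

import numpy

def nonmax_suppress(signal, threshold):
    """Keep only local maxima at or above threshold; zero everything else."""
    signal = numpy.asarray(signal, dtype=float)
    suppressed = numpy.zeros_like(signal)
    peak_idx = []
    for i in range(len(signal)):
        left = signal[i - 1] if i > 0 else -numpy.inf
        right = signal[i + 1] if i < len(signal) - 1 else -numpy.inf
        # keep the first frame of a plateau (>= left, strictly > right).
        if signal[i] >= threshold and signal[i] >= left and signal[i] > right:
            suppressed[i] = signal[i]
            peak_idx.append(i)
    return suppressed, peak_idx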
def process_prediction_csv(filename,
                           out_filename,
                           frame_thresh=5,
                           val_threshold=0.75):
    """Given a predict_*.csv file, post process it."""
    # run non max suppression on the predictions and the ground truth.
    all_csv = load_predict_csv(filename)
    # convert the rows to numeric and drop the header.
    data, all_csv = convert_csv_numpy2(all_csv)
    all_csv = data

    # note: processed, data, and all_csv all alias the same array, so the
    # suppression below writes in place.
    processed = data
    # predicted
    suppressed, max_vals = nonmax_suppress(data[:, 1], val_threshold)
    processed[:, 1] = suppressed

    # ground truth
    suppressed, _ = nonmax_suppress(data[:, 2], val_threshold)
    processed[:, 2] = suppressed

    labelled = numpy.argwhere(processed[:, 2] == 1)
    labelled = labelled.flatten().tolist()
    num_labelled = len(labelled)
    # match the predicted peaks (max_vals) against the labelled frames.
    match_dict, dist_mat = hungarian_matching.apply_hungarian(
        max_vals, labelled)

    # missed classifications
    false_neg = match_dict["num_fn"]

    # extra predictions
    false_pos = len(match_dict["fps"])

    # create a graph with this data
    with open(out_filename, "w") as file:
        # write the header first
        file.write("frame,predicted,ground truth,image,nearest\n")
        for i in range(all_csv.shape[0]):
            file.write("%f,%f,%f,%s" %
                       (processed[i, 0], processed[i, 1], processed[i, 2],
                        "frames/%05d.jpg" % i))

            if i in max_vals:
                # if this predicted peak matched a labelled peak, write the
                # matched frame; otherwise mark it as unmatched. (the original
                # wrote one field per tps entry, producing malformed rows.)
                matched = [pair[1] for pair in match_dict["tps"]
                           if pair[0] == i]
                if matched:
                    file.write(",%d" % matched[0])
                else:
                    file.write(",no match")
            else:
                file.write(",N/A")

            file.write("\n")

    # absolute frame offsets for the matched (true positive) pairs.
    dists = [numpy.abs(match[1] - match[0]) for match in match_dict["tps"]]
    return num_labelled, dists, false_neg, false_pos
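A sketch of turning the returned values into summary statistics; the file names are hypothetical and the aggregation is not from the source:

import numpy

num_labelled, dists, false_neg, false_pos = process_prediction_csv(
    "predict_lift.csv", "processed_lift.csv")

true_pos = len(dists)                # one offset per matched (tp) pair
precision = true_pos / max(true_pos + false_pos, 1)
recall = true_pos / max(true_pos + false_neg, 1)
mean_offset = float(numpy.mean(dists)) if dists else 0.0
print("precision %.3f, recall %.3f, mean |offset| %.1f frames"
      % (precision, recall, mean_offset))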