matching_cutoff=matching_cutoff,
                                             iou_cutoff=iou_cutoff,
                                             conf_cutoff=conf_cutoff,
                                             srr=srr,
                                             ber=ber,
                                             mask_others=mask_others,
                                             PLOT=SHOW)

        pred_path = write_kitti_output(preds,
                                       str(id).zfill(4), output_directory)
        print("wrote output file {}".format(id))

    # get results at the end
    mapping_path = "./data/tracking/evaluate_tracking.seqmap"
    file_id = "evaluations_{}".format(
        id)  # give a unique tag to the output files
    address = None

    # create mail messenger and debug output object
    if address:
        mail = mailpy.Mail(address)
    else:
        mail = mailpy.Mail("")

    # evaluate results and send notification email to user
    success = evaluate(output_directory,
                       label_dir,
                       mapping_path,
                       mail,
                       file_id=file_id)
# Example #2
#########################################################################
# entry point of evaluation script
# input:
#   - result_sha (unique key of results)
#   - user_sha (key of user who submitted the results, optional)
#   - email (email of user who submitted the results, optional)
if __name__ == "__main__":
    # If user_sha and email are not supplied, no notification email is sent
    # (this option is used for auto-updates).

    # Unique sha key of the submitted results.
    result_sha = "sha_key"

    # A messenger is always constructed; an empty address disables delivery.
    notify = len(sys.argv) == 4
    if notify:
        mail = mailpy.Mail(sys.argv[3])
    else:
        mail = mailpy.Mail("")

    # Evaluate the results, then finalize the (possibly no-op) notification.
    success = evaluate(result_sha, mail)
    mail.finalize(success, "tracking", result_sha,
                  sys.argv[2] if notify else "")
# Example #3
# entry point of evaluation script
# input:
#   - result_sha (unique key of results)
#   - split_version (optional dataset split identifier)
#   - uuid (optional unique tag for this run; random if omitted)
if __name__ == "__main__":
    import random

    # check for correct number of arguments. if user_sha and email are not
    # supplied, no notification email is sent (used for auto-updates)

    # get unique sha key of submitted results
    result_sha = sys.argv[1]
    split_version = sys.argv[2] if len(sys.argv) >= 3 else ''
    # random.randint requires integer bounds: 10**63 is exact, whereas the
    # original float literal 1e63 is imprecise and raises TypeError on
    # Python >= 3.12.
    uuid = sys.argv[3] if len(sys.argv) >= 4 else str(random.randint(0, 10**63))
    # mail messenger with an empty address: no notification is delivered
    mail = mailpy.Mail("")

    # evaluate results and finalize the (no-op) notification
    success = evaluate(result_sha, mail, split_version=split_version, uuid=uuid)
    if len(sys.argv) == 4:
        mail.finalize(success, "tracking", result_sha, split_version)
    else:
        mail.finalize(success, "tracking", result_sha, "")

def run(*argv):
    """
    Compare a baseline tracker against another model on the KITTI MOT
    metrics and persist the combined table as CSV and LaTeX.

    Parameters:
        argv = [signature, dir, "3D/2D", "Baseline", "Your model*", subfolder]
            signature: result key of the form <obj>_..._<det-type>_...
            dir: results directory under ./results

            3D/2D: which IoU to evaluate ("2D" or "3D")

            Baseline: name of baseline
                must match the folder where the results are stored.

            Your model*: name of your model
                must match the folder where the results are stored.
                Add * at the end if tracked objects are not in different
                subfolders

            subfolder: (optional)
                to store the output tables in a subfolder

    Returns:
        (MOTA, MOTP) of the evaluated (non-baseline) model.
    """
    # check for correct number of arguments
    if len(argv) < 5:
        print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
        sys.exit(1)

    # get unique sha key of submitted results
    result_sha = argv[0]
    obj_tracked = result_sha.split("_")[0]
    res_dir = argv[1]  # renamed: `dir` shadowed the builtin
    dt_typ = result_sha.split("_")[3]
    baseline_name = argv[3]
    mail = mailpy.Mail("")
    D = argv[2]

    if D == '2D':
        eval_3diou, eval_2diou = False, True  # eval 2d
    elif D == '3D':
        eval_3diou, eval_2diou = True, False  # eval 3d
    else:
        print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
        sys.exit(1)

    # All output files share one directory layout; build each path once.
    # BUG FIX: the LaTeX path previously started with '.results/' in the
    # subfolder case — now consistently 'results/' like the CSV path.
    if len(argv) == 6:
        out_prefix = 'results/{}/{}'.format(res_dir, argv[5])
    else:
        out_prefix = 'results/{}'.format(res_dir)
    table_name = '{}/results_{}_{}_table_{}.csv'.format(
        out_prefix, obj_tracked, dt_typ, D)
    latex_name = '{}/comparison_{}_{}_latex_{}.txt'.format(
        out_prefix, obj_tracked, dt_typ, D)

    cols = [
        "Model", "sMOTA", "MOTA", "MOTP", "MT", "ML", "IDS", "FRAG", "F1",
        "Prec", "Recall", "FAR", "TP", "FP", "FN", "sAMOTA", "AMOTA",
        "AMOTP"
    ]

    def _row(name, res, avgs):
        # Flatten one evaluation result into a table row matching `cols`.
        return [
            name, res.sMOTA, res.MOTA, res.MOTP, res.MT, res.ML,
            res.id_switches, res.fragments, res.F1, res.precision,
            res.recall, res.FAR, res.tp, res.fp, res.fn,
            avgs[0], avgs[1], avgs[2]
        ]

    # Load the existing table, evaluating the baseline only when missing.
    if os.path.exists(table_name):
        df = pandas.read_csv(table_name)
        if not (df["Model"] == baseline_name).any():
            print("Evaluating baseline")
            success, baseline, base_avgs = evaluate(result_sha, res_dir,
                                                    baseline_name, mail,
                                                    eval_3diou, eval_2diou)
            df.loc[len(df.index)] = _row(baseline_name, baseline, base_avgs)
    else:
        print("Evaluating baseline :")
        success, baseline, base_avgs = evaluate(result_sha, res_dir,
                                                baseline_name, mail,
                                                eval_3diou, eval_2diou)
        df = pandas.DataFrame(columns=cols)
        df.loc[len(df.index)] = _row(baseline_name, baseline, base_avgs)

    # Evaluate the model under comparison; a trailing '*' on its name is
    # stripped for display/table purposes.
    other_name = argv[4]
    mail = mailpy.Mail("")
    print("Evaluating " + other_name[:-1] + " :")
    success, other_model, om_avgs = evaluate(result_sha, res_dir, other_name,
                                             mail, eval_3diou, eval_2diou)
    df.loc[len(df.index)] = _row(other_name[:-1], other_model, om_avgs)
    print(df.loc[(df['Model'] == baseline_name) |
                 (df['Model'] == other_name[:-1])])

    # Persist both representations; context managers close the handles the
    # original left leaking.
    with open(latex_name, 'w') as latex_file:
        print(df.to_latex(index=False), file=latex_file)
    with open(table_name, "w") as csv_file:
        df.to_csv(csv_file, index=False, header=True)

    return other_model.MOTA, other_model.MOTP
def get_meas_target_set(score_intervals, det_method="lsvm", obj_class="car"):
    """
    Build one TargetSet of measurements per sequence from detected objects.

    Input:
    - score_intervals: list of detection-score intervals; only element 0 is
        used when loading detections
    - det_method: detection method name (default "lsvm")
    - obj_class: object class to load detections for (default "car")

    Returns:
    - list of TargetSet, one per sequence; each frame's Measurement holds
        positions/widths/heights/scores sorted by descending detection score
    """
    mail = mailpy.Mail("")

    # fixed: was a Python 2 `print` statement (SyntaxError on Python 3)
    print(score_intervals)

    # NOTE(review): the original hard-coded obj_class="car" here, silently
    # ignoring the parameter; forward it instead (default unchanged).
    det_objects = get_det_objs1(score_intervals[0],
                                det_method,
                                mail,
                                obj_class=obj_class)

    measurementTargetSetsBySequence = []

    for seq_dets in det_objects:
        cur_seq_meas_target_set = TargetSet()
        for frame_idx, frame_dets in enumerate(seq_dets):
            cur_frame_measurements = Measurement()
            cur_frame_measurements.time = frame_idx * .1  # frames are .1 seconds apart

            # Collect (score, (position, width, height)) per detection,
            # then emit them in descending-score order.
            cur_fram_meas_unsorted = []
            for cur_meas in frame_dets:
                meas_pos = np.array([cur_meas.x, cur_meas.y])
                meas_width = cur_meas.x2 - cur_meas.x1
                meas_height = cur_meas.y2 - cur_meas.y1
                cur_fram_meas_unsorted.append(
                    (cur_meas.score, (meas_pos, meas_width, meas_height)))

            cur_frame_meas_sorted = sorted(cur_fram_meas_unsorted,
                                           key=lambda tup: tup[0],
                                           reverse=True)
            for score, (pos, width, height) in cur_frame_meas_sorted:
                cur_frame_measurements.val.append(pos)
                cur_frame_measurements.widths.append(width)
                cur_frame_measurements.heights.append(height)
                cur_frame_measurements.scores.append(score)
            cur_seq_meas_target_set.measurements.append(cur_frame_measurements)

        measurementTargetSetsBySequence.append(cur_seq_meas_target_set)

    return measurementTargetSetsBySequence
# Example #6
def run(*argv):
    """
    Evaluate a single model and append its metrics as a new row of an
    existing results table.

    Parameters:
        argv = [signature, dir, "3D/2D", df, "Your model*", subfolder, sequence]
            signature: result key of the form <obj>_..._<det-type>_...
            dir: results directory; also used as the row's "Model" label

            3D/2D: which IoU to evaluate ("2D" or "3D")

            df: pandas table
                into which results are going to be inserted.

            Your model*: name of your model
                must match the folder where the results are stored.
                Add * at the end if tracked objects are not in different
                subfolders

            sequence: (optional)
                last positional argument, forwarded verbatim to evaluate()

    Returns:
        (MOTA, MOTP, df) of the evaluated model, with the new row appended.
    """
    # check for correct number of arguments
    if len(argv) < 5:
        print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
        sys.exit(1)

    # get unique sha key of submitted results
    result_sha = argv[0]
    res_dir = argv[1]  # renamed: `dir` shadowed the builtin
    df = argv[3]
    D = argv[2]

    if D == '2D':
        eval_3diou, eval_2diou = False, True  # eval 2d
    elif D == '3D':
        eval_3diou, eval_2diou = True, False  # eval 3d
    else:
        print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
        sys.exit(1)

    # evaluate results; mail with an empty address sends no notification
    # (the original constructed this messenger twice — once is enough)
    other_name = argv[4]
    mail = mailpy.Mail("")
    print("Evaluating " + res_dir + " :")
    # optional trailing argument (e.g. a sequence filter) is forwarded as-is
    last_arg = argv[-1] if len(argv) > 5 else None
    success, other_model, om_avgs = evaluate(result_sha, res_dir, other_name,
                                             mail, eval_3diou, eval_2diou,
                                             last_arg)

    # Flatten the evaluation into one table row labelled by the directory.
    df.loc[len(df.index)] = [
        res_dir, other_model.sMOTA, other_model.MOTA, other_model.MOTP,
        other_model.MT, other_model.ML, other_model.id_switches,
        other_model.fragments, other_model.F1, other_model.precision,
        other_model.recall, other_model.FAR, other_model.tp, other_model.fp,
        other_model.fn, om_avgs[0], om_avgs[1], om_avgs[2]
    ]

    return other_model.MOTA, other_model.MOTP, df