# Esempio n. 1 (Example 1)
# 0
def prep_eval_gt_tracks_worker(args):
    """Worker: build ground-truth tracks for one cached eval graph and pickle
    them, together with a track-length histogram.

    Args:
        args: tuple (model, name, scene, logdir, split_on_no_edge) where
            *name* is the base path of a cached ``-<feature>-eval_graph``
            file produced earlier.

    Returns:
        The output path of the pickled tracks.
    """
    model, name, scene, logdir, split_on_no_edge = args
    ofn = os.path.join(logdir, "tracks", os.path.basename(name))

    graph, detection_weight_features, connection_batch = torch.load(
        name + '-%s-eval_graph' % model.feature_name)
    promote_graph(graph)
    tracks, gt_graph_frames = ground_truth_tracks(scene.ground_truth(),
                                                  graph,
                                                  add_gt_class=True)
    if split_on_no_edge:
        tracks = split_track_on_missing_edge(tracks)
    len_stats = defaultdict(int)
    for tr in tracks:
        # BUG FIX: count each track once. Previously this increment was inside
        # the detection loop below, so a track of length n was counted n times
        # and the saved 'track_length' histogram was length-weighted.
        len_stats[len(tr)] += 1
        # Strip every detection attribute except the class labels so the
        # pickled tracks stay small.
        for det in tr:
            if hasattr(det, 'cls'):
                cls = det.cls
                gt_cls = det.gt_cls if hasattr(det, 'gt_cls') else None
                det.__dict__ = {}
                det.cls = cls
                if gt_cls is not None:
                    det.gt_cls = gt_cls
            else:
                det.__dict__ = {}

    save_pickle(tracks, ofn)
    save_pickle({'track_length': len_stats}, ofn + '-stats')

    return ofn
# Esempio n. 2 (Example 2)
# 0
 def convert_ground_truth(self):
     """Cache per-frame ground truth for cameras 1-8 as pickle files.

     For each camera, detections are grouped by frame number and written
     to ``<path>/ground_truth/camera<N>.pck``; cameras whose cache file
     already exists are skipped.
     """
     for camera in range(1, 9):
         cache_file = self.path + '/ground_truth/camera%d.pck' % camera
         if os.path.exists(cache_file):
             continue  # already converted on a previous run
         per_frame = defaultdict(list)
         progress = tqdm(self.ground_truth_detections(camera),
                         'Converting ground truth for camera %d' % camera)
         for detection in progress:
             per_frame[detection.frame].append(detection)
         save_pickle(per_frame, cache_file)
# Esempio n. 3 (Example 3)
# 0
def make_duke_test_video():
    """Dump a 10-frame test fixture (frames, detections, ground truth) for
    Duke camera 2, starting at frame 54373, into test/data/.
    """
    camera = 2
    sequence = []
    scene = Duke('/home/hakan/src/duke').scene(camera)
    for idx, frame, dets in video_detections(scene, 54373, 10):
        frame_fn = "test/data/duke_frame_%d_%.8d.jpg" % (camera, idx)
        # NOTE(review): argument order (image, path) assumes a project-local
        # imsave helper; scipy/skimage take (path, image) -- confirm.
        imsave(frame, frame_fn)
        sequence.append((idx, frame_fn.replace("test/", ""), dets))
    save_pickle(sequence, "test/data/duke_test_seq_cam2_10.pck")
    full_gt = scene.ground_truth()
    gt_subset = {frame_idx: full_gt[frame_idx]
                 for frame_idx, _, _ in sequence}
    save_pickle(gt_subset, "test/data/duke_test_seq_cam2_10_gt.pck")
# Esempio n. 4 (Example 4)
# 0
def prep_eval_tracks_worker(args):
    """Worker: run LP tracking on one cached eval graph and pickle the
    resulting tracks with all detection attributes except 'cls' stripped.

    Args:
        args: tuple (model, name, device, logdir) where *name* is the base
            path of a cached ``-<feature>-eval_graph`` file.

    Returns:
        The output path of the pickled tracks.
    """
    model, name, device, logdir = args
    out_path = os.path.join(logdir, "tracks", os.path.basename(name))

    graph, detection_weight_features, connection_batch = torch.load(
        name + '-%s-eval_graph' % model.feature_name)
    promote_graph(graph)
    tracks = lp_track(graph,
                      connection_batch.to(device),
                      detection_weight_features.to(device),
                      model)
    # Keep the pickle small: drop every detection attribute, restoring
    # only 'cls' when it was present.
    for track in tracks:
        for detection in track:
            has_cls = hasattr(detection, 'cls')
            kept_cls = detection.cls if has_cls else None
            detection.__dict__ = {}
            if has_cls:
                detection.cls = kept_cls
    save_pickle(tracks, out_path)

    return out_path
# Esempio n. 5 (Example 5)
# 0
def prep_minimal_graph_diffs(dataset,
                             model,
                             threads=None,
                             limit=None,
                             skipped_ggd_types=()):
    """Precompute minimal graph-diff data for *model* over *dataset*.

    One job per (graph file, camera) entry is dispatched to
    ``prep_minimal_graph_diff_worker``; its results are merged into
    per-split ``GraphDiffList`` mmap stores under ``dataset.cachedir``.
    The function returns early when a previous run already produced the
    same file lists with the same ``skipped_ggd_types``.

    Args:
        dataset: provides ``cachedir``, ``name`` and the graph entries.
        model: provides ``feature_name`` (embedded in cache file names).
        threads: worker count forwarded to ``parallel`` (None = default).
        limit: None for all graphs; a per-split mapping of explicit
            entries; or an int to random-sample that many graphs per split.
        skipped_ggd_types: graph-diff type names to exclude from the stores.
    """
    trainval = {'train': [], 'eval': []}
    final_trainval = {n: [] for n in trainval.keys()}
    diff_lists = {}
    jobs = []
    os.makedirs(os.path.join(dataset.cachedir, "minimal_graph_diff"),
                exist_ok=True)
    # Build the job list and the set of output basenames each split is
    # expected to contain once all jobs finish.
    for part in trainval.keys():
        if limit is None:
            entries = graph_names(dataset, part)
        elif isinstance(limit, list):
            # NOTE(review): indexing a plain list with the string key
            # ``part`` would raise TypeError -- this branch presumably
            # expects a dict-like per-split mapping; confirm with callers.
            entries = limit[part]
        else:
            # Integer limit: random-sample that many graphs per split.
            entries = graph_names(dataset, part)
            shuffle(entries)
            entries = entries[:limit]
        for fn, cam in entries:
            bfn = os.path.join(dataset.cachedir, "minimal_graph_diff",
                               model.feature_name + '-' + os.path.basename(fn))
            jobs.append((dataset, cam, part, model, fn, bfn))
            final_trainval[part].append(bfn)

    trainval_name = os.path.join(
        dataset.cachedir, "minimal_graph_diff",
        "%s_%s_trainval.json" % (dataset.name, model.feature_name))
    skipped_ggd_types_name = os.path.join(
        dataset.cachedir, "minimal_graph_diff",
        "%s_%s_skipped_ggd_types.pck" % (dataset.name, model.feature_name))
    # Cache hit: if the stored file lists match what we would produce and
    # the stored skipped-types set is identical, nothing needs rebuilding.
    if os.path.exists(trainval_name) and os.path.exists(
            skipped_ggd_types_name):
        current_trainval = load_json(trainval_name)
        for part in trainval.keys():
            if set(current_trainval[part]) != set(final_trainval[part]):
                break
        else:
            # for/else: only reached when no split differed above.
            if load_pickle(skipped_ggd_types_name) == skipped_ggd_types:
                return

    # Start fresh mmap-backed diff stores, discarding any stale ones.
    for part in trainval.keys():
        dn = os.path.join(
            dataset.cachedir, "minimal_graph_diff",
            "%s_%s_%s_mmaps" % (dataset.name, model.feature_name, part))
        if os.path.exists(dn):
            rmtree(dn)
        diff_lists[part] = GraphDiffList(dn, model)

    save_pickle(skipped_ggd_types, skipped_ggd_types_name)
    # Workers yield (part, base_bfn, bfns).  The trainval json is re-saved
    # after every completed job so an interrupted run leaves a usable
    # partial index behind (the cache check above will then miss and the
    # remaining work is redone on the next call).
    for part, base_bfn, bfns in parallel(prep_minimal_graph_diff_worker, jobs,
                                         threads,
                                         "Prepping minimal graph diffs"):
        trainval[part].append(base_bfn)
        save_json(trainval, trainval_name)
        for bfn in bfns:
            graphdiff = torch.load(bfn)
            lst = diff_lists[part]
            for gd in graphdiff:
                if gd.name not in skipped_ggd_types:
                    lst.append(gd)