Ejemplo n.º 1
0
 def make_graph(self):
     """Build the tracking graph for the cached 10-frame Duke cam2 test
     sequence and return it together with its ground truth.

     Returns:
         A tuple ``(graph, ground_truth)`` where the graph is produced by
         the module-level ``make_graph`` with a segment length of 60.
     """
     detections_per_frame = load_pickle(
         os.path.join(mydir, "data", "duke_test_seq_cam2_10.pck"))
     gt = load_pickle(
         os.path.join(mydir, "data", "duke_test_seq_cam2_10_gt.pck"))
     # Replace each frame path with the actual decoded image.
     with_images = []
     for frame_idx, frame_path, detections in detections_per_frame:
         image = imread(os.path.join(mydir, frame_path))
         with_images.append((frame_idx, image, detections))
     return make_graph(with_images, 60), gt
Ejemplo n.º 2
0
def eval_prepped_tracks_folds(datasets, part='eval'):
    """Evaluate previously saved track files for every dataset fold.

    For each graph name in *part* of each dataset, loads the matching
    tracks from ``<logdir>/tracks``, drops detections outside the scene's
    region of interest, and accumulates MOT metrics twice: once on the
    raw tracks and once after interpolating missing detections.

    Returns:
        A tuple ``(res, res_int)`` with the raw and interpolated metric
        summaries; both are also printed.
    """
    raw_metrics = MotMetrics(True, respect_classes=datasets[0].multi_class)
    interp_metrics = MotMetrics(True, respect_classes=datasets[0].multi_class)
    for dataset in datasets:
        for name, cam in tqdm(graph_names(dataset, part), 'Evaluating tracks'):
            meta = load_json(name + '-meta.json')
            first = meta['first_frame']
            frame_range = range(first, first + meta['length'])
            scene = dataset.scene(cam)
            gt_frames = scene.ground_truth()
            tracks = load_pickle(
                os.path.join(dataset.logdir, "tracks", os.path.basename(name)))
            filter_out_non_roi_dets(scene, tracks)

            raw_metrics.add(tracks, gt_frames, name, frame_range)
            # Interpolation mutates the tracks in place, so the raw metrics
            # must be accumulated first.
            interpolate_missing_detections(tracks)
            interp_metrics.add(tracks, gt_frames, name + 'i', frame_range)

    res = raw_metrics.summary()
    res_int = interp_metrics.summary()
    print("Result")
    print(res)
    print("\nResult interpolated")
    print(res_int)
    return res, res_int
Ejemplo n.º 3
0
def join_track_windows(dataset, part='eval'):
    """Stitch per-window track files of *dataset* into per-camera track lists.

    Iterates the stored graph windows sorted by ``(cam, name)`` and joins
    tracks of consecutive windows by matching detections that share the
    exact same bounding box in the overlapping frame range.  Yields one
    ``(cam, all_tracks)`` pair per camera, after merging overlapping
    detections and consolidating track classes.
    """
    # Append a (None, None) sentinel so the final camera's accumulated
    # tracks are flushed by the cam-change branch below.
    entries = list(
        sorted(graph_names(dataset, part), key=lambda e:
               (e[1], e[0]))) + [(None, None)]
    prev_cam = prev_track_frames = all_tracks = prev_tracks = None
    for name, cam in entries:
        if cam != prev_cam:
            # Camera changed: finalize and yield the previous camera's tracks.
            if all_tracks is not None:
                merge_overlapping_detections(all_tracks)
                consolidate_track_classes(all_tracks)
                yield prev_cam, all_tracks
            prev_track_frames = all_tracks = None
            prev_cam = cam
        if name is None:
            break

        tracks_name = os.path.join(dataset.logdir, "tracks",
                                   os.path.basename(name))
        tracks = load_pickle(tracks_name)
        # Index this window's detections by frame, tagging each detection
        # with the index of the track it belongs to.
        track_frames = defaultdict(list)
        for i, tr in enumerate(tracks):
            for det in tr:
                det.idx = i
                track_frames[det.frame].append(det)

        if prev_track_frames is not None:
            if len(track_frames.keys()) and len(prev_track_frames.keys()):
                # Frames shared by the previous and current window.
                overlap = range(min(track_frames.keys()),
                                max(prev_track_frames.keys()) + 1)
                # counts[p, n]: votes that prev track p and new track n are
                # the same track — +1 per frame with an identical box,
                # -1 per co-occurring frame with differing boxes.
                counts = np.zeros((len(prev_tracks), len(tracks)))
                for f in overlap:
                    for prv in prev_track_frames[f]:
                        for nxt in track_frames[f]:
                            if prv.left == nxt.left and prv.right == nxt.right and prv.top == nxt.top and prv.bottom == nxt.bottom:
                                counts[prv.idx, nxt.idx] += 1
                            else:
                                counts[prv.idx, nxt.idx] -= 1

                # Solve the assignment maximizing counts (lapjv minimizes).
                # NOTE(review): nxt_matches[i] is taken as the prev-track
                # index assigned to new track i — confirm against the lapjv
                # return convention; with extend_cost it may be -1 for
                # unassigned tracks, which would index the last row here.
                cost, _, nxt_matches = lapjv(-counts, extend_cost=True)
                assert len(nxt_matches) == len(tracks)
                for i in range(len(tracks)):
                    j = nxt_matches[i]
                    if counts[j][i] > 0:
                        # Positive evidence: extend the previous track and
                        # alias the new entry to it so later windows keep
                        # appending to the same list object.
                        prev_tracks[j] += tracks[i]
                        tracks[i] = prev_tracks[j]
                    else:
                        # No (or negative) evidence: start a new track.
                        all_tracks.append(tracks[i])
            else:
                # One side has no detections at all — nothing to match.
                all_tracks.extend(tracks)
        else:
            # First window of this camera.
            all_tracks = tracks
        prev_track_frames = track_frames
        prev_tracks = tracks
Ejemplo n.º 4
0
    def test_demote_promote(self):
        """Demoting and then promoting a graph must round-trip: every node's
        ``prev`` and ``next_weight_data`` (values and classes) must match the
        untouched copy."""
        graph = load_pickle(os.path.join(mydir, "data", "promoted_graph.pck"))
        reference = deepcopy(graph)
        demote_graph(graph)
        promote_graph(graph)

        for idx in range(len(graph)):
            node, ref = graph[idx], reference[idx]
            assert node.prev == ref.prev
            assert node.next_weight_data == ref.next_weight_data
            assert node.prev.__class__ == ref.prev.__class__
            assert node.next_weight_data.__class__ == \
                ref.next_weight_data.__class__
Ejemplo n.º 5
0
        imwrite(img, "dbg/%.8d.jpg" % f)


if __name__ == '__main__':
    from ggdtrack.duke_dataset import Duke
    from ggdtrack.visdrone_dataset import VisDrone
    from ggdtrack.mot16_dataset import Mot16
    from ggdtrack.utils import load_pickle, default_torch_device, promote_graph

    # Previous debug invocations, kept for reference:
    # show_tracks(Duke('/home/hakan/src/duke').scene(3), interpolate_missing_detections(load_pickle("tracks/duke_graph_3_00190415.pck")))
    # show_tracks(VisDrone('/home/hakan/src/ggdtrack/data/').scene("val__uav0000086_00000_v"), interpolate_missing_detections(load_pickle("cachedir/tracks/VisDrone_graph_val__uav0000086_00000_v_00000001.pck")))
    # show_tracks(VisDrone('/home/hakan/src/ggdtrack/data/').scene("val__uav0000182_00000_v"),
    #             interpolate_missing_detections(load_pickle("cachedir/tracks/VisDrone_graph_val__uav0000182_00000_v_00000001.pck")))
    # show_tracks(VisDrone('/home/hakan/src/ggdtrack/data/').scene("val__uav0000268_05773_v"),
    #             interpolate_missing_detections(load_pickle("cachedir/tracks/VisDrone_graph_val__uav0000268_05773_v_00000001.pck")),
    #             first_frame=500)
    # show_tracks(VisDrone('/home/hakan/src/ggdtrack/data/').scene("val__uav0000117_02622_v"),
    #             interpolate_missing_detections(load_pickle("cachedir/tracks/VisDrone_graph_val__uav0000117_02622_v_00000001.pck")))
    # show_tracks(Mot16('/home/hakan/src/ggdtrack/data/').scene("train__MOT16-04"),
    #             interpolate_missing_detections(load_pickle("cachedir/tracks/MOT16_fold0_graph_train__MOT16-04_00000001.pck")))
    # show_tracks(Mot16('/home/hakan/src/ggdtrack/data/', fold=3).scene("train__MOT16-13"),
    #             interpolate_missing_detections(load_pickle("cachedir/tracks/MOT16_fold3_graph_train__MOT16-13_00000001.pck")))
    # show_tracks(VisDrone('data').scene("val__uav0000268_05773_v"),
    #             interpolate_missing_detections(load_pickle("cachedir/logdir_VisDrone/tracks/VisDrone_graph_val__uav0000268_05773_v_00000001.pck")))

    # Currently active debug view.
    scene = VisDrone('data').scene("val__uav0000305_00000_v")
    tracks = load_pickle(
        "cachedir/logdir_VisDrone/tracks/VisDrone_graph_val__uav0000305_00000_v_00000001.pck"
    )
    show_tracks(scene, interpolate_missing_detections(tracks))
Ejemplo n.º 6
0
from glob import glob

import torch
import numpy as np

from ggdtrack.model import NNModelSimple
from ggdtrack.mot16_dataset import Mot16
from ggdtrack.utils import load_pickle

# Debug setup: load a MOT16 scene, two of its stored tracks, and the most
# recently trained NNModelSimple snapshot.
dataset = Mot16('/home/hakan/src/ggdtrack/data/')
# BUG FIX: the original line ended with a stray trailing comma, which made
# `scene` a 1-tuple wrapping the scene object instead of the scene itself.
scene = dataset.scene("train__MOT16-04")
tracks = load_pickle(
    "cachedir/tracks/MOT16_fold0_graph_train__MOT16-04_00000001.pck")
tr1 = tracks[12]
tr2 = tracks[31]

model = NNModelSimple()
# Pick the latest snapshot; 3-digit zero-padded epochs sort lexicographically.
fn = sorted(glob("%s/snapshot_???.pyt" % dataset.logdir))[-1]
print(fn)
model.load_state_dict(torch.load(fn)['model_state'])
model.eval()


def sum_connection_weights(tr):
    """Sum the model's KLT connection weights over every consecutive
    detection pair in track *tr*."""
    total = 0
    for prv, nxt in zip(tr, tr[1:]):
        feature = torch.tensor(
            model.connection_weight_feature(prv, nxt)[0].astype(np.float32))
        total += model.edge_model.klt_model(feature)
    return total
Ejemplo n.º 7
0
def prep_minimal_graph_diffs(dataset,
                             model,
                             threads=None,
                             limit=None,
                             skipped_ggd_types=()):
    """Prepare minimal graph-diff training data for *dataset* with *model*.

    Builds per-graph diff files under ``<cachedir>/minimal_graph_diff`` (in
    parallel over *threads* workers) and appends their entries into one
    ``GraphDiffList`` mmap store per train/eval part, skipping any diff
    whose ``name`` is in *skipped_ggd_types*.

    *limit* selects which graphs to process: ``None`` uses all graph names,
    a list is taken as explicit per-part entry lists, and an int processes
    a random sample of that size per part.

    Returns early (``None``) without recomputing when the saved trainval
    file and skipped-types pickle already match the requested setup.
    """
    trainval = {'train': [], 'eval': []}
    final_trainval = {n: [] for n in trainval.keys()}
    diff_lists = {}
    jobs = []
    os.makedirs(os.path.join(dataset.cachedir, "minimal_graph_diff"),
                exist_ok=True)
    for part in trainval.keys():
        if limit is None:
            entries = graph_names(dataset, part)
        elif isinstance(limit, list):
            entries = limit[part]
        else:
            # Integer limit: random sample of the part's graphs.
            entries = graph_names(dataset, part)
            shuffle(entries)
            entries = entries[:limit]
        for fn, cam in entries:
            bfn = os.path.join(dataset.cachedir, "minimal_graph_diff",
                               model.feature_name + '-' + os.path.basename(fn))
            jobs.append((dataset, cam, part, model, fn, bfn))
            final_trainval[part].append(bfn)

    trainval_name = os.path.join(
        dataset.cachedir, "minimal_graph_diff",
        "%s_%s_trainval.json" % (dataset.name, model.feature_name))
    skipped_ggd_types_name = os.path.join(
        dataset.cachedir, "minimal_graph_diff",
        "%s_%s_skipped_ggd_types.pck" % (dataset.name, model.feature_name))
    if os.path.exists(trainval_name) and os.path.exists(
            skipped_ggd_types_name):
        # Cache check: if every part's saved file set matches what we are
        # about to produce (for/else: the else runs only when no part
        # differed) and the skipped types match, there is nothing to do.
        current_trainval = load_json(trainval_name)
        for part in trainval.keys():
            if set(current_trainval[part]) != set(final_trainval[part]):
                break
        else:
            if load_pickle(skipped_ggd_types_name) == skipped_ggd_types:
                return

    for part in trainval.keys():
        # Recreate the mmap-backed diff store from scratch for each part.
        dn = os.path.join(
            dataset.cachedir, "minimal_graph_diff",
            "%s_%s_%s_mmaps" % (dataset.name, model.feature_name, part))
        if os.path.exists(dn):
            rmtree(dn)
        diff_lists[part] = GraphDiffList(dn, model)

    save_pickle(skipped_ggd_types, skipped_ggd_types_name)
    for part, base_bfn, bfns in parallel(prep_minimal_graph_diff_worker, jobs,
                                         threads,
                                         "Prepping minimal graph diffs"):
        trainval[part].append(base_bfn)
        # Persist progress after every completed job so an interrupted run
        # can be detected as incomplete by the cache check above.
        save_json(trainval, trainval_name)
        for bfn in bfns:
            graphdiff = torch.load(bfn)
            lst = diff_lists[part]
            for gd in graphdiff:
                if gd.name not in skipped_ggd_types:
                    lst.append(gd)
Ejemplo n.º 8
0
def find_false_positive_graph_diff(scene,
                                   tracks,
                                   model,
                                   empty=torch.tensor([])):
    """Build graph diffs penalizing bootstrapped false-positive tracks.

    For every track reported by ``find_false_positive_tracks``, collects
    the model's per-detection features and per-consecutive-pair connection
    features, and pairs an empty graph batch (the preferred, track-free
    solution) against the track's batch under the name
    'BootstrappedFPTrack'.

    Returns the list of ``GraphBatchPair`` objects.
    """
    graph_diff = []
    for tr in find_false_positive_tracks(scene, tracks):
        detections = []
        edges = []
        prv = None
        for det in tr:
            detections.append(model.detecton_weight_feature(det))
            if prv is not None:
                edges.append(model.connection_weight_feature(prv, det))
            # BUG FIX: `prv` was never updated, so `edges` was always empty
            # and no connection features were ever generated.
            prv = det
        graph_diff.append(
            GraphBatchPair(GraphBatch(empty, empty, 0),
                           GraphBatch(edges, detections, 1),
                           'BootstrappedFPTrack'))
    return graph_diff


if __name__ == '__main__':
    from ggdtrack.visdrone_dataset import VisDrone
    from ggdtrack.utils import load_pickle
    from ggdtrack.model import NNModelGraphresPerConnection

    # Debug run on one VisDrone validation scene with its stored tracks.
    scene = VisDrone('data').scene("val__uav0000268_05773_v")
    tracks = load_pickle(
        "cachedir/logdir_VisDrone/tracks/VisDrone_graph_val__uav0000268_05773_v_00000001.pck"
    )
    find_false_positive_graph_diff(scene, tracks, NNModelGraphresPerConnection)