Example #1
def main(dataset, datadir, threads, segment_length, cachedir, minimal_confidence, fold, max_connect,
         max_worse_eval_epochs, epochs, too_short_track, logdir_prefix):
    opts = dict(cachedir=cachedir, default_min_conf=minimal_confidence)
    if fold is not None:
        opts['fold'] = fold
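    # Resolve the dataset class from its command-line name (e.g. "Duke");
    # eval() here trusts the caller's input.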
    dataset = eval(dataset)(datadir, **opts)
    dataset.cachedir = cachedir
    logdir = logdir_prefix + '/' + dataset.logdir

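    # Module-level thresholds for graph-diff mining: tracks shorter than
    # too_short_track frames count as too short, and "long" is twice that.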
    find_minimal_graph_diff.too_short_track = too_short_track
    find_minimal_graph_diff.long_track = too_short_track * 2
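
    # Ablation: start with every graph-diff type skipped, then re-enable them
    # cumulatively, one per run, in the order given by ggd_types_order.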
    skipped_ggd_types = set(ggd_types)
    for add in ggd_types_order:
        skipped_ggd_types.remove(add)

        dataset.logdir = logdir + "_added_" + add
        print(dataset.logdir)
        if os.path.exists(dataset.logdir):
            continue

        prep_training_graphs(dataset, cachedir, limit_train_amount=0.1, threads=threads, segment_length_s=segment_length,
                             worker_params=dict(max_connect=max_connect))

        model = NNModelGraphresPerConnection()
        prep_minimal_graph_diffs(dataset, model, threads=threads, skipped_ggd_types=skipped_ggd_types)
        prep_eval_graphs(dataset, model, threads=threads)

        train_graphres_minimal(dataset, model, epochs=epochs, max_worse_eval_epochs=max_worse_eval_epochs)

        prep_eval_tracks(dataset, model, 'eval', threads=1)
        res, res_int = eval_prepped_tracks(dataset, 'eval')
        with open(os.path.join(dataset.logdir, "eval_results.txt"), "w") as fd:
            fd.write(res)
        with open(os.path.join(dataset.logdir, "eval_results_int.txt"), "w") as fd:
            fd.write(res_int)
Example #2
def main(dataset, datadir, limit, threads, segment_length, cachedir, minimal_confidence, fold,
         max_connect, no_train, resume, max_worse_eval_epochs, epochs, too_short_track):
    opts = dict(cachedir=cachedir, default_min_conf=minimal_confidence)
    if fold is not None:
        opts['fold'] = fold
    dataset = eval(dataset)(datadir, **opts)
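    # Fetch the raw data and build any cached structures before training.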
    dataset.download()
    dataset.prepare()

    find_minimal_graph_diff.too_short_track = too_short_track
    find_minimal_graph_diff.long_track = too_short_track * 2

    prep_training_graphs(dataset, cachedir, limit=limit, threads=threads, segment_length_s=segment_length,
                         worker_params=dict(max_connect=max_connect))

    model = NNModelGraphresPerConnection()
    prep_minimal_graph_diffs(dataset, model, threads=threads)
    prep_eval_graphs(dataset, model, threads=threads)

    if not no_train:
        train_graphres_minimal(dataset, model, epochs=epochs, resume=resume, max_worse_eval_epochs=max_worse_eval_epochs)

    prep_eval_tracks(dataset, model, 'eval', threads=1)
    res, res_int = eval_prepped_tracks(dataset, 'eval')
    with open(os.path.join(dataset.logdir, "eval_results.txt"), "w") as fd:
        fd.write(res)
    with open(os.path.join(dataset.logdir, "eval_results_int.txt"), "w") as fd:
        fd.write(res_int)

    res, res_int = eval_prepped_tracks_joined(dataset, 'eval')
    with open(os.path.join(dataset.logdir, "eval_results_joined.txt"), "w") as fd:
        fd.write(res)
    with open(os.path.join(dataset.logdir, "eval_results_joined_int.txt"), "w") as fd:
        fd.write(res_int)

    prep_eval_tracks(dataset, model, 'test', threads=1)
    dataset.prepare_submition()
Example #3
def main():
    max_extra = 3
    dataset = Duke("data")

    logdir = "cachedir/logdir_1.0_0.2"
    model = NNModelGraphresPerConnection()
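    # Use the snapshot saved max_extra epochs before the last one, i.e. the
    # model kept just before early stopping ran out of patience.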
    fn = sorted(glob("%s/snapshot_???.pyt" % logdir))[-max_extra-1]
    snapshot = torch.load(fn)
    model.load_state_dict(snapshot['model_state'])

    prep_eval_tracks(dataset, logdir, model, 'test', threads=1)
    eval_prepped_tracks_csv(dataset, logdir, 'test')

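    # Concatenate the per-camera submit files and zip them for submission.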
    os.system("cat %s/result_duke_test_int/*_submit.txt > %s/duke.txt" % (logdir, logdir))
    if os.path.exists("%s/duke.zip" % logdir):
        os.unlink("%s/duke.zip" % logdir)
    os.system("cd %s; zip duke.zip duke.txt" % logdir)
Example #4
    def test_ggd_batches(self):
        graphres = torch.load(
            os.path.join(mydir, "data", "basic-duke_graph_3_00190415.pck"))

        with TemporaryDirectory() as tmpdir:
            model = NNModelGraphresPerConnection()
            model.load_state_dict(
                torch.load(os.path.join(mydir, "data",
                                        "snapshot_009.pyt"))['model_state'])
            model.eval()

            lst = GraphDiffList(tmpdir, model)

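            # Score each example individually and mirror it into the on-disk list.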
            old = []
            batch_size = 4
            n = (len(graphres) // batch_size) * batch_size
            for i in range(n):
                ex1 = graphres[i]
                old.append((model(ex1.pos) - model(ex1.neg)).item())
                lst.append(graphres[i])

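            # Re-score in batches and check that the batched forward pass
            # agrees with the per-example scores.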
            for i0 in range(0, n, batch_size):
                batch = make_ggd_batch(
                    [lst[i] for i in range(i0, i0 + batch_size)])
                scores = model.ggd_batch_forward(batch)
                for i in range(i0, i0 + batch_size):
                    assert abs(scores[i - i0].item() - old[i]) < 1e-3
Example #5
def main():
    dataset = Duke('data', cachedir="cachedir")  # alternatively cachedir="cachedir_mc5"
    model = NNModelGraphresPerConnection()
    logdir = dataset.logdir
    print(logdir)
    fn = sorted(glob("%s/snapshot_???.pyt" % logdir))[-1]
    model.load_state_dict(torch.load(fn)['model_state'])
    model.eval()

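    # Running counts: GT successors missing from the graph, long (frame gap
    # > 1) connections, and those whose weight falls within the GT bound.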
    gt_not_in_graph = long_connections = long_connections_within_bound = 0

    for name, cam in tqdm(graph_names(dataset, "eval"),
                          "Estimating long structure"):
        name = name.replace("/lunarc/nobackup/projects/lu-haar/ggdtrack/", "")
        graph, detection_weight_features, connection_batch = torch.load(
            name + '-%s-eval_graph' % model.feature_name)
        promote_graph(graph)
        connection_weights = model.connection_batch_forward(connection_batch)
        detection_weights = model.detection_model(detection_weight_features)

        scene = dataset.scene(cam)
        gt_tracks, gt_graph_frames = ground_truth_tracks(
            scene.ground_truth(), graph)
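        # Chain each ground-truth track so every detection knows its successor.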
        for tr in gt_tracks:
            prv = tr[0]
            for det in tr[1:]:
                prv.gt_next = det
                prv = det

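        # Count long connections whose learned weight stays within the upper
        # bound implied by the ground truth.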
        for det in graph:
            for i, nxt in zip(det.weight_index, det.next):
                if det.track_id == nxt.track_id and nxt.track_id is not None and nxt.frame - det.frame > 1:
                    long_connections += 1
                    upper = get_upper_bound_from_gt(det, nxt,
                                                    connection_weights,
                                                    detection_weights)
                    if upper is None:
                        gt_not_in_graph += 1
                    elif 0 < connection_weights[i] < upper:
                        long_connections_within_bound += 1
                    # print ("  %s -[%4.2f]-> %s" % (det.track_id, connection_weights[i], nxt.track_id),
                    #        det.frame, nxt.frame, upper)

        # tracks = lp_track(graph, connection_batch, detection_weight_features, model)
        # print(tracks)

        print()
        print(gt_not_in_graph, long_connections, long_connections_within_bound)
        print(long_connections_within_bound /
              (long_connections - gt_not_in_graph))
        print()
Example #6
def main(threads, cachedir, train_amounts, iterations, logdir_prefix):
    max_extra = 3
    dataset = Duke("data")
    dataset.cachedir = cachedir
    logdir = logdir_prefix + '/' + dataset.logdir

    global_skip = {"LongConnectionOrder", "LongFalsePositiveTrack"}

    for train_amount in map(float, train_amounts.split(',')):
        for itt in range(int(iterations)):
            t0 = time()
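            # Note: hash() of a str varies between interpreter runs unless
            # PYTHONHASHSEED is fixed, so this seed is not reproducible by default.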
            prep_training_graphs(dataset,
                                 cachedir,
                                 limit_train_amount=train_amount,
                                 threads=threads,
                                 seed=hash(logdir) + itt)
            model = NNModelGraphresPerConnection()
            prep_minimal_graph_diffs(dataset,
                                     model,
                                     threads=threads,
                                     skipped_ggd_types=global_skip)
            prep_eval_graphs(dataset, model, threads=threads)

            t1 = time()
            dataset.logdir = logdir + "_%8.6f_%.2d" % (train_amount, itt)
            train_graphres_minimal(dataset,
                                   model,
                                   epochs=1000,
                                   max_worse_eval_epochs=max_extra,
                                   train_amount=train_amount)

            t2 = time()
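            # Evaluate the snapshot saved max_extra epochs before the last,
            # i.e. the best model found by early stopping.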
            fn = sorted(glob("%s/snapshot_???.pyt" %
                             dataset.logdir))[-max_extra - 1]
            prep_eval_tracks(dataset, model, 'eval', threads=1, snapshot=fn)
            res, res_int = eval_prepped_tracks(dataset, 'eval')
            with open(os.path.join(dataset.logdir, "eval_results.txt"), "w") as fd:
                fd.write(res)
            with open(os.path.join(dataset.logdir, "eval_results_int.txt"), "w") as fd:
                fd.write(res_int)

            t3 = time()
            with open(os.path.join(dataset.logdir, "timing.txt"), "w") as fd:
                fd.write(repr((t0, t1, t2, t3)))

    prep_eval_gt_tracks(dataset, NNModelGraphresPerConnection)
    res, res_int = eval_prepped_tracks(dataset, 'eval')
    with open(os.path.join(dataset.cachedir, "eval_gt_results.txt"), "w") as fd:
        fd.write(res)
    with open(os.path.join(dataset.cachedir, "eval_gt_results_int.txt"), "w") as fd:
        fd.write(res_int)
Example #7
def main(datadir):
    dataset = Duke(datadir)

    model = NNModelGraphresPerConnection()

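    # Prepare eval graphs for the training split, then fine-tune with the
    # Frossard-style trainer, resuming from an earlier snapshot.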
    prep_eval_graphs(dataset, NNModelGraphresPerConnection(), parts=["train"])
    # train_frossard(dataset, "cachedir/logdir_fossard", model, resume_from="cachedir/logdir/model_0001.pyt", epochs=10)
    train_frossard(dataset,
                   "cachedir/logdir_fossard",
                   model,
                   resume_from="cachedir/logdir/snapshot_009.pyt",
                   save_every=10,
                   epochs=1)

    prep_eval_tracks(dataset,
                     "cachedir/logdir_fossard",
                     model,
                     'eval',
                     threads=1)
    res, res_int = eval_prepped_tracks(dataset, 'eval')

    open("cachedir/logdir_fossard/eval_results.txt", "w").write(res)
    open("cachedir/logdir_fossard/eval_results_int.txt", "w").write(res_int)
Example #8
def main(threads, cachedir, logdir_glob):
    max_extra = 3
    dataset = Duke("data")
    dataset.cachedir = cachedir

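    # Re-evaluate every matching log directory, skipping any that already
    # have results on disk.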
    for logdir in glob(logdir_glob):
        dataset.logdir = logdir
        if os.path.exists(os.path.join(dataset.logdir,
                                       "eval_results_int.txt")):
            continue
        model = NNModelGraphresPerConnection()
        fn = sorted(glob("%s/snapshot_???.pyt" % dataset.logdir))[-max_extra -
                                                                  1]
        prep_eval_tracks(dataset, model, 'eval', threads=1, snapshot=fn)
        res, res_int = eval_prepped_tracks(dataset, 'eval')
        with open(os.path.join(dataset.logdir, "eval_results.txt"), "w") as fd:
            fd.write(res)
        with open(os.path.join(dataset.logdir, "eval_results_int.txt"), "w") as fd:
            fd.write(res_int)
Example #9
def main(dataset, datadir, cachedir, minimal_confidence):
    model = NNModelGraphresPerConnection()
    dataset = eval(dataset)(datadir,
                            cachedir=cachedir,
                            default_min_conf=minimal_confidence)
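    # Build tracks directly from the ground truth and score them as a
    # reference point for the learned tracker.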
    prep_eval_gt_tracks(dataset, model, 'eval', threads=1)

    res, res_int = eval_prepped_tracks(dataset, 'eval')
    with open(os.path.join(dataset.logdir, "eval_results_gt.txt"), "w") as fd:
        fd.write(res)
    with open(os.path.join(dataset.logdir, "eval_results_gt_int.txt"), "w") as fd:
        fd.write(res_int)

    res, res_int = eval_prepped_tracks_joined(dataset,
                                              'eval',
                                              discard_bad_classes=True)
    with open(os.path.join(dataset.logdir, "eval_results_gt_joined.txt"), "w") as fd:
        fd.write(res)
    with open(os.path.join(dataset.logdir, "eval_results_gt_joined_int.txt"), "w") as fd:
        fd.write(res_int)
Example #10
                # print(model.entry_weight_parameter, model.entry_weight_parameter.grad, hamming_distance_entry)
                optimizer.step()

                if lp_tracker_pool is None:
                    break

    # writer = SummaryWriter(logdir)


if __name__ == '__main__':
    import time
    from random import seed

    from ggdtrack.duke_dataset import Duke
    from ggdtrack.model import NNModelGraphresPerConnection
    from ggdtrack.eval import prep_eval_graphs, EvalGtGraphs

    t0 = time.time()

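    # Quick training run on Duke, snapshotting every epoch and timing it.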
    dataset = Duke('data')
    # train_graphres_minimal(dataset, "logdir", NNModelGraphresPerConnection())
    train_graphres_minimal(dataset,
                           "cachedir/logdir",
                           NNModelGraphresPerConnection(),
                           save_every=1)

    # train_frossard(dataset, "cachedir/logdir_fossard", NNModelGraphresPerConnection(), mean_from="cachedir/logdir/snapshot_009.pyt")
    seed(42)
    # train_frossard(dataset, "cachedir/logdir_fossard", NNModelGraphresPerConnection(), resume_from="cachedir/logdir_fossard", limit=1)

    # train_frossard(dataset, "cachedir/logdir_fossard2", NNModelGraphresPerConnection(), mean_from="cachedir/logdir/snapshot_009.pyt", limit=1)
    # train_frossard(dataset, "cachedir/logdir_fossard2", NNModelGraphresPerConnection(), resume_from="cachedir/logdir/snapshot_009.pyt", epochs=10)
    print(time.time() - t0)
Example #11
import re
from glob import glob
from os import stat
from random import shuffle

import torch

from ggdtrack.duke_dataset import Duke
from ggdtrack.eval import eval_hamming, ConnectionBatch, prep_eval_graphs
from ggdtrack.model import NNModelGraphresPerConnection
from ggdtrack.utils import save_json

dataset = Duke("data")
model = NNModelGraphresPerConnection()

prep_eval_graphs(dataset, NNModelGraphresPerConnection(), parts=["train"])

models = glob("cachedir/logdir/model*")
shuffle(models)

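# Evaluate the Hamming loss of every saved model, recording each checkpoint's
# ctime so the loss can be related to training time.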
hammings = []
for fn in models:
    model.load_state_dict(torch.load(fn))
    hamming = eval_hamming(dataset, None, model)
    print(hamming)
    t = stat(fn).st_ctime_ns / 1e9
    hammings.append((t, hamming))
    save_json(hammings, "cachedir/logdir/hammings.json")
Example #12
    def test_full_ggd_train(self):
        graph, ground_truth = self.make_graph()
        print(ground_truth)
        model = NNModelGraphresPerConnection()
        scene = FakeScene(ground_truth)
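        # Mine the minimal graph-diff training examples for the synthetic scene.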
        graph_diff = find_minimal_graph_diff(scene, graph, model)