Example #1
def main(data_root='/mnt/EEC2C12EC2C0FBB9/Users/mirap/CMP/MOTDT/datasets',
         det_root=None,
         seqs=('MOT17-01-DPM', ),
         exp_name='demo',
         save_image=True,
         show_image=False):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdirs(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    for seq in seqs:
        output_dir = os.path.join(data_root, 'outputs',
                                  seq) if save_image else None

        logger.info('start seq: {}'.format(seq))
        loader = get_loader(data_root, det_root, seq)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        eval_seq(loader,
                 data_type,
                 result_filename,
                 save_dir=output_dir,
                 show_image=show_image)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
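
Example #1 stops after collecting the per-sequence accumulators in accs. The later examples (for instance #6 and #10) finish with a motmetrics summary; a minimal sketch of that missing step, reusing accs, seqs, result_root and exp_name from above and assuming the same mm (motmetrics) and Evaluator imports those examples use, would be:

    # get summary (sketch; mirrors the summary block of Examples #6 and #10)
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(summary,
                           os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))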
Example #2
def plot_prec_rec(results, ensembles, plotname = 'rec-prec_dl-z.png'):
    
    fig = plt.figure(plotname)
    n = len(ensembles.keys())*10
    i = 101 + n
    for ensemblename, classifiers in ensembles.items():
        ax = fig.add_subplot(i)
        ax.set_title(ensemblename)
        ax.set_xlabel('Recall')
        ax.set_ylabel('Precision')
        ax.set_ylim(0,1.05)
        ax.set_xlim(0,1.0)
        lines = []
        for name, result in results.items():
            predictions = result[classifiers].T.values
            gold = result['Gold'].values
            auc = Evaluator.auc(gold, predictions)
            prec, rec, _ = Evaluator.precision_recall_curve(gold, predictions)
            
            line, = ax.step(rec, prec, where = 'post', label = '{0} (AUC={1:.3f})'.format(name, auc), color=colors[name], alpha=0.8)
            lines.append(line)
            ax.fill_between(rec, prec, step='post', alpha=0.25, color=colors[name])
            
            if ensemblename == list(ensembles.keys())[0]:
                ax.plot(rec[-2], prec[-2], marker = 'x', color='black')
                ax.text(rec[-2], prec[-2], '({0:.2f},{1:.2f})'.format(rec[-2], prec[-2]))
                
        ax.legend(handles = lines)
        i = i + 1
    plt.figure(plotname)
    #plt.tight_layout()
    plt.savefig(path.join(
        OUTPATH,
        plotname
    ))
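
A hypothetical call for plot_prec_rec, for orientation only: each entry of results is read as a pandas DataFrame with a 'Gold' column plus one score column per classifier, ensembles groups classifier columns per subplot, and the module-level colors dict (used but not defined in this snippet) maps result names to matplotlib colors. The data, column names and colors below are illustrative assumptions, not values from the project.

import pandas as pd

colors = {'dev': 'tab:blue', 'test': 'tab:orange'}  # hypothetical result names

results = {
    'dev': pd.DataFrame({'Gold':  [1, 0, 1, 1],          # binary gold labels
                         'clf_a': [0.9, 0.2, 0.7, 0.4],  # classifier scores
                         'clf_b': [0.8, 0.1, 0.6, 0.5]}),
    'test': pd.DataFrame({'Gold':  [0, 1, 1, 0],
                          'clf_a': [0.3, 0.8, 0.9, 0.2],
                          'clf_b': [0.2, 0.7, 0.8, 0.1]}),
}
ensembles = {'A only': ['clf_a'], 'A + B': ['clf_a', 'clf_b']}
plot_prec_rec(results, ensembles, plotname='rec-prec_example.png')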
Example #3
def daganlevy_reproduction(plotname='dlr.png'):
    result = res.load_result('daganlevy')
    gold = result['Gold'].values
    fig = plt.figure('dlr')
    ax = fig.add_subplot(111)
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_ylim(0.5,1.0)
    ax.set_xlim(0,0.5)
    points = {
        'Lemma': {
            'values': ['Lemma Baseline'],
            'marker': 'p',
            'color': 'black'
        },
        'PPDB': {
            'values': ['Lemma Baseline', 'PPDB'],
            'marker': 'o',
            'color': '#ff006e'
        },
        'Entailment Graph': {
            'values': ['Lemma Baseline', 'Entailment Graph'],
            'marker': 's',
            'color': 'blue'
        },
        'All Rules': {
            'values': ['Lemma Baseline', 'Entailment Graph', 'PPDB'],
            'marker': '*',
            'color': '#ff006e'
        }
    }
    legend = []
    for name, props in points.items():
        predictions = result[props['values']].T.values
        prediction = Evaluator.aggregate(predictions, max)
        precision = skm.precision_score(gold, prediction)
        recall = skm.recall_score(gold, prediction)
        line, = ax.plot([recall], [precision], marker = props['marker'], markersize=10, color = props['color'], label=name, linestyle='None')
        legend.append(line)
    predictions = result[['Lemma Baseline', 'Relation Embeddings']].T.values
    prediction = Evaluator.aggregate(predictions, max)
    prec, rec, thresh = skm.precision_recall_curve(gold, prediction)
    line, = ax.plot(rec[1:-1], prec[1:-1], color='green', linestyle='--', linewidth=1, label='Relation Embs')
    legend.append(line)
    plt.figure('dlr')
    plt.legend(handles = legend)
    plt.tight_layout()
    plt.savefig(path.join(
        OUTPATH,
        plotname
    ))
    plt.show()
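
For reference, the module-level imports this snippet plausibly relies on are sketched below (skm.precision_score, skm.recall_score and skm.precision_recall_curve are scikit-learn functions, plt is matplotlib); OUTPATH, res, colors and Evaluator are project-specific names assumed to exist elsewhere in the module.

from os import path
import matplotlib.pyplot as plt
import sklearn.metrics as skm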
Example #4
def test(args):
    sys.setrecursionlimit(7000)
    is_ensemble = args['--ensemble']
    model_path = args['MODEL_FILE']
    test_set_path = args['TEST_DATA_FILE']

    extra_config = None
    if args['--extra-config']:
        extra_config = args['--extra-config']
        extra_config = json.loads(extra_config)

    print(f'loading model from [{model_path}]', file=sys.stderr)
    model_cls = EnsembleModel if is_ensemble else RenamingModel
    if is_ensemble:
        model_path = model_path.split(',')
    model = model_cls.load(model_path,
                           use_cuda=args['--cuda'],
                           new_config=extra_config)
    model.eval()

    test_set = Dataset(test_set_path)
    eval_results, decode_results = Evaluator.decode_and_evaluate(
        model, test_set, model.config, return_results=True)

    print(eval_results, file=sys.stderr)

    if args['--save-to']:
        save_to = args['--save-to']
    else:
        test_name = test_set_path.split("/")[-1]
        save_to = args['MODEL_FILE'] + f'.{test_name}.decode_results.bin'
    print(f'Save decode results to {save_to}', file=sys.stderr)
    with open(save_to, 'wb') as f:
        pickle.dump(decode_results, f)
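
The args mapping consumed by test() (keys such as '--ensemble', '--cuda', 'MODEL_FILE', 'TEST_DATA_FILE') looks like a docopt dictionary. A hypothetical entry point that would produce it is sketched below; the usage string and script name are assumptions, not the project's actual CLI.

"""
Usage:
    exp.py test [--cuda] [--ensemble] [--extra-config=<json>] [--save-to=<path>] MODEL_FILE TEST_DATA_FILE
"""
from docopt import docopt

if __name__ == '__main__':
    args = docopt(__doc__)  # e.g. {'--cuda': True, 'MODEL_FILE': 'model.bin', ...}
    test(args)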
Example #5
def main(data_root='', seqs=('', ), args=""):
    logger.setLevel(logging.INFO)
    data_type = 'mot'
    result_root = os.path.join(Path(data_root), "mot_results")
    mkdir_if_missing(result_root)

    cfg = get_config()
    cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_deepsort)

    # run tracking
    accs = []
    for seq in seqs:
        logger.info('start seq: {}'.format(seq))
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        video_path = data_root + "/" + seq + "/video/video.mp4"

        with VideoTracker(cfg, args, video_path, result_filename) as vdo_trk:
            vdo_trk.run()

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(summary,
                           os.path.join(result_root, 'summary_global.xlsx'))
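
A hypothetical driver for this VideoTracker variant: data_root is expected to hold one sub-folder per sequence (each containing video/video.mp4), and args must expose config_detection and config_deepsort paths. The option names and defaults below are illustrative assumptions.

import argparse
import os

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_root', type=str, default='datasets/MOT16/train')
    parser.add_argument('--config_detection', type=str, default='configs/detector.yaml')
    parser.add_argument('--config_deepsort', type=str, default='configs/deep_sort.yaml')
    args = parser.parse_args()

    # treat every sub-folder of data_root as one sequence
    seqs = sorted(d for d in os.listdir(args.data_root)
                  if os.path.isdir(os.path.join(args.data_root, d)))
    main(data_root=args.data_root, seqs=seqs, args=args)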
Example #6
def main(seqs=('2', ), det_types=('', )):
    # run tracking
    accs = []
    data_root = args.data_root + '/MOT{}/train'.format(args.mot_version)
    choice = (0, 0, 4, 0, 3, 3)
    TrackerConfig.set_configure(choice)
    choice_str = TrackerConfig.get_configure_str(choice)
    seq_names = []
    for seq in seqs:
        for det_type in det_types:
            result_filename = track_seq(seq, det_type, choice_str)
            seq_name = 'MOT{}-{}{}'.format(args.mot_version, seq.zfill(2),
                                           det_type)
            seq_names.append(seq_name)
            print('Evaluate seq:{}'.format(seq_name))

            evaluator = Evaluator(data_root, seq_name, 'mot')
            accs.append(evaluator.eval_file(result_filename))

    # get summary
    # metrics = ['mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall']
    metrics = mm.metrics.motchallenge_metrics
    # metrics = None
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seq_names, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary,
        os.path.join(os.path.join(args.log_folder, choice_str),
                     'summary_{}.xlsx'.format(args.exp_name)))
Example #7
def decode(model: RenamingModel, examples, config: Dict):

    model.eval()
    all_examples = dict()

    with torch.no_grad():
        for line_num, example in enumerate(examples):
            rename_result = model.predict([example])[0]
            example_pred_accs = []
            top_rename_result = rename_result[0]
            for old_name, gold_new_name \
                in example.variable_name_map.items():
                pred = top_rename_result[old_name]
                pred_new_name = pred['new_name']
                var_metric = Evaluator.get_soft_metrics(
                    pred_new_name, gold_new_name)
                example_pred_accs.append(var_metric)
            fun_name = example.ast.compilation_unit
            all_examples[f'{line_num}_{fun_name}'] = \
                (rename_result, Evaluator.average(example_pred_accs))

    return all_examples
Example #8
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # Read config
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None

        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read() 
        frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    sys.stdout.flush()
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
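
The frame rate above is recovered by slicing the raw text of seqinfo.ini. Since that file is a plain INI file in the standard MOTChallenge layout (a [Sequence] section with a frameRate key), a more robust sketch using the standard-library configparser could look like this; read_frame_rate is a hypothetical helper, not part of the project.

import configparser
import os

def read_frame_rate(data_root, seq):
    # Read frameRate from <data_root>/<seq>/seqinfo.ini (MOTChallenge layout).
    ini = configparser.ConfigParser()
    ini.read(os.path.join(data_root, seq, 'seqinfo.ini'))
    return int(ini['Sequence']['frameRate'])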
Example #9
def main(data_root='', seqs=('', ), args=""):
    logger = get_logger()
    logger.setLevel(logging.INFO)
    model_name = args.MODEL_NAME
    data_type = 'mot'
    analyse_every_frames = args.frame_interval
    dataset_name = data_root.split(sep='/')[-1]
    tracker_type = get_tracker_type(args)
    result_root = os.path.join("mot_results", model_name, dataset_name)
    mkdir_if_missing(result_root)

    cfg = get_config()
    cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_tracker)

    args.save_path = result_root

    # run tracking
    accs = []
    for seq in seqs:
        logger.info('start seq: {}'.format(seq))
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        seq_root = os.path.join(data_root, seq)
        video_root = os.path.join(seq_root, "video")
        video_path = os.path.join(video_root, os.listdir(video_root)[0])

        logger.info(f"Result filename: {result_filename}")
        logger.info(f'Frame interval: {analyse_every_frames}')
        if not os.path.exists(result_filename):
            with VideoTracker(cfg, args, video_path,
                              result_filename) as vdo_trk:
                vdo_trk.run()
        else:
            print(
                f"Result file {result_filename} already exists. Skipping processing"
            )

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(summary,
                           os.path.join(result_root, 'summary_global.xlsx'))
Example #10
def main(data_root=os.path.expanduser('~/Data/MOT16/train'),
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_image=False,
         show_image=True,
         args=None):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdirs(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    for seq in seqs:
        output_dir = os.path.join(data_root, 'outputs',
                                  seq) if save_image else None

        logger.info('start seq: {}'.format(seq))
        loader = get_loader(data_root, det_root, seq)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        eval_seq(loader,
                 data_type,
                 result_filename,
                 save_dir=output_dir,
                 show_image=show_image,
                 args=args)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))

    # get summary
    # metrics = ['mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall']
    metrics = mm.metrics.motchallenge_metrics
    # metrics = None
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, f'summary_{exp_name}.xlsx'))
Example #11
def main(_):
    tf_flags = tf.app.flags.FLAGS

    if tf_flags.phase == "train":
        with tf.Session() as sess:
            model = LocalDisNet(sess, tf_flags)
            if tf_flags.validation:
                val_dir = os.path.join(tf_flags.val_dir)
            else:
                val_dir = None
            model.train(tf_flags.batch_size,
                        tf_flags.training_epoches,
                        os.path.join(tf_flags.train_dir),
                        val_dir)
    elif tf_flags.phase == 'prediction':

        if not os.path.exists(tf_flags.test_res):
            os.makedirs(tf_flags.test_res)

        img_path = {f: os.path.join(tf_flags.test_dir, f) for f in os.listdir(tf_flags.test_dir)}

        with tf.Session() as sess:
            model = LocalDisNet(sess, tf_flags)
            model.restore_model()
            for f_name, f_path in img_path.items():
                img = imread(f_path)
                print("Processing: ", f_path)
                segs = model.segment_from_seed([img], seed_thres=0.7, similarity_thres=0.7, resize=True)
                save_indexed_png(os.path.join(tf_flags.test_res, os.path.splitext(f_name)[0]+'_seg.png'), segs[0].astype(np.uint8))

    elif tf_flags.phase == 'evaluation':
        e = Evaluator(gt_type="mask")
        # implement your evaluation based on your dataset with Evaluator
        pass
Example #12
def main(opt,
         data_root='/media/dh/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # Read config
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None

        logger.info('start seq: {}'.format(seq))
        #dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        print(osp.join(data_root, seq))
        dataloader = datasets.LoadVideo(osp.join(data_root, seq))
        # print ("DATALOADER", dataloader.vw)
        result_filename = os.path.join(result_root, '{}.csv'.format(seq))
        #meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        #frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        frame_rate = 30
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate,
                              vw=dataloader.vw)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))
Example #13
    # X: input, Y: output, poses, egomotions
    data_idxs = [0, 1, 2, 7]
    if data_idxs is None:
        logger.info("Invalid argument: model={}".format(args.model))
        exit(1)

    model = get_model(args)
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))
    scheduler = AdamScheduler(optimizer, 0.5, args.lr_step_list, 0)

    train_iterator = iterators.MultithreadIterator(train_dataset,
                                                   args.batch_size,
                                                   n_threads=args.nb_jobs)
    train_eval = Evaluator("train", args)
    valid_iterator = iterators.MultithreadIterator(valid_dataset,
                                                   args.batch_size,
                                                   False,
                                                   False,
                                                   n_threads=args.nb_jobs)
    valid_eval = Evaluator("valid", args)

    logger.info("Training...")
    train_eval.reset()
    st = time.time()

    # Training loop
    for iter_cnt, batch in enumerate(train_iterator):
        if iter_cnt == args.nb_iters:
            break
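
Example #13 breaks off at the top of its training loop. Example #14 below shows how a batch from the same dataset is converted and passed to the model in its evaluation loop; under those assumptions (the chainer imports convert, Variable and cuda used there, a Chainer model whose predict returns a loss Variable, and the optimizer set up above), one plausible per-iteration training step would be:

    # Sketch of one training iteration; the batch handling mirrors Example #14,
    # while the cleargrads/backward/update calls are standard Chainer idioms and
    # an assumption about how the project's model is trained.
    for iter_cnt, batch in enumerate(train_iterator):
        if iter_cnt == args.nb_iters:
            break
        batch_array = [convert.concat_examples([x[idx] for x in batch], args.gpu)
                       for idx in data_idxs]
        model.cleargrads()
        loss, pred_y, prob = model.predict(tuple(map(Variable, batch_array)))
        loss.backward()
        optimizer.update()
        train_eval.update(cuda.to_cpu(loss.data), pred_y, batch)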
Example #14
    # X: input, Y: output, poses, egomotions
    data_idxs = [0, 1, 2, 7]
    if data_idxs is None:
        logger.info("Invalid argument: model={}".format(args.model))
        exit(1)

    model = get_model(args)

    prediction_dict = {
        "arguments": vars(args),
        "predictions": {}
    }
    valid_iterator = iterators.MultiprocessIterator(
        valid_dataset, args.batch_size, False, False, n_processes=args.nb_jobs)
    valid_eval = Evaluator("valid", args)

    logger.info("Evaluation...")
    chainer.config.train = False
    chainer.config.enable_backprop = False

    # Evaluation loop
    for itr, batch in enumerate(valid_iterator):
        batch_array = [convert.concat_examples([x[idx] for x in batch], args.gpu) for idx in data_idxs]
        loss, pred_y, prob = model.predict(tuple(map(Variable, batch_array)))
        valid_eval.update(cuda.to_cpu(loss.data), pred_y, batch)
        write_prediction(prediction_dict["predictions"], batch, pred_y)

    message_str = "Evaluation: valid loss {} / ADE {} / FDE {}"
    logger.info(message_str.format(valid_eval("loss"), valid_eval("ade"), valid_eval("fde")))
    valid_eval.update_summary(summary, -1, ["loss", "ade", "fde"])
Example #15
        output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None

        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read() 
        frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
Example #16
if cmd_args['--extra-config'] is not None:
    extra_config = json.loads(cmd_args['--extra-config'])
else:
    default_config = '{"decoder": {"remove_duplicates_in_prediction": true} }'
    extra_config = json.loads(default_config)

model_path = cmd_args['MODEL_FILE']
print(f'loading model from [{model_path}]', file=sys.stderr)
model = RenamingModel.load(model_path,
                           use_cuda=cmd_args['--cuda'],
                           new_config=extra_config)
model.eval()

test_set_path = cmd_args['TEST_DATA_FILE']
test_set = Dataset(test_set_path)
decode_results = \
    Evaluator.decode(model, test_set, model.config)
pp = pprint.PrettyPrinter(stream=sys.stderr)
pp.pprint(decode_results)

if cmd_args['--save-to'] is not None:
    save_to = cmd_args['--save-to']
else:
    test_name = test_set_path.split("/")[-1]
    save_to = \
        f'{cmd_args["MODEL_FILE"]}.{test_name}.decode_results.bin'

print(f'Saving decode results to {save_to}', file=sys.stderr)
with open(save_to, 'wb') as f:
    pickle.dump(decode_results, f)
Example #17
def train(args):
    work_dir = args['--work-dir']
    config = json.loads(_jsonnet.evaluate_file(args['CONFIG_FILE']))
    config['work_dir'] = work_dir

    if not os.path.exists(work_dir):
        print(f'creating work dir [{work_dir}]', file=sys.stderr)
        os.makedirs(work_dir)

    if args['--extra-config']:
        extra_config = args['--extra-config']
        extra_config = json.loads(extra_config)
        config = util.update(config, extra_config)

    json.dump(config,
              open(os.path.join(work_dir, 'config.json'), 'w'),
              indent=2)

    model = RenamingModel.build(config)
    config = model.config
    model.train()

    if args['--cuda']:
        model = model.cuda()

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params, lr=0.001)
    nn_util.glorot_init(params)

    # set the padding index for embedding layers to zeros
    # model.encoder.var_node_name_embedding.weight[0].fill_(0.)

    train_set = Dataset(config['data']['train_file'])
    dev_set = Dataset(config['data']['dev_file'])
    batch_size = config['train']['batch_size']

    print(f'Training set size {len(train_set)}, dev set size {len(dev_set)}',
          file=sys.stderr)

    # training loop
    train_iter = epoch = cum_examples = 0
    log_every = config['train']['log_every']
    evaluate_every_nepoch = config['train']['evaluate_every_nepoch']
    max_epoch = config['train']['max_epoch']
    max_patience = config['train']['patience']
    cum_loss = 0.
    patience = 0
    t_log = time.time()

    history_accs = []
    while True:
        # load training dataset, which is a collection of ASTs and maps of gold-standard renamings
        train_set_iter = train_set.batch_iterator(
            batch_size=batch_size,
            return_examples=False,
            config=config,
            progress=True,
            train=True,
            num_readers=config['train']['num_readers'],
            num_batchers=config['train']['num_batchers'])
        epoch += 1

        for batch in train_set_iter:
            train_iter += 1
            optimizer.zero_grad()

            # t1 = time.time()
            nn_util.to(batch.tensor_dict, model.device)
            # print(f'[Learner] {time.time() - t1}s took for moving tensors to device', file=sys.stderr)

            # t1 = time.time()
            result = model(batch.tensor_dict,
                           batch.tensor_dict['prediction_target'])
            # print(f'[Learner] batch {train_iter}, {batch.size} examples took {time.time() - t1:4f}s', file=sys.stderr)

            loss = -result['batch_log_prob'].mean()

            cum_loss += loss.item() * batch.size
            cum_examples += batch.size

            loss.backward()

            # clip gradient
            grad_norm = torch.nn.utils.clip_grad_norm_(params, 5.)

            optimizer.step()
            del loss

            if train_iter % log_every == 0:
                print(
                    f'[Learner] train_iter={train_iter} avg. loss={cum_loss / cum_examples}, '
                    f'{cum_examples} examples ({cum_examples / (time.time() - t_log)} examples/s)',
                    file=sys.stderr)

                cum_loss = cum_examples = 0.
                t_log = time.time()

        print(f'[Learner] Epoch {epoch} finished', file=sys.stderr)

        if epoch % evaluate_every_nepoch == 0:
            print(f'[Learner] Perform evaluation', file=sys.stderr)
            t1 = time.time()
            # ppl = Evaluator.evaluate_ppl(model, dev_set, config, predicate=lambda e: not e['function_body_in_train'])
            eval_results = Evaluator.decode_and_evaluate(
                model, dev_set, config)
            # print(f'[Learner] Evaluation result ppl={ppl} (took {time.time() - t1}s)', file=sys.stderr)
            print(
                f'[Learner] Evaluation result {eval_results} (took {time.time() - t1}s)',
                file=sys.stderr)
            dev_metric = eval_results['func_body_not_in_train_acc']['accuracy']
            # dev_metric = -ppl
            if len(history_accs) == 0 or dev_metric > max(history_accs):
                patience = 0
                model_save_path = os.path.join(work_dir, f'model.bin')
                model.save(model_save_path)
                print(
                    f'[Learner] Saved currently the best model to {model_save_path}',
                    file=sys.stderr)
            else:
                patience += 1
                if patience == max_patience:
                    print(
                        f'[Learner] Reached max patience {max_patience}, exiting...',
                        file=sys.stderr)
                    patience = 0
                    exit()

            history_accs.append(dev_metric)

        if epoch == max_epoch:
            print(f'[Learner] Reached max epoch', file=sys.stderr)
            exit()

        t1 = time.time()
Example #18
    if data_idxs is None:
        logger.info("Invalid argument: model={}".format(args.model))
        exit(1)

    #Create model
    torch.manual_seed(12)  # init same network weights everytime
    net = TCN(args)
    optimizer = getattr(optim, args.optimizer)(net.parameters(), lr=args.lr)
    print(net)

    input("here")

    train_iterator = iterators.MultithreadIterator(train_dataset,
                                                   args.batch_size,
                                                   n_threads=args.nb_jobs)
    train_eval = Evaluator("train", args)
    valid_iterator = iterators.MultithreadIterator(valid_dataset,
                                                   args.batch_size,
                                                   False,
                                                   False,
                                                   n_threads=args.nb_jobs)
    valid_eval = Evaluator("valid", args)

    logger.info("Training...")
    train_eval.reset()
    st = time.time()
    total_loss = 0
    # Training loop
    for iter_cnt, batch in enumerate(train_iterator):
        if iter_cnt == args.nb_iters:
            break
Example #19
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    timer = Timer()
    accs = []
    n_frame = 0
    timer.tic()
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None

        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'),
                                         opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[meta_info.find('frameRate') +
                                   10:meta_info.find('\nseqLength')])
        n_frame += eval_seq(opt,
                            dataloader,
                            data_type,
                            result_filename,
                            save_dir=output_dir,
                            show_image=show_image,
                            frame_rate=frame_rate)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
    timer.toc()
    logger.info('Time elapsed: {}, FPS {}'.format(timer.average_time, n_frame /
                                                  timer.average_time))

    # get summary
    # metrics = ['mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall']
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
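
A note on the ffmpeg call used by several examples above: -c:v copy only stream-copies the JPEG frames into the MP4 container, producing an MJPEG video. To re-encode to H.264 at the sequence frame rate, a variant such as the following could be used instead (standard ffmpeg flags; frame_rate, output_dir and output_video_path are the variables already in scope in those examples):

cmd_str = ('ffmpeg -framerate {fps} -f image2 -i {img_dir}/%05d.jpg '
           '-c:v libx264 -pix_fmt yuv420p {out}').format(
               fps=frame_rate, img_dir=output_dir, out=output_video_path)
os.system(cmd_str)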