Example #1
def main(pred_file, split_type='valid', **kwargs):
    if 'cfg' not in kwargs:
        from extended_config import (
            cfg as conf,
            key_maps,
            # CN,
            update_from_dict,
            # post_proc_config
        )
        cfg = conf
        cfg = update_from_dict(cfg, kwargs, key_maps)
    else:
        cfg = kwargs['cfg']
        cfg.freeze()
    # grnd_eval = GroundEval_Corr(cfg)
    # grnd_eval = GroundEvalDS4(cfg)
    comm = Munch()
    exp = cfg.ds.exp_setting
    if exp == 'gt5':
        comm.num_prop_per_frm = 5
    elif exp == 'p100':
        comm.num_prop_per_frm = 100
    else:
        raise NotImplementedError

    conc_type = cfg.ds.conc_type
    if conc_type == 'sep' or conc_type == 'svsq':
        grnd_eval = GroundEval_SEP(cfg, comm)
    elif conc_type == 'temp':
        grnd_eval = GroundEval_TEMP(cfg, comm)
    elif conc_type == 'spat':
        grnd_eval = GroundEval_SPAT(cfg, comm)
    else:
        raise NotImplementedError
    out = grnd_eval.eval_ground_acc(pred_file, split_type=split_type)
    # to_print = ['avg1', 'avg2']
    # print(Counter(grnd_eval.pcs))
    met_keys = ['avg1', 'avg1_cons',
                'avg1_vidf', 'avg1_strict']
    print({k: out[k] for k in met_keys})
    # print(Counter(grnd_eval.stuff))
    # return out
    return
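
A minimal usage sketch, assuming the module-level imports (Munch and the GroundEval_* classes) are in scope; the prediction-file path is a placeholder.

# Hypothetical call; prints the avg1* metrics and returns None.
main('predictions/valid_preds.pkl', split_type='valid')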
Example #2
def main_dist(uid: str, **kwargs):
    """
    uid is a unique identifier for the experiment name
    Can be kept same as a previous run, by default will start executing
    from latest saved model
    **kwargs: allows arbit arguments of cfg to be changed
    """
    cfg = conf
    num_gpus = torch.cuda.device_count()
    cfg.num_gpus = num_gpus

    if num_gpus > 1:

        if 'local_rank' in kwargs:
            # We are doing distributed parallel
            cfg.do_dist = True
            torch.cuda.set_device(kwargs['local_rank'])
            torch.distributed.init_process_group(
                backend="nccl", init_method="env://"
            )
            synchronize()
        else:
            # We are doing data parallel
            cfg.do_dist = False

    # Update the config file depending on the command line args
    cfg = update_from_dict(cfg, kwargs, key_maps)

    # Freeze the cfg, can no longer be changed
    cfg.freeze()
    # print(cfg)
    # Initialize learner
    learn = learner_init(uid, cfg)
    # Train or Test
    if not (cfg.only_val or cfg.only_test):
        learn.fit(epochs=cfg.epochs, lr=cfg.lr)
    else:
        if cfg.only_val:
            learn.testing(learn.data.valid_dl)
        if cfg.only_test:
            learn.testing(learn.data.test_dl)
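
A usage sketch, assuming the script exposes main_dist through a CLI wrapper such as python-fire (consistent with the uid/**kwargs signature); the uid and the overrides are placeholders.

# Hypothetical single-GPU call; 'lr' and 'epochs' are assumed to be valid cfg keys.
main_dist('asrl_baseline_run1', lr=1e-4, epochs=10)

# Multi-GPU runs rely on torch.distributed.launch to supply local_rank,
# e.g. (placeholder command line):
#   python -m torch.distributed.launch --nproc_per_node=2 main.py asrl_baseline_run1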
Example #3
def main(pred_file, split_type='valid', **kwargs):
    if 'cfg' not in kwargs:
        from extended_config import (
            cfg as conf,
            key_maps,
            # CN,
            update_from_dict,
            # post_proc_config
        )
        cfg = conf
        cfg = update_from_dict(cfg, kwargs, key_maps)
    else:
        cfg = kwargs['cfg']
        cfg.freeze()
    # grnd_eval = GroundEval_Corr(cfg)
    # grnd_eval = GroundEvalDS4(cfg)
    comm = Munch()
    exp = cfg.ds.exp_setting
    if exp == 'gt5':
        comm.num_prop_per_frm = 5
    elif exp == 'p100':
        comm.num_prop_per_frm = 100
    else:
        raise NotImplementedError

    conc_type = cfg.ds.conc_type
    if conc_type == 'sep' or conc_type == 'svsq':
        avis = ASRL_Vis_SEP(cfg, comm)
    elif conc_type == 'temp':
        avis = ASRL_Vis_TEMP(cfg, comm)
    elif conc_type == 'spat':
        avis = ASRL_Vis_SPAT(cfg, comm)
    else:
        raise NotImplementedError

    # avis.draw_boxes_all_indices(
    #     pred_file, split_type=split_type
    # )

    return avis
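
A usage sketch: the returned visualizer can drive the drawing call that is commented out above; the prediction-file path is a placeholder.

# Hypothetical call; builds the visualizer for the configured setting.
avis = main('predictions/valid_preds.pkl', split_type='valid')
# avis.draw_boxes_all_indices('predictions/valid_preds.pkl', split_type='valid')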
Example #4
def main_dist(uid: str, **kwargs):
    """
    uid is a unique identifier for the experiment name
    Can be kept same as a previous run, by default will start executing
    from latest saved model
    **kwargs: allows arbit arguments of cfg to be changed
    """
    cfg = conf
    num_gpus = torch.cuda.device_count()
    cfg.num_gpus = num_gpus
    cfg.uid = uid
    cfg.cmd = sys.argv
    if num_gpus > 1:
        if 'local_rank' in kwargs:
            # We are doing distributed parallel
            cfg.do_dist = True
            torch.cuda.set_device(kwargs['local_rank'])
            torch.distributed.init_process_group(
                backend="nccl", init_method="env://"
            )
            synchronize()
        else:
            # We are doing data parallel
            cfg.do_dist = False
            # cfg.do_dp = True
    # Update the config file depending on the command line args
    cfg = update_from_dict(cfg, kwargs, key_maps)
    cfg = post_proc_config(cfg)
    # Freeze the cfg, can no longer be changed
    cfg.freeze()
    # print(cfg)
    # Initialize learner
    learn = learner_init(uid, cfg)
    # Train or Test
    if not (cfg.only_val or cfg.only_test or cfg.overfit_batch):
        learn.fit(epochs=cfg.train.epochs, lr=cfg.train.lr)
        if cfg.run_final_val:
            print('Running Final Validation using best model')
            learn.load_model_dict(
                resume_path=learn.model_file,
                load_opt=False
            )
            val_loss, val_acc, _ = learn.validate(
                db={'valid': learn.data.valid_dl},
                write_to_file=True
            )
            print(val_loss)
            print(val_acc)
        else:
            pass
    else:
        if cfg.overfit_batch:
            learn.overfit_batch(1000, 1e-4)
        if cfg.only_val:
            val_loss, val_acc, _ = learn.validate(
                db={'valid': learn.data.valid_dl},
                write_to_file=True
            )
            print(val_loss)
            print(val_acc)
            # learn.testing(learn.data.valid_dl)
            pass
        if cfg.only_test:
            # learn.testing(learn.data.test_dl)
            test_loss, test_acc, _ = learn.validate(
                db=learn.data.test_dl)
            print(test_loss)
            print(test_acc)

    return
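
A usage sketch for this variant, assuming only_val and overfit_batch are top-level cfg flags reachable through the kwargs overrides (as the function reads them from cfg); the uid values are placeholders.

# Hypothetical validation-only run on an existing experiment.
main_dist('asrl_run1', only_val=True)

# Hypothetical single-batch overfit sanity check.
main_dist('overfit_check', overfit_batch=True)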