Example #1
0
def factory(model, engine):
    """Build the optimizer for `model`, optionally wrapped in a LR scheduler.

    Reads Options()['optimizer']; when opt['lr_scheduler'] is set the
    BanOptimizer is wrapped in ReduceLROnPlateau, and when opt['init'] is
    'glorot' the network weights are re-initialized in place.

    Returns the (possibly wrapped) optimizer.
    """
    opt = Options()['optimizer']

    # Reuse `opt` instead of re-reading Options()['optimizer'] five times.
    optimizer = BanOptimizer(
        engine,
        name=opt.get('name', 'Adamax'),
        lr=opt['lr'],
        gradual_warmup_steps=opt.get('gradual_warmup_steps', [0.5, 2.0, 4]),
        lr_decay_epochs=opt.get('lr_decay_epochs', [10, 20, 2]),
        lr_decay_rate=opt.get('lr_decay_rate', .25))

    if opt.get('lr_scheduler', None):
        optimizer = ReduceLROnPlateau(optimizer, engine, **opt['lr_scheduler'])

    if opt.get('init', None) == 'glorot':
        # Glorot init: zero the 1-d params (biases), Xavier-init matrices.
        for p in model.network.parameters():
            if p.dim() == 1:
                p.data.fill_(0)
            elif p.dim() >= 2:
                nn.init.xavier_uniform_(p.data)
            else:
                raise ValueError(p.dim())

    return optimizer
Example #2
0
File: factory.py  Project: valeoai/BEEF
def factory(engine):
    """Instantiate the network selected by Options()['model.network'].

    Wraps the net in DataParallel when more than one GPU is visible.
    Raises ValueError for an unknown network name.
    """
    opt = Options()['model.network']
    name = opt['name']

    if name == "beef_hdd":
        net = BeefHDD(
            layers_to_fuse=opt['layers_to_fuse'],
            label_fusion_opt=opt['label_fusion'],
            blinkers_dim=opt['blinkers_dim'],
            gru_opt=opt['gru_opt'],
            n_future=opt['n_future'],
            detach_pred=opt.get('detach_pred', False),
        )
    elif name == "driver_hdd":
        net = DriverHDD(
            blinkers_dim=opt['blinkers_dim'],
            gru_opt=opt['gru_opt'],
            n_future=opt['n_future'],
        )
    elif name == "baseline_multitask_hdd":
        net = BaselineMultitaskHDD(
            n_classes=opt['n_classes'],
            blinkers_dim=opt['blinkers_dim'],
            layer_to_extract=opt['layer_to_extract'],
            dim_features=opt['dim_features'],
            gru_opt=opt['gru_opt'],
            n_future=opt['n_future'],
            mlp_opt=opt.get('mlp_opt', None),
        )
    else:
        raise ValueError(name)

    if torch.cuda.device_count() > 1:
        net = DataParallel(net)
    return net
Example #3
0
File: factory.py  Project: valeoai/BEEF
def factory(engine=None):
    """Build the dataset splits configured under Options()['dataset'].

    Returns a dict with keys 'train' and/or 'eval', depending on which
    split names are configured.
    """
    opt = Options()['dataset']
    dataset = {}
    for opt_key, mode in (('train_split', 'train'), ('eval_split', 'eval')):
        split = opt.get(opt_key, None)
        if split:
            dataset[mode] = factory_split(split, mode=mode)
    return dataset
def factory(engine=None):
    """Return a dict holding the configured 'train' and/or 'eval' splits."""
    opt = Options()["dataset"]
    dataset = {}

    train_split = opt.get("train_split", None)
    if train_split:
        dataset["train"] = factory_split(train_split)

    eval_split = opt.get("eval_split", None)
    if eval_split:
        dataset["eval"] = factory_split(eval_split)

    return dataset
Example #5
0
File: factory.py  Project: valeoai/BEEF
def factory():
    """Create the experiment view object.

    Only the 'plotly' backend is supported; any other configured name
    yields None.
    """
    opt = Options()['view']
    exp_dir = Options()['exp.dir']

    if opt.get('name', 'plotly') == 'plotly':
        return Plotly(opt.get('items', None),
                      exp_dir,
                      opt.get('file_name', 'view.html'))
    return None
Example #6
0
def factory(engine=None):
    """Log progress and build the configured dataset splits."""
    Logger()('Creating dataset...')

    opt = Options()['dataset']
    dataset = {}

    train_split = opt.get('train_split', None)
    if train_split:
        dataset['train'] = factory_split(train_split)

    eval_split = opt.get('eval_split', None)
    if eval_split:
        dataset['eval'] = factory_split(eval_split)

    return dataset
def factory(engine, mode):
    """Build the criterion named by Options()['model.criterion.name'].

    Raises ValueError for an unknown criterion name.
    """
    name = Options()["model.criterion.name"]
    opt = Options()["model.criterion"]

    if name == "vqa_cross_entropy":
        return VQACrossEntropyLoss()
    if name == "counting-regression":
        return CountingRegression(
            loss=opt.get("loss", "mse"),
            entropy_loss_weight=opt.get("entropy_loss_weight", 0.0),
        )
    raise ValueError(name)
Example #8
0
File: factory.py  Project: valeoai/BEEF
def factory():
    """Build the engine selected by Options()['engine']['name'].

    Raises:
        ValueError: if the name is neither 'extract' nor 'predict'.
    """
    opt = Options()['engine']
    name = opt['name']
    if name == 'extract':
        engine = ExtractEngine()
    elif name == 'predict':
        engine = PredictEngine(vid_id=opt.get('vid_id', None))
    else:
        # Was a bare `raise ValueError` — include the offending name,
        # consistent with the other factories in this file.
        raise ValueError(name)
    return engine
Example #9
0
File: factory.py  Project: valeoai/BEEF
def factory_split(split, mode=None):
    """Build a single dataset split according to Options()['dataset'].

    Supported dataset names: 'hdd_classif', 'bdd_drive', 'bdd_caption'.
    Shuffling is enabled only when mode == 'train'.
    Raises ValueError for an unknown dataset name.
    """
    opt = Options()['dataset']
    shuffle = mode == 'train'

    # Arguments shared by every dataset constructor below.
    common = dict(
        dir_data=Path(opt['dir_data']),
        split=split,
        batch_size=opt['batch_size'],
        debug=opt['debug'],
        shuffle=shuffle,
        pin_memory=Options()['misc']['cuda'],
        nb_threads=opt['nb_threads'],
    )

    name = opt['name']
    if name == 'hdd_classif':
        dataset = HDDClassif(win_size=opt['win_size'],
                             im_size=opt.get('im_size', 'small'),
                             layer=opt['layer'],
                             frame_position=opt['frame_position'],
                             traintest_mode=opt.get('traintest_mode', False),
                             fps=opt['fps'],
                             horizon=opt['horizon'],
                             **common)
    elif name == 'bdd_drive':
        dataset = BDDDrive(n_before=opt['n_before'], **common)
    elif name == 'bdd_caption':
        dataset = BDDCaption(n_before=opt['n_before'],
                             features_dir=opt['features_dir'],
                             **common)
    else:
        raise ValueError(name)

    return dataset
Example #10
0
def factory(engine=None):
    """Build the configured dataset splits, logging split sizes as it goes."""
    log = Logger()
    log('Creating dataset...')

    opt = Options()["dataset"]
    dataset = {}

    if opt.get("train_split", None):
        log("Loading train data")
        dataset["train"] = factory_split(opt["train_split"])
        log(f"Train dataset length is {len(dataset['train'])}")

    if opt.get("eval_split", None):
        log("Loading test data")
        dataset["eval"] = factory_split(opt["eval_split"])
        log(f"Test dataset length is {len(dataset['eval'])}")

    log("Dataset was created")
    return dataset
Example #11
0
def factory(engine):
    """Build the captioning network; only 'bdd_caption' is supported.

    Raises ValueError for any other network name.
    """
    opt = Options()['model']['network']
    name = opt['name']

    if name != 'bdd_caption':
        raise ValueError(name)

    return Captioning(
        hidden_size=opt["lstm_hidden_size"],
        temperature=opt["temperature"],
        sampling_strategy=opt["sampling_strategy"],
        fusion_opt=opt["fusion"],
        gru_lstm=opt["gru_lstm"],
        output_sentence=opt.get("output_sentence", "caption"),
    )
Example #12
0
def factory(engine, mode):
    """Build the criterion selected by Options()['model.criterion'].

    Raises ValueError for an unknown criterion name.
    """
    opt = Options()['model.criterion']
    name = opt['name']

    if name == "multitask_hdd":
        # Class weighting is optional and read from the active dataset split.
        if opt['use_class_weights']:
            class_freq = engine.dataset[mode].class_freq
        else:
            class_freq = None
        criterion = MultiTaskHDD(class_freq=class_freq,
                                 alpha_dict=opt.get("alpha", {}))
    elif name == 'l2_points':
        criterion = L2Points()
    elif name == "bdd-drive":
        criterion = BDDDriveLoss(
            scales=opt['scales'],
            normalize_outputs=opt.get('normalize_outputs', False))
    elif name == "bdd_caption":
        criterion = BDDCaptionLoss(
            Options().get('model.network.output_sentence', "caption"))
    else:
        raise ValueError(name)

    return criterion
def factory(model, engine):
    """Build the optimizer for `model`, plus optional extras.

    For the 'rcn.RCN' network a gradient-clipping hook is registered on the
    engine; opt['lr_scheduler'] wraps the optimizer in ReduceLROnPlateau;
    opt['init'] == 'glorot' re-initializes the network weights in place.

    Returns the (possibly wrapped) optimizer.
    """
    opt = Options()["optimizer"]

    # Reuse `opt` instead of re-reading Options()["optimizer"] five times.
    optimizer = BanOptimizer(
        engine,
        name=opt.get("name", "Adamax"),
        lr=opt["lr"],
        gradual_warmup_steps=opt.get("gradual_warmup_steps", [0.5, 2.0, 4]),
        lr_decay_epochs=opt.get("lr_decay_epochs", [10, 20, 2]),
        lr_decay_rate=opt.get("lr_decay_rate", 0.25),
    )

    if Options()["model.network.name"] == "rcn.RCN":

        def clip_gradients():
            # Clip the global grad norm; opt["clip_norm"] must be configured
            # whenever the RCN network is selected.
            nn.utils.clip_grad_norm_(engine.model.network.parameters(),
                                     opt["clip_norm"])

        # add hook
        engine.register_hook("train_on_backward", clip_gradients)

    if opt.get("lr_scheduler", None):
        optimizer = ReduceLROnPlateau(optimizer, engine, **opt["lr_scheduler"])

    if opt.get("init", None) == "glorot":
        # Glorot init: zero the 1-d params (biases), Xavier-init matrices.
        for p in model.network.parameters():
            if p.dim() == 1:
                p.data.fill_(0)
            elif p.dim() >= 2:
                nn.init.xavier_uniform_(p.data)
            else:
                raise ValueError(p.dim())

    return optimizer
def factory(engine):
    """Dynamically import and instantiate the network class named in
    Options()["model.network"]["name"] (e.g. "rcn.RCN" resolves to
    counting.models.networks.rcn.RCN).

    Parameter values starting with "@" are substituted with runtime objects
    (engine, answer vocabularies) before construction. Wraps the net in
    DataParallel when CUDA is enabled and several GPUs are visible.
    """
    mode = list(engine.dataset.keys())[0]
    dataset = engine.dataset[mode]
    opt = Options()["model.network"]

    module, class_name = opt["name"].rsplit(".", 1)
    try:
        cls = getattr(
            import_module("." + module, "counting.models.networks"),
            class_name)
    except Exception:  # was a bare `except:` — don't swallow KeyboardInterrupt
        traceback.print_exc()
        Logger()(f"Error importing class {module}, {class_name}")
        sys.exit(1)
    print("Network parameters", opt["parameters"])
    # check if @ in parameters
    print("checking if @ in parameters")
    parameters = opt.get("parameters", {}) or {}
    for key, value in parameters.items():  # TODO: fold this into bootstrap
        if value == "@dataset":
            # NOTE(review): the original only logs here and never substitutes
            # the dataset object — looks unfinished; behavior kept as-is.
            print("loading dataset")
        elif value == "@engine":
            parameters[key] = engine
        elif value == "@aid_to_ans":
            parameters[key] = dataset.aid_to_ans
        elif value == "@ans_to_aid":
            parameters[key] = dataset.ans_to_aid
    net = cls(
        **parameters,
        wid_to_word=dataset.wid_to_word,
        word_to_wid=dataset.word_to_wid,
        aid_to_ans=dataset.aid_to_ans,
        ans_to_aid=dataset.ans_to_aid,
    )

    if Options()["misc.cuda"] and torch.cuda.device_count() > 1:
        net = DataParallel(net)

    return net
Example #15
0
def factory_split(split):
    """Build one VQA dataset split ('train', 'val' or 'test') according to
    Options()['dataset'].

    Supported names: 'vqacp2', 'vqacpv2-with-testdev', and 'vqa2' (optionally
    concatenated with Visual Genome when opt['vg'] is set).

    Raises:
        ValueError: for an unknown dataset name (previously this fell through
        to `return dataset` and crashed with an UnboundLocalError).
    """
    opt = Options()['dataset']
    shuffle = ('train' in split)

    if opt['name'] == 'vqacp2':
        assert(split in ['train', 'val', 'test'])
        # Sample a single answer per question only while training.
        samplingans = (opt['samplingans'] and split == 'train')

        dataset = VQACP2(
            dir_data=opt['dir'],
            split=split,
            batch_size=opt['batch_size'],
            nb_threads=opt['nb_threads'],
            pin_memory=Options()['misc']['cuda'],
            shuffle=shuffle,
            nans=opt['nans'],
            minwcount=opt['minwcount'],
            nlp=opt['nlp'],
            proc_split=opt['proc_split'],
            samplingans=samplingans,
            dir_rcnn=opt['dir_rcnn'],
            dir_cnn=opt.get('dir_cnn', None),
            dir_vgg16=opt.get('dir_vgg16', None),
            )

    elif opt['name'] == 'vqacpv2-with-testdev':
        assert(split in ['train', 'val', 'test'])
        samplingans = (opt['samplingans'] and split == 'train')
        dataset = VQACP2(
            dir_data=opt['dir'],
            split=split,
            batch_size=opt['batch_size'],
            nb_threads=opt['nb_threads'],
            pin_memory=Options()['misc']['cuda'],
            shuffle=shuffle,
            nans=opt['nans'],
            minwcount=opt['minwcount'],
            nlp=opt['nlp'],
            proc_split=opt['proc_split'],
            samplingans=samplingans,
            dir_rcnn=opt['dir_rcnn'],
            dir_cnn=opt.get('dir_cnn', None),
            dir_vgg16=opt.get('dir_vgg16', None),
            has_testdevset=True,
            )

    elif opt['name'] == 'vqa2':
        assert(split in ['train', 'val', 'test'])
        samplingans = (opt['samplingans'] and split == 'train')

        if opt['vg']:
            # VQA2 + Visual Genome is only supported on the trainval split.
            assert(opt['proc_split'] == 'trainval')

            # trainvalset
            vqa2 = VQA2(
                dir_data=opt['dir'],
                split='train',
                nans=opt['nans'],
                minwcount=opt['minwcount'],
                nlp=opt['nlp'],
                proc_split=opt['proc_split'],
                samplingans=samplingans,
                dir_rcnn=opt['dir_rcnn'])

            vg = VG(
                dir_data=opt['dir_vg'],
                split='train',
                nans=10000,
                minwcount=0,
                nlp=opt['nlp'],
                dir_rcnn=opt['dir_rcnn_vg'])

            vqa2vg = ListVQADatasets(
                [vqa2,vg],
                split='train',
                batch_size=opt['batch_size'],
                nb_threads=opt['nb_threads'],
                pin_memory=Options()['misc.cuda'],
                shuffle=shuffle)

            if split == 'train':
                dataset = vqa2vg
            else:
                # Eval splits use plain VQA2 but share vocabularies with the
                # merged train set via sync_from.
                dataset = VQA2(
                    dir_data=opt['dir'],
                    split=split,
                    batch_size=opt['batch_size'],
                    nb_threads=opt['nb_threads'],
                    pin_memory=Options()['misc.cuda'],
                    shuffle=False,
                    nans=opt['nans'],
                    minwcount=opt['minwcount'],
                    nlp=opt['nlp'],
                    proc_split=opt['proc_split'],
                    samplingans=samplingans,
                    dir_rcnn=opt['dir_rcnn'])
                dataset.sync_from(vqa2vg)

        else:
            dataset = VQA2(
                dir_data=opt['dir'],
                split=split,
                batch_size=opt['batch_size'],
                nb_threads=opt['nb_threads'],
                pin_memory=Options()['misc.cuda'],
                shuffle=shuffle,
                nans=opt['nans'],
                minwcount=opt['minwcount'],
                nlp=opt['nlp'],
                proc_split=opt['proc_split'],
                samplingans=samplingans,
                dir_rcnn=opt['dir_rcnn'],
                dir_cnn=opt.get('dir_cnn', None),
                )

    else:
        # Fail loudly on an unknown dataset name instead of raising
        # UnboundLocalError at the return below.
        raise ValueError(opt['name'])

    return dataset
def factory_split(split):
    """Build one VQA dataset split according to Options()["dataset"].

    Supported names: "vqacp2", "vqacp2-sampling", "vqacpv2-with-testdev",
    "vqa2" (optionally merged with Visual Genome), and "lxmert-vqa2".

    Raises:
        ValueError: for an unknown dataset name (previously this fell through
        to `return dataset` and crashed with an UnboundLocalError).
    """
    opt = Options()["dataset"]
    shuffle = "train" in split

    if opt["name"] == "vqacp2":
        assert split in ["train", "val", "test"]
        # Sample a single answer per question only while training.
        samplingans = opt["samplingans"] and split == "train"

        dataset = VQACP2(
            dir_data=opt["dir"],
            split=split,
            batch_size=opt["batch_size"],
            nb_threads=opt["nb_threads"],
            pin_memory=Options()["misc"]["cuda"],
            shuffle=shuffle,
            nans=opt["nans"],
            minwcount=opt["minwcount"],
            nlp=opt["nlp"],
            proc_split=opt["proc_split"],
            samplingans=samplingans,
            dir_rcnn=opt["dir_rcnn"],
            dir_cnn=opt.get("dir_cnn", None),
            dir_vgg16=opt.get("dir_vgg16", None),
        )
    elif opt['name'] == 'vqacp2-sampling':
        samplingans = opt["samplingans"] and split == "train"

        dataset = VQACP2Sampling(
            dir_data=opt["dir"],
            split=split,
            batch_size=opt["batch_size"],
            nb_threads=opt["nb_threads"],
            pin_memory=Options()["misc"]["cuda"],
            shuffle=shuffle,
            nans=opt["nans"],
            minwcount=opt["minwcount"],
            nlp=opt["nlp"],
            proc_split=opt["proc_split"],
            samplingans=samplingans,
            dir_rcnn=opt["dir_rcnn"],
            dir_cnn=opt.get("dir_cnn", None),
            dir_vgg16=opt.get("dir_vgg16", None),
            weight_method=opt.get('weight_method', None),
        )

    elif opt["name"] == "vqacpv2-with-testdev":
        assert split in ["train", "val", "test"]
        samplingans = opt["samplingans"] and split == "train"
        dataset = VQACP2(
            dir_data=opt["dir"],
            split=split,
            batch_size=opt["batch_size"],
            nb_threads=opt["nb_threads"],
            pin_memory=Options()["misc"]["cuda"],
            shuffle=shuffle,
            nans=opt["nans"],
            minwcount=opt["minwcount"],
            nlp=opt["nlp"],
            proc_split=opt["proc_split"],
            samplingans=samplingans,
            dir_rcnn=opt["dir_rcnn"],
            dir_cnn=opt.get("dir_cnn", None),
            dir_vgg16=opt.get("dir_vgg16", None),
            has_testdevset=True,
        )

    elif opt["name"] == "vqa2":
        assert split in ["train", "val", "test"]
        samplingans = opt["samplingans"] and split == "train"

        if opt["vg"]:
            # VQA2 + Visual Genome is only supported on the trainval split.
            assert opt["proc_split"] == "trainval"

            # trainvalset
            vqa2 = VQA2(
                dir_data=opt["dir"],
                split="train",
                nans=opt["nans"],
                minwcount=opt["minwcount"],
                nlp=opt["nlp"],
                proc_split=opt["proc_split"],
                samplingans=samplingans,
                dir_rcnn=opt["dir_rcnn"],
            )

            vg = VG(
                dir_data=opt["dir_vg"],
                split="train",
                nans=10000,
                minwcount=0,
                nlp=opt["nlp"],
                dir_rcnn=opt["dir_rcnn_vg"],
            )

            vqa2vg = ListVQADatasets(
                [vqa2, vg],
                split="train",
                batch_size=opt["batch_size"],
                nb_threads=opt["nb_threads"],
                pin_memory=Options()["misc.cuda"],
                shuffle=shuffle,
            )

            if split == "train":
                dataset = vqa2vg
            else:
                # Eval splits use plain VQA2 but share vocabularies with the
                # merged train set via sync_from.
                dataset = VQA2(
                    dir_data=opt["dir"],
                    split=split,
                    batch_size=opt["batch_size"],
                    nb_threads=opt["nb_threads"],
                    pin_memory=Options()["misc.cuda"],
                    shuffle=False,
                    nans=opt["nans"],
                    minwcount=opt["minwcount"],
                    nlp=opt["nlp"],
                    proc_split=opt["proc_split"],
                    samplingans=samplingans,
                    dir_rcnn=opt["dir_rcnn"],
                )
                dataset.sync_from(vqa2vg)

        else:
            dataset = VQA2(
                dir_data=opt["dir"],
                split=split,
                batch_size=opt["batch_size"],
                nb_threads=opt["nb_threads"],
                pin_memory=Options()["misc.cuda"],
                shuffle=shuffle,
                nans=opt["nans"],
                minwcount=opt["minwcount"],
                nlp=opt["nlp"],
                proc_split=opt["proc_split"],
                samplingans=samplingans,
                dir_rcnn=opt["dir_rcnn"],
                dir_cnn=opt.get("dir_cnn", None),
            )

    elif opt["name"] == "lxmert-vqa2":

        dataset = LXMERTVQATorchDataset(
            dir_data=opt["dir"],
            split=split,
            dir_imgfeats=opt["dir_imgfeats"],
            tiny=opt.get("tiny", False),
            fast=opt.get("fast", False),
            batch_size=opt["batch_size"],
            nb_threads=opt["nb_threads"],
            pin_memory=Options()["misc"]["cuda"],
            shuffle=shuffle,
            only_numbers=opt['only_numbers'],
        )
    else:
        # Fail loudly on an unknown dataset name instead of raising
        # UnboundLocalError at the return below.
        raise ValueError(opt["name"])
    return dataset