def test_setitem_key_bytes():
    reset_options_instance()
    source = {bytes(1): 123}
    Options(source, run_parser=False)
    assert Options().options == source
    Options()[bytes(2)] = 'new value'
    assert Options()[bytes(2)] == 'new value'
def test_setitem_key_float():
    reset_options_instance()
    source = {1.2: 123}
    Options(source, run_parser=False)
    assert Options().options == source
    Options()[1.2] = 'new value'
    assert Options()[1.2] == 'new value'
def test_setitem_1():
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    Options()['abc'] = 'new value'
    assert Options()['abc'] == 'new value'
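# Every test in this listing starts with reset_options_instance(), which is
# not shown here. A minimal sketch, assuming Options caches a singleton on a
# private class attribute and that tests want a clean sys.argv:
import sys

def reset_options_instance():
    # Drop the cached singleton so the next Options(...) call rebuilds it
    # (the name-mangled attribute is an assumption about the implementation).
    Options._Options__instance = None
    # Keep only the script name; each test appends its own CLI flags.
    sys.argv = [sys.argv[0]]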
def test_save():
    """ Test save and load
    """
    reset_options_instance()
    sys.argv += ['-o', 'tests/sgd.yaml', '--nested.message', 'save']
    path_yaml = 'tests/saved.yaml'
    Options().save(path_yaml)
    with open(path_yaml, 'r') as yaml_file:
        options_yaml = yaml.safe_load(yaml_file)
    assert (OptionsDict(options_yaml) == OptionsDict({
        "message": "sgd",
        "sgd": True,
        "nested": {
            "message": "save"
        }
    }))
    reset_options_instance()
    sys.argv += ['-o', 'tests/saved.yaml']
    assert (Options().options == OptionsDict({
        "path_opts": "tests/saved.yaml",
        "message": "sgd",
        "sgd": True,
        "nested": {
            "message": "save"
        }
    }))
def test_get_default_value():
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    value = Options().get('cba', 'default value')
    assert value == 'default value'
def test_str_to_bool_no():
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert not Options().str_to_bool('no')
    assert not Options().str_to_bool('No')
    assert not Options().str_to_bool('NO')
Example #7
def factory(engine):
    opt = Options()['model.network']
    if opt['name'] == "beef_hdd":
        net = BeefHDD(layers_to_fuse=opt['layers_to_fuse'],
                      label_fusion_opt=opt['label_fusion'],
                      blinkers_dim=opt['blinkers_dim'],
                      gru_opt=opt['gru_opt'],
                      n_future=opt['n_future'],
                      detach_pred=opt.get('detach_pred', False))
    elif opt['name'] == "driver_hdd":
        net = DriverHDD(blinkers_dim=opt['blinkers_dim'],
                        gru_opt=opt['gru_opt'],
                        n_future=opt['n_future'])
    elif opt['name'] == "baseline_multitask_hdd":
        net = BaselineMultitaskHDD(n_classes=opt['n_classes'],
                                   blinkers_dim=opt['blinkers_dim'],
                                   layer_to_extract=opt['layer_to_extract'],
                                   dim_features=opt['dim_features'],
                                   gru_opt=opt['gru_opt'],
                                   n_future=opt['n_future'],
                                   mlp_opt=opt.get('mlp_opt', None))
    else:
        raise ValueError(opt['name'])
    if torch.cuda.device_count() > 1:
        net = DataParallel(net)
    return net
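# A hedged usage sketch for the factory above; the option values are
# illustrative, not taken from any source configuration.
Options({'model': {'network': {
    'name': 'driver_hdd',
    'blinkers_dim': 4,
    'gru_opt': {'hidden_size': 128, 'num_layers': 1},
    'n_future': 5,
}}}, run_parser=False)
net = factory(engine=None)  # builds DriverHDD; DataParallel only on multi-GPU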
def factory(engine=None):

    Logger()('Creating network...')

    opt = Options()['model']['network']

    if 'import' in opt:
        module = importlib.import_module(opt['import'])
        network = module.factory(engine)

    else:
        module, class_name = opt['name'].rsplit('.', 1)
        cls = getattr(import_module('.' + module, 'counting.models.networks'),
                      class_name)
        print("Network parameters", opt['parameters'])
        # Resolve "@"-prefixed parameters by evaluating the rest of the
        # string as a Python expression (see the sketch below).
        # TODO: integrate this into bootstrap
        for key, value in opt['parameters'].items():
            if isinstance(value, str) and value.startswith("@"):
                try:
                    opt['parameters'][key] = eval(value[1:])
                except Exception:
                    pass
        network = cls(**opt['parameters'])

    Logger()(f'Network created, of type {type(network)}...')

    return network
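# A standalone sketch of the "@" convention resolved above: a string value
# beginning with "@" is evaluated with `engine` in scope, so a parameter can
# be derived from the dataset at construction time. The engine and dataset
# attributes here are hypothetical stand-ins so the snippet runs on its own.
class _FakeDataset:
    aid_to_ans = ['yes', 'no', 'maybe']

class _FakeEngine:
    dataset = {'train': _FakeDataset()}

engine = _FakeEngine()
parameters = {
    'hidden_size': 1024,
    'num_answers': "@len(engine.dataset['train'].aid_to_ans)",
}
for key, value in parameters.items():
    if isinstance(value, str) and value.startswith('@'):
        parameters[key] = eval(value[1:])

print(parameters['num_answers'])  # -> 3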
Example #9
def init_experiment_directory(exp_dir, resume=None):
    # create the experiment directory
    if not os.path.isdir(exp_dir):
        os.system('mkdir -p ' + exp_dir)
    else:
        if resume is None:
            if click.confirm(
                    'Exp directory already exists in {}. Erase?'.format(exp_dir),
                    default=False):
                os.system('rm -r ' + exp_dir)
                os.system('mkdir -p ' + exp_dir)
            else:
                os._exit(1)

    # if resume to evaluate the model on one epoch
    if resume and Options()['dataset']['train_split'] is None:
        eval_split = Options()['dataset']['eval_split']
        path_yaml = os.path.join(exp_dir,
                                 'options_eval_{}.yaml'.format(eval_split))
        logs_name = 'logs_eval_{}'.format(eval_split)
    else:
        path_yaml = os.path.join(exp_dir, 'options.yaml')
        logs_name = 'logs'

    # create the options.yaml file
    Options.save_yaml_opts(path_yaml)

    # open(write) the logs.txt file and init logs.json path for later
    Logger(exp_dir, name=logs_name)
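# Usage sketch: create logs/exp1 (prompting before erasing an existing run),
# write options.yaml into it, and initialize the Logger files.
init_experiment_directory('logs/exp1')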
Example #10
def factory_humanProteome(split, trainingDataset=None):
    dataset = HumanProteome(Options()['dataset']['dir'],
                            split,
                            batch_size=Options()['dataset']['batch_size'],
                            nb_threads=Options()['dataset']['nb_threads'],
                            trainingDataset=trainingDataset)
    return dataset
Example #11
def factory_mixedSpectraCrux(split, trainingDataset=None):
    dataset = MixedSpectraCrux(Options()['dataset']['dir'],
                               split,
                               batch_size=Options()['dataset']['batch_size'],
                               nb_threads=Options()['dataset']['nb_threads'],
                               trainingDataset=trainingDataset)
    return dataset
    def __init__(self):

        super(DoubleNPairL2, self).__init__()

        self.aggregation = Options()['model']['criterion'].get(
            'aggregation', 'mean')
        self.epsilon = Options()['model']['criterion'].get('epsilon', 1e-8)

        self.batch_size = Options().get("dataset.batch_size", 100)

        self.regularizer = Options()['model']['criterion'].get(
            'regularizer', 0.1)

        # torch.autograd.set_detect_anomaly(True)

        # Pairwise access mask: start with every (i, j) pair allowed,
        self.access_mask = torch.ones([self.batch_size, self.batch_size],
                                      dtype=torch.bool)

        # then exclude each sample paired with itself (the diagonal),
        for i in range(self.batch_size):
            self.access_mask[i, i] = False

        # and exclude every odd-indexed row (assumed to be the paired
        # positives in the anchor/positive batch layout).
        self.access_mask[1::2] = False

        if torch.cuda.is_available():
            self.access_mask = self.access_mask.cuda()
def load_activation(identifier):
    retrieval_dir = os.path.join(Options()['model']['metric']['retrieval_dir'],
                                 os.path.basename(Options()['exp']['dir']))
    if not os.path.exists(retrieval_dir):
        os.makedirs(retrieval_dir)
    file_path = os.path.join(retrieval_dir, '{}.pth'.format(identifier))
    return torch.load(file_path)
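# Hypothetical counterpart to load_activation, not shown in the source:
# persist an activation tensor under the same retrieval directory layout.
def save_activation(identifier, tensor):
    retrieval_dir = os.path.join(Options()['model']['metric']['retrieval_dir'],
                                 os.path.basename(Options()['exp']['dir']))
    os.makedirs(retrieval_dir, exist_ok=True)
    torch.save(tensor, os.path.join(retrieval_dir, '{}.pth'.format(identifier)))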
def test_add_options():
    reset_options_instance()
    sys.argv += [
        '-o',
        'tests/default.yaml',
        '--dataset',
        '421',
        '--value',
        '2',
        '--model.metric',
        'm1',
        'm2',
    ]
    source = {
        'dataset': 123,
        'value': 1.5,
        'model': {
            'criterion': ['mse', 'l1'],
            'network': 'I am a network',
            'metric': [],
        },
        'useless': None,
    }
    Options(source, run_parser=True)
    assert Options()['dataset'] == 421
    assert Options()['value'] == 2
    assert isinstance(Options()['value'], float)
    assert Options()['model']['metric'] == ['m1', 'm2']
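# The assertions above imply the parser coerces each command-line string to
# the type of the corresponding default value; a sketch of that rule
# (inferred from the test, not the library's code):
def coerce_like(default, raw):
    if isinstance(default, bool):
        return raw.lower() in ('yes', 'true')  # bool first: bool is an int subclass
    if isinstance(default, float):
        return float(raw)   # '2' -> 2.0, hence the isinstance(..., float) assertion
    if isinstance(default, int):
        return int(raw)     # '421' -> 421
    return raw              # strings pass through; list defaults take nargs='+'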
def test_str_to_bool_false():
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert not Options().str_to_bool('false')
    assert not Options().str_to_bool('False')
    assert not Options().str_to_bool('FALSE')
def test_str_to_bool_true():
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().str_to_bool('true')
    assert Options().str_to_bool('True')
    assert Options().str_to_bool('TRUE')
def test_str_to_bool_yes():
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().str_to_bool('yes')
    assert Options().str_to_bool('Yes')
    assert Options().str_to_bool('YES')
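# A minimal str_to_bool consistent with the yes/no/true/false tests above --
# a sketch, not necessarily the library's actual implementation:
def str_to_bool(value):
    lowered = value.lower()
    if lowered in ('yes', 'true'):
        return True
    if lowered in ('no', 'false'):
        return False
    raise ValueError('Cannot interpret {!r} as a boolean'.format(value))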
Example #18
def factory(engine):
    mode = list(engine.dataset.keys())[0]
    dataset = engine.dataset[mode]
    opt = Options()['model.network']

    if opt['base'] == 'smrl':
        from .smrl_net import SMRLNet as BaselineNet
    elif opt['base'] == 'updn':
        from .updn_net import UpDnNet as BaselineNet
    elif opt['base'] == 'san':
        from .san_net import SANNet as BaselineNet
    else:
        raise ValueError(opt['base'])

    orig_net = BaselineNet(
        txt_enc=opt['txt_enc'],
        self_q_att=opt['self_q_att'],
        agg=opt['agg'],
        classif=opt['classif'],
        wid_to_word=dataset.wid_to_word,
        word_to_wid=dataset.word_to_wid,
        aid_to_ans=dataset.aid_to_ans,
        ans_to_aid=dataset.ans_to_aid,
        fusion=opt['fusion'],
        residual=opt['residual'],
        q_single=opt['q_single'],
    )

    if opt['name'] == 'baseline':
        net = orig_net

    elif opt['name'] == 'rubi':
        net = RUBiNet(model=orig_net,
                      output_size=len(dataset.aid_to_ans),
                      classif=opt['rubi_params']['mlp_q'])

    elif opt['name'] == 'cfvqa':
        net = CFVQA(model=orig_net,
                    output_size=len(dataset.aid_to_ans),
                    classif_q=opt['cfvqa_params']['mlp_q'],
                    classif_v=opt['cfvqa_params']['mlp_v'],
                    fusion_mode=opt['fusion_mode'],
                    is_va=True)

    elif opt['name'] == 'cfvqasimple':
        net = CFVQA(model=orig_net,
                    output_size=len(dataset.aid_to_ans),
                    classif_q=opt['cfvqa_params']['mlp_q'],
                    classif_v=None,
                    fusion_mode=opt['fusion_mode'],
                    is_va=False)

    else:
        raise ValueError(opt['name'])

    if Options()['misc.cuda'] and torch.cuda.device_count() > 1:
        net = DataParallel(net)

    return net
def factory(model, engine=None):

    if Options()['optimizer']['name'] == 'trijoint_fixed_fine_tune':
        optimizer = Trijoint(Options()['optimizer'], model, engine)
    else:
        raise ValueError()

    return optimizer
def test_items():
    reset_options_instance()
    source = {'model': 'mymodel'}
    Options(source, run_parser=False)
    assert Options().options == source
    for key, value in Options().items():
        assert key == 'model'
        assert value == 'mymodel'
def factory_cifar10(split):
    dataset = CIFAR(Options()['dataset']['dir'],
                    split,
                    batch_size=Options()['dataset']['batch_size'],
                    nb_threads=Options()['dataset']['nb_threads'],
                    shuffle=(split == 'train'),
                    name='CIFAR10')
    return dataset
Example #22
    def __init__(self):

        super(NPair, self).__init__()

        self.aggregation = Options()['model']['criterion'].get(
            'aggregation', 'mean')
        self.epsilon = Options()['model']['criterion'].get('epsilon', 1e-8)
def main():

    Logger('.')

    #classes = ['pizza', 'pork chops', 'cupcake', 'hamburger', 'green beans']
    split = 'test'
    dir_exp = '/home/cadene/doc/bootstrap.pytorch/logs/recipe1m/trijoint/2017-12-14-15-04-51'
    path_opts = os.path.join(dir_exp, 'options.yaml')
    dir_extract = os.path.join(dir_exp, 'extract_mean_features', split)
    dir_img = os.path.join(dir_extract, 'image')
    dir_rcp = os.path.join(dir_extract, 'recipe')
    path_model_ckpt = os.path.join(
        dir_exp,
        'ckpt_best_val_epoch.metric.recall_at_1_im2recipe_mean_model.pth.tar')

    Options.load_from_yaml(path_opts)
    utils.set_random_seed(Options()['misc']['seed'])

    Logger()('Load dataset...')
    dataset = factory(split)

    Logger()('Load model...')
    model = model_factory()
    model_state = torch.load(path_model_ckpt)
    model.load_state_dict(model_state)
    model.set_mode(split)

    if not os.path.isdir(dir_extract):
        Logger()('Create extract_dir {}'.format(dir_extract))
        os.system('mkdir -p ' + dir_extract)

        mean_ingrs = torch.zeros(model.network.recipe_embedding.dim_ingr_out *
                                 2)  # bi LSTM
        mean_instrs = torch.zeros(model.network.recipe_embedding.dim_instr_out)

        for i in tqdm(range(len(dataset))):
            item = dataset[i]
            batch = dataset.items_tf()([item])

            batch = model.prepare_batch(batch)
            out_ingrs = model.network.recipe_embedding.forward_ingrs(
                batch['recipe']['ingrs'])
            out_instrs = model.network.recipe_embedding.forward_instrs(
                batch['recipe']['instrs'])

            mean_ingrs += out_ingrs.data.cpu().squeeze(0)
            mean_instrs += out_instrs.data.cpu().squeeze(0)

        mean_ingrs /= len(dataset)
        mean_instrs /= len(dataset)

        path_ingrs = os.path.join(dir_extract, 'ingrs.pth')
        path_instrs = os.path.join(dir_extract, 'instrs.pth')

        torch.save(mean_ingrs, path_ingrs)
        torch.save(mean_instrs, path_instrs)

    Logger()('End')
def test_str():
    reset_options_instance()
    source = {'abc': 123, 'key1': 'value1'}
    Options(source, run_parser=False)
    assert Options().options == source
    str_representation = str(Options())
    assert isinstance(str_representation, str)
    opt_dict = json.loads(str_representation)
    assert opt_dict == source
Example #25
def test_values():
    reset_options_instance()
    source = {
        'model': 'mymodel',
        'dataset': 'mydataset'
    }
    Options(source, run_parser=False)
    assert Options().options == source
    assert sorted(Options().values()) == sorted(['mymodel', 'mydataset'])
Example #26
    def eval(self):
        self.status = 'evaluating'
        ckpt_name, path_ckpt = find_ckpt(self.exp_dir,
                                         Options()['pbt']['resume'])
        ckpt = torch.load(path_ckpt)

        metric_key = Options()['pbt']['resume'].replace('best_', '')  # TODO
        self.score = ckpt['best_out'][metric_key]
        self.status = 'evaluated'
Example #27
    def __init__(self):
        self.queue = queue.Queue()
        self.pop = collections.OrderedDict()
        self.exp_dir = Options()['exp']['dir']
        self.path_ckpt = os.path.join(self.exp_dir,
                                      'ckpt_population_last.pth.tar')
        self.n_pop_max = Options()['pbt']['n_pop_max']
        self.n_workers = Options()['pbt']['n_workers']
        self.workers = []
Example #28
    def populate(self):
        # TODO: improve populate
        path_mutant_opts = Options()['pbt']['path_opts']
        for i in range(Options()['pbt']['n_workers']):
            exp_mutant_dir = os.path.join(
                self.exp_dir, '{:%Y-%m-%d-%H-%M-%S-%f}'.format(datetime.now()))
            new_mutant = Mutant(path_mutant_opts, exp_mutant_dir)
            self.add_mutant_to_train(new_mutant)
            time.sleep(1)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.metric_list = ['_all', '_vq', '_cfvqa', '_q']
        if Options()['dataset.eval_split'] == 'test':  # 0430
            self.accuracy = None
        else:
            self.accuracy = VQAAccuracy()
        self.rm_dir_rslt = (1 if Options()['dataset.train_split'] is not None
                            else 0)
Example #30
def factory(engine=None):
    opt = Options()['dataset']
    dataset = {}
    if opt.get('train_split', None):
        dataset['train'] = factory_split(opt['train_split'], mode='train')
    if opt.get('eval_split', None):
        dataset['eval'] = factory_split(opt['eval_split'], mode='eval')

    return dataset
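# Usage sketch (split names illustrative): with both splits set, the factory
# returns {'train': ..., 'eval': ...}; leave a split unset to skip it.
Options({'dataset': {'train_split': 'train', 'eval_split': 'val'}},
        run_parser=False)
datasets = factory()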