Code Example #1
def run(force_rerun: bool, params_path: Optional[str],
        changed_params_path: Optional[str], device_id: Optional[int]) -> None:
    # Load the default classifier config, optionally from a user-supplied params module.
    module = get_params_module(params_path, 'cl_default_config')
    config = module.classifier_config
    # Apply a JSON patch of changed parameters on top of the base config.
    if changed_params_path:
        with open(changed_params_path, 'r') as f:
            patch = dict(jsons.loads(f.read()))
            config = patch_config(config, patch)
    if gpu.gpu_available():
        # Prefer an explicitly requested device; otherwise use the current one.
        gpu_id_to_use = device_id if device_id is not None else get_current_device()
        logger.debug(f'Using gpu with id: {gpu_id_to_use}')
        with device(gpu_id_to_use):
            run_on_device(config, force_rerun)
    else:
        run_on_device(config, force_rerun)
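Nothing in the snippet shows how `run` is invoked. For orientation, here is a minimal sketch of a CLI wrapper using argparse; the wrapper itself and the flag names are assumptions, not the project's actual entry point.

# Hypothetical CLI wrapper for run(); flag names are assumptions.
import argparse

def main() -> None:
    parser = argparse.ArgumentParser(description='Train the classifier.')
    parser.add_argument('--force-rerun', action='store_true')
    parser.add_argument('--params-path', default=None)
    parser.add_argument('--changed-params-path', default=None)
    parser.add_argument('--device-id', type=int, default=None)
    args = parser.parse_args()
    run(args.force_rerun, args.params_path, args.changed_params_path, args.device_id)

if __name__ == '__main__':
    main()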
Code Example #2
def create_nn_architecture(fs: FS,
                           text_field: Field,
                           level_label: Field,
                           data: Data,
                           arch: Arch,
                           threshold: float,
                           path: Optional[str] = None):
    # Build dataset splits of code contexts of length arch.bptt.
    splits = ContextsDataset.splits(text_field,
                                    level_label,
                                    fs.path_to_model_dataset,
                                    context_len=arch.bptt,
                                    threshold=threshold,
                                    data=data)

    # Wrap the splits into batched model data (batch size arch.bs).
    text_data = TextData.from_splits(fs.path_to_model if not path else path,
                                     splits, arch.bs)

    # Optimizer factory: Adam with non-default betas.
    opt_fn = partial(torch.optim.Adam, betas=(0.7, 0.99))

    if arch.qrnn and not gpu.gpu_available():
        logger.warning("QRNN requires CUDA, which is not available. Falling back to LSTM.")
        arch.qrnn = False
    dropout_multiplier = arch.drop.multiplier  # scales all dropout rates together
    rnn_learner = text_data.get_model(
        opt_fn,
        arch.bptt + 1,
        arch.bptt,
        arch.em_sz,
        arch.nh,
        arch.nl,
        dropout=arch.drop.out * dropout_multiplier,
        dropouti=arch.drop.outi * dropout_multiplier,
        wdrop=arch.drop.w * dropout_multiplier,
        dropoute=arch.drop.oute * dropout_multiplier,
        dropouth=arch.drop.outh * dropout_multiplier,
        bidir=arch.bidir,
        qrnn=arch.qrnn)

    # Regularization from the AWD-LSTM paper: penalizing large activations reduces overfitting.
    rnn_learner.reg_fn = partial(seq2seq_reg,
                                 alpha=arch.reg_fn.alpha,
                                 beta=arch.reg_fn.beta)
    rnn_learner.clip = arch.clip

    logger.info(f'Dictionary size is: {len(text_field.vocab.itos)}')
    return rnn_learner
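The function only reads a fixed set of attributes from `arch`. As a sketch of the expected shape, here is a hypothetical dataclass stand-in; the field names mirror the accesses above, but the default values are illustrative and are not the project's.

# Hypothetical stand-ins mirroring the attributes create_nn_architecture reads.
from dataclasses import dataclass, field

@dataclass
class Dropouts:
    multiplier: float = 1.0
    out: float = 0.05
    outi: float = 0.25
    w: float = 0.2
    oute: float = 0.02
    outh: float = 0.15

@dataclass
class RegFn:
    alpha: float = 2.0
    beta: float = 1.0

@dataclass
class Arch:
    bptt: int = 70      # backprop-through-time window (context length)
    bs: int = 32        # batch size
    em_sz: int = 300    # embedding size
    nh: int = 650       # hidden units per layer
    nl: int = 3         # number of RNN layers
    bidir: bool = False
    qrnn: bool = False
    clip: float = 0.3
    drop: Dropouts = field(default_factory=Dropouts)
    reg_fn: RegFn = field(default_factory=RegFn)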
Code Example #3
def run(find_lr: bool, force_rerun: bool, params_path: Optional[str],
        changed_params_path: Optional[str], device_id: Optional[int]) -> None:
    # Select the learning-rate-finder config or the regular language-model config.
    if find_lr:
        module = get_params_module(params_path, 'lm_lr_default_config')
        config = module.lm_lr_config
    else:
        module = get_params_module(params_path, 'lm_default_config')
        config = module.lm_config
    if changed_params_path:
        with open(changed_params_path, 'r') as f:
            patch = dict(jsons.loads(f.read()))
            config = patch_config(config, patch)
    logger.info(f'Using config: {jsons.dumps(config)}')
    if gpu.gpu_available():
        gpu_id_to_use = device_id if device_id is not None else get_current_device()
        with device(gpu_id_to_use):
            run_on_device(config, find_lr, force_rerun)
    else:
        run_on_device(config, find_lr, force_rerun)
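In both this example and Example #1, the changed-params file is plain JSON loaded into a dict and handed to `patch_config`. As a rough sketch of that pattern, a minimal stand-in (named `patch_config_sketch` to avoid implying this is the real implementation) could override top-level attributes like this; the actual logrec `patch_config` may well support nested or dotted keys.

# Hypothetical stand-in for patch_config: override top-level config attributes
# from a {name: value} dict. The real implementation may differ.
from typing import Any, Dict

def patch_config_sketch(config: Any, patch: Dict[str, Any]) -> Any:
    for name, value in patch.items():
        if not hasattr(config, name):
            raise AttributeError(f'Unknown config field: {name}')
        setattr(config, name, value)
    return config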
Code Example #4
#!/usr/bin/env python

from logrec.util import gpu

if __name__ == '__main__':
    # Print the number of available GPUs, or -1 if CUDA is unavailable.
    if gpu.gpu_available():
        print(gpu.get_n_gpus())
    else:
        print(-1)
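Because the script prints either a GPU count or -1, other tooling can branch on its output. A hypothetical consumer (the script filename here is an assumption) might look like:

# Hypothetical consumer of the script above; the script path is an assumption.
import subprocess

result = subprocess.run(['python', 'print_gpu_count.py'],
                        capture_output=True, text=True, check=True)
n_gpus = int(result.stdout.strip())
if n_gpus < 0:
    print('No GPU available, running on CPU.')
else:
    print(f'{n_gpus} GPU(s) available.')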