示例#1
0
def run(cfg):
    """Launch `main` under hydra, forwarding a flattened liftoff config.

    NOTE(review): works by mutating ``sys.argv`` so hydra parses the
    liftoff options as command-line overrides.
    """
    # Hack to load with liftoff
    import sys

    # Rebuild argv: keep the program name, append "key=value" overrides.
    overrides = [f"{k}={v}" for k, v in flatten_cfg(cfg)]
    sys.argv = [sys.argv[0], *overrides]

    hydra.main('config.yaml', strict=False)(main)()
示例#2
0
    def task_script_wrapper(script_func: TaskFunction) -> TaskFunction:
        """Wrap *script_func* so it runs under hydra with a managed operator.

        The wrapped callable builds an operator from the hydra config,
        invokes the script with it, and always shuts the operator down
        afterwards, whether the script succeeded or raised.
        """
        @functools.wraps(script_func)
        def process_config_and_run_main(cfg: "DictConfig"):
            operator, cfg = process_config_and_get_operator(cfg)
            # try/finally alone is sufficient: exceptions propagate unchanged
            # and shutdown is still guaranteed.  (The previous
            # `except Exception as e: raise e` clause was a redundant no-op.)
            try:
                ret_val = script_func(operator, cfg)
            finally:
                if not operator.is_shutdown:
                    operator.shutdown()
            return ret_val

        # Resolve the config path relative to the run file so hydra finds it
        # regardless of the current working directory.
        absolute_config_path = os.path.abspath(
            os.path.join(get_run_file_dir(), config_path))
        hydra_wrapper = hydra.main(config_path=absolute_config_path,
                                   config_name="taskconfig")
        return cast(TaskFunction, hydra_wrapper(process_config_and_run_main))
示例#3
0
import torch
from homura.modules import cross_entropy_with_softlabels
from torch.nn import functional as F

from backends.utils import SSLTrainerBase, disable_bn_stats, get_task


class PseudoLabelTrainer(SSLTrainerBase):
    """Pseudo-label SSL trainer: unlabeled samples are supervised by the
    model's own argmax prediction."""

    def labeled(self, input: torch.Tensor,
                target: torch.Tensor) -> "tuple[torch.Tensor, torch.Tensor]":
        """Supervised step with smoothed one-hot targets; returns (logits, loss)."""
        target = self.to_onehot(target, self.smoothing)
        output = self.model(input)
        loss = self.loss_f(output, target)
        return output, loss

    def unlabeled(self, input: torch.Tensor) -> "tuple[torch.Tensor, torch.Tensor]":
        """Pseudo-label step: cross entropy against the model's own argmax.

        Only per-sample losses *above* ``self.threshold`` contribute to the
        mean — NOTE(review): this keeps high-loss samples; confirm that is
        the intended confidence filter.
        """
        # BN running stats are frozen for the unlabeled forward pass.
        with disable_bn_stats(self.model):
            u_output = self.model(input)
        u_loss = F.cross_entropy(u_output,
                                 u_output.argmax(dim=1),
                                 reduction='none')
        u_loss = ((u_loss > self.threshold).float() * u_loss).mean()
        return u_output, u_loss


if __name__ == "__main__":
    import hydra

    # Build the task first, then decorate and run it through hydra.
    task = get_task(PseudoLabelTrainer, cross_entropy_with_softlabels)
    hydra.main('config/pseudo_label.yaml')(task)()
示例#4
0
    nlp = cast(TorchLanguage, create_model(cfg.model))
    train_data = list(
        srsly.read_jsonl(os.path.join(org_cwd, cfg.train.data.train)))
    cfg.train.data.ndata = len(train_data)
    val_data = list(srsly.read_jsonl(os.path.join(org_cwd,
                                                  cfg.train.data.val)))
    logger.info("output dir: {}".format(os.getcwd()))
    if torch.cuda.is_available():
        logger.info("CUDA enabled")
        nlp.to(torch.device("cuda"))
    savedir = Path.cwd() / "models"
    srsly.write_jsonl(Path.cwd() / f"train-data.jsonl", train_data)
    srsly.write_jsonl(Path.cwd() / f"val-data.jsonl", val_data)
    savedir.mkdir(exist_ok=True)
    train(cfg.train, nlp, train_data, val_data, savedir)


if __name__ == "__main__":
    from util import get_relative_path

    if len(sys.argv) != 2:
        print('python train.py [your train_(intent|slot).yaml]',
              file=sys.stderr)
        # Usage error: exit non-zero so shells/scripts can detect failure
        # (the original exited 0, which signals success).
        sys.exit(1)
    config_path = sys.argv.pop()
    if not os.path.exists(config_path):
        print(f'{config_path} is not found.', file=sys.stderr)
        # Missing config is an error, not a successful run.
        sys.exit(1)
    rel_path = get_relative_path(config_path)
    main = hydra.main(config_path=rel_path, strict=False)(_main)
    main()
示例#5
0
File: my_app.py  Project: Jasha10/hydra
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from omegaconf import DictConfig

import hydra
from hydra.core.hydra_config import HydraConfig


class MyCallable:
    """A callable *object* used as a hydra entry point instead of a function."""

    def __init__(self, state: int = 123) -> None:
        # Instance state, demonstrating that it survives hydra decoration.
        self._state = state

    def __call__(self, cfg: DictConfig) -> None:
        # Print the stored state and the hydra-provided job name.
        print(self._state)
        print(HydraConfig.get().job.name)


# Decorate an instance (not a plain function) as the hydra application.
my_callable = MyCallable()
_decorate = hydra.main(version_base=None)
my_app = _decorate(my_callable)

if __name__ == "__main__":
    my_app()
示例#6
0
            log.exception(err)
            _exceptions.append(err)
        else:
            raise err

    finally:
        # flush logger to ensure free memory for next run
        if trainer is not None:
            experiment = trainer.logger.experiment  # type: ignore
            if experiment is not None and hasattr(experiment, "flush"):
                experiment.flush()
                log.debug("Flushed experiment to disk")
            if experiment is not None and hasattr(experiment, "close"):
                experiment.close()
                log.debug("Closed experiment writer")

    # postprocess results if desired (e.g. to scalars for bayesian optimization)
    if process_results_fn is not None:
        log.debug("Running results postprocessing")
        output = process_results_fn(train_results,
                                    test_results)  # type: ignore
    else:
        output = train_results, test_results

    return output  # type: ignore


if __name__ == "__main__":
    # Build the hydra wrapper first, then decorate `main` and invoke it.
    _wrap = hydra.main(config_path="./conf/config.yaml")
    _main = _wrap(main)
    _main()
示例#7
0
    F = solve_F(points_list, K)
    if F is None:
        logger.info('Could not solve F.')
        return

    logger.info(f"\n{F=}")
    logger.info(f"{np.linalg.eig(F)[0]=}")
    # Line epipolar
    utils.save_lined_epipolar_image(F.T, images_list[0], points_list[0],
                                    points_list[1], 0)
    utils.save_lined_epipolar_image(F, images_list[1], points_list[1],
                                    points_list[0], 1)
    utils.save_lined_epipolar_image(F.T,
                                    images_list[0],
                                    test_points_list[0],
                                    test_points_list[1],
                                    0,
                                    prefix='test_')
    utils.save_lined_epipolar_image(F,
                                    images_list[1],
                                    test_points_list[1],
                                    test_points_list[0],
                                    1,
                                    prefix='test_')
    return


if __name__ == "__main__":
    # Decorate `main` with the hydra config and run the result immediately.
    entry = hydra.main(config_path='../conf/config.yaml')(main)
    entry()
示例#8
0
        target = self.to_onehot(target, self.smoothing)
        s_loss = self.loss_f[0](output, target)
        return output, s_loss

    def unlabeled(self,
                  input: torch.Tensor) -> "tuple[None, torch.Tensor, torch.Tensor]":
        """VAT step on unlabeled data.

        Returns ``(None, vat_loss, entropy_loss)``; the first slot mirrors
        the labeled method's output position but exposes no logits.
        """
        # BN running stats are frozen for the unlabeled forward passes.
        with disable_bn_stats(self.model):
            logits = self.model(input)
            # Detach so the VAT target distribution carries no gradient.
            u_loss = self.vat_loss(input, logits.clone().detach())
            e_loss = Categorical(logits=logits).entropy().mean()
        return None, u_loss, e_loss

    def vat_loss(self, input: torch.Tensor,
                 logits: torch.Tensor) -> torch.Tensor:
        """Virtual adversarial loss: KL divergence at the most sensitive
        perturbation direction around *input*.

        A random unit probe `d` is pushed through the model, the gradient
        of the resulting KL w.r.t. `d` gives the adversarial direction,
        and the final loss is the KL at that direction scaled by `self.eps`.
        """
        # Random unit probe direction.
        d = normalize(input.clone().normal_())
        d.requires_grad_(True)
        pred_hat = self.model(input + self.xi * d)
        adv_loss = kl_div(logits, pred_hat)
        # Gradient w.r.t. the probe yields the worst-case direction.
        d_grad, = torch.autograd.grad([adv_loss], [d])
        d = normalize(d_grad)
        # Clear gradients the probe pass accumulated in the model.
        self.model.zero_grad()
        pred_hat = self.model(input + self.eps * d)
        return kl_div(logits, pred_hat)


if __name__ == "__main__":
    import hydra

    # Build the task from the trainer and its pair of loss functions.
    task = get_task(VATTrainer,
                    [cross_entropy_with_softlabels, F.cross_entropy])
    hydra.main('config/vat.yaml')(task)()
示例#9
0
from omegaconf import OmegaConf, DictConfig
from artefact_nca.config.config_utils import setup_config

from artefact_nca.trainer.voxel_ca_trainer import VoxelCATrainer


def train(cfg: DictConfig):
    """Build a VoxelCATrainer from the hydra config and run training."""
    if "trainer" in cfg:
        # Convert to a plain container and drop the nested "config" entry
        # before handing the section to the trainer factory.
        cfg = OmegaConf.to_container(cfg)
        cfg["trainer"].pop("config")
    else:
        cfg["trainer"] = {}
    trainer = VoxelCATrainer.from_config(config=cfg["trainer"])
    trainer.train()


if __name__ == "__main__":
    setup_config()
    config_path = None
    config_name = None
    # Silence hydra's own output dir / logging so only trainer logs remain.
    hydra_args = "hydra.run.dir=. hydra.output_subdir=null hydra/job_logging=disabled hydra/hydra_logging=disabled".split(" ")
    if len(sys.argv) > 1 and sys.argv[1].startswith("config="):
        # Split only on the FIRST "=" so config paths containing "=" are
        # preserved (the old split("=")[-1] truncated such paths).
        config_path = sys.argv[1].split("=", 1)[1]
        sys.argv.pop(1)
        sys.argv.extend(hydra_args)
        config_path = os.path.abspath(config_path)
        # hydra wants the directory and the extension-less name separately.
        config_path, config_name = os.path.split(config_path)
        config_name = config_name.replace(".yaml", "")
    main_wrapper = hydra.main(config_path=config_path, config_name=config_name)
    main_wrapper(train)()
示例#10
0
    workspace.run()


def run(cfg):
    """Run `main` under hydra with overrides taken from a liftoff config."""
    # Hack to load with liftoff
    import sys

    # Replace argv with "key=value" pairs so hydra parses them as overrides.
    args = [sys.argv[0]]
    args.extend(f"{k}={v}" for k, v in flatten_cfg(cfg))
    sys.argv = args

    wrapper = hydra.main('config.yaml', strict=False)
    wrapper(main)()


if __name__ == "__main__":
    import sys

    cfg = parse_opts()

    # Forward the flattened liftoff config to hydra as argv overrides.
    overrides = [f"{k}={v}" for k, v in flatten_cfg(cfg)]
    sys.argv = [sys.argv[0], *overrides]

    hydra.main('config.yaml', strict=False)(main)()
示例#11
0
import torch
from homura.modules import cross_entropy_with_softlabels, to_onehot
from torch.nn import functional as F

from backends.utils import SSLTrainerBase, disable_bn_stats, get_task


class MeanTeacherTrainer(SSLTrainerBase):
    """Mean-teacher SSL trainer: the student learns from smoothed labels
    while staying consistent with an EMA teacher on unlabeled data."""

    def labeled(self, input: torch.Tensor,
                target: torch.Tensor) -> "tuple[torch.Tensor, torch.Tensor]":
        """Supervised step with label smoothing; returns (logits, loss)."""
        output = self.model(input)
        target = to_onehot(target, self.num_classes)
        # Label smoothing: pull the one-hot target toward uniform.
        target -= self.smoothing * (target - 1 / self.num_classes)
        loss = self.loss_f(output, target)
        return output, loss

    def unlabeled(self, input1: torch.Tensor,
                  input2: torch.Tensor) -> "tuple[torch.Tensor, torch.Tensor]":
        """Consistency step: MSE between student softmax and EMA-teacher softmax."""
        # BN running stats frozen; teacher pass runs without gradients.
        with disable_bn_stats(self.model):
            o1 = self.model(input1)
            with torch.no_grad():
                o2 = self.ema(input2)
        return o1, F.mse_loss(o1.softmax(dim=1), o2.softmax(dim=1))


if __name__ == "__main__":
    import hydra

    # Build the task, then decorate and launch it through hydra.
    task = get_task(MeanTeacherTrainer, cross_entropy_with_softlabels)
    hydra.main('config/mean_teacher.yaml')(task)()
示例#12
0
        loss = self.loss_f[0](output, target)
        return output, loss

    def unlabeled(self, input: torch.Tensor):
        """ICT step: mix inputs with EMA-teacher predictions, train on the mix."""
        # Teacher targets are produced without gradients, then mixed in place.
        with torch.no_grad():
            teacher_probs = self.ema(input).softmax(dim=-1)
            input, teacher_probs = self.mixup(input, teacher_probs)
        output = self.model(input)
        loss = self.loss_f[1](output, teacher_probs)
        return output, loss

    def mixup(self, input: torch.Tensor, target: torch.Tensor):
        """Mixup of a batch with a random permutation of itself.

        Mutates both arguments in place and returns them as the mixed pair.
        """
        if not torch.is_tensor(self.beta):
            # Cache the Beta parameter as a device tensor once — repeated
            # host->device conversion per call is a measurable slowdown.
            self.beta = torch.tensor(self.beta).to(self.device)
        # Per-sample mixing coefficient, broadcastable over image dims.
        gamma = Beta(self.beta, self.beta).sample((input.size(0), 1, 1, 1))
        perm = torch.randperm(input.size(0))
        # Advanced indexing copies, so the originals can be mutated safely below.
        perm_input = input[perm]
        perm_target = target[perm]
        # In-place convex combination: x <- g*x + (1-g)*x[perm]
        input.mul_(gamma).add_(perm_input.mul_(1 - gamma))
        gamma = gamma.view(-1, 1)
        target.mul_(gamma).add_(perm_target.mul_(1 - gamma))
        return input, target


if __name__ == '__main__':
    import hydra

    # Build the task with its two loss functions, then launch via hydra.
    task = get_task(ICTTrainer,
                    [cross_entropy_with_softlabels, mse_with_logits])
    hydra.main('config/ict.yaml')(task)()
示例#13
0
from pytorch_modules.engine import build_modules, load_obj


def run(cfg):
    """Assemble a pytorch-lightning trainer from the config and fit the model."""
    print(cfg.pretty())
    tb_logger = TensorBoardLogger(save_dir=cfg.general.save_dir)
    checkpoint_callback = build_modules(cfg.checkpoint)
    # Model class is looked up by dotted name from the config.
    model_cls = load_obj(cfg.model.model_name)
    model = model_cls(cfg)
    callbacks = build_modules(cfg.callbacks)
    trainer = pl.Trainer(
        logger=tb_logger,
        checkpoint_callback=checkpoint_callback,
        callbacks=callbacks,
        **cfg.trainer,
    )
    trainer.fit(model)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path',
                        default='conf/config.yaml',
                        type=str,
                        help='Your config file path.')
    parser.add_argument('--strict',
                        action='store_true',
                        help='Strict mode for hydra.')

    # Consume our own flags; hand any leftovers back to hydra via argv.
    opt, left = parser.parse_known_args()
    sys.argv = sys.argv[:1] + left
    hydra.main(config_path=opt.config_path, strict=opt.strict)(run)()
示例#14
0
        torch.cuda.manual_seed_all(seed)  # type: ignore


def _main(cfg: Config) -> None:
    """Undecorated entry point: merge user overrides, seed, build the data
    and model, then train, saving checkpoints under ``./models``."""
    if cfg.user_config is not None:
        # Override the config with the user config.  This `user_config`
        # mechanism has some limitations; it will be improved once
        # https://github.com/facebookresearch/hydra/issues/386 is solved.
        cfg = OmegaConf.merge(
            cfg, OmegaConf.load(hydra.utils.to_absolute_path(cfg.user_config)))
    cfg = parse(cfg)
    if cfg.seed:
        set_seed(cfg.seed)
    logger.info(cfg.pretty())
    train_data, val_data = create_data(cfg.train.data)
    nlp = cast(TorchLanguage, create_model(cfg.model))
    # NOTE(review): hydra appears to change the cwd per run, so this is the
    # run directory — confirm against the hydra config.
    logger.info("output dir: {}".format(os.getcwd()))
    if torch.cuda.is_available():
        logger.info("CUDA enabled")
        nlp.to(torch.device("cuda"))
    savedir = Path.cwd() / "models"
    savedir.mkdir(exist_ok=True)
    train(cfg.train, nlp, train_data, val_data, savedir)


# Decorated at module level instead of with `@hydra.main` so tests can
# import and call `_main` directly, bypassing hydra.
main = hydra.main(config_path="conf/train/config.yaml", strict=False)(_main)

if __name__ == "__main__":
    main()
示例#15
0
    def data_handle(self,
                    data: Tuple) -> Tuple:
        """Split a raw batch into ``(l_x, l_y, u_x, u_y)``.

        Five elements means the unlabeled sample arrives as two augmented
        views (Pillow augmentation); otherwise a single view is used.
        """
        if len(data) == 5:
            # Pillow augmentation: sharpen over the pair of views.
            input, target, u_x1, u_x2, _ = data
            u_x, u_y = self.sharpen((u_x1, u_x2))
        else:
            input, target, u_x, _ = data
            u_x, u_y = self.sharpen(u_x)
        # l_x, l_y, u_x, u_y
        return input, self.to_onehot(target), u_x, u_y

    def sharpen(self,
                input: "torch.Tensor | Tuple") -> Tuple[torch.Tensor, torch.Tensor]:
        """Mixmatch label guessing: average predictions over two views and
        sharpen the result with temperature.

        NOTE(review): the body indexes ``input[0]``/``input[1]`` and
        ``torch.cat``s *input*, so a plain-tensor argument only works if it
        behaves like a 2-element sequence — confirm against the callers.
        """
        u_b = torch.cat(input, dim=0)
        # BN running stats frozen while guessing labels.
        with disable_bn_stats(self.model):
            # Average the softmax over both views for a less noisy guess.
            q_b = (self.model(input[0]).softmax(dim=1) + self.model(input[1]).softmax(dim=1)) / 2
        # Temperature sharpening, renormalized back to a distribution.
        q_b.pow_(1 / self.temperature).div_(q_b.sum(dim=1, keepdim=True))
        # The same guessed label serves both concatenated views.
        return u_b, q_b.repeat(2, 1)


if __name__ == "__main__":
    import hydra

    # Build the task with its two loss functions, then launch via hydra.
    task = get_task(MixmatchTrainer,
                    [cross_entropy_with_softlabels, F.mse_loss])
    hydra.main('config/mixmatch.yaml')(task)()
示例#16
0
def hydra_init(config_path='conf', config_name='config', strict=None):
    """Return the `hydra.main` decorator configured with the given
    config directory, config name, and strict flag."""
    return hydra.main(config_path=config_path,
                      config_name=config_name,
                      strict=strict)