Example #1
    def _merge_from_list(self, opts):
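        """Merge a flat ["a.b.c", value, ...] override list into self.config."""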
        if opts is None:
            opts = []

        assert len(opts) % 2 == 0, "Number of opts should be multiple of 2"

        for opt, value in zip(opts[0::2], opts[1::2]):
            splits = opt.split(".")
            current = self.config
            for idx, field in enumerate(splits):
                if field not in current:
                    raise AttributeError("While updating configuration"
                                         " option {} is missing from"
                                         " configuration at field {}".format(
                                             opt, field))
                if not isinstance(current[field], collections.abc.Mapping):
                    if idx == len(splits) - 1:
                        if is_main_process():
                            print_only_main(
                                "Overriding option {} to {}".format(
                                    opt, value))

                        current[field] = self._decode_value(value)
                    else:
                        raise AttributeError(
                            "While updating configuration,"
                            " option {} is not present"
                            " after field {}".format(opt, field)
                        )
                else:
                    current = current[field]
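For reference, below is a minimal standalone sketch of the same dotted-key override walk on a hand-built nested dict; the merge_from_list helper and the sample config keys are made up for illustration, and unlike the method above it does not decode values or log the override.

import collections.abc

def merge_from_list(config, opts):
    # Illustrative stand-in: apply ["a.b.c", value, ...] overrides to a nested dict.
    assert len(opts) % 2 == 0, "Number of opts should be multiple of 2"
    for opt, value in zip(opts[0::2], opts[1::2]):
        splits = opt.split(".")
        current = config
        for idx, field in enumerate(splits):
            if field not in current:
                raise AttributeError(
                    "Option {} is missing from configuration at field {}".format(opt, field)
                )
            if isinstance(current[field], collections.abc.Mapping):
                # Descend one level and keep walking the dotted path.
                current = current[field]
            elif idx == len(splits) - 1:
                # Reached a leaf on the final path component: apply the override.
                current[field] = value
            else:
                raise AttributeError(
                    "Option {} is not present after field {}".format(opt, field)
                )
    return config

# Hypothetical config and override, purely for illustration:
cfg = {"training_parameters": {"batch_size": 64, "device": "cuda"}}
merge_from_list(cfg, ["training_parameters.batch_size", "128"])
assert cfg["training_parameters"]["batch_size"] == "128"  # value stays a raw string here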
Example #2
    def _update_specific(self, args):
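        """Apply args-specific adjustments (seed, learning rate, device, parallelism) to the config."""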
        self.writer = registry.get("writer")
        tp = self.config["training_parameters"]

        if args["seed"] is not None or tp['seed'] is not None:
            print_only_main(
                "You have chosen to seed the training. This will turn on CUDNN deterministic "
                "setting which can slow down your training considerably! You may see unexpected "
                "behavior when restarting from checkpoints.")

        if args["seed"] == -1:
            self.config["training_parameters"]["seed"] = random.randint(
                1, 1000000)

        if "learning_rate" in args:
            if "optimizer" in self.config and "params" in self.config[
                    "optimizer"]:
                lr = args["learning_rate"]
                self.config["optimizer_attributes"]["params"]["lr"] = lr

        if (not torch.cuda.is_available()
                and "cuda" in self.config["training_parameters"]["device"]):
            if is_main_process():
                print_only_main(
                    "WARNING: Device specified is 'cuda' but cuda is "
                    "not present. Switching to CPU version")
            self.config["training_parameters"]["device"] = "cpu"

        if tp["distributed"] is True and tp["data_parallel"] is True:
            print_only_main("training_parameters.distributed and "
                            "training_parameters.data_parallel are "
                            "mutually exclusive. Setting "
                            "training_parameters.distributed to False")
            tp["distributed"] = False