Example #1
    def __init__(self,
                 config,
                 experiments_prefix,
                 forced_debug=False,
                 logfile_name="log"):
        """Create a new Experiment instance.

        Modified based on: https://github.com/ex4sperans/mag

        Args:
            logfile_name: str, naming for log file. This can be useful to
                separate logs for different runs on the same experiment
            experiments_prefix: str, a prefix to the path where
                experiment will be saved
        """

        # get all defaults
        all_defaults = {}
        for key in vars(config):
            all_defaults[key] = get_base_parser().get_default(key)

        self.default_config = all_defaults

        # keep references needed by the experiment_dir property and logging
        self.config = config
        self.experiments_prefix = experiments_prefix
        self.forced_debug = forced_debug
        self.logfile_name = logfile_name

        config.resume = False
        if not config.debug and not forced_debug:
            if os.path.isdir(self.experiment_dir):
                print("log exists: {}".format(self.experiment_dir))
                config.resume = True

            print(config)
            self._makedir()
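
A rough usage sketch for the constructor above, assuming it belongs to the `train_helper.Experiment` class used in Examples #4 and #5 below; everything here mirrors those examples rather than a confirmed API:

import config
import train_helper

args = config.get_base_parser().parse_args()
with train_helper.Experiment(args, args.save_prefix) as exp:
    exp.log.info(args)         # the instance exposes a logger
    print(exp.experiment_dir)  # directory derived from non-default arguments
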
Example #2
    @property
    def experiment_dir(self):
        if self.config.debug or self.forced_debug:
            return "./"
        else:
            # get namespace for each group of args
            arg_g = dict()
            for group in get_base_parser()._action_groups:
                group_d = {
                    a.dest: self.default_config.get(a.dest, None)
                    for a in group._group_actions
                }
                arg_g[group.title] = argparse.Namespace(**group_d)

            # build the directory name from values that differ from the defaults
            identifier = ""
            for key, value in sorted(vars(arg_g["model_configs"]).items()):
                if getattr(self.config, key) != value:
                    identifier += key + str(getattr(self.config, key))
            return os.path.join(self.experiments_prefix, identifier)
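
To make the directory-naming logic concrete, here is a small self-contained sketch of the same idea; the keys and default values below are invented for illustration. Only arguments that differ from the parser defaults contribute to the folder name:

import os

defaults = {"hidden_size": 256, "n_layer": 2, "dropout": 0.1}  # hypothetical defaults
current = {"hidden_size": 512, "n_layer": 2, "dropout": 0.3}   # hypothetical run config

identifier = ""
for key, default in sorted(defaults.items()):
    if current[key] != default:  # values left at their default are skipped
        identifier += key + str(current[key])

print(os.path.join("./experiments", identifier))
# -> ./experiments/dropout0.3hidden_size512
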
Example #3
    def __init__(self, config, experiments_prefix, logfile_name="log"):
        """Create a new Experiment instance.

        Modified based on: https://github.com/ex4sperans/mag

        Args:
            logfile_name: str, naming for log file. This can be useful to
                separate logs for different runs on the same experiment
            experiments_prefix: str, a prefix to the path where
                experiment will be saved
        """

        # get all defaults
        all_defaults = {}
        for key in vars(config):
            all_defaults[key] = get_base_parser().get_default(key)

        self.default_config = all_defaults

        # keep references needed below and by the experiment_dir property
        self.config = config
        self.experiments_prefix = experiments_prefix
        self.logfile_name = logfile_name

        # normalize the activation function name (case-insensitive)
        activate_list = ["Tanh", "ReLU", "Softplus", "LogSigmoid"]
        map_activate = {act.lower(): act for act in activate_list}
        if self.config.activate.lower() in map_activate:
            self.config.activate = map_activate[self.config.activate.lower()]
        else:
            raise ValueError("Choose activation function among 'Tanh,'"
                             "ReLU, Softplus, logsigmoid'")

        config.resume = False
        if not config.debug:
            if os.path.isdir(self.experiment_dir):
                print("log exists: {}".format(self.experiment_dir))
                config.resume = True

            print(config)
            self._makedir()
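
All four names in `activate_list` match `torch.nn` class names, so the normalization above is presumably there so the string can later be resolved to an activation module. That step is not shown in the snippet; a sketch of how such a lookup could work:

import torch.nn as nn

def build_activation(name):
    """Resolve a case-insensitive activation name to a torch.nn instance."""
    activate_list = ["Tanh", "ReLU", "Softplus", "LogSigmoid"]
    map_activate = {act.lower(): act for act in activate_list}
    if name.lower() not in map_activate:
        raise ValueError("Choose activation function among "
                         "'Tanh, ReLU, Softplus, LogSigmoid'")
    return getattr(nn, map_activate[name.lower()])()

activation = build_activation("relu")  # -> nn.ReLU()
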
Example #4
                        gen_init=gen_init,
                        theta_y=theta_y,
                        disc_init=netD_state,
                        experiment=e)

    e.log.info(model)

    model.train()
    model.get_theta()
    model.save()

    with open(e.experiment_dir + "/bayes_all+thetas+weights.pkl", "wb+") as fp:
        pickle.dump([
            theta_y, model.all_theta, model.all_delta, model.all_weights,
            model.all_grad_norm, model.all_loss, model.all_tolerance
        ], fp)


if __name__ == '__main__':

    args = config.get_base_parser().parse_args()

    with train_helper.experiment(args, args.save_prefix) as e:
        np.random.seed(e.config.random_seed)
        torch.manual_seed(e.config.random_seed)

        e.log.info("*" * 25 + " ARGS " + "*" * 25)
        e.log.info(args)
        e.log.info("*" * 25 + " ARGS " + "*" * 25)

        run(e)
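
The pickle written in `run()` above bundles seven result lists into a single file; reading them back (assuming the same `experiment_dir` and file name) could look like this:

import pickle

# Read the seven result lists back from the file dumped above.
with open(e.experiment_dir + "/bayes_all+thetas+weights.pkl", "rb") as fp:
    (theta_y, all_theta, all_delta, all_weights,
     all_grad_norm, all_loss, all_tolerance) = pickle.load(fp)
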
Example #5
                   iteration=true_it,
                   epoch=epoch + 1,
                   name="latest")

        time_per_epoch = (e.elapsed_time / (epoch - start_epoch + 1))
        time_in_need = time_per_epoch * (e.config.n_epoch - epoch - 1)
        e.log.info("elapsed time: {:.2f}(h), "
                   "time per epoch: {:.2f}(h), "
                   "time needed to finish: {:.2f}(h)".format(
                       e.elapsed_time, time_per_epoch, time_in_need))
        train_stats.reset()


if __name__ == '__main__':

    PARSED_CONFIG = config.get_base_parser().parse_args()

    def exit_handler(*args):
        print(PARSED_CONFIG)
        print("best dev bleu: {:.4f}, test bleu: {:.4f}".format(
            BEST_DEV_BLEU, TEST_BLEU))
        sys.exit()

    train_helper.register_exit_handler(exit_handler)

    with train_helper.Experiment(PARSED_CONFIG,
                                 PARSED_CONFIG.save_prefix) as exp:

        exp.log.info("*" * 25 + " ARGS " + "*" * 25)
        exp.log.info(PARSED_CONFIG)
        exp.log.info("*" * 25 + " ARGS " + "*" * 25)