def __init__(self):
    """Restore the best saved experiment for the configured model.

    Loads the persisted experiment state, exposes all of its entries as
    attributes on this instance, rebuilds the best estimator from its saved
    directory/params, and prepares a fresh data loader.
    """
    exp = load_best_model_exp(conf.model)
    self.loaded_exp = exp
    # Promote every stored experiment field to an instance attribute
    # (e.g. best_model_dir / best_model_params used right below).
    self.__dict__.update(exp)
    factory = self.loaded_exp.train_eval_model_factory
    self.best_model = factory.load_estimator(self.best_model_dir,
                                             self.best_model_params)
    self.data_loader = create_data_loader()
    self.data_loader.load_data()
# ----- Example 2 -----
def create_load_experiment():
    """Instantiate the experiment selected by FLAGS.model and load its best model.

    Returns:
        The experiment with its best model loaded, or None when the chosen
        factory is not compatible with the current data loader.
    """
    loader = create_data_loader()
    # FLAGS.model names a factory class inside experiment.factory.
    factory = getattr(experiment.factory, FLAGS.model)()
    if not factory.is_compatible_with(loader):
        return None
    exp = factory.create(loader, FLAGS.num_workers, FLAGS.num_samples,
                         FLAGS.num_samples_best_eval)
    exp.load_best_model()
    return exp
def run_true_metrics(data_set):
    """Compute exact log-likelihoods for *data_set* and persist them.

    Switches into the data set's working directory, and — when the loader
    supports exact LL computation — writes the train/validation/test
    log-likelihoods to 'real_metrics.pkl' there. No-op otherwise.
    """
    chdir_data_set(data_set)

    FLAGS.data_set = data_set

    loader = create_data_loader()
    if not loader.can_compute_ll():
        return

    # Evaluate splits in the same order the file is expected to contain them.
    metrics = {
        "train_ll": loader.ll(loader.train_x, loader.train_y),
        "validation_ll": loader.ll(loader.validation_x, loader.validation_y),
        "test_ll": loader.ll(loader.test_x, loader.test_y),
    }

    with open('real_metrics.pkl', 'wb') as f:
        pickle.dump(metrics, f, pickle.HIGHEST_PROTOCOL)
    # NOTE(review): fragment of a larger driver function — it begins mid-body
    # (data_sample_random_seeds, funcs are defined above this view) and is cut
    # off inside the try block below; do not assume completeness.
    store_data_seeds(data_sample_random_seeds)
    print("Data sample seeds: %s"%data_sample_random_seeds)

    # Queue one true-metrics job per data set, plus one training run per
    # (model, data set) pair, all sharing the same data-sample seeds.
    for data_set in [data_set.strip() for data_set in FLAGS.data_sets.split(",")]:
        funcs.append(Callable(run_true_metrics, data_set=data_set))
        for model in [model.strip() for model in FLAGS.models.split(",")]:
            funcs.append(Callable(run,model=model, data_set=data_set,data_sample_random_seeds = data_sample_random_seeds))

    # First prefetch all data
    for data_set in FLAGS.data_sets.split(","):
        FLAGS.data_set = data_set
        target_dir = os.path.join(FLAGS.dir, data_set)
        os.makedirs(target_dir, exist_ok=True)
        os.chdir(target_dir)
        # Constructing the loader here presumably downloads/caches the data
        # before the worker processes start — TODO confirm.
        data_loader = create_data_loader()

    done = 0.0
    futures = []  # list of (funcs index, Future) pairs currently in flight
    res = [None] * len(funcs)  # results stored at each job's original index
    with concurrent.futures.ProcessPoolExecutor(FLAGS.num_parallel_experiments) as executor:
        for i, fun in enumerate(funcs):
            inserted = False
            # Busy-poll until a pool slot frees up for job i.
            while not inserted:
                if len(futures) < FLAGS.num_parallel_experiments:
                    futures.append((i, executor.submit(fun)))
                    inserted = True

                # Poll in-flight futures; result(0) raises TimeoutError for
                # unfinished ones (handler not visible in this fragment).
                for fut in list(futures):
                    try:
                        res[fut[0]] = fut[1].result(0)
# ----- Example 5 -----
 def __init__(self):
     """Load the best saved experiment for FLAGS.model and prepare its data.

     Exposes the persisted experiment entries as instance attributes,
     reconstructs the best estimator, and loads the data set.
     """
     exp = load_best_model_exp(FLAGS.model)
     # Promote stored fields (e.g. best_model_dir, best_model_params)
     # to attributes of this instance.
     self.__dict__.update(exp)
     self.best_model = load_model(self.best_model_dir, self.best_model_params)
     self.data_loader = create_data_loader()
     self.data_loader.load_data()