Example #1
    def get_param(self):
        """
        Get the next hyperparameter values; returns None when the experiment is finished.
        :return: hyperparameters as a dictionary
        """
        while True:
            next_run = None
            for i in self.active_iterations():
                next_run = self.iterations[i].get_next_run()
                if next_run is not None:
                    break

            if next_run is not None:
                logger.debug("new hyperparameters %s" % (next_run, ))
                break
            else:
                if self.n_iterations > 0:
                    logger.debug("create new iteration for %d" %
                                 self.n_iterations)
                    self.iterations.append(
                        self.get_next_iteration(len(self.iterations)))
                    self.n_iterations -= 1
                else:
                    self.finished = True
                    return None

        config_id, config, budget = next_run
        job = Job(config_id, config=config, budget=budget)
        job.time_it("started")
        self.running_jobs[self.tid] = job
        config = config.copy()
        config['tid'] = self.tid
        config['n_iterations'] = budget  # for job execution
        self.tid += 1
        return config
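
For orientation, a minimal driver sketch for get_param(): poll it until it returns None, evaluate each configuration, and report the loss back through the tuner's result-receiving method. `tuner` and `evaluate` are hypothetical stand-ins, not names taken from the example above.

def run_experiment(tuner, evaluate):
    # Hypothetical driver loop: `tuner` exposes get_param() as above and
    # `evaluate(config, budget)` is a user-supplied objective returning a loss.
    losses = []
    while True:
        config = tuner.get_param()
        if config is None:                  # experiment finished
            break
        budget = config['n_iterations']     # budget stored by get_param()
        losses.append(evaluate(config, budget))
        # a matching result-reporting method of the tuner would be called here
    return losses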
Example #2
    def __init__(self, Result, config_generator):

        self.is_finished = False
        self.stage = 0

        id2conf = Result.get_id2config_mapping()
        # shift all timestamps so that the most recent run finishes at time 0
        delta_t = -max(map(lambda r: r.time_stamps['finished'], Result.get_all_runs()))

        super().__init__(-1, [len(id2conf)], [None], None)

        for i, id in enumerate(id2conf):
            new_id = self.add_configuration(config=id2conf[id]['config'], config_info=id2conf[id]['config_info'])

            for r in Result.get_runs_by_id(id):

                j = Job(new_id, config=id2conf[id]['config'], budget=r.budget)

                j.result = {'loss': r.loss, 'info': r.info}
                j.error_logs = r.error_logs

                for k, v in r.time_stamps.items():
                    j.timestamps[k] = v + delta_t

                self.register_result(j, skip_sanity_checks=True)

                # only rebuild the model when registering the last configuration's runs
                config_generator.new_result(j, update_model=(i == len(id2conf) - 1))

        # mark as finished; no more runs should be scheduled from this iteration
        self.is_finished = True
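
The Result object consumed by this constructor usually comes from an earlier hpbandster run. Below is a minimal sketch of loading one from the JSON log files written by hpbandster's json result logger ('previous_run_dir' is a placeholder path); handing such a Result to an optimizer via its previous_result argument is what typically leads to a warm-start iteration like the one above being built.

from hpbandster.core.result import logged_results_to_HBS_result

# Sketch: 'previous_run_dir' is a placeholder directory containing the
# configs.json / results.json files written by hpbandster's json result logger.
previous_result = logged_results_to_HBS_result('previous_run_dir')
id2conf = previous_result.get_id2config_mapping()
print('loaded %i configurations from the previous run' % len(id2conf))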
Example #3
    def train_kde(self, result, config_space):
        cg = BohbConfigGenerator(config_space)

        results_for_budget = dict()
        build_model_jobs = dict()
        id2conf = result.get_id2config_mapping()
        for id in id2conf:
            for r in result.get_runs_by_id(id):
                j = Job(id, config=id2conf[id]['config'], budget=r.budget)
                # treat crashed or unfinished runs as infinitely bad
                if r.loss is None:
                    r.loss = float('inf')
                if r.info is None:
                    r.info = dict()
                j.result = {'loss': r.loss, 'info': r.info}
                j.error_logs = r.error_logs

                if r.budget not in results_for_budget:
                    results_for_budget[r.budget] = list()
                results_for_budget[r.budget].append(j)

                # hold back one job per budget so the KDE is only fitted once per budget
                if r.loss is not None and r.budget not in build_model_jobs:
                    build_model_jobs[r.budget] = j
                    continue
                cg.new_result(j, update_model=False)
        for j in build_model_jobs.values():
            cg.new_result(j, update_model=True)

        # collect the fitted "good"/"bad" KDEs, sorted by budget
        good_kdes = [m["good"] for b, m in sorted(cg.kde_models.items())]
        bad_kdes = [m["bad"] for b, m in sorted(cg.kde_models.items())]
        budgets = sorted(cg.kde_models.keys())
        return good_kdes, bad_kdes, budgets
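
A rough sketch of inspecting the returned KDEs, assuming they are the statsmodels KDEMultivariate models that BOHB's config generator fits internally (so each exposes a .pdf() method), that trainer, result, and config_space already exist, and that the configuration space has no conditional hyperparameters (so get_array() contains no NaNs).

def inspect_kdes(trainer, result, config_space):
    # `trainer` stands for whatever object defines train_kde() above.
    good_kdes, bad_kdes, budgets = trainer.train_kde(result, config_space)

    x = config_space.sample_configuration().get_array()  # one configuration as a vector
    for budget, good, bad in zip(budgets, good_kdes, bad_kdes):
        # BOHB-style score: ratio of "good" to "bad" density at x
        ratio = float(good.pdf(x)) / max(float(bad.pdf(x)), 1e-32)
        print('budget %s: good/bad density ratio = %.3g' % (budget, ratio))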
Example #4
    def __init__(self, Result, config_generator):

        self.logger = logging.getLogger(name="warm_start_logger")

        self.is_finished = False
        self.stage = 0

        id2conf = Result.get_id2config_mapping()
        delta_t = -max(
            map(lambda r: r.time_stamps['finished'], Result.get_all_runs()))
        super().__init__(-1, [len(id2conf)], [None], None)

        for i, id in enumerate(id2conf):
            new_id = self.add_configuration(
                config=id2conf[id]['config'],
                config_info=id2conf[id]['config_info'])

            for count, r in enumerate(Result.get_runs_by_id(id)):

                j = Job(new_id, config=id2conf[id]['config'], budget=r.budget)

                j.result = {'loss': r.loss, 'info': r.info}
                j.error_logs = r.error_logs

                for k, v in r.time_stamps.items():
                    j.timestamps[k] = v + delta_t

                self.register_result(j, skip_sanity_checks=True)
                self.logger.debug("Registered result %s -> %s", count, r)

                # set_trace(host="127.0.0.1", port=34500)
                try:
                    config_generator.new_result(
                        j, update_model=(i == len(id2conf) - 1))
                except Exception as ex:
                    self.logger.debug("Caught {}".format(
                        ex.__class__.__name__))
                    # self.logger.debug("Entering remote debugger...")
                    # set_trace(host="127.0.0.1", port=34500)
                    raise ex from None

        # mark as finished; no more runs should be scheduled from this iteration
        self.logger.debug("Finished loading warm-start Result!")
        self.is_finished = True
Example #5
    def test_imputation_conditional_spaces(self):

        bohb = BOHB(self.configspace, random_fraction=0)

        raw_array = []

        for i in range(128):

            config = self.configspace.sample_configuration()
            raw_array.append(config.get_array())
            imputed_array = bohb.impute_conditional_data(np.array(raw_array))
            self.assertFalse(np.any(np.isnan(imputed_array)))
            job = Job(i, budget=1, config=config)
            job.result = {'loss': np.random.rand(), 'info': {}}
            bohb.new_result(job)

        for j in range(64):
            conf, info = bohb.get_config(1)
            self.assertTrue(info['model_based_pick'])
Example #6
def run_with_time(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, iteration_kwargs={}):
    """
    Custom run method for hpbandster's Master class.

    Parameters:
    -----------
    runtime: int
        time for this run in seconds
    n_iterations: int
        maximum number of iterations
    min_n_workers: int
        minimum number of workers before starting the run
    iteration_kwargs: dict
        additional keyword arguments passed on to get_next_iteration
    """

    self.wait_for_workers(min_n_workers)

    iteration_kwargs.update({'result_logger': self.result_logger})

    if self.time_ref is None:
        self.time_ref = time.time()
        self.config['time_ref'] = self.time_ref
    
        self.logger.info('HBMASTER: starting run at %s'%(str(self.time_ref)))

    self.thread_cond.acquire()

    start_time = time.time()

    while True:

        self._queue_wait()

        # check whether the time limit has been reached
        if runtime < time.time() - start_time:
            self.logger.info('HBMASTER: Timelimit reached: wait for remaining %i jobs'%self.num_running_jobs)
            break
        
        next_run = None
        # find a new run to schedule
        for i in self.active_iterations():
            next_run = self.iterations[i].get_next_run()
            if next_run is not None: break

        if next_run is not None:
            self.logger.debug('HBMASTER: schedule new run for iteration %i'%i)
            self._submit_job(*next_run)
            continue
        elif n_iterations > 0:
            next_HPB_iter = len(self.iterations) + (self.iterations[0].HPB_iter if len(self.iterations) > 0 else 0)
            self.iterations.append(self.get_next_iteration(next_HPB_iter, iteration_kwargs))
            n_iterations -= 1
            continue

        # at this point there is no immediate run that can be scheduled,
        # so wait for some job to finish if there are active iterations
        if self.active_iterations():
            self.thread_cond.wait()
        else:
            break

    # clean up / cancel remaining iteration runs
    next_run = True
    n_canceled = 0
    while next_run is not None:
        next_run = None
        for i in self.active_iterations():
            next_run = self.iterations[i].get_next_run()
            if next_run is not None:
                config_id, config, budget = next_run
                job = Job(config_id, config=config, budget=budget, working_directory=self.working_directory)
                self.iterations[job.id[0]].register_result(job) # register dummy job - will be interpreted as canceled job
                n_canceled += 1
                break

    self.logger.debug('HBMASTER: Canceled %i remaining runs'%n_canceled)

    # wait for remaining jobs
    while self.num_running_jobs > 0:
        self.thread_cond.wait(60)
        self.logger.debug('HBMASTER: Job finished: wait for remaining %i jobs'%self.num_running_jobs)

    self.thread_cond.release()
    
    for i in self.warmstart_iteration:
        i.fix_timestamps(self.time_ref)
        
    ws_data = [i.data for i in self.warmstart_iteration]
    
    return Result([copy.deepcopy(i.data) for i in self.iterations] + ws_data, self.config)
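
Since run_with_time is written as a stand-alone replacement for Master.run (it relies on time, copy, Job, and Result being imported at module level), one way to use it is to bind it onto an optimizer instance. The construction below is only a sketch: it assumes a running nameserver and at least one connected worker, and config_space and 'example_run' are placeholders.

import types

from hpbandster.optimizers import BOHB

# Sketch only: assumes a Pyro nameserver is running on 127.0.0.1 and at least one
# worker is connected; `config_space` is a ConfigSpace.ConfigurationSpace defined elsewhere.
bohb = BOHB(configspace=config_space, run_id='example_run', nameserver='127.0.0.1')

# bind the custom run method onto this particular instance
bohb.run_with_time = types.MethodType(run_with_time, bohb)

# run for at most 10 minutes or 20 iterations, whichever limit is hit first
result = bohb.run_with_time(runtime=600, n_iterations=20, min_n_workers=1)
bohb.shutdown(shutdown_workers=True)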