def assess_trial(self, trial_job_id, trial_history):
    """Decide whether a trial should keep running, using the median-stop rule.

    Parameters
    ----------
    trial_job_id : int
        Trial job id.
    trial_history : list
        The history performance matrix of each trial.

    Returns
    -------
    AssessResult
        AssessResult.Good or AssessResult.Bad.

    Raises
    ------
    Exception
        Unrecognized exception in medianstop_assessor.
    """
    curr_step = len(trial_history)
    if curr_step < self._start_step:
        return AssessResult.Good

    scalar_trial_history = extract_scalar_history(trial_history)
    self._update_data(trial_job_id, scalar_trial_history)
    if self._high_better:
        best_history = max(scalar_trial_history)
    else:
        best_history = min(scalar_trial_history)

    # Running averages of all completed trials at the current step.
    avg_array = []
    for id_ in self._completed_avg_history:
        if len(self._completed_avg_history[id_]) >= curr_step:
            avg_array.append(self._completed_avg_history[id_][curr_step - 1])
    if not avg_array:
        # No completed trial has reached this step yet; keep running.
        return AssessResult.Good

    # Stop the trial if its best result so far is worse than the median
    # of the completed trials' running averages at the same step.
    avg_array.sort()
    if self._high_better:
        median = avg_array[(len(avg_array) - 1) // 2]
        return AssessResult.Bad if best_history < median else AssessResult.Good
    median = avg_array[len(avg_array) // 2]
    return AssessResult.Bad if best_history > median else AssessResult.Good
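# To make the stopping rule concrete, the standalone sketch below replays the
# same median comparison on made-up numbers. It is only an illustration: the
# toy histories and the _demo_* helper are hypothetical, not part of this
# assessor, and higher-is-better is assumed.
def _demo_median_stop_rule():
    # Hypothetical running averages of three completed trials.
    completed_avg_history = {
        0: [0.50, 0.60, 0.65],
        1: [0.40, 0.45, 0.50],
        2: [0.55, 0.62, 0.70],
    }
    # Partial history of the trial under assessment: two steps so far.
    scalar_trial_history = [0.30, 0.35]

    curr_step = len(scalar_trial_history)
    best_history = max(scalar_trial_history)

    # Completed trials' running averages at the same step, sorted.
    avg_array = sorted(
        history[curr_step - 1]
        for history in completed_avg_history.values()
        if len(history) >= curr_step
    )
    median = avg_array[(len(avg_array) - 1) // 2]  # lower median, as in assess_trial
    # best_history = 0.35 < median = 0.60, so this trial would be stopped.
    return 'Bad (stop)' if best_history < median else 'Good (continue)'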
def assess_trial(self, trial_job_id, trial_history):
    """Decide whether a trial should be early stopped by the curve fitting algorithm.

    Parameters
    ----------
    trial_job_id : int
        Trial job id.
    trial_history : list
        The history performance matrix of each trial.

    Returns
    -------
    AssessResult
        AssessResult.Good or AssessResult.Bad.

    Raises
    ------
    Exception
        Unrecognized exception in curvefitting_assessor.
    """
    scalar_trial_history = extract_scalar_history(trial_history)
    self.trial_history = scalar_trial_history
    if not self.set_best_performance:
        return AssessResult.Good
    curr_step = len(scalar_trial_history)
    if curr_step < self.start_step:
        return AssessResult.Good

    # Re-assess a given trial at most once every `self.gap` steps.
    if trial_job_id in self.last_judgment_num and curr_step - self.last_judgment_num[trial_job_id] < self.gap:
        return AssessResult.Good
    self.last_judgment_num[trial_job_id] = curr_step

    try:
        start_time = datetime.datetime.now()
        # Extrapolate the final result from the partial learning curve.
        curvemodel = CurveModel(self.target_pos)
        predict_y = curvemodel.predict(scalar_trial_history)
        logger.info('Prediction done. Trial job id = %s. Predict value = %s', trial_job_id, predict_y)
        if predict_y is None:
            logger.info('wait for more information to predict precisely')
            return AssessResult.Good
        standard_performance = self.completed_best_performance * self.threshold

        end_time = datetime.datetime.now()
        if (end_time - start_time).seconds > 60:
            logger.warning(
                'Curve Fitting Assessor runtime exceeds 60s, trial id = %s, trial history = %s',
                trial_job_id, self.trial_history)

        # Keep the trial only if its predicted final performance beats
        # `threshold` times the best completed trial's performance.
        if self.higher_better:
            if predict_y > standard_performance:
                return AssessResult.Good
            return AssessResult.Bad
        if predict_y < standard_performance:
            return AssessResult.Good
        return AssessResult.Bad
    except Exception as exception:
        logger.exception('Unrecognized exception in curvefitting_assessor: %s', exception)
        # On unexpected errors, keep the trial rather than returning None.
        return AssessResult.Good
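# A similar standalone sketch of the curve fitting assessor's final threshold
# check, in isolation. Every constant is made up for illustration; predict_y
# stands in for the value extrapolated by CurveModel.predict, and the _demo_*
# helper is hypothetical. Scaling the best completed performance by the
# threshold leaves a tolerance band, so trials predicted to land only
# slightly below the current best are not killed prematurely.
def _demo_curve_fitting_threshold():
    higher_better = True
    completed_best_performance = 0.92  # best final metric among completed trials
    threshold = 0.95                   # keep trials predicted to reach 95% of the best
    predict_y = 0.85                   # hypothetical extrapolated final value

    standard_performance = completed_best_performance * threshold  # 0.874
    if higher_better:
        keep = predict_y > standard_performance
    else:
        keep = predict_y < standard_performance
    # 0.85 <= 0.874, so this trial would be stopped.
    return 'Good (continue)' if keep else 'Bad (stop)'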