Example #1
 def __init__(self,
              time_attr="time_total_s",
              reward_attr=None,
              metric="episode_reward_mean",
              mode="max",
              grace_period=60.0,
              min_samples_required=3,
              min_time_slice=0,
              hard_stop=True):
     assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
     if reward_attr is not None:
         mode = "max"
         metric = reward_attr
         logger.warning(
             "`reward_attr` is deprecated and will be removed in a future "
             "version of Tune. "
             "Setting `metric={}` and `mode=max`.".format(reward_attr))
     FIFOScheduler.__init__(self)
     self._stopped_trials = set()
     self._grace_period = grace_period
     self._min_samples_required = min_samples_required
     self._min_time_slice = min_time_slice
     self._metric = metric
     assert mode in {"min", "max"}, "`mode` must be 'min' or 'max'."
     self._worst = float("-inf") if mode == "max" else float("inf")
     self._compare_op = max if mode == "max" else min
     self._time_attr = time_attr
     self._hard_stop = hard_stop
     self._trial_state = {}
     self._last_pause = collections.defaultdict(lambda: float("-inf"))
     self._results = collections.defaultdict(list)
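The constructor above matches the shape of Ray Tune's MedianStoppingRule. A minimal usage sketch, assuming the stock MedianStoppingRule and the classic tune.run / tune.report API (the trainable and its config are invented for illustration):

# Sketch only: stock MedianStoppingRule driven through the classic tune.run API.
from ray import tune
from ray.tune.schedulers import MedianStoppingRule

def trainable(config):
    for step in range(100):
        # Report the field the scheduler compares across trials.
        tune.report(episode_reward_mean=config["lr"] * step)

scheduler = MedianStoppingRule(
    time_attr="time_total_s",       # clock used for the grace period
    metric="episode_reward_mean",   # result field to compare
    mode="max",
    grace_period=60.0,              # seconds before a trial may be stopped
    min_samples_required=3,         # trials needed before the running median is trusted
)

tune.run(trainable, config={"lr": tune.grid_search([0.01, 0.1])}, scheduler=scheduler)
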
Example #2
    def __init__(self,
                 time_attr='training_iteration',
                 reward_attr='episode_reward_mean',
                 max_t=100,
                 grace_period=10,
                 reduction_factor=3,
                 brackets=3):
        assert max_t > 0, "Max (time_attr) not valid!"
        assert max_t >= grace_period, "grace_period must be <= max_t!"
        assert grace_period > 0, "grace_period must be positive!"
        assert reduction_factor > 1, "Reduction Factor not valid!"
        assert brackets > 0, "brackets must be positive!"
        FIFOScheduler.__init__(self)
        self._reduction_factor = reduction_factor
        self._max_t = max_t

        self._trial_info = {}  # Stores Trial -> Bracket

        # Tracks state for new trial add
        self._brackets = [
            _Bracket(grace_period, max_t, reduction_factor, s)
            for s in range(brackets)
        ]
        self._counter = 0  # for
        self._num_stopped = 0
        self._reward_attr = reward_attr
        self._time_attr = time_attr
Example #3
    def __init__(self,
                 time_attr="time_total_s",
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 grace_period=60.0,
                 min_samples_required=3,
                 hard_stop=True,
                 verbose=True):
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))

        FIFOScheduler.__init__(self)
        self._stopped_trials = set()
        self._completed_trials = set()
        self._results = collections.defaultdict(list)
        self._grace_period = grace_period
        self._min_samples_required = min_samples_required
        self._metric = metric
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr
        self._hard_stop = hard_stop
        self._verbose = verbose
Example #4
    def __init__(
        self,
        time_attr: str = "time_total_s",
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        grace_period: float = 60.0,
        min_samples_required: int = 3,
        min_time_slice: int = 0,
        hard_stop: bool = True,
    ):
        FIFOScheduler.__init__(self)
        self._stopped_trials = set()
        self._grace_period = grace_period
        self._min_samples_required = min_samples_required
        self._min_time_slice = min_time_slice
        self._metric = metric
        self._worst = None
        self._compare_op = None

        self._mode = mode
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
            self._worst = float("-inf") if self._mode == "max" else float("inf")
            self._compare_op = max if self._mode == "max" else min

        self._time_attr = time_attr
        self._hard_stop = hard_stop
        self._trial_state = {}
        self._last_pause = collections.defaultdict(lambda: float("-inf"))
        self._results = collections.defaultdict(list)
Example #6
    def __init__(self, metric="acc", mode='max', patience=1, hard_stop=True):

        FIFOScheduler.__init__(self)
        self._metric = metric
        assert mode in {"min", "max"}, "`mode` must be 'min' or 'max'."
        self._worst = float("-inf") if mode == "max" else float("inf")
        self._compare_op = max if mode == "max" else min
        self._hard_stop = hard_stop
        self._patience = patience
        self._results = defaultdict(list)
        self._beaten = defaultdict(int)
Example #7
 def __init__(
     self,
     base_scheduler: Optional[TrialScheduler] = None,
     resources_allocation_function: Optional[
         Callable[
             [
                 "trial_runner.TrialRunner",
                 Trial,
                 Dict[str, Any],
                 "ResourceChangingScheduler",
             ],
             Union[None, PlacementGroupFactory, Resources],
         ]
     ] = _DistributeResourcesDefault,
 ) -> None:
     super().__init__()
     if resources_allocation_function is None:
         warnings.warn(
             "`resources_allocation_function` is None. No resource "
             "requirements will be changed at any time. Pass a "
             "correctly defined function to enable functionality."
         )
     self._resources_allocation_function = resources_allocation_function
     self._base_scheduler = base_scheduler or FIFOScheduler()
     self._base_trial_resources: Optional[
         Union[Resources, PlacementGroupFactory]
     ] = None
     self._trials_to_reallocate: Dict[
         Trial, Union[None, dict, PlacementGroupFactory]
     ] = {}
     self._reallocated_trial_ids: Set[str] = set()
     self._metric = None
     self._mode = None
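The Callable type above spells out the contract for a custom resources_allocation_function: it receives the trial runner, the trial, the latest result dict, and the scheduler, and returns either None (keep the current resources) or a new resource request. A minimal sketch under those assumptions; the 10-iteration policy and the 2-CPU bundle are illustrative, and import paths vary across Ray versions.

# Illustrative resources_allocation_function matching the signature above.
from typing import Any, Dict

from ray import tune
from ray.tune.schedulers import FIFOScheduler, ResourceChangingScheduler

def allocate_more_cpus(trial_runner, trial, result: Dict[str, Any], scheduler):
    # Returning None leaves the trial's current resource request unchanged.
    if result.get("training_iteration", 0) < 10:
        return None
    # Otherwise request a larger bundle for the trial (illustrative policy).
    return tune.PlacementGroupFactory([{"CPU": 2}])

resource_changing = ResourceChangingScheduler(
    base_scheduler=FIFOScheduler(),
    resources_allocation_function=allocate_more_cpus,
)
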
Example #8
    def __init__(
        self,
        time_attr: str = "training_iteration",
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        max_t: int = 100,
        grace_period: int = 1,
        reduction_factor: float = 4,
        brackets: int = 1,
        stop_last_trials: bool = True,
    ):
        assert max_t > 0, "Max (time_attr) not valid!"
        assert max_t >= grace_period, "grace_period must be <= max_t!"
        assert grace_period > 0, "grace_period must be positive!"
        assert reduction_factor > 1, "Reduction Factor not valid!"
        assert brackets > 0, "brackets must be positive!"
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        FIFOScheduler.__init__(self)
        self._reduction_factor = reduction_factor
        self._max_t = max_t

        self._trial_info = {}  # Stores Trial -> Bracket

        # Tracks state for new trial add
        self._brackets = [
            _Bracket(
                grace_period,
                max_t,
                reduction_factor,
                s,
                stop_last_trials=stop_last_trials,
            )
            for s in range(brackets)
        ]
        self._counter = 0  # for
        self._num_stopped = 0
        self._metric = metric
        self._mode = mode
        self._metric_op = None
        if self._mode == "max":
            self._metric_op = 1.0
        elif self._mode == "min":
            self._metric_op = -1.0
        self._time_attr = time_attr
        self._stop_last_trials = stop_last_trials
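With the defaults above (grace_period=1, max_t=100, reduction_factor=4), each _Bracket decides whether to stop or promote a trial at rung milestones roughly of the form grace_period * reduction_factor**(k + s), capped below max_t. A stand-alone re-computation of those milestones, illustrative only and not the _Bracket implementation:

# Recomputes ASHA-style rung milestones for the defaults used above.
def rung_milestones(grace_period=1, max_t=100, reduction_factor=4, s=0):
    milestones, k = [], 0
    while grace_period * reduction_factor ** (k + s) < max_t:
        milestones.append(grace_period * reduction_factor ** (k + s))
        k += 1
    return milestones

print(rung_milestones())       # [1, 4, 16, 64]
print(rung_milestones(s=1))    # [4, 16, 64]  (a more aggressive bracket)
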
Example #9
 def __init__(self,
              time_attr="time_total_s",
              reward_attr="episode_reward_mean",
              grace_period=60.0,
              min_samples_required=3,
              hard_stop=True,
              verbose=True):
     FIFOScheduler.__init__(self)
     self._stopped_trials = set()
     self._completed_trials = set()
     self._results = collections.defaultdict(list)
     self._grace_period = grace_period
     self._min_samples_required = min_samples_required
     self._reward_attr = reward_attr
     self._time_attr = time_attr
     self._hard_stop = hard_stop
     self._verbose = verbose
Example #10
    def __init__(self,
                 time_attr="training_iteration",
                 reward_attr=None,
                 metric=None,
                 mode=None,
                 max_t=100,
                 grace_period=1,
                 reduction_factor=4,
                 brackets=1):
        assert max_t > 0, "Max (time_attr) not valid!"
        assert max_t >= grace_period, "grace_period must be <= max_t!"
        assert grace_period > 0, "grace_period must be positive!"
        assert reduction_factor > 1, "Reduction Factor not valid!"
        assert brackets > 0, "brackets must be positive!"
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))

        FIFOScheduler.__init__(self)
        self._reduction_factor = reduction_factor
        self._max_t = max_t

        self._trial_info = {}  # Stores Trial -> Bracket

        # Tracks state for new trial add
        self._brackets = [
            _Bracket(grace_period, max_t, reduction_factor, s)
            for s in range(brackets)
        ]
        self._counter = 0  # for
        self._num_stopped = 0
        self._metric = metric
        self._mode = mode
        self._metric_op = None
        if self._mode == "max":
            self._metric_op = 1.
        elif self._mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr
Example #11
    def __init__(self,
                 time_attr: str = "training_iteration",
                 reward_attr: Optional[str] = None,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 max_t: int = 81,
                 reduction_factor: float = 3,
                 stop_last_trials: bool = True):
        assert max_t > 0, "Max (time_attr) not valid!"
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))

        FIFOScheduler.__init__(self)
        self._eta = reduction_factor
        self._s_max_1 = int(
            np.round(np.log(max_t) / np.log(reduction_factor))) + 1
        self._max_t_attr = max_t
        # bracket max trials
        self._get_n0 = lambda s: int(
            np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
        # bracket initial iterations
        self._get_r0 = lambda s: int((max_t * self._eta**(-s)))
        self._hyperbands = [[]]  # list of hyperband iterations
        self._trial_info = {}  # Stores Trial -> Bracket, Band Iteration

        # Tracks state for new trial add
        self._state = {"bracket": None, "band_idx": 0}
        self._num_stopped = 0
        self._metric = metric
        self._mode = mode
        self._metric_op = None

        if self._mode == "max":
            self._metric_op = 1.
        elif self._mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr
        self._stop_last_trials = stop_last_trials
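For the defaults above (max_t=81, reduction_factor=3), the _get_n0 and _get_r0 lambdas determine how many trials enter each bracket and how many iterations each gets at first. Re-evaluating the same formulas outside the class (illustrative only):

# Re-evaluates the bracket-sizing formulas from the constructor above.
import numpy as np

max_t, eta = 81, 3
s_max_1 = int(np.round(np.log(max_t) / np.log(eta))) + 1   # 5

for s in range(s_max_1):
    n0 = int(np.ceil(s_max_1 / (s + 1) * eta ** s))  # trials admitted to bracket s
    r0 = int(max_t * eta ** (-s))                    # initial iterations per trial
    print(f"s={s}: n0={n0:>2}  r0={r0}")
# s=0: n0= 5  r0=81
# s=1: n0= 8  r0=27
# s=2: n0=15  r0=9
# s=3: n0=34  r0=3
# s=4: n0=81  r0=1
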
Example #12
    def __init__(self,
                 time_attr='training_iteration',
                 reward_attr='episode_reward_mean',
                 max_t=81):
        assert max_t > 0, "Max (time_attr) not valid!"
        FIFOScheduler.__init__(self)
        self._eta = 3
        self._s_max_1 = 5
        self._max_t_attr = max_t
        # bracket max trials
        self._get_n0 = lambda s: int(
            np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
        # bracket initial iterations
        self._get_r0 = lambda s: int((max_t * self._eta**(-s)))
        self._hyperbands = [[]]  # list of hyperband iterations
        self._trial_info = {}  # Stores Trial -> Bracket, Band Iteration

        # Tracks state for new trial add
        self._state = {"bracket": None, "band_idx": 0}
        self._num_stopped = 0
        self._reward_attr = reward_attr
        self._time_attr = time_attr
Example #14
    def __init__(
        self,
        time_attr: str = "training_iteration",
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        max_t: int = 81,
        reduction_factor: float = 3,
        stop_last_trials: bool = True,
    ):
        assert max_t > 0, "Max (time_attr) not valid!"
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        FIFOScheduler.__init__(self)
        self._eta = reduction_factor
        self._s_max_1 = int(np.round(
            np.log(max_t) / np.log(reduction_factor))) + 1
        self._max_t_attr = max_t
        # bracket max trials
        self._get_n0 = lambda s: int(
            np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
        # bracket initial iterations
        self._get_r0 = lambda s: int((max_t * self._eta**(-s)))
        self._hyperbands = [[]]  # list of hyperband iterations
        self._trial_info = {}  # Stores Trial -> Bracket, Band Iteration

        # Tracks state for new trial add
        self._state = {"bracket": None, "band_idx": 0}
        self._num_stopped = 0
        self._metric = metric
        self._mode = mode
        self._metric_op = None

        if self._mode == "max":
            self._metric_op = 1.0
        elif self._mode == "min":
            self._metric_op = -1.0
        self._time_attr = time_attr
        self._stop_last_trials = stop_last_trials
Example #15
    def __init__(self,
                 time_attr="training_iteration",
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 max_t=81):
        assert max_t > 0, "Max (time_attr) not valid!"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))

        FIFOScheduler.__init__(self)
        self._eta = 3
        self._s_max_1 = 5
        self._max_t_attr = max_t
        # bracket max trials
        self._get_n0 = lambda s: int(
            np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
        # bracket initial iterations
        self._get_r0 = lambda s: int((max_t * self._eta**(-s)))
        self._hyperbands = [[]]  # list of hyperband iterations
        self._trial_info = {}  # Stores Trial -> Bracket, Band Iteration

        # Tracks state for new trial add
        self._state = {"bracket": None, "band_idx": 0}
        self._num_stopped = 0
        self._metric = metric
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr
Example #16
    def __init__(self,
                 time_attr="time_total_s",
                 reward_attr="episode_reward_mean",
                 perturbation_interval=60.0,
                 hyperparam_mutations={},
                 resample_probability=0.25,
                 custom_explore_fn=None):
        if not hyperparam_mutations and not custom_explore_fn:
            raise TuneError(
                "You must specify at least one of `hyperparam_mutations` or "
                "`custom_explore_fn` to use PBT.")
        FIFOScheduler.__init__(self)
        self._reward_attr = reward_attr
        self._time_attr = time_attr
        self._perturbation_interval = perturbation_interval
        self._hyperparam_mutations = hyperparam_mutations
        self._resample_probability = resample_probability
        self._trial_state = {}
        self._custom_explore_fn = custom_explore_fn

        # Metrics
        self._num_checkpoints = 0
        self._num_perturbations = 0
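Assuming the constructor above is Ray Tune's PopulationBasedTraining, a minimal sketch of what hyperparam_mutations and custom_explore_fn typically look like; the hyperparameters and ranges here are invented for illustration:

# Sketch only: illustrative mutation space for PopulationBasedTraining.
import random

from ray.tune.schedulers import PopulationBasedTraining

def explore(config):
    # custom_explore_fn receives the mutated config and may post-process it.
    config["batch_size"] = max(1, int(config["batch_size"]))
    return config

pbt = PopulationBasedTraining(
    time_attr="time_total_s",
    perturbation_interval=60.0,
    hyperparam_mutations={
        # Each entry is a list of choices or a no-argument callable returning a sample.
        "lr": lambda: random.uniform(1e-4, 1e-1),
        "batch_size": [16, 32, 64],
    },
    custom_explore_fn=explore,
)
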
Example #17
    def __init__(
        self,
        res_name="training_iteration",
        max_res=9,
        metric="mean_accuracy",
        mode="max",
        reduction_factor=3,
    ):
        assert max_res > 0, "Max (time_attr) not valid!"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        # super().__init__(self)
        FIFOScheduler.__init__(self)
        self._eta = reduction_factor
        # number of iterations
        self._s_max_1 = int(
            np.round(np.log(max_res) / np.log(reduction_factor))) + 1
        self._max_res = max_res
        # bracket max trials
        self._get_n0 = lambda s: int(
            np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
        # bracket initial iterations
        self._get_r0 = lambda s: int((max_res * self._eta**(-s)))
        # list of hyperband iterations
        self._iterations: List[List[Optional[Bracket]]] = [[]]
        # Stores Trial -> Bracket, Band Iteration
        self._trial_info: Dict[Trial, Tuple[Bracket, int]] = {}

        self._num_stopped = 0
        self._metric = metric
        if mode == "max":
            self._metric_op = 1.0
        elif mode == "min":
            self._metric_op = -1.0
        self._res_name = res_name

        # scheduler estimator
        self.base_rung_runtime_sum = [[0 for x in range(self._s_max_1)]]
        # map trial to init res
        self._res_estimator = {}
        self.trial_runtime = [[]]
        # scheduler state machine
        self.stage = 0
        # get our own GPU resources, should be created first
        create_custom_gpu_res()
        time.sleep(2)
        self._gpu_resources: Dict[str, float] = {
            k: v
            for k, v in ray.cluster_resources().items()
            if k.startswith("fluid_GPU")
        }
        self.num_gpu_resources = len(self._gpu_resources)
        self.gpu_overhead = [0 for x in range(self.num_gpu_resources)]
        self.customized_gpu_wl: List[Dict] = [
            dict() for x in range(self.num_gpu_resources)
        ]
        self.trial_overhead = defaultdict(int)
        self.update_res = {}  # record updated res for scaling up
        self.cur_iter_idx = 0
        self.cur_braket_idx = 0
        self.start_iter = [[]]
        print("[Grab {num} resources]:{gpu} ".format(
            num=self.num_gpu_resources, gpu=self._gpu_resources))

        self._sm: Union[None, Stage0, Stage1, Stage2] = None  # Stage0(self)
        logger.info("[Grab resources] {res}".format(res=self._gpu_resources))
Example #18
 def trial_scheduler(self):
     return FIFOScheduler()