    def _add_window(self, start, end, raise_on_min_length=False):
        """Add a window - checking whether it violates rules"""

        length = end - start

        if length < self.min_length:
            # Only raise when recursing
            if raise_on_min_length:
                raise Exception("Min length constraint violated")

            if start == 0:
                # Expected
                logger.debug(
                    "Skipping circular wraparound at beginning of loop")
            else:
                # Bad user. Bad.
                logger.info("Skipping window less than min length")
            return
        if length > self.day_length:
            # Split in two - a floor and a ceiling
            # Hypothetically recurses
            logger.info("Splitting large window into subproblems")
            center = start + (end - start) // 2
            # It's possible the sub-windows will violate the min length
            # constraint, so we wrap the recursion in a try block
            try:
                self._add_window(start, center, raise_on_min_length=True)
                self._add_window(center, end, raise_on_min_length=True)
            except Exception:
                self._windows.append((start, end))
            return

        self._windows.append((start, end))
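
# A standalone sketch of the splitting rule above, handy for tracing how an
# oversized window is halved. The min_length/day_length values below are
# assumptions for the demo, not values taken from this codebase.
def _split_window_demo(start, end, min_length, day_length, out,
                       raise_on_min_length=False):
    length = end - start
    if length < min_length:
        if raise_on_min_length:
            raise Exception("Min length constraint violated")
        return
    if length > day_length:
        center = start + (end - start) // 2
        try:
            _split_window_demo(start, center, min_length, day_length, out,
                               raise_on_min_length=True)
            _split_window_demo(center, end, min_length, day_length, out,
                               raise_on_min_length=True)
        except Exception:
            out.append((start, end))
        return
    out.append((start, end))

windows = []
_split_window_demo(0, 20, min_length=4, day_length=8, out=windows)
assert windows == [(0, 5), (5, 10), (10, 15), (15, 20)]
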
    def efficiency(self):
        """Return the overage as a float. 0 is perfect."""
        PERFECT_OPTIMALITY = 0.0

        # Check for divide by 0 error.
        if sum(self.flat_demand) == 0.0:
            return PERFECT_OPTIMALITY

        efficiency = (1.0 * sum(shift["length"] for shift in self._shifts) /
                      sum(self.flat_demand)) - 1

        logger.info("Efficiency: Overage is %s percent", efficiency * 100.0)
        return efficiency
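
# A quick sanity check of the overage formula above (the numbers are made up
# for illustration):
flat_demand = [8, 8, 8, 8, 8]    # 40 person-hours of demand
shifts = [{"length": 11}] * 4    # 44 person-hours scheduled
overage = (1.0 * sum(s["length"] for s in shifts) / sum(flat_demand)) - 1
assert abs(overage - 0.1) < 1e-9  # 10 percent overage
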
    def _process_task(self, task):
        # 1. Fetch schedule
        self.org = self.client.get_organization(
            task.data.get("organization_id"))
        self.loc = self.org.get_location(task.data.get("location_id"))
        self.role = self.loc.get_role(task.data.get("role_id"))
        self.sched = self.role.get_schedule(task.data.get("schedule_id"))

        self._compute_demand()
        self._subtract_existing_shifts_from_demand()

        # Run the calculation
        s = Splitter(self.demand,
                     self.sched.data.get("min_shift_length_hour"),
                     self.sched.data.get("max_shift_length_hour"))
        s.calculate()
        s.efficiency()

        # Naive because these are not yet datetimes
        naive_shifts = s.get_shifts()
        logger.info("Starting upload of %s shifts", len(naive_shifts))

        local_start_time = self._get_local_start_time()

        for shift in naive_shifts:
            # We have to think about daylight saving time here, so we need to
            # guarantee that we don't have any errors. We do this by overshooting
            # the timedelta by an extra two hours, then rounding back to midnight.

            logger.debug("Processing shift %s", shift)

            start_day = normalize_to_midnight(
                deepcopy(local_start_time) + timedelta(days=shift["day"]))

            # Beware of time changes - duplicate times are possible
            try:
                start = start_day.replace(hour=shift["start"])
            except pytz.AmbiguousTimeError:
                # Randomly pick one. Minor tech debt.
                start = start_day.replace(hour=shift["start"], is_dst=False)

            stop = start + timedelta(hours=shift["length"])

            # Convert to the strings we are passing up to the cLoUd
            utc_start_str = start.astimezone(self.default_tz).isoformat()
            utc_stop_str = stop.astimezone(self.default_tz).isoformat()

            logger.info("Creating shift with start %s stop %s", start, stop)
            self.role.create_shift(start=utc_start_str, stop=utc_stop_str)
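
# normalize_to_midnight is used above but not defined in this excerpt. A
# minimal sketch consistent with the DST comment in the loop (the pytz-style
# normalize() call is an assumption about the tzinfo in use):
def normalize_to_midnight_sketch(dt):
    midnight = dt.replace(hour=0, minute=0, second=0, microsecond=0)
    if dt.tzinfo is not None and hasattr(dt.tzinfo, "normalize"):
        # pytz repairs the UTC offset if arithmetic crossed a DST boundary
        midnight = dt.tzinfo.normalize(midnight)
    return midnight
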
    def server(self):
        previous_request_failed = False  # Have some built-in retries

        while True:
            # Get task
            try:
                task = self.client.claim_chomp_task()
                logger.info("Task received: %s", task.data)
                previous_request_failed = False
            except NotFoundException:
                logger.debug("No task found. Sleeping.")
                previous_request_failed = False
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue
            except Exception as e:
                if not previous_request_failed:
                    # retry, but info log it
                    logger.info("Unable to fetch chomp task - retrying")
                    previous_request_failed = True
                else:
                    logger.error(
                        "Unable to fetch chomp task after previous failure: %s",
                        e)

                # Still sleep so we avoid thundering herd
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue

            try:
                self._process_task(task)
                task.delete()
                logger.info("Task completed %s", task.data)
            except Exception as e:
                logger.error("Failed schedule %s:  %s %s",
                             task.data.get("schedule_id"), e,
                             traceback.format_exc())

                logger.info("Requeuing schedule %s",
                            task.data.get("schedule_id"))
                # self.sched is set in _process_task
                self.sched.patch(state=self.REQUEUE_STATE)

                # Sometimes rebooting Chomp helps with errors. For example, if
                # a Gurobi connection is drained then it helps to reboot.
                if config.KILL_ON_ERROR:
                    sleep(config.KILL_DELAY)
                    logger.info("Rebooting to kill container")
                    os.system("shutdown -r now")
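
# The config module referenced throughout is not shown. A plausible minimal
# stand-in (the attribute names appear in this excerpt; every value below is
# an assumption for local experimentation):
class config_sketch:
    TASKING_FETCH_INTERVAL_SECONDS = 10  # pause between task-queue polls
    CALCULATION_TIMEOUT = 600            # branch-and-bound budget, in seconds
    BIFURCATION_THRESHHOLD = 400         # demand sum that triggers splitting
    MAX_SHIFT_LENGTH_HOURS = 23          # bound used when searching shifts
    KILL_ON_ERROR = False                # reboot the container on failure
    KILL_DELAY = 30                      # seconds to wait before rebooting
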
    def _solve_windows(self):
        """Run windows through decompose to create shifts"""

        for window_count, (start, stop) in enumerate(self._windows, start=1):
            logger.info("Starting window %s of %s (start %s stop %s)",
                        window_count, len(self._windows), start, stop)

            # Need to wrap
            demand = self._get_window_demand(start, stop)
            d = Decompose(
                demand, self.min_length, self.max_length, window_offset=start)
            d.calculate()
            e = d.efficiency()
            logger.info("Window efficiency: Overage is %s percent",
                        (e * 100.0))
            self._shifts.extend(d.get_shifts())
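
# _get_window_demand is not shown; the "Need to wrap" comment above suggests
# the demand curve is circular (a repeating week). A minimal sketch under
# that assumption:
def get_window_demand_sketch(flat_demand, start, stop):
    n = len(flat_demand)
    # Modular indexing lets a window run past the end of the curve and
    # wrap back around to the beginning
    return [flat_demand[t % n] for t in range(start, stop)]

assert get_window_demand_sketch([1, 2, 3, 4], 3, 6) == [4, 1, 2]
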
    def anneal(self):
        """Look for overages and try to fix them"""
        if not self.demand_is_met:
            raise Exception("Cannot anneal an unfeasible demand")

        if self.is_optimal:
            # noop
            return

        # Run in a loop until no more improvements
        improvement_made = True
        time_saved = 0
        while improvement_made:
            improvement_made = False

            # Find times that are overscheduled
            for t in range(self.demand_length):
                if self.get_demand_minus_coverage(t) < 0:
                    # Overstaffed at t - look for shifts that start or
                    # end there and try to trim them back
                    for i in range(len(self._shifts)):
                        if self.get_demand_minus_coverage(t) >= 0:
                            break  # t is no longer overstaffed

                        start, length = self._shifts[i]
                        end = start + length
                        if start == t and length > self.min_length:
                            self._shifts[i] = (start + 1, length - 1)
                            self._coverage[t] -= 1
                            time_saved += 1
                            improvement_made = True

                        # end is exclusive, so a shift whose last covered
                        # unit is t has end == t + 1; it must also stay at
                        # or above min_length after trimming
                        if end == t + 1 and length > self.min_length:
                            self._shifts[i] = (start, length - 1)
                            self._coverage[t] -= 1
                            time_saved += 1
                            improvement_made = True

        if time_saved > 0:
            logger.info("Annealing removed %s units", time_saved)

        if self.is_optimal:
            logger.info("Annealing reached optimality")
    def calculate(self):
        if len(self._shifts) > 0:
            raise Exception("Shifts already calculated")

        # Try checking cache. Putting the check here means it even works for
        # subproblems!
        cached_shifts = cache.get(
            demand=self.demand,
            min_length=self.min_length,
            max_length=self.max_length)
        if cached_shifts:
            logger.info("Hit cache")
            self._shifts = cached_shifts
            return

        # Subproblem splitting
        demand_sum = sum(self.demand)
        if demand_sum > config.BIFURCATION_THRESHHOLD:
            # Subproblems. Split into round up and round down.
            logger.info("Initiating split (demand sum %s, threshhold %s)",
                        demand_sum, config.BIFURCATION_THRESHHOLD)
            # Show parent demand sum becuase it can recursively split
            demand_up = self._split_demand(round_up=True)
            demand_low = self._split_demand(round_up=False)

            d_up = Decompose(demand_up, self.min_length, self.max_length)
            d_low = Decompose(demand_low, self.min_length, self.max_length)

            logger.info(
                "Beginning upper round subproblem (parent demand sum: %s)",
                demand_sum)
            d_up.calculate()

            logger.info(
                "Beginning lower round subproblem (parent demand sum: %s)",
                demand_sum)
            d_low.calculate()

            self._shifts.extend(d_up.get_shifts())
            self._shifts.extend(d_low.get_shifts())
            self._set_cache()  # Set cache for the parent problem too!
            return

        self._calculate()
        self._set_cache()
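
# _split_demand is not shown. The round-up/round-down split only makes sense
# if the two halves sum back to the original demand, so a minimal sketch
# might look like this (the flat list-of-ints shape is an assumption):
def split_demand_sketch(demand, round_up):
    if round_up:
        return [(d + 1) // 2 for d in demand]
    return [d // 2 for d in demand]

demand = [5, 2, 7]
up = split_demand_sketch(demand, round_up=True)    # [3, 1, 4]
low = split_demand_sketch(demand, round_up=False)  # [2, 1, 3]
assert [a + b for a, b in zip(up, low)] == demand
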
    def _calculate(self):
        """Search that tree"""
        # Not only do we want optimality, but we want it with
        # longest shifts possible. That's why we do DFS on long shifts.

        starting_solution = self.use_heuristics_to_generate_some_solution()

        # Helper variables for branch and bound
        best_known_coverage = starting_solution.coverage_sum
        best_known_solution = starting_solution
        best_possible_solution = sum(self.demand)

        logger.debug("Starting with known coverage %s vs best possible %s",
                     best_known_coverage, best_possible_solution)

        # Branches to search
        # (We want the shortest shifts retrieved first, so we push
        # them last and pop() takes the last one in)
        # (a LIFO stack using pop() is more efficient in Python
        # than a FIFO queue using pop(0))

        stack = []

        logger.info("Demand: %s", self.demand)
        empty_collection = ShiftCollection(self.min_length,
                                           self.max_length,
                                           demand=self.demand)
        stack.append(empty_collection)

        start_time = datetime.utcnow()

        while len(stack) != 0:
            if start_time + timedelta(
                    seconds=config.CALCULATION_TIMEOUT) < datetime.utcnow():
                logger.info("Exited due to timeout (%s seconds)",
                            (datetime.utcnow() - start_time).total_seconds())
                break

            # Get a branch
            working_collection = stack.pop()

            if working_collection.is_optimal:
                # We have a complete solution
                logger.info("Found an optimal collection. Exiting.")
                self.set_shift_collection_as_optimal(working_collection)
                return

            if working_collection.demand_is_met:
                if working_collection.coverage_sum < best_known_coverage:
                    logger.info(
                        "Better solution found (previous coverage %s / new coverage %s / best_possible %s)",
                        best_known_coverage, working_collection.coverage_sum,
                        best_possible_solution)

                    # Set new best possible solution
                    best_known_solution = working_collection
                    best_known_coverage = working_collection.coverage_sum
                else:
                    logger.debug("Found less optimal solution - continuing")
                    # discard
                del working_collection

            else:

                # New branch to explore - else discard
                if working_collection.best_possible_coverage < best_known_coverage:
                    # Gotta add more shifts!
                    t = working_collection.get_first_time_demand_not_met()

                    # Get shift start time
                    start = t
                    for length in reverse_inclusive_range(
                            self.min_length, self.max_length):
                        # Make sure we aren't off edge
                        end_index = start + length

                        # Our edge smoothing means this will always work
                        if end_index <= len(self.demand):
                            shift = (start, length)
                            new_collection = deepcopy(working_collection)
                            new_collection.add_shift(shift)

                            if new_collection.demand_is_met:
                                new_collection.anneal()

                            if new_collection.best_possible_coverage < best_known_coverage:

                                # Only save it if it's an improvement
                                stack.append(new_collection)

        self.set_shift_collection_as_optimal(best_known_solution)
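
# reverse_inclusive_range is used above but not defined in this excerpt. A
# minimal sketch consistent with the name (iterate from high down to low,
# both endpoints included):
def reverse_inclusive_range_sketch(low, high):
    return range(high, low - 1, -1)

assert list(reverse_inclusive_range_sketch(4, 8)) == [8, 7, 6, 5, 4]
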
    def _subtract_existing_shifts_from_demand(self):
        logger.info("Starting demand: %s", self.demand)
        demand_copy = deepcopy(self.demand)
        search_start = (self._get_local_start_time() - timedelta(
            hours=config.MAX_SHIFT_LENGTH_HOURS)).astimezone(self.default_tz)
        # 1 week
        search_end = (self._get_local_start_time() + timedelta(
            days=7, hours=config.MAX_SHIFT_LENGTH_HOURS)
                      ).astimezone(self.default_tz)

        shifts = self.role.get_shifts(start=search_start, end=search_end)

        logger.info("Checking %s shifts for existing demand", len(shifts))

        # Search hour by hour throughout the week
        for day in range(len(self.demand)):
            start_day = normalize_to_midnight(self._get_local_start_time() +
                                              timedelta(days=day))
            for start in range(len(self.demand[0])):

                # Beware of time changes - duplicate times are possible
                try:
                    start_hour = deepcopy(start_day).replace(hour=start)
                except pytz.AmbiguousTimeError:
                    # Randomly pick one - cause phucket. Welcome to chomp.
                    start_hour = deepcopy(start_day).replace(
                        hour=start, is_dst=False)

                # timedelta arithmetic cannot raise AmbiguousTimeError, so no
                # special handling is needed for the stop time
                stop_hour = start_hour + timedelta(hours=1)

                # Find shift
                current_staffing_level = 0
                for shift in shifts:
                    shift_start = iso8601.parse_date(
                        shift.data.get("start")).replace(
                            tzinfo=self.default_tz)
                    shift_stop = iso8601.parse_date(
                        shift.data.get("stop")).replace(tzinfo=self.default_tz)

                    if ((shift_start <= start_hour and shift_stop > stop_hour)
                            or
                        (shift_start >= start_hour and shift_start < stop_hour)
                            or
                        (shift_stop > start_hour and shift_stop <= stop_hour)):

                        # increment staffing level during that bucket
                        current_staffing_level += 1

                logger.debug("Current staffing level at day %s time %s is %s",
                             day, start, current_staffing_level)

                demand_copy[day][start] -= current_staffing_level
                # demand cannot be less than zero
                if demand_copy[day][start] < 0:
                    demand_copy[day][start] = 0

        logger.info("Demand minus existing shifts: %s", demand_copy)
        self.demand = demand_copy
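
# The three-clause overlap test above is equivalent to the standard check
# for intersecting half-open intervals, noted here for clarity:
def overlaps(shift_start, shift_stop, start_hour, stop_hour):
    # [a, b) and [c, d) intersect iff a < d and b > c
    return shift_start < stop_hour and shift_stop > start_hour
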