예제 #1
0
    def _add_window(self, start, end, raise_on_min_length=False):
        """Add a window - checking whether it violates rules"""

        length = end - start

        if length < self.min_length:
            # Only raise when recursing
            if raise_on_min_length:
                raise Exception("Min length constraint violated")

            if start == 0:
                # Expected
                logger.debug(
                    "Skipping circular wraparound at beginning of loop")
            else:
                # Bad user. Bad.
                logger.info("Skipping window less than min length")
            return
        if length > self.day_length:
            # Split in two - a floor and a ceiling
            # Hypothetically recurses
            logger.info("Splitting large window into subproblems")
            center = start + (end - start) / 2
            # It's possible the windows will violate min length constrainnts,
            # so we wrap in a try block
            try:
                self._add_window(start, center, raise_on_min_length=True)
                self._add_window(center, end, raise_on_min_length=True)
            except:
                self._windows.append((start, end))
            return

        self._windows.append((start, end))
예제 #2
0
    def _add_window(self, start, end, raise_on_min_length=False):
        """Add a window - checking whether it violates rules"""

        length = end - start

        if length < self.min_length:
            # Only raise when recursing
            if raise_on_min_length:
                raise Exception("Min length constraint violated")

            if start == 0:
                # Expected
                logger.debug(
                    "Skipping circular wraparound at beginning of loop")
            else:
                # Bad user. Bad.
                logger.info("Skipping window less than min length")
            return
        if length > self.day_length:
            # Split in two - a floor and a ceiling
            # Hypothetically recurses
            logger.info("Splitting large window into subproblems")
            center = start + (end - start) / 2
            # It's possible the windows will violate min length constrainnts,
            # so we wrap in a try block
            try:
                self._add_window(start, center, raise_on_min_length=True)
                self._add_window(center, end, raise_on_min_length=True)
            except:
                self._windows.append((start, end))
            return

        self._windows.append((start, end))
예제 #3
0
    def _process_task(self, task):
        """Fetch a schedule, compute coverage, and upload the resulting shifts.

        Side effects: sets self.org/loc/role/sched, mutates self.demand via
        helpers, and creates shifts through the API client.
        """
        # 1. Fetch the schedule and its parent objects from the API
        self.org = self.client.get_organization(
            task.data.get("organization_id"))
        self.loc = self.org.get_location(task.data.get("location_id"))
        self.role = self.loc.get_role(task.data.get("role_id"))
        self.sched = self.role.get_schedule(task.data.get("schedule_id"))

        # 2. Build demand and remove whatever is already staffed
        self._compute_demand()
        self._subtract_existing_shifts_from_demand()

        # 3. Run the shift-splitting calculation
        s = Splitter(self.demand,
                     self.sched.data.get("min_shift_length_hour"),
                     self.sched.data.get("max_shift_length_hour"))
        s.calculate()
        s.efficiency()

        # "Naive" because shifts are (day, start, length) offsets, not
        # datetimes yet
        naive_shifts = s.get_shifts()
        logger.info("Starting upload of %s shifts", len(naive_shifts))

        local_start_time = self._get_local_start_time()

        for shift in naive_shifts:
            # Convert each offset triple into concrete datetimes. DST makes
            # some local times ambiguous, so the conversion below has to
            # tolerate pytz.AmbiguousTimeError.

            logger.debug("Processing shift %s", shift)

            start_day = normalize_to_midnight(
                deepcopy(local_start_time) + timedelta(days=shift["day"]))

            # Beware of time changes - duplicate local times are possible
            try:
                start = start_day.replace(hour=shift["start"])
            except pytz.AmbiguousTimeError:
                # Arbitrarily pick the non-DST side. Minor tech debt.
                start = start_day.replace(hour=shift["start"], is_dst=False)

            stop = start + timedelta(hours=shift["length"])

            # Serialize to ISO 8601 strings for the API.
            # NOTE(review): despite the "utc_" names these use
            # self.default_tz, which is not shown to be UTC here - confirm
            # against configuration.
            utc_start_str = start.astimezone(self.default_tz).isoformat()
            utc_stop_str = stop.astimezone(self.default_tz).isoformat()

            logger.info("Creating shift with start %s stop %s", start, stop)
            self.role.create_shift(start=utc_start_str, stop=utc_stop_str)
예제 #4
0
    def _process_task(self, task):
        """Fetch a schedule, compute coverage, and upload the resulting shifts.

        Side effects: sets self.org/loc/role/sched, mutates self.demand via
        helpers, and creates shifts through the API client.
        """
        # 1. Fetch the schedule and its parent objects from the API
        self.org = self.client.get_organization(
            task.data.get("organization_id"))
        self.loc = self.org.get_location(task.data.get("location_id"))
        self.role = self.loc.get_role(task.data.get("role_id"))
        self.sched = self.role.get_schedule(task.data.get("schedule_id"))

        # 2. Build demand and remove whatever is already staffed
        self._compute_demand()
        self._subtract_existing_shifts_from_demand()

        # 3. Run the shift-splitting calculation
        s = Splitter(self.demand,
                     self.sched.data.get("min_shift_length_hour"),
                     self.sched.data.get("max_shift_length_hour"))
        s.calculate()
        s.efficiency()

        # "Naive" because shifts are (day, start, length) offsets, not
        # datetimes yet
        naive_shifts = s.get_shifts()
        logger.info("Starting upload of %s shifts", len(naive_shifts))

        local_start_time = self._get_local_start_time()

        for shift in naive_shifts:
            # Convert each offset triple into concrete datetimes. DST makes
            # some local times ambiguous, so the conversion below has to
            # tolerate pytz.AmbiguousTimeError.

            logger.debug("Processing shift %s", shift)

            start_day = normalize_to_midnight(
                deepcopy(local_start_time) + timedelta(days=shift["day"]))

            # Beware of time changes - duplicate local times are possible
            try:
                start = start_day.replace(hour=shift["start"])
            except pytz.AmbiguousTimeError:
                # Arbitrarily pick the non-DST side. Minor tech debt.
                start = start_day.replace(hour=shift["start"], is_dst=False)

            stop = start + timedelta(hours=shift["length"])

            # Serialize to ISO 8601 strings for the API.
            # NOTE(review): despite the "utc_" names these use
            # self.default_tz, which is not shown to be UTC here - confirm
            # against configuration.
            utc_start_str = start.astimezone(self.default_tz).isoformat()
            utc_stop_str = stop.astimezone(self.default_tz).isoformat()

            logger.info("Creating shift with start %s stop %s", start, stop)
            self.role.create_shift(start=utc_start_str, stop=utc_stop_str)
예제 #5
0
    def server(self):
        """Main worker loop: claim tasks, process them, requeue on failure.

        Runs forever. Transient fetch errors are retried after a sleep;
        processing errors requeue the schedule and optionally reboot the
        host (config.KILL_ON_ERROR).
        """
        previous_request_failed = False  # Escalates log level on repeat failures

        while True:
            # Claim the next task from the queue
            try:
                task = self.client.claim_chomp_task()
                logger.info("Task received: %s", task.data)
                previous_request_failed = False
            except NotFoundException:
                # Queue is empty - expected, not an error
                logger.debug("No task found. Sleeping.")
                previous_request_failed = False
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue
            except Exception as e:
                # First failure could be a blip - retry with an info log;
                # consecutive failures escalate to error.
                if not previous_request_failed:
                    logger.info("Unable to fetch chomp task - retrying")
                    previous_request_failed = True
                else:
                    logger.error(
                        "Unable to fetch chomp task after previous failure: %s",
                        e)

                # Still sleep so we avoid a thundering herd against the API
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue

            try:
                self._process_task(task)
                task.delete()
                logger.info("Task completed %s", task.data)
            except Exception as e:
                logger.error("Failed schedule %s:  %s %s",
                             task.data.get("schedule_id"), e,
                             traceback.format_exc())

                logger.info("Requeuing schedule %s",
                            task.data.get("schedule_id"))
                # NOTE(review): self.sched is set inside _process_task; if
                # the failure happened before that assignment this may patch
                # a schedule left over from a previous task - confirm.
                self.sched.patch(state=self.REQUEUE_STATE)

                # Sometimes rebooting Chomp helps with errors. For example, if
                # a Gurobi connection is drained then it helps to reboot.
                if config.KILL_ON_ERROR:
                    sleep(config.KILL_DELAY)
                    logger.info("Rebooting to kill container")
                    os.system("shutdown -r now")
예제 #6
0
    def server(self):
        """Main worker loop: claim tasks, process them, requeue on failure.

        Runs forever. Transient fetch errors are retried after a sleep;
        processing errors requeue the schedule and optionally reboot the
        host (config.KILL_ON_ERROR).
        """
        previous_request_failed = False  # Escalates log level on repeat failures

        while True:
            # Claim the next task from the queue
            try:
                task = self.client.claim_chomp_task()
                logger.info("Task received: %s", task.data)
                previous_request_failed = False
            except NotFoundException:
                # Queue is empty - expected, not an error
                logger.debug("No task found. Sleeping.")
                previous_request_failed = False
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue
            except Exception as e:
                # First failure could be a blip - retry with an info log;
                # consecutive failures escalate to error.
                if not previous_request_failed:
                    logger.info("Unable to fetch chomp task - retrying")
                    previous_request_failed = True
                else:
                    logger.error(
                        "Unable to fetch chomp task after previous failure: %s",
                        e)

                # Still sleep so we avoid a thundering herd against the API
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue

            try:
                self._process_task(task)
                task.delete()
                logger.info("Task completed %s", task.data)
            except Exception as e:
                logger.error("Failed schedule %s:  %s %s",
                             task.data.get("schedule_id"), e,
                             traceback.format_exc())

                logger.info("Requeuing schedule %s",
                            task.data.get("schedule_id"))
                # NOTE(review): self.sched is set inside _process_task; if
                # the failure happened before that assignment this may patch
                # a schedule left over from a previous task - confirm.
                self.sched.patch(state=self.REQUEUE_STATE)

                # Sometimes rebooting Chomp helps with errors. For example, if
                # a Gurobi connection is drained then it helps to reboot.
                if config.KILL_ON_ERROR:
                    sleep(config.KILL_DELAY)
                    logger.info("Rebooting to kill container")
                    os.system("shutdown -r now")
예제 #7
0
    def _process_demand(self):
        """Apply windowing to demand"""

        demand = copy.copy(self.demand)

        # 1) Remove any lagging zeros. This affects nothing.
        while demand[-1] is 0:
            demand.pop()  # remove last element

        # 2) Remove any leading zeros andtrack with offset
        offset = 0
        while demand[0] is 0:
            demand.pop(0)  # remove first element
            offset += 1

        # TODO - edge smoothing algorithms

        # Smooth beginning edge
        # (TODO - search past to max_length and manually strip out shifts)
        peak = 0
        for t in range(self.min_length):
            if demand[t] > peak:
                peak = demand[t]
            elif demand[t] < peak:
                demand[t] = peak

        peak = 0
        for t in reversed(
                range((len(demand) - self.min_length - 1), len(demand))):
            if demand[t] > peak:
                peak = demand[t]
            elif demand[t] < peak:
                demand[t] = peak

        self.demand = demand
        self.window_offset += offset

        logger.debug("Windowing removed %s leading zeros", offset)
        logger.debug("Processed demand: %s", self.demand)
예제 #8
0
    def _process_demand(self):
        """Apply windowing to demand"""

        demand = copy.copy(self.demand)

        # 1) Remove any lagging zeros. This affects nothing.
        while demand[-1] is 0:
            demand.pop()  # remove last element

        # 2) Remove any leading zeros andtrack with offset
        offset = 0
        while demand[0] is 0:
            demand.pop(0)  # remove first element
            offset += 1

        # TODO - edge smoothing algorithms

        # Smooth beginning edge
        # (TODO - search past to max_length and manually strip out shifts)
        peak = 0
        for t in range(self.min_length):
            if demand[t] > peak:
                peak = demand[t]
            elif demand[t] < peak:
                demand[t] = peak

        peak = 0
        for t in reversed(
                range((len(demand) - self.min_length - 1), len(demand))):
            if demand[t] > peak:
                peak = demand[t]
            elif demand[t] < peak:
                demand[t] = peak

        self.demand = demand
        self.window_offset += offset

        logger.debug("Windowing removed %s leading zeros", offset)
        logger.debug("Processed demand: %s", self.demand)
예제 #9
0
    def __init__(self, week_demand, min_length, max_length):
        """Flatten demand and build helpers"""
        # week_demand is a list of lists

        # These are treated as unitless and should match the demand units
        logger.debug("Min %s max %s", min_length, max_length)
        self.min_length = int(min_length)
        self.max_length = int(max_length)

        self._shifts = []  # don't access directly!
        self._windows = []

        self.week_length = len(week_demand)

        # Validate that days are same length
        self.day_length = len(week_demand[0])
        for i in range(len(week_demand)):
            if self.day_length != len(week_demand[i]):
                raise UnequalDayLengthException()

        # 2) Flatten demand
        self.flat_demand = [
            item for day_demand in week_demand for item in day_demand
        ]
예제 #10
0
    def __init__(self, week_demand, min_length, max_length):
        """Flatten demand and build helpers"""
        # week_demand is a list of lists

        # These are treated as unitless and should match the demand units
        logger.debug("Min %s max %s", min_length, max_length)
        self.min_length = int(min_length)
        self.max_length = int(max_length)

        self._shifts = []  # don't access directly!
        self._windows = []

        self.week_length = len(week_demand)

        # Validate that days are same length
        self.day_length = len(week_demand[0])
        for i in range(len(week_demand)):
            if self.day_length != len(week_demand[i]):
                raise UnequalDayLengthException()

        # 2) Flatten demand
        self.flat_demand = [
            item for day_demand in week_demand for item in day_demand
        ]
예제 #11
0
    def validate(self):
        """Check whether shifts meet demand. Used in testing.

        Returns True when every time period's scheduled supply is at least
        the demand; raises Exception on the first period where it is not.
        """
        # Compare supply against the actual demand. (Previously
        # expected_demand was initialized to all zeros, which made this
        # check vacuous - it could never fail.)
        expected_demand = copy.copy(self.demand)
        sum_demand = [0] * len(self.demand)

        logger.debug("Starting validation of %s shifts", len(self._shifts))

        # Tally scheduled supply per time period
        for shift in self._shifts:
            for t in range(shift["start"], shift["start"] + shift["length"]):
                sum_demand[t] += 1

        logger.debug("Expected demand: %s", expected_demand)
        logger.debug("Scheduled supply: %s", sum_demand)
        for t in range(len(expected_demand)):
            if sum_demand[t] < expected_demand[t]:
                logger.error(
                    "Demand not met at time %s (demand %s, supply %s)",
                    t + self.window_offset, expected_demand[t], sum_demand[t])
                raise Exception("Demand not met at time %s" % t)
        return True
예제 #12
0
    def validate(self):
        """Check whether shifts meet demand. Used in testing.

        Returns True when every time period's scheduled supply is at least
        the demand; raises Exception on the first period where it is not.
        """
        # Compare supply against the actual demand. (Previously
        # expected_demand was initialized to all zeros, which made this
        # check vacuous - it could never fail.)
        expected_demand = copy.copy(self.demand)
        sum_demand = [0] * len(self.demand)

        logger.debug("Starting validation of %s shifts", len(self._shifts))

        # Tally scheduled supply per time period
        for shift in self._shifts:
            for t in range(shift["start"], shift["start"] + shift["length"]):
                sum_demand[t] += 1

        logger.debug("Expected demand: %s", expected_demand)
        logger.debug("Scheduled supply: %s", sum_demand)
        for t in range(len(expected_demand)):
            if sum_demand[t] < expected_demand[t]:
                logger.error(
                    "Demand not met at time %s (demand %s, supply %s)",
                    t + self.window_offset, expected_demand[t], sum_demand[t])
                raise Exception("Demand not met at time %s" % t)
        return True
예제 #13
0
    def validate(self):
        """Check whether shifts meet demand. Used in testing.

        Shifts may extend past the end of the week, so supply wraps around
        circularly modulo the demand length. Returns True on success;
        raises Exception on the first period where demand is not met.
        """
        expected_demand = copy.copy(self.flat_demand)
        sum_demand = [0] * len(self.flat_demand)

        logger.debug("Starting validation of %s shifts", len(self._shifts))

        period_count = len(self.flat_demand)
        for shift in self._shifts:
            for t in range(shift["start"], shift["start"] + shift["length"]):
                # Circular wraparound. (Simplified from the equivalent but
                # obfuscated "(t + 1) % n - 1" indexing.)
                sum_demand[t % period_count] += 1

        logger.debug("Expected demand: %s", expected_demand)
        logger.debug("Scheduled supply: %s", sum_demand)
        for t in range(len(expected_demand)):
            if sum_demand[t] < expected_demand[t]:
                logger.error(
                    "Demand not met at time %s (demand %s, supply %s)", t,
                    expected_demand[t], sum_demand[t])
                raise Exception("Demand not met at time %s" % t)
        return True
예제 #14
0
    def validate(self):
        """Check whether shifts meet demand. Used in testing.

        Shifts may extend past the end of the week, so supply wraps around
        circularly modulo the demand length. Returns True on success;
        raises Exception on the first period where demand is not met.
        """
        expected_demand = copy.copy(self.flat_demand)
        sum_demand = [0] * len(self.flat_demand)

        logger.debug("Starting validation of %s shifts", len(self._shifts))

        period_count = len(self.flat_demand)
        for shift in self._shifts:
            for t in range(shift["start"], shift["start"] + shift["length"]):
                # Circular wraparound. (Simplified from the equivalent but
                # obfuscated "(t + 1) % n - 1" indexing.)
                sum_demand[t % period_count] += 1

        logger.debug("Expected demand: %s", expected_demand)
        logger.debug("Scheduled supply: %s", sum_demand)
        for t in range(len(expected_demand)):
            if sum_demand[t] < expected_demand[t]:
                logger.error(
                    "Demand not met at time %s (demand %s, supply %s)", t,
                    expected_demand[t], sum_demand[t])
                raise Exception("Demand not met at time %s" % t)
        return True
예제 #15
0
    def _subtract_existing_shifts_from_demand(self):
        """Decrement demand by shifts that already exist in the schedule.

        Fetches existing shifts in a padded search window, then for each
        one-hour bucket counts overlapping shifts and subtracts that
        staffing level from self.demand (floored at zero).
        """
        logger.info("Starting demand: %s", self.demand)
        demand_copy = deepcopy(self.demand)
        # Pad the search window by the max shift length on both sides so
        # shifts straddling the week boundaries are included
        search_start = (self._get_local_start_time() - timedelta(
            hours=config.MAX_SHIFT_LENGTH_HOURS)).astimezone(self.default_tz)
        # 1 week
        search_end = (self._get_local_start_time() + timedelta(
            days=7, hours=config.MAX_SHIFT_LENGTH_HOURS)
                      ).astimezone(self.default_tz)

        shifts = self.role.get_shifts(start=search_start, end=search_end)

        logger.info("Checking %s shifts for existing demand", len(shifts))

        # Walk hour by hour through each day of the week
        for day in range(len(self.demand)):
            start_day = normalize_to_midnight(self._get_local_start_time() +
                                              timedelta(days=day))
            for start in range(len(self.demand[0])):

                # Beware of time changes - duplicate local times are possible
                try:
                    start_hour = deepcopy(start_day).replace(hour=start)
                except pytz.AmbiguousTimeError:
                    # Arbitrarily pick the non-DST side
                    start_hour = deepcopy(start_day).replace(
                        hour=start, is_dst=False)

                # timedelta arithmetic never raises AmbiguousTimeError, and
                # timedelta() takes no is_dst argument - the previous
                # try/except here was dead code whose handler would itself
                # have raised TypeError, so it has been removed.
                stop_hour = start_hour + timedelta(hours=1)

                # Count shifts overlapping this one-hour bucket
                current_staffing_level = 0
                for shift in shifts:
                    # NOTE(review): replace(tzinfo=...) attaches the zone
                    # without converting; with pytz zones this can produce
                    # a wrong (LMT) offset - confirm default_tz handling.
                    shift_start = iso8601.parse_date(
                        shift.data.get("start")).replace(
                            tzinfo=self.default_tz)
                    shift_stop = iso8601.parse_date(
                        shift.data.get("stop")).replace(tzinfo=self.default_tz)

                    # Overlap: shift spans the bucket, starts within it, or
                    # stops within it
                    if ((shift_start <= start_hour and shift_stop > stop_hour)
                            or
                        (shift_start >= start_hour and shift_start < stop_hour)
                            or
                        (shift_stop > start_hour and shift_stop <= stop_hour)):

                        # increment staffing level during this bucket
                        current_staffing_level += 1

                logger.debug("Current staffing level at day %s time %s is %s",
                             day, start, current_staffing_level)

                demand_copy[day][start] -= current_staffing_level
                # demand cannot be less than zero
                if demand_copy[day][start] < 0:
                    demand_copy[day][start] = 0

        logger.info("Demand minus existing shifts: %s", demand_copy)
        self.demand = demand_copy
예제 #16
0
    def _calculate(self):
        """Branch-and-bound search for the best shift collection.

        Starts from a heuristic solution, then explores a DFS stack of
        partial ShiftCollections, pruning any branch whose best possible
        coverage cannot beat the best known solution. Bails out with the
        best-so-far after config.CALCULATION_TIMEOUT seconds.
        """
        # Not only do we want optimality, but we want it with the longest
        # shifts possible. That's why we do DFS on long shifts.

        starting_solution = self.use_heuristics_to_generate_some_solution()

        # Helper variables for branch and bound
        best_known_coverage = starting_solution.coverage_sum
        best_known_solution = starting_solution
        # Lower bound - coverage can never be less than total demand
        best_possible_solution = sum(self.demand)

        logger.debug("Starting with known coverage %s vs best possible %s",
                     best_known_coverage, best_possible_solution)

        # Branches to search.
        # (A LIFO stack using pop() is more efficient in Python than a FIFO
        # queue using pop(0), which is O(n).)
        # NOTE(review): reverse_inclusive_range below presumably yields
        # max->min, so the shortest length is appended last and popped
        # first - confirm its direction.

        stack = []

        logger.info("Demand: %s", self.demand)
        empty_collection = ShiftCollection(
            self.min_length, self.max_length, demand=self.demand)
        stack.append(empty_collection)

        start_time = datetime.utcnow()

        while len(stack) != 0:
            # Respect the wall-clock budget; keep the best found so far
            if start_time + timedelta(
                    seconds=config.CALCULATION_TIMEOUT) < datetime.utcnow():
                logger.info("Exited due to timeout (%s seconds)",
                            (datetime.utcnow() - start_time).total_seconds())
                break

            # Get a branch (most recently added first - DFS)
            working_collection = stack.pop()

            if working_collection.is_optimal:
                # Complete solution that cannot be improved - stop searching
                logger.info("Found an optimal collection. Exiting.")
                self.set_shift_collection_as_optimal(working_collection)
                return

            if working_collection.demand_is_met:
                # Feasible complete solution - keep only if it beats the
                # best known coverage
                if working_collection.coverage_sum < best_known_coverage:
                    logger.info(
                        "Better solution found (previous coverage %s / new coverage %s / best_possible %s)",
                        best_known_coverage, working_collection.coverage_sum,
                        best_possible_solution)

                    # Set new best known solution
                    best_known_solution = working_collection
                    best_known_coverage = working_collection.coverage_sum
                else:
                    logger.debug("Found less optimal solution - continuing")
                    # discard
                del working_collection

            else:

                # Incomplete solution: expand only if it could still beat
                # the best known coverage - otherwise prune the branch
                if working_collection.best_possible_coverage < best_known_coverage:
                    # Find the earliest time whose demand is not yet covered
                    t = working_collection.get_first_time_demand_not_met()

                    # Get shift start time
                    start = t
                    for length in reverse_inclusive_range(self.min_length,
                                                          self.max_length):
                        # Make sure we aren't off the edge of the demand
                        end_index = start + length

                        # Our edge smoothing means this will always work
                        if end_index <= len(self.demand):
                            shift = (start, length)
                            new_collection = deepcopy(working_collection)
                            new_collection.add_shift(shift)

                            if new_collection.demand_is_met:
                                # NOTE(review): anneal() presumably trims
                                # excess coverage - confirm in ShiftCollection
                                new_collection.anneal()

                            if new_collection.best_possible_coverage < best_known_coverage:

                                # Only save it if it's an improvement
                                stack.append(new_collection)

        # Search exhausted or timed out - record the best solution found
        self.set_shift_collection_as_optimal(best_known_solution)
예제 #17
0
    def _calculate(self):
        """Branch-and-bound search for the best shift collection.

        Starts from a heuristic solution, then explores a DFS stack of
        partial ShiftCollections, pruning any branch whose best possible
        coverage cannot beat the best known solution. Bails out with the
        best-so-far after config.CALCULATION_TIMEOUT seconds.
        """
        # Not only do we want optimality, but we want it with the longest
        # shifts possible. That's why we do DFS on long shifts.

        starting_solution = self.use_heuristics_to_generate_some_solution()

        # Helper variables for branch and bound
        best_known_coverage = starting_solution.coverage_sum
        best_known_solution = starting_solution
        # Lower bound - coverage can never be less than total demand
        best_possible_solution = sum(self.demand)

        logger.debug("Starting with known coverage %s vs best possible %s",
                     best_known_coverage, best_possible_solution)

        # Branches to search.
        # (A LIFO stack using pop() is more efficient in Python than a FIFO
        # queue using pop(0), which is O(n).)
        # NOTE(review): reverse_inclusive_range below presumably yields
        # max->min, so the shortest length is appended last and popped
        # first - confirm its direction.

        stack = []

        logger.info("Demand: %s", self.demand)
        empty_collection = ShiftCollection(self.min_length,
                                           self.max_length,
                                           demand=self.demand)
        stack.append(empty_collection)

        start_time = datetime.utcnow()

        while len(stack) != 0:
            # Respect the wall-clock budget; keep the best found so far
            if start_time + timedelta(
                    seconds=config.CALCULATION_TIMEOUT) < datetime.utcnow():
                logger.info("Exited due to timeout (%s seconds)",
                            (datetime.utcnow() - start_time).total_seconds())
                break

            # Get a branch (most recently added first - DFS)
            working_collection = stack.pop()

            if working_collection.is_optimal:
                # Complete solution that cannot be improved - stop searching
                logger.info("Found an optimal collection. Exiting.")
                self.set_shift_collection_as_optimal(working_collection)
                return

            if working_collection.demand_is_met:
                # Feasible complete solution - keep only if it beats the
                # best known coverage
                if working_collection.coverage_sum < best_known_coverage:
                    logger.info(
                        "Better solution found (previous coverage %s / new coverage %s / best_possible %s)",
                        best_known_coverage, working_collection.coverage_sum,
                        best_possible_solution)

                    # Set new best known solution
                    best_known_solution = working_collection
                    best_known_coverage = working_collection.coverage_sum
                else:
                    logger.debug("Found less optimal solution - continuing")
                    # discard
                del working_collection

            else:

                # Incomplete solution: expand only if it could still beat
                # the best known coverage - otherwise prune the branch
                if working_collection.best_possible_coverage < best_known_coverage:
                    # Find the earliest time whose demand is not yet covered
                    t = working_collection.get_first_time_demand_not_met()

                    # Get shift start time
                    start = t
                    for length in reverse_inclusive_range(
                            self.min_length, self.max_length):
                        # Make sure we aren't off the edge of the demand
                        end_index = start + length

                        # Our edge smoothing means this will always work
                        if end_index <= len(self.demand):
                            shift = (start, length)
                            new_collection = deepcopy(working_collection)
                            new_collection.add_shift(shift)

                            if new_collection.demand_is_met:
                                # NOTE(review): anneal() presumably trims
                                # excess coverage - confirm in ShiftCollection
                                new_collection.anneal()

                            if new_collection.best_possible_coverage < best_known_coverage:

                                # Only save it if it's an improvement
                                stack.append(new_collection)

        # Search exhausted or timed out - record the best solution found
        self.set_shift_collection_as_optimal(best_known_solution)
예제 #18
0
    def _subtract_existing_shifts_from_demand(self):
        """Decrement demand by shifts that already exist in the schedule.

        Fetches existing shifts in a padded search window, then for each
        one-hour bucket counts overlapping shifts and subtracts that
        staffing level from self.demand (floored at zero).
        """
        logger.info("Starting demand: %s", self.demand)
        demand_copy = deepcopy(self.demand)
        # Pad the search window by the max shift length on both sides so
        # shifts straddling the week boundaries are included
        search_start = (
            self._get_local_start_time() -
            timedelta(hours=config.MAX_SHIFT_LENGTH_HOURS)).astimezone(
                self.default_tz)
        # 1 week
        search_end = (
            self._get_local_start_time() +
            timedelta(days=7, hours=config.MAX_SHIFT_LENGTH_HOURS)).astimezone(
                self.default_tz)

        shifts = self.role.get_shifts(start=search_start, end=search_end)

        logger.info("Checking %s shifts for existing demand", len(shifts))

        # Walk hour by hour through each day of the week
        for day in range(len(self.demand)):
            start_day = normalize_to_midnight(self._get_local_start_time() +
                                              timedelta(days=day))
            for start in range(len(self.demand[0])):

                # Beware of time changes - duplicate local times are possible
                try:
                    start_hour = deepcopy(start_day).replace(hour=start)
                except pytz.AmbiguousTimeError:
                    # Arbitrarily pick the non-DST side
                    start_hour = deepcopy(start_day).replace(hour=start,
                                                             is_dst=False)

                # timedelta arithmetic never raises AmbiguousTimeError, and
                # timedelta() takes no is_dst argument - the previous
                # try/except here was dead code whose handler would itself
                # have raised TypeError, so it has been removed.
                stop_hour = start_hour + timedelta(hours=1)

                # Count shifts overlapping this one-hour bucket
                current_staffing_level = 0
                for shift in shifts:
                    # NOTE(review): replace(tzinfo=...) attaches the zone
                    # without converting; with pytz zones this can produce
                    # a wrong (LMT) offset - confirm default_tz handling.
                    shift_start = iso8601.parse_date(
                        shift.data.get("start")).replace(
                            tzinfo=self.default_tz)
                    shift_stop = iso8601.parse_date(
                        shift.data.get("stop")).replace(tzinfo=self.default_tz)

                    # Overlap: shift spans the bucket, starts within it, or
                    # stops within it
                    if ((shift_start <= start_hour and shift_stop > stop_hour)
                            or
                        (shift_start >= start_hour and shift_start < stop_hour)
                            or
                        (shift_stop > start_hour and shift_stop <= stop_hour)):

                        # increment staffing level during this bucket
                        current_staffing_level += 1

                logger.debug("Current staffing level at day %s time %s is %s",
                             day, start, current_staffing_level)

                demand_copy[day][start] -= current_staffing_level
                # demand cannot be less than zero
                if demand_copy[day][start] < 0:
                    demand_copy[day][start] = 0

        logger.info("Demand minus existing shifts: %s", demand_copy)
        self.demand = demand_copy