def test_org_crud():
    c = Client(key=KEY, env=ENV)

    # Basic sanity checks on top-level endpoints
    assert len(c.get_plans()) > 0
    assert len(c.get_timezones()) > 0

    logger.debug("Fetching organization")
    o = c.get_organization(TEST_ORG)

    assert o.get_id() == TEST_ORG

    location_count = len(o.get_locations())

    logger.debug("Changing organization name")
    o.patch(name="[In Progress] Continuous integration test")

    logger.debug("Creating a location")
    l = o.create_location(name="El Farolito", timezone="America/Los_Angeles")
    l_id = l.get_id()
    logger.debug("Location id {}".format(l_id))

    assert l.data.get("name") == "El Farolito"
    logger.debug("Changing location name")
    l.patch(name="La Taqueria")

    logger.debug("Checking that location is created")
    new_location_count = len(o.get_locations())
    assert new_location_count == (location_count + 1)
    del l

    logger.debug("Fetching location by ID")
    l = o.get_location(l_id)
    assert l.data.get("name") == "La Taqueria"

    logger.debug("Testing role crud")
    r = l.create_role(name="Kitchen")
    r.patch(name="Cocina")
    logger.debug("Adding worker")
    r.get_workers()
    r.create_worker(email=TEST_WORKER)
    r.delete()

    logger.debug("Deleting location")
    l.delete()
    del l
    logger.debug("Making sure location no longer exists")

    with pytest.raises(UnauthorizedException):
        o.get_location(l_id)

    logger.debug("Finishing up")
    o.patch(name="Continuous integration test")
    all_locations = o.get_locations()
    for location in all_locations:
        location.delete()

    assert 0 == len(o.get_locations())
Example #2
class Tasking():
    """Get tasks and process them"""

    # State a failed schedule is reset to so that it gets picked up again
    REQUEUE_STATE = "chomp-queue"

    def __init__(self):
        self.client = Client(
            key=config.STAFFJOY_API_KEY,
            env=config.ENV,
            url_base="https://staffjoy.partnerhero.com/api/v2/")
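        # config.DEFAULT_TZ is presumably UTC; timestamps from the API are
        # interpreted in this zone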
        self.default_tz = pytz.timezone(config.DEFAULT_TZ)

        # To be defined later
        self.org = None
        self.loc = None
        self.role = None
        self.sched = None
        self.demand = None

    def server(self):
        previous_request_failed = False  # Have some built-in retries

        while True:
            # Get task
            try:
                task = self.client.claim_chomp_task()
                logger.info("Task received: %s", task.data)
                previous_request_failed = False
            except NotFoundException:
                logger.debug("No task found. Sleeping.")
                previous_request_failed = False
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue
            except Exception as e:
                if not previous_request_failed:
                    # retry, but info log it
                    logger.info("Unable to fetch chomp task - retrying")
                    previous_request_failed = True
                else:
                    logger.error(
                        "Unable to fetch chomp task after previous failure: %s",
                        e)

                # Still sleep so we avoid thundering herd
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue

            try:
                self._process_task(task)
                task.delete()
                logger.info("Task completed %s", task.data)
            except Exception as e:
                logger.error("Failed schedule %s:  %s %s",
                             task.data.get("schedule_id"), e,
                             traceback.format_exc())

                logger.info("Requeuing schedule %s",
                            task.data.get("schedule_id"))
                # self.sched is set in _process_task
                self.sched.patch(state=self.REQUEUE_STATE)

                # Sometimes rebooting Chomp helps with errors. For example, if
                # a Gurobi connection is drained then it helps to reboot.
                if config.KILL_ON_ERROR:
                    sleep(config.KILL_DELAY)
                    logger.info("Rebooting to kill container")
                    os.system("shutdown -r now")

    def _process_task(self, task):
        # 1. Fetch schedule
        self.org = self.client.get_organization(
            task.data.get("organization_id"))
        self.loc = self.org.get_location(task.data.get("location_id"))
        self.role = self.loc.get_role(task.data.get("role_id"))
        self.sched = self.role.get_schedule(task.data.get("schedule_id"))

        self._compute_demand()
        self._subtract_existing_shifts_from_demand()
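        # self.demand is now a per-day list of hourly headcounts, net of
        # shifts that already exist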

        # Run the calculation
        s = Splitter(self.demand, self.sched.data.get("min_shift_length_hour"),
                     self.sched.data.get("max_shift_length_hour"))
        s.calculate()
        s.efficiency()

        # Naive because these are not yet datetimes
        naive_shifts = s.get_shifts()
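        # Each naive shift is a dict with "day", "start" (hour of day) and
        # "length" (hours)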
        logger.info("Starting upload of %s shifts", len(naive_shifts))

        local_start_time = self._get_local_start_time()

        for shift in naive_shifts:
            # We have to account for daylight saving time here so that we
            # don't produce errors. We do this by overshooting the timedelta
            # by an extra two hours, then rounding back to midnight.

            logger.debug("Processing shift %s", shift)

            start_day = normalize_to_midnight(
                deepcopy(local_start_time) + timedelta(days=shift["day"]))

            # Beware of time changes - duplicate times are possible
            try:
                start = start_day.replace(hour=shift["start"])
            except pytz.AmbiguousTimeError:
                # Randomly pick one. Minor tech debt.
                start = start_day.replace(hour=shift["start"], is_dst=False)

            stop = start + timedelta(hours=shift["length"])

            # Convert to the ISO 8601 strings that we pass up to the cloud
            utc_start_str = start.astimezone(self.default_tz).isoformat()
            utc_stop_str = stop.astimezone(self.default_tz).isoformat()

            logger.info("Creating shift with start %s stop %s", start, stop)
            self.role.create_shift(start=utc_start_str, stop=utc_stop_str)

    def _subtract_existing_shifts_from_demand(self):
        logger.info("Starting demand: %s", self.demand)
        demand_copy = deepcopy(self.demand)
        search_start = (
            self._get_local_start_time() -
            timedelta(hours=config.MAX_SHIFT_LENGTH_HOURS)).astimezone(
                self.default_tz)
        # Search window: one week, padded by the max shift length on each side
        search_end = (
            self._get_local_start_time() +
            timedelta(days=7, hours=config.MAX_SHIFT_LENGTH_HOURS)).astimezone(
                self.default_tz)

        shifts = self.role.get_shifts(start=search_start, end=search_end)

        logger.info("Checking %s shifts for existing demand", len(shifts))

        # Search hour by hour throughout the weeks
        for day in range(len(self.demand)):
            start_day = normalize_to_midnight(self._get_local_start_time() +
                                              timedelta(days=day))
            for start in range(len(self.demand[0])):

                # Beware of time changes - duplicate times are possible
                try:
                    start_hour = deepcopy(start_day).replace(hour=start)
                except pytz.AmbiguousTimeError:
                    # Arbitrarily pick the non-DST instant. Minor tech debt.
                    start_hour = deepcopy(start_day).replace(hour=start,
                                                             is_dst=False)

                # timedelta arithmetic on an aware datetime does not raise
                # AmbiguousTimeError, and timedelta() takes no is_dst argument
                stop_hour = start_hour + timedelta(hours=1)

                # Count existing shifts that overlap this one-hour bucket
                current_staffing_level = 0
                for shift in shifts:
                    shift_start = iso8601.parse_date(
                        shift.data.get("start")).replace(
                            tzinfo=self.default_tz)
                    shift_stop = iso8601.parse_date(
                        shift.data.get("stop")).replace(tzinfo=self.default_tz)

                    if ((shift_start <= start_hour and shift_stop > stop_hour)
                            or
                        (shift_start >= start_hour and shift_start < stop_hour)
                            or
                        (shift_stop > start_hour and shift_stop <= stop_hour)):

                        # increment staffing level during that bucket
                        current_staffing_level += 1

                logger.debug("Current staffing level at day %s time %s is %s",
                             day, start, current_staffing_level)

                demand_copy[day][start] -= current_staffing_level
                # demand cannot be less than zero
                if demand_copy[day][start] < 0:
                    demand_copy[day][start] = 0

        logger.info("Demand minus existing shifts: %s", demand_copy)
        self.demand = demand_copy

    def _get_local_start_time(self):
        # Parse the schedule start (stored in the default timezone) and
        # convert it to the location's local time
        local_tz = pytz.timezone(self.loc.data.get("timezone"))
        utc_start_time = iso8601.parse_date(
            self.sched.data.get("start")).replace(tzinfo=self.default_tz)
        local_start_time = utc_start_time.astimezone(local_tz)
        return local_start_time

    def _compute_demand(self):
        weekday_demand = self.sched.data.get("demand")
        day_week_starts = self.org.data.get("day_week_starts")
        # flatten days from dict to list
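        # week_day_range presumably yields the seven day names in order,
        # starting from the org's day_week_starts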
        demand = []
        for day in week_day_range(day_week_starts):
            demand.append(weekday_demand[day])

        self.demand = demand
Example #3
class Tasking():
    """Get tasks and process them"""

    # State a failed schedule is reset to so that it gets picked up again
    REQUEUE_STATE = "mobius-queue"

    def __init__(self):
        self.client = Client(key=config.STAFFJOY_API_KEY,
                             url_base=config.STAFFJOY_URL_BASE)
        self.default_tz = pytz.timezone(config.DEFAULT_TZ)

    def server(self):
        previous_request_failed = False  # Have some built-in retries

        while True:
            # Get task
            try:
                task = self.client.claim_mobius_task()
                logger.info("Task received: %s" % task.data)
                previous_request_failed = False
            except NotFoundException:
                logger.debug("No task found. Sleeping.")
                previous_request_failed = False
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue
            except Exception as e:
                if not previous_request_failed:
                    # retry, but info log it
                    logger.info("Unable to fetch mobius task - retrying")
                    previous_request_failed = True
                else:
                    logger.error(
                        "Unable to fetch mobius task after previous failure: %s",
                        e)

                # Still sleep so we avoid thundering herd
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue

            try:
                self._process_task(task)
                task.delete()
                logger.info("Task completed %s" % task.data)
            except Exception as e:
                logger.error("Failed schedule %s:  %s %s" %
                             (task.data.get("schedule_id"), e,
                              traceback.format_exc()))

                logger.info("Requeuing schedule %s" %
                            task.data.get("schedule_id"))

                # self.sched is set in _process_task
                self.sched.patch(state=self.REQUEUE_STATE)

                # Sometimes rebooting Mobius helps with errors. For example, if
                # a Gurobi connection is drained then it helps to reboot.
                if config.KILL_ON_ERROR:
                    sleep(config.KILL_DELAY)
                    logger.info("Rebooting to kill container")
                    os.system("shutdown -r now")

    def _process_task(self, task):

        # 1. Fetch schedule
        self.org = self.client.get_organization(task.data.get(
            "organization_id"))
        self.loc = self.org.get_location(task.data.get("location_id"))
        self.role = self.loc.get_role(task.data.get("role_id"))
        self.sched = self.role.get_schedule(task.data.get("schedule_id"))

        # Collect the schedule window, timezone, and role constraints
        # (converted from hours to minutes) into the solver environment
        env = Environment(
            organization_id=task.data.get("organization_id"),
            location_id=task.data.get("location_id"),
            role_id=task.data.get("role_id"),
            schedule_id=task.data.get("schedule_id"),
            tz_string=self.loc.data.get("timezone"),
            start=self.sched.data.get("start"),
            stop=self.sched.data.get("stop"),
            day_week_starts=self.org.data.get("day_week_starts"),
            min_minutes_per_workday=self.role.data.get("min_hours_per_workday")
            * MINUTES_PER_HOUR,
            max_minutes_per_workday=self.role.data.get("max_hours_per_workday")
            * MINUTES_PER_HOUR,
            min_minutes_between_shifts=self.role.data.get(
                "min_hours_between_shifts") * MINUTES_PER_HOUR,
            max_consecutive_workdays=self.role.data.get(
                "max_consecutive_workdays"))

        user_objs = self.role.get_workers(archived=False)
        employees = []
        for e in user_objs:
            new_e = Employee(
                user_id=e.data["id"],
                min_hours_per_workweek=e.data["min_hours_per_workweek"],
                max_hours_per_workweek=e.data["max_hours_per_workweek"],
                environment=env)

            # check whether employee even has availability to work
            if week_sum(new_e.availability) > new_e.min_hours_per_workweek:
                employees.append(new_e)

        if len(employees) == 0:
            logger.info("No employees")
            return

        # Get the unassigned shifts within the schedule window
        shift_api_objs = self.role.get_shifts(start=dt_to_query_str(env.start),
                                              end=dt_to_query_str(env.stop),
                                              user_id=UNASSIGNED_USER_ID)

        # Convert api objs to something more manageable
        shifts = []
        for s in shift_api_objs:
            shifts.append(Shift(s))

        if len(shifts) == 0:
            logger.info("No unassigned shifts")
            return

        # Run the assignment calculation and write the results back to the shifts
        a = Assign(env, employees, shifts)
        a.calculate()
        a.set_shift_user_ids()

    def _get_local_start_time(self):
        # Parse the schedule start (stored in the default timezone) and
        # convert it to the location's local time
        local_tz = pytz.timezone(self.loc.data.get("timezone"))
        utc_start_time = iso8601.parse_date(self.sched.data.get(
            "start")).replace(tzinfo=self.default_tz)
        local_start_time = utc_start_time.astimezone(local_tz)
        return local_start_time
Example #4
class Tasking():
    """Get tasks and process them"""

    # State a failed schedule is reset to so that it gets picked up again
    REQUEUE_STATE = "chomp-queue"

    def __init__(self):
        self.client = Client(
            key=config.STAFFJOY_API_KEY,
            env=config.ENV,
            url_base="https://staffjoy.partnerhero.com/api/v2/")
        self.default_tz = pytz.timezone(config.DEFAULT_TZ)

        # To be defined later
        self.org = None
        self.loc = None
        self.role = None
        self.sched = None
        self.demand = None

    def server(self):
        previous_request_failed = False  # Have some built-in retries

        while True:
            # Get task
            try:
                task = self.client.claim_chomp_task()
                logger.info("Task received: %s", task.data)
                previous_request_failed = False
            except NotFoundException:
                logger.debug("No task found. Sleeping.")
                previous_request_failed = False
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue
            except Exception as e:
                if not previous_request_failed:
                    # retry, but info log it
                    logger.info("Unable to fetch chomp task - retrying")
                    previous_request_failed = True
                else:
                    logger.error(
                        "Unable to fetch chomp task after previous failure: %s",
                        e)

                # Still sleep so we avoid thundering herd
                sleep(config.TASKING_FETCH_INTERVAL_SECONDS)
                continue

            try:
                self._process_task(task)
                task.delete()
                logger.info("Task completed %s", task.data)
            except Exception as e:
                logger.error("Failed schedule %s:  %s %s",
                             task.data.get("schedule_id"), e,
                             traceback.format_exc())

                logger.info("Requeuing schedule %s",
                            task.data.get("schedule_id"))
                # self.sched is set in _process_task
                self.sched.patch(state=self.REQUEUE_STATE)

                # Sometimes rebooting Chomp helps with errors. For example, if
                # a Gurobi connection is drained then it helps to reboot.
                if config.KILL_ON_ERROR:
                    sleep(config.KILL_DELAY)
                    logger.info("Rebooting to kill container")
                    os.system("shutdown -r now")

    def _process_task(self, task):
        # 1. Fetch schedule
        self.org = self.client.get_organization(
            task.data.get("organization_id"))
        self.loc = self.org.get_location(task.data.get("location_id"))
        self.role = self.loc.get_role(task.data.get("role_id"))
        self.sched = self.role.get_schedule(task.data.get("schedule_id"))

        self._compute_demand()
        self._subtract_existing_shifts_from_demand()
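        # self.demand is now a per-day list of hourly headcounts, net of
        # shifts that already exist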

        # Run the calculation
        s = Splitter(self.demand,
                     self.sched.data.get("min_shift_length_hour"),
                     self.sched.data.get("max_shift_length_hour"))
        s.calculate()
        s.efficiency()

        # Naive because these are not yet datetimes
        naive_shifts = s.get_shifts()
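        # Each naive shift is a dict with "day", "start" (hour of day) and
        # "length" (hours)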
        logger.info("Starting upload of %s shifts", len(naive_shifts))

        local_start_time = self._get_local_start_time()

        for shift in naive_shifts:
            # We have to account for daylight saving time here so that we
            # don't produce errors. We do this by overshooting the timedelta
            # by an extra two hours, then rounding back to midnight.

            logger.debug("Processing shift %s", shift)

            start_day = normalize_to_midnight(
                deepcopy(local_start_time) + timedelta(days=shift["day"]))

            # Beware of time changes - duplicate times are possible
            try:
                start = start_day.replace(hour=shift["start"])
            except pytz.AmbiguousTimeError:
                # Randomly pick one. Minor tech debt.
                start = start_day.replace(hour=shift["start"], is_dst=False)

            stop = start + timedelta(hours=shift["length"])

            # Convert to the ISO 8601 strings that we pass up to the cloud
            utc_start_str = start.astimezone(self.default_tz).isoformat()
            utc_stop_str = stop.astimezone(self.default_tz).isoformat()

            logger.info("Creating shift with start %s stop %s", start, stop)
            self.role.create_shift(start=utc_start_str, stop=utc_stop_str)

    def _subtract_existing_shifts_from_demand(self):
        logger.info("Starting demand: %s", self.demand)
        demand_copy = deepcopy(self.demand)
        search_start = (self._get_local_start_time() - timedelta(
            hours=config.MAX_SHIFT_LENGTH_HOURS)).astimezone(self.default_tz)
        # Search window: one week, padded by the max shift length on each side
        search_end = (self._get_local_start_time() + timedelta(
            days=7, hours=config.MAX_SHIFT_LENGTH_HOURS)
                      ).astimezone(self.default_tz)

        shifts = self.role.get_shifts(start=search_start, end=search_end)

        logger.info("Checking %s shifts for existing demand", len(shifts))

        # Search hour by hour throughout the weeks
        for day in range(len(self.demand)):
            start_day = normalize_to_midnight(self._get_local_start_time() +
                                              timedelta(days=day))
            for start in range(len(self.demand[0])):

                # Beware of time changes - duplicate times are possible
                try:
                    start_hour = deepcopy(start_day).replace(hour=start)
                except pytz.AmbiguousTimeError:
                    # Arbitrarily pick the non-DST instant. Minor tech debt.
                    start_hour = deepcopy(start_day).replace(
                        hour=start, is_dst=False)

                # timedelta arithmetic on an aware datetime does not raise
                # AmbiguousTimeError, and timedelta() takes no is_dst argument
                stop_hour = start_hour + timedelta(hours=1)

                # Count existing shifts that overlap this one-hour bucket
                current_staffing_level = 0
                for shift in shifts:
                    shift_start = iso8601.parse_date(
                        shift.data.get("start")).replace(
                            tzinfo=self.default_tz)
                    shift_stop = iso8601.parse_date(
                        shift.data.get("stop")).replace(tzinfo=self.default_tz)

                    if ((shift_start <= start_hour and shift_stop > stop_hour)
                            or
                        (shift_start >= start_hour and shift_start < stop_hour)
                            or
                        (shift_stop > start_hour and shift_stop <= stop_hour)):

                        # increment staffing level during that bucket
                        current_staffing_level += 1

                logger.debug("Current staffing level at day %s time %s is %s",
                             day, start, current_staffing_level)

                demand_copy[day][start] -= current_staffing_level
                # demand cannot be less than zero
                if demand_copy[day][start] < 0:
                    demand_copy[day][start] = 0

        logger.info("Demand minus existing shifts: %s", demand_copy)
        self.demand = demand_copy

    def _get_local_start_time(self):
        # Parse the schedule start (stored in the default timezone) and
        # convert it to the location's local time
        local_tz = pytz.timezone(self.loc.data.get("timezone"))
        utc_start_time = iso8601.parse_date(
            self.sched.data.get("start")).replace(tzinfo=self.default_tz)
        local_start_time = utc_start_time.astimezone(local_tz)
        return local_start_time

    def _compute_demand(self):
        weekday_demand = self.sched.data.get("demand")
        day_week_starts = self.org.data.get("day_week_starts")
        # flatten days from dict to list
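        # week_day_range presumably yields the seven day names in order,
        # starting from the org's day_week_starts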
        demand = []
        for day in week_day_range(day_week_starts):
            demand.append(weekday_demand[day])

        self.demand = demand
Example #5
def test_org_crud():
    c = Client(key=KEY, env=ENV)

    # Basic sanity check on a top-level endpoint
    assert len(c.get_plans()) > 0

    logger.debug("Fetching organization")
    o = c.get_organization(TEST_ORG)

    assert o.get_id() == TEST_ORG

    location_count = len(o.get_locations())

    logger.debug("Changing organization name")
    o.patch(name="[In Progress] Continuous integration test")

    logger.debug("Creating a location")
    l = o.create_location(name="El Farolito", timezone="America/Los_Angeles")
    l_id = l.get_id()
    logger.debug("Location id {}".format(l_id))

    assert l.data.get("name") == "El Farolito"
    logger.debug("Changing location name")
    l.patch(name="La Taqueria")

    logger.debug("Checking that location is created")
    new_location_count = len(o.get_locations())
    assert new_location_count == (location_count + 1)
    del l

    logger.debug("Fetching location by ID")
    l = o.get_location(l_id)
    assert l.data.get("name") == "La Taqueria"

    logger.debug("Testing role crud")
    r = l.create_role(name="Kitchen")
    r.patch(name="Cocina")
    logger.debug("Adding worker")
    r.get_workers()
    r.create_worker(email=TEST_WORKER,
                    min_hours_per_workweek=30,
                    max_hours_per_workweek=40)

    logger.debug("Deleting worker")
    r.delete()

    logger.debug("Deleting location")
    l.delete()
    del l
    logger.debug("Making sure location has been archived")

    loc = o.get_location(l_id)
    assert loc.data.get("archived")

    logger.debug("Finishing up")
    o.patch(name="Continuous integration test")
    all_locations = o.get_locations()
    for location in all_locations:
        if not location.data.get("archived"):
            location.delete()

    for location in o.get_locations():
        assert location.data.get("archived")