def calculate(self):
    """Run the schedule calculation, relaxing constraints until one succeeds.

    Attempts, in order:
      1. consecutive days off + happiness scoring
      2. consecutive days off, no happiness scoring
      3. no consecutive days off, no happiness scoring

    The first two attempts swallow and log failures; the final, least
    constrained attempt deliberately lets any error propagate to the caller.
    """
    # Step 1: Try consecutive days off, happy
    try:
        logger.info("Trying consecutive days off with happiness")
        self._calculate(consecutive_days_off=True, happiness_scoring=True)
        return
    except Exception as e:
        logger.info("Consecutive days off failed: %s" % e)

    # Step 2: Try no happy, yes consecutive days off
    try:
        logger.info("Trying consecutive days off without happiness")
        self._calculate(consecutive_days_off=True, happiness_scoring=False)
        return
    except Exception as e:
        logger.info("Consecutive days off without happiness failed: %s" % e)

    # Step 3: Try no happy, no consecutive days off
    # Don't catch error - this is the last resort.
    # (Fixed: log message previously claimed we were still trying
    # consecutive days off.)
    logger.info("Trying without consecutive days off or happiness")
    self._calculate(consecutive_days_off=False, happiness_scoring=False)
def _process_time_off_requests(self, to_requests):
    """Subtract time off requests from availability and min/max hours.

    For each approved request, the paid minutes reduce the employee's
    weekly min/max hour targets (clamped at zero), and the day containing
    the request's start is marked fully unavailable.
    """
    for r in to_requests:
        # Only approved requests affect the schedule; skip anything else.
        if r.data.get("state") not in APPROVED_TIME_OFF_STATES:
            logger.info(
                "Time off request %s skipped because it is in unapproved state %s"
                % (r.data.get("time_off_request_id"), r.data.get("state")))
            continue

        logger.debug("Processing time off request for user %s: %s" %
                     (self.user_id, r))

        # Reduce weekly hour targets by the paid portion of the request,
        # clamping at zero so targets never go negative.
        self.min_hours_per_workweek -= 1.0 * r.data[
            "minutes_paid"] / MINUTES_PER_HOUR
        if self.min_hours_per_workweek < 0:
            self.min_hours_per_workweek = 0

        self.max_hours_per_workweek -= 1.0 * r.data[
            "minutes_paid"] / MINUTES_PER_HOUR
        if self.max_hours_per_workweek < 0:
            self.max_hours_per_workweek = 0

        # Update availability
        # Get day of week for request and update availability
        # NOTE(review): only the (local) day containing "start" is zeroed -
        # presumably requests never span multiple days; confirm upstream.
        day_of_week = dt_to_day(
            self.environment.datetime_utc_to_local(
                iso8601.parse_date(r.data["start"])))
        self.availability[day_of_week] = [0] * HOURS_PER_DAY
        logger.info("Marked user %s as unavailable on %s due to time off" %
                    (self.user_id, day_of_week))
def __init__(self, environment, employees, shifts):
    """Set up an assignment problem over the given employees and shifts.

    The shifts list is sorted in place by start time (the caller's list
    object is mutated), since later logic depends on that ordering.
    """
    self.environment = environment
    self.employees = employees
    self.shifts = shifts

    # Sort in place so both self.shifts and the caller's list are ordered
    self.shifts.sort(key=lambda shift: shift.start)

    employee_count = len(self.employees)
    shift_count = len(self.shifts)
    logger.info(
        "Initialized assignment problem of %s employees and %s shifts" %
        (employee_count, shift_count))
def server(self): previous_request_failed = False # Have some built-in retries while True: # Get task try: task = self.client.claim_mobius_task() logger.info("Task received: %s" % task.data) previous_request_failed = False except NotFoundException: logger.debug("No task found. Sleeping.") previous_request_failed = False sleep(config.TASKING_FETCH_INTERVAL_SECONDS) continue except Exception as e: if not previous_request_failed: # retry, but info log it logger.info("Unable to fetch mobius task - retrying") previous_request_failed = True else: logger.error( "Unable to fetch mobius task after previous failure: %s" % e) # Still sleep so we avoid thundering herd sleep(config.TASKING_FETCH_INTERVAL_SECONDS) continue try: self._process_task(task) task.delete() logger.info("Task completed %s" % task.data) except Exception as e: logger.error("Failed schedule %s: %s %s" % (task.data.get("schedule_id"), e, traceback.format_exc())) logger.info("Requeuing schedule %s" % task.data.get("schedule_id")) # self.sched set in process_task self.sched.patch(state=self.REQUEUE_STATE) # Sometimes rebooting Mobius helps with errors. For example, if # a Gurobi connection is drained then it helps to reboot. if config.KILL_ON_ERROR: sleep(config.KILL_DELAY) logger.info("Rebooting to kill container") os.system("shutdown -r now")
def set_shift_user_ids(self):
    """Patch request the user ids in for all of the assigned shifts!

    Shifts still carrying user_id 0 are unassigned and only logged;
    assigned shifts are patched through the API.
    """
    c = staffjoy.Client(key=config.STAFFJOY_API_KEY, env=config.ENV)
    org = c.get_organization(self.environment.organization_id)
    loc = org.get_location(self.environment.location_id)
    role = loc.get_role(self.environment.role_id)

    for shift in self.shifts:
        # Fixed: was "shift.user_id is 0" - identity comparison on ints
        # relies on CPython small-int caching and is not guaranteed.
        if shift.user_id == 0:
            logger.info("Shift %s not assigned" % shift.shift_id)
        else:
            logger.info("Setting shift %s to user %s" %
                        (shift.shift_id, shift.user_id))
            shift_api = role.get_shift(shift.shift_id)
            shift_api.patch(user_id=shift.user_id)
def _fetch_existing_shifts(self):
    """Look for fixed shifts and other stuff.

    Fetches the user's already-assigned shifts in (and just before) the
    scheduling window, records those starting within the window, and
    reduces the remaining weekly min/max hour targets accordingly.
    """
    logger.debug("Fetching existing shifts for user %s" % self.user_id)
    self.existing_shifts = []

    # Widen the query window backwards so shifts that began before the
    # schedule start (but could overlap into it) are included.
    shifts_obj_raw = self._get_role_client().get_shifts(
        start=dt_to_query_str(self.environment.start -
                              timedelta(hours=config.MAX_HOURS_PER_SHIFT)),
        end=dt_to_query_str(self.environment.stop),
        user_id=self.user_id)

    shifts_obj = [Shift(s) for s in shifts_obj_raw]

    for s in [s for s in shifts_obj if s.start >= self.environment.start]:
        logger.info("Found existing shift %s for user %s" %
                    (s.shift_id, self.user_id))
        self.existing_shifts.append(s)

        # Also decrease hours to be scheduled by that shift's length,
        # clamping both targets at zero.
        # (Fixed: the max target was previously decremented TWICE per
        # shift, and the first negative-clamp reset the MIN target
        # instead of the max.)
        shift_hours = 1.0 * s.total_minutes() / MINUTES_PER_HOUR

        self.min_hours_per_workweek -= shift_hours
        if self.min_hours_per_workweek < 0:
            self.min_hours_per_workweek = 0

        self.max_hours_per_workweek -= shift_hours
        if self.max_hours_per_workweek < 0:
            self.max_hours_per_workweek = 0
def _process_task(self, task):
    """Process one scheduling task: fetch inputs, solve, write back.

    Builds the Environment, Employee, and Shift inputs from the API, then
    runs the assignment calculation and patches user ids onto shifts.
    Returns early (without solving) when there are no schedulable
    employees or no unassigned shifts.
    """
    # 1. Fetch schedule and its parent org / location / role
    self.org = self.client.get_organization(task.data.get(
        "organization_id"))
    self.loc = self.org.get_location(task.data.get("location_id"))
    self.role = self.loc.get_role(task.data.get("role_id"))
    self.sched = self.role.get_schedule(task.data.get("schedule_id"))

    env = Environment(
        organization_id=task.data.get("organization_id"),
        location_id=task.data.get("location_id"),
        role_id=task.data.get("role_id"),
        schedule_id=task.data.get("schedule_id"),
        tz_string=self.loc.data.get("timezone"),
        start=self.sched.data.get("start"),
        stop=self.sched.data.get("stop"),
        day_week_starts=self.org.data.get("day_week_starts"),
        min_minutes_per_workday=self.role.data.get("min_hours_per_workday")
        * MINUTES_PER_HOUR,
        max_minutes_per_workday=self.role.data.get("max_hours_per_workday")
        * MINUTES_PER_HOUR,
        min_minutes_between_shifts=self.role.data.get(
            "min_hours_between_shifts") * MINUTES_PER_HOUR,
        max_consecutive_workdays=self.role.data.get(
            "max_consecutive_workdays"))

    user_objs = self.role.get_workers(archived=False)

    employees = []
    for e in user_objs:
        new_e = Employee(
            user_id=e.data["id"],
            min_hours_per_workweek=e.data["min_hours_per_workweek"],
            max_hours_per_workweek=e.data["max_hours_per_workweek"],
            environment=env,
        )
        # check whether employee even has availability to work
        if week_sum(new_e.availability) > new_e.min_hours_per_workweek:
            employees.append(new_e)

    # Fixed: was "len(employees) is 0" - identity comparison on an int.
    if not employees:
        logger.info("No employees")
        return

    # Get the shifts
    shift_api_objs = self.role.get_shifts(start=dt_to_query_str(env.start),
                                          end=dt_to_query_str(env.stop),
                                          user_id=UNASSIGNED_USER_ID)

    # Convert api objs to something more manageable
    shifts = [Shift(s) for s in shift_api_objs]

    # Fixed: was "len(shifts) is 0" - identity comparison on an int.
    if not shifts:
        logger.info("No unassigned shifts")
        return

    # Run the calculation
    a = Assign(env, employees, shifts)
    a.calculate()
    a.set_shift_user_ids()
def _calculate(self,
               consecutive_days_off=False,
               return_unsolved_model_for_tuning=False,
               happiness_scoring=False):
    """Run the calculation.

    Builds a Gurobi MIP that assigns employees to shifts, maximizing
    happiness (optional) minus penalties for unassigned shifts and
    violated weekly minimum hours, subject to availability, overlap,
    per-week and per-day hour limits.

    Arguments:
        consecutive_days_off: when True, require each employee to get at
            least two consecutive days off in the week.
        return_unsolved_model_for_tuning: when True, return the built but
            unsolved model (used by tune()) instead of optimizing.
        happiness_scoring: when True, weight assignments by each
            employee's shift happiness score and apply a solver timeout.

    Raises:
        Exception: if the solver does not finish with an OPTIMAL status.
    """
    # Import Guorbi now so server connection doesn't go stale
    # (importing triggers a server connection)
    import gurobipy as grb
    GRB = grb.GRB  # For easier constant access

    m = grb.Model("mobius-%s-role-%s" %
                  (config.ENV, self.environment.role_id))
    m.setParam("OutputFlag", False)  # Don't print gurobi logs
    m.setParam("Threads", config.THREADS)

    # Add Timeout on happiness scoring.
    if happiness_scoring:
        m.setParam("TimeLimit", config.HAPPY_CALCULATION_TIMEOUT)

    # Try loading a tuning file if we're not tuning
    if not return_unsolved_model_for_tuning:
        try:
            m.read(tune_file)
            logger.info("Loaded tuned model")
        # NOTE(review): bare except swallows ALL errors (not just a
        # missing file) - consider catching a narrower exception.
        except:
            logger.info("No tune file found")

    # Create objective - which is basically happiness minus penalties
    obj = grb.LinExpr()

    # Whether worker is assigned to shift
    assignments = {}
    unassigned = {}

    for e in self.employees:
        logger.debug("Building shifts for user %s" % e.user_id)
        for s in self.shifts:
            assignments[e.user_id, s.shift_id] = m.addVar(
                vtype=GRB.BINARY,
                name="user-%s-assigned-shift-%s" % (e.user_id, s.shift_id))

            # Only add happiness if we're scoring happiness
            if happiness_scoring:
                obj += assignments[e.user_id,
                                   s.shift_id] * e.shift_happiness_score(s)

            # Also add an unassigned shift - and penalize it!
            # NOTE(review): this runs once per (employee, shift) pair, so
            # each shift's "unassigned" var is re-created and re-penalized
            # per employee - looks like it belongs in its own
            # per-shift loop; confirm intent.
            unassigned[s.shift_id] = m.addVar(
                vtype=GRB.BINARY, name="unassigned-shift-%s" % s.shift_id)
            obj += unassigned[s.shift_id] * config.UNASSIGNED_PENALTY

    # Helper variables
    min_week_hours_violation = {}
    week_minutes_sum = {}
    day_shifts_sum = {}
    day_active = {}
    for e in self.employees:
        min_week_hours_violation[e.user_id] = m.addVar(
            vtype=GRB.BINARY,
            name="user-%s-min-week-hours-violation" % (e.user_id))
        week_minutes_sum[e.user_id] = m.addVar(
            name="user-%s-hours-per-week" % e.user_id)

        for day in week_day_range():
            day_shifts_sum[e.user_id, day] = m.addVar(
                vtype=GRB.INTEGER,
                name="user-%s-day-%s-shift-sum" % (e.user_id, day))
            # NOTE(review): this var reuses the exact same name string as
            # day_shifts_sum above - Gurobi names should be unique;
            # probably meant something like "user-%s-day-%s-active".
            day_active[e.user_id, day] = m.addVar(
                vtype=GRB.BINARY,
                name="user-%s-day-%s-shift-sum" % (e.user_id, day))

        obj += min_week_hours_violation[
            e.user_id] * config.MIN_HOURS_VIOLATION_PENALTY

    m.update()

    # Every shift is either assigned to exactly one employee or marked
    # unassigned.
    for s in self.shifts:
        m.addConstr(
            grb.quicksum(assignments[e.user_id, s.shift_id]
                         for e in self.employees) + unassigned[s.shift_id],
            GRB.EQUAL, 1)

    # Allowed shift state transitions
    for test in self.shifts:
        # Use index because shifts are sorted!
        # Iterate through "other" (o) shifts
        for o in self.shifts:
            if o.shift_id == test.shift_id:
                continue

            # Add min minutes between shifts for allowed overlaps
            if dt_overlaps(
                    o.start, o.stop, test.start,
                    test.stop + timedelta(minutes=self.environment.
                                          min_minutes_between_shifts)):
                # Add constraint that shift transitions not allowed
                for e in self.employees:
                    m.addConstr(
                        assignments[e.user_id, test.shift_id] +
                        assignments[e.user_id, o.shift_id],
                        GRB.LESS_EQUAL, 1)

    # Add consecutive days off constraint
    # so that workers have a "weekend" - at least 2 consecutive
    # days off in a week where possible
    #
    # The current implementation has us run the model a second
    # time if this is infeasible, however we should revise it
    # to be a weighted variable.
    if consecutive_days_off:
        for e in self.employees:
            day_off_sum = grb.LinExpr()
            previous_day_name = None
            for day in week_day_range(self.environment.day_week_starts):
                if not previous_day_name:
                    # It's the first loop
                    if not e.preceding_day_worked:
                        # if they didn't work the day before, then not
                        # working the first day is consec days off
                        day_off_sum += (1 - day_active[e.user_id, day])
                else:
                    # We're in the loop not on first day
                    day_off_sum += (1 - day_active[e.user_id, day]) * (
                        1 - day_active[e.user_id, previous_day_name])

                previous_day_name = day

            # We now have built the LinExpr. It needs to be >= 1
            # (for at least 1 set of consec days off)
            m.addConstr(day_off_sum, GRB.GREATER_EQUAL, 1)

    # Availability constraints
    for e in self.employees:
        for s in self.shifts:
            if not e.available_to_work(s):
                logger.debug("User %s unavailable to work shift %s" %
                             (e.user_id, s.shift_id))
                m.addConstr(assignments[e.user_id, s.shift_id], GRB.EQUAL,
                            0)

    # Limit employee hours per workweek
    for e in self.employees:
        # The running total of shifts is equal to the helper variable
        m.addConstr(
            sum([
                s.total_minutes() * assignments[e.user_id, s.shift_id]
                for s in self.shifts
            ]), GRB.EQUAL, week_minutes_sum[e.user_id])

        # The total minutes an employee works in a week is less than or
        # equal to their max
        m.addConstr(week_minutes_sum[e.user_id], GRB.LESS_EQUAL,
                    e.max_hours_per_workweek * MINUTES_PER_HOUR)

        # A worker must work at least their min hours per week.
        # Violation causes a penalty.
        # NOTE - once the min is violated, we don't say "try to get as close as possible" -
        # we stop unassigned shifts, but if you violate min then you're not guaranteed anything
        m.addConstr(
            week_minutes_sum[e.user_id], GRB.GREATER_EQUAL,
            e.min_hours_per_workweek * MINUTES_PER_HOUR *
            (1 - min_week_hours_violation[e.user_id]))

        for day in week_day_range():
            # SOS1: at most one of (shift count, day_active) is non-zero;
            # combined with the >= 1 constraint below this makes
            # day_active an indicator of "no shifts that day".
            m.addSOS(GRB.SOS_TYPE1, [
                day_shifts_sum[e.user_id, day], day_active[e.user_id, day]
            ])
            m.addConstr(
                day_shifts_sum[e.user_id, day], GRB.EQUAL,
                grb.quicksum([
                    assignments[e.user_id, s.shift_id]
                    for s in self.shifts
                    if ((dt_to_day(s.start) == day) or (
                        dt_to_day(s.stop) == day and
                        s.stop <= self.environment.stop))
                ]))
            m.addConstr(
                day_shifts_sum[e.user_id, day] + day_active[e.user_id, day],
                GRB.GREATER_EQUAL, 1)

    # Limit employee hours per workday
    workday_start = deepcopy(self.environment.start)
    while workday_start < self.environment.stop:
        for e in self.employees:
            # Look for minutes of overlap
            workday_stop = workday_start + timedelta(days=1)
            m.addConstr(
                sum([
                    s.minutes_overlap(start=workday_start,
                                      stop=workday_stop) *
                    assignments[e.user_id, s.shift_id]
                    for s in self.shifts
                    if dt_overlaps(s.start, s.stop, workday_start,
                                   workday_stop)
                ]), GRB.LESS_EQUAL,
                self.environment.max_minutes_per_workday)

        workday_start += timedelta(days=1)

    m.update()
    m.setObjective(obj)
    m.modelSense = GRB.MAXIMIZE  # Make something people love!

    if return_unsolved_model_for_tuning:
        return m

    m.optimize()

    if m.status != GRB.status.OPTIMAL:
        logger.info("Calculation failed - gurobi status code %s" % m.status)
        raise Exception("Calculation failed")

    logger.info("Optimized! objective: %s" % m.objVal)

    # Read the solution back: flag min-hours violations and write the
    # winning user_id onto each assigned shift object.
    for e in self.employees:
        if min_week_hours_violation[e.user_id].x > .5:
            logger.info(
                "User %s unable to meet min hours for week (hours: %s, min: %s)"
                % (e.user_id,
                   1.0 * week_minutes_sum[e.user_id].x / MINUTES_PER_HOUR,
                   e.min_hours_per_workweek))

        for s in self.shifts:
            if assignments[e.user_id, s.shift_id].x > .5:
                logger.info("User %s assigned shift %s" %
                            (e.user_id, s.shift_id))
                s.user_id = e.user_id

    logger.info(
        "%s shifts of %s still unsassigned" %
        (len([s for s in self.shifts if s.user_id == 0]), len(self.shifts)))
def tune():
    """Take a canonical decomposition model, then tune it using Gurobi.

    Builds a fixed, known-feasible assignment problem (12 synthetic
    employees plus canned shift data from tune_data/shifts.json), asks
    _calculate for the unsolved model, runs Gurobi's parameter tuner, and
    writes the best parameter set to config.TUNE_FILE.
    """
    logger.info("Beginning tuning")

    # Creating some employees
    # This must be feasible
    env_attributes = {
        "organization_id": 7,
        "location_id": 8,
        "role_id": 4,
        "schedule_id": 9,
        "tz_string": "America/Los_Angeles",
        "start": "2015-12-21T08:00:00",
        "stop": "2015-12-28T08:00:00",
        "day_week_starts": "monday",
        "min_minutes_per_workday": 60 * 5,
        "max_minutes_per_workday": 60 * 8,
        "min_minutes_between_shifts": 60 * 12,
        "max_consecutive_workdays": 6,
    }
    env = Environment(**env_attributes)

    # Base attributes shared by every synthetic employee.
    employee_attributes = {
        "user_id": 27182818,
        "min_hours_per_workweek": 16,
        "max_hours_per_workweek": 24,
        "preceding_day_worked": False,
        "preceding_days_worked_streak": 4,
        "existing_shifts": [],
        "time_off_requests": [],  # TODO
        "preferences": {
            "monday": [0] * 24,
            "tuesday": [1] * 24,
            "wednesday": [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                          1, 1, 1, 1, 1, 1, 0, 0],
            "thursday": [0] * 24,
            "friday": [1] * 24,
            "saturday": [1] * 24,
            "sunday": [0] * 24,
        },
        "working_hours": {
            "monday": [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                       1, 1, 1, 1, 1, 0, 0],
            "tuesday": [1] * 24,
            "wednesday": [1] * 24,
            "thursday": [0] * 24,
            "friday": [1] * 24,
            "saturday": [1] * 24,
            "sunday": [1] * 24,
        },
        "environment": env,
    }

    # Per-user (working_hours overrides, preferences overrides). This
    # replaces twelve near-identical copy-paste blocks; user_id is the
    # list index. Duplicated entries (6/7, 9/10) are intentional - they
    # model pairs of interchangeable workers.
    overrides = [
        ({}, {"friday": [1] * 24}),                              # e0
        ({"tuesday": [0] * 24},
         {"monday": [0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,
                     1, 1, 1, 1, 0, 0]}),                        # e1
        ({"monday": [0] * 24}, {"friday": [1] * 24}),            # e2
        ({}, {}),                                                # e3
        ({}, {}),                                                # e4
        ({"wednesday": [0] * 24}, {}),                           # e5
        ({"tuesday": [0] * 24, "monday": [0] * 24},
         {"wednesday": [0] * 24}),                               # e6
        ({"tuesday": [0] * 24, "monday": [0] * 24},
         {"wednesday": [0] * 24}),                               # e7
        ({"tuesday": [0] * 24, "monday": [0] * 24},
         {"monday": [0] * 24, "tuesday": [0] * 24,
          "wednesday": [0] * 24}),                               # e8
        ({"tuesday": [1] * 24, "monday": [1] * 24},
         {"monday": [1] * 24, "tuesday": [1] * 24,
          "wednesday": [1] * 24}),                               # e9
        ({"tuesday": [1] * 24, "monday": [1] * 24},
         {"monday": [1] * 24, "tuesday": [1] * 24,
          "wednesday": [1] * 24}),                               # e10
        ({}, {"wednesday": [1] * 24}),                           # e11
    ]

    employees = []
    for user_id, (working_hours, preferences) in enumerate(overrides):
        attrs = deepcopy(employee_attributes)
        attrs["user_id"] = user_id
        attrs["working_hours"].update(working_hours)
        attrs["preferences"].update(preferences)
        employees.append(Employee(**attrs))

    # Load the canned shift data.
    # (Fixed: removed a stray debug "print" statement and a redundant
    # close() - the "with" block already closes the file.)
    with open(os.path.dirname(os.path.realpath(__file__)) +
              "/tune_data/shifts.json") as json_data:
        shifts_raw = json.load(json_data)

    shifts = [Shift(s) for s in shifts_raw]

    a = Assign(env, employees, shifts)
    model = a._calculate(return_unsolved_model_for_tuning=True)

    # We're only tuning one model right now
    model.params.tuneResults = 1
    model.params.tuneTimeLimit = config.MAX_TUNING_TIME

    # For tuning - turn this back on for fun
    model.setParam("OutputFlag", True)

    # Tune the model
    model.tune()

    if model.tuneResultCount > 0:
        logger.info("Tuning completed")

        # Load the best tuned parameters into the model
        model.getTuneResult(0)

        # Write tuned parameters to a file
        model.write(config.TUNE_FILE)
        logger.info("Wrote tuning to file %s" % config.TUNE_FILE)

        # Solve the model using the tuned parameters
        model.optimize()
    else:
        logger.warning("No tuning completed")