def transition_to_unpublished(self):
    """Move this schedule into the "unpublished" state.

    Legal only from "initial" or "chomp-processing":
      * from "initial", the role's recurring shifts are materialized
        into concrete shifts for this week;
      * from "chomp-processing", the chomp timer is stopped and the
        location managers are emailed that the scaffold is ready.

    Commits the state change and busts the schedule/shift caches.
    Raises Exception when called from any other state.
    """
    prior_state = self.state
    if prior_state not in ["initial", "chomp-processing"]:
        raise Exception(
            "Schedule is in incorrect state for being set to unpublished")

    if prior_state == "initial":
        current_app.logger.info(
            "Creating fixed shifts for schedule %s" % self.id)
        # Materialize each recurring shift template for this role
        templates = recurring_shift_model.RecurringShift.query.filter_by(
            role_id=self.role_id)
        for template in templates:
            template.create_shift2_for_schedule2(self.id)

    if prior_state == "chomp-processing":
        # Stop the chomp timer; duration is logged below
        self.chomp_end = datetime.utcnow()

    self.state = "unpublished"
    db.session.commit()
    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info(
        "Schedule %s set to state %s" % (self.id, self.state))
    if self.chomp_start and self.chomp_end:
        elapsed = (self.chomp_end - self.chomp_start).total_seconds()
        current_app.logger.info(
            "Schedule id %s Chomp calculation took %s seconds." %
            (self.id, elapsed))

    if prior_state == "chomp-processing":
        # Alert managers that the computed scaffold is ready for review
        role = Role.query.get(self.role_id)
        loc = Location.query.get(role.location_id)
        org = organization_model.Organization.query.get(loc.organization_id)
        week_start = self.start.strftime("%Y-%m-%d")
        week = self.start.strftime("%b %-d")
        subject = "[Alert] Shift Scaffold Computed for %s - Week of %s in %s" % (
            role.name, week, loc.name)
        message = "The %s %s shift scaffold for the week of %s in %s has been finished:" % (
            org.name, role.name, week, loc.name)
        url = url_for('manager.manager_app', org_id=org.id, _external=True) \
            + "#locations/%s/scheduling/%s" % (loc.id, week_start)
        loc.send_manager_email(subject, message, url)
def transition_to_chomp_queue(self):
    """Queue this schedule for chomp (shift-scaffold) calculation.

    Legal from "unpublished" or from "chomp-processing" (a requeue).
    Commits the new state and busts the caches; raises Exception from
    any other state.
    """
    permitted = ["unpublished", "chomp-processing"]
    if self.state not in permitted:
        raise Exception(
            "Schedule is in incorrect state for being added to chomp queue")

    self.state = "chomp-queue"
    db.session.commit()

    # Cached copies would otherwise keep serving the old state
    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info(
        "Schedule %s set to state %s" % (self.id, self.state))
def transition_to_mobius_processing(self):
    """Promote this schedule from "mobius-queue" to "mobius-processing".

    Records the mobius start timestamp (UTC), commits, and busts the
    caches. Raises Exception when not currently in "mobius-queue".
    """
    if self.state != "mobius-queue":
        raise Exception(
            "Schedule is in incorrect state for being promoted to mobius-processing"
        )

    self.state = "mobius-processing"
    # Start the timer used for the duration log in transition_to_published
    self.mobius_start = datetime.utcnow()
    db.session.commit()

    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info(
        "Schedule %s set to state %s" % (self.id, self.state))
def transition_to_mobius_queue(self):
    """Queue this schedule for mobius (automatic assignment) processing.

    NOTE(review): mobius is currently disabled — this method
    deliberately short-circuits and publishes the schedule directly
    (transition_to_published enforces its own state checks). The
    original queueing logic (state validation against
    ["unpublished", "mobius-processing"], setting state to
    "mobius-queue", commit, cache busting, logging) sat after an
    unconditional return and was unreachable dead code; it has been
    removed. Restore it from version control if mobius is re-enabled.
    """
    current_app.logger.info("Skipping mobius and publishing")
    self.transition_to_published()
def create(role_id, start, stop, state="initial", demand=None, force_send=False):
    """ Create and return a new schedule. Don't send notifications.

    Start is a datetime object with pytz UTC as tz_info. Both start and
    stop must fall exactly at local midnight on the org's configured
    first day of the week, roughly one week apart, and must not overlap
    any existing schedule for the role. Raises Exception on any
    validation failure.
    """
    role = Role.query.get(role_id)
    loc = Location.query.get(role.location_id)
    org = organization_model.Organization.query.get(loc.organization_id)

    # Inactive orgs cannot get new schedules unless explicitly forced
    if not org.active and not force_send:
        raise Exception("Cannot create a schedule of inactive org")

    if state not in Schedule2.VALID_STATES:
        raise Exception("Cannot create a schedule in an invalid state")

    # Demand, when supplied, must be a valid days-of-week structure;
    # it is persisted as a JSON string
    if demand is not None:
        if not verify_days_of_week_struct(demand):
            raise Exception(
                "Cannot create a schedule with poorly structured demand")
        else:
            demand = json.dumps(demand)

    # cast start to local time
    local_tz = loc.timezone_pytz
    localized_start = start.astimezone(local_tz)
    localized_stop = stop.astimezone(local_tz)

    # Check if correct day of week
    if localized_start.strftime(
            "%A").lower() != org.day_week_starts.lower():
        raise Exception("Schedule starts on incorrect day of week for org")

    # Check if correct day of week
    if localized_stop.strftime(
            "%A").lower() != org.day_week_starts.lower():
        raise Exception("Schedule stops on incorrect day of week for org")

    # check that the local start time is exactly midnight
    if not all(x == 0 for x in [
            localized_start.hour, localized_start.minute,
            localized_start.second, localized_start.microsecond
    ]):
        raise Exception(
            "Schedule start is not exactly midnight in local time %s" %
            localized_start)

    # check that the local stop time is exactly midnight
    if not all(x == 0 for x in [
            localized_stop.hour, localized_stop.minute,
            localized_stop.second, localized_stop.microsecond
    ]):
        raise Exception(
            "Schedule stop is not exactly midnight in local time %s" %
            localized_stop)

    # the total duration of the week must be between a reasonable min/max (to consider DST changes)
    duration_seconds = (stop - start).total_seconds()
    week_seconds = timedelta(days=7).total_seconds()
    search_window = timedelta(hours=2).total_seconds()
    if not ((week_seconds - search_window) < duration_seconds <
            (week_seconds + search_window)):
        raise Exception(
            "Duration between start and end is incorrect (%s seconds)" %
            duration_seconds)

    # sql alchemy gets grumpy when a timezone is added in with the datetime
    # from here on out, tzinfo will be None, and start/stop are in UTC
    start = start.replace(tzinfo=None)
    stop = stop.replace(tzinfo=None)

    # make query to see if any overlapping schedules - there should be none
    # (covers: existing schedule contains [start, stop); existing starts
    # inside it; existing stops inside it)
    overlapping_schedules = Schedule2.query \
        .filter(
            Schedule2.role_id == role_id,
            or_(
                and_(
                    Schedule2.start <= start,
                    Schedule2.stop >= stop
                ),
                and_(
                    Schedule2.start >= start,
                    Schedule2.start < stop,
                ),
                and_(
                    Schedule2.stop > start,
                    Schedule2.stop <= stop
                )
            )
        ).all()

    if len(overlapping_schedules) > 0:
        raise Exception(
            "Overlapping schedule found for role id %s between %s and %s" %
            (role_id, start, stop))

    schedule = Schedule2(
        role_id=role_id,
        start=start,
        stop=stop,
        state=state,
        demand=demand,
    )
    db.session.add(schedule)
    db.session.commit()
    Schedules2Cache.delete(role.id)
    current_app.logger.info(
        "Schedule Created: start %s / org %s (%s) / location %s (%s) / role %s (%s)"
        % (start, org.id, org.name, loc.id, loc.name, role.id, role.name))
    return schedule
def transition_to_published(self):
    """ publish a schedule

    Legal from "unpublished" or "mobius-processing". Marks every shift
    for the role inside [self.start, self.stop) as published, commits,
    busts caches, and sends notification emails (managers only on the
    automatic mobius transition; workers always) for schedules that
    have not yet ended. Raises Exception from any other state.
    """
    if self.state not in ["unpublished", "mobius-processing"]:
        raise Exception(
            "Schedule is in incorrect state for being published")

    previous_state = self.state
    if self.state == "mobius-processing":
        # Stop the mobius timer; duration is logged below
        self.mobius_end = datetime.utcnow()

    self.state = "published"
    db.session.commit()

    # Publish all of the role's shifts that fall within this schedule
    shifts_to_publish = shift2_model.Shift2.query \
        .filter(
            shift2_model.Shift2.role_id == self.role_id,
            shift2_model.Shift2.start >= self.start,
            shift2_model.Shift2.stop < self.stop
        ).all()
    for shift in shifts_to_publish:
        shift.published = True
    db.session.commit()

    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info("Schedule %s set to state %s" %
                            (self.id, self.state))
    if self.mobius_start and self.mobius_end:
        mobius_processing_time = (
            self.mobius_end - self.mobius_start).total_seconds()
        current_app.logger.info(
            "Schedule id %s mobius calculation took %s seconds." %
            (self.id, mobius_processing_time))

    # prepare for email notifications
    role = Role.query.get(self.role_id)
    loc = Location.query.get(role.location_id)
    org = organization_model.Organization.query.get(loc.organization_id)
    week_start = self.start.strftime("%Y-%m-%d")
    week = self.start.strftime("%b %-d")
    subject = "Schedule published for %s - Week of %s in %s" % (
        role.name, week, loc.name)
    message = "The %s %s schedule for the week of %s in %s is now published:" % (
        org.name, role.name, week, loc.name)

    # only send manager emails if the schedule is automatically being
    # transitioned and its in the future
    if previous_state == "mobius-processing" and self.stop > datetime.utcnow(
    ):
        url = url_for('manager.manager_app', org_id=org.id, _external=True) \
            + "#locations/%s/scheduling/%s" % (loc.id, week_start)
        # Don't block emails if one fail — BUGFIX: the original comment
        # promised this but the call was unguarded, so a manager-email
        # failure aborted all worker notifications below
        try:
            loc.send_manager_email(subject, message, url)
        except Exception as e:
            current_app.logger.warning(
                "Failed manager email send in 'transition_to_published' - location id %s - %s"
                % (loc.id, e))

    # users always get notified upon publishing
    workers = user_model.User.query \
        .join(RoleToUser) \
        .filter(
            RoleToUser.role_id == self.role_id,
            RoleToUser.archived == False
        ) \
        .all()

    # only send alerts for future schedules
    if self.stop > datetime.utcnow():
        for worker in workers:
            url = url_for('myschedules.myschedules_app',
                          org_id=org.id,
                          location_id=loc.id,
                          role_id=self.role_id,
                          user_id=worker.id,
                          _external=True)\
                + "#week/%s" % week_start
            try:
                worker.send_email(subject, render_template(
                    "email/notification-email.html",
                    user=worker,
                    message=message,
                    url=url))
            except Exception as e:
                current_app.logger.warning(
                    "Failed email send to manager in 'transition_to_published' - user id %s - email %s - %s"
                    % (worker.id, worker.email, e))
def patch(self, org_id, location_id, role_id, schedule_id):
    """Modify a schedule: demand, state, and/or min/max shift lengths.

    Accepts JSON fields "demand", "state", "min_shift_length_hour",
    "max_shift_length_hour". Hour values are stored internally as
    half-hour counts. State changes trigger the corresponding
    transition_to_* method on the schedule. Returns the applied
    changes dict, or ({"message": ...}, 400) on validation failure.
    """
    schedule = Schedule2.query.get_or_404(schedule_id)
    org = Organization.query.get_or_404(org_id)

    parser = reqparse.RequestParser()
    parser.add_argument("demand", type=str)
    parser.add_argument("state", type=str)
    parser.add_argument("min_shift_length_hour", type=int)
    parser.add_argument("max_shift_length_hour", type=int)
    changes = parser.parse_args(strict=True)

    # Filter out null values
    changes = dict((k, v) for k, v in changes.iteritems() if v is not None)

    original_state = schedule.state

    if len(changes) == 0:
        return {"message": "No valid changes detected"}, 400

    # schedule can only be modified from initial or unpublished state if not sudo
    if not g.current_user.is_sudo():
        if original_state not in ["initial", "unpublished"]:
            return {
                "message":
                "You are not able to modify a schedule from its current state."
            }, 400

    # Resolve effective min/max half-hour lengths (request value wins,
    # otherwise fall back to what's already on the schedule)
    if "min_shift_length_hour" in changes:
        min_shift_length_half_hour = changes["min_shift_length_hour"] * 2
    else:
        min_shift_length_half_hour = schedule.min_shift_length_half_hour

    if "max_shift_length_hour" in changes:
        max_shift_length_half_hour = changes["max_shift_length_hour"] * 2
    else:
        max_shift_length_half_hour = schedule.max_shift_length_half_hour

    # now verification
    # NOTE that if we choose to support lengths of 0, these 1st two checks will break
    # because None and 0 get evalulated as the same
    if bool(min_shift_length_half_hour) != bool(max_shift_length_half_hour):
        return {
            "message":
            "min_shift_length_hour and max_shift_length_hour most both be defined"
        }, 400

    if min_shift_length_half_hour and max_shift_length_half_hour:
        if min_shift_length_half_hour > max_shift_length_half_hour:
            return {
                "message":
                "min_shift_length_hour cannot be greater than max_shift_length_hour"
            }, 400

    # NOTE(review): the bound of 46 half-hours is 23 hours, but the
    # messages below say "between 1 and 24" — confirm intended bound
    if min_shift_length_half_hour:
        if not (1 <= min_shift_length_half_hour <= 46):
            return {
                "message": "min_shift_length_hour must be between 1 and 24"
            }, 400

    if max_shift_length_half_hour:
        if not (1 <= max_shift_length_half_hour <= 46):
            return {
                "message": "max_shift_length_hour must be between 1 and 24"
            }, 400

    # Rewrite the hour fields to the half-hour columns actually stored
    if "min_shift_length_hour" in changes:
        del changes["min_shift_length_hour"]
        changes["min_shift_length_half_hour"] = min_shift_length_half_hour

    if "max_shift_length_hour" in changes:
        del changes["max_shift_length_hour"]
        changes["max_shift_length_half_hour"] = max_shift_length_half_hour

    if "demand" in changes:
        # admins can only modify demand in the unpublished state
        if not g.current_user.is_sudo():
            if changes.get("state", schedule.state) not in [
                    "unpublished", "chomp-queue"
            ]:
                return {
                    "message":
                    "Admins can only modify demand when the schedule is in the unpublished state."
                }, 400

        # demand can be set to None when it is sent down without a value in the request
        # (not "") will resolve to True, which we consider None - assume json for all other cases
        if not changes["demand"]:
            changes["demand"] = None
        else:
            try:
                demand = json.loads(changes.get("demand"))
            except:
                return {"message": "Unable to parse demand json body"}, 400

            if demand is None or not isinstance(demand, dict):
                return {"message": "Unable to parse demand json body"}, 400

            # Check that days of week are right
            if not verify_days_of_week_struct(demand):
                return {"message": "demand is improperly formatted"}, 400

            try:
                changes["demand"] = json.dumps(demand)
            except Exception as exception:
                return {"message": "Unable to parse demand json body"}, 400

            g.current_user.track_event("updated_demand")

    if "state" in changes:
        state = changes.get("state")

        if state == original_state:
            return {
                "message": "Schedule is already in state %s." % state
            }, 400

        if state not in [
                "unpublished", "chomp-queue", "mobius-queue", "published"
        ]:
            # BUGFIX: message previously said "or 'done'" although the
            # accepted value is "published"
            return {
                "message":
                "State can only be updated to 'unpublished', 'chomp-queue', 'mobius-queue' or 'published'."
            }, 400

        if not org.active:
            return {
                "message":
                "This organization must be active for a state change"
            }, 400

        if state == "chomp-queue":
            if not changes.get("min_shift_length_half_hour",
                               schedule.min_shift_length_half_hour):
                return {
                    "message":
                    "min_shift_length_hour must be set for chomp queue"
                }, 400

            if not changes.get("max_shift_length_half_hour",
                               schedule.max_shift_length_half_hour):
                return {
                    "message":
                    "max_shift_length_hour must be set for chomp queue"
                }, 400

            if original_state not in ["unpublished", "chomp-processing"]:
                return {"message": "This state change is not allowed"}, 400

            # reset timing measurements - although they will soon be reset, the monitoring timing
            # may be inaccurate for the duration of calculation (e.g. a requeue)
            changes["chomp_start"] = None
            changes["chomp_end"] = None

            if not g.current_user.is_sudo():
                g.current_user.track_event("chomp_schedule_calculation")

            schedule.transition_to_chomp_queue()

        elif state == "published":
            if original_state not in ["unpublished", "mobius-processing"]:
                return {"message": "This state change is not allowed"}, 400

            schedule.transition_to_published()

            if not g.current_user.is_sudo():
                g.current_user.track_event("published_schedule")

        elif state == "mobius-queue":
            if original_state not in ["unpublished", "mobius-processing"]:
                return {"message": "This state change is not allowed"}, 400

            # reset timing measurements - although they will soon be reset, the monitoring timing
            # may be inaccurate for the duration of calculation (e.g. a requeue)
            changes["mobius_start"] = None
            changes["mobius_end"] = None

            schedule.transition_to_mobius_queue()

        elif state == "unpublished":
            if original_state not in ["initial", "chomp-processing"]:
                # BUGFIX: this error return was missing the 400 status
                # code that every sibling error return carries
                return {
                    "message":
                    "Schedule cannot be set to unpublished from its current state"
                }, 400

            schedule.transition_to_unpublished()

    # Apply remaining field changes directly to the model
    for change, value in changes.iteritems():
        try:
            setattr(schedule, change, value)
            db.session.commit()
        except Exception as exception:
            db.session.rollback()
            current_app.logger.exception(str(exception))
            abort(400)

    Schedules2Cache.delete(role_id)
    return changes
def get(self, org_id, location_id, role_id):
    """List schedules for a role, optionally windowed by start/end.

    Query parameters "start" and "end" are ISO 8601 timestamps:
    "start" keeps schedules beginning at or after it, "end" keeps
    schedules beginning strictly before it. Marshalled results are
    cached per role. Returns ({"message": ...}, 400) on a bad
    timestamp.
    """
    parser = reqparse.RequestParser()
    parser.add_argument("start", type=str)
    parser.add_argument("end", type=str)
    parameters = parser.parse_args(strict=True)

    # Filter out null values
    parameters = dict(
        (k, v) for k, v in parameters.iteritems() if v is not None)

    response = {
        API_ENVELOPE: [],
    }

    # Serve marshalled schedules from cache when possible
    schedules = Schedules2Cache.get(role_id)
    if schedules is None:
        rows = Schedule2.query \
            .filter_by(role_id=role_id) \
            .order_by(
                Schedule2.start.asc(),
            ) \
            .all()
        schedules = map(
            lambda row: marshal(row, schedule_fields), rows)
        Schedules2Cache.set(role_id, schedules)

    default_tz = get_default_tz()

    if "start" in parameters:
        try:
            start = iso8601.parse_date(parameters.get("start"))
        except iso8601.ParseError:
            return {
                "message":
                "Start time parameter needs to be in ISO 8601 format"
            }, 400
        else:
            # Normalize to the default timezone for comparison
            start = (start + start.utcoffset()).replace(tzinfo=default_tz)

        # run a filter to only keep schedules that occur after start
        schedules = filter(
            lambda item: iso8601.parse_date(item.get("start")).replace(
                tzinfo=default_tz) >= start,
            schedules)

    if "end" in parameters:
        try:
            end = iso8601.parse_date(parameters.get("end"))
        except iso8601.ParseError:
            return {
                "message":
                "End time parameter time needs to be in ISO 8601 format"
            }, 400
        else:
            # Normalize to the default timezone for comparison
            end = (end + end.utcoffset()).replace(tzinfo=default_tz)

        schedules = filter(
            lambda item: iso8601.parse_date(item.get("start")).replace(
                tzinfo=default_tz) < end,
            schedules)

    response["data"] = schedules
    return response