def delete(self, org_id, location_id, role_id, shift_id):
    """Delete a shift and notify the affected worker.

    Returns ({}, 204) on success; aborts with 400 if the database
    delete fails.
    """
    shift = Shift2.query.get_or_404(shift_id)
    location = Location.query.get_or_404(location_id)

    # Cache everything we still need after the delete. Once the row is
    # deleted and the session committed, reading attributes off the ORM
    # object is unsafe (expired/deleted instance).
    user_id = shift.user_id
    shift_start = shift.start
    shift_published = shift.published

    # check if a schedule exists during this time - if so, bust the cache
    schedule = Schedule2.query \
        .filter(
            Schedule2.role_id == role_id,
            Schedule2.start <= shift_start,
            Schedule2.stop > shift_start,
        ).first()

    try:
        db.session.delete(shift)
        db.session.commit()
    except Exception as exception:
        db.session.rollback()
        current_app.logger.error(str(exception))
        abort(400)

    # clear cache
    if schedule is not None:
        Shifts2Cache.delete(schedule.id)

    # Notify the worker unless they deleted their own shift.
    # BUGFIX: use the cached values here — the original compared against
    # shift.user_id / read shift.start and shift.published after the row
    # had already been deleted and committed.
    if (g.current_user.id != user_id) and shift_published:
        default_tz = get_default_tz()
        local_tz = location.timezone_pytz
        local_datetime = default_tz.localize(
            shift_start).astimezone(local_tz)
        alert_changed_shift(org_id, location_id, role_id, local_datetime,
                            user_id)

    g.current_user.track_event("deleted_shift")
    return {}, 204
def transition_to_unpublished(self):
    """ transition state to unpublished

    Valid starting states are "initial" and "chomp-processing".
    From "initial", recurring shifts for the role are materialized into
    this schedule; from "chomp-processing", the chomp end timestamp is
    recorded and a "scaffold computed" email goes to the managers.
    Raises a plain Exception on any other starting state.
    """
    initial_state = self.state

    # Guard: only these two states may move to "unpublished"
    if initial_state not in ["initial", "chomp-processing"]:
        raise Exception(
            "Schedule is in incorrect state for being set to unpublished")

    if initial_state == "initial":
        current_app.logger.info("Creating fixed shifts for schedule %s" %
                                self.id)

        # Find recurring shifts
        recurring_shifts = recurring_shift_model.RecurringShift.query.filter_by(
            role_id=self.role_id)

        # Create shifts for this week
        for recurring_shift in recurring_shifts:
            recurring_shift.create_shift2_for_schedule2(self.id)

    if initial_state == "chomp-processing":
        # record when the chomp solver finished, for timing metrics below
        self.chomp_end = datetime.utcnow()

    self.state = "unpublished"
    db.session.commit()

    # schedule state is embedded in both caches, so bust them
    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info("Schedule %s set to state %s" %
                            (self.id, self.state))

    if self.chomp_start and self.chomp_end:
        chomp_processing_time = (
            self.chomp_end - self.chomp_start).total_seconds()
        current_app.logger.info(
            "Schedule id %s Chomp calculation took %s seconds." %
            (self.id, chomp_processing_time))

    # Email managers that the shift scaffold is ready — only when we came
    # out of chomp processing (not on the "initial" path).
    if initial_state == "chomp-processing":
        role = Role.query.get(self.role_id)
        loc = Location.query.get(role.location_id)
        org = organization_model.Organization.query.get(
            loc.organization_id)

        week_start = self.start.strftime("%Y-%m-%d")
        week = self.start.strftime("%b %-d")

        subject = "[Alert] Shift Scaffold Computed for %s - Week of %s in %s" % (
            role.name, week, loc.name)
        message = "The %s %s shift scaffold for the week of %s in %s has been finished:" % (
            org.name, role.name, week, loc.name)

        # deep link into the manager app for this location/week
        url = url_for('manager.manager_app', org_id=org.id, _external=True) \
            + "#locations/%s/scheduling/%s" % (loc.id, week_start)

        loc.send_manager_email(subject, message, url)
def transition_to_chomp_queue(self):
    """Move this schedule into the chomp queue.

    Only schedules currently "unpublished" or "chomp-processing" may be
    queued; anything else raises.
    """
    permitted = ("unpublished", "chomp-processing")
    if self.state not in permitted:
        raise Exception(
            "Schedule is in incorrect state for being added to chomp queue")

    self.state = "chomp-queue"
    db.session.commit()

    # both caches embed schedule state — invalidate them
    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info(
        "Schedule %s set to state %s" % (self.id, self.state))
def transition_to_mobius_processing(self):
    """Promote a queued schedule to active mobius processing.

    Records the processing start time; raises unless the schedule is
    currently in the "mobius-queue" state.
    """
    if self.state != "mobius-queue":
        raise Exception(
            "Schedule is in incorrect state for being promoted to mobius-processing"
        )

    # flip state and stamp when processing began
    self.mobius_start = datetime.utcnow()
    self.state = "mobius-processing"
    db.session.commit()

    # both caches embed schedule state — invalidate them
    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info(
        "Schedule %s set to state %s" % (self.id, self.state))
def unassign_overlapping_shifts(self):
    """Unassign the user's future shifts that overlap this time off span.

    A shift conflicts when it fully contains the [start, stop) window,
    starts inside it, or ends inside it. Each conflicting shift is set
    to unassigned and the covering schedule's shift cache is busted.
    """
    mapping = RoleToUser.query.get(self.role_to_user_id)

    # The three ways a shift can collide with the time off window
    contains_window = and_(
        shift2_model.Shift2.start <= self.start,
        shift2_model.Shift2.stop >= self.stop
    )
    starts_inside = and_(
        shift2_model.Shift2.start >= self.start,
        shift2_model.Shift2.start < self.stop,
    )
    ends_inside = and_(
        shift2_model.Shift2.stop > self.start,
        shift2_model.Shift2.stop <= self.stop
    )

    conflicts = shift2_model.Shift2.query \
        .filter(
            shift2_model.Shift2.role_id == mapping.role_id,
            shift2_model.Shift2.user_id == mapping.user_id,
            shift2_model.Shift2.start >= datetime.utcnow(),
            or_(contains_window, starts_inside, ends_inside)
        ).all()

    for conflict in conflicts:
        current_app.logger.info(
            "Setting shift %s to unassigned because it overlaps with an approved time off request for User %s"
            % (conflict.id, conflict.user_id))
        conflict.user_id = None

        # clear cache too
        covering_schedule = schedule2_model.Schedule2.query \
            .filter(
                schedule2_model.Schedule2.role_id == mapping.role_id,
                schedule2_model.Schedule2.start <= conflict.start,
                schedule2_model.Schedule2.stop > conflict.start,
            ).first()
        if covering_schedule is not None:
            Shifts2Cache.delete(covering_schedule.id)
def transition_to_mobius_queue(self):
    """ transition to queue for mobius processing """
    # Mobius processing is intentionally bypassed: the schedule goes
    # straight to "published". The queueing logic that used to follow the
    # early return here (state guard, "mobius-queue" assignment, commit,
    # cache busts) was unreachable dead code and has been removed —
    # restore it from version control if mobius is ever re-enabled.
    current_app.logger.info("Skipping mobius and publishing")
    self.transition_to_published()
def create_shift2_for_schedule2(self, schedule_id):
    """ creates a shift2 for the week according to the recurring shift

    Computes the shift's local wall-clock start from the schedule's
    start, the recurring shift's day-of-week/hour/minute, converts back
    to UTC, and creates `quantity` copies (unassigning any that overlap
    the target user's existing shifts).
    """
    # get org, location, and schedule models
    org = organization_model.Organization.query \
        .join(Location) \
        .join(Role) \
        .filter(
            Role.id == self.role_id,
            Location.id == Role.location_id,
            organization_model.Organization.id == Location.organization_id
        ) \
        .first()

    # get location for the timezone data
    location = Location.query \
        .join(Role) \
        .filter(
            Role.id == self.role_id,
            Location.id == Role.location_id
        ) \
        .first()

    schedule = schedule2_model.Schedule2.query.get(schedule_id)
    local_tz = location.timezone_pytz
    default_tz = get_default_tz()

    # get start and stop time for the shift
    start_local = default_tz.localize(schedule.start).astimezone(local_tz)

    # adjust start to fall on the correct day of the week
    ordered_week = org.get_ordered_week()
    adjust_days = ordered_week.index(self.start_day)

    # Build the naive local wall-clock start, then localize it with pytz.
    # BUGFIX: datetime.replace() never raises pytz.AmbiguousTimeError and
    # does not accept an is_dst argument, so the old
    # `try: replace(...) except AmbiguousTimeError: replace(..., is_dst=False)`
    # was dead code whose handler would itself raise TypeError. DST
    # ambiguity is only reported by localize(..., is_dst=None).
    naive_start = (start_local + timedelta(days=adjust_days)).replace(
        tzinfo=None, hour=self.start_hour, minute=self.start_minute)
    try:
        start_local = local_tz.localize(naive_start, is_dst=None)
    except pytz.AmbiguousTimeError:
        # clocks fell back: this wall time occurs twice — pick non-DST
        start_local = local_tz.localize(naive_start, is_dst=False)
    except pytz.NonExistentTimeError:
        # clocks sprang forward over this wall time — pick non-DST side
        start_local = local_tz.localize(naive_start, is_dst=False)

    stop_local = start_local + timedelta(minutes=self.duration_minutes)

    # convert start and end back to utc time
    start_utc = start_local.astimezone(default_tz).replace(tzinfo=None)
    stop_utc = stop_local.astimezone(default_tz).replace(tzinfo=None)

    # shifts inherit published state from the schedule
    published = (schedule.state == "published")

    for _ in range(self.quantity):
        new_shift = shift2_model.Shift2(
            start=start_utc,
            stop=stop_utc,
            role_id=self.role_id,
            published=published,
            user_id=self.user_id)

        # check if shift overlaps - make it unassigned if an overlap
        if self.user_id is not None:
            if new_shift.has_overlaps():
                new_shift.user_id = None

        db.session.add(new_shift)
        db.session.commit()

    # flush the shift cache
    Shifts2Cache.delete(schedule_id)
    current_app.logger.info(
        "Created shift for recurring shift %s during schedule %s" %
        (self.id, schedule_id))
def patch(self, org_id, location_id, role_id, shift_id):
    """Modify a shift (start/stop, assignee, published state, description).

    Ordinary workers may only claim an unassigned shift for themselves;
    sudo users and org admins / location managers may make any change.
    Returns the applied changes dict (200) on success, or a 4xx with an
    error message.
    """
    parser = reqparse.RequestParser()
    parser.add_argument("start", type=str)
    parser.add_argument("stop", type=str)
    parser.add_argument("user_id", type=int)
    parser.add_argument("published", type=inputs.boolean)
    parser.add_argument("description", type=str)
    changes = parser.parse_args(strict=True)

    # Filter out null values
    changes = dict((k, v) for k, v in changes.iteritems() if v is not None)

    # BUGFIX: use get_or_404 — a plain .get() returned None for unknown
    # shifts and crashed with a 500 on the attribute accesses below.
    shift = Shift2.query.get_or_404(shift_id)
    shift_copy = deepcopy(shift)  # scratch copy used for overlap checks
    org = Organization.query.get(org_id)
    location = Location.query.get(location_id)
    role_to_user = None

    default_tz = get_default_tz()
    local_tz = location.timezone_pytz

    user_id = changes.get("user_id", shift.user_id)
    if user_id != shift.user_id:
        # Need this for later
        old_user_id = shift.user_id
    else:
        old_user_id = None

    # Check if user is in that role
    if user_id is not None and user_id != 0:
        role_to_user = RoleToUser.query.filter_by(
            user_id=user_id,
            role_id=role_id,
        ).first_or_404()

    # People that are not Sudo or Org Admins cannot do anything
    # except claim a shift.
    # (But a worker that's also, say, sudo can do so!)
    if not (g.current_user.is_sudo() or
            g.current_user.is_org_admin_or_location_manager(
                org_id, location_id)):

        # User claiming a shift!

        # Check that it's the only change being made
        if set(("user_id", )) != set(changes):
            return {
                "message": "You are only allowed to claim unassigned shifts"
            }, 400

        # this user must be active to claim
        if role_to_user:
            if role_to_user.archived:
                abort(404)

        # This user can only claim shifts for themself
        if user_id != g.current_user.id:
            return {
                "message":
                "You are not permitted to assign a shift to somebody else"
            }, 400

        # And the shift must be currently unclaimed
        if shift.user_id != 0 and shift.user_id is not None:
            return {"message": "Shift already claimed"}, 400

        # the shift cannot be in the past
        if shift.is_in_past:
            return {"message": "Shift is in the past"}, 400

        # And the user cannot claim the shift if it overlaps
        shift_copy.user_id = user_id
        if shift_copy.has_overlaps():
            return {
                "message": "Shift overlaps with an existing shift"
            }, 400

        # Users on boss cannot claim if it violates caps and org
        # doesn't allow exceeding
        if (org.is_plan_boss() and
                not org.workers_can_claim_shifts_in_excess_of_max and
                not shift_copy.is_within_caps(user_id)):
            return {"message": "This shift breaks existing limits"}, 400

        current_app.logger.info("User %s is claiming shift %s" %
                                (user_id, shift.id))

    # admin or sudo only

    # get start and stop values
    if "start" in changes:
        try:
            start = iso8601.parse_date(changes.get("start"))
        except iso8601.ParseError:
            return {
                "message": "Start time needs to be in ISO 8601 format"
            }, 400
        else:
            start = (start + start.utcoffset()).replace(tzinfo=default_tz)
    else:
        start = shift.start.replace(tzinfo=default_tz)

    # get new or current stop value
    if "stop" in changes:
        try:
            stop = iso8601.parse_date(changes.get("stop"))
        except iso8601.ParseError:
            return {
                "message": "Stop time needs to be in ISO 8601 format"
            }, 400
        else:
            stop = (stop + stop.utcoffset()).replace(tzinfo=default_tz)
    else:
        stop = shift.stop.replace(tzinfo=default_tz)

    # stop can't be before start
    if start >= stop:
        return {"message": "Stop time must be after start time"}, 400

    # shifts are limited to 23 hours in length
    if int((stop - start).total_seconds()) > constants.MAX_SHIFT_LENGTH:
        return {
            "message": "Shifts cannot be more than %s hours long" %
            (constants.MAX_SHIFT_LENGTH / constants.SECONDS_PER_HOUR)
        }, 400

    # Unassigned shifts need to be converted to None in db
    if user_id == 0:
        user_id = None
        changes["user_id"] = None

    # assume always checking for overlap except for 3 cases
    # 1) shift was and still will be unassigned
    # 2) shift is becoming unassigned
    # 3) only published state is being changed
    overlap_check = True

    # shift was, and still is unassigned
    if shift.user_id is None and "user_id" not in changes:
        overlap_check = False

    # shift is becoming unassigned, don't need to check
    if "user_id" in changes and (user_id is None or user_id == 0):
        overlap_check = False

    # only published being modified, don't care
    if set(("published", )) == set(changes):
        overlap_check = False

    # a person cannot have overlapping shifts
    if overlap_check:
        shift_copy.start = start.replace(tzinfo=None)
        shift_copy.stop = stop.replace(tzinfo=None)
        shift_copy.user_id = user_id

        # check for overlap - don't need to check for in past here
        if shift_copy.has_overlaps():
            return {
                "message": "Shift overlaps with an existing shift"
            }, 400

    # start/stop need to be in isoformat for committing changes
    if "start" in changes:
        changes["start"] = start.isoformat()
    if "stop" in changes:
        changes["stop"] = stop.isoformat()

    if "description" in changes:
        if len(changes["description"]) > Shift2.MAX_DESCRIPTION_LENGTH:
            # (typo fixed: "cannot be", was "cannot me")
            return {
                "message": "Description cannot be more than %s characters" %
                Shift2.MAX_DESCRIPTION_LENGTH
            }, 400

    # apply each change; commit per attribute so a failure identifies it
    for change, value in changes.iteritems():
        try:
            setattr(shift, change, value)
            db.session.commit()
        except Exception as exception:
            db.session.rollback()
            current_app.logger.exception(str(exception))
            abort(400)

    g.current_user.track_event("modified_shift")

    # check if a schedule exists during this time - if so, bust the cache
    schedule = Schedule2.query \
        .filter(
            Schedule2.role_id == role_id,
            Schedule2.start <= shift.start,
            Schedule2.stop > shift.start,
        ).first()

    if schedule is not None:
        Shifts2Cache.delete(schedule.id)

    if shift.published and not shift.is_in_past:
        local_datetime = default_tz.localize(
            shift.start).astimezone(local_tz)

        # if shift became unassigned, send an email to notify workers
        if shift.user_id is None:
            # get all users who are eligible for the shift
            eligible_users = shift.get_users_within_caps()
            alert_available_shifts(
                org_id,
                location_id,
                role_id,
                local_datetime,
                eligible_users,
                exclude_id=old_user_id)

        if old_user_id != shift.user_id:
            # old worker
            if g.current_user.id != old_user_id:
                alert_changed_shift(org_id, location_id, role_id,
                                    local_datetime, old_user_id)

            # new worker
            if g.current_user.id != shift.user_id:
                alert_changed_shift(
                    org_id,
                    location_id,
                    role_id,
                    local_datetime,
                    shift.user_id,
                )

    return changes
def transition_to_published(self):
    """ publish a schedule

    Valid starting states are "unpublished" and "mobius-processing".
    Marks the schedule and all shifts within its window as published,
    busts the caches, and sends email notifications (managers only when
    auto-transitioning out of mobius; workers always, for future weeks).
    """
    if self.state not in ["unpublished", "mobius-processing"]:
        raise Exception(
            "Schedule is in incorrect state for being published")

    # remembered so we can tell automatic (mobius) from manual publishing
    previous_state = self.state
    if self.state == "mobius-processing":
        self.mobius_end = datetime.utcnow()

    self.state = "published"
    db.session.commit()

    # Publish every shift that falls inside this schedule's window.
    # NOTE(review): the filter is start >= self.start and stop < self.stop
    # (strict upper bound) — shifts ending exactly at self.stop are
    # excluded; confirm that is intentional.
    shifts_to_publish = shift2_model.Shift2.query \
        .filter(
            shift2_model.Shift2.role_id == self.role_id,
            shift2_model.Shift2.start >= self.start,
            shift2_model.Shift2.stop < self.stop
        ).all()

    for shift in shifts_to_publish:
        shift.published = True

    db.session.commit()

    # schedule state is embedded in both caches, so bust them
    Schedules2Cache.delete(self.role_id)
    Shifts2Cache.delete(self.id)

    # logging
    current_app.logger.info("Schedule %s set to state %s" %
                            (self.id, self.state))

    if self.mobius_start and self.mobius_end:
        mobius_processing_time = (
            self.mobius_end - self.mobius_start).total_seconds()
        current_app.logger.info(
            "Schedule id %s mobius calculation took %s seconds." %
            (self.id, mobius_processing_time))

    # prepare for email notifications
    role = Role.query.get(self.role_id)
    loc = Location.query.get(role.location_id)
    org = organization_model.Organization.query.get(loc.organization_id)

    week_start = self.start.strftime("%Y-%m-%d")
    week = self.start.strftime("%b %-d")

    subject = "Schedule published for %s - Week of %s in %s" % (
        role.name, week, loc.name)
    message = "The %s %s schedule for the week of %s in %s is now published:" % (
        org.name, role.name, week, loc.name)

    # only send manager emails if the schedule is automatically being
    # transitioned and its in the future
    if previous_state == "mobius-processing" and self.stop > datetime.utcnow(
    ):
        # Don't block emails if one fail
        url = url_for('manager.manager_app', org_id=org.id, _external=True) \
            + "#locations/%s/scheduling/%s" % (loc.id, week_start)

        loc.send_manager_email(subject, message, url)

    # users always get notified upon publishing
    workers = user_model.User.query \
        .join(RoleToUser) \
        .filter(
            RoleToUser.role_id == self.role_id,
            RoleToUser.archived == False
        ) \
        .all()

    # only send alerts for future schedules
    if self.stop > datetime.utcnow():
        for worker in workers:
            # deep link to the worker's personal schedule for this week
            url = url_for('myschedules.myschedules_app', org_id=org.id,
                          location_id=loc.id, role_id=self.role_id,
                          user_id=worker.id, _external=True)\
                + "#week/%s" % week_start
            try:
                worker.send_email(subject, render_template(
                    "email/notification-email.html",
                    user=worker,
                    message=message,
                    url=url))
            except Exception as e:
                # NOTE(review): this is the worker loop, but the log text
                # says "manager" — message appears copy-pasted; confirm.
                current_app.logger.warning(
                    "Failed email send to manager in 'transition_to_published' - user id %s - email %s - %s"
                    % (worker.id, worker.email, e))
def post(self, org_id, location_id, role_id):
    """ create a new shift

    Validates start/stop (ISO 8601, ordering, max length), optional
    description and assignee, persists the shift, busts the schedule
    cache, and sends availability/change notifications for published
    future shifts. Returns the marshalled shift with a 201.
    """
    parser = reqparse.RequestParser()
    parser.add_argument("start", type=str, required=True)
    parser.add_argument("stop", type=str, required=True)
    parser.add_argument("user_id", type=int)
    parser.add_argument("published", type=inputs.boolean)
    parser.add_argument("description", type=str)
    parameters = parser.parse_args()

    # Filter out null values
    parameters = dict(
        (k, v) for k, v in parameters.iteritems() if v is not None)

    default_tz = get_default_tz()
    local_tz = Location.query.get(location_id).timezone_pytz

    # start time
    try:
        start = iso8601.parse_date(parameters.get("start"))
    except iso8601.ParseError:
        return {
            "message": "Start time needs to be in ISO 8601 format"
        }, 400
    else:
        start = (start + start.utcoffset()).replace(tzinfo=default_tz)

    # stop time
    try:
        stop = iso8601.parse_date(parameters.get("stop"))
    except iso8601.ParseError:
        return {"message": "Stop time needs to be in ISO 8601 format"}, 400
    else:
        stop = (stop + stop.utcoffset()).replace(tzinfo=default_tz)

    # stop can't be before start
    if start >= stop:
        return {"message": "Stop time must be after start time"}, 400

    # shifts are limited to 23 hours in length
    if int((stop - start).total_seconds()) > MAX_SHIFT_LENGTH:
        return {
            "message": "Shifts cannot be more than %s hours long" %
            (MAX_SHIFT_LENGTH / SECONDS_PER_HOUR)
        }, 400

    shift = Shift2(
        role_id=role_id,
        start=start,
        stop=stop,
        published=parameters.get("published", False))

    if "description" in parameters:
        description = parameters.get("description")
        if len(description) > Shift2.MAX_DESCRIPTION_LENGTH:
            # (typo fixed: "cannot be", was "cannot me")
            return {
                "message": "Description cannot be more than %s characters" %
                Shift2.MAX_DESCRIPTION_LENGTH
            }, 400
        shift.description = description

    user_id = parameters.get("user_id")

    # if user_id defined, and if not for unassigned shift, check if user is
    # in role and make sure it won't overlap with existing shifts
    if user_id is not None:
        if user_id > 0:
            role_to_user = RoleToUser.query.filter_by(
                user_id=user_id, role_id=role_id, archived=False).first()

            if role_to_user is None:
                return {
                    "message": "User does not exist or is not apart of role"
                }, 400

            # check if this shift can be assigned to the user
            shift.user_id = user_id
            if shift.has_overlaps():
                return {
                    "message": "This shift overlaps with an existing shift"
                }, 400

    db.session.add(shift)
    try:
        db.session.commit()
    except Exception as exception:
        # BUGFIX: was a bare `except: abort(500)` — roll back the failed
        # transaction and log the cause (matches the delete handler)
        db.session.rollback()
        current_app.logger.error(str(exception))
        abort(500)

    g.current_user.track_event("created_shift")

    # check if a schedule exists during this time - if so, bust the cache
    schedule = Schedule2.query \
        .filter(
            Schedule2.role_id == role_id,
            Schedule2.start <= shift.start,
            Schedule2.stop > shift.start,
        ).first()

    if schedule is not None:
        Shifts2Cache.delete(schedule.id)

    # timezone stuff
    local_datetime = default_tz.localize(shift.start).astimezone(local_tz)

    # only send emails if future and published
    if not shift.is_in_past and shift.published:

        # if shift is unassigned - alert people that it's available
        if shift.user_id is None:
            # get all users who are eligible for the shift
            eligible_users, _ = shift.eligible_users()
            alert_available_shifts(org_id, location_id, role_id,
                                   local_datetime, eligible_users)

        # Otherwise send an alert_changed_shift notification
        # (function has logic for whether to send)
        elif (g.current_user.id != shift.user_id):
            alert_changed_shift(org_id, location_id, role_id, local_datetime,
                                shift.user_id)

    return marshal(shift, shift_fields), 201
def flush_associated_shift_caches(self):
    """Invalidate the cached shifts of every schedule tied to one of this
    user's non-archived roles."""
    active_schedules = schedule2_model.Schedule2.query \
        .join(Role) \
        .join(RoleToUser) \
        .filter(
            RoleToUser.user_id == self.id,
            RoleToUser.archived == False,
        ) \
        .all()

    for sched in active_schedules:
        Shifts2Cache.delete(sched.id)
def delete(self, org_id, location_id, role_id, user_id):
    """Remove a user from a role (archive the membership).

    Archives the RoleToUser association, unassigns the user's future
    shifts in this role, denies their open future time off requests,
    unassigns their recurring shifts, closes any open timeclocks, then
    notifies the user by email. Returns ({}, 204).
    """
    user = User.query.get_or_404(user_id)
    role = Role.query.get_or_404(role_id)
    assoc = RoleToUser.query.filter_by(user_id=user.id,
                                       role_id=role.id).first()
    if assoc is None:
        abort(404)

    # already removed from the role
    if assoc.archived:
        abort(400)

    assoc.archived = True
    try:
        db.session.commit()
    except Exception as exception:
        # BUGFIX: was a bare `except: abort(500)` — roll back and log
        db.session.rollback()
        current_app.logger.error(str(exception))
        abort(500)

    location = Location.query.get(location_id)
    organization = Organization.query.get(org_id)

    # Set future shifts to unassigned
    # Be careful to not unassign them from other orgs!
    future_shifts = Shift2.query.filter(
        Shift2.user_id == user.id,
        Shift2.role_id == role_id,
        Shift2.start > datetime.datetime.utcnow(),
    ).all()

    for shift in future_shifts:
        shift.user_id = None

        # clear cache too
        schedule = Schedule2.query \
            .filter(
                Schedule2.role_id == role_id,
                Schedule2.start <= shift.start,
                Schedule2.stop > shift.start,
            ).first()
        if schedule is not None:
            Shifts2Cache.delete(schedule.id)

    # deny future time off requests that are open
    future_time_off_requests = TimeOffRequest.query \
        .filter_by(role_to_user_id=assoc.id) \
        .filter_by(state=None) \
        .filter(
            TimeOffRequest.start > datetime.datetime.utcnow(),
        ) \
        .all()

    for time_off_request in future_time_off_requests:
        time_off_request.state = "denied"

    # unassign all recurring shifts
    recurring_shifts = RecurringShift.query \
        .filter_by(
            role_id=role_id,
            user_id=user_id
        ) \
        .all()

    for recurring_shift in recurring_shifts:
        current_app.logger.info(
            "Setting recurring shift %s to unassigned because user %s is being removed from role %s"
            % (recurring_shift.id, user_id, role_id))
        recurring_shift.user_id = None

    # close open timeclocks, remembering originals for the change alerts
    timeclocks = Timeclock.query \
        .filter_by(
            role_id=role_id,
            user_id=user_id,
            stop=None
        ) \
        .all()

    closed_timeclocks = []
    for timeclock in timeclocks:
        original_start = timeclock.start
        original_stop = timeclock.stop
        timeclock.stop = datetime.datetime.utcnow()
        current_app.logger.info(
            "Closing timeclock %s because user %s is being removed from role %s"
            % (timeclock.id, user_id, role_id))
        closed_timeclocks.append((timeclock, original_start, original_stop))

    # BUGFIX: the shift unassignments, time off denials, recurring shift
    # unassignments, and timeclock closures above were never committed —
    # persist them before sending any notifications.
    try:
        db.session.commit()
    except Exception as exception:
        db.session.rollback()
        current_app.logger.error(str(exception))
        abort(500)

    for timeclock, original_start, original_stop in closed_timeclocks:
        alert_timeclock_change(timeclock, org_id, location_id, role_id,
                               original_start, original_stop, user,
                               g.current_user)

    alert_email(
        user,
        "You have been removed from a team at %s" % organization.name,
        "You have been removed from the team <b>%s</b> at the <b>%s</b> location of <b>%s</b>. This may happen as the scheduling manager changes your role or location."
        % (role.name, location.name, organization.name),
        force_send=True)

    g.current_user.track_event("deleted_role_member")
    return {}, 204