def update_single_event(self, updates, original):
    """Reschedule a single (non-recurring) Event.

    If the Event is "in use" (it has Planning items or has been posted),
    the original is duplicated and the original is marked rescheduled,
    pointing at the duplicate via ``reschedule_to``. Otherwise the Event
    is simply moved back to the draft workflow state.

    :param updates: dict of changes to apply (mutated in place)
    :param original: the stored Event document
    """
    events_service = get_resource_service('events')
    has_plannings = events_service.has_planning_items(original)

    remove_lock_information(updates)
    # 'reason' is consumed here and must not be persisted on the Event itself
    reason = updates.pop('reason', None)

    # An Event is considered in use when it has Planning coverage or a pubstatus
    event_in_use = has_plannings or (original.get('pubstatus') or '') != ''
    if event_in_use or original.get('state') == WORKFLOW_STATE.POSTPONED:
        if event_in_use:
            # If the Event is in use, then we will duplicate the original
            # and set the original's status to `rescheduled`
            duplicated_event_id = self._duplicate_event(updates, original, events_service)
            updates['reschedule_to'] = duplicated_event_id
            set_actioned_date_to_event(updates, original)
        else:
            updates['actioned_date'] = None

        self._mark_event_rescheduled(updates, reason, not event_in_use)

    # A free (unused) Event is returned to draft rather than kept rescheduled
    if not event_in_use:
        updates['state'] = WORKFLOW_STATE.DRAFT

    if has_plannings:
        self._reschedule_event_plannings(original, reason)

    self.set_planning_schedule(updates)
def update_recurring_events(self, updates, original, update_method):
    """Apply a new start time/duration to events of a recurring series.

    Each occurrence keeps its own calendar day; only the local wall-clock
    start time and the duration are taken from ``updates`` and re-applied
    per occurrence (converted through the series' original timezone).

    :param updates: changes for the selected Event (mutated in place)
    :param original: the stored Event the user edited
    :param update_method: UPDATE_FUTURE or all — which part of the series to touch
    """
    historic, past, future = self.get_recurring_timeline(original)

    # Determine if the selected event is the first one, if so then
    # act as if we're changing future events
    if len(historic) == 0 and len(past) == 0:
        update_method = UPDATE_FUTURE

    if update_method == UPDATE_FUTURE:
        new_series = [original] + future
    else:
        new_series = past + [original] + future

    # Release the Lock on the selected Event
    remove_lock_information(updates)

    # Get the timezone from the original Event (as the series was created with that timezone in mind)
    timezone = original['dates']['tz']

    # First find the hour and minute of the start date in local time
    start_time = utc_to_local(timezone, updates['dates']['start']).time()

    # Next convert that to seconds since midnight (which gives us a timedelta instance)
    delta_since_midnight = datetime.combine(date.min, start_time) - datetime.min

    # And calculate the new duration of the events
    duration = updates['dates']['end'] - updates['dates']['start']

    for event in new_series:
        if not event.get(config.ID_FIELD):
            continue

        # The selected Event reuses `updates` so the caller's patch carries the
        # new dates; every other occurrence gets its own fresh updates dict
        new_updates = {'dates': deepcopy(event['dates'])} \
            if event.get(config.ID_FIELD) != original.get(config.ID_FIELD) else updates

        # Calculate midnight in local time for this occurrence
        start_of_day_local = utc_to_local(timezone, event['dates']['start'])\
            .replace(hour=0, minute=0, second=0)

        # Then convert midnight in local time to UTC
        start_date_time = local_to_utc(timezone, start_of_day_local)

        # Finally add the delta since midnight
        start_date_time += delta_since_midnight

        # Set the new start and end times
        new_updates['dates']['start'] = start_date_time
        new_updates['dates']['end'] = start_date_time + duration

        # Set '_planning_schedule' on the Event item
        self.set_planning_schedule(new_updates)

        # Sibling occurrences are patched directly; skip_on_update prevents
        # the service's own on_update hook from re-processing them
        if event.get(config.ID_FIELD) != original.get(config.ID_FIELD):
            new_updates['skip_on_update'] = True
            self.patch(event[config.ID_FIELD], new_updates)
            app.on_updated_events_update_time(new_updates, {'_id': event[config.ID_FIELD]})
def update(self, id, updates, original):
    """Complete an assignment (or confirm availability for non-text coverage).

    Sets ``assigned_to.state`` to completed, persists the change, records
    history, pushes a websocket notification and notifies the assignor.

    :param id: assignment id
    :param updates: changes to apply (mutated in place)
    :param original: the stored assignment document
    :return: the updated assignment item
    """
    user = get_user(required=True).get(config.ID_FIELD, '')
    session = get_auth().get(config.ID_FIELD, '')

    # NOTE(review): deep-copies the whole document to read one key;
    # assumes `assigned_to` is always present on the original — TODO confirm
    updates['assigned_to'] = deepcopy(original).get('assigned_to')

    # If we are confirming availability, save the revert state for revert action
    coverage_type = original.get('planning', {}).get('g2_content_type')
    if coverage_type != 'text':
        updates['assigned_to']['revert_state'] = updates['assigned_to']['state']

    updates['assigned_to']['state'] = ASSIGNMENT_WORKFLOW_STATE.COMPLETED
    remove_lock_information(updates)

    item = self.backend.update(self.datasource, id, updates, original)

    # Save history if user initiates complete
    if coverage_type == 'text':
        get_resource_service('assignments_history').on_item_complete(updates, original)
    else:
        get_resource_service('assignments_history').on_item_confirm_availability(updates, original)

    push_notification('assignments:completed',
                      item=str(original[config.ID_FIELD]),
                      planning=original.get('planning_item'),
                      assigned_user=(original.get('assigned_to') or {}).get('user'),
                      assigned_desk=(original.get('assigned_to') or {}).get('desk'),
                      assignment_state=ASSIGNMENT_WORKFLOW_STATE.COMPLETED,
                      user=str(user),
                      session=str(session),
                      coverage=original.get('coverage_item'))

    # Send notification that the work has been completed
    # Determine the display name of the assignee
    assigned_to_user = get_resource_service('users').find_one(req=None, _id=user)
    assignee = assigned_to_user.get('display_name') if assigned_to_user else 'Unknown'
    PlanningNotifications().notify_assignment(
        target_user=str(original.get('assigned_to', {}).get('assignor_user')),
        message='{{coverage_type}} coverage \"{{slugline}}\" has been '
                'completed by {{assignee}}',
        assignee=assignee,
        coverage_type=get_coverage_type_name(original.get('planning', {}).get('g2_content_type', '')),
        slugline=original.get('planning', {}).get('slugline'),
        omit_user=True)
    return item
def _set_event_cancelled(updates, original, occur_cancel_state):
    """Mark the event update payload as cancelled and release its lock.

    Raises a bad-request error when the original event is not in a
    state that permits cancellation.
    """
    if not EventsCancelService.validate_states(original):
        raise SuperdeskApiError.badRequestError(
            'Event not in valid state for cancellation')

    remove_lock_information(updates)

    # Record the cancellation outcome on the update payload
    updates['state'] = WORKFLOW_STATE.CANCELLED
    updates['occur_status'] = occur_cancel_state
    updates['state_reason'] = updates.get('reason')
def _set_event_postponed(updates, original):
    """Mark the event as postponed and append an editorial note.

    The note records the postponement (and optional reason) and is
    appended below any editorial note already present on the event.
    """
    postpone_reason = updates.get('reason', None)
    remove_lock_information(updates)
    updates['state'] = WORKFLOW_STATE.POSTPONED

    note = '''------------------------------------------------------------
Event Postponed
'''
    if postpone_reason is not None:
        note += 'Reason: {}\n'.format(postpone_reason)

    existing_note = original.get('ednote') or ''
    if len(existing_note) > 0:
        updates['ednote'] = original['ednote'] + '\n\n' + note
    else:
        updates['ednote'] = note
def on_update(self, updates, original):
    """Validate and prepare an assignment revert.

    Only non-text assignments that are already completed may be reverted;
    the assignment state is restored from ``revert_state`` (defaulting to
    assigned) and the lock is released.

    :raises SuperdeskApiError.forbiddenError: for text assignments or
        assignments that are not yet confirmed/completed.
    """
    coverage_type = original.get('planning', {}).get('g2_content_type')
    # Guard against a missing assigned_to dict instead of raising AttributeError
    assignment_state = (original.get('assigned_to') or {}).get('state')

    if coverage_type == 'text':
        raise SuperdeskApiError.forbiddenError('Cannot revert text assignments.')

    if assignment_state != ASSIGNMENT_WORKFLOW_STATE.COMPLETED:
        raise SuperdeskApiError.forbiddenError('Cannot revert an assignment which is not yet confirmed.')

    # Copy only the assigned_to sub-document rather than deep-copying the
    # entire original just to extract one key
    updates['assigned_to'] = deepcopy(original.get('assigned_to'))
    updates['assigned_to']['state'] = updates['assigned_to'].get(
        'revert_state', ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)
    updates['assigned_to']['revert_state'] = None
    remove_lock_information(updates)
def update_recurring_events(self, updates, original, update_method): """Spike events in a recurring series Based on the update_method provided, spikes 'future' or 'all' events in the series. Historic events, i.e. events that have already occurred, will not be spiked. """ # Ensure that no other Event or Planning item is currently locked events_with_plans = self._validate_recurring(original, original['recurrence_id']) historic, past, future = self.get_recurring_timeline(original, postponed=True, cancelled=True) # Mark item as unlocked directly in order to avoid more queries and notifications # coming from lockservice. remove_lock_information(updates) self._spike_event(updates, original) # Determine if the selected event is the first one, if so then # act as if we're changing future events if len(historic) == 0 and len(past) == 0: update_method = UPDATE_FUTURE if update_method == UPDATE_FUTURE: spiked_events = future else: spiked_events = past + future notifications = [] for event in spiked_events: if not self._can_spike(event, events_with_plans): continue new_updates = {'skip_on_update': True} self._spike_event(new_updates, event) item = self.patch(event[config.ID_FIELD], new_updates) app.on_updated_events_spike(new_updates, event) notifications.append({ 'id': event[config.ID_FIELD], 'etag': item['_etag'], 'revert_state': item['revert_state'] }) updates['_spiked_items'] = notifications
def on_update(self, updates, original):
    """Validate and prepare an assignment revert (service-backed variant).

    Delegates the text/action checks to the assignments service, then
    restores the assignment state from ``revert_state`` (defaulting to
    assigned) and releases the lock.

    :raises SuperdeskApiError.forbiddenError: for text assignments or
        assignments that are not yet confirmed/completed.
    """
    # Guard against a missing assigned_to dict instead of raising AttributeError
    assignment_state = (original.get('assigned_to') or {}).get('state')
    assignments_service = get_resource_service('assignments')
    assignments_service.validate_assignment_action(original)

    if assignments_service.is_text_assignment(original):
        raise SuperdeskApiError.forbiddenError(
            'Cannot revert text assignments.')

    if assignment_state != ASSIGNMENT_WORKFLOW_STATE.COMPLETED:
        raise SuperdeskApiError.forbiddenError(
            'Cannot revert an assignment which is not yet confirmed.')

    # Copy only the assigned_to sub-document rather than deep-copying the
    # entire original just to extract one key
    updates['assigned_to'] = deepcopy(original.get('assigned_to'))
    updates['assigned_to']['state'] = updates['assigned_to'].get(
        'revert_state', ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)
    updates['assigned_to']['revert_state'] = None
    remove_lock_information(updates)
def _convert_to_recurring_event(self, updates, original):
    """Convert a single event to a series of recurring events

    Generates the whole series from the merged original+updates, keeps the
    original as the first occurrence when its date matches, otherwise
    reschedules the original to the first generated date.

    :param updates: changes for the original Event (mutated in place)
    :param original: the stored single Event being converted
    :return: the newly generated sibling events (excluding the original)
    """
    self._validate_convert_to_recurring(updates, original)
    updates['recurrence_id'] = generate_guid(type=GUID_NEWSML)

    merged = copy.deepcopy(original)
    merged.update(updates)

    # Generated new events will be "draft"
    merged[ITEM_STATE] = WORKFLOW_STATE.DRAFT

    generated_events = generate_recurring_events(merged)
    updated_event = generated_events.pop(0)

    # Check to see if the first generated event is different from original
    # If yes, mark original as rescheduled with generated recurrence_id
    if updated_event['dates']['start'].date() != original['dates']['start'].date():
        # Reschedule original event
        updates['update_method'] = UPDATE_SINGLE
        event_reschedule_service = get_resource_service('events_reschedule')
        updates['dates'] = updated_event['dates']
        set_planning_schedule(updates)
        event_reschedule_service.update_single_event(updates, original)
        if updates.get('state') == WORKFLOW_STATE.RESCHEDULED:
            history_service = get_resource_service('events_history')
            history_service.on_reschedule(updates, original)
    else:
        # Original event falls as a part of the series
        # Remove the first element in the list (the current event being updated)
        # And update the start/end dates to be in line with the new recurring rules
        updates['dates']['start'] = updated_event['dates']['start']
        updates['dates']['end'] = updated_event['dates']['end']
        set_planning_schedule(updates)
        remove_lock_information(item=updates)

    # Create the new events and generate their history
    self.create(generated_events)
    app.on_inserted_events(generated_events)

    return generated_events
def _set_event_cancelled(updates, original, occur_cancel_state):
    """Mark the event as cancelled and append an editorial note.

    Validates the original's state, releases the lock, records the
    cancellation state/occurrence status and appends a 'Event Cancelled'
    note (with optional reason) to any existing editorial note.
    """
    if not EventsCancelService.validate_states(original):
        raise SuperdeskApiError.badRequestError(
            'Event not in valid state for cancellation')

    cancel_reason = updates.get('reason', None)
    remove_lock_information(updates)

    updates['state'] = WORKFLOW_STATE.CANCELLED
    updates['occur_status'] = occur_cancel_state

    note = '''------------------------------------------------------------
Event Cancelled
'''
    if cancel_reason is not None:
        note += 'Reason: {}\n'.format(cancel_reason)

    existing_note = original.get('ednote') or ''
    if len(existing_note) > 0:
        updates['ednote'] = original['ednote'] + '\n\n' + note
    else:
        updates['ednote'] = note
def remove_assignment(self, assignment_item, unlock_planning=False):
    """Detach an assignment from its planning item's coverage.

    Clears ``assigned_to`` on the matching coverage, resets its workflow
    status to draft, notifies the assigned user/desk, and saves the
    planning item. When ``unlock_planning`` is set, lock metadata is
    stripped from the planning update as well.

    :raises SuperdeskApiError.badRequestError: when the referenced
        coverage cannot be found on the planning item.
    :return: the updated planning item, or None when it does not exist.
    """
    planning_item = self.find_one(req=None, _id=assignment_item.get('planning_item'))
    if not planning_item:
        return None

    coverage_id = assignment_item.get('coverage_item')
    coverages = planning_item.get('coverages') or []
    coverage_item = next(
        (c for c in coverages if c.get('coverage_id') == coverage_id),
        None)
    if coverage_item is None:
        raise SuperdeskApiError.badRequestError('Coverage does not exist')

    assigned_to = assignment_item.get('assigned_to')
    PlanningNotifications().notify_assignment(
        coverage_status=coverage_item.get('workflow_status'),
        target_desk=assigned_to.get('desk') if assigned_to.get('user') is None else None,
        target_user=assigned_to.get('user'),
        message='The {{coverage_type}} assignment {{slugline}} has been removed',
        coverage_type=get_coverage_type_name(
            coverage_item.get('planning', {}).get('g2_content_type', '')),
        slugline=planning_item.get('slugline', ''))

    # Detach the assignment and return the coverage to draft
    coverage_item['assigned_to'] = None
    coverage_item['workflow_status'] = WORKFLOW_STATE.DRAFT

    updates = {'coverages': coverages}
    if unlock_planning:
        remove_lock_information(updates)

    return self.update(planning_item[config.ID_FIELD], updates, planning_item)
def update_single_event(self, updates, original):
    """Spike a single (non-recurring) event.

    Validates the original, releases the lock on the update payload,
    then applies the spike changes.
    """
    self._validate(original)
    # Drop lock metadata so the spiked item is not left locked
    remove_lock_information(updates)
    self._spike_event(updates, original)
def update(self, id, updates, original):
    """Complete an assignment, possibly on behalf of an external application.

    Merges the incoming assigned_to changes into the stored assignment,
    advances the assignment state to completed (saving a revert state for
    non-text coverage), persists, publishes the planning item, records
    history, pushes a websocket notification and notifies the assignor.

    :param id: assignment id
    :param updates: changes to apply (mutated in place); may carry
        'proxy_user' when driven by an external application
    :param original: the stored assignment document
    :return: the updated assignment item
    """
    # if the completion is being done by an external application then ensure that it is not locked
    if 'proxy_user' in updates:
        if original.get('lock_user'):
            raise SuperdeskApiError.forbiddenError('Assignment is locked')
        user = updates.pop('proxy_user', None)
        proxy_user = True
    else:
        user = get_user(required=True).get(config.ID_FIELD, '')
        proxy_user = False

    session = get_auth().get(config.ID_FIELD, '')

    # Merge incoming assigned_to changes over a copy of the stored values.
    # NOTE(review): assumes the original always carries assigned_to — a
    # missing dict would raise AttributeError on .update(); TODO confirm
    original_assigned_to = deepcopy(original).get('assigned_to')
    if not updates.get('assigned_to'):
        updates['assigned_to'] = {}
    original_assigned_to.update(updates['assigned_to'])
    updates['assigned_to'] = original_assigned_to
    assignments_service = get_resource_service('assignments')

    # If we are confirming availability, save the revert state for revert action
    text_assignment = assignments_service.is_text_assignment(original)
    if not text_assignment:
        updates['assigned_to']['revert_state'] = updates['assigned_to']['state']

    updates['assigned_to']['state'] = get_next_assignment_status(updates, ASSIGNMENT_WORKFLOW_STATE.COMPLETED)

    remove_lock_information(updates)

    item = self.backend.update(self.datasource, id, updates, original)

    # publish the planning item
    assignments_service.publish_planning(original['planning_item'])

    # Save history if user initiates complete
    if text_assignment:
        get_resource_service('assignments_history').on_item_complete(updates, original)
    else:
        if proxy_user:
            updates['proxy_user'] = user
        get_resource_service('assignments_history').on_item_confirm_availability(updates, original)

    push_notification('assignments:completed',
                      item=str(original[config.ID_FIELD]),
                      planning=original.get('planning_item'),
                      assigned_user=(original.get('assigned_to') or {}).get('user'),
                      assigned_desk=(original.get('assigned_to') or {}).get('desk'),
                      assignment_state=ASSIGNMENT_WORKFLOW_STATE.COMPLETED,
                      user=str(user),
                      session=str(session),
                      coverage=original.get('coverage_item'))

    # Send notification that the work has been completed
    # Determine the display name of the assignee
    assigned_to_user = get_resource_service('users').find_one(req=None, _id=user)
    assignee = assigned_to_user.get('display_name') if assigned_to_user else 'Unknown'

    # Prefer notifying the assigning user, falling back to the assigning desk
    target_user = original.get('assigned_to', {}).get('assignor_user')
    if target_user is None:
        target_user = original.get('assigned_to', {}).get('assignor_desk')

    PlanningNotifications().notify_assignment(
        target_user=target_user,
        message='assignment_fulfilled_msg',
        assignee=assignee,
        coverage_type=get_coverage_type_name(original.get('planning', {}).get('g2_content_type', '')),
        slugline=original.get('planning', {}).get('slugline'),
        omit_user=True,
        assignment_id=original[config.ID_FIELD],
        is_link=True,
        no_email=True)
    return item
def update_recurring_events(self, updates, original, update_method):
    """Reschedule part (or all) of a recurring series of Events.

    Regenerates the occurrence dates from the updated recurring rule and
    reconciles the existing series against them: events that still fall on
    a new date are updated in place, dates with no existing event get new
    events created, and events whose date disappeared are marked
    rescheduled (when in use) or deleted outright.

    :param updates: changes for the selected Event (mutated in place)
    :param original: the stored Event the user selected
    :param update_method: UPDATE_FUTURE or all
    """
    remove_lock_information(updates)

    rules_changed = updates['dates']['recurring_rule'] != original['dates']['recurring_rule']
    times_changed = updates['dates']['start'] != original['dates']['start'] or \
        updates['dates']['end'] != original['dates']['end']
    reason = updates.pop('reason', None)
    events_service = get_resource_service('events')

    historic, past, future = self.get_recurring_timeline(original, postponed=True)

    # Determine if the selected event is the first one, if so then
    # act as if we're changing future events
    if len(historic) == 0 and len(past) == 0:
        update_method = UPDATE_FUTURE

    if update_method == UPDATE_FUTURE:
        rescheduled_events = [original] + future

        new_start_date = updates['dates']['start']
        original_start_date = original['dates']['start']
        original_rule = original['dates']['recurring_rule']
    else:
        rescheduled_events = past + [original] + future

        # Assign the date from the beginning of the new series
        new_start_date = updates['dates']['start']
        original_start_date = past[0]['dates']['start']
        original_rule = past[0]['dates']['recurring_rule']

    # When repeating by count, shrink the count by the occurrences that are
    # deliberately being left untouched (historic and, for 'future', past ones)
    updated_rule = deepcopy(updates['dates']['recurring_rule'])
    if updated_rule['endRepeatMode'] == 'count':
        num_events = len(historic) + len(past) + len(future) + 1
        updated_rule['count'] -= num_events - len(rescheduled_events)

    # Compute the difference between start and end in the updated event
    time_delta = updates['dates']['end'] - updates['dates']['start']

    # Generate the dates for the new event series (capped at 200 occurrences)
    new_dates = [date for date in islice(generate_recurring_dates(
        start=new_start_date,
        tz=updates['dates'].get('tz') and pytz.timezone(updates['dates']['tz'] or None),
        date_only=True,
        **updated_rule
    ), 0, 200)]

    # Generate the dates for the original events
    original_dates = [date for date in islice(generate_recurring_dates(
        start=original_start_date,
        tz=original['dates'].get('tz') and pytz.timezone(original['dates']['tz'] or None),
        date_only=True,
        **original_rule
    ), 0, 200)]

    self.set_next_occurrence(updates)

    dates_processed = []

    # Iterate over the current events in the series and delete/spike
    # or update the event accordingly
    deleted_events = {}
    for event in rescheduled_events:
        # The selected Event is compared on its NEW date; siblings on their stored date
        if event[config.ID_FIELD] == original[config.ID_FIELD]:
            event_date = updates['dates']['start'].replace(tzinfo=None).date()
        else:
            event_date = event['dates']['start'].replace(tzinfo=None).date()

        # If the event does not occur in the new dates, then we need to either
        # delete or spike this event
        if event_date not in new_dates:
            # Add it to the list of events to delete or spike
            # This is done later so that we can perform a single
            # query against mongo, rather than one per deleted event
            deleted_events[event[config.ID_FIELD]] = event

        # If the date has already been processed, then we should mark this event for deletion
        # This occurs when the selected Event is being updated to an Event that already exists
        # in another Event in the series.
        # This stops multiple Events to occur on the same day
        elif event_date in new_dates and event_date in dates_processed:
            deleted_events[event[config.ID_FIELD]] = event

        # Otherwise this Event does occur in the new dates
        else:
            # Because this Event occurs in the new dates, then we are not to set the state to 'rescheduled',
            # instead we set it to either 'scheduled' (if public) or 'draft' (if not public)
            new_state = WORKFLOW_STATE.SCHEDULED if event.get('pubstatus') else WORKFLOW_STATE.DRAFT

            # If this is the selected Event, then simply update the fields and
            # Reschedule associated Planning items
            if event[config.ID_FIELD] == original[config.ID_FIELD]:
                self._mark_event_rescheduled(updates, original, reason, True)
                updates['state'] = new_state
                self._reschedule_event_plannings(event, reason, state=WORKFLOW_STATE.DRAFT)
            else:
                new_updates = {
                    'reason': reason,
                    'skip_on_update': True
                }
                self._mark_event_rescheduled(new_updates, event, reason)
                new_updates['state'] = new_state

                # Update the 'start', 'end' and 'recurring_rule' fields of the Event
                if rules_changed or times_changed:
                    new_updates['state'] = new_state
                    new_updates['dates'] = event['dates']
                    new_updates['dates']['start'] = datetime.combine(event_date, updates['dates']['start'].time())
                    new_updates['dates']['end'] = new_updates['dates']['start'] + time_delta
                    new_updates['dates']['recurring_rule'] = updates['dates']['recurring_rule']
                    self.set_planning_schedule(new_updates)

                # And finally update the Event, and Reschedule associated Planning items
                self.patch(event[config.ID_FIELD], new_updates)
                self._reschedule_event_plannings(event, reason, state=WORKFLOW_STATE.DRAFT)
                app.on_updated_events_reschedule(new_updates, {'_id': event[config.ID_FIELD]})

            # Mark this date as being already processed
            dates_processed.append(event_date)

    # Create new events that do not fall on the original occurrence dates
    # NOTE(review): the loop variable `date` shadows the imported `date`
    # type used earlier via `date.min` elsewhere in this module — verify
    new_events = []
    for date in new_dates:
        # If the new date falls on the original occurrences, or if the
        # start date of the selected one, then skip this date occurrence
        if date in original_dates or date in dates_processed:
            continue

        # Create a copy of the metadata to use for the new event
        new_event = deepcopy(original)
        new_event.update(deepcopy(updates))

        # Remove fields not required by the new events
        for key in list(new_event.keys()):
            if key.startswith('_'):
                new_event.pop(key)
            elif key.startswith('lock_'):
                new_event.pop(key)

        # Set the new start and end dates, as well as the _id and guid fields
        new_event['dates']['start'] = datetime.combine(date, updates['dates']['start'].time())
        new_event['dates']['end'] = new_event['dates']['start'] + time_delta
        new_event[config.ID_FIELD] = new_event['guid'] = generate_guid(type=GUID_NEWSML)
        new_event.pop('reason', None)
        self.set_planning_schedule(new_event)

        # And finally add this event to the list of events to be created
        new_events.append(new_event)

    # Now iterate over the new events and create them
    if new_events:
        events_service.create(new_events)
        app.on_inserted_events(new_events)

    # Iterate over the events to delete/spike
    self._set_events_planning(deleted_events)

    for event in deleted_events.values():
        event_plans = event.get('_plans', [])
        is_original = event[config.ID_FIELD] == original[config.ID_FIELD]
        if len(event_plans) > 0 or event.get('pubstatus', None) is not None:
            if is_original:
                self._mark_event_rescheduled(updates, original, reason)
            else:
                # This event has Planning items, so spike this event and
                # all Planning items
                new_updates = {
                    'skip_on_update': True,
                    'reason': reason
                }
                # NOTE(review): passes `original` here where the sibling loop
                # above passes `event` — looks like it should be `event`; verify
                self._mark_event_rescheduled(new_updates, original, reason)
                self.patch(event[config.ID_FIELD], new_updates)

            if len(event_plans) > 0:
                # NOTE(review): reschedules plans of `original`, not `event`,
                # even though `event_plans` belong to `event` — verify intended
                self._reschedule_event_plannings(original, reason, event_plans)
        else:
            # This event has no Planning items, therefor we can safely
            # delete this event
            events_service.delete_action(lookup={'_id': event[config.ID_FIELD]})
            app.on_deleted_item_events(event)

            if is_original:
                updates['_deleted'] = True
def _update_rules(event, updated_rules):
    """Build an updates dict that applies new recurring rules to an event.

    The event's existing dates are deep-copied so the caller's document is
    never mutated, and lock metadata is stripped from the resulting payload.

    :param event: the stored event whose dates are the starting point
    :param updated_rules: the new recurring_rule to apply
    :return: an updates dict ready to be patched onto the event
    """
    new_dates = deepcopy(event['dates'])
    new_dates['recurring_rule'] = deepcopy(updated_rules)

    updates = {'dates': new_dates}
    remove_lock_information(updates)
    return updates
def on_update(self, updates, original):
    """Update the repetition rules of an entire recurring series.

    Regenerates the occurrence dates under the new rule and reconciles the
    series: kept occurrences get the new rule, missing dates spawn new
    events, and removed occurrences are deleted or cancelled. Reposts the
    series when the original was posted.

    :param updates: changes for the selected Event (mutated in place)
    :param original: the stored Event the user edited
    """
    user_id = get_user_id()
    if user_id:
        updates['version_creator'] = user_id

    # If `skip_on_update` is provided in the updates
    # Then return here so no further processing is performed on this event.
    if 'skip_on_update' in updates:
        return

    # We only validate the original event,
    # not the events that are automatically updated by the system
    self.validate(updates, original)
    remove_lock_information(updates)

    updated_rule = deepcopy(updates['dates']['recurring_rule'])
    original_rule = deepcopy(original['dates']['recurring_rule'])

    existing_events = self._get_series(original)
    # Both date sets are generated from the FIRST event of the series so
    # the two series stay anchored to the same starting point
    first_event = existing_events[0]

    new_dates = [
        date for date in generate_recurring_dates(
            start=first_event.get('dates', {}).get('start'),
            tz=updates['dates'].get('tz') and pytz.timezone(updates['dates']['tz'] or None),
            **updated_rule)
    ]
    original_dates = [
        date for date in generate_recurring_dates(
            start=first_event.get('dates', {}).get('start'),
            tz=original['dates'].get('tz') and pytz.timezone(original['dates']['tz'] or None),
            **original_rule)
    ]

    # Compute the difference between start and end in the updated event
    time_delta = original['dates']['end'] - original['dates']['start']
    events_service = get_resource_service('events')
    deleted_events = {}
    new_events = []

    # Update the recurring rules for EVERY event in the series
    # Also if we're decreasing the length of the series, then
    # delete or mark the Event as cancelled.
    for event in existing_events:
        # if the event does not occur in the new dates, then we need to either
        # delete or cancel this event
        if event['dates']['start'].replace(tzinfo=None) not in new_dates:
            deleted_events[event[config.ID_FIELD]] = event

        # Otherwise this Event does occur in the new dates
        # So just update the recurring_rule to match the new series recurring_rule
        else:
            self._update_event(updated_rule, event)

    # Create new events that do not fall on the original series
    for date in new_dates:
        if date not in original_dates:
            new_events.append(self._create_event(date, updates, original, time_delta))

    # Now iterate over the new events and create them
    if new_events:
        events_service.create(new_events)
        for event in new_events:
            get_resource_service('events_history').on_update_repetitions(
                event,
                event[config.ID_FIELD],
                'update_repetitions_create')

    # Iterate over the events to delete/cancel
    self._set_events_planning(deleted_events)
    for event in deleted_events.values():
        self._delete_event(event, events_service, updated_rule)

    # if the original event was "posted" then post the new generated events
    if original.get('pubstatus') in [POST_STATE.CANCELLED, POST_STATE.USABLE]:
        post = {
            'event': original[config.ID_FIELD],
            'etag': original['_etag'],
            'update_method': 'all',
            'pubstatus': original.get('pubstatus'),
            'repost_on_update': True
        }
        get_resource_service('events_post').post([post])
def _set_event_postponed(updates):
    """Flag the update payload as postponed, recording the user's reason."""
    # Capture the reason before mutating the payload any further
    postpone_reason = updates.get('reason', None)
    remove_lock_information(updates)
    updates.update({
        'state': WORKFLOW_STATE.POSTPONED,
        'state_reason': postpone_reason,
    })
def on_update(self, updates, original):
    """Populate assignment details on the update payload and release the lock."""
    self.set_assignment(updates, original)
    # Drop lock metadata so the item is unlocked once the update is saved
    remove_lock_information(updates)
def update_single_event(self, updates, original):
    """Prepare a single (non-recurring) event time update."""
    # Release the lock held on the selected event
    remove_lock_information(updates)
    # Recompute the '_planning_schedule' entries for the new dates
    self.set_planning_schedule(updates)