def process_queue_item(self, job_details):
    notification = model.get_enabled_notification(job_details['notification_uuid'])
    if notification is None:
        return

    event_name = notification.event_name
    method_name = notification.method_name

    try:
        event_handler = NotificationEvent.get_event(event_name)
        method_handler = NotificationMethod.get_method(method_name)
    except InvalidNotificationMethodException as ex:
        logger.exception('Cannot find notification method: %s', str(ex))
        raise JobException('Cannot find notification method: %s' % str(ex))
    except InvalidNotificationEventException as ex:
        logger.exception('Cannot find notification event: %s', str(ex))
        raise JobException('Cannot find notification event: %s' % str(ex))

    if event_handler.should_perform(job_details['event_data'], notification):
        try:
            method_handler.perform(notification, event_handler, job_details)
            model.reset_number_of_failures_to_zero(notification)
        except (JobException, KeyError) as exc:
            model.increment_notification_failure_count(notification)
            raise exc
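
# A hedged sketch of the queue payload process_queue_item above consumes. The
# 'notification_uuid' and 'event_data' keys come from the handler itself; the
# concrete values (and the 'repository' field) are hypothetical, since the exact
# shape of the event payload depends on the event type.
example_notification_job = {
    'notification_uuid': '0f1e2d3c-0000-0000-0000-000000000000',  # hypothetical UUID
    'event_data': {
        'repository': 'devtable/simple',  # hypothetical event payload field
    },
}
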
def perform_notification_work(self, data, layer_limit=_LAYER_LIMIT):
    """
    Performs the work for handling a security notification as referenced by the given data
    object.

    Returns True on successful handling, False on non-retryable failure and raises a
    JobException on retryable failure.
    """
    notification_name = data["Name"]
    current_page = data.get("page", None)
    handler = SecurityNotificationHandler(layer_limit)

    while True:
        # Retrieve the current page of notification data from the security scanner.
        (response_data, should_retry) = secscan_api.get_notification(
            notification_name, layer_limit=layer_limit, page=current_page
        )

        # If no response, something went wrong.
        if response_data is None:
            if should_retry:
                raise JobException()
            else:
                # Remove the job from the API.
                logger.error("Failed to handle security notification %s", notification_name)
                secscan_api.mark_notification_read(notification_name)

                # Return to mark the job as "complete", as we'll never be able to finish it.
                return False

        # Extend processing on the queue item so it doesn't expire while we're working.
        self.extend_processing(_PROCESSING_SECONDS, json.dumps(data))

        # Process the notification data.
        notification_data = response_data["Notification"]
        result = handler.process_notification_page_data(notification_data)

        # Possible states after processing: failed to process, finished processing entirely
        # or finished processing the page.
        if result == ProcessNotificationPageResult.FAILED:
            # Something went wrong.
            raise JobException()

        if result == ProcessNotificationPageResult.FINISHED_PROCESSING:
            # Mark the notification as read.
            if not secscan_api.mark_notification_read(notification_name):
                # Return to mark the job as "complete", as we'll never be able to finish it.
                logger.error("Failed to mark notification %s as read", notification_name)
                return False

            # Send the generated Quay notifications.
            handler.send_notifications()
            return True

        if result == ProcessNotificationPageResult.FINISHED_PAGE:
            # Continue onto the next page.
            current_page = notification_data["NextPage"]
            continue
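
# For reference, a minimal sketch of the three-state result enum the loop above
# branches on. The three member names appear in the handler; their concrete values
# and the real definition's location are assumptions for illustration.
from enum import Enum

class ProcessNotificationPageResult(Enum):
    FAILED = 'failed'
    FINISHED_PAGE = 'finished_page'
    FINISHED_PROCESSING = 'finished_processing'
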
def process_queue_item(self, job_details):
    logger.debug('Got chunk cleanup queue item: %s', job_details)
    storage_location = job_details['location']
    storage_path = job_details['path']

    if not storage.exists([storage_location], storage_path):
        logger.debug('Chunk already deleted')
        return

    try:
        storage.remove([storage_location], storage_path)
    except IOError:
        raise JobException()
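
# A hedged sketch of the queue item this cleanup handler consumes: a single storage
# location plus the path of the orphaned upload chunk. Both values are illustrative.
example_chunk_cleanup_job = {
    'location': 'local_us',            # hypothetical storage location name
    'path': 'uploads/chunk-0f1e2d3c',  # hypothetical chunk path
}
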
def replicate_storage(self, namespace, storage_uuid, storage, backoff_check=True):
    # Lookup the namespace and its associated regions.
    if not namespace:
        logger.debug("Unknown namespace when trying to replicate storage %s", storage_uuid)
        return

    locations = model.user.get_region_locations(namespace)

    # Lookup the image storage.
    try:
        partial_storage = model.storage.get_storage_by_uuid(storage_uuid)
    except model.InvalidImageException:
        logger.debug("Unknown storage: %s", storage_uuid)
        return

    # Check to see if the image is at all the required locations.
    locations_required = locations | set(storage.default_locations)
    locations_missing = locations_required - set(partial_storage.locations)

    logger.debug(
        "For replication of storage %s under namespace %s: %s required; %s missing",
        storage_uuid,
        namespace.username,
        locations_required,
        locations_missing,
    )

    if not locations_missing:
        logger.debug(
            "No missing locations for storage %s under namespace %s. Required: %s",
            storage_uuid,
            namespace.username,
            locations_required,
        )
        return

    # For any missing storage locations, initiate a copy.
    existing_location = list(partial_storage.locations)[0]
    path_to_copy = model.storage.get_layer_path(partial_storage)

    # Lookup and ensure the existing location exists.
    if not self._backoff_check_exists(existing_location, path_to_copy, storage, backoff_check):
        logger.warning(
            "Cannot find image storage %s in existing location %s; stopping replication",
            storage_uuid,
            existing_location,
        )
        raise JobException()

    # For each missing location, copy over the storage.
    for location in locations_missing:
        logger.debug(
            "Starting copy of storage %s to location %s from %s",
            partial_storage.uuid,
            location,
            existing_location,
        )

        # Copy the binary data.
        copied = False
        try:
            with CloseForLongOperation(app.config):
                storage.copy_between(path_to_copy, existing_location, location)
                copied = True
        except IOError:
            logger.exception(
                "Failed to copy path `%s` of image storage %s to location %s",
                path_to_copy,
                partial_storage.uuid,
                location,
            )
            raise JobException()
        except:
            logger.exception(
                "Unknown exception when copying path %s of image storage %s to loc %s",
                path_to_copy,
                partial_storage.uuid,
                location,
            )
            raise WorkerUnhealthyException()

        if copied:
            # Verify the data was copied to the target storage, to ensure that there are no cases
            # where we write the placement without knowing the data is present.
            if not self._backoff_check_exists(location, path_to_copy, storage, backoff_check):
                logger.warning(
                    "Failed to find path `%s` in location `%s` after copy",
                    path_to_copy,
                    location,
                )
                raise JobException()

            # Create the storage location record for the storage now that the copy has
            # completed.
            model.storage.add_storage_placement(partial_storage, location)
            logger.debug(
                "Finished copy of image storage %s to location %s from %s",
                partial_storage.uuid,
                location,
                existing_location,
            )

    logger.debug(
        "Completed replication of image storage %s to locations %s from %s",
        partial_storage.uuid,
        locations_missing,
        existing_location,
    )
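
# replicate_storage above gates every copy on self._backoff_check_exists, both before
# reading from the source location and after writing to the target. A minimal sketch
# of such a helper, assuming exponential backoff over storage.exists; the retry count
# and sleep schedule are illustrative, not the project's actual values.
import time

def _backoff_check_exists(self, location, path, storage, backoff_check=True):
    attempts = 4 if backoff_check else 1
    for retry in range(attempts):
        if storage.exists([location], path):
            return True
        if retry + 1 < attempts:
            # Wait progressively longer for eventually consistent storage backends.
            time.sleep(2 ** retry)
    return False
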
def _perform_notification_worker(self, job_details):
    """
    Performs the work for handling a security notification as referenced by the given job
    details.

    Returns True on successful handling, False on non-retryable failure and raises a
    JobException on retryable failure.
    """
    logger.debug("Got security scanning notification queue item: %s", job_details)

    notification_id = job_details["notification_id"]
    page_index = job_details.get("current_page_index", None)

    while True:
        page_result = self._secscan_model.lookup_notification_page(notification_id, page_index)
        if page_result is None:
            logger.warning("Got unsupported response for notification page")
            return

        logger.debug("Got page result for notification %s: %s", notification_id, page_result.status)
        if page_result.status == PaginatedNotificationStatus.RETRYABLE_ERROR:
            logger.warning("Got notification page issue; will retry in the future")
            raise JobException()

        if page_result.status == PaginatedNotificationStatus.FATAL_ERROR:
            logger.error("Got fatal error for notification %s; terminating", notification_id)
            return

        # Update the job details with the current page index and extend processing to ensure
        # we do not timeout during the notification handling.
        job_details["current_page_index"] = page_index
        self.extend_processing(_PROCESSING_SECONDS_EXPIRATION, job_details)

        with notification_batch() as spawn_notification:
            # Process the notification page into notifications.
            for updated_vuln_info in self._secscan_model.process_notification_page(
                page_result.data
            ):
                vulnerability = updated_vuln_info.vulnerability

                # Find all manifests in repositories with configured security notifications that
                # match that of the vulnerability.
                for manifest in registry_model.find_manifests_for_sec_notification(
                    updated_vuln_info.manifest_digest
                ):
                    # Filter any repositories where the notification level is below that of
                    # the vulnerability.
                    found_severity = PRIORITY_LEVELS.get(
                        vulnerability.Severity, PRIORITY_LEVELS["Unknown"]
                    )

                    lowest_severity = PRIORITY_LEVELS["Defcon1"]
                    for severity_name in registry_model.lookup_secscan_notification_severities(
                        manifest.repository
                    ):
                        severity = PRIORITY_LEVELS.get(
                            severity_name,
                            PRIORITY_LEVELS["Defcon1"],
                        )
                        if lowest_severity["score"] > severity["score"]:
                            lowest_severity = severity

                    if found_severity["score"] < lowest_severity["score"]:
                        continue

                    # Issue a notification for the repository.
                    tag_names = list(registry_model.tag_names_for_manifest(manifest, TAG_LIMIT))
                    if tag_names:
                        event_data = {
                            "tags": list(tag_names),
                            "vulnerability": {
                                "id": vulnerability.Name,
                                "description": vulnerability.Description,
                                "link": vulnerability.Link,
                                "priority": found_severity["title"],
                                "has_fix": bool(vulnerability.FixedBy),
                            },
                        }

                        spawn_notification(manifest.repository, "vulnerability_found", event_data)

        # Mark the job as having completed the page.
        page_index = page_result.next_page_index
        if page_index is None:
            logger.debug("Completed processing of notification %s", notification_id)
            attempt_count = 5
            while not self._secscan_model.mark_notification_handled(notification_id):
                attempt_count -= 1
                if attempt_count == 0:
                    break

            return

        job_details["current_page_index"] = page_index
        self.extend_processing(_PROCESSING_SECONDS_EXPIRATION, job_details)
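
# For reference, a hedged sketch of the PRIORITY_LEVELS table the severity filter
# above consults: each named severity maps to a numeric score (higher = more severe)
# and a display title. Only "Unknown" and "Defcon1" are named in the code; the other
# entries and all scores are assumptions for illustration.
PRIORITY_LEVELS_SKETCH = {
    "Unknown": {"score": 0, "title": "Unknown"},
    "Low": {"score": 1, "title": "Low"},
    "Medium": {"score": 3, "title": "Medium"},
    "High": {"score": 6, "title": "High"},
    "Critical": {"score": 9, "title": "Critical"},
    "Defcon1": {"score": 11, "title": "Defcon 1"},
}

# Under this table, a repository whose configured threshold (lowest_severity) is
# "High" suppresses a "Medium" vulnerability: found_severity["score"] (3) is below
# lowest_severity["score"] (6), so the loop continues without spawning a notification.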