def send_notifications(self):
    """ Sends all queued notifications. """
    if self.vulnerability_info is None:
        return

    new_vuln = self.vulnerability_info

    # The fallback includes a "title" so the "priority" lookup below cannot KeyError
    # on a severity name missing from PRIORITY_LEVELS.
    new_severity = PRIORITY_LEVELS.get(
        new_vuln.get("Severity", "Unknown"),
        {"index": sys.maxsize, "title": "Unknown"})

    # For each of the tags found, issue a notification.
    with notification_batch() as spawn_notification:
        for repository_id, tags in self.tags_by_repository_map.items():
            event_data = {
                "tags": list(tags),
                "vulnerability": {
                    "id": new_vuln["Name"],
                    "description": new_vuln.get("Description", None),
                    "link": new_vuln.get("Link", None),
                    "priority": new_severity["title"],
                    "has_fix": "FixedIn" in new_vuln,
                },
            }

            spawn_notification(self.repository_map[repository_id],
                               "vulnerability_found", event_data)
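# A minimal sketch of the PRIORITY_LEVELS table the lookups above assume. The exact
# titles, indexes, and scores below are illustrative placeholders, not the real values;
# only the conventions matter: a *lower* "index" means a *more* severe level, and a
# *higher* "score" means a *more* severe level. The {"index": sys.maxsize} fallback
# therefore makes any unrecognized severity compare as the least severe possible.
import sys

PRIORITY_LEVELS_SKETCH = {
    "Defcon1": {"title": "Defcon1", "index": 0, "score": 11},
    "Critical": {"title": "Critical", "index": 1, "score": 10},
    "High": {"title": "High", "index": 2, "score": 9},
    "Medium": {"title": "Medium", "index": 3, "score": 6},
    "Low": {"title": "Low", "index": 4, "score": 3},
    "Negligible": {"title": "Negligible", "index": 5, "score": 1},
    "Unknown": {"title": "Unknown", "index": 6, "score": 0},
}

# Example: an unrecognized severity falls back to the sentinel, which sorts below
# every real level.
fallback = PRIORITY_LEVELS_SKETCH.get("NotARealSeverity", {"index": sys.maxsize})
assert fallback["index"] > PRIORITY_LEVELS_SKETCH["Unknown"]["index"]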
def should_perform(self, event_data, notification_data):
    event_config = notification_data.event_config_dict
    if VulnerabilityFoundEvent.CONFIG_LEVEL not in event_config:
        return True

    if VulnerabilityFoundEvent.VULNERABILITY_KEY not in event_data:
        return False

    vuln_info = event_data.get(VulnerabilityFoundEvent.VULNERABILITY_KEY, {})
    event_severity = PRIORITY_LEVELS.get(vuln_info.get("priority", "Unknown"))
    if event_severity is None:
        return False

    actual_level_index = int(event_severity["index"])
    filter_level_index = int(event_config[VulnerabilityFoundEvent.CONFIG_LEVEL])
    return actual_level_index <= filter_level_index
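# A small, self-contained illustration of the level filter above, using the hypothetical
# sketch table. _passes_level_filter is not a real helper in this codebase; it just
# isolates the comparison: a notification configured with CONFIG_LEVEL == 3 ("Medium"
# in the sketch) fires for anything at index 3 or lower (i.e., at least as severe).
def _passes_level_filter(vuln_priority, config_level, levels):
    severity = levels.get(vuln_priority)
    if severity is None:
        return False

    return int(severity["index"]) <= int(config_level)

assert _passes_level_filter("High", 3, PRIORITY_LEVELS_SKETCH)     # index 2 <= 3
assert not _passes_level_filter("Low", 3, PRIORITY_LEVELS_SKETCH)  # index 4 > 3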
def process_notification_page_data(self, notification_page_data):
    """ Processes the given notification page data to spawn vulnerability notifications
        as necessary. Returns the status of the processing.
    """
    if "New" not in notification_page_data:
        return self._done()

    new_data = notification_page_data["New"]
    old_data = notification_page_data.get("Old", {})

    new_vuln = new_data["Vulnerability"]
    old_vuln = old_data.get("Vulnerability", {})

    self.vulnerability_info = new_vuln

    new_layer_ids = new_data.get("LayersIntroducingVulnerability", [])
    old_layer_ids = old_data.get("LayersIntroducingVulnerability", [])

    new_severity = PRIORITY_LEVELS.get(
        new_vuln.get("Severity", "Unknown"), {"index": sys.maxsize})
    old_severity = PRIORITY_LEVELS.get(
        old_vuln.get("Severity", "Unknown"), {"index": sys.maxsize})

    # Check if the severity of the vulnerability has increased. If so, then we report this
    # vulnerability for *all* layers, rather than a difference, as it is important for
    # everyone.
    if new_severity["index"] < old_severity["index"]:
        # The vulnerability has had its severity increased. Report for *all* layers.
        all_layer_ids = set(new_layer_ids) | set(old_layer_ids)
        for layer_id in all_layer_ids:
            self._report(layer_id)

        if "NextPage" not in notification_page_data:
            return self._done()
        else:
            return ProcessNotificationPageResult.FINISHED_PAGE

    # Otherwise, only send the notification to new layers. To find only the new layers, we
    # need to do a streaming diff vs the old layer IDs stream.

    # Check for ordered data. If found, we use the indexed tracker, which is faster and
    # more memory efficient.
    is_indexed = False
    if ("OrderedLayersIntroducingVulnerability" in new_data or
            "OrderedLayersIntroducingVulnerability" in old_data):

        def tuplize(stream):
            return [(entry["LayerName"], entry["Index"]) for entry in stream]

        new_layer_ids = tuplize(new_data.get("OrderedLayersIntroducingVulnerability", []))
        old_layer_ids = tuplize(old_data.get("OrderedLayersIntroducingVulnerability", []))
        is_indexed = True

    # If this is the first call, initialize the tracker.
    if self.stream_tracker is None:
        self.stream_tracker = (
            IndexedStreamingDiffTracker(self._report, self.results_per_stream)
            if is_indexed
            else StreamingDiffTracker(self._report, self.results_per_stream))

    # Add the old and new layer ID streams to the tracker. The tracker itself will
    # call _report whenever it has determined a new layer has been found.
    self.stream_tracker.push_new(new_layer_ids)
    self.stream_tracker.push_old(old_layer_ids)

    # Check to see if there are any additional pages to process.
    if "NextPage" not in notification_page_data:
        return self._done()
    else:
        return ProcessNotificationPageResult.FINISHED_PAGE
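# A rough sketch of the tracker contract assumed above. The real StreamingDiffTracker /
# IndexedStreamingDiffTracker work incrementally across pages with bounded memory; this
# toy stand-in only shows the observable behavior they are used for: report every entry
# that appears in the "new" stream but not in the "old" one.
class ToyDiffTracker(object):
    def __init__(self, report, results_per_stream):
        self._report = report
        self._results_per_stream = results_per_stream  # unused in this toy version
        self._new = []
        self._old = set()

    def push_new(self, entries):
        self._new.extend(entries)

    def push_old(self, entries):
        self._old.update(entries)

    def done(self):
        # Report entries present only in the new stream.
        for entry in self._new:
            if entry not in self._old:
                self._report(entry)

found = []
tracker = ToyDiffTracker(found.append, results_per_stream=100)
tracker.push_new(["layer-a", "layer-b"])
tracker.push_old(["layer-a"])
tracker.done()
assert found == ["layer-b"]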
def _analyze(self, layer, force_parents=False):
    """ Analyzes a single layer. Raises a PreemptedException if another worker pre-empted
        the candidate's analysis for us.
    """
    # If the parent couldn't be analyzed with the target version or higher, we can't
    # analyze this image. Mark it as failed with the current target version.
    if not force_parents and (layer.parent_id and not layer.parent.security_indexed and
                              layer.parent.security_indexed_engine >= self._target_version):
        if not set_secscan_status(layer, False, self._target_version):
            raise PreemptedException

        # Nothing more to do.
        return

    # Make sure the image's storage is not marked as uploading. If so, nothing more to do.
    if layer.storage.uploading:
        if not set_secscan_status(layer, False, self._target_version):
            raise PreemptedException

        # Nothing more to do.
        return

    # Analyze the image.
    previously_security_indexed_successfully = layer.security_indexed
    previous_security_indexed_engine = layer.security_indexed_engine
    logger.info('Analyzing layer %s', layer.docker_image_id)
    analyzed_version = self._api.analyze_layer(layer)
    logger.info('Analyzed layer %s successfully with version %s',
                layer.docker_image_id, analyzed_version)

    # Mark the image as analyzed.
    if not set_secscan_status(layer, True, analyzed_version):
        # If the image was previously successfully marked as resolved, then
        # set_secscan_status might return False because we're not changing it
        # (since this is a fixup).
        if not previously_security_indexed_successfully:
            raise PreemptedException

    # If we were the ones to complete the job successfully first, then we need to decide
    # whether to send notifications. Notifications are sent if:
    #   1) This is a new layer.
    #   2) This is an existing layer that previously did not index properly.
    # We don't always send notifications: re-indexing an already-successful layer for a
    # newer feature set in the security scanner would make notifications spammy.
    is_new_image = previous_security_indexed_engine == IMAGE_NOT_SCANNED_ENGINE_VERSION
    is_existing_image_unindexed = (not is_new_image and
                                   not previously_security_indexed_successfully)
    if (features.SECURITY_NOTIFICATIONS and
            (is_new_image or is_existing_image_unindexed)):
        # Get the tags of the layer we analyzed.
        repository_map = defaultdict(list)
        event = ExternalNotificationEvent.get(name='vulnerability_found')
        matching = list(
            filter_tags_have_repository_event(get_tags_for_image(layer.id), event))

        for tag in matching:
            repository_map[tag.repository_id].append(tag)

        # If there is at least one tag, look up the vulnerabilities for the image, now
        # that it is analyzed.
        if len(repository_map) > 0:
            logger.debug('Loading data for layer %s', layer.id)
            try:
                layer_data = self._api.get_layer_data(layer, include_vulnerabilities=True)
            except APIRequestFailure:
                # Let the caller decide how to handle the retryable API failure.
                raise

            if layer_data is not None:
                # Dispatch events for any detected vulnerabilities.
                logger.debug('Got data for layer %s: %s', layer.id, layer_data)
                found_features = layer_data['Layer'].get('Features', [])

                for repository_id in repository_map:
                    tags = repository_map[repository_id]
                    vulnerabilities = dict()

                    # Collect all the vulnerabilities found for the layer under each
                    # repository and send them as a batch notification.
                    for feature in found_features:
                        if 'Vulnerabilities' not in feature:
                            continue

                        for vulnerability in feature.get('Vulnerabilities', []):
                            vuln_data = {
                                'id': vulnerability['Name'],
                                'description': vulnerability.get('Description', None),
                                'link': vulnerability.get('Link', None),
                                'has_fix': 'FixedBy' in vulnerability,

                                # TODO: Change this key name if/when we change the event
                                # format.
                                'priority': vulnerability.get('Severity', 'Unknown'),
                            }

                            vulnerabilities[vulnerability['Name']] = vuln_data

                    # TODO: remove when more endpoints have been converted to using
                    # interfaces.
                    repository = AttrDict({
                        'namespace_name': tags[0].repository.namespace_user.username,
                        'name': tags[0].repository.name,
                    })

                    repo_vulnerabilities = list(vulnerabilities.values())
                    if not repo_vulnerabilities:
                        continue

                    # Sort so the most severe vulnerability (lowest index) comes first;
                    # unknown severities sort last.
                    priority_key = lambda v: PRIORITY_LEVELS.get(
                        v['priority'], {}).get('index', 100)
                    repo_vulnerabilities.sort(key=priority_key)

                    event_data = {
                        'tags': [tag.name for tag in tags],
                        'vulnerabilities': repo_vulnerabilities,

                        # For back-compat with existing events.
                        'vulnerability': repo_vulnerabilities[0],
                    }

                    spawn_notification(repository, 'vulnerability_found', event_data)
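# A quick demonstration of the sort used above, against the hypothetical sketch table:
# sorting ascending by "index" puts the most severe vulnerability first, so the
# back-compat 'vulnerability' field in the event payload is always the worst one found,
# while unrecognized severities (default index 100) sink to the end.
vulns = [
    {"id": "CVE-AAAA-0001", "priority": "Low"},
    {"id": "CVE-AAAA-0002", "priority": "Critical"},
    {"id": "CVE-AAAA-0003", "priority": "NotARealSeverity"},
]
vulns.sort(key=lambda v: PRIORITY_LEVELS_SKETCH.get(v["priority"], {}).get("index", 100))
assert vulns[0]["id"] == "CVE-AAAA-0002"   # most severe first
assert vulns[-1]["id"] == "CVE-AAAA-0003"  # unknown severity sorts last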
def _perform_notification_worker(self, job_details):
    """
    Performs the work for handling a security notification as referenced by the given
    data object.

    Returns True on successful handling, False on non-retryable failure, and raises a
    JobException on retryable failure.
    """
    logger.debug("Got security scanning notification queue item: %s", job_details)

    notification_id = job_details["notification_id"]
    page_index = job_details.get("current_page_index", None)

    while True:
        page_result = self._secscan_model.lookup_notification_page(notification_id,
                                                                   page_index)
        if page_result is None:
            logger.warning("Got unsupported response for notification page; terminating")
            return False

        logger.debug("Got page result for notification %s: %s", notification_id,
                     page_result.status)

        if page_result.status == PaginatedNotificationStatus.RETRYABLE_ERROR:
            logger.warning("Got notification page issue; will retry in the future")
            raise JobException()

        if page_result.status == PaginatedNotificationStatus.FATAL_ERROR:
            logger.error("Got fatal error for notification %s; terminating",
                         notification_id)
            return False

        # Update the job details with the current page index and extend processing to
        # ensure we do not timeout during the notification handling.
        job_details["current_page_index"] = page_index
        self.extend_processing(_PROCESSING_SECONDS_EXPIRATION, job_details)

        with notification_batch() as spawn_notification:
            # Process the notification page into notifications.
            for updated_vuln_info in self._secscan_model.process_notification_page(
                    page_result.data):
                vulnerability = updated_vuln_info.vulnerability

                # Find all manifests in repositories with configured security
                # notifications that match that of the vulnerability.
                for manifest in registry_model.find_manifests_for_sec_notification(
                        updated_vuln_info.manifest_digest):
                    # Filter out any repositories where the configured notification level
                    # is above that of the vulnerability.
                    found_severity = PRIORITY_LEVELS.get(vulnerability.Severity,
                                                         PRIORITY_LEVELS["Unknown"])

                    lowest_severity = PRIORITY_LEVELS["Defcon1"]
                    for severity_name in (
                            registry_model.lookup_secscan_notification_severities(
                                manifest.repository)):
                        severity = PRIORITY_LEVELS.get(severity_name,
                                                       PRIORITY_LEVELS["Defcon1"])
                        if lowest_severity["score"] > severity["score"]:
                            lowest_severity = severity

                    if found_severity["score"] < lowest_severity["score"]:
                        continue

                    # Issue a notification for the repository.
                    tag_names = list(registry_model.tag_names_for_manifest(manifest,
                                                                           TAG_LIMIT))
                    if tag_names:
                        event_data = {
                            "tags": list(tag_names),
                            "vulnerability": {
                                "id": vulnerability.Name,
                                "description": vulnerability.Description,
                                "link": vulnerability.Link,
                                "priority": found_severity["title"],
                                "has_fix": bool(vulnerability.FixedBy),
                            },
                        }

                        spawn_notification(manifest.repository, "vulnerability_found",
                                           event_data)

        # Mark the job as having completed the page.
        page_index = page_result.next_page_index
        if page_index is None:
            logger.debug("Completed processing of notification %s", notification_id)
            attempt_count = 5
            while not self._secscan_model.mark_notification_handled(notification_id):
                attempt_count -= 1
                if attempt_count == 0:
                    break

            return True

        job_details["current_page_index"] = page_index
        self.extend_processing(_PROCESSING_SECONDS_EXPIRATION, job_details)
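# The per-repository filter above reduces to: find the *least* severe level any
# notification on the repository is configured for (the minimum "score"), then skip the
# manifest when the vulnerability scores below that floor. A hypothetical standalone
# version of that check, using the sketch table (higher "score" == more severe);
# _meets_severity_floor is illustrative and does not exist in the codebase:
def _meets_severity_floor(vuln_severity_name, configured_names, levels):
    found = levels.get(vuln_severity_name, levels["Unknown"])

    # Start at the most severe level and lower the floor for each configured severity.
    floor = levels["Defcon1"]
    for name in configured_names:
        candidate = levels.get(name, levels["Defcon1"])
        if floor["score"] > candidate["score"]:
            floor = candidate

    return found["score"] >= floor["score"]

# With a floor of {"Medium", "Critical"}, a "High" vulnerability clears the "Medium"
# floor, but a "Low" one does not.
assert _meets_severity_floor("High", {"Medium", "Critical"}, PRIORITY_LEVELS_SKETCH)
assert not _meets_severity_floor("Low", {"Medium", "Critical"}, PRIORITY_LEVELS_SKETCH)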