def get(self):
    """Return the content report of this handler's type for the image.

    Looks up the catalog record for ``self.image_digest`` under
    ``self.account_id``, verifies analysis status, and returns the content
    data (optionally normalized to the user-facing format) merged with any
    additional data via ``hydrate_additional_data``.

    :raises ResourceNotFound: no catalog record exists for the digest
    :raises BadRequest: the requested content type was not captured at
        analysis time
    """
    with db.session_scope() as session:
        catalog_record = db_catalog_image.get(
            self.image_digest, self.account_id, session=session
        )

    if not catalog_record:
        raise ResourceNotFound("Image not found", detail=self.get_error_detail())

    self.verify_analysis_status(catalog_record)

    content = self.get_image_content_data(self.image_digest)
    if self.content_type not in content:
        raise BadRequest(
            "image content of type (%s) was not an available type at analysis time for this image"
            % str(self.content_type),
            detail=self.get_error_detail(),
        )

    if self.__normalize_to_user_format_on_load__:
        content = helpers.make_image_content_response(
            self.content_type, content[self.content_type]
        )

    return self.hydrate_additional_data(content, catalog_record)
def _execute(self):
    """Restore an archived image from the task's file object.

    Aborts with ImageConflict if the image already exists in the catalog.
    The uploaded archive bytes are staged to a temp file (always removed),
    then artifacts and records are restored and the policy engine reloaded.
    """
    # Abort early if the catalog already knows this digest for the account.
    with session_scope() as session:
        existing = db_catalog_image.get(self.image_digest, self.account, session)
        if existing:
            logger.info('Image archive restore found existing image records already. Aborting restore.')
            raise ImageConflict('Conflict: Image already exists in system. No restore possible')

    dest_obj_mgr = object_store.get_manager()

    # Load the archive manifest
    manifest_bytes = self.fileobj.read()
    if not manifest_bytes:
        raise Exception('No archive manifest found in archive record. Cannot restore')

    tmp = tempfile.NamedTemporaryFile(
        prefix='analysis_archive_{}'.format(self.image_digest),
        dir=localconfig.get_config()['tmp_dir'],
        delete=False,
    )
    try:
        tmp.write(ensure_bytes(manifest_bytes))
        tmp.close()

        # Load the archive from the temp file
        with ImageArchive.for_reading(tmp.name) as img_archive:
            logger.debug('Using manifest: {}'.format(img_archive.manifest))
            self.restore_artifacts(img_archive, dest_obj_mgr)
            self.restore_records(img_archive.manifest)
            self._reload_policy_engine(img_archive.manifest)
    finally:
        # Temp file is created with delete=False, so clean up explicitly.
        os.remove(tmp.name)
def run(self, merge=False):
    """
    Archive this task's image: validate the catalog record, create/merge the
    ArchivedImage row, then execute the archive transfer.

    :param merge: if True, merge into an existing archived-image record
        instead of short-circuiting when one is found
    :return: (str, str) tuple, with status as first element and detail msg as second
    """
    logger.debug("Starting archiving process for image {}".format(self.image_digest))
    self.started = datetime.datetime.utcnow()
    try:
        with session_scope() as session:
            found = db_archived_images.get(session, self.account, self.image_digest)
            if found and not merge:
                # Short-circuit, since already exists
                return found.status, 'Existing record found, archiving aborted'

            catalog_img_dict = db_catalog_image.get(self.image_digest, self.account, session)
            if not catalog_img_dict:
                raise Exception('Could not locate an image with digest {} in account {}'.format(self.image_digest, self.account))

            self._catalog_record = catalog_img_dict

            # Only fully analyzed, active images may be archived.
            if catalog_img_dict.get('image_status') != 'active' or catalog_img_dict.get('analysis_status') != 'analyzed':
                raise Exception('Invalid image record state. Image must have "analysis_status"="analyzed" and "image_status"="active". Found {} and {}'.format(catalog_img_dict.get('analysis_status'), catalog_img_dict.get('image_status')))

            # Add the new record
            img = ArchivedImage.from_catalog_image(catalog_img_dict, cascade=True)
            if merge and found:
                img = session.merge(img)
            else:
                # BUGFIX: Session.add() returns None, so the previous
                # `img = session.add(img)` rebound img to None. Keep the
                # reference intact by not assigning the return value.
                session.add(img)
    except Exception as ex:
        add_event(ImageArchivingFailed(self.account, self.image_digest, self.id, err=str(ex)))
        return 'error', str(ex)

    try:
        return self._execute()
    except Exception as ex:
        logger.exception('Error executing image archive task')
        return 'error', str(ex)
    finally:
        self.stopped = datetime.datetime.utcnow()
def _execute(self):
    """Restore an archived image from the task's file object.

    Raises if the image already exists in the catalog (no overwrite on
    restore), stages the uploaded archive bytes to a temp file, then
    restores artifacts and records and reloads the policy engine.

    :raises Exception: if the image already exists or the archive
        manifest is empty
    """
    # If image record already exists, exit — a restore must not clobber
    # a live catalog record.
    with session_scope() as session:
        if db_catalog_image.get(self.image_digest, self.account, session):
            logger.info('Image archive restore found existing image records already. Aborting restore.')
            raise Exception('Conflict: Image already exists in system. No restore possible')

    dest_obj_mgr = object_store.get_manager()

    # Load the archive manifest from the uploaded file object.
    m = self.fileobj.read()
    if m:
        # delete=False because ImageArchive re-opens the file by name;
        # cleanup is handled in the finally block.
        tf = tempfile.NamedTemporaryFile(
            prefix='analysis_archive_{}'.format(self.image_digest),
            dir=localconfig.get_config()['tmp_dir'],
            delete=False,
        )
        try:
            tf.write(ensure_bytes(m))
            tf.close()

            # Load the archive from the temp file
            with ImageArchive.for_reading(tf.name) as img_archive:
                logger.debug('Using manifest: {}'.format(img_archive.manifest))
                self.restore_artifacts(img_archive, dest_obj_mgr)
                self.restore_records(img_archive.manifest)
                self._reload_policy_engine(img_archive.manifest)
        finally:
            os.remove(tf.name)
    else:
        raise Exception('No archive manifest found in archive record. Cannot restore')
def import_image(
    dbsession,
    account: str,
    operation_id: str,
    import_manifest: ImportManifest,
    force: bool = False,
    annotations: dict = None,
) -> dict:
    """
    Process the image import finalization, creating the new 'image' record and setting the proper state for queueing

    :param dbsession: open DB session used for all catalog lookups/updates
    :param account: account (userId) owning the import operation
    :param operation_id: id of the import operation being finalized
    :param import_manifest: manifest describing the imported image (digest, tags, contents)
    :param force: if True, re-import even when an analyzed record already exists
    :param annotations: optional annotations to attach to the image record
    :return: the created/updated image record (dict)
    """
    logger.debug(
        "Processing import image request with source operation_id = %s, annotations = %s",
        operation_id,
        annotations,
    )

    # Add annotation indicating this is an import
    annotations = add_import_annotations(import_manifest, annotations)

    # Import analysis for a new digest, or re-load analysis for existing image
    logger.debug("Loading image info using import operation id %s", operation_id)

    # Build one DockerImageReference per tag, all pinned to the import digest.
    # The local image id is used when present; otherwise the digest stands in
    # as the image id.
    image_references = []
    for t in import_manifest.tags:
        r = DockerImageReference.from_string(t)
        r.digest = import_manifest.digest

        if import_manifest.local_image_id:
            r.image_id = import_manifest.local_image_id
        else:
            r.image_id = import_manifest.digest

        image_references.append(r)

    if not (image_references and image_references[0].has_digest()):
        raise ValueError("Must have image digest in image reference")

    # Check for dockerfile updates to an existing image
    found_img = db_catalog_image.get(
        imageDigest=import_manifest.digest, userId=account, session=dbsession
    )

    # Removed this to align processing with how analysis works: the status is updated *after* the add call
    # if the record already had an older status it will get reset
    if (
        found_img
        and found_img["analysis_status"] not in taskstate.fault_state("analyze")
        and not force
    ):
        # Existing, non-faulted analysis and no force: reuse the stored
        # manifests rather than the import manifest.
        # Load the existing manifest since we aren't going to use the import manifest for analysis
        obj_mgr = get_manager()
        manifest = obj_mgr.get_document(account, "manifest_data", found_img["imageDigest"])
        parent_manifest = obj_mgr.get_document(
            account, "parent_manifest_data", found_img["imageDigest"]
        )

        # Don't allow a dockerfile update via import path
        dockerfile_content = None
        dockerfile_mode = None

        # Finalize the import, go straight to complete
        finalize_import_operation(
            dbsession,
            account,
            operation_id,
            import_manifest,
            final_state=ImportState.complete,
        )
        # raise BadRequest(
        #     "Cannot reload image that already exists unless using force=True for re-analysis",
        #     detail={"digest": import_manifest.digest},
        # )
    else:
        # Finalize the import
        internal_import_manifest = finalize_import_operation(
            dbsession, account, operation_id, import_manifest
        )

        # Get the dockerfile content if available
        if import_manifest.contents.dockerfile:
            rec = [
                ref
                for ref in internal_import_manifest.contents
                if ref.content_type == ImportTypes.dockerfile.value
            ][0]
            obj_mgr = get_manager()
            dockerfile_content = obj_mgr.get_document(
                userId=account,
                bucket=rec.bucket,
                archiveId=rec.key,
            )
            dockerfile_mode = "Actual"
        else:
            dockerfile_content = ""
            dockerfile_mode = "Guessed"

        # Set the manifest to the import manifest. This is swapped out for the real manifest during the import operation on
        # the analyzer
        manifest = internal_import_manifest.to_json()
        parent_manifest = ""

    # Update the db for the image record
    image_records = add_or_update_image(
        dbsession,
        account,
        image_references[0].image_id,
        tags=[x.tag_pullstring() for x in image_references],
        digests=[x.digest_pullstring() for x in image_references],
        parentdigest=import_manifest.parent_digest
        if import_manifest.parent_digest
        else import_manifest.digest,
        dockerfile=dockerfile_content,
        dockerfile_mode=dockerfile_mode,
        manifest=manifest,  # For now use the import manifest as the image manifest. This will get set to the actual manifest on the analyzer
        parent_manifest=parent_manifest,
        annotations=annotations,
    )
    if image_records:
        image_record = image_records[0]
    else:
        raise Exception("No record updated/inserted")

    return image_record