def add(userId, bucket, archiveId, documentName, content_url=None, metadata=None, is_compressed=None, content_digest=None, size=None, session=None):
    """
    Add or update the ArchiveMetadata record for the given user/bucket/archiveId.
    """
    if not session:
        session = db.Session

    doc_record = ArchiveMetadata(userId=userId,
                                 bucket=bucket,
                                 archiveId=archiveId,
                                 documentName=documentName,
                                 content_url=content_url,
                                 document_metadata=metadata,
                                 is_compressed=is_compressed,
                                 digest=content_digest,
                                 size=size)

    # merge() inserts a new record or updates an existing one with the same primary key
    merged_result = session.merge(doc_record)

    return True
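# Illustrative usage sketch (not part of the original module): add() is expected to be
# called inside an open SQLAlchemy session, e.g. via the session_scope context manager
# imported by the upgrade routine below. The userId/bucket/archiveId/content_url values
# here are hypothetical placeholders.
#
#   from anchore_engine.db import session_scope
#
#   with session_scope() as dbsession:
#       add(userId='admin',
#           bucket='policy_bundles',
#           archiveId='default_bundle',
#           documentName='default_bundle',
#           content_url='db://admin/policy_bundles/default_bundle',
#           is_compressed=False,
#           size=0,
#           session=dbsession)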
def archive_data_upgrade_005_006():
    """
    Upgrade the document archive data schema and move the data appropriately.
    Assumes both tables are in place (archive_document, archive_document_reference, object_storage)

    :return:
    """
    from anchore_engine.db import ArchiveDocument, session_scope, ArchiveMetadata
    from anchore_engine.subsys import archive
    from anchore_engine.subsys.archive import operations
    from anchore_engine.configuration import localconfig

    config = localconfig.get_config()
    archive.initialize(config.get('services', {}).get('catalog', {}))
    client = operations.get_archive().primary_client

    session_counter = 0
    max_pending_session_size = 10000

    with session_scope() as db_session:
        for doc in db_session.query(ArchiveDocument.userId,
                                    ArchiveDocument.bucket,
                                    ArchiveDocument.archiveId,
                                    ArchiveDocument.documentName,
                                    ArchiveDocument.created_at,
                                    ArchiveDocument.last_updated,
                                    ArchiveDocument.record_state_key,
                                    ArchiveDocument.record_state_val):
            # Build the new metadata record, pointing content_url at the object's
            # location in the configured archive driver
            meta = ArchiveMetadata(userId=doc[0],
                                   bucket=doc[1],
                                   archiveId=doc[2],
                                   documentName=doc[3],
                                   is_compressed=False,
                                   document_metadata=None,
                                   content_url=client.uri_for(userId=doc[0], bucket=doc[1], key=doc[2]),
                                   created_at=doc[4],
                                   last_updated=doc[5],
                                   record_state_key=doc[6],
                                   record_state_val=doc[7])
            db_session.add(meta)

            session_counter += 1
            # Flush periodically so the pending session does not grow unbounded
            if session_counter >= max_pending_session_size:
                db_session.flush()
                session_counter = 0
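# Illustrative invocation sketch (an assumption, not part of the original module): the
# 005 -> 006 data upgrade is expected to run only after the engine configuration has
# been loaded, since archive.initialize() reads the catalog service settings from
# localconfig.get_config(). Assuming configuration loading has already happened (e.g.
# during the normal anchore-engine upgrade/bootstrap path), the migration is simply:
#
#   archive_data_upgrade_005_006()
#
# After it completes, each archive_document row has a corresponding ArchiveMetadata
# record whose content_url references the object in the configured archive driver.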