def test_new_record(app, db, dummy_location, record_dumps, resolver):
    """Test creation of new record.

    Loads ``record_dumps`` and verifies the resulting record's metadata,
    its three dump revisions, PID registration and file/bucket wiring.
    """
    RecordDumpLoader.create(record_dumps)
    pid, record = resolver.resolve('11783')
    created = datetime(2011, 10, 13, 8, 27, 47)
    # Basic sanity test that the record exists with the dumped metadata.
    assert record['title']
    assert record.created == created
    # Test that this is a completely new record: exactly the three
    # revisions coming from the dump, nothing pre-existing.
    assert len(record.revisions) == 3
    # Check revisions: every revision keeps the original creation date;
    # the two newest revisions carry the later (2012) update timestamp,
    # the oldest keeps the original (2011) one.
    assert record.revisions[2].created == created
    assert record.revisions[2].updated == datetime(2012, 10, 13, 8, 27, 47)
    assert record.revisions[1].created == created
    assert record.revisions[1].updated == datetime(2012, 10, 13, 8, 27, 47)
    assert record.revisions[0].created == created
    assert record.revisions[0].updated == datetime(2011, 10, 13, 8, 27, 47)
    # The recid must have been reserved by the loader, so inserting the
    # same identifier again violates the uniqueness constraint.
    pytest.raises(IntegrityError, RecordIdentifier.insert, 11783)
    # Test the PIDs are extracted and created.
    assert PersistentIdentifier.get('doi', '10.5281/zenodo.11783')
    # The single dumped file exists as an ObjectVersion whose stored
    # checksum/size match the record metadata, and whose bucket is
    # tagged back to the record.
    assert len(record['_files']) == 1
    f = record['_files'][0]
    obj = ObjectVersion.get(f['bucket'], f['key'])
    assert obj.file.checksum == f['checksum']
    assert obj.file.size == f['size']
    assert BucketTag.get_value(f['bucket'], 'record') == str(record.id)
def create_files(cls, record, files, existing_files):
    """Create files.

    This method is currently limited to a single bucket per record.

    :param record: Record the files are attached to.
    :param files: Mapping of file key to dump metadata; each value
        appears to be a list of file versions — TODO confirm.
    :param existing_files: Previously attached file dicts, scanned for
        a reusable bucket id.
    :returns: One-element list containing the bucket used.
    """
    default_bucket = None
    # Look for bucket id in existing files.
    for f in existing_files:
        if 'bucket' in f:
            default_bucket = f['bucket']
            break
    # Create a bucket in default location if none is found.
    if default_bucket is None:
        b = Bucket.create()
        # Tag the bucket with the owning record's id so it can be
        # traced back to the record.
        BucketTag.create(b, 'record', str(record.id))
        default_bucket = str(b.id)
        db.session.commit()
    else:
        b = Bucket.get(default_bucket)
    record['_files'] = []
    for key, meta in files.items():
        obj = cls.create_file(b, key, meta)
        # File type is the extension without the leading dot.
        ext = splitext(obj.key)[1].lower()
        if ext.startswith('.'):
            ext = ext[1:]
        # NOTE(review): only the newest file version is inspected for
        # doctype information — confirm this is intended.
        last_ver = meta[-1]
        # Collect the doctypes registered for this file's own recid;
        # presumably 'recids_doctype' pairs (recid, doctype) — verify
        # against the dump format.
        rec_docs = [
            rec_doc[1] for rec_doc in last_ver['recids_doctype']
            if rec_doc[0] == last_ver['recid']
        ]
        record['_files'].append(
            dict(bucket=str(obj.bucket.id),
                 key=obj.key,
                 version_id=str(obj.version_id),
                 size=obj.file.size,
                 checksum=obj.file.checksum,
                 type=ext,
                 # Empty doctype when no matching recid entry exists.
                 doctype=rec_docs[0] if rec_docs else ''))
    # Associate the record with its bucket and persist everything.
    db.session.add(RecordsBuckets(record_id=record.id, bucket_id=b.id))
    record.commit()
    db.session.commit()
    return [b]
def create_files(cls, record, files, existing_files):
    """Create files.

    This method is currently limited to a single bucket per record.
    """
    # Prefer the bucket already referenced by a previously attached file.
    bucket_id = next(
        (item['bucket'] for item in existing_files if 'bucket' in item),
        None)

    if bucket_id is None:
        # No reusable bucket: create one in the default location and tag
        # it with the id of the record it belongs to.
        bucket = Bucket.create()
        BucketTag.create(bucket, 'record', str(record.id))
        bucket_id = str(bucket.id)
        db.session.commit()
    else:
        bucket = Bucket.get(bucket_id)

    record['_files'] = []
    for key, meta in files.items():
        obj = cls.create_file(bucket, key, meta)
        # The stored file type is the extension without its leading dot.
        extension = splitext(obj.key)[1].lower()
        if extension[:1] == '.':
            extension = extension[1:]
        record['_files'].append(dict(
            bucket=str(obj.bucket.id),
            key=obj.key,
            version_id=str(obj.version_id),
            size=obj.file.size,
            checksum=obj.file.checksum,
            type=extension,
        ))

    # Link the record to its bucket and persist the record metadata.
    db.session.add(
        RecordsBuckets(record_id=record.id, bucket_id=bucket.id)
    )
    record.commit()
    db.session.commit()
    return [bucket]
def create_files(cls, record, files, existing_files):
    """Create files.

    This method is currently limited to a single bucket per record.
    """
    # Scan previously attached files for a bucket id worth reusing.
    bucket = None
    for existing in existing_files:
        if 'bucket' in existing:
            bucket = Bucket.get(existing['bucket'])
            break

    if bucket is None:
        # Otherwise create a fresh bucket in the default location,
        # tagged with the id of the owning record.
        bucket = Bucket.create()
        BucketTag.create(bucket, 'record', str(record.id))
        db.session.commit()

    record['files'] = []
    for key, meta in files.items():
        obj = cls.create_file(bucket, key, meta)
        # Derive the file type from the extension, minus the dot.
        file_type = splitext(obj.key)[1].lower()
        if file_type[:1] == '.':
            file_type = file_type[1:]
        record['files'].append(
            dict(
                bucket=str(obj.bucket.id),
                filename=obj.key,
                version_id=str(obj.version_id),
                size=obj.file.size,
                checksum=obj.file.checksum,
                type=file_type,
            ))

    record.commit()
    db.session.commit()
    return [bucket]
def test_new_record(app, db, dummy_location, record_dump, resolver):
    """Test creation of new record."""
    RecordDumpLoader.create(record_dump)
    pid, record = resolver.resolve("11782")

    # The record exists with its metadata and creation date intact.
    assert record["title"]
    assert record.created == datetime(2014, 10, 13, 8, 27, 47)

    # A brand-new record carries exactly the dump's two revisions, and
    # its recid is reserved: re-inserting it violates uniqueness.
    assert len(record.revisions) == 2
    pytest.raises(IntegrityError, RecordIdentifier.insert, 11782)

    # The PIDs are extracted and registered.
    assert PersistentIdentifier.get("doi", "10.5281/zenodo.11782")

    # The single file is stored as an ObjectVersion whose checksum and
    # size match the record metadata, in a bucket tagged to the record.
    assert len(record["_files"]) == 1
    file_meta = record["_files"][0]
    obj = ObjectVersion.get(file_meta["bucket"], file_meta["key"])
    assert obj.file.checksum == file_meta["checksum"]
    assert obj.file.size == file_meta["size"]
    assert BucketTag.get_value(file_meta["bucket"], "record") == \
        str(record.id)
def test_bucket_tags(app, db, dummy_location):
    """Test bucket tags."""
    bucket = Bucket.create()
    BucketTag.create(bucket, "mykey", "testvalue")
    BucketTag.create(bucket, "another_key", "another value")
    db.session.commit()

    # Creating a duplicate key on the same bucket must fail.
    pytest.raises(Exception, BucketTag.create, bucket, "mykey", "newvalue")

    # Lookups accept either a bucket instance or its id; missing keys
    # resolve to None.
    assert BucketTag.query.count() == 2
    assert BucketTag.get(bucket.id, "mykey").value == "testvalue"
    assert BucketTag.get_value(bucket, "another_key") == "another value"
    assert BucketTag.get_value(bucket.id, "invalid") is None

    # Deleting an existing key removes it; deleting a missing key is a
    # no-op.
    BucketTag.delete(bucket, "mykey")
    assert BucketTag.query.count() == 1
    BucketTag.delete(bucket, "invalid")
    assert BucketTag.query.count() == 1

    # create_or_update both rewrites existing keys and adds new ones.
    BucketTag.create_or_update(bucket, "another_key", "newval")
    BucketTag.create_or_update(bucket, "newkey", "testval")
    db.session.commit()
    assert BucketTag.get_value(bucket, "another_key") == "newval"
    assert BucketTag.get_value(bucket, "newkey") == "testval"

    # Tags are exposed as a plain dictionary per bucket.
    assert bucket.get_tags() == dict(another_key="newval", newkey="testval")
    other = Bucket.create()
    assert other.get_tags() == dict()

    # Test cascading delete: removing buckets removes their tags too.
    Bucket.query.delete()
    db.session.commit()
    assert BucketTag.query.count() == 0