def test_download_to_object_version(db, bucket):
    """Test download to object version task."""
    with mock.patch('requests.get') as mock_request:
        obj = ObjectVersion.create(bucket=bucket, key='test.pdf')
        bid = bucket.id
        db.session.commit()

        # Mock download request
        file_size = 1024
        mock_request.return_value = type(
            'Response', (object, ), {
                'raw': BytesIO(b'\x00' * file_size),
                'headers': {'Content-Length': file_size}
            })

        assert obj.file is None

        task_s = DownloadTask().s('http://example.com/test.pdf',
                                  version_id=obj.version_id)

        # Download
        task = task_s.delay()

        assert ObjectVersion.query.count() == 1
        obj = ObjectVersion.query.first()
        assert obj.key == 'test.pdf'
        assert str(obj.version_id) == task.result
        assert obj.file
        assert obj.file.size == file_size
        assert Bucket.get(bid).size == file_size
        assert FileInstance.query.count() == 1

        # Undo
        DownloadTask().clean(version_id=obj.version_id)

        assert ObjectVersion.query.count() == 0
        assert FileInstance.query.count() == 1
        assert Bucket.get(bid).size == 0
def put_file_into_bucket(bucket_id, key, stream, content_length):
    # TODO: refactor invenio_files_rest to have a proper API and use that one here
    from invenio_db import db
    from invenio_files_rest.models import Bucket, ObjectVersion
    from invenio_files_rest.views import need_bucket_permission
    from invenio_files_rest.errors import FileSizeError

    bucket = Bucket.get(bucket_id)
    if bucket is None:
        abort(404, 'Bucket does not exist.')

    # WARNING: this function should be isomorphic with
    # invenio_files_rest.views:ObjectResource.create_object
    @need_bucket_permission('bucket-update')
    def create_object(bucket, key):
        size_limit = bucket.size_limit
        if size_limit and int(content_length or 0) > size_limit:
            desc = 'File size limit exceeded.' \
                if isinstance(size_limit, int) else size_limit.reason
            raise FileSizeError(description=desc)
        with db.session.begin_nested():
            obj = ObjectVersion.create(bucket, key)
            obj.set_contents(
                stream, size=content_length, size_limit=size_limit)
        db.session.commit()
        return obj

    return create_object(key=key, bucket=bucket)
def put_file_into_bucket(bucket_id, key, stream, content_length):
    # TODO: refactor invenio_files_rest to have a proper API and use that one here
    from invenio_db import db
    from invenio_files_rest.models import Bucket, ObjectVersion
    from invenio_files_rest.views import need_bucket_permission
    from invenio_files_rest.errors import FileSizeError

    bucket = Bucket.get(bucket_id)
    if bucket is None:
        abort(404, 'Bucket does not exist.')

    # WARNING: this function should be isomorphic with
    # invenio_files_rest.views:ObjectResource.create_object
    @need_bucket_permission('bucket-update')
    def create_object(bucket, key):
        size_limit = bucket.size_limit
        # Guard against a missing Content-Length before comparing.
        if size_limit and int(content_length or 0) > size_limit:
            desc = 'File size limit exceeded.' \
                if isinstance(size_limit, int) else size_limit.reason
            raise FileSizeError(description=desc)
        with db.session.begin_nested():
            obj = ObjectVersion.create(bucket, key)
            obj.set_contents(
                stream, size=content_length, size_limit=size_limit)
        db.session.commit()
        return obj

    return create_object(key=key, bucket=bucket)
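# Hedged usage sketch for put_file_into_bucket above: streaming the body of an
# incoming Flask request into an existing bucket. The 'X-File-Name' header and
# the way the bucket id is obtained are assumptions for illustration, not part
# of the original code.
from flask import request

obj = put_file_into_bucket(
    bucket_id=request.view_args['bucket_id'],
    key=request.headers.get('X-File-Name', 'upload.bin'),
    stream=request.stream,
    content_length=request.content_length,
)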
def delete_buckets(cls, record):
    """Mark the buckets referenced by the record's files as deleted."""
    files = record.get('_files', [])
    buckets = set()
    for f in files:
        buckets.add(f.get('bucket'))
    for b_id in buckets:
        b = Bucket.get(b_id)
        b.deleted = True
def test_factory_caches_on_request_in_request_ctx(
        self, create_record, request_ctx):
    record = create_record(published=False)
    bucket_id = record['_buckets']['deposit']
    bucket = Bucket.get(bucket_id)

    permission = FilesPermission.create(bucket, action='bucket-update')

    assert request.current_file_record == record
def delete_buckets(record):
    """Mark the buckets referenced by the record's files as deleted."""
    files = record.get('files', [])
    buckets = set()
    for f in files:
        buckets.add(f.get('bucket'))
    for b_id in buckets:
        b = Bucket.get(b_id)
        b.deleted = True
def test_factory_create_update_action_returns_CreateFilesPermission(
        self, create_record, request_ctx):
    # Create/Add file == bucket-update
    record = create_record()
    bucket_id = record['_buckets']['deposit']
    bucket = Bucket.get(bucket_id)

    permission = FilesPermission.create(bucket, action='bucket-update')

    assert type(permission) == CreateFilesPermission
def test_object_set_contents(app, db, dummy_location):
    """Test object set contents."""
    with db.session.begin_nested():
        b1 = Bucket.create()
        obj = ObjectVersion.create(b1, "LICENSE")
        assert obj.file_id is None
        assert FileInstance.query.count() == 0

        # Save a file.
        with open('LICENSE', 'rb') as fp:
            obj.set_contents(fp)

    # Assert size, location and checksum
    assert obj.file_id is not None
    assert obj.file.uri is not None
    assert obj.file.size == getsize('LICENSE')
    assert obj.file.checksum is not None
    assert b1.size == obj.file.size

    # Try to overwrite
    with db.session.begin_nested():
        with open('LICENSE', 'rb') as fp:
            pytest.raises(FileInstanceAlreadySetError, obj.set_contents, fp)

    # Save a new version with different content
    with db.session.begin_nested():
        obj2 = ObjectVersion.create(b1, "LICENSE")
        with open('README.rst', 'rb') as fp:
            obj2.set_contents(fp)

    assert obj2.file_id is not None and obj2.file_id != obj.file_id
    assert obj2.file.size == getsize('README.rst')
    assert obj2.file.uri != obj.file.uri
    assert Bucket.get(b1.id).size == obj.file.size + obj2.file.size

    obj2.file.verify_checksum()
    assert obj2.file.last_check_at
    assert obj2.file.last_check is True

    old_checksum = obj2.file.checksum
    obj2.file.checksum = "md5:invalid"
    assert obj2.file.verify_checksum() is False

    previous_last_check = obj2.file.last_check
    previous_last_check_date = obj2.file.last_check_at
    with db.session.begin_nested():
        obj2.file.checksum = old_checksum
        obj2.file.uri = 'invalid'
    pytest.raises(ResourceNotFoundError, obj2.file.verify_checksum)
    assert obj2.file.last_check == previous_last_check
    assert obj2.file.last_check_at == previous_last_check_date

    obj2.file.verify_checksum(throws=False)
    assert obj2.file.last_check is None
    assert obj2.file.last_check_at != previous_last_check_date
def test_factory_read_action_returns_ReadFilesPermission(
        self, create_record, request_ctx):
    record = create_record()
    bucket_id = record['_buckets']['deposit']
    bucket = Bucket.get(bucket_id)
    # Read file == read on an ObjectVersion
    obj = ObjectVersion.create(bucket, 'foo.txt')

    permission = FilesPermission.create(obj, action='object-read')

    assert type(permission) == ReadFilesPermission
def download():
    """The download view."""
    # store the bucket uuid
    bucket_uuid = request.form['record_bucket']
    record_id = request.form['record_id']
    # check if the session is anonymous or not
    if not hasattr(current_user, 'email'):
        usr = "******"
    else:
        usr = current_user.email
    # get the Bucket object
    bucket = Bucket.get(bucket_uuid)
    # check if the bucket exists
    if bucket is None:
        current_app.logger.error(
            "Impossible to download file requested by user= " + usr +
            ", bucket not found: " + bucket_uuid)
        abort(404)
    # store bucket values: bucket, version_id and the key
    values = str(bucket.objects[0]).split(':')
    bucket = values[0]
    version_id = values[1]
    key = values[2]
    try:
        record = RecordFare.get_record(record_id)
    except NoResultFound:
        current_app.logger.error(
            "Impossible to download file requested by user= " + usr +
            ", record id not found: " + record_id)
        abort(404)
    # check if the file is revisioned; if not, only
    # staff members can download it to do the review
    if not record['revisioned']:
        if ((not current_user.has_role('admin')) and
                (not current_user.has_role('staff'))):
            current_app.logger.error(
                "Impossible to download file= " + record['title'] +
                ", user= " + usr + " not authorized")
            # forbidden for the user
            abort(403)
    return RecordFare.download_record(record, bucket, key, version_id, usr)
def test_before_deposit_index_hook_doesnt_create_new_buckets(
        create_record, db, es):
    deposit = create_record(published=False)
    bucket = Bucket.get(deposit['_buckets']['deposit'])
    obj = ObjectVersion.create(bucket, 'foo.txt')
    stream = BytesIO(b'Hello world!')
    obj.set_contents(
        stream, size=len(stream.getvalue()), size_limit=bucket.size_limit)
    db.session.commit()
    number_buckets_preindex = len(Bucket.query.all())

    indexer = RecordIndexer()
    indexer.index(deposit)

    assert len(Bucket.query.all()) == number_buckets_preindex
def create_files(cls, record, files, existing_files):
    """Create files.

    This method is currently limited to a single bucket per record.
    """
    default_bucket = None
    # Look for bucket id in existing files.
    for f in existing_files:
        if 'bucket' in f:
            default_bucket = f['bucket']
            break

    # Create a bucket in default location if none is found.
    if default_bucket is None:
        b = Bucket.create()
        BucketTag.create(b, 'record', str(record.id))
        default_bucket = str(b.id)
        db.session.commit()
    else:
        b = Bucket.get(default_bucket)

    record['_files'] = []
    for key, meta in files.items():
        obj = cls.create_file(b, key, meta)
        ext = splitext(obj.key)[1].lower()
        if ext.startswith('.'):
            ext = ext[1:]
        last_ver = meta[-1]
        rec_docs = [
            rec_doc[1] for rec_doc in last_ver['recids_doctype']
            if rec_doc[0] == last_ver['recid']
        ]
        record['_files'].append(
            dict(bucket=str(obj.bucket.id),
                 key=obj.key,
                 version_id=str(obj.version_id),
                 size=obj.file.size,
                 checksum=obj.file.checksum,
                 type=ext,
                 doctype=rec_docs[0] if rec_docs else ''))
    db.session.add(RecordsBuckets(record_id=record.id, bucket_id=b.id))
    record.commit()
    db.session.commit()
    return [b]
def test_multipart_full(app, db, bucket):
    """Test full multipart object."""
    app.config.update(dict(
        FILES_REST_MULTIPART_CHUNKSIZE_MIN=5 * 1024 * 1024,
        FILES_REST_MULTIPART_CHUNKSIZE_MAX=5 * 1024 * 1024 * 1024,
    ))

    # Initial parameters
    chunks = 20
    chunk_size = 5 * 1024 * 1024  # 5 MiB
    last_chunk = 1024 * 1024  # 1 MiB
    size = (chunks - 1) * chunk_size + last_chunk

    # Initiate
    mp = MultipartObject.create(
        bucket, 'testfile', size=size, chunk_size=chunk_size)
    db.session.commit()

    # Create parts
    for i in range(chunks):
        part_size = chunk_size if i < chunks - 1 else last_chunk
        Part.create(mp, i, stream=make_stream(part_size))
        db.session.commit()

    # Complete
    mp.complete()
    db.session.commit()

    # Merge parts.
    pre_size = mp.bucket.size
    mp.merge_parts()
    db.session.commit()

    # Test size update
    bucket = Bucket.get(bucket.id)
    assert bucket.size == pre_size

    app.config.update(dict(
        FILES_REST_MULTIPART_CHUNKSIZE_MIN=2,
        FILES_REST_MULTIPART_CHUNKSIZE_MAX=20,
    ))
def test_update_files_permission_factory(
        create_real_record, superuser_role_need):
    record = create_real_record()
    bucket_id = record['_bucket']
    bucket = Bucket.get(bucket_id)
    action = 'bucket-update'

    permission = record_files_permission_factory(bucket, action)

    # Only super_user + owners
    assert permission.needs == {
        superuser_role_need, UserNeed(1), UserNeed(2), UserNeed(3)
    }
    assert permission.excludes == set()
    assert permission.action == 'update_files'
def test_bucket_create_object(app, db):
    """Test bucket creation."""
    with db.session.begin_nested():
        l1 = Location(name='test1', uri='file:///tmp/1', default=False)
        l2 = Location(name='test2', uri='file:///tmp/2', default=True)
        db.session.add(l1)
        db.session.add(l2)
    assert Location.query.count() == 2

    # Simple create
    with db.session.begin_nested():
        b = Bucket.create()
        assert b.id
        assert b.default_location == Location.get_default().id
        assert b.location == Location.get_default()
        assert b.default_storage_class == \
            app.config['FILES_REST_DEFAULT_STORAGE_CLASS']
        assert b.size == 0
        assert b.quota_size is None
        assert b.max_file_size is None
        assert b.deleted is False

    # __repr__ test
    assert str(b) == str(b.id)

    # Retrieve one
    assert Bucket.get(b.id).id == b.id

    # Create with location_name and storage class
    with db.session.begin_nested():
        b = Bucket.create(location=l1, storage_class='A')
        assert b.default_location == Location.get_by_name('test1').id
        assert b.default_storage_class == 'A'

        # Create using location name instead
        b = Bucket.create(location=l2.name, storage_class='A')
        assert b.default_location == Location.get_by_name('test2').id

    # Retrieve one
    assert Bucket.all().count() == 3

    # Invalid storage class.
    pytest.raises(ValueError, Bucket.create, storage_class='X')
def delete():
    """The delete view."""
    bucket_uuid = request.form['record_bucket']
    record_id = request.form['record_id']
    # get the Bucket object
    bucket = Bucket.get(bucket_uuid)
    # check if the bucket exists
    if bucket is None:
        current_app.logger.error(
            "Impossible to delete the file requested by user= ******"
            ", bucket not found: " + bucket_uuid)
        abort(404)
    # store bucket values: version_id and the key
    values = str(bucket.objects[0]).split(':')
    version_id = values[1]
    key = values[2]
    # retrieve the fileinstance_id
    fileinstance_id = str(ObjectVersion.get(bucket, key, version_id).file_id)
    # create a RecordFare object, an extension of invenio_records_files.Record
    try:
        record = RecordFare.get_record(record_id)
    except NoResultFound:
        current_app.logger.error(
            "Impossible to delete file requested by user= ******"
            ", record id not found: " + record_id)
        abort(404)
    # check if the user is the owner of the record, or is admin or staff
    if ((not current_user.id == record['owner']) and
            (not current_user.has_role('admin')) and
            (not current_user.has_role('staff'))):
        current_app.logger.error(
            "Impossible to delete file requested by user= ******"
            ", this user does not have the permission, action not allowed")
        abort(403)
    record.delete_record(fileinstance_id, record_id)
    return redirect(url_for('file_management.success_delete'))
def test_handle_payload(app, db, location, tester_id, remote_token,
                        github_api):
    from invenio_webhooks.models import Event

    extra_data = remote_token.remote_account.extra_data
    assert '1' in extra_data['repos']
    assert 'repo-1' in extra_data['repos']['1']['full_name']
    assert '2' in extra_data['repos']
    assert 'repo-2' in extra_data['repos']['2']['full_name']

    # Create the repository that will make the release
    with db.session.begin_nested():
        Repository.enable(tester_id, github_id=1, name='repo-1', hook=1234)

    event = Event(
        receiver_id='github',
        user_id=tester_id,
        payload=fixtures.PAYLOAD('auser', 'repo-1', 1)
    )
    db.session.add(event)

    with patch('invenio_deposit.api.Deposit.indexer'):
        event.process()

    repo_1 = Repository.query.filter_by(name='repo-1', github_id=1).first()
    assert repo_1.releases.count() == 1

    release = repo_1.releases.first()
    assert release.status == ReleaseStatus.PUBLISHED
    assert release.errors is None
    assert release.tag == 'v1.0'
    assert release.record is not None
    assert release.record.get('control_number') == '1'

    record_files = release.record.get('_files')
    assert len(record_files) == 1
    assert record_files[0]['size'] > 0

    bucket = Bucket.get(record_files[0]['bucket'])
    assert bucket is not None
    assert len(bucket.objects) == 1
    assert bucket.objects[0].key == 'auser/repo-1-v1.0.zip'
def test_before_deposit_index_hook_sets_files(create_record, db, es):
    deposit = create_record(published=False)
    # Reproduce file upload: add file to bucket associated with deposit
    bucket = Bucket.get(deposit['_buckets']['deposit'])
    obj = ObjectVersion.create(bucket, 'foo.txt')
    stream = BytesIO(b'Hello world!')
    obj.set_contents(
        stream, size=len(stream.getvalue()), size_limit=bucket.size_limit)
    db.session.commit()

    indexer = RecordIndexer()
    indexer.index(deposit)

    # Get the raw indexed document
    index, doc_type = indexer.record_to_index(deposit)
    es_deposit = es.get(index=index, doc_type=doc_type, id=deposit.id)

    assert '_files' in es_deposit['_source']
    assert es_deposit['_source']['_files'][0]['type'] == 'txt'
def create_files(cls, record, files, existing_files):
    """Create files.

    This method is currently limited to a single bucket per record.
    """
    default_bucket = None
    # Look for bucket id in existing files.
    for f in existing_files:
        if 'bucket' in f:
            default_bucket = f['bucket']
            break

    # Create a bucket in default location if none is found.
    if default_bucket is None:
        b = Bucket.create()
        BucketTag.create(b, 'record', str(record.id))
        default_bucket = str(b.id)
        db.session.commit()
    else:
        b = Bucket.get(default_bucket)

    record['_files'] = []
    for key, meta in files.items():
        obj = cls.create_file(b, key, meta)
        ext = splitext(obj.key)[1].lower()
        if ext.startswith('.'):
            ext = ext[1:]
        record['_files'].append(dict(
            bucket=str(obj.bucket.id),
            key=obj.key,
            version_id=str(obj.version_id),
            size=obj.file.size,
            checksum=obj.file.checksum,
            type=ext,
        ))
    db.session.add(
        RecordsBuckets(record_id=record.id, bucket_id=b.id)
    )
    record.commit()
    db.session.commit()
    return [b]
def before_deposit_index_hook(sender, json=None, record=None, index=None,
                              **kwargs):
    """Hook to transform Deposits before indexing in ES.

    :param sender: The entity sending the signal.
    :param json: The dumped record dictionary which can be modified.
    :param record: The record (deposit) being indexed.
    :param index: The index in which the record will be indexed.
    :param kwargs: Any other parameters.
    """
    if (index.startswith('records-record') and
            json.get('type') == 'draft'):
        bucket = Bucket.get(json.get('_buckets', {}).get('deposit'))
        if bucket:
            iterator = FilesIterator(
                record, bucket=bucket, file_cls=record.file_cls)
            json['_files'] = iterator.dumps()
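# Hedged wiring sketch (not part of the snippets above): how a hook like
# before_deposit_index_hook is typically registered, assuming it is meant for
# invenio_indexer's before_record_index signal; the `app` sender is a
# placeholder for the Flask application object.
from invenio_indexer.signals import before_record_index

before_record_index.connect(before_deposit_index_hook, sender=app)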
def update_record(pid, schema, data, files, skip_files):
    """Updates the given record."""
    record = Record.get_record(pid.object_uuid)
    with db.session.begin_nested():
        if record.files and not skip_files:
            bucket_id = record.files.bucket
            bucket = Bucket.get(bucket_id.id)
            for o in ObjectVersion.get_by_bucket(bucket).all():
                o.remove()
                o.file.delete()
            RecordsBuckets.query.filter_by(
                record=record.model, bucket=bucket).delete()
            bucket_id.remove()
            db.session.commit()
        record.update(data)
        if not skip_files:
            bucket = Bucket.create()
            handle_record_files(data, bucket, files, skip_files)
            RecordsBuckets.create(record=record.model, bucket=bucket)
    return record
def test_multipart_full(app, db, bucket):
    """Test full multipart object."""
    app.config.update(dict(
        FILES_REST_MULTIPART_CHUNKSIZE_MIN=5 * 1024 * 1024,
        FILES_REST_MULTIPART_CHUNKSIZE_MAX=5 * 1024 * 1024 * 1024,
    ))

    # Initial parameters
    chunks = 20
    chunk_size = 5 * 1024 * 1024  # 5 MiB
    last_chunk = 1024 * 1024  # 1 MiB
    size = (chunks - 1) * chunk_size + last_chunk

    # Initiate
    mp = MultipartObject.create(
        bucket, 'testfile', size=size, chunk_size=chunk_size)
    db.session.commit()

    # Create parts
    for i in range(chunks):
        part_size = chunk_size if i < chunks - 1 else last_chunk
        Part.create(mp, i, stream=make_stream(part_size))
        db.session.commit()

    # Complete
    mp.complete()
    db.session.commit()

    # Merge parts.
    pre_size = mp.bucket.size
    mp.merge_parts()
    db.session.commit()

    # Test size update
    bucket = Bucket.get(bucket.id)
    assert bucket.size == pre_size

    app.config.update(dict(
        FILES_REST_MULTIPART_CHUNKSIZE_MIN=2,
        FILES_REST_MULTIPART_CHUNKSIZE_MAX=20,
    ))
def test_handle_payload(app, db, location, tester_id, remote_token,
                        github_api):
    from invenio_webhooks.models import Event

    extra_data = remote_token.remote_account.extra_data
    assert '1' in extra_data['repos']
    assert 'repo-1' in extra_data['repos']['1']['full_name']
    assert '2' in extra_data['repos']
    assert 'repo-2' in extra_data['repos']['2']['full_name']

    # Create the repository that will make the release
    with db.session.begin_nested():
        Repository.enable(tester_id, github_id=1, name='repo-1', hook=1234)

    event = Event(receiver_id='github',
                  user_id=tester_id,
                  payload=fixtures.PAYLOAD('auser', 'repo-1', 1))
    db.session.add(event)

    with patch('invenio_deposit.api.Deposit.indexer'):
        event.process()

    repo_1 = Repository.query.filter_by(name='repo-1', github_id=1).first()
    assert repo_1.releases.count() == 1

    release = repo_1.releases.first()
    assert release.status == ReleaseStatus.PUBLISHED
    assert release.errors is None
    assert release.tag == 'v1.0'
    assert release.record is not None
    assert release.record.get('control_number') == '1'

    record_files = release.record.get('_files')
    assert len(record_files) == 1
    assert record_files[0]['size'] > 0

    bucket = Bucket.get(record_files[0]['bucket'])
    assert bucket is not None
    assert len(bucket.objects) == 1
    assert bucket.objects[0].key == 'auser/repo-1-v1.0.zip'
def add_master_to_video(video_deposit, filename, stream, video_duration):
    """Add a master file inside video."""
    video_bucket = Bucket.get(video_deposit['_buckets']['deposit'])
    # Master video
    master_obj = ObjectVersion.create(
        bucket=video_bucket, key=filename, stream=stream)
    _create_tags(
        master_obj,
        display_aspect_ratio='16:9',
        bit_rate='959963',
        codec_name='h264',
        codec_long_name='H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10',
        duration=video_duration,
        nb_framesr='1557',
        size='10498667',
        media_type='video',
        context_type='master',
        avg_frame_rate='25/1',
        width='1280',
        height='720')
    return str(master_obj.version_id)
def test_post_bucket(app, client, headers, dummy_location, permissions,
                     user, expected):
    """Test post a bucket."""
    expected_keys = [
        'id', 'links', 'size', 'quota_size', 'max_file_size', 'locked',
        'created', 'updated'
    ]
    params = [{}, {'location_name': dummy_location.name}]

    login_user(client, permissions[user])

    for data in params:
        resp = client.post(
            url_for('invenio_files_rest.location_api'),
            data=data,
            headers=headers)

        assert resp.status_code == expected
        if resp.status_code == 200:
            resp_json = get_json(resp)
            for key in expected_keys:
                assert key in resp_json
            assert Bucket.get(resp_json['id'])
def create_files(cls, record, files, existing_files):
    """Create files.

    This method is currently limited to a single bucket per record.
    """
    default_bucket = None
    # Look for bucket id in existing files.
    for f in existing_files:
        if 'bucket' in f:
            default_bucket = f['bucket']
            break

    # Create a bucket in default location if none is found.
    if default_bucket is None:
        b = Bucket.create()
        BucketTag.create(b, 'record', str(record.id))
        default_bucket = str(b.id)
        db.session.commit()
    else:
        b = Bucket.get(default_bucket)

    record['files'] = []
    for key, meta in files.items():
        obj = cls.create_file(b, key, meta)
        ext = splitext(obj.key)[1].lower()
        if ext.startswith('.'):
            ext = ext[1:]
        record['files'].append(dict(
            bucket=str(obj.bucket.id),
            filename=obj.key,
            version_id=str(obj.version_id),
            size=obj.file.size,
            checksum=obj.file.checksum,
            type=ext,
        ))
    record.commit()
    db.session.commit()
    return [b]
def create_object(bucket, key, file_instance):
    """Upload the file.

    :param bucket: the bucket id or instance
    :param key: the file name
    :param file_instance: the file object
    """
    if isinstance(bucket, str):
        size_limit = Bucket.get(bucket).size_limit
    else:
        size_limit = bucket.size_limit
    with db.session.begin_nested():
        obj = ObjectVersion.create(bucket, key)
        obj.set_contents(
            stream=file_instance.stream,
            size_limit=size_limit
        )
    db.session.commit()
    file_uploaded.send(obj)
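# Hedged usage sketch for the create_object helper above. It assumes a Flask
# request context and that the upload arrives as a Werkzeug FileStorage
# (which exposes a `.stream` attribute); `bucket` is an existing Bucket
# instance or its id as a string.
from flask import request

uploaded = request.files['file']  # werkzeug FileStorage
create_object(bucket, uploaded.filename, uploaded)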
def test_post_bucket(app, client, headers, dummy_location, permissions,
                     user, expected):
    """Test post a bucket."""
    expected_keys = [
        'id', 'links', 'size', 'quota_size', 'max_file_size', 'locked',
        'created', 'updated']
    params = [{}, {'location_name': dummy_location.name}]

    login_user(client, permissions[user])

    for data in params:
        resp = client.post(
            url_for('invenio_files_rest.location_api'),
            data=data,
            headers=headers
        )

        assert resp.status_code == expected
        if resp.status_code == 200:
            resp_json = get_json(resp)
            for key in expected_keys:
                assert key in resp_json
            assert Bucket.get(resp_json['id'])
def test_object_set_contents(app, db, dummy_location):
    """Test object set contents."""
    with db.session.begin_nested():
        b1 = Bucket.create()
        obj = ObjectVersion.create(b1, "LICENSE")
        assert obj.file_id is None
        assert FileInstance.query.count() == 0

        # Save a file.
        with open('LICENSE', 'rb') as fp:
            obj.set_contents(fp)

    # Assert size, location and checksum
    assert obj.file_id is not None
    assert obj.file.uri is not None
    assert obj.file.size == getsize('LICENSE')
    assert obj.file.checksum is not None
    assert b1.size == obj.file.size

    # Try to overwrite
    with db.session.begin_nested():
        with open('LICENSE', 'rb') as fp:
            pytest.raises(FileInstanceAlreadySetError, obj.set_contents, fp)

    # Save a new version with different content
    with db.session.begin_nested():
        obj2 = ObjectVersion.create(b1, "LICENSE")
        with open('README.rst', 'rb') as fp:
            obj2.set_contents(fp)

    assert obj2.file_id is not None and obj2.file_id != obj.file_id
    assert obj2.file.size == getsize('README.rst')
    assert obj2.file.uri != obj.file.uri
    assert Bucket.get(b1.id).size == obj.file.size + obj2.file.size

    obj2.file.verify_checksum()
    obj2.file.checksum = "md5:invalid"
    pytest.raises(AssertionError, obj2.file.verify_checksum)
def test_delete(client, db, bucket, multipart, multipart_url, permissions,
                parts, get_json):
    """Test deleting a multipart upload."""
    assert bucket.size == multipart.size

    cases = [
        (None, 404),
        ('auth', 404),
        ('objects', 404),
        ('bucket', 404),
        ('location', 204),
    ]

    for user, expected in cases:
        login_user(client, permissions[user])
        res = client.delete(multipart_url)
        assert res.status_code == expected
        if res.status_code == 204:
            assert client.get(multipart_url).status_code == 404
            assert MultipartObject.query.count() == 0
            assert Part.query.count() == 0
            assert Bucket.get(bucket.id).size == 0
def update_record(pid, schema, data, files, skip_files):
    """Updates the given record."""
    record = Record.get_record(pid.object_uuid)
    with db.session.begin_nested():
        if record.files and not skip_files:
            bucket_id = record.files.bucket
            bucket = Bucket.get(bucket_id.id)
            for o in ObjectVersion.get_by_bucket(bucket).all():
                o.remove()
                o.file.delete()
            RecordsBuckets.query.filter_by(
                record=record.model,
                bucket=bucket
            ).delete()
            bucket_id.remove()
            db.session.commit()
        record.update(data)
        if not skip_files:
            bucket = Bucket.create()
            handle_record_files(data, bucket, files, skip_files)
            RecordsBuckets.create(
                record=record.model, bucket=bucket)
    return record
def bucket(self):
    """Get bucket instance."""
    if self._bucket is None:
        if self.bucket_id:
            self._bucket = Bucket.get(self.bucket_id)
    return self._bucket
def file_content_bucket(self):
    """Get file content bucket instance."""
    if self._bucket_content is None:
        if self.bucket_content_id:
            self._bucket_content = Bucket.get(self.bucket_content_id)
    return self._bucket_content
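# Hedged sketch of the kind of wrapper class the two lazy accessors above
# belong to: it stores bucket ids and resolves Bucket instances on first
# access. The class name and constructor below are assumptions for
# illustration, not original code.
class BucketHolder(object):

    def __init__(self, bucket_id=None, bucket_content_id=None):
        self.bucket_id = bucket_id
        self.bucket_content_id = bucket_content_id
        self._bucket = None
        self._bucket_content = None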