def test_location_default(app, db):
    """Check that no default location is resolved for ambiguous setups."""
    # A single location not flagged as default yields no default.
    with db.session.begin_nested():
        db.session.add(Location(name='test1', uri='file:///tmp', default=False))
    assert Location.get_default() is None

    # Two locations both flagged default are ambiguous -> still no default.
    with db.session.begin_nested():
        for loc_name in ('test2', 'test3'):
            db.session.add(
                Location(name=loc_name, uri='file:///tmp', default=True))
    assert Location.get_default() is None
def test_location_default(app, db):
    """Test location model."""
    # No default among locations: a lone non-default entry.
    with db.session.begin_nested():
        non_default = Location(name='test1', uri='file:///tmp', default=False)
        db.session.add(non_default)
    assert Location.get_default() is None

    # Multiple locations claiming default -> none is returned.
    with db.session.begin_nested():
        first_default = Location(name='test2', uri='file:///tmp', default=True)
        second_default = Location(name='test3', uri='file:///tmp', default=True)
        db.session.add(first_default)
        db.session.add(second_default)
    assert Location.get_default() is None
def create(cls, data, id_=None):
    """Create a deposit.

    Adds bucket creation immediately on deposit creation.

    :param data: deposit metadata; must carry a ``$schema`` URL containing
        a ``/schemas/`` segment, otherwise ``None`` is returned.
    :param id_: optional explicit deposit identifier.
    :returns: the created deposit, or ``None`` when ``$schema`` is
        missing or malformed.
    """
    bucket = Bucket.create(default_location=Location.get_default())
    try:
        schema = data.get("$schema", None).split('/schemas/', 1)[1]
    except (IndexError, AttributeError):
        # $schema missing (AttributeError on None) or without a
        # '/schemas/' segment (IndexError) -> cannot create the deposit.
        return None
    if schema:
        # .items() instead of Python-2-only .iteritems().
        _deposit_group = next(
            (depgroup
             for dg, depgroup in
             current_app.config.get('DEPOSIT_GROUPS').items()
             if schema in depgroup['schema']),
            None
        )
        # Guard: a schema not mapped to any deposit group previously
        # raised AttributeError on the None returned by next().
        if _deposit_group is not None:
            data["_experiment"] = _deposit_group.get("experiment", "Unknown")
    deposit = super(CAPDeposit, cls).create(data, id_=id_)
    add_owner_permissions(deposit.id)
    RecordsBuckets.create(record=deposit.model, bucket=bucket)
    return deposit
def load(source, verbose, cache, files, skip, max=None):
    """Load records, attach files and index them.

    :param source: file object containing JSON records (dict or list).
    :param max: optional cap on the number of records to load.
        NOTE: shadows the builtin ``max``; name kept for CLI compatibility.
    """
    data = json.load(source)
    if isinstance(data, dict):
        data = [data]

    # Staging directory for remote fulltext files.
    upload_dir = os.path.join(current_app.instance_path, 'uploads')
    # exist_ok replaces the try/except FileExistsError dance (Python 3).
    os.makedirs(upload_dir, exist_ok=True)

    # Initialize the default file location if none is configured yet.
    if not Location.get_default():
        data_dir = os.path.join(current_app.instance_path, 'files')
        db.session.add(
            Location(name='default', uri='file://' + data_dir, default=True))
        db.session.commit()

    # Create records and queue them for indexing.
    click.secho('Creating records...', fg='green')
    rec_uuids = load_records_with_files(data, upload_dir, max, verbose,
                                        files, cache, skip)
    click.secho('Put %d records for indexing...' % len(rec_uuids), fg='green')
    RecordIndexer().bulk_index(rec_uuids)
    click.secho('Execute "run" command to process the queue!', fg='yellow')
def ping_db():
    """Load balancer ping view.

    :returns: ``'OK'`` when the database answers the default-location
        query, ``'ERROR'`` on connection failure.
    """
    try:
        # The query itself is the health probe; its result is unused,
        # so don't bind it to a local.
        Location.get_default()
        return 'OK'
    except OperationalError:
        return 'ERROR'
def ping_files():
    """Load balancer ping view.

    :returns: ``"OK"`` when the probe file under the default location is
        readable, ``"ERROR"`` on database or filesystem failure.
    """
    try:
        default_location = Location.get_default().uri
        test_file_path = join(default_location, 'test.txt')
        # 'with' guarantees the probe file handle is closed
        # (the original leaked the open handle).
        with open(test_file_path, 'r'):
            pass
        return "OK"
    except (OperationalError, IOError):
        return "ERROR"
def ping(service=None):
    """Ping the services for status check."""
    try:
        # No service named: plain liveness answer.
        if service is None:
            return 'Pong!', 200
        if service == 'db':
            # A successful query proves database connectivity.
            Location.get_default()
        elif service == 'search':
            # Attribute access triggers a search-cluster round trip.
            current_search.cluster_version
        elif service == 'files':
            probe_path = join(Location.get_default().uri, 'test.txt')
            with open(probe_path, 'r'):
                pass
        return 'OK!', 200
    except (OperationalError, IOError, ConnectionError):
        return 'ERROR', 500
def test_bucket_create_object(app, db):
    """Test bucket creation."""
    with db.session.begin_nested():
        loc_plain = Location(name='test1', uri='file:///tmp/1', default=False)
        loc_default = Location(name='test2', uri='file:///tmp/2', default=True)
        db.session.add(loc_plain)
        db.session.add(loc_default)
    assert Location.query.count() == 2

    # Simple create
    with db.session.begin_nested():
        bucket = Bucket.create()
        assert bucket.id
        assert bucket.default_location == Location.get_default().id
        assert bucket.location == Location.get_default()
        assert bucket.default_storage_class == \
            app.config['FILES_REST_DEFAULT_STORAGE_CLASS']
        assert bucket.size == 0
        assert bucket.quota_size is None
        assert bucket.max_file_size is None
        assert bucket.deleted is False
        # __repr__ test
        assert str(bucket) == str(bucket.id)

    # Retrieve one
    assert Bucket.get(bucket.id).id == bucket.id

    # Create with location_name and storage class
    with db.session.begin_nested():
        bucket = Bucket.create(location=loc_plain, storage_class='A')
        assert bucket.default_location == Location.get_by_name('test1').id
        assert bucket.default_storage_class == 'A'
        # Create using location name instead
        bucket = Bucket.create(location=loc_default.name, storage_class='A')
        assert bucket.default_location == Location.get_by_name('test2').id

    # Retrieve one
    assert Bucket.all().count() == 3

    # Invalid storage class.
    pytest.raises(ValueError, Bucket.create, storage_class='X')
def test_bucket_create_object(app, db):
    """Test bucket creation."""
    with db.session.begin_nested():
        secondary = Location(name='test1', uri='file:///tmp/1', default=False)
        primary = Location(name='test2', uri='file:///tmp/2', default=True)
        db.session.add(secondary)
        db.session.add(primary)
    assert Location.query.count() == 2

    # Simple create: defaults come from the default location and config.
    with db.session.begin_nested():
        b = Bucket.create()
        assert b.id
        assert b.default_location == Location.get_default().id
        assert b.location == Location.get_default()
        expected_class = app.config['FILES_REST_DEFAULT_STORAGE_CLASS']
        assert b.default_storage_class == expected_class
        assert b.size == 0
        assert b.quota_size is None
        assert b.max_file_size is None
        assert b.deleted is False
        # __repr__ test
        assert str(b) == str(b.id)

    # Retrieve one
    assert Bucket.get(b.id).id == b.id

    # Create with location_name and storage class
    with db.session.begin_nested():
        b = Bucket.create(location=secondary, storage_class='A')
        assert b.default_location == Location.get_by_name('test1').id
        assert b.default_storage_class == 'A'
        # Create using location name instead
        b = Bucket.create(location=primary.name, storage_class='A')
        assert b.default_location == Location.get_by_name('test2').id

    # Retrieve one
    assert Bucket.all().count() == 3

    # Invalid storage class.
    pytest.raises(ValueError, Bucket.create, storage_class='X')
def create(cls, data, id_=None):
    """Create a deposit.

    Adds bucket creation immediately on deposit creation.
    """
    # Every deposit gets its own bucket in the default location.
    deposit_bucket = Bucket.create(default_location=Location.get_default())
    data['_buckets'] = {'deposit': str(deposit_bucket.id)}
    deposit = super(CAPDeposit, cls).create(data, id_=id_)
    # Link the record model to its bucket.
    RecordsBuckets.create(record=deposit.model, bucket=deposit_bucket)
    return deposit
def test_location(app, db):
    """Test location model."""
    fixtures = [('test1', False), ('test2', True), ('test3', False)]
    with db.session.begin_nested():
        for loc_name, is_default in fixtures:
            db.session.add(
                Location(name=loc_name, uri='file:///tmp', default=is_default))

    # Every location is retrievable by name.
    for loc_name, _ in fixtures:
        assert Location.get_by_name(loc_name).name == loc_name
    # The single default=True entry wins.
    assert Location.get_default().name == 'test2'
    assert len(Location.all()) == 3
    assert str(Location.get_by_name('test1')) == 'test1'
def initialize_communities_bucket():
    """Initialize the communities file bucket.

    :raises: `invenio_files_rest.errors.FilesException`
    """
    bucket_id = UUID(current_app.config['COMMUNITIES_BUCKET_UUID'])
    # Guard clause: a pre-existing bucket with this UUID is an error.
    if Bucket.query.get(bucket_id):
        raise FilesException(
            "Bucket with UUID {} already exists.".format(bucket_id))
    bucket = Bucket(
        id=bucket_id,
        location=Location.get_default(),
        default_storage_class=current_app.config[
            'FILES_REST_DEFAULT_STORAGE_CLASS'])
    db.session.add(bucket)
    db.session.commit()
def initialize_communities_bucket():
    """Initialize the communities file bucket.

    :raises: `invenio_files_rest.errors.FilesException`
    """
    communities_bucket_id = UUID(current_app.config['COMMUNITIES_BUCKET_UUID'])
    if Bucket.query.get(communities_bucket_id):
        raise FilesException("Bucket with UUID {} already exists.".format(
            communities_bucket_id))
    else:
        # Build the bucket in the default location with the configured
        # storage class, then persist it.
        default_class = current_app.config['FILES_REST_DEFAULT_STORAGE_CLASS']
        default_loc = Location.get_default()
        new_bucket = Bucket(id=communities_bucket_id,
                            location=default_loc,
                            default_storage_class=default_class)
        db.session.add(new_bucket)
        db.session.commit()
def test_location(app, db):
    """Test location model."""
    with db.session.begin_nested():
        first = Location(name='test1', uri='file:///tmp', default=False)
        second = Location(name='test2', uri='file:///tmp', default=True)
        third = Location(name='test3', uri='file:///tmp', default=False)
        db.session.add(first)
        db.session.add(second)
        db.session.add(third)

    assert Location.get_by_name('test1').name == 'test1'
    assert Location.get_by_name('test2').name == 'test2'
    assert Location.get_by_name('test3').name == 'test3'
    # Only the location flagged default=True is returned as default.
    assert Location.get_default().name == 'test2'
    assert len(Location.all()) == 3
    # __str__ is the location name.
    assert str(Location.get_by_name('test1')) == 'test1'
def create_bucket_from_dir(source_dir, location_obj=None):
    """Create bucket from the specified source directory.

    :param source_dir: The directory to create the bucket from.
    :param location_obj: Optional location object to use. If None
        is specified, get the current default location.
    :returns: The new bucket object.
    """
    # Import unconditionally: Bucket/ObjectVersion are needed below even
    # when a location object is passed in (the original only imported
    # them inside the fallback branch).
    from invenio_files_rest.models import Bucket, Location, ObjectVersion

    if not location_obj:
        # BUGFIX: the fallback read an undefined name ``location``
        # (NameError); the default location is the intended value.
        location_obj = Location.get_default()
    bucket_obj = Bucket.create(location_obj)
    for file_name in os.listdir(source_dir):
        full_file_path = os.path.join(source_dir, file_name)
        if os.path.isdir(full_file_path):
            continue
        # 'with' closes each source file (handles were leaked before).
        with open(full_file_path, 'rb') as file_obj:
            ObjectVersion.create(bucket_obj, key=file_name, stream=file_obj)
    db.session.commit()
    return bucket_obj
def create_bucket(bucket_id=None):
    """Return an existing bucket by id or create a fresh one.

    Ensures a default ``Location`` exists first, creating one lazily
    at the configured bucket URL.

    :param bucket_id: optional id of an existing bucket to look up.
    :returns: JSON response with the resulting bucket's id.
    """
    # NOTE(review): the commit and the response are issued inside the
    # begin_nested block — presumably intentional; confirm against the
    # project's session handling before restructuring.
    with db.session.begin_nested():
        loc = Location.get_default()
        if not loc:
            # No default location yet: create one pointing at the
            # configured default bucket URL.
            loc = Location(name='local',
                           uri=GEO_KNOWLEDGE_HUB_DEFAULT_BUCKET_URL,
                           default=True)
            db.session.add(loc)
        bucket = None
        if bucket_id:
            bucket = db.session.query(Bucket).filter(
                Bucket.id == bucket_id).first()
        if not bucket:
            # 100 MB bucket quota and identical per-file size cap.
            bucket = Bucket.create(quota_size=100 * 1000 * 1000,
                                   max_file_size=100 * 1000 * 1000,
                                   locked=False)
        db.session.commit()
        return jsonify({"bucket_id": bucket.id})
def add_record(metadata, collection, schema, force, files=()):
    """Add record.

    :param metadata: record metadata.
    :param collection: collection name the record belongs to.
    :param schema: validation schema (ignored when ``force`` is truthy).
    :param force: skip schema validation when truthy.
    :param files: iterable of fixture file names to attach.
        BUGFIX: default changed from a shared mutable ``[]`` to ``()``.
    """
    collection = Collection.query.filter(
        Collection.name == collection).first()
    if collection is None:
        return
    data, pid, recid = construct_record(
        collection, metadata, 1, {} if force else schema)
    # NOTE: the original also read current_app.config['DATADIR'] into an
    # unused local; dropped.
    buckets = []
    data['_files'] = []
    for file in files:
        bucket = Bucket.create(default_location=Location.get_default())
        buckets.append(bucket)
        with open(pkg_resources.resource_filename(
                'cap.modules.fixtures',
                os.path.join('data', 'files', file)), 'rb') as fp:
            obj = ObjectVersion.create(bucket, file, stream=fp)
        data['_files'].append({
            'bucket': str(obj.bucket_id),
            'key': obj.key,
            'size': obj.file.size,
            'checksum': str(obj.file.checksum),
            'version_id': str(obj.version_id),
        })
    try:
        record = Record.create(data, id_=recid)
        for bucket in buckets:
            rb = RecordsBuckets(record_id=record.id, bucket_id=bucket.id)
            db.session.add(rb)
        # Invenio-Indexer is delegating the document inferring to
        # Invenio-Search which is analysing the string splitting by `/` and
        # using `.json` to be sure that it cans understand the mapping.
        record['$schema'] = 'mappings/{0}.json'.format(collection.name.lower())
        indexer = RecordIndexer()
        indexer.index(record)
        # Creating permission needs for the record
        action_edit_record = RecordUpdateActionNeed(str(recid))
        action_read_record = RecordReadActionNeed(str(recid))
        action_index_record = RecordIndexActionNeed(str(recid))
        # Giving index, read, write permissions to user/creator
        db.session.add(ActionUsers.allow(action_edit_record))
        db.session.add(ActionUsers.allow(action_read_record))
        db.session.add(ActionUsers.allow(action_index_record))
        db.session.commit()
        print("DONE!!!")
    except ValidationError as error:
        print("============================")
        pprint(error.message)
        pprint(error.path)
        print("============================")
        db.session.rollback()
def add_record(metadata, collection, schema, force, files=()):
    """Add record.

    :param metadata: record metadata.
    :param collection: collection name the record belongs to.
    :param schema: validation schema (ignored when ``force`` is truthy).
    :param force: skip schema validation when truthy.
    :param files: iterable of fixture file names to attach.
        BUGFIX: default changed from a shared mutable ``[]`` to ``()``.
    """
    collection = Collection.query.filter(Collection.name == collection).first()
    if collection is None:
        return
    data, pid, recid = construct_record(collection, metadata, 1,
                                        {} if force else schema)
    # NOTE: the original also read current_app.config['DATADIR'] into an
    # unused local; dropped.
    buckets = []
    data['_files'] = []
    for file in files:
        bucket = Bucket.create(default_location=Location.get_default())
        buckets.append(bucket)
        with open(
                pkg_resources.resource_filename(
                    'cap.modules.fixtures',
                    os.path.join('data', 'files', file)), 'rb') as fp:
            obj = ObjectVersion.create(bucket, file, stream=fp)
        data['_files'].append({
            'bucket': str(obj.bucket_id),
            'key': obj.key,
            'size': obj.file.size,
            'checksum': str(obj.file.checksum),
            'version_id': str(obj.version_id),
        })
    try:
        record = Record.create(data, id_=recid)
        for bucket in buckets:
            rb = RecordsBuckets(record_id=record.id, bucket_id=bucket.id)
            db.session.add(rb)
        # Invenio-Indexer is delegating the document inferring to
        # Invenio-Search which is analysing the string splitting by `/` and
        # using `.json` to be sure that it cans understand the mapping.
        record['$schema'] = 'mappings/{0}.json'.format(collection.name.lower())
        indexer = RecordIndexer()
        indexer.index(record)
        # Creating permission needs for the record
        action_edit_record = RecordUpdateActionNeed(str(recid))
        action_read_record = RecordReadActionNeed(str(recid))
        action_index_record = RecordIndexActionNeed(str(recid))
        # Giving index, read, write permissions to user/creator
        db.session.add(ActionUsers.allow(action_edit_record))
        db.session.add(ActionUsers.allow(action_read_record))
        db.session.add(ActionUsers.allow(action_index_record))
        db.session.commit()
        print("DONE!!!")
    except ValidationError as error:
        print("============================")
        pprint(error.message)
        pprint(error.path)
        print("============================")
        db.session.rollback()