def fs_connect(self, config):
    """Configure the local storage path and the GridFS backend from *config*.

    Raises FsException for missing/invalid parameters; any unexpected error
    is wrapped in FsException as well.
    """
    try:
        if "logger_name" in config:
            self.logger = logging.getLogger(config["logger_name"])
        # Guard clause instead of if/else: "path" is mandatory.
        if "path" not in config:
            raise FsException("Missing parameter \"path\"")
        self.path = config["path"]
        if not self.path.endswith("/"):
            self.path += "/"
        if not os.path.exists(self.path):
            raise FsException(
                "Invalid configuration param at '[storage]': path '{}' does not exist"
                .format(config["path"]))
        elif not os.access(self.path, os.W_OK):
            raise FsException(
                "Invalid configuration param at '[storage]': path '{}' is not writable"
                .format(config["path"]))
        # Either a full connection URI or host+port must accompany "collection".
        if "uri" in config and "collection" in config:
            self.client = MongoClient(config["uri"])
        elif "host" in config and "port" in config and "collection" in config:
            self.client = MongoClient(config["host"], config["port"])
        elif "collection" not in config:
            raise FsException("Missing parameter \"collection\"")
        else:
            raise FsException(
                "Missing parameters: \"uri\" or \"host\" + \"port\"")
        self.fs = GridFSBucket(self.client[config["collection"]])
    except FsException:
        raise
    except Exception as e:  # TODO refine
        raise FsException(str(e))
def play_video():
    """Stream a GridFS-stored video selected by the posted ``movie_title``.

    Fixes: removed a duplicate (unused) MongoClient connection that was opened
    before the form value was even checked, replaced ``== None`` with
    ``is None``, and deleted the large block of commented-out dead code.

    Returns a ``video/mp4`` Response, or the sentinel string
    ``'None no variable'`` when no title was posted.
    """
    video = request.form.get('movie_title')
    if video is None:
        return 'None no variable'
    client = MongoClient('mongodb://*****:*****@35.242.180.246:27017')
    videos_db = client.get_database('videos')
    fs = GridFSBucket(videos_db)
    grid_out = fs.open_download_stream_by_name(video)
    contents = grid_out.read()
    return Response(contents, mimetype='video/mp4')
def pub_file_bucket(self, database, collection, file_name, file_property):
    """Open a GridFS upload stream named *file_name* in *collection*.

    *file_property* is stored as the file's GridFS metadata; the caller is
    responsible for writing to and closing the returned stream.
    """
    connection = MongoClient(self.host, self.port)
    bucket = GridFSBucket(connection[database], bucket_name=collection)
    return bucket.open_upload_stream(filename=file_name, metadata=file_property)
def put(self):
    """Replace the stored image for the client in the URL and re-register it.

    Fix: previously ``fs.delete(file_key)`` ran even when the lookup found no
    image (``file_key`` still ``None``), which would raise; now it returns
    "FILE NOT FOUND!" like the sibling handlers.
    """
    client_uuid = self.request.matchdict['uuid']
    images = self.request.db['images'].find({"client_key": client_uuid})
    fs = GridFSBucket(self.request.db)
    # Keep the last matching record's file key (original behavior).
    file_key = None
    for image in images:
        file_key = image['file_key']
    file_data = self.request.body
    if not file_data:
        return Response("FILE DATA MUST BE SPECIFIED!")
    if file_key is None:
        return Response("FILE NOT FOUND!")
    fs.delete(file_key)
    file_id = fs.upload_from_stream(str(file_key), file_data)
    self.request.db['images'].insert(
        {
            'client_key': client_uuid,
            'file_key': file_id,
            'description': self.request.POST.get('description')
        }
    )
    return Response("UPDATED!")
def get_file_bucket(self, database, collection, file_id):
    """Open a GridFS download stream for *file_id* from *collection*."""
    connection = MongoClient(self.host, self.port)
    bucket = GridFSBucket(connection[database], bucket_name=collection)
    stream = bucket.open_download_stream(ObjectId(file_id))
    # NOTE(review): the client is closed before the caller reads the stream;
    # confirm the returned GridOut remains readable for callers.
    connection.close()
    return stream
def delete_file_from_gridfs_for_project(project_id):
    """Delete the stored repository blob of every VCS system of *project_id*."""
    db = handler.client.get_database(handler.database)
    fs = GridFSBucket(db, bucket_name='repository_data')
    cursor = db.get_collection('vcs_system').find({'project_id': project_id})
    for vcs_system in cursor:
        try:
            fs.delete(vcs_system['repository_file'])
        except NoFile:
            # File already absent: nothing to clean up.
            pass
def read_gridfs_file(filename):
    """Download *filename* from the perftest GridFS bucket into a scratch file.

    The downloaded bytes are discarded (the temporary file is deleted on
    close). Fix: use the temporary file as a context manager instead of a
    manual try/finally, guaranteeing cleanup on any exit path.
    """
    bucket = GridFSBucket(proc_client.perftest)
    with tempfile.TemporaryFile() as temp:
        bucket.download_to_stream_by_name(filename, temp)
def delete_file(self, database, collection, file_id):
    """Delete GridFS file *file_id* from *collection*.

    Best-effort: failures are logged via SimpleLogger rather than raised;
    the client connection is always closed.
    """
    connection = MongoClient(self.host, self.port)
    try:
        bucket = GridFSBucket(connection[database], bucket_name=collection)
        bucket.delete(ObjectId(file_id))
    except Exception as ex:
        SimpleLogger.exception(ex)
    finally:
        connection.close()
def setUp(self):
    """Read the large GridFS fixture into memory and create a fresh bucket."""
    self.client = client_context.client
    self.client.drop_database('perftest')
    fixture = os.path.join(
        TEST_PATH, 'single_and_multi_document', 'gridfs_large.bin')
    with open(fixture, 'rb') as data:
        self.document = data.read()
    self.bucket = GridFSBucket(self.client.perftest)
def stream(request, pk):
    """Stream a track's audio bytes from GridFS as an HTTP response.

    Fix: ``content_type`` must be the bare MIME type; the previous value
    embedded the literal prefix ``"content_type: "`` inside the header value,
    producing a malformed Content-Type header.
    """
    selected_track = Tracks.objects.get(pk=pk)
    print("Downloaded")
    db = MongoClient().MusicDB
    fs = GridFSBucket(db)
    music_file_stream = fs.open_download_stream_by_name(
        selected_track.track_name)
    response = HttpResponse(music_file_stream.read(),
                            content_type='audio/mpeg')
    return response
def get_file_bucket(self, database, collection, file_id):
    """Open a GridFS download stream for *file_id*; returns None on failure.

    Errors are logged via SimpleLogger; the client is always closed.
    """
    stream = None
    connection = MongoClient(self.host, self.port)
    try:
        bucket = GridFSBucket(connection[database], bucket_name=collection)
        stream = bucket.open_download_stream(ObjectId(file_id))
    except Exception as ex:
        SimpleLogger.exception(ex)
    finally:
        connection.close()
    return stream
def download(request, pk):
    """Serve a track from GridFS as an attachment download.

    Fixes: ``content_type`` must be the bare MIME type (the old value
    embedded the literal ``"content_type: "`` prefix in the header value),
    and the C-style ``(str)(...)`` cast is replaced with ``str(...)``.
    """
    selected_track = Tracks.objects.get(pk=pk)
    print("Downloaded")
    db = MongoClient().MusicDB
    fs = GridFSBucket(db)
    music_file_stream = fs.open_download_stream_by_name(
        selected_track.track_name)
    response = HttpResponse(music_file_stream.read(),
                            content_type='audio/mpeg')
    response['Content-Disposition'] = 'attachment; filename=' + str(
        selected_track.actual_track)
    return response
def setUp(self):
    """Create a fresh bucket and upload the large GridFS fixture once."""
    self.client = client_context.client
    self.client.drop_database('perftest')
    self.bucket = GridFSBucket(self.client.perftest)
    fixture = os.path.join(
        TEST_PATH, 'single_and_multi_document', 'gridfs_large.bin')
    with open(fixture, 'rb') as gfile:
        self.uploaded_id = self.bucket.upload_from_stream('gridfstest', gfile)
def read_stream_from_grid(self, fid):
    """Read a GridFS file and return a descriptor dict including its bytes.

    Accepts *fid* as a str (converted to ObjectId) or an ObjectId.

    Fix: the ``'length'`` entry previously reported ``grid.chunk_size``
    (the size of one chunk) rather than ``grid.length`` (the total file
    size exposed by pymongo's GridOut).
    """
    if isinstance(fid, str):
        fid = ObjectId(fid)
    grid = GridFSBucket(self.db).open_download_stream(fid)
    return {
        'name': grid.filename,
        'contentType': grid.content_type,
        'length': grid.length,
        'fileId': fid,
        'uploadDate': grid.upload_date,
        'fileStream': grid.read()
    }
def setUp(self):
    """Upload every fixture file under parallel/gridfs_multi into a fresh bucket."""
    self.client = client_context.client
    self.client.drop_database('perftest')
    bucket = GridFSBucket(self.client.perftest)
    gridfs_path = os.path.join(TEST_PATH, 'parallel', 'gridfs_multi')
    self.files = [os.path.join(gridfs_path, entry)
                  for entry in os.listdir(gridfs_path)]
    for fname in self.files:
        with open(fname, 'rb') as gfile:
            bucket.upload_from_stream(fname, gfile)
def test_gridfsbucket_cursor(self):
    """GridFS cursors must manage their implicit/explicit sessions correctly."""
    client = self.client
    bucket = GridFSBucket(client.pymongo_test)
    # Seed two 1 MiB files with known ids 1 and 2.
    for file_id in 1, 2:
        stream = bucket.open_upload_stream_with_id(file_id, str(file_id))
        stream.write(b'a' * 1048576)
        stream.close()
    # Explicit session: stays alive inside the 'with', ends on exit.
    with client.start_session() as s:
        cursor = bucket.find(session=s)
        for f in cursor:
            f.read()
        self.assertFalse(s.has_ended)
    self.assertTrue(s.has_ended)
    # No explicit session.
    cursor = bucket.find(batch_size=1)
    files = [cursor.next()]
    # The implicit session (private attribute) lives as long as the cursor.
    s = cursor._Cursor__session
    self.assertFalse(s.has_ended)
    cursor.__del__()
    self.assertTrue(s.has_ended)
    self.assertIsNone(cursor._Cursor__session)
    # Files are still valid, they use their own sessions.
    for f in files:
        f.read()
    # Explicit session.
    with client.start_session() as s:
        cursor = bucket.find(session=s)
        s = cursor.session
        files = list(cursor)
        # Deleting the cursor must not end an explicitly-provided session.
        cursor.__del__()
        self.assertFalse(s.has_ended)
        for f in files:
            f.read()
    # Session has ended with the 'with' block: further reads must fail.
    for f in files:
        # Attempt to read the file again.
        f.seek(0)
        with self.assertRaisesRegex(InvalidOperation, "ended session"):
            f.read()
def get(self):
    """Return the stored image bytes for the client in the URL, or an error."""
    records = self.request.db['images'].find(
        {"client_key": self.request.matchdict['uuid']})
    fs = GridFSBucket(self.request.db)
    # Keep the last matching record's file key (original behavior).
    file_key = None
    for record in records:
        file_key = record['file_key']
    try:
        data = fs.open_download_stream(file_key)
    except NoFile:
        return Response("FILE NOT FOUND!")
    return Response(data.read())
def delete(self):
    """Delete the stored image for the client in the URL."""
    records = self.request.db['images'].find(
        {"client_key": self.request.matchdict['uuid']})
    fs = GridFSBucket(self.request.db)
    # Keep the last matching record's file key (original behavior).
    file_key = None
    for record in records:
        file_key = record['file_key']
    if not file_key:
        return Response("FILE NOT FOUND!")
    fs.delete(file_key)
    return Response("DELETED!")
def __new__(cls):
    """Singleton constructor: connect and build GridFS storage exactly once."""
    if hasattr(cls, 'init_done'):
        return cls.instance
    instance = super(DBManager, cls).__new__(cls)
    connect(Config.c.mongodb.url + Config.c.mongodb.database_name)
    instance.storage = GridFSStorage(GridFSBucket(_get_db()))
    cls.instance = instance
    cls.init_done = True
    return cls.instance
def test_gridfs_attrs(self):
    """Motor's bucket exposes PyMongo's attributes plus known Motor extras."""
    motor_gridfs_only = {"collection"}.union(motor_only)
    sync_attrs = attrs(GridFSBucket(env.sync_cx.test))
    motor_attrs = attrs(MotorGridFSBucket(self.cx.test)) - motor_gridfs_only
    self.assertEqual(sync_attrs, motor_attrs)
def write_stream_to_grid(self, name, stream, content_type, length):
    """Upload *stream* to GridFS and return a descriptor dict for the new file."""
    bucket = GridFSBucket(self.db)
    file_id = bucket.upload_from_stream(
        name, stream, metadata={'contentType': content_type})
    return {
        'name': name,
        'contentType': content_type,
        'length': length,
        'fileId': file_id,
    }
def before(self):
    """Reset both GridFS collections and enumerate the fixture files."""
    for coll in ('fs.files', 'fs.chunks'):
        self.client.perftest.drop_collection(coll)
    self.bucket = GridFSBucket(self.client.perftest)
    gridfs_path = os.path.join(TEST_PATH, 'parallel', 'gridfs_multi')
    self.files = [os.path.join(gridfs_path, entry)
                  for entry in os.listdir(gridfs_path)]
def save(self):
    """Persist the track, ensure its genre exists, and upload the audio to GridFS.

    Fixes: the audio file handle was opened and never closed (now a context
    manager), and the bare ``except:`` is narrowed to ``except Exception``.
    """
    # Save to the database
    super(Tracks, self).save()
    # If the genre doesn't exist yet, create it.
    try:
        genre_object = Genres.objects.get(genre_name=self.track_genre)
    except Exception:
        # NOTE(review): ideally catch Genres.DoesNotExist specifically.
        genre_object = Genres.objects.create(genre_name=self.track_genre)
        genre_object.save()
    # Upload music to GridFS using pymongo commands.
    print("Uploading to GridFS")
    db = MongoClient().MusicDB
    fs = GridFSBucket(db)
    # Open the file in binary mode; 'with' guarantees it is closed.
    with open(self.actual_track.path, 'rb') as music_file:
        fs.upload_from_stream(self.track_name, music_file.read())
class TestGridFsDownload(PerformanceTest, unittest.TestCase):
    """Benchmark: download one large file from GridFS."""

    # Size of the fixture payload in bytes (50 MiB).
    data_size = 52428800

    def setUp(self):
        """Upload the large fixture into a freshly dropped perftest database."""
        self.client = client_context.client
        self.client.drop_database('perftest')
        self.bucket = GridFSBucket(self.client.perftest)
        fixture = os.path.join(
            TEST_PATH, 'single_and_multi_document', 'gridfs_large.bin')
        with open(fixture, 'rb') as gfile:
            self.uploaded_id = self.bucket.upload_from_stream(
                'gridfstest', gfile)

    def tearDown(self):
        """Drop the benchmark database after the harness teardown."""
        super(TestGridFsDownload, self).tearDown()
        self.client.drop_database('perftest')

    def do_task(self):
        """The timed operation: stream the whole file back."""
        self.bucket.open_download_stream(self.uploaded_id).read()
def __init__(self, config_params, database): """Получаем базу, GridFSBucket, коллекцию :param config_params: dict словарь конфигурационных парметров dict(host='10.205.33.221', port=27017, username=None, password=None) :param database: string название базы данных MongoDB """ # Client for a MongoDB instance # client = MongoClient(**config_params) super().__init__(config_params) # print(self.client.test_database) # Get a database by client and name self.db = self.client[database] # получаем GridFSBucket self.fs = GridFSBucket(self.db) # получаем коллекцию fs.files - стандартная коллекция GridFSBucket self.collection = self.db.fs.files print(self.collection) self.meta = self.client['main'].projects print(self.meta)
def __getattr__(self, name):
    """A wrapper to return the underlying collections.

    Attribute names are snake_case and mapped to camelCase Mongo names;
    "fs" and any ``*_files`` name resolve to a cached GridFSBucket instead
    of a plain collection.
    """
    # snake_case -> camelCase: first word kept as-is, the rest title-cased.
    head, *rest = name.split("_")
    mongo_name = head + "".join(word.title() for word in rest)
    if name != "fs" and not name.endswith("_files"):
        return self.mongo[mongo_name]
    # GridFS buckets are created lazily and memoized per attribute name.
    if name not in self._files:
        self._files[name] = GridFSBucket(self.mongo, mongo_name)
    return self._files[name]
def test_gridfs_does_not_support_transactions(self):
    """Every GridFS/GridFSBucket entry point must reject an in-transaction session."""
    client = client_context.client
    db = client.pymongo_test
    gfs = GridFS(db)
    bucket = GridFSBucket(db)

    # Wrappers so cursor-returning / stream-returning ops actually execute.
    def gridfs_find(*args, **kwargs):
        return gfs.find(*args, **kwargs).next()

    def gridfs_open_upload_stream(*args, **kwargs):
        bucket.open_upload_stream(*args, **kwargs).write(b'1')

    # One (callable, args) pair per public GridFS operation.
    gridfs_ops = [
        (gfs.put, (b'123', )),
        (gfs.get, (1, )),
        (gfs.get_version, ('name', )),
        (gfs.get_last_version, ('name', )),
        (gfs.delete, (1, )),
        (gfs.list, ()),
        (gfs.find_one, ()),
        (gridfs_find, ()),
        (gfs.exists, ()),
        (gridfs_open_upload_stream, ('name', )),
        (bucket.upload_from_stream, (
            'name',
            b'data',
        )),
        (bucket.download_to_stream, (
            1,
            BytesIO(),
        )),
        (bucket.download_to_stream_by_name, (
            'name',
            BytesIO(),
        )),
        (bucket.delete, (1, )),
        (bucket.find, ()),
        (bucket.open_download_stream, (1, )),
        (bucket.open_download_stream_by_name, ('name', )),
        (bucket.rename, (
            1,
            'new-name',
        )),
    ]

    # All ops must raise InvalidOperation while a transaction is active.
    with client.start_session() as s, s.start_transaction():
        for op, args in gridfs_ops:
            with self.assertRaisesRegex(
                    InvalidOperation,
                    'GridFS does not support multi-document transactions',
            ):
                op(*args, session=s)
def collection_post(self):
    """Store a new image in GridFS and register it in the images collection.

    Returns a dict carrying the generated client key, or an error Response
    when the request body is empty.
    """
    fs = GridFSBucket(self.request.db)
    client_key = uuid.uuid4().hex
    file_data = self.request.body
    if not file_data:
        return Response("FILE DATA MUST BE SPECIFIED!")
    file_id = fs.upload_from_stream(str(client_key), file_data)
    record = {
        'client_key': client_key,
        'file_key': file_id,
        'description': self.request.POST.get('description'),
        'date': datetime.datetime.now(),
    }
    self.request.db['images'].insert(record)
    return {'file_key': client_key}
def temporary_fs(self, snapshot=None, **kwargs):
    """Yield a MongoFS backed by a throwaway MongoDB, optionally pre-seeded.

    *snapshot* maps filename -> (content_bytes,) or (content_bytes, metadata);
    each entry is uploaded to the 'test' bucket of the admin database, with
    metadata patched onto the files document when provided.
    """
    with temporary_mongodb() as conn_str:
        if snapshot:
            client = MongoClient(conn_str)
            try:
                admin_db = client.get_database('admin')
                files_coll = admin_db['test.files']
                bucket = GridFSBucket(admin_db, 'test')
                for filename, payload in six.iteritems(snapshot):
                    content = payload[0]
                    meta_dict = payload[1] if len(payload) > 1 else None
                    bucket.upload_from_stream(filename, BytesIO(content))
                    if meta_dict:
                        files_coll.update_one(
                            {'filename': filename},
                            {'$set': {'metadata': meta_dict}})
            finally:
                client.close()
        with MongoFS(conn_str, 'admin', 'test', **kwargs) as fs:
            yield fs
def test_gridfs_bucket(self):
    """Each GridFSBucket operation must work with and without explicit sessions."""
    listener = SessionTestListener()
    client = rs_or_single_client(event_listeners=[listener])
    client.drop_database('pymongo_test')
    self.addCleanup(client.drop_database, 'pymongo_test')
    bucket = GridFSBucket(client.pymongo_test)

    # Closures so stream-based ops fully execute (write/read + close).
    def upload(session=None):
        stream = bucket.open_upload_stream('f', session=session)
        stream.write(b'a' * 1048576)
        stream.close()

    def upload_with_id(session=None):
        stream = bucket.open_upload_stream_with_id(1, 'f1', session=session)
        stream.write(b'a' * 1048576)
        stream.close()

    def open_download_stream(session=None):
        stream = bucket.open_download_stream(1, session=session)
        stream.read()

    def open_download_stream_by_name(session=None):
        stream = bucket.open_download_stream_by_name('f', session=session)
        stream.read()

    def find(session=None):
        files = list(bucket.find({'_id': 1}, session=session))
        for f in files:
            f.read()

    sio = StringIO()

    self._test_ops(
        client,
        (upload, [], {}),
        (upload_with_id, [], {}),
        (bucket.upload_from_stream, ['f', b'data'], {}),
        (bucket.upload_from_stream_with_id, [2, 'f', b'data'], {}),
        (open_download_stream, [], {}),
        (open_download_stream_by_name, [], {}),
        (bucket.download_to_stream, [1, sio], {}),
        (bucket.download_to_stream_by_name, ['f', sio], {}),
        (find, [], {}),
        (bucket.rename, [1, 'f2'], {}),
        # Delete both files so _test_ops can run these operations twice.
        (bucket.delete, [1], {}),
        (bucket.delete, [2], {}))
def __init__(self, **kwargs):
    """Connect to MongoDB and prepare directory/file collections plus GridFS."""
    super().__init__(**kwargs)
    self._client: MongoClient = MongoClient(self.mongodb_uri)
    self._database: MongoDatabase = self._client[self.database_name]
    self._directories: MongoCollection = (
        self._database[self.directories_collection_name])
    self._files: GridFSBucket = GridFSBucket(
        self._database, self.files_collection_name)
    # <bucket>.files is where GridFS keeps the per-file metadata documents.
    self._files_metadata: MongoCollection = (
        self._database[self.files_collection_name].files)
    # Directory paths must be unique so lookups are unambiguous.
    self._directories.create_index('path', unique=True)
    # Bootstrap the root directory on first run.
    if not self.dir_exists('/'):
        self.save({'type': 'directory'}, '/')
class MotorCoreTestGridFS(MotorTest):
    """Compare Motor's GridFS wrapper surface against PyMongo's."""

    def setUp(self):
        super(MotorCoreTestGridFS, self).setUp()
        self.sync_fs = GridFSBucket(env.sync_cx.test)
        # Seed one empty file with a known id for the attribute tests below.
        self.sync_fs.upload_from_stream_with_id(1, 'filename', source=b'')

    def tearDown(self):
        self.sync_fs.delete(file_id=1)
        super(MotorCoreTestGridFS, self).tearDown()

    def test_gridfs_attrs(self):
        # Motor adds 'collection' on top of the generic motor-only names.
        motor_gridfs_only = set(['collection']).union(motor_only)
        self.assertEqual(
            attrs(GridFSBucket(env.sync_cx.test)),
            attrs(MotorGridFSBucket(self.cx.test)) - motor_gridfs_only)

    def test_gridin_attrs(self):
        # Motor's GridIn adds a 'set' method.
        motor_gridin_only = set(['set']).union(motor_only)
        self.assertEqual(
            attrs(GridIn(env.sync_cx.test.fs)),
            attrs(MotorGridIn(self.cx.test.fs)) - motor_gridin_only)

    @gen_test
    def test_gridout_attrs(self):
        # Motor's GridOut adds 'open' and 'stream_to_handler'.
        motor_gridout_only = set([
            'open',
            'stream_to_handler'
        ]).union(motor_only)
        fs = MotorGridFSBucket(self.cx.test)
        motor_gridout = yield fs.open_download_stream(1)
        self.assertEqual(
            attrs(self.sync_fs.open_download_stream(1)),
            attrs(motor_gridout) - motor_gridout_only)

    def test_gridout_cursor_attrs(self):
        self.assertEqual(
            attrs(self.sync_fs.find()) - pymongo_cursor_only,
            attrs(MotorGridFSBucket(self.cx.test).find()) - motor_cursor_only)
def insert_gridfs_file(filename):
    """Upload the contents of *filename* into the perftest GridFS bucket."""
    bucket = GridFSBucket(proc_client.perftest)
    with open(filename, 'rb') as gfile:
        bucket.upload_from_stream(filename, gfile)
def setUp(self):
    """Create the synchronous bucket and seed one empty file with id 1."""
    super(MotorCoreTestGridFS, self).setUp()
    bucket = GridFSBucket(env.sync_cx.test)
    bucket.upload_from_stream_with_id(1, 'filename', source=b'')
    self.sync_fs = bucket