Example #1
def del_files_of_object(objid, types=['all']):
    allowed_types = ['images', 'thumbs', 'videos', 'audios']
    if (types == ['all']):
        types = allowed_types
    for filetype in types:
        if filetype in allowed_types:
            dbname = mediatypes[filetype]['db']
            ext = mediatypes[filetype]['ext']
            gridfsdb = database.Database(
                MongoClient(host=GRIDFS_HOST, port=GRIDFS_PORT), dbname)
            fs = GridFS(gridfsdb)
            if (filetype in ['audios', 'videos']):
                languages = mongodb.db.languages.find({}, {
                    '_id': 0,
                    'code': 1,
                    'locale': 1
                })
                for language in languages:
                    isocode = ','.join([language['code'], language['locale']])
                    fileid = fs.find_one({'filename': isocode + objid + ext})
                    if (fileid is not None):
                        fs.delete(fileid._id)
            else:
                fileid = fs.find_one({'filename': objid + ext})
                if (fileid is not None):
                    fs.delete(fileid._id)
        else:
            return False
Example #2
    def tearDown(self):
        gridfs = GridFS(self.mongo.db)
        files = list(gridfs.find())
        for gridfile in files:
            gridfs.delete(gridfile._id)

        super(GridFSCleanupMixin, self).tearDown()
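The tearDown helper above clears a bucket by iterating over every stored file and deleting it by _id. The same idea as a standalone sketch, assuming a local MongoDB server and a throwaway database name:

from pymongo import MongoClient
from gridfs import GridFS

# Illustrative local server and throwaway database name.
fs = GridFS(MongoClient()["throwaway_test_db"])

# Remove every stored file by _id; GridFS cleans up fs.files and fs.chunks for each one.
for gridfile in fs.find():
    fs.delete(gridfile._id)

Several of the cache classes later in this list take the bulk route instead and drop the fs.files and fs.chunks collections outright in their clear() methods.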
Example #3
def set_obj_media(objid, isocode, mediatype, file):
    if (file.filename[-4:] not in ['.mp3', '.mp4']):
        return False
    if (mediatype not in ['audios', 'videos']):
        return False

    gridfsdb = database.Database(
        MongoClient(host=GRIDFS_HOST, port=GRIDFS_PORT),
        mediatypes[mediatype]['db'])
    fs = GridFS(gridfsdb)

    mongodb.db.objects.update_one(
        {
            'id': objid,
            'translations.isocode': isocode
        }, {'$set': {
            'translations.$.' + mediatype: True
        }})
    oldfile = fs.find_one(
        {'filename': isocode + '-' + objid + mediatypes[mediatype]['ext']})
    if (oldfile is not None):
        fs.delete(oldfile._id)
    oid = fs.put(file,
                 content_type=file.content_type,
                 filename=isocode + '-' + objid + mediatypes[mediatype]['ext'])

    return True
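set_obj_media shows the usual replace pattern: look up the old file by filename, delete it by _id, then put() the new content. A minimal self-contained version of that pattern (host, database name and filename below are placeholders):

from pymongo import MongoClient
from gridfs import GridFS

def replace_file(fs, filename, data, content_type="application/octet-stream"):
    # Remove the previous copy, if any, so only one file keeps this name...
    old = fs.find_one({"filename": filename})
    if old is not None:
        fs.delete(old._id)
    # ...then store the new content; put() returns the new file's ObjectId.
    return fs.put(data, filename=filename, content_type=content_type)

# Placeholder host, database and filename.
fs = GridFS(MongoClient("mongodb://localhost:27017")["media_demo"])
new_id = replace_file(fs, "en-GB-42.mp3", b"...audio bytes...", "audio/mpeg")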
Example #4
    def tearDown(self):
        gridfs = GridFS(self.mongo.db)
        files = list(gridfs.find())
        for gridfile in files:
            gridfs.delete(gridfile._id)

        super(GridFSCleanupMixin, self).tearDown()
Example #5
class GridFSStorage(Storage):
    def __init__(self, host='localhost', port=27017, collection='fs'):
        for s in ('host', 'port', 'collection'):
            name = 'GRIDFS_' + s.upper()
            if hasattr(settings, name):
                setattr(self, s, getattr(settings, name))
        for s, v in zip(('host', 'port', 'collection'), (host, port, collection)):
            if v:
                setattr(self, s, v)
        self.db = Connection(host=self.host, port=self.port)[self.collection]
        self.fs = GridFS(self.db)

    def _save(self, name, content):
        self.fs.put(content, filename=name)
        return name

    def _open(self, name, *args, **kwargs):
        return self.fs.get_last_version(filename=name)

    def delete(self, name):
        oid = self.fs.get_last_version(filename=name)._id
        self.fs.delete(oid)

    def exists(self, name):
        return self.fs.exists({'filename': name})

    def size(self, name):
        return self.fs.get_last_version(filename=name).length
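Both GridFSStorage variants in this list boil down to the same four GridFS calls: put(), get_last_version(), exists() and delete(). A small round trip, assuming a local server and an arbitrary database name, shows how they fit together:

from pymongo import MongoClient
from gridfs import GridFS

# Assumed local deployment; GRIDFS_HOST-style settings would normally supply these.
db = MongoClient(host="localhost", port=27017)["storage_demo"]
fs = GridFS(db)

fs.put(b"hello gridfs", filename="greeting.txt")         # _save()
latest = fs.get_last_version(filename="greeting.txt")    # _open()
assert latest.read() == b"hello gridfs"
print(fs.exists({"filename": "greeting.txt"}))            # exists() -> True
fs.delete(latest._id)                                      # delete()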
Example #6
    def pre_save(self, model_instance, add):
        oid = getattr(model_instance, "_%s_oid" % self.attname, None)
        value = getattr(model_instance, "_%s_val" % self.attname, None)

        if not getattr(model_instance, "id"):
            return u''

        if value == getattr(model_instance, "_%s_cache" % self.attname, None):
            return oid

        from django.db import connections
        gdfs = GridFS(connections[self.model.objects.db].db_connection.db)


        if not self._versioning and oid is not None:
            gdfs.delete(oid)

        if not self._as_string:
            value.seek(0)
            value = value.read()

        oid = gdfs.put(value)
        setattr(self, "_%s_oid" % self.attname, oid)
        setattr(self, "_%s_cache" % self.attname, value)

        return oid
Example #7
    def pre_save(self, model_instance, add):
        oid = getattr(model_instance, "_%s_oid" % self.attname, None)
        value = getattr(model_instance, "_%s_val" % self.attname, None)

        if not getattr(model_instance, "id"):
            return u''

        if value == getattr(model_instance, "_%s_cache" % self.attname, None):
            return oid

        from django.db import connections
        gdfs = GridFS(connections[self.model.objects.db].db_connection.db)

        if not self._versioning and oid is not None:
            gdfs.delete(oid)

        if not self._as_string:
            value.seek(0)
            value = value.read()

        oid = gdfs.put(value)
        setattr(self, "_%s_oid" % self.attname, oid)
        setattr(self, "_%s_cache" % self.attname, value)

        return oid
Example #8
class GridFSStorage(Storage):
    def __init__(self, host='localhost', port=27017, collection='fs'):
        for s in ('host', 'port', 'collection'):
            name = 'GRIDFS_' + s.upper()
            if hasattr(settings, name):
                setattr(self, s, getattr(settings, name))
        for s, v in zip(('host', 'port', 'collection'),
                        (host, port, collection)):
            if v:
                setattr(self, s, v)
        self.db = Connection(host=self.host, port=self.port)[self.collection]
        self.fs = GridFS(self.db)

    def _save(self, name, content):
        self.fs.put(content, filename=name)
        return name

    def _open(self, name, *args, **kwargs):
        return self.fs.get_last_version(filename=name)

    def delete(self, name):
        oid = self.fs.get_last_version(filename=name)._id
        self.fs.delete(oid)

    def exists(self, name):
        return self.fs.exists({'filename': name})

    def size(self, name):
        return self.fs.get_last_version(filename=name).length
Example #9
File: objectdb.py Project: merbst/psage
class ObjectDB:
    def __init__(self, db):
        from gridfs import GridFS
        self.gridfs = GridFS(db)

    def __setitem__(self, key, obj):
        self.save(obj, key)

    def __getitem__(self, key):
        return self.load(key)

    def __delitem__(self, key):
        from pymongo.objectid import ObjectId
        if not isinstance(key, ObjectId):
            id = self.gridfs.get_last_version(key)._id
        else:
            id = key
        self.gridfs.delete(id)

    def __repr__(self):
        return "Key-value database"

    def keys(self):
        """Return list of filenames of objects in the gridfs store."""
        return self.gridfs.list()

    def object_ids(self):
        """Return list of id's of objects in the gridfs store, which
        are not id's of objects with filenames."""
        v = self.gridfs._GridFS__files.find({'filename': {
            '$exists': False
        }}, ['_id'])
        return [x['_id'] for x in v]

    def has_key(self, key):
        return self.gridfs.exists(filename=key)

    def save(self, obj, key=None, compress=None):
        """Save Python object obj to the grid file system self.gridfs.
        If key is None, the file is stored by MongoDB assigned
        ObjectID, and that id is returned.
        """
        from sage.all import dumps
        data = dumps(obj, compress=compress)
        if key is not None:
            self.gridfs.put(data, filename=key)
            return key
        else:
            # store by MongoDB assigned _id only, and return that id.
            return self.gridfs.put(data)

    def load(self, key, compress=True):
        from pymongo.objectid import ObjectId
        if isinstance(key, ObjectId):
            data = self.gridfs.get(key).read()
        else:
            data = self.gridfs.get_last_version(key).read()
        from sage.all import loads
        return loads(data, compress=compress)
Example #10
def process(task_id, target=None, copy_path=None, report=False, auto=False):
    assert isinstance(task_id, int)
    # This is the results container. It's what will be used by all the
    # reporting modules to make it consumable by humans and machines.
    # It will contain all the results generated by every processing
    # module available. Its structure can be observed through the JSON
    # dump in the analysis' reports folder. (If jsondump is enabled.)
    results = { }
    results["statistics"] = { }
    results["statistics"]["processing"] = list()
    results["statistics"]["signatures"] = list()
    results["statistics"]["reporting"] = list()
    GetFeeds(results=results).run()
    RunProcessing(task_id=task_id, results=results).run()
    RunSignatures(task_id=task_id, results=results).run()


    if report:
        mongoconf = Config("reporting").mongodb
        if mongoconf["enabled"]:
            host = mongoconf["host"]
            port = mongoconf["port"]
            db = mongoconf["db"]
            conn = MongoClient(host, port)
            mdata = conn[db]
            fs = GridFS(mdata)
            analyses = mdata.analysis.find({"info.id": int(task_id)})
            if analyses.count() > 0:
                log.debug("Deleting analysis data for Task %s" % task_id)
                for analysis in analyses:
                    if "file_id" in analysis["target"]:
                        if mdata.analysis.find({"target.file_id": ObjectId(analysis["target"]["file_id"])}).count() == 1:
                            fs.delete(ObjectId(analysis["target"]["file_id"]))
                    for shot in analysis["shots"]:
                        if mdata.analysis.find({"shots": ObjectId(shot)}).count() == 1:
                            fs.delete(ObjectId(shot))
                    if "pcap_id" in analysis["network"] and mdata.analysis.find({"network.pcap_id": ObjectId(analysis["network"]["pcap_id"])}).count() == 1:
                        fs.delete(ObjectId(analysis["network"]["pcap_id"]))
                    if "sorted_pcap_id" in analysis["network"] and mdata.analysis.find({"network.sorted_pcap_id": ObjectId(analysis["network"]["sorted_pcap_id"])}).count() == 1:
                        fs.delete(ObjectId(analysis["network"]["sorted_pcap_id"]))
                    for drop in analysis["dropped"]:
                        if "object_id" in drop and mdata.analysis.find({"dropped.object_id": ObjectId(drop["object_id"])}).count() == 1:
                            fs.delete(ObjectId(drop["object_id"]))
                    for process in analysis["behavior"]["processes"]:
                        for call in process["calls"]:
                            mdata.calls.remove({"_id": ObjectId(call)})
                    mdata.analysis.remove({"_id": ObjectId(analysis["_id"])})
            conn.close()
            log.debug("Deleted previous MongoDB data for Task %s" % task_id)

        RunReporting(task_id=task_id, results=results).run()
        Database().set_status(task_id, TASK_REPORTED)

        if auto:
            if cfg.cuckoo.delete_original and os.path.exists(target):
                os.unlink(target)

            if cfg.cuckoo.delete_bin_copy and os.path.exists(copy_path):
                os.unlink(copy_path)
Example #11
    def process(self, document):
        database = pymongo.MongoClient(host=config.MONGODB_CONFIG['host'],
                port=config.MONGODB_CONFIG['port']
            )[config.MONGODB_CONFIG['database']]
        gridfs = GridFS(database, config.MONGODB_CONFIG['gridfs_collection'])

        gridfs.delete(ObjectId(document['file_id']))
        return {}
Example #12
    def process(self, document):
        database = pymongo.MongoClient(host=config.MONGODB_CONFIG['host'],
                                       port=config.MONGODB_CONFIG['port'])[
                                           config.MONGODB_CONFIG['database']]
        gridfs = GridFS(database, config.MONGODB_CONFIG['gridfs_collection'])

        gridfs.delete(ObjectId(document['file_id']))
        return {}
Example #13
 def remove_file(self, filename, base='fs'):
     storage = GridFS(self.db, base)
     try:
         grid_file = storage.get_last_version(filename)
         storage.delete(grid_file._id)
         return True
     except NoFile:
         return False
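remove_file above targets a named bucket and treats a missing file as a soft failure. The same flow as a self-contained sketch, with an assumed local server and bucket name:

from pymongo import MongoClient
from gridfs import GridFS
from gridfs.errors import NoFile

# Hypothetical database and bucket; files land in photos.files / photos.chunks.
db = MongoClient()["demo_db"]
photos = GridFS(db, "photos")

def remove_by_name(storage, filename):
    try:
        grid_file = storage.get_last_version(filename)
        storage.delete(grid_file._id)   # delete() always takes the file _id
        return True
    except NoFile:
        return False

photos.put(b"fake image bytes", filename="cat.png")
print(remove_by_name(photos, "cat.png"))   # True
print(remove_by_name(photos, "cat.png"))   # False, nothing left to delete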
Example #14
 def test_remove_file(self):
     """Tests removing a gridfs file
     """
     fs = GridFS(self.conn['test'], 'test')
     id = fs.put("test file", filename="test.txt", encoding='utf8')
     assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 1)
     fs.delete(id)
     assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 0)
Example #15
 def test_remove_file(self):
     """Tests removing a gridfs file
     """
     fs = GridFS(self.conn['test'], 'test')
     id = fs.put("test file", filename="test.txt", encoding='utf8')
     assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 1)
     fs.delete(id)
     assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 0)
Example #16
 def remove(self, db, id):
     '''
     Delete the file data from GridFS
     :param db:
     :param id:
     :return:
     '''
     fs = GridFS(db, self.file_collection)
     fs.delete(id)
Example #17
 def delete_files(self, set_name, ids):
     self.globalLock.acquire()
     try:
         grid_fs = GridFS(self.db, collection=set_name)
         for each in ids:
             if grid_fs.exists(document_or_id=ObjectId(each)):
                 grid_fs.delete(ObjectId(each))
     finally:
         self.globalLock.release()
Example #18
def drop(db, bucket, pattern, limit=None):
    print(f"Permanently dropping these snapshots from MongoDB GridFS {db.name}.{bucket}:")
    filenames = find(db, bucket, pattern, limit=limit, print_output=False)
    fs = GridFS(db, collection=bucket)
    i = 1
    for filename in filenames:
        fs.delete(filename)
        print("{:5}".format(str(i) + ".") + f"{filename}")
        i += 1
Example #19
def delete_file(id):
    storage = GridFS(mongo.db, "fs")

    try:
        storage.delete(file_id=ObjectId(id))
        return True

    # not sure what to do here...
    except Exception as e:
        return False
Example #20
    def delete_file(self, database: str, collection: str, obj_id: str):
        """
        Delete a file

        @param {str} database - name of the database the file is saved in
        @param {str} collection - name of the collection (table) the file is saved in
        @param {str} obj_id - the file id
        """
        _fs = GridFS(self.db[database], collection)
        _fs.delete(ObjectId(obj_id))  # must be the file's ObjectId
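delete_file above, like the other id-based helpers in this list, converts the string form of an id back into an ObjectId before calling delete(). A standalone sketch of that round trip, with made-up database and bucket names:

from bson import ObjectId
from pymongo import MongoClient
from gridfs import GridFS

# Illustrative database/bucket names.
fs = GridFS(MongoClient()["demo_db"], "uploads")

file_id = fs.put(b"payload", filename="report.pdf")
id_str = str(file_id)                 # ids usually travel as strings (URLs, JSON)

fs.delete(ObjectId(id_str))           # delete() needs the ObjectId, not the string
fs.delete(ObjectId(id_str))           # deleting a missing id is a silent no-op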
Example #21
 def CleanSpatialGridFs(self, scenario_id):
     # type: (int) -> None
     """Delete Spatial GridFS files in Main database."""
     spatial_gfs = GridFS(self.maindb, DBTableNames.gridfs_spatial)
     # If any GridFS files in the Spatial collection were generated during scenario analysis,
     #   the format of such GridFS file is: <SubbasinID>_<CoreFileName>_<ScenarioID>
     # e.g., SLPPOS_UNITS_12345678
     regex_str = '\\d+_\\S+_%d' % scenario_id
     for i in spatial_gfs.find({'filename': {'$regex': regex_str}}):
         spatial_gfs.delete(i._id)
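CleanSpatialGridFs works because GridFS queries run against the ordinary <bucket>.files collection, so a $regex filter on filename behaves exactly as it would on any other collection. A reduced sketch with an invented database, bucket and naming scheme:

from pymongo import MongoClient
from gridfs import GridFS

# Hypothetical main database and spatial bucket.
spatial_gfs = GridFS(MongoClient()["demo_main"], "SPATIAL")
scenario_id = 12345678

# Matches filenames such as "12_SLPPOS_UNITS_12345678".
regex_str = r'\d+_\S+_%d' % scenario_id
for f in spatial_gfs.find({'filename': {'$regex': regex_str}}):
    spatial_gfs.delete(f._id)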
Example #22
def delete_gridfs_document(database, object_id):
    """
    Delete a gridfs document
    """
    if database in system_databases:
        return logAndAbort("Cannot get data for system databases")
    expdb = mongoclient[database]
    oid = ObjectId(object_id)
    fs = GridFS(expdb)
    fs.delete(oid)
    return JSONEncoder().encode({"status": True})
Example #23
class GridFSPickleDict(BaseStorage):
    """A dictionary-like interface for a GridFS collection"""
    def __init__(self,
                 db_name,
                 collection_name=None,
                 connection=None,
                 **kwargs):
        """
        :param db_name: database name (be careful with production databases)
        :param connection: ``pymongo.Connection`` instance. If it's ``None``
                           (default) new connection with default options will
                           be created
        """
        super().__init__(**kwargs)
        if connection is not None:
            self.connection = connection
        else:
            self.connection = MongoClient()

        self.db = self.connection[db_name]
        self.fs = GridFS(self.db)

    def __getitem__(self, key):
        result = self.fs.find_one({'_id': key})
        if result is None:
            raise KeyError
        return self.deserialize(result.read())

    def __setitem__(self, key, item):
        try:
            self.__delitem__(key)
        except KeyError:
            pass
        self.fs.put(self.serialize(item), **{'_id': key})

    def __delitem__(self, key):
        res = self.fs.find_one({'_id': key})
        if res is None:
            raise KeyError
        self.fs.delete(res._id)

    def __len__(self):
        return self.db['fs.files'].estimated_document_count()

    def __iter__(self):
        for d in self.fs.find():
            yield d._id

    def clear(self):
        self.db['fs.files'].drop()
        self.db['fs.chunks'].drop()

    def __str__(self):
        return str(dict(self.items()))
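The dictionary-style wrappers such as GridFSPickleDict key files on a caller-supplied _id rather than a filename, which is why overwriting a key means delete-then-put. The core of that pattern, stripped of the cache plumbing and using placeholder names:

import pickle
from pymongo import MongoClient
from gridfs import GridFS

# Assumed local server; the database acts as a key-value cache.
fs = GridFS(MongoClient()["demo_cache"])

key = "response:https://example.com/"
fs.put(pickle.dumps({"status": 200}), _id=key)      # the custom _id is the dict key

doc = fs.find_one({"_id": key})
value = pickle.loads(doc.read()) if doc else None

# _id values must stay unique, so updating a key is delete-then-put.
if doc is not None:
    fs.delete(doc._id)
fs.put(pickle.dumps({"status": 304}), _id=key)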
Example #24
    def delete_tar(self, record=None, name=None, style=None):
        """
        Deletes a tar file from the database.  Issues an error if exactly one
        matching record is not found in the database.
        
        Parameters
        ----------
        record : iprPy.Record, optional
            The record associated with the tar archive to delete.  If not
            given, then name and/or style necessary to uniquely identify
            the record are needed.
        name : str, optional
            The name to use in uniquely identifying the record.
        style : str, optional
            The style to use in uniquely identifying the record.
        
        Raises
        ------
        ValueError
            If style and/or name content given with record.
        """

        # Create Record object if not given
        if record is None:
            record = self.get_record(name=name, style=style)

        # Issue a ValueError for competing kwargs
        elif style is not None or name is not None:
            raise ValueError(
                'kwargs style and name cannot be given with kwarg record')

        # Verify that record exists
        else:
            record = self.get_record(name=record.name, style=record.style)

        # Define mongofs
        mongofs = GridFS(self.mongodb, collection=record.style)

        # Build query
        query = {}
        query['recordname'] = record.name

        # Get tar
        matches = list(mongofs.find(query))
        if len(matches) == 1:
            tar = matches[0]
        elif len(matches) == 0:
            raise ValueError('No tar found for the record')
        else:
            raise ValueError('Multiple tars found for the record')

        # Delete tar
        mongofs.delete(tar._id)
Example #25
def main_config():
    if (not session.get('logged_in')):
        return redirect(url_for('login'))
    access = check_access('main_config')
    if (request.method == 'POST' and access != 'rw'):
        abort(403)

    filenames = {'pubkey': 'rsa_1024_pub.pem', 'privkey': 'rsa_1024_priv.pem'}
    certs = None
    if (request.method == 'POST'):
        changes = to_dict(request.form)
        if (changes['action'] == 'configurations'):
            del (changes['action'])
            mongodb.db.configs.update_one({'name': changes['name']},
                                          {'$set': changes})
        elif (changes['action'] == 'genkeys'):
            certs = genkeypair()
            gridfsdb = database.Database(
                MongoClient(host=GRIDFS_HOST, port=GRIDFS_PORT), 'certs')
            fs = GridFS(gridfsdb)
            for key in ['privkey', 'pubkey']:
                oldfile = fs.find_one({'filename': filenames[key]})
                if (oldfile is not None):
                    fs.delete(oldfile._id)
                fs.put(certs[key].copy(),
                       content_type="text/plain",
                       filename=filenames[key])

    result = mongodb.db.configs.find({}, {'_id': 0})
    gridfsdb = database.Database(
        MongoClient(host=GRIDFS_HOST, port=GRIDFS_PORT), 'images')
    fs = GridFS(gridfsdb)
    avatar = fs.exists(filename='avatar.png')
    background = fs.exists(filename='background.png')
    logo = fs.exists(filename='logo.png')
    imgresult = {'avatar': avatar, 'background': background, 'logo': logo}

    if (certs is None):
        gridfsdb = database.Database(
            MongoClient(host=GRIDFS_HOST, port=GRIDFS_PORT), 'certs')
        fs = GridFS(gridfsdb)
        if (fs.exists(filename=filenames['pubkey'])):
            file = fs.get_last_version(filenames['pubkey'])
            pubkey = file.read()
            certs = {'pubkey': pubkey}
    languages = copy_cursor(
        mongodb.db.languages.find({}, sort=([('name', 1), ('variant', 1)])))
    return render_template('main_config.html',
                           access=access,
                           images=imgresult,
                           configs=result,
                           certs=certs,
                           languages=languages)
Example #26
class GridFSPickleDict(BaseStorage):
    """A dictionary-like interface for a GridFS database

    Args:
        db_name: Database name
        collection_name: Ignored; GridFS internally uses collections 'fs.files' and 'fs.chunks'
        connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
        kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
    """
    def __init__(self,
                 db_name,
                 collection_name=None,
                 connection=None,
                 **kwargs):
        super().__init__(**kwargs)
        connection_kwargs = get_valid_kwargs(MongoClient, kwargs)
        self.connection = connection or MongoClient(**connection_kwargs)
        self.db = self.connection[db_name]
        self.fs = GridFS(self.db)

    def __getitem__(self, key):
        result = self.fs.find_one({'_id': key})
        if result is None:
            raise KeyError
        return self.deserialize(result.read())

    def __setitem__(self, key, item):
        try:
            self.__delitem__(key)
        except KeyError:
            pass
        self.fs.put(self.serialize(item), **{'_id': key})

    def __delitem__(self, key):
        res = self.fs.find_one({'_id': key})
        if res is None:
            raise KeyError
        self.fs.delete(res._id)

    def __len__(self):
        return self.db['fs.files'].estimated_document_count()

    def __iter__(self):
        for d in self.fs.find():
            yield d._id

    def clear(self):
        self.db['fs.files'].drop()
        self.db['fs.chunks'].drop()

    def __str__(self):
        return str(dict(self.items()))
Example #27
def call(**kwargs):
    from io import StringIO
    from gridfs import GridFS
    from datetime import datetime
    from dateutil.relativedelta import relativedelta

    if "remove_days" not in kwargs:
        kwargs["remove_days"] = 7
    if "warning_days" not in kwargs:
        kwargs["warning_days"] = 3

    manager = Manager()
    db = manager.db("files")
    fs = GridFS(db)
    output = StringIO()

    today = datetime.now()
    days_till_remove = int(kwargs["remove_days"]) - int(kwargs["warning_days"])
    warning = today + relativedelta(days=-int(kwargs["warning_days"]))
    remove = today + relativedelta(days=-int(kwargs["remove_days"]))

    cursor = db.fs.files.find({
        "assigned": False,
        "uploadDate": {
            "$lte": remove
        }
    })
    print("Removing", cursor.count(), "files...", file=output)
    for f in cursor:
        print("\tFile", f["_id"], "owned by", f["uid"], file=output)
        files_notify.call("remove", f["_id"], days=kwargs["remove_days"])
        fs.delete(ObjectId(f["_id"]))

    cursor = db.fs.files.find({
        "assigned": False,
        "uploadDate": {
            "$lte": warning
        }
    })
    print("Warning", cursor.count(), "files...", file=output)
    for f in cursor:
        print("\tFile", f["_id"], "owened by", f["uid"], file=output)
        files_notify.call("warning",
                          f["_id"],
                          days=kwargs["warning_days"],
                          days_till_remove=days_till_remove)

    out = output.getvalue()
    output.close()
    return out
Example #28
 def delete(self, remote_path):
     client = self.connect()
     fs = GridFS(client[self.settings.database])
     try:
         fsfile = fs.get_last_version(filename=remote_path)
         fs.delete(fsfile._id)
         log.info("[DELETE] File deleted: mongodb:%s" % remote_path)
     except NoFile:
         client.close()
         log.error("[DELETE] File not found: mongodb:%s" % remote_path)
         raise FileNotFound("%s not found" % remote_path)
     except Exception as e:
         log.error("[DELETE] Unhandled error: %s" % e)
         client.close()
         raise StorageError(e)
Example #29
def del_image():
    ID = request.form['id']
    password = request.form['password']
    Oid = ObjectId(request.form['Oid'])

    is_file = gallery_col.find({'id': ID, 'password': password, 'fileID': Oid})
    if not is_file.count():
        return 'Can not find in file list'

    fs = GridFS(db, "image")
    fs.delete(Oid)
    gallery_col.delete_one({'fileID': Oid})
    myResponse = {"result": 1}
    response = jsonify(myResponse)
    return response
Example #30
 def remove_file(self, coll_name, file_id):
     """
     Delete a file from GridFS
     :param coll_name:
     :param file_id:
     :return:
     """
     try:
         fs = GridFS(self.db, coll_name)
         fs.delete(ObjectId(file_id))
     except Exception:
         raise exc.DeleteException(
             '{}Delete file from "{}" failed! _id: {}, error: {}'.format(
                 self.log_prefix, coll_name, file_id,
                 traceback.format_exc()))
     return True
Example #31
class GridfsStorageBackend(object):
    def __init__(self, db, collection_name="storage"):
        from gridfs import GridFS
        self.fs = GridFS(db, collection_name)

    def __get_file_object(self, key):
        from gridfs import NoFile
        try:
            return self.fs.get_version(filename=key)
        except NoFile:
            raise KeyError(key)

    def __contains__(self, key):
        return self.fs.exists(filename=key)

    def __getitem__(self, key):
        return iterate_file_object(self.__get_file_object(key))

    def put_file(self, key, tmpfile):
        # ResourceDatabase will check to make sure the file doesn't already
        # exist before calling this, but in the event of a race condition this
        # may be called twice for a given key.  Fortunately this will cause no
        # issues, but it seems to result in two "versions" of the file being in
        # gridfs, which wastes some space (but not very much, if race
        # conditions are rare).
        #
        # FIXME: look into whether it is possible to drop old versions
        # automatically in gridfs
        with open(tmpfile, 'rb') as f:
            self.fs.put(f, filename=key)

    def __delitem__(self, key):
        self.fs.delete(self.__get_file_object(key)._id)

    def keys(self):
        if six.PY3:
            return self.iterkeys()
        else:
            return self.fs.list()

    def iterkeys(self):
        return iter(self.fs.list())

    __iter__ = iterkeys

    def __len__(self):
        return len(self.fs.list())
Example #32
class GridFSPickleDict(MutableMapping):
    """ MongoDict - a dictionary-like interface for ``mongo`` database
    """
    def __init__(self, db_name, connection=None):
        """
        :param db_name: database name (be careful with production databases)
        :param connection: ``pymongo.Connection`` instance. If it's ``None``
                           (default) new connection with default options will
                           be created
        """
        if connection is not None:
            self.connection = connection
        else:
            self.connection = MongoClient()

        self.db = self.connection[db_name]
        self.fs = GridFS(self.db)

    def __getitem__(self, key):
        result = self.fs.find_one({'_id': key})
        if result is None:
            raise KeyError
        return pickle.loads(bytes(result.read()))

    def __setitem__(self, key, item):
        self.__delitem__(key)
        self.fs.put(pickle.dumps(item), **{'_id': key})

    def __delitem__(self, key):
        res = self.fs.find_one({'_id': key})
        if res is not None:
            self.fs.delete(res._id)

    def __len__(self):
        return self.db['fs.files'].count()

    def __iter__(self):
        for d in self.fs.find():
            yield d._id

    def clear(self):
        self.db['fs.files'].drop()
        self.db['fs.chunks'].drop()

    def __str__(self):
        return str(dict(self.items()))
Example #33
class GridFsBackend(BaseBackend):
    '''
    A Mongo GridFS backend

    Expect the following settings:

    - `mongo_url`: The Mongo access URL
    - `mongo_db`: The database to store the file in.
    '''
    def __init__(self, name, config):
        super(GridFsBackend, self).__init__(name, config)

        self.client = MongoClient(config.mongo_url)
        self.db = self.client[config.mongo_db]
        self.fs = GridFS(self.db, self.name)

    def exists(self, filename):
        return self.fs.exists(filename=filename)

    @contextmanager
    def open(self, filename, mode='r', encoding='utf8'):
        if 'r' in mode:
            f = self.fs.get_last_version(filename)
            yield f if 'b' in mode else codecs.getreader(encoding)(f)
        else:  # mode == 'w'
            f = io.BytesIO() if 'b' in mode else io.StringIO()
            yield f
            params = {'filename': filename}
            if 'b' not in mode:
                params['encoding'] = encoding
            self.fs.put(f.getvalue(), **params)

    def read(self, filename):
        f = self.fs.get_last_version(filename)
        return f.read()

    def write(self, filename, content):
        return self.fs.put(self.as_binary(content), filename=filename)

    def delete(self, filename):
        for version in self.fs.find({'filename': filename}):
            self.fs.delete(version._id)

    def serve(self, filename):
        file = self.fs.get_last_version(filename)
        return send_file(file, mimetype=file.content_type)
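GridFsBackend.delete removes every stored version of a filename rather than only the latest one; each put() under the same name creates a separate file document. A self-contained illustration, assuming a local server and a throwaway database:

from pymongo import MongoClient
from gridfs import GridFS

fs = GridFS(MongoClient()["demo_db"])

# Two puts with one filename -> two independent versions in fs.files.
fs.put(b"v1", filename="config.json")
fs.put(b"v2", filename="config.json")

# get_last_version() would only return b"v2"; removing the name completely
# means walking every matching file document, as delete() above does.
for version in fs.find({"filename": "config.json"}):
    fs.delete(version._id)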
Example #34
class FileStoreMongo(FileStore):
    
    def __init__(self, connection):
        self._conn=connection
        self._fs=GridFS(connection)

    def new_file(self, cell_id, filename):
        return self._fs.new_file(filename=filename, cell_id=ObjectId(cell_id))

    def delete_cell_files(self, cell_id):
        c = self._conn.fs.files.find({'cell_id': ObjectId(cell_id)}, ['_id'])
        for doc in c:
            self._fs.delete(doc['_id'])

    def get_file(self, cell_id, filename):
        _id=self._conn.fs.files.find_one({'cell_id':ObjectId(cell_id), 'filename':filename},['_id'])
        return self._fs.get(_id['_id'])
Example #35
class MotorCoreTestGridFS(MotorTest):
    def setUp(self):
        super(MotorCoreTestGridFS, self).setUp()
        self.sync_fs = GridFS(env.sync_cx.test)
        self.sync_fs.delete(file_id=1)
        self.sync_fs.put(b'', _id=1)

    def tearDown(self):
        self.sync_fs.delete(file_id=1)
        super(MotorCoreTestGridFS, self).tearDown()

    def test_gridfs_attrs(self):
        pymongo_gridfs_only = set([
            # Obsolete PyMongo methods.
            'open',
            'remove'])

        motor_gridfs_only = set(['collection']).union(motor_only)

        self.assertEqual(
            attrs(GridFS(env.sync_cx.test)) - pymongo_gridfs_only,
            attrs(MotorGridFS(self.cx.test)) - motor_gridfs_only)

    def test_gridin_attrs(self):
        motor_gridin_only = set(['set']).union(motor_only)

        self.assertEqual(
            attrs(GridIn(env.sync_cx.test.fs)),
            attrs(MotorGridIn(self.cx.test.fs)) - motor_gridin_only)

    @gen_test
    def test_gridout_attrs(self):
        motor_gridout_only = set([
            'open',
            'stream_to_handler'
        ]).union(motor_only)

        motor_gridout = yield MotorGridOut(self.cx.test.fs, file_id=1).open()
        self.assertEqual(
            attrs(self.sync_fs.get(1)),
            attrs(motor_gridout) - motor_gridout_only)

    def test_gridout_cursor_attrs(self):
        self.assertEqual(
            attrs(self.sync_fs.find()) - pymongo_cursor_only,
            attrs(MotorGridFS(self.cx.test).find()) - motor_cursor_only)
Example #36
class GridFSPickleDict(MutableMapping):
    """ MongoDict - a dictionary-like interface for ``mongo`` database
    """
    def __init__(self, db_name, connection=None):
        """
        :param db_name: database name (be careful with production databases)
        :param connection: ``pymongo.Connection`` instance. If it's ``None``
                           (default) new connection with default options will
                           be created
        """
        if connection is not None:
            self.connection = connection
        else:
            self.connection = MongoClient()

        self.db = self.connection[db_name]
        self.fs = GridFS(self.db)

    def __getitem__(self, key):
        result = self.fs.find_one({'_id': key})
        if result is None:
            raise KeyError
        return pickle.loads(bytes(result.read()))

    def __setitem__(self, key, item):
        self.__delitem__(key)
        self.fs.put(pickle.dumps(item), **{'_id': key})

    def __delitem__(self, key):
        res = self.fs.find_one({'_id': key})
        if res is not None:
            self.fs.delete(res._id)

    def __len__(self):
        return self.db['fs.files'].count()

    def __iter__(self):
        for d in self.fs.find():
            yield d._id

    def clear(self):
        self.db['fs.files'].drop()
        self.db['fs.chunks'].drop()

    def __str__(self):
        return str(dict(self.items()))
Example #37
class GridFSCache(BaseCache):
    """A dictionary-like interface for MongoDB GridFS

    Args:
        db_name: database name (be careful with production databases)
        connection: MongoDB connection instance to use instead of creating a new one
    """
    def __init__(self, db_name, connection: MongoClient = None):
        self.connection = connection or MongoClient()
        self.db = self.connection[db_name]
        self.fs = GridFS(self.db)

    # TODO
    async def contains(self, key: str) -> bool:
        raise NotImplementedError

    async def clear(self):
        self.db['fs.files'].drop()
        self.db['fs.chunks'].drop()

    async def delete(self, key: str):
        res = self.fs.find_one({'_id': key})
        if res is not None:
            self.fs.delete(res._id)

    async def keys(self) -> Iterable[str]:
        return [d._id for d in self.fs.find()]

    async def read(self, key: str) -> ResponseOrKey:
        result = self.fs.find_one({'_id': key})
        if result is None:
            raise KeyError
        return self.unpickle(bytes(result.read()))

    async def size(self) -> int:
        return self.db['fs.files'].count()

    # TODO
    async def values(self) -> Iterable[ResponseOrKey]:
        raise NotImplementedError

    async def write(self, key: str, item: ResponseOrKey):
        await self.delete(key)
        self.fs.put(pickle.dumps(item, protocol=-1), **{'_id': key})
Example #38
class FileRepository():
    def __init__(self, db: Database):
        self.fs = GridFS(db)

    def get_file(self, id: ObjectId) -> bytes:
        if not self.fs.exists(id):
            raise NonExistentError("The request file with the id " + str(id) +
                                   " does not exist")
        return self.fs.get(id).read()

    def put_file(self, file: bytes) -> ObjectId:
        return self.fs.put(file)

    def delete_file(self, id: ObjectId):
        self.fs.delete(id)

    def replace_file(self, old_id: ObjectId, new_file: bytes) -> ObjectId:
        self.delete_file(old_id)
        return self.put_file(new_file)
Example #39
def edit_profile():
    form = EditProfileForm()
    user = app.config['USERS_COLLECTION'].find_one(
        {"username": current_user.username})
    if form.validate_on_submit():
        MongoClient().blog.users.update(
            {'email': current_user.email},
            {'$set': {
                'username': form.username.data
            }})
        MongoClient().blog.users.update(
            {'email': current_user.email},
            {'$set': {
                'about_me': form.about_me.data
            }})
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        name = current_user.username
        avatar = request.files['file']
        if avatar and allowed_file(avatar.filename):
            fs = GridFS(MongoClient().db, collection="avatar")
            filename = secure_filename(avatar.filename)
            avatar_id = fs.put(avatar,
                               content_type=avatar.content_type,
                               filename=filename)

            if avatar_id:
                if user['avatar']:
                    fs.delete(user['avatar'])
                MongoClient().blog.users.update(
                    {'email': current_user.email},
                    {'$set': {
                        'avatar': avatar_id
                    }})
                flash('successfully upload image')
            else:
                flash('It is not support', 'red')

        flash('Information Changed')
        return redirect(request.args.get("next") or url_for('profile'))
    form.username.data = current_user.username
    form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', form=form, user=user)
Example #40
class SimpleFrameMongo(object):

    config_settings = None

    def __init__(self):

        db_name = self.config_settings['name']
        mongo_host = self.config_settings['mongo_host']
        username = self.config_settings['username']
        password = self.config_settings['password']

        self.db = pymongo.MongoClient(mongo_host)[db_name]
        self.db.authenticate(username, password)

        self.fs = GridFS(self.db)

    def write(self, name, df, metadata=''):
        if name in self.fs.list():
            warnings.warn(
                'filename `{}` already exists, nothing inserted'.format(name))
            return

        return self.fs.put(pkl.dumps(df, pkl.HIGHEST_PROTOCOL),
                           filename=name,
                           metadata=metadata)

    def delete(self, name):
        doc = self.db['fs.files'].find_one({'filename': name})
        if doc:
            _id = doc.get('_id')
            self.fs.delete(_id)

    def read(self, name):
        return pkl.loads(self.fs.find_one({'filename': name}).read())

    def read_metadata(self, name):
        return self.db['fs.files'].find_one({'filename': name}).get('metadata')

    def __enter__(self):
        return self

    def __exit__(self, et, ev, tb):
        self.db.client.close()
Example #41
class FileStoreMongo(FileStore):
    def __init__(self, connection):
        self._conn = connection
        self._fs = GridFS(connection)

    def new_file(self, cell_id, filename):
        return self._fs.new_file(filename=filename, cell_id=ObjectId(cell_id))

    def delete_cell_files(self, cell_id):
        c = self._conn.fs.files.find({'cell_id': ObjectId(cell_id)}, ['_id'])
        for doc in c:
            self._fs.delete(doc['_id'])

    def get_file(self, cell_id, filename):
        _id = self._conn.fs.files.find_one(
            {
                'cell_id': ObjectId(cell_id),
                'filename': filename
            }, ['_id'])
        return self._fs.get(_id['_id'])
Example #42
class GridFsHandler(object):
    def __init__(self, database: Database=None) -> None:
        if database is None:
            database = mongo.db
        self.gridfs = GridFS(database)

    def save_file_in_gridfs(self, file: Union[str, bytes, IOBase, BinaryIO, GridOut], **kwargs: str) -> str:
        """
            :return: the id of the stored file in GridFS, as a string
        """
        return str(self.gridfs.put(file, **kwargs))

    def get_file_from_gridfs(self, id: str) -> GridOut:
        return self.gridfs.get(ObjectId(id))

    def delete_file_from_gridfs(self, id: str) -> None:
        logging.getLogger(__name__).info('deleting file from gridfs with id "{}"'.format(id))
        self.gridfs.delete(ObjectId(id))

    def copy_file(self, id: str) -> str:
        file = self.get_file_from_gridfs(id)
        return self.save_file_in_gridfs(file=file, filename=file.filename)
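copy_file works because a GridOut is file-like, so it can be handed straight back to put(); the copy gets a fresh _id and survives deletion of the original. A compact sketch under the same assumption of a local test database:

from pymongo import MongoClient
from gridfs import GridFS

fs = GridFS(MongoClient()["demo_db"])

src_id = fs.put(b"original bytes", filename="a.bin")

# GridOut exposes read(), so put() can stream it into a brand new file.
src = fs.get(src_id)
copy_id = fs.put(src, filename=src.filename)

fs.delete(src_id)                                  # only the original goes away
assert fs.get(copy_id).read() == b"original bytes"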
Example #43
 def store(self, revision):
     doc = self._mongoify(revision.to_json())
     
     logger.debug("Storing revision {0} with {1} operations.".format(
                 doc['_id'], len(doc['delta']['operations'])))
     
     try:
         self.mongo.db.revisions.save(doc)
     except DocumentTooLarge as e:
         delta = doc['delta']
         file_system = GridFS(self.mongo.db, "revisions")
         file_system.delete(doc['_id'])
         doc['delta'] = {'grid_id': doc['_id']}
         with file_system.new_file(_id=doc['_id'], encoding='utf-8') as f:
             
             json.dump(delta, f)
         
         self.mongo.db.revisions.save(doc)
         
     
     logger.debug("Storing revision {0} stored!".format(doc['_id']))
     
     return True
Example #44
class GridFile(object):
    def __init__(self, db=None):
        if db:
            self.db = db
        self.gfs = GridFS(database=self.db)

    def delete(self, file_id):
        self.gfs.delete(file_id=objectid.ObjectId(file_id))

    def get(self, file_id):
        return self.gfs.get(file_id=objectid.ObjectId(file_id))

    def list(self):
        return self.gfs._GridFS__files.find().sort('uploadDate')

    def save_flask_upload(self, file_store):
        assert file_store.__class__.__name__ == 'FileStorage'
        r = ID3(file_store)
        kwargs = {}
        for key in ('album', 'performer', 'title', 'track', 'year', 'genre'):
            val = r.getValue(key)
            if val:  kwargs[key] = val

        file_store.seek(0)
        _id = self.gfs.put(
            filename= file_store.filename,
            content_type=file_store.content_type,
            data=file_store.stream,
            **kwargs
        )
        return _id

    def send_flask_file(self, grid_out):
        assert grid_out.__class__.__name__ == 'GridOut'
        byt = BytesIO(grid_out.read())
        return send_file(byt, mimetype=grid_out.content_type)
Example #45
File: views.py Project: ThinkZ/pkyx
def profile_edit():
    user = User.find_by_id(bson_obj_id(current_user.id))
    if not user:
        abort(404)
    form = ProfileForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            username = form.username.data
            location = form.location.data
            website = form.website.data
            introduction = form.introduction.data
            data = {
                'username': username,
                'location': location,
                'website': website,
                'introduction': introduction
            }

            avatar = request.files['avatar']
            if avatar and AllowFile.is_img(avatar.filename):
                filename = secure_filename(avatar.filename)
                fs = GridFS(mongo.db, collection="avatar")
                avatar_id = fs.put(avatar, content_type=avatar.content_type, filename=filename)
                if avatar_id:
                    if user['avatar']:
                        fs.delete(bson_obj_id(user['avatar']))
                    data['avatar'] = avatar_id
            else:
                flash('图片格式不支持', 'red')

            User.update_user(user['_id'], data)

            return redirect(url_for('.profile'))
        else:
            flash('资料修改失败', 'red')
    return render_template('profile_edit.html', user=user, form=form, title='编辑资料')
Example #46
class Gallery:
    def __init__(self, document):
        self.document = document
        self.fs = GridFS(db)

    def get_image_ids(self):
        ids = []
        for obj in self.document["images"]:
            ids.append(obj["_id"])
        return ids

    def save(self):
        self.document.save()

    def add_file(self, file_name, data):
        if self.has_file_name(file_name):
            return
        id = self.fs.put(data, name=file_name)
        self.document["images"].append({"_id":id,"filename":file_name})
        self.save()

    def get_image_data(self,id):
        data = self.fs.get(ObjectId(id)).read()
        return data

    def has_file_name(self, file_name):
        for obj in self.document["images"]:
            if obj["filename"] == file_name:
                return True
        return False

    def destroy(self):
        for id in self.get_image_ids():
            self.fs.delete(id)
        gallery_name = self.document["name"]
        mongo.Slikaona.galleryes.remove({"name":gallery_name})
Example #47
File: files.py Project: alikzao/tao1
def del_files(request, doc_id, file_name, proc_id):
	fs = GridFS(request.db)
	doc = get_doc(request, doc_id, proc_id)
	if doc and 'default_img' in doc:
		del doc['default_img']
		request.db.doc.save(doc)
	try:
		fn = get_file_meta(request, proc_id, file_name, doc_id, 'thumb')
		fs.delete(fn['_id'])
	except:pass
	try:
		fn = get_file_meta(request, proc_id, file_name, doc_id, 'orig')
		fs.delete(fn['_id'])
	except:pass
	try:
		fn = get_file_meta(request, proc_id, file_name, doc_id, 'middle')
		fs.delete(fn['_id'])
	except:pass
	return response_json(request, {"result":"ok"})
Example #48
 def test_remove_file(self):
     fs = GridFS(self.conn['test'], 'test')
     id = fs.put("test file", filename="test.txt", encoding='utf8')
     assert_soon(lambda: sum(1 for _ in self._search()) == 1)
     fs.delete(id)
     assert_soon(lambda: sum(1 for _ in self._search()) == 0)
Example #49
class GridFSStorage(Storage):
    URL_PREFIX = '/files/'

    DEFAULT_CONFIG = {
        'HOST': 'localhost',
        'PORT': 27017,
        'DB': 'admin',
        'AUTH_DB': 'admin',
        'USER': '',
        'PASS': '',
        'GRIDFS': 'fs',
        'OPTIONS': {
            }
    }

    def __init__(self, host=None, port=None, db=None, auth_db=None, user=None, password=None, collection=None):
        config = self.DEFAULT_CONFIG.copy()
        options = config['OPTIONS'].copy()
        config.update(getattr(settings, 'MONGODB', {}))
        options.update(config['OPTIONS'])
        config['OPTIONS'] = options

        host = host or config['HOST']
        port = port or config['PORT']
        db = db or config['DB']
        auth_db = auth_db or config['AUTH_DB']
        user = user if user is not None else config['USER']
        password = password or config['PASS']
        collection = collection or config['GRIDFS']

        client = MongoClient(host=host, port=port, **options)
        self.db = client[db]
        if user:
            self.db.authenticate(user, password, auth_db)
        self.fs = GridFS(self.db, collection=collection)

    def _open(self, name, mode='rb'):
        return GridFSFile(name, self, mode=mode)

    def _save(self, name, content):
        name = force_unicode(name).replace('\\', '/')
        content.open()
        with content:
            kwargs = {'filename': name}
            content_type = get_content_type(content)
            if content_type:
                kwargs['content_type'] = content_type
            try:
                with self.fs.new_file(**kwargs) as gfile:
                    if hasattr(content, 'chunks'):
                        for chunk in content.chunks():
                            gfile.write(chunk)
                    else:
                        gfile.write(content)
            except Exception as exc:
                try:
                    self.delete(name)
                except Exception:
                    pass
                raise exc
        return name

    def get_valid_name(self, name):
        return force_unicode(name).strip().replace('\\', '/')

    def delete(self, name):
        f = self._open(name, 'r')
        return self.fs.delete(f.file._id)

    def exists(self, name):
        try:
            self.fs.get_last_version(name)
            return True
        except NoFile:
            return False

    def listdir(self, path):
        return ((), self.fs.list())

    def size(self, name):
        try:
            return self.fs.get_last_version(name).length
        except NoFile:
            raise ValueError('File with name "%s" does not exist' % name)

    def url(self, name):
        return '%s%s' % (self.URL_PREFIX, name)
Example #50
def process(target=None, copy_path=None, task=None, report=False, auto=False):
    # This is the results container. It's what will be used by all the
    # reporting modules to make it consumable by humans and machines.
    # It will contain all the results generated by every processing
    # module available. Its structure can be observed through the JSON
    # dump in the analysis' reports folder. (If jsondump is enabled.)
    results = { }
    results["statistics"] = { }
    results["statistics"]["processing"] = list()
    results["statistics"]["signatures"] = list()
    results["statistics"]["reporting"] = list()
    GetFeeds(results=results).run()
    RunProcessing(task=task, results=results).run()
    RunSignatures(task=task, results=results).run()
    task_id = task["id"]
    if report:
        if repconf.mongodb.enabled:
            host = repconf.mongodb.host
            port = repconf.mongodb.port
            db = repconf.mongodb.db
            conn = MongoClient(host, port)
            mdata = conn[db]
            fs = GridFS(mdata)
            analyses = mdata.analysis.find({"info.id": int(task_id)})
            if analyses.count() > 0:
                log.debug("Deleting analysis data for Task %s" % task_id)
                for analysis in analyses:
                    if "file_id" in analysis["target"]:
                        if mdata.analysis.find({"target.file_id": ObjectId(analysis["target"]["file_id"])}).count() == 1:
                            fs.delete(ObjectId(analysis["target"]["file_id"]))
                    for shot in analysis["shots"]:
                        if mdata.analysis.find({"shots": ObjectId(shot)}).count() == 1:
                            fs.delete(ObjectId(shot))
                    if "pcap_id" in analysis["network"] and mdata.analysis.find({"network.pcap_id": ObjectId(analysis["network"]["pcap_id"])}).count() == 1:
                        fs.delete(ObjectId(analysis["network"]["pcap_id"]))
                    if "sorted_pcap_id" in analysis["network"] and mdata.analysis.find({"network.sorted_pcap_id": ObjectId(analysis["network"]["sorted_pcap_id"])}).count() == 1:
                        fs.delete(ObjectId(analysis["network"]["sorted_pcap_id"]))
                    for drop in analysis["dropped"]:
                        if "object_id" in drop and mdata.analysis.find({"dropped.object_id": ObjectId(drop["object_id"])}).count() == 1:
                            fs.delete(ObjectId(drop["object_id"]))
                    for process in analysis["behavior"]["processes"]:
                        for call in process["calls"]:
                            mdata.calls.remove({"_id": ObjectId(call)})
                    mdata.analysis.remove({"_id": ObjectId(analysis["_id"])})
            conn.close()
            log.debug("Deleted previous MongoDB data for Task %s" % task_id)

        if repconf.elasticsearchdb.enabled:
            analyses = es.search(
                           index="cuckoo-*",
                           doc_type="analysis",
                           q="info.id: \"%s\"" % task_id
                       )["hits"]["hits"]
            if analyses:
                for analysis in analyses:
                    esidx = analysis["_index"]
                    esid = analysis["_id"]
                    # Check if behavior exists
                    if analysis["_source"]["behavior"]:
                        for process in analysis["_source"]["behavior"]["processes"]:
                            for call in process["calls"]:
                                es.delete(
                                    index=esidx,
                                    doc_type="calls",
                                    id=call,
                                )
                    # Delete the analysis results
                    es.delete(
                        index=esidx,
                        doc_type="analysis",
                        id=esid,
                    )

        RunReporting(task=task, results=results).run()
        Database().set_status(task_id, TASK_REPORTED)

        if auto:
            if cfg.cuckoo.delete_original and os.path.exists(target):
                os.unlink(target)

            if cfg.cuckoo.delete_bin_copy and os.path.exists(copy_path):
                os.unlink(copy_path)
Example #51
class FileStoreMongo(FileStore):
    """
    Filestore database using GridFS (see :mod:`gridfs`)

    :arg pymongo.database.Database connection: MongoDB database object
    """

    def __init__(self, connection):
        self._conn=connection
        self.new_context()
        self._fs=GridFS(self.database)

    def _filename(self, **kwargs):
        return {'session': kwargs.get('session', kwargs.get('cell_id', 'SESSION NOT FOUND')), 'filename': kwargs['filename']}
    @Debugger
    def new_file(self, **kwargs):
        """
        See :meth:`FileStore.new_file`

        :rtype: :class:`gridfs.grid_file.GridIn`
        """
        self.delete_files(**kwargs)
        log("FS Creating %s"%self._filename(**kwargs))
        return self._fs.new_file(**self._filename(**kwargs))

    @Debugger
    def delete_files(self, **kwargs):
        """
        See :meth:`FileStore.delete_files`
        """
        while self._fs.exists(self._filename(**kwargs)):
            self._fs.delete(self._fs.get_last_version(**self._filename(**kwargs))._id)

    @Debugger
    def get_file(self, **kwargs):
        """
        See :meth:`FileStore.get_file`

        :rtype: :class:`gridfs.grid_file.GridOut`
        """
        if self._fs.exists(self._filename(**kwargs)):
            return self._fs.get(self._fs.get_last_version(**self._filename(**kwargs))._id)
        else:
            return None
    
    @Debugger
    def create_file(self, file_handle, **kwargs):
        """
        See :meth:`FileStore.create_file`
        """
        with self.new_file(**kwargs) as f:
            f.write(file_handle.read())

    @Debugger
    def copy_file(self, file_handle, **kwargs):
        """
        See :meth:`FileStore.copy_file`
        """
        file_handle.write(self.get_file(**kwargs).read())

    @Debugger
    def new_context(self):
        """
        See :meth:`FileStore.new_context`
        """
        self.database=pymongo.database.Database(self._conn, mongo_config['mongo_db'])
        uri=mongo_config['mongo_uri']
        if '@' in uri:
            # strip off optional mongodb:// part
            if uri.startswith('mongodb://'):
                uri=uri[len('mongodb://'):]
            result=self.database.authenticate(uri[:uri.index(':')],uri[uri.index(':')+1:uri.index('@')])
            if result==0:
                raise Exception("MongoDB authentication problem")

    @Debugger
    def new_context_copy(self):
        """
        See :meth:`FileStore.new_context_copy`
        """
        return type(self)(self._conn)

    valid_untrusted_methods=()
Example #52
File: fuse.py Project: lig/pyfusegridfs
class GridFSOperations(Operations):

    def __init__(self, host, db_name='test', collection_name='fs'):
        self.client = MongoClient(host)
        self.db = Database(self.client, db_name)
        self.fs = GridFS(self.db, collection_name)

    def _new_file(self, name):
        return self.fs.new_file(
            filename=name,
            aliases=[],
            length=0,
            upload_date=datetime.now())

    @logmethod
    def init(self):
        pass

    @logmethod
    def access(self, inode, mode, ctx):
        return True

    @logmethod
    def getattr(self, inode):
        if inode == 1:
            return Operations.getattr(self, inode)
        else:
            return grid2attrs(self.fs.get(int2oid(inode)))

    @logmethod
    def lookup(self, parent_inode, name):

        if parent_inode != 1:
            raise FUSEError(errno.ENOENT)

        try:
            gridout = self.fs.get_last_version(filename=name.decode())
        except NoFile:
            raise FUSEError(errno.ENOENT)

        return grid2attrs(gridout)

    @logmethod
    def create(self, inode_parent, name, mode, flags, ctx):
        gridin = self._new_file(name.decode())
        fh = oid2int(gridin._id)
        grid_cache[fh] = gridin
        return (fh, grid2attrs(gridin))

    @logmethod
    def flush(self, fh):
        grid = grid_cache[fh]
        grid.close()

    @logmethod
    def setattr(self, inode, attr):
        gridout = self.fs.get(int2oid(inode))
        return grid2attrs(gridout)

    @logmethod
    def release(self, fh):
        del grid_cache[fh]

    @logmethod
    def forget(self, inode_list):

        for inode in inode_list:
            if inode in oid_cache.ints:
                del oid_cache.ints[inode]

    @logmethod
    def destroy(self):
        self.client.close()

    @logmethod
    def open(self, inode, flags):
        gridout = self.fs.get(int2oid(inode))
        grid_cache[inode] = gridout
        return inode

    @logmethod
    def read(self, fh, off, size):
        grid = grid_cache[fh]

        if isinstance(grid, GridIn):
            grid.close()
            grid = self.fs.get(int2oid(fh))
            grid_cache[fh] = grid

        grid.seek(off)
        return grid.read(size)

    @logmethod
    def write(self, fh, off, buf):
        grid = grid_cache[fh]

        if isinstance(grid, GridOut):
            offbuf = grid.read(off)
            grid = self._new_file(name=grid.name)
            grid_cache[fh] = grid
            grid.write(offbuf)
            del offbuf

        if grid.closed:
            grid = self._new_file(name=grid.name)
            grid_cache[fh] = grid

        grid.write(buf)
        return len(buf)

    @logmethod
    def unlink(self, parent_inode, name):

        if parent_inode != 1:
            Operations.unlink(self, parent_inode, name)
        else:
            for gridout in self.fs.find({'filename': name.decode()}):
                self.fs.delete(gridout._id)

    @logmethod
    def fsync(self, fh, datasync):
        Operations.fsync(self, fh, datasync)

    @logmethod
    def fsyncdir(self, fh, datasync):
        Operations.fsyncdir(self, fh, datasync)

    @logmethod
    def getxattr(self, inode, name):
        Operations.getxattr(self, inode, name)

    @logmethod
    def link(self, inode, new_parent_inode, new_name):
        Operations.link(self, inode, new_parent_inode, new_name)

    @logmethod
    def listxattr(self, inode):
        Operations.listxattr(self, inode)

    @logmethod
    def mkdir(self, parent_inode, name, mode, ctx):
        Operations.mkdir(self, parent_inode, name, mode, ctx)

    @logmethod
    def mknod(self, parent_inode, name, mode, rdev, ctx):
        Operations.mknod(self, parent_inode, name, mode, rdev, ctx)

    @logmethod
    def opendir(self, inode):
        Operations.opendir(self, inode)

    @logmethod
    def readdir(self, fh, off):
        Operations.readdir(self, fh, off)

    @logmethod
    def readlink(self, inode):
        Operations.readlink(self, inode)

    @logmethod
    def releasedir(self, fh):
        Operations.releasedir(self, fh)

    @logmethod
    def removexattr(self, inode, name):
        Operations.removexattr(self, inode, name)

    @logmethod
    def rename(self, inode_parent_old, name_old, inode_parent_new, name_new):
        Operations.rename(self,
            inode_parent_old, name_old, inode_parent_new, name_new)

    @logmethod
    def rmdir(self, inode_parent, name):
        Operations.rmdir(self, inode_parent, name)

    @logmethod
    def setxattr(self, inode, name, value):
        Operations.setxattr(self, inode, name, value)

    @logmethod
    def statfs(self):
        Operations.statfs(self)

    @logmethod
    def symlink(self, inode_parent, name, target, ctx):
        Operations.symlink(self, inode_parent, name, target, ctx)
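A rough mount sketch for GridFSOperations above, assuming the llfuse bindings this snippet targets are installed; the mountpoint and mount options are placeholders, and the exact type expected for llfuse.init()'s options argument differs between llfuse releases:

import llfuse

# '/mnt/gridfs' and the 'fsname' option are placeholders for illustration only.
ops = GridFSOperations('localhost', db_name='test', collection_name='fs')
llfuse.init(ops, '/mnt/gridfs', ['fsname=gridfsfuse'])
try:
    llfuse.main()
finally:
    llfuse.close()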
Example #53
0
File: file.py Project: crudbug/canopsis
class MongoFileStorage(MongoStorage, FileStorage):

    FILENAME = 'filename'

    def __init__(self, *args, **kwargs):

        super(MongoFileStorage, self).__init__(*args, **kwargs)

        self.gridfs = None

    def _connect(self, **kwargs):

        result = super(MongoFileStorage, self)._connect(**kwargs)

        if result:

            self.gridfs = GridFS(
                database=self._database, collection=self.get_table()
            )

        return result

    def put(self, name, data, meta=None):

        fs = self.new_file(name=name, meta=meta)
        try:
            fs.write(data=data)
        finally:
            fs.close()

    def put_meta(self, name, meta):
        oldf, _meta = self.get(name, with_meta=True)
        _meta.update(meta)

        fs = self.new_file(name=name, meta=_meta)

        try:
            while True:
                data = oldf.read(512)

                if not data:
                    break

                fs.write(data=data)

        finally:
            fs.close()

    def get(self, name, version=-1, with_meta=False):

        result = None

        try:
            gridout = self.gridfs.get_version(filename=name, version=version)
        except NoFile:
            pass
        else:
            if with_meta:
                result = MongoFileStream(gridout), gridout.metadata

            else:
                result = MongoFileStream(gridout)

        return result

    def get_meta(self, name):
        result = self.get(name, with_meta=True)

        if result is not None:
            result = result[1]

        return result

    def exists(self, name):

        result = self.gridfs.exists(filename=name)

        return result

    def find(
        self,
        names=None,
        meta=None,
        sort=None,
        limit=-1,
        skip=0,
        with_meta=False
    ):

        request = {}

        if names is not None:
            if isinstance(names, basestring):
                request[MongoFileStorage.FILENAME] = names
            else:
                request[MongoFileStorage.FILENAME] = {'$in': names}

        if meta is not None:
            for metafield in meta:
                field = 'metadata.{0}'.format(metafield)
                request[field] = meta[metafield]

        cursor = self.gridfs.find(request)

        if sort is not None:
            cursor.sort(sort)
        if limit > 0:
            cursor.limit(limit)
        if skip > 0:
            cursor.skip(skip)

        if with_meta:
            result = (
                (MongoFileStream(gridout), gridout.metadata)
                for gridout in cursor
            )

        else:
            result = (MongoFileStream(gridout) for gridout in cursor)

        return result

    def list(self):

        return self.gridfs.list()

    def new_file(self, name=None, meta=None, data=None):

        kwargs = {}

        if name is None:
            name = str(uuid())

        kwargs['filename'] = name

        if meta is not None:
            kwargs['metadata'] = meta

        gridout = self.gridfs.new_file(**kwargs)

        result = MongoFileStream(gridout)

        if data is not None:
            result.write(data)

        return result

    def delete(self, names=None):

        if names is None:
            names = self.gridfs.list()

        names = ensure_iterable(names)

        for name in names:
            while True:
                fs = self.get(name)

                if fs is None:
                    break

                self.gridfs.delete(file_id=fs.get_inner_object()._id)
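The read-merge-rewrite pattern behind put_meta() above can be reproduced with plain pymongo/gridfs; the database name, filename, and metadata below are placeholders:

from pymongo import MongoClient
from gridfs import GridFS

# 'canopsis_like', 'doc.txt' and the metadata values are placeholders.
fs = GridFS(MongoClient('localhost', 27017)['canopsis_like'])
fs.put(b'payload', filename='doc.txt', metadata={'owner': 'alice'})

old = fs.get_last_version(filename='doc.txt')
merged = dict(old.metadata or {})
merged.update({'reviewed': True})
# Writing under the same filename adds a new GridFS version carrying the merged metadata.
fs.put(old.read(), filename='doc.txt', metadata=merged)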
Example #54
0
    def fs_remove(self, dbname, file):
        db = self.db_conn[dbname]
        fs = GridFS(db)

        file = fs.get_last_version(filename=file)
        fs.delete(file._id)
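A standalone sketch of what fs_remove() above does, using plain pymongo/gridfs; the database and filename are placeholders:

from pymongo import MongoClient
from gridfs import GridFS

# 'mydb' and 'report.bin' are placeholders for illustration only.
fs = GridFS(MongoClient('localhost', 27017)['mydb'])
fs.put(b'report bytes', filename='report.bin')

# Deletes only the most recent version stored under that filename.
latest = fs.get_last_version(filename='report.bin')
fs.delete(latest._id)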
Example #55
0
File: module.py Project: David-/shinken
class Mongodb_retention_scheduler(BaseModule):
    def __init__(self, modconf, uri, database, replica_set):
        BaseModule.__init__(self, modconf)
        self.uri = uri
        self.database = database
        self.replica_set = replica_set
        if self.replica_set and not ReplicaSetConnection:
            logger.error('[MongodbRetention] Can not initialize module with '
                         'replica_set because your pymongo lib is too old. '
                         'Please install it with a 2.x+ version from '
                         'https://github.com/mongodb/mongo-python-driver/downloads')
            return None



    def init(self):
        """
        Called by Scheduler to say 'let's prepare yourself guy'
        """
        logger.debug("Initialization of the mongodb  module")

        if self.replica_set:
            self.con = ReplicaSetConnection(self.uri, replicaSet=self.replica_set, fsync=True)
        else:
            # Old versions of pymongo do not know about fsync
            if ReplicaSetConnection:
                self.con = Connection(self.uri, fsync=True)
            else:
                self.con = Connection(self.uri)

        #self.con = Connection(self.uri)
        # Open a gridfs connection
        self.db = getattr(self.con, self.database)
        self.hosts_fs = GridFS(self.db, collection='retention_hosts')
        self.services_fs = GridFS(self.db, collection='retention_services')


    def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """
        logger.debug("[MongodbRetention] asking me to update the retention objects")

        all_data = daemon.get_retention_data()

        hosts = all_data['hosts']
        services = all_data['services']

        # Now the flat file method
        for h_name in hosts:
            h = hosts[h_name]
            key = "HOST-%s" % h_name
            val = cPickle.dumps(h, protocol=cPickle.HIGHEST_PROTOCOL)
            # First delete if a previous one is here, because gridfs is a versioned
            # fs, so we only want the last version...
            self.hosts_fs.delete(key)
            # We save it in the Gridfs for hosts
            fd = self.hosts_fs.put(val, _id=key, filename=key)

        for (h_name, s_desc) in services:
            s = services[(h_name, s_desc)]
            key = "SERVICE-%s,%s" % (h_name, s_desc)
            # Spaces are not allowed in the key, so replace them with the SPACE token
            key = key.replace(' ', 'SPACE')
            val = cPickle.dumps(s, protocol=cPickle.HIGHEST_PROTOCOL)

            # We save the binary dumps in a gridfs system
            # First delete if a previous one is here, because gridfs is a versioned
            # fs, so we only want the last version...
            self.services_fs.delete(key)
            fd = self.services_fs.put(val, _id=key, filename=key)

        logger.info("Retention information updated in Mongodb")

    # Should return if it succeed in the retention load or not
    def hook_load_retention(self, daemon):

        # Now the mongodb way :)
        logger.debug("[MongodbRetention] asking me to load the retention objects")

        # We got list of loaded data from retention uri
        ret_hosts = {}
        ret_services = {}

        # We must load the data and format as the scheduler want :)
        for h in daemon.hosts:
            key = "HOST-%s" % h.host_name
            try:
                fd = self.hosts_fs.get_last_version(key)
            except gridfs.errors.NoFile, exp:
                # Move on to the next host object
                continue
            val = fd.read()

            if val is not None:
                val = cPickle.loads(val)
                ret_hosts[h.host_name] = val

        for s in daemon.services:
            key = "SERVICE-%s,%s" % (s.host.host_name, s.service_description)
            # Spaces are not allowed in the key, so replace them with the SPACE token
            key = key.replace(' ', 'SPACE')
            try:
                fd = self.services_fs.get_last_version(key)
            except gridfs.errors.NoFile, exp:
                # Move on to the next service object
                continue
            val = fd.read()

            if val is not None:
                val = cPickle.loads(val)
                ret_services[(s.host.host_name, s.service_description)] = val
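The delete-then-put step used in hook_save_retention() above, shown in isolation: GridFS keeps every version of a file, so the previous document stored under the same _id is removed before the new dump is written. Database, collection, and key names are placeholders:

from pymongo import MongoClient
from gridfs import GridFS

# 'retention', 'retention_hosts' and the key are placeholders for illustration.
fs = GridFS(MongoClient('localhost', 27017)['retention'], collection='retention_hosts')
key = 'HOST-localhost'

fs.delete(key)                                     # no-op if nothing is stored under this _id
fs.put(b'pickled host state', _id=key, filename=key)
state = fs.get_last_version(key).read()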
Example #56
0
    def delete(self):
        f = GridFS(self._database, collection=self._collection)
        return f.delete(self._value)
Example #57
0
class FileStoreMongo(FileStore):
    """
    Filestore database using GridFS (see :mod:`gridfs`)

    :arg pymongo.database.Database connection: MongoDB database object
    """

    def __init__(self, connection):
        self._conn=connection
        self.new_context()
        self._fs=GridFS(self.database)

    def new_file(self, **kwargs):
        """
        See :meth:`FileStore.new_file`

        :rtype: :class:`gridfs.grid_file.GridIn`
        """
        self.delete_files(**kwargs)
        return self._fs.new_file(**kwargs)

    def delete_files(self, **kwargs):
        """
        See :meth:`FileStore.delete_files`
        """
        while self._fs.exists(kwargs):
            self._fs.delete(self._fs.get_last_version(**kwargs)._id)

    def get_file(self, **kwargs):
        """
        See :meth:`FileStore.get_file`

        :rtype: :class:`gridfs.grid_file.GridOut`
        """
        if self._fs.exists(kwargs):
            return self._fs.get(self._fs.get_last_version(**kwargs)._id)
        else:
            return None
    
    def create_file(self, file_handle, **kwargs):
        """
        See :meth:`FileStore.create_file`
        """
        with self.new_file(**kwargs) as f:
            f.write(file_handle.read())

    def copy_file(self, file_handle, **kwargs):
        """
        See :meth:`FileStore.copy_file`
        """
        file_handle.write(self.get_file(**kwargs).read())

    def new_context(self):
        """
        Reconnect to the filestore. This function should be
        called before the first filestore access in each new process.
        """
        self.database=pymongo.database.Database(self._conn, mongo_config['mongo_db'])
        uri=mongo_config['mongo_uri']
        if '@' in uri:
            # strip off optional mongodb:// part
            if uri.startswith('mongodb://'):
                uri=uri[len('mongodb://'):]
            result=self.database.authenticate(uri[:uri.index(':')],uri[uri.index(':')+1:uri.index('@')])
            if result==0:
                raise Exception("MongoDB authentication problem")

    valid_untrusted_methods=()
Example #58
0
class Database():
    def __init__(self):
        # Create the connection

        connection = pymongo.MongoClient('localhost')

        # Version Check
        server_version = connection.server_info()['version']
        if int(server_version[0]) < 3:
            raise UserWarning('Incompatible MongoDB Version detected. Requires 3 or higher. Found {0}'.format(server_version))

        # Connect to Databases.
        voldb = connection['voldb']
        voldbfs = connection['voldbfs']

        # Get Collections
        self.vol_sessions = voldb.sessions
        self.vol_comments = voldb.comments
        self.vol_plugins = voldb.plugins
        self.vol_datastore = voldb.datastore
        self.vol_files = GridFS(voldbfs)

        # Indexes
        self.vol_comments.create_index([('freetext', 'text')])

        self.vol_plugins.create_index([('$**', 'text')])

    ##
    # Sessions
    ##
    def get_allsessions(self):
        sessions = self.vol_sessions.find()
        return [x for x in sessions]

    def get_session(self, sess_id):
        session = self.vol_sessions.find_one({'_id': sess_id})
        return session

    def create_session(self, sess_data):
        sess_id = self.vol_sessions.insert_one(sess_data).inserted_id
        return sess_id

    def update_session(self, sess_id, new_values):
        self.vol_sessions.update_one({'_id':sess_id},{"$set": new_values })
        return True

    ##
    # Comments
    ##
    def get_commentbyid(self, comment_id):
        comment = self.vol_comments.find({'_id': comment_id})
        return comment

    def get_commentbysession(self,session_id):
        comments = self.vol_comments.find({'session_id': session_id}).sort("created", -1)
        return [row for row in comments]

    def create_comment(self, comment_data):
        comment_id = self.vol_comments.insert_one(comment_data).inserted_id
        return comment_id

    def search_comments(self, search_text, session_id=None):
        results = []
        rows = self.vol_comments.find({"$text": {"$search": search_text}})
        for row in rows:
            if session_id:
                if row['session_id'] == session_id:
                    results.append(row)
            else:
                results.append(row)
        return results

    ##
    # Plugins
    ##

    def get_pluginbysession(self, session_id):
        result_rows = []
        plugin_output = self.vol_plugins.find({'session_id': session_id}).sort("created", -1)
        for row in plugin_output:
            result_rows.append(row)
        return result_rows

    def get_pluginbyid(self, plugin_id):
        plugin_output = self.vol_plugins.find_one({'_id': plugin_id})
        return plugin_output

    def create_plugin(self, plugin_data):
        plugin_id = self.vol_plugins.insert_one(plugin_data).inserted_id
        return plugin_id

    def search_plugins(self, search_text, session_id=None):
        results = []
        rows = self.vol_plugins.find({"$text": {"$search": search_text}})
        for row in rows:
            if session_id:
                if row['session_id'] == session_id:
                    results.append(row)
            else:
                results.append(row)
        return results

    def update_plugin(self, plugin_id, new_values):
        self.vol_plugins.update_one({'_id':plugin_id},{"$set": new_values })
        return True


    ##
    # File System
    ##
    def get_filebyid(self, file_id):
        file_object = self.vol_files.get(file_id)
        return file_object

    def list_files(self, sess_id):
        results = self.vol_files.find({'session_id': sess_id})
        return results

    def create_file(self, file_data, sess_id, sha256, filename, pid=None, file_meta=None):
        file_id = self.vol_files.put(file_data, filename=filename, sess_id=sess_id, sha256=sha256, pid=pid)
        return file_id


    ##
    # DataStore
    ##

    def get_alldatastore(self):
        results = self.vol_datastore.find()
        return [row for row in results]

    def search_datastore(self, search_query):
        results = self.vol_datastore.find(search_query)
        return [row for row in results]

    def create_datastore(self, store_data):
        data_id = self.vol_datastore.insert_one(store_data).inserted_id
        return data_id

    def update_datastore(self, search_query, new_values):
        self.vol_datastore.update_one(search_query, {"$set": new_values})
        return True


    ##
    # Drop Session
    ##
    def drop_session(self, session_id):

        # Drop Plugins
        self.vol_plugins.delete_many({'session_id': session_id})
        # Drop Files
        results = self.vol_files.find({'session_id': session_id})
        for row in results:
            self.vol_files.delete(row._id)
        # Drop DataStore
        self.vol_datastore.delete_many({'session_id': session_id})
        # Drop Notes
        self.vol_comments.delete_many({'session_id': session_id})
        # Drop session
        self.vol_sessions.delete_many({'_id': session_id})
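A short usage sketch of the Database wrapper above, assuming a local MongoDB 3+ instance; the session fields, sha256 string, and filename are placeholders:

db = Database()

# 'memory-image-01', 'deadbeef' and 'dump.bin' are placeholder values.
sess_id = db.create_session({'name': 'memory-image-01', 'status': 'new'})
file_id = db.create_file(b'\x00' * 16, sess_id, 'deadbeef', 'dump.bin')
print(db.get_filebyid(file_id).read())

db.update_session(sess_id, {'status': 'complete'})
db.drop_session(sess_id)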
Example #59
0
File: models.py Project: CanalTP/tartare
def delete_file_from_gridfs(id, gridfs=None):
    if not gridfs:
        gridfs = GridFS(mongo.db)
    return gridfs.delete(ObjectId(id))
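A small sketch of calling the helper above with an explicit GridFS instance (the no-argument path relies on the application's mongo.db); the client and database name are placeholders:

from pymongo import MongoClient
from gridfs import GridFS

# 'tartare' and 'export.zip' are placeholders for illustration only.
fs = GridFS(MongoClient('localhost', 27017)['tartare'])
file_id = fs.put(b'gtfs feed bytes', filename='export.zip')

# The helper accepts the id as a string and wraps it in ObjectId before deleting.
delete_file_from_gridfs(str(file_id), gridfs=fs)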