def write(self, cr, user, ids, vals, context=None):
    """Update the MongoDB documents whose ``id`` is in ``ids`` with ``vals``.

    :param cr: database cursor (used only for the access-rights check)
    :param user: id of the user performing the write (logged as write_uid)
    :param ids: document id or list of document ids to update
    :param vals: dict of field name -> new value (copied, never mutated)
    :param context: optional context dict, forwarded to the access check
    :return: True
    :raises except_orm: if MongoDB reports an error after the update
    """
    db = mdbpool.get_db()
    collection = mdbpool.get_collection(self._table)
    # Work on a copy so the caller's dict is not polluted with audit fields
    vals = vals.copy()
    if not ids:
        return True
    # Accept a single scalar id, the same way unlink() does; without this a
    # scalar builds {'$in': 7} which matches nothing and the write is lost.
    if isinstance(ids, (int, long)):
        ids = [ids]
    self.pool.get('ir.model.access').check(cr, user, self._name, 'write',
                                           context=context)
    # Pre process date and datetime fields
    self.preformat_write_fields(vals)
    self.write_binary_gridfs_fields(vals)
    # Log access
    vals.update({'write_uid': user,
                 'write_date': datetime.now(),
                 })
    # Bulk update with modifiers: positional args are
    # upsert=False, manipulate=False, safe=True, multi=True (old PyMongo API)
    collection.update({'id': {'$in': ids}},
                      {'$set': vals},
                      False, False, True, True)
    if db.error():
        raise except_orm('MongoDB update error', db.error())
    return True
def unlink(self, cr, uid, ids, context=None):
    """Delete the documents whose ``id`` is in ``ids``, together with any
    GridFS files referenced by their binary fields.

    :param ids: document id or list of document ids to remove
    :return: True
    :raises except_orm: if MongoDB reports an error after the removal
    """
    db = mdbpool.get_db()
    collection = mdbpool.get_collection(self._table)
    # Nothing to do for an empty id set
    if not ids:
        return True
    # Normalize a single scalar id into a list
    if isinstance(ids, (int, long)):
        ids = [ids]
    access_obj = self.pool.get('ir.model.access')
    access_obj.check(cr, uid, self._name, 'unlink', context=context)
    # Remove binary fields (files in gridfs)
    self.unlink_binary_gridfs_fields(collection, ids)
    # Remove with safe mode
    collection.remove({'id': {'$in': ids}}, True)
    if db.error():
        raise except_orm('MongoDB unlink error', db.error())
    return True
def write(self, cr, user, ids, vals, context=None):
    """Update the MongoDB documents whose ``id`` is in ``ids`` with ``vals``.

    :param cr: database cursor (used only for the access-rights check)
    :param user: id of the user performing the write (logged as write_uid)
    :param ids: document id or list of document ids to update
    :param vals: dict of field name -> new value (copied, never mutated)
    :param context: optional context dict, forwarded to the access check
    :return: True
    :raises except_orm: if MongoDB reports an error after the update
    """
    db = mdbpool.get_db()
    collection = mdbpool.get_collection(self._table)
    # Copy so the caller's dict does not get the audit fields added below
    vals = vals.copy()
    if not ids:
        return True
    # Accept a single scalar id (unlink() already does); a bare int would
    # otherwise end up as {'$in': <int>} and match no document at all.
    if isinstance(ids, (int, long)):
        ids = [ids]
    self.pool.get('ir.model.access').check(cr, user, self._name, 'write',
                                           context=context)
    # Pre process date and datetime fields
    self.preformat_write_fields(vals)
    self.write_binary_gridfs_fields(vals)
    # Log access
    vals.update({
        'write_uid': user,
        'write_date': datetime.now(),
    })
    # Bulk update with modifiers: positional args are
    # upsert=False, manipulate=False, safe=True, multi=True (old PyMongo API)
    collection.update({'id': {'$in': ids}},
                      {'$set': vals},
                      False, False, True, True)
    if db.error():
        raise except_orm('MongoDB update error', db.error())
    return True
def _auto_init(self, cr, context=None):
    """Initialise the MongoDB collection backing this model.

    Mirrors what the SQL ORM does at init time: registers the fields,
    creates the per-model counter document (used to emulate PostgreSQL's
    incremental ids), builds the indexes, and back-fills default values
    for fields missing from every existing document.

    :raises except_orm: if MongoDB reports an error while indexing or
        while writing the defaults
    """
    self._field_create(cr, context=context)
    logger = netsvc.Logger()
    db = mdbpool.get_db()
    # Create the model counters document in order to
    # have incremental ids the way postgresql does
    collection = db['counters']
    if not collection.find({'_id': self._table}).count():
        vals = {'_id': self._table, 'counter': 1}
        collection.save(vals)
    collection = db[self._table]
    # Create index for the id field
    # NOTE: ensure_index with deprecated_unique/ttl is the old PyMongo API;
    # ttl=300 caches the ensure call client-side for 5 minutes
    collection.ensure_index([('id', pymongo.ASCENDING)],
                            deprecated_unique=None,
                            ttl=300,
                            unique=True)
    # Create auto indexs if field has select=True in field definition
    # like PostgreSQL
    # created_idx: names of fields that already have a single-key index
    created_idx = [x['key'][0][0]
                   for x in collection.index_information().values()
                   if 'key' in x and len(x['key']) == 1]
    for field_name, field_obj in self._columns.iteritems():
        if getattr(field_obj, 'select', False):
            if field_name not in created_idx:
                collection.ensure_index(field_name)
    if db.error():
        raise except_orm('MongoDB create id field index error', db.error())
    # Update docs with new default values if they do not exist
    # If we find at least one document with this field
    # we assume that the field is present in the collection
    def_fields = filter(lambda a: not collection.find_one(
        {a: {'$exists': True}}), self._defaults.keys())
    if len(def_fields):
        logger.notifyChannel('orm', netsvc.LOG_INFO,
                             'setting default value for \
%s of collection %s' % (def_fields, self._table))
        # Defaults are computed as the superuser (uid 1)
        def_values = self.default_get(cr, 1, def_fields)
        collection.update({},
                          {'$set': def_values},
                          upsert=False,
                          manipulate=False,
                          safe=True,
                          multi=True)
        if db.error():
            raise except_orm('MongoDB update defaults error', db.error())
def _auto_init(self, cr, context=None):
    """Initialise the MongoDB collection backing this model.

    Like the SQL ORM's init step: registers fields, creates the per-model
    counter document (incremental ids, PostgreSQL-style), builds indexes,
    and back-fills defaults for fields absent from every document.

    :raises except_orm: on any MongoDB error during indexing or the
        defaults back-fill
    """
    self._field_create(cr, context=context)
    logger = netsvc.Logger()
    db = mdbpool.get_db()
    # Create the model counters document in order to
    # have incremental ids the way postgresql does
    collection = db['counters']
    if not collection.find({'_id': self._table}).count():
        vals = {'_id': self._table, 'counter': 1}
        collection.save(vals)
    collection = db[self._table]
    # Create index for the id field
    # NOTE: old PyMongo ensure_index API; ttl=300 caches the call client-side
    collection.ensure_index([('id', pymongo.ASCENDING)],
                            deprecated_unique=None,
                            ttl=300,
                            unique=True)
    # Create auto indexs if field has select=True in field definition
    # like PostgreSQL
    # created_idx: fields that already carry a single-key index
    created_idx = [x['key'][0][0]
                   for x in collection.index_information().values()
                   if 'key' in x and len(x['key']) == 1]
    for field_name, field_obj in self._columns.iteritems():
        if getattr(field_obj, 'select', False):
            if field_name not in created_idx:
                collection.ensure_index(field_name)
    if db.error():
        raise except_orm('MongoDB create id field index error', db.error())
    # Update docs with new default values if they do not exist
    # If we find at least one document with this field
    # we assume that the field is present in the collection
    def_fields = filter(
        lambda a: not collection.find_one({a: {'$exists': True}}),
        self._defaults.keys())
    if len(def_fields):
        logger.notifyChannel(
            'orm', netsvc.LOG_INFO,
            'setting default value for \
%s of collection %s' % (def_fields, self._table))
        # Defaults are computed as the superuser (uid 1)
        def_values = self.default_get(cr, 1, def_fields)
        collection.update({},
                          {'$set': def_values},
                          upsert=False,
                          manipulate=False,
                          safe=True,
                          multi=True)
        if db.error():
            raise except_orm('MongoDB update defaults error', db.error())
def unlink_binary_gridfs_fields(self, collection, ids):
    """Delete from GridFS every file referenced by a binary field of the
    documents whose ``id`` is in ``ids``.

    :param collection: MongoDB collection holding the documents
    :param ids: list of document ids whose binary files must be removed
    """
    binary_fields = self.get_binary_gridfs_fields()
    # Nothing to clean up if the model declares no GridFS-backed fields
    if not binary_fields:
        return
    fs = gridfs.GridFS(mdbpool.get_db(), collection='fs')
    documents = list(collection.find({'id': {'$in': ids}}, binary_fields))
    for document in documents:
        for field in binary_fields:
            oid = document.get(field, False)
            if not oid:
                continue
            objectid = ObjectId(oid)
            # Guard against dangling references before deleting
            if fs.exists(objectid):
                fs.delete(objectid)
def transform_binary_gridfs_field(self, field, value, action):
    """Translate a binary field value to or from its GridFS form.

    For 'read', ``value`` is an ObjectId string and the stored file
    contents are returned ('' when the file no longer exists). For
    'write', ``value`` is raw content, stored in GridFS, and the new
    ObjectId is returned as a string. Falsy values pass through untouched.
    """
    if not value:
        return value
    fs = gridfs.GridFS(mdbpool.get_db(), collection='fs')
    if action == 'read':
        objectid = ObjectId(value)
        if not fs.exists(objectid):
            return ''
        return fs.get(objectid).read()
    elif action == 'write':
        return str(fs.put(value))
def unlink_binary_gridfs_fields(self, collection, ids):
    """Remove the GridFS files referenced by the binary fields of the
    documents in ``ids``.

    :param collection: MongoDB collection to read the references from
    :param ids: list of document ids being deleted
    """
    binary_fields = self.get_binary_gridfs_fields()
    if not binary_fields:
        # No GridFS-backed fields on this model: nothing to delete
        return
    fs = gridfs.GridFS(mdbpool.get_db(), collection='fs')
    cursor = collection.find({'id': {'$in': ids}}, binary_fields)
    for doc in list(cursor):
        for name in binary_fields:
            stored_oid = doc.get(name, False)
            if not stored_oid:
                continue
            file_id = ObjectId(stored_oid)
            # Only delete files that actually exist (ignore stale refs)
            if fs.exists(file_id):
                fs.delete(file_id)
def transform_binary_gridfs_field(self, field, value, action):
    """Convert a binary field between its stored (ObjectId string) and
    in-memory (raw bytes) representations.

    'read' fetches the file content for the given ObjectId string,
    returning '' if the file is gone; 'write' stores the content and
    returns the resulting ObjectId as a string. Falsy input is returned
    as-is without touching GridFS.
    """
    if not value:
        return value
    grid = gridfs.GridFS(mdbpool.get_db(), collection='fs')
    if action == 'write':
        new_id = grid.put(value)
        return str(new_id)
    elif action == 'read':
        file_id = ObjectId(value)
        content = ''
        if grid.exists(file_id):
            content = grid.get(file_id).read()
        return content
def set(self, cursor, obj, rid, name, value, user=None, context=None):
    """Store a binary field value in GridFS and delegate the (ObjectId
    string) reference to the parent field implementation.

    :param value: raw file content to store, or falsy to clear the field
    :return: result of the parent class ``set``
    """
    # TODO: Store some more metadata. File name, author, etc.
    db = mdbpool.get_db()
    # NOTE: the enclosing class is itself named `gridfs` (see the super()
    # call below), so the pymongo gridfs module is aliased as `gfs` here.
    fs = gfs.GridFS(db, collection='fs')
    # NOTE(review): `rid` is rebound by this loop; reconstructed from a
    # collapsed source — confirm original nesting of the statements below.
    for rid, oid in self.get_oids(cursor, obj, [rid], name).items():
        filename = self.get_filename(obj, rid, name)
        # Without versioning we overwrite: drop the previous file first
        if oid and fs.exists(ObjectId(oid)) and not self.versioning:
            fs.delete(ObjectId(oid))
        if value:
            _id = fs.put(value, filename=filename)
            value = str(_id)
        if not value and self.versioning:
            # Clearing a versioned field: delete the current file and fall
            # back to the latest remaining version of the same filename
            fs.delete(ObjectId(oid))
            # NOTE(review): sorts by 'filename' although 'uploadDate' is
            # projected — presumably this was meant to sort by uploadDate
            # to pick the newest version; confirm before relying on it.
            res = db.fs.files.find(
                {'filename': filename},
                {'uploadDate': True, '_id': True}
            ).sort('filename', pymongo.DESCENDING).limit(1)
            if res.count():
                value = str(res[0]['_id'])
    return super(gridfs, self).set(cursor, obj, rid, name, value, user,
                                   context)
def unlink(self, cr, uid, ids, context=None):
    """Remove the documents whose ``id`` is in ``ids`` from MongoDB,
    deleting their GridFS binary files first.

    :param ids: document id or list of document ids
    :return: True
    :raises except_orm: if MongoDB reports an error after the removal
    """
    db = mdbpool.get_db()
    collection = mdbpool.get_collection(self._table)
    if not ids:
        # Empty selection: nothing to delete
        return True
    # A scalar id is wrapped into a one-element list
    if isinstance(ids, (int, long)):
        ids = [ids]
    self.pool.get('ir.model.access').check(
        cr, uid, self._name, 'unlink', context=context)
    # Remove binary fields (files in gridfs)
    self.unlink_binary_gridfs_fields(collection, ids)
    # Remove with safe mode
    collection.remove({'id': {'$in': ids}}, True)
    if db.error():
        raise except_orm('MongoDB unlink error', db.error())
    return True
def get(self, cursor, obj, ids, name, user=None, offset=0, context=None,
        values=None):
    """Read a GridFS-backed binary field for every record in ``ids``.

    :param ids: record ids to read
    :param context: with ``bin_size`` set, returns a human-readable size
        plus version count instead of the raw content
    :return: dict mapping record id -> file content (or size string when
        ``bin_size`` is set), False when the record has no file
    """
    if not context:
        context = {}
    db = mdbpool.get_db()
    # `gfs` is the pymongo gridfs module (aliased to avoid the field class
    # name); files live in the 'fs' collection
    fs = gfs.GridFS(db, collection='fs')
    res = self.get_oids(cursor, obj, ids, name)
    for rid, oid in res.items():
        filename = self.get_filename(obj, rid, name)
        if oid:
            oid = ObjectId(oid)
            # NOTE(review): no fs.exists() guard here (set() has one); a
            # dangling oid would raise gridfs NoFile — confirm intended.
            val = fs.get(oid).read()
            if context.get('bin_size', False) and val:
                # version = number of stored files sharing this filename
                version = db.fs.files.find(
                    {'filename': filename},
                    {'uploadDate': True, '_id': False}
                ).count()
                res[rid] = '%s - v%s' % (human_size(val), version)
            else:
                res[rid] = val
        else:
            res[rid] = False
    return res