def perm_read(self, cr, user, ids, context=None, details=True):
    if not ids:
        return []
    if isinstance(ids, (int, long)):
        ids = [ids]
    collection = mdbpool.get_collection(self._table)
    fields = ['id', 'create_uid', 'create_date', 'write_uid', 'write_date']
    mongo_cr = collection.find({'id': {'$in': ids}}, fields)
    res = [x for x in mongo_cr]
    for doc in res:
        docfields = doc.keys()
        for field in fields:
            if field not in docfields:
                doc[field] = False
            if field in ['create_date', 'write_date'] and doc[field]:
                doc[field] = doc[field].strftime('%Y-%m-%d %H:%M:%S')
            if field in ['create_uid', 'write_uid'] and doc[field]:
                doc[field] = self.pool.get('res.users').name_get(
                    cr, user, [doc[field]])[0]
        del doc['_id']
    return res

def write(self, cr, user, ids, vals, context=None):
    db = mdbpool.get_db()
    collection = mdbpool.get_collection(self._table)
    vals = vals.copy()
    if not ids:
        return True
    self.pool.get('ir.model.access').check(cr, user, self._name, 'write',
                                           context=context)
    # Pre-process date and datetime fields
    self.preformat_write_fields(vals)
    self.write_binary_gridfs_fields(vals)
    # Log access
    vals.update({
        'write_uid': user,
        'write_date': datetime.now(),
    })
    # Bulk update with modifiers and safe mode (pymongo 2.x positional
    # args: upsert=False, manipulate=False, safe=True, multi=True)
    collection.update({'id': {'$in': ids}}, {'$set': vals},
                      False, False, True, True)
    if db.error():
        raise except_orm('MongoDB update error', db.error())
    return True

def _read_flat(self, cr, user, ids, fields_to_read, context=None,
               load='_classic_read'):
    collection = mdbpool.get_collection(self._table)
    if not context:
        context = {}
    if not ids:
        return []
    if fields_to_read is None:
        fields_to_read = self._columns.keys()
    # All non-inherited fields for which the attribute
    # whose name is in load is True
    fields_pre = [f for f in fields_to_read
                  if f == self.CONCURRENCY_CHECK_FIELD
                  or (f in self._columns
                      and getattr(self._columns[f], '_classic_write'))]
    if len(fields_pre):
        order = self._compute_order(cr, user)
        mongo_cr = collection.find({'id': {'$in': ids}},
                                   fields_pre + ['id'], sort=order)
        res = [x for x in mongo_cr]
    else:
        res = map(lambda x: {'id': x}, ids)
    # Post-process date and datetime fields
    self.read_date_fields(fields_to_read, res)
    self.read_binary_gridfs_fields(fields_to_read, res)
    # Function fields
    fields_function = [f for f in fields_to_read
                       if f in self._columns
                       and isinstance(self._columns[f], fields.function)]
    todo = {}
    for f in fields_function:
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
    for key, val in todo.items():
        if key:
            res2 = self._columns[val[0]].get(cr, self, ids, val, user,
                                             context=context, values=res)
            for pos in val:
                for record in res:
                    record[pos] = res2[record['id']][pos]
        else:
            for f in val:
                res2 = self._columns[f].get(cr, self, ids, f, user,
                                            context=context, values=res)
                for record in res:
                    if res2 and (record['id'] in res2):
                        record[f] = res2[record['id']]
                    else:
                        record[f] = []
    return res

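# Note on the function-fields pass above: columns declared with the same
# `multi` key are grouped under todo[<multi>] and resolved with a single
# get() call. A minimal sketch of such a declaration (hypothetical column
# names and compute method, OpenERP 5/6-style API):
#
#     _columns = {
#         'total': fields.function(_compute_amounts, method=True,
#                                  multi='amounts'),
#         'taxes': fields.function(_compute_amounts, method=True,
#                                  multi='amounts'),
#     }
#
# Both columns land in todo['amounts'] and are filled from one res2 dict.
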
def unlink(self, cr, uid, ids, context=None):
    db = mdbpool.get_db()
    collection = mdbpool.get_collection(self._table)
    if not ids:
        return True
    if isinstance(ids, (int, long)):
        ids = [ids]
    self.pool.get('ir.model.access').check(cr, uid, self._name, 'unlink',
                                           context=context)
    # Remove binary fields (files in GridFS)
    self.unlink_binary_gridfs_fields(collection, ids)
    # Remove with safe mode
    collection.remove({'id': {'$in': ids}}, True)
    if db.error():
        raise except_orm('MongoDB unlink error', db.error())
    return True

def search(self, cr, user, args, offset=0, limit=0, order=None,
           context=None, count=False):
    # Work on a copy of args: the domain has to be a list of lists
    tmp_args = [isinstance(arg, tuple) and list(arg) or arg for arg in args]
    collection = mdbpool.get_collection(self._table)
    self.search_trans_fields(tmp_args)
    new_args = mdbpool.translate_domain(tmp_args)
    if not context:
        context = {}
    self.pool.get('ir.model.access').check(cr, user, self._name, 'read',
                                           context=context)
    # Counting is expensive in MongoDB, so only count when forced via the
    # context; otherwise just return the limit.
    # https://jira.mongodb.org/browse/SERVER-1752
    if not context.get('force_count', False) and count:
        return limit
    # With no filters, sorting a very large collection orders every
    # document before returning a result, so fall back to ordering by
    # 'id', which always has its own index and is fast.
    if not args:
        order = 'id'
    if count:
        return collection.find(new_args, {'id': 1},
                               skip=int(offset), limit=int(limit),
                               timeout=True, snapshot=False,
                               tailable=False,
                               sort=self._compute_order(cr, user, order)
                               ).count()
    mongo_cr = collection.find(new_args, {'id': 1},
                               skip=int(offset), limit=int(limit),
                               timeout=True, snapshot=False, tailable=False,
                               sort=self._compute_order(cr, user, order))
    res = [x['id'] for x in mongo_cr]
    return res

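# Hedged usage note: with this variant, count=True only performs a real
# count when the caller opts in through the context, e.g.
#
#     n = model.search(cr, uid, domain, count=True,
#                      context={'force_count': True})
#
# Any other count=True call simply echoes `limit` back (see SERVER-1752
# above). `model` stands for any osv object backed by this class.
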
def create(self, cr, user, vals, context=None):
    collection = mdbpool.get_collection(self._table)
    vals = vals.copy()
    if not context:
        context = {}
    self.pool.get('ir.model.access').check(cr, user, self._name, 'create',
                                           context=context)
    if self._defaults:
        # Default values for the columns not passed in vals
        default = [f for f in self._columns.keys() if f not in vals]
        if len(default):
            default_values = self.default_get(cr, user, default, context)
            vals.update(default_values)
    # Add an incremental id to store in vals.
    # find_and_modify is deprecated since pymongo 3.0.3:
    # counter = mdbpool.get_collection('counters').find_and_modify(
    #     {'_id': self._table},
    #     {'$inc': {'counter': 1}})
    # NB: by default find_one_and_update returns the pre-update document
    # (None on the very first upsert); pass
    # return_document=ReturnDocument.AFTER if the post-increment value is
    # needed.
    counter = mdbpool.get_collection('counters').find_one_and_update(
        {'_id': self._table},
        {'$inc': {'counter': 1}},
        upsert=True)
    vals.update({'id': counter['counter']})
    # Pre-process date fields
    self.preformat_write_fields(vals)
    self.write_binary_gridfs_fields(vals)
    # Log access
    vals.update({
        'create_uid': user,
        'create_date': datetime.now(),
    })
    # Effectively create the record
    collection.insert(vals)
    return vals['id']

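# Usage sketch (assumptions: a model registered in the pool and backed by
# this MongoDB osv; the name 'mongodb.example' is hypothetical, and cr/uid
# come from the caller as in any OpenERP server-side code):
#
#     model = self.pool.get('mongodb.example')
#     new_id = model.create(cr, uid, {'name': 'demo'})
#     model.write(cr, uid, [new_id], {'name': 'renamed'})
#     docs = model.read(cr, uid, [new_id], ['name', 'write_date'])
#     model.unlink(cr, uid, [new_id])
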
def search(self, cr, user, args, offset=0, limit=0, order=None,
           context=None, count=False):
    # Variant of search() for newer pymongo releases: no_cursor_timeout
    # and the $snapshot modifier replace the timeout/snapshot/tailable
    # flags used above.
    # Work on a copy of args: the domain has to be a list of lists
    tmp_args = [isinstance(arg, tuple) and list(arg) or arg for arg in args]
    collection = mdbpool.get_collection(self._table)
    self.search_trans_fields(tmp_args)
    new_args = mdbpool.translate_domain(tmp_args)
    # Implement exact match for char fields, which default to ilike
    for k in new_args:
        field = self._columns.get(k)
        if not field:
            continue
        if getattr(field, 'exact_match', False):
            if isinstance(new_args[k], re._pattern_type):
                new_args[k] = new_args[k].pattern.lstrip('.*').rstrip('.*')
    if not context:
        context = {}
    self.pool.get('ir.model.access').check(cr, user, self._name, 'read',
                                           context=context)
    # With no filters, sorting a very large collection orders every
    # document before returning a result, so fall back to ordering by
    # 'id', which always has its own index and is fast.
    if not args:
        order = 'id'
    if count:
        return collection.find(
            new_args, {'id': 1},
            no_cursor_timeout=True,
            modifiers={"$snapshot": False},
        ).count()
    mongo_cr = collection.find(
        new_args, {'id': 1},
        skip=int(offset), limit=int(limit),
        no_cursor_timeout=True,
        modifiers={"$snapshot": False},
        sort=self._compute_order(cr, user, order))
    res = [x['id'] for x in mongo_cr]
    return res

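# Hedged example of the exact_match handling above: translate_domain() is
# assumed to turn a char-field leaf like ('name', '=', 'foo') into a
# case-insensitive regex (the ilike default), roughly:
#
#     new_args = {'name': re.compile('.*foo.*', re.IGNORECASE)}
#
# For a column declared with exact_match=True, the loop strips the '.*'
# padding back off the pattern so the query matches the literal string:
#
#     new_args = {'name': 'foo'}
#
# The '.*foo.*' shape is an assumption for illustration; the real pattern
# comes from mdbpool.translate_domain().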