import base64

from osv import osv, fields
from tools.translate import _

import nodes


def write(self, cr, uid, ids, vals, context=None):
    result = False
    if not isinstance(ids, list):
        ids = [ids]
    res = self.search(cr, uid, [('id', 'in', ids)])
    if not res:
        return False
    if not self._check_duplication(cr, uid, vals, ids, 'write'):
        raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))

    # if nodes call this write(), they must skip the code below
    from_node = context and context.get('__from_node', False)
    if (('parent_id' in vals) or ('name' in vals)) and not from_node:
        # perhaps this file is being renamed or moved to another directory
        nctx = nodes.get_node_context(cr, uid, context={})
        dirobj = self.pool.get('document.directory')
        if 'parent_id' in vals:
            dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
            dnode = nctx.get_dir_node(cr, dbro)
        else:
            dbro = None
            dnode = None

        ids2 = []
        for fbro in self.browse(cr, uid, ids, context=context):
            # nothing actually changes for this record: keep it in the
            # list for the regular write below
            if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
                    and ('name' not in vals or fbro.name == vals['name']):
                ids2.append(fbro.id)
                continue
            fnode = nctx.get_file_node(cr, fbro)
            res = fnode.move_to(cr, dnode or fnode.parent,
                                vals.get('name', fbro.name), fbro, dbro, True)
            if isinstance(res, dict):
                # the node layer supplied extra values (and possibly a
                # new id) for this record: write it individually
                vals2 = vals.copy()
                vals2.update(res)
                wid = res.get('id', fbro.id)
                result = super(document_file, self).write(cr, uid, wid, vals2, context=context)
                # TODO: how to handle/merge several results?
            elif res:
                ids2.append(fbro.id)
            # else: the move was refused, skip this record
        ids = ids2

    if 'file_size' in vals:
        # only write that field using direct SQL calls
        del vals['file_size']
    if ids and vals:
        result = super(document_file, self).write(cr, uid, ids, vals, context=context)
    return result
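# Usage sketch for the write() override: a plain ORM rename is routed
# through the node layer, so the storage backend stays consistent with
# the new name or directory. This assumes document_file inherits
# 'ir.attachment' as in the standard document module; cr, uid and
# att_id are placeholders.
#
#   attachment_obj = self.pool.get('ir.attachment')
#   attachment_obj.write(cr, uid, [att_id], {'name': 'report_v2.pdf'})
#
# Calls originating from the node layer itself set '__from_node' in
# the context, which skips the move logic above.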
def _data_set(self, cr, uid, id, name, value, arg, context=None):
    if not value:
        return True
    # decode the base64 payload and hand it to the node layer, which
    # knows where the file content is actually stored
    fbro = self.browse(cr, uid, id, context=context)
    nctx = nodes.get_node_context(cr, uid, context={})
    fnode = nodes.node_file(None, None, nctx, fbro)
    res = fnode.set_data(cr, base64.decodestring(value), fbro)
    return res
def _data_get(self, cr, uid, ids, name, arg, context=None):
    if context is None:
        context = {}
    fbrl = self.browse(cr, uid, ids, context=context)
    nctx = nodes.get_node_context(cr, uid, context={})
    # nctx will /not/ inherit the caller's context. Most of
    # it would be useless, anyway (like active_id, active_model,
    # bin_size etc.)
    result = {}
    bin_size = context.get('bin_size', False)
    for fbro in fbrl:
        fnode = nodes.node_file(None, None, nctx, fbro)
        if not bin_size:
            data = fnode.get_data(cr, fbro)
            result[fbro.id] = base64.encodestring(data or '')
        else:
            result[fbro.id] = fnode.get_data_len(cr, fbro)
    return result
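# How the two accessors are wired together: _data_get computes the
# binary content and _data_set is its inverse, following the standard
# OpenERP fields.function pattern. A minimal sketch; the column name
# and options below are assumptions, not copied from the original
# module.
_columns = {
    'datas': fields.function(_data_get, method=True, fnct_inv=_data_set,
                             string='File Content', type='binary'),
}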
def get_object(self, cr, uid, uri, context=None):
    """ Return a node object for the given uri.

        This fn merely passes the call to node_context.
    """
    return nodes.get_node_context(cr, uid, context).get_uri(cr, uri)
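# Usage sketch (hypothetical path; assumes, as in the WebDAV layer of
# the document module, that the uri is passed as a list of path
# components):
#
#   dir_obj = self.pool.get('document.directory')
#   node = dir_obj.get_object(cr, uid, ['Documents', '2011', 'invoice.pdf'])
#   if node:
#       data = node.get_data(cr)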