def export_data(self, fname, fields, import_compat=False, **kw):
    """Export the selected records of ``params.model`` as CSV.

    :param fname: requested download file name (kept for the route signature)
    :param fields: list of field paths to export, or a single path string
    :param import_compat: truthy to export in import-compatible format
    :return: the export_csv payload, or False when the server reports a warning
    """
    params, data_index = TinyDict.split(kw)
    proxy = rpc.RPCProxy(params.model)

    # Normalize '/.id' suffixes to '.id' in every requested field path.
    if isinstance(fields, basestring):
        flds = [fields.replace('/.id', '.id')]
    else:
        flds = [item.replace('/.id', '.id') for item in fields]

    ctx = dict((params.context or {}), **rpc.session.context)
    ctx['import_comp'] = bool(int(import_compat))

    # BUGFIX: was `params.seach_domain` (typo), which always evaluated to
    # None and silently dropped the user's search domain; the other export
    # handlers in this file consistently use `search_domain`.
    domain = params.search_domain or []
    ids = params.ids or proxy.search(domain, 0, 0, 0, ctx)

    result = datas_read(ids, params.model, flds, context=ctx)
    if result.get('warning'):
        common.warning(unicode(result.get('warning', False)), _('Export Error'))
        return False

    result = result.get('datas', [])
    if import_compat:
        params.fields2 = flds
    return export_csv(params.fields2, result)
def export_data(self, fname, fields, import_compat=False, export_format='csv', **kw):
    """Export the selected records of ``params.model`` as CSV or XLS.

    :param fname: requested download file name (kept for the route signature)
    :param fields: list of field paths to export, or a single path string
    :param import_compat: truthy to export in import-compatible format
    :param export_format: 'csv' (default) or 'xls'
    :return: export_xls/export_csv payload, or False on a server warning
    """
    params, data_index = TinyDict.split(kw)
    proxy = rpc.RPCProxy(params.model)

    # Normalize '/.id' suffixes to '.id' in every requested field path.
    if isinstance(fields, basestring):
        flds = [fields.replace('/.id', '.id')]
    else:
        flds = [item.replace('/.id', '.id') for item in fields]

    ctx = dict((params.context or {}), **rpc.get_session().context)
    ctx['import_comp'] = bool(int(import_compat))

    # BUGFIX: was `params.seach_domain` (typo), which always evaluated to
    # None and silently dropped the user's search domain; the other export
    # handlers in this file consistently use `search_domain`.
    domain = params.search_domain or []
    ids = params.ids or proxy.search(domain, 0, 0, 0, ctx)

    result = datas_read(ids, params.model, flds, context=ctx)
    if result.get('warning'):
        common.warning(unicode(result.get('warning', False)), _('Export Error'))
        return False

    result = result.get('datas', [])
    if import_compat:
        params.fields2 = flds

    if export_format == 'xls':
        return export_xls(params.fields2, result)
    else:
        return export_csv(params.fields2, result)
def exp(self, import_compat="1", **kw):
    """Render the export dialog: the exportable-field tree plus any
    previously saved export lists for ``params.model``.
    """
    params, data = TinyDict.split(kw)
    ctx = dict((params.context or {}), **rpc.session.context)

    # Grouped views without leaf records cannot be exported.
    if ctx.get("group_by_no_leaf", 0):
        raise common.warning(_("You cannot export these record(s) !"), _('Error'))

    # Pair each view mode with its view id.
    views = {}
    if params.view_mode and params.view_ids:
        for idx in range(len(params.view_mode)):
            views[params.view_mode[idx]] = params.view_ids[idx]

    exports = rpc.RPCProxy('ir.exports')

    headers = [{'string': 'Name', 'name': 'name', 'type': 'char'}]
    tree = treegrid.TreeGrid('export_fields',
                             model=params.model,
                             headers=headers,
                             url=tools.url('/openerp/impex/get_fields'),
                             field_parent='relation',
                             context=ctx,
                             views=views,
                             import_compat=int(import_compat))
    tree.show_headers = False

    # Saved export lists previously defined for this model.
    saved_ids = exports.search([('resource', '=', params.model)], context=ctx)
    existing_exports = exports.read(saved_ids, [], ctx)

    return {
        'existing_exports': existing_exports,
        'model': params.model,
        'ids': params.ids,
        'ctx': ctx,
        'search_domain': params.search_domain,
        'source': params.source,
        'tree': tree,
        'import_compat': import_compat,
    }
def button_action(self, params):
    """Dispatch a form button press to the handler registered for its btype.

    :param params: TinyDict carrying the pressed button description
    :raises common.warning: when the button's btype has no registered handler
    """
    button_name = openobject.ustr(params.button.name)
    button_name = button_name.rsplit('/', 1)[-1]
    btype = params.button.btype
    # BUGFIX: the original wrapped the handler CALL in the try block as well,
    # so a KeyError raised *inside* a valid handler was misreported as an
    # invalid button type. Only the dispatch-table lookup belongs in the try.
    try:
        handler = self.BUTTON_ACTIONS_BY_BTYPE[btype]
    except KeyError:
        raise common.warning(_('Invalid button type "%s"') % btype)
    return handler(self, button_name, params)
def __init__(self, prefix, model, view, ids=[], domain=[], context=None, editable=True, readonly=False, nodefault=False, nolinks=1): super(Form, self).__init__(prefix=prefix, model=model, editable=editable, readonly=readonly, nodefault=nodefault) dom = xml.dom.minidom.parseString(view['arch'].encode('utf-8')) root = dom.childNodes[0] attrs = node_attributes(root) fields = view['fields'] self.string = attrs.get('string', '') self.link = attrs.get('link', nolinks) self.model = model self.id = None proxy = rpc.RPCProxy(model) self.context = dict(rpc.get_session().context, **(context or {})) self.context['bin_size'] = True values = {} defaults = {} try: if ids: lval = proxy.read(ids[:1], fields.keys() + ['__last_update'], self.context) if lval: values = lval[0] self.id = ids[0] for f in fields: if fields[f]['type'] == 'many2one' and isinstance(values[f], tuple): values[f] = values[f][0] ConcurrencyInfo.update(self.model, [values]) elif 'datas' in view: # wizard data for f in fields: if 'value' in fields[f]: values[f] = fields[f]['value'] values.update(view['datas']) elif not self.nodefault: # default defaults = self.get_defaults(fields, domain, self.context) elif 'state' in fields: # if nodefault and state get state only defaults = proxy.default_get(['state'], self.context) elif 'x_state' in fields: # if nodefault and x_state get x_state only (for custom objects) defaults = proxy.default_get(['x_state'], self.context) except Exception,e: raise common.warning(e)
def __init__(self, prefix, model, view, ids=[], domain=[], context=None, editable=True, readonly=False, nodefault=False, nolinks=1): super(Form, self).__init__(prefix=prefix, model=model, editable=editable, readonly=readonly, nodefault=nodefault) dom = xml.dom.minidom.parseString(view['arch'].encode('utf-8')) root = dom.childNodes[0] attrs = node_attributes(root) fields = view['fields'] self.string = attrs.get('string', '') self.link = attrs.get('link', nolinks) self.model = model self.id = None proxy = rpc.RPCProxy(model) self.context = dict(rpc.session.context, **(context or {})) self.context['bin_size'] = True values = {} defaults = {} try: if ids: lval = proxy.read(ids[:1], fields.keys() + ['__last_update'], self.context) if lval: values = lval[0] self.id = ids[0] ConcurrencyInfo.update(self.model, [values]) elif 'datas' in view: # wizard data for f in fields: if 'value' in fields[f]: values[f] = fields[f]['value'] values.update(view['datas']) elif not self.nodefault: # default defaults = self.get_defaults(fields, domain, self.context) elif 'state' in fields: # if nodefault and state get state only defaults = proxy.default_get(['state'], self.context) elif 'x_state' in fields: # if nodefault and x_state get x_state only (for custom objects) defaults = proxy.default_get(['x_state'], self.context) except Exception,e: raise common.warning(e)
def _print_data(data):
    # Stream a generated report back to the browser: either decompress the
    # inline zlib-encoded result, or serve the file found at data['path'].
    if 'result' not in data and 'path' not in data:
        raise common.message(_('Error no report'))
    # Report format maps to the response MIME type via PRINT_FORMATS.
    cherrypy.response.headers['Content-Type'] = PRINT_FORMATS[data['format']]
    if data.get('code', 'normal') == 'zlib':
        import zlib
        # Report payload is base64-wrapped zlib-compressed bytes.
        content = zlib.decompress(base64.decodestring(data['result']))
    else:
        if not data.get('result') and data.get('path'):
            try:
                # Hand the file off as a download attachment; 'delete'
                # presumably removes the temp file after serving — confirm.
                return serve_file.serve_file(data['path'], "application/x-download", 'attachment', delete=data.get('delete', False))
            except Exception, e:
                # Serving failed: fall back to an HTML error response and
                # drop the attachment disposition so the browser shows it.
                cherrypy.response.headers['Content-Type'] = 'text/html'
                if 'Content-Disposition' in cherrypy.response.headers:
                    del(cherrypy.response.headers['Content-Disposition'])
                raise common.warning(e)
            finally:
                # NOTE(review): the finally suite (and the rest of this
                # function) is truncated in this chunk of the file.
def exp(self, import_compat="1", **kw):
    """Prepare the export dialog data for ``params.model``.

    Builds the exportable-field TreeGrid and fetches the saved export
    lists stored in ir.exports for this model.
    """
    params, data = TinyDict.split(kw)
    ctx = dict((params.context or {}), **rpc.session.context)

    # Exporting is refused on grouped views with no leaf records.
    if ctx.get("group_by_no_leaf", 0):
        raise common.warning(_("You cannot export these record(s) !"), _('Error'))

    views = {}
    if params.view_mode and params.view_ids:
        for position, mode in enumerate(params.view_mode):
            views[mode] = params.view_ids[position]

    exports = rpc.RPCProxy('ir.exports')

    tree = treegrid.TreeGrid(
        'export_fields',
        model=params.model,
        headers=[{'string': 'Name', 'name': 'name', 'type': 'char'}],
        url=tools.url('/openerp/impex/get_fields'),
        field_parent='relation',
        context=ctx,
        views=views,
        import_compat=int(import_compat),
    )
    tree.show_headers = False

    export_ids = exports.search([('resource', '=', params.model)], context=ctx)
    existing_exports = exports.read(export_ids, [], ctx)

    return dict(
        existing_exports=existing_exports,
        model=params.model,
        ids=params.ids,
        ctx=ctx,
        search_domain=params.search_domain,
        source=params.source,
        tree=tree,
        import_compat=import_compat,
    )
def detect_data(self, csvfile, csvsep, csvdel, csvcode, csvskip, **kw):
    """Inspect the first rows of an uploaded CSV and auto-match its header
    row against the importable fields of ``params.model``.

    :param csvfile: uploaded file object (has a ``.file`` stream)
    :param csvsep: CSV column delimiter
    :param csvdel: CSV quote character
    :param csvcode: encoding of the CSV content
    :param csvskip: lines to skip (unused here, kept for the route signature)
    :return: the import page via self.imp, with either the detected
        field mapping and preview records, or an error + file preview
    """
    params, data = TinyDict.split(kw)
    _fields = {}
    _fields_invert = {}
    error = None

    fields = dict(rpc.RPCProxy(params.model).fields_get(False, rpc.get_session().context))
    fields.update({'id': {'string': 'ID'}, '.id': {'string': 'Database ID'}})

    def model_populate(fields, prefix_node='', prefix=None, prefix_value='', level=2):
        # Build label <-> technical-path maps, descending `level` levels
        # into relational fields.
        def str_comp(x, y):
            # NOTE: deliberately reversed (descending label order); kept as-is.
            if x < y:
                return 1
            elif x > y:
                return -1
            else:
                return 0
        fields_order = fields.keys()
        fields_order.sort(lambda x, y: str_comp(fields[x].get('string', ''), fields[y].get('string', '')))
        for field in fields_order:
            # Importable = not a reference field, and either writable or
            # writable in the 'draft' state.
            if (fields[field].get('type', '') not in ('reference',)) \
                    and (not fields[field].get('readonly')
                         or not dict(fields[field].get('states', {}).get('draft', [('readonly', True)])).get('readonly', True)):
                st_name = prefix_value + fields[field]['string'] or field
                _fields[prefix_node + field] = st_name
                _fields_invert[st_name] = prefix_node + field
                if fields[field].get('type', '') == 'one2many' and level > 0:
                    fields2 = rpc.RPCProxy(fields[field]['relation']).fields_get(False)
                    model_populate(fields2, prefix_node + field + '/', None, st_name + '/', level - 1)
                if fields[field].get('relation', False) and level > 0:
                    model_populate({'/id': {'type': 'char', 'string': 'ID'}, '.id': {'type': 'char', 'string': 'Database ID'}}, prefix_node + field, None, st_name + '/', level - 1)

    fields.update({'id': {'string': 'ID'}, '.id': {'string': _('Database ID')}})
    model_populate(fields)

    try:
        data = csv.reader(csvfile.file, quotechar=str(csvdel), delimiter=str(csvsep))
    # BUGFIX: was a bare `except:`, which also traps KeyboardInterrupt/SystemExit.
    except Exception:
        raise common.warning(_('Error opening .CSV file'), _('Input Error.'))

    records = []
    fields = []
    word = ''
    limit = 3
    try:
        # Keep only the first few rows as a preview.
        for i, row in enumerate(data):
            records.append(row)
            if i == limit:
                break
        # Match each header cell against a field label or technical path.
        for line in records:
            for word in line:
                word = ustr(word.decode(csvcode))
                if word in _fields:
                    fields.append((word, _fields[word]))
                elif word in _fields_invert:
                    fields.append((_fields_invert[word], word))
                else:
                    # BUGFIX: interpolate AFTER the gettext lookup; the original
                    # called _("..." % word), which can never match a catalog entry.
                    error = {'message': _("You cannot import the field '%s', because we cannot auto-detect it") % (word,)}
                    break
    except Exception:
        error = {'message': _('Error processing the first line of the file. Field "%s" is unknown') % (word,), 'title': _('Import Error.')}

    kw['fields'] = fields
    if error:
        csvfile.file.seek(0)
        return self.imp(error=dict(error, preview=csvfile.file.read(200)), **kw)
    return self.imp(records=records, **kw)
def detect_data(self, csvfile, csvsep, csvdel, csvcode, csvskip, **kw):
    """Inspect the first rows of an uploaded CSV and auto-match its header
    row against the importable fields of ``params.model``.

    :param csvfile: uploaded file object (has a ``.file`` stream)
    :param csvsep: CSV column delimiter
    :param csvdel: CSV quote character
    :param csvcode: encoding of the CSV content
    :param csvskip: lines to skip (unused here, kept for the route signature)
    :return: the import page via self.imp, with either the detected
        field mapping and preview records, or an error + file preview
    """
    params, data = TinyDict.split(kw)
    _fields = {}
    _fields_invert = {}
    error = None

    fields = dict(rpc.RPCProxy(params.model).fields_get(False, rpc.session.context))
    fields.update({'id': {'string': 'ID'}, '.id': {'string': 'Database ID'}})

    def model_populate(fields, prefix_node='', prefix=None, prefix_value='', level=2):
        # Build label <-> technical-path maps, descending `level` levels
        # into relational fields.
        def str_comp(x, y):
            # NOTE: deliberately reversed (descending label order); kept as-is.
            if x < y:
                return 1
            elif x > y:
                return -1
            else:
                return 0
        fields_order = fields.keys()
        fields_order.sort(lambda x, y: str_comp(fields[x].get('string', ''), fields[y].get('string', '')))
        for field in fields_order:
            # Importable = not a reference field, and either writable or
            # writable in the 'draft' state.
            if (fields[field].get('type', '') not in ('reference',)) \
                    and (not fields[field].get('readonly')
                         or not dict(fields[field].get('states', {}).get('draft', [('readonly', True)])).get('readonly', True)):
                st_name = prefix_value + fields[field]['string'] or field
                _fields[prefix_node + field] = st_name
                _fields_invert[st_name] = prefix_node + field
                if fields[field].get('type', '') == 'one2many' and level > 0:
                    fields2 = rpc.session.execute('object', 'execute', fields[field]['relation'], 'fields_get', False, rpc.session.context)
                    model_populate(fields2, prefix_node + field + '/', None, st_name + '/', level - 1)
                if fields[field].get('relation', False) and level > 0:
                    model_populate({'/id': {'type': 'char', 'string': 'ID'}, '.id': {'type': 'char', 'string': 'Database ID'}}, prefix_node + field, None, st_name + '/', level - 1)

    fields.update({'id': {'string': 'ID'}, '.id': {'string': _('Database ID')}})
    model_populate(fields)

    try:
        data = csv.reader(csvfile.file, quotechar=str(csvdel), delimiter=str(csvsep))
    # BUGFIX: was a bare `except:`, which also traps KeyboardInterrupt/SystemExit.
    except Exception:
        raise common.warning(_('Error opening .CSV file'), _('Input Error.'))

    records = []
    fields = []
    word = ''
    limit = 3
    try:
        # Keep only the first few rows as a preview.
        for i, row in enumerate(data):
            records.append(row)
            if i == limit:
                break
        # Match each header cell against a field label or technical path.
        for line in records:
            for word in line:
                word = ustr(word.decode(csvcode))
                if word in _fields:
                    fields.append((word, _fields[word]))
                elif word in _fields_invert:
                    fields.append((_fields_invert[word], word))
                else:
                    # BUGFIX: interpolate AFTER the gettext lookup; the original
                    # called _("..." % word), which can never match a catalog entry.
                    error = {'message': _("You cannot import the field '%s', because we cannot auto-detect it") % (word,)}
                    break
    except Exception:
        error = {'message': _('Error processing the first line of the file. Field "%s" is unknown') % (word,), 'title': _('Import Error.')}

    kw['fields'] = fields
    if error:
        csvfile.file.seek(0)
        return self.imp(error=dict(error, preview=csvfile.file.read(200)), **kw)
    return self.imp(records=records, **kw)
def export_data(self, fname, fields, import_compat=False, export_format='csv', all_records=False, **kw):
    """Export records of ``params.model`` as CSV or HTML/Excel.

    Handles three variants: a product.product special case that strips
    fields listed in product_remove_fields, a grouped export that walks
    read_group recursively when group_by_no_leaf is set, and the plain
    record export via datas_read.

    :param fname: requested download file name (unused here)
    :param fields: list of field paths to export, or a single path string
    :param import_compat: import-compatible flag; note the plain branch
        compares it against the string "1" (presumably posted from the
        export form), not a boolean — TODO confirm against callers
    :param export_format: 'csv' (default) or 'excel'
    :param all_records: truthy to export matching records, not just ids
    :return: export payload, or False when the server reports a warning
    """
    params, data_index = TinyDict.split(kw)
    proxy = rpc.RPCProxy(params.model)
    # Normalize '/.id' suffixes to '.id' in every requested field path.
    flds = []
    for item in fields:
        fld = item.replace('/.id', '.id')
        flds.append(fld)
    if isinstance(fields, basestring):
        fields = fields.replace('/.id', '.id')
        flds = [fields]
    if params.model == 'product.product':
        # Drop the fields in product_remove_fields, keeping the remaining
        # field paths and their header labels (params.fields2) in sync.
        tmp_flds = flds
        flds = []
        fields_header = []
        for f in tmp_flds:
            header = ""
            if params.fields2 and len(params.fields2):
                header = params.fields2.pop(0)
            if f not in product_remove_fields:
                flds.append(f)
                fields_header.append(header)
        params.fields2 = fields_header
    ctx = dict((params.context or {}), **rpc.session.context)
    ctx['import_comp'] = bool(int(import_compat))
    view_name = ctx.get('_terp_view_name', '')
    if ctx.get('group_by_no_leaf'):
        # Grouped export: walk read_group level by level instead of
        # reading individual records.
        ctx['client_export_data'] = True  # UTP-580-582-697 client export flag
        rpc_obj = rpc.RPCProxy(params.model)
        domain = params.search_domain or []
        to_group = ctx.get('group_by', [])
        # Strip the 'group_' prefix from group-by keys and make sure each
        # grouping field is also exported.
        group_by = []
        for gr in to_group:
            gr = gr.replace('group_', '')
            group_by.append(gr)
            if gr not in flds:
                flds.append(gr)
                params.fields2.append(gr)
        # read_group cannot follow sub-field paths; keep top-level fields only.
        fields_to_read = []
        for f in flds:
            if '/' not in f:
                fields_to_read.append(f)
        flds = fields_to_read[:]
        params.fields2 = fields_to_read[:]
        data = rpc_obj.read_group(domain, flds, group_by, 0, 2000, ctx)
        result_tmp = []  # List of processed data lines (dictionaries)

        # Closure to recursively prepare and insert lines in 'result_tmp'
        # (as much as the number of 'group_by' levels)
        def process_data(line):
            domain_line = line.get('__domain', [])
            grp_by_line = line.get('__context', {}).get('group_by', [])
            # If there is a 'group_by', we fetch data one level deeper
            if grp_by_line:
                data = rpc_obj.read_group(domain_line, flds, grp_by_line, 0, 0, ctx)
                for line2 in data:
                    line_copy = line.copy()
                    line_copy.update(line2)
                    process_data(line_copy)
            # If 'group_by' is empty, this means we were at the last level
            # so we insert the line in the final result
            else:
                result_tmp.append(line)

        # Prepare recursively the data to export (inserted in 'result_tmp')
        for data_line in data:
            process_data(data_line)
        result = self.get_grp_data(result_tmp, flds)
        result, params.fields2 = rpc_obj.filter_export_data_result(result, params.fields2)
        if export_format == "excel":
            return self.export_html(params.fields2, result, view_name)
        return export_csv(params.fields2, result)
    # Plain (ungrouped) export: decide which ids to read.
    if not params.ids or all_records:
        domain = params.search_domain or []
        if params.model == 'product.product':
            # No limit for products; other models are capped at 2000 rows.
            ids = proxy.search(domain, 0, None, 0, ctx)
        else:
            ids = proxy.search(domain, 0, 2000, 0, ctx)
    else:
        ids = params.ids or []
    result = datas_read(ids, params.model, flds, context=ctx)
    if result.get('warning'):
        common.warning(unicode(result.get('warning', False)), _('Export Error'))
        return False
    result = result.get('datas', [])
    # NOTE(review): string comparison "1", unlike the boolean test used by
    # the simpler export_data variant — presumably the flag arrives as a
    # form-posted string here; confirm before unifying.
    if import_compat == "1":
        params.fields2 = flds
    if export_format == "excel":
        return self.export_html(params.fields2, result, view_name)
    return export_csv(params.fields2, result)