def _child_get(node, self=None, tagname=None):
    # Generator over the children of an RML etree ``node``, expanding the
    # OpenERP report directives stored as attributes:
    #   rml_loop   -- expression yielding context dicts; the node is emitted
    #                 once per dict, with self.localcontext updated each time
    #   rml_except -- expression evaluated as a guard; a failure skips the node
    #   rml_tag    -- expression returning (tag, attr_dict) used to rewrite a
    #                 deep copy of the node before yielding it
    # ``eval`` here is the report framework's sandboxed eval over localcontext.
    for n in node:
        if self and self.localcontext and n.get('rml_loop'):
            for ctx in eval(n.get('rml_loop'), {}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag == tagname):
                    if n.get('rml_except', False):
                        try:
                            eval(n.get('rml_except'), {}, self.localcontext)
                        except GeneratorExit:
                            # consumer closed the generator mid-guard: skip quietly
                            continue
                        except Exception, e:
                            logging.getLogger('report').exception(e)
                            continue
                    if n.get('rml_tag'):
                        try:
                            (tag, attr) = eval(n.get('rml_tag'), {}, self.localcontext)
                            # rewrite a copy, never the original template node
                            n2 = copy.deepcopy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except GeneratorExit:
                            yield n
                        except Exception, e:
                            # bad rml_tag expression: fall back to the plain node
                            logging.getLogger('report').exception(e)
                            yield n
                    else:
                        yield n
            # NOTE(review): nodes WITHOUT rml_loop are never yielded here;
            # upstream variants of this helper also handle that case — confirm
            # this is the complete function.
            continue
def _eval_params(self, model, params):
    # Resolve a YAML function-call parameter list into concrete Python values.
    # Each param may be: a nested list (recurse), a !ref tag, an !eval tag,
    # a dict (XML-style 'search'/'eval' spec), or a plain scalar.
    args = []
    for i, param in enumerate(params):
        if isinstance(param, types.ListType):
            # nested list: resolve each element recursively
            value = self._eval_params(model, param)
        elif is_ref(param):
            value = self.process_ref(param)
        elif is_eval(param):
            value = self.process_eval(param)
        elif isinstance(param, types.DictionaryType): # supports XML syntax
            # explicit 'model' key overrides the model passed in by the caller
            param_model = self.get_model(param.get('model', model))
            if 'search' in param:
                # 'search' holds a domain expression; keep only the first hit
                q = eval(param['search'], self.eval_context)
                ids = param_model.search(self.cr, self.uid, q)
                value = self._get_first_result(ids)
            elif 'eval' in param:
                # 'eval' may use obj(id) to browse records and any loaded xml-id
                local_context = {'obj': lambda x: param_model.browse(self.cr, self.uid, x, self.context)}
                local_context.update(self.id_map)
                value = eval(param['eval'], self.eval_context, local_context)
            else:
                raise YamlImportException('You must provide either a !ref or at least a "eval" or a "search" to function parameter #%d.' % i)
        else:
            value = param # scalar value
        args.append(value)
    return args
def process_report(self, node):
    # Create/update an ir.actions.report.xml record from a YAML !report node,
    # then register it as a print-menu action unless menu is disabled.
    values = {}
    # mandatory node attributes -> column names (assert keeps bad data out)
    for dest, f in (('name','string'), ('model','model'), ('report_name','name')):
        values[dest] = getattr(node, f)
        assert values[dest], "Attribute %s of report is empty !" % (f,)
    # optional node attributes -> column names
    for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
        if getattr(node, field):
            values[dest] = getattr(node, field)
    if node.auto:
        values['auto'] = eval(node.auto)
    if node.sxw:
        # file handle is closed even if read() fails
        sxw_file = misc.file_open(node.sxw)
        try:
            sxw_content = sxw_file.read()
            values['report_sxw_content'] = sxw_content
        finally:
            sxw_file.close()
    if node.header:
        values['header'] = eval(node.header)
    values['multi'] = node.multi and eval(node.multi)
    xml_id = node.id
    self.validate_xml_id(xml_id)
    self._set_group_values(node, values)
    # NOTE(review): uid is hard-coded to 1 (superuser) here and in ir_set
    # below, whereas the sibling variant of this method uses self.uid —
    # confirm which is intended.
    id = self.pool.get('ir.model.data')._update(self.cr, 1, "ir.actions.report.xml", self.module, values, xml_id, noupdate=self.isnoupdate(node), mode=self.mode)
    self.id_map[xml_id] = int(id)
    if not node.menu or eval(node.menu):
        keyword = node.keyword or 'client_print_multi'
        value = 'ir.actions.report.xml,%s' % id
        # NOTE(review): `node.replace or True` is always truthy — a False
        # replace attribute is silently promoted to True; verify intent.
        replace = node.replace or True
        self.pool.get('ir.model.data').ir_set(self.cr, 1, 'action', keyword, values['name'], [values['model']], value, replace=replace, isobject=True, xml_id=xml_id)
def process_report(self, node):
    """Create/update an ir.actions.report.xml record from a YAML !report
    node, then (unless menu evaluates false) register it as an ir.values
    action so it appears in the client's print menu.
    """
    values = {}
    # mandatory node attributes -> column names (assert keeps bad data out)
    for dest, f in (('name','string'), ('model','model'), ('report_name','name')):
        values[dest] = getattr(node, f)
        assert values[dest], "Attribute %s of report is empty !" % (f,)
    # optional node attributes -> column names
    for field, dest in (('rml','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
        if getattr(node, field):
            values[dest] = getattr(node, field)
    if node.auto:
        values['auto'] = eval(node.auto)
    if node.sxw:
        # fix: the original never closed this file handle (resource leak);
        # close it even when read() raises
        sxw_file = misc.file_open(node.sxw)
        try:
            values['report_sxw_content'] = sxw_file.read()
        finally:
            sxw_file.close()
    if node.header:
        values['header'] = eval(node.header)
    values['multi'] = node.multi and eval(node.multi)
    xml_id = node.id
    self.validate_xml_id(xml_id)
    self._set_group_values(node, values)
    id = self.pool.get('ir.model.data')._update(self.cr, self.uid, "ir.actions.report.xml", self.module, values, xml_id, noupdate=self.isnoupdate(node), mode=self.mode)
    self.id_map[xml_id] = int(id)
    if not node.menu or eval(node.menu):
        keyword = node.keyword or 'client_print_multi'
        value = 'ir.actions.report.xml,%s' % id
        replace = node.replace or True
        self.pool.get('ir.model.data').ir_set(self.cr, self.uid, 'action', keyword, values['name'], [values['model']], value, replace=replace, isobject=True, xml_id=xml_id)
def _child_get(node, self=None, tagname=None):
    # Generator over children of an RML etree ``node``, expanding the report
    # directives rml_loop / rml_except / rml_tag (see attribute docs below).
    # This variant logs directive failures as warnings with the offending
    # expression text rather than a bare exception.
    for n in node:
        if self and self.localcontext and n.get('rml_loop'):
            # rml_loop: emit the node once per context dict it yields
            for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag==tagname):
                    if n.get('rml_except', False):
                        # rml_except: guard expression; any failure skips the node
                        try:
                            eval(n.get('rml_except'), {}, self.localcontext)
                        except GeneratorExit:
                            continue
                        except Exception, e:
                            logging.getLogger('report').warning('rml_except: "%s"',n.get('rml_except',''), exc_info=True)
                            continue
                    if n.get('rml_tag'):
                        # rml_tag: expression returning (tag, attr_dict); applied
                        # to a deep copy so the template node stays intact
                        try:
                            (tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
                            n2 = copy.deepcopy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except GeneratorExit:
                            yield n
                        except Exception, e:
                            # bad rml_tag: log and fall back to the plain node
                            logging.getLogger('report').warning('rml_tag: "%s"',n.get('rml_tag',''), exc_info=True)
                            yield n
                    else:
                        yield n
            # NOTE(review): nodes without rml_loop are never yielded here;
            # confirm this is the complete function.
            continue
def _tag_wizard(self, cr, rec, data_node=None):
    # XML-data handler for <wizard> tags: create/update the
    # ir.actions.wizard record and maintain its print/action-menu binding.
    string = rec.get("string",'').encode('utf8')
    model = rec.get("model",'').encode('utf8')
    name = rec.get("name",'').encode('utf8')
    xml_id = rec.get('id','').encode('utf8')
    self._test_xml_id(xml_id)
    multi = rec.get('multi','') and eval(rec.get('multi','False'))
    res = {'name': string, 'wiz_name': name, 'multi': multi, 'model': model}
    if rec.get('groups'):
        # comma-separated group xml-ids; a '-' prefix unlinks the group
        g_names = rec.get('groups','').split(',')
        groups_value = []
        for group in g_names:
            if group.startswith('-'):
                group_id = self.id_get(cr, 'res.groups', group[1:])
                groups_value.append((3, group_id))   # (3, id): unlink
            else:
                group_id = self.id_get(cr, 'res.groups', group)
                groups_value.append((4, group_id))   # (4, id): link
        res['groups_id'] = groups_value
    id = self.pool.get('ir.model.data')._update(cr, self.uid, "ir.actions.wizard", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
    self.idref[xml_id] = int(id)
    # ir_set
    if (not rec.get('menu') or eval(rec.get('menu','False'))) and id:
        # default: bind the wizard to the action menu of its model
        keyword = str(rec.get('keyword','') or 'client_action_multi')
        value = 'ir.actions.wizard,'+str(id)
        replace = rec.get("replace",'') or True
        self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, string, [model], value, replace=replace, isobject=True, xml_id=xml_id)
    elif self.mode=='update' and (rec.get('menu') and eval(rec.get('menu','False'))==False):
        # Special check for wizard having attribute menu=False on update
        value = 'ir.actions.wizard,'+str(id)
        self._remove_ir_values(cr, string, value, model)
def get_needaction_data(self, cr, uid, ids, context=None):
    """ Return for each menu entry of ids :
        - if it uses the needaction mechanism (needaction_enabled)
        - the needaction counter of the related action, taking into account the action domain
    """
    res = {}
    for menu in self.browse(cr, uid, ids, context=context):
        # defaults: menus without a needaction-aware action stay disabled
        res[menu.id] = {
            'needaction_enabled': False,
            'needaction_counter': False,
        }
        if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
            obj = self.pool.get(menu.action.res_model)
            if obj and obj._needaction:
                if menu.action.type == 'ir.actions.act_window':
                    # window action: domain is a python expression with uid available
                    dom = menu.action.domain and eval(menu.action.domain, {'uid': uid}) or []
                else:
                    # client action: domain lives inside the params_store dict
                    dom = eval(menu.action.params_store or '{}', {'uid': uid}).get('domain')
                res[menu.id]['needaction_enabled'] = obj._needaction
                res[menu.id]['needaction_counter'] = obj._needaction_count(cr, uid, dom, context=context)
    return res
def onchange_tipe_history_id(self, cr, uid, ids, tipe_history_id):
    """Onchange handler: when the history type changes, run its stored
    python snippet (kode_python_karyawan) to compute the allowed employee
    ids, and return the updated value/domain for employee_id.
    """
    value = {'employee_id': False}
    domain = {'employee_id': [('id', 'in', [])]}
    if tipe_history_id:
        obj_tipe_history = self.pool.get('hr.tipe_history')
        # fix: build localdict BEFORE the try block — the original created it
        # inside the try, so the `localdict['value']` read below raised
        # NameError whenever browse() itself failed
        localdict = {
            'self': self,
            'cr': cr,
            'uid': uid,
            'tipe_history': None,
            'karyawan_ids': [],
            'value': value,
        }
        try:
            tipe_history = obj_tipe_history.browse(cr, uid, [tipe_history_id])[0]
            localdict['tipe_history'] = tipe_history
            # project safe_eval (mode='exec'): the snippet is expected to fill
            # localdict['karyawan_ids'] and may adjust localdict['value']
            eval(tipe_history.kode_python_karyawan, localdict, mode='exec', nocopy=True)
            karyawan_ids = localdict['karyawan_ids']
        except Exception:
            # fix: was a bare except; any failure falls back to "no employees"
            karyawan_ids = []
        value = localdict['value']
        domain['employee_id'] = [('id', 'in', karyawan_ids)]
    return {'value': value, 'domain': domain}
def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None):
    """Recursively flatten browse records into report rows.

    ``fields`` is a list of field paths (each a list of path segments, or
    None for a column already filled); ``conditions`` is a parallel list of
    (transform, field, operator, operand) filters; ``row_canvas`` carries
    partially-built rows down the recursion; ``group_by`` is the index of a
    field whose relation must be expanded first.
    """
    result = []
    tmp = []
    for obj in objs:
        # --- filtering: evaluate each condition against the record
        tobreak = False
        for cond in conditions:
            if cond and cond[0]:
                c = cond[0]
                temp = c[0](eval('obj.'+c[1]))
                # builds "'<value>' <op> '<operand>'" and evaluates it as the test
                if not eval('\''+temp+'\''+' '+c[2]+' '+'\''+str(c[3])+'\''):
                    tobreak = True
        # NOTE(review): `break` aborts the whole objs loop on the first
        # non-matching record (not just this record) — kept as-is, verify.
        if tobreak:
            break
        levels = {}
        row = []
        for i in range(len(fields)):
            if not fields[i]:
                # exhausted path: consume the canvas cell once, then blank it
                row.append(row_canvas and row_canvas[i])
                # fix: original did `if row_canvas[i]:` which raised TypeError
                # when row_canvas is None (its default)
                if row_canvas and row_canvas[i]:
                    row_canvas[i] = False
            elif len(fields[i]) == 1:
                # leaf field: render its value, unless the record is a null browse
                if not isinstance(obj, browse_null):
                    row.append(str(eval('obj.'+fields[i][0])))
                else:
                    row.append(None)
            else:
                # deeper path: placeholder now, recurse through the relation below
                row.append(None)
                levels[fields[i][0]] = True
        if not levels:
            result.append(row)
        else:
            # Process group_by data first
            key = []
            if group_by != None and fields[group_by] != None:
                if fields[group_by][0] in levels.keys():
                    key.append(fields[group_by][0])
                for l in levels.keys():
                    if l != fields[group_by][0]:
                        key.append(l)
            else:
                key = levels.keys()
            for l in key:
                objs = eval('obj.'+l)
                # normalize the relation value to a list before recursing
                if not isinstance(objs, browse_record_list) and type(objs) != type([]):
                    objs = [objs]
                field_new = []
                cond_new = []
                for f in range(len(fields)):
                    # strip the consumed path segment for columns under this relation
                    if (fields[f] and fields[f][0]) == l:
                        field_new.append(fields[f][1:])
                        cond_new.append(conditions[f][1:])
                    else:
                        field_new.append(None)
                        cond_new.append(None)
                if len(objs):
                    result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by)
                else:
                    result.append(row)
    return result
def _rule_eval(self, cr, uid, rule, obj_name, obj, context):
    # Execute a sale-exception rule's python code against the evaluation
    # context built for (obj_name, obj); the snippet signals a match by
    # setting 'result' in that context (kept alive by nocopy=True).
    expr = rule.code
    space = self._exception_rule_eval_context(cr, uid, obj_name, obj, context=context)
    try:
        # project safe_eval, not the builtin (mode/nocopy kwargs)
        eval(expr, space, mode='exec', nocopy=True) # nocopy allows to return 'result'
    except Exception, e:
        # surface the failing rule name and the underlying error to the user
        raise except_osv(_('Error'), _('Error when evaluating the sale exception rule :\n %s \n(%s)') % (rule.name, e))
def get_pg_type(f):
    '''Override the original method in order to accept fields.serialized.

    Maps an OpenERP field instance to a (base_type, create_type) pair of
    PostgreSQL type names, or None when the field type is unsupported.
    '''
    type_dict = {
        fields.boolean: 'bool',
        fields.integer: 'int4',
        fields.integer_big: 'int8',
        fields.text: 'text',
        fields.date: 'date',
        fields.time: 'time',
        fields.datetime: 'timestamp',
        fields.binary: 'bytea',
        fields.many2one: 'int4',
    }
    if type(f) in type_dict:
        f_type = (type_dict[type(f)], type_dict[type(f)])
    elif isinstance(f, fields.float):
        if f.digits:
            f_type = ('numeric', 'NUMERIC')
        else:
            f_type = ('float8', 'DOUBLE PRECISION')
    elif isinstance(f, (fields.char, fields.reference)):
        f_type = ('varchar', 'VARCHAR(%d)' % (f.size,))
    elif isinstance(f, fields.selection):
        # fix: also require a non-empty list before indexing [0][0]
        # (the original raised IndexError on an empty selection list)
        if isinstance(f.selection, list) and f.selection and isinstance(f.selection[0][0], (str, unicode)):
            # string keys: column must be wide enough for the longest key
            f_size = reduce(lambda x, y: max(x, len(y[0])), f.selection, f.size or 16)
        elif isinstance(f.selection, list) and f.selection and isinstance(f.selection[0][0], int):
            f_size = -1    # integer keys -> integer column
        else:
            f_size = getattr(f, 'size', None) or 16
        if f_size == -1:
            f_type = ('int4', 'INTEGER')
        else:
            f_type = ('varchar', 'VARCHAR(%d)' % f_size)
    elif isinstance(f, (fields.function, fields.serialized)) and getattr(fields, f._type, None) in type_dict:
        # fix: getattr replaces eval('fields.' + f._type) — same lookup
        # without eval, and an unknown _type now falls through to the
        # branches below instead of raising
        t = getattr(fields, f._type)
        f_type = (type_dict[t], type_dict[t])
    elif isinstance(f, (fields.function, fields.serialized)) and f._type == 'float':
        if f.digits:
            f_type = ('numeric', 'NUMERIC')
        else:
            f_type = ('float8', 'DOUBLE PRECISION')
    elif isinstance(f, (fields.function, fields.serialized)) and f._type == 'selection':
        f_type = ('text', 'text')
    elif isinstance(f, (fields.function, fields.serialized)) and f._type == 'char':
        f_type = ('varchar', 'VARCHAR(%d)' % (f.size))
    else:
        # unsupported field type: warn and return None so callers can skip it
        logger = netsvc.Logger()
        logger.notifyChannel("init", netsvc.LOG_WARNING, '%s type not supported!' % (type(f)))
        f_type = None
    return f_type
def _tag_report(self, cr, rec, data_node=None):
    # XML-data handler for <report> tags: create/update the
    # ir.actions.report.xml record and maintain its print-menu binding.
    # This variant also supports the 'file' and 'target_filename' attributes.
    res = {}
    # mandatory attributes -> column names
    for dest,f in (('name','string'),('model','model'),('report_name','name')):
        res[dest] = rec.get(f,'').encode('utf8')
        assert res[dest], "Attribute %s of report is empty !" % (f,)
    # optional attributes -> column names ('rml' and 'file' both map to report_rml)
    for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
        if rec.get(field):
            res[dest] = rec.get(field).encode('utf8')
    if rec.get('auto'):
        res['auto'] = eval(rec.get('auto','False'))
    if rec.get('sxw'):
        # NOTE(review): file handle is never closed — resource leak
        sxw_content = misc.file_open(rec.get('sxw')).read()
        res['report_sxw_content'] = sxw_content
    if rec.get('header'):
        res['header'] = eval(rec.get('header','False'))
    if rec.get('report_type'):
        res['report_type'] = rec.get('report_type')
    if rec.get('target_filename'):
        res['target_filename'] = rec.get('target_filename')
    res['multi'] = rec.get('multi') and eval(rec.get('multi','False'))
    xml_id = rec.get('id','').encode('utf8')
    self._test_xml_id(xml_id)
    if rec.get('groups'):
        # comma-separated group xml-ids; a '-' prefix unlinks the group
        g_names = rec.get('groups','').split(',')
        groups_value = []
        for group in g_names:
            if group.startswith('-'):
                group_id = self.id_get(cr, group[1:])
                groups_value.append((3, group_id))   # (3, id): unlink
            else:
                group_id = self.id_get(cr, group)
                groups_value.append((4, group_id))   # (4, id): link
        res['groups_id'] = groups_value
    id = self.pool.get('ir.model.data')._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
    self.idref[xml_id] = int(id)
    if not rec.get('menu') or eval(rec.get('menu','False')):
        # bind the report to the print menu of its model
        keyword = str(rec.get('keyword', 'client_print_multi'))
        value = 'ir.actions.report.xml,'+str(id)
        replace = rec.get('replace', True)
        self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
    elif self.mode=='update' and eval(rec.get('menu','False'))==False:
        # Special check for report having attribute menu=False on update
        value = 'ir.actions.report.xml,'+str(id)
        self._remove_ir_values(cr, res['name'], value, res['model'])
    return False
def create_normal_update(self, rule, context):
    # Create one synchronization 'update' row per record matching the rule's
    # domain, batched 200 records at a time.
    # NOTE(review): ``cr``, ``uid``, ``session_id``, ``update``,
    # ``re_fieldname`` and ``tools`` are not defined in this signature or
    # body — this function almost certainly lives inside a closure/outer
    # scope that provides them; verify before moving or reusing it.
    domain = eval(rule.domain or '[]')
    export_fields = eval(rule.included_fields or '[]')
    # 'id' must always be exported so updates can be matched on the far side
    if 'id' not in export_fields:
        export_fields.append('id')
    # reduce dotted field paths to their root field name for the push check
    ids_need_to_push = self.need_to_push(cr, uid, [], [m.group(0) for m in map(re_fieldname.match, export_fields)], empty_ids=True, context=context)
    if not ids_need_to_push:
        return 0
    domain.append(('id', 'in', ids_need_to_push))
    ids_to_compute = self.search_ext(cr, uid, domain, context=context)
    if not ids_to_compute:
        return 0
    owners = self.get_destination_name(cr, uid, ids_to_compute, rule.owner_field, context)
    # batch the export 200 records at a time to bound memory use
    min_offset = 0
    max_offset = len(ids_to_compute)
    while min_offset < max_offset:
        offset = min_offset + 200 < max_offset and min_offset +200 or max_offset
        datas = self.export_data(cr, uid, ids_to_compute[min_offset:offset], export_fields, context=context)['datas']
        # NOTE(review): sd refs are re-fetched for ALL ids on every batch
        # iteration — only the current slice is needed; possible slow spot.
        sdrefs = self.get_sd_ref(cr, uid, ids_to_compute, field=['name','version','force_recreation','id'], context=context)
        ustr_export_fields = tools.ustr(export_fields)
        for (id, row) in zip(ids_to_compute[min_offset:offset], datas):
            sdref, version, force_recreation, data_id = sdrefs[id]
            # a record may have one owner or an iterable of owners
            for owner in (owners[id] if hasattr(owners[id], '__iter__') else [owners[id]]):
                update_id = update.create(cr, uid, {
                    'session_id' : session_id,
                    'rule_id' : rule.id,
                    'owner' : owner,
                    'model' : self._name,
                    'sdref' : sdref,
                    'version' : version + 1,
                    'force_recreation' : force_recreation,
                    'fields' : ustr_export_fields,
                    'values' : tools.ustr(row),
                    'handle_priority' : rule.handle_priority,
                }, context=context)
                update._logger.debug("Created 'normal' update model=%s id=%d (rule sequence=%d)" % (self._name, update_id, rule.id))
        min_offset += 200
    self.clear_synchronization(cr, uid, ids_to_compute, context=context)
    return len(ids_to_compute)
def _tag_report(self, cr, rec, data_node=None):
    # XML-data handler for <report> tags: create/update the
    # ir.actions.report.xml record and maintain its print-menu binding.
    res = {}
    # mandatory attributes -> column names
    for dest,f in (('name','string'),('model','model'),('report_name','name')):
        res[dest] = rec.get(f,'').encode('utf8')
        assert res[dest], "Attribute %s of report is empty !" % (f,)
    # optional attributes -> column names
    for field,dest in (('rml','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
        if rec.get(field):
            res[dest] = rec.get(field).encode('utf8')
    if rec.get('auto'):
        res['auto'] = eval(rec.get('auto','False'))
    if rec.get('sxw'):
        # NOTE(review): file handle is never closed — resource leak
        sxw_content = misc.file_open(rec.get('sxw')).read()
        res['report_sxw_content'] = sxw_content
    if rec.get('header'):
        res['header'] = eval(rec.get('header','False'))
    if rec.get('report_type'):
        res['report_type'] = rec.get('report_type')
    res['multi'] = rec.get('multi') and eval(rec.get('multi','False'))
    xml_id = rec.get('id','').encode('utf8')
    self._test_xml_id(xml_id)
    if rec.get('groups'):
        # comma-separated group xml-ids; a '-' prefix unlinks the group
        g_names = rec.get('groups','').split(',')
        groups_value = []
        for group in g_names:
            if group.startswith('-'):
                group_id = self.id_get(cr, 'res.groups', group[1:])
                groups_value.append((3, group_id))   # (3, id): unlink
            else:
                group_id = self.id_get(cr, 'res.groups', group)
                groups_value.append((4, group_id))   # (4, id): link
        res['groups_id'] = groups_value
    id = self.pool.get('ir.model.data')._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
    self.idref[xml_id] = int(id)
    if not rec.get('menu') or eval(rec.get('menu','False')):
        # bind the report to the print menu of its model
        keyword = str(rec.get('keyword', 'client_print_multi'))
        value = 'ir.actions.report.xml,'+str(id)
        replace = rec.get('replace', True)
        self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
    elif self.mode=='update' and eval(rec.get('menu','False'))==False:
        # Special check for report having attribute menu=False on update
        value = 'ir.actions.report.xml,'+str(id)
        self._remove_ir_values(cr, res['name'], value, res['model'])
    return False
def process_workflow(self, node):
    # YAML handler for !workflow nodes: fire a workflow signal
    # (workflow.action) on a record identified either by a !ref or by an
    # 'eval'/'search' child spec.
    workflow, values = node.items()[0]
    if self.isnoupdate(workflow) and self.mode != 'init':
        return
    if workflow.ref:
        id = self.get_id(workflow.ref)
    else:
        if not values:
            raise YamlImportException('You must define a child node if you do not give a ref.')
        if not len(values) == 1:
            raise YamlImportException('Only one child node is accepted (%d given).' % len(values))
        value = values[0]
        # NOTE(review): this guard only fires when 'model' is missing AND one
        # of eval/search is missing; a spec with 'model' absent but both
        # eval+search present slips through and KeyErrors below — verify.
        if not 'model' in value and (not 'eval' in value or not 'search' in value):
            raise YamlImportException('You must provide a "model" and an "eval" or "search" to evaluate.')
        value_model = self.get_model(value['model'])
        # 'eval' may use obj(id) to browse records and any loaded xml-id
        local_context = {'obj': lambda x: value_model.browse(self.cr, self.uid, x, context=self.context)}
        local_context.update(self.id_map)
        id = eval(value['eval'], self.eval_context, local_context)
    # signal may be sent on behalf of an explicit uid from the node
    if workflow.uid is not None:
        uid = workflow.uid
    else:
        uid = self.uid
    # validate the action against the set of signals known to the engine
    self.cr.execute('select distinct signal from wkf_transition')
    signals=[x['signal'] for x in self.cr.dictfetchall()]
    if workflow.action not in signals:
        raise YamlImportException('Incorrect action %s. No such action defined' % workflow.action)
    wf_service = netsvc.LocalService("workflow")
    wf_service.trg_validate(uid, workflow.model, id, workflow.action, self.cr)
def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None):
    """Functional getter: evaluate each rule's ``domain_force`` python
    expression and return {rule_id: evaluated_domain}.

    The expression may reference ``user`` (the current user's browse
    record, read as superuser) and ``time``.
    """
    # fix: the original used a mutable default argument (context={})
    if context is None:
        context = {}
    res = {}
    # hoisted out of the loop: the eval namespace is identical for every rule
    eval_user_data = {'user': self.pool.get('res.users').browse(cr, 1, uid), 'time': time}
    for rule in self.browse(cr, uid, ids, context):
        res[rule.id] = eval(rule.domain_force, eval_user_data)
    return res
def action_launch(self, cr, uid, ids, context=None):
    """ Launch Action of Wizard

    Reads the configuration wizard's target action, marks 'automatic'/'once'
    wizards as done, and returns the action dict to execute client-side.
    """
    wizard_id = ids and ids[0] or False
    wizard = self.browse(cr, uid, wizard_id, context=context)
    if wizard.type in ('automatic', 'once'):
        wizard.write({'state': 'done'})
    # Load action
    act_type = self.pool.get('ir.actions.actions').read(cr, uid, wizard.action_id.id, ['type'], context=context)
    res = self.pool.get(act_type['type']).read(cr, uid, wizard.action_id.id, [], context=context)
    # fix: the original compared the whole read() dict to the string
    # ('act_type<>...'), which is always true, so the context handling
    # below was unreachable; compare the 'type' value instead
    if act_type['type'] != 'ir.actions.act_window':
        return res
    res.setdefault('context', '{}')
    res['nodestroy'] = True
    # Open a specific record when res_id is provided in the context
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    ctx = eval(res['context'], {'user': user})
    if ctx.get('res_id'):
        res.update({'res_id': ctx.pop('res_id')})
    # disable log for automatic wizards
    if wizard.type == 'automatic':
        ctx.update({'disable_log': True})
    res.update({'context': ctx})
    return res
def _company_default_get(self, cr, uid, object=False, field=False, context=None):
    """Return the default company id for (object, field).

    Scans the multi_company.default rules matching the given model/field;
    the first rule whose python expression evaluates true (with ``context``
    and ``user`` available) wins. Falls back to the user's own company.
    """
    context = context or {}
    proxy = self.pool.get('multi_company.default')
    matching_ids = proxy.search(cr, uid, [('object_id.model', '=', object), ('field_id', '=', field)], context=context)
    # user record is read as superuser so the rule expressions always see it
    user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
    for rule in proxy.browse(cr, uid, matching_ids, context):
        if eval(rule.expression, {'context': context, 'user': user}):
            return rule.company_dest_id.id
    return user.company_id.id
def upgrade_graph(graph, cr, module_list, force=None):
    # Add the installable modules of ``module_list`` (and their metadata) to
    # the dependency ``graph`` in topological order; returns how many nodes
    # were actually added.
    if force is None:
        force = []
    packages = []
    len_graph = len(graph)
    # --- phase 1: read each module's descriptor (__openerp__.py / __terp__.py)
    for module in module_list:
        mod_path = get_module_path(module)
        terp_file = get_module_resource(module, '__openerp__.py')
        if not terp_file or not os.path.isfile(terp_file):
            terp_file = get_module_resource(module, '__terp__.py')
        if not mod_path or not terp_file:
            logger.notifyChannel('init', netsvc.LOG_WARNING, 'module %s: not found, skipped' % (module))
            continue
        if os.path.isfile(terp_file) or zipfile.is_zipfile(mod_path+'.zip'):
            try:
                # descriptor file is a python dict literal
                # NOTE(review): file handle from tools.file_open is never closed
                info = eval(tools.file_open(terp_file).read())
            except:
                logger.notifyChannel('init', netsvc.LOG_ERROR, 'module %s: eval file %s' % (module, terp_file))
                raise
            if info.get('installable', True):
                packages.append((module, info.get('depends', []), info))
            else:
                logger.notifyChannel('init', netsvc.LOG_WARNING, 'module %s: not installable, skipped' % (module))
    dependencies = dict([(p, deps) for p, deps, data in packages])
    current, later = set([p for p, dep, data in packages]), set()
    # --- phase 2: repeatedly pull packages whose deps are all in the graph;
    # stops when every remaining package was already deferred once (current > later fails)
    while packages and current > later:
        package, deps, data = packages[0]
        # if all dependencies of 'package' are already in the graph, add 'package' in the graph
        if reduce(lambda x, y: x and y in graph, deps, True):
            if not package in current:
                # already processed via an earlier duplicate entry
                packages.pop(0)
                continue
            later.clear()
            current.remove(package)
            graph.addNode(package, deps)
            node = Node(package, graph)
            node.data = data
            # propagate requested init/demo/update processing onto the node
            for kind in ('init', 'demo', 'update'):
                if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
                    setattr(node, kind, True)
        else:
            # dependencies not satisfied yet: defer to a later pass
            later.add(package)
            packages.append((package, deps, data))
        packages.pop(0)
    graph.update_from_db(cr)
    # anything still deferred has unmet dependencies
    for package in later:
        unmet_deps = filter(lambda p: p not in graph, dependencies[package])
        logger.notifyChannel('init', netsvc.LOG_ERROR, 'module %s: Unmet dependencies: %s' % (package, ', '.join(unmet_deps)))
    result = len(graph) - len_graph
    if result != len(module_list):
        logger.notifyChannel('init', netsvc.LOG_WARNING, 'Not all modules have loaded.')
    return result
def _create_birt_report(self, cr, uid, ids, data, report_birt, context=None):
    # Render a BIRT report for a single record: build the parameter dict
    # from the report definition (literal values or python expressions
    # evaluated against the record's fields) and hand it to the BIRT server.
    if len(ids) != 1:
        raise NotImplementedError("Report on multiple object not implemented")
    table_obj = pooler.get_pool(cr.dbname).get(self.table)
    objs = table_obj.browse(cr, uid, ids, list_class=None, context=context, fields_process=None)
    obj = objs[0]
    fields_def = obj._table.fields_get(cr, uid, None, context)
    report_file = report_birt.birt_report
    format = report_birt.birt_format
    # eval namespace: every field of the record by name, plus the context
    # keys and the current user's browse record
    local = dict((k, getattr(obj, k)) for k, v in fields_def.iteritems())
    local.update(context)
    local['user'] = pooler.get_pool(cr.dbname).get('res.users').browse(cr, uid, context['uid'], context=context)
    # each report param is either a literal value or (when o['eval'] is set)
    # a python expression evaluated against the namespace above
    params = dict((o['name'], o['value'] if not o['eval'] else eval(o['value'], globals(), local)) for o in report_birt.birt_report_params)
    birt_factory = birt.BirtConnection(report_birt.birt_url)
    return birt_factory(report_file, format, params)
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
    """Utility method to generate the first PDF-type report declared for the
    current model with ``usage`` attribute set to ``default``.
    This must be called explicitly by models that need it, usually
    at the beginning of ``edi_export``, before the call to ``super()``."""
    ir_actions_report = self.pool.get('ir.actions.report.xml')
    matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name), ('report_type','=','pdf'), ('usage','=','default')])
    if matching_reports:
        report = ir_actions_report.browse(cr, uid, matching_reports[0])
        report_service = 'report.' + report.report_name
        service = netsvc.LocalService(report_service)
        # render the report for this single record
        (result, format) = service.create(cr, uid, [record.id], {'model': self._name}, context=context)
        eval_context = {'time': time, 'object': record}
        if not report.attachment or not eval(report.attachment, eval_context):
            # no auto-saving of report as attachment, need to do it manually
            result = base64.b64encode(result)
            # sanitize the record's display name into a safe file name
            file_name = record.name_get()[0][1]
            file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
            file_name += ".pdf"
            ir_attachment = self.pool.get('ir.attachment').create(cr, uid, {'name': file_name, 'datas': result, 'datas_fname': file_name, 'res_model': self._name, 'res_id': record.id, 'type': 'binary'}, context=context)
def _process_text(self, txt):
    """Render a report text node.

    _regex.split() alternates literal fragments with embedded expressions.
    Literals are passed through the 'translate' callable from the local
    context; expressions are evaluated against it (failures render as
    nothing). The assembled string is XML-escaped via str2xml().
    """
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    parts = _regex.split(txt)
    rendered = []
    while parts:
        # literal fragment: translate it
        literal = tools.ustr(parts.pop(0))
        rendered.append(tools.ustr(self.localcontext.get('translate', lambda x: x)(literal)))
        if parts:
            # expression fragment: evaluate, swallowing any failure
            outcome = None
            try:
                expression = parts.pop(0)
                outcome = eval(expression, self.localcontext)
                if outcome and isinstance(outcome, basestring):
                    outcome = tools.ustr(outcome)
            except Exception:
                pass
            if isinstance(outcome, basestring):
                rendered.append(outcome)
            elif outcome and (outcome is not None) and (outcome is not False):
                rendered.append(ustr(outcome))
    return str2xml(''.join(rendered))
def _check_selection(self, cr, uid, selection, context=None): try: selection_list = eval(selection) except Exception: _logger.warning( 'Invalid selection list definition for fields.selection', exc_info=True) raise except_orm(_('Error'), _("The Selection Options expression is not a valid Pythonic expression." \ "Please provide an expression in the [('key','Label'), ...] format.")) check = True if not (isinstance(selection_list, list) and selection_list): check = False else: for item in selection_list: if not (isinstance(item, (tuple, list)) and len(item) == 2): check = False break if not check: raise except_orm( _('Error'), _("The Selection Options expression is must be in the [('key','Label'), ...] format!" )) return True
def action_launch(self, cr, uid, ids, context=None):
    """ Launch Action of Wizard"""
    # Read the configuration wizard's window action, mark automatic/once
    # wizards as done, and return the action dict for the client to execute.
    wizard_id = ids and ids[0] or False
    wizard = self.browse(cr, uid, wizard_id, context=context)
    if wizard.type in ('automatic', 'once'):
        wizard.write({'state': 'done'})
    # Load action
    res = self.pool.get('ir.actions.act_window').read(cr, uid, wizard.action_id.id, [], context=context)
    res.setdefault('context', '{}')
    res['nodestroy'] = True
    # Open a specific record when res_id is provided in the context
    # (the stored context string may reference `user`)
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    ctx = eval(res['context'], {'user': user})
    if ctx.get('res_id'):
        res.update({'res_id': ctx.pop('res_id')})
    # disable log for automatic wizards
    if wizard.type == 'automatic':
        ctx.update({'disable_log': True})
    res.update({'context': ctx})
    return res
def format(self, cr, uid, ids, percent, value, grouping=False, monetary=False, context=None):
    """ Format() will return the language-specific output for float values"""
    # ``percent`` is a single %-style conversion spec, e.g. '%.2f'.
    if percent[0] != '%':
        raise ValueError("format() must be given exactly one %char format specifier")
    # language data: grouping is stored as a python-list string, hence the eval
    lang_grouping, thousands_sep, decimal_point = self._lang_data_get(cr, uid, ids[0], monetary)
    eval_lang_grouping = eval(lang_grouping)
    formatted = percent % value
    # floats and decimal ints need special action!
    if percent[-1] in 'eEfFgG':
        seps = 0
        parts = formatted.split('.')
        if grouping:
            # intersperse() inserts thousands separators into the integer part
            # and (presumably) returns (new_string, separators_added) —
            # NOTE(review): confirm intersperse's contract
            parts[0], seps = intersperse(parts[0], eval_lang_grouping, thousands_sep)
        formatted = decimal_point.join(parts)
        # strip one leading space per separator added (padding compensation);
        # NOTE(review): why spaces are removed here depends on intersperse's
        # behavior — verify
        while seps:
            sp = formatted.find(' ')
            if sp == -1:
                break
            formatted = formatted[:sp] + formatted[sp+1:]
            seps -= 1
    elif percent[-1] in 'diu':
        if grouping:
            formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]
    return formatted
def _check_selection(self, cr, uid, selection, context=None): try: selection_list = eval(selection) except Exception: logging.getLogger("ir.model").warning( "Invalid selection list definition for fields.selection", exc_info=True ) raise except_orm( _("Error"), _( "The Selection Options expression is not a valid Pythonic expression." "Please provide an expression in the [('key','Label'), ...] format." ), ) check = True if not (isinstance(selection_list, list) and selection_list): check = False else: for item in selection_list: if not (isinstance(item, (tuple, list)) and len(item) == 2): check = False break if not check: raise except_orm( _("Error"), _("The Selection Options expression is must be in the [('key','Label'), ...] format!") ) return True
def hitung_ketentuan(self, cr, uid, id, localdict, context=None):
    # Evaluate one attendance rule (hr.ketentuan_absensi): execute its
    # stored python snippet against ``localdict`` and collect the result.
    obj_ketentuan_absensi = self.pool.get('hr.ketentuan_absensi')
    ketentuan_absensi = obj_ketentuan_absensi.browse(cr, uid, [id])[0]
    #try:
    # project safe_eval (mode='exec'): the snippet is expected to set
    # localdict['hasil'] with the computed amount
    eval(ketentuan_absensi.perhitungan_python, localdict, mode='exec', nocopy=True)
    val = {
        'name' : ketentuan_absensi.name,
        'kode' : ketentuan_absensi.kode,
        'satuan_ukur_id' : ketentuan_absensi.satuan_ukur_id.id,
        'hasil' : localdict['hasil'],
    }
    return val
def _process_text(self, txt):
    # Older variant of the report text renderer: translates literal
    # fragments and evaluates embedded expressions against localcontext.
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    result = ''
    sps = _regex.split(txt)
    while sps:
        # This is a simple text to translate
        result += unicode(self.localcontext.get('translate', lambda x: x)(sps.pop(0)))
        if sps:
            try:
                expr = sps.pop(0)
                txt = eval(expr, self.localcontext)
                if txt and (isinstance(txt, unicode) or isinstance(txt, str)):
                    txt = unicode(txt)
            except Exception, e:
                # NOTE(review): the formatted traceback is assigned but never
                # logged or used — dead code?
                tb_s = reduce(lambda x, y: x + y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            # on eval failure, txt keeps its previous value (param or prior
            # iteration's result) and is re-appended below
            if type(txt) == type('') or type(txt) == type(u''):
                txt2 = str2xml(txt)
                result += unicode(txt2)
            elif (txt is not None) and (txt is not False):
                result += unicode(txt)
    # NOTE(review): no final ``return`` — callers receive None here, unlike
    # the sibling variants which return str2xml(result). Confirm whether
    # this is a truncated copy or a genuine bug before relying on it.
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
    """Utility method to generate the first PDF-type report declared for the
    current model with ``usage`` attribute set to ``default``.
    This must be called explicitly by models that need it, usually
    at the beginning of ``edi_export``, before the call to ``super()``."""
    ir_actions_report = self.pool.get('ir.actions.report.xml')
    matching_reports = ir_actions_report.search(
        cr, uid, [('model', '=', self._name), ('report_type', '=', 'pdf'),
                  ('usage', '=', 'default')])
    if matching_reports:
        report = ir_actions_report.browse(cr, uid, matching_reports[0])
        report_service = 'report.' + report.report_name
        service = netsvc.LocalService(report_service)
        # render the report for this single record
        (result, format) = service.create(cr, uid, [record.id], {'model': self._name}, context=context)
        eval_context = {'time': time, 'object': record}
        if not report.attachment or not eval(report.attachment, eval_context):
            # no auto-saving of report as attachment, need to do it manually
            result = base64.b64encode(result)
            # sanitize the record's display name into a safe file name
            file_name = record.name_get()[0][1]
            file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
            file_name += ".pdf"
            ir_attachment = self.pool.get('ir.attachment').create(
                cr, uid, {
                    'name': file_name,
                    'datas': result,
                    'datas_fname': file_name,
                    'res_model': self._name,
                    'res_id': record.id
                }, context=context)
def _process_text(self, txt):
    """Expand a report text node against self.localcontext.

    _regex.split() alternates literal text (even indices) with embedded
    expressions (odd indices). Literals go through the context's
    'translate' callable; expressions are evaluated (failures render as
    nothing). The result is XML-escaped via str2xml().
    """
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    fragments = _regex.split(txt)
    buf = []
    for pos in range(0, len(fragments), 2):
        literal = tools.ustr(fragments[pos])
        translator = self.localcontext.get('translate', lambda x: x)
        buf.append(tools.ustr(translator(literal)))
        if pos + 1 < len(fragments):
            evaluated = None
            try:
                evaluated = eval(fragments[pos + 1], self.localcontext)
                if evaluated and isinstance(evaluated, basestring):
                    evaluated = tools.ustr(evaluated)
            except Exception:
                pass  # a failing expression renders as nothing
            if isinstance(evaluated, basestring):
                buf.append(evaluated)
            elif evaluated and (evaluated is not None) and (evaluated is not False):
                buf.append(ustr(evaluated))
    return str2xml(''.join(buf))
def _search_view(self, cr, uid, ids, name, arg, context=None):
    """Functional-field getter: return {action_id: str(view_definition)}
    describing the search view of each action.

    Uses the action's own search view when one exists (or the first
    non-inherited search view of the model); otherwise synthesizes one
    from the select="1" fields of the model's form view.
    """
    res = {}
    # BUG FIX: context defaults to None but was updated/dereferenced
    # unconditionally below, crashing on the default call.
    if context is None:
        context = {}

    def encode(s):
        # minidom wants byte strings in this python 2 code base
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s

    for act in self.browse(cr, uid, ids, context=context):
        context.update(eval(act.context, context))
        fields_from_fields_get = self.pool.get(act.res_model).fields_get(
            cr, uid, context=context)
        search_view_id = False
        if act.search_view_id:
            search_view_id = act.search_view_id.id
        else:
            res_view = self.pool.get('ir.ui.view').search(
                cr, uid, [('model', '=', act.res_model),
                          ('type', '=', 'search'),
                          ('inherit_id', '=', False)], context=context)
            if res_view:
                search_view_id = res_view[0]
        if search_view_id:
            field_get = self.pool.get(act.res_model).fields_view_get(
                cr, uid, search_view_id, 'search', context)
            fields_from_fields_get.update(field_get['fields'])
            field_get['fields'] = fields_from_fields_get
            res[act.id] = str(field_get)
        else:
            # No search view available: build one from the form view.
            def process_child(node, new_node, doc):
                # Copy select="1" fields into new_node, recursing into
                # layout containers.
                for child in node.childNodes:
                    if child.localName == 'field' and child.hasAttribute('select') \
                            and child.getAttribute('select') == '1':
                        if child.childNodes:
                            fld = doc.createElement('field')
                            for attr in child.attributes.keys():
                                fld.setAttribute(attr, child.getAttribute(attr))
                            new_node.appendChild(fld)
                        else:
                            new_node.appendChild(child)
                    elif child.localName in ('page', 'group', 'notebook'):
                        process_child(child, new_node, doc)

            form_arch = self.pool.get(act.res_model).fields_view_get(
                cr, uid, False, 'form', context)
            dom_arc = dom.minidom.parseString(encode(form_arch['arch']))
            new_node = copy.deepcopy(dom_arc)
            # BUG FIX: iterate over a snapshot — removing children while
            # iterating the live childNodes list skips every other node.
            for child_node in list(new_node.childNodes[0].childNodes):
                if child_node.nodeType == child_node.ELEMENT_NODE:
                    new_node.childNodes[0].removeChild(child_node)
            process_child(dom_arc.childNodes[0], new_node.childNodes[0], dom_arc)
            form_arch['arch'] = new_node.toxml()
            form_arch['fields'].update(fields_from_fields_get)
            res[act.id] = str(form_arch)
    return res
def get_arg(self, args):
    """Evaluate *args* (the string form of a Python list literal) and
    return its items, converting dict items to objects via dict_to_obj."""
    parsed = eval(args)
    return [dict_to_obj(item) if isinstance(item, dict) else item
            for item in parsed]
def get_context(self, node, eval_dict):
    """Return a copy of self.context merged with the node's context.

    The node context may be a dict (used as-is) or a string expression
    evaluated against *eval_dict*.
    """
    merged = self.context.copy()
    extra = node.context
    if not extra:
        return merged
    if isinstance(extra, dict):
        merged.update(extra)
    else:
        merged.update(eval(extra, eval_dict))
    return merged
def get_list_of_lines(self, cr, uid, ids, message, context=None):
    """ Return list of lists representing the values of each line of the report. """
    if context is None:
        context = {}
    # user-provided python code that computes the report lines
    config_values = self.read(cr, uid, ids[0], ['code'], context)
    space_code = self.get_space_code(cr, uid, ids, context)
    try:
        expr_code = config_values['code']
        # NOTE(review): this `eval` accepts mode=/nocopy= keywords, so it is
        # presumably openerp's safe_eval bound to the name `eval` — confirm.
        eval(expr_code, space_code, mode='exec', nocopy=True) # nocopy allows to return 'result'
    except Exception, e:
        # drop the builtins injected by the exec before reporting
        del(space_code['__builtins__'])
        # NOTE(review): `message` is rebound locally; the caller never sees
        # this text — confirm whether it should be returned/raised instead.
        message += """ Something went wrong when generating the report: %s. """ % (e,)
def merge(match):
    # Resolve one template placeholder: strip the delimiters from the
    # regex match and evaluate the inner expression.
    # Free variables (self, cr, uid, model, res_id, context) come from
    # the enclosing scope.
    exp = str(match.group()[2:-1]).strip()
    result = eval(exp, {
        'user': self.pool.get('res.users').browse(cr, uid, uid, context=context),
        'object': self.pool.get(model).browse(cr, uid, res_id, context=context),
        'context': dict(context), # copy context to prevent side-effects of eval
    })
    # falsy results (None, False, '', 0) render as the empty string
    return result and tools.ustr(result) or ''
def get_header(self, cr, uid, ids, message, context=None):
    """ Return one list of list representing the header of the report. """
    if context is None:
        context = {}
    # user-provided python code that computes the header columns
    config_values = self.read(cr, uid, ids[0], ['columns_header'], context)
    space_header = self.get_space_header(cr, uid, ids, context)
    try:
        expr_header = config_values['columns_header']
        # NOTE(review): this `eval` accepts mode=/nocopy= keywords, so it is
        # presumably openerp's safe_eval bound to the name `eval` — confirm.
        eval(expr_header, space_header, mode='exec', nocopy=True) # nocopy allows to return 'result'
    except Exception, e:
        # drop the builtins injected by the exec before reporting
        del(space_header['__builtins__'])
        # NOTE(review): `message` is rebound locally; the caller never sees
        # this text — confirm whether it should be returned/raised instead.
        message += """ Something went wrong when generating the report: %s """ % (e,)
def _get_assertion_id(self, assertion):
    """Resolve the record ids an assertion applies to: either the single
    explicit id, or the result of the assertion's search criteria.

    Raises YamlImportException when neither is provided.
    """
    if assertion.id:
        return [self.get_id(assertion.id)]
    if assertion.search:
        domain = eval(assertion.search, self.eval_context)
        model = self.pool.get(assertion.model)
        return model.search(self.cr, self.uid, domain,
                            context=assertion.context)
    raise YamlImportException('Nothing to assert: you must give either an id or a search criteria.')
def merge(match):
    # Replace one [[ ... ]] placeholder with the evaluated value of the
    # inner expression. Free variables (self, action, context, cr, uid)
    # come from the enclosing scope.
    obj_pool = self.pool.get(action.model_id.model)
    id = context.get('active_id')
    obj = obj_pool.browse(cr, uid, id)
    # strip the surrounding [[ ]] delimiters
    exp = str(match.group()[2:-2]).strip()
    result = eval(exp, {'object':obj, 'context': context,'time':time})
    if result in (None, False):
        # placeholder value for empty results
        return str("--------")
    return tools.ustr(result)
def _get_assertion_id(self, assertion):
    """Resolve the record ids an assertion applies to: either the single
    explicit id, or the result of the assertion's search criteria.

    Raises YamlImportException when neither is provided, or when the
    search matches no record (nothing to assert).
    """
    if assertion.id:
        ids = [self.get_id(assertion.id)]
    elif assertion.search:
        q = eval(assertion.search, self.eval_context)
        ids = self.pool.get(assertion.model).search(self.cr, self.uid, q, context=assertion.context)
    else:
        # BUG FIX: without this branch `ids` was unbound (NameError) when
        # neither an id nor a search criteria was given.
        raise YamlImportException('Nothing to assert: you must give either an id or a search criteria.')
    if not ids:
        raise YamlImportException('Nothing to assert: you must give either an id or a search criteria.')
    return ids
def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None):
    """Functional getter: return {rule_id: normalized_domain} for each
    rule, evaluating domain_force in the rule evaluation context."""
    eval_ctx = self._eval_context(cr, uid)
    result = {}
    for rule in self.browse(cr, uid, ids, context):
        source = rule.domain_force
        result[rule.id] = (
            expression.normalize(eval(source, eval_ctx)) if source else [])
    return result
def get_modules_with_version():
    """Return {module_name: '<major_version>.<module_version>'} for every
    available module.

    Modules whose __terp__.py descriptor is missing, unreadable or lacks a
    'version' key are silently skipped (best-effort).
    """
    modules = get_modules()
    res = {}
    for module in modules:
        terp = get_module_resource(module, '__terp__.py')
        try:
            info = eval(tools.file_open(terp).read())
            res[module] = "%s.%s" % (release.major_version, info['version'])
        except Exception:
            continue
    # BUG FIX: the original built `res` but never returned it.
    return res
def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None):
    """Functional getter: map each rule id to its normalized forced domain
    (empty list when the rule defines no domain_force)."""
    ctx = self._eval_context(cr, uid)
    res = {}
    for rule in self.browse(cr, uid, ids, context):
        if not rule.domain_force:
            res[rule.id] = []
            continue
        res[rule.id] = expression.normalize(eval(rule.domain_force, ctx))
    return res
def build_tree(obj, level, depth):
    """Accumulate report rows for *obj* and, recursively, for the records
    reachable through the report's field_parent relation.
    Free variables (self, cr, uid, new_fields, new_cond, report,
    browse_null) come from the enclosing scope."""
    res = self._row_get(cr, uid, [obj], new_fields, new_cond)
    level.append(depth)
    # IMPROVEMENT: getattr performs the same attribute lookup as
    # eval('obj.' + name) without evaluating arbitrary code.
    new_obj = getattr(obj, report['field_parent'][1])
    if not isinstance(new_obj, list):
        new_obj = [new_obj]
    for o in new_obj:
        if not isinstance(o, browse_null):
            res += build_tree(o, level, depth + 1)
    return res
def build_tree(obj, level, depth):
    """Collect report rows for *obj*, then recurse into the records found
    through the report's field_parent relation, tracking depth in *level*.
    Free variables (self, cr, uid, new_fields, new_cond, report,
    browse_null) come from the enclosing scope."""
    res = self._row_get(cr, uid, [obj], new_fields, new_cond)
    level.append(depth)
    # IMPROVEMENT: plain getattr replaces eval('obj.' + name) — identical
    # attribute access, no arbitrary-code evaluation.
    new_obj = getattr(obj, report['field_parent'][1])
    if not isinstance(new_obj, list):
        new_obj = [new_obj]
    for o in new_obj:
        if not isinstance(o, browse_null):
            res += build_tree(o, level, depth + 1)
    return res
def process_delete(self, node):
    """Process a yaml !delete node: unlink the matching records and their
    ir.model.data entries."""
    assert getattr(node, 'model'), "Attribute %s of delete tag is empty !" % ('model',)
    if self.pool.get(node.model):
        if len(node.search):
            # search criteria given: delete every matching record
            ids = self.pool.get(node.model).search(self.cr, self.uid, eval(node.search, self.eval_context))
        else:
            # otherwise delete the single record referenced by id
            ids = [self.get_id(node.id)]
        if len(ids):
            self.pool.get(node.model).unlink(self.cr, self.uid, ids)
            # NOTE(review): _unlink runs as uid 1 (superuser) here while a
            # sibling version of this method uses self.uid — confirm which
            # is intended.
            self.pool.get('ir.model.data')._unlink(self.cr, 1, node.model, ids)
    else:
        # unknown model: nothing deleted
        self.logger.log(logging.TEST, "Record not deleted.")
def __domain_calc(self, rule_dic, eval_data):
    """Calculate the domain expression for some rule.

    @rule_dic is a dictionary with rule{'domain_force', 'operand',
        'operator', 'field_name'}
    @eval_data a dictionary with context for eval()
    """
    domain_src = rule_dic['domain_force']
    if not domain_src:
        return []
    domain = eval(domain_src, eval_data)
    if self._debug:
        logging.getLogger('orm').debug("Domain calc: %s " % domain)
    return domain
def _parse_node(self, root_node):
    """Collect the names of the visible <field> nodes under *root_node*,
    recursing into non-field children; fields listed in self.groupby are
    skipped. The 'invisible' attribute is eval'ed against self.context."""
    names = []
    for child in root_node:
        invisible_src = str(child.attrib.get('invisible', False))
        if eval(invisible_src, {'context': self.context}):
            continue
        if child.tag == 'field':
            name = child.get('name')
            if name not in self.groupby:
                names.append(name)
        else:
            names.extend(self._parse_node(child))
    return names
def _tag_url(self, cr, rec, data_node=None):
    """Handle a <url> XML tag: create/update the ir.actions.url record and
    bind (or, on update with menu=False, unbind) it as an action."""
    url = rec.get("string",'').encode('utf8')
    target = rec.get("target",'').encode('utf8')
    name = rec.get("name",'').encode('utf8')
    xml_id = rec.get('id','').encode('utf8')
    self._test_xml_id(xml_id)
    res = {'name': name, 'url': url, 'target':target}
    # create or update the action record (noupdate honoured per data node)
    id = self.pool.get('ir.model.data')._update(cr, self.uid, "ir.actions.url",
        self.module, res, xml_id, noupdate=self.isnoupdate(data_node),
        mode=self.mode)
    self.idref[xml_id] = int(id)
    # ir_set
    if (not rec.get('menu') or eval(rec.get('menu','False'))) and id:
        # bind the action under the requested keyword (default: the
        # multi-record client action slot)
        keyword = str(rec.get('keyword','') or 'client_action_multi')
        value = 'ir.actions.url,'+str(id)
        replace = rec.get("replace",'') or True
        self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action',
            keyword, url, ["ir.actions.url"], value, replace=replace,
            isobject=True, xml_id=xml_id)
    elif self.mode=='update' and (rec.get('menu') and eval(rec.get('menu','False'))==False):
        # Special check for URL having attribute menu=False on update
        value = 'ir.actions.url,'+str(id)
        self._remove_ir_values(cr, url, value, "ir.actions.url")
def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None):
    """Functional getter: map each rule id to its forced domain, evaluated
    with the current user record and the time module in scope."""
    res = {}
    for rule in self.browse(cr, uid, ids, context):
        if not rule.domain_force:
            res[rule.id] = []
            continue
        eval_user_data = {
            'user': self.pool.get('res.users').browse(cr, 1, uid),
            'time': time,
        }
        res[rule.id] = eval(rule.domain_force, eval_user_data)
    return res
def process_delete(self, node):
    """Process a yaml !delete node: unlink the matching records and their
    ir.model.data entries."""
    assert getattr(node, 'model'), "Attribute %s of delete tag is empty !" % ('model',)
    model = self.pool.get(node.model)
    if not model:
        self.logger.log(logging.TEST, "Record not deleted.")
        return
    if len(node.search):
        # search criteria given: delete every matching record
        domain = eval(node.search, self.eval_context)
        ids = model.search(self.cr, self.uid, domain)
    else:
        # otherwise delete the single record referenced by id
        ids = [self.get_id(node.id)]
    if len(ids):
        model.unlink(self.cr, self.uid, ids)
        self.pool.get('ir.model.data')._unlink(self.cr, self.uid, node.model, ids)
def _child_get(node, self=None, tagname=None):
    # Generator over the children of `node`, honouring the special
    # rml_loop / rml_except / rml_tag attributes, each eval'ed against
    # self.localcontext when a report `self` is provided.
    for n in node:
        if self and self.localcontext and n.get('rml_loop', False):
            # rml_loop: yield this node once per context produced by the
            # loop expression; localcontext is restored afterwards.
            oldctx = self.localcontext
            for ctx in eval(n.get('rml_loop'), {}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag == tagname):
                    if n.get('rml_except', False):
                        try:
                            eval(n.get('rml_except'), {}, self.localcontext)
                        except:
                            # rml_except raised: skip this iteration
                            continue
                    if n.get('rml_tag'):
                        try:
                            # rml_tag evaluates to (tag, attributes) used
                            # to re-tag a shallow copy of the node
                            (tag, attr) = eval(n.get('rml_tag'), {}, self.localcontext)
                            n2 = copy.copy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except:
                            # on any evaluation error, yield the raw node
                            yield n
                    else:
                        yield n
            self.localcontext = oldctx
            continue
        if self and self.localcontext and n.get('rml_except', False):
            try:
                eval(n.get('rml_except'), {}, self.localcontext)
            except:
                # rml_except raised: skip the node entirely
                continue
        if (tagname is None) or (n.tag == tagname):
            yield n
def merge(match):
    """Replace one [[ ... ]] placeholder with the evaluated value of the
    inner expression, rendered for the active record.
    Free variables (self, action, context, cr, uid) come from the
    enclosing scope."""
    model_pool = self.pool.get(action.model_id.model)
    record = model_pool.browse(cr, uid, context.get('active_id'))
    expression_src = str(match.group()[2:-2]).strip()
    value = eval(expression_src, {
        'object': record,
        'context': context,
        'time': time
    })
    if value in (None, False):
        return str("--------")
    return tools.ustr(value)
def load_information_from_description_file(module):
    """Return the module description dict read from __openerp__.py (or the
    deprecated __terp__.py), or {} when neither file exists.

    :param module: The name of the module (sale, purchase, ...)
    """
    for filename in ['__openerp__.py', '__terp__.py']:
        description_file = get_module_resource(module, filename)
        if os.path.isfile(description_file):
            # BUG FIX: close the file handle (it was previously leaked).
            f = tools.file_open(description_file)
            try:
                # NOTE(review): eval of the descriptor content assumes
                # trusted module files.
                return eval(f.read())
            finally:
                f.close()
    #TODO: refactor the logger in this file to follow the logging guidelines
    # for 6.0
    logging.getLogger('addons').debug('The module %s does not contain a description file:'\
        '__openerp__.py or __terp__.py (deprecated)', module)
    return {}
def process_ir_set(self, node):
    """Process a yaml !ir_set node: evaluate its field expressions and
    forward them to ir.model.data.ir_set. Only runs in 'init' mode."""
    if self.mode != 'init':
        return False
    _, fields = node.items()[0]
    res = {}
    for fieldname, expression in fields.items():
        if is_eval(expression):
            res[fieldname] = eval(expression.expression, self.eval_context)
        else:
            res[fieldname] = expression
    self.pool.get('ir.model.data').ir_set(
        self.cr, 1, res['key'], res['key2'], res['name'], res['models'],
        res['value'], replace=res.get('replace', True),
        isobject=res.get('isobject', False), meta=res.get('meta', None))