def get_needaction_data(self, cr, uid, ids, context=None):
    """ Return for each menu entry of ids :
        - if it uses the needaction mechanism (needaction_enabled)
        - the needaction counter of the related action, taking into account the action domain
    """
    if context is None:
        context = {}
    res = {}
    # First pass: collect the requested menus plus any extra menus referenced
    # through a 'needaction_menu_ref' key in an action context.
    menu_ids = set()
    for menu in self.browse(cr, uid, ids, context=context):
        menu_ids.add(menu.id)
        ctx = None
        if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.context:
            try:
                # use magical UnquoteEvalContext to ignore undefined client-side variables such as `active_id`
                eval_ctx = tools.UnquoteEvalContext(**context)
                ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
            except Exception:
                # if the eval still fails for some reason, we'll simply skip this menu
                pass
        menu_ref = ctx and ctx.get('needaction_menu_ref')
        if menu_ref:
            if not isinstance(menu_ref, list):
                menu_ref = [menu_ref]
            model_data_obj = self.pool.get('ir.model.data')
            for menu_data in menu_ref:
                try:
                    # menu_data is an xmlid 'module.name'; only ir.ui.menu refs are kept
                    model, id = model_data_obj.get_object_reference(cr, uid, menu_data.split('.')[0], menu_data.split('.')[1])
                    if (model == 'ir.ui.menu'):
                        menu_ids.add(id)
                except Exception:
                    pass
    # Second pass: compute the needaction flag/counter for every collected menu.
    menu_ids = list(menu_ids)
    for menu in self.browse(cr, uid, menu_ids, context=context):
        res[menu.id] = {
            'needaction_enabled': False,
            'needaction_counter': False,
        }
        if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
            if menu.action.res_model in self.pool:
                obj = self.pool[menu.action.res_model]
                if obj._needaction:
                    # The counting domain comes from the action itself:
                    # act_window actions carry a domain, client actions a params_store dict.
                    if menu.action.type == 'ir.actions.act_window':
                        dom = menu.action.domain and eval(menu.action.domain, {'uid': uid}) or []
                    else:
                        dom = eval(menu.action.params_store or '{}', {'uid': uid}).get('domain')
                    res[menu.id]['needaction_enabled'] = obj._needaction
                    ctx = context
                    if menu.action.context:
                        try:
                            # use magical UnquoteEvalContext to ignore undefined client-side variables such as `active_id`
                            eval_ctx = tools.UnquoteEvalContext(**context)
                            ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
                        except Exception:
                            pass
                    res[menu.id]['needaction_counter'] = obj._needaction_count(cr, uid, dom, context=ctx)
    return res
def process(self, msg):
    """Run this rule's match/action code against ``msg`` and build a reply.

    Returns ``(True, reply)`` when the match code set ``result`` truthy,
    ``(False, None)`` otherwise.
    """
    # Sandbox namespace handed to the match code; the code signals a hit by
    # setting 'result' and may stash data in 'context' for the action code.
    match_context = {
        'self': self,
        'msg': msg,
        'result': False,
        'context': {},
        're': re,
    }
    if self.match:
        eval(self.match, match_context, mode="exec", nocopy=True)
    # NOTE(review): reconstructed as a sibling of the `if self.match` guard so
    # a non-matching message returns (False, None) — confirm against original.
    if match_context['result']:
        reply_context = {}
        action_context = {
            'self': self,
            'msg': msg,
            'reply': reply_context,
            'context': match_context['context'],
            'template': self.template
        }
        eval(self.action, action_context, mode="exec", nocopy=True)
        # build reply
        if self.reply_type != 'news':
            reply_context['message'] = msg
            reply = replies.REPLY_TYPES[self.reply_type](**reply_context)
        else:
            # 'news' replies aggregate the articles collected by the action code
            reply = replies.ArticlesReply(message=msg)
            for article in reply_context['articles']:
                reply.add_article(article)
        return True, reply
    else:
        return False, None
def calc_privilege(self, cr, uid, ids, method, context):
    """Evaluate an access record's privilege for ``method``.

    For 'perm_write'/'is_downloadable' the record may carry python code that
    decides the privilege at runtime; otherwise the stored flag is returned.
    Note: the record is read as superuser (uid 1).
    """
    if isinstance(ids, (int, long)):
        ids = [ids]
    access = self.browse(cr, 1, ids[0], context)
    # If method need eval
    if method in ['perm_write', 'is_downloadable']:
        if access[method] is False:
            return False
        if access.code and access.code.strip():
            # Evaluation namespace exposed to the stored python code; the code
            # communicates its decision back through ctx['result'].
            ctx = {
                'self': self,
                'object': access,
                'obj': access,
                'pool': self.pool,
                'time': time,
                'cr': cr,
                'context': dict(context),  # copy context to prevent side-effects of eval
                'uid': uid,
                'user': self.pool.get('res.users').browse(cr, uid, uid),
                'result': None,
            }
            ctx.update(context.get('ctx', {}))
            eval(access.code.strip(), ctx, mode="exec", nocopy=True)  # nocopy allows to return 'action'
            return True if ctx.get('result', None) else False
        else:
            # flag set but no code: privilege granted unconditionally
            return True
    else:
        return access[method]
def _action_email(self, cr, uid, user, action, cxt, context=None):
    """Build and send the email configured on ``action``.

    Each address field (email, email_cc, email_bcc, email_reply_to) may hold a
    python expression evaluated against ``cxt``; when evaluation fails the raw
    string is used as-is. Subject/body go through ``merge_message``.
    Sends nothing (and logs) when no 'to' address resolves.
    """
    email_from = config['email_from']
    if not email_from:
        _logger.debug('--email-from command line option is not specified, using a fallback value instead.')
        if user.email:
            email_from = user.email
        else:
            email_from = "%s@%s" % (user.login, gethostname())

    def _eval_address(value):
        # best-effort: address fields may be expressions or plain strings
        try:
            return eval(str(value), cxt)
        except Exception:
            return str(value)

    address = _eval_address(action.email)
    address_cc = _eval_address(action.email_cc)
    address_bcc = _eval_address(action.email_bcc)
    address_reply_to = _eval_address(action.email_reply_to)

    if not address:
        _logger.info('No to email address specified, not sending any email.')
        return

    # handle single and multiple recipient addresses
    addresses = address if isinstance(address, (tuple, list)) else [address]
    address_cces = None
    if address_cc:
        address_cces = address_cc if isinstance(address_cc, (tuple, list)) else [address_cc]
    addresses_bcces = None
    if address_bcc:
        # BUG FIX: the normalized list was previously computed but never
        # assigned, so BCC recipients were always dropped.
        addresses_bcces = address_bcc if isinstance(address_bcc, (tuple, list)) else [address_bcc]
    address_reply_toes = address_reply_to if address_reply_to else None

    email_subtype = action.email_subtype or 'plain'
    subject = self.merge_message(cr, uid, action.subject, action, context)
    body = self.merge_message(cr, uid, action.message, action, context)

    ir_mail_server = self.pool.get('ir.mail_server')
    msg = ir_mail_server.build_email(email_from, addresses, subject, body,
                                     email_cc=address_cces,
                                     email_bcc=addresses_bcces,
                                     reply_to=address_reply_toes,
                                     subtype=email_subtype)
    res_email = ir_mail_server.send_email(cr, uid, msg)
    if res_email:
        _logger.info('Email successfully sent to: %s', addresses)
    else:
        _logger.warning('Failed to send email to: %s', addresses)
def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None):
    """Compute the effective domain for each rule from ``domain_force``.

    Three forms are supported:
    - a domain whose third term contains "%s": the term is executed as SQL
      with uid substituted, and the fetched ids replace the term;
    - a domain whose third term contains '.sql.query': the term (marker
      stripped) is executed as SQL and the fetched ids replace the term;
    - a plain python domain, evaluated against ``_eval_context`` and
      normalized.
    """
    res = {}
    eval_context = self._eval_context(cr, uid)
    for rule in self.browse(cr, uid, ids, context):
        tmp_domain = str(rule.domain_force)
        if tmp_domain.find("%s") > 0:
            tmp_domain = eval(rule.domain_force)
            str_query = str(tmp_domain[0][2])
            # NOTE(review): SQL built by %-interpolation of uid; uid is an int
            # here, but this pattern is fragile — parameterized queries would
            # be safer. Confirm no user-controlled text reaches str_query.
            cr.execute(str_query % uid)
            tmp_ids = cr.fetchall()
            list_ids = []
            for i in tmp_ids:
                list_ids.append(i[0])
            # rebuild the domain with the fetched id list as right operand
            res[rule.id] = eval("[('" + tmp_domain[0][0] + "','" + tmp_domain[0][1] + "'," + str(list_ids) + ")]")
        elif tmp_domain.find('.sql.query') > 0:
            tmp_domain = eval(rule.domain_force)
            str_query = str(tmp_domain[0][2])
            str_query = str_query.replace('.sql.query', '')
            cr.execute(str_query)
            tmp_ids = cr.fetchall()
            list_ids = []
            for i in tmp_ids:
                list_ids.append(i[0])
            res[rule.id] = eval("[('" + tmp_domain[0][0] + "','" + tmp_domain[0][1] + "'," + str(list_ids) + ")]")
        elif rule.domain_force:
            res[rule.id] = expression.normalize_domain(eval(rule.domain_force, eval_context))
        else:
            res[rule.id] = []
    return res
def compute_rule(self, cr, uid, rule_id, localdict, context=None):
    """
    :param rule_id: id of rule to compute
    :param localdict: dictionary containing the environement in which to compute the rule
    :return: returns a tuple build as the base/amount computed, the quantity and the rate
    :rtype: (float, float, float)
    """
    rule = self.browse(cr, uid, rule_id, context=context)
    if rule.amount_select == 'fix':
        try:
            # quantity may itself be a python expression evaluated in localdict
            return rule.amount_fix, float(eval(rule.quantity, localdict)), 100.0
        except Exception:
            # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
            # are not converted into a user error
            raise UserError(_('Wrong quantity defined for salary rule %s (%s).') % (rule.name, rule.code))
    elif rule.amount_select == 'percentage':
        try:
            return (float(eval(rule.amount_percentage_base, localdict)),
                    float(eval(rule.quantity, localdict)),
                    rule.amount_percentage)
        except Exception:
            raise UserError(_('Wrong percentage base or quantity defined for salary rule %s (%s).') % (rule.name, rule.code))
    else:
        try:
            # python rule: the code sets result / result_qty / result_rate in localdict
            eval(rule.amount_python_compute, localdict, mode='exec', nocopy=True)
            return (float(localdict['result']),
                    'result_qty' in localdict and localdict['result_qty'] or 1.0,
                    'result_rate' in localdict and localdict['result_rate'] or 100.0)
        except Exception:
            raise UserError(_('Wrong python code defined for salary rule %s (%s).') % (rule.name, rule.code))
def get_google_drive_config(self, cr, uid, res_model, res_id, context=None):
    '''
    Function called by the js, when no google doc are yet associated with a record, with the aim to create one. It
    will first seek for a google.docs.config associated with the model `res_model` to find out what's the template
    of google doc to copy (this is usefull if you want to start with a non-empty document, a type or a name
    different than the default values). If no config is associated with the `res_model`, then a blank text document
    with a default name is created.
    :param res_model: the object for which the google doc is created
    :param ids: the list of ids of the objects for which the google doc is created. This list is supposed to have
    a length of 1 element only (batch processing is not supported in the code, though nothing really prevent it)
    :return: the config id and config name
    '''
    if not res_id:
        raise UserError(_("Creating google drive may only be done by one at a time."))
    # check if a model is configured with a template
    config_ids = self.search(cr, uid, [('model_id', '=', res_model)], context=context)
    configs = []
    for config in self.browse(cr, uid, config_ids, context=context):
        if config.filter_id:
            if (config.filter_id.user_id and config.filter_id.user_id.id != uid):
                # Private filter belonging to another user: not applicable
                continue
            # a config with a filter only applies when the record matches the
            # filter's domain (restricted to this single record)
            domain = [('id', 'in', [res_id])] + eval(config.filter_id.domain)
            local_context = context and context.copy() or {}
            local_context.update(eval(config.filter_id.context))
            google_doc_configs = self.pool.get(config.filter_id.model_id).search(cr, uid, domain, context=local_context)
            if google_doc_configs:
                configs.append({'id': config.id, 'name': config.name})
        else:
            # no filter: config always applies
            configs.append({'id': config.id, 'name': config.name})
    return configs
def _child_get(node, self=None, tagname=None):
    """Generator over the children of an RML ``node``, honouring the dynamic
    attributes ``rml_loop`` (repeat the node once per context produced by the
    expression), ``rml_except`` (skip the node when the expression raises) and
    ``rml_tag`` (replace tag/attributes with the evaluated pair).

    NOTE(review): structure reconstructed from a whitespace-mangled source;
    the trailing ``continue`` is assumed to close the rml_loop branch —
    the non-loop fallthrough (if any) is outside the visible chunk.
    """
    for n in node:
        if self and self.localcontext and n.get('rml_loop'):
            # rml_loop yields a sequence of dicts merged into the local context
            for ctx in eval(n.get('rml_loop'), {}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag == tagname):
                    if n.get('rml_except', False):
                        try:
                            eval(n.get('rml_except'), {}, self.localcontext)
                        except GeneratorExit:
                            continue
                        except Exception, e:
                            _logger.warning('rml_except: "%s"', n.get('rml_except', ''), exc_info=True)
                            continue
                    if n.get('rml_tag'):
                        try:
                            # rml_tag evaluates to (tag_name, attributes_dict)
                            (tag, attr) = eval(n.get('rml_tag'), {}, self.localcontext)
                            n2 = copy.deepcopy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except GeneratorExit:
                            yield n
                        except Exception, e:
                            _logger.warning('rml_tag: "%s"', n.get('rml_tag', ''), exc_info=True)
                            yield n
                    else:
                        yield n
            continue
def run_expressions(self, cr, uid, ids, rec_id, source_connection=False, target_connection=False, context=None):
    """Execute the python expression of each field mapping in ``ids``.

    Connections to the source/target servers are opened lazily when not
    provided. Each expression runs with ``mode="exec"`` and reports its value
    (if any) through ``cxt['context']['result']``; one entry per mapping is
    appended to the returned list (False when no result was produced).
    """
    if context is None:
        context = {}
    user = self.pool.get('res.users').browse(cr, uid, uid)
    result = []
    for field_mapping in self.browse(cr, uid, ids, context=context):
        expression_result = False
        if not source_connection or not target_connection:
            (source_connection, target_connection) = self.pool.get('etl.manager').open_connections(cr, uid, [field_mapping.action_id.manager_id.id], context=context)
        source_model_obj = source_connection.get_model(field_mapping.action_id.source_model_id.model)
        target_model_obj = target_connection.get_model(field_mapping.action_id.target_model_id.model)
        obj_pool = source_model_obj
        # Evaluation namespace handed to the mapping expression
        cxt = {
            'self': obj_pool,  # to be replaced by target_obj
            'source_obj': source_model_obj,
            'source_connection': source_connection,
            'target_obj': target_model_obj,
            'target_connection': target_connection,
            'rec_id': rec_id,
            'pool': self.pool,
            'time': time,
            'cr': cr,
            'context': dict(context),  # copy context to prevent side-effects of eval
            'uid': uid,
            'user': user,
        }
        if not field_mapping.expression:
            raise osv.except_osv(_('Warning!'), _('Type expression choosen buy not expression set'))
        eval(field_mapping.expression.strip(), cxt, mode="exec")  # nocopy allows to return 'action'
        if 'result' in cxt['context']:
            expression_result = cxt['context'].get('result')
        result.append(expression_result)
    return result
def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None):
    """Recursively flatten browse records into report rows.

    ``fields`` is a list of attribute paths (split into lists); a path with
    more than one component triggers a recursive descent into the relation.
    ``conditions`` mirrors ``fields`` and filters records via string-built
    comparisons. ``row_canvas`` carries parent-level cell values downward;
    ``group_by`` selects which level drives the grouping order.

    Fixes over the previous version: removed the unused local ``tmp`` and
    replaced the deprecated ``<>`` operator with ``!=``.
    """
    result = []
    for obj in objs:
        # evaluate the per-row filter conditions; a failing condition
        # aborts processing of the remaining objects (original behavior)
        tobreak = False
        for cond in conditions:
            if cond and cond[0]:
                c = cond[0]
                temp = c[0](eval('obj.' + c[1], {'obj': obj}))
                if not eval('\'' + temp + '\'' + ' ' + c[2] + ' ' + '\'' + str(c[3]) + '\''):
                    tobreak = True
        if tobreak:
            break
        levels = {}
        row = []
        for i in range(len(fields)):
            if not fields[i]:
                # no field at this position: inherit the parent row's cell once
                row.append(row_canvas and row_canvas[i])
                if row_canvas[i]:
                    row_canvas[i] = False
            elif len(fields[i]) == 1:
                # leaf attribute: render its value
                if not isinstance(obj, browse_null):
                    row.append(str(eval('obj.' + fields[i][0], {'obj': obj})))
                else:
                    row.append(None)
            else:
                # deeper path: remember the relation to recurse into
                row.append(None)
                levels[fields[i][0]] = True
        if not levels:
            result.append(row)
        else:
            # Process group_by data first
            key = []
            if group_by != None and fields[group_by] != None:
                if fields[group_by][0] in levels.keys():
                    key.append(fields[group_by][0])
                for l in levels.keys():
                    if l != fields[group_by][0]:
                        key.append(l)
            else:
                key = levels.keys()
            for l in key:
                objs = eval('obj.' + l, {'obj': obj})
                if not isinstance(objs, browse_record_list) and type(objs) != type([]):
                    objs = [objs]
                # strip the consumed path component before recursing
                field_new = []
                cond_new = []
                for f in range(len(fields)):
                    if (fields[f] and fields[f][0]) == l:
                        field_new.append(fields[f][1:])
                        cond_new.append(conditions[f][1:])
                    else:
                        field_new.append(None)
                        cond_new.append(None)
                if len(objs):
                    result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by)
                else:
                    result.append(row)
    return result
def run_action_code(self, values):
    """Execute the python code stored on this record against ``values``.

    The code runs with ``mode="exec"``; when it sets ``result`` in its
    namespace, that value is returned. Syntax errors are logged (with the
    code and its evaluation context) and swallowed.
    """
    source = self.code.strip()
    try:
        namespace = self._get_eval_context(values)
        # nocopy allows to return 'result'
        eval(source, namespace, mode="exec", nocopy=True)
    except SyntaxError:
        _logger.error('code %s values %s' % (source, self._get_eval_context(values)))
    else:
        if 'result' in namespace:
            return namespace['result']
def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None):
    """Recursively flatten browse records into report rows.

    ``fields`` holds attribute paths (as lists); single-component paths are
    rendered as cells, longer paths trigger recursion into the relation.
    ``conditions`` mirrors ``fields`` and filters rows with string-built
    comparisons; ``row_canvas`` carries parent cells downward; ``group_by``
    picks the level that leads the grouping order.
    """
    result = []
    for obj in objs:
        tobreak = False
        for cond in conditions:
            if cond and cond[0]:
                c = cond[0]
                temp = c[0](eval("obj." + c[1], {"obj": obj}))
                # condition is assembled as a quoted-string comparison and evaluated
                if not eval("'" + temp + "'" + " " + c[2] + " " + "'" + str(c[3]) + "'"):
                    tobreak = True
        # NOTE: a failing condition stops processing of ALL remaining objects
        if tobreak:
            break
        levels = {}
        row = []
        for i in range(len(fields)):
            if not fields[i]:
                # no field here: inherit the parent's cell, consuming it once
                row.append(row_canvas and row_canvas[i])
                if row_canvas[i]:
                    row_canvas[i] = False
            elif len(fields[i]) == 1:
                if obj:
                    row.append(str(eval("obj." + fields[i][0], {"obj": obj})))
                else:
                    row.append(None)
            else:
                # deeper path: remember the relation for the recursive pass
                row.append(None)
                levels[fields[i][0]] = True
        if not levels:
            result.append(row)
        else:
            # Process group_by data first
            key = []
            if group_by is not None and fields[group_by] is not None:
                if fields[group_by][0] in levels.keys():
                    key.append(fields[group_by][0])
                for l in levels.keys():
                    if l != fields[group_by][0]:
                        key.append(l)
            else:
                key = levels.keys()
            for l in key:
                objs = eval("obj." + l, {"obj": obj})
                if not isinstance(objs, (BaseModel, list)):
                    objs = [objs]
                # strip the consumed path component before recursing
                field_new = []
                cond_new = []
                for f in range(len(fields)):
                    if (fields[f] and fields[f][0]) == l:
                        field_new.append(fields[f][1:])
                        cond_new.append(conditions[f][1:])
                    else:
                        field_new.append(None)
                        cond_new.append(None)
                if len(objs):
                    result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by)
                else:
                    result.append(row)
    return result
def add_terms_and_conditions(self, ids, original_report_pdf, original_report):
    """Append the company's terms & conditions pages to a rendered report PDF.

    The T&C attachment is looked up on the record's company by the document's
    language (read from the field named by the report's
    ``terms_conditions_language_field``), falling back to the entry whose
    language is 'default'. Returns the combined PDF bytes, or the original
    report unchanged when no matching T&C document exists.
    """
    terms_and_conditions_decoded = False
    default_terms_and_conditions_decoded = False
    # todo change user language to report language (client language)
    language_field = original_report.terms_conditions_language_field
    model = original_report.model
    record = self.env[model].browse(ids)
    # Read the language field directly instead of exec'ing a dynamically
    # built statement (previously: eval('document_language = o.%s' % field)).
    document_language = getattr(record, language_field)
    company = record.company_id
    # todo check language
    terms_and_conditions_list = company.terms_and_conditions
    for terms_and_conditions in terms_and_conditions_list:
        if terms_and_conditions.language == document_language:
            terms_and_conditions_decoded = \
                base64.decodestring(terms_and_conditions.datas)
        if terms_and_conditions.language == 'default':
            default_terms_and_conditions_decoded = \
                base64.decodestring(terms_and_conditions.datas)
    if not terms_and_conditions_decoded:
        terms_and_conditions_decoded = \
            default_terms_and_conditions_decoded or False
    if terms_and_conditions_decoded:
        # concatenate: all pages of the original report, then all T&C pages
        writer = PdfFileWriter()
        stream_original_report = StringIO(original_report_pdf)
        reader_original_report = PdfFileReader(stream_original_report)
        stream_terms_and_conditions = StringIO(terms_and_conditions_decoded)
        reader_terms_and_conditions = PdfFileReader(
            stream_terms_and_conditions)
        for page in range(0, reader_original_report.getNumPages()):
            writer.addPage(reader_original_report.getPage(page))
        for page in range(0, reader_terms_and_conditions.getNumPages()):
            writer.addPage(reader_terms_and_conditions.getPage(page))
        stream_to_write = StringIO()
        writer.write(stream_to_write)
        combined_pdf = stream_to_write.getvalue()
        return combined_pdf
    else:
        return original_report_pdf
def _check(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
    """ This Function is called by scheduler. """
    context = context or {}
    if '__action_done' not in context:
        # marker used to avoid re-triggering actions within the same run
        context = dict(context, __action_done={})
    # retrieve all the action rules to run based on a timed condition
    action_dom = [('kind', '=', 'on_time')]
    action_ids = self.search(cr, uid, action_dom, context=dict(context, active_test=True))
    eval_context = self._get_eval_context(cr, uid, context=context)
    for action in self.browse(cr, uid, action_ids, context=context):
        now = datetime.now()
        if action.last_run:
            last_run = get_datetime(action.last_run)
        else:
            # never run before: treat epoch as the last run time
            last_run = datetime.utcfromtimestamp(0)
        # retrieve all the records that satisfy the action's condition
        model = self.pool[action.model_id.model]
        domain = []
        ctx = dict(context)
        if action.filter_domain is not False:
            domain = eval(action.filter_domain, eval_context)
        elif action.filter_id:
            domain = eval(action.filter_id.domain, eval_context)
            ctx.update(eval(action.filter_id.context))
            if 'lang' not in ctx:
                # Filters might be language-sensitive, attempt to reuse creator lang
                # as we are usually running this as super-user in background
                [filter_meta] = action.filter_id.get_metadata()
                user_id = filter_meta['write_uid'] and filter_meta['write_uid'][0] or \
                    filter_meta['create_uid'][0]
                ctx['lang'] = self.pool['res.users'].browse(cr, uid, user_id).lang
        record_ids = model.search(cr, uid, domain, context=ctx)
        # determine when action should occur for the records
        date_field = action.trg_date_id.name
        if date_field == 'date_action_last' and 'create_date' in model._fields:
            # fall back to creation date when the record was never acted upon
            get_record_dt = lambda record: record[date_field] or record.create_date
        else:
            get_record_dt = lambda record: record[date_field]
        # process action on the records that should be executed
        for record in model.browse(cr, uid, record_ids, context=context):
            record_dt = get_record_dt(record)
            if not record_dt:
                continue
            action_dt = self._check_delay(cr, uid, action, record, record_dt, context=context)
            # fire only for records whose trigger fell since the last run
            if last_run <= action_dt < now:
                try:
                    action._process(record)
                except Exception:
                    import traceback
                    _logger.error(traceback.format_exc())
        action.write({'last_run': now.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
        if automatic:
            # auto-commit for batch processing
            cr.commit()
def _check(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
    """ This Function is called by scheduler. """
    context = context or {}
    # retrieve all the action rules that have a trg_date_id and no precondition
    action_dom = [('trg_date_id', '!=', False), ('filter_pre_id', '=', False)]
    action_ids = self.search(cr, uid, action_dom, context=context)
    for action in self.browse(cr, uid, action_ids, context=context):
        now = datetime.now()
        if action.last_run:
            last_run = get_datetime(action.last_run)
        else:
            # never run before: treat epoch as the last run time
            last_run = datetime.utcfromtimestamp(0)
        # retrieve all the records that satisfy the action's condition
        model = self.pool.get(action.model_id.model)
        domain = []
        ctx = dict(context)
        if action.filter_id:
            domain = eval(action.filter_id.domain)
            ctx.update(eval(action.filter_id.context))
            if 'lang' not in ctx:
                # Filters might be language-sensitive, attempt to reuse creator lang
                # as we are usually running this as super-user in background
                [filter_meta] = action.filter_id.perm_read()
                user_id = filter_meta['write_uid'] and filter_meta['write_uid'][0] or \
                    filter_meta['create_uid'][0]
                ctx['lang'] = self.pool['res.users'].browse(cr, uid, user_id).lang
        record_ids = model.search(cr, uid, domain, context=ctx)
        # determine when action should occur for the records
        date_field = action.trg_date_id.name
        if date_field == 'date_action_last' and 'create_date' in model._all_columns:
            # fall back to creation date when the record was never acted upon
            get_record_dt = lambda record: record[date_field] or record.create_date
        else:
            get_record_dt = lambda record: record[date_field]
        delay = DATE_RANGE_FUNCTION[action.trg_date_range_type](action.trg_date_range)
        # process action on the records that should be executed
        for record in model.browse(cr, uid, record_ids, context=context):
            record_dt = get_record_dt(record)
            if not record_dt:
                continue
            action_dt = get_datetime(record_dt) + delay
            # fire only for records whose trigger fell since the last run
            if last_run <= action_dt < now:
                try:
                    context = dict(context or {}, action=True)
                    self._process(cr, uid, action, [record.id], context=context)
                except Exception:
                    import traceback
                    _logger.error(traceback.format_exc())
        action.write({'last_run': now.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
        if automatic:
            # auto-commit for batch processing
            cr.commit()
def _filter(self, cr, uid, action, action_filter, record_ids, context=None):
    """ filter the list record_ids that satisfy the action filter """
    if record_ids and action_filter:
        # model_id here is compared to action.model (a string) and passed to
        # pool.get(), so it appears to hold the model *name* — confirm the
        # field definition on the filter model.
        assert action.model == action_filter.model_id, "Filter model different from action rule model"
        model = self.pool.get(action_filter.model_id)
        # restrict the filter's domain to the candidate records
        domain = [('id', 'in', record_ids)] + eval(action_filter.domain)
        ctx = dict(context or {})
        ctx.update(eval(action_filter.context))
        record_ids = model.search(cr, uid, domain, context=ctx)
    return record_ids
def run(self, image, **kwargs):
    # return a image with specified recipe
    """Execute this record's python ``recipe`` against ``image``.

    The recipe mutates ``image`` in place through the evaluation namespace;
    the (possibly modified) image object is returned.
    """
    kwargs.update({p.name: p.value for p in self.param_ids})  # get parameters from recipe
    # TODO: Remove time import once caching is working
    import time
    # names exposed to the recipe code
    kwargs.update({
        'time': time,
        'Image': Image,
        'image': image,
        '_logger': _logger,
        'user': self.env['res.users'].browse(self._uid),
    })
    eval(self.recipe, kwargs, mode='exec', nocopy=True)
    return image
def _filter_post(self, records):
    """ Filter the records that satisfy the postcondition of action ``self``. """
    if not records:
        return records
    if self.filter_id:
        # stored filter: apply its domain (restricted to the candidates)
        # within its own context
        eval_context = self._get_eval_context()
        domain = [('id', 'in', records.ids)] + eval(self.filter_id.domain, eval_context)
        ctx = eval(self.filter_id.context)
        return records.with_context(**ctx).search(domain).with_env(records.env)
    if self.filter_domain:
        # inline domain on the action itself
        eval_context = self._get_eval_context()
        domain = [('id', 'in', records.ids)] + eval(self.filter_domain, eval_context)
        return records.search(domain)
    return records
def _filter(self, cr, uid, action, action_filter, record_ids, domain=False, context=None):
    """ Filter the list record_ids that satisfy the domain or the action filter. """
    if record_ids and (domain is not False or action_filter):
        if domain is not False:
            # explicit domain takes precedence over the stored filter
            new_domain = [('id', 'in', record_ids)] + eval(domain)
            ctx = context
        elif action_filter:
            assert action.model == action_filter.model_id, "Filter model different from action rule model"
            new_domain = [('id', 'in', record_ids)] + eval(action_filter.domain)
            ctx = dict(context or {})
            ctx.update(eval(action_filter.context))
        record_ids = self.pool[action.model].search(cr, uid, new_domain, context=ctx)
    return record_ids
def _post_import_operation(self, record, operations):
    """ Run python code after import """
    # Each operation is a template string; only the code between '${' and '}'
    # is extracted and evaluated with the imported record bound to 'object'.
    if not record or not operations:
        return
    try:
        if not isinstance(operations, list):
            operations = [operations]
        for operation in operations:
            if '${' in operation:
                code = (operation.split('${'))[1].split('}')[0]
                eval_context = {'object': record}
                # NOTE(review): plain eval of configured code — ensure
                # operations only come from trusted configuration.
                eval(code, eval_context)
    except Exception, e:
        raise except_orm(_('Post import operation error!'), e)
def _check_alias_defaults(self, cr, uid, ids, context=None):
    """Constraint: every ``alias_defaults`` value must evaluate to something
    convertible to a dict. Returns True when all records pass, False as soon
    as any evaluation/conversion fails."""
    records = self.browse(cr, uid, ids, context=context)
    try:
        for record in records:
            dict(eval(record.alias_defaults))
    except Exception:
        return False
    return True
def _search_product_quantity(self, cr, uid, obj, name, domain, context):
    """Search implementation for the computed quantity fields.

    Only a whitelisted set of fields/operators/numeric operands is accepted.
    The previous implementation compared via ``eval(str(qty) + op +
    str(value))``, which both routed numbers through eval and lost float
    precision in the str() round-trip; comparisons are now done directly.
    """
    # whitelisted operators mapped to plain python comparisons
    comparators = {
        "<": lambda a, b: a < b,
        ">": lambda a, b: a > b,
        "==": lambda a, b: a == b,
        "!=": lambda a, b: a != b,
        "<=": lambda a, b: a <= b,
        ">=": lambda a, b: a >= b,
    }
    res = []
    for field, operator, value in domain:
        # to prevent sql injections
        assert field in (
            "qty_available",
            "virtual_available",
            "incoming_qty",
            "outgoing_qty",
        ), "Invalid domain left operand"
        assert operator in ("<", ">", "=", "!=", "<=", ">="), "Invalid domain operator"
        assert isinstance(value, (float, int)), "Invalid domain right operand"
        if operator == "=":
            operator = "=="
        ids = []
        if name == "qty_available" and (value != 0.0 or operator not in ("==", ">=", "<=")):
            # fast path: dedicated quant-based search
            res.append(("id", "in", self._search_qty_available(cr, uid, operator, value, context)))
        else:
            product_ids = self.search(cr, uid, [], context=context)
            if product_ids:
                # TODO: Still optimization possible when searching virtual quantities
                compare = comparators[operator]
                for element in self.browse(cr, uid, product_ids, context=context):
                    if compare(element[field], value):
                        ids.append(element.id)
                res.append(("id", "in", ids))
    return res
def _process_text(self, txt):
    """Translate ``txt`` according to the language in the local context, replace
       dynamic ``[[expr]]`` with their real value, then escape the result for XML.

       :param str txt: original text to translate (must NOT be XML-escaped)
       :return: translated text, with dynamic expressions evaluated and
                with special XML characters escaped (``&,<,>``).
    """
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    result = ''
    # _regex splits the text into alternating literal / [[expr]] pieces
    sps = _regex.split(txt)
    while sps:
        # This is a simple text to translate
        to_translate = tools.ustr(sps.pop(0))
        result += tools.ustr(self.localcontext.get('translate', lambda x: x)(to_translate))
        if sps:
            txt = None
            try:
                expr = sps.pop(0)
                txt = eval(expr, self.localcontext)
                if txt and isinstance(txt, basestring):
                    txt = tools.ustr(txt)
            except Exception:
                # evaluation failures are logged and the expression is dropped
                _logger.error("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
            if isinstance(txt, basestring):
                result += txt
            elif txt and (txt is not None) and (txt is not False):
                # non-string truthy values are coerced to unicode
                result += ustr(txt)
    return str2xml(result)
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
    """Utility method to generate the first PDF-type report declared for the
       current model with ``usage`` attribute set to ``default``.
       This must be called explicitly by models that need it, usually
       at the beginning of ``edi_export``, before the call to ``super()``."""
    ir_actions_report = self.pool.get('ir.actions.report.xml')
    matching_reports = ir_actions_report.search(cr, uid, [('model', '=', self._name),
                                                          ('report_type', '=', 'pdf'),
                                                          ('usage', '=', 'default')])
    if matching_reports:
        report = ir_actions_report.browse(cr, uid, matching_reports[0])
        result, format = openerp.report.render_report(cr, uid, [record.id], report.report_name, {'model': self._name}, context=context)
        # report.attachment is a python expression deciding auto-save;
        # when absent/falsy the attachment must be created manually here
        eval_context = {'time': time, 'object': record}
        if not report.attachment or not eval(report.attachment, eval_context):
            # no auto-saving of report as attachment, need to do it manually
            result = base64.b64encode(result)
            # sanitize the record's display name into a safe file name
            file_name = record.name_get()[0][1]
            file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
            file_name += ".pdf"
            self.pool.get('ir.attachment').create(cr, uid, {
                'name': file_name,
                'datas': result,
                'datas_fname': file_name,
                'res_model': self._name,
                'res_id': record.id,
                'type': 'binary'
            }, context=context)
def _check_attachment_use(self, cr, uid, ids, report):
    """ Check attachment_use field. If set to true and an existing pdf is already saved, load
    this one now. Else, mark save it.
    """
    save_in_attachment = {}
    if report.attachment_use is True:
        save_in_attachment['model'] = report.model
        save_in_attachment['loaded_documents'] = {}
        for record_id in ids:
            obj = self.pool[report.model].browse(cr, uid, record_id)
            # report.attachment is a python expression producing the file name
            filename = eval(report.attachment, {'object': obj, 'time': time})
            if filename is False:
                # May be false if, for instance, the record is in draft state
                continue
            else:
                alreadyindb = [('datas_fname', '=', filename),
                               ('res_model', '=', report.model),
                               ('res_id', '=', record_id)]
                attach_ids = self.pool['ir.attachment'].search(cr, uid, alreadyindb)
                if attach_ids:
                    # Add the loaded pdf in the loaded_documents list
                    pdf = self.pool['ir.attachment'].browse(cr, uid, attach_ids[0]).datas
                    pdf = base64.decodestring(pdf)
                    save_in_attachment['loaded_documents'][record_id] = pdf
                    _logger.info('The PDF document %s was loaded from the database' % filename)
                else:
                    # Mark current document to be saved
                    save_in_attachment[record_id] = filename
    return save_in_attachment
def _search_product_quantity(self, cr, uid, obj, name, domain, context):
    """Search on computed quantities of the product variants.

    Only a whitelisted set of fields/operators/numeric operands is accepted.
    The previous implementation compared via ``eval(str(qty) + op +
    str(value))``, which both routed numbers through eval and lost float
    precision in the str() round-trip; comparisons are now done directly.
    """
    # whitelisted operators mapped to plain python comparisons
    comparators = {
        "<": lambda a, b: a < b,
        ">": lambda a, b: a > b,
        "==": lambda a, b: a == b,
        "<=": lambda a, b: a <= b,
        ">=": lambda a, b: a >= b,
    }
    prod = self.pool.get("product.product")
    res = []
    for field, operator, value in domain:
        # to prevent sql injections
        assert field in (
            "qty_available",
            "virtual_available",
            "incoming_qty",
            "outgoing_qty",
        ), "Invalid domain left operand"
        assert operator in ("<", ">", "=", "<=", ">="), "Invalid domain operator"
        assert isinstance(value, (float, int)), "Invalid domain right operand"
        if operator == "=":
            operator = "=="
        ids = []
        product_ids = prod.search(cr, uid, [], context=context)
        if product_ids:
            # TODO: use a query instead of this browse record which is probably making the too much requests, but don't forget
            # the context that can be set with a location, an owner...
            compare = comparators[operator]
            for element in prod.browse(cr, uid, product_ids, context=context):
                if compare(element[field], value):
                    ids.append(element.id)
            res.append(("product_variant_ids", "in", ids))
    return res
def _check_alias_defaults(self):
    """Validate that ``alias_defaults`` evaluates to something convertible
    to a dict; raise a UserError otherwise."""
    expression = self.alias_defaults
    try:
        dict(eval(expression))
    except Exception:
        raise UserError(
            _("Invalid expression, it must be a literal python dictionary definition e.g. \"{'field': 'value'}\"")
        )
def format(self, cr, uid, ids, percent, value, grouping=False, monetary=False, context=None):
    """ Format() will return the language-specific output for float values"""
    if percent[0] != "%":
        raise ValueError("format() must be given exactly one %char format specifier")
    # grouping spec, thousands separator and decimal point come from the
    # language record; lang_grouping is a stored python literal (e.g. "[3, 0]")
    lang_grouping, thousands_sep, decimal_point = self._lang_data_get(cr, uid, ids[0], monetary)
    eval_lang_grouping = eval(lang_grouping)
    formatted = percent % value
    # floats and decimal ints need special action!
    if percent[-1] in "eEfFgG":
        seps = 0
        parts = formatted.split(".")
        if grouping:
            parts[0], seps = intersperse(parts[0], eval_lang_grouping, thousands_sep)
        formatted = decimal_point.join(parts)
        # intersperse may have padded with spaces; strip as many as it added
        while seps:
            sp = formatted.find(" ")
            if sp == -1:
                break
            formatted = formatted[:sp] + formatted[sp + 1:]
            seps -= 1
    elif percent[-1] in "diu":
        if grouping:
            formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]
    return formatted
def action_launch(self, cr, uid, ids, context=None):
    """ Launch Action of Wizard"""
    wizard_id = ids and ids[0] or False
    wizard = self.browse(cr, uid, wizard_id, context=context)
    if wizard.type in ('automatic', 'once'):
        wizard.write({'state': 'done'})
    # Load action
    act_type = self.pool.get('ir.actions.actions').read(cr, uid, wizard.action_id.id, ['type'], context=context)
    res = self.pool.get(act_type['type']).read(cr, uid, wizard.action_id.id, [], context=context)
    # BUG FIX: the old code compared the read() *dict* to the string
    # ('act_type <> ...'), which is always true, so every action returned
    # early and the window-action handling below was unreachable.
    if act_type['type'] != 'ir.actions.act_window':
        return res
    res.setdefault('context', '{}')
    res['nodestroy'] = True
    # Open a specific record when res_id is provided in the context
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    ctx = eval(res['context'], {'user': user})
    if ctx.get('res_id'):
        res.update({'res_id': ctx.pop('res_id')})
    # disable log for automatic wizards
    if wizard.type == 'automatic':
        ctx.update({'disable_log': True})
    res.update({'context': ctx})
    return res
def satisfy_condition(self, cr, uid, rule_id, localdict, context=None):
    """
    @param rule_id: id of hr.payslip.exception.rule to be tested
    @param contract_id: id of hr.contract to be tested
    @return: returns True if the given rule match the condition for the given contract. Return False otherwise.
    """
    rule = self.browse(cr, uid, rule_id, context=context)
    if rule.condition_select == 'none':
        return True
    else:
        # python code
        try:
            # the code sets 'result' in localdict to signal a match
            eval(rule.condition_python, localdict, mode='exec', nocopy=True)
            return 'result' in localdict and localdict['result'] or False
        except Exception:
            # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
            # are not converted into a user error
            raise osv.except_osv(_('Error!'), _('Wrong python condition defined for payroll exception rule %s (%s).') % (rule.name, rule.code))
def _compute_data(self):
    """Compute primary/secondary dashboard values.

    Counts records matching the evaluated domain; when a non-count
    aggregate is configured, browses the records and applies the function
    from FIELD_FUNCTIONS to the configured field. Domain errors set both
    values to 'ERR!'; bad format strings set 'F_ERR!'.
    """
    if not self.active:
        return
    model = self.env[self.model_id.model]
    eval_context = self._get_eval_context()
    domain = self.domain or '[]'
    try:
        count = model.search_count(eval(domain, eval_context))
    except Exception as e:
        self.primary_value = self.secondary_value = 'ERR!'
        self.error = str(e)
        return
    # Idiom fix: any() takes a generator directly; no intermediate list.
    if any(
        self[f] and self[f] != 'count'
        for f in ('primary_function', 'secondary_function')
    ):
        records = model.search(eval(domain, eval_context))
    for f in ['primary_', 'secondary_']:
        f_function = f + 'function'
        f_field_id = f + 'field_id'
        f_format = f + 'format'
        f_value = f + 'value'
        value = 0
        if self[f_function] == 'count':
            value = count
        elif self[f_function]:
            func = FIELD_FUNCTIONS[self[f_function]]['func']
            if func and self[f_field_id] and count:
                vals = [x[self[f_field_id].name] for x in records]
                value = func(vals)
        if self[f_function]:
            try:
                self[f_value] = (self[f_format] or '{:,}').format(value)
            except (ValueError, KeyError, IndexError) as e:
                # ROBUSTNESS: user-supplied format strings can also raise
                # KeyError/IndexError (bad named/positional fields), not
                # just ValueError.
                self[f_value] = 'F_ERR!'
                self.error = str(e)
                return
        else:
            self[f_value] = False
def _search_qty_available(self, cr, uid, operator, value, context):
    """Search function for the computed qty_available field.

    Restricts quants by lot/owner/package from the context plus the
    standard location domain, aggregates quantities per product, and keeps
    products whose total satisfies ``qty <operator> value``.
    """
    domain_quant = []
    if context.get('lot_id'):
        domain_quant.append(('lot_id', '=', context['lot_id']))
    if context.get('owner_id'):
        domain_quant.append(('owner_id', '=', context['owner_id']))
    if context.get('package_id'):
        domain_quant.append(('package_id', '=', context['package_id']))
    domain_quant += self._get_domain_locations(cr, uid, [], context=context)[0]
    quants = self.pool.get('stock.quant').read_group(cr, uid, domain_quant, ['product_id', 'qty'], ['product_id'], context=context)
    # {product_id: total_qty}
    quants = dict(map(lambda x: (x['product_id'][0], x['qty']), quants))
    # NOTE(review): the comparison is built as a string and eval'ed;
    # `operator` comes from the ORM search layer, not end users, but a
    # mapping to the `operator` module would be safer than eval.
    quants = dict((k, v) for k, v in quants.iteritems() if eval(str(v) + operator + str(value)))
    # NOTE(review): returns the matching product ids as a bare list; the
    # caller is presumably expected to wrap it in ('id', 'in', ids) —
    # confirm against the field definition.
    return(list(quants))
def _get_allowance_pricelist(self, training, employee):
    """Return the allowance pricelist chosen by the first matching policy.

    Each policy's python code runs (exec) with env/participant_type/
    training/employee in scope and must set ``result``; the first truthy
    result wins. Returns False when no policy matches.

    :param training: training record passed into the policy code
    :param employee: employee record passed into the policy code
    :raises UserError: when a policy formula fails to evaluate
    """
    self.ensure_one()
    result = False
    localdict = {
        "env": self.env,
        "participant_type": self,
        "training": training,
        "employee": employee,
    }
    for policy in self.pricelist_allowance_policy_ids:
        try:
            eval(policy.policy_field_id.python_code, localdict, mode="exec", nocopy=True)
            result = localdict["result"]
            if result:
                break
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer converted into a UserError.
            warning_msg = _("Error on pricelist allowance formula")
            raise UserError(warning_msg)
    return result
def _compute_data(self):
    """Compute primary/secondary dashboard values via search_read.

    Like the sibling implementation above, but reads only the configured
    fields with search_read instead of browsing full records.
    """
    if not self.active:
        return
    model = self.env[self.model_id.model]
    eval_context = self._get_eval_context()
    domain = self.domain or '[]'
    try:
        count = model.search_count(eval(domain, eval_context))
    except Exception as e:
        # Bad domain: flag both values and record the error.
        self.primary_value = self.secondary_value = 'ERR!'
        self.error = str(e)
        return
    # Only the fields that are actually configured are read.
    fields = [
        f.name for f in [self.primary_field_id, self.secondary_field_id] if f
    ]
    read_vals = fields and\
        model.search_read(eval(domain, eval_context), fields) or []
    for f in ['primary_', 'secondary_']:
        f_function = f + 'function'
        f_field_id = f + 'field_id'
        f_format = f + 'format'
        f_value = f + 'value'
        value = 0
        if not self[f_function]:
            self[f_value] = False
        else:
            if self[f_function] == 'count':
                value = count
            else:
                # Aggregate the configured field over the read values.
                func = FIELD_FUNCTIONS[self[f_function]]['func']
                vals = [x[self[f_field_id].name] for x in read_vals]
                value = func(vals)
            try:
                self[f_value] = (self[f_format] or '{:,}').format(value)
            except ValueError as e:
                self[f_value] = 'F_ERR!'
                self.error = str(e)
                return
def process_request(self, msg):
    """Evaluate the first active filter against ``msg``.

    Runs the filter's ``match`` code (exec; must set ``result`` and may
    fill ``context``). On a match, runs its ``action`` code and returns
    its ``result``; otherwise returns None.

    NOTE(review): only the FIRST active filter is ever evaluated — the
    `return None` in the else branch short-circuits the loop. Confirm this
    is intended before configuring multiple filters.
    """
    # IDIOM FIX: `f.is_active is True` identity test replaced with plain
    # truthiness; behavior is identical for a boolean field.
    for a_filter in self.filters.filtered(lambda f: f.is_active):
        match_context = {
            'self': self,
            'msg': msg,
            'result': False,
            'context': {},
            're': re,
        }
        eval(a_filter.match, match_context, mode="exec", nocopy=True)
        if match_context['result']:
            action_context = {
                'self': self,
                'msg': msg,
                'result': None,
                'context': match_context['context'],
                'template': a_filter.template
            }
            eval(a_filter.action, action_context, mode="exec", nocopy=True)
            return action_context['result']
        else:
            return None
def _parse_node(self, root_node):
    """Collect the visible field names under ``root_node``.

    Field nodes already used for grouping are skipped; any other
    (container) node is descended into recursively. Nodes whose
    ``invisible`` attribute evaluates truthy (with the current context in
    scope) are ignored entirely.
    """
    names = []
    for child in root_node:
        invisible = eval(str(child.attrib.get('invisible', False)),
                         {'context': self.context})
        if invisible:
            continue
        if child.tag == 'field':
            name = child.get('name')
            if name not in self.groupby:
                names.append(name)
        else:
            names.extend(self._parse_node(child))
    return names
def _compute_data(self):
    """Compute the matching record count and an aggregate of field_id.

    `count` is the number of records matching the evaluated domain;
    `computed_value` is min/max/sum/avg/median of the configured field
    over those records, per `field_function`.
    """
    self.count = 0
    self.computed_value = 0
    if not self.active:
        return
    # Compute count item
    model = self.env[self.model_id.model]
    eval_context = self._get_eval_context()
    self.count = model.search_count(eval(self.domain, eval_context))
    # Aggregate only when a function, a field and matching records exist.
    if not (self.field_function and self.field_id and self.count != 0):
        return
    records = model.search(eval(self.domain, eval_context))
    values = [rec[self.field_id.name] for rec in records]
    aggregators = {
        'min': min,
        'max': max,
        'sum': sum,
        'avg': lambda vs: sum(vs) / len(vs),
        'median': self.median,
    }
    aggregate = aggregators.get(self.field_function)
    if aggregate is not None:
        self.computed_value = aggregate(values)
def _get_ifrs_query(self, cr, uid, brw, context=None):
    """ Fetches a semi-query to be provided as context into aml"""
    context = dict(context or {})
    query = ''
    if not brw.filter_id:
        # No filter configured: empty WHERE fragment.
        return query
    # The stored domain is a repr'd list of leaves; evaluate it back.
    args = eval(brw.filter_id.domain)
    query = self.pool['account.move.line']._where_calc(
        cr, uid, args, context=context)
    # get_sql() returns (tables, where_clause, where_clause_params).
    where_clause, where_clause_params = query.get_sql()[1:]
    # Alias the table to 'l' to match the caller's SQL.
    where_clause = where_clause.replace('account_move_line', 'l')
    # Interpolate the parameters into a ready-to-embed SQL string.
    query = cr.mogrify(where_clause, where_clause_params)
    return query
def get_needaction_data(self, cr, uid, ids, context=None):
    """ Return for each menu entry of ids :
        - if it uses the needaction mechanism (needaction_enabled)
        - the needaction counter of the related action, taking into
          account the action domain
    """
    if context is None:
        context = {}
    res = {}
    menu_ids = set()
    for menu in self.browse(cr, uid, ids, context=context):
        menu_ids.add(menu.id)
        ctx = None
        if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.context:
            try:
                # ROBUSTNESS: use the magical UnquoteEvalContext to ignore
                # undefined client-side variables such as `active_id`; the
                # original bare eval raised on any such context.
                eval_ctx = tools.UnquoteEvalContext(**context)
                ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
            except Exception:
                # if the eval still fails for some reason, simply skip this menu
                pass
        menu_ref = ctx and ctx.get('needaction_menu_ref')
        if menu_ref:
            if not isinstance(menu_ref, list):
                menu_ref = [menu_ref]
            model_data_obj = self.pool.get('ir.model.data')
            for menu_data in menu_ref:
                try:
                    # ROBUSTNESS: an unknown xmlid raised and aborted the
                    # whole computation; now it is simply ignored.
                    model, id = model_data_obj.get_object_reference(cr, uid, menu_data.split('.')[0], menu_data.split('.')[1])
                    if (model == 'ir.ui.menu'):
                        menu_ids.add(id)
                except Exception:
                    pass
    menu_ids = list(menu_ids)
    for menu in self.browse(cr, uid, menu_ids, context=context):
        res[menu.id] = {
            'needaction_enabled': False,
            'needaction_counter': False,
        }
        if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
            # ROBUSTNESS: the action may reference a model that is not (or
            # no longer) loaded in the registry.
            if menu.action.res_model in self.pool:
                obj = self.pool[menu.action.res_model]
                if obj._needaction:
                    if menu.action.type == 'ir.actions.act_window':
                        dom = menu.action.domain and eval(menu.action.domain, {'uid': uid}) or []
                    else:
                        dom = eval(menu.action.params_store or '{}', {'uid': uid}).get('domain')
                    res[menu.id]['needaction_enabled'] = obj._needaction
                    # FIX: evaluate the action context (ignoring unquoted
                    # client-side variables) and pass it to the counter,
                    # instead of passing the raw request context.
                    ctx = context
                    if menu.action.context:
                        try:
                            eval_ctx = tools.UnquoteEvalContext(**context)
                            ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
                        except Exception:
                            pass
                    res[menu.id]['needaction_counter'] = obj._needaction_count(cr, uid, dom, context=ctx)
    return res
def satisfy_condition(self, cr, uid, rule_id, localdict, context=None):
    """
    @param rule_id: id of hr.payslip.exception.rule to be tested
    @param contract_id: id of hr.contract to be tested
    @return: returns True if the given rule match the condition for the given contract. Return False otherwise.
    """
    rule = self.browse(cr, uid, rule_id, context=context)
    if rule.condition_select == 'none':
        # Unconditional rule: always applies.
        return True
    else:
        #python code
        try:
            # Presumably Odoo safe_eval (mode='exec'); the rule code must
            # set `result` in localdict — confirm.
            eval(rule.condition_python, localdict, mode='exec', nocopy=True)
            return 'result' in localdict and localdict['result'] or False
        except:
            # NOTE(review): bare except also hides programming errors in
            # the rule code behind this generic user error.
            raise osv.except_osv(
                _('Error!'),
                _('Wrong python condition defined for payroll exception rule %s (%s).'
                  ) % (rule.name, rule.code))
def report_download(self, data, token):
    """This function is used by 'qwebactionmanager.js' in order to trigger
    the download of a pdf/controller report.

    :param data: a javascript array JSON.stringified containg report
        internal url ([0]) and type [1]
    :returns: Response with a filetoken cookie and an attachment header
    """
    requestcontent = json.loads(data)
    url, type = requestcontent[0], requestcontent[1]
    try:
        if type == 'qweb-pdf':
            reportname = url.split('/report/pdf/')[1].split('?')[0]
            docids = None
            if '/' in reportname:
                reportname, docids = reportname.split('/')
            if docids:
                # Generic report:
                response = self.report_routes(reportname, docids=docids, converter='pdf')
            else:
                # Particular report:
                # decoding the args represented in JSON
                data = url_decode(url.split('?')[1]).items()
                response = self.report_routes(reportname, converter='pdf', **dict(data))
            cr, uid = request.cr, request.uid
            report = request.registry['report']._get_report_from_name(cr, uid, reportname)
            filename = "%s.%s" % (report.name, "pdf")
            # BUG FIX: `docids` is None on the "particular report" path; the
            # original unconditionally called docids.split(","), and the
            # resulting AttributeError was swallowed by the broad except,
            # turning every such download into an error response.
            if docids:
                ids = [int(x) for x in docids.split(",")]
                obj = request.env[report.model].browse(ids)
                if report.print_report_name and not len(obj) > 1:
                    # Custom filename expression evaluated with the record.
                    filename = eval(report.print_report_name, {'object': obj, 'time': time})
            response.headers.add('Content-Disposition', content_disposition(filename))
            response.set_cookie('fileToken', token)
            return response
        elif type == 'controller':
            reqheaders = Headers(request.httprequest.headers)
            response = Client(request.httprequest.app, BaseResponse).get(url, headers=reqheaders, follow_redirects=True)
            response.set_cookie('fileToken', token)
            return response
        else:
            return
    except Exception as e:
        # FIX: modernized from the Python-2-only `except Exception, e`
        # syntax (the `as` form also works on Python 2.6+).
        se = _serialize_exception(e)
        error = {
            'code': 200,
            'message': "Odoo Server Error",
            'data': se
        }
        return request.make_response(html_escape(json.dumps(error)))
def _child_get(node, self=None, tagname=None):
    """Yield the relevant children of ``node`` for RML rendering.

    Honours three node attributes, each evaluated in self.localcontext:
    - rml_loop:   yield the child once per context dict, updating
                  localcontext each iteration;
    - rml_except: skip the child when its expression raises;
    - rml_tag:    rename the child and merge in attributes returned by
                  the expression.
    Without a localcontext only plain tag filtering applies.
    """
    for n in node:
        if self and self.localcontext and n.get('rml_loop'):
            # Loop mode: the same child is emitted once per context.
            for ctx in eval(n.get('rml_loop'), {}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag == tagname):
                    if n.get('rml_except', False):
                        try:
                            eval(n.get('rml_except'), {}, self.localcontext)
                        except GeneratorExit:
                            continue
                        except Exception as e:
                            # Failing rml_except means "skip this node".
                            _logger.warning('rml_except: "%s"', n.get('rml_except', ''), exc_info=True)
                            continue
                    if n.get('rml_tag'):
                        try:
                            # Expression yields (new_tag, extra_attributes).
                            (tag, attr) = eval(n.get('rml_tag'), {}, self.localcontext)
                            n2 = copy.deepcopy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except GeneratorExit:
                            yield n
                        except Exception as e:
                            # Broken rml_tag: fall back to the original node.
                            _logger.warning('rml_tag: "%s"', n.get('rml_tag', ''), exc_info=True)
                            yield n
                    else:
                        yield n
            continue
        if self and self.localcontext and n.get('rml_except'):
            try:
                eval(n.get('rml_except'), {}, self.localcontext)
            except GeneratorExit:
                continue
            except Exception as e:
                _logger.warning('rml_except: "%s"', n.get('rml_except', ''), exc_info=True)
                continue
        if self and self.localcontext and n.get('rml_tag'):
            try:
                (tag, attr) = eval(n.get('rml_tag'), {}, self.localcontext)
                n2 = copy.deepcopy(n)
                n2.tag = tag
                n2.attrib.update(attr or {})
                yield n2
                # Resetting tagname to '' makes the filter below emit the
                # original node as well after the renamed copy.
                tagname = ''
            except GeneratorExit:
                pass
            except Exception as e:
                _logger.warning('rml_tag: "%s"', n.get('rml_tag', ''), exc_info=True)
                pass
        if (tagname is None) or (n.tag == tagname):
            yield n
def _get_alias_defaults_values(self, cr, uid, ids, context=None):
    """Build per-team alias default values.

    Each team's stored alias_defaults string is evaluated, forced to
    create leads (when the user belongs to the use-lead group and the team
    uses leads) or opportunities, and tagged with the team id.
    """
    result = dict.fromkeys(ids, False)
    use_lead_group = self.pool['res.users'].has_group(cr, uid, 'crm.group_use_lead')
    for team in self.browse(cr, uid, ids, context=context):
        defaults = eval(team.alias_defaults)
        record_type = 'lead' if use_lead_group and team.use_leads else 'opportunity'
        defaults['type'] = record_type
        defaults['team_id'] = team.id
        result[team.id] = {
            'alias_defaults': defaults,
            'alias_parent_thread_id': team.id,
        }
    return result
def get_interested_action(self, cr, uid, interested, context=None):
    """Return the 'channel interested' window action with the flag injected.

    Looks up the action by xmlid, evaluates its context, adds the
    ``interested`` flag and stores the context back as a string.
    Raises a UserError when the XML record backing the action is missing.
    """
    model_data = self.pool.get('ir.model.data')
    try:
        model, action_id = model_data.get_object_reference(
            cr, uid, 'crm_partner_assign',
            'crm_lead_channel_interested_act')
    except ValueError:
        raise UserError(_("The CRM Channel Interested Action is missing"))
    action = self.pool[model].read(cr, uid, [action_id], context=context)[0]
    ctx = eval(action['context'])
    ctx['interested'] = interested
    action['context'] = str(ctx)
    return action
def satisfy_condition(self, cr, uid, rule_id, localdict, context=None):
    """
    @param rule_id: id of hr.salary.rule to be tested
    @param contract_id: id of hr.contract to be tested
    @return: returns True if the given rule match the condition for the
             given contract. Return False otherwise.
    """
    rule = self.browse(cr, uid, rule_id, context=context)
    localdict['rule'] = rule
    if rule.condition_select == 'none':
        # Unconditional rule: always applies.
        return True
    elif rule.condition_select == 'range':
        try:
            # The range expression is evaluated against localdict and the
            # result checked against the configured min/max bounds.
            result = eval(rule.condition_range, localdict)
            return rule.condition_range_min <= result and result <= rule.condition_range_max or False
        except:
            raise osv.except_osv(_('Error!'), _('Wrong range condition defined for salary rule %s (%s).')% (rule.name, rule.code))
    else:
        #python code
        try:
            eval(rule.condition_python, localdict, mode='exec', nocopy=True)
            return 'result' in localdict and localdict['result'] or False
        except Exception as e:
            # FIX: Python-2-only `except Exception, e` replaced with the
            # portable `as` form used elsewhere in this file.
            raise osv.except_osv(_('Error!'), _('Wrong python condition defined for salary rule %s (%s).')% (rule.name, rule.code) + "\n\n" + str(e))
def get_price_from_picking(self, total, weight, volume, quantity):
    """Return the delivery price from the first satisfied price rule.

    Each rule compares one variable (price/weight/volume/weight*volume/
    quantity) against its max_value; the first rule whose condition holds
    yields list_base_price + list_price * variable_factor value.
    Raises UserError when no rule matches.
    """
    price = 0.0
    criteria_found = False
    price_dict = {'price': total, 'volume': volume, 'weight': weight, 'wv': volume * weight, 'quantity': quantity}
    for line in self.price_rule_ids:
        # NOTE(review): the condition is built as a string and eval'ed
        # (presumably Odoo safe_eval); line.variable and line.operator are
        # presumably restricted selection fields — confirm before reuse.
        test = eval(line.variable + line.operator + str(line.max_value), price_dict)
        if test:
            price = line.list_base_price + line.list_price * price_dict[line.variable_factor]
            criteria_found = True
            break
    if not criteria_found:
        raise UserError(_("Selected product in the delivery method doesn't fulfill any of the delivery carrier(s) criteria."))
    return price
def merge(match):
    """Regex-substitution callback: evaluate a ``[[ expr ]]`` placeholder.

    NOTE(review): relies on closure variables (self, action, cr, uid,
    context, time, tools) from an enclosing function not visible in this
    chunk — confirm their provenance there.
    """
    obj_pool = self.pool.get(action.model_id.model)
    id = context.get('active_id')
    obj = obj_pool.browse(cr, uid, id)
    # Strip the surrounding [[ ]] delimiters from the matched text.
    exp = str(match.group()[2:-2]).strip()
    result = eval(exp, {
        'object': obj,
        'context': dict(context), # copy context to prevent side-effects of eval
        'time': time,
    })
    if result in (None, False):
        # Placeholder for empty values in the rendered output.
        return str("--------")
    return tools.ustr(result)
def _get_res_ids(self, *args):
    """Collect the record ids this export should cover.

    Depending on filter_type, either evaluates filter_domain and searches,
    or calls the configured filter_method with *args. The result is then
    intersected with context active_ids (when present) and, for `unique`
    exports, reduced by ids already exported.
    """
    model_obj = self.env[self.model_id.model]
    if self._context.get('original_cr'):
        # Run against the caller's original cursor when provided.
        model_obj = model_obj.with_env(
            self.env(cr=self._context['original_cr']))
    if self.filter_type == 'domain':
        domain = eval(self.filter_domain or '[]', self._get_eval_context())
        res_ids = set(
            model_obj.search(domain, order=self.order or '')._ids)
    else:  # elif self.filter_type == 'method':
        if not (self.filter_method and hasattr(model_obj, self.filter_method)):
            raise UserError(
                _("Can't find method: %s on object: %s")
                % (self.filter_method, self.model_id.model))
        res_ids = set(getattr(model_obj, self.filter_method)(*args))
    if 'active_ids' in self._context:
        res_ids &= set(self._context['active_ids'])
    if self.unique:
        # record_ids on each previous export is a repr'd list of ids.
        res_ids -= set(
            sum([eval(export.record_ids) for export in self.export_ids], []))
    return list(res_ids)
def create_logs(self, uid, res_model, res_ids, method, old_values=None, new_values=None, additional_log_values=None):
    """Create logs. `old_values` and `new_values` are dictionnaries, e.g:
        {RES_ID: {'FIELD': VALUE, ...}}
    """
    if old_values is None:
        old_values = EMPTY_DICT
    if new_values is None:
        new_values = EMPTY_DICT
    log_model = self.env['auditlog.log']
    http_request_model = self.env['auditlog.http.request']
    http_session_model = self.env['auditlog.http.session']
    for res_id in res_ids:
        model_model = self.env[res_model]
        obj = model_model.browse(res_id)
        rules = self.env['auditlog.rule'].\
            search([('model_id', '=',
                     self.pool._auditlog_model_cache[res_model])])
        if not rules or not rules[0].name_method:
            name = obj.name_get()
            res_name = name and name[0] and name[0][1]
        else:
            # Custom display-name expression evaluated with the record as `o`.
            res_name = eval(rules[0].name_method, locals_dict={'o': obj})
        vals = {
            'name': res_name,
            'model_id': self.pool._auditlog_model_cache[res_model],
            'res_id': res_id,
            'method': method,
            'user_id': uid}
        if rules:
            if not rules[0].not_trace_request:
                vals['http_request_id'] = \
                    http_request_model.current_http_request()
            if not rules[0].not_trace_session:
                vals['http_session_id'] = \
                    http_session_model.current_http_session()
        vals.update(additional_log_values or {})
        log = log_model.create(vals)
        diff = DictDiffer(
            new_values.get(res_id, EMPTY_DICT),
            old_values.get(res_id, EMPTY_DICT))
        # BUG FIX: the original used `method is 'create'` etc. — string
        # identity comparison, which only works by CPython interning
        # accident and is undefined behavior in general. Use equality.
        if method == 'create':
            self._create_log_line_on_create(log, diff.added(), new_values)
        elif method == 'read':
            self._create_log_line_on_read(
                log, old_values.get(res_id, EMPTY_DICT).keys(), old_values)
        elif method == 'write':
            self._create_log_line_on_write(
                log, diff.changed(), old_values, new_values)
def _get_content(self):
    """Return [(field_label, old_value, new_value)] for changed fields.

    `data` is a repr'd dict with 'old' and 'new' sub-dicts keyed by field
    name. (Python 2 code: `.keys() + .keys()` list concatenation.)
    """
    self.ensure_one()
    content = []
    data = eval(self.data or '{}')
    model_obj = self.env[self.model_id.model]
    for fname in set(data['new'].keys() + data['old'].keys()):
        # The field may live on the model itself or be inherited.
        field = model_obj._fields.get(
            fname) or model_obj._inherit_fields.get(fname)
        old_value = self._format_value(field, data['old'].get(fname, ''))
        new_value = self._format_value(field, data['new'].get(fname, ''))
        if old_value != new_value:
            # Only report fields whose formatted value actually changed.
            label = field.get_description(self.env)['string']
            content.append((label, old_value, new_value))
    return content
def load_information_from_description_file(module):
    """
    :param module: The name of the module (sale, purchase, ...)
    """
    terp_file = get_module_resource(module, '__openerp__.py')
    mod_path = get_module_path(module)
    if terp_file:
        info = {}
        if os.path.isfile(terp_file) or zipfile.is_zipfile(mod_path + '.zip'):
            # default values for descriptor
            info = {
                'application': False,
                'author': '',
                'auto_install': False,
                'category': 'Uncategorized',
                'depends': [],
                'description': '',
                'icon': get_module_icon(module),
                'installable': True,
                'license': 'AGPL-3',
                'name': False,
                'post_load': None,
                'version': '1.0',
                'web': False,
                'website': '',
                'sequence': 100,
                'summary': '',
            }
            # Pre-fill the list-valued keys with fresh empty lists:
            # iter(list, None) calls list() forever, izip stops at the
            # shorter sequence (Python 2 code).
            info.update(itertools.izip(
                'depends data demo test init_xml update_xml demo_xml'.split(),
                iter(list, None)))
            f = tools.file_open(terp_file)
            try:
                # The manifest file is a python dict literal.
                info.update(eval(f.read()))
            finally:
                f.close()
            if 'active' in info:
                # 'active' has been renamed 'auto_install'
                info['auto_install'] = info['active']
            info['version'] = adapt_version(info['version'])
            return info
    #TODO: refactor the logger in this file to follow the logging guidelines
    # for 6.0
    _logger.debug('module %s: no __openerp__.py file found.', module)
    return {}
def process_segment(self, cr, uid, segment_ids=None, context=None):
    """Create campaign workitems for records entering running segments.

    For each running segment of a running campaign, searches the target
    model (restricted by sync date/mode and the segment's ir.filter when
    set) and creates one 'todo' workitem per start activity per record,
    then updates sync_last_date and processes all touched campaigns.
    """
    Workitems = self.pool.get('marketing.campaign.workitem')
    Campaigns = self.pool.get('marketing.campaign')
    if not segment_ids:
        segment_ids = self.search(cr, uid, [('state', '=', 'running')], context=context)
    action_date = time.strftime('%Y-%m-%d %H:%M:%S')
    campaigns = set()
    for segment in self.browse(cr, uid, segment_ids, context=context):
        if segment.campaign_id.state != 'running':
            continue
        campaigns.add(segment.campaign_id.id)
        # Entry-point activities of the segment's campaign.
        act_ids = self.pool.get('marketing.campaign.activity').search(
            cr, uid,
            [('start', '=', True), ('campaign_id', '=', segment.campaign_id.id)],
            context=context)
        model_obj = self.pool[segment.object_id.model]
        criteria = []
        if segment.sync_last_date and segment.sync_mode != 'all':
            # Only records created/written since the last sync.
            criteria += [(segment.sync_mode, '>', segment.sync_last_date)]
        if segment.ir_filter_id:
            criteria += eval(segment.ir_filter_id.domain)
        object_ids = model_obj.search(cr, uid, criteria, context=context)
        # XXX TODO: rewrite this loop more efficiently without doing 1 search per record!
        for record in model_obj.browse(cr, uid, object_ids, context=context):
            # avoid duplicate workitem for the same resource
            if segment.sync_mode in ('write_date', 'all'):
                if Campaigns._find_duplicate_workitems(cr, uid, record, segment.campaign_id, context=context):
                    continue
            wi_vals = {
                'segment_id': segment.id,
                'date': action_date,
                'state': 'todo',
                'res_id': record.id
            }
            partner = self.pool.get('marketing.campaign')._get_partner_for(segment.campaign_id, record)
            if partner:
                wi_vals['partner_id'] = partner.id
            # One workitem per start activity.
            for act_id in act_ids:
                wi_vals['activity_id'] = act_id
                Workitems.create(cr, uid, wi_vals, context=context)
        self.write(cr, uid, segment.id, {'sync_last_date': action_date}, context=context)
    Workitems.process_all(cr, uid, list(campaigns), context=context)
    return True
def _update_context(self):
    """Normalize the stored action context string.

    Evaluates `context` with unquoted client-side placeholders
    (active_id / active_ids / active_model) left symbolic, injects
    act_window_id when missing, and writes the repr back. Evaluation
    failures leave the field untouched (best-effort by design).
    """
    eval_dict = {
        'active_id': unquote("active_id"),
        'active_ids': unquote("active_ids"),
        'active_model': unquote("active_model"),
        'uid': self._uid,
        'context': self._context,
    }
    try:
        context = eval(self.context or '{}', eval_dict) or {}
        if 'act_window_id' not in context:
            context['act_window_id'] = self.id
        self.context = '%s' % context
    except Exception:
        # FIX: narrowed from a bare `except:`; still best-effort, but no
        # longer swallows KeyboardInterrupt/SystemExit.
        pass
def _compute_amount(self):
    """Compute the four amount fields from the configured computation item.

    Runs the computation's python code (exec) with the document's
    localdict; the code must set result / result_extrapolation /
    result_previous / result_audited. Any evaluation error silently resets
    all four amounts to 0.0.
    """
    obj_computation = self.env["accountant.client_account_type_computation_item"]
    for document in self:
        amount = amount_extrapolation = amount_previous = amount_audited = 0.0
        criteria = [
            ("computation_id", "=", document.computation_item_id.id),
            (
                "account_type_set_id",
                "=",
                document.trial_balance_id.account_type_set_id.id,
            ),
        ]
        computations = obj_computation.search(criteria)
        if len(computations) > 0:
            # NOTE: 'phyton_code' is the (misspelled) field name on the
            # computation model.
            python_code = computations[0].phyton_code
            localdict = document._get_localdict()
            try:
                eval(
                    python_code,
                    localdict,
                    mode="exec",
                    nocopy=True,
                )
                amount = localdict["result"]
                amount_extrapolation = localdict["result_extrapolation"]
                amount_previous = localdict["result_previous"]
                amount_audited = localdict["result_audited"]
            except Exception:
                # Broken computation code: fall back to zero everywhere.
                amount = (
                    amount_extrapolation
                ) = amount_previous = amount_audited = 0.0
        document.amount = amount
        document.amount_extrapolation = amount_extrapolation
        document.amount_previous = amount_previous
        document.amount_audited = amount_audited
def _compute_balance(self):
    """Aggregate detail balances per type and extrapolate.

    Sums previous/current/interim/audited balances of the matching trial
    balance details, then runs the type's extrapolation python code
    (exec; must set `result`).

    NOTE(review): on evaluation failure the extrapolated balance falls
    back to the literal 7.0 — looks like leftover debug code; confirm the
    intended fallback value.
    """
    obj_detail = self.env["accountant.client_trial_balance_detail"]
    for document in self:
        previous_balance = (
            balance
        ) = interim_balance = extrapolation_balance = audited_balance = 0.0
        criteria = [
            ("trial_balance_id", "=", document.trial_balance_id.id),
            ("type_id", "=", document.type_id.id),
        ]
        for detail in obj_detail.search(criteria):
            previous_balance += detail.previous_balance
            balance += detail.balance
            interim_balance += detail.interim_balance
            audited_balance += detail.audited_balance
        localdict = document._get_localdict(
            previous_balance,
            balance,
            interim_balance,
            audited_balance,
        )
        try:
            eval(
                document.type_id.extrapolation_python_code,
                localdict,
                mode="exec",
                nocopy=True,
            )
            extrapolation_balance = localdict["result"]
        except Exception:
            extrapolation_balance = 7.0
        document.previous_balance = previous_balance
        document.balance = balance
        document.extrapolation_balance = extrapolation_balance
        document.audited_balance = audited_balance
def _get_line_vals(self, st, worksheet, model, line_field):
    """ Get values of this field from excel sheet """
    XLS = self.env['pabi.utils.xls']
    new_line_field, max_row = get_line_max(line_field)
    vals = {}
    # worksheet[line_field] maps a cell position to one or more field
    # names (Python 2: iteritems).
    for rc, columns in worksheet.get(line_field, {}).iteritems():
        if not isinstance(columns, list):
            # Ex. 'A1': ['field1', 'field2']
            columns = [columns]
        for field in columns:
            # Both the cell key and the field spec may carry an inline
            # eval condition, split off here.
            rc, key_eval_cond = get_field_condition(rc)
            x_field, val_eval_cond = get_field_condition(field)
            row, col = XLS.pos2idx(rc)
            out_field = '%s/%s' % (new_line_field, x_field)
            field_type = XLS._get_field_type(model, out_field)
            vals.update({out_field: []})
            # Case default value from an eval
            # Walk the column downwards from the anchor row, bounded by
            # max_row when the line_field spec declares one.
            for idx in range(row, st.nrows):
                if max_row and (idx - row) > (max_row - 1):
                    break
                value = XLS._get_cell_value(st.cell(idx, col),
                                            field_type=field_type)
                eval_context = self.get_eval_context(model=model,
                                                    value=value)
                if key_eval_cond:
                    # str() will throw cordinal not in range error
                    # value = str(eval(key_eval_cond, eval_context))
                    value = eval(key_eval_cond, eval_context)
                # Case Eval
                if val_eval_cond:
                    # value = str(eval(val_eval_cond, eval_context))
                    value = eval(val_eval_cond, eval_context)
                vals[out_field].append(value)
            # if all value in vals[out_field] == '', we don't need it
            if not filter(lambda x: x != '', vals[out_field]):
                vals.pop(out_field)
    return vals
def _filter(self, cr, uid, action, action_filter, record_ids, domain=False, context=None):
    """ Filter the list record_ids that satisfy the domain or the action filter. """
    if record_ids and (domain is not False or action_filter):
        if domain is not False:
            # An explicit domain takes precedence over the action filter.
            new_domain = [('id', 'in', record_ids)] + eval(domain)
            ctx = context
        elif action_filter:
            assert action.model == action_filter.model_id, "Filter model different from action rule model"
            new_domain = [('id', 'in', record_ids)] + eval(
                action_filter.domain)
            # The filter's own context is merged into the search context.
            ctx = dict(context or {})
            ctx.update(eval(action_filter.context))
        record_ids = self.pool[action.model].search(cr, uid, new_domain, context=ctx)
    return record_ids
def compute_rule(self, cr, uid, rule_id, localdict, context=None):
    """
    :param rule_id: id of rule to compute
    :param localdict: dictionary containing the environement in which to compute the rule
    :return: returns a tuple build as the base/amount computed, the quantity and the rate
    :rtype: (float, float, float)
    """
    rule = self.browse(cr, uid, rule_id, context=context)
    if rule.amount_select == 'fix':
        try:
            # Fixed amount: the quantity may still be an expression.
            return rule.amount_fix, eval(rule.quantity, localdict), 100.0
        except:
            raise osv.except_osv(_('Error!'), _('Wrong quantity defined for salary rule %s (%s).')% (rule.name, rule.code))
    elif rule.amount_select == 'percentage':
        try:
            # Percentage: base and quantity are both expressions.
            return eval(rule.amount_percentage_base, localdict), eval(rule.quantity, localdict), rule.amount_percentage
        except:
            raise osv.except_osv(_('Error!'), _('Wrong percentage base or quantity defined for salary rule %s (%s).')% (rule.name, rule.code))
    else:
        try:
            # Python rule: the code must set `result` and may set
            # result_qty / result_rate (defaults 1.0 / 100.0).
            eval(rule.amount_python_compute, localdict, mode='exec', nocopy=True)
            return localdict['result'], 'result_qty' in localdict and localdict['result_qty'] or 1.0, 'result_rate' in localdict and localdict['result_rate'] or 100.0
        except:
            raise osv.except_osv(_('Error!'), _('Wrong python code defined for salary rule %s (%s).')% (rule.name, rule.code))
def action_your_pipeline(self, cr, uid, context=None):
    """Return the window action opening the current user's pipeline.

    Loads the opportunities action, defaults it to the user's sales team
    (falling back to the first existing team with explanatory help text),
    and injects explicit kanban/tree/form/graph/calendar/pivot views.
    """
    IrModelData = self.pool['ir.model.data']
    action = IrModelData.xmlid_to_object(
        cr, uid, 'crm.crm_lead_opportunities_tree_view',
        context=context).read([
            'name', 'help', 'res_model', 'target', 'domain', 'context',
            'type', 'search_view_id'
        ])
    if not action:
        action = {}
    else:
        action = action[0]
    user_team_id = self.pool['res.users'].browse(
        cr, uid, uid, context=context).sale_team_id.id
    if not user_team_id:
        # User has no team: fall back to the first existing one and
        # explain the situation in the action's help text.
        user_team_id = self.search(cr, uid, [], context=context, limit=1)
        user_team_id = user_team_id and user_team_id[0] or False
        action[
            'help'] = """<p class='oe_view_nocontent_create'>Click here to add new opportunities</p><p> Looks like you are not a member of a sales team. You should add yourself as a member of one of the sales team. </p>"""
        if user_team_id:
            action[
                'help'] += "<p>As you don't belong to any sales team, Odoo opens the first one by default.</p>"
    action_context = eval(action['context'], {'uid': uid})
    if user_team_id:
        action_context.update({
            'default_team_id': user_team_id,
            'search_default_team_id': user_team_id
        })
    tree_view_id = IrModelData.xmlid_to_res_id(
        cr, uid, 'crm.crm_case_tree_view_oppor')
    form_view_id = IrModelData.xmlid_to_res_id(
        cr, uid, 'crm.crm_case_form_view_oppor')
    kanb_view_id = IrModelData.xmlid_to_res_id(
        cr, uid, 'crm.crm_case_kanban_view_leads')
    action.update({
        'views': [[kanb_view_id, 'kanban'], [tree_view_id, 'tree'],
                  [form_view_id, 'form'], [False, 'graph'],
                  [False, 'calendar'], [False, 'pivot']],
        'context': action_context,
    })
    return action
def get_actions(self, action_slot, model, res_id=False):
    """Return the actions bound to `action_slot` for `model`/`res_id`.

    Fetches the matching ir.values rows with raw SQL (performance: this is
    called very often), reads each referenced action, filters window/
    report/wizard actions by the user's groups, and keeps the first action
    registered per name.
    """
    assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
    # use a direct SQL query for performance reasons,
    # this is called very often
    # Add by Smile
    # BUG FIX: this unpacking was commented out while `cr`, `uid` and
    # `context` are used below, causing a NameError on the first call.
    cr, uid, context = self.env.args
    query = """SELECT v.id, v.name, v.value FROM ir_values v
               WHERE v.key = %s AND v.key2 = %s AND v.model = %s
                   AND (v.res_id = %s OR v.res_id IS NULL OR v.res_id = 0)
                   AND (v.window_actions IS NULL OR v.window_actions = ', , '
                        OR v.window_actions like %s)
               ORDER BY v.sequence, v.id"""
    cr.execute(query, ('action', action_slot, model, res_id or None,
                       '%%, %s, %%' % context.get('act_window_id', '')))
    results = {}
    for action in cr.dictfetchall():
        if not action['value']:
            continue  # skip if undefined
        action_model, action_id = action['value'].split(',')
        if not eval(action_id):
            continue
        fields = [field for field in self.env[action_model]._fields
                  if field not in EXCLUDED_FIELDS]
        # FIXME: needs cleanup
        try:
            action_def = self.env[action_model].browse(int(action_id)).read(fields)
            if action_def:
                if isinstance(action_def, list):
                    action_def = action_def[0]
                if action_model in ('ir.actions.report.xml', 'ir.actions.act_window', 'ir.actions.wizard'):
                    groups = action_def.get('groups_id')
                    if groups:
                        cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s',
                                   (tuple(groups), uid))
                        if not cr.fetchone():
                            if action['name'] == 'Menuitem':
                                raise UserError(_('You do not have the permission to perform this operation !!!'))
                            continue
            # keep only the first action registered for each action name
            results[action['name']] = (action['id'], action['name'], action_def)
        except (except_orm, UserError):
            continue
    return sorted(results.values())