def retry(self, cr, uid, ids, context=None):
    """Replay the export that originally failed for each report line.

    Re-evaluates the context/defaults stored on the line, flags the
    context to force the export, then calls the method mapped to the
    line's action in the model's report_action_mapping().

    :param ids: id or list of ids of report lines to retry
    :return: True
    :raise Exception: if no python method is mapped to a line's action
    """
    # ids may arrive as a single id; on python 2 XML-RPC ids can be
    # ``long``, so test both int types (consistent with the other
    # ``retry`` implementations in this file).
    if isinstance(ids, (int, long)):
        ids = [ids]
    for log in self.browse(cr, uid, ids, context=context):
        origin_context = safe_eval(log.origin_context)
        origin_defaults = safe_eval(log.origin_defaults)
        # keep the id of the line to update it with the result
        origin_context['retry_report_line_id'] = log.id
        # force export of the resource
        origin_context['force_export'] = True
        origin_context['force'] = True
        # TODO remove: not needed since magento 6.1
        origin_context['do_not_update_date'] = True
        mapping = self.pool.get(log.res_model).\
            report_action_mapping(cr, uid, context=context)
        method = mapping.get(log.action, False)
        if not method:
            raise Exception("No python method defined for action %s"
                            % (log.action, ))
        method(cr, uid, log.res_id, log.external_id,
               log.external_report_id.external_referential_id.id,
               origin_defaults, origin_context)
    return True
def retry(self, cr, uid, ids, context=None):
    """Replay the export that originally failed for each report line.

    Re-evaluates the context/defaults stored on the line, flags the
    context to force the export, then calls the method mapped to the
    line's action in the model's report_action_mapping().

    :param ids: id or list of ids of report lines to retry
    :return: True
    :raise Exception: if no python method is mapped to a line's action
    """
    # ids may arrive as a single id; on python 2 XML-RPC ids can be
    # ``long``, so test both int types (consistent with the other
    # ``retry`` implementations in this file).
    if isinstance(ids, (int, long)):
        ids = [ids]
    for log in self.browse(cr, uid, ids, context=context):
        origin_context = safe_eval(log.origin_context)
        origin_defaults = safe_eval(log.origin_defaults)
        # keep the id of the line to update it with the result
        origin_context['retry_report_line_id'] = log.id
        # force export of the resource
        origin_context['force_export'] = True
        origin_context['force'] = True
        # TODO remove: not needed since magento 6.1
        origin_context['do_not_update_date'] = True
        mapping = self.pool.get(log.res_model).\
            report_action_mapping(cr, uid, context=context)
        method = mapping.get(log.action, False)
        if not method:
            raise Exception("No python method defined for action %s"
                            % (log.action,))
        method(cr, uid, log.res_id, log.external_id,
               log.external_report_id.external_referential_id.id,
               origin_defaults, origin_context)
    return True
def _check_evalexpr(self, cr, uid, ids, context=None):
    """Constraint check: make sure the trigger expression evaluates
    cleanly against the default evaluation dictionary.

    Raises (via safe_eval) when the expression is invalid; returns True
    otherwise.
    """
    record = self.browse(cr, uid, ids[0], context=context)
    expr = record.trg_evalexpr
    if expr:
        # Newlines are tolerated in the stored expression: flatten them
        # before evaluating against an empty globals dict.
        safe_eval(expr.replace('\n', ' '), {}, DEFAULT_EVALDICT)
    return True
def build_product_field(self, cr, uid, ids, field, context=None):
    """Recompute *field* ('name' or 'description_sale') on the given
    product variants, once per translatable installed language.

    :param field: name of the product field to rebuild
    :return: True
    """
    def get_description_sale(product):
        # Render the template's sale description through the variant
        # placeholder parser.
        return self.parse(cr, uid, product,
                          product.product_tmpl_id.description_sale,
                          context=context)

    def get_name(product):
        # Template name followed by the variant axis values.
        return ((product.product_tmpl_id.name or '') + ' '
                + (product.variants or ''))

    if not context:
        context = {}
    context['is_multi_variants'] = True
    # Explicit dispatch table instead of eval("get_" + field + "(product)"):
    # safer, and resolves the old "convert to safe_eval" TODO.
    getters = {
        'description_sale': get_description_sale,
        'name': get_name,
    }
    obj_lang = self.pool.get('res.lang')
    lang_ids = obj_lang.search(cr, uid, [('translatable', '=', True)],
                               context=context)
    lang_code = [
        x['code']
        for x in obj_lang.read(cr, uid, lang_ids, ['code'], context=context)
    ]
    for code in lang_code:
        context['lang'] = code
        for product in self.browse(cr, uid, ids, context=context):
            new_field_value = getters[field](product)
            # getattr() replaces safe_eval("product." + field, ...):
            # identical result on a browse record, no evaluation needed.
            cur_field_value = getattr(product, field)
            if new_field_value != cur_field_value:
                self.write(cr, uid, [product.id],
                           {field: new_field_value}, context=context)
    return True
def get_sys_logs(cr, uid):
    """ Utility method to send a publisher warranty get logs messages.

    Collects database statistics and contract names, POSTs them to the
    configured publisher warranty server, and returns the server's reply
    parsed with safe_eval (an empty reply yields {}).
    """
    pool = pooler.get_pool(cr.dbname)
    # Identify this database instance to the warranty server.
    dbuuid = pool.get("ir.config_parameter").get_param(cr, uid, "database.uuid")
    db_create_date = pool.get("ir.config_parameter").get_param(cr, uid, "database.create_date")
    nbr_users = pool.get("res.users").search(cr, uid, [], count=True)
    contractosv = pool.get("publisher_warranty.contract")
    contracts = contractosv.browse(cr, uid, contractosv.search(cr, uid, []))
    user = pool.get("res.users").browse(cr, uid, uid)
    # Payload sent to the server as the "arg0" form field.
    msg = {
        "dbuuid": dbuuid,
        "nbr_users": nbr_users,
        "dbname": cr.dbname,
        "db_create_date": db_create_date,
        "version": release.version,
        "contracts": [c.name for c in contracts],
        "language": user.context_lang,
    }
    # urlopen() only accepts a timeout argument from python 2.6 on.
    add_arg = {"timeout": 30} if sys.version_info >= (2, 6) else {}
    arguments = {"arg0": msg, "action": "update"}
    arguments_raw = urllib.urlencode(arguments)
    url = config.get("publisher_warranty_url")
    uo = urllib2.urlopen(url, arguments_raw, **add_arg)
    try:
        submit_result = uo.read()
    finally:
        uo.close()
    # The server answers with a python literal expression.
    result = safe_eval(submit_result) if submit_result else {}
    return result
def do_check(self, cr, uid, action, obj, context=None):
    """Extend the standard rule check with the optional python trigger
    expression ``trg_evalexpr``.

    The expression is evaluated with ``old`` (the record's values before
    the write, taken from context['_action_old']) and ``new``
    (context['_action_new']) in scope.

    :return: True when the rule matches, False otherwise
    """
    # Fix: the default used to be a shared mutable ``{}``.
    if context is None:
        context = {}
    ok = super(base_action_rule, self).do_check(cr, uid, action, obj, context=context)
    if action.trg_evalexpr:
        old = None
        # Find this obj's old values in the list of previous states.
        # Fix: the previous loop kept only the *last* entry's comparison
        # result, losing a match that was not last in the list.
        for x in context.get('_action_old', []):
            if x.get('id') == obj.id:
                old = x
                break
        # Convert (id, name) tuples into the id only, so the expression
        # can compare many2one fields by id.
        for x in old or []:
            if type(old[x]) == tuple:
                old[x] = old[x][0]
        # Build dict with new and old and eval the expression.
        eval_dict = {'old': old, 'new': context.get('_action_new')}
        try:
            ok = safe_eval(action.trg_evalexpr, {}, eval_dict)
        except (ValueError, KeyError, TypeError):
            # A missing/incomparable field means the rule does not match.
            ok = False
        if ok:
            _logger.debug('Activated rule %s on record id %d.' % (action.name, obj.id))
    return ok
def create_returns(self, cr, uid, ids, context=None):
    """Create the return picking(s), then fix up the destination location
    of the generated moves and confirm/assign the new picking.

    The super() call is made with 'confirm_return' disabled so the
    returned picking can be adjusted before being confirmed here.
    """
    if context is None:
        context = {}
    move_obj = self.pool.get('stock.move')
    pick_obj = self.pool.get('stock.picking')
    mem_obj = self.pool.get('stock.return.picking.memory')
    wf_service = netsvc.LocalService("workflow")
    # Delay confirmation: we still need to rewrite destinations below.
    return_context = context.copy()
    return_context['confirm_return'] = False
    ret = super(stock_return_picking, self).create_returns(cr, uid, ids, context=return_context)
    prm_datas = self.read(cr, uid, ids, ['product_return_moves'], context=context)
    for prm_data in prm_datas:
        mem_ids = prm_data['product_return_moves']
        mem_data = mem_obj.read(cr, uid, mem_ids, ['move_id', 'location_dest_id'], context=context)
        # Map each original move to the destination chosen in the wizard.
        move_to_dest = {}
        for data in mem_data:
            move_to_dest.update({data['move_id'][0] : data['location_dest_id'][0]})
        move_ids = [mem['move_id'][0] for mem in mem_data]
        move_datas = move_obj.read(cr, uid, move_ids, ['location_dest_id','move_history_ids2'], context=context)
        # Propagate the chosen destination onto every return move created
        # by super() (linked through move_history_ids2).
        for move_data in move_datas:
            new_move_ids = move_data['move_history_ids2']
            for new_move_id in new_move_ids:
                move_id = move_data['id']
                move_obj.write(cr, uid, new_move_id, {'location_dest_id' : move_to_dest[move_id]}, context=context)
    # The super() result is an action window; its domain selects the new
    # picking(s). pop() takes one of them (assumed single).
    new_picking = pick_obj.search(cr, uid, safe_eval(ret['domain']), context=context).pop()
    wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
    pick_obj.force_assign(cr, uid, [new_picking], context)
    return ret
def retry(self, cr, uid, ids, context=None):
    """Replay the failed export for each report line.

    The callable and its keyword arguments are described by the mapping
    returned by the model's report_action_mapping(): each entry in
    method['fields'] is a python expression evaluated with 'log' and
    'self' in scope.

    :raise Exception: if no method is mapped to a line's action
    """
    if isinstance(ids, (int, long)):
        ids = [ids]
    for log in self.browse(cr, uid, ids, context=context):
        mapping = self.pool.get(log.res_model).\
            report_action_mapping(cr, uid, context=context)
        method = mapping.get(log.action, False)
        if not method:
            raise Exception("No python method defined for action %s"
                            % (log.action, ))
        # Build the keyword arguments from the mapping's expressions.
        kwargs = {}
        for field, value in method['fields'].items():
            kwargs[field] = safe_eval(value, {'log': log, 'self': self})
        if not kwargs.get('context', False):
            kwargs['context'] = {}
        # keep the id of the line to update it with the result
        kwargs['context']['retry_report_line_id'] = log.id
        # force export of the resource
        kwargs['context']['force_export'] = True
        kwargs['context']['force'] = True
        method['method'](cr, uid, **kwargs)
    return True
def _cleanup_action_context(self, context_str, user_id):
    """Returns a dict representing the context_str evaluated (safe_eval) as
    a dict where items that are not useful for shared actions have been
    removed. If the evaluation of context_str as a dict fails, context_str
    is returned unaltered.

    :param user_id: the integer uid to be passed as 'uid' in the
                    evaluation context
    """
    if not context_str:
        return False
    try:
        evaluated = safe_eval(context_str,
                              tools.UnquoteEvalContext(),
                              nocopy=True)
        # Drop every context key that toggles a default filter on the
        # current user: shared users would otherwise see no data at all.
        return dict(
            (key, value) for key, value in dict(evaluated).items()
            if not (key and key.startswith('search_default_')
                    and 'user_id' in key))
    except Exception:
        # Note: must catch all exceptions, as UnquoteEvalContext may cause
        # many different exceptions, as it shadows builtins.
        self._logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
        return context_str
def build_product_field(self, cr, uid, ids, field, context=None):
    """Recompute *field* ('name' or 'description_sale') on the given
    product variants, once per translatable installed language.

    :param field: name of the product field to rebuild
    :return: True
    """
    def get_description_sale(product):
        # Render the template's sale description through the variant
        # placeholder parser.
        description_sale = product.product_tmpl_id.description_sale
        return self.parse(cr, uid, product, description_sale,
                          context=context)

    def get_name(product):
        # Prefer pre-computed variant values passed in the context;
        # fall back to the variant's own 'variants' field.
        if context.get('variants_values', False):
            return ((product.product_tmpl_id.name or '') + ' ' +
                    (context['variants_values'][product.id] or ''))
        return (product.product_tmpl_id.name or '') + ' ' + (product.variants or '')

    if not context:
        context = {}
    context['is_multi_variants'] = True
    # Explicit dispatch table instead of eval("get_" + field + "(product)"):
    # safer, and resolves the old "convert to safe_eval" TODO.
    getters = {
        'description_sale': get_description_sale,
        'name': get_name,
    }
    obj_lang = self.pool.get('res.lang')
    lang_ids = obj_lang.search(cr, uid, [('translatable', '=', True)],
                               context=context)
    langs = obj_lang.read(cr, uid, lang_ids, ['code'], context=context)
    lang_code = [x['code'] for x in langs]
    for code in lang_code:
        context['lang'] = code
        for product in self.browse(cr, uid, ids, context=context):
            new_field_value = getters[field](product)
            # getattr() replaces safe_eval("product." + field, ...):
            # identical result on a browse record, no evaluation needed.
            cur_field_value = getattr(product, field)
            if new_field_value != cur_field_value:
                self.write(cr, uid, product.id,
                           {field: new_field_value}, context=context)
    return True
def update_menu(self, cr, uid, action_report, context=None):
    """Create or synchronise the menu entry attached to a report action.

    Deletes a previously created menu when the report is no longer linked
    to a parent menu; otherwise creates the menu on first call and updates
    the menu + its window action afterwards.

    :return: id of the created/updated menu, or 0 when no menu is linked
    """
    if action_report.created_menu_id and not action_report.linked_menu_id:
        self.delete_menu(cr, uid, action_report.created_menu_id.id, context=context)
    if action_report.linked_menu_id:
        # many2many "replace" command with the report's group ids.
        groups_id = [(6, 0, map(lambda x: x.id, action_report.groups_id))]
        if not action_report.created_menu_id:
            result = self.create_menu(cr, uid, {'name' : action_report.name,
                                                'linked_menu_id': action_report.linked_menu_id.id,
                                                'report_name' : action_report.report_name,
                                                'groups_id' : groups_id,
                                                }, context=context)
        else:
            action = action_report.created_menu_id.action
            if action and action._model._name == 'ir.actions.act_window':
                # Keep the existing action context, only refreshing the
                # service name pointing at the report.
                existing_context = safe_eval(self.pool.get('ir.actions.act_window').browse(cr, uid, action.id, context=context).context)
                new_context = existing_context if type(existing_context) == dict else {}
                new_context['service_name'] = action_report.report_name or ''
                self.pool.get('ir.actions.act_window').write(cr, uid, [action.id], {'name' : action_report.name or 'Pentaho Report',
                                                                                    'context' : str(new_context),
                                                                                    }, context=context)
            self.pool.get('ir.ui.menu').write(cr, uid, [action_report.created_menu_id.id], {'name' : action_report.name or 'Pentaho Report',
                                                                                            'parent_id' : action_report.linked_menu_id.id,
                                                                                            'groups_id' : groups_id,
                                                                                            }, context=context)
            result = action_report.created_menu_id.id
    else:
        result = 0
    return result
def retry(self, cr, uid, ids, context=None):
    """Replay the failed export for each report line.

    The callable and its keyword arguments are described by the mapping
    returned by the model's report_action_mapping(): each entry in
    method['fields'] is a python expression evaluated with 'log' and
    'self' in scope.

    :raise Exception: if no method is mapped to a line's action
    """
    if isinstance(ids, (int, long)):
        ids = [ids]
    for log in self.browse(cr, uid, ids, context=context):
        mapping = self.pool.get(log.res_model).\
            report_action_mapping(cr, uid, context=context)
        method = mapping.get(log.action, False)
        if not method:
            raise Exception("No python method defined for action %s"
                            % (log.action,))
        # Build the keyword arguments from the mapping's expressions.
        kwargs = {}
        for field, value in method['fields'].items():
            kwargs[field] = safe_eval(value, {'log': log, 'self': self})
        if not kwargs.get('context', False):
            kwargs['context'] = {}
        # keep the id of the line to update it with the result
        kwargs['context']['retry_report_line_id'] = log.id
        # force export of the resource
        kwargs['context']['force_export'] = True
        kwargs['context']['force'] = True
        method['method'](cr, uid, **kwargs)
    return True
def do_check(self, cr, uid, action, obj, context=None):
    """Extend the standard rule check with the optional python trigger
    expression ``trg_evalexpr``.

    The expression is evaluated with ``old`` (the record's values before
    the write, taken from context['_action_old']) and ``new``
    (context['_action_new']) in scope.

    :return: True when the rule matches, False otherwise
    """
    # Fix: the default used to be a shared mutable ``{}``.
    if context is None:
        context = {}
    ok = super(base_action_rule, self).do_check(cr, uid, action, obj, context=context)
    if action.trg_evalexpr:
        old = None
        # Find this obj's old values in the list of previous states.
        # Fix: the previous loop kept only the *last* entry's comparison
        # result, losing a match that was not last in the list.
        for x in context.get('_action_old', []):
            if x.get('id') == obj.id:
                old = x
                break
        # Convert (id, name) tuples into the id only, so the expression
        # can compare many2one fields by id.
        for x in old or []:
            if type(old[x]) == tuple:
                old[x] = old[x][0]
        # Build dict with new and old and eval the expression.
        eval_dict = {'old': old, 'new': context.get('_action_new')}
        try:
            ok = safe_eval(action.trg_evalexpr, {}, eval_dict)
        except (ValueError, KeyError, TypeError):
            # A missing/incomparable field means the rule does not match.
            ok = False
        if ok:
            _logger.debug('Activated rule %s on record id %d.' % (action.name, obj.id))
    return ok
def get_sys_logs(cr, uid):
    """ Utility method to send a publisher warranty get logs messages.

    Collects database and user statistics (including share users when the
    'share' field exists) plus contract names, POSTs them to the
    configured publisher warranty server, and returns the server's reply
    parsed with safe_eval (an empty reply yields {}).
    """
    pool = pooler.get_pool(cr.dbname)
    # Identify this database instance to the warranty server.
    dbuuid = pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
    db_create_date = pool.get('ir.config_parameter').get_param(
        cr, uid, 'database.create_date')
    # A user is considered active when logged in within the last 15 days.
    limit_date = datetime.datetime.now()
    limit_date = limit_date - datetime.timedelta(15)
    limit_date_str = limit_date.strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT)
    nbr_users = pool.get("res.users").search(cr, uid, [], count=True)
    nbr_active_users = pool.get("res.users").search(
        cr, uid, [("date", ">=", limit_date_str)], count=True)
    nbr_share_users = False
    nbr_active_share_users = False
    # 'share' only exists when the share module is installed.
    if "share" in pool.get("res.users")._columns \
            or "share" in pool.get("res.users")._inherit_fields:
        nbr_share_users = pool.get("res.users").search(cr, uid, [("share", "=", True)], count=True)
        nbr_active_share_users = pool.get("res.users").search(
            cr, uid, [("share", "=", True), ("date", ">=", limit_date_str)], count=True)
    contractosv = pool.get('publisher_warranty.contract')
    contracts = contractosv.browse(cr, uid, contractosv.search(cr, uid, []))
    user = pool.get("res.users").browse(cr, uid, uid)
    # Payload sent to the server as the "arg0" form field.
    msg = {
        "dbuuid": dbuuid,
        "nbr_users": nbr_users,
        "nbr_active_users": nbr_active_users,
        "nbr_share_users": nbr_share_users,
        "nbr_active_share_users": nbr_active_share_users,
        "dbname": cr.dbname,
        "db_create_date": db_create_date,
        "version": release.version,
        "contracts": [c.name for c in contracts],
        "language": user.context_lang,
    }
    # urlopen() only accepts a timeout argument from python 2.6 on.
    add_arg = {"timeout": 30} if sys.version_info >= (2, 6) else {}
    arguments = {
        'arg0': msg, "action": "update",
    }
    arguments_raw = urllib.urlencode(arguments)
    url = config.get("publisher_warranty_url")
    uo = urllib2.urlopen(url, arguments_raw, **add_arg)
    try:
        submit_result = uo.read()
    finally:
        uo.close()
    # The server answers with a python literal expression.
    result = safe_eval(submit_result) if submit_result else {}
    return result
def _create_indirect_sharing_rules(self, cr, uid, wizard_data, group_id, fields_relations, context=None):
    """Create ir.rule records restricting *related* models for the shared
    group.

    For each (relation field, model) pair, the wizard's domain is rewritten
    so every leaf's left operand is prefixed with the relation field,
    producing an indirect filter on the related model.

    :raise osv.except_osv: when the domain cannot be translated
    """
    user_obj = self.pool.get('res.users')
    current_user = user_obj.browse(cr, uid, uid, context=context)
    rule_obj = self.pool.get('ir.rule')
    try:
        domain = safe_eval(wizard_data.domain)
        if domain:
            domain_expr = expression(domain)
            for rel_field, model in fields_relations:
                related_domain = []
                for element in domain:
                    # Leaves get their field prefixed; operators ('&', '|')
                    # are appended unchanged.
                    if domain_expr._is_leaf(element):
                        left, operator, right = element
                        left = '%s.%s'%(rel_field, left)
                        element = left, operator, right
                    related_domain.append(element)
                # Rules are created as the superuser (uid 1).
                rule_obj.create(cr, 1, {
                    'name': _('Indirect sharing filter created by user %s (%s) for group %s') % \
                        (current_user.name, current_user.login, group_id),
                    'model_id': model.id,
                    'domain_force': str(related_domain),
                    'groups': [(4,group_id)]
                })
                self.__logger.debug("Created indirect rule on model %s with domain: %s", model.model, repr(related_domain))
    except Exception:
        self.__logger.exception('Failed to create share access')
        raise osv.except_osv(_('Sharing access could not be setup'),
                             _('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def send(self, cr, uid, tb, explanations, remarks=None, issue_name=None):
    """ Method called by the client to send a problem to the publisher
    warranty server.

    NOTE(review): the early ``return True`` below ("KGB") deliberately
    disables this method; everything after it is dead code kept for
    reference.
    """
    return True # KGB
    if not remarks:
        remarks = ""
    valid_contracts = self._get_valid_contracts(cr, uid)
    valid_contract = valid_contracts[0]
    try:
        origin = 'client'
        dbuuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        db_create_date = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.create_date')
        user = self.pool.get("res.users").browse(cr, uid, uid)
        user_name = user.name
        email = user.user_email
        # Payload describing the issue, sent as the "arg0" form field.
        msg = {'contract_name': valid_contract.name,
               'tb': tb,
               'explanations': explanations,
               'remarks': remarks,
               'origin': origin,
               'dbname': cr.dbname,
               'dbuuid': dbuuid,
               'db_create_date': db_create_date,
               'issue_name': issue_name,
               'email': email,
               'user_name': user_name,
               }
        # urlopen() only accepts a timeout argument from python 2.6 on.
        add_arg = {"timeout":30} if sys.version_info >= (2,6) else {}
        uo = urllib2.urlopen(config.get("publisher_warranty_url"),
                             urllib.urlencode({'arg0': msg, "action": "send",}),**add_arg)
        try:
            submit_result = uo.read()
        finally:
            uo.close()
        # The server answers with the id of the created CRM case.
        result = safe_eval(submit_result)
        crm_case_id = result
        if not crm_case_id:
            return False
    except osv.except_osv:
        raise
    except Exception:
        _logger.warning("Error sending problem report", exc_info=1)
        raise osv.except_osv(_("Error"), _("Error during communication with the publisher warranty server."))
    return True
def get_sys_logs(cr, uid):
    """ Utility method to send a publisher warranty get logs messages.

    NOTE(review): the early ``return False`` below ("KGB") deliberately
    disables this method; everything after it is dead code kept for
    reference.
    """
    return False # KGB
    pool = pooler.get_pool(cr.dbname)
    dbuuid = pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
    db_create_date = pool.get('ir.config_parameter').get_param(cr, uid, 'database.create_date')
    # A user is considered active when logged in within the last 15 days.
    limit_date = datetime.datetime.now()
    limit_date = limit_date - datetime.timedelta(15)
    limit_date_str = limit_date.strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT)
    nbr_users = pool.get("res.users").search(cr, uid, [], count=True)
    nbr_active_users = pool.get("res.users").search(cr, uid, [("date", ">=", limit_date_str)], count=True)
    nbr_share_users = False
    nbr_active_share_users = False
    # 'share' only exists when the share module is installed.
    if "share" in pool.get("res.users")._all_columns:
        nbr_share_users = pool.get("res.users").search(cr, uid, [("share", "=", True)], count=True)
        nbr_active_share_users = pool.get("res.users").search(cr, uid, [("share", "=", True), ("date", ">=", limit_date_str)], count=True)
    contractosv = pool.get('publisher_warranty.contract')
    contracts = contractosv.browse(cr, uid, contractosv.search(cr, uid, []))
    user = pool.get("res.users").browse(cr, uid, uid)
    msg = {
        "dbuuid": dbuuid,
        "nbr_users": nbr_users,
        "nbr_active_users": nbr_active_users,
        "nbr_share_users": nbr_share_users,
        "nbr_active_share_users": nbr_active_share_users,
        "dbname": cr.dbname,
        "db_create_date": db_create_date,
        "version": release.version,
        "contracts": [c.name for c in contracts],
        "language": user.context_lang,
    }
    # Company id 1 is assumed to be the main company here.
    msg.update(pool.get("res.company").read(cr,uid,[1],["name","email","phone"])[0])
    # urlopen() only accepts a timeout argument from python 2.6 on.
    add_arg = {"timeout":30} if sys.version_info >= (2,6) else {}
    arguments = {'arg0': msg, "action": "update",}
    arguments_raw = urllib.urlencode(arguments)
    url = config.get("publisher_warranty_url")
    uo = urllib2.urlopen(url, arguments_raw, **add_arg)
    try:
        submit_result = uo.read()
    finally:
        uo.close()
    if not submit_result:
        raise IOError('Invalid result')
    # The server answers with a python literal expression.
    result = safe_eval(submit_result)
    return result
def send(self, cr, uid, tb, explanations, remarks=None, issue_name=None):
    """ Method called by the client to send a problem to the publisher
    warranty server.

    Builds an issue payload from the first valid contract and the current
    user, POSTs it, and returns True on success or False when the server
    did not create a CRM case.

    :raise osv.except_osv: on communication errors with the server
    """
    if not remarks:
        remarks = ""
    valid_contracts = self._get_valid_contracts(cr, uid)
    valid_contract = valid_contracts[0]
    try:
        origin = "client"
        dbuuid = self.pool.get("ir.config_parameter").get_param(cr, uid, "database.uuid")
        db_create_date = self.pool.get("ir.config_parameter").get_param(cr, uid, "database.create_date")
        user = self.pool.get("res.users").browse(cr, uid, uid)
        user_name = user.name
        email = user.email
        # Payload describing the issue, sent as the "arg0" form field.
        msg = {
            "contract_name": valid_contract.name,
            "tb": tb,
            "explanations": explanations,
            "remarks": remarks,
            "origin": origin,
            "dbname": cr.dbname,
            "dbuuid": dbuuid,
            "db_create_date": db_create_date,
            "issue_name": issue_name,
            "email": email,
            "user_name": user_name,
        }
        # urlopen() only accepts a timeout argument from python 2.6 on.
        add_arg = {"timeout": 30} if sys.version_info >= (2, 6) else {}
        uo = urllib2.urlopen(
            config.get("publisher_warranty_url"),
            urllib.urlencode({"arg0": msg, "action": "send"}),
            **add_arg
        )
        try:
            submit_result = uo.read()
        finally:
            uo.close()
        # The server answers with the id of the created CRM case.
        result = safe_eval(submit_result)
        crm_case_id = result
        if not crm_case_id:
            return False
    except osv.except_osv:
        raise
    except Exception:
        _logger.warning("Error sending problem report", exc_info=1)
        raise osv.except_osv(_("Error"), _("Error during communication with the publisher warranty server."))
    return True
def _func(*args, **kwds):
    """Wrapper: call the wrapped function, then refresh the module-level
    FONTNAME from the 'fonts_map' config parameter.

    NOTE(review): args[1] and args[2] are presumably the registry pool and
    database cursor of the wrapped call -- confirm against the functions
    this wrapper decorates.
    """
    func(*args, **kwds)
    global FONTNAME
    try:
        fonts_map = args[1].get('ir.config_parameter').get_param(args[2], 1, 'fonts_map')
        fonts_map = (safe_eval(fonts_map))
        if fonts_map.get('wrap', False):
            FONTNAME = fonts_map['maps'][0][1].encode('utf-8')
    except:
        # Deliberate best-effort: keep the previous FONTNAME when the
        # parameter is missing or malformed.
        pass
def parse(self, cr, uid, o, text, context=None):
    """Expand every ``[_ expression _]`` placeholder in *text*.

    Each expression is evaluated with ``o`` and ``context`` in scope and
    its result (or '' when falsy) replaces the placeholder; the expanded
    string is returned ('' for empty input).
    """
    if not text:
        return ''
    pieces = []
    for chunk in text.split('[_'):
        if '_]' not in chunk:
            pieces.append(chunk)
            continue
        parts = chunk.split('_]')
        value = safe_eval(parts[0], {'o': o, 'context': context}) or ''
        pieces.append(value + parts[1])
    return ''.join(pieces)
def compute_product_dimension_extra_price(self, cr, uid, product_id, product_price_extra=False, dim_price_margin=False, dim_price_extra=False, context=None):
    """Sum the price impact of every dimension value of a product.

    The three optional arguments name the fields to read (on the product
    and on each dimension value); which combination is provided selects
    the pricing formula: extra * margin + extra, extra only,
    extra * margin, or extra + extra.

    When context['uom'] is set, the total is converted from the product's
    sale (or default) unit of measure to that uom.

    :return: the accumulated extra price as a float
    """
    if context is None:
        context = {}
    dimension_extra = 0.0
    product = self.browse(cr, uid, product_id, context=context)
    for dim in product.dimension_value_ids:
        if product_price_extra and dim_price_margin and dim_price_extra:
            dimension_extra += safe_eval('product.' + product_price_extra, {'product': product}) * safe_eval('dim.' + dim_price_margin, {'dim': dim}) + safe_eval('dim.' + dim_price_extra, {'dim': dim})
        elif not product_price_extra and not dim_price_margin and dim_price_extra:
            dimension_extra += safe_eval('dim.' + dim_price_extra, {'dim': dim})
        elif product_price_extra and dim_price_margin and not dim_price_extra:
            dimension_extra += safe_eval('product.' + product_price_extra, {'product': product}) * safe_eval('dim.' + dim_price_margin, {'dim': dim})
        elif product_price_extra and not dim_price_margin and dim_price_extra:
            dimension_extra += safe_eval('product.' + product_price_extra, {'product': product}) + safe_eval('dim.' + dim_price_extra, {'dim': dim})
    if 'uom' in context:
        product_uom_obj = self.pool.get('product.uom')
        uom = product.uos_id or product.uom_id
        dimension_extra = product_uom_obj._compute_price(cr, uid, uom.id, dimension_extra, context['uom'])
    return dimension_extra
def _prepare_field(self, cr, uid, field_name, field_value, eval_ctx,
                   max_size=0, gen_args=None, context=None):
    '''This function is designed to be inherited !

    Evaluate *field_value* (a python expression) against *eval_ctx*,
    optionally transliterate it to ASCII and strip characters banks
    reject, validate it is a non-empty string, and truncate it to
    *max_size* when given.

    :param field_name: human-readable name used in error messages
    :param max_size: maximum length of the result (0 = unlimited)
    :raise osv.except_osv: when evaluation fails, the result is not a
        string, or the result is empty
    '''
    if gen_args is None:
        gen_args = {}
    assert isinstance(eval_ctx, dict), 'eval_ctx must contain a dict'
    try:
        value = safe_eval(field_value, eval_ctx)
        # SEPA uses XML ; XML = UTF-8 ; UTF-8 = support for all characters
        # But we are dealing with banks...
        # and many banks don't want non-ASCCI characters !
        # cf section 1.4 "Character set" of the SEPA Credit Transfer
        # Scheme Customer-to-bank guidelines
        if gen_args.get('convert_to_ascii'):
            value = unidecode(value)
            unallowed_ascii_chars = [
                '"', '#', '$', '%', '&', '*', ';', '<', '>', '=', '@',
                '[', ']', '^', '_', '`', '{', '}', '|', '~', '\\', '!'
            ]
            for unallowed_ascii_char in unallowed_ascii_chars:
                value = value.replace(unallowed_ascii_char, '-')
    # Fix: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        line = eval_ctx.get('line')
        if line:
            raise osv.except_osv(
                _('Error:'),
                _("Cannot compute the '%s' of the Payment Line with "
                  "reference '%s'.") % (field_name, line.name))
        else:
            raise osv.except_osv(
                _('Error:'),
                _("Cannot compute the '%s'.") % field_name)
    if not isinstance(value, (str, unicode)):
        raise osv.except_osv(
            _('Field type error:'),
            _("The type of the field '%s' is %s. It should be a string "
              "or unicode.") % (field_name, type(value)))
    if not value:
        raise osv.except_osv(
            _('Error:'),
            _("The '%s' is empty or 0. It should have a non-null value.")
            % field_name)
    if max_size and len(value) > max_size:
        value = value[0:max_size]
    return value
def eval(self, record, expr):
    """Evaluate *expr* with the browse record available as ``obj``.

    :param record: browse record exposed to the expression as 'obj'
    :param expr: python expression to evaluate with safe_eval
    :return: the expression's value
    """
    #TODO: support remote variables (eg address.title) in expr
    # how to do that: parse the string, find dots, replace those dotted variables by temporary
    # "simple ones", fetch the value of those variables and add them (temporarily) to the _data
    # dictionary passed to eval
    #FIXME: it wont work if the data hasn't been fetched yet... this could
    # happen if the eval node is the first one using this browse_record
    # the next line is a workaround for the problem: it causes the resource to be loaded
    #Pinky: Why not this ? eval(expr, browser) ?
    #       name = browser.name
    #       data_dict = browser._data[self.get_value(browser, 'id')]
    return safe_eval(expr, {}, {'obj': record})
def update_menu(self, cr, uid, action_report, context=None):
    """Create or synchronise the menu entry attached to a report action.

    Deletes a previously created menu when the report is no longer linked
    to a parent menu; otherwise creates the menu on first call and updates
    the menu + its window action afterwards.

    :return: id of the created/updated menu, or 0 when no menu is linked
    """
    if action_report.created_menu_id and not action_report.linked_menu_id:
        self.delete_menu(cr, uid, action_report.created_menu_id.id,
                         context=context)
    if action_report.linked_menu_id:
        # many2many "replace" command with the report's group ids.
        groups_id = [(6, 0, map(lambda x: x.id, action_report.groups_id))]
        if not action_report.created_menu_id:
            result = self.create_menu(
                cr, uid, {
                    'name': action_report.name,
                    'linked_menu_id': action_report.linked_menu_id.id,
                    'report_name': action_report.report_name,
                    'groups_id': groups_id,
                },
                context=context)
        else:
            action = action_report.created_menu_id.action
            if action and action._model._name == 'ir.actions.act_window':
                # Keep the existing action context, only refreshing the
                # service name pointing at the report.
                existing_context = safe_eval(
                    self.pool.get('ir.actions.act_window').browse(
                        cr, uid, action.id, context=context).context)
                new_context = existing_context if type(
                    existing_context) == dict else {}
                new_context[
                    'service_name'] = action_report.report_name or ''
                self.pool.get('ir.actions.act_window').write(
                    cr, uid, [action.id], {
                        'name': action_report.name or 'Pentaho Report',
                        'context': str(new_context),
                    },
                    context=context)
            self.pool.get('ir.ui.menu').write(
                cr, uid, [action_report.created_menu_id.id], {
                    'name': action_report.name or 'Pentaho Report',
                    'parent_id': action_report.linked_menu_id.id,
                    'groups_id': groups_id,
                },
                context=context)
            result = action_report.created_menu_id.id
    else:
        result = 0
    return result
def parse(self, cr, uid, o, text, context=None):
    """Expand every ``[_ expression _]`` placeholder in *text*.

    Each expression is evaluated with ``o`` and ``context`` in scope; a
    failing expression is logged and contributes nothing to the output.

    :return: the expanded description string ('' for empty input)
    """
    if not text:
        return ''
    vals = text.split('[_')
    description = ''
    for val in vals:
        if '_]' in val:
            sub_val = val.split('_]')
            try:
                description += (safe_eval(sub_val[0],
                                          {'o': o, 'context': context})
                                or '') + sub_val[1]
            # Fix: was a bare "except:", which also swallowed SystemExit
            # and KeyboardInterrupt.
            except Exception:
                LOGGER.notifyChannel(
                    'product_variant_multi', netsvc.LOG_ERROR,
                    "%s can't eval. Description is blank" % (sub_val[0]))
        else:
            description += val
    return description
def _action_calc_formula(self, cr, uid, ids, field_names, args, context):
    """Functional field: evaluate each test's formula against its
    quantitative line values.

    Returns {test_id: value}, defaulting to 0 when the test has no
    formula or the formula references an undefined line name.
    """
    result = dict.fromkeys(ids, 0)
    for test in self.browse(cr, uid, ids, context):
        # Expose every quantitative line's actual value under its name.
        namespace = {}
        for line in test.test_line_ids:
            if line.name and line.proof_type == 'quantitative':
                namespace[line.name] = line.actual_value_qt
        if not test.formula:
            result[test.id] = 0
            continue
        try:
            result[test.id] = safe_eval(test.formula, namespace)
        except NameError:
            # A referenced line value is missing: keep the default 0.
            pass
    return result
def compute_product_dimension_extra_price(self, cr, uid, product_id,
                                          product_price_extra=False,
                                          dim_price_margin=False,
                                          dim_price_extra=False,
                                          context=None):
    """Sum the price impact of every dimension value of a product.

    The three optional arguments name the fields to read (on the product
    and on each dimension value); which combination is provided selects
    the pricing formula: extra * margin + extra, extra only,
    extra * margin, or extra + extra.

    When context['uom'] is set, the total is converted from the product's
    sale (or default) unit of measure to that uom.

    :return: the accumulated extra price as a float
    """
    if context is None:
        context = {}
    dimension_extra = 0.0
    product = self.browse(cr, uid, product_id, context=context)
    for dim in product.dimension_value_ids:
        if product_price_extra and dim_price_margin and dim_price_extra:
            dimension_extra += safe_eval(
                'product.' + product_price_extra,
                {'product': product}) * safe_eval(
                    'dim.' + dim_price_margin, {'dim': dim}) + safe_eval(
                        'dim.' + dim_price_extra, {'dim': dim})
        elif not product_price_extra and not dim_price_margin and dim_price_extra:
            dimension_extra += safe_eval('dim.' + dim_price_extra,
                                         {'dim': dim})
        elif product_price_extra and dim_price_margin and not dim_price_extra:
            dimension_extra += safe_eval('product.' + product_price_extra,
                                         {'product': product}) * safe_eval(
                                             'dim.' + dim_price_margin,
                                             {'dim': dim})
        elif product_price_extra and not dim_price_margin and dim_price_extra:
            dimension_extra += safe_eval('product.' + product_price_extra,
                                         {'product': product}) + safe_eval(
                                             'dim.' + dim_price_extra,
                                             {'dim': dim})
    if 'uom' in context:
        product_uom_obj = self.pool.get('product.uom')
        uom = product.uos_id or product.uom_id
        dimension_extra = product_uom_obj._compute_price(
            cr, uid, uom.id, dimension_extra, context['uom'])
    return dimension_extra
def get_price_from_picking(self, cr, uid, id, total, weight, volume, context=None):
    """Return the delivery price for a picking from the grid's lines.

    Each grid line carries a condition (field name + operator + max
    value) evaluated against the picking's totals; the first matching
    line gives the price, either fixed or proportional to one of the
    measured factors.

    :param total: untaxed amount of the picking
    :param weight: total weight
    :param volume: total volume
    :raise osv.except_osv: when no grid line matches
    """
    # Fix: the default used to be a shared mutable ``{}``.
    if context is None:
        context = {}
    grid = self.browse(cr, uid, id, context)
    price = 0.0
    ok = False
    # Loop-invariant: the measured values do not change per line.
    price_dict = {'price': total, 'volume': volume, 'weight': weight,
                  'wv': volume * weight}
    for line in grid.line_ids:
        # e.g. "weight<=10.0" evaluated against the measured values.
        test = safe_eval(line.type + line.operator + str(line.max_value),
                         price_dict)
        if test:
            if line.price_type == 'variable':
                price = line.list_price * price_dict[line.variable_factor]
            else:
                price = line.list_price
            ok = True
            break
    if not ok:
        raise osv.except_osv(_('No price available !'),
                             _('No line matched this order in the choosed delivery grids !'))
    return price
def search(self, cr, uid, args, offset=0, limit=None, order=None,
           context=None, count=False):
    """Menu search overridden for portal users.

    When a non-admin user belonging to a portal searches for top-level
    menus, the search domain is replaced by the domain of the portal's
    menu action, so the user only sees the portal's menu tree.
    """
    if context is None:
        context = {}
    # if the user belongs to a portal, we have to rewrite any search on the
    # top menus to be under the portal's parent menu
    if not context.get('ir.ui.menu.full_list') and uid != 1 and \
            args == [('parent_id', '=', False)]:
        portal_obj = self.pool.get('res.portal')
        portal_ids = portal_obj.search(cr, uid, [('users', 'in', uid)])
        if portal_ids:
            if len(portal_ids) > 1:
                # A user should belong to a single portal; warn and use
                # the first one found.
                log = logging.getLogger('ir.ui.menu')
                log.warning('User %s belongs to several portals', str(uid))
            p = portal_obj.browse(cr, uid, portal_ids[0])
            # if the portal overrides the menu, use its domain
            if p.menu_action_id:
                args = safe_eval(p.menu_action_id.domain)
    return super(portal_menu, self).search(cr, uid, args, offset=offset,
                                           limit=limit, order=order,
                                           context=context, count=count)
def parse(self, cr, uid, o, text, context=None):
    """Expand every ``[_ expression _]`` placeholder in *text*.

    Each expression is evaluated with ``o`` and ``context`` in scope; a
    failing expression is logged and contributes nothing to the output.

    :return: the expanded description string ('' for empty input)
    """
    if not text:
        return ''
    vals = text.split('[_')
    description = ''
    for val in vals:
        if '_]' in val:
            sub_val = val.split('_]')
            try:
                description += (safe_eval(sub_val[0], {
                    'o': o,
                    'context': context
                }) or '') + sub_val[1]
            # Fix: was a bare "except:", which also swallowed SystemExit
            # and KeyboardInterrupt.
            except Exception:
                LOGGER.notifyChannel(
                    'product_variant_multi', netsvc.LOG_ERROR,
                    "%s can't eval. Description is blank" % (sub_val[0]))
        else:
            description += val
    return description
def get_sys_logs(cr, uid):
    """ Utility method to send a publisher warranty get logs messages.

    Collects database statistics and contract names, POSTs them to the
    configured publisher warranty server, and returns the server's reply
    parsed with safe_eval (an empty reply yields {}).
    """
    pool = pooler.get_pool(cr.dbname)
    # Identify this database instance to the warranty server.
    dbuuid = pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
    db_create_date = pool.get('ir.config_parameter').get_param(
        cr, uid, 'database.create_date')
    nbr_users = pool.get("res.users").search(cr, uid, [], count=True)
    contractosv = pool.get('publisher_warranty.contract')
    contracts = contractosv.browse(cr, uid, contractosv.search(cr, uid, []))
    user = pool.get("res.users").browse(cr, uid, uid)
    # Payload sent to the server as the "arg0" form field.
    msg = {
        "dbuuid": dbuuid,
        "nbr_users": nbr_users,
        "dbname": cr.dbname,
        "db_create_date": db_create_date,
        "version": release.version,
        "contracts": [c.name for c in contracts],
        "language": user.context_lang,
    }
    # urlopen() only accepts a timeout argument from python 2.6 on.
    add_arg = {"timeout": 30} if sys.version_info >= (2, 6) else {}
    arguments = {
        'arg0': msg, "action": "update",
    }
    arguments_raw = urllib.urlencode(arguments)
    url = config.get("publisher_warranty_url")
    uo = urllib2.urlopen(url, arguments_raw, **add_arg)
    try:
        submit_result = uo.read()
    finally:
        uo.close()
    # The server answers with a python literal expression.
    result = safe_eval(submit_result) if submit_result else {}
    return result
def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
    """Create/combine sharing rules restricting *related* models for the
    shared group.

    For each (relation field, model) pair, the wizard's domain is
    rewritten so every leaf's left operand is prefixed with the relation
    field, producing an indirect filter on the related model.

    :raise osv.except_osv: when the domain cannot be translated
    """
    rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
        (current_user.name, current_user.login, group_id)
    try:
        domain = safe_eval(wizard_data.domain)
        if domain:
            for rel_field, model in fields_relations:
                related_domain = []
                # Skip pairs without a relation field to prefix with.
                if not rel_field:
                    continue
                for element in domain:
                    # Leaves get their field prefixed; operators ('&', '|')
                    # are appended unchanged.
                    if expression.is_leaf(element):
                        left, operator, right = element
                        left = '%s.%s'%(rel_field, left)
                        element = left, operator, right
                    related_domain.append(element)
                self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
                     group_id, model_id=model.id, domain=str(related_domain),
                     rule_name=rule_name, restrict=True, context=context)
    except Exception:
        self._logger.exception('Failed to create share access')
        raise osv.except_osv(_('Sharing access could not be created'),
                             _('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def _create_indirect_sharing_rules(self, cr, uid, wizard_data, group_id, fields_relations, context=None):
    """Create ir.rule records restricting related models for the sharing group.

    Every leaf of the shared filter's domain is rewritten onto the
    relation field (``field`` -> ``rel_field.field``) and stored as a
    ``domain_force`` rule attached to *group_id*.  Raises except_osv
    when the domain cannot be translated.
    """
    current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    rule_obj = self.pool.get('ir.rule')
    try:
        domain = safe_eval(wizard_data.domain)
        if domain:
            checker = expression(domain)
            for rel_field, model in fields_relations:
                rewritten = []
                for term in domain:
                    if checker._is_leaf(term):
                        lhs, op, rhs = term
                        term = ('%s.%s' % (rel_field, lhs), op, rhs)
                    rewritten.append(term)
                rule_values = {
                    'name': _('Indirect sharing filter created by user %s (%s) for group %s') % (
                        current_user.name, current_user.login, group_id),
                    'model_id': model.id,
                    'domain_force': str(rewritten),
                    'groups': [(4, group_id)],
                }
                # Created with uid 1: the sharing user normally has no
                # right to write ir.rule records himself.
                rule_obj.create(cr, 1, rule_values)
                self.__logger.debug(
                    "Created indirect rule on model %s with domain: %s",
                    model.model, repr(rewritten))
    except Exception:
        self.__logger.exception('Failed to create share access')
        raise osv.except_osv(
            _('Sharing access could not be setup'),
            _('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def extractProperties(self):
    """Parse the .jrxml report file and fill in this report's metadata:
    query language, OPENERP_* property tags (relations, copies field,
    copies, header flag), field list, and nested subreports/dataset runs."""
    # The function will read all relevant information from the jrxml file
    doc = etree.parse(self._reportPath)
    # Define namespaces
    ns = "http://jasperreports.sourceforge.net/jasperreports"
    nss = {"jr": ns}
    # Language
    # Note that if either queryString or language do not exist the default
    # (from the constructor) is XPath.
    langTags = doc.xpath("/jr:jasperReport/jr:queryString", namespaces=nss)
    if langTags:
        if langTags[0].get("language"):
            self._language = langTags[0].get("language").lower()
    # Relations: either a Python-literal list ("[...]") or a comma-separated
    # string; every entry gets the current path prefix prepended.
    relationTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_RELATIONS"]', namespaces=nss)
    if relationTags and "value" in relationTags[0].keys():
        relation = relationTags[0].get("value").strip()
        if relation.startswith("["):
            self._relations = safe_eval(relationTags[0].get("value"), {})
        else:
            self._relations = [x.strip() for x in relation.split(",")]
        self._relations = [self._pathPrefix + x for x in self._relations]
    if not self._relations and self._pathPrefix:
        # No explicit relations: fall back to the path prefix itself
        # (without its trailing separator).
        self._relations = [self._pathPrefix[:-1]]
    # Repeat field
    copiesFieldTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_COPIES_FIELD"]', namespaces=nss)
    if copiesFieldTags and "value" in copiesFieldTags[0].keys():
        self._copiesField = self._pathPrefix + copiesFieldTags[0].get("value")
    # Repeat
    copiesTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_COPIES"]', namespaces=nss)
    if copiesTags and "value" in copiesTags[0].keys():
        self._copies = int(copiesTags[0].get("value"))
    self._isHeader = False
    headerTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_HEADER"]', namespaces=nss)
    if headerTags and "value" in headerTags[0].keys():
        self._isHeader = True
    fieldTags = doc.xpath("/jr:jasperReport/jr:field", namespaces=nss)
    self._fields, self._fieldNames = self.extractFields(fieldTags, ns)
    # Subreports
    # Here we expect the following structure in the .jrxml file:
    # <subreport>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]></dataSourceExpression>
    #   <subreportExpression class="java.lang.String"><![CDATA[$P{STANDARD_DIR} + "report_header.jasper"]]></subreportExpression>
    # </subreport>
    subreportTags = doc.xpath("//jr:subreport", namespaces=nss)
    for tag in subreportTags:
        dataSourceExpression = tag.findtext("{%s}dataSourceExpression" % ns, "")
        if not dataSourceExpression:
            continue
        dataSourceExpression = dataSourceExpression.strip()
        m = dataSourceExpressionRegExp.match(dataSourceExpression)
        if not m:
            continue
        # Keep only the parameter name extracted by the regexp.
        dataSourceExpression = m.group(1)
        if dataSourceExpression == "REPORT_DATA_SOURCE":
            continue
        subreportExpression = tag.findtext("{%s}subreportExpression" % ns, "")
        if not subreportExpression:
            continue
        subreportExpression = subreportExpression.strip()
        # Substitute directory parameters so the Java-style expression can
        # be evaluated into a plain file name.
        subreportExpression = subreportExpression.replace("$P{STANDARD_DIR}", '"%s"' % self.standardDirectory())
        subreportExpression = subreportExpression.replace("$P{SUBREPORT_DIR}", '"%s"' % self.subreportDirectory())
        try:
            subreportExpression = safe_eval(subreportExpression, {})
        except:
            print "COULD NOT EVALUATE EXPRESSION: '%s'" % subreportExpression
            # If we're not able to evaluate the expression go to next subreport
            continue
        if subreportExpression.endswith(".jasper"):
            # Strip the 6 chars of "jasper" (keeping the dot) and point at
            # the source .jrxml instead of the compiled .jasper file.
            subreportExpression = subreportExpression[:-6] + "jrxml"
        # Model
        # NOTE(review): the '//' XPath searches the whole document, not just
        # this subreport's element -- confirm that is intended.
        model = ""
        modelTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_MODEL"]', namespaces=nss)
        if modelTags and "value" in modelTags[0].keys():
            model = modelTags[0].get("value")
        pathPrefix = ""
        pathPrefixTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_PATH_PREFIX"]', namespaces=nss)
        if pathPrefixTags and "value" in pathPrefixTags[0].keys():
            pathPrefix = pathPrefixTags[0].get("value")
        # NOTE(review): isHeader is computed but never used afterwards.
        isHeader = False
        headerTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_HEADER"]', namespaces=nss)
        if headerTags and "value" in headerTags[0].keys():
            isHeader = True
        # Add our own pathPrefix to subreport's pathPrefix
        subPrefix = []
        if self._pathPrefix:
            subPrefix.append(self._pathPrefix)
        if pathPrefix:
            subPrefix.append(pathPrefix)
        subPrefix = "/".join(subPrefix)
        subreport = JasperReport(subreportExpression, subPrefix)
        self._subreports.append(
            {
                "parameter": dataSourceExpression,
                "filename": subreportExpression,
                "model": model,
                "pathPrefix": pathPrefix,
                "report": subreport,
                "depth": 1,
            }
        )
        for subsubInfo in subreport.subreports():
            subsubInfo["depth"] += 1
            # Note hat 'parameter' (the one used to pass report's DataSource)
            # must be the same in all reports
            self._subreports.append(subsubInfo)
    # Dataset
    # Here we expect the following structure in the .jrxml file:
    # <datasetRun>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]></dataSourceExpression>
    # </datasetRun>
    datasetTags = doc.xpath("//jr:datasetRun", namespaces=nss)
    for tag in datasetTags:
        dataSourceExpression = tag.findtext("{%s}dataSourceExpression" % ns, "")
        if not dataSourceExpression:
            continue
        dataSourceExpression = dataSourceExpression.strip()
        m = dataSourceExpressionRegExp.match(dataSourceExpression)
        if not m:
            continue
        dataSourceExpression = m.group(1)
        if dataSourceExpression == "REPORT_DATA_SOURCE":
            continue
        subDatasetName = tag.get("subDataset")
        if not subDatasetName:
            continue
        # Relations (same OPENERP_RELATIONS convention as the main report,
        # read from the enclosing reportElement via '../..').
        relations = []
        relationTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_RELATIONS"]', namespaces=nss)
        if relationTags and "value" in relationTags[0].keys():
            relation = relationTags[0].get("value").strip()
            if relation.startswith("["):
                relations = safe_eval(relationTags[0].get("value"), {})
            else:
                relations = [x.strip() for x in relation.split(",")]
            relations = [self._pathPrefix + x for x in relations]
        if not relations and self._pathPrefix:
            relations = [self._pathPrefix[:-1]]
        # Repeat field
        copiesField = None
        copiesFieldTags = tag.xpath(
            '../../jr:reportElement/jr:property[@name="OPENERP_COPIES_FIELD"]', namespaces=nss
        )
        if copiesFieldTags and "value" in copiesFieldTags[0].keys():
            copiesField = self._pathPrefix + copiesFieldTags[0].get("value")
        # Repeat
        copies = None
        copiesTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_COPIES"]', namespaces=nss)
        if copiesTags and "value" in copiesTags[0].keys():
            copies = int(copiesTags[0].get("value"))
        # Model
        model = ""
        modelTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_MODEL"]', namespaces=nss)
        if modelTags and "value" in modelTags[0].keys():
            model = modelTags[0].get("value")
        pathPrefix = ""
        pathPrefixTags = tag.xpath(
            '../../jr:reportElement/jr:property[@name="OPENERP_PATH_PREFIX"]', namespaces=nss
        )
        if pathPrefixTags and "value" in pathPrefixTags[0].keys():
            pathPrefix = pathPrefixTags[0].get("value")
        # We need to find the appropriate subDataset definition for this dataset run.
        subDataset = doc.xpath('//jr:subDataset[@name="%s"]' % subDatasetName, namespaces=nss)[0]
        fieldTags = subDataset.xpath("jr:field", namespaces=nss)
        fields, fieldNames = self.extractFields(fieldTags, ns)
        # Wrap the sub dataset as a JasperReport so it can be handled
        # like a subreport downstream.
        dataset = JasperReport()
        dataset._fields = fields
        dataset._fieldNames = fieldNames
        dataset._relations = relations
        dataset._copiesField = copiesField
        dataset._copies = copies
        self._subreports.append(
            {
                "parameter": dataSourceExpression,
                "model": model,
                "pathPrefix": pathPrefix,
                "report": dataset,
                "filename": "DATASET",
            }
        )
def extractProperties(self):
    """Parse the .jrxml report file and fill in this report's metadata:
    query language, OPENERP_* property tags (relations, copies field,
    copies, header flag), field list, and nested subreports/dataset runs."""
    # The function will read all relevant information from the jrxml file
    doc = etree.parse(self._reportPath)
    # Define namespaces
    ns = 'http://jasperreports.sourceforge.net/jasperreports'
    nss = {'jr': ns}
    # Language
    # Note that if either queryString or language do not exist the default
    # (from the constructor) is XPath.
    langTags = doc.xpath('/jr:jasperReport/jr:queryString', namespaces=nss)
    if langTags:
        if langTags[0].get('language'):
            self._language = langTags[0].get('language').lower()
    # Relations: either a Python-literal list ("[...]") or a comma-separated
    # string; every entry gets the current path prefix prepended.
    relationTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_RELATIONS"]', namespaces=nss)
    if relationTags and 'value' in relationTags[0].keys():
        relation = relationTags[0].get('value').strip()
        if relation.startswith('['):
            self._relations = safe_eval(relationTags[0].get('value'), {})
        else:
            self._relations = [x.strip() for x in relation.split(',')]
        self._relations = [self._pathPrefix + x for x in self._relations]
    if not self._relations and self._pathPrefix:
        # No explicit relations: fall back to the path prefix itself
        # (without its trailing separator).
        self._relations = [self._pathPrefix[:-1]]
    # Repeat field
    copiesFieldTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_COPIES_FIELD"]', namespaces=nss)
    if copiesFieldTags and 'value' in copiesFieldTags[0].keys():
        self._copiesField = self._pathPrefix + copiesFieldTags[0].get('value')
    # Repeat
    copiesTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_COPIES"]', namespaces=nss)
    if copiesTags and 'value' in copiesTags[0].keys():
        self._copies = int(copiesTags[0].get('value'))
    self._isHeader = False
    headerTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_HEADER"]', namespaces=nss)
    if headerTags and 'value' in headerTags[0].keys():
        self._isHeader = True
    fieldTags = doc.xpath('/jr:jasperReport/jr:field', namespaces=nss)
    self._fields, self._fieldNames = self.extractFields(fieldTags, ns)
    # Subreports
    # Here we expect the following structure in the .jrxml file:
    # <subreport>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]></dataSourceExpression>
    #   <subreportExpression class="java.lang.String"><![CDATA[$P{STANDARD_DIR} + "report_header.jasper"]]></subreportExpression>
    # </subreport>
    subreportTags = doc.xpath('//jr:subreport', namespaces=nss)
    for tag in subreportTags:
        dataSourceExpression = tag.findtext('{%s}dataSourceExpression' % ns, '')
        if not dataSourceExpression:
            continue
        dataSourceExpression = dataSourceExpression.strip()
        m = dataSourceExpressionRegExp.match(dataSourceExpression)
        if not m:
            continue
        # Keep only the parameter name extracted by the regexp.
        dataSourceExpression = m.group(1)
        if dataSourceExpression == 'REPORT_DATA_SOURCE':
            continue
        subreportExpression = tag.findtext('{%s}subreportExpression' % ns, '')
        if not subreportExpression:
            continue
        subreportExpression = subreportExpression.strip()
        # Substitute directory parameters so the Java-style expression can
        # be evaluated into a plain file name.
        subreportExpression = subreportExpression.replace('$P{STANDARD_DIR}', '"%s"' % self.standardDirectory())
        subreportExpression = subreportExpression.replace('$P{SUBREPORT_DIR}', '"%s"' % self.subreportDirectory())
        try:
            subreportExpression = safe_eval(subreportExpression, {})
        except:
            print "COULD NOT EVALUATE EXPRESSION: '%s'" % subreportExpression
            # If we're not able to evaluate the expression go to next subreport
            continue
        if subreportExpression.endswith('.jasper'):
            # Strip the 6 chars of "jasper" (keeping the dot) and point at
            # the source .jrxml instead of the compiled .jasper file.
            subreportExpression = subreportExpression[:-6] + 'jrxml'
        # Model
        # NOTE(review): the '//' XPath searches the whole document, not just
        # this subreport's element -- confirm that is intended.
        model = ''
        modelTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_MODEL"]', namespaces=nss)
        if modelTags and 'value' in modelTags[0].keys():
            model = modelTags[0].get('value')
        pathPrefix = ''
        pathPrefixTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_PATH_PREFIX"]', namespaces=nss)
        if pathPrefixTags and 'value' in pathPrefixTags[0].keys():
            pathPrefix = pathPrefixTags[0].get('value')
        # NOTE(review): isHeader is computed but never used afterwards.
        isHeader = False
        headerTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_HEADER"]', namespaces=nss)
        if headerTags and 'value' in headerTags[0].keys():
            isHeader = True
        # Add our own pathPrefix to subreport's pathPrefix
        subPrefix = []
        if self._pathPrefix:
            subPrefix.append(self._pathPrefix)
        if pathPrefix:
            subPrefix.append(pathPrefix)
        subPrefix = '/'.join(subPrefix)
        subreport = JasperReport(subreportExpression, subPrefix)
        self._subreports.append({
            'parameter': dataSourceExpression,
            'filename': subreportExpression,
            'model': model,
            'pathPrefix': pathPrefix,
            'report': subreport,
            'depth': 1,
        })
        for subsubInfo in subreport.subreports():
            subsubInfo['depth'] += 1
            # Note hat 'parameter' (the one used to pass report's DataSource)
            # must be the same in all reports
            self._subreports.append(subsubInfo)
    # Dataset
    # Here we expect the following structure in the .jrxml file:
    # <datasetRun>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]></dataSourceExpression>
    # </datasetRun>
    datasetTags = doc.xpath('//jr:datasetRun', namespaces=nss)
    for tag in datasetTags:
        dataSourceExpression = tag.findtext('{%s}dataSourceExpression' % ns, '')
        if not dataSourceExpression:
            continue
        dataSourceExpression = dataSourceExpression.strip()
        m = dataSourceExpressionRegExp.match(dataSourceExpression)
        if not m:
            continue
        dataSourceExpression = m.group(1)
        if dataSourceExpression == 'REPORT_DATA_SOURCE':
            continue
        subDatasetName = tag.get('subDataset')
        if not subDatasetName:
            continue
        # Relations (same OPENERP_RELATIONS convention as the main report,
        # read from the enclosing reportElement via '../..').
        relations = []
        relationTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_RELATIONS"]', namespaces=nss)
        if relationTags and 'value' in relationTags[0].keys():
            relation = relationTags[0].get('value').strip()
            if relation.startswith('['):
                relations = safe_eval(relationTags[0].get('value'), {})
            else:
                relations = [x.strip() for x in relation.split(',')]
            relations = [self._pathPrefix + x for x in relations]
        if not relations and self._pathPrefix:
            relations = [self._pathPrefix[:-1]]
        # Repeat field
        copiesField = None
        copiesFieldTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_COPIES_FIELD"]', namespaces=nss)
        if copiesFieldTags and 'value' in copiesFieldTags[0].keys():
            copiesField = self._pathPrefix + copiesFieldTags[0].get('value')
        # Repeat
        copies = None
        copiesTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_COPIES"]', namespaces=nss)
        if copiesTags and 'value' in copiesTags[0].keys():
            copies = int(copiesTags[0].get('value'))
        # Model
        model = ''
        modelTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_MODEL"]', namespaces=nss)
        if modelTags and 'value' in modelTags[0].keys():
            model = modelTags[0].get('value')
        pathPrefix = ''
        pathPrefixTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_PATH_PREFIX"]', namespaces=nss)
        if pathPrefixTags and 'value' in pathPrefixTags[0].keys():
            pathPrefix = pathPrefixTags[0].get('value')
        # We need to find the appropriate subDataset definition for this dataset run.
        subDataset = doc.xpath('//jr:subDataset[@name="%s"]' % subDatasetName, namespaces=nss)[0]
        fieldTags = subDataset.xpath('jr:field', namespaces=nss)
        fields, fieldNames = self.extractFields(fieldTags, ns)
        # Wrap the sub dataset as a JasperReport so it can be handled
        # like a subreport downstream.
        dataset = JasperReport()
        dataset._fields = fields
        dataset._fieldNames = fieldNames
        dataset._relations = relations
        dataset._copiesField = copiesField
        dataset._copies = copies
        self._subreports.append({
            'parameter': dataSourceExpression,
            'model': model,
            'pathPrefix': pathPrefix,
            'report': dataset,
            'filename': 'DATASET',
        })
def get_id(self, cr, uid, sequence_id, test='id', context=None):
    """Compute and return the next value of a matching ir_sequence.

    ``test`` selects the SQL lookup predicate (built by self._get_test).
    Candidate sequences are filtered by the user's companies, ordered by
    company / weight / condition length, and row-locked with FOR UPDATE.
    The first candidate whose python ``condition`` evaluates truthy (with
    ``this`` bound to the candidate row) is incremented, and its formatted
    value (prefix + zero-padded number + suffix) is returned.
    Returns False when no sequence matches.
    """
    if not context:
        context = {}
    log = logging.getLogger('orm')
    try:
        sql_test = self._get_test(test, context)
        cr.execute("""SELECT id, number_next, prefix, suffix, padding, condition FROM ir_sequence WHERE """ + sql_test + """ AND active=%s AND ( company_id IS NULL OR company_id IN ( SELECT company_id FROM res_users WHERE id = %s ) OR company_id IN ( SELECT cid FROM res_company_users_rel WHERE user_id = %s )) ORDER BY company_id, weight DESC, length(COALESCE(condition,'')) DESC FOR UPDATE """, (sequence_id, True, uid, uid), debug=self._debug)
        for res in cr.dictfetchall():
            if res['condition']:
                if self._debug:
                    log.debug("ir_seq: %s has condition: %s" % (res['id'], res['condition']))
                try:
                    ctx = context.copy()
                    # Expose the candidate row to the condition expression
                    # as 'this' (attribute-style access).
                    ctx['this'] = attrob(res)
                    bo = safe_eval(res['condition'], ctx)
                    if not bo:
                        if self._debug:
                            log.debug('ir_seq: %d not matched' % res['id'])
                        continue
                except Exception, e:
                    # it would be normal to have exceptions, because
                    # the domain may contain errors
                    if self._debug:
                        log.debug('ir_seq[%d]: Exception %s with context %s' %
                                  (res['id'], e, context), exc_info=True)
                    continue
                if self._debug:
                    log.debug('ir_seq: %d matched' % res['id'])
            # Consume a number from the selected sequence (row is locked).
            cr.execute('UPDATE ir_sequence '
                       'SET number_next=number_next+number_increment '
                       'WHERE id=%s AND active=%s',
                       (res['id'], True), debug=self._debug)
            if res['number_next']:
                # '%%0%sd' builds the zero-padding format string from
                # 'padding', then formats number_next with it.
                return self._process(res['prefix']) + '%%0%sd' % res['padding'] % res['number_next'] + self._process(res['suffix'])
            else:
                return self._process(res['prefix']) + self._process(res['suffix'])
        # end for
    finally:
        # cr.commit()
        pass
    return False
def do_check(self, cr, uid, action, obj, context=None):
    """Extend the standard rule check with the rule's python trigger
    expression (``trg_evalexpr``).

    Evaluates the expression against ``old`` (values before the change),
    ``changed`` (only the modified values, write/update only), ``new``
    (old with changes applied), ``obj`` and the insert/update flags.
    Returns True when the rule must fire.

    Fix: the original signature used the mutable default ``context={}``,
    which is shared across calls; replaced with ``context=None``.
    """
    if context is None:
        context = {}
    ok = super(base_action_rule, self)\
        .do_check(cr, uid, action, obj, context=context)
    if ok and action.trg_evalexpr:
        ok = False
        if context.get('_action_trigger'):
            is_ins = context.get('_action_trigger') == 'create'
            # If no changed values, exit with False.
            # Abort if called from crm_case._action, to avoid duplicate
            # triggering.
            if not context.get('_action_new') or context.get('state_to'):
                return False
            # old dict: holds original values for all columns
            # (before the write/update)
            old = {}
            for x in context.get('_action_old'):
                if x.get('id') == obj.id:
                    # _action_old is a list of records
                    old = x
                    break
            # Normalize (id, name) tuples on "old" into id-only form
            for x in old:
                if isinstance(old[x], tuple):
                    old[x] = old[x][0]
            # changed dict: holds only the changed values;
            # available only on write/update
            changed = {}
            if not is_ins:
                for k, v in context.get('_action_new').items():
                    if old.get(k) != v:
                        changed[k] = v
            # new dict: result of applying changes to old
            # (includes non changed values)
            new = dict(old)  # copy dict content, not dict pointer
            new.update(changed)
            # Evaluate trigger expression
            eval_dict = dict(DEFAULT_EVALDICT)
            eval_dict.update({
                'obj': obj,  # allows object.notation
                'old': old,
                'changed': changed,
                'new': new,
                'inserting': is_ins,
                'creating': is_ins,
                'updating': not is_ins,
                'writing': not is_ins,
            })
            if action.trg_evalexpr_dbg:
                _logger.setLevel(logging.DEBUG)
                _logger.debug('Rule CHECK: %s on record id %d.'
                              % (action.name, obj.id))
                _logger.debug('CHG: %s' % str(changed))
                _logger.debug('NEW: %s' % str(new))
                _logger.debug('OLD: %s' % str(old))
            # Eval errors are intentionally left unhandled so they surface.
            ok = safe_eval(action.trg_evalexpr.replace('\n', ' '),
                           {}, eval_dict)
            if ok:
                _logger.debug('RULE ACTIVATED: %s on record id %d.'
                              % (action.name, obj.id))
            else:
                if action.trg_evalexpr_dbg:
                    _logger.debug('Rule not activated: %s on record id %d.'
                                  % (action.name, obj.id))
    return ok
def extractProperties(self):
    """Parse the .jrxml report file and fill in this report's metadata:
    query language, OPENERP_* property tags (relations, copies field),
    field list, and nested subreports/dataset runs."""
    # The function will read all relevant information from the jrxml file
    doc = etree.parse(self._reportPath)
    # Define namespaces
    ns = 'http://jasperreports.sourceforge.net/jasperreports'
    nss = {'jr': ns}
    # Language
    # Note that if either queryString or language do not exist the default
    # (from the constructor) is XPath.
    langTags = doc.xpath('/jr:jasperReport/jr:queryString', namespaces=nss)
    if langTags:
        if langTags[0].get('language'):
            self._language = langTags[0].get('language').lower()
    # Relations: either a Python-literal list ("[...]") or a comma-separated
    # string; every entry gets the current path prefix prepended.
    relationTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_RELATIONS"]', namespaces=nss)
    if relationTags and 'value' in relationTags[0].keys():
        relation = relationTags[0].get('value').strip()
        if relation.startswith('['):
            self._relations = safe_eval(relationTags[0].get('value'), {})
        else:
            self._relations = [x.strip() for x in relation.split(',')]
        self._relations = [self._pathPrefix + x for x in self._relations]
    if not self._relations and self._pathPrefix:
        # No explicit relations: fall back to the path prefix itself
        # (without its trailing separator).
        self._relations = [self._pathPrefix[:-1]]
    # Repeat field
    copiesFieldTags = doc.xpath('/jr:jasperReport/jr:property[@name="OPENERP_COPIES_FIELD"]', namespaces=nss)
    if copiesFieldTags and 'value' in copiesFieldTags[0].keys():
        self._copiesField = self._pathPrefix + copiesFieldTags[0].get('value')
    fieldTags = doc.xpath('/jr:jasperReport/jr:field', namespaces=nss)
    self._fields, self._fieldNames = self.extractFields(fieldTags, ns)
    # Subreports
    # Here we expect the following structure in the .jrxml file:
    # <subreport>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]></dataSourceExpression>
    #   <subreportExpression class="java.lang.String"><![CDATA[$P{STANDARD_DIR} + "report_header.jasper"]]></subreportExpression>
    # </subreport>
    subreportTags = doc.xpath('//jr:subreport', namespaces=nss)
    for tag in subreportTags:
        dataSourceExpression = tag.findtext('{%s}dataSourceExpression' % ns, '')
        if not dataSourceExpression:
            continue
        dataSourceExpression = dataSourceExpression.strip()
        m = dataSourceExpressionRegExp.match(dataSourceExpression)
        if not m:
            continue
        # Keep only the parameter name extracted by the regexp.
        dataSourceExpression = m.group(1)
        if dataSourceExpression == 'REPORT_DATA_SOURCE':
            continue
        subreportExpression = tag.findtext('{%s}subreportExpression' % ns, '')
        if not subreportExpression:
            continue
        subreportExpression = subreportExpression.strip()
        # Substitute directory parameters so the Java-style expression can
        # be evaluated into a plain file name.
        subreportExpression = subreportExpression.replace('$P{STANDARD_DIR}', '"%s"' % self.standardDirectory())
        subreportExpression = subreportExpression.replace('$P{SUBREPORT_DIR}', '"%s"' % self.subreportDirectory())
        try:
            subreportExpression = safe_eval(subreportExpression, {})
        except:
            print "COULD NOT EVALUATE EXPRESSION: '%s'" % subreportExpression
            # If we're not able to evaluate the expression go to next subreport
            continue
        if subreportExpression.endswith('.jasper'):
            # Strip the 6 chars of "jasper" (keeping the dot) and point at
            # the source .jrxml instead of the compiled .jasper file.
            subreportExpression = subreportExpression[:-6] + 'jrxml'
        # Model
        # NOTE(review): the '//' XPath searches the whole document, not just
        # this subreport's element -- confirm that is intended.
        model = ''
        modelTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_MODEL"]', namespaces=nss)
        if modelTags and 'value' in modelTags[0].keys():
            model = modelTags[0].get('value')
        pathPrefix = ''
        pathPrefixTags = tag.xpath('//jr:reportElement/jr:property[@name="OPENERP_PATH_PREFIX"]', namespaces=nss)
        if pathPrefixTags and 'value' in pathPrefixTags[0].keys():
            pathPrefix = pathPrefixTags[0].get('value')
        # Add our own pathPrefix to subreport's pathPrefix
        subPrefix = []
        if self._pathPrefix:
            subPrefix.append(self._pathPrefix)
        if pathPrefix:
            subPrefix.append(pathPrefix)
        subPrefix = '/'.join(subPrefix)
        subreport = JasperReport(subreportExpression, subPrefix)
        self._subreports.append({
            'parameter': dataSourceExpression,
            'filename': subreportExpression,
            'model': model,
            'pathPrefix': pathPrefix,
            'report': subreport,
            'depth': 1,
        })
        for subsubInfo in subreport.subreports():
            subsubInfo['depth'] += 1
            # Note hat 'parameter' (the one used to pass report's DataSource)
            # must be the same in all reports
            self._subreports.append(subsubInfo)
    # Dataset
    # Here we expect the following structure in the .jrxml file:
    # <datasetRun>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]></dataSourceExpression>
    # </datasetRun>
    datasetTags = doc.xpath('//jr:datasetRun', namespaces=nss)
    for tag in datasetTags:
        dataSourceExpression = tag.findtext('{%s}dataSourceExpression' % ns, '')
        if not dataSourceExpression:
            continue
        dataSourceExpression = dataSourceExpression.strip()
        m = dataSourceExpressionRegExp.match(dataSourceExpression)
        if not m:
            continue
        dataSourceExpression = m.group(1)
        if dataSourceExpression == 'REPORT_DATA_SOURCE':
            continue
        subDatasetName = tag.get('subDataset')
        if not subDatasetName:
            continue
        # Relations (same OPENERP_RELATIONS convention as the main report,
        # read from the enclosing reportElement via '../..').
        relations = []
        relationTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_RELATIONS"]', namespaces=nss)
        if relationTags and 'value' in relationTags[0].keys():
            relation = relationTags[0].get('value').strip()
            if relation.startswith('['):
                relations = safe_eval(relationTags[0].get('value'), {})
            else:
                relations = [x.strip() for x in relation.split(',')]
            relations = [self._pathPrefix + x for x in relations]
        if not relations and self._pathPrefix:
            relations = [self._pathPrefix[:-1]]
        # Repeat field
        copiesField = None
        copiesFieldTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_COPIES_FIELD"]', namespaces=nss)
        if copiesFieldTags and 'value' in copiesFieldTags[0].keys():
            copiesField = self._pathPrefix + copiesFieldTags[0].get('value')
        # Model
        model = ''
        modelTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_MODEL"]', namespaces=nss)
        if modelTags and 'value' in modelTags[0].keys():
            model = modelTags[0].get('value')
        pathPrefix = ''
        pathPrefixTags = tag.xpath('../../jr:reportElement/jr:property[@name="OPENERP_PATH_PREFIX"]', namespaces=nss)
        if pathPrefixTags and 'value' in pathPrefixTags[0].keys():
            pathPrefix = pathPrefixTags[0].get('value')
        # We need to find the appropriate subDataset definition for this dataset run.
        subDataset = doc.xpath('//jr:subDataset[@name="%s"]' % subDatasetName, namespaces=nss)[0]
        fieldTags = subDataset.xpath('jr:field', namespaces=nss)
        fields, fieldNames = self.extractFields(fieldTags, ns)
        # Wrap the sub dataset as a JasperReport so it can be handled
        # like a subreport downstream.
        dataset = JasperReport()
        dataset._fields = fields
        dataset._fieldNames = fieldNames
        dataset._relations = relations
        dataset._copiesField = copiesField
        self._subreports.append({
            'parameter': dataSourceExpression,
            'model': model,
            'pathPrefix': pathPrefix,
            'report': dataset,
            'filename': 'DATASET',
        })
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False):
    """Build the view architecture dynamically from the custom report
    definition referenced by context['report_id'].

    Without a report_id in context, defer to the standard implementation.
    Otherwise synthesize the arch XML for graph, calendar or generic
    views from the report's configured fields; field descriptions come
    from the underlying models' fields_get (numbered 'fieldN' in report
    field order, plus the synthetic 'column_count' counter).
    """
    if (not context) or 'report_id' not in context:
        return super(report_creator, self).fields_view_get(cr, user, view_id, view_type, context, toolbar)
    report = self.browse(cr, user, context['report_id'])
    # Field definitions per model, keyed by model name.
    models = {}
    for model in report.model_ids:
        models[model.model] = self.pool.get(model.model).fields_get(cr, user, context=context)
    fields = {}
    i = 0
    for f in report.field_ids:
        if f.field_id.model:
            # Regular field: exposed as 'fieldN' in report field order.
            fields['field'+str(i)] = models[f.field_id.model][f.field_id.name]
            i+=1
        else:
            # Field without a model acts as the record counter column.
            fields['column_count'] = {'readonly': True, 'type': 'integer', 'string': 'Count', 'size': 64, 'name': 'column_count'}
    arch = '<?xml version="1.0" encoding="utf-8"?>\n'
    if view_type=='graph':
        # safe_eval with this mapping as globals resolves 'horz'/'vert'
        # to the corresponding orientation string.
        orientation_eval = {'horz':'horizontal','vert' :'vertical'}
        orientation = safe_eval(report.view_graph_orientation,orientation_eval)
        arch +='<graph string="%s" type="%s" orientation="%s">' % (report.name, report.view_graph_type, orientation)
        i = 0
        # Emit X-axis fields first, then Y-axis fields.
        for val in ('x','y'):
            for f in report.field_ids:
                if f.graph_mode==val:
                    if f.field_id.model:
                        arch += '<field name="%s" select="1"/>' % ('field'+str(i),)
                        i+=1
                    else:
                        arch += '<field name="%s" select="1"/>' % ('column_count',)
    elif view_type=='calendar':
        # Fields flagged with a calendar mode become calendar attributes
        # (date_start/date_delay/color); the rest become child <field>s.
        required_types = ['date_start','date_delay','color']
        set_dict = {'view_type':view_type,'string':report.name}
        temp_list = []
        i=0
        for f in report.field_ids:
            if f.calendar_mode and f.calendar_mode in required_types:
                if f.field_id.model:
                    field_cal = 'field'+str(i)
                    i+=1
                else:
                    field_cal = 'column_count'
                set_dict[f.calendar_mode] = field_cal
                # Each calendar attribute can only be claimed once.
                del required_types[required_types.index(f.calendar_mode)]
            else:
                if f.field_id.model:
                    temp_list.append('''<field name="%(name)s" select="1"/>''' % {'name':'field'+str(i)})
                    i+=1
                else:
                    temp_list.append('''<field name="%(name)s" select="1"/>''' % {'name':'column_count'})
        arch += '''<%(view_type)s string="%(string)s" date_start="%(date_start)s" ''' %set_dict
        if set_dict.get('date_delay',False):
            arch +=''' date_delay="%(date_delay)s" '''%set_dict
        if set_dict.get('date_stop',False):
            arch +=''' date_stop="%(date_stop)s" '''%set_dict
        if set_dict.get('color',False):
            arch +=''' color="%(color)s"'''%set_dict
        arch += '''>'''
        arch += ''.join(temp_list)
    else:
        # Generic case (form/tree/...): one <field> per report field.
        arch += '<%s string="%s">\n' % (view_type, report.name)
        i = 0
        for f in report.field_ids:
            if f.field_id.model:
                arch += '<field name="%s" select="1"/>' % ('field'+str(i),)
                i+=1
            else:
                arch += '<field name="%s" select="1"/>' % ('column_count',)
    arch += '</%s>' % (view_type,)
    result = {
        'arch': arch,
        'fields': fields
    }
    # Dynamic views expose no toolbar actions.
    result['toolbar'] = {
        'print': [],
        'action': [],
        'relate': []
    }
    return result
def do_check(self, cr, uid, action, obj, context=None):
    """Extend the standard rule check with the rule's python trigger
    expression (``trg_evalexpr``).

    Evaluates the expression against ``old`` (values before the change),
    ``changed`` (only the modified values, write/update only), ``new``
    (old with changes applied), ``obj`` and the insert/update flags.
    Returns True when the rule must fire.

    Fix: the original signature used the mutable default ``context={}``,
    which is shared across calls; replaced with ``context=None``.
    """
    if context is None:
        context = {}
    ok = super(base_action_rule, self)\
        .do_check(cr, uid, action, obj, context=context)
    if ok and action.trg_evalexpr:
        ok = False
        if context.get('_action_trigger'):
            is_ins = context.get('_action_trigger') == 'create'
            # If no changed values, exit with False.
            # Abort if called from crm_case._action, to avoid duplicate
            # triggering.
            if not context.get('_action_new') or context.get('state_to'):
                return False
            # old dict: holds original values for all columns
            # (before the write/update)
            old = {}
            for x in context.get('_action_old'):
                if x.get('id') == obj.id:
                    # _action_old is a list of records
                    old = x
                    break
            # Normalize (id, name) tuples on "old" into id-only form
            for x in old:
                if isinstance(old[x], tuple):
                    old[x] = old[x][0]
            # changed dict: holds only the changed values;
            # available only on write/update
            changed = {}
            if not is_ins:
                for k, v in context.get('_action_new').items():
                    if old.get(k) != v:
                        changed[k] = v
            # new dict: result of applying changes to old
            # (includes non changed values)
            new = dict(old)  # copy dict content, not dict pointer
            new.update(changed)
            # Evaluate trigger expression
            eval_dict = dict(DEFAULT_EVALDICT)
            eval_dict.update({
                'obj': obj,  # allows object.notation
                'old': old,
                'changed': changed,
                'new': new,
                'inserting': is_ins,
                'creating': is_ins,
                'updating': not is_ins,
                'writing': not is_ins,
            })
            if action.trg_evalexpr_dbg:
                _logger.setLevel(logging.DEBUG)
                _logger.debug('Rule CHECK: %s on record id %d.'
                              % (action.name, obj.id))
                _logger.debug('CHG: %s' % str(changed))
                _logger.debug('NEW: %s' % str(new))
                _logger.debug('OLD: %s' % str(old))
            # Eval errors are intentionally left unhandled so they surface.
            ok = safe_eval(action.trg_evalexpr.replace('\n', ' '),
                           {}, eval_dict)
            if ok:
                _logger.debug('RULE ACTIVATED: %s on record id %d.'
                              % (action.name, obj.id))
            else:
                if action.trg_evalexpr_dbg:
                    _logger.debug(
                        'Rule not activated: %s on record id %d.'
                        % (action.name, obj.id))
    return ok
def do_action(self, cr, uid, action, model_obj, obj, context=None):
    """ Execute the rule *action* on the record *obj*.

    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param action: pass action
    @param model_obj: pass Model object
    @param context: A standard dictionary for contextual values
    """
    if context is None:
        context = {}
    # Run the linked server action with this record set as active.
    if action.server_action_id:
        context.update({"active_id": obj.id, "active_ids": [obj.id]})
        self.pool.get("ir.actions.server").run(
            cr, uid, [action.server_action_id.id], context)
    # Collect the direct field updates configured on the rule.
    write = {}
    if hasattr(obj, "user_id") and action.act_user_id:
        obj.user_id = action.act_user_id
        write["user_id"] = action.act_user_id.id
    if hasattr(obj, "date_action_last"):
        write["date_action_last"] = time.strftime("%Y-%m-%d %H:%M:%S")
    if hasattr(obj, "state") and action.act_state:
        obj.state = action.act_state
        write["state"] = action.act_state
    if hasattr(obj, "categ_id") and action.act_categ_id:
        obj.categ_id = action.act_categ_id
        write["categ_id"] = action.act_categ_id.id
    model_obj.write(cr, uid, [obj.id], write, context)
    # Optional reminder hooks on the target model.
    if hasattr(model_obj, "remind_user") and action.act_remind_user:
        model_obj.remind_user(cr, uid, [obj.id], context,
                              attach=action.act_remind_attach)
    if hasattr(model_obj, "remind_partner") and action.act_remind_partner:
        model_obj.remind_partner(cr, uid, [obj.id], context,
                                 attach=action.act_remind_attach)
    if action.act_method:
        # BUGFIX: the original called getattr(model_obj, "act_method"),
        # i.e. looked up an attribute literally named "act_method" on the
        # model instead of the method name stored on the rule.
        getattr(model_obj, action.act_method)(cr, uid, [obj.id], action,
                                              context)
    # Build the recipient list from the rule configuration.
    emails = []
    if hasattr(obj, "user_id") and action.act_mail_to_user:
        if obj.user_id:
            emails.append(obj.user_id.user_email)
    if action.act_mail_to_watchers:
        emails += (action.act_email_cc or "").split(",")
    if action.act_mail_to_email:
        emails += (action.act_mail_to_email or "").split(",")
    locals_for_emails = {
        "user": self.pool.get("res.users").browse(cr, uid, uid,
                                                  context=context),
        "obj": obj,
    }
    if action.act_email_to:
        emails.append(safe_eval(action.act_email_to, {}, locals_for_emails))
    emails = filter(None, emails)
    if len(emails) and action.act_mail_body:
        emails = list(set(emails))  # de-duplicate recipients
        email_from = safe_eval(action.act_email_from, {}, locals_for_emails)

        def to_email(text):
            # Extract bare addresses from "Name <addr>" style strings.
            return re.findall(r"([^ ,<@]+@[^> ,]+)", text or "")
        emails = to_email(",".join(filter(None, emails)))
        email_froms = to_email(email_from)
        if email_froms:
            self.email_send(cr, uid, obj, emails, action.act_mail_body,
                            emailfrom=email_froms[0])
    return True
def process_read(self, cr, uid, node, context=None):
    """Serialize the records behind *node* as an iCalendar (.ics) payload.

    Non-.ics nodes are delegated to the parent implementation.

    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param node: pass the node
    @param context: A standard dictionary for contextual values
    """
    def ics_datetime(idate, short=False):
        # Convert an OpenERP date ('%Y-%m-%d', short=True) or datetime
        # string into the Python object vobject expects.
        if short:
            return datetime.date.fromtimestamp(
                time.mktime(time.strptime(idate, '%Y-%m-%d')))
        else:
            return datetime.datetime.strptime(idate, '%Y-%m-%d %H:%M:%S')
    if node.extension != '.ics':
        return super(document_directory_content, self).process_read(
            cr, uid, node, context)
    # Imported lazily so the module loads even without vobject installed.
    import vobject
    ctx = (context or {})
    ctx.update(node.context.context.copy())
    ctx.update(node.dctx)
    content = self.browse(cr, uid, node.cnt_id, ctx)
    if not content.object_id:
        return super(document_directory_content, self).process_read(
            cr, uid, node, context)
    obj_class = self.pool.get(content.object_id.model)
    # Restrict the exported records by the configured domain, plus the
    # single record when the node points at one (act_id).
    if content.ics_domain:
        domain = safe_eval(content.ics_domain, ctx)
    else:
        domain = []
    if node.act_id:
        domain.append(('id', '=', node.act_id))
    # print "process read clause:",domain
    ids = obj_class.search(cr, uid, domain, context=ctx)
    cal = vobject.iCalendar()
    for obj in obj_class.browse(cr, uid, ids):
        event = cal.add('vevent')
        # Fix dtstamp and last-modified with create and write date on the
        # object line
        perm = obj_class.perm_read(cr, uid, [obj.id], context)
        event.add('created').value = ics_datetime(
            time.strftime('%Y-%m-%d %H:%M:%S'))
        event.add('dtstamp').value = ics_datetime(
            perm[0]['create_date'][:19])
        if perm[0]['write_date']:
            event.add('last-modified').value = ics_datetime(
                perm[0]['write_date'][:19])
        # Map each configured ICS field from the record.
        for field in content.ics_field_ids:
            if field.field_id.name:
                value = getattr(obj, field.field_id.name)
            else:
                value = None
            if (not value) and field.name == 'uid':
                # Generate a stable UID and persist it on the record.
                value = 'OpenERP-%s_%s@%s' % (
                    content.object_id.model,
                    str(obj.id),
                    cr.dbname,
                )  # Why?
                obj_class.write(cr, uid, [obj.id],
                                {field.field_id.name: value})
            if ICS_TAGS[field.name] == 'normal':
                # Browse records serialize via their display name.
                if type(value) == type(obj):
                    value = value.name
                event.add(field.name).value = tools.ustr(value) or ''
            elif ICS_TAGS[field.name] == 'date' and value:
                if field.name == 'dtstart':
                    # Remembered so a float dtend can be derived from it.
                    date_start = start_date = \
                        datetime.datetime.fromtimestamp(
                            time.mktime(
                                time.strptime(value, "%Y-%m-%d %H:%M:%S")))
                if field.name == 'dtend' and (isinstance(value, float)
                                              or field.fn == 'hours'):
                    # dtend given as a duration in hours from dtstart.
                    value = (start_date + datetime.timedelta(hours=value)
                             ).strftime('%Y-%m-%d %H:%M:%S')
                # 10 chars means a bare date, otherwise a full datetime.
                if len(value) == 10:
                    value = ics_datetime(value, True)
                else:
                    value = ics_datetime(value)
                event.add(field.name).value = value
    s = cal.serialize()
    return s
def process_write(self, cr, uid, node, data, context=None):
    """Parse an uploaded iCalendar payload and write/create the records.

    Non-.ics nodes are delegated to the parent implementation.

    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param node: pass the node
    @param data: pass the data
    @param context: A standard dictionary for contextual values
    """
    if node.extension != '.ics':
        return super(document_directory_content, self).process_write(
            cr, uid, node, data, context)
    # Imported lazily so the module loads even without vobject installed.
    import vobject
    parsedCal = vobject.readOne(data)
    # Per ICS tag: target field name, mapping function, extra expression.
    fields = {}
    funcs = {}
    fexprs = {}
    content = self.browse(cr, uid, node.cnt_id, context)
    idomain = {}
    ctx = (context or {})
    ctx.update(node.context.context.copy())
    ctx.update(node.dctx)
    if content.ics_domain:
        # Domain terms become default values for newly created records.
        for d in safe_eval(content.ics_domain, ctx):
            # TODO: operator?
            idomain[d[0]] = d[2]
    for n in content.ics_field_ids:
        fields[n.name] = n.field_id.name and str(n.field_id.name)
        funcs[n.name] = n.fn
        fexprs[n.name] = n.expr
    if 'uid' not in fields:
        # FIXME: should pass
        return True
    for child in parsedCal.getChildren():
        result = {}
        uuid = None
        for event in child.getChildren():
            enl = event.name.lower()
            if enl == 'uid':
                uuid = event.value
            if not enl in fields:
                continue
            if fields[enl] and funcs[enl] == 'field':
                # Direct field mapping; dates reformatted to server format.
                if ICS_TAGS[enl] == 'normal':
                    result[fields[enl]] = event.value.encode('utf8')
                elif ICS_TAGS[enl] == 'date':
                    result[fields[enl]] = event.value.strftime(
                        '%Y-%m-%d %H:%M:%S')
            elif fields[enl] and funcs[enl] == 'hours':
                # Duration mapping: hours between this tag and the
                # reference tag (default dtstart).
                ntag = fexprs[enl] or 'dtstart'
                ts_start = child.getChildValue(ntag, default=False)
                if not ts_start:
                    raise Exception(
                        "Cannot parse hours (for %s) without %s"
                        % (enl, ntag))
                ts_end = event.value
                assert isinstance(ts_start, datetime.datetime)
                assert isinstance(ts_end, datetime.datetime)
                td = ts_end - ts_start
                result[fields[enl]] = td.days * 24.0 + (td.seconds / 3600.0)
            # put other functions here..
            else:
                # print "Unhandled tag in ICS:", enl
                pass
        # end for
        if not uuid:
            # FIXME: should pass
            continue
        cmodel = content.object_id.model
        # Build the search expression locating the target record.
        wexpr = False
        if fields['uid']:
            wexpr = [(fields['uid'], '=', uuid.encode('utf8'))]
        else:
            # Parse back the uid from 'OpenERP-%s_%s@%s'
            wematch = self.__rege.match(uuid.encode('utf8'))
            # TODO: perhaps also add the domain to wexpr, restrict.
            if not wematch:
                raise Exception("Cannot locate UID in %s" % uuid)
            if wematch.group(3) != cr.dbname:
                raise Exception("Object is not for our db!")
            if content.object_id:
                if wematch.group(1) != cmodel:
                    raise Exception(
                        "ICS must be at the wrong folder, this one is "
                        "for %s" % cmodel)
            else:
                # TODO: perhaps guess the model from the iCal, is it safe?
                pass
            wexpr = [('id', '=', wematch.group(2))]
        fobj = self.pool.get(content.object_id.model)
        if not wexpr:
            id = False
        else:
            id = fobj.search(cr, uid, wexpr, context=context)
        if isinstance(id, list):
            if len(id) > 1:
                raise Exception("Multiple matches found for ICS")
        # Existing record: update it; otherwise create one seeded with
        # the domain defaults.
        if id:
            fobj.write(cr, uid, id, result, context=context)
        else:
            r = idomain.copy()
            r.update(result)
            fobj.create(cr, uid, r, context=context)
    return True
def send(self, cr, uid, tb, explanations, remarks=None, issue_name=None):
    """ Method called by the client to send a problem to the publisher
    warranty server.

    @param tb: traceback text of the reported problem
    @param explanations: user-provided description
    @param remarks: optional additional remarks
    @param issue_name: optional short title for the issue
    @return: True on success, False when the server returned no case id
    """
    if not remarks:
        remarks = ""
    # Requires at least one valid contract; IndexError if none exist.
    valid_contracts = self._get_valid_contracts(cr, uid)
    valid_contract = valid_contracts[0]
    try:
        origin = 'client'
        dbuuid = self.pool.get('ir.config_parameter').get_param(
            cr, uid, 'database.uuid')
        db_create_date = self.pool.get('ir.config_parameter').get_param(
            cr, uid, 'database.create_date')
        user = self.pool.get("res.users").browse(cr, uid, uid)
        user_name = user.name
        email = user.email
        msg = {
            'contract_name': valid_contract.name,
            'tb': tb,
            'explanations': explanations,
            'remarks': remarks,
            'origin': origin,
            'dbname': cr.dbname,
            'dbuuid': dbuuid,
            'db_create_date': db_create_date,
            'issue_name': issue_name,
            'email': email,
            'user_name': user_name,
        }
        # urlopen only grew a timeout parameter in Python 2.6.
        add_arg = {"timeout": 30} if sys.version_info >= (2, 6) else {}
        uo = urllib2.urlopen(
            config.get("publisher_warranty_url"),
            urllib.urlencode({
                'arg0': msg,
                "action": "send",
            }), **add_arg)
        try:
            submit_result = uo.read()
        finally:
            uo.close()
        # NOTE(review): the raw HTTP response body is fed to safe_eval;
        # presumably the server replies with a Python literal (the CRM
        # case id) — confirm the endpoint contract.
        result = safe_eval(submit_result)
        crm_case_id = result
        if not crm_case_id:
            return False
    except osv.except_osv:
        # Business errors propagate unchanged.
        raise
    except Exception:
        # Any other failure is logged and wrapped in a user-facing error.
        _logger.warning("Error sending problem report", exc_info=1)
        raise osv.except_osv(
            _("Error"),
            _("Error during communication with the publisher warranty "
              "server."))
    return True
def do_action(self, cr, uid, action, model_obj, obj, context=None):
    """ Execute the rule *action* on the record *obj*.

    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param action: pass action
    @param model_obj: pass Model object
    @param context: A standard dictionary for contextual values
    """
    if context is None:
        context = {}
    # Run the linked server action with this record set as active.
    if action.server_action_id:
        context.update({'active_id': obj.id,
                        'active_ids': [obj.id],
                        'active_model': obj._name})
        self.pool.get('ir.actions.server').run(
            cr, uid, [action.server_action_id.id], context)
    # Collect the direct field updates configured on the rule.
    write = {}
    if hasattr(obj, 'user_id') and action.act_user_id:
        obj.user_id = action.act_user_id
        write['user_id'] = action.act_user_id.id
    if hasattr(obj, 'date_action_last'):
        write['date_action_last'] = time.strftime('%Y-%m-%d %H:%M:%S')
    if hasattr(obj, 'state') and action.act_state:
        obj.state = action.act_state
        write['state'] = action.act_state
    if hasattr(obj, 'categ_id') and action.act_categ_id:
        obj.categ_id = action.act_categ_id
        write['categ_id'] = action.act_categ_id.id
    model_obj.write(cr, uid, [obj.id], write, context)
    # Optional reminder hooks on the target model.
    if hasattr(model_obj, 'remind_user') and action.act_remind_user:
        model_obj.remind_user(cr, uid, [obj.id], context,
                              attach=action.act_remind_attach)
    if hasattr(model_obj, 'remind_partner') and action.act_remind_partner:
        model_obj.remind_partner(cr, uid, [obj.id], context,
                                 attach=action.act_remind_attach)
    if action.act_method:
        # BUGFIX: the original called getattr(model_obj, 'act_method'),
        # i.e. looked up an attribute literally named "act_method" on the
        # model instead of the method name stored on the rule.
        getattr(model_obj, action.act_method)(cr, uid, [obj.id], action,
                                              context)
    # Build the recipient list from the rule configuration.
    emails = []
    if hasattr(obj, 'user_id') and action.act_mail_to_user:
        if obj.user_id:
            emails.append(obj.user_id.user_email)
    if action.act_mail_to_watchers:
        emails += (action.act_email_cc or '').split(',')
    if action.act_mail_to_email:
        emails += (action.act_mail_to_email or '').split(',')
    locals_for_emails = {
        'user': self.pool.get('res.users').browse(cr, uid, uid,
                                                  context=context),
        'obj': obj,
    }
    if action.act_email_to:
        emails.append(safe_eval(action.act_email_to, {},
                                locals_for_emails))
    emails = filter(None, emails)
    if len(emails) and action.act_mail_body:
        emails = list(set(emails))  # de-duplicate recipients
        email_from = safe_eval(action.act_email_from, {},
                               locals_for_emails)

        def to_email(text):
            # Extract bare addresses from "Name <addr>" style strings.
            return re.findall(r'([^ ,<@]+@[^> ,]+)', text or '')
        emails = to_email(','.join(filter(None, emails)))
        email_froms = to_email(email_from)
        if email_froms:
            self.email_send(cr, uid, obj, emails, action.act_mail_body,
                            emailfrom=email_froms[0])
    return True
def _exec_action(action, datas, context):
    # taken from client/modules/action/main.py:84 _exec_action()
    # Nested helper: relies on enclosing-scope names (pool, cr, uid,
    # wiz_data, wiz_buttons, log, log_test). Emulates the GTK client's
    # handling of a returned action: open the window's form view, fill
    # it with defaults/wizard data and press the most likely button.
    if isinstance(action, bool) or 'type' not in action:
        return
    # Updating the context : Adding the context of action in order to use
    # it on Views called from buttons
    if datas.get('id', False):
        context.update({
            'active_id': datas.get('id', False),
            'active_ids': datas.get('ids', []),
            'active_model': datas.get('model', False)
        })
    context.update(safe_eval(action.get('context', '{}'), context.copy()))
    if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
        for key in ('res_id', 'res_model', 'view_type', 'view_mode',
                    'limit', 'auto_refresh', 'search_view', 'auto_search',
                    'search_view_id'):
            datas[key] = action.get(key, datas.get(key, None))
        # Resolve which view to open: first entry of 'views', else the
        # explicit 'view_id'.
        view_id = False
        if action.get('views', []):
            if isinstance(action['views'], list):
                view_id = action['views'][0][0]
                datas['view_mode'] = action['views'][0][1]
            else:
                if action.get('view_id', False):
                    view_id = action['view_id'][0]
        elif action.get('view_id', False):
            view_id = action['view_id'][0]
        assert datas['res_model'], "Cannot use the view without a model"
        # Here, we have a view that we need to emulate
        log_test("will emulate a %s view: %s#%s", action['view_type'],
                 datas['res_model'], view_id or '?')
        view_res = pool.get(datas['res_model']).fields_view_get(
            cr, uid, view_id, action['view_type'], context)
        assert view_res and view_res.get(
            'arch'), "Did not return any arch for the view"
        # Seed form values: model defaults, then the action's own data,
        # then the wizard data supplied by the caller.
        view_data = {}
        if view_res.get('fields', {}).keys():
            view_data = pool.get(datas['res_model']).default_get(
                cr, uid, view_res['fields'].keys(), context)
        if datas.get('form'):
            view_data.update(datas.get('form'))
        if wiz_data:
            view_data.update(wiz_data)
        log.debug("View data is: %r", view_data)
        for fk, field in view_res.get('fields', {}).items():
            # Default fields returns list of int, while at create()
            # we need to send a [(6,0,[int,..])]
            if field['type'] in ('one2many', 'many2many') \
                    and view_data.get(fk, False) \
                    and isinstance(view_data[fk], list) \
                    and not isinstance(view_data[fk][0], tuple):
                nvdata = []
                new_ids = []
                for da in view_data[fk]:
                    if isinstance(da, (int, long)):
                        new_ids.append(da)
                    elif isinstance(da, dict):
                        nvdata.append((0, 0, da))
                    else:
                        raise ValueError(
                            "Don't know what to do with %r data in %s" %
                            (da, field['type']))
                if new_ids:
                    nvdata.append((6, 0, new_ids))
                view_data[fk] = nvdata
        action_name = action.get('name')
        try:
            from xml.dom import minidom
            cancel_found = False
            buttons = []
            dom_doc = minidom.parseString(view_res['arch'])
            if not action_name:
                action_name = dom_doc.documentElement.getAttribute('name')
            # Score every button in the arch; cancel-style buttons are
            # skipped, focused/preferred buttons get a higher weight.
            for button in dom_doc.getElementsByTagName('button'):
                button_weight = 0
                if button.getAttribute('special') == 'cancel':
                    cancel_found = True
                    continue
                if button.getAttribute('icon') == 'gtk-cancel':
                    cancel_found = True
                    continue
                if button.getAttribute('default_focus') == '1':
                    button_weight += 20
                if button.getAttribute('string') in wiz_buttons:
                    button_weight += 30
                elif button.getAttribute('icon') in wiz_buttons:
                    button_weight += 10
                string = button.getAttribute(
                    'string') or '?%s' % len(buttons)
                buttons.append({
                    'name': button.getAttribute('name'),
                    'string': string,
                    'type': button.getAttribute('type'),
                    'weight': button_weight,
                })
        except Exception, e:
            log.warning(
                "Cannot resolve the view arch and locate the buttons!",
                exc_info=True)
            raise AssertionError(e.args[0])
        if not datas['res_id']:
            # it is probably an orm_memory object, we need to create
            # an instance
            datas['res_id'] = pool.get(datas['res_model']).create(
                cr, uid, view_data, context)
        if not buttons:
            raise AssertionError(
                "view form doesn't have any buttons to press!")
        buttons.sort(key=lambda b: b['weight'])
        log.debug(
            'Buttons are: %s', ', '.join(
                ['%s: %d' % (b['string'], b['weight']) for b in buttons]))
        # Press buttons from heaviest to lightest until one returns a
        # result.
        res = None
        while buttons and not res:
            b = buttons.pop()
            log_test("in the \"%s\" form, I will press the \"%s\" button.",
                     action_name, b['string'])
            if not b['type']:
                log_test("the \"%s\" button has no type, cannot use it",
                         b['string'])
                continue
            if b['type'] == 'object':
                #there we are! press the button!
                fn = getattr(pool.get(datas['res_model']), b['name'])
                if not fn:
                    log.error("The %s model doesn't have a %s attribute!",
                              datas['res_model'], b['name'])
                    continue
                res = fn(cr, uid, [
                    datas['res_id'],
                ], context)
                break
            else:
                log.warning(
                    "in the \"%s\" form, the \"%s\" button has unknown "
                    "type %s",
                    action_name, b['string'], b['type'])
        return res
def action_number(self, cr, uid, ids, context=None):
    """Extend invoice numbering to reconcile stock moves with the
    invoice lines: optionally checks invoiced vs received quantities and
    pushes corrected unit prices back onto the related pickings.
    """
    if context is None:
        context = {}
    res = super(account_invoice, self).action_number(cr, uid, ids,
                                                     context)
    if res:
        _invoice_ids = self.search(cr, uid, [('id', 'in', ids)], context)
        if len(_invoice_ids) > 0:
            ctx = {}
            # Detect whether the landing-costs module is installed by
            # probing for its field on incoming pickings.
            _costs_installed = self.pool.get('stock.picking.in').fields_get(
                cr, uid, ['landing_costs_line_ids'], context) != {}
            _picking_ids = []
            for _invoice in self.browse(cr, uid, _invoice_ids, context):
                # Map the invoice type to the stock flow direction
                # (source/destination location usage) it corresponds to.
                _src_usage = False
                _dst_usage = False
                if _invoice.type == 'in_invoice':
                    _src_usage = 'supplier'
                    _dst_usage = 'internal'
                elif _invoice.type == 'in_refund':
                    _src_usage = 'internal'
                    _dst_usage = 'supplier'
                elif _invoice.type == 'out_invoice':
                    _src_usage = 'internal'
                    _dst_usage = 'customer'
                elif _invoice.type == 'out_refund':
                    _src_usage = 'customer'
                    _dst_usage = 'internal'
                if not _src_usage and not _dst_usage:
                    continue
                for _line in _invoice.invoice_line:
                    _quantity = 0.0
                    _uom_id = False
                    _order_line_ids = []
                    _stock_move_ids = []
                    # Find the done stock moves tied to this invoice line
                    # through the purchase or sale order line.
                    if _src_usage == 'supplier' or _dst_usage == 'supplier':
                        _order_line_ids = self.pool.get(
                            'purchase.order.line').search(
                            cr, uid, [('invoice_lines', '=', _line.id)])
                        _stock_move_ids = self.pool.get('stock.move').search(
                            cr, uid,
                            [('purchase_line_id', 'in', _order_line_ids),
                             ('state', '=', 'done'),
                             ('location_id.usage', '=', _src_usage),
                             ('location_dest_id.usage', '=', _dst_usage)])
                    else:
                        _order_line_ids = self.pool.get(
                            'sale.order.line').search(
                            cr, uid, [('invoice_lines', '=', _line.id)])
                        _stock_move_ids = self.pool.get('stock.move').search(
                            cr, uid,
                            [('sale_line_id', 'in', _order_line_ids),
                             ('state', '=', 'done'),
                             ('location_id.usage', '=', _src_usage),
                             ('location_dest_id.usage', '=', _dst_usage)])
                    if len(_stock_move_ids) > 0:
                        _stock_moves = self.pool.get('stock.move').browse(
                            cr, uid, _stock_move_ids, context)
                        # Optional strict check controlled by a config
                        # parameter (string default 'False' is eval'ed).
                        if safe_eval(self.pool.get(
                                'ir.config_parameter').get_param(
                                cr, uid,
                                'account.check_quantity_on_invoices',
                                'False')):
                            for _stock_move in _stock_moves:
                                _quantity = _quantity + \
                                    _stock_move.product_qty
                                _uom_id = _stock_move.product_uom.id
                            if _line.uos_id.id == _uom_id \
                                    and _quantity != _line.quantity:
                                _text = _line.invoice_id.origin + ' - ' + \
                                    _line.name
                                raise osv.except_osv(
                                    _('Error!'),
                                    _('Invoiced quantity is different '
                                      'than received quantity.\n' + _text))
                        # Push corrected unit prices back to the moves of
                        # supplier flows (landing costs aware when the
                        # module is installed).
                        if _line.uos_id.id == _uom_id and \
                                (_src_usage == 'supplier'
                                 or _dst_usage == 'supplier'):
                            for _stock_move in _stock_moves:
                                _updated = False
                                if _costs_installed:
                                    _has_costs = len(
                                        _stock_move.landing_costs_line_ids
                                    ) <> 0 or len(
                                        _stock_move.picking_id.
                                        landing_costs_line_ids) <> 0
                                    if (_has_costs and
                                        _stock_move.price_unit_without_costs
                                            != _line.price_unit) \
                                        or (not _has_costs and
                                            _stock_move.price_unit
                                            != _line.price_unit):
                                        ctx['move%s' % (_stock_move.id)] = {
                                            'price_unit': _line.price_unit,
                                            'price_unit_without_costs':
                                                False,
                                            'quantity': False}
                                        _updated = True
                                else:
                                    if _stock_move.price_unit != \
                                            _line.price_unit:
                                        ctx['move%s' % (_stock_move.id)] = {
                                            'price_unit': _line.price_unit,
                                            'quantity': False}
                                        _updated = True
                                if _updated:
                                    _picking_ids.append(
                                        _stock_move.picking_id.id)
            # Apply all collected per-move corrections in one pass.
            if _picking_ids:
                self.pool.get('stock.picking')._picking_update(
                    cr, uid, _picking_ids, ctx)
    return res
def extract_properties(self):
    # The function will read all relevant information from the jrxml file:
    # query language, OpenERP-specific properties (relations, copies,
    # header flag), fields, subreports and dataset runs.
    _logger.info('-------mrissa extract_properties ------s ------%s : ',
                 self.report_path)
    doc = etree.parse(self.report_path)
    # Define namespaces
    ns = 'http://jasperreports.sourceforge.net/jasperreports'
    nss = {'jr': ns}
    # Language
    # is XPath.
    lang_tags = doc.xpath('/jr:jasperReport/jr:queryString',
                          namespaces=nss)
    _logger.info(
        '-------mrissa extract_properties ------s -lang_tags-----%s : ',
        lang_tags)
    if lang_tags:
        if lang_tags[0].get('language'):
            self.language = lang_tags[0].get('language').lower()
    # Relations: either a python-list literal or a comma-separated string.
    ex_path = '/jr:jasperReport/jr:property[@name="OPENERP_RELATIONS"]'
    relation_tags = doc.xpath(ex_path, namespaces=nss)
    _logger.info(
        '-------mrissa extract_properties ------s -relation_tags-----%s : ',
        relation_tags)
    if relation_tags and 'value' in relation_tags[0].keys():
        relation = relation_tags[0].get('value').strip()
        if relation.startswith('['):
            self.relations = safe_eval(relation_tags[0].get('value'), {})
        else:
            self.relations = [x.strip() for x in relation.split(',')]
        self.relations = [self.path_prefix + x for x in self.relations]
    if not self.relations and self.path_prefix:
        # Fall back to the prefix itself (without trailing separator).
        self.relations = [self.path_prefix[:-1]]
    # Repeat field
    path1 = '/jr:jasperReport/jr:property[@name="OPENERP_COPIES_FIELD"]'
    copies_field_tags = doc.xpath(path1, namespaces=nss)
    if copies_field_tags and 'value' in copies_field_tags[0].keys():
        self.copies_field = (self.path_prefix
                             + copies_field_tags[0].get('value'))
    # Repeat
    path2 = '/jr:jasperReport/jr:property[@name="OPENERP_COPIES"]'
    copies_tags = doc.xpath(path2, namespaces=nss)
    if copies_tags and 'value' in copies_tags[0].keys():
        self.copies = int(copies_tags[0].get('value'))
    self.is_header = False
    path3 = '/jr:jasperReport/jr:property[@name="OPENERP_HEADER"]'
    header_tags = doc.xpath(path3, namespaces=nss)
    if header_tags and 'value' in header_tags[0].keys():
        self.is_header = True
    field_tags = doc.xpath('/jr:jasperReport/jr:field', namespaces=nss)
    self.fields, self.field_names = self.extract_fields(field_tags, ns)
    # Subreports
    # Here we expect the following structure in the .jrxml file:
    # <subreport>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]>
    #   </dataSourceExpression>
    #   <subreportExpression class="java.lang.String">
    #     <![CDATA[$P{STANDARD_DIR} + "report_header.jasper"]]>
    #   </subreportExpression>
    # </subreport>
    subreport_tags = doc.xpath('//jr:subreport', namespaces=nss)
    for tag in subreport_tags:
        text1 = '{%s}dataSourceExpression'
        data_source_expression = tag.findtext(text1 % ns, '')
        if not data_source_expression:
            continue
        data_source_expression = data_source_expression.strip()
        m = DATA_SOURCE_EXPRESSION_REG_EXP.match(data_source_expression)
        if not m:
            continue
        data_source_expression = m.group(1)
        if data_source_expression == 'REPORT_DATA_SOURCE':
            # Main data source, not a subreport parameter: skip.
            continue
        subreport_expression = tag.findtext(
            '{%s}subreportExpression' % ns, '')
        if not subreport_expression:
            continue
        subreport_expression = subreport_expression.strip()
        # Substitute the directory parameters so the expression becomes
        # a plain python string expression, then evaluate it.
        subreport_expression = (subreport_expression.replace(
            '$P{STANDARD_DIR}', '"%s"' % self.standard_directory()))
        subreport_expression = (subreport_expression.replace(
            '$P{SUBREPORT_DIR}', '"%s"' % self.subreport_directory()))
        try:
            subreport_expression = safe_eval(subreport_expression, {})
        except Exception:
            # Unevaluable expression: ignore this subreport.
            continue
        if subreport_expression.endswith('.jasper'):
            # We parse the source .jrxml, not the compiled .jasper.
            subreport_expression = subreport_expression[:-6] + 'jrxml'
        # Model
        model = ''
        path4 = '//jr:reportElement/jr:property[@name="OPENERP_MODEL"]'
        model_tags = tag.xpath(path4, namespaces=nss)
        if model_tags and 'value' in model_tags[0].keys():
            model = model_tags[0].get('value')
        path_prefix = ''
        pat = '//jr:reportElement/jr:property[@name="OPENERP_PATH_PREFIX"]'
        path_prefix_tags = tag.xpath(pat, namespaces=nss)
        if path_prefix_tags and 'value' in path_prefix_tags[0].keys():
            path_prefix = path_prefix_tags[0].get('value')
        self.is_header = False
        path5 = '//jr:reportElement/jr:property[@name="OPENERP_HEADER"]'
        header_tags = tag.xpath(path5, namespaces=nss)
        if header_tags and 'value' in header_tags[0].keys():
            self.is_header = True
        # Add our own path_prefix to subreport's path_prefix
        sub_prefix = []
        if self.path_prefix:
            sub_prefix.append(self.path_prefix)
        if path_prefix:
            sub_prefix.append(path_prefix)
        sub_prefix = '/'.join(sub_prefix)
        # Recursively parse the subreport file.
        subreport = JasperReport(subreport_expression, sub_prefix)
        self.subreports.append({
            'parameter': data_source_expression,
            'filename': subreport_expression,
            'model': model,
            'pathPrefix': path_prefix,
            'report': subreport,
            'depth': 1,
        })
        for subsub_info in subreport.subreports:
            subsub_info['depth'] += 1
            # Note that 'parameter' (the one used to pass report's
            # DataSource) must be the same in all reports
            self.subreports.append(subsub_info)
    # Dataset
    # Here we expect the following structure in the .jrxml file:
    # <datasetRun>
    #   <dataSourceExpression><![CDATA[$P{REPORT_DATA_SOURCE}]]>
    #   </dataSourceExpression>
    # </datasetRun>
    dataset_tags = doc.xpath('//jr:datasetRun', namespaces=nss)
    for tag in dataset_tags:
        path7 = '{%s}dataSourceExpression'
        data_source_expression = tag.findtext(path7 % ns, '')
        if not data_source_expression:
            continue
        data_source_expression = data_source_expression.strip()
        m = DATA_SOURCE_EXPRESSION_REG_EXP.match(data_source_expression)
        if not m:
            continue
        data_source_expression = m.group(1)
        if data_source_expression == 'REPORT_DATA_SOURCE':
            continue
        sub_dataset_name = tag.get('subDataset')
        if not sub_dataset_name:
            continue
        # Relations
        relations = []
        path8 = '../../jr:reportElement/jr:property \
[@name="OPENERP_RELATIONS"]'
        relation_tags = tag.xpath(path8, namespaces=nss)
        if relation_tags and 'value' in relation_tags[0].keys():
            relation = relation_tags[0].get('value').strip()
            if relation.startswith('['):
                relations = safe_eval(relation_tags[0].get('value'), {})
            else:
                relations = [x.strip() for x in relation.split(',')]
            relations = [self.path_prefix + x for x in relations]
        if not relations and self.path_prefix:
            relations = [self.path_prefix[:-1]]
        # Repeat field
        copies_field = None
        path9 = '../../jr:reportElement/jr:property \
[@name="OPENERP_COPIES_FIELD"]'
        copies_field_tags = tag.xpath(path9, namespaces=nss)
        if copies_field_tags and 'value' in copies_field_tags[0].keys():
            copies_field = \
                self.path_prefix + copies_field_tags[0].get('value')
        # Repeat
        copies = None
        path11 = '../../jr:reportElement/jr:property \
[@name="OPENERP_COPIES"]'
        copies_tags = tag.xpath(path11, namespaces=nss)
        if copies_tags and 'value' in copies_tags[0].keys():
            copies = int(copies_tags[0].get('value'))
        # Model
        model = ''
        path12 = '../../jr:reportElement/jr:property \
[@name="OPENERP_MODEL"]'
        model_tags = tag.xpath(path12, namespaces=nss)
        if model_tags and 'value' in model_tags[0].keys():
            model = model_tags[0].get('value')
        path_prefix = ''
        path13 = '../../jr:reportElement/jr:property \
[@name="OPENERP_PATH_PREFIX"]'
        path_prefix_tags = tag.xpath(path13, namespaces=nss)
        if path_prefix_tags and 'value' in path_prefix_tags[0].keys():
            path_prefix = path_prefix_tags[0].get('value')
        # We need to find the appropriate subDataset definition
        # for this dataset run.
        path14 = '//jr:subDataset[@name="%s"]'
        sub_dataset = doc.xpath(path14 % sub_dataset_name,
                                namespaces=nss)[0]
        field_tags = sub_dataset.xpath('jr:field', namespaces=nss)
        fields, field_names = self.extract_fields(field_tags, ns)
        _logger.info(
            '-------mrissa extract_properties ------s -fields-----%s : ',
            fields)
        _logger.info(
            '-------mrissa extract_properties ------s -field_names-----%s : ',
            field_names)
        _logger.info(
            '-------mrissa extract_properties ------s -relations-----%s : ',
            relations)
        _logger.info(
            '-------mrissa extract_properties ------s -copies_field-----%s : ',
            copies_field)
        _logger.info(
            '-------mrissa extract_properties ------s -copies-----%s : ',
            copies)
        _logger.info(
            '-------mrissa extract_properties ------s -data_source_expression-----%s : ',
            data_source_expression)
        _logger.info(
            '-------mrissa extract_properties ------s -path_prefix-----%s : ',
            path_prefix)
        # Register the dataset run as a pseudo-subreport named DATASET.
        dataset = JasperReport()
        dataset.fields = fields
        dataset.field_names = field_names
        dataset.relations = relations
        dataset.copies_field = copies_field
        dataset.copies = copies
        self.subreports.append({
            'parameter': data_source_expression,
            'model': model,
            'pathPrefix': path_prefix,
            'report': dataset,
            'filename': 'DATASET',
        })
def create_single_pdf(self, cr, uid, ids, data, report_xml, context=None):
    """Wrap the standard PDF rendering and splice terms-and-conditions
    pages into the result according to the matching term.rule records:
    'begin' pages before the report, 'duplex' pages after every report
    page, 'end' pages at the end.
    """
    log = logging.getLogger('agaplan_terms_and_conditions')
    # Render the regular report first.
    res = openerp_create_single_pdf(self, cr, uid, ids, data, report_xml,
                                    context)
    if report_xml.report_type != 'pdf':
        log.warn("report_type was not what we expected (%s) thus we "
                 "return regular result.", report_xml.report_type)
        return res
    pool = pooler.get_pool(cr.dbname)
    # Check conditions to add or not
    rule_obj = pool.get('term.rule')
    if not rule_obj:
        # Module is not installed
        return res
    rule_ids = rule_obj.search(cr, uid, [
        ('report_name', '=', report_xml.report_name),
    ])
    if not len(rule_ids):
        # No conditions should be added, return regular result
        return res
    valid_rules = []
    for rule in rule_obj.browse(cr, uid, rule_ids, context=context):
        log.debug("Checking rule %s for report %s", rule.term_id.name,
                  report_xml.report_name)
        # Company restriction: only applies when the printed record
        # carries a company_id.
        if rule.company_id:
            model_obj = pool.get(data['model']).browse(cr, uid, ids[0],
                                                       context=context)
            if hasattr(model_obj, 'company_id'):
                if rule.company_id.id != model_obj.company_id.id:
                    log.debug("Company id's did not match !")
                    continue
                else:
                    log.debug("Company id's matched !")
        if rule.condition:
            env = {
                'object': pool.get(data['model']).browse(cr, uid, ids[0],
                                                         context=context),
                'report': report_xml,
                'data': data,
                'date': time.strftime('%Y-%m-%d'),
                'time': time,
                'context': context,
            }
            # User has specified a condition, check it and return res
            # when not met
            if not safe_eval(rule.condition, env):
                log.debug("Term condition not met !")
                continue
            else:
                log.debug("Term condition met !")
        valid_rules += [rule]
    # Assemble the final PDF from the rendered report and the term pages.
    output = PdfFileWriter()
    reader = PdfFileReader(StringIO(res[0]))
    for rule in valid_rules:
        if rule.term_id.mode == 'begin':
            att = PdfFileReader(
                StringIO(base64.decodestring(rule.term_id.pdf)))
            map(output.addPage, att.pages)
    for page in reader.pages:
        output.addPage(page)
        # 'duplex' terms are inserted after every single report page.
        for rule in valid_rules:
            if rule.term_id.mode == 'duplex':
                att = PdfFileReader(
                    StringIO(base64.decodestring(rule.term_id.pdf)))
                map(output.addPage, att.pages)
    for rule in valid_rules:
        if rule.term_id.mode == 'end':
            att = PdfFileReader(
                StringIO(base64.decodestring(rule.term_id.pdf)))
            map(output.addPage, att.pages)
    buf = StringIO()
    output.write(buf)
    return (buf.getvalue(), report_xml.report_type)
def do_action(self, cr, uid, action, model_obj, obj, context=None):
    """ Execute the rule *action* on the record *obj*.

    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param action: pass action
    @param model_obj: pass Model object
    @param context: A standard dictionary for contextual values
    """
    if context is None:
        context = {}
    # Run the linked server action with this record set as active.
    if action.server_action_id:
        context.update({
            'active_id': obj.id,
            'active_ids': [obj.id],
            'active_model': obj._name
        })
        self.pool.get('ir.actions.server').run(
            cr, uid, [action.server_action_id.id], context)
    # Collect the direct field updates configured on the rule; column
    # presence is checked through _all_columns (not hasattr) here.
    write = {}
    if 'user_id' in obj._model._all_columns and action.act_user_id:
        obj.user_id = action.act_user_id
        write['user_id'] = action.act_user_id.id
    if 'date_action_last' in obj._model._all_columns:
        write['date_action_last'] = time.strftime('%Y-%m-%d %H:%M:%S')
    if 'state' in obj._model._all_columns and action.act_state:
        obj.state = action.act_state
        write['state'] = action.act_state
    if 'categ_id' in obj._model._all_columns and action.act_categ_id:
        obj.categ_id = action.act_categ_id
        write['categ_id'] = action.act_categ_id.id
    if write:
        model_obj.write(cr, uid, [obj.id], write, context)
    # Optional reminder hooks on the target model.
    if 'remind_user' in obj._model._all_columns and action.act_remind_user:
        model_obj.remind_user(cr, uid, [obj.id], context,
                              attach=action.act_remind_attach)
    if 'remind_partner' in obj._model._all_columns \
            and action.act_remind_partner:
        model_obj.remind_partner(cr, uid, [obj.id], context,
                                 attach=action.act_remind_attach)
    if action.act_method:
        # BUGFIX: the original called getattr(model_obj, 'act_method'),
        # i.e. looked up an attribute literally named "act_method" on the
        # model instead of the method name stored on the rule.
        getattr(model_obj, action.act_method)(cr, uid, [obj.id], action,
                                              context)
    # Build the recipient list from the rule configuration.
    emails = []
    if 'user_id' in obj._model._all_columns and action.act_mail_to_user:
        if obj.user_id:
            emails.append(obj.user_id.user_email)
    if action.act_mail_to_watchers:
        emails += (action.act_email_cc or '').split(',')
    if action.act_mail_to_email:
        emails += (action.act_mail_to_email or '').split(',')
    locals_for_emails = {
        'user': self.pool.get('res.users').browse(cr, uid, uid,
                                                  context=context),
        'obj': obj,
    }
    if action.act_email_to:
        emails.append(safe_eval(action.act_email_to, {},
                                locals_for_emails))
    emails = filter(None, emails)
    if len(emails) and action.act_mail_body:
        emails = list(set(emails))  # de-duplicate recipients
        email_from = safe_eval(action.act_email_from, {},
                               locals_for_emails)

        def to_email(text):
            # Extract bare addresses from "Name <addr>" style strings.
            return re.findall(r'([^ ,<@]+@[^> ,]+)', text or '')
        emails = to_email(','.join(filter(None, emails)))
        email_froms = to_email(email_from)
        if email_froms:
            self.email_send(cr, uid, obj, emails, action.act_mail_body,
                            emailfrom=email_froms[0])
    return True
def _exec_action(action, datas, context):
    # Emulate the GTK client's execution of a window action, then "press"
    # the most plausible button of the resulting form view.
    # taken from client/modules/action/main.py:84 _exec_action()
    # NOTE(review): relies on names from an enclosing scope: pool, cr, uid,
    # wiz_data, wiz_buttons, log, log_test — this function is presumably
    # defined inside a test/wizard driver; confirm against the full file.
    if isinstance(action, bool) or 'type' not in action:
        # Nothing actionable returned (e.g. a button returned True/False).
        return
    # Updating the context : Adding the context of action in order to use it on Views called from buttons
    if datas.get('id',False):
        context.update(
            {'active_id': datas.get('id',False),
             'active_ids': datas.get('ids',[]),
             'active_model': datas.get('model',False)})
    # Merge the action's own (string) context, evaluated against a copy of
    # the current context.
    context.update(safe_eval(action.get('context','{}'), context.copy()))
    if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
        # Pull the window-action parameters into datas, keeping any value
        # already present there as the fallback.
        for key in ('res_id', 'res_model', 'view_type', 'view_mode',
                    'limit', 'auto_refresh', 'search_view', 'auto_search',
                    'search_view_id'):
            datas[key] = action.get(key, datas.get(key, None))
        # Resolve which view to load: prefer the first entry of 'views',
        # otherwise fall back to 'view_id'.
        view_id = False
        if action.get('views', []):
            if isinstance(action['views'],list):
                view_id = action['views'][0][0]
                datas['view_mode']= action['views'][0][1]
            else:
                if action.get('view_id', False):
                    view_id = action['view_id'][0]
        elif action.get('view_id', False):
            view_id = action['view_id'][0]
        assert datas['res_model'], "Cannot use the view without a model"
        # Here, we have a view that we need to emulate
        log_test("will emulate a %s view: %s#%s",
                 action['view_type'], datas['res_model'], view_id or '?')
        view_res = pool.get(datas['res_model']).fields_view_get(
            cr, uid, view_id, action['view_type'], context)
        assert view_res and view_res.get('arch'), \
            "Did not return any arch for the view"
        # Build the record values the form would show: model defaults,
        # then the caller-provided form data, then the wizard data.
        view_data = {}
        if view_res.get('fields',{}).keys():
            view_data = pool.get(datas['res_model']).default_get(
                cr, uid, view_res['fields'].keys(), context)
        if datas.get('form'):
            view_data.update(datas.get('form'))
        if wiz_data:
            view_data.update(wiz_data)
        log.debug("View data is: %r", view_data)
        for fk, field in view_res.get('fields',{}).items():
            # Default fields returns list of int, while at create()
            # we need to send a [(6,0,[int,..])]
            if field['type'] in ('one2many', 'many2many') \
                    and view_data.get(fk, False) \
                    and isinstance(view_data[fk], list) \
                    and not isinstance(view_data[fk][0], tuple) :
                view_data[fk] = [(6, 0, view_data[fk])]
        # Parse the view arch and collect its buttons, scoring each one so
        # the "most likely to be the confirm button" is tried first:
        # default_focus (+20), a string listed in wiz_buttons (+30),
        # an icon listed in wiz_buttons (+10); cancel buttons are skipped.
        action_name = action.get('name')
        try:
            from xml.dom import minidom
            cancel_found = False
            buttons = []
            dom_doc = minidom.parseString(view_res['arch'])
            if not action_name:
                action_name = dom_doc.documentElement.getAttribute('name')
            for button in dom_doc.getElementsByTagName('button'):
                button_weight = 0
                if button.getAttribute('special') == 'cancel':
                    cancel_found = True
                    continue
                if button.getAttribute('icon') == 'gtk-cancel':
                    cancel_found = True
                    continue
                if button.getAttribute('default_focus') == '1':
                    button_weight += 20
                if button.getAttribute('string') in wiz_buttons:
                    button_weight += 30
                elif button.getAttribute('icon') in wiz_buttons:
                    button_weight += 10
                string = button.getAttribute('string') or '?%s' % len(buttons)
                buttons.append(
                    {'name': button.getAttribute('name'),
                     'string': string,
                     'type': button.getAttribute('type'),
                     'weight': button_weight,})
        except Exception, e:
            log.warning("Cannot resolve the view arch and locate the buttons!",
                        exc_info=True)
            raise AssertionError(e.args[0])
        if not datas['res_id']:
            # it is probably an orm_memory object, we need to create
            # an instance
            datas['res_id'] = pool.get(datas['res_model']).create(
                cr, uid, view_data, context)
        if not buttons:
            raise AssertionError("view form doesn't have any buttons to press!")
        # Ascending sort + pop() from the end => try the heaviest button first.
        buttons.sort(key=lambda b: b['weight'])
        log.debug('Buttons are: %s',
                  ', '.join(['%s: %d' % (b['string'], b['weight'])
                             for b in buttons]))
        res = None
        while buttons and not res:
            b = buttons.pop()
            log_test("in the \"%s\" form, I will press the \"%s\" button.",
                     action_name, b['string'])
            if not b['type']:
                log_test("the \"%s\" button has no type, cannot use it",
                         b['string'])
                continue
            if b['type'] == 'object':
                #there we are! press the button!
                fn = getattr(pool.get(datas['res_model']), b['name'])
                if not fn:
                    log.error("The %s model doesn't have a %s attribute!",
                              datas['res_model'], b['name'])
                    continue
                res = fn(cr, uid, [datas['res_id'],], context)
                break
            else:
                log.warning(
                    "in the \"%s\" form, the \"%s\" button has unknown type %s",
                    action_name, b['string'], b['type'])
        return res