Example #1
 def get_google_drive_config(self, res_model, res_id):
     '''
     Called by the JS client when no Google doc is associated with a record yet, in order to create one. It
     first looks for a google.docs.config associated with the model `res_model` to find out which Google doc
     template to copy (this is useful if you want to start with a non-empty document, or with a type or a name
     different from the default values). If no config is associated with `res_model`, a blank text document
     with a default name is created.
       :param res_model: the model for which the google doc is created
       :param res_id: the id of the record for which the google doc is created. Only one record at a time is
         supported (batch processing is not implemented, though nothing really prevents it)
       :return: a list of dicts holding the id and name of each matching config
     '''
     # TODO in master: fix my signature and my model
     if isinstance(res_model, str):
         res_model = self.env['ir.model'].search([('model', '=', res_model)]).id
     if not res_id:
         raise UserError(_("Creating google drive may only be done by one at a time."))
     # check if a model is configured with a template
     configs = self.search([('model_id', '=', res_model)])
     config_values = []
     for config in configs.sudo():
         if config.filter_id:
             if config.filter_id.user_id and config.filter_id.user_id.id != self.env.user.id:
                 #Private
                 continue
             domain = [('id', 'in', [res_id])] + safe_eval(config.filter_id.domain)
             additional_context = safe_eval(config.filter_id.context)
             google_doc_configs = self.env[config.filter_id.model_id].with_context(**additional_context).search(domain)
             if google_doc_configs:
                 config_values.append({'id': config.id, 'name': config.name})
         else:
             config_values.append({'id': config.id, 'name': config.name})
     return config_values
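
The method above relies on ir.filters storing its domain and context as strings. A minimal standalone sketch of that conversion, assuming the `odoo` package is importable; the stored strings below are made-up placeholders standing in for `config.filter_id.domain` and `config.filter_id.context`:

    from odoo.tools.safe_eval import safe_eval

    # The filter fields hold Python literals as text; safe_eval turns them back
    # into real Python objects before they are used in a search().
    stored_domain = "[('user_id', '=', 1)]"
    stored_context = "{'lang': 'en_US'}"

    domain = [('id', 'in', [42])] + safe_eval(stored_domain)   # [('id', 'in', [42]), ('user_id', '=', 1)]
    context = safe_eval(stored_context)                        # {'lang': 'en_US'}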
Example #2
 def run(self, qid, uid, set_state=True):
     Env, reg = api.Environment, registry(self.env.cr.dbname)
     with Env.manage(), reg.cursor() as crc, reg.cursor() as crj:
         control_env = Env(crc, SUPERUSER_ID, {})
         job_env = Env(crj, uid, {})
         # Load queue in a dedicated environment, dedicated to update
         # queue and steps states with explicit commits, outside
         # the job transaction.
         queue = control_env[self._name].browse(qid)
         if set_state:
             queue.state = 'running'
         try:
             getattr(job_env[queue.model], queue.method)(
                 queue, *safe_eval(queue.args))
         except Exception:
             crj.rollback()
             queue.write(
                 {'state': 'failed', 'error_log': traceback.format_exc()})
             if queue.failed_method:
                 getattr(job_env[queue.model], queue.failed_method)(
                     queue, *safe_eval(queue.args))
         else:
             crc.commit()
             crj.commit()
             queue.write({'state': 'done'})
         finally:
             crc.commit()
             crj.commit()
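
The queue pattern above stores the method name and its arguments as text on the record and rebuilds the call at run time with getattr plus safe_eval. A minimal standalone sketch of that dispatch, with a dummy class standing in for the job model (the names here are illustrative, not part of the module above):

    from odoo.tools.safe_eval import safe_eval

    class DummyJobModel:
        def reindex(self, queue, *args):
            # The queue record is always passed first, then the stored arguments.
            print("reindex called with", args)

    queue_method = 'reindex'        # would come from queue.method
    queue_args = "(42, 'full')"     # would come from queue.args
    getattr(DummyJobModel(), queue_method)(None, *safe_eval(queue_args))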
Example #3
 def _compute_rule(self, localdict):
     """
     :param localdict: dictionary containing the environment in which to compute the rule
     :return: a tuple built as (base/amount computed, quantity, rate)
     :rtype: (float, float, float)
     """
     self.ensure_one()
     if self.amount_select == 'fix':
         try:
             return self.amount_fix, float(safe_eval(self.quantity, localdict)), 100.0
         except Exception as e:
             raise UserError(_('Wrong quantity defined for salary rule %s (%s).\nError: %s') % (self.name, self.code, e))
     elif self.amount_select == 'percentage':
         try:
             return (float(safe_eval(self.amount_percentage_base, localdict)),
                     float(safe_eval(self.quantity, localdict)),
                     self.amount_percentage)
         except Exception as e:
             raise UserError(_('Wrong percentage base or quantity defined for salary rule %s (%s).\nError: %s') % (self.name, self.code, e))
     else:
         try:
             safe_eval(self.amount_python_compute, localdict, mode='exec', nocopy=True)
             return float(localdict['result']), 'result_qty' in localdict and localdict['result_qty'] or 1.0, 'result_rate' in localdict and localdict['result_rate'] or 100.0
         except Exception as e:
             raise UserError(_('Wrong python code defined for salary rule %s (%s).\nError: %s') % (self.name, self.code, e))
Example #4
    def _stripe_s2s_validate_tree(self, tree):
        self.ensure_one()
        if self.state not in ('draft', 'pending'):
            _logger.info('Stripe: trying to validate an already validated tx (ref %s)', self.reference)
            return True

        status = tree.get('status')
        if status == 'succeeded':
            self.write({
                'state': 'done',
                'date_validate': fields.datetime.now(),
                'acquirer_reference': tree.get('id'),
            })
            if self.callback_eval:
                safe_eval(self.callback_eval, {'self': self})
            return True
        else:
            error = tree['error']['message']
            _logger.warn(error)
            self.sudo().write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': tree.get('id'),
                'date_validate': fields.datetime.now(),
            })
            return False
Example #5
File: utils.py Project: ADS101/odoo
def _child_get(node, self=None, tagname=None):
    for n in node:
        if self and self.localcontext and n.get('rml_loop'):

            for ctx in safe_eval(n.get('rml_loop'),{}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag==tagname):
                    if n.get('rml_except', False):
                        try:
                            safe_eval(n.get('rml_except'), {}, self.localcontext)
                        except GeneratorExit:
                            continue
                        except Exception as e:
                            _logger.info('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
                            continue
                    if n.get('rml_tag'):
                        try:
                            (tag,attr) = safe_eval(n.get('rml_tag'),{}, self.localcontext)
                            n2 = copy.deepcopy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except GeneratorExit:
                            yield n
                        except Exception as e:
                            _logger.info('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
                            yield n
                    else:
                        yield n
            continue
Example #6
 def get_express_menu(self):
     expressconfigparam = self.env['ir.config_parameter']
     appid = expressconfigparam.get_param('express_menu_app_id', default='')
     appkey = expressconfigparam.get_param('express_menu_app_key', default='')
     path = expressconfigparam.get_param('express_menu_oder_url', default='')
     header = safe_eval(expressconfigparam.get_param('express_menu_request_headers',
                                                     default=''))
     order_code = self.name
     sender = self.get_sender(self.warehouse_id, self.pakge_sequence)
     remark = self.note or '小心轻放'
     shipping_type = self.express_type or 'YTO'
     receiver, commodity, qty = self.get_receiver_goods_message()
     request_data = dict(OrderCode=order_code, PayType=1, ExpType=1, Cost=1.0, OtherCost=1.0,
                         Sender=sender, Receiver=receiver, Commodity=commodity, Weight=1.0,
                         Quantity=qty, Volume=0.0, Remark=remark, IsReturnPrintTemplate=1)
     request_data.update(self.get_shipping_type_config(shipping_type))
     request_data = json.dumps(request_data)
     data = {'RequestData': request_data,
             'EBusinessID': appid,
             'RequestType': '1007',
             'DataType': '2',
             'DataSign': self.encrypt_kdn(request_data, appkey)}
     http = httplib2.Http()
     response, content = http.request(path, 'POST', headers=header, body=urllib.urlencode(data))
     content = content.replace('true', 'True').replace('false', 'False')
     self.express_code = (safe_eval(content).get('Order', {})).get('LogisticCode', "")
     self.express_menu = str(safe_eval(content).get('PrintTemplate'))
     if not self.express_code:
         raise UserError("获取快递面单失败!\n原因:%s"%str(content))
     return str(safe_eval(content).get('PrintTemplate'))
Example #7
File: res_config.py Project: CobooGuo/odoo
 def get_default_auth_signup_template_user_id(self, fields):
     IrConfigParam = self.env["ir.config_parameter"]
     # the parameters are stored as strings, so safe_eval converts them back to Python values (bool or user id)
     return {
         "auth_signup_reset_password": safe_eval(IrConfigParam.get_param("auth_signup.reset_password", "False")),
         "auth_signup_uninvited": safe_eval(IrConfigParam.get_param("auth_signup.allow_uninvited", "False")),
         "auth_signup_template_user_id": safe_eval(IrConfigParam.get_param("auth_signup.template_user_id", "False")),
     }
Example #8
File: res_config.py Project: Choumy/odoo
 def get_default_auth_signup_template_user_id(self, fields):
     get_param = self.env['ir.config_parameter'].get_param
     # the parameters are stored as strings, so safe_eval converts them back to Python values (bool or user id)
     return {
         'auth_signup_reset_password': safe_eval(get_param('auth_signup.reset_password', 'False')),
         'auth_signup_uninvited': safe_eval(get_param('auth_signup.allow_uninvited', 'False')),
         'auth_signup_template_user_id': safe_eval(get_param('auth_signup.template_user_id', 'False')),
     }
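
Both variants rely on the fact that ir.config_parameter values are plain strings: safe_eval turns "True"/"False" back into booleans and a stored id such as "5" back into an integer. A minimal sketch, assuming the `odoo` package is importable:

    from odoo.tools.safe_eval import safe_eval

    safe_eval('False')   # -> False (the default used when the parameter is unset)
    safe_eval('True')    # -> True
    safe_eval('5')       # -> 5, usable as auth_signup_template_user_id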
Example #9
File: account_tax.py Project: ADS101/odoo
 def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):
     self.ensure_one()
     if self.amount_type == 'code':
         company = self.env.user.company_id
         localdict = {'base_amount': base_amount, 'price_unit':price_unit, 'quantity': quantity, 'product':product, 'partner':partner, 'company': company}
         safe_eval(self.python_compute, localdict, mode="exec", nocopy=True)
         return localdict['result']
     return super(AccountTaxPython, self)._compute_amount(base_amount, price_unit, quantity, product, partner)
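
The 'code' amount type executes an arbitrary snippet in exec mode and reads the result back from the shared dictionary; nocopy=True is what prevents safe_eval from copying the dict, so the assignment stays visible afterwards. A minimal sketch of that mechanism with a made-up formula:

    from odoo.tools.safe_eval import safe_eval

    localdict = {'base_amount': 100.0, 'price_unit': 20.0, 'quantity': 5.0}
    safe_eval("result = base_amount * 0.07", localdict, mode="exec", nocopy=True)
    localdict['result']   # -> 7.0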
Example #10
File: custom.py Project: ADS101/odoo
 def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None):
     result = []
     for obj in objs:
         tobreak = False
         for cond in conditions:
             if cond and cond[0]:
                 c = cond[0]
                 temp = c[0](safe_eval('obj.'+c[1],{'obj': obj}))
                 if not safe_eval('\''+temp+'\''+' '+c[2]+' '+'\''+str(c[3])+'\''):
                     tobreak = True
         if tobreak:
             break
         levels = {}
         row = []
         for i in range(len(fields)):
             if not fields[i]:
                 row.append(row_canvas and row_canvas[i])
                 if row_canvas[i]:
                     row_canvas[i]=False
             elif len(fields[i])==1:
                 if obj:
                     row.append(str(safe_eval('obj.'+fields[i][0],{'obj': obj})))
                 else:
                     row.append(None)
             else:
                 row.append(None)
                 levels[fields[i][0]]=True
         if not levels:
             result.append(row)
         else:
             # Process group_by data first
             key = []
             if group_by is not None and fields[group_by] is not None:
                 if fields[group_by][0] in levels.keys():
                     key.append(fields[group_by][0])
                 for l in levels.keys():
                     if l != fields[group_by][0]:
                         key.append(l)
             else:
                 key = levels.keys()
             for l in key:
                 objs = safe_eval('obj.'+l,{'obj': obj})
                 if not isinstance(objs, (BaseModel, list)):
                     objs = [objs]
                 field_new = []
                 cond_new = []
                 for f in range(len(fields)):
                     if (fields[f] and fields[f][0])==l:
                         field_new.append(fields[f][1:])
                         cond_new.append(conditions[f][1:])
                     else:
                         field_new.append(None)
                         cond_new.append(None)
                 if len(objs):
                     result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by)
                 else:
                     result.append(row)
     return result 
Example #11
File: account_tax.py Project: ADS101/odoo
 def compute_all(self, price_unit, currency=None, quantity=1.0, product=None, partner=None):
     taxes = self.env['account.tax']
     company = self.env.user.company_id
     for tax in self:
         localdict = {'price_unit': price_unit, 'quantity': quantity, 'product': product, 'partner': partner, 'company': company}
         safe_eval(tax.python_applicable, localdict, mode="exec", nocopy=True)
         if localdict.get('result', False):
             taxes += tax
     return super(AccountTaxPython, taxes).compute_all(price_unit, currency, quantity, product, partner)
Example #12
File: account_tax.py Project: 10537/odoo
 def compute_all(self, price_unit, currency=None, quantity=1.0, product=None, partner=None):
     taxes = self.filtered(lambda r: r.amount_type != 'code')
     company = self.env.user.company_id
     for tax in self.filtered(lambda r: r.amount_type == 'code'):
         localdict = self._context.get('tax_computation_context', {})
         localdict.update({'price_unit': price_unit, 'quantity': quantity, 'product': product, 'partner': partner, 'company': company})
         safe_eval(tax.python_applicable, localdict, mode="exec", nocopy=True)
         if localdict.get('result', False):
             taxes += tax
     return super(AccountTaxPython, taxes).compute_all(price_unit, currency, quantity, product, partner)
Example #13
File: payment.py Project: ADS101/odoo
    def _ogone_s2s_validate_tree(self, tree, tries=2):
        if self.state not in ('draft', 'pending'):
            _logger.info('Ogone: trying to validate an already validated tx (ref %s)', self.reference)
            return True

        status = int(tree.get('STATUS') or 0)
        if status in self._ogone_valid_tx_status:
            self.write({
                'state': 'done',
                'date_validate': datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT),
                'acquirer_reference': tree.get('PAYID'),
            })
            if tree.get('ALIAS') and self.partner_id and \
               (self.type == 'form_save' or self.acquirer_id.save_token == 'always')\
               and not self.payment_token_id:
                pm = self.env['payment.token'].create({
                    'partner_id': self.partner_id.id,
                    'acquirer_id': self.acquirer_id.id,
                    'acquirer_ref': tree.get('ALIAS'),
                    'name': tree.get('CARDNO'),
                })
                self.write({'payment_token_id': pm.id})
            if self.callback_eval:
                safe_eval(self.callback_eval, {'self': self})
            return True
        elif status in self._ogone_cancel_tx_status:
            self.write({
                'state': 'cancel',
                'acquirer_reference': tree.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status:
            self.write({
                'state': 'pending',
                'acquirer_reference': tree.get('PAYID'),
                'html_3ds': str(tree.HTML_ANSWER).decode('base64')
            })
        elif status in self._ogone_wait_tx_status and tries > 0:
            time.sleep(0.5)
            self.write({'acquirer_reference': tree.get('PAYID')})
            tree = self._ogone_s2s_get_tx_status(self)
            return self._ogone_s2s_validate_tree(self, tree, tries - 1)
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': tree.get('NCERRORPLUS'),
                'error_code': tree.get('NCERROR'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(tree.get('NCERROR')),
            }
            _logger.info(error)
            self.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': tree.get('PAYID'),
            })
            return False
Example #14
 def _filter_post(self, records):
     """ Filter the records that satisfy the postcondition of action ``self``. """
     if self.filter_id and records:
         domain = [('id', 'in', records.ids)] + safe_eval(self.filter_id.domain, self._get_eval_context())
         ctx = safe_eval(self.filter_id.context)
         return records.with_context(**ctx).search(domain).with_env(records.env)
     elif self.filter_domain and records:
         domain = [('id', 'in', records.ids)] + safe_eval(self.filter_domain, self._get_eval_context())
         return records.search(domain)
     else:
         return records
Example #15
    def _check(self, automatic=False, use_new_cursor=False):
        """ This Function is called by scheduler. """
        if '__action_done' not in self._context:
            self = self.with_context(__action_done={})

        # retrieve all the action rules to run based on a timed condition
        eval_context = self._get_eval_context()
        for action in self.with_context(active_test=True).search([('kind', '=', 'on_time')]):
            last_run = fields.Datetime.from_string(action.last_run) or datetime.datetime.utcfromtimestamp(0)

            # retrieve all the records that satisfy the action's condition
            domain = []
            context = dict(self._context)
            if action.filter_domain:
                domain = safe_eval(action.filter_domain, eval_context)
            elif action.filter_id:
                domain = safe_eval(action.filter_id.domain, eval_context)
                context.update(safe_eval(action.filter_id.context))
                if 'lang' not in context:
                    # Filters might be language-sensitive, attempt to reuse creator lang
                    # as we are usually running this as super-user in background
                    filter_meta = action.filter_id.get_metadata()[0]
                    user_id = (filter_meta['write_uid'] or filter_meta['create_uid'])[0]
                    context['lang'] = self.env['res.users'].browse(user_id).lang
            records = self.env[action.model].with_context(context).search(domain)

            # determine when action should occur for the records
            if action.trg_date_id.name == 'date_action_last' and 'create_date' in records._fields:
                get_record_dt = lambda record: record[action.trg_date_id.name] or record.create_date
            else:
                get_record_dt = lambda record: record[action.trg_date_id.name]

            # process action on the records that should be executed
            now = datetime.datetime.now()
            for record in records:
                record_dt = get_record_dt(record)
                if not record_dt:
                    continue
                action_dt = self._check_delay(action, record, record_dt)
                if last_run <= action_dt < now:
                    try:
                        action._process(record)
                    except Exception:
                        _logger.error(traceback.format_exc())

            action.write({'last_run': fields.Datetime.now()})

            if automatic:
                # auto-commit for batch processing
                self._cr.commit()
Example #16
File: payment.py Project: ADS101/odoo
    def _ogone_form_validate(self, data):
        if self.state == 'done':
            _logger.info('Ogone: trying to validate an already validated tx (ref %s)', self.reference)
            return True

        status = int(data.get('STATUS', '0'))
        if status in self._ogone_valid_tx_status:
            vals = {
                'state': 'done',
                'date_validate': datetime.datetime.strptime(data['TRXDATE'], '%m/%d/%y').strftime(DEFAULT_SERVER_DATE_FORMAT),
                'acquirer_reference': data['PAYID'],
            }
            if data.get('ALIAS') and self.partner_id and \
               (self.type == 'form_save' or self.acquirer_id.save_token == 'always')\
               and not self.payment_token_id:
                pm = self.env['payment.token'].create({
                    'partner_id': self.partner_id.id,
                    'acquirer_id': self.acquirer_id.id,
                    'acquirer_ref': data.get('ALIAS'),
                    'name': '%s - %s' % (data.get('CARDNO'), data.get('CN'))
                })
                vals.update(payment_token_id=pm.id)
            self.write(vals)
            if self.callback_eval:
                safe_eval(self.callback_eval, {'self': self})
            return True
        elif status in self._ogone_cancel_tx_status:
            self.write({
                'state': 'cancel',
                'acquirer_reference': data.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status or status in self._ogone_wait_tx_status:
            self.write({
                'state': 'pending',
                'acquirer_reference': data.get('PAYID'),
            })
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': data.get('NCERRORPLUS'),
                'error_code': data.get('NCERROR'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(data.get('NCERROR')),
            }
            _logger.info(error)
            self.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': data.get('PAYID'),
            })
            return False
Example #17
 def _authorize_s2s_validate(self, tree):
     self.ensure_one()
     if self.state == 'done':
         _logger.warning('Authorize: trying to validate an already validated tx (ref %s)' % self.reference)
         return True
     status_code = int(tree.get('x_response_code', '0'))
     if status_code == self._authorize_valid_tx_status:
         if tree.get('x_type').lower() in ['auth_capture', 'prior_auth_capture']:
             init_state = self.state
             self.write({
                 'state': 'done',
                 'acquirer_reference': tree.get('x_trans_id'),
                 'date_validate': fields.Datetime.now(),
             })
             if self.callback_eval and init_state != 'authorized':
                 safe_eval(self.callback_eval, {'self': self})
         if tree.get('x_type').lower() == 'auth_only':
             self.write({
                 'state': 'authorized',
                 'acquirer_reference': tree.get('x_trans_id'),
             })
             if self.callback_eval:
                 safe_eval(self.callback_eval, {'self': self})
         if tree.get('x_type').lower() == 'void':
             self.write({
                 'state': 'cancel',
             })
         return True
     elif status_code == self._authorize_pending_tx_status:
         self.write({
             'state': 'pending',
             'acquirer_reference': tree.get('x_trans_id'),
         })
         return True
     elif status_code == self._authorize_cancel_tx_status:
         self.write({
             'state': 'cancel',
             'acquirer_reference': tree.get('x_trans_id'),
         })
         return True
     else:
         error = tree.get('x_response_reason_text')
         _logger.info(error)
         self.write({
             'state': 'error',
             'state_message': error,
             'acquirer_reference': tree.get('x_trans_id'),
         })
         return False
Example #18
 def _filter_post_export_domain(self, records):
     """ Filter the records that satisfy the postcondition of action ``self``. """
     if self.filter_domain and records:
         domain = [('id', 'in', records.ids)] + safe_eval(self.filter_domain, self._get_eval_context())
         return records.search(domain), domain
     else:
         return records, None
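
Here the stored filter_domain is evaluated against an eval context, so the stored text can reference helper names (uid, datetime, ...) rather than only literals. A minimal sketch with a hand-built context in place of self._get_eval_context(); the domain string is illustrative:

    from odoo.tools.safe_eval import safe_eval

    eval_context = {'uid': 7}                       # _get_eval_context() would provide uid, user, datetime, ...
    stored_domain = "[('user_id', '=', uid)]"       # illustrative filter_domain value
    domain = [('id', 'in', [1, 2, 3])] + safe_eval(stored_domain, eval_context)
    # -> [('id', 'in', [1, 2, 3]), ('user_id', '=', 7)]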
Example #19
File: ir_actions.py Project: fabianpc/odoo
    def action_launch(self):
        """ Launch Action of Wizard"""
        self.ensure_one()

        self.write({'state': 'done'})

        # Load action
        action_type = self.action_id.type
        action = self.env[action_type].browse(self.action_id.id)

        result = action.read()[0]
        if action_type != 'ir.actions.act_window':
            return result
        result.setdefault('context', '{}')

        # Open a specific record when res_id is provided in the context
        ctx = safe_eval(result['context'], {'user': self.env.user})
        if ctx.get('res_id'):
            result['res_id'] = ctx.pop('res_id')

        # disable log for automatic wizards
        ctx['disable_log'] = True

        result['context'] = ctx

        return result
Example #20
File: mail_alias.py Project: microcom/odoo
 def _check_alias_defaults(self):
     try:
         dict(safe_eval(self.alias_defaults))
     except Exception:
         raise ValidationError(
             _("Invalid expression, it must be a literal python dictionary definition e.g. \"{'field': 'value'}\"")
         )
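
The constraint accepts only something that dict() can consume, which in practice means a literal dictionary string. A minimal sketch of both outcomes, outside of any Odoo record, assuming the `odoo` package is importable:

    from odoo.tools.safe_eval import safe_eval

    dict(safe_eval("{'type': 'lead', 'team_id': 3}"))   # ok -> {'type': 'lead', 'team_id': 3}

    try:
        dict(safe_eval("42"))                            # not a mapping
    except TypeError:
        pass                                             # the constraint turns this into a ValidationError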
Example #21
File: crm_team.py Project: Murphyx2/odoo
    def action_your_pipeline(self):
        action = self.env.ref('crm.crm_lead_opportunities_tree_view').read()[0]
        user_team_id = self.env.user.sale_team_id.id
        if not user_team_id:
            user_team_id = self.search([], limit=1).id
            action['help'] = _("""<p class='o_view_nocontent_smiling_face'>Add new opportunities</p><p>
    Looks like you are not a member of a sales channel. You should add yourself
    as a member of one of the sales channels.
</p>""")
            if user_team_id:
                action['help'] += "<p>As you don't belong to any sales channel, Odoo opens the first one by default.</p>"

        action_context = safe_eval(action['context'], {'uid': self.env.uid})
        if user_team_id:
            action_context['default_team_id'] = user_team_id

        tree_view_id = self.env.ref('crm.crm_case_tree_view_oppor').id
        form_view_id = self.env.ref('crm.crm_case_form_view_oppor').id
        kanb_view_id = self.env.ref('crm.crm_case_kanban_view_leads').id
        action['views'] = [
                [kanb_view_id, 'kanban'],
                [tree_view_id, 'tree'],
                [form_view_id, 'form'],
                [False, 'graph'],
                [False, 'calendar'],
                [False, 'pivot']
            ]
        action['context'] = action_context
        return action
Example #22
File: crm_team.py Project: Vauxoo/odoo
 def get_alias_values(self):
     has_group_use_lead = self.env.user.has_group('crm.group_use_lead')
     values = super(Team, self).get_alias_values()
     values['alias_defaults'] = defaults = safe_eval(self.alias_defaults or "{}")
     defaults['type'] = 'lead' if has_group_use_lead  else 'opportunity'
     defaults['team_id'] = self.id
     return values   
Example #23
    def __add_approver__(self, thread_row, model_name, active_id):
        # TODO: also add the current user's department manager
        approver_rows = []
        users = []
        process_rows = self.env['good_process.process'].search([('model_id.model', '=', model_name),
                                                                ('type', '=', getattr(thread_row, 'type', False))],
                                                               order='sequence')
        process_row = False
        for process in process_rows:
            domain = [('id', '=', active_id)]
            if process.applicable_domain:
                domain += safe_eval(process.applicable_domain)
            if self.env[model_name].search(domain):
                process_row = process
                break
        if not process_row:
            return []

        groups = self.__get_groups__(process_row)
        department_manager = self.__get_user_manager__(
            thread_row, process_row)
        if department_manager:
            users.append((department_manager, 0, False))
        users.extend(self.__get_users__(groups))
        [approver_rows.append(self.env['good_process.approver'].create(
            {'user_id': user.id,
             'res_id': thread_row.id,
             'model_type': thread_row._description,
             'record_name': getattr(thread_row, 'name', ''),
             'creator': thread_row.create_uid.id,
             'sequence': sequence,
             'group_id': groud_id,
             'model': thread_row._name})) for user, sequence, groud_id in users]
        return [{'id': row.id, 'display_name': row.user_id.name} for row in approver_rows]
Example #24
    def action_view_task(self):
        self.ensure_one()

        list_view_id = self.env.ref('project.view_task_tree2').id
        form_view_id = self.env.ref('project.view_task_form2').id

        action = {'type': 'ir.actions.act_window_close'}

        task_projects = self.tasks_ids.mapped('project_id')
        if len(task_projects) == 1 and len(self.tasks_ids) > 1:  # redirect to task of the project (with kanban stage, ...)
            action = self.env.ref('project.act_project_project_2_project_task_all').read()[0]
            if action.get('context'):
                eval_context = self.env['ir.actions.actions']._get_eval_context()
                eval_context.update({'active_id': task_projects.id})
                action['context'] = safe_eval(action['context'], eval_context)
        else:
            action = self.env.ref('project.action_view_task').read()[0]
            action['context'] = {}  # erase default context to avoid default filter
            if len(self.tasks_ids) > 1:  # cross project kanban task
                action['views'] = [[False, 'kanban'], [list_view_id, 'tree'], [form_view_id, 'form'], [False, 'graph'], [False, 'calendar'], [False, 'pivot'], [False, 'graph']]
            elif len(self.tasks_ids) == 1:  # single task -> form view
                action['views'] = [(form_view_id, 'form')]
                action['res_id'] = self.tasks_ids.id
        # filter on the task of the current SO
        action.setdefault('context', {})
        action['context'].update({'search_default_sale_order_id': self.id})
        return action
Example #25
    def postprocess_pdf_report(self, record, buffer):
        '''Hook to handle post-processing during the pdf report generation.
        The default behavior is to create a new attachment containing the pdf,
        base64 encoded.

        :param record: The record that will own the attachment.
        :param buffer: The buffer holding the rendered pdf content, to avoid reading it twice.
        :return: The newly generated attachment if no AccessError, else None.
        '''
        attachment_name = safe_eval(self.attachment, {'object': record, 'time': time})
        if not attachment_name:
            return None
        attachment_vals = {
            'name': attachment_name,
            'datas': base64.encodestring(buffer.getvalue()),
            'datas_fname': attachment_name,
            'res_model': self.model,
            'res_id': record.id,
        }
        attachment = None
        try:
            attachment = self.env['ir.attachment'].create(attachment_vals)
        except AccessError:
            _logger.info("Cannot save PDF report %r as attachment", attachment_vals['name'])
        else:
            _logger.info('The PDF document %s is now saved in the database', attachment_vals['name'])
        return attachment
Example #26
File: ir_rule.py Project: qq470647251/odoo
 def _force_domain(self):
     eval_context = self._eval_context()
     for rule in self:
         if rule.domain_force:
             rule.domain = expression.normalize_domain(safe_eval(rule.domain_force, eval_context))
         else:
             rule.domain = []
Example #27
File: ir_rule.py Project: akretion/odoo
    def _compute_domain(self, model_name, mode="read"):
        if mode not in self._MODES:
            raise ValueError('Invalid mode: %r' % (mode,))

        if self._uid == SUPERUSER_ID:
            return None

        query = """ SELECT r.id FROM ir_rule r JOIN ir_model m ON (r.model_id=m.id)
                    WHERE m.model=%s AND r.active AND r.perm_{mode}
                    AND (r.id IN (SELECT rule_group_id FROM rule_group_rel rg
                                  JOIN res_groups_users_rel gu ON (rg.group_id=gu.gid)
                                  WHERE gu.uid=%s)
                         OR r.global)
                """.format(mode=mode)
        self._cr.execute(query, (model_name, self._uid))
        rule_ids = [row[0] for row in self._cr.fetchall()]
        if not rule_ids:
            return []

        # browse user and rules as SUPERUSER_ID to avoid access errors!
        eval_context = self._eval_context()
        user_groups = self.env.user.groups_id
        global_domains = []                     # list of domains
        group_domains = []                      # list of domains
        for rule in self.browse(rule_ids).sudo():
            # evaluate the domain for the current user
            dom = safe_eval(rule.domain_force, eval_context) if rule.domain_force else []
            dom = expression.normalize_domain(dom)
            if not rule.groups:
                global_domains.append(dom)
            elif rule.groups & user_groups:
                group_domains.append(dom)

        # combine global domains and group domains
        return expression.AND(global_domains + [expression.OR(group_domains)])
Example #28
File: ir_rule.py Project: Vauxoo/odoo
    def _get_failing(self, for_records, mode='read'):
        """ Returns the rules for the mode for the current user which fail on
        the specified records.

        Can return any global rule and/or all local rules (since local rules
        are OR-ed together, the entire group succeeds or fails, while global
        rules get AND-ed and can each fail)
        """
        Model = for_records.browse(()).sudo()
        eval_context = self._eval_context()

        all_rules = self._get_rules(Model._name, mode=mode).sudo()

        # first check if the group rules fail for any record (aka if
        # searching on (records, group_rules) filters out some of the records)
        group_rules = all_rules.filtered(lambda r: r.groups and r.groups & self.env.user.groups_id)
        group_domains = expression.OR([
            safe_eval(r.domain_force, eval_context) if r.domain_force else []
            for r in group_rules
        ])
        # if all records get returned, the group rules are not failing
        if Model.search_count(expression.AND([[('id', 'in', for_records.ids)], group_domains])) == len(for_records):
            group_rules = self.browse(())

        # failing rules are previously selected group rules or any failing global rule
        def is_failing(r, ids=for_records.ids):
            dom = safe_eval(r.domain_force, eval_context) if r.domain_force else []
            return Model.search_count(expression.AND([
                [('id', 'in', ids)],
                expression.normalize_domain(dom)
            ])) < len(ids)

        return all_rules.filtered(lambda r: r in group_rules or (not r.groups and is_failing(r))).sudo(self.env.user)
Example #29
    def action_launch(self, context=None):
        """ Launch Action of Wizard"""
        self.ensure_one()
        if self.type in ("automatic", "once"):
            self.write({"state": "done"})

        # Load action
        action = self.env[self.action_id.type].browse(self.action_id.id)

        result = action.read()[0]
        if action._name != "ir.actions.act_window":
            return result
        result.setdefault("context", "{}")

        # Open a specific record when res_id is provided in the context
        ctx = safe_eval(result["context"], {"user": self.env.user})
        if ctx.get("res_id"):
            result["res_id"] = ctx.pop("res_id")

        # disable log for automatic wizards
        if self.type == "automatic":
            ctx["disable_log"] = True
        result["context"] = ctx

        return result
Example #30
    def create_wkhtmltopdf_obj(self, header, content, footer, res_id=None):
        '''Create an object using namedtuple that represents a "sub-report" in wkhtmltopdf.
        This object contains header, content, footer, res_id and data related to the attachment:
        * attachment_id: an existing attachment_id found for the record.
        * attachment_name: the expected name of the attachment created (if necessary) after calling wkhtmltopdf.

        :param header: The header as a string.
        :param content: The content as a string.
        :param footer: The footer as a string.
        :param res_id: The related record of the report.
        :return: A new instance of WkhtmltopdfObj.
        '''
        attachment_id = attachment_name = None
        attachment_use = False
        if res_id and len(self._ids) == 1 and self.attachment:
            record_id = self.env[self.model].browse(res_id)
            attachment_name = safe_eval(self.attachment, {'object': record_id, 'time': time})
            attachment_id = self.retrieve_attachment(record_id, attachment_name)
            attachment_use = self.attachment_use
        return WkhtmltopdfObj(
            header=header,
            content=content,
            footer=footer,
            res_id=res_id,
            attachment_id=attachment_id,
            attachment_name=attachment_name,
            attachment_use=attachment_use
        )
Example #31
 def _format_slow_queries_in_html(self):
     data = safe_eval(self.slow_queries)
     data = sorted(data, key=itemgetter(1), reverse=True)
     header = _('Slow Query'), _('Duration'), _('Trace')
     self.slow_queries_html = self._format_in_html(data, header)
Example #32
 def _get_domain(self):
     """ override me to customize domains according exceptions cases """
     self.ensure_one()
     return safe_eval(self.domain)
Example #33
    def compute_refund(self, mode='refund'):
        inv_obj = self.env['account.invoice']
        inv_tax_obj = self.env['account.invoice.tax']
        inv_line_obj = self.env['account.invoice.line']
        context = dict(self._context or {})
        xml_id = False

        for form in self:
            created_inv = []
            date = False
            description = False
            for inv in inv_obj.browse(context.get('active_ids')):
                if inv.state in ['draft', 'proforma2', 'cancel']:
                    raise UserError(_('Cannot refund draft/proforma/cancelled invoice.'))
                if inv.reconciled and mode in ('cancel', 'modify'):
                    raise UserError(_('Cannot refund invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.'))

                date = form.date or False
                description = form.description or inv.name
                refund = inv.refund(form.date_invoice, date, description, inv.journal_id.id)
                refund.compute_taxes()

                created_inv.append(refund.id)
                if mode in ('cancel', 'modify'):
                    movelines = inv.move_id.line_ids
                    to_reconcile_ids = {}
                    to_reconcile_lines = self.env['account.move.line']
                    for line in movelines:
                        if line.account_id.id == inv.account_id.id:
                            to_reconcile_lines += line
                            to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
                        if line.reconciled:
                            line.remove_move_reconcile()
                    refund.action_invoice_open()
                    for tmpline in refund.move_id.line_ids:
                        if tmpline.account_id.id == inv.account_id.id:
                            to_reconcile_lines += tmpline
                            to_reconcile_lines.filtered(lambda l: l.reconciled == False).reconcile()
                    if mode == 'modify':
                        invoice = inv.read(
                                    ['name', 'type', 'number', 'reference',
                                    'comment', 'date_due', 'partner_id',
                                    'partner_insite', 'partner_contact',
                                    'partner_ref', 'payment_term_id', 'account_id',
                                    'currency_id', 'invoice_line_ids', 'tax_line_ids',
                                    'journal_id', 'date'])
                        invoice = invoice[0]
                        del invoice['id']
                        invoice_lines = inv_line_obj.browse(invoice['invoice_line_ids'])
                        invoice_lines = inv_obj.with_context(mode='modify')._refund_cleanup_lines(invoice_lines)
                        tax_lines = inv_tax_obj.browse(invoice['tax_line_ids'])
                        tax_lines = inv_obj._refund_cleanup_lines(tax_lines)
                        invoice.update({
                            'type': inv.type,
                            'date_invoice': form.date_invoice,
                            'state': 'draft',
                            'number': False,
                            'invoice_line_ids': invoice_lines,
                            'tax_line_ids': tax_lines,
                            'date': date,
                            'origin': inv.origin,
                            'fiscal_position_id': inv.fiscal_position_id.id,
                        })
                        for field in ('partner_id', 'account_id', 'currency_id',
                                         'payment_term_id', 'journal_id'):
                                invoice[field] = invoice[field] and invoice[field][0]
                        inv_refund = inv_obj.create(invoice)
                        if inv_refund.payment_term_id.id:
                            inv_refund._onchange_payment_term_date_invoice()
                        created_inv.append(inv_refund.id)
                xml_id = (inv.type in ['out_refund', 'out_invoice']) and 'action_invoice_tree1' or \
                         (inv.type in ['in_refund', 'in_invoice']) and 'action_invoice_tree2'
                # Put the reason in the chatter
                subject = _("Invoice refund")
                body = description
                refund.message_post(body=body, subject=subject)
        if xml_id:
            result = self.env.ref('account.%s' % (xml_id)).read()[0]
            invoice_domain = safe_eval(result['domain'])
            invoice_domain.append(('id', 'in', created_inv))
            result['domain'] = invoice_domain
            return result
        return True
Example #34
 def test_05_safe_eval_forbiddon(self):
     """ Try forbidden expressions in safe_eval to verify they are not allowed (open) """
     with self.assertRaises(ValueError):
         safe_eval('open("/etc/passwd","r")')
Example #35
 def _format_slow_recomputation_in_html(self):
     data = safe_eval(self.slow_recomputation)
     data = sorted(data, key=itemgetter(2), reverse=True)
     header = _('Model'), _('Field'), _('Duration'), _('Count')
     self.slow_recomputation_html = self._format_in_html(data, header)
Example #36
    def send(self, auto_commit=False, raise_exception=False):
        """ Sends the selected emails immediately, ignoring their current
            state (mails that have already been sent should not be passed
            unless they should actually be re-sent).
            Emails successfully delivered are marked as 'sent', and those
            that fail to be delivered are marked as 'exception', and the
            corresponding error mail is output in the server logs.

            :param bool auto_commit: whether to force a commit of the mail status
                after sending each mail (meant only for scheduler processing);
                should never be True during normal transactions (default: False)
            :param bool raise_exception: whether to raise an exception if the
                email sending process has failed
            :return: True
        """
        IrMailServer = self.env['ir.mail_server']

        for mail in self:
            try:
                # TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method
                if mail.model:
                    model = self.env['ir.model'].sudo().search([
                        ('model', '=', mail.model)
                    ])[0]
                else:
                    model = None
                if model:
                    mail = mail.with_context(model_name=model.name)

                # load attachment binary data with a separate read(), as prefetching all
                # `datas` (binary field) could bloat the browse cache, triggering
                # soft/hard mem limits with temporary data.
                attachments = [(a['datas_fname'], base64.b64decode(a['datas']))
                               for a in mail.attachment_ids.sudo().read(
                                   ['datas_fname', 'datas'])]

                # specific behavior to customize the send email for notified partners
                email_list = []
                if mail.email_to:
                    email_list.append(mail.send_get_email_dict())
                for partner in mail.recipient_ids:
                    email_list.append(
                        mail.send_get_email_dict(partner=partner))

                # headers
                headers = {}
                bounce_alias = self.env['ir.config_parameter'].get_param(
                    "mail.bounce.alias")
                catchall_domain = self.env['ir.config_parameter'].get_param(
                    "mail.catchall.domain")
                if bounce_alias and catchall_domain:
                    if mail.model and mail.res_id:
                        headers['Return-Path'] = '%s+%d-%s-%d@%s' % (
                            bounce_alias, mail.id, mail.model, mail.res_id,
                            catchall_domain)
                    else:
                        headers['Return-Path'] = '%s+%d@%s' % (
                            bounce_alias, mail.id, catchall_domain)
                if mail.headers:
                    try:
                        headers.update(safe_eval(mail.headers))
                    except Exception:
                        pass

                # Writing on the mail object may fail (e.g. lock on user) which
                # would trigger a rollback *after* actually sending the email.
                # To avoid sending twice the same email, provoke the failure earlier
                mail.write({
                    'state': 'exception',
                    'failure_reason': _('Error without exception. Probably due to sending an email without computed recipients.'),
                })
                mail_sent = False

                # build an RFC2822 email.message.Message object and send it without queuing
                res = None
                for email in email_list:
                    msg = IrMailServer.build_email(
                        email_from=mail.email_from,
                        email_to=email.get('email_to'),
                        subject=mail.subject,
                        body=email.get('body'),
                        body_alternative=email.get('body_alternative'),
                        email_cc=tools.email_split(mail.email_cc),
                        email_bcc=tools.email_split(mail.email_bcc),
                        reply_to=mail.reply_to,
                        attachments=attachments,
                        message_id=mail.message_id,
                        references=mail.references,
                        object_id=mail.res_id
                        and ('%s-%s' % (mail.res_id, mail.model)),
                        subtype='html',
                        subtype_alternative='plain',
                        headers=headers)
                    try:
                        res = IrMailServer.send_email(
                            msg, mail_server_id=mail.mail_server_id.id)
                    except AssertionError as error:
                        if error.message == IrMailServer.NO_VALID_RECIPIENT:
                            # No valid recipient found for this particular
                            # mail item -> ignore error to avoid blocking
                            # delivery to next recipients, if any. If this is
                            # the only recipient, the mail will show as failed.
                            _logger.info(
                                "Ignoring invalid recipients for mail.mail %s: %s",
                                mail.message_id, email.get('email_to'))
                        else:
                            raise
                if res:
                    mail.write({
                        'state': 'sent',
                        'message_id': res,
                        'failure_reason': False
                    })
                    mail_sent = True

                # /!\ can't use mail.state here, as mail.refresh() will cause an error
                # see revid:[email protected] in 6.1
                if mail_sent:
                    _logger.info(
                        'Mail with ID %r and Message-Id %r successfully sent',
                        mail.id, mail.message_id)
                mail._postprocess_sent_message(mail_sent=mail_sent)
            except MemoryError:
                # prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
                # instead of marking the mail as failed
                _logger.exception(
                    'MemoryError while processing mail with ID %r and Msg-Id %r. Consider raising the --limit-memory-hard startup option',
                    mail.id, mail.message_id)
                raise
            except psycopg2.Error:
                # If an error with the database occurs, chances are that the cursor is unusable.
                # This will lead to an `psycopg2.InternalError` being raised when trying to write
                # `state`, shadowing the original exception and forbid a retry on concurrent
                # update. Let's bubble it.
                raise
            except Exception as e:
                failure_reason = tools.ustr(e)
                _logger.exception('failed sending mail (id: %s) due to %s',
                                  mail.id, failure_reason)
                mail.write({
                    'state': 'exception',
                    'failure_reason': failure_reason
                })
                mail._postprocess_sent_message(mail_sent=False)
                if raise_exception:
                    if isinstance(e, AssertionError):
                        # get the args of the original error, wrap into a value and throw a MailDeliveryException
                        # that is an except_orm, with name and value as arguments
                        value = '. '.join(e.args)
                        raise MailDeliveryException(_("Mail Delivery Failed"),
                                                    value)
                    raise

            if auto_commit is True:
                self._cr.commit()
        return True
Example #37
 def _import_record_data(self, import_file, record, data_dict):
     """ From complex excel, create temp simple excel and do import """
     if not data_dict:
         return
     try:
         header_fields = []
         decoded_data = base64.decodestring(import_file)
         wb = xlrd.open_workbook(file_contents=decoded_data)
         col_idx = 0
         out_wb = xlwt.Workbook()
         out_st = out_wb.add_sheet("Sheet 1")
         xml_id = record and self.get_external_id(record) or \
             '%s.%s' % ('xls', uuid.uuid4())
         out_st.write(0, 0, 'id')  # id and xml_id on first column
         out_st.write(1, 0, xml_id)
         header_fields.append('id')
         col_idx += 1
         model = record._name
         for sheet_name in data_dict:  # For each Sheet
             worksheet = data_dict[sheet_name]
             st = False
             if isinstance(sheet_name, str):
                 st = co.xlrd_get_sheet_by_name(wb, sheet_name)
             elif isinstance(sheet_name, int):
                 st = wb.sheet_by_index(sheet_name - 1)
             if not st:
                 raise ValidationError(
                     _('Sheet %s not found') % sheet_name)
             # HEAD updates
             for rc, field in worksheet.get('_HEAD_', {}).items():
                 rc, key_eval_cond = co.get_field_condition(rc)
                 field, val_eval_cond = co.get_field_condition(field)
                 field_type = self._get_field_type(model, field)
                 value = False
                 try:
                     row, col = co.pos2idx(rc)
                     value = co._get_cell_value(st.cell(row, col),
                                                field_type=field_type)
                 except Exception:
                     pass
                 eval_context = self.get_eval_context(model=model,
                                                      value=value)
                 if key_eval_cond:
                     value = str(safe_eval(key_eval_cond, eval_context))
                 if val_eval_cond:
                     value = str(safe_eval(val_eval_cond, eval_context))
                 out_st.write(0, col_idx, field)  # Next Column
                 out_st.write(1, col_idx, value)  # Next Value
                 header_fields.append(field)
                 col_idx += 1
             # Line Items
             line_fields = filter(lambda x: x != '_HEAD_', worksheet)
             for line_field in line_fields:
                 vals = self._get_line_vals(st, worksheet,
                                            model, line_field)
                 for field in vals:
                     # Columns, i.e., line_ids/field_id
                     out_st.write(0, col_idx, field)
                     header_fields.append(field)
                     # Data
                     i = 1
                     for value in vals[field]:
                         out_st.write(i, col_idx, value)
                         i += 1
                     col_idx += 1
         content = BytesIO()
         out_wb.save(content)
         content.seek(0)  # Set index to 0, and start reading
         xls_file = content.read()
         # Do the import
         Import = self.env['base_import.import']
         imp = Import.create({
             'res_model': model,
             'file': xls_file,
             'file_type': 'application/vnd.ms-excel',
             'file_name': 'temp.xls',
         })
         errors = imp.do(
             header_fields,
             {'headers': True,
              'advanced': True,
              'keep_matches': False,
              'encoding': '',
              'separator': '',
              'quoting': '"',
              'date_style': '',
              'datetime_style': '%Y-%m-%d %H:%M:%S',
              'float_thousand_separator': ',',
              'float_decimal_separator': '.',
              'fields': []})
         if errors:
             message = errors[0]['message'].encode('utf-8')
             raise ValidationError(message)
         return self.env.ref(xml_id)
     except xlrd.XLRDError:
         raise ValidationError(
             _('Invalid file style, only .xls or .xlsx file allowed'))
     except Exception as e:
         raise ValidationError(_('Error importing data\n%s') % e)
Example #38
 def _fetch_queries(self,
                    date_from,
                    date_to,
                    get_additional_query_filter=None):
     self.ensure_one()
     res = {}
     for query in self.query_ids:
         model = self.env[query.model_id.model]
         eval_context = {
             'env': self.env,
             'time': time,
             'datetime': datetime,
             'dateutil': dateutil,
             # deprecated
             'uid': self.env.uid,
             'context': self.env.context,
         }
         domain = query.domain and \
             safe_eval(query.domain, eval_context) or []
         if get_additional_query_filter:
             domain.extend(get_additional_query_filter(query))
         if query.date_field.ttype == 'date':
             domain.extend([(query.date_field.name, '>=', date_from),
                            (query.date_field.name, '<=', date_to)])
         else:
             datetime_from = _utc_midnight(date_from,
                                           self._context.get('tz', 'UTC'))
             datetime_to = _utc_midnight(date_to,
                                         self._context.get('tz', 'UTC'),
                                         add_day=1)
             domain.extend([(query.date_field.name, '>=', datetime_from),
                            (query.date_field.name, '<', datetime_to)])
         field_names = [f.name for f in query.field_ids]
         all_stored = all([model._fields[f].store for f in field_names])
         if not query.aggregate:
             data = model.search_read(domain, field_names)
             res[query.name] = [AutoStruct(**d) for d in data]
         elif query.aggregate == 'sum' and all_stored:
             # use read_group to sum stored fields
             data = model.read_group(domain, field_names, [])
             s = AutoStruct(count=data[0]['__count'])
             for field_name in field_names:
                 try:
                     v = data[0][field_name]
                 except KeyError:
                     _logger.error(
                         'field %s not found in read_group '
                         'for %s; not summable?', field_name, model._name)
                     v = AccountingNone
                 setattr(s, field_name, v)
             res[query.name] = s
         else:
             data = model.search_read(domain, field_names)
             s = AutoStruct(count=len(data))
             if query.aggregate == 'min':
                 agg = _min
             elif query.aggregate == 'max':
                 agg = _max
             elif query.aggregate == 'avg':
                 agg = _avg
             elif query.aggregate == 'sum':
                 agg = _sum
             for field_name in field_names:
                 setattr(s, field_name, agg([d[field_name] for d in data]))
             res[query.name] = s
     return res
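The method above safe_eval's each query's stored domain against a small evaluation context before appending the date clauses and aggregating. A minimal sketch of that first step, assuming an Odoo environment `env` and a hypothetical domain string:

    import datetime
    from odoo.tools.safe_eval import safe_eval

    def eval_query_domain(env, domain_str):
        # Evaluate a stored domain string with a restricted context,
        # mirroring the eval_context built in _fetch_queries above.
        eval_context = {
            'env': env,
            'datetime': datetime,
            'uid': env.uid,
            'context': env.context,
        }
        return domain_str and safe_eval(domain_str, eval_context) or []

    # e.g. eval_query_domain(env, "[('state', '=', 'posted')]")
    # -> [('state', '=', 'posted')], ready to be extended with date clauses
    # and passed to search_read() or read_group().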
Example #39
0
 def _get_challenger_users(self, domain):
     # FIXME: literal_eval?
     user_domain = safe_eval(domain)
     return self.env['res.users'].search(user_domain)
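The FIXME hints that ast.literal_eval could replace safe_eval here: when the stored domain is a plain Python literal with no env or user references, the stricter standard-library parser is sufficient. A minimal sketch, assuming such a static domain string:

    import ast

    def parse_static_domain(domain_str):
        # Accepts only literals (lists, tuples, strings, numbers, booleans);
        # expressions such as user.id or env lookups are rejected.
        return ast.literal_eval(domain_str)

    # parse_static_domain("[('active', '=', True)]") -> [('active', '=', True)]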
Example #40
0
File: ir_http.py Project: dzywenji/odoo
        session_info = super(Http, self).get_frontend_session_info()
        session_info.update({
            'is_website_user': request.env.user.id == request.website.user_id.id,
        })
        if request.env.user.has_group('website.group_website_publisher'):
            session_info.update({
                'website_id': request.website.id,
                'website_company_id': request.website._get_cached('company_id'),
            })
        return session_info


class ModelConverter(ModelConverter):

    def generate(self, uid, dom=None, args=None):
        Model = request.env[self.model].with_user(uid)
        # Allow using current_website_id directly in route domains
        args.update(current_website_id=request.env['website'].get_current_website().id)
        domain = safe_eval(self.domain, (args or {}).copy())
        if dom:
            domain += dom
        for record in Model.search(domain):
            # return record so the URL will be the real endpoint URL: the record
            # goes through `slug()` the same way the endpoint URL is retrieved
            # during dispatch (301 redirect), see `to_url()` from ModelConverter
            yield record
Example #41
0
 def _compute_params(self):
     self_bin = self.with_context(bin_size=False, bin_size_params_store=False)
     for record, record_bin in zip(self, self_bin):
         record.params = record_bin.params_store and safe_eval(record_bin.params_store, {'uid': self._uid})
Example #42
0
 def run_action_code_multi(self, action, eval_context=None):
     safe_eval(action.sudo().code.strip(), eval_context, mode="exec", nocopy=True)  # nocopy allows to return 'action'
     if 'action' in eval_context:
         return eval_context['action']
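The nocopy comment is the key to this pattern: with nocopy=True, safe_eval executes against the passed dictionary itself instead of a copy, so a variable assigned by the evaluated server-action code remains visible afterwards. A minimal, self-contained sketch (the action code string is hypothetical):

    from odoo.tools.safe_eval import safe_eval

    eval_context = {}
    code = "action = {'type': 'ir.actions.act_window_close'}"
    safe_eval(code, eval_context, mode="exec", nocopy=True)
    # the assignment made inside the evaluated code is still there:
    assert eval_context['action'] == {'type': 'ir.actions.act_window_close'}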
Example #43
0
    def report_download(self, data, token):
        """This function is used by 'qwebactionmanager.js' in order to trigger the download of
        a pdf/controller report.

        :param data: a javascript array JSON.stringified containing the report internal url ([0]) and
        its type ([1])
        :returns: Response with a filetoken cookie and an attachment header
        """
        requestcontent = json.loads(data)
        url, type = requestcontent[0], requestcontent[1]
        try:
            if type == "qweb-pdf":
                reportname = url.split("/report/pdf/")[1].split("?")[0]

                docids = None
                active_model = ""
                NewReportName = ""
                if "/" in reportname:
                    reportname, docids = reportname.split("/")

                if docids:
                    # Generic report:
                    response = self.report_routes(reportname,
                                                  docids=docids,
                                                  converter="pdf")
                else:
                    # Particular report:
                    data = url_decode(
                        url.split("?")
                        [1]).items()  # decoding the args represented in JSON

                    dictData = dict(data)
                    active_model = json.loads(
                        dictData.get("context")).get("active_model")
                    NewReportName = (json.loads(
                        dictData.get("options")).get("form").get("name"))
                    response = self.report_routes(reportname,
                                                  converter="pdf",
                                                  **dictData)

                report = request.env[
                    "ir.actions.report"]._get_report_from_name(reportname)
                filename = "%s.%s" % (report.name, "pdf")

                if active_model == "payroll.register":
                    filename = "%s.%s" % (NewReportName, "pdf")

                if docids:
                    ids = [int(x) for x in docids.split(",")]
                    obj = request.env[report.model].browse(ids)
                    if report.print_report_name and not len(obj) > 1:
                        report_name = safe_eval(
                            report.print_report_name,
                            {
                                "object": obj,
                                "time": time
                            },
                        )
                        filename = "%s.%s" % (report_name, "pdf")
                    if report.model == "payroll.register":
                        filename = "%s.%s" % (obj.name, "pdf")
                response.headers.add("Content-Disposition",
                                     content_disposition(filename))
                response.set_cookie("fileToken", token)
                return response
            else:
                return
        except Exception as e:
            se = _serialize_exception(e)
            error = {"code": 200, "message": "Odoo Server Error", "data": se}
            return request.make_response(html_escape(json.dumps(error)))
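As the docstring says, `data` is a JSON.stringified two-element array: the report's internal URL and its type. A hedged sketch of such a payload and of the URL parsing done above (report name and ids are hypothetical):

    import json

    data = json.dumps([
        "/report/pdf/sale.report_saleorder/7,8",
        "qweb-pdf",
    ])
    url, report_type = json.loads(data)
    reportname = url.split("/report/pdf/")[1].split("?")[0]  # 'sale.report_saleorder/7,8'
    reportname, docids = reportname.split("/")               # ('sale.report_saleorder', '7,8')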
Example #44
0
 def _run_action_code_multi(self, eval_context):
     safe_eval(self.code.strip(), eval_context, mode="exec",
               nocopy=True)  # nocopy allows to return 'action'
     return eval_context.get('action')
Example #45
0
File: goal.py Project: suribes/odoo
    def update_goal(self):
        """Update the goals to recomputes values and change of states

        If a manual goal is not updated for enough time, the user will be
        reminded to do so (done only once, in 'inprogress' state).
        If a goal reaches the target value, the status is set to 'reached'.
        If the end date is passed (at least +1 day, time not considered) without
        the target value being reached, the goal is set as failed."""
        goals_by_definition = {}
        for goal in self:
            goals_by_definition.setdefault(goal.definition_id, []).append(goal)

        for definition, goals in goals_by_definition.items():
            goals_to_write = {}
            if definition.computation_mode == 'manually':
                for goal in goals:
                    goals_to_write[goal] = goal._check_remind_delay()
            elif definition.computation_mode == 'python':
                # TODO batch execution
                for goal in goals:
                    # execute the chosen method
                    cxt = {
                        'object': goal,
                        'env': self.env,
                        'date': date,
                        'datetime': datetime,
                        'timedelta': timedelta,
                        'time': time,
                    }
                    code = definition.compute_code.strip()
                    safe_eval(code, cxt, mode="exec", nocopy=True)
                    # the result of the evaluated code is put in the 'result' local variable, propagated to the context
                    result = cxt.get('result')
                    if result is not None and isinstance(
                            result, (float, pycompat.integer_types)):
                        goals_to_write.update(goal._get_write_values(result))
                    else:
                        _logger.error(
                            "Invalid return content '%r' from the evaluation "
                            "of code for definition %s, expected a number",
                            result, definition.name)

            else:  # count or sum
                Obj = self.env[definition.model_id.model]

                field_date_name = definition.field_date_id.name
                if definition.computation_mode == 'count' and definition.batch_mode:
                    # batch mode, trying to do as much as possible in one request
                    general_domain = safe_eval(definition.domain)
                    field_name = definition.batch_distinctive_field.name
                    subqueries = {}
                    for goal in goals:
                        start_date = field_date_name and goal.start_date or False
                        end_date = field_date_name and goal.end_date or False
                        subqueries.setdefault(
                            (start_date, end_date), {}).update({
                                goal.id:
                                safe_eval(definition.batch_user_expression,
                                          {'user': goal.user_id})
                            })

                    # the global query should be split by time periods (especially for recurrent goals)
                    for (start_date,
                         end_date), query_goals in subqueries.items():
                        subquery_domain = list(general_domain)
                        subquery_domain.append(
                            (field_name, 'in',
                             list(set(query_goals.values()))))
                        if start_date:
                            subquery_domain.append(
                                (field_date_name, '>=', start_date))
                        if end_date:
                            subquery_domain.append(
                                (field_date_name, '<=', end_date))

                        if field_name == 'id':
                            # grouping on id does not work and is similar to search anyway
                            users = Obj.search(subquery_domain)
                            user_values = [{
                                'id': user.id,
                                'id_count': 1
                            } for user in users]
                        else:
                            user_values = Obj.read_group(subquery_domain,
                                                         fields=[field_name],
                                                         groupby=[field_name])
                        # user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3},...]
                        for goal in [g for g in goals if g.id in query_goals]:
                            for user_value in user_values:
                                queried_value = field_name in user_value and user_value[
                                    field_name] or False
                                if isinstance(queried_value, tuple) and len(
                                        queried_value) == 2 and isinstance(
                                            queried_value[0],
                                            pycompat.integer_types):
                                    queried_value = queried_value[0]
                                if queried_value == query_goals[goal.id]:
                                    new_value = user_value.get(
                                        field_name + '_count', goal.current)
                                    goals_to_write.update(
                                        goal._get_write_values(new_value))

                else:
                    for goal in goals:
                        # eval the domain with user replaced by goal user object
                        domain = safe_eval(definition.domain,
                                           {'user': goal.user_id})

                        # add temporal clause(s) to the domain if fields are filled on the goal
                        if goal.start_date and field_date_name:
                            domain.append(
                                (field_date_name, '>=', goal.start_date))
                        if goal.end_date and field_date_name:
                            domain.append(
                                (field_date_name, '<=', goal.end_date))

                        if definition.computation_mode == 'sum':
                            field_name = definition.field_id.name
                            # TODO for master: group on user field in batch mode
                            res = Obj.read_group(domain, [field_name], [])
                            new_value = res and res[0][field_name] or 0.0

                        else:  # computation mode = count
                            new_value = Obj.search_count(domain)

                        goals_to_write.update(
                            goal._get_write_values(new_value))

            for goal, values in goals_to_write.items():
                if not values:
                    continue
                goal.write(values)
            if self.env.context.get('commit_gamification'):
                self.env.cr.commit()
        return True
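In the non-batch count/sum branch above, the definition's domain is re-evaluated per goal with 'user' bound to the goal's user record. A minimal sketch with a hypothetical domain string:

    from odoo.tools.safe_eval import safe_eval

    # Hypothetical definition.domain: records owned by the goal's user.
    definition_domain = "[('user_id', '=', user.id)]"

    def goal_domain(goal):
        return safe_eval(definition_domain, {'user': goal.user_id})
        # e.g. -> [('user_id', '=', 7)], then extended with start/end dates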
 def _filter_quants(self, move, quants):
     domain = safe_eval(self.quant_domain) or []
     if domain:
         return self._eval_quant_domain(quants, domain)
     return quants
    def compute_refund(
        self,
        mode='refund'
    ):  # TODO (omar): add params id_fact_orig, monto_nota_cr: cancel a ticket invoice from the POS
        inv_obj = self.env['account.invoice']
        inv_tax_obj = self.env['account.invoice.tax']
        inv_line_obj = self.env['account.invoice.line']
        context = dict(self._context or {})
        xml_id = False

        for form in self:
            created_inv = []
            date = False
            description = False

            for inv in inv_obj.browse(
                    int(form.factura_origen) or context.get('active_ids')):
                if inv.state in ['draft', 'proforma2', 'cancel']:
                    raise UserError(
                        _('Cannot refund draft/proforma/cancelled invoice.'))
                if inv.reconciled and mode in ('cancel', 'modify'):
                    raise UserError(
                        _('Cannot refund invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.'
                          ))

                date = form.date or False
                description = form.description or inv.name
                refund = inv.refund(form.date_invoice, date, description,
                                    inv.journal_id.id)

                # TODO: added by omar
                lineas_objs = self.env['account.invoice.line'].browse(
                    refund.invoice_line_ids.ids)
                lineas_objs.write({'price_unit': self.nota_credito_total})

                # update taxes
                refund.write({
                    'amount_tax': lineas_objs.price_tax,
                    'amount_total': lineas_objs.price_total
                })
                refund._onchange_invoice_line_ids()
                refund._compute_amount()

                refund.cfdi_relacionados = inv.uuid
                created_inv.append(refund.id)
                if mode in ('cancel', 'modify'):
                    movelines = inv.move_id.line_ids
                    to_reconcile_ids = {}
                    to_reconcile_lines = self.env['account.move.line']
                    for line in movelines:
                        if line.account_id.id == inv.account_id.id:
                            to_reconcile_lines += line
                            to_reconcile_ids.setdefault(
                                line.account_id.id, []).append(line.id)
                        if line.reconciled:
                            line.remove_move_reconcile()
                    # fields where the information shown on the rectifying invoice (credit note) is still False
                    # here the missing fields are set from the inv object (original invoice)
                    if inv.tipo_de_relacion_id.id == False:
                        inv.tipo_de_relacion_id = self.tipo_de_relacion_id.id
                    refund.rfc_cliente_factura = inv.rfc_cliente_factura
                    refund.fac_id = inv.fac_id
                    refund.tipo_de_relacion_id = inv.tipo_de_relacion_id.id
                    refund.forma_pago_id = inv.forma_pago_id
                    refund.metodo_pago_id = inv.metodo_pago_id
                    refund.uso_cfdi_id = inv.uso_cfdi_id
                    refund.action_invoice_open()
                    for tmpline in refund.move_id.line_ids:
                        if tmpline.account_id.id == inv.account_id.id:
                            to_reconcile_lines += tmpline
                            to_reconcile_lines.filtered(
                                lambda l: l.reconciled == False).reconcile()
                    if mode == 'modify':
                        invoice = inv.read(
                            inv_obj._get_refund_modify_read_fields())
                        invoice = invoice[0]
                        del invoice['id']
                        invoice_lines = inv_line_obj.browse(
                            invoice['invoice_line_ids'])
                        invoice_lines = inv_obj.with_context(
                            mode='modify')._refund_cleanup_lines(invoice_lines)
                        tax_lines = inv_tax_obj.browse(invoice['tax_line_ids'])
                        tax_lines = inv_obj._refund_cleanup_lines(tax_lines)
                        invoice.update({
                            'type':
                            inv.type,
                            'date_invoice':
                            form.date_invoice,
                            'state':
                            'draft',
                            'number':
                            False,
                            'invoice_line_ids':
                            invoice_lines,
                            'tax_line_ids':
                            tax_lines,
                            'date':
                            date,
                            'origin':
                            inv.origin,
                            'fiscal_position_id':
                            inv.fiscal_position_id.id,
                        })
                        for field in inv_obj._get_refund_common_fields():
                            if inv_obj._fields[field].type == 'many2one':
                                invoice[field] = invoice[field] and invoice[
                                    field][0]
                            else:
                                invoice[field] = invoice[field] or False
                        inv_refund = inv_obj.create(invoice)
                        if inv_refund.payment_term_id.id:
                            inv_refund._onchange_payment_term_date_invoice()
                        created_inv.append(inv_refund.id)
                xml_id = (inv.type in ['out_refund', 'out_invoice']) and 'action_invoice_tree1' or \
                         (inv.type in ['in_refund', 'in_invoice']) and 'action_invoice_tree2'
                # Put the reason in the chatter
                subject = _("Invoice refund")
                body = description
                refund.message_post(body=body, subject=subject)
        if xml_id:
            result = self.env.ref('account.%s' % (xml_id)).read()[0]
            invoice_domain = safe_eval(result['domain'])
            invoice_domain.append(('id', 'in', created_inv))
            result['domain'] = invoice_domain
            return result
        return True
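The closing block reuses a stored window action and narrows its evaluated domain to the refunds just created. A minimal sketch of that pattern, with the action XML id treated as a hypothetical parameter:

    from odoo.tools.safe_eval import safe_eval

    def action_for_records(env, xml_id, record_ids):
        # Read the window action, then restrict its domain to record_ids.
        action = env.ref(xml_id).read()[0]
        domain = safe_eval(action.get('domain') or '[]')
        domain.append(('id', 'in', record_ids))
        action['domain'] = domain
        return action

    # e.g. action_for_records(env, 'account.action_invoice_tree1', created_inv)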
Example #48
0
 def _eval(self, expr):
     eval_dict = {'AccountingNone': AccountingNone}
     return safe_eval(self.aep.replace_expr(expr), eval_dict)
Example #49
0
 def _get_valid_products(self, products):
     if self.rule_products_domain:
         domain = safe_eval(self.rule_products_domain)
         return products.filtered_domain(domain)
     return products
Example #50
0
    def send_mail(self, auto_commit=False):
        """ Process the wizard content and proceed with sending the related
            email(s), rendering any template patterns on the fly if needed. """
        for wizard in self:
            # Duplicate attachments linked to the email.template.
            # Indeed, basic mail.compose.message wizard duplicates attachments in mass
            # mailing mode. But in 'single post' mode, attachments of an email template
            # also have to be duplicated to avoid changing their ownership.
            if wizard.attachment_ids and wizard.composition_mode != 'mass_mail' and wizard.template_id:
                new_attachment_ids = []
                for attachment in wizard.attachment_ids:
                    if attachment in wizard.template_id.attachment_ids:
                        new_attachment_ids.append(
                            attachment.copy({
                                'res_model': 'mail.compose.message',
                                'res_id': wizard.id
                            }).id)
                    else:
                        new_attachment_ids.append(attachment.id)
                    wizard.write(
                        {'attachment_ids': [(6, 0, new_attachment_ids)]})

            # Mass Mailing
            mass_mode = wizard.composition_mode in ('mass_mail', 'mass_post')

            Mail = self.env['mail.mail']
            ActiveModel = self.env[wizard.model if wizard.
                                   model else 'mail.thread']
            if wizard.template_id:
                # template user_signature is added when generating body_html
                # mass mailing: use template auto_delete value -> note, for emails mass mailing only
                Mail = Mail.with_context(mail_notify_user_signature=False)
                ActiveModel = ActiveModel.with_context(
                    mail_notify_user_signature=False,
                    mail_auto_delete=wizard.template_id.auto_delete)
            if not hasattr(ActiveModel, 'message_post'):
                ActiveModel = self.env['mail.thread'].with_context(
                    thread_model=wizard.model)
            if wizard.composition_mode == 'mass_post':
                # do not send emails directly but use the queue instead
                # add context key to avoid subscribing the author
                ActiveModel = ActiveModel.with_context(
                    mail_notify_force_send=False, mail_create_nosubscribe=True)
            # wizard works in batch mode: [res_id] or active_ids or active_domain
            if mass_mode and wizard.use_active_domain and wizard.model:
                res_ids = self.env[wizard.model].search(
                    safe_eval(wizard.active_domain)).ids
            elif mass_mode and wizard.model and self._context.get(
                    'active_ids'):
                res_ids = self._context['active_ids']
            else:
                res_ids = [wizard.res_id]

            batch_size = int(self.env['ir.config_parameter'].sudo().get_param(
                'mail.batch_size')) or self._batch_size
            sliced_res_ids = [
                res_ids[i:i + batch_size]
                for i in range(0, len(res_ids), batch_size)
            ]

            if wizard.composition_mode == 'mass_mail' or wizard.is_log or (
                    wizard.composition_mode == 'mass_post'
                    and not wizard.notify):  # log a note: subtype is False
                subtype_id = False
            elif wizard.subtype_id:
                subtype_id = wizard.subtype_id.id
            else:
                subtype_id = self.sudo().env.ref('mail.mt_comment',
                                                 raise_if_not_found=False).id

            for res_ids in sliced_res_ids:
                batch_mails = Mail
                all_mail_values = wizard.get_mail_values(res_ids)
                for res_id, mail_values in all_mail_values.items():
                    if wizard.composition_mode == 'mass_mail':
                        batch_mails |= Mail.create(mail_values)
                    else:
                        ActiveModel.browse(res_id).message_post(
                            message_type=wizard.message_type,
                            subtype_id=subtype_id,
                            **mail_values)

                if wizard.composition_mode == 'mass_mail':
                    batch_mails.send(auto_commit=auto_commit)

        return {'type': 'ir.actions.act_window_close'}
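Independently of templates and attachments, the wizard cuts res_ids into fixed-size batches before creating mails or posting messages. A minimal sketch of that slicing step with hypothetical values:

    res_ids = list(range(1, 26))   # 25 hypothetical record ids
    batch_size = 10                # normally read from ir.config_parameter
    sliced_res_ids = [
        res_ids[i:i + batch_size]
        for i in range(0, len(res_ids), batch_size)
    ]
    # -> three batches of 10, 10 and 5 ids, each processed in its own loop pass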
Example #51
0
    def _send(self,
              auto_commit=False,
              raise_exception=False,
              smtp_session=None):
        IrMailServer = self.env['ir.mail_server']
        IrAttachment = self.env['ir.attachment']
        for mail_id in self.ids:
            success_pids = []
            failure_type = 'NONE'
            processing_pid = None
            mail = None
            try:
                mail = self.browse(mail_id)
                if mail.state != 'outgoing':
                    if mail.state != 'exception' and mail.auto_delete:
                        mail.sudo().unlink()
                    continue

                # remove attachments if user send the link with the access_token
                body = mail.body_html or ''
                attachments = mail.attachment_ids
                for link in re.findall(r'/web/(?:content|image)/([0-9]+)',
                                       body):
                    attachments = attachments - IrAttachment.browse(int(link))

                # load attachment binary data with a separate read(), as prefetching all
                # `datas` (binary field) could bloat the browse cache, triggering
                # soft/hard mem limits with temporary data.
                attachments = [(a['datas_fname'], base64.b64decode(a['datas']),
                                a['mimetype'])
                               for a in attachments.sudo().read(
                                   ['datas_fname', 'datas', 'mimetype'])]

                # specific behavior to customize the send email for notified partners
                email_list = []
                if mail.email_to:
                    email_list.append(mail._send_prepare_values())
                for partner in mail.recipient_ids:
                    values = mail._send_prepare_values(partner=partner)
                    values['partner_id'] = partner
                    email_list.append(values)

                # headers
                headers = {}
                ICP = self.env['ir.config_parameter'].sudo()
                bounce_alias = ICP.get_param("mail.bounce.alias")
                catchall_domain = ICP.get_param("mail.catchall.domain")
                if bounce_alias and catchall_domain:
                    if mail.model and mail.res_id:
                        headers['Return-Path'] = '%s+%d-%s-%d@%s' % (
                            bounce_alias, mail.id, mail.model, mail.res_id,
                            catchall_domain)
                    else:
                        headers['Return-Path'] = '%s+%d@%s' % (
                            bounce_alias, mail.id, catchall_domain)
                if mail.headers:
                    try:
                        headers.update(safe_eval(mail.headers))
                    except Exception:
                        pass

                # Writing on the mail object may fail (e.g. lock on user) which
                # would trigger a rollback *after* actually sending the email.
                # To avoid sending twice the same email, provoke the failure earlier
                mail.write({
                    'state':
                    'exception',
                    'failure_reason':
                    _('Error without exception. Probably due to sending an email without computed recipients.'
                      ),
                })
                # build an RFC2822 email.message.Message object and send it without queuing
                res = None
                for email in email_list:
                    msg = IrMailServer.build_email(
                        email_from=mail.email_from,
                        email_to=email.get('email_to'),
                        subject=mail.subject,
                        body=email.get('body'),
                        body_alternative=email.get('body_alternative'),
                        email_cc=tools.email_split(mail.email_cc),
                        reply_to=mail.reply_to,
                        attachments=attachments,
                        message_id=mail.message_id,
                        references=mail.references,
                        object_id=mail.res_id
                        and ('%s-%s' % (mail.res_id, mail.model)),
                        subtype='html',
                        subtype_alternative='plain',
                        headers=headers)
                    processing_pid = email.pop("partner_id", None)
                    try:
                        res = IrMailServer.send_email(
                            msg,
                            mail_server_id=mail.mail_server_id.id,
                            smtp_session=smtp_session)
                        if processing_pid:
                            success_pids.append(processing_pid)
                        processing_pid = None
                    except AssertionError as error:
                        if str(error) == IrMailServer.NO_VALID_RECIPIENT:
                            failure_type = "RECIPIENT"
                            # No valid recipient found for this particular
                            # mail item -> ignore error to avoid blocking
                            # delivery to next recipients, if any. If this is
                            # the only recipient, the mail will show as failed.
                            _logger.info(
                                "Ignoring invalid recipients for mail.mail %s: %s",
                                mail.message_id, email.get('email_to'))
                        else:
                            raise
                if res:  # mail has been sent at least once, no major exception occurred
                    mail.write({
                        'state': 'sent',
                        'message_id': res,
                        'failure_reason': False
                    })
                    _logger.info(
                        'Mail with ID %r and Message-Id %r successfully sent',
                        mail.id, mail.message_id)
                    # /!\ can't use mail.state here, as mail.refresh() will cause an error
                    # see revid:[email protected] in 6.1
                mail._postprocess_sent_message(success_pids=success_pids,
                                               failure_type=failure_type)
            except MemoryError:
                # prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
                # instead of marking the mail as failed
                _logger.exception(
                    'MemoryError while processing mail with ID %r and Msg-Id %r. Consider raising the --limit-memory-hard startup option',
                    mail.id, mail.message_id)
                # mail status will stay on ongoing since transaction will be rollback
                raise
            except psycopg2.Error:
                # If an error with the database occurs, chances are that the cursor is unusable.
                # This will lead to an `psycopg2.InternalError` being raised when trying to write
                # `state`, shadowing the original exception and forbid a retry on concurrent
                # update. Let's bubble it.
                raise
            except Exception as e:
                failure_reason = tools.ustr(e)
                _logger.exception('failed sending mail (id: %s) due to %s',
                                  mail.id, failure_reason)
                mail.write({
                    'state': 'exception',
                    'failure_reason': failure_reason
                })
                mail._postprocess_sent_message(success_pids=success_pids,
                                               failure_reason=failure_reason,
                                               failure_type='UNKNOWN')
                if raise_exception:
                    if isinstance(e, AssertionError):
                        # get the args of the original error, wrap into a value and throw a MailDeliveryException
                        # that is an except_orm, with name and value as arguments
                        value = '. '.join(e.args)
                        raise MailDeliveryException(_("Mail Delivery Failed"),
                                                    value)
                    raise

            if auto_commit is True:
                self._cr.commit()
        return True
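The Return-Path header assembled above encodes the bounce alias, the mail id and, when available, the related model and record, so bounced messages can be routed back to the right record. A sketch with hypothetical values:

    bounce_alias, catchall_domain = 'bounce', 'example.com'
    mail_id, model, res_id = 42, 'res.partner', 7

    return_path = '%s+%d-%s-%d@%s' % (bounce_alias, mail_id, model, res_id, catchall_domain)
    # -> 'bounce+42-res.partner-7@example.com'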
Example #52
0
 def _attachment_filename(self, records, report):
     return dict((record.id,
                  safe_eval(report.attachment, {
                      'object': record,
                      'time': time
                  })) for record in records)
Example #53
0
    def get_diagram_info(self, id, model, node, connector, src_node, des_node,
                         label, **kw):

        visible_node_fields = kw.get("visible_node_fields", [])
        invisible_node_fields = kw.get("invisible_node_fields", [])
        node_fields_string = kw.get("node_fields_string", [])
        connector_fields = kw.get("connector_fields", [])
        connector_fields_string = kw.get("connector_fields_string", [])

        bgcolors = {}
        shapes = {}
        bgcolor = kw.get("bgcolor", "")
        shape = kw.get("shape", "")

        if bgcolor:
            for color_spec in bgcolor.split(";"):
                if color_spec:
                    colour, color_state = color_spec.split(":")
                    bgcolors[colour] = color_state

        if shape:
            for shape_spec in shape.split(";"):
                if shape_spec:
                    shape_colour, shape_color_state = shape_spec.split(":")
                    shapes[shape_colour] = shape_color_state

        ir_view = http.request.env["ir.ui.view"]
        graphs = ir_view.graph_get(
            int(id),
            model,
            node,
            connector,
            src_node,
            des_node,
            label,
            (140, 180),
        )
        nodes = graphs["nodes"]
        transitions = graphs["transitions"]
        isolate_nodes = {}
        for blnk_node in graphs["blank_nodes"]:
            isolate_nodes[blnk_node["id"]] = blnk_node
        y = [t["y"] for t in nodes.values() if t["x"] == 20 if t["y"]]
        y_max = (y and max(y)) or 120

        connectors = {}
        list_tr = []

        for tr in transitions:
            list_tr.append(tr)
            connectors.setdefault(
                tr,
                {
                    "id": int(tr),
                    "s_id": transitions[tr][0],
                    "d_id": transitions[tr][1],
                },
            )

        connector_model = http.request.env[connector]
        data_connectors = connector_model.search([("id", "in", list_tr)
                                                  ]).read(connector_fields)

        for tr in data_connectors:
            transition_id = str(tr["id"])
            _sourceid, label = graphs["label"][transition_id]
            t = connectors[transition_id]
            t.update(
                source=tr[src_node][1],
                destination=tr[des_node][1],
                options={},
                signal=label,
            )

            for i, fld in enumerate(connector_fields):
                t["options"][connector_fields_string[i]] = tr[fld]

        fields = http.request.env["ir.model.fields"]
        field = fields.search([
            ("model", "=", model),
            ("relation", "=", node),
            ("ttype", "=", "one2many"),
        ])
        node_act = http.request.env[node]
        search_acts = node_act.search([(field.relation_field, "=", id)])
        data_acts = search_acts.read(invisible_node_fields +
                                     visible_node_fields)

        for act in data_acts:
            n = nodes.get(str(act["id"]))
            if not n:
                n = isolate_nodes.get(act["id"], {})
                y_max += 140
                n.update(x=20, y=y_max)
                nodes[act["id"]] = n

            n.update(id=act["id"], color="white", options={})
            for color, expr in bgcolors.items():
                if safe_eval(expr, act):
                    n["color"] = color

            for shape, expr in shapes.items():
                if safe_eval(expr, act):
                    n["shape"] = shape

            for i, fld in enumerate(visible_node_fields):
                n["options"][node_fields_string[i]] = act[fld]

        _id, name = http.request.env[model].browse([id]).name_get()[0]
        ret = dict(
            nodes=nodes,
            conn=connectors,
            display_name=name,
            parent_field=graphs["node_parent_field"],
        )

        # End of original method

        xpos = kw.get("xpos", False)
        ypos = kw.get("ypos", False)

        if xpos and ypos:
            # Nodes collection contains inconsistent key type
            # Integers and integer string representations are used as keys:
            # [1, 2, 3, '4', '5', '6']
            # We need to fix this before we can continue

            nodes = dict()
            for key, value in ret["nodes"].items():
                nodes[int(key)] = value
            ret["nodes"] = nodes

            model_dao = http.request.env[model]

            View = http.request.env["ir.ui.view"]
            nodes_field = View.get_graph_nodes_field(model, node)
            state_ids = model_dao.browse(id)[nodes_field]
            for state in state_ids:
                state_id = state["id"]
                if state_id in nodes:
                    nodes[state_id].update(
                        x=state[xpos],
                        y=state[ypos],
                    )
        return ret
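The bgcolor and shape keyword arguments follow a 'value:condition;value:condition' convention, and each condition is safe_eval'ed against a node record's read() dict. A hedged sketch with a hypothetical spec:

    from odoo.tools.safe_eval import safe_eval

    bgcolor = "gray:state=='done';red:state=='cancel'"   # hypothetical spec
    bgcolors = {}
    for color_spec in bgcolor.split(";"):
        if color_spec:
            colour, expr = color_spec.split(":")
            bgcolors[colour] = expr

    node = {'id': 1, 'state': 'done', 'color': 'white'}
    for color, expr in bgcolors.items():
        if safe_eval(expr, node):
            node['color'] = color   # -> 'gray'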
Example #54
0
    def _send(self,
              auto_commit=False,
              raise_exception=False,
              smtp_session=None):
        IrMailServer = self.env['ir.mail_server']
        for mail_id in self.ids:
            try:
                mail = self.browse(mail_id)
                if mail.state != 'outgoing':
                    if mail.state != 'exception' and mail.auto_delete:
                        mail.sudo().unlink()
                    continue
                # TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method
                if mail.model:
                    model = self.env['ir.model']._get(mail.model)[0]
                else:
                    model = None
                if model:
                    mail = mail.with_context(model_name=model.name)

                # load attachment binary data with a separate read(), as prefetching all
                # `datas` (binary field) could bloat the browse cache, triggering
                # soft/hard mem limits with temporary data.
                attachments = [(a['datas_fname'], base64.b64decode(a['datas']),
                                a['mimetype'])
                               for a in mail.attachment_ids.sudo().read(
                                   ['datas_fname', 'datas', 'mimetype'])]

                # specific behavior to customize the send email for notified partners
                email_list = []
                if mail.email_to:
                    email_list.append(mail.send_get_email_dict())
                for partner in mail.recipient_ids:
                    email_list.append(
                        mail.send_get_email_dict(partner=partner))

                # headers
                headers = {}
                ICP = self.env['ir.config_parameter'].sudo()
                bounce_alias = ICP.get_param("mail.bounce.alias")
                catchall_domain = ICP.get_param("mail.catchall.domain")
                if bounce_alias and catchall_domain:
                    if mail.model and mail.res_id:
                        headers['Return-Path'] = '%s+%d-%s-%d@%s' % (
                            bounce_alias, mail.id, mail.model, mail.res_id,
                            catchall_domain)
                    else:
                        headers['Return-Path'] = '%s+%d@%s' % (
                            bounce_alias, mail.id, catchall_domain)
                if mail.headers:
                    try:
                        headers.update(safe_eval(mail.headers))
                    except Exception:
                        pass

                # Writing on the mail object may fail (e.g. lock on user) which
                # would trigger a rollback *after* actually sending the email.
                # To avoid sending twice the same email, provoke the failure earlier
                mail.write({
                    'state':
                    'exception',
                    'failure_reason':
                    _('Error without exception. Probably due to sending an email without computed recipients.'
                      ),
                })
                mail_sent = False

                # Update notification in a transient exception state to avoid concurrent
                # update in case an email bounces while sending all emails related to current
                # mail record.
                notifs = self.env['mail.notification'].search([
                    ('is_email', '=', True),
                    ('mail_message_id', 'in',
                     mail.mapped('mail_message_id').ids),
                    ('res_partner_id', 'in', mail.mapped('recipient_ids').ids),
                    ('email_status', 'not in', ('sent', 'canceled'))
                ])
                if notifs:
                    notifs.sudo().write({
                        'email_status': 'exception',
                    })

                # build an RFC2822 email.message.Message object and send it without queuing
                res = None
                for email in email_list:
                    msg = IrMailServer.build_email(
                        email_from=mail.email_from,
                        email_to=email.get('email_to'),
                        subject=mail.subject,
                        body=email.get('body'),
                        body_alternative=email.get('body_alternative'),
                        email_cc=tools.email_split(mail.email_cc),
                        reply_to=mail.reply_to,
                        attachments=attachments,
                        message_id=mail.message_id,
                        references=mail.references,
                        object_id=mail.res_id
                        and ('%s-%s' % (mail.res_id, mail.model)),
                        subtype='html',
                        subtype_alternative='plain',
                        headers=headers)
                    try:
                        res = IrMailServer.send_email(
                            msg,
                            mail_server_id=mail.mail_server_id.id,
                            smtp_session=smtp_session)
                    except AssertionError as error:
                        if str(error) == IrMailServer.NO_VALID_RECIPIENT:
                            # No valid recipient found for this particular
                            # mail item -> ignore error to avoid blocking
                            # delivery to next recipients, if any. If this is
                            # the only recipient, the mail will show as failed.
                            _logger.info(
                                "Ignoring invalid recipients for mail.mail %s: %s",
                                mail.message_id, email.get('email_to'))
                        else:
                            raise
                if res:
                    mail.write({
                        'state': 'sent',
                        'message_id': res,
                        'failure_reason': False
                    })
                    mail_sent = True

                # /!\ can't use mail.state here, as mail.refresh() will cause an error
                # see revid:[email protected] in 6.1
                if mail_sent:
                    _logger.info(
                        'Mail with ID %r and Message-Id %r successfully sent',
                        mail.id, mail.message_id)
                mail._postprocess_sent_message(mail_sent=mail_sent)
            except MemoryError:
                # prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
                # instead of marking the mail as failed
                _logger.exception(
                    'MemoryError while processing mail with ID %r and Msg-Id %r. Consider raising the --limit-memory-hard startup option',
                    mail.id, mail.message_id)
                raise
            except (psycopg2.Error, smtplib.SMTPServerDisconnected):
                # If an error with the database or SMTP session occurs, chances are that the cursor
                # or SMTP session are unusable, causing further errors when trying to save the state.
                _logger.exception(
                    'Exception while processing mail with ID %r and Msg-Id %r.',
                    mail.id, mail.message_id)
                raise
            except Exception as e:
                failure_reason = tools.ustr(e)
                _logger.exception('failed sending mail (id: %s) due to %s',
                                  mail.id, failure_reason)
                mail.write({
                    'state': 'exception',
                    'failure_reason': failure_reason
                })
                mail._postprocess_sent_message(mail_sent=False)
                if raise_exception:
                    if isinstance(e, AssertionError):
                        # get the args of the original error, wrap into a value and throw a MailDeliveryException
                        # that is an except_orm, with name and value as arguments
                        value = '. '.join(e.args)
                        raise MailDeliveryException(_("Mail Delivery Failed"),
                                                    value)
                    raise

            if auto_commit is True:
                self._cr.commit()
        return True
Example #55
0
    def generate_coupon(self):
        program = self.coupon_id
        vals = {
            'program_id':
            program.id,
            'sale_order_id':
            self.id,
            'customer_source_id':
            self.partner_id.id,
            'is_free_order':
            program.is_free_order,
            'start_date_use':
            program.start_date_use,
            'end_date_use':
            program.end_date_use,
            'start_hour_use':
            program.start_hour_use,
            'end_hour_use':
            program.end_hour_use,
            # 'expiration_date': datetime.now().date() + timedelta(days=program.validity_duration,
            'expiration_date_2':
            datetime.now().date() + timedelta(days=program.validity_duration)
        }
        order_products = []
        program_products = []
        is_product_ability = False
        for rec in self.order_line:
            order_products.append(rec.product_id)
        for product in self.env['product.product'].search(
                safe_eval(self.coupon_id.rule_products_domain)):
            program_products.append(product)
        for product in order_products:
            if product in program_products:
                is_product_ability = True
        if is_product_ability == True:
            if self.coupon_id.generation_type == 'nbr_coupon' and self.coupon_id.nbr_coupons > 0:
                for count in range(0, self.coupon_id.nbr_coupons):
                    self.env['sale.coupon'].create(vals)

            if self.coupon_id.generation_type == 'nbr_customer':
                vals.update({'partner_id': self.partner_id.id})
                for count in range(0, self.coupon_id.nbr_coupons):
                    coupon = self.env['sale.coupon'].create(vals)
                    subject = '%s, a coupon has been generated for you' % (
                        self.partner_id.name)
                    template = self.env.ref(
                        'sale_coupon.mail_template_sale_coupon',
                        raise_if_not_found=False)
                    if template:
                        template.send_mail(coupon.id,
                                           email_values={
                                               'email_to':
                                               self.partner_id.email,
                                               'email_from':
                                               self.env.user.email or '',
                                               'subject':
                                               subject,
                                           })
            if self.coupon_id.generation_type == 'nbr_vehicles':
                vals.update({'vehicle_id': self.vehicle_id.id})
                for count in range(0, self.coupon_id.nbr_coupons):
                    self.env['sale.coupon'].create(vals)
            self.is_generate_coupon = True
        else:
            raise ValidationError(
                "Your Program Doesn't Contain your any product in that order !"
            )
Example #56
0
def eval_request_params(kwargs):
    for k, v in kwargs.items():
        try:
            kwargs[k] = safe_eval(v)
        except Exception:
            continue
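A short usage note for the helper above: request parameters arrive as strings and are converted in place when they evaluate as literals or simple expressions, and left untouched when evaluation fails.

    kwargs = {'limit': '10', 'active': 'True', 'name': 'foo'}
    eval_request_params(kwargs)
    # -> {'limit': 10, 'active': True, 'name': 'foo'}
    #    ('foo' is not a known name, so safe_eval raises and the string is kept)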
    def _inverse_datas(self):
        # set s3_records to empty recordset
        condition = self._get_s3_settings("s3.condition", "S3_CONDITION")
        if condition and not self.env.context.get("force_s3"):
            condition = safe_eval(condition, mode="eval")
            s3_records = self.sudo().search([("id", "in", self.ids)] +
                                            condition)
        else:
            # if there is no condition or force_s3 in context
            # then store all attachments on s3
            s3_records = self

        if s3_records:
            s3 = self._get_s3_resource()
            if not s3:
                _logger.info(
                    "something wrong on aws side, keep attachments as usual")
                s3_records = self.env[self._name]
            else:
                s3_records = s3_records._filter_protected_attachments()
                s3_records = s3_records.filtered(lambda r: r.type != "url")

        resized_to_remove = self.env["ir.attachment.resized"].sudo()
        # The ``datas`` field may come back empty in the result of the
        # ``s3_records = self.sudo().search([('id', 'in', self.ids)] + condition)``
        # search for non-superusers, while it is still present in the original
        # recordset; intersect with the original recordset (which has ``datas``).
        for attach in self & s3_records:
            resized_to_remove |= attach.sudo().resized_ids
            value = attach.datas
            bin_data = value and value.decode("base64") or ""
            fname = hashlib.sha1(bin_data).hexdigest()
            bucket_name = self._get_s3_settings("s3.bucket", "S3_BUCKET")
            try:
                s3.Bucket(bucket_name).put_object(
                    Key=fname,
                    Body=bin_data,
                    ACL="public-read",
                    ContentType=attach.mimetype,
                )
            except botocore.exceptions.ClientError as e:
                raise exceptions.UserError(str(e))

            vals = {
                "file_size":
                len(bin_data),
                "checksum":
                self._compute_checksum(bin_data),
                "index_content":
                self._index(bin_data, attach.datas_fname, attach.mimetype),
                "store_fname":
                fname,
                "db_datas":
                False,
                "type":
                "url",
                "url":
                self._get_s3_object_url(s3, bucket_name, fname),
            }
            super(IrAttachment, attach.sudo()).write(vals)

        resized_to_remove.mapped("resized_attachment_id").unlink()
        resized_to_remove.unlink()
        super(IrAttachment, self - s3_records)._inverse_datas()
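The s3.condition system parameter read at the top of this method is expected to hold a domain expression selecting which attachments go to S3. A hedged sketch of that selection step, with a hypothetical parameter value:

    from odoo.tools.safe_eval import safe_eval

    condition = "[('file_size', '>', 100000), ('type', '=', 'binary')]"  # hypothetical
    domain = safe_eval(condition, mode="eval")
    # s3_records = attachments.sudo().search([('id', 'in', attachments.ids)] + domain)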
Example #58
0
    def _do_scenario_save(self,
                          message,
                          transition_type,
                          scenario_id=None,
                          step_id=None):
        """
        Save the scenario on this terminal and execute the current step
        Return the action to the terminal
        """
        self.ensure_one()
        scanner_scenario_obj = self.env['scanner.scenario']
        scanner_step_obj = self.env['scanner.scenario.step']
        terminal = self

        tracer = ''

        if (transition_type == 'restart'
                or transition_type == 'back' and terminal.scenario_id.id):
            if terminal.step_id.no_back:
                step_id = terminal.step_id.id
            else:
                last_call = terminal.step_history_ids[-1]

                # Retrieve last values
                step_id = last_call.step_id.id
                transition = last_call.transition_id
                tracer = last_call.transition_id.tracer
                message = safe_eval(last_call.message)

                # Prevent looping on the same step
                if transition.to_id == terminal.step_id and \
                   transition_type == 'back':
                    # Remove the history line
                    last_call.unlink()
                    return self._do_scenario_save(
                        message,
                        transition_type,
                        scenario_id=scenario_id,
                        step_id=step_id,
                    )

        # No scenario in arguments, start a new one
        if not self.scenario_id.id:
            # Retrieve the terminal's warehouse
            terminal_warehouse_ids = terminal.warehouse_id.ids
            # Retrieve the warehouse's scenarios
            scenario_ids = scanner_scenario_obj.search([
                ('name', '=', message),
                ('type', '=', 'scenario'),
                '|',
                ('warehouse_ids', '=', False),
                ('warehouse_ids', 'in', terminal_warehouse_ids),
            ])

            # If at least one scenario was found, pick the start step of the
            # first
            if scenario_ids:
                scenario_id = scenario_ids[0].id
                step_ids = scanner_step_obj.search([
                    ('scenario_id', '=', scenario_id),
                    ('step_start', '=', True),
                ])

                # No start step found on the scenario, return an error
                if not step_ids:
                    return self._send_error([
                        _('No start step found on the scenario'),
                    ])

                step_id = step_ids[0].id
                # Store the first step in terminal history
                terminal.step_history_ids.create({
                    'hardware_id': terminal.id,
                    'step_id': step_id,
                    'message': repr(message)
                })

            else:
                return self._send_error([_('Scenario not found')])

        elif transition_type not in ('back', 'none', 'restart'):
            # Retrieve outgoing transitions from the current step
            transition_obj = self.env['scanner.scenario.transition']
            transitions = transition_obj.search([('from_id', '=', step_id)])

            # Evaluate the condition for each transition
            for transition in transitions:
                step_id = False
                ctx = {
                    'context': self.env.context,
                    'model': self.env[
                        transition.from_id.scenario_id.model_id.sudo().model],
                    'cr': self.env.cr,
                    'pool': self.pool,
                    'env': self.env,
                    'uid': self.env.uid,
                    'm': message,
                    'message': message,
                    't': self,
                    'terminal': self,
                }
                try:
                    expr = safe_eval(str(transition.condition), ctx)
                except Exception:
                    logger.exception(
                        "Error when evaluating transition condition\n%s",
                        transition.condition)
                    raise

                # Invalid condition, evaluate next transition
                if not expr:
                    continue

                # Condition passed, go to this step
                step_id = transition.to_id.id
                tracer = transition.tracer

                # Store the old step id if we are on a back step
                if transition.to_id.step_back and (
                        not terminal.step_history_ids
                        or terminal.step_history_ids[-1].transition_id !=
                        transition):
                    terminal.step_history_ids.create({
                        'hardware_id': terminal.id,
                        'step_id': transition.to_id.id,
                        'transition_id': transition.id,
                        'message': repr(message),
                    })

                # Valid transition found, stop searching
                break

            # No step found, return an error
            if not step_id:
                terminal.log('No valid transition found !')
                return self._unknown_action([
                    _('Please contact'),
                    _('your'),
                    _('administrator'),
                ])

        # Memorize the current step
        terminal._memorize(scenario_id, step_id)

        # Execute the step
        step = terminal.step_id

        ld = {
            'cr': self.env.cr,
            'uid': self.env.uid,
            'pool': self.pool,
            'env': self.env,
            'model': self.env[step.scenario_id.model_id.sudo().model],
            'term': self,
            'context': self.env.context,
            'm': message,
            'message': message,
            't': terminal,
            'terminal': terminal,
            'tracer': tracer,
            'scenario': terminal.scenario_id,
            '_': _,
        }

        terminal.log('Executing step %d : %s' % (step_id, step.name))
        terminal.log('Message : %s' % repr(message))
        if tracer:
            terminal.log('Tracer : %s' % repr(tracer))

        exec(step.python_code, ld)
        if step.step_stop:
            terminal.empty_scanner_values()

        return (
            ld.get('act', 'M'),
            ld.get('res', ['nothing']),
            ld.get('val', 0),
        )
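
The excerpt never shows what a step's `python_code` contains; the locals dictionary `ld` and the `ld.get('act'/'res'/'val')` defaults above define the contract it must follow. A minimal sketch of such a step body under those assumptions (the 'barcode' field and the action code 'M' are illustrative, not taken from the module):

# Runs inside exec() with the locals from _do_scenario_save: 'message'/'m' is
# the scanned string, 'terminal'/'t' the hardware record, 'model' the
# scenario's target model and '_' the translation function.
record = model.search([('barcode', '=', message)], limit=1)
if record:
    act = 'M'                        # action type returned to the terminal
    res = [_('Found: %s') % record.display_name]
    val = record.id
else:
    act = 'M'
    res = [_('Unknown barcode'), message]
    val = 0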
Example #59
    def _eval_formula(self, financial_report, debit_credit, context,
                      currency_table, linesDict):
        if context.analytic_level_id:
            analytic_account_ids = self.env['account.analytic.account'].search(
                [('level_id', '=', context.analytic_level_id.id)]).ids
            analytic_amount_dict = {id: 0.00 for id in analytic_account_ids}
            analytic_final_dict = {id: '' for id in analytic_account_ids}
            if self.domain:
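                # Pull the account.move.line field named on the right-hand
                # side of the first formula (e.g. 'balance' in
                # 'balance = sum.balance').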
                field_data = self.formulas.split(';')
                field_data = field_data[0].split('=')
                field_data = field_data[1].split('.')
                # field_data_sign = field_data[0].replace('sum', '')
                field_data_sign = ''

                domain = ast.literal_eval(self.domain)
                domain.extend([
                    ('date', '>=', self.env.context.get('date_from')),
                    ('date', '<=', self.env.context.get('date_to'))
                ])
                line_ids = self.env['account.move.line'].search(domain)
                for line in line_ids:
                    acc_id = line.analytic_account_id.id
                    if acc_id in analytic_amount_dict:
                        analytic_amount_dict[acc_id] += \
                            line.read([field_data[1]])[0].get(field_data[1])
                # Updating main dict
                for id in analytic_final_dict:
                    analytic_final_dict[id] = (
                        field_data_sign + str(analytic_amount_dict.get(id)))
            elif self.formulas:
                result = self._expand_formulas(self.formulas)
                for item in result:
                    if len(item) > 1:
                        if isinstance(item, tuple):
                            analytic_amount_dict2 = {
                                id: 0.00 for id in analytic_account_ids}
                            analytic_final_dict2 = {
                                id: '' for id in analytic_account_ids}
                            for item2 in item:
                                if len(item2) > 1 and '.' in item2:
                                    data = item2.split('.')
                                    report_line_id = self.search(
                                        [('code', '=', data[0])], limit=1)
                                    field_data = report_line_id.formulas.split(
                                        ';')
                                    field_data = field_data[0].split('=')
                                    field_data = field_data[1].split('.')
                                    # field_data_sign = field_data[0].replace('sum', '')
                                    field_data_sign = ''

                                    domain = ast.literal_eval(
                                        report_line_id.domain)
                                    domain.extend([
                                        ('date', '>=',
                                         self.env.context.get('date_from')),
                                        ('date', '<=',
                                         self.env.context.get('date_to'))
                                    ])
                                    line_ids = self.env[
                                        'account.move.line'].search(domain)
                                    for line in line_ids:
                                        acc_id = line.analytic_account_id.id
                                        if acc_id in analytic_amount_dict2:
                                            analytic_amount_dict2[acc_id] += \
                                                line.read([field_data[1]])[0].get(
                                                    field_data[1])
                                    for id in analytic_amount_dict2:
                                        analytic_final_dict2[id] = (
                                            analytic_final_dict2.get(id) +
                                            field_data_sign +
                                            str(analytic_amount_dict2.get(id)))
                                elif len(item2) > 1 and item2 == 'NDays':
                                    d1 = datetime.strptime(
                                        self.env.context['date_from'],
                                        "%Y-%m-%d")
                                    d2 = datetime.strptime(
                                        self.env.context['date_to'],
                                        "%Y-%m-%d")
                                    days = (d2 - d1).days
                                    for id in analytic_final_dict2:
                                        analytic_final_dict2[id] = (
                                            str(analytic_final_dict2.get(id)) +
                                            str(days))
                                else:
                                    for id in analytic_final_dict2:
                                        analytic_final_dict2[id] = (
                                            str(analytic_final_dict2.get(id)) +
                                            item2)
                            # Updating main dict
                            for id in analytic_final_dict2:
                                analytic_final_dict[id] = (
                                    analytic_final_dict.get(id) + '(' +
                                    analytic_final_dict2.get(id) + ')')
                        elif len(item) > 1 and item == 'NDays':
                            # Updating main dict
                            d1 = datetime.strptime(
                                self.env.context['date_from'], "%Y-%m-%d")
                            d2 = datetime.strptime(self.env.context['date_to'],
                                                   "%Y-%m-%d")
                            days = (d2 - d1).days
                            for id in analytic_final_dict:
                                analytic_final_dict[id] = (
                                    analytic_final_dict.get(id) + str(days))
                        else:
                            data = item.split('.')
                            report_line_id = self.search(
                                [('code', '=', data[0])], limit=1)
                            field_data = report_line_id.formulas.split(';')
                            field_data = field_data[0].split('=')
                            field_data = field_data[1].split('.')
                            # field_data_sign = field_data[0].replace('sum', '')
                            field_data_sign = ''

                            domain = ast.literal_eval(report_line_id.domain)
                            domain.extend([('date', '>=',
                                            self.env.context.get('date_from')),
                                           ('date', '<=',
                                            self.env.context.get('date_to'))])
                            line_ids = self.env['account.move.line'].search(
                                domain)
                            for line in line_ids:
                                acc_id = line.analytic_account_id.id
                                if acc_id in analytic_amount_dict:
                                    analytic_amount_dict[acc_id] += \
                                        line.read([field_data[1]])[0].get(
                                            field_data[1])
                            # Updating main dict
                            for id in analytic_amount_dict:
                                analytic_final_dict[id] = (
                                    analytic_final_dict.get(id) +
                                    field_data_sign +
                                    str(analytic_amount_dict.get(id)))

                    elif len(item) > 1 and item == 'NDays':
                        # Updating main dict
                        d1 = datetime.strptime(self.env.context['date_from'],
                                               "%Y-%m-%d")
                        d2 = datetime.strptime(self.env.context['date_to'],
                                               "%Y-%m-%d")
                        days = (d2 - d1).days
                        for id in analytic_final_dict:
                            analytic_final_dict[id] = (
                                analytic_final_dict.get(id) + str(days))
                    else:
                        # Updating main dict
                        for id in analytic_final_dict:
                            analytic_final_dict[id] = (
                                analytic_final_dict.get(id) + item)

        debit_credit = debit_credit and financial_report.debit_credit
        formulas = self._split_formulas()
        if self.code and self.code in linesDict:
            res = linesDict[self.code]
        else:
            res = FormulaLine(self,
                              currency_table,
                              financial_report,
                              linesDict=linesDict)
        vals = {}
        vals['balance'] = res.balance
        if context.analytic_level_id:
            vals['analytic_final_dict'] = analytic_final_dict
        if debit_credit:
            vals['credit'] = res.credit
            vals['debit'] = res.debit

        results = {}
        if self.domain and self.groupby and self.show_domain != 'never':
            aml_obj = self.env['account.move.line']
            tables, where_clause, where_params = aml_obj._query_get(
                domain=self.domain)
            sql, params = self._get_with_statement(financial_report)
            if financial_report.tax_report:
                where_clause += ''' AND "account_move_line".tax_exigible = 't' '''

            groupby = self.groupby or 'id'
            if groupby not in self.env['account.move.line']:
                raise ValueError(
                    'Groupby should be a field from account.move.line')
            select, select_params = self._query_get_select_sum(currency_table)
            params += select_params
            sql = (sql + "SELECT \"account_move_line\"." + groupby + ", " +
                   select + " FROM " + tables + " WHERE " + where_clause +
                   " GROUP BY \"account_move_line\"." + groupby)

            params += where_params
            self.env.cr.execute(sql, params)
            results = self.env.cr.fetchall()
            results = dict([(k[0], {
                'balance': k[1],
                'amount_residual': k[2],
                'debit': k[3],
                'credit': k[4]
            }) for k in results])
            c = FormulaContext(self.env['account.financial.html.report.line'],
                               linesDict,
                               currency_table,
                               financial_report,
                               only_sum=True)
            if formulas:
                for key in results:
                    c['sum'] = FormulaLine(results[key],
                                           currency_table,
                                           financial_report,
                                           type='not_computed')
                    c['sum_if_pos'] = FormulaLine(
                        results[key]['balance'] >= 0.0 and results[key]
                        or {'balance': 0.0},
                        currency_table,
                        financial_report,
                        type='not_computed')
                    c['sum_if_neg'] = FormulaLine(
                        results[key]['balance'] <= 0.0 and results[key]
                        or {'balance': 0.0},
                        currency_table,
                        financial_report,
                        type='not_computed')
                    for col, formula in formulas.items():
                        if col in results[key]:
                            results[key][col] = safe_eval(formula,
                                                          c,
                                                          nocopy=True)
            to_del = []
            for key in results:
                if self.env.user.company_id.currency_id.is_zero(
                        results[key]['balance']):
                    to_del.append(key)
            for key in to_del:
                del results[key]

        results.update({'line': vals})
        return results
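
The repeated split(';') / split('=') / split('.') chain above extracts the account.move.line field referenced on the right-hand side of a line's first formula. A small standalone illustration of that parsing, assuming the formulas string has the usual 'column = sum.field' shape implied by the code:

formulas = "balance = sum.balance; debit = sum.debit"

first = formulas.split(';')[0]        # "balance = sum.balance"
rhs = first.split('=')[1]             # " sum.balance"
field_name = rhs.split('.')[1]        # "balance", the field read on account.move.line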
Example #60
    def _send(
        self,
        auto_commit=False,
        raise_exception=False,
        smtp_session=None,
    ):
        IrMailServer = self.env["ir.mail_server"]
        for mail_id in self.ids:
            try:
                mail = self.browse(mail_id)
                if mail.state != "outgoing":
                    if mail.state != "exception" and mail.auto_delete:
                        mail.sudo().unlink()
                    continue
                # TDE note: remove me when model_id field is present on
                # mail.message - done here to avoid doing it multiple times
                # in the sub method
                if mail.model:
                    model = self.env["ir.model"]._get(mail.model)[0]
                else:
                    model = None
                if model:
                    mail = mail.with_context(model_name=model.name)

                # Load attachment binary data with a separate read(), as
                # prefetching all `datas` (binary field) could bloat the
                # browse cache, triggering soft/hard memory limits with
                # temporary data.
                attachments = [(
                    a["datas_fname"],
                    base64.b64decode(a["datas"]),
                    a["mimetype"],
                ) for a in mail.attachment_ids.sudo().read(
                    ["datas_fname", "datas", "mimetype"])]

                # specific behavior to customize the send
                #  email for notified partners
                email_list = []
                if mail.email_to:
                    email_list.append(mail.send_get_email_dict())
                for partner in mail.recipient_ids:
                    email_list.append(
                        mail.send_get_email_dict(partner=partner))
                _logger.info("email_list? %r ", email_list)
                # headers
                headers = {}
                ICP = self.env["ir.config_parameter"].sudo()
                bounce_alias = ICP.get_param("mail.bounce.alias")
                catchall_domain = ICP.get_param("mail.catchall.domain")
                if bounce_alias and catchall_domain:
                    if mail.model and mail.res_id:
                        headers["Return-Path"] = "%s+%d-%s-%d@%s" % (
                            bounce_alias,
                            mail.id,
                            mail.model,
                            mail.res_id,
                            catchall_domain,
                        )
                    else:
                        headers["Return-Path"] = "%s+%d@%s" % (
                            bounce_alias,
                            mail.id,
                            catchall_domain,
                        )
                if mail.headers:
                    try:
                        headers.update(safe_eval(mail.headers))
                    except Exception:
                        pass

                # Writing on the mail object may fail (e.g. lock on user),
                # which would trigger a rollback *after* actually sending the
                # email. To avoid sending the same email twice, provoke the
                # failure earlier.
                mail.write({
                    "state": "exception",
                    "failure_reason": _(
                        "Error without exception. Probably due to sending an "
                        "email without computed recipients."),
                })
                mail_sent = False

                # build an RFC2822 email.message.Message object
                #  and send it without queuing

                res = None
                for email in email_list[:1]:
                    if mail.email_from not in email.get("email_cc"):
                        _logger.info(
                            "must assign to cc list %r ",
                            mail.email_from,
                        )
                    else:
                        _logger.info(
                            "from is assigned to cc list %r ",
                            mail.email_from,
                        )
                    _logger.info("email_from %r ", mail.email_from)
                    _logger.info("email_to %r ", email.get("email_to"))
                    _logger.info("email_cc %r ", email.get("email_cc"))
                    _logger.info("email_replay_to %r ", mail.reply_to)
                    _logger.info("email_subhject %r ", mail.subject)
                    msg = IrMailServer.build_email(
                        email_from=mail.email_from,
                        email_to=email.get("email_to"),
                        subject=mail.subject,
                        body=email.get("body"),
                        body_alternative=email.get("body_alternative"),
                        email_cc=email.get("email_cc"),
                        reply_to=mail.reply_to,
                        attachments=attachments,
                        message_id=mail.message_id,
                        references=mail.references,
                        object_id=mail.res_id
                        and ("%s-%s" % (mail.res_id, mail.model)),
                        subtype="html",
                        subtype_alternative="plain",
                        headers=headers,
                    )
                    try:
                        res = IrMailServer.send_email(
                            msg,
                            mail_server_id=mail.mail_server_id.id,
                            smtp_session=smtp_session,
                        )
                    except AssertionError as error:
                        if str(error) == IrMailServer.NO_VALID_RECIPIENT:
                            # No valid recipient found for this particular
                            # mail item -> ignore error to avoid blocking
                            # delivery to next recipients, if any. If this is
                            # the only recipient, the mail will show as failed.
                            _logger.info(
                                "Ignoring invalid recipients for mail.mail %s: %s",
                                mail.message_id,
                                email.get("email_to"),
                            )
                        else:
                            raise
                    if res:
                        mail.write({
                            "state": "sent",
                            "message_id": res,
                            "failure_reason": False,
                        })
                        mail_sent = True

                # /!\ can't use mail.state here, as mail.refresh()
                # will cause an error
                # see revid:[email protected]
                # in 6.1
                if mail_sent:
                    _logger.info(
                        "Mail with ID %r and Message-Id %r successfully sent",
                        mail.id,
                        mail.message_id,
                    )
                mail._postprocess_sent_message(mail_sent=mail_sent)
            except MemoryError:
                # prevent catching transient MemoryErrors, bubble
                # up to notify user or abort cron job
                # instead of marking the mail as failed
                _logger.exception(
                    "MemoryError while processing mail with ID %r and Msg-Id %r. Consider raising the --limit-memory-hard startup option",
                    mail.id,
                    mail.message_id,
                )
                raise
            except psycopg2.Error:
                # If an error with the database occurs, chances are that the
                # cursor is unusable. This will lead to a
                # `psycopg2.InternalError` being raised when trying to write
                # `state`, shadowing the original exception and forbidding a
                # retry on concurrent update. Let's bubble it up.
                raise
            except Exception as e:
                failure_reason = tools.ustr(e)
                _logger.exception(
                    "failed sending mail (id: %s) due to %s",
                    mail.id,
                    failure_reason,
                )
                mail.write({
                    "state": "exception",
                    "failure_reason": failure_reason,
                })
                mail._postprocess_sent_message(mail_sent=False)
                if raise_exception:
                    if isinstance(e, AssertionError):
                        # get the args of the original error, wrap
                        #  into a value and throw a MailDeliveryException
                        # that is an except_orm, with name and value
                        #  as arguments
                        value = ". ".join(e.args)
                        raise MailDeliveryException(_("Mail Delivery Failed"),
                                                    value)
                    raise

            if auto_commit is True:
                self._cr.commit()
        return True
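
A minimal usage sketch for the override above, assuming the standard mail.mail queue workflow (e.g. from an Odoo shell or a cron method; nothing here is specific to this module):

# Fetch queued mails and send them through the overridden _send();
# auto_commit persists each mail's state as it is processed.
outgoing = env["mail.mail"].search([("state", "=", "outgoing")], limit=50)
outgoing._send(auto_commit=True, raise_exception=False)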