def create(self, cr, user, vals, context=None):
     
     if context is None:
         context = {}
         
     ir_config_parameter = self.pool.get('ir.config_parameter')
     empty_location_active = safe_eval(ir_config_parameter.get_param(cr, user, 'stock.empty_location_active', 'False'))
     if empty_location_active:
         empty_location_id = safe_eval(ir_config_parameter.get_param(cr, user, 'stock.empty_location', 'False'))
         loc_id = int(empty_location_id)
     else:
         # fall back to a hard-coded stock location id
         loc_id = 12
     
     context['product_id'] = vals['product_id']
     stock_values = self.pool.get('stock.location')._product_value(cr, user, [loc_id], ['stock_real'], arg=None, context=context)
     vals['product_qty_onstock'] = int(stock_values[loc_id]['stock_real'])
     
     # How much was actually ordered; if a work order is created for a semi-finished product because of another product, the ordered quantity is 0
     move_ids = self.pool.get('stock.move').search(cr, user, [('product_id','=',vals['product_id']),
                                                   ('basket_status','=',3)])
     prod_ordered = 0
     for move_line in self.pool.get('stock.move').browse(cr, user, move_ids):
         prod_ordered = prod_ordered + move_line.product_qty
     vals['product_qty_ordered'] = prod_ordered
     
     res = super(mrp_production, self).create(cr, user, vals, context=context)
     return res
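
Most of the snippets in this collection share one small pattern: ir.config_parameter.get_param always returns a string (or the supplied default), and safe_eval turns that text back into a Python value without touching the builtin eval. Below is a minimal sketch of that pattern in the same old-style API, assuming a standard OpenERP install; the parameter key 'my_module.some_flag' is invented for illustration.

from openerp.tools.safe_eval import safe_eval

def read_param(pool, cr, uid, key, default='False'):
    """Read a system parameter and convert its text value into a Python literal."""
    icp = pool.get('ir.config_parameter')
    # get_param returns the stored string or the default; safe_eval evaluates it
    # in a restricted interpreter, so 'False', '42' or "[('a', '=', 1)]" come back
    # as the corresponding Python objects.
    return safe_eval(icp.get_param(cr, uid, key, default))

# e.g. read_param(self.pool, cr, uid, 'my_module.some_flag')  ->  False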
Example #2
 def get_default_auth_signup_template_user_id(self, cr, uid, fields, context=None):
     icp = self.pool.get('ir.config_parameter')
     # we use safe_eval on the result, since the value of the parameter is a nonempty string
     return {
         'auth_signup_uninvited': safe_eval(icp.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')),
         'auth_signup_template_user_id': safe_eval(icp.get_param(cr, uid, 'auth_signup.template_user_id', 'False')),
     }
Example #3
    def _ogone_form_validate(self, cr, uid, tx, data, context=None):
        if tx.state == "done":
            _logger.info("Ogone: trying to validate an already validated tx (ref %s)", tx.reference)
            return True

        status = int(data.get("STATUS", "0"))
        if status in self._ogone_valid_tx_status:
            tx.write(
                {
                    "state": "done",
                    "date_validate": datetime.datetime.strptime(data["TRXDATE"], "%m/%d/%y").strftime(
                        DEFAULT_SERVER_DATE_FORMAT
                    ),
                    "acquirer_reference": data["PAYID"],
                }
            )
            if tx.s2s_cb_eval:
                safe_eval(tx.s2s_cb_eval, {"self": tx})
            return True
        elif status in self._ogone_cancel_tx_status:
            tx.write({"state": "cancel", "acquirer_reference": data.get("PAYID")})
        elif status in self._ogone_pending_tx_status:
            tx.write({"state": "pending", "acquirer_reference": data.get("PAYID")})
        else:
            error = "Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s" % {
                "error_str": data.get("NCERRORPLUS"),
                "error_code": data.get("NCERROR"),
                "error_msg": ogone.OGONE_ERROR_MAP.get(data.get("NCERROR")),
            }
            _logger.info(error)
            tx.write({"state": "error", "state_message": error, "acquirer_reference": data.get("PAYID")})
            return False
Example #4
    def _send_email_passkey(self, cr, user_id, user_agent_env):
        """ Send a email to the admin of the system and / or the user
 to inform passkey use."""
        mails = []
        mail_obj = self.pool['mail.mail']
        icp_obj = self.pool['ir.config_parameter']
        admin_user = self.browse(cr, SUPERUSER_ID, SUPERUSER_ID)
        login_user = self.browse(cr, SUPERUSER_ID, user_id)
        send_to_admin = safe_eval(icp_obj.get_param(
            cr, SUPERUSER_ID, 'auth_admin_passkey.send_to_admin', 'True'))
        send_to_user = safe_eval(icp_obj.get_param(
            cr, SUPERUSER_ID, 'auth_admin_passkey.send_to_user', 'True'))

        if send_to_admin and admin_user.email:
            mails.append({'email': admin_user.email, 'lang': admin_user.lang})
        if send_to_user and login_user.email:
            mails.append({'email': login_user.email, 'lang': login_user.lang})

        for mail in mails:
            subject = self._get_translation(
                cr, mail['lang'], _('Passkey used'))
            body = self._get_translation(
                cr, mail['lang'],
                _("""Admin user used his passkey to login with '%s'.\n\n"""
                    """\n\nTechnicals informations belows : \n\n"""
                    """- Login date : %s\n\n""")) % (
                        login_user.login,
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
            for k, v in user_agent_env.iteritems():
                body += ("- %s : %s\n\n") % (k, v)
            mail_obj.create(
                cr, SUPERUSER_ID, {
                    'email_to': mail['email'],
                    'subject': subject,
                    'body_html': '<pre>%s</pre>' % body})
Example #5
def _child_get(node, self=None, tagname=None):
    for n in node:
        if self and self.localcontext and n.get("rml_loop"):

            for ctx in safe_eval(n.get("rml_loop"), {}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag == tagname):
                    if n.get("rml_except", False):
                        try:
                            safe_eval(n.get("rml_except"), {}, self.localcontext)
                        except GeneratorExit:
                            continue
                        except Exception, e:
                            _logger.info('rml_except: "%s"', n.get("rml_except", ""), exc_info=True)
                            continue
                    if n.get("rml_tag"):
                        try:
                            (tag, attr) = safe_eval(n.get("rml_tag"), {}, self.localcontext)
                            n2 = copy.deepcopy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except GeneratorExit:
                            yield n
                        except Exception, e:
                            _logger.info('rml_tag: "%s"', n.get("rml_tag", ""), exc_info=True)
                            yield n
                    else:
                        yield n
            continue
Example #6
 def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):
     self.ensure_one()
     if self.amount_type == 'code':
         company = self.env.user.company_id
         localdict = {'base_amount': base_amount, 'price_unit':price_unit, 'quantity': quantity, 'product':product, 'partner':partner, 'company': company}
         safe_eval(self.python_compute, localdict, mode="exec", nocopy=True)
         return localdict['result']
     return super(AccountTaxPython, self)._compute_amount(base_amount, price_unit, quantity, product, partner)
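
Several examples here (#6, #8, #11 and the gamification code further down) rely on the same exec-style contract: the snippet is run with mode="exec" against a locals dictionary that is not copied (nocopy=True), and whatever the snippet assigns to result is read back by the caller. A tiny sketch of that contract with invented values:

from openerp.tools.safe_eval import safe_eval

localdict = {'base_amount': 100.0, 'quantity': 2.0}
# mode="exec" allows statements; nocopy=True makes safe_eval work on localdict
# itself rather than a copy, so assignments made by the code stay visible here.
safe_eval("result = base_amount * quantity", localdict, mode="exec", nocopy=True)
assert localdict['result'] == 200.0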
Example #7
 def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None):
     result = []
     for obj in objs:
         tobreak = False
         for cond in conditions:
             if cond and cond[0]:
                 c = cond[0]
                 temp = c[0](safe_eval('obj.'+c[1],{'obj': obj}))
                 if not safe_eval('\''+temp+'\''+' '+c[2]+' '+'\''+str(c[3])+'\''):
                     tobreak = True
         if tobreak:
             break
         levels = {}
         row = []
         for i in range(len(fields)):
             if not fields[i]:
                 row.append(row_canvas and row_canvas[i])
                 if row_canvas[i]:
                     row_canvas[i]=False
             elif len(fields[i])==1:
                 if obj:
                     row.append(str(safe_eval('obj.'+fields[i][0],{'obj': obj})))
                 else:
                     row.append(None)
             else:
                 row.append(None)
                 levels[fields[i][0]]=True
         if not levels:
             result.append(row)
         else:
             # Process group_by data first
             key = []
             if group_by is not None and fields[group_by] is not None:
                 if fields[group_by][0] in levels.keys():
                     key.append(fields[group_by][0])
                 for l in levels.keys():
                     if l != fields[group_by][0]:
                         key.append(l)
             else:
                 key = levels.keys()
             for l in key:
                 objs = safe_eval('obj.'+l,{'obj': obj})
                 if not isinstance(objs, (BaseModel, list)):
                     objs = [objs]
                 field_new = []
                 cond_new = []
                 for f in range(len(fields)):
                     if (fields[f] and fields[f][0])==l:
                         field_new.append(fields[f][1:])
                         cond_new.append(conditions[f][1:])
                     else:
                         field_new.append(None)
                         cond_new.append(None)
                 if len(objs):
                     result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by)
                 else:
                     result.append(row)
     return result 
Example #8
 def compute_all(self, price_unit, currency=None, quantity=1.0, product=None, partner=None):
     taxes = self.env['account.tax']
     company = self.env.user.company_id
     for tax in self:
         localdict = {'price_unit':price_unit, 'quantity': quantity, 'product':product, 'partner':partner, 'company': company}
         safe_eval(tax.python_applicable, localdict, mode="exec", nocopy=True)
         if localdict.get('result', False):
             taxes += tax
     return super(AccountTaxPython, taxes).compute_all(price_unit, currency, quantity, product, partner)
Example #9
    def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
        """Add a new ir.rule entry for model_id and domain on the target group_id.
           If ``restrict`` is True, instead of adding a rule, the domain is
           combined with AND operator with all existing rules in the group, to implement
           an additional restriction (as of 6.1, multiple rules in the same group are
           OR'ed by default, so a restriction must alter all existing rules)

           This is necessary because the personal rules of the user that is sharing
           are first copied to the new share group. Afterwards the filters used for
           sharing are applied as an additional layer of rules, which are likely to
           apply to the same model. The default rule algorithm would OR them (as of 6.1),
           which would result in a combined set of permissions that could be larger
           than those of the user that is sharing! Hence we must forcefully AND the
           rules at this stage.
           One possibly undesirable effect can appear when sharing with a
           pre-existing group, in which case altering pre-existing rules would not
           be desired. This is addressed in the portal module.
           """
        if rule_name is None:
            rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
                            (current_user.name, current_user.login, group_id)
        rule_obj = self.pool.get('ir.rule')
        rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
        if rule_ids:
            for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
                if rule.domain_force == domain:
                    # don't create it twice!
                    if restrict:
                        continue
                    else:
                        _logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
                        return
                if restrict:
                    # restricting existing rules is done by adding the clause
                    # with an AND, but we can't alter the rule if it belongs to
                    # other groups, so we duplicate if needed
                    rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
                    eval_ctx = rule_obj._eval_context_for_combinations()
                    org_domain = expression.normalize_domain(safe_eval(rule.domain_force, eval_ctx))
                    new_clause = expression.normalize_domain(safe_eval(domain, eval_ctx))
                    combined_domain = expression.AND([new_clause, org_domain])
                    rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
                    _logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
        if not rule_ids or not restrict:
            # Adding the new rule in the group is ok for normal cases, because rules
            # in the same group and for the same model will be combined with OR
            # (as of v6.1), so the desired effect is achieved.
            rule_obj.create(cr, UID_ROOT, {
                'name': rule_name,
                'model_id': model_id,
                'domain_force': domain,
                'groups': [(4,group_id)]
                })
            _logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
Example #10
    def _ogone_s2s_validate_tree(self, tx, tree, tries=2):
        if tx.state not in ('draft', 'pending'):
            _logger.info('Ogone: trying to validate an already validated tx (ref %s)', tx.reference)
            return True

        status = int(tree.get('STATUS') or 0)
        if status in self._ogone_valid_tx_status:
            tx.write({
                'state': 'done',
                'date_validate': datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT),
                'acquirer_reference': tree.get('PAYID'),
            })
            if tree.get('ALIAS') and tx.partner_id and tx.type == 'form_save' and not tx.payment_method_id:
                pm = tx.env['payment.method'].create({
                    'partner_id': tx.partner_id.id,
                    'acquirer_id': tx.acquirer_id.id,
                    'acquirer_ref': tree.get('ALIAS'),
                    'name': tree.get('CARDNO'),
                })
                tx.write({'payment_method_id': pm.id})
            if tx.callback_eval:
                safe_eval(tx.callback_eval, {'self': tx})
            return True
        elif status in self._ogone_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'acquirer_reference': tree.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status:
            tx.write({
                'state': 'pending',
                'acquirer_reference': tree.get('PAYID'),
                'html_3ds': str(tree.HTML_ANSWER).decode('base64')
            })
        elif (not status or status in self._ogone_wait_tx_status) and tries > 0:
            time.sleep(0.5)  # short pause before polling the transaction status again
            tx.write({'acquirer_reference': tree.get('PAYID')})
            tree = self._ogone_s2s_get_tx_status(tx)
            return self._ogone_s2s_validate_tree(tx, tree, tries - 1)
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': tree.get('NCERRORPLUS'),
                'error_code': tree.get('NCERROR'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(tree.get('NCERROR')),
            }
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': tree.get('PAYID'),
            })
            return False
Example #11
 def _rule_eval(self, rule, obj_name, rec):
     expr = rule.code
     space = self._exception_rule_eval_context(obj_name, rec)
     try:
         safe_eval(expr,
                   space,
                   mode='exec',
                   nocopy=True)  # nocopy allows to return 'result'
     except Exception, e:
         raise except_orm(
             _('Error'),
             _('Error when evaluating the sale exception '
               'rule:\n %s \n(%s)') % (rule.name, e))
Example #12
    def _signup_create_user(self, cr, uid, values, context=None):
        """ create a new user from the template user """
        ir_config_parameter = self.pool.get('ir.config_parameter')
        template_user_id = safe_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
        assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'

        # check that uninvited users may sign up
        if 'partner_id' not in values:
            if not safe_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')):
                raise Exception('Signup is not allowed for uninvited users')

        # create a copy of the template user (attached to a specific partner_id if given)
        values['active'] = True
        return self.copy(cr, uid, template_user_id, values, context=context)
Example #13
    def compute_product_dimension_extra_price(self, cr, uid, product_id,
                                              product_price_extra=False, dim_price_margin=False,
                                              dim_price_extra=False, context=None):
        if context is None:
            context = {}
        dimension_extra = 0.0
        product = self.browse(cr, uid, product_id, context=context)
        for dim in product.dimension_value_ids:
            if product_price_extra and dim_price_margin and dim_price_extra:
                dimension_extra += (safe_eval('product.' + product_price_extra,
                                              {'product': product})
                                    * safe_eval('dim.' + dim_price_margin,
                                                {'dim': dim})
                                    + safe_eval('dim.' + dim_price_extra,
                                                {'dim': dim}))
            elif not product_price_extra and not dim_price_margin and dim_price_extra:
                dimension_extra += safe_eval('dim.' + dim_price_extra, {'dim': dim})
            elif product_price_extra and dim_price_margin and not dim_price_extra:
                dimension_extra += (safe_eval('product.' + product_price_extra,
                                              {'product': product})
                                    * safe_eval('dim.' + dim_price_margin,
                                                {'dim': dim}))
            elif product_price_extra and not dim_price_margin and dim_price_extra:
                dimension_extra += (safe_eval('product.' + product_price_extra,
                                              {'product': product})
                                    + safe_eval('dim.' + dim_price_extra, {'dim': dim}))

        if 'uom' in context:
            product_uom_obj = self.pool.get('product.uom')
            uom = product.uos_id or product.uom_id
            dimension_extra = product_uom_obj._compute_price(cr, uid, uom.id,
                                                             dimension_extra, context['uom'])
        return dimension_extra
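
Examples #7 and #13 use safe_eval for dynamic attribute access: a field name taken from configuration is appended to 'product.' or 'obj.' and evaluated against the record passed in the evaluation dictionary. A stand-alone illustration follows; FakeProduct and the field name are invented stand-ins for a browse record and a configured field.

from openerp.tools.safe_eval import safe_eval

class FakeProduct(object):
    """Stand-in for a product browse record."""
    list_price = 25.0

product = FakeProduct()
field_name = 'list_price'  # hypothetical value coming from configuration
extra = safe_eval('product.' + field_name, {'product': product})
assert extra == getattr(product, field_name) == 25.0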
Example #14
    def apply_eval(text, obj, eval_context):
        parts = open_field.split(text)
        
        otext = parts[0]
        parts.pop(0)
        while len(parts) > 0:
            ep = close_field.split(parts[0])
            parts.pop(0)
            expression = ep[0]
            _logger.debug("Evaluando: [%s], con eval_context=[%s]" % (expression, unicode(eval_context)))
            try:
                output = unicode(safe_eval(expression, 
                                           locals_dict=eval_context))
                otext += output
                _logger.debug("Resultado: %s" % output)

            except LoopSkip:
                raise
            except GeneratorSkip:
                raise
            except Exception, e:
                otext += unicode(e)
                _logger.info("Unexpected exception [%s]" % unicode(e))
            if len(ep) > 1:
                otext += ep[1]
Example #15
 def get_default_auth_admin_passkey_send_to_user(
         self, cr, uid, ids, context=None):
     icp = self.pool['ir.config_parameter']
     return {
         'auth_admin_passkey_send_to_user': safe_eval(icp.get_param(
             cr, uid, 'auth_admin_passkey.send_to_user', 'True')),
     }
Example #16
    def update_menu(self, cr, uid, action_report, context=None):
        if action_report.created_menu_id and not action_report.linked_menu_id:
            self.delete_menu(cr, uid, action_report.created_menu_id.id, context=context)

        if action_report.report_type == 'pentaho' and action_report.linked_menu_id:
            groups_id = [(6, 0, map(lambda x: x.id, action_report.groups_id))]
            if not action_report.created_menu_id:
                result = self.create_menu(cr, uid, {'name': action_report.name,
                                                    'linked_menu_id': action_report.linked_menu_id.id,
                                                    'report_name': action_report.report_name,
                                                    'groups_id': groups_id,
                                                    }, context=context)
            else:
                action = action_report.created_menu_id.action
                if action and action._model._name == 'ir.actions.act_window':
                    existing_context = safe_eval(self.pool.get('ir.actions.act_window').browse(cr, uid, action.id, context=context).context)
                    new_context = existing_context if type(existing_context) == dict else {}
                    new_context['service_name'] = action_report.report_name or ''
                    self.pool.get('ir.actions.act_window').write(cr, uid, [action.id], {'name': action_report.name or 'Pentaho Report',
                                                                                        'context': str(new_context),
                                                                                        }, context=context)

                self.pool.get('ir.ui.menu').write(cr, SUPERUSER_ID, [action_report.created_menu_id.id], {'name': action_report.name or 'Pentaho Report',
                                                                                                         'parent_id': action_report.linked_menu_id.id,
                                                                                                         'groups_id': groups_id,
                                                                                                         }, context=context)
                result = action_report.created_menu_id.id
        else:
            result = 0

        return result
Example #17
    def _get_report_issue(self, cr, uid, ids, context=None):
        issue_obj = self.pool.get('project.issue')
        wzr_brw = self.browse(cr, uid, ids, context=context)[0]
        domain_issues = wzr_brw.filter_issue_id and \
            wzr_brw.filter_issue_id.domain or []
        if not domain_issues:
            return []
        dom_issues = [len(d) > 1 and tuple(d) or d
                      for d in safe_eval(domain_issues)]
        # Setting issues elements.
        issues_grouped = issue_obj.read_group(cr, uid, dom_issues,
                                              ['analytic_account_id'],
                                              ['analytic_account_id'],
                                              context=context)

        issues_all = []
        for issue in issues_grouped:
            analytic_id = issue.get('analytic_account_id')
            new_issue_dom = dom_issues + [('analytic_account_id', '=', analytic_id and analytic_id[0] or analytic_id)]  # noqa
            issue['children_by_stage'] = issue_obj.read_group(cr, uid, new_issue_dom,  # noqa
                                                              ['stage_id'],
                                                              ['stage_id'],
                                                              orderby='stage_id asc',  # noqa
                                                              context=context)
            issues_all.append(issue)
        return issues_all
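
Examples #17 and #31 re-evaluate a domain that was stored as text on a saved filter. safe_eval yields lists rather than tuples for each condition, so the comprehension turns every multi-element item back into a tuple while leaving bare operators such as '|' alone. A short illustration with an invented stored domain:

from openerp.tools.safe_eval import safe_eval

domain_str = "['|', ['active', '=', False], ['priority', '>', 1]]"
dom = [len(d) > 1 and tuple(d) or d for d in safe_eval(domain_str)]
# dom == ['|', ('active', '=', False), ('priority', '>', 1)]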
Example #18
    def _cleanup_action_context(self, context_str, user_id):
        """Returns a dict representing the context_str evaluated (safe_eval) as
           a dict where items that are not useful for shared actions
           have been removed. If the evaluation of context_str as a
           dict fails, context_str is returned unaltered.

           :param user_id: the integer uid to be passed as 'uid' in the
                           evaluation context
           """
        result = False
        if context_str:
            try:
                context = safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True)
                result = dict(context)
                for key in context:
                    # Remove all context keys that seem to toggle default
                    # filters based on the current user, as it makes no sense
                    # for shared users, who would not see any data by default.
                    if key and key.startswith('search_default_') and 'user_id' in key:
                        result.pop(key)
            except Exception:
                # Note: must catch all exceptions, as UnquoteEvalContext may cause many
                #       different exceptions, as it shadows builtins.
                _logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
                result = context_str
        return result
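
Here is a hedged illustration of what the helper above does with a typical action context string; the string and the uid are invented, and tools.UnquoteEvalContext (used by the real code) keeps unknown names such as uid as symbolic placeholders instead of raising NameError.

from openerp import tools
from openerp.tools.safe_eval import safe_eval

context_str = "{'search_default_user_id': uid, 'default_type': 'opportunity'}"
context = dict(safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True))
cleaned = dict((k, v) for k, v in context.items()
               if not (k and k.startswith('search_default_') and 'user_id' in k))
# cleaned == {'default_type': 'opportunity'}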
Example #19
    def run_manually(self):
        """Run a job from the cron form view."""

        if self.env.uid != SUPERUSER_ID and (not self.active or not self.numbercall):
            raise exceptions.AccessError(_("Only the admin user is allowed to " "execute inactive cron jobs manually"))

        try:
            # Try to grab an exclusive lock on the job row
            # until the end of the transaction
            self.env.cr.execute(
                """SELECT *
                   FROM ir_cron
                   WHERE id=%s
                   FOR UPDATE NOWAIT""",
                (self.id,),
                log_exceptions=False,
            )

        except OperationalError as e:
            # User friendly error if the lock could not be claimed
            if getattr(e, "pgcode", None) == "55P03":
                raise exceptions.Warning(_("Another process/thread is already busy " "executing this job"))

            raise

        _logger.info("Job `%s` triggered from form", self.name)

        # Do not propagate active_test to the method to execute
        ctx = dict(self.env.context)
        ctx.pop("active_test", None)

        # Execute the cron job
        method = getattr(self.with_context(ctx).sudo(self.user_id).env[self.model], self.function)
        args = safe_eval("tuple(%s)" % (self.args or ""))
        return method(*args)
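
The last line above is a common ir.cron trick: the args field stores the positional arguments as literal Python text, and wrapping it in tuple(...) before safe_eval gives the caller a tuple even for a single argument or an empty field. A small sketch with an invented args value:

from openerp.tools.safe_eval import safe_eval

args_field = "[42, 'draft']"   # hypothetical content of an ir.cron args field
args = safe_eval("tuple(%s)" % (args_field or ""))
# args == (42, 'draft'); an empty field evaluates "tuple()" and yields ()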
Example #20
    def run_manually_old(self, cr, uid, ids, context=None):
        if uid != SUPERUSER_ID:
            raise exceptions.AccessError(_("Only the admin user is allowed to execute inactive cron jobs manually"))

        wizard = self.browse(cr, uid, ids[0], context=context)

        try:
            cr.execute(
                """SELECT
                        *
                   FROM
                        ir_cron
                   WHERE
                        id=%s
                   FOR UPDATE NOWAIT""",
                (ids[0],),
            )
        except OperationalError as e:
            if getattr(e, "pgcode", None) == "55P03":
                raise exceptions.Warning(_("Another process/thread is already busy executing this job"))
            raise
        _logger.info("Job `%s` triggered from form", wizard.name)
        ctx = context.copy()
        ctx.pop("active_test", None)
        method = getattr(self.pool.get(wizard.model), wizard.function)
        args = safe_eval("tuple(%s)" % (wizard.args or ""))
        return method(cr, uid, *args)
Example #21
 def get_shipstation_url(self, cr, uid, context=None):
     params = self.pool.get('ir.config_parameter')
     shipstation_debug_mode = safe_eval(params.get_param(cr, uid, 'shipstation_debug_mode', default='False'))
     if shipstation_debug_mode:
         return params.get_param(cr, uid, 'shipstation_test_url', default='')
     else:
         return params.get_param(cr, uid, 'shipstation_url', default='')
Example #22
    def get_action(self, cr, uid, goal_id, context=None):
        """Get the ir.action related to update the goal

        In case of a manual goal, should return a wizard to update the value
        :return: action description in a dictionary
        """
        goal = self.browse(cr, uid, goal_id, context=context)

        if goal.definition_id.action_id:
            # open the action linked to the goal
            action = goal.definition_id.action_id.read()[0]

            if goal.definition_id.res_id_field:
                current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
                action['res_id'] = safe_eval(goal.definition_id.res_id_field, {'user': current_user})

                # if one element to display, should see it in form mode if possible
                action['views'] = [(view_id, mode) for (view_id, mode) in action['views'] if mode == 'form'] or action['views']
            return action

        if goal.computation_mode == 'manually':
            # open a wizard window to update the value manually
            action = {
                'name': _("Update %s") % goal.definition_id.name,
                'id': goal_id,
                'type': 'ir.actions.act_window',
                'views': [[False, 'form']],
                'target': 'new',
                'context': {'default_goal_id': goal_id, 'default_current': goal.current},
                'res_model': 'gamification.goal.wizard'
            }
            return action

        return False
Example #23
 def _auth_oauth_overwrite(self, cr, uid, validation, provider, params, context=None):
     data_overwrite = self.pool.get('auth.oauth.provider').read(cr, uid, provider,['data_overwrite'], context=context)['data_overwrite']
     if data_overwrite:
         overdict = expr_eval(data_overwrite)
         assert isinstance(overdict, dict), 'the overwrite expression must be a dict object.'
         for r in overdict.items():
             validation[r[0]]=safe_eval(r[1], params, validation)
Example #24
 def common_test(self):
     self.purchase_order = self.env.ref('purchase.purchase_order_1')
     # I change invoice method to 'based on purchase order line'
     self.purchase_order.invoice_method = 'manual'
     # I change the quantity on the first line to 10
     self.purchase_order.order_line[0].product_qty = 10
     # I confirm the purchase order
     workflow.trg_validate(self.uid, 'purchase.order',
                           self.purchase_order.id, 'purchase_confirm',
                           self.cr)
     # I check if the purchase order is confirmed
     self.purchase_order.invalidate_cache()
     self.assertEqual(self.purchase_order.state, 'approved',
                      "Purchase order's state isn't correct")
     # I get lines to invoiced
     purchase_lines = self.purchase_order.order_line
     # I get menu item action
     menu = self.env.ref('purchase.purchase_line_form_action2')
     self.domain = safe_eval(menu.domain)
     self.domain.extend([('order_id', '=', self.purchase_order.id),
                         ('fully_invoiced', '=', False)])
     purchase_line_domain = self.po_line_obj.search(self.domain)
     # I check if all lines is on the view's result
     self.assertEqual(purchase_line_domain, purchase_lines,
                      "lines aren't on the menu")
Example #25
 def _assert_valid_domain(self):
     try:
         domain = safe_eval(self.domain or '[]', evaluation_context)
         self.env['crm.lead'].search(domain)
     except Exception as e:
         _logger.warning('Exception: %s' % (e,))
         raise Warning('The domain is incorrectly formatted')
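
The check above simply tries the domain out: evaluate the stored text with safe_eval in the module's evaluation_context, run a search, and turn any failure into a Warning. Below is a simplified sketch of the same idea, with a placeholder model and domain, that just lets the exception propagate.

from openerp.tools.safe_eval import safe_eval

def assert_valid_domain(env, model, domain_str, eval_context=None):
    """Raise if domain_str does not evaluate to a usable search domain."""
    domain = safe_eval(domain_str or '[]', eval_context or {})
    env[model].search(domain, limit=1)

# assert_valid_domain(self.env, 'crm.lead', "[('email_from', '!=', False)]")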
Example #26
    def assign_scores_to_leads(self, ids=[]):
        domain = [('running', '=', True)]
        if ids:
            domain.append(('id', 'in', ids))
        scores = self.search_read(domain=domain, fields=['domain'])
        for score in scores:
            domain = safe_eval(score['domain'], evaluation_context)

            # Don't replace the domain with a 'not in' like below... it does not do the same thing!
            # domain.extend(['|', ('stage_id.on_change', '=', False), ('stage_id.probability', 'not in', [0,100])])
            domain.extend(['|', ('stage_id.on_change', '=', False), '&', ('stage_id.probability', '!=', 0), ('stage_id.probability', '!=', 100)])

            e = expression(self._cr, self._uid, domain, self.pool['crm.lead'], self._context)
            where_clause, where_params = e.to_sql()

            where_clause += """ AND (id NOT IN (SELECT lead_id FROM crm_lead_score_rel WHERE score_id = %s)) """
            where_params.append(score['id'])

            self._cr.execute("""INSERT INTO crm_lead_score_rel
                                    SELECT crm_lead.id as lead_id, %s as score_id
                                    FROM crm_lead
                                    WHERE %s RETURNING lead_id""" % (score['id'], where_clause), where_params)

            # Force recompute of fields that depends on score_ids
            lead_ids = [resp[0] for resp in self._cr.fetchall()]
            leads = self.env["crm.lead"].browse(lead_ids)
            leads.modified(['score_ids'])
            leads.recompute()
Example #27
    def makeInvoices(self):
        invoice_line_obj = self.env['account.invoice.line']
        purchase_line_obj = self.env['purchase.order.line']
        ctx = self.env.context.copy()
        res = super(PurchaseLineInvoice, self.with_context(ctx)).makeInvoices()
        invoice_ids = []
        for field, op, val in safe_eval(res['domain']):
            if field == 'id':
                invoice_ids = val
                break

        invoice_lines = invoice_line_obj.search(
            [('invoice_id', 'in', invoice_ids)])
        for invoice_line in invoice_lines:
            order_line = purchase_line_obj.search(
                [('invoice_lines', '=', invoice_line.id)],
                limit=1
                )
            if not order_line:
                continue

            if not invoice_line.sequence:
                invoice_line.write({'sequence': order_line.sequence})

        return res
Example #28
 def _get_oerp_ids(self, obj, oerp_id, first_node_domain, domain,
                   child_field, context):
     search_domain = []
     if oerp_id:
         obj_ids = obj.read(
             oerp_id, [child_field], context=context)[child_field]
         search_domain.append(('id', 'in', obj_ids))
     else:
         # case for the first node
         if isinstance(first_node_domain, str):
             first_node_domain = safe_eval(first_node_domain)
         search_domain.extend(first_node_domain)
     if isinstance(domain, str):
         domain = safe_eval(domain)
     search_domain.extend(domain)
     return obj.search(search_domain, context=context)
Example #29
 def _eval_by_account_id(self, expr):
     res = {}
     eval_dict = {'AccountingNone': AccountingNone}
     for account_id, replaced_exprs in \
             self.aep.replace_exprs_by_account_id([expr]):
         res[account_id] = safe_eval(replaced_exprs[0], eval_dict)
     return res
Example #30
    def _get_sum(self, cr, uid, ids, fname, arg, context=None):
        # copy-paste from addons/gamification/models/goal.py::update

        result = {}
        for goal in self.browse(cr, uid, ids, context=context):
            definition = goal.definition_id
            if True: # keep original indent
                obj = self.pool.get(definition.model_id.model)
                field_date_name = definition.field_date_id and definition.field_date_id.name or False
                if True: # keep original indent
                    #for goal in goals:
                    if True:
                        # eval the domain with user replaced by goal user object
                        domain = safe_eval(definition.domain, {'user': goal.user_id})

                        # add temporal clause(s) to the domain if fields are filled on the goal
                        if goal.start_date and field_date_name:
                            domain.append((field_date_name, '>=', goal.start_date))
                        if goal.end_date and field_date_name:
                            domain.append((field_date_name, '<=', goal.end_date))

                        #if definition.computation_mode == 'sum':
                        if fname=='sum':
                            field_name = definition.field_id.name
                            # TODO for master: group on user field in batch mode
                            res = obj.read_group(cr, uid, domain, [field_name], [], context=context)
                            new_value = res and res[0][field_name] or 0.0

                        else: # fname == 'count'
                            new_value = obj.search(cr, uid, domain, context=context, count=True)
                        result[goal.id] = new_value
        return result
Example #31
 def _get_report_inv(self, cr, uid, ids, context=None):
     invoice_obj = self.pool.get('account.invoice')
     wzr_brw = self.browse(cr, uid, ids, context=context)[0]
     domain_inv = wzr_brw.filter_invoice_id and \
         wzr_brw.filter_invoice_id.domain or []
     if not domain_inv:
         return ([], [], [], [], {})
     dom_inv = [len(d) > 1 and tuple(d) or d for d in safe_eval(domain_inv)]
     # Prepare grouped invoices; since there are two levels this needs a
     # little extra work.
     elements = invoice_obj.read_group(
         cr,
         uid,
         dom_inv, [
             'period_id', 'amount_total', 'amount_tax', 'amount_untaxed',
             'residual', 'partner_id'
         ], [
             'period_id',
             'amount_total',
             'amount_tax',
             'amount_untaxed',
             'residual',
         ],
         context=context)
     grouped_by_currency = invoice_obj.read_group(
         cr,
         uid,
         dom_inv, [
             'currency_id', 'amount_total', 'amount_tax', 'amount_untaxed',
             'residual', 'partner_id'
         ], [
             'currency_id',
             'amount_total',
             'amount_tax',
             'amount_untaxed',
             'residual',
         ],
         context=context)
     inv_line_obj = self.pool.get('account.invoice.line')
     curr_obj = self.pool.get('res.currency')
     grouped_by_product = {}
     ent_ids = [ent.id for ent in wzr_brw.prod_ent_ids]
     train_ids = [ent.id for ent in wzr_brw.prod_train_ids]
     cons_ids = [ent.id for ent in wzr_brw.prod_cons_ids]
     all_ids = ent_ids + cons_ids + train_ids
     for gbc in grouped_by_currency:
         currency = gbc['currency_id']
         inv_line_ids = invoice_obj.search(
             cr,
             uid,
             dom_inv + [('currency_id', 'in', [currency[0]])],
             context=context)
         group_prod = inv_line_obj.read_group(
             cr,
             uid, [('invoice_id', 'in', inv_line_ids)], [
                 'product_id',
                 'price_subtotal',
             ], [
                 'product_id',
                 'price_subtotal',
             ],
             context=context)
         total_ent = sum([
             gr['price_subtotal'] for gr in group_prod
             if gr['product_id'][0] in ent_ids
         ])
         total_cons = sum([
             gr['price_subtotal'] for gr in group_prod
             if gr['product_id'][0] in cons_ids
         ])
         total_train = sum([
             gr['price_subtotal'] for gr in group_prod
             if gr['product_id'][0] in train_ids
         ])
         total_others = sum([
             gr['price_subtotal'] for gr in group_prod
             if gr['product_id'][0] not in all_ids
         ])
         total = total_ent + total_cons + total_train + total_others
         curr_from = wzr_brw.currency_id.id
         curr_to = currency[0]
         curr_from_brw = curr_obj.browse(cr,
                                         uid,
                                         curr_from,
                                         context=context)
         curr_to_brw = curr_obj.browse(cr, uid, curr_to, context=context)
         rate = curr_obj._get_conversion_rate(cr,
                                              uid,
                                              curr_from_brw,
                                              curr_to_brw,
                                              context=context)
         grouped_by_product[gbc['currency_id'][1]] = {
             'enterprises': total_ent,
             'consultancy': total_cons,
             'others': total_others,
             'training': total_train,
             'pending': total_train,
             'total': total,
             'rate': rate,
             'total_in_control': round(total / rate, 2),
             'total_cons': round(total_cons / rate, 2),
             'total_train': round(total_train / rate, 2),
             'total_others': round(total_others / rate, 2),
             'total_lic': round(total_ent / rate, 2),
             'conversion_rate': curr_obj._get_conversion_rate(
                 cr, uid,
                 curr_obj.browse(cr, uid, curr_from, context=context),
                 curr_obj.browse(cr, uid, curr_to, context=context)),
         }
     # TODO: There must be a better way to get this list directly from a
     # grouped search on v8.0; for now the simplest way is to build a list
     # with everything and group in the report itself.
     invoice_ids = invoice_obj.search(cr, uid, dom_inv, context=context)
     invoices_brw = invoice_obj.browse(cr,
                                       uid,
                                       invoice_ids,
                                       context=context)
     # Getting resumed numbers
     resumed_numbers = {
         'total_invoiced':
         sum([
             grouped_by_product[i]['total_in_control']
             for i in grouped_by_product
         ])
     }
     return (elements, grouped_by_currency, invoices_brw,
             grouped_by_product, resumed_numbers)
Example #32
    def assign_leads_to_salesmen(self, all_team_users, dry=False):
        users = []
        for su in all_team_users:
            if (su.maximum_user_leads - su.leads_count) <= 0:
                continue
            domain = safe_eval(su.team_user_domain or '[]', evaluation_context)
            domain.extend([('user_id', '=', False),
                           ('assign_date', '=', False),
                           ('score', '>=', su.team_id.min_for_assign)])

            # assignment rhythm: 2 days of leads if a lot of leads should be assigned
            limit = int(math.ceil(su.maximum_user_leads / 15.0))

            if dry:
                dry_leads = self.env["leads.dry.run"].search([('team_id', '=',
                                                               su.team_id.id)])
                domain.append(['id', 'in', dry_leads.mapped('lead_id.id')])
            else:
                domain.append(('team_id', '=', su.team_id.id))

            leads = self.env["crm.lead"].search(domain,
                                                order='score desc',
                                                limit=limit *
                                                len(su.team_id.team_user_ids))
            users.append({
                "su": su,
                "nbr": min(su.maximum_user_leads - su.leads_count, limit),
                "leads": leads,
            })

        assigned = set()
        while users:
            i = 0

            # statistically select the user that should receive the next lead
            idx = randint(0,
                          reduce(lambda nbr, x: nbr + x['nbr'], users, 0) - 1)

            while idx > users[i]['nbr']:
                idx -= users[i]['nbr']
                i += 1
            user = users[i]

            # Get the first unassigned leads available for this user
            while user['leads'] and user['leads'][0] in assigned:
                user['leads'] = user['leads'][1:]
            if not user['leads']:
                del users[i]
                continue

            # convert the lead for this user
            lead = user['leads'][0]
            assigned.add(lead)
            if dry:
                values = {
                    'lead_id': lead.id,
                    'team_id': user['su'].team_id.id,
                    'user_id': user['su'].user_id.id
                }
                self.env['leads.dry.run'].create(values)
            else:
                # Assign date will be set by the write function
                data = {'user_id': user['su'].user_id.id}
                lead.write(data)

                lead.convert_opportunity(lead.partner_id and lead.partner_id.id
                                         or None)
                self._cr.commit()

            user['nbr'] -= 1
            if not user['nbr']:
                del users[i]
Example #33
    def update(self, cr, uid, ids, context=None):
        """Update the goals to recomputes values and change of states

        If a manual goal is not updated for enough time, the user will be
        reminded to do so (done only once, in 'inprogress' state).
        If a goal reaches the target value, the status is set to reached
        If the end date is passed (at least +1 day, time not considered) without
        the target value being reached, the goal is set as failed."""
        if context is None:
            context = {}
        commit = context.get('commit_gamification', False)

        goals_by_definition = {}
        for goal in self.browse(cr, uid, ids, context=context):
            goals_by_definition.setdefault(goal.definition_id, []).append(goal)

        for definition, goals in goals_by_definition.items():
            goals_to_write = dict((goal.id, {}) for goal in goals)
            if definition.computation_mode == 'manually':
                for goal in goals:
                    goals_to_write[goal.id].update(
                        self._check_remind_delay(cr, uid, goal, context))
            elif definition.computation_mode == 'python':
                # TODO batch execution
                for goal in goals:
                    # execute the chosen method
                    cxt = {
                        'self': self.pool.get('gamification.goal'),
                        'object': goal,
                        'pool': self.pool,
                        'cr': cr,
                        # copy the context to prevent side-effects of eval
                        'context': dict(context),
                        'uid': uid,
                        'date': date,
                        'datetime': datetime,
                        'timedelta': timedelta,
                        'time': time,
                    }
                    code = definition.compute_code.strip()
                    safe_eval(code, cxt, mode="exec", nocopy=True)
                    # the result of the evaluated code is put in the 'result' local variable, propagated to the context
                    result = cxt.get('result')
                    if result is not None and type(result) in (float, int,
                                                               long):
                        goals_to_write.update(
                            self._get_write_values(cr,
                                                   uid,
                                                   goal,
                                                   result,
                                                   context=context))

                    else:
                        _logger.exception(
                            _('Invalid return content from the evaluation of code for definition %s'
                              ) % definition.name)

            else:  # count or sum

                obj = self.pool.get(definition.model_id.model)
                field_date_name = definition.field_date_id and definition.field_date_id.name or False

                if definition.computation_mode == 'count' and definition.batch_mode:
                    # batch mode, trying to do as much as possible in one request
                    general_domain = safe_eval(definition.domain)
                    field_name = definition.batch_distinctive_field.name
                    subqueries = {}
                    for goal in goals:
                        start_date = field_date_name and goal.start_date or False
                        end_date = field_date_name and goal.end_date or False
                        subqueries.setdefault(
                            (start_date, end_date), {}).update({
                                goal.id:
                                safe_eval(definition.batch_user_expression,
                                          {'user': goal.user_id})
                            })

                    # the global query should be split by time periods (especially for recurrent goals)
                    for (start_date,
                         end_date), query_goals in subqueries.items():
                        subquery_domain = list(general_domain)
                        subquery_domain.append(
                            (field_name, 'in',
                             list(set(query_goals.values()))))
                        if start_date:
                            subquery_domain.append(
                                (field_date_name, '>=', start_date))
                        if end_date:
                            subquery_domain.append(
                                (field_date_name, '<=', end_date))

                        if field_name == 'id':
                            # grouping on id does not work and is similar to search anyway
                            user_ids = obj.search(cr,
                                                  uid,
                                                  subquery_domain,
                                                  context=context)
                            user_values = [{
                                'id': user_id,
                                'id_count': 1
                            } for user_id in user_ids]
                        else:
                            user_values = obj.read_group(cr,
                                                         uid,
                                                         subquery_domain,
                                                         fields=[field_name],
                                                         groupby=[field_name],
                                                         context=context)
                        # user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3},...]
                        for goal in [
                                g for g in goals if g.id in query_goals.keys()
                        ]:
                            for user_value in user_values:
                                queried_value = field_name in user_value and user_value[
                                    field_name] or False
                                if isinstance(queried_value, tuple) and len(
                                        queried_value) == 2 and isinstance(
                                            queried_value[0], (int, long)):
                                    queried_value = queried_value[0]
                                if queried_value == query_goals[goal.id]:
                                    new_value = user_value.get(
                                        field_name + '_count', goal.current)
                                    goals_to_write.update(
                                        self._get_write_values(
                                            cr,
                                            uid,
                                            goal,
                                            new_value,
                                            context=context))

                else:
                    for goal in goals:
                        # eval the domain with user replaced by goal user object
                        domain = safe_eval(definition.domain,
                                           {'user': goal.user_id})

                        # add temporal clause(s) to the domain if fields are filled on the goal
                        if goal.start_date and field_date_name:
                            domain.append(
                                (field_date_name, '>=', goal.start_date))
                        if goal.end_date and field_date_name:
                            domain.append(
                                (field_date_name, '<=', goal.end_date))

                        if definition.computation_mode == 'sum':
                            field_name = definition.field_id.name
                            # TODO for master: group on user field in batch mode
                            res = obj.read_group(cr,
                                                 uid,
                                                 domain, [field_name], [],
                                                 context=context)
                            new_value = res and res[0][field_name] or 0.0

                        else:  # computation mode = count
                            new_value = obj.search(cr,
                                                   uid,
                                                   domain,
                                                   context=context,
                                                   count=True)

                        goals_to_write.update(
                            self._get_write_values(cr,
                                                   uid,
                                                   goal,
                                                   new_value,
                                                   context=context))

            for goal_id, value in goals_to_write.items():
                if not value:
                    continue
                self.write(cr, uid, [goal_id], value, context=context)
            if commit:
                cr.commit()
        return True
Example #34
    def compute_rule(self, rule_id, localdict):
        rule = self.browse(rule_id)
        if not rule.calculo_nao_padrao:
            if rule.amount_select != 'code':
                return super(HrSalaryRule, self).compute_rule(rule_id,
                                                              localdict)

            codigo_python = python_pt_BR(rule.amount_python_compute or '',
                                         CALCULO_FOLHA_PT_BR)
        else:
            if rule.custom_amount_select == 'code':
                codigo_python = python_pt_BR(
                    rule.custom_amount_python_compute or '',
                    CALCULO_FOLHA_PT_BR)
            elif rule.custom_amount_select == 'fix':
                try:
                    return rule.custom_amount_fix, Decimal(
                        safe_eval(rule.custom_quantity, localdict)), 100.0
                except:
                    raise osv.except_osv(_('Error!'), _(
                        'Wrong quantity defined for salary rule %s (%s).') % (
                            rule.name, rule.code))
            elif rule.custom_amount_select == 'percentage':
                try:
                    return (
                        Decimal(
                            safe_eval(
                                rule.custom_amount_percentage_base, localdict
                            )
                        ), float(
                            safe_eval(rule.custom_quantity, localdict)
                        ), rule.custom_amount_percentage
                    )
                except:
                    raise osv.except_osv(
                        _('Error!'), _(
                            'Wrong percentage base or quantity defined for '
                            'salary rule %s (%s).') % (rule.name, rule.code)
                    )

        if codigo_python:
            try:
                for variavel in localdict:
                    if isinstance(localdict[variavel], float):
                        localdict[variavel] = Decimal(localdict[variavel] or 0)
                safe_eval(codigo_python, localdict, mode='exec', nocopy=True)
                result = localdict['result']

                if 'result_qty' in localdict:
                    result_qty = localdict['result_qty']
                else:
                    result_qty = 1

                if 'result_rate' in localdict:
                    result_rate = localdict['result_rate']
                else:
                    result_rate = 100

                return result, result_qty, result_rate
            except:
                from openerp.exceptions import Warning
                msg = _('Wrong python code defined for salary rule %s (%s).')
                raise Warning(msg % (rule.name, rule.code))
Пример #35
0
    def _generate_record_from_line(self, cr, uid, configurator, line, context):
        current_pool = self.pool.get(line.name.model)
        current_record_ids = current_pool.search(
            cr,
            uid,
            line.domain and safe_eval(line.domain) or [],
            context=context)

        for current_record_id in current_record_ids:
            record = current_pool.browse(cr, uid,
                                         current_record_id,
                                         context=context)
            #//++++++++++++++++++++++
            #if line.user_field_id and \
               #record[line.user_field_id.name] and \
               #record[line.user_field_id.name]._table_name != 'res.users':
                #raise orm.except_orm(
                    #_('Error'),
                    #_("The 'User' field of record %s (%s) "
                      #"does not refer to res.users")
                    #% (record[line.description_field_id.name],
                       #line.name.model))

            if (((line.description_field_id and
                  record[line.description_field_id.name]) or
                    line.description_code) and
                    record[line.date_start_field_id.name]):
                duration = False
                if (not line.duration_field_id and
                        line.date_stop_field_id and
                        record[line.date_start_field_id.name] and
                        record[line.date_stop_field_id.name]):
                    
                    #//+++++++++++++++++++++++
                    #date_start = datetime.strptime(
                        #record[line.date_start_field_id.name],
                        #tools.DEFAULT_SERVER_DATETIME_FORMAT
                    #)
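                    # the start field may hold a plain date or a datetime;
                    # try the full datetime format first, then fall back to
                    # appending a midnight time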
                    try:
                        date_start = datetime.strptime(
                            record[line.date_start_field_id.name],
                            tools.DEFAULT_SERVER_DATETIME_FORMAT
                        )
                    except:
                        date_start = datetime.strptime(
                            record[line.date_start_field_id.name] + ' 00:00:00',
                            tools.DEFAULT_SERVER_DATETIME_FORMAT
                        )
                    
                    #//+++++++++++++++++++++++
                    #date_stop = datetime.strptime(
                        #record[line.date_stop_field_id.name],
                        #tools.DEFAULT_SERVER_DATETIME_FORMAT
                    #)
                    try:
                        date_stop = datetime.strptime(
                            record[line.date_stop_field_id.name],
                            tools.DEFAULT_SERVER_DATETIME_FORMAT
                        )
                    except:
                        date_stop = datetime.strptime(
                            record[line.date_stop_field_id.name] + ' 00:00:00',
                            tools.DEFAULT_SERVER_DATETIME_FORMAT
                        )
                    
                    duration = (date_stop - date_start).total_seconds() / 3600
                elif line.duration_field_id:
                    duration = record[line.duration_field_id.name]
                if line.description_type != 'code':
                    name = record[line.description_field_id.name]
                else:
                    parse_dict = {'o': record}
                    mytemplate = Template(line.description_code)
                    name = mytemplate.render(**parse_dict)
                super_calendar_values = {
                    'name': name,
                    'model_description': line.description,
                    'date_start': record[line.date_start_field_id.name],
                    'duration': duration,
                    'user_id': (
                        line.user_field_id and
                        record[line.user_field_id.name] and
                        record[line.user_field_id.name].id or
                        False
                    ),
                    'configurator_id': configurator.id,
                    'res_id': line.name.model+','+str(record['id']),
                    'model_id': line.name.id,
                    }
                
                #//++++++++++++++++++++++
                #return super_calendar_values
                self.pool.get('super.calendar').create(cr, uid,
                    super_calendar_values, context=context)
        return {}
Пример #36
0
def add_module_dependencies(cr, module_list):
    """
    Select (new) dependencies from the modules in the list
    so that we can inject them into the graph at upgrade
    time. Used in the modified OpenUpgrade Server,
    not to be called from migration scripts

    Also take the OpenUpgrade configuration directives 'forced_deps'
    and 'autoinstall' into account. From any additional modules
    that these directives can add, the dependencies are added as
    well (but these directives are not checked for the occurrence
    of any of the dependencies).
    """
    if not module_list:
        return module_list

    forced_deps = safe_eval.safe_eval(
        config.get_misc('openupgrade', 'forced_deps_' + release.version,
                        config.get_misc('openupgrade', 'forced_deps', '{}')))
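    # forced_deps is expected to evaluate to a dict mapping a module to the
    # extra modules it should depend on, e.g. (illustrative names only)
    #   [openupgrade] forced_deps = {"account": ["account_financial_report"]}
    # in the server configuration file.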

    autoinstall = safe_eval.safe_eval(
        config.get_misc('openupgrade', 'autoinstall_' + release.version,
                        config.get_misc('openupgrade', 'autoinstall', '{}')))

    for module in list(module_list):
        module_list += forced_deps.get(module, [])
        module_list += autoinstall.get(module, [])

    module_list = list(set(module_list))

    dependencies = module_list
    while dependencies:
        cr.execute(
            """
            SELECT DISTINCT dep.name
            FROM
                ir_module_module,
                ir_module_module_dependency dep
            WHERE
                module_id = ir_module_module.id
                AND ir_module_module.name in %s
                AND dep.name not in %s
            """, (
                tuple(dependencies),
                tuple(module_list),
            ))

        dependencies = [x[0] for x in cr.fetchall()]
        module_list += dependencies

    # Select auto_install modules of which all dependencies
    # are fulfilled based on the modules we know are to be
    # installed
    cr.execute(
        """
        SELECT name from ir_module_module WHERE state IN %s
        """, (('installed', 'to install', 'to upgrade'), ))
    modules = list(set(module_list + [row[0] for row in cr.fetchall()]))
    cr.execute(
        """
        SELECT name from ir_module_module m
        WHERE auto_install IS TRUE
            AND state = 'uninstalled'
            AND NOT EXISTS(
                SELECT id FROM ir_module_module_dependency d
                WHERE d.module_id = m.id
                AND name NOT IN %s)
         """, (tuple(modules), ))
    auto_modules = [row[0] for row in cr.fetchall()]
    if auto_modules:
        logger.info("Selecting autoinstallable modules %s",
                    ','.join(auto_modules))
        module_list += auto_modules

    return module_list
Пример #37
0
 def get_default_res_users_process_uid(self, cr, uid, fields, context=None):
     icp = self.pool.get('ir.config_parameter')
     # we use safe_eval on the result, since the value of the parameter is a nonempty string
     return {
         'process_uid': safe_eval(icp.get_param(cr, uid, 'res.users.process_uid', 'False')),
     }
Пример #38
0
    def _compute(self, lang_id, aep):
        res = {}

        localdict = {
            'registry': self.pool,
            'sum': _sum,
            'min': _min,
            'max': _max,
            'len': len,
            'avg': _avg,
        }

        localdict.update(self._fetch_queries())

        aep.do_queries(self.date_from, self.date_to, self.period_from,
                       self.period_to, self.report_instance_id.target_move,
                       self._get_additional_move_line_filter())

        compute_queue = self.report_instance_id.report_id.kpi_ids
        recompute_queue = []
        while True:
            for kpi in compute_queue:
                try:
                    kpi_val_comment = kpi.name + " = " + kpi.expression
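                    # replace the account variables in the KPI expression with
                    # the values gathered by aep.do_queries() above, so the
                    # expression can be evaluated with safe_eval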
                    kpi_eval_expression = aep.replace_expr(kpi.expression)
                    kpi_val = safe_eval(kpi_eval_expression, localdict)
                except ZeroDivisionError:
                    kpi_val = None
                    kpi_val_rendered = '#DIV/0'
                    kpi_val_comment += '\n\n%s' % (traceback.format_exc(), )
                except (NameError, ValueError):
                    recompute_queue.append(kpi)
                    kpi_val = None
                    kpi_val_rendered = '#ERR'
                    kpi_val_comment += '\n\n%s' % (traceback.format_exc(), )
                except:
                    kpi_val = None
                    kpi_val_rendered = '#ERR'
                    kpi_val_comment += '\n\n%s' % (traceback.format_exc(), )
                else:
                    kpi_val_rendered = kpi.render(lang_id, kpi_val)

                localdict[kpi.name] = kpi_val
                try:
                    kpi_style = None
                    if kpi.css_style:
                        kpi_style = safe_eval(kpi.css_style, localdict)
                except:
                    _logger.warning("error evaluating css stype expression %s",
                                    kpi.css_style,
                                    exc_info=True)
                    kpi_style = None

                drilldown = (kpi_val is not None
                             and AEP.has_account_var(kpi.expression))

                res[kpi.name] = {
                    'val': kpi_val,
                    'val_r': kpi_val_rendered,
                    'val_c': kpi_val_comment,
                    'style': kpi_style,
                    'suffix': kpi.suffix,
                    'dp': kpi.dp,
                    'is_percentage': kpi.type == 'pct',
                    'period_id': self.id,
                    'expr': kpi.expression,
                    'drilldown': drilldown,
                }

            if len(recompute_queue) == 0:
                # nothing to recompute, we are done
                break
            if len(recompute_queue) == len(compute_queue):
                # could not compute anything in this iteration
                # (i.e. real ValueErrors or a cyclic dependency)
                # so we stop trying
                break
            # try again
            compute_queue = recompute_queue
            recompute_queue = []

        return res
Пример #39
0
    def test_01_user_input_cases(self):
        test_cases = [
            # Case 1: First column empty
            {
                'case': 1,
                'lines_no': 1,
                'content': """
                *  5 *                  _


                9168      """,
                'asserts': [
                    {
                        'line': 1,
                        'value': "('9168', ('0', ''))"
                    },
                ]
            },
            # Case 2: First line empty
            {
                'case': 2,
                'lines_no': 1,
                'content': """


                9898
                """,
                'asserts': [
                    {
                        'line': 1,
                        'value': "('9898', ('0', ''))"
                    },
                ]
            },
            # Case 3: Completed entries and standalone Invoice/Lot numbers
            {
                'case': 3,
                'lines_no': 3,
                'content': """00004*5*Screen does not work
                SAJ/2016/001
                00007""",
                'asserts': [
                    {
                        'line': 1,
                        'value': "('00004', ('5', 'Screen does not work'))"
                    },
                    {
                        'line': 2,
                        'value': "('SAJ/2016/001', ('0', ''))"
                    },
                ]
            },
            # Case 4: A Serial/Lot No with just a reason code is still valid
            {
                'case': 4,
                'lines_no': 6,
                'content': """00004*5*Screen is not working anymore
                00084*5
                SAJ/2016/001
                SAJ/2016/002*5
                SAJ/2016/003*5*The keyboard does not turn on
                00007""",
                'asserts': [
                    {
                        'line': 2,
                        'value': "('00084', ('5', ''))"
                    },
                    {
                        'line': 4,
                        'value': "('SAJ/2016/002', ('5', ''))"
                    },
                ]
            },
            # Case 5: A dot (first line) is still valid, because there is no
            # rule for a minimal valid lot number (invoices have a string format)
            {
                'case': 5,
                'lines_no': 4,
                'content': """.
                00004*5*Screen does not work
                SAJ/2016/001
                00007""",
                'asserts': [
                    {
                        'line': 1,
                        'value': "('.', ('0', ''))"
                    },
                ]
            },

            # Case 6: An invalid character such as * is introduced
            {
                'case': 6,
                'lines_no': 4,
                'content': """00004*5*Screen is not working anymore
                80808*3*There is a * in the middle of the screen
                SAJ/2016/001
                00007""",
                'asserts': [
                    {
                        'line': 2,
                        'value': "('80808', ('0', ''))"
                    },
                ]
            },

            # Case 7: non-ascii chars
            {
                'case': 7,
                'lines_no': 2,
                'content': """imac2*5*áéíóú dañado
                loté*2*okok""",
                'asserts': [
                    {
                        'line': 1,
                        'value': "('imac2', ('5', 'áéíóú dañado'))"
                    },
                    {
                        'line': 2,
                        'value': "('loté', ('2', 'okok'))"
                    },
                ]
            },
        ]

        for case in test_cases:
            user_input = case['content']
            user_input = self.wizard.get_data_of_products(user_input)
            self.assertEqual(
                len(user_input), case['lines_no'],
                "Case # %s is expecting %s lines when the following user input"
                " gets parsed:\n%s" %
                (case['case'], case['lines_no'], case['content']))
            for item in case['asserts']:
                self.assertEqual(user_input[item['line'] - 1],
                                 safe_eval(item['value']))
Пример #40
0
 def _fetch_queries(self,
                    date_from,
                    date_to,
                    get_additional_query_filter=None):
     self.ensure_one()
     res = {}
     for query in self.query_ids:
         model = self.env[query.model_id.model]
         eval_context = {
             'env': self.env,
             'time': time,
             'datetime': datetime,
             'dateutil': dateutil,
             # deprecated
             'uid': self.env.uid,
             'context': self.env.context,
         }
         domain = query.domain and \
             safe_eval(query.domain, eval_context) or []
         if get_additional_query_filter:
             domain.extend(get_additional_query_filter(query))
         if query.date_field.ttype == 'date':
             domain.extend([(query.date_field.name, '>=', date_from),
                            (query.date_field.name, '<=', date_to)])
         else:
             datetime_from = _utc_midnight(date_from,
                                           self._context.get('tz', 'UTC'))
             datetime_to = _utc_midnight(date_to,
                                         self._context.get('tz', 'UTC'),
                                         add_day=1)
             domain.extend([(query.date_field.name, '>=', datetime_from),
                            (query.date_field.name, '<', datetime_to)])
         field_names = [f.name for f in query.field_ids]
         all_stored = all([model._fields[f].store for f in field_names])
         if not query.aggregate:
             data = model.search_read(domain, field_names)
             res[query.name] = [AutoStruct(**d) for d in data]
         elif query.aggregate == 'sum' and all_stored:
             # use read_group to sum stored fields
             data = model.read_group(domain, field_names, [])
             s = AutoStruct(count=data[0]['__count'])
             for field_name in field_names:
                 try:
                     v = data[0][field_name]
                 except KeyError:
                     _logger.error(
                         'field %s not found in read_group '
                         'for %s; not summable?', field_name, model._name)
                     v = AccountingNone
                 setattr(s, field_name, v)
             res[query.name] = s
         else:
             data = model.search_read(domain, field_names)
             s = AutoStruct(count=len(data))
             if query.aggregate == 'min':
                 agg = _min
             elif query.aggregate == 'max':
                 agg = _max
             elif query.aggregate == 'avg':
                 agg = _avg
             elif query.aggregate == 'sum':
                 agg = _sum
             for field_name in field_names:
                 setattr(s, field_name, agg([d[field_name] for d in data]))
             res[query.name] = s
     return res
Пример #41
0
    def _exec_action(action, datas, context):
        # taken from client/modules/action/main.py:84 _exec_action()
        if isinstance(action, bool) or 'type' not in action:
            return
        # Updating the context : Adding the context of action in order to use it on Views called from buttons
        if datas.get('id', False):
            context.update({
                'active_id': datas.get('id', False),
                'active_ids': datas.get('ids', []),
                'active_model': datas.get('model', False)
            })
        context.update(safe_eval(action.get('context', '{}'), context.copy()))
        if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
            for key in ('res_id', 'res_model', 'view_type', 'view_mode',
                        'limit', 'auto_refresh', 'search_view', 'auto_search',
                        'search_view_id'):
                datas[key] = action.get(key, datas.get(key, None))

            view_id = False
            if action.get('views', []):
                if isinstance(action['views'], list):
                    view_id = action['views'][0][0]
                    datas['view_mode'] = action['views'][0][1]
                else:
                    if action.get('view_id', False):
                        view_id = action['view_id'][0]
            elif action.get('view_id', False):
                view_id = action['view_id'][0]

            assert datas['res_model'], "Cannot use the view without a model"
            # Here, we have a view that we need to emulate
            log_test("will emulate a %s view: %s#%s", action['view_type'],
                     datas['res_model'], view_id or '?')

            view_res = pool.get(datas['res_model']).fields_view_get(
                cr, uid, view_id, action['view_type'], context)
            assert view_res and view_res.get(
                'arch'), "Did not return any arch for the view"
            view_data = {}
            if view_res.get('fields', {}).keys():
                view_data = pool.get(datas['res_model']).default_get(
                    cr, uid, view_res['fields'].keys(), context)
            if datas.get('form'):
                view_data.update(datas.get('form'))
            if wiz_data:
                view_data.update(wiz_data)
            _logger.debug("View data is: %r", view_data)

            for fk, field in view_res.get('fields', {}).items():
                # default_get() returns a list of ints for these fields, while
                # create() expects a [(6, 0, [int, ...])] command
                if field['type'] in ('one2many', 'many2many') \
                        and view_data.get(fk, False) \
                        and isinstance(view_data[fk], list) \
                        and not isinstance(view_data[fk][0], tuple) :
                    view_data[fk] = [(6, 0, view_data[fk])]

            action_name = action.get('name')
            try:
                from xml.dom import minidom
                cancel_found = False
                buttons = []
                dom_doc = minidom.parseString(view_res['arch'])
                if not action_name:
                    action_name = dom_doc.documentElement.getAttribute('name')

                for button in dom_doc.getElementsByTagName('button'):
                    button_weight = 0
                    if button.getAttribute('special') == 'cancel':
                        cancel_found = True
                        continue
                    if button.getAttribute('icon') == 'gtk-cancel':
                        cancel_found = True
                        continue
                    if button.getAttribute('default_focus') == '1':
                        button_weight += 20
                    if button.getAttribute('string') in wiz_buttons:
                        button_weight += 30
                    elif button.getAttribute('icon') in wiz_buttons:
                        button_weight += 10
                    string = button.getAttribute(
                        'string') or '?%s' % len(buttons)

                    buttons.append({
                        'name': button.getAttribute('name'),
                        'string': string,
                        'type': button.getAttribute('type'),
                        'weight': button_weight,
                    })
            except Exception, e:
                _logger.warning(
                    "Cannot resolve the view arch and locate the buttons!",
                    exc_info=True)
                raise AssertionError(e.args[0])

            if not datas['res_id']:
                # it is probably an orm_memory object, we need to create
                # an instance
                datas['res_id'] = pool.get(datas['res_model']).create(
                    cr, uid, view_data, context)

            if not buttons:
                raise AssertionError(
                    "view form doesn't have any buttons to press!")

            buttons.sort(key=lambda b: b['weight'])
            _logger.debug(
                'Buttons are: %s', ', '.join(
                    ['%s: %d' % (b['string'], b['weight']) for b in buttons]))

            res = None
            while buttons and not res:
                b = buttons.pop()
                log_test("in the \"%s\" form, I will press the \"%s\" button.",
                         action_name, b['string'])
                if not b['type']:
                    log_test("the \"%s\" button has no type, cannot use it",
                             b['string'])
                    continue
                if b['type'] == 'object':
                    #there we are! press the button!
                    fn = getattr(pool.get(datas['res_model']), b['name'])
                    if not fn:
                        _logger.error(
                            "The %s model doesn't have a %s attribute!",
                            datas['res_model'], b['name'])
                        continue
                    res = fn(cr, uid, [
                        datas['res_id'],
                    ], context)
                    break
                else:
                    _logger.warning(
                        "in the \"%s\" form, the \"%s\" button has unknown type %s",
                        action_name, b['string'], b['type'])
            return res
Пример #42
0
 def _generic_reformat_phonenumbers(self, cr, uid, ids, vals, context=None):
     """Reformat phone numbers in E.164 format i.e. +33141981242"""
     assert isinstance(self._country_field, (str, unicode, type(None))),\
         'Wrong self._country_field'
     assert isinstance(self._partner_field, (str, unicode, type(None))),\
         'Wrong self._partner_field'
     assert isinstance(self._phone_fields, list),\
         'self._phone_fields must be a list'
     if context is None:
         context = {}
     if ids and isinstance(ids, (int, long)):
         ids = [ids]
     if any([vals.get(field) for field in self._phone_fields]):
         user = self.pool['res.users'].browse(cr, uid, uid, context=context)
         # country_id on res.company is a fields.function that looks at
         # company_id.partner_id.address(default).country_id
         countrycode = None
         if self._country_field:
             if vals.get(self._country_field):
                 country = self.pool['res.country'].browse(
                     cr, uid, vals[self._country_field], context=context)
                 countrycode = country.code
             elif ids:
                 rec = self.browse(cr, uid, ids[0], context=context)
                 country = safe_eval('rec.' + self._country_field,
                                     {'rec': rec})
                 countrycode = country and country.code or None
         elif self._partner_field:
             if vals.get(self._partner_field):
                 partner = self.pool['res.partner'].browse(
                     cr, uid, vals[self._partner_field], context=context)
                 countrycode = partner.country_id and\
                     partner.country_id.code or None
             elif ids:
                 rec = self.browse(cr, uid, ids[0], context=context)
                 partner = safe_eval('rec.' + self._partner_field,
                                     {'rec': rec})
                 if partner:
                     countrycode = partner.country_id and\
                         partner.country_id.code or None
         if not countrycode:
             if user.company_id.country_id:
                 countrycode = user.company_id.country_id.code
             else:
                 _logger.error(
                     _("You should set a country on the company '%s' "
                       "to allow the reformat of phone numbers") %
                     user.company_id.name)
                 countrycode = None
             # with country code = None, phonenumbers.parse() will work
             # with phonenumbers formatted in E164, but will fail with
             # phone numbers in national format
         for field in self._phone_fields:
             if vals.get(field):
                 init_value = vals.get(field)
                 try:
                     res_parse = phonenumbers.parse(
                         vals.get(field),
                         countrycode and countrycode.upper() or None)
                     vals[field] = phonenumbers.format_number(
                         res_parse, phonenumbers.PhoneNumberFormat.E164)
                     if init_value != vals[field]:
                         _logger.info(
                             "%s initial value: '%s' updated value: '%s'" %
                             (field, init_value, vals[field]))
                 except Exception, e:
                     # I do BOTH logger and raise, because:
                     # raise is useful when the record is created/written
                     #    by a user via the Web interface
                     # logger is useful when the record is created/written
                     #    via the webservices
                     _logger.error(
                         "Cannot reformat the phone number '%s' to "
                         "international format with region=%s" %
                         (vals.get(field), countrycode))
                     if context.get('raise_if_phone_parse_fails'):
                         raise Warning(
                             _("Cannot reformat the phone number '%s' to "
                               "international format. Error message: %s") %
                             (vals.get(field), e))
Пример #43
0
    def execute_cron_splited(self, cr, uid, ids=None, number=10, context=None):
        """TODO: Separate in some more readable methods.
        TODO: Better doc.
        ids: list: Wired IDS list to compute.
        number: int:split thread to be called.
        context: special context if needed.
        """
        batch = str(time.time())
        if number < 3 or not isinstance(number, int):
            _logger.info(
                'Cowardly refused to run the cron at %s because I need at '
                'least to divide by 1', batch)
            return False
        cron_obj = self.pool.get('ir.cron')
        # Create a template of cron in order to set only the new values
        # afterwards.
        cron_tmpl = {
            'user_id': uid,
            'active': False,
            'priority': 100,
            'numbercall': 1,
            'doall': False,
            'model': 'wizard.price',
            'function': 'execute_cron',
            'nextcall': fields.datetime.now(),
        }

        def chunks(list_p, numb):
            """Split the list in "number" parts"""
            numb = max(1, numb)
            return [list_p[i:i + numb] for i in range(0, len(list_p), numb)]

        product_ids = self._get_products(cr, uid, ids, context=context)
        if not product_ids:
            _logger.info(
                'Cowardly refused to run the cron at %s because I do not have '
                'elements to compute', batch)
            return False

        crons = 1
        if number:
            crons = int(math.ceil(len(product_ids) / float(number)))
        chunked = chunks(product_ids, crons and crons or 1)
        cron_name = 'ID Update Cost %s' % batch
        # First step:
        # I create the cronjobs first.
        cron_job_ids = []
        for plist in chunked:
            new_cron = cron_tmpl.copy()
            new_cron.update({
                'name': cron_name,
                'nextcall': fields.datetime.now(),
                'args': '[%(elements)s]' % dict(elements=str(plist)),
            })
            created = cron_obj.create(cr, uid, new_cron)
            cron_job_ids.append(created)
            _logger.info('Created cron job id [%s]', created)
        # Second Step:
        # Set the ordering after creation, because only now do we know which
        # cron comes next and which comes before (by database ID) so we can
        # pass them as parameters.
        cron_job_ids = sorted(cron_job_ids)
        for cron_id in cron_job_ids:
            cron = cron_obj.browse(cr, uid, cron_id)
            previous_cron = False
            next_cron = False
            if len(cron_job_ids) == 1:
                cron.write({'active': True})
            elif cron_id == min(cron_job_ids):
                cron.write({'active': True})
                next_cron = cron_job_ids[1]
            elif cron_id == max(cron_job_ids):
                previous_cron = cron_job_ids[-2]
            else:
                previous_cron = cron_job_ids[cron_job_ids.index(cron_id) - 1]
                next_cron = cron_job_ids[cron_job_ids.index(cron_id) + 1]
            new_args = safe_eval(cron.args)
            new_args.append(previous_cron)
            new_args.append(next_cron)
            variables = {
                'name': cron_name.replace('ID', str(cron_id)),
                'args': '(%s)' % str(new_args),
            }
            cron.write(variables)
            _logger.info('Set the elements correctly [%s]', cron_id)

        # self._create_update_crons
        return True
Пример #44
0
# 1) create({'name': 'Mon produit joli'})
# 2) with_context(lang='fr_FR').write({'name': 'Mon produit joli'})
# 3) with_context(lang='en_US').write({'name': 'My cute product'})
# Obviously, the simplest and most logical approach is:
# 1) create({'name': 'My cute product'})
# 2) with_context(lang='fr_FR').write({'name': 'Mon produit joli'})

#### List comprehension :
# [x*2 for x in range(20) if x % 3]
# result : [2, 4, 8, 10, 14, 16, 20, 22, 26, 28, 32, 34, 38]

#### SAFE EVAL
from openerp.tools.safe_eval import safe_eval
# 1st arg: the string to evaluate
# 2nd arg: a dict mapping each name used at the start of the string to the corresponding OpenERP object
safe_eval('sepa_export.payment_order_ids[0].reference', {'sepa_export': gen_args['sepa_export']})

#### Polish notation
# How to read it:
# http://en.wikipedia.org/wiki/Polish_notation
# Read from left to right; as soon as you meet an operator followed by two operands, evaluate it, then start again from the far left.
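# A minimal sketch with made-up field names, showing how a prefix-notation
# domain reads:
#   ['|', ('state', '=', 'draft'), ('state', '=', 'sent')]
#     -> state == 'draft' OR state == 'sent'
#   ['&', ('active', '=', True), '|', ('a', '=', 1), ('b', '=', 2)]
#     -> active == True AND (a == 1 OR b == 2)
# When no operator is given between two leaves, '&' (AND) is implied.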

## ATTACHMENTS
# Creation
attach_id = self.pool['ir.attachment'].create(
    cr, uid, {
        'name': filename,
        'res_id': 12,
        'res_model': 'sale.order',  # self._name
        'datas': base64.encodestring(xml_string),
        # 'datas_fname': filename,
    })
Пример #45
0
 def _only_deposit_location_action(self, res_action):
     ctx = safe_eval(res_action['context'])
     ctx.update({'search_default_deposit_loc': 1})
     res_action['context'] = ctx
     return res_action
Пример #46
0
 def get_default_stock_empty_location(self, cr, uid, fields, context=None):
     icp = self.pool.get('ir.config_parameter')
     # we use safe_eval on the result, since the value of the parameter is a nonempty string
     return {
         'empty_stock_location': safe_eval(icp.get_param(cr, uid, 'stock.empty_location', 'False')),
     }
Пример #47
0
def get_sys_logs(self, cr, uid):
    """
    Utility method to send a publisher warranty "get logs" message.
    """
    pool = pooler.get_pool(cr.dbname)

    dbuuid = pool.get('ir.config_parameter').get_param(cr, uid,
                                                       'database.uuid')
    db_create_date = pool.get('ir.config_parameter').get_param(
        cr, uid, 'database.create_date')
    limit_date = datetime.datetime.now()
    limit_date = limit_date - datetime.timedelta(15)
    limit_date_str = limit_date.strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT)
    nbr_users = pool.get("res.users").search(cr, uid, [], count=True)
    nbr_active_users = pool.get("res.users").search(
        cr, uid, [("login_date", ">=", limit_date_str)], count=True)
    nbr_share_users = False
    nbr_active_share_users = False
    if "share" in pool.get("res.users")._all_columns:
        nbr_share_users = pool.get("res.users").search(cr,
                                                       uid,
                                                       [("share", "=", True)],
                                                       count=True)
        nbr_active_share_users = pool.get("res.users").search(
            cr,
            uid, [("share", "=", True), ("login_date", ">=", limit_date_str)],
            count=True)
    user = pool.get("res.users").browse(cr, uid, uid)

    web_base_url = self.pool.get('ir.config_parameter').get_param(
        cr, uid, 'web.base.url', 'False')
    msg = {
        "dbuuid": dbuuid,
        "nbr_users": nbr_users,
        "nbr_active_users": nbr_active_users,
        "nbr_share_users": nbr_share_users,
        "nbr_active_share_users": nbr_active_share_users,
        "dbname": cr.dbname,
        "db_create_date": db_create_date,
        "version": release.version,
        "language": user.lang,
        "web_base_url": web_base_url,
    }
    msg.update(
        pool.get("res.company").read(cr, uid, [1],
                                     ["name", "email", "phone"])[0])

    add_arg = {"timeout": 30} if sys.version_info >= (2, 6) else {}
    arguments = {
        'arg0': msg,
        "action": "update",
    }
    arguments_raw = urllib.urlencode(arguments)

    url = config.get("publisher_warranty_url")

    uo = urllib2.urlopen(url, arguments_raw, **add_arg)
    result = {}
    try:
        submit_result = uo.read()
        result = safe_eval(submit_result)
    finally:
        uo.close()
    return result
Пример #48
0
 def get_default_params(self):
     res = {}
     ir_config_env = self.env['ir.config_parameter']
     res['diacritics_insensitive_search'] = safe_eval(
         ir_config_env.get_param('diacritics_insensitive_search', 'False'))
     return res
Пример #49
0
    def get_data(self, user):

        domain = safe_eval(self.domain, {'user': user})
        won_domain = safe_eval(self.won_domain or '[]', {'user': user})

        field_date_name = self.field_date_id and self.field_date_id.name
        if self.start_date and field_date_name:
            domain.append((field_date_name, '>=', self.start_date))
        if self.end_date and field_date_name:
            domain.append((field_date_name, '<=', self.end_date))

        res = {
            'name': self.name,
            'type': self.type,
            'model': self.model_id.model,
            'domain': str(domain),
            'precision': self.precision,
        }
        obj = self.env[self.model_id.model]
        if self.type == 'list':
            total_count = obj.search_count(domain)
            groups = [{'test': lambda r: True}]
            if self.agenda:
                today = date.today()
                tomorrow = today + timedelta(days=1)

                def r2date(r):
                    d = getattr(r, field_date_name)
                    if d:
                        d = datetime.strptime(
                            d, self.field_date_id.ttype == 'date'
                            and DEFAULT_SERVER_DATE_FORMAT
                            or DEFAULT_SERVER_DATETIME_FORMAT)
                        d = d.date()
                    else:
                        d = date.today()
                    return d

                groups = [
                    {
                        'label': _('Overdue'),
                        'class': 'overdue',
                        'test': lambda r: r2date(r) < today,
                        'mandatory': False,
                    },
                    {
                        'label': _('Today'),
                        'class': 'today',
                        'test': lambda r: r2date(r) == today,
                        'mandatory': True,
                    },
                    {
                        'label': _('Tomorrow'),
                        'class': 'tomorrow',
                        'test': lambda r: r2date(r) == tomorrow,
                        'mandatory': False,
                    },
                    {
                        'label': _('Later'),
                        'class': 'later',
                        'test': lambda r: r2date(r) > tomorrow,
                        'mandatory': False,
                    },
                ]
            for g in groups:
                g['lines'] = []

            res.update({
                'more': self.limit and self.limit < total_count,
                'total_count': total_count,
                'agenda': self.agenda,
                'groups': groups,
            })
            for r in obj.search(domain, limit=self.limit, order=self.order):
                mako = mako_template_env.from_string(tools.ustr(self.content))
                content = mako.render({'record': r})
                r_json = {
                    'id': r.id,
                    # 'fields': dict( (f,getattr(r,f)) for f in fields),
                    'display_mode': 'progress',
                    'state': 'inprogress',
                    'completeness': 0,
                    'name': content,
                    'description': '',
                }
                if self.value_field_id:
                    r_json['current'] = getattr(r, self.value_field_id.name)
                    if self.value_field_monetary:
                        r_json['monetary'] = 1
                for g in groups:
                    if g['test'](r):
                        g['lines'].append(r_json)
                        break
            for g in groups:
                del g['test']
        elif self.type == 'funnel':
            stage_ids = []  # [key]
            for group in obj.read_group(domain, [],
                                        [self.stage_field_id.name]):
                key = group[self.stage_field_id.name]
                if isinstance(key, (list, tuple)):
                    key = key[0]
                stage_ids.append(key)

            stages = []  # [{'name':Name, 'id': key}]
            if self.stage_field_id.ttype == 'selection':
                d = dict(self.stage_field_id.selection)
                stages = [{'id': id, 'name': d[id]} for id in stage_ids]
            else:  # many2one
                stage_model = self.stage_field_id.relation
                for r in self.env[stage_model].browse(stage_ids):
                    stages.append({'id': r.id, 'name': r.name_get()[0][1]})

            value_field_name = self.value_field_id.name
            for stage in stages:
                d = copy.copy(domain)
                d.append((self.stage_field_id.name, '=', stage['id']))
                result = obj.read_group(d, [value_field_name], [])
                stage['closed_value'] = result and result[0][
                    value_field_name] or 0.0
                stage['domain'] = str(d)

            # won value
            d = domain + won_domain
            result = obj.read_group(d, [value_field_name], [])
            won = {
                'name': _('Won'),
                'id': '__won__',
                'closed_value': result and result[0][value_field_name] or 0.0
            }
            stages.append(won)
            cur = 0
            for stage in reversed(stages):
                cur += stage['closed_value']
                stage['abs_value'] = cur
            total_value = stages[0]['abs_value']
            precision = self.precision
            for s in stages:
                s['rel_value'] = round(
                    100 * s['abs_value'] / total_value /
                    precision) * precision if total_value else 100
                # dummy fields
                s['display_mode'] = 'progress'
                s['monetary'] = 1

            res['stages'] = stages
            res['won'] = won
            res['conversion_rate'] = stages[-1]['rel_value']
        elif self.type == 'slice':
            value_field_name = self.value_field_id.name
            for f, d in [('total', domain), ('won', won_domain)]:
                result = obj.read_group(d, [value_field_name], [])
                res[f] = result and result[0][value_field_name] or 0.0

            res['domain'] = str(domain)
            res['won_domain'] = str(won_domain)

            precision = self.precision
            total_value = res['total']
            res['slice'] = round(
                100 * res['won'] / res['total'] /
                precision) * precision if res['total'] else 100
            # dummy fields
            res['display_mode'] = 'progress'
            res['monetary'] = self.value_field_monetary
        return res