Example #1
 def _compute_percentage_satisfaction_issue(self):
     project_issue = self.env['project.issue'].search([('project_id', '=',
                                                        self.id)])
     activity = project_issue.rating_get_grades()
     self.percentage_satisfaction_issue = activity['great'] * 100 / sum(
         pycompat.values(activity)) if sum(
             pycompat.values(activity)) else -1
Example #2
 def _compute_percentage_satisfaction_project(self):
     domain = [('create_date', '>=',
                fields.Datetime.to_string(fields.datetime.now() -
                                          timedelta(days=30)))]
     for project in self:
         activity = project.tasks.rating_get_grades(domain)
         project.percentage_satisfaction_project = activity[
             'great'] * 100 / sum(pycompat.values(activity)) if sum(
                 pycompat.values(activity)) else -1
Example #3
 def _compute_percentage_satisfaction_project(self):
     super(Project, self)._compute_percentage_satisfaction_project()
     for project in self:
         domain = [('create_date', '>=', fields.Datetime.to_string(fields.datetime.now() - timedelta(days=30)))]
         activity_great, activity_sum = 0, 0
         if project.use_tasks:
             activity_task = project.tasks.rating_get_grades(domain)
             activity_great = activity_task['great']
             activity_sum = sum(pycompat.values(activity_task))
         if project.use_issues:
             activity_issue = self.env['project.issue'].search([('project_id', '=', project.id)]).rating_get_grades(domain)
             activity_great += activity_issue['great']
             activity_sum += sum(pycompat.values(activity_issue))
         project.percentage_satisfaction_project = activity_great * 100 / activity_sum if activity_sum else -1
Example #4
 def _process_registration_details(self, details):
     ''' Process data posted from the attendee details form. '''
     registrations = {}
     global_values = {}
     for key, value in pycompat.items(details):
         counter, field_name = key.split('-', 1)
         if counter == '0':
             global_values[field_name] = value
         else:
             registrations.setdefault(counter, dict())[field_name] = value
     for key, value in pycompat.items(global_values):
         for registration in pycompat.values(registrations):
             registration[key] = value
     return list(pycompat.values(registrations))
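For context, here is a minimal standalone sketch (with hypothetical form keys, outside of Odoo) of how the counter-prefixed field names above split into per-attendee registrations plus shared values that are copied onto every attendee:

    # Hypothetical posted details: '0-...' keys are global, 'N-...' keys are per attendee.
    details = {
        '0-event_id': '7',
        '1-name': 'Alice', '1-email': 'alice@example.com',
        '2-name': 'Bob', '2-email': 'bob@example.com',
    }
    registrations, global_values = {}, {}
    for key, value in details.items():
        counter, field_name = key.split('-', 1)
        if counter == '0':
            global_values[field_name] = value
        else:
            registrations.setdefault(counter, {})[field_name] = value
    for field_name, value in global_values.items():
        for registration in registrations.values():
            registration[field_name] = value
    print(list(registrations.values()))
    # [{'name': 'Alice', 'email': 'alice@example.com', 'event_id': '7'},
    #  {'name': 'Bob', 'email': 'bob@example.com', 'event_id': '7'}]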
Example #5
 def _compute_percentage_satisfaction_project(self):
     super(Project, self)._compute_percentage_satisfaction_project()
     for project in self:
         domain = [('create_date', '>=', fields.Datetime.to_string(fields.datetime.now() - timedelta(days=30)))]
         activity_great, activity_sum = 0, 0
         if project.use_tasks:
             activity_task = project.tasks.rating_get_grades(domain)
             activity_great = activity_task['great']
             activity_sum = sum(pycompat.values(activity_task))
         if project.use_issues:
             activity_issue = self.env['project.issue'].search([('project_id', '=', project.id)]).rating_get_grades(domain)
             activity_great += activity_issue['great']
             activity_sum += sum(pycompat.values(activity_issue))
         project.percentage_satisfaction_project = activity_great * 100 / activity_sum if activity_sum else -1
Example #6
 def rating_get_repartition(self, add_stats=False, domain=None):
     """ get the repatition of rating grade for the given res_ids.
         :param add_stats : flag to add stat to the result
         :type add_stats : boolean
         :param domain : optional extra domain of the rating to include/exclude in repartition
         :return dictionnary
             if not add_stats, the dict is like
                 - key is the rating value (integer)
                 - value is the number of object (res_model, res_id) having the value
             otherwise, key is the value of the information (string) : either stat name (avg, total, ...) or 'repartition'
             containing the same dict if add_stats was False.
     """
     base_domain = [('res_model', '=', self._name), ('res_id', 'in', self.ids), ('rating', '>=', 1), ('consumed', '=', True)]
     if domain:
         base_domain += domain
     data = self.env['rating.rating'].read_group(base_domain, ['rating'], ['rating', 'res_id'])
     # init dict with all possible rating values, except 0 (no value for the rating)
     values = dict.fromkeys(range(1, 11), 0)
     values.update((d['rating'], d['rating_count']) for d in data)
     # add other stats
     if add_stats:
         rating_number = sum(pycompat.values(values))
         result = {
             'repartition': values,
             'avg': sum(float(key * values[key]) for key in values) / rating_number if rating_number > 0 else 0,
             'total': sum(it['rating_count'] for it in data),
         }
         return result
     return values
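As an illustration only (made-up counts, not real data), the structure returned above would look roughly like this, with and without add_stats:

    repartition = {1: 0, 2: 3, 3: 0, 4: 0, 5: 1, 6: 0, 7: 2, 8: 0, 9: 0, 10: 4}  # rating -> count
    rating_number = sum(repartition.values())                                    # 10
    with_stats = {
        'repartition': repartition,
        'avg': sum(k * v for k, v in repartition.items()) / rating_number,       # 6.5
        'total': rating_number,
    }
    print(with_stats['avg'], with_stats['total'])  # 6.5 10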
Example #7
 def signParams(parms):
     signing_string = ':'.join(
         escapeVal(v)
         for v in chain(pycompat.keys(parms), pycompat.values(parms))
     )
     hm = hmac.new(hmac_key, signing_string, hashlib.sha256)
     return base64.b64encode(hm.digest())
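A self-contained sketch of the same signing idea, assuming a plain-Python adaptation (escape_val and the key handling here are hypothetical, not the exact payment-provider scheme): all keys followed by all values are joined with ':', HMAC-SHA256'd and base64-encoded:

    import base64
    import hashlib
    import hmac
    from itertools import chain

    def escape_val(value):
        # hypothetical escaper: backslash-escape '\' and ':' before joining
        return str(value).replace('\\', '\\\\').replace(':', '\\:')

    def sign_params(params, hmac_key):
        signing_string = ':'.join(
            escape_val(v) for v in chain(params.keys(), params.values())
        )
        digest = hmac.new(hmac_key.encode(), signing_string.encode(), hashlib.sha256)
        return base64.b64encode(digest.digest())

    print(sign_params({'merchantReference': 'SO004', 'amount': '320.0'}, 'secret'))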
Example #8
    def _compute_account_balance(self, accounts):
        """ compute the balance, debit and credit for the provided accounts
        """
        mapping = {
            'balance': "COALESCE(SUM(debit),0) - COALESCE(SUM(credit), 0) as balance",
            'debit': "COALESCE(SUM(debit), 0) as debit",
            'credit': "COALESCE(SUM(credit), 0) as credit",
        }

        res = {}
        for account in accounts:
            res[account.id] = dict.fromkeys(mapping, 0.0)
        if accounts:
            tables, where_clause, where_params = self.env['account.move.line']._query_get()
            tables = tables.replace('"', '') if tables else "account_move_line"
            wheres = [""]
            if where_clause.strip():
                wheres.append(where_clause.strip())
            filters = " AND ".join(wheres)
            request = "SELECT account_id as id, " + ', '.join(pycompat.values(mapping)) + \
                       " FROM " + tables + \
                       " WHERE account_id IN %s " \
                            + filters + \
                       " GROUP BY account_id"
            params = (tuple(accounts._ids),) + tuple(where_params)
            self.env.cr.execute(request, params)
            for row in self.env.cr.dictfetchall():
                res[row['id']] = row
        return res
Example #9
    def rule_is_enumerable(self, rule):
        """ Checks that it is possible to generate sensible GET queries for
            a given rule (if the endpoint matches its own requirements)
            :type rule: werkzeug.routing.Rule
            :rtype: bool
        """
        endpoint = rule.endpoint
        methods = endpoint.routing.get('methods') or ['GET']

        converters = list(pycompat.values(rule._converters))
        if not ('GET' in methods
            and endpoint.routing['type'] == 'http'
            and endpoint.routing['auth'] in ('none', 'public')
            and endpoint.routing.get('website', False)
            and all(hasattr(converter, 'generate') for converter in converters)
            and endpoint.routing.get('website')):
            return False

        # don't list routes with arguments that have neither a default value nor a converter
        spec = inspect.getargspec(endpoint.method.original_func)

        # remove self and arguments having a default value
        defaults_count = len(spec.defaults or [])
        args = spec.args[1:(-defaults_count or None)]

        # check that all args have a converter
        return all((arg in rule._converters) for arg in args)
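A standalone sketch (toy handler) of the argspec slicing used above: dropping self plus the trailing arguments that have defaults leaves only the required arguments, which must then all be covered by URL converters:

    import inspect

    def handler(self, category, page, sort='name', limit=20):
        pass

    spec = inspect.getfullargspec(handler)        # getargspec() is deprecated in Python 3
    defaults_count = len(spec.defaults or [])
    required = spec.args[1:(-defaults_count or None)]
    print(required)  # ['category', 'page']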
Example #10
 def channel_info(self, extra_info=False):
     """ Extends the channel header by adding the livechat operator and the 'anonymous' profile
         :rtype : list(dict)
     """
     channel_infos = super(MailChannel, self).channel_info(extra_info)
     # add the operator id
     if self.env.context.get('im_livechat_operator_partner_id'):
         partner_name = self.env['res.partner'].browse(
             self.env.context.get(
                 'im_livechat_operator_partner_id')).name_get()[0]
         for channel_info in channel_infos:
             channel_info['operator_pid'] = partner_name
     channel_infos_dict = dict((c['id'], c) for c in channel_infos)
     for channel in self:
         # add the anonymous name
         if channel.anonymous_name:
             channel_infos_dict[
                 channel.id]['anonymous_name'] = channel.anonymous_name
         # add the last message date
         if channel.channel_type == 'livechat':
             last_msg = self.env['mail.message'].search(
                 [("channel_ids", "in", [channel.id])], limit=1)
             if last_msg:
                 channel_infos_dict[
                     channel.id]['last_message_date'] = last_msg.date
     return list(pycompat.values(channel_infos_dict))
Example #11
    def _compute_previous_order(self):
        self.ensure_one()
        self.previous_order_widget = json.dumps(False)

        prev_order = self.env['lunch.order.line'].search(
            [('user_id', '=', self.env.uid),
             ('product_id.active', '!=', False)],
            limit=20,
            order='date desc, id desc')
        # If we use prev_order.ids, we will have duplicates (identical orders).
        # Therefore, the following part removes duplicates based on product_id and note.
        self.previous_order_ids = list(
            pycompat.values({(order.product_id, order.note): order.id
                             for order in prev_order}))

        if self.previous_order_ids:
            lunch_data = []
            for line in self.previous_order_ids:
                lunch_data.append({
                    'line_id': line.id,
                    'product_id': line.product_id.id,
                    'product_name': line.product_id.name,
                    'supplier': line.supplier.name,
                    'note': line.note,
                    'price': line.price,
                    'date': line.date,
                    'currency_id': line.currency_id.id,
                })
            lunch_data.sort(key=itemgetter('date', 'line_id'), reverse=True)
            self.previous_order_widget = json.dumps(lunch_data)
Example #12
File: lunch.py Project: RoganW/odoo
    def _compute_previous_order(self):
        self.ensure_one()
        self.previous_order_widget = json.dumps(False)

        prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='date desc, id desc')
        # If we use prev_order.ids, we will have duplicates (identical orders).
        # Therefore, the following part removes duplicates based on product_id and note.
        self.previous_order_ids = list(pycompat.values({
            (order.product_id, order.note): order.id
            for order in prev_order
        }))

        if self.previous_order_ids:
            lunch_data = []
            for line in self.previous_order_ids:
                lunch_data.append({
                    'line_id': line.id,
                    'product_id': line.product_id.id,
                    'product_name': line.product_id.name,
                    'supplier': line.supplier.name,
                    'note': line.note,
                    'price': line.price,
                    'date': line.date,
                    'currency_id': line.currency_id.id,
                })
            lunch_data.sort(key=itemgetter('date', 'line_id'), reverse=True)
            self.previous_order_widget = json.dumps(lunch_data)
Example #13
    def _compute_account_balance(self, accounts):
        """ compute the balance, debit and credit for the provided accounts
        """
        mapping = {
            'balance': "COALESCE(SUM(debit),0) - COALESCE(SUM(credit), 0) as balance",
            'debit': "COALESCE(SUM(debit), 0) as debit",
            'credit': "COALESCE(SUM(credit), 0) as credit",
        }

        res = {}
        for account in accounts:
            res[account.id] = dict.fromkeys(mapping, 0.0)
        if accounts:
            tables, where_clause, where_params = self.env[
                'account.move.line']._query_get()
            tables = tables.replace('"', '') if tables else "account_move_line"
            wheres = [""]
            if where_clause.strip():
                wheres.append(where_clause.strip())
            filters = " AND ".join(wheres)
            request = "SELECT account_id as id, " + ', '.join(pycompat.values(mapping)) + \
                       " FROM " + tables + \
                       " WHERE account_id IN %s " \
                            + filters + \
                       " GROUP BY account_id"
            params = (tuple(accounts._ids), ) + tuple(where_params)
            self.env.cr.execute(request, params)
            for row in self.env.cr.dictfetchall():
                res[row['id']] = row
        return res
Example #14
    def update_from_db(self, cr):
        if not len(self):
            return
        # update the graph with values from the database (if they exist)
        ## First, we set the default values for each package in the graph
        additional_data = {
            key: {
                'id': 0,
                'state': 'uninstalled',
                'dbdemo': False,
                'installed_version': None
            }
            for key in pycompat.keys(self)
        }
        ## Then we get the values from the database
        cr.execute(
            'SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version'
            '  FROM ir_module_module'
            ' WHERE name IN %s', (tuple(additional_data), ))

        ## and we update the default values with values from the database
        additional_data.update((x['name'], x) for x in cr.dictfetchall())

        for package in pycompat.values(self):
            for k, v in pycompat.items(additional_data[package.name]):
                setattr(package, k, v)
Example #15
    def _compute_previous_order(self):
        self.ensure_one()
        self.previous_order_widget = json.dumps(False)

        prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='date desc, id desc')
        # If we use prev_order.ids, we will have duplicates (identical orders).
        # Therefore, the following part removes duplicates based on product_id and note.
        self.previous_order_ids = list(pycompat.values({
            (order.product_id, order.note): order.id
            for order in prev_order
        }))

        if self.previous_order_ids:
            lunch_data = {}
            for line in self.previous_order_ids:
                lunch_data[line.id] = {
                    'line_id': line.id,
                    'product_id': line.product_id.id,
                    'product_name': line.product_id.name,
                    'supplier': line.supplier.name,
                    'note': line.note,
                    'price': line.price,
                    'date': line.date,
                    'currency_id': line.currency_id.id,
                }
            # sort the old lunch orders by (date, id)
            lunch_data = OrderedDict(sorted(lunch_data.items(), key=lambda t: (t[1]['date'], t[0]), reverse=True))
            self.previous_order_widget = json.dumps(lunch_data)
Example #16
    def is_css_preprocessed(self):
        preprocessed = True
        attachments = None
        for atype in (SassStylesheetAsset, LessStylesheetAsset):
            outdated = False
            assets = dict((asset.html_url, asset) for asset in self.stylesheets
                          if isinstance(asset, atype))
            if assets:
                assets_domain = [('url', 'in', list(assets))]
                attachments = self.env['ir.attachment'].sudo().search(
                    assets_domain)
                for attachment in attachments:
                    asset = assets[attachment.url]
                    if asset.last_modified > fields.Datetime.from_string(
                            attachment['__last_update']):
                        outdated = True
                        break
                    if asset._content is None:
                        asset._content = attachment.datas and attachment.datas.decode(
                            'base64').decode('utf8') or ''
                        if not asset._content and attachment.file_size > 0:
                            asset._content = None  # file missing, force recompile

                if any(asset._content is None
                       for asset in pycompat.values(assets)):
                    outdated = True

                if outdated:
                    preprocessed = False

        return preprocessed, attachments
Example #17
    def make_invoices(self):
        if not self._context.get('active_ids'):
            return {'type': 'ir.actions.act_window_close'}
        new_invoice = {}
        for wizard in self:
            repairs = self.env['mrp.repair'].browse(
                self._context['active_ids'])
            new_invoice = repairs.action_invoice_create(group=wizard.group)

            # We have to update the state of the given repairs, otherwise they remain 'to be invoiced'.
            # Note that this will trigger another call to the method 'action_invoice_create',
            # but that second call will not do anything, since the repairs are already invoiced.
            repairs.action_repair_invoice_create()
        return {
            'domain': [('id', 'in', list(pycompat.values(new_invoice)))],
            'name': 'Invoices',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.invoice',
            'view_id': False,
            'views': [(self.env.ref('account.invoice_tree').id, 'tree'),
                      (self.env.ref('account.invoice_form').id, 'form')],
            'context': "{'type':'out_invoice'}",
            'type': 'ir.actions.act_window',
        }
Example #18
    def test_60_prefetch(self):
        """ Check the record cache prefetching """
        partners = self.env['res.partner'].search([],
                                                  limit=models.PREFETCH_MAX)
        self.assertTrue(len(partners) > 1)

        # all the records in partners are ready for prefetching
        self.assertItemsEqual(partners.ids, partners._prefetch['res.partner'])

        # reading ONE partner should fetch them ALL
        partner = next(p for p in partners)
        partner.country_id
        country_id_cache = self.env.cache[type(partners).country_id]
        self.assertItemsEqual(partners.ids, country_id_cache)

        # partners' countries are ready for prefetching
        country_ids = set(cid for cids in pycompat.values(country_id_cache)
                          for cid in cids)
        self.assertTrue(len(country_ids) > 1)
        self.assertItemsEqual(country_ids, partners._prefetch['res.country'])

        # reading ONE partner country should fetch ALL partners' countries
        country = next(p.country_id for p in partners if p.country_id)
        country.name
        name_cache = self.env.cache[type(country).name]
        self.assertItemsEqual(country_ids, name_cache)
Example #19
    def is_css_preprocessed(self):
        preprocessed = True
        attachments = None
        for atype in (SassStylesheetAsset, LessStylesheetAsset):
            outdated = False
            assets = dict((asset.html_url, asset) for asset in self.stylesheets if isinstance(asset, atype))
            if assets:
                assets_domain = [('url', 'in', list(assets))]
                attachments = self.env['ir.attachment'].sudo().search(assets_domain)
                for attachment in attachments:
                    asset = assets[attachment.url]
                    if asset.last_modified > fields.Datetime.from_string(attachment['__last_update']):
                        outdated = True
                        break
                    if asset._content is None:
                        asset._content = attachment.datas and attachment.datas.decode('base64').decode('utf8') or ''
                        if not asset._content and attachment.file_size > 0:
                            asset._content = None # file missing, force recompile

                if any(asset._content is None for asset in pycompat.values(assets)):
                    outdated = True

                if outdated:
                    preprocessed = False

        return preprocessed, attachments
Example #20
    def setup_models(self, cr):
        """ Complete the setup of models.
            This must be called after loading modules and before using the ORM.
        """
        lazy_property.reset_all(self)
        env = odoo.api.Environment(cr, SUPERUSER_ID, {})

        # add manual models
        if self._init_modules:
            env['ir.model']._add_manual_models()

        # prepare the setup on all models
        models = list(pycompat.values(env))
        for model in models:
            model._prepare_setup()

        # do the actual setup from a clean state
        self._m2m = {}
        for model in models:
            model._setup_base()

        for model in models:
            model._setup_fields()

        for model in models:
            model._setup_complete()

        self.registry_invalidated = True
Example #21
 def _get_migration_versions(pkg):
     versions = sorted({
         ver
         for lv in pycompat.values(self.migrations[pkg.name])
         for ver, lf in pycompat.items(lv)
         if lf
     }, key=lambda k: parse_version(convert_version(k)))
     return versions
Example #22
 def _default_previous_order_ids(self):
     prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='id desc')
     # If we returned prev_order.ids, we would have duplicates (identical orders).
     # Therefore, the following part removes duplicates based on product_id and note.
     return list(pycompat.values({
         (order.product_id, order.note): order.id
         for order in prev_order
     }))
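A quick standalone illustration (toy records) of the dict-comprehension trick above: keying by (product_id, note) keeps a single id per distinct combination, namely the last one seen:

    orders = [
        {'id': 3, 'product_id': 10, 'note': ''},
        {'id': 2, 'product_id': 10, 'note': ''},            # duplicate of the line above
        {'id': 1, 'product_id': 11, 'note': 'no onions'},
    ]
    unique_ids = list({(o['product_id'], o['note']): o['id'] for o in orders}.values())
    print(unique_ids)  # [2, 1] -- one id per distinct (product_id, note)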
Example #23
 def field_sequence(self):
     """ Return a function mapping a field to an integer. The value of a
         field is guaranteed to be strictly greater than the value of the
         field's dependencies.
     """
     # map fields on their dependents
     dependents = {
         field: set(dep for dep, _ in model._field_triggers[field] if dep != field)
         for model in pycompat.values(self)
         for field in pycompat.values(model._fields)
     }
     # sort them topologically, and associate a sequence number to each field
     mapping = {
         field: num
         for num, field in enumerate(reversed(topological_sort(dependents)))
     }
     return mapping.get
Example #24
 def linearize(app, gs):
     # determine sequence order: a group appears after its implied groups
     order = {g: len(g.trans_implied_ids & gs) for g in gs}
     # check whether order is total, i.e., sequence orders are distinct
     if len(set(pycompat.values(order))) == len(gs):
         return (app, 'selection', gs.sorted(key=order.get))
     else:
         return (app, 'boolean', gs)
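A tiny sketch (toy ranks, not real groups) of the totality check above: the groups are rendered as a 'selection' only when every group gets a distinct rank, otherwise the caller falls back to independent booleans:

    order = {'User': 0, 'Manager': 1, 'Administrator': 2}   # distinct ranks -> total order
    assert len(set(order.values())) == len(order)
    ambiguous = {'A': 0, 'B': 0}                             # tied ranks -> not total
    assert len(set(ambiguous.values())) != len(ambiguous)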
Example #25
File: lunch.py Project: RoganW/odoo
 def _default_previous_order_ids(self):
     prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='id desc')
     # If we returned prev_order.ids, we would have duplicates (identical orders).
     # Therefore, the following part removes duplicates based on product_id and note.
     return list(pycompat.values({
         (order.product_id, order.note): order.id
         for order in prev_order
     }))
Example #26
 def linearize(app, gs):
     # determine sequence order: a group appears after its implied groups
     order = {g: len(g.trans_implied_ids & gs) for g in gs}
     # check whether order is total, i.e., sequence orders are distinct
     if len(set(pycompat.values(order))) == len(gs):
         return (app, 'selection', gs.sorted(key=order.get))
     else:
         return (app, 'boolean', gs)
Example #27
    def get_actions(self, action_slot, model, res_id=False):
        """Retrieves the list of actions bound to the given model's action slot.
           See the class description for more details about the various action
           slots: :class:`~.ir_values`.

           :param string action_slot: the action slot to which the actions should be
                                      bound to - one of ``client_action_multi``,
                                      ``client_print_multi``, ``client_action_relate``,
                                      ``tree_but_open``.
           :param string model: model name
           :param int res_id: optional record id - will bind the action only to a
                              specific record of the model, not all records.
           :return: list of action tuples of the form ``(id, name, action_def)``,
                    where ``id`` is the ID of the default entry, ``name`` is the
                    action label, and ``action_def`` is a dict containing the
                    action definition as obtained by calling
                    :meth:`~odoo.models.Model.read` on the action record.
        """
        assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
        # use a direct SQL query for performance reasons,
        # this is called very often
        query = """ SELECT v.id, v.name, v.value FROM ir_values v
                    WHERE v.key = %s AND v.key2 = %s AND v.model = %s
                        AND (v.res_id = %s OR v.res_id IS NULL OR v.res_id = 0)
                    ORDER BY v.id """
        self._cr.execute(query, ('action', action_slot, model, res_id or None))

        # map values to their corresponding action record
        actions = []
        for id, name, value in self._cr.fetchall():
            if not value:
                continue  # skip if undefined
            action_model, action_id = value.split(',')
            if action_model not in self.env:
                continue  # unknown model? skip it!
            action = self.env[action_model].browse(int(action_id))
            actions.append((id, name, action))

        # process values and their action
        results = {}
        for id, name, action in actions:
            # FIXME: needs cleanup
            try:
                action_def = dict([(k, v.convert_to_read(action[k], action))
                                   for k, v in action._fields.items()])
                if action._name in ('ir.actions.report',
                                    'ir.actions.act_window'):
                    if action.groups_id and not action.groups_id & self.env.user.groups_id:
                        if name == 'Menuitem':
                            raise AccessError(
                                _('You do not have the permission to perform this operation!!!'
                                  ))
                        continue
                # keep only the last action registered for each action name
                results[name] = (id, name, action_def)
            except (AccessError, MissingError):
                continue
        return sorted(pycompat.values(results))
Example #28
    def test_10_Authorize_form_render(self):
        self.assertEqual(self.authorize.environment, 'test', 'test without test environment')

        # ----------------------------------------
        # Test: button direct rendering
        # ----------------------------------------
        base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        form_values = {
            'x_login': self.authorize.authorize_login,
            'x_trans_key': self.authorize.authorize_transaction_key,
            'x_amount': '320.0',
            'x_show_form': 'PAYMENT_FORM',
            'x_type': 'AUTH_CAPTURE',
            'x_method': 'CC',
            'x_fp_sequence': '%s%s' % (self.authorize.id, int(time.time())),
            'x_version': '3.1',
            'x_relay_response': 'TRUE',
            'x_fp_timestamp': str(int(time.time())),
            'x_relay_url': urls.url_join(base_url, AuthorizeController._return_url),
            'x_cancel_url': urls.url_join(base_url, AuthorizeController._cancel_url),
            'return_url': None,
            'x_currency_code': 'USD',
            'x_invoice_num': 'SO004',
            'x_first_name': 'Norbert',
            'x_last_name': 'Buyer',
            'x_address': 'Huge Street 2/543',
            'x_city': 'Sin City',
            'x_zip': '1000',
            'x_country': 'Belgium',
            'x_phone': '0032 12 34 56 78',
            'x_email': '*****@*****.**',
            'x_state': None,
            'x_ship_to_first_name': 'Norbert',
            'x_ship_to_last_name': 'Buyer',
            'x_ship_to_address': 'Huge Street 2/543',
            'x_ship_to_city': 'Sin City',
            'x_ship_to_zip': '1000',
            'x_ship_to_country': 'Belgium',
            'x_ship_to_phone': '0032 12 34 56 78',
            'x_ship_to_email': '*****@*****.**',
            'x_ship_to_state': None,
        }

        form_values['x_fp_hash'] = self._authorize_generate_hashing(form_values)
        # render the button
        res = self.authorize.render('SO004', 320.0, self.currency_usd.id, values=self.buyer_values)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://test.authorize.net/gateway/transact.dll', 'Authorize: wrong form POST url')
        for el in tree.iterfind('input'):
            values = list(pycompat.values(el.attrib))
            if values[1] in ['submit', 'x_fp_hash', 'return_url', 'x_state', 'x_ship_to_state']:
                continue
            self.assertEqual(
                unicode(values[2], "utf-8"),
                form_values[values[1]],
                'Authorize: wrong value for input %s: received %s instead of %s' % (values[1], values[2], form_values[values[1]])
            )
Example #29
 def fiscal_pos_map_to_csv(self):
     writer = csv.writer(open('account.fiscal.'
                              'position.tax.template-%s.csv' %
                              self.suffix, 'wb'))
     fiscal_pos_map_iterator = self.iter_fiscal_pos_map()
     keys = next(fiscal_pos_map_iterator)
     writer.writerow(keys)
     for row in fiscal_pos_map_iterator:
         writer.writerow(pycompat.imap(_e, pycompat.values(row)))
Example #30
 def _get_migration_versions(pkg):
     versions = sorted(
         {
             ver
             for lv in pycompat.values(self.migrations[pkg.name])
             for ver, lf in pycompat.items(lv) if lf
         },
         key=lambda k: parse_version(convert_version(k)))
     return versions
Example #31
    def normalize(self):
        """The ranks are normalized by setting the least rank to zero.
        """

        least_rank = min(x['x'] for x in pycompat.values(self.result))

        if least_rank != 0:
            for node in self.result:
                self.result[node]['x'] -= least_rank
Example #32
    def prepare_result(self, question, current_filters=None):
        """ Compute statistical data for questions by counting number of vote per choice on basis of filter """
        current_filters = current_filters if current_filters else []
        result_summary = {}

        # Calculate and return statistics for choice
        if question.type in ['simple_choice', 'multiple_choice']:
            answers = {}
            comments = []
            [answers.update({label.id: {'text': label.value, 'count': 0, 'answer_id': label.id}}) for label in question.labels_ids]
            for input_line in question.user_input_line_ids:
                if input_line.answer_type == 'suggestion' and answers.get(input_line.value_suggested.id) and (not(current_filters) or input_line.user_input_id.id in current_filters):
                    answers[input_line.value_suggested.id]['count'] += 1
                if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
                    comments.append(input_line)
            result_summary = {'answers': list(pycompat.values(answers)), 'comments': comments}

        # Calculate and return statistics for matrix
        if question.type == 'matrix':
            rows = OrderedDict()
            answers = OrderedDict()
            res = dict()
            comments = []
            [rows.update({label.id: label.value}) for label in question.labels_ids_2]
            [answers.update({label.id: label.value}) for label in question.labels_ids]
            for cell in product(rows, answers):
                res[cell] = 0
            for input_line in question.user_input_line_ids:
                if input_line.answer_type == 'suggestion' and (not(current_filters) or input_line.user_input_id.id in current_filters) and input_line.value_suggested_row:
                    res[(input_line.value_suggested_row.id, input_line.value_suggested.id)] += 1
                if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
                    comments.append(input_line)
            result_summary = {'answers': answers, 'rows': rows, 'result': res, 'comments': comments}

        # Calculate and return statistics for free_text, textbox, date
        if question.type in ['free_text', 'textbox', 'date']:
            result_summary = []
            for input_line in question.user_input_line_ids:
                if not(current_filters) or input_line.user_input_id.id in current_filters:
                    result_summary.append(input_line)

        # Calculate and return statistics for numerical_box
        if question.type == 'numerical_box':
            result_summary = {'input_lines': []}
            all_inputs = []
            for input_line in question.user_input_line_ids:
                if not(current_filters) or input_line.user_input_id.id in current_filters:
                    all_inputs.append(input_line.value_number)
                    result_summary['input_lines'].append(input_line)
            if all_inputs:
                result_summary.update({'average': round(sum(all_inputs) / len(all_inputs), 2),
                                       'max': round(max(all_inputs), 2),
                                       'min': round(min(all_inputs), 2),
                                       'sum': sum(all_inputs),
                                       'most_common': Counter(all_inputs).most_common(5)})
        return result_summary
Example #33
 def _compute_percentage_satisfaction(self):
     for record in self:
         dt = fields.Datetime.to_string(datetime.utcnow() - timedelta(days=7))
         repartition = record.channel_ids.rating_get_grades([('create_date', '>=', dt)])
         total = sum(pycompat.values(repartition))
         if total > 0:
             happy = repartition['great']
             record.rating_percentage_satisfaction = ((happy*100) / total) if happy > 0 else 0
         else:
             record.rating_percentage_satisfaction = -1
Example #34
 def _compute_report_balance(self, reports):
     '''returns a dictionary with key=the ID of a record and value=the credit, debit and balance amount
        computed for this record. If the record is of type :
            'accounts' : it's the sum of the linked accounts
            'account_type' : it's the sum of leaf accounts with such an account_type
            'account_report' : it's the amount of the related report
            'sum' : it's the sum of the children of this record (aka a 'view' record)'''
     res = {}
     fields = ['credit', 'debit', 'balance']
     for report in reports:
         if report.id in res:
             continue
         res[report.id] = dict((fn, 0.0) for fn in fields)
         if report.type == 'accounts':
             # it's the sum of the linked accounts
             res[report.id]['account'] = self._compute_account_balance(
                 report.account_ids)
             for value in pycompat.values(res[report.id]['account']):
                 for field in fields:
                     res[report.id][field] += value.get(field)
         elif report.type == 'account_type':
             # it's the sum of the leaf accounts with such an account type
             accounts = self.env['account.account'].search([
                 ('user_type_id', 'in', report.account_type_ids.ids)
             ])
             res[report.id]['account'] = self._compute_account_balance(
                 accounts)
             for value in pycompat.values(res[report.id]['account']):
                 for field in fields:
                     res[report.id][field] += value.get(field)
         elif report.type == 'account_report' and report.account_report_id:
             # it's the amount of the linked report
             res2 = self._compute_report_balance(report.account_report_id)
             for key, value in pycompat.items(res2):
                 for field in fields:
                     res[report.id][field] += value[field]
         elif report.type == 'sum':
             # it's the sum of the children of this account.report
             res2 = self._compute_report_balance(report.children_ids)
             for key, value in pycompat.items(res2):
                 for field in fields:
                     res[report.id][field] += value[field]
     return res
Example #35
    def get_actions(self, action_slot, model, res_id=False):
        """Retrieves the list of actions bound to the given model's action slot.
           See the class description for more details about the various action
           slots: :class:`~.ir_values`.

           :param string action_slot: the action slot to which the actions should be
                                      bound to - one of ``client_action_multi``,
                                      ``client_print_multi``, ``client_action_relate``,
                                      ``tree_but_open``.
           :param string model: model name
           :param int res_id: optional record id - will bind the action only to a
                              specific record of the model, not all records.
           :return: list of action tuples of the form ``(id, name, action_def)``,
                    where ``id`` is the ID of the default entry, ``name`` is the
                    action label, and ``action_def`` is a dict containing the
                    action definition as obtained by calling
                    :meth:`~odoo.models.Model.read` on the action record.
        """
        assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
        # use a direct SQL query for performance reasons,
        # this is called very often
        query = """ SELECT v.id, v.name, v.value FROM ir_values v
                    WHERE v.key = %s AND v.key2 = %s AND v.model = %s
                        AND (v.res_id = %s OR v.res_id IS NULL OR v.res_id = 0)
                    ORDER BY v.id """
        self._cr.execute(query, ('action', action_slot, model, res_id or None))

        # map values to their corresponding action record
        actions = []
        for id, name, value in self._cr.fetchall():
            if not value:
                continue                # skip if undefined
            action_model, action_id = value.split(',')
            if action_model not in self.env:
                continue                # unknown model? skip it!
            action = self.env[action_model].browse(int(action_id))
            actions.append((id, name, action))

        # process values and their action
        results = {}
        for id, name, action in actions:
            # FIXME: needs cleanup
            try:
                action_def = dict([(k, v.convert_to_read(action[k], action)) for k, v in action._fields.items()])
                if action._name in ('ir.actions.report', 'ir.actions.act_window'):
                    if action.groups_id and not action.groups_id & self.env.user.groups_id:
                        if name == 'Menuitem':
                            raise AccessError(_('You do not have the permission to perform this operation!!!'))
                        continue
                # keep only the last action registered for each action name
                results[name] = (id, name, action_def)
            except (AccessError, MissingError):
                continue
        return sorted(pycompat.values(results))
Example #36
    def get_worked_day_lines(self, contracts, date_from, date_to):
        """
        @param contracts: Browse record of contracts
        @return: returns a list of dict containing the input that should be applied for the given contract between date_from and date_to
        """
        res = []
        # fill only if the contract has a working schedule linked
        for contract in contracts.filtered(
                lambda contract: contract.resource_calendar_id):
            day_from = datetime.combine(fields.Date.from_string(date_from),
                                        datetime_time.min)
            day_to = datetime.combine(fields.Date.from_string(date_to),
                                      datetime_time.max)

            # compute leave days
            leaves = {}
            day_leave_intervals = contract.employee_id.iter_leaves(
                day_from, day_to, calendar=contract.resource_calendar_id)
            for day_intervals in day_leave_intervals:
                for interval in day_intervals:
                    holiday = interval[2]['leaves'].holiday_id
                    current_leave_struct = leaves.setdefault(
                        holiday.holiday_status_id, {
                            'name': holiday.holiday_status_id.name,
                            'sequence': 5,
                            'code': holiday.holiday_status_id.name,
                            'number_of_days': 0.0,
                            'number_of_hours': 0.0,
                            'contract_id': contract.id,
                        })
                    leave_time = (interval[1] - interval[0]).seconds / 3600
                    current_leave_struct['number_of_hours'] += leave_time
                    work_hours = contract.employee_id.get_day_work_hours_count(
                        interval[0].date(),
                        calendar=contract.resource_calendar_id)
                    current_leave_struct[
                        'number_of_days'] += leave_time / work_hours

            # compute worked days
            work_data = contract.employee_id.get_work_days_data(
                day_from, day_to, calendar=contract.resource_calendar_id)
            attendances = {
                'name': _("Normal Working Days paid at 100%"),
                'sequence': 1,
                'code': 'WORK100',
                'number_of_days': work_data['days'],
                'number_of_hours': work_data['hours'],
                'contract_id': contract.id,
            }

            res.append(attendances)
            res.extend(pycompat.values(leaves))
        return res
Example #37
    def get_defaults(self, model, condition=False):
        """Returns any default values that are defined for the current model and user,
           (and match ``condition``, if specified), previously registered via
           :meth:`~.set_default`.

           Defaults are global to a model, not field-specific, but an optional
           ``condition`` can be provided to restrict matching default values
           to those that were defined for the same condition (usually based
           on another field's value).

           Default values also have priorities depending on whom they apply
           to: only the highest priority value will be returned for any
           field. See :meth:`~.set_default` for more details.

           :param string model: model name
           :param string condition: optional condition specification that can be used to
                                    restrict the applicability of the default values
                                    (e.g. based on another field's value). This is an
                                    opaque string as far as the API is concerned, but client
                                    stacks typically use single-field conditions in the
                                    form ``'key=stringified_value'``.
                                    (Currently, the condition is trimmed to 200 characters,
                                    so values that share the same first 200 characters always
                                    match)
           :return: list of default values tuples of the form ``(id, field_name, value)``
                    (``id`` is the ID of the default entry, usually irrelevant)
        """
        # use a direct SQL query for performance reasons,
        # this is called very often
        query = """ SELECT v.id, v.name, v.value FROM ir_values v
                    LEFT JOIN res_users u ON (v.user_id = u.id)
                    WHERE v.key = %%s AND v.model = %%s
                        AND (v.user_id = %%s OR v.user_id IS NULL)
                        AND (v.company_id IS NULL OR
                             v.company_id = (SELECT company_id FROM res_users WHERE id = %%s)
                            )
                    %s
                    ORDER BY v.user_id, v.company_id, v.id"""
        params = ('default', model, self._uid, self._uid)
        if condition:
            query = query % 'AND v.key2 = %s'
            params += (condition[:200],)
        else:
            query = query % 'AND v.key2 IS NULL'
        self._cr.execute(query, params)

        # keep only the highest priority default for each field
        defaults = {}
        for row in self._cr.dictfetchall():
            value = pickle.loads(row['value'].encode('utf-8'))
            defaults.setdefault(row['name'], (row['id'], row['name'], value))
        return list(pycompat.values(defaults))
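A small sketch of the two-stage formatting used above: the '%%s' placeholders survive the first string-formatting pass (query % extra_clause) and come out as '%s' parameters for the database cursor in the second pass:

    template = """SELECT v.id FROM ir_values v WHERE v.key = %%s AND v.model = %%s %s ORDER BY v.id"""
    print(template % "AND v.key2 = %s")      # ... WHERE v.key = %s AND v.model = %s AND v.key2 = %s ...
    print(template % "AND v.key2 IS NULL")   # ... WHERE v.key = %s AND v.model = %s AND v.key2 IS NULL ...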
Example #38
    def get_defaults(self, model, condition=False):
        """Returns any default values that are defined for the current model and user,
           (and match ``condition``, if specified), previously registered via
           :meth:`~.set_default`.

           Defaults are global to a model, not field-specific, but an optional
           ``condition`` can be provided to restrict matching default values
           to those that were defined for the same condition (usually based
           on another field's value).

           Default values also have priorities depending on whom they apply
           to: only the highest priority value will be returned for any
           field. See :meth:`~.set_default` for more details.

           :param string model: model name
           :param string condition: optional condition specification that can be used to
                                    restrict the applicability of the default values
                                    (e.g. based on another field's value). This is an
                                    opaque string as far as the API is concerned, but client
                                    stacks typically use single-field conditions in the
                                    form ``'key=stringified_value'``.
                                    (Currently, the condition is trimmed to 200 characters,
                                    so values that share the same first 200 characters always
                                    match)
           :return: list of default values tuples of the form ``(id, field_name, value)``
                    (``id`` is the ID of the default entry, usually irrelevant)
        """
        # use a direct SQL query for performance reasons,
        # this is called very often
        query = """ SELECT v.id, v.name, v.value FROM ir_values v
                    LEFT JOIN res_users u ON (v.user_id = u.id)
                    WHERE v.key = %%s AND v.model = %%s
                        AND (v.user_id = %%s OR v.user_id IS NULL)
                        AND (v.company_id IS NULL OR
                             v.company_id = (SELECT company_id FROM res_users WHERE id = %%s)
                            )
                    %s
                    ORDER BY v.user_id, v.company_id, v.id"""
        params = ('default', model, self._uid, self._uid)
        if condition:
            query = query % 'AND v.key2 = %s'
            params += (condition[:200], )
        else:
            query = query % 'AND v.key2 IS NULL'
        self._cr.execute(query, params)

        # keep only the highest priority default for each field
        defaults = {}
        for row in self._cr.dictfetchall():
            value = pickle.loads(row['value'].encode('utf-8'))
            defaults.setdefault(row['name'], (row['id'], row['name'], value))
        return list(pycompat.values(defaults))
Example #39
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        # add res_field=False in domain if not present; the arg[0] trick below
        # works for domain items and '&'/'|'/'!' operators too
        if not any(arg[0] in ('id', 'res_field') for arg in args):
            args.insert(0, ('res_field', '=', False))

        ids = super(IrAttachment, self)._search(args, offset=offset, limit=limit, order=order,
                                                count=False, access_rights_uid=access_rights_uid)

        if self._uid == SUPERUSER_ID:
            # rules do not apply for the superuser
            return len(ids) if count else ids

        if not ids:
            return 0 if count else []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        model_attachments = defaultdict(lambda: defaultdict(set))   # {res_model: {res_id: set(ids)}}
        self._cr.execute("""SELECT id, res_model, res_id, public FROM ir_attachment WHERE id IN %s""", [tuple(ids)])
        for row in self._cr.dictfetchall():
            if not row['res_model'] or row['public']:
                continue
            # model_attachments = {res_model: {res_id: set(ids)}}
            model_attachments[row['res_model']][row['res_id']].add(row['id'])

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        for res_model, targets in pycompat.items(model_attachments):
            if res_model not in self.env:
                continue
            if not self.env[res_model].check_access_rights('read', False):
                # remove all corresponding attachment ids
                ids.difference_update(itertools.chain(*pycompat.values(targets)))
                continue
            # filter ids according to what access rules permit
            target_ids = list(targets)
            allowed = self.env[res_model].with_context(active_test=False).search([('id', 'in', target_ids)])
            for res_id in set(target_ids).difference(allowed.ids):
                ids.difference_update(targets[res_id])

        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        return len(result) if count else list(result)
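A small sketch (fabricated rows) of the nested-defaultdict grouping used above to bucket attachment ids first by model and then by record id:

    from collections import defaultdict

    rows = [
        {'id': 1, 'res_model': 'res.partner', 'res_id': 7},
        {'id': 2, 'res_model': 'res.partner', 'res_id': 7},
        {'id': 3, 'res_model': 'sale.order', 'res_id': 42},
    ]
    model_attachments = defaultdict(lambda: defaultdict(set))   # {res_model: {res_id: set(ids)}}
    for row in rows:
        model_attachments[row['res_model']][row['res_id']].add(row['id'])
    print(dict(model_attachments['res.partner']))  # {7: {1, 2}}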
Example #40
    def test_11_indirect_inherits_m2o_order(self):
        Cron = self.env['ir.cron']
        Users = self.env['res.users']

        user_ids = {}
        cron_ids = {}
        for u in 'BAC':
            user_ids[u] = Users.create({'name': u, 'login': u}).id
            cron_ids[u] = Cron.create({'name': u, 'model_id': self.env.ref('base.model_res_partner').id, 'user_id': user_ids[u]}).id

        ids = Cron.search([('id', 'in', list(pycompat.values(cron_ids)))], order='user_id').ids
        expected_ids = [cron_ids[l] for l in 'ABC']
        self.assertEqual(ids, expected_ids)
Example #41
 def _compute_report_balance(self, reports):
     '''returns a dictionary with key=the ID of a record and value=the credit, debit and balance amount
        computed for this record. If the record is of type :
            'accounts' : it's the sum of the linked accounts
            'account_type' : it's the sum of leaf accounts with such an account_type
            'account_report' : it's the amount of the related report
            'sum' : it's the sum of the children of this record (aka a 'view' record)'''
     res = {}
     fields = ['credit', 'debit', 'balance']
     for report in reports:
         if report.id in res:
             continue
         res[report.id] = dict((fn, 0.0) for fn in fields)
         if report.type == 'accounts':
             # it's the sum of the linked accounts
             res[report.id]['account'] = self._compute_account_balance(report.account_ids)
             for value in pycompat.values(res[report.id]['account']):
                 for field in fields:
                     res[report.id][field] += value.get(field)
         elif report.type == 'account_type':
             # it's the sum of the leaf accounts with such an account type
             accounts = self.env['account.account'].search([('user_type_id', 'in', report.account_type_ids.ids)])
             res[report.id]['account'] = self._compute_account_balance(accounts)
             for value in pycompat.values(res[report.id]['account']):
                 for field in fields:
                     res[report.id][field] += value.get(field)
         elif report.type == 'account_report' and report.account_report_id:
             # it's the amount of the linked report
             res2 = self._compute_report_balance(report.account_report_id)
             for key, value in pycompat.items(res2):
                 for field in fields:
                     res[report.id][field] += value[field]
         elif report.type == 'sum':
             # it's the sum of the children of this account.report
             res2 = self._compute_report_balance(report.children_ids)
             for key, value in pycompat.items(res2):
                 for field in fields:
                     res[report.id][field] += value[field]
     return res
Example #42
 def _compute_percentage_satisfaction(self):
     for record in self:
         dt = fields.Datetime.to_string(datetime.utcnow() -
                                        timedelta(days=7))
         repartition = record.channel_ids.rating_get_grades([('create_date',
                                                              '>=', dt)])
         total = sum(pycompat.values(repartition))
         if total > 0:
             happy = repartition['great']
             record.rating_percentage_satisfaction = (
                 (happy * 100) / total) if happy > 0 else 0
         else:
             record.rating_percentage_satisfaction = -1
Example #43
 def channel_fetch_preview(self):
     """ Return the last message of the given channels """
     self._cr.execute("""
         SELECT mail_channel_id AS id, MAX(mail_message_id) AS message_id
         FROM mail_message_mail_channel_rel
         WHERE mail_channel_id IN %s
         GROUP BY mail_channel_id
         """, (tuple(self.ids),))
     channels_preview = dict((r['message_id'], r) for r in self._cr.dictfetchall())
     last_messages = self.env['mail.message'].browse(channels_preview).message_format()
     for message in last_messages:
         channel = channels_preview[message['id']]
         del(channel['message_id'])
         channel['last_message'] = message
     return list(pycompat.values(channels_preview))
Example #44
 def taxes_to_csv(self):
     writer = csv.writer(open('account.tax.template-%s.csv' %
                              self.suffix, 'wb'))
     taxes_iterator = self.iter_taxes()
     keys = next(taxes_iterator)
     writer.writerow(keys[3:] + ['sequence'])
     seq = 100
     for row in sorted(taxes_iterator, key=lambda r: r['description']):
         if not _is_true(row['active']):
             continue
         seq += 1
         if row['parent_id:id']:
             cur_seq = seq + 1000
         else:
             cur_seq = seq
         writer.writerow(list(pycompat.imap(_e, list(pycompat.values(row))[3:])) + [cur_seq])
Example #45
 def validate_multiple_choice(self, post, answer_tag):
     self.ensure_one()
     errors = {}
     if self.constr_mandatory:
         answer_candidates = dict_keys_startswith(post, answer_tag)
         comment_flag = answer_candidates.pop(("%s_%s" % (answer_tag, -1)), None)
         if self.comments_allowed:
             comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
         # Preventing answers with blank value
         if all(not answer.strip() for answer in pycompat.values(answer_candidates)) and answer_candidates:
             errors.update({answer_tag: self.constr_error_msg})
         # There is no answer nor any comment (if comments count as an answer)
         if not answer_candidates and self.comment_count_as_answer and (not comment_flag or not comment_answer):
             errors.update({answer_tag: self.constr_error_msg})
         # There is no answer at all
         if not answer_candidates and not self.comment_count_as_answer:
             errors.update({answer_tag: self.constr_error_msg})
     return errors
Example #46
File: graph.py Project: RoganW/odoo
    def update_from_db(self, cr):
        if not len(self):
            return
        # update the graph with values from the database (if they exist)
        ## First, we set the default values for each package in the graph
        additional_data = {key: {'id': 0, 'state': 'uninstalled', 'dbdemo': False, 'installed_version': None} for key in pycompat.keys(self)}
        ## Then we get the values from the database
        cr.execute('SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version'
                   '  FROM ir_module_module'
                   ' WHERE name IN %s',(tuple(additional_data),)
                   )

        ## and we update the default values with values from the database
        additional_data.update((x['name'], x) for x in cr.dictfetchall())

        for package in pycompat.values(self):
            for k, v in pycompat.items(additional_data[package.name]):
                setattr(package, k, v)
Example #47
File: main.py Project: RoganW/odoo
    def channel_rating(self, channel, **kw):
        # get the last 100 ratings and the repartition per grade
        ratings = request.env['rating.rating'].search([('res_model', '=', 'mail.channel'), ('res_id', 'in', channel.sudo().channel_ids.ids)], order='create_date desc', limit=100)
        repartition = channel.sudo().channel_ids.rating_get_grades()

        # compute percentage
        percentage = dict.fromkeys(['great', 'okay', 'bad'], 0)
        for grade in repartition:
            percentage[grade] = repartition[grade] * 100 / sum(pycompat.values(repartition)) if sum(pycompat.values(repartition)) else 0

        # the value dict to render the template
        values = {
            'channel': channel,
            'ratings': ratings,
            'team': channel.sudo().user_ids,
            'percentage': percentage
        }
        return request.render("website_livechat.channel_page", values)
Example #48
    def test_12_m2o_order_loop_self(self):
        Cats = self.env['ir.module.category']
        cat_ids = {}
        def create(name, **kw):
            cat_ids[name] = Cats.create(dict(kw, name=name)).id

        self.patch_order('ir.module.category', 'parent_id desc, name')

        create('A')
        create('B', parent_id=cat_ids['A'])
        create('C', parent_id=cat_ids['A'])
        create('D')
        create('E', parent_id=cat_ids['D'])
        create('F', parent_id=cat_ids['D'])

        expected_ids = [cat_ids[x] for x in 'ADEFBC']
        found_ids = Cats.search([('id', 'in', list(pycompat.values(cat_ids)))]).ids
        self.assertEqual(found_ids, expected_ids)