Example No. 1
    def descendants(self, model_names, *kinds):
        """ Return the models corresponding to ``model_names`` and all those
        that inherit/inherits from them.
        """
        assert all(kind in ('_inherit', '_inherits') for kind in kinds)
        funcs = [attrgetter(kind + '_children') for kind in kinds]

        models = OrderedSet()
        queue = deque(model_names)
        while queue:
            model = self[queue.popleft()]
            models.add(model._name)
            for func in funcs:
                queue.extend(func(model))
        return models
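
A minimal usage sketch (hedged: `registry` and the seed model name below are assumptions for illustration, not part of the example):

    # Breadth-first: the seed names come first, then every model reachable
    # through the requested inheritance kinds.
    names = registry.descendants(['res.partner'], '_inherit')
    # Follow both classic and delegation inheritance:
    names = registry.descendants(['res.partner'], '_inherit', '_inherits')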
Example No. 2
    def __call__(self, path=None, path_args=None, **kw):
        path = path or self.path
        for key, value in self.args.items():
            kw.setdefault(key, value)
        path_args = OrderedSet(path_args or []) | self.path_args
        paths, fragments = {}, []
        for key, value in kw.items():
            if value and key in path_args:
                if isinstance(value, models.BaseModel):
                    paths[key] = slug(value)
                else:
                    paths[key] = u"%s" % value
            elif value:
                if isinstance(value, (list, set)):
                    fragments.append(werkzeug.url_encode([(key, item) for item in value]))
                else:
                    fragments.append(werkzeug.url_encode([(key, value)]))
        for key in path_args:
            value = paths.get(key)
            if value is not None:
                path += '/' + key + '/' + value
        if fragments:
            path += '?' + '&'.join(fragments)
        return path
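
This reads like a website URL builder (compare Odoo's `QueryURL` helper). A hedged sketch of how it might be used, assuming such a class whose constructor stores `path`, `args` and `path_args`:

    url = QueryURL('/shop', ['category'])   # hypothetical constructor
    url(category=category_record, order='name asc')
    # -> '/shop/category/<category-slug>?order=name+asc'
    # Keys listed in path_args become path segments (records are slugged);
    # every other truthy value is url-encoded after the '?'.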
Example No. 3
    def _free_reservation(self, product_id, location_id, quantity, lot_id=None, package_id=None, owner_id=None, ml_ids_to_ignore=None):
        """ When editing a done move line or validating one with some forced quantities, it is
        possible to impact quants that were not reserved. It is therefore necessary to edit or
        unlink the move lines that reserved a quantity now unavailable.

        :param ml_ids_to_ignore: OrderedSet of `stock.move.line` ids that should NOT be unreserved
        """
        self.ensure_one()

        if ml_ids_to_ignore is None:
            ml_ids_to_ignore = OrderedSet()
        ml_ids_to_ignore |= self.ids

        # Check the available quantity, with the `strict` kw set to `True`. If the available
        # quantity is greater than the quantity now unavailable, there is nothing to do.
        available_quantity = self.env['stock.quant']._get_available_quantity(
            product_id, location_id, lot_id=lot_id, package_id=package_id, owner_id=owner_id, strict=True
        )
        if quantity > available_quantity:
            # We now have to find the move lines that reserved our now unavailable quantity. We
            # take care to exclude ourselves and the move lines where work has already been done.
            outdated_move_lines_domain = [
                ('state', 'not in', ['done', 'cancel']),
                ('product_id', '=', product_id.id),
                ('lot_id', '=', lot_id.id if lot_id else False),
                ('location_id', '=', location_id.id),
                ('owner_id', '=', owner_id.id if owner_id else False),
                ('package_id', '=', package_id.id if package_id else False),
                ('product_qty', '>', 0.0),
                ('id', 'not in', tuple(ml_ids_to_ignore)),
            ]

            # We take the current picking first, then the pickings with the latest scheduled date
            current_picking_first = lambda cand: (
                cand.picking_id != self.move_id.picking_id,
                -(cand.picking_id.scheduled_date or cand.move_id.date).timestamp()
                if cand.picking_id or cand.move_id
                else -cand.id,
            )
            outdated_candidates = self.env['stock.move.line'].search(outdated_move_lines_domain).sorted(current_picking_first)

            # As the move's state is not computed over the move lines, we'll have to manually
            # recompute the moves whose lines we adapted.
            move_to_recompute_state = self.env['stock.move']

            rounding = self.product_uom_id.rounding
            for candidate in outdated_candidates:
                if float_compare(candidate.product_qty, quantity, precision_rounding=rounding) <= 0:
                    quantity -= candidate.product_qty
                    move_to_recompute_state |= candidate.move_id
                    if candidate.qty_done:
                        candidate.product_uom_qty = 0.0
                    else:
                        candidate.unlink()
                    if float_is_zero(quantity, precision_rounding=rounding):
                        break
                else:
                    # split this move line and assign the new part to our extra move
                    quantity_split = float_round(
                        candidate.product_qty - quantity,
                        precision_rounding=self.product_uom_id.rounding,
                        rounding_method='UP')
                    candidate.product_uom_qty = self.product_id.uom_id._compute_quantity(quantity_split, candidate.product_uom_id, rounding_method='HALF-UP')
                    move_to_recompute_state |= candidate.move_id
                    break
            move_to_recompute_state._recompute_state()
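
A hedged sketch of a call site, with an invented extra quantity (the real caller computes it from `qty_done`, as Example No. 4 shows):

    # A done move line consumed 2.0 more units than it had reserved; take
    # that quantity back from other, not-yet-done reservations.
    ml._free_reservation(
        ml.product_id, ml.location_id, 2.0,
        lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)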
Example No. 4
    def _action_done(self):
        """ This method is called during a move's `action_done`. It'll actually move a quant from
        the source location to the destination location, and unreserve if needed in the source
        location.

        This method is intended to be called on all the move lines of a move. It is not
        intended to be called when editing a `done` move (that is what the override of
        `write` is for).
        """
        Quant = self.env['stock.quant']

        # First, we loop over all the move lines to do a preliminary check: `qty_done` should not
        # be negative and, depending on whether a picking type or a linked inventory adjustment
        # is present, we enforce some rules on the `lot_id` field. If `qty_done` is zero, we
        # unlink the line; this is mandatory in order to free the reservation and correctly
        # apply `action_done` on the next move lines.
        ml_ids_tracked_without_lot = OrderedSet()
        ml_ids_to_delete = OrderedSet()
        ml_ids_to_create_lot = OrderedSet()
        for ml in self:
            # Check here if `ml.qty_done` respects the rounding of `ml.product_uom_id`.
            uom_qty = float_round(ml.qty_done, precision_rounding=ml.product_uom_id.rounding, rounding_method='HALF-UP')
            precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')
            qty_done = float_round(ml.qty_done, precision_digits=precision_digits, rounding_method='HALF-UP')
            if float_compare(uom_qty, qty_done, precision_digits=precision_digits) != 0:
                raise UserError(_('The quantity done for the product "%s" doesn\'t respect the rounding precision \
                                  defined on the unit of measure "%s". Please change the quantity done or the \
                                  rounding precision of your unit of measure.') % (ml.product_id.display_name, ml.product_uom_id.name))

            qty_done_float_compared = float_compare(ml.qty_done, 0, precision_rounding=ml.product_uom_id.rounding)
            if qty_done_float_compared > 0:
                if ml.product_id.tracking != 'none':
                    picking_type_id = ml.move_id.picking_type_id
                    if picking_type_id:
                        if picking_type_id.use_create_lots:
                            # If a picking type is linked, we may have to create a production lot on
                            # the fly before assigning it to the move line if the user checked both
                            # `use_create_lots` and `use_existing_lots`.
                            if ml.lot_name:
                                if ml.product_id.tracking == 'lot' and not ml.lot_id:
                                    lot = self.env['stock.production.lot'].search([
                                        ('company_id', '=', ml.company_id.id),
                                        ('product_id', '=', ml.product_id.id),
                                        ('name', '=', ml.lot_name),
                                    ], limit=1)
                                    if lot:
                                        ml.lot_id = lot.id
                                    else:
                                        ml_ids_to_create_lot.add(ml.id)
                                else:
                                    ml_ids_to_create_lot.add(ml.id)
                        elif not picking_type_id.use_create_lots and not picking_type_id.use_existing_lots:
                            # If the user disabled both `use_create_lots` and `use_existing_lots`
                            # checkboxes on the picking type, he's allowed to enter tracked
                            # products without a `lot_id`.
                            continue
                    elif ml.is_inventory:
                        # If an inventory adjustment is linked, the user is allowed to enter
                        # tracked products without a `lot_id`.
                        continue

                    if not ml.lot_id and ml.id not in ml_ids_to_create_lot:
                        ml_ids_tracked_without_lot.add(ml.id)
            elif qty_done_float_compared < 0:
                raise UserError(_('No negative quantities allowed'))
            elif not ml.is_inventory:
                ml_ids_to_delete.add(ml.id)

        if ml_ids_tracked_without_lot:
            mls_tracked_without_lot = self.env['stock.move.line'].browse(ml_ids_tracked_without_lot)
            raise UserError(_('You need to supply a Lot/Serial Number for product: \n - ') +
                              '\n - '.join(mls_tracked_without_lot.mapped('product_id.display_name')))
        ml_to_create_lot = self.env['stock.move.line'].browse(ml_ids_to_create_lot)
        ml_to_create_lot._create_and_assign_production_lot()

        mls_to_delete = self.env['stock.move.line'].browse(ml_ids_to_delete)
        mls_to_delete.unlink()

        mls_todo = (self - mls_to_delete)
        mls_todo._check_company()

        # Now, we can actually move the quant.
        ml_ids_to_ignore = OrderedSet()
        for ml in mls_todo:
            if ml.product_id.type == 'product':
                rounding = ml.product_uom_id.rounding

                # if this move line is force assigned, unreserve elsewhere if needed
                if not ml._should_bypass_reservation(ml.location_id) and float_compare(ml.qty_done, ml.product_uom_qty, precision_rounding=rounding) > 0:
                    qty_done_product_uom = ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id, rounding_method='HALF-UP')
                    extra_qty = qty_done_product_uom - ml.product_qty
                    ml._free_reservation(ml.product_id, ml.location_id, extra_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, ml_ids_to_ignore=ml_ids_to_ignore)
                # unreserve what's been reserved
                if not ml._should_bypass_reservation(ml.location_id) and ml.product_id.type == 'product' and ml.product_qty:
                    try:
                        Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)
                    except UserError:
                        Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)

                # move what's been actually done
                quantity = ml.product_uom_id._compute_quantity(ml.qty_done, ml.move_id.product_id.uom_id, rounding_method='HALF-UP')
                available_qty, in_date = Quant._update_available_quantity(ml.product_id, ml.location_id, -quantity, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)
                if available_qty < 0 and ml.lot_id:
                    # see if we can compensate the negative quants with some untracked quants
                    untracked_qty = Quant._get_available_quantity(ml.product_id, ml.location_id, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)
                    if untracked_qty:
                        taken_from_untracked_qty = min(untracked_qty, abs(quantity))
                        Quant._update_available_quantity(ml.product_id, ml.location_id, -taken_from_untracked_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id)
                        Quant._update_available_quantity(ml.product_id, ml.location_id, taken_from_untracked_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)
                Quant._update_available_quantity(ml.product_id, ml.location_dest_id, quantity, lot_id=ml.lot_id, package_id=ml.result_package_id, owner_id=ml.owner_id, in_date=in_date)
            ml_ids_to_ignore.add(ml.id)
        # Reset the reserved quantity as we just moved it to the destination location.
        mls_todo.with_context(bypass_reservation_update=True).write({
            'product_uom_qty': 0.00,
            'date': fields.Datetime.now(),
        })
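
A hedged sketch of the typical call site, assuming `moves` is a recordset of moves being validated:

    # The docstring requires calling this on all the move lines of a move:
    moves.mapped('move_line_ids')._action_done()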
Example No. 5
    def _read_group_raw(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        self.check_access_rights('read')
        query = self._where_calc(domain)
        fields = fields or [f.name for f in self._fields.itervalues() if f.store]

        groupby = [groupby] if isinstance(groupby, basestring) else list(OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys = [self._read_group_process_groupby(gb, query) for gb in groupby_list]
        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, 'read')
        for gb in groupby_fields:
            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
            assert gb in self._fields, "Unknown field %r in 'groupby'" % gb
            gb_field = self._fields[gb].base_field
            assert gb_field.store and gb_field.column_type, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        aggregated_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.group_operator
            if field.base_field.store and field.base_field.column_type
        ]

        char_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.type == 'char'
            if field.base_field.store and field.base_field.column_type
        ]

        bool_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.type == 'boolean'
            if field.base_field.store and field.base_field.column_type
        ]


        date_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.type == 'datetime' or field.type == 'date'
            if field.base_field.store and field.base_field.column_type
        ]

        many2one_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.type == 'many2one'
            if field.base_field.store and field.base_field.column_type
        ]

        selection_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.type == 'selection'
            if field.base_field.store and field.base_field.column_type
        ]

        field_formatter = lambda f: (
            self._fields[f].group_operator,
            self._inherits_join_calc(self._table, f, query),
            f,
        )
        select_terms = ['%s(%s) AS "%s" ' % field_formatter(f) for f in aggregated_fields]

        for f in char_fields:
            select_terms.append("ARRAY_AGG(DISTINCT (%s)) AS %s " %(self._inherits_join_calc(self._table, f, query),f))

        for f in bool_fields:
            select_terms.append("COUNT(CASE WHEN %s THEN 1 END) AS %s " %(self._inherits_join_calc(self._table, f, query),f))

        for f in date_fields:
            select_terms.append("ARRAY_AGG(DISTINCT (COALESCE(to_char(%s,'DD-MM-YYYY'),'')) ) AS %s " %(self._inherits_join_calc(self._table, f, query),f))

        for f in selection_fields:
            if f == 'gender':
                select_terms.append(u"ARRAY_AGG(DISTINCT (CASE %s WHEN 'nam' THEN 'Nam' WHEN 'nu' THEN 'Nữ' END)) AS %s " %(self._inherits_join_calc(self._table, f, query),f))
            elif f == 'enter_source':
                select_terms.append(
                    u"ARRAY_AGG(DISTINCT (CASE %s WHEN '1' THEN 'Ngắn hạn' WHEN '2' THEN 'Dài hạn' WHEN '3' THEN 'Ban chỉ đạo' ELSE '' END)) AS %s " % (
                    self._inherits_join_calc(self._table, f, query), f))

        for f in many2one_fields:
            if f == 'dispatchcom':
                select_terms.append(u"(SELECT name_short from %s WHERE %s.id = min(%s)) AS %s" %(self._fields[f].comodel_name.replace(".", "_"),self._fields[f].comodel_name.replace(".", "_"),self._inherits_join_calc(self._table, f, query),f))
            else:
                select_terms.append(u"(SELECT name from %s WHERE %s.id = min(%s)) AS %s" % (
                self._fields[f].comodel_name.replace(".", "_"), self._fields[f].comodel_name.replace(".", "_"),
                self._inherits_join_calc(self._table, f, query), f))

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))

        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2 or not self._context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

        query = """
                SELECT min("%(table)s".id) AS id, count("%(table)s".id) AS "%(count_field)s" %(extra_fields)s
                FROM %(from)s
                %(where)s
                %(groupby)s
                %(orderby)s
                %(limit)s
                %(offset)s
            """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            'where': prefix_term('WHERE', where_clause),
            'groupby': prefix_terms('GROUP BY', groupby_terms),
            'orderby': prefix_terms('ORDER BY', orderby_terms),
            'limit': prefix_term('LIMIT', int(limit) if limit else None),
            'offset': prefix_term('OFFSET', int(offset) if limit else None),
        }
        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data

        many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
        if many2onefields:
            data_ids = [r['id'] for r in fetched_data]
            many2onefields = list(set(many2onefields))
            data_dict = {d['id']: d for d in self.browse(data_ids).read(many2onefields)}
            for d in fetched_data:
                d.update(data_dict[d['id']])

        data = map(lambda r: {k: self._read_group_prepare_data(k, v, groupby_dict) for k, v in r.iteritems()},
                   fetched_data)
        result = [self._read_group_format_result(d, annotated_groupbys, groupby, domain) for d in data]
        if lazy:
            # Right now, read_group only fills results in lazy mode (by default).
            # If you need to have the empty groups in 'eager' mode, then the
            # method _read_group_fill_results needs to be completely reimplemented
            # in a sane way.
            result = self._read_group_fill_results(
                domain, groupby_fields[0], groupby[len(annotated_groupbys):],
                aggregated_fields, count_field, result, read_group_order=order,
            )
        return result
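
The distinguishing part of this override is the per-type SELECT terms it injects; a hedged illustration of their shapes (table and field names hypothetical, expressions abbreviated):

    # char:     ARRAY_AGG(DISTINCT ("tbl"."name")) AS name
    # boolean:  COUNT(CASE WHEN "tbl"."active" THEN 1 END) AS active
    # date:     ARRAY_AGG(DISTINCT (COALESCE(to_char("tbl"."d",'DD-MM-YYYY'),''))) AS d
    # many2one: (SELECT name FROM res_partner WHERE res_partner.id = min("tbl"."partner_id")) AS partner_id
    groups = records._read_group_raw(
        domain=[], fields=['name', 'active', 'state'], groupby=['state'])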
Example No. 6
    def read_group_qualified(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        self.check_access_rights('read')
        # gun_filter = []
        w_clause_centron_part2 = ""
        w_clause_centron_part1 = ""
        d = list()
        for r in domain:
            if r[0] == 'measure_result':
                continue
            if r[0] == 'gun_id':
                # gun_filter = [r[2]] if isinstance(r[2], int) else r[2]
                if len(w_clause_centron_part1):
                    w_clause_centron_part1 += ' and gun_id = {0}'.format(r[2])
                else:
                    w_clause_centron_part1 += 'where gun_id = {0}'.format(r[2])
                w_clause_centron_part2 += ' and r1.gun_id = {0}'.format(r[2])
                continue
            if r[0] == 'vin':
                # filter by vin
                if len(w_clause_centron_part1):
                    w_clause_centron_part1 += " and vin ilike '%{0}%'".format(r[2])
                else:
                    w_clause_centron_part1 += "where vin ilike '%{0}%'".format(r[2])
                w_clause_centron_part2 += " and r1.vin ilike '%{0}%'".format(r[2])
                continue
            d.append(r)
        domain = d
        query = self._where_calc(domain)
        fields = fields or [f.name for f in self._fields.itervalues() if f.store]

        groupby = [groupby] if isinstance(groupby, basestring) else list(OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys1 = [self._read_group_process_groupby(gb, query) for gb in groupby_list]
        one_time_pass_state = False
        for gb in annotated_groupbys1:
            if gb['field'] == 'final_pass':
                gb['qualified_field'] = "\'final\'"
            if gb['field'] == 'one_time_pass':
                one_time_pass_state = True
                gb['qualified_field'] = "\'one time\'"
            if gb['field'] == 'control_date':
                gb['qualified_field'] = "d1.control_date"
                gb['tz_convert'] = False

        if one_time_pass_state:
            annotated_groupbys = [gb for gb in annotated_groupbys1 if gb['field'] != 'final_pass']
        else:
            annotated_groupbys = [gb for gb in annotated_groupbys1 if gb['field'] != 'one_time_pass']

        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, 'read')
        for gb in groupby_fields:
            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
            assert gb in self._fields, "Unknown field %r in 'groupby'" % gb
            gb_field = self._fields[gb].base_field
            assert gb_field.store and gb_field.column_type, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        aggregated_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.group_operator
            if field.base_field.store and field.base_field.column_type
        ]

        field_formatter = lambda f: (
            self._fields[f].group_operator,
            self._inherits_join_calc(self._table, f, query),
            f,
        )
        select_terms = ['%s(%s) AS "%s" ' % field_formatter(f) for f in aggregated_fields]

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))

        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2 or not self._context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

        from_clause = """
                                        (select sum(b.sequence) as sequence, date_trunc('%(interval)s', timezone('Asia/Chongqing', timezone('UTC', mw.date_planned_start))) as control_date from
                                        (select  vin,product_id,min(control_date) as date_planned_start  from operation_result
                                        %(where_clause_part1)s
                                        group by vin,product_id) mw
                                        left join
                                        (select  a.product_id,count(*) as sequence from mrp_bom a
                                        left join mrp_bom_line b on a.id=b.bom_id
                                        group by a.product_id) b on mw.product_id=b.product_id
                                        group by control_date
                                        ) d1
                                       ,
                                        (SELECT  count (mw.batch) as sequence,  date_trunc('%(interval)s', timezone('Asia/Chongqing', timezone('UTC', mw.date_planned_start))) as control_date FROM
                                        (select DISTINCT mw.date_planned_start,r1.VIN,r1.batch from operation_result r1
                                        LEFT JOIN (select  vin,product_id,min(control_date) as date_planned_start  from operation_result
                                        group by vin,product_id) mw ON R1.vin=MW.vin
                                        where   %(interval2)s and  r1.measure_result in ('ok','nok') %(where_clause_part2)s) mw
                                        group by control_date
                                        ) d2 
                                    """ % {
            'interval': (annotated_groupbys[0]['groupby'].split(':')[-1]
                         if annotated_groupbys[0]['field'] == 'control_date'
                         else annotated_groupbys[1]['groupby'].split(':')[-1]),
            'interval2': ''' r1.one_time_pass='******' ''' if one_time_pass_state else ''' r1.final_pass='******' ''',
            'where_clause_part1': w_clause_centron_part1,
            'where_clause_part2': w_clause_centron_part2
        }
        if where_clause == '':
            where_clause = 'd1.control_date = d2.control_date'
        else:
            if where_clause.find('''"operation_result"."control_date"''') > 0:
                where_clause = where_clause.replace('''"operation_result"."control_date"''', '''"d1"."control_date"''')
            where_clause += ' AND d1.control_date = d2.control_date'

        query = """
                        SELECT round(round(d2.sequence,2)/ NULLIF(d1.sequence, 0) * 100.0, 4) AS "%(count_field)s" %(extra_fields)s
                        FROM %(from)s
                        %(where)s
                        %(orderby)s
                        %(limit)s
                        %(offset)s
                    """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            'where': prefix_term('WHERE', where_clause),
            'orderby': 'ORDER BY ' + count_field,
            'limit': prefix_term('LIMIT', int(limit) if limit else None),
            'offset': prefix_term('OFFSET', int(offset) if limit else None),
        }

        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data

        many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
        if many2onefields:
            data_ids = [r['id'] for r in fetched_data]
            many2onefields = list(set(many2onefields))
            data_dict = {d['id']: d for d in self.browse(data_ids).read(many2onefields)}
            for d in fetched_data:
                d.update(data_dict[d['id']])

        data = map(lambda r: {k: self._read_group_prepare_data(k, v, groupby_dict) for k, v in r.iteritems()},
                   fetched_data)
        result = [self._read_group_format_result_centron(d, annotated_groupbys, groupby, domain) for d in data]

        return result
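
A hedged sketch of a call, assuming an `operation.result`-like model; note that `measure_result` leaves are dropped from the domain and `gun_id`/`vin` leaves are diverted into the hand-built subquery WHERE clauses:

    rows = results.read_group_qualified(
        domain=[('gun_id', '=', 7)],
        fields=['control_date', 'one_time_pass'],
        groupby=['control_date:week', 'one_time_pass'],
        lazy=False)
    # The count field per date bucket is d2/d1 * 100: distinct measured
    # batches over the expected number of BOM operations.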
Example No. 7
    def read_group_lacking_by_gun(self,
                                  domain,
                                  fields,
                                  groupby,
                                  offset=0,
                                  limit=None,
                                  orderby=False,
                                  lazy=True):
        self.check_access_rights('read')
        query = self._where_calc(domain)
        fields = fields or [
            f.name for f in self._fields.itervalues() if f.store
        ]

        groupby = [groupby] if isinstance(groupby, basestring) else list(
            OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys = [
            self._read_group_process_groupby(gb, query) for gb in groupby_list
        ]
        for gb in annotated_groupbys:
            if gb['field'] == 'lacking':
                gb['qualified_field'] = "\'lack\'"
            if gb['field'] == 'gun_id':
                gb['qualified_field'] = "a1.equip_id"
        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, 'read')
        for gb in groupby_fields:
            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
            assert gb in self._fields, "Unknown field %r in 'groupby'" % gb
            gb_field = self._fields[gb].base_field
            assert gb_field.store and gb_field.column_type, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        aggregated_fields = [
            f for f in fields if f != 'sequence' if f not in groupby_fields
            for field in [self._fields.get(f)] if field if field.group_operator
            if field.base_field.store and field.base_field.column_type
        ]

        field_formatter = lambda f: (
            self._fields[f].group_operator,
            self._inherits_join_calc(self._table, f, query),
            f,
        )
        select_terms = [
            '%s(%s) AS "%s" ' % field_formatter(f) for f in aggregated_fields
        ]

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' %
                                (gb['qualified_field'], gb['groupby']))

        groupby_terms, orderby_terms = self._read_group_prepare(
            order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2
                     or not self._context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(
                groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)
                                              ) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' %
                                            (prefix, term)) if term else ''

        where_clause2 = '''r1.measure_result in ('ok', 'nok')'''  # initial value

        if where_clause == '':
            where_clause2 = '''r1.measure_result in ('ok', 'nok')'''
        else:
            if where_clause.find('''"operation_result"."control_date"''') > 0:
                where_clause = where_clause.replace(
                    '''"operation_result"."control_date"''',
                    '''"mw"."date_planned_start"''')

                where_clause_params.extend(where_clause_params[:])
            where_clause2 = where_clause + ''' AND r1.measure_result in ('ok', 'nok')'''
        from_clause = '''
                            (select id as equip_id,serial_no as equip_sn, name as equip_name
                              from maintenance_equipment, d1
                              where category_id = d1.gc_id
                             ) a1
                        left join (select a.gun_id,count(a.sequence)  as sequence from mrp_wo_consu a
                                          left join mrp_workorder mw on a.workorder_id = mw.id
                                          %(where)s
                                          group by gun_id) a on a1.equip_id = a.gun_id
                        left join (select gun_id,count(batch) as sequence from
                                          (select distinct r1.workorder_id,r1.gun_id,r1.batch from operation_result r1
                                                left join mrp_workorder mw on r1.workorder_id = mw.id
                                                %(where2)s
                                          ) a group by gun_id) b   on a.gun_id = b.gun_id
                ''' % {
            'where': prefix_term('WHERE', where_clause),
            'where2': prefix_term('WHERE', where_clause2),
        }

        query = """
                    with d1 as ( select id as gc_id from maintenance_equipment_category where name = 'Gun')
                    SELECT  round(round(COALESCE(a.sequence, 0) - COALESCE(b.sequence, 0), 2) / COALESCE(a.sequence, 1) * 100.0, 2) AS "%(count_field)s" %(extra_fields)s
                            FROM %(from)s
                            %(orderby)s
                            %(limit)s
                            %(offset)s
                        """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            # 'where': prefix_term('WHERE', where_clause),
            'orderby': 'ORDER BY ' + count_field,
            'limit': prefix_term('LIMIT',
                                 int(limit) if limit else None),
            'offset': prefix_term('OFFSET',
                                  int(offset) if limit else None),
        }
        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data

        for d in fetched_data:
            n = {
                'gun_id':
                self.env['maintenance.equipment'].browse(
                    d['gun_id']).name_get()[0]
            }
            d.update(n)

        data = map(
            lambda r: {
                k: self._read_group_prepare_data(k, v, groupby_dict)
                for k, v in r.iteritems()
            }, fetched_data)
        result = [
            self._read_group_format_result_centron(d, annotated_groupbys,
                                                   groupby, domain)
            for d in data
        ]

        return result
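
A hedged sketch with the grouping this method special-cases ('gun_id' and 'lacking'):

    rows = results.read_group_lacking_by_gun(
        domain=[], fields=['gun_id', 'lacking'],
        groupby=['gun_id', 'lacking'], lazy=False)
    # Per gun: (planned - measured) / planned * 100, i.e. the percentage
    # of planned operations that still lack an ok/nok result.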
Example No. 8
    def _build_component(cls, registry):
        """ Instantiate a given Component in the components registry.

        This method is called at the end of Odoo's registry build.  The
        caller is :meth:`component.builder.ComponentBuilder.load_components`.

        It generates new classes, which will be the Component classes we will
        be using.  The new classes are generated following the inheritance
        of ``_inherit``. It ensures that the ``__bases__`` of the generated
        Component classes follow the ``_inherit`` chain.

        Once a Component class is created, it is added to the Component Registry
        (:class:`ComponentRegistry`), so it will be available for
        lookups.

        At the end of new class creation, a hook method
        :meth:`_complete_component_build` is called, so you can further
        customize the created components. An example can be found in
        :meth:`odoo.addons.connector.components.mapper.Mapper._complete_component_build`

        The following code is roughly the same as Odoo's code for
        building Models.

        """

        # In the simplest case, the component's registry class inherits from
        # cls and the other classes that define the component in a flat
        # hierarchy.  The registry contains the instance ``component`` (on the
        # left). Its class, ``ComponentClass``, carries inferred metadata that
        # is shared between all the component's instances for this registry
        # only.
        #
        #   class A1(Component):                    Component
        #       _name = 'a'                           / | \
        #                                            A3 A2 A1
        #   class A2(Component):                      \ | /
        #       _inherit = 'a'                    ComponentClass
        #
        #   class A3(Component):
        #       _inherit = 'a'
        #
        # When a component is extended by '_inherit', its base classes are
        # modified to include the current class and the other inherited
        # component classes.
        # Note that we actually inherit from other ``ComponentClass``, so that
        # extensions to an inherited component are immediately visible in the
        # current component class, like in the following example:
        #
        #   class A1(Component):
        #       _name = 'a'                          Component
        #                                            /  / \  \
        #   class B1(Component):                    /  A2 A1  \
        #       _name = 'b'                        /   \  /    \
        #                                         B2 ComponentA B1
        #   class B2(Component):                   \     |     /
        #       _name = 'b'                         \    |    /
        #       _inherit = ['b', 'a']                \   |   /
        #                                            ComponentB
        #   class A2(Component):
        #       _inherit = 'a'

        # determine inherited components
        parents = cls._inherit
        if isinstance(parents, str):
            parents = [parents]
        elif parents is None:
            parents = []

        if cls._name in registry and not parents:
            raise TypeError("Component %r (in class %r) already exists. "
                            "Consider using _inherit instead of _name "
                            "or using a different _name." % (cls._name, cls))

        # determine the component's name
        name = cls._name or (len(parents) == 1 and parents[0])

        if not name:
            raise TypeError("Component %r must have a _name" % cls)

        # all components except 'base' implicitly inherit from 'base'
        if name != "base":
            parents = list(parents) + ["base"]

        # create or retrieve the component's class
        if name in parents:
            if name not in registry:
                raise TypeError("Component %r does not exist in registry." %
                                name)
            ComponentClass = registry[name]
            ComponentClass._build_component_check_base(cls)
            check_parent = ComponentClass._build_component_check_parent
        else:
            ComponentClass = type(
                name,
                (AbstractComponent, ),
                {
                    "_name": name,
                    "_register": False,
                    # names of children component
                    "_inherit_children": OrderedSet(),
                },
            )
            check_parent = cls._build_component_check_parent

        # determine all the classes the component should inherit from
        bases = LastOrderedSet([cls])
        for parent in parents:
            if parent not in registry:
                raise TypeError(
                    "Component %r inherits from non-existing component %r." %
                    (name, parent))
            parent_class = registry[parent]
            if parent == name:
                for base in parent_class.__bases__:
                    bases.add(base)
            else:
                check_parent(cls, parent_class)
                bases.add(parent_class)
                parent_class._inherit_children.add(name)
        ComponentClass.__bases__ = tuple(bases)

        ComponentClass._complete_component_build()

        registry[name] = ComponentClass

        return ComponentClass
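
A hedged sketch of the declaration pattern this builder resolves (the classes and the driving calls below are illustrative; in practice :meth:`ComponentBuilder.load_components` does the driving):

    class Base(AbstractComponent):
        _name = 'base'              # implicit ancestor of every component

    class A1(Component):
        _name = 'a'

    class A2(Component):
        _inherit = 'a'              # same name: extends 'a' in place

    registry = {}                   # stand-in for a ComponentRegistry
    Base._build_component(registry)
    A1._build_component(registry)   # new ComponentClass 'a'; bases: (A1, base)
    A2._build_component(registry)   # rebases 'a'; bases: (A2, A1, base)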
Example No. 9
    def _read_group_raw(self,
                        domain,
                        fields,
                        groupby,
                        offset=0,
                        limit=None,
                        orderby=False,
                        lazy=True):
        self.check_access_rights("read")
        query = self._where_calc(domain)
        fields = fields or [f.name for f in self._fields.values() if f.store]

        groupby = [groupby] if isinstance(groupby, str) else list(
            OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys = [
            self._read_group_process_groupby(gb, query) for gb in groupby_list
        ]
        groupby_fields = [g["field"] for g in annotated_groupbys]
        order = orderby or ",".join([g for g in groupby_list])
        groupby_dict = {gb["groupby"]: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, "read")
        for gb in groupby_fields:
            if gb not in self._fields:
                raise UserError(_("Unknown field %r in 'groupby'") % gb)
            gb_field = self._fields[gb].base_field
            # TODO: call super instead of overwriting it completely!
            if not ((gb_field.store and gb_field.column_type) or
                    (gb_field.type == "many2many")):
                raise UserError(
                    _("""Fields in 'groupby' must be
                        database-persisted fields (no computed fields)"""))

        aggregated_fields = []
        select_terms = []
        fnames = []  # list of fields to flush

        for fspec in fields:
            if fspec == "sequence":
                continue
            if fspec == "__count":
                # the web client sometimes adds this pseudo-field in the list
                continue

            match = regex_field_agg.match(fspec)
            if not match:
                raise UserError(_("Invalid field specification %r.", fspec))

            name, func, fname = match.groups()
            if func:
                # we have either 'name:func' or 'name:func(fname)'
                fname = fname or name
                field = self._fields.get(fname)
                if not field:
                    raise ValueError("Invalid field {!r} on model {!r}".format(
                        fname, self._name))
                if not (field.base_field.store
                        and field.base_field.column_type):
                    raise UserError(_("Cannot aggregate field %r.", fname))
                if func not in VALID_AGGREGATE_FUNCTIONS:
                    raise UserError(_("Invalid aggregation function %r.",
                                      func))
            else:
                # we have 'name', retrieve the aggregator on the field
                field = self._fields.get(name)
                if not field:
                    raise ValueError("Invalid field {!r} on model {!r}".format(
                        name, self._name))
                if not (field.base_field.store and field.base_field.column_type
                        and field.group_operator):
                    continue
                func, fname = field.group_operator, name

            fnames.append(fname)

            if fname in groupby_fields:
                continue
            if name in aggregated_fields:
                raise UserError(_("Output name %r is used twice.", name))
            aggregated_fields.append(name)

            expr = self._inherits_join_calc(self._table, fname, query)
            if func.lower() == "count_distinct":
                term = 'COUNT(DISTINCT {}) AS "{}"'.format(expr, name)
            else:
                term = '{}({}) AS "{}"'.format(func, expr, name)
            select_terms.append(term)
        for gb in annotated_groupbys:
            select_terms.append('{} as "{}" '.format(gb["qualified_field"],
                                                     gb["groupby"]))

        self._flush_search(domain, fields=fnames + groupby_fields)

        groupby_terms, orderby_terms = self._read_group_prepare(
            order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2
                     or not self._context.get("group_by_no_leaf")):
            count_field = groupby_fields[0] if len(
                groupby_fields) >= 1 else "_"
        else:
            count_field = "_"
        count_field += "_count"

        prefix_terms = (lambda prefix, terms: (prefix + " " + ",".join(terms))
                        if terms else "")
        prefix_term = (lambda prefix, term: ("{} {}".format(prefix, term))
                       if term else "")

        query = """
            SELECT min("%(table)s".id) AS id, count("%(table)s".id) AS
            "%(count_field)s" %(extra_fields)s
            FROM %(from)s
            %(where)s
            %(groupby)s
            %(orderby)s
            %(limit)s
            %(offset)s
        """ % {
            "table": self._table,
            "count_field": count_field,
            "extra_fields": prefix_terms(",", select_terms),
            "from": from_clause,
            "where": prefix_term("WHERE", where_clause),
            "groupby": prefix_terms("GROUP BY", groupby_terms),
            "orderby": prefix_terms("ORDER BY", orderby_terms),
            "limit": prefix_term("LIMIT",
                                 int(limit) if limit else None),
            "offset": prefix_term("OFFSET",
                                  int(offset) if limit else None),
        }
        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data

        self._read_group_resolve_many2one_fields(fetched_data,
                                                 annotated_groupbys)

        data = [{
            k: self._read_group_prepare_data(k, v, groupby_dict)
            for k, v in r.items()
        } for r in fetched_data]

        fill_temporal = self.env.context.get("fill_temporal")
        if (data and fill_temporal) or isinstance(fill_temporal, dict):
            # fill_temporal = {} is equivalent to fill_temporal = True
            # if fill_temporal is a dictionary and there is no data, there is
            # a chance that we
            # want to display empty columns anyway, so we should apply the
            # fill_temporal logic
            if not isinstance(fill_temporal, dict):
                fill_temporal = {}
            data = self._read_group_fill_temporal(data, groupby,
                                                  aggregated_fields,
                                                  annotated_groupbys,
                                                  **fill_temporal)
        result = [
            self._read_group_format_result(d, annotated_groupbys, groupby,
                                           domain) for d in data
        ]
        if lazy:
            # Right now, read_group only fills results in lazy mode (by default).
            # If you need to have the empty groups in 'eager' mode, then the
            # method _read_group_fill_results needs to be completely reimplemented
            # in a sane way.
            result = self._read_group_fill_results(
                domain,
                groupby_fields[0],
                groupby[len(annotated_groupbys):],
                aggregated_fields,
                count_field,
                result,
                read_group_order=order,
            )
        return result
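
Unlike Example No. 5, this version parses aggregate specifications out of the field list (via `regex_field_agg`); a hedged sketch of the accepted forms (field names hypothetical):

    # 'amount'                 -> aggregated with the field's group_operator
    # 'total:sum'              -> SUM("tbl"."total") AS total
    # 'total:sum(amount)'      -> SUM("tbl"."amount") AS total
    # 'n:count_distinct(partner_id)' -> COUNT(DISTINCT "tbl"."partner_id") AS n
    groups = records._read_group_raw(
        domain=[], fields=['state', 'total:sum(amount)'], groupby=['state'])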
Example No. 10
    def onchange(self, values, field_name, field_onchange):
        env = self.env
        if isinstance(field_name, list):
            names = field_name
        elif field_name:
            names = [field_name]
        else:
            names = []

        if not all(name in self._fields for name in names):
            return {}

        def PrefixTree(model, dotnames):
            """ Return a prefix tree for sequences of field names. """
            if not dotnames:
                return {}
            # group dotnames by prefix
            suffixes = defaultdict(list)
            for dotname in dotnames:
                # name, *names = dotname.split('.', 1)
                names = dotname.split('.', 1)
                name = names.pop(0)
                suffixes[name].extend(names)
            # fill in prefix tree in fields order
            tree = OrderedDict()
            for name, field in model._fields.items():
                if name in suffixes:
                    tree[name] = subtree = PrefixTree(model[name],
                                                      suffixes[name])
                    if subtree and field.type == 'one2many':
                        subtree.pop(field.inverse_name, None)
            return tree

        class Snapshot(dict):
            """ A dict with the values of a record, following a prefix tree. """
            __slots__ = ()

            def __init__(self, record, tree):
                # put record in dict to include it when comparing snapshots
                super(Snapshot, self).__init__({
                    '<record>': record,
                    '<tree>': tree
                })
                for name in tree:
                    self.fetch(name)

            def fetch(self, name):
                """ Set the value of field ``name`` from the record's value. """
                record = self['<record>']
                tree = self['<tree>']
                if record._fields[name].type in ('one2many', 'many2many'):
                    # x2many fields are serialized as a list of line snapshots
                    self[name] = [
                        Snapshot(line, tree[name]) for line in record[name]
                    ]
                else:
                    self[name] = record[name]

            def has_changed(self, name):
                """ Return whether a field on record has changed. """
                record = self['<record>']
                subnames = self['<tree>'][name]
                if record._fields[name].type not in ('one2many', 'many2many'):
                    return self[name] != record[name]
                return (len(self[name]) != len(record[name]) or (set(
                    line_snapshot["<record>"].id
                    for line_snapshot in self[name]) != set(record[name]._ids))
                        or any(
                            line_snapshot.has_changed(subname)
                            for line_snapshot in self[name]
                            for subname in subnames))

            def diff(self, other):
                """ Return the values in ``self`` that differ from ``other``.
                    Requires record cache invalidation for correct output!
                """
                record = self['<record>']
                result = {}
                for name, subnames in self['<tree>'].items():
                    if (name == 'id') or (other.get(name) == self[name]):
                        continue
                    field = record._fields[name]
                    if field.type not in ('one2many', 'many2many'):
                        result[name] = field.convert_to_onchange(
                            self[name], record, {})
                    else:
                        # x2many fields: serialize value as commands
                        result[name] = commands = [(5, )]
                        for line_snapshot in self[name]:
                            line = line_snapshot['<record>']
                            # line = line._origin or line
                            if not line.id:
                                # new line: send diff from scratch
                                line_diff = line_snapshot.diff({})
                                commands.append((0, line.id.ref
                                                 or 0, line_diff))
                            else:
                                # existing line: check diff from database
                                # (requires a clean record cache!)
                                line_diff = line_snapshot.diff(
                                    Snapshot(line, subnames))
                                if line_diff:
                                    # send all fields because the web client
                                    # might need them to evaluate modifiers
                                    line_diff = line_snapshot.diff({})
                                    commands.append((1, line.id, line_diff))
                                else:
                                    commands.append((4, line.id))
                return result

        nametree = PrefixTree(self.browse(), field_onchange)

        # prefetch x2many lines without data (for the initial snapshot)
        for name, subnames in nametree.items():
            if subnames and values.get(name):
                # retrieve all ids in commands, and read the expected fields
                line_ids = []
                for cmd in values[name]:
                    if cmd[0] in (1, 4):
                        line_ids.append(cmd[1])
                    elif cmd[0] == 6:
                        line_ids.extend(cmd[2])
                lines = self.browse()[name].browse(line_ids)
                lines.read(list(subnames), load='_classic_write')

        # Isolate changed values, to handle inconsistent data sent from the
        # client side: when a form view contains two one2many fields that
        # overlap, the lines that appear in both fields may be sent with
        # different data. Consider, for instance:
        #
        #   foo_ids: [line with value=1, ...]
        #   bar_ids: [line with value=1, ...]
        #
        # If value=2 is set on 'line' in 'bar_ids', the client sends
        #
        #   foo_ids: [line with value=1, ...]
        #   bar_ids: [line with value=2, ...]
        #
        # The idea is to put 'foo_ids' in cache first, so that the snapshot
        # contains value=1 for line in 'foo_ids'. The snapshot is then updated
        # with the value of `bar_ids`, which will contain value=2 on line.
        #
        # The issue also occurs with other fields. For instance, an onchange on
        # a move line has a value for the field 'move_id' that contains the
        # values of the move, among which the one2many that contains the line
        # itself, with old values!
        #
        changed_values = {name: values[name] for name in names}
        # set changed values to null in initial_values; not setting them
        # triggers default_get() on the new record when creating snapshot0
        initial_values = dict(values, **dict.fromkeys(names, False))

        # create a new record with values, and attach ``self`` to it
        with env.do_in_onchange():
            record = self.new(initial_values)
            # attach ``self`` with a different context (for cache consistency)
            record._origin = self.with_context(__onchange=True)

        # make a snapshot based on the initial values of record
        with env.do_in_onchange():
            snapshot0 = snapshot1 = Snapshot(record, nametree)

        # store changed values in cache, and update snapshot0
        with env.do_in_onchange():
            record._update_cache(changed_values, validate=False)
            for name in names:
                snapshot0.fetch(name)

        # determine which field(s) should trigger an onchange
        todo = list(names or nametree)
        done = set()

        # dummy assignment: trigger invalidations on the record
        with env.do_in_onchange():
            for name in todo:
                if name == 'id':
                    continue
                value = record[name]
                field = self._fields[name]
                if field.type == 'many2one' and field.delegate and not value:
                    # do not nullify all fields of parent record for new records
                    continue
                record[name] = value

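        # collect (title, message) pairs; the OrderedSet drops duplicate
        # warnings raised by several onchange methods while keeping their order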
        result = {'warnings': OrderedSet()}

        # process names in order
        with env.do_in_onchange():
            while todo:
                # apply field-specific onchange methods
                for name in todo:
                    if field_onchange.get(name):
                        record._onchange_eval(name, field_onchange[name],
                                              result)
                    done.add(name)

                # determine which fields to process for the next pass
                todo = [
                    name for name in nametree
                    if name not in done and snapshot0.has_changed(name)
                ]

            # make the snapshot with the final values of record
            snapshot1 = Snapshot(record, nametree)

        # determine values that have changed by comparing snapshots
        self.invalidate_cache()
        result['value'] = snapshot1.diff(snapshot0)

        # format warnings
        warnings = result.pop('warnings')
        if len(warnings) == 1:
            title, message = warnings.pop()
            result['warning'] = dict(title=title, message=message)
        elif len(warnings) > 1:
            # concatenate warning titles and messages
            title = _("Warnings")
            message = "\n\n".join(itertools.chain(*warnings))
            result['warning'] = dict(title=title, message=message)

        return result
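For reference, a minimal sketch (not taken from the snippet above) of the x2many command tuples that ``diff()`` emits; the numeric codes are Odoo's standard x2many write commands:

    commands = [
        (5,),                    # clear the list on the client side first
        (0, 0, {'value': 2}),    # create a new line with these field values
        (1, 42, {'value': 2}),   # update the existing line with id 42
        (4, 43),                 # keep the existing line 43 unchanged (link only)
    ]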
Example No. 11
    def _build_datamodel(cls, registry):
        """Instantiate a given Datamodel in the datamodels registry.

        This method is called at the end of Odoo's registry build.  The
        caller is :meth:`datamodel.builder.DatamodelBuilder.load_datamodels`.

        It generates new classes, which will be the Datamodel classes we will
        be using.  The new classes are generated following the inheritance
        of ``_inherit``. It ensures that the ``__bases__`` of the generated
        Datamodel classes follow the ``_inherit`` chain.

        Once a Datamodel class is created, it is added to the Datamodel
        Registry (:class:`DatamodelRegistry`), so it is available for
        lookups.

        At the end of new class creation, a hook method
        :meth:`_complete_datamodel_build` is called, so you can further
        customize the created datamodels.

        The following code is roughly the same as the one Odoo uses for
        building Models.

        """

        # In the simplest case, the datamodel's registry class inherits from
        # cls and the other classes that define the datamodel in a flat
        # hierarchy.  The registry contains the instance ``datamodel`` (on the
        # left). Its class, ``DatamodelClass``, carries inferred metadata that
        # is shared between all the datamodel's instances for this registry
        # only.
        #
        #   class A1(Datamodel):                    Datamodel
        #       _name = 'a'                           / | \
        #                                            A3 A2 A1
        #   class A2(Datamodel):                      \ | /
        #       _inherit = 'a'                    DatamodelClass
        #
        #   class A3(Datamodel):
        #       _inherit = 'a'
        #
        # When a datamodel is extended by '_inherit', its base classes are
        # modified to include the current class and the other inherited
        # datamodel classes.
        # Note that we actually inherit from other ``DatamodelClass``, so that
        # extensions to an inherited datamodel are immediately visible in the
        # current datamodel class, like in the following example:
        #
        #   class A1(Datamodel):
        #       _name = 'a'                          Datamodel
        #                                            /  / \  \
        #   class B1(Datamodel):                    /  A2 A1  \
        #       _name = 'b'                        /   \  /    \
        #                                         B2 DatamodelA B1
        #   class B2(Datamodel):                   \     |     /
        #       _name = 'b'                         \    |    /
        #       _inherit = ['b', 'a']                \   |   /
        #                                            DatamodelB
        #   class A2(Datamodel):
        #       _inherit = 'a'

        # determine inherited datamodels
        parents = cls._inherit
        if isinstance(parents, str):
            parents = [parents]
        elif parents is None:
            parents = []

        if cls._name in registry and not parents:
            raise TypeError("Datamodel %r (in class %r) already exists. "
                            "Consider using _inherit instead of _name "
                            "or using a different _name." % (cls._name, cls))

        # determine the datamodel's name
        name = cls._name or (len(parents) == 1 and parents[0])

        if not name:
            raise TypeError("Datamodel %r must have a _name" % cls)

        # all datamodels except 'base' implicitly inherit from 'base'
        if name != "base":
            parents = list(parents) + ["base"]

        # create or retrieve the datamodel's class
        if name in parents:
            if name not in registry:
                raise TypeError("Datamodel %r does not exist in registry." %
                                name)

        # determine all the classes the datamodel should inherit from
        bases = LastOrderedSet([cls])
        for parent in parents:
            if parent not in registry:
                raise TypeError(
                    "Datamodel %r inherits from non-existing datamodel %r." %
                    (name, parent))
            parent_class = registry[parent]
            if parent == name:
                for base in parent_class.__bases__:
                    bases.add(base)
            else:
                bases.add(parent_class)
                parent_class._inherit_children.add(name)

        if name in parents:
            DatamodelClass = registry[name]
            # Add the new bases to the existing class, since the class in the
            # registry could already be used in an inherit
            DatamodelClass.__bases__ = tuple(bases)
            # We must update the marshmallow schema on the existing datamodel
            # class to include the inherited ones
            parent_schemas = []
            for parent in bases:
                if issubclass(parent, MarshmallowModel):
                    parent_schemas.append(parent.__schema_class__)
            schema_class = type(name + "Schema", tuple(parent_schemas), {})
            DatamodelClass.__schema_class__ = schema_class
        else:
            attrs = {
                "_name": name,
                "_register": False,
                # names of children datamodel
                "_inherit_children": OrderedSet(),
            }
            if name == "base":
                attrs["_registry"] = registry
            DatamodelClass = type(name, tuple(bases), attrs)

        setattr(DatamodelClass.__schema_class__, "_registry",
                registry)  # noqa: B010
        setattr(DatamodelClass.__schema_class__, "_datamodel_name",
                name)  # noqa: B010
        setattr(DatamodelClass.__schema_class__, "__make_object__",
                __make_object__)  # noqa: B010
        DatamodelClass._complete_datamodel_build()

        registry[name] = DatamodelClass

        return DatamodelClass
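A minimal, hypothetical sketch of the declaration side this builder consumes (class and datamodel names are illustrative; the Datamodel base class comes from the surrounding framework):

    class PartnerInfo(Datamodel):           # hypothetical datamodel
        _name = "partner.info"

    class PartnerInfoExtension(Datamodel):
        _inherit = "partner.info"           # extends the datamodel above in place

After the registry build, ``registry["partner.info"]`` is a single generated class whose ``__bases__`` include both declarations plus the implicit "base" datamodel.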
Example No. 12
 def _get_browser_output_formats(self):
     return OrderedSet(
         self.search([("output_format", "in", ["opus", "ogg", "mp3"])
                      ]).mapped("output_format.name"))
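Since every snippet on this page leans on OrderedSet, here is a quick sketch of its behaviour, assuming ``odoo.tools.OrderedSet`` (a set that preserves insertion order):

    from odoo.tools import OrderedSet   # assumed import path

    formats = OrderedSet(['opus', 'ogg', 'opus', 'mp3'])
    list(formats)                       # ['opus', 'ogg', 'mp3']: deduplicated, order kept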
Example No. 14
    def _read_group_raw(self,
                        domain,
                        fields,
                        groupby,
                        offset=0,
                        limit=None,
                        orderby=False,
                        lazy=True):
        self.check_access_rights('read')
        query = self._where_calc(domain)
        fields = fields or [
            f.name for f in self._fields.itervalues() if f.store
        ]

        groupby = [groupby] if isinstance(groupby, basestring) else list(
            OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys = [
            self._read_group_process_groupby(gb, query) for gb in groupby_list
        ]
        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, 'read')
        for gb in groupby_fields:
            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
            assert gb in self._fields, "Unknown field %r in 'groupby'" % gb
            gb_field = self._fields[gb].base_field
            # OpenFire modification: a custom field may not be present in the database
            if not getattr(gb_field, 'of_custom_groupby', False):
                assert gb_field.store and gb_field.column_type, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        aggregated_fields = [
            f for f in fields if f != 'sequence' if f not in groupby_fields
            for field in [self._fields.get(f)] if field if field.group_operator
            if field.base_field.store and field.base_field.column_type
        ]

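        # for each aggregated field: (group operator, qualified SQL expression,
        # output alias), feeding the '%s(%s) AS "%s"' select template below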
        field_formatter = lambda f: (
            self._fields[f].group_operator,
            self._inherits_join_calc(self._table, f, query),
            f,
        )
        select_terms = [
            '%s(%s) AS "%s" ' % field_formatter(f) for f in aggregated_fields
        ]

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' %
                                (gb['qualified_field'], gb['groupby']))

        groupby_terms, orderby_terms = self._read_group_prepare(
            order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2
                     or not self._context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(
                groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)
                                              ) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' %
                                            (prefix, term)) if term else ''

        query = """
            SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
            FROM %(from)s
            %(where)s
            %(groupby)s
            %(orderby)s
            %(limit)s
            %(offset)s
        """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            'where': prefix_term('WHERE', where_clause),
            'groupby': prefix_terms('GROUP BY', groupby_terms),
            'orderby': prefix_terms('ORDER BY', orderby_terms),
            'limit': prefix_term('LIMIT',
                                 int(limit) if limit else None),
            'offset': prefix_term('OFFSET',
                                  int(offset) if limit else None),
        }
        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data

        # OpenFire modification: look up the name directly via name_get on the target object
        #  (the standard Odoo method read the field on the current object,
        #   which is impossible in the case of a one2many field)
        for gb in annotated_groupbys:
            if gb['type'] == 'many2one':
                gb_field = gb['field']
                rel = self._fields[gb_field].base_field.comodel_name
                gb_obj = self.env[rel]
                gb_ids = [r[gb_field] for r in fetched_data if r[gb_field]]
                gb_dict = {d[0]: d for d in gb_obj.browse(gb_ids).name_get()}
                for d in fetched_data:
                    d[gb_field] = gb_dict.get(d[gb_field], False)

        data = map(
            lambda r: {
                k: self._read_group_prepare_data(k, v, groupby_dict)
                for k, v in r.iteritems()
            }, fetched_data)
        result = [
            self._read_group_format_result(d, annotated_groupbys, groupby,
                                           domain) for d in data
        ]
        if lazy:
            # Right now, read_group only fills results in lazy mode (by default).
            # If you need the empty groups in 'eager' mode, then the method
            # _read_group_fill_results needs to be completely reimplemented in a
            # sane way
            result = self._read_group_fill_results(
                domain,
                groupby_fields[0],
                groupby[len(annotated_groupbys):],
                aggregated_fields,
                count_field,
                result,
                read_group_order=order,
            )
        return result
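A hypothetical call illustrating the shape this raw variant serves (model and field names are assumptions):

    groups = records._read_group_raw(
        [('state', '=', 'done')],        # domain
        ['partner_id', 'amount'],        # fields; groupby fields must be listed too
        ['partner_id'],                  # groupby
        lazy=True)
    # each group dict holds the aggregated 'amount', the 'partner_id' value
    # resolved through name_get(), and a 'partner_id_count' lazy-mode counter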
Example No. 15
def _build_model_new(cls, pool, cr):
    """ Instantiate a given model in the registry.

        This method creates or extends a "registry" class for the given model.
        This "registry" class carries inferred model metadata, and inherits (in
        the Python sense) from all classes that define the model, and possibly
        other registry classes.

    """

    # Keep links to non-inherited constraints in cls; this is useful for
    # instance when exporting translations
    cls._local_constraints = cls.__dict__.get('_constraints', [])
    cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

    # determine inherited models
    parents = inherit_workflow_manager(cr, cls)
    parents = [parents] if isinstance(parents, str) else (parents or [])

    # determine the model's name
    name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

    # all models except 'base' implicitly inherit from 'base'
    if name != 'base':
        parents = list(parents) + ['base']

    # create or retrieve the model's class
    if name in parents:
        if name not in pool:
            raise TypeError("Model %r does not exist in registry." % name)
        ModelClass = pool[name]
        ModelClass._build_model_check_base(cls)
        check_parent = ModelClass._build_model_check_parent
    else:
        ModelClass = type(
            name,
            (BaseModel, ),
            {
                '_name': name,
                '_register': False,
                '_original_module': cls._module,
                '_inherit_children': OrderedSet(),  # names of children models
                '_inherits_children': set(),  # names of children models
                '_fields': {},  # populated in _setup_base()
            })
        check_parent = cls._build_model_check_parent

    # determine all the classes the model should inherit from
    bases = LastOrderedSet([cls])
    for parent in parents:
        if parent not in pool:
            raise TypeError("Model %r inherits from non-existing model %r." %
                            (name, parent))
        parent_class = pool[parent]
        if parent == name:
            for base in parent_class.__bases__:
                bases.add(base)
        else:
            check_parent(cls, parent_class)
            bases.add(parent_class)
            parent_class._inherit_children.add(name)
    ModelClass.__bases__ = tuple(bases)

    # determine the attributes of the model's class
    ModelClass._build_model_attributes(pool)

    check_pg_name(ModelClass._table)

    # Transience
    if ModelClass._transient:
        assert ModelClass._log_access, \
            "TransientModels must have log_access turned on, " \
            "in order to implement their access rights policy"

    # link the class to the registry, and update the registry
    ModelClass.pool = pool
    pool[name] = ModelClass

    # backward compatibility: instantiate the model, and initialize it
    model = object.__new__(ModelClass)
    model.__init__(pool, cr)

    return ModelClass
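The bases are collected in a LastOrderedSet, so a class that is added again moves to the back; a small sketch of that behaviour, assuming ``odoo.tools.LastOrderedSet`` (whose ``add()`` re-inserts an existing element at the end):

    from odoo.tools import LastOrderedSet   # assumed import path

    bases = LastOrderedSet(['cls'])
    bases.add('parent')
    bases.add('cls')                         # already present: moved to the end
    list(bases)                              # ['parent', 'cls']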
Example No. 16
 def __init__(self, path='', path_args=None, **args):
     self.path = path
     self.args = args
     self.path_args = OrderedSet(path_args or [])
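Paired with a ``__call__`` that serializes the stored arguments into a path and query string, this URL-builder is typically used along these lines (class, path and argument names are hypothetical):

    keep = QueryURL('/shop', search='lamp')   # hypothetical helper class
    keep(order='price')                       # -> '/shop?...' with 'search' and
                                              #    'order' encoded as query args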
Example No. 17
    def read_group_qualified_byvin(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        self.check_access_rights('read')
        w_clause_centron_part2 = ""
        d = list()
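        # pull the 'measure_result' and 'gun_id' leaves out of the domain; they
        # are handled by hand in the raw SQL below instead of by _where_calc()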
        for r in domain:
            if r[0] == 'measure_result':
                continue
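            # note: the gun_id value is spliced into the SQL string directly,
            # not passed as a bound query parameter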
            if r[0] == 'gun_id':
                w_clause_centron_part2 += 'and r1.gun_id = {0}'.format(r[2])
                continue
            d.append(r)
        domain = d
        query = self._where_calc(domain)
        fields = fields or [f.name for f in self._fields.itervalues() if f.store]

        groupby = [groupby] if isinstance(groupby, basestring) else list(OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys1 = [self._read_group_process_groupby(gb, query) for gb in groupby_list]
        one_time_pass_state = False
        for gb in annotated_groupbys1:
            if gb['field'] == 'final_pass':
                gb['qualified_field'] = "\'final\'"
            if gb['field'] == 'one_time_pass':
                one_time_pass_state = True
                gb['qualified_field'] = "\'one time\'"
            if gb['field'] == 'vin':
                gb['qualified_field'] = "d1.vin"

        if one_time_pass_state:
            annotated_groupbys = [gb for gb in annotated_groupbys1 if gb['field'] != 'final_pass']
        else:
            annotated_groupbys = [gb for gb in annotated_groupbys1 if gb['field'] != 'one_time_pass']

        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, 'read')
        for gb in groupby_fields:
            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
            assert gb in self._fields, "Unknown field %r in 'groupby'" % gb
            gb_field = self._fields[gb].base_field
            assert gb_field.store and gb_field.column_type, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        aggregated_fields = [
            f for f in fields
            if f != 'sequence'
            if f not in groupby_fields
            for field in [self._fields.get(f)]
            if field
            if field.group_operator
            if field.base_field.store and field.base_field.column_type
        ]

        field_formatter = lambda f: (
            self._fields[f].group_operator,
            self._inherits_join_calc(self._table, f, query),
            f,
        )
        select_terms = ['%s(%s) AS "%s" ' % field_formatter(f) for f in aggregated_fields]

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))

        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2 or not self._context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

        from_clause = """
                    (select a.vin,sum(COALESCE(b.sequence, 0)) as sequence from
                    (select  distinct  vin,product_id from operation_result) a
                    left join
                    (select  a.product_id,count(*) as sequence from mrp_bom a
                    left join mrp_bom_line b on a.id=b.bom_id
                    group by a.product_id) b on a.product_id=b.product_id
                    group by a.vin) d1
                    left join 
                    (select  a.VIN,count (a.*) as sequence  from (
                    select distinct r1.VIN,r1.point_id,r1.batch from operation_result r1
                    where  %(interval2)s  and  r1.measure_result in ('ok','nok') %(where_clause)s) a
                    group by a.VIN) d2  on d1.VIN = d2.VIN
                    left join 
                    (select  vin,min(control_date) as control_date  from operation_result
                      group by vin) d3 on d1.VIN = d3.VIN
                        """ % {
            'interval2': ''' r1.one_time_pass='******' ''' if one_time_pass_state else ''' r1.final_pass='******' ''',
            'where_clause': w_clause_centron_part2
        }

        if where_clause:
            if where_clause.find('''"operation_result"."vin"''') >= 0:
                where_clause = where_clause.replace('''"operation_result"."vin"''', '''"d1"."vin"''')
            if where_clause.find('''"operation_result"."control_date"''') >= 0:
                where_clause = where_clause.replace('''"operation_result"."control_date"''', '''"d3"."control_date"''')

        query = """
                        SELECT round(round(d2.sequence,2)/ NULLIF(d1.sequence, 0) * 100.0, 4) AS "%(count_field)s" %(extra_fields)s
                        FROM %(from)s
                        %(where)s
                        %(orderby)s
                        %(limit)s
                        %(offset)s
                    """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            'where': prefix_term('WHERE', where_clause),
            'orderby': 'ORDER BY ' + count_field,
            'limit': prefix_term('LIMIT', int(limit) if limit else None),
            'offset': prefix_term('OFFSET', int(offset) if limit else None),
        }

        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data
        """
        for d in fetched_data:
            n = {'consu_product_id': self.env['product.product'].browse(d['consu_product_id']).name_get()[0]}
            d.update(n)
        """
        """
        for d in fetched_data:
           n = {'consu_product_id': """
        # (%(from)s,'test')
        """ % {'from': d['consu_product_id']}}
            d.update(n)
        """
        data = map(lambda r: {k: self._read_group_prepare_data(k, v, groupby_dict) for k, v in r.iteritems()},
                   fetched_data)
        result = [self._read_group_format_result_centron(d, annotated_groupbys, groupby, domain) for d in data]

        return result
Example No. 18
    def _get_activity_report_data(self, detailed=True):
        self.ensure_one()
        task_rate_matrix_data = {}
        project_rate_matrix_data = {}
        time_category_rate_matrix_data = {}
        product_obj = self.env['product.product']
        rate_product_ids = product_obj
        projects_row_data = OrderedDict()
        list_timesheet_to_compute = {}

        # Build a dict of the tasks we want to use: key=parent_task, value=list of tasks (may include the parent task id if time is coded on it)
        for timesheet_id in self.timesheet_ids:
            if not timesheet_id.so_line.qty_invoiced:
                continue
            project_id = timesheet_id.project_id

            if timesheet_id.task_id.parent_id:
                if timesheet_id.task_id.parent_id in list_timesheet_to_compute:
                    if timesheet_id.task_id[
                            0] not in list_timesheet_to_compute[
                                timesheet_id.task_id.parent_id[0]]:
                        list_timesheet_to_compute[
                            timesheet_id.task_id.parent_id[0]] += [
                                timesheet_id.task_id[0]
                            ]
                else:
                    list_timesheet_to_compute[
                        timesheet_id.task_id.parent_id[0]] = [
                            timesheet_id.task_id[0]
                        ]
            else:
                if timesheet_id.task_id[0] in list_timesheet_to_compute:
                    if timesheet_id.task_id[0] not in list_timesheet_to_compute[
                            timesheet_id.task_id[
                            0]]:  # if the task is already in the list but is its own parent, put it at the beginning of the list
                        list_timesheet_to_compute[timesheet_id.task_id[0]] = [
                            timesheet_id.task_id[0]
                        ] + list_timesheet_to_compute[timesheet_id.task_id[0]]
                else:
                    list_timesheet_to_compute[timesheet_id.task_id[0]] = [
                        timesheet_id.task_id[0]
                    ]

        # using the dict of task to create matrix to use in the view
        for parent_task, list_tasks in list_timesheet_to_compute.items():
            number_tasks = len(list_tasks)
            for task_individual in list_tasks:
                for timesheet_id in task_individual.timesheet_ids.filtered(
                        lambda t: t.timesheet_invoice_id.id == self.id):
                    if self.merge_subtask and timesheet_id.task_id.parent_id:  # if the task has a parent and we want to merge
                        current_task_id = timesheet_id.task_id.parent_id
                    else:
                        current_task_id = timesheet_id.task_id

                    rate_product_id = timesheet_id.so_line.product_id
                    rate_product_ids |= rate_product_id
                    time_category_id = timesheet_id.time_category_id
                    unit_amount = timesheet_id.unit_amount

                    # project matrix data
                    project_rate_matrix_key = (project_id, rate_product_id)
                    project_rate_matrix_data.setdefault(
                        project_rate_matrix_key, 0.)
                    project_rate_matrix_data[
                        project_rate_matrix_key] += unit_amount
                    tasks_row_data = projects_row_data.setdefault(
                        project_id, OrderedDict())

                    # task matrix data, the first value is the total of hours per rate per task
                    task_rate_matrix_key = (project_id, current_task_id,
                                            rate_product_id)
                    task_rate_matrix_data.setdefault(task_rate_matrix_key,
                                                     [0., 0.])
                    task_rate_matrix_data[task_rate_matrix_key][
                        0] += unit_amount
                    task_rate_matrix_data[task_rate_matrix_key][
                        1] += unit_amount
                    # adding a second value in the list, the total for a parent_task
                    if timesheet_id.task_id.parent_id or current_task_id == parent_task:
                        task_to_use = timesheet_id.task_id.parent_id
                        task_rate_matrix_key = (project_id, task_to_use,
                                                rate_product_id)
                        task_rate_matrix_data.setdefault(
                            task_rate_matrix_key, [0., 0.])
                        task_rate_matrix_data[task_rate_matrix_key][
                            1] += unit_amount

                    time_category_row_data = tasks_row_data.setdefault(
                        current_task_id, [
                            OrderedDict(), parent_task, number_tasks,
                            self.merge_subtask
                        ])
                    time_category_row_data = time_category_row_data[0]

                    # time category matrix data
                    if detailed:
                        time_category_matrix_key = (project_id,
                                                    current_task_id,
                                                    time_category_id,
                                                    rate_product_id)
                        time_category_rate_matrix_data.setdefault(
                            time_category_matrix_key, 0.)
                        time_category_rate_matrix_data[
                            time_category_matrix_key] += unit_amount
                        time_category_row_data.setdefault(
                            time_category_id, None)

        # reorder rate_product_ids columns according to the most expensive one
        rate_product_ids = product_obj.browse(
            OrderedSet([
                couple[1].id for couple in sorted(
                    [(project_id, rate_product_id)
                     for project_id in projects_row_data.keys()
                     for rate_product_id in rate_product_ids],
                    key=lambda key: project_rate_matrix_data[key],
                    reverse=True)
            ]))
        return {
            'project_rate_matrix_data': project_rate_matrix_data,
            'task_rate_matrix_data': task_rate_matrix_data,
            'time_category_rate_matrix_data': time_category_rate_matrix_data,
            'rate_product_ids': rate_product_ids,
            'projects_row_data': projects_row_data,
        }
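For reference, the keying scheme of the matrices built above (summarized from the code, names are placeholders):

    # project_rate_matrix_data[(project, rate_product)]                       -> hours
    # task_rate_matrix_data[(project, task, rate_product)]                    -> [task hours, parent-task total]
    # time_category_rate_matrix_data[(project, task, category, rate_product)] -> hours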
Example No. 19
    def read_group_lacking(self,
                           domain,
                           fields,
                           groupby,
                           offset=0,
                           limit=None,
                           orderby=False,
                           lazy=True):
        self.check_access_rights('read')
        query = self._where_calc(domain)
        fields = fields or [
            f.name for f in self._fields.itervalues() if f.store
        ]

        groupby = [groupby] if isinstance(groupby, basestring) else list(
            OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys = [
            self._read_group_process_groupby(gb, query) for gb in groupby_list
        ]
        for gb in annotated_groupbys:
            if gb['field'] == 'lacking':
                gb['qualified_field'] = "\'lack\'"
            if gb['field'] == 'control_date':
                gb['qualified_field'] = "d1.control_date"
        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, 'read')
        for gb in groupby_fields:
            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
            assert gb in self._fields, "Unknown field %r in 'groupby'" % gb
            gb_field = self._fields[gb].base_field
            assert gb_field.store and gb_field.column_type, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        aggregated_fields = [
            f for f in fields if f != 'sequence' if f not in groupby_fields
            for field in [self._fields.get(f)] if field if field.group_operator
            if field.base_field.store and field.base_field.column_type
        ]

        field_formatter = lambda f: (
            self._fields[f].group_operator,
            self._inherits_join_calc(self._table, f, query),
            f,
        )
        select_terms = [
            '%s(%s) AS "%s" ' % field_formatter(f) for f in aggregated_fields
        ]

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' %
                                (gb['qualified_field'], gb['groupby']))

        groupby_terms, orderby_terms = self._read_group_prepare(
            order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2
                     or not self._context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(
                groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)
                                              ) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' %
                                            (prefix, term)) if term else ''

        from_clause = '''
                    (select sum(dd.count)                                                                             as acount,
                             date_trunc('%(interval)s', timezone('Asia/Chongqing', timezone('UTC', mp.date_planned_start))) as control_date
                      from (select count(op.*) as count, mw.id as wo_id, mw.production_id as mpdid
                            from mrp_routing_workcenter mrw,
                                 operation_point op,
                                 mrp_workorder mw
                            where mw.operation_id = mrw.id
                              and op.operation_id = mrw.id
                            group by mw.id) dd,
                           mrp_production mp
                      where dd.mpdid = mp.id
                      group by control_date
                      order by control_date) d1,
                     (select sum(dd.count)                                                                             as rs,
                             date_trunc('%(interval)s', timezone('Asia/Chongqing', timezone('UTC', mp.date_planned_start))) as control_date
                      from (select count(e.oprb) as count, e.oprw as oprw
                            from (select distinct opr.batch as oprb, opr.workorder_id as oprw
                                  from operation_result opr
                                  group by opr.workorder_id, opr.batch) as e
                            group by oprw) dd,
                           mrp_workorder mw,
                           mrp_production mp
                      where mw.id = dd.oprw and mp.id = mw.production_id
                      group by control_date
                      order by control_date) d2
        ''' % {
            'interval':
            annotated_groupbys[0]['groupby'].split(':')[-1]
            if annotated_groupbys[0]['field'] == 'control_date' else
            annotated_groupbys[1]['groupby'].split(':')[-1],
        }

        if where_clause == '':
            where_clause = 'd1.control_date = d2.control_date'
        else:
            if where_clause.find('''"operation_result"."control_date"''') >= 0:
                where_clause = where_clause.replace(
                    '''"operation_result"."control_date"''',
                    '''"d1"."control_date"''')
            where_clause += ' AND d1.control_date = d2.control_date'

        query = """
                    SELECT round((d1.acount - d2.rs) / NULLIF(d1.acount, 0) * 100.0, 4) AS "%(count_field)s" %(extra_fields)s
                    FROM %(from)s
                    %(where)s
                    %(orderby)s
                    %(limit)s
                    %(offset)s
                """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            'where': prefix_term('WHERE', where_clause),
            'orderby': 'ORDER BY ' + count_field,
            'limit': prefix_term('LIMIT',
                                 int(limit) if limit else None),
            'offset': prefix_term('OFFSET',
                                  int(offset) if limit else None),
        }
        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data

        many2onefields = [
            gb['field'] for gb in annotated_groupbys
            if gb['type'] == 'many2one'
        ]
        if many2onefields:
            data_ids = [r['id'] for r in fetched_data]
            many2onefields = list(set(many2onefields))
            data_dict = {
                d['id']: d
                for d in self.browse(data_ids).read(many2onefields)
            }
            for d in fetched_data:
                d.update(data_dict[d['id']])

        data = map(
            lambda r: {
                k: self._read_group_prepare_data(k, v, groupby_dict)
                for k, v in r.iteritems()
            }, fetched_data)
        result = [
            self._read_group_format_result_centron(d, annotated_groupbys,
                                                   groupby, domain)
            for d in data
        ]

        return result