def _split_by_server(self):
    """Yield ``(mail_server_id, record_ids)`` pairs for the current recordset.

    The same ``mail_server_id`` may be yielded more than once: each yielded
    id batch is capped by the ``mail.session.batch.size`` system parameter.
    """
    # Disable prefetching: on very large mail queues, prefetching full
    # records can raise MemoryError, and we only need each mail's server id.
    ids_by_server = defaultdict(list)
    for record in self.with_context(prefetch_fields=False):
        ids_by_server[record.mail_server_id.id].append(record.id)

    icp = self.env['ir.config_parameter'].sudo()
    max_batch = int(icp.get_param('mail.session.batch.size', 1000))

    for server_id, ids in ids_by_server.items():
        for chunk in tools.split_every(max_batch, ids):
            yield server_id, chunk
def _compute_allowed_picking_ids(self):
    # Backport of f329de26: allowed_picking_ids is useless and
    # view_stock_landed_cost_form no longer uses it, but the field and its
    # compute are kept since this is a stable version. The compute has been
    # made more resilient to MemoryErrors by writing the m2m in chunks.
    picking_ids_by_company = defaultdict(list)
    if self.company_id:
        # Pickings that carry at least one valued stock move, per company.
        self.env.cr.execute("""
            SELECT sm.picking_id, sm.company_id
              FROM stock_move AS sm
             INNER JOIN stock_valuation_layer AS svl ON svl.stock_move_id = sm.id
             WHERE sm.picking_id IS NOT NULL AND sm.company_id IN %s
             GROUP BY sm.picking_id, sm.company_id
        """, [tuple(self.company_id.ids)])
        for picking_id, company_id in self.env.cr.fetchall():
            picking_ids_by_company[company_id].append(picking_id)

    chunk_size = 5000
    for cost in self:
        picking_ids = picking_ids_by_company[cost.company_id.id]
        # First chunk replaces the field value outright; the remaining
        # chunks are linked with (4, id) commands to keep each ORM write
        # small enough to avoid MemoryError.
        cost.allowed_picking_ids = picking_ids[:chunk_size]
        for chunk in tools.split_every(chunk_size, picking_ids[chunk_size:]):
            cost.allowed_picking_ids = [(4, pid) for pid in chunk]
def _procure_orderpoint_confirm(self, use_new_cursor=False, company_id=None, raise_user_error=True):
    """ Create procurements based on orderpoints.

    :param bool use_new_cursor: if set, use a dedicated cursor and
        auto-commit after processing 1000 orderpoints. This is
        appropriate for batch jobs only.
    :param company_id: company to run the orderpoints under
        (forwarded to :meth:`with_company`).
    :param bool raise_user_error: forwarded to ``procurement.group.run``;
        when False, procurement errors are collected as activities
        instead of being raised to the user.
    :return: empty dict (action-like return value for the scheduler)
    """
    self = self.with_company(company_id)
    # Read only the ids up-front to avoid prefetching full orderpoint
    # records for the whole (potentially huge) recordset.
    orderpoints_noprefetch = self.read(['id'])
    orderpoints_noprefetch = [
        orderpoint['id'] for orderpoint in orderpoints_noprefetch
    ]
    for orderpoints_batch in split_every(1000, orderpoints_noprefetch):
        if use_new_cursor:
            # Dedicated cursor so each 1000-orderpoint batch can be
            # committed independently (see docstring).
            cr = registry(self._cr.dbname).cursor()
            self = self.with_env(self.env(cr=cr))
        orderpoints_batch = self.env['stock.warehouse.orderpoint'].browse(
            orderpoints_batch)
        orderpoints_exceptions = []
        # Retry loop: on ProcurementException, drop the failing
        # orderpoints from the batch and run the remainder again.
        while orderpoints_batch:
            procurements = []
            for orderpoint in orderpoints_batch:
                # Only replenish when there is a strictly positive
                # quantity to order (float_compare == 1).
                if float_compare(orderpoint.qty_to_order, 0.0,
                                 precision_rounding=orderpoint.product_uom.
                                 rounding) == 1:
                    date = datetime.combine(orderpoint.lead_days_date,
                                            time.min)
                    values = orderpoint._prepare_procurement_values(
                        date=date)
                    procurements.append(
                        self.env['procurement.group'].Procurement(
                            orderpoint.product_id, orderpoint.qty_to_order,
                            orderpoint.product_uom, orderpoint.location_id,
                            orderpoint.name, orderpoint.name,
                            orderpoint.company_id, values))
            try:
                # Savepoint so a failed run leaves the cursor usable for
                # the retry with the shrunken batch.
                with self.env.cr.savepoint():
                    self.env['procurement.group'].with_context(
                        from_orderpoint=True).run(
                            procurements,
                            raise_user_error=raise_user_error)
            except ProcurementException as errors:
                for procurement, error_msg in errors.procurement_exceptions:
                    # NOTE: 'orderpoint_id' in the procurement values is
                    # presumably the orderpoint record itself (it is fed
                    # to concat() below) — set by
                    # _prepare_procurement_values.
                    orderpoints_exceptions += [
                        (procurement.values.get('orderpoint_id'), error_msg)
                    ]
                failed_orderpoints = self.env[
                    'stock.warehouse.orderpoint'].concat(
                        *[o[0] for o in orderpoints_exceptions])
                if not failed_orderpoints:
                    # Exception carried no identifiable orderpoints:
                    # nothing to remove, so give up on this batch.
                    _logger.error('Unable to process orderpoints')
                    break
                orderpoints_batch -= failed_orderpoints
            except OperationalError:
                if use_new_cursor:
                    # Concurrency error on the dedicated cursor: roll
                    # back and retry the same batch.
                    cr.rollback()
                    continue
                else:
                    raise
            else:
                orderpoints_batch._post_process_scheduler()
                break

        # Log an activity on product template for failed orderpoints.
        for orderpoint, error_msg in orderpoints_exceptions:
            existing_activity = self.env['mail.activity'].search([
                ('res_id', '=',
                 orderpoint.product_id.product_tmpl_id.id),
                ('res_model_id', '=',
                 self.env.ref('product.model_product_template').id),
                ('note', '=', error_msg)
            ])
            if not existing_activity:
                # Avoid duplicating an identical warning activity.
                orderpoint.product_id.product_tmpl_id.activity_schedule(
                    'mail.mail_activity_data_warning',
                    note=error_msg,
                    user_id=orderpoint.product_id.responsible_id.id
                    or SUPERUSER_ID,
                )

        if use_new_cursor:
            cr.commit()
            cr.close()

    return {}
def _split_batch(self):
    """Yield chunks of the current ids, sized by the
    ``sms.session.batch.size`` system parameter (default 500)."""
    icp = self.env['ir.config_parameter'].sudo()
    size = int(icp.get_param('sms.session.batch.size', 500))
    yield from tools.split_every(size, self.ids)