def _notify_sms_update(self):
    """ Send bus notifications to update status of notifications in chatter.
    Purpose is to send the updated status per author.

    TDE FIXME: author_id strategy seems curious, check with JS """
    messages = self.env['mail.message']
    for message in self:
        # YTI FIXME: check allowed_company_ids if necessary
        if message.model and message.res_id:
            record = self.env[message.model].browse(message.res_id)
            try:
                record.check_access_rights('read')
                record.check_access_rule('read')
            except exceptions.AccessError:
                continue
            else:
                messages |= message
    # Notify channels after update of SMS status
    updates = [[
        (self._cr.dbname, 'res.partner', author.id),
        {
            'type': 'sms_update',
            'elements': self.env['mail.message'].concat(
                *author_messages)._format_mail_failures(),
        },
    ] for author, author_messages in groupby(messages, itemgetter('author_id'))]
    self.env['bus.bus'].sendmany(updates)
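
# The snippets in this file rely on a groupby that aggregates ALL elements
# sharing a key, not only consecutive ones (unlike itertools.groupby, which
# expects pre-sorted input). odoo.tools.groupby behaves this way in recent
# versions; aggregate_groupby below is a minimal dependency-free stand-in for
# illustration only, grouping messages per author the way _notify_sms_update
# does. The Message namedtuple is hypothetical.
from collections import defaultdict, namedtuple

def aggregate_groupby(iterable, key):
    """Group all elements under the same key, regardless of input order."""
    groups = defaultdict(list)
    for elem in iterable:
        groups[key(elem)].append(elem)
    return groups.items()

Message = namedtuple('Message', ['id', 'author_id'])
msgs = [Message(1, 7), Message(2, 9), Message(3, 7)]  # not sorted by author
for author, author_messages in aggregate_groupby(msgs, key=lambda m: m.author_id):
    print(author, [m.id for m in author_messages])
# 7 [1, 3]
# 9 [2]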
def _compute_average_price(self, qty_invoiced, qty_to_invoice, stock_moves):
    self.ensure_one()
    if stock_moves.product_id == self:
        return super()._compute_average_price(qty_invoiced, qty_to_invoice, stock_moves)
    bom = self.env['mrp.bom']._bom_find(
        self, company_id=stock_moves.company_id.id, bom_type='phantom')[self]
    if not bom:
        return super()._compute_average_price(qty_invoiced, qty_to_invoice, stock_moves)
    value = 0
    dummy, bom_lines = bom.explode(self, 1)
    bom_lines = {line: data for line, data in bom_lines}
    for bom_line, moves_list in groupby(
            stock_moves.filtered(lambda sm: sm.state != 'cancel'),
            lambda sm: sm.bom_line_id):
        if bom_line not in bom_lines:
            for move in moves_list:
                value += move.product_qty * move.product_id._compute_average_price(
                    qty_invoiced * move.product_qty,
                    qty_to_invoice * move.product_qty, move)
            continue
        line_qty = bom_line.product_uom_id._compute_quantity(
            bom_line.product_qty, bom_line.product_id.uom_id)
        moves = self.env['stock.move'].concat(*moves_list)
        value += line_qty * bom_line.product_id._compute_average_price(
            qty_invoiced * line_qty, qty_to_invoice * line_qty, moves)
    return value
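
# A worked sketch of the kit valuation above (numbers are illustrative, not
# from any real product): the kit's value is each component's average price
# weighted by the quantity the phantom BOM requires per kit.
component_avg_price = {'screw': 0.10, 'panel': 4.00}  # hypothetical components
qty_per_kit = {'screw': 8, 'panel': 2}
kit_value = sum(qty_per_kit[c] * component_avg_price[c] for c in qty_per_kit)
assert abs(kit_value - 8.80) < 1e-9  # 8 * 0.10 + 2 * 4.00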
def _compute_default_locations(iterator, field_name, model_name):
    random = populate.Random('_compute_default_locations')
    locations_by_company = dict(
        groupby(internal_locations, key=lambda loc: loc.company_id.id))
    locations_by_company = {
        company_id: self.env['stock.location'].concat(*locations)
        for company_id, locations in locations_by_company.items()
    }
    for values in iterator:
        locations_company = locations_by_company[values['company_id']]
        # TODO: choose only locations that are children of warehouse.lot_stock_id
        inter_location = random.choice(locations_company)
        values['warehouse_id'] = inter_location.get_warehouse().id
        if values['code'] == 'internal':
            values['default_location_src_id'] = inter_location.id
            values['default_location_dest_id'] = random.choice(
                locations_company - inter_location).id
        elif values['code'] == 'incoming':
            values['default_location_dest_id'] = inter_location.id
        elif values['code'] == 'outgoing':
            values['default_location_src_id'] = inter_location.id
        yield values
def _reminder(self):
    cutoff = self.env.context.get(
        'forwardport_updated_before'
    ) or fields.Datetime.to_string(datetime.datetime.now() - DEFAULT_DELTA)

    for source, prs in groupby(
            self.env['runbot_merge.pull_requests'].search([
                # only FP PRs
                ('source_id', '!=', False),
                # active
                ('state', 'not in', ['merged', 'closed']),
                # last updated more than <cutoff> ago
                ('write_date', '<', cutoff),
            ]),
            lambda p: p.source_id):
        self.env['runbot_merge.pull_requests.feedback'].create({
            'repository': source.repository.id,
            'pull_request': source.number,
            'message': "This pull request has forward-port PRs awaiting action (not merged or closed): %s" % ', '.join(
                pr.display_name for pr in sorted(prs, key=lambda p: p.number)
            ),
            'token_field': 'fp_github_token',
        })
def _compute_thumb_urls(self):
    for backend, records in tools.groupby(self, lambda x: x.backend_id):
        url_fname = "url"
        if backend.backend_view_use_internal_url:
            url_fname = "internal_url"
        for rec in records:
            rec.image_medium_url = rec.thumb_medium_id[url_fname]
            rec.image_small_url = rec.thumb_small_id[url_fname]
def _get_procurements_to_merge(self, procurements):
    """ Take a list of procurement requests and group those that would be
    handled by the same purchase order line.

    :param list procurements: procurement requests (neither ordered nor sorted)
    :return: procurement requests grouped by their product_id
    :rtype: list
    """
    return [
        pro_g for __, pro_g in groupby(
            procurements, key=self._get_procurements_to_merge_groupby)
    ]
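
# A minimal sketch of the grouping above: keep only the groups and discard
# the keys. Assumes the aggregate_groupby sketch from the top of this file;
# the tuples stand in for procurement requests keyed by product.
procs = [('prodA', 5), ('prodB', 1), ('prodA', 2)]
merge_groups = [group for __, group in aggregate_groupby(procs, key=lambda p: p[0])]
# [[('prodA', 5), ('prodA', 2)], [('prodB', 1)]]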
def _compute_locations(iterator, field_name, model_name):
    locations_out = cross_company_locations.filtered_domain([
        ('usage', '=', 'customer')])
    locations_in = cross_company_locations.filtered_domain([
        ('usage', '=', 'supplier')])
    locations_internal = locations_companies.filtered_domain([
        ('usage', '=', 'internal')])
    locations_by_company = dict(
        groupby(locations_companies, key=lambda loc: loc.company_id.id))
    locations_by_company = {
        com: self.env['stock.location'].concat(*locs)
        for com, locs in locations_by_company.items()
    }

    random = populate.Random('_compute_locations')
    for values in iterator:
        picking_type = self.env['stock.picking.type'].browse(
            values['picking_type_id'])
        source_loc = picking_type.default_location_src_id
        dest_loc = picking_type.default_location_dest_id
        locations_company = locations_by_company[picking_type.company_id.id]
        if not source_loc or random.random() > 0.8:
            if picking_type.code == 'incoming':
                source_loc = random.choice(locations_in)
            elif picking_type.code == 'outgoing':
                source_loc = random.choice(locations_internal & locations_company)
            elif picking_type.code == 'internal':
                source_loc = random.choice(locations_internal & locations_company)
        if not dest_loc or random.random() > 0.8:
            if picking_type.code == 'incoming':
                dest_loc = random.choice(locations_internal & locations_company)
            elif picking_type.code == 'outgoing':
                dest_loc = random.choice(locations_out)
            elif picking_type.code == 'internal':
                # Needs at least 2 internal locations per company so the
                # destination can differ from the source
                dest_loc = random.choice(
                    (locations_internal & locations_company) - source_loc)
        values['location_id'] = source_loc.id
        values['location_dest_id'] = dest_loc.id
        yield values
def _match_all_variant_values(self, product_template_attribute_value_ids):
    """ Check that all variant values (`product_template_attribute_value_ids`)
    are present on the variant (`self`).

    If multiple values are encoded for the same attribute line, only one of
    them has to be found on the variant.
    """
    self.ensure_one()
    if not product_template_attribute_value_ids:
        return True
    for _, iter_ptav in groupby(product_template_attribute_value_ids,
                                lambda ptav: ptav.attribute_line_id):
        if not any(ptav in self.product_template_attribute_value_ids
                   for ptav in iter_ptav):
            return False
    return True
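
# The matching rule above, restated on plain data: for every attribute line,
# at least one of the requested values must be present on the variant. A
# hypothetical sketch with (attribute_line, value) pairs instead of records.
requested = [('color', 'red'), ('color', 'blue'), ('size', 'M')]
variant_values = {('color', 'red'), ('size', 'M')}

def matches(requested, variant_values):
    by_line = {}
    for line, value in requested:
        by_line.setdefault(line, []).append((line, value))
    return all(any(v in variant_values for v in vals)
               for vals in by_line.values())

assert matches(requested, variant_values)  # 'red' satisfies the color line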
def x_ids_by_company(recordset, with_false=True):
    x_by_company = dict(
        groupby(recordset, key=lambda x_record: x_record.company_id.id))
    if with_false:
        x_inter_company = self.env[recordset._name].concat(
            *x_by_company.get(False, []))
    else:
        x_inter_company = self.env[recordset._name]
    return {
        com: (self.env[recordset._name].concat(*x_records) | x_inter_company).ids
        for com, x_records in x_by_company.items() if com
    }
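
# A runnable analogue of x_ids_by_company on plain data (hypothetical Partner
# tuples instead of records; reuses the aggregate_groupby sketch above):
# records with a falsy company are treated as shared, so their ids are
# appended to every company's bucket.
Partner = namedtuple('Partner', ['id', 'company_id'])
recs = [Partner(1, 10), Partner(2, False), Partner(3, 10), Partner(4, 20)]
by_company = dict(aggregate_groupby(recs, key=lambda r: r.company_id))
shared = list(by_company.get(False, []))
ids_by_company = {com: [r.id for r in list(rs) + shared]
                  for com, rs in by_company.items() if com}
# {10: [1, 3, 2], 20: [4, 2]}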
def _get_stats_summary_data_choice(self, user_input_lines):
    right_inputs, partial_inputs = self.env['survey.user_input'], self.env['survey.user_input']
    right_answers = self.suggested_answer_ids.filtered(lambda label: label.is_correct)
    if self.question_type == 'multiple_choice':
        for user_input, lines in tools.groupby(
                user_input_lines, operator.itemgetter('user_input_id')):
            user_input_answers = self.env['survey.user_input.line'].concat(*lines).filtered(
                lambda l: l.answer_is_correct).mapped('suggested_answer_id')
            if user_input_answers and user_input_answers < right_answers:
                partial_inputs += user_input
            elif user_input_answers:
                right_inputs += user_input
    else:
        right_inputs = user_input_lines.filtered(
            lambda line: line.answer_is_correct).mapped('user_input_id')
    return {
        'right_answers': right_answers,
        'right_inputs_count': len(right_inputs),
        'partial_inputs_count': len(partial_inputs),
    }
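
# Note on the `<` above (hedged: this matches Odoo recordset semantics as
# documented, where `a < b` is a strict-subset test): an input is "partial"
# when it found some correct answers but not all of them. The same idea on
# plain sets:
right = {'a1', 'a2', 'a3'}
assert {'a1'} < right       # some but not all -> partial
assert not (right < right)  # found all of them -> counted as fully right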
def _populate_factories(self):
    now = datetime.now()
    company_ids = self.env.registry.populated_models['res.company'][:COMPANY_NB_WITH_STOCK]
    all_partners = self.env['res.partner'].browse(
        self.env.registry.populated_models['res.partner'])
    partners_by_company = dict(
        groupby(all_partners, key=lambda par: par.company_id.id))
    partners_inter_company = self.env['res.partner'].concat(
        *partners_by_company.get(False, []))
    partners_by_company = {
        com: self.env['res.partner'].concat(*partners) | partners_inter_company
        for com, partners in partners_by_company.items() if com
    }

    def get_date_order(values, counter, random):
        # 95.45 % of order dates fall between (-5, 15) days from now,
        # following a Gauss distribution (only ~15 % of POs are late)
        delta = random.gauss(5, 5)
        return now + timedelta(days=delta)

    def get_date_planned(values, counter, random):
        # 95 % of PO receipt dates fall between (1, 16) days after the order
        # deadline, following a shifted exponential distribution
        delta = random.expovariate(0.2) + 1
        return values['date_order'] + timedelta(days=delta)

    def get_partner_id(values, counter, random):
        return random.choice(partners_by_company[values['company_id']]).id

    def get_currency_id(values, counter, random):
        company = self.env['res.company'].browse(values['company_id'])
        return company.currency_id.id

    return [
        ('company_id', populate.randomize(company_ids)),
        ('date_order', populate.compute(get_date_order)),
        ('date_planned', populate.compute(get_date_planned)),
        ('partner_id', populate.compute(get_partner_id)),
        ('currency_id', populate.compute(get_currency_id)),
    ]
def _populate_factories(self):
    res = super()._populate_factories()
    picking_types = self.env['stock.picking.type'].search([('code', '=', 'incoming')])
    picking_types_by_company = dict(
        groupby(picking_types, key=lambda pt: pt.company_id.id))
    picking_types_inter_company = self.env['stock.picking.type'].concat(
        *picking_types_by_company.get(False, []))
    picking_types_by_company = {
        com: self.env['stock.picking.type'].concat(*pt) | picking_types_inter_company
        for com, pt in picking_types_by_company.items() if com
    }

    def get_picking_type_id(values=None, random=None, **kwargs):
        return random.choice(
            picking_types_by_company[values["company_id"]]).id

    return res + [
        ("picking_type_id", populate.compute(get_picking_type_id))
    ]
def _reminder(self):
    now = datetime.datetime.now()
    cutoff = self.env.context.get(
        'forwardport_updated_before'
    ) or fields.Datetime.to_string(now - DEFAULT_DELTA)
    cutoff_dt = fields.Datetime.from_string(cutoff)

    for source, prs in groupby(
            self.env['runbot_merge.pull_requests'].search([
                # only FP PRs
                ('source_id', '!=', False),
                # active
                ('state', 'not in', ['merged', 'closed']),
                # original merged more than <cutoff> ago
                ('source_id.merge_date', '<', cutoff),
            ], order='source_id, id'),
            lambda p: p.source_id):
        backoff = dateutil.relativedelta.relativedelta(
            days=2 ** source.reminder_backoff_factor)
        prs = list(prs)
        if source.merge_date > (cutoff_dt - backoff):
            continue
        source.reminder_backoff_factor += 1
        self.env['runbot_merge.pull_requests.feedback'].create({
            'repository': source.repository.id,
            'pull_request': source.number,
            'message': "This pull request has forward-port PRs awaiting action (not merged or closed): %s" % ', '.join(
                pr.display_name for pr in sorted(prs, key=lambda p: p.number)
            ),
            'token_field': 'fp_github_token',
        })
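
# A worked sketch of the backoff above: a source is only reminded again once
# its merge_date is older than cutoff minus 2**reminder_backoff_factor days,
# and the factor is bumped on every reminder, so successive reminders are
# spaced roughly 1, 2, 4, 8, ... days apart.
from datetime import timedelta
waits = [timedelta(days=2 ** factor) for factor in range(4)]
# [timedelta(days=1), timedelta(days=2), timedelta(days=4), timedelta(days=8)]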
def _populate_factories(self):
    company_ids = self.env.registry.populated_models['res.company'][:COMPANY_NB_WITH_STOCK]
    valid_locations = self.env['stock.location'].search([
        ('company_id', 'in', company_ids),
        ('usage', 'in', ['internal', 'transit'])])
    locations_by_company = dict(
        groupby(valid_locations, key=lambda loc: loc.company_id.id))
    locations_by_company = {
        company_id: self.env['stock.location'].concat(*locations)
        for company_id, locations in locations_by_company.items()
    }

    products = self.env['product.product'].browse(
        self.env.registry.populated_models['product.product']
    ).filtered(lambda p: p.type == 'product')

    def get_locations_ids(values, counter, random):
        location_ids_company = locations_by_company[values['company_id']]
        # We set locations to ease the creation of stock.inventory.line.
        # The sample should be large enough to keep generator_product_loc_dict
        # from running empty.
        return random.sample(
            location_ids_company.ids, int(len(location_ids_company.ids) * 0.5))

    def get_product_ids(values, counter, random):
        # We set products to ease the creation of stock.inventory.line.
        # The sample should be large enough to keep generator_product_loc_dict
        # from running empty.
        return random.sample(products.ids, int(len(products.ids) * 0.5))

    return [
        ('name', populate.constant("Inventory-Pop-{counter}")),
        ('company_id', populate.iterate(company_ids)),
        ('location_ids', populate.compute(get_locations_ids)),
        ('product_ids', populate.compute(get_product_ids)),
    ]
def create(self, vals_list):
    """ create(vals_list) -> records

    Creates new records for the model.

    The new records are initialized using the values from the list of dicts
    ``vals_list``, and if necessary those from :meth:`~.default_get`.

    :param list vals_list:
        values for the model's fields, as a list of dictionaries::

            [{'field_name': field_value, ...}, ...]

        For backward compatibility, ``vals_list`` may be a dictionary.
        It is treated as a singleton list ``[vals]``, and a single record
        is returned.

        see :meth:`~.write` for details

    :return: the created records
    :raise AccessError: * if user has no create rights on the requested object
                        * if user tries to bypass access rules for create on the requested object
    :raise ValidationError: if user tries to enter invalid value for a field that is not in selection
    :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
    """
    if not vals_list:
        return self.browse()

    self = self.browse()
    self.check_access_rights('create')
    audit_hash = self.check_multiple_audit_hash(vals_list)

    bad_names = {'id', 'parent_path'}
    if self._log_access:
        # the superuser can set log_access fields while loading registry
        if not (self.env.uid == SUPERUSER_ID and not self.pool.ready) and not audit_hash:
            bad_names.update(LOG_ACCESS_COLUMNS)

    # classify fields for each record
    data_list = []
    inversed_fields = set()

    for vals in vals_list:
        # add missing defaults
        vals = self._add_missing_default_values(vals)
        if 'hashed_value' in vals:
            del vals['hashed_value']

        # distribute fields into sets for various purposes
        data = {}
        data['stored'] = stored = {}
        data['inversed'] = inversed = {}
        data['inherited'] = inherited = defaultdict(dict)
        data['protected'] = protected = set()
        data['audit_hash'] = audit_hash
        for key, val in vals.items():
            if key in bad_names:
                continue
            field = self._fields.get(key)
            if not field:
                raise ValueError("Invalid field %r on model %r" % (key, self._name))
            if field.company_dependent:
                irprop_def = self.env['ir.property'].get(key, self._name)
                cached_def = field.convert_to_cache(irprop_def, self)
                cached_val = field.convert_to_cache(val, self)
                if cached_val == cached_def:
                    # val is the same as the default value defined in
                    # 'ir.property'; by design, 'ir.property' will not
                    # create entries specific to these records; skipping the
                    # field inverse saves 4 SQL queries
                    continue
            if field.store:
                stored[key] = val
            if field.inherited:
                inherited[field.related_field.model_name][key] = val
            elif field.inverse:
                inversed[key] = val
                inversed_fields.add(field)
            # protect non-readonly computed fields against (re)computation
            if field.compute and not field.readonly:
                protected.update(self._field_computed.get(field, [field]))

        data_list.append(data)

    # create or update parent records
    for model_name, parent_name in self._inherits.items():
        parent_data_list = []
        for data in data_list:
            if not data['stored'].get(parent_name):
                parent_data_list.append(data)
            elif data['inherited'][model_name]:
                parent = self.env[model_name].browse(data['stored'][parent_name])
                parent.write(data['inherited'][model_name])
        if parent_data_list:
            parents = self.env[model_name].create([
                data['inherited'][model_name] for data in parent_data_list
            ])
            for parent, data in zip(parents, parent_data_list):
                data['stored'][parent_name] = parent.id

    # create records with stored fields
    records = self._create(data_list)

    # protect fields being written against recomputation
    protected = [(data['protected'], data['record']) for data in data_list]
    with self.env.protecting(protected):
        # group fields by inverse method (to call it once), and order groups
        # by dependence (in case they depend on each other)
        field_groups = (fields for _inv, fields in groupby(
            inversed_fields, attrgetter('inverse')))
        for fields in field_groups:
            # determine which records to inverse for those fields
            inv_names = {field.name for field in fields}
            rec_vals = [
                (data['record'], {
                    name: data['inversed'][name]
                    for name in inv_names if name in data['inversed']
                })
                for data in data_list
                if not inv_names.isdisjoint(data['inversed'])
            ]

            # If a field is not stored, its inverse method will probably
            # write on its dependencies, which will invalidate the field on
            # all records. We therefore inverse the field record by record.
            if all(field.store or field.company_dependent for field in fields):
                batches = [rec_vals]
            else:
                batches = [[rec_data] for rec_data in rec_vals]

            for batch in batches:
                for record, vals in batch:
                    record._update_cache(vals)
                batch_recs = self.concat(*(record for record, vals in batch))
                fields[0].determine_inverse(batch_recs)

    # check Python constraints for non-stored inversed fields
    for data in data_list:
        data['record']._validate_fields(set(data['inversed']) - set(data['stored']))

    if self._check_company_auto:
        records._check_company()

    return records
def _populate(self, size):
    locations = super()._populate(size)

    random = populate.Random('stock_location_sample')
    locations_sample = self.browse(random.sample(locations.ids, len(locations.ids)))

    company_ids = self.env.registry.populated_models['res.company'][:COMPANY_NB_WITH_STOCK]
    warehouses = self.env['stock.warehouse'].browse(
        self.env.registry.populated_models['stock.warehouse'])

    warehouse_by_company = dict(groupby(warehouses, lambda ware: ware.company_id.id))
    loc_ids_by_company = dict(groupby(locations_sample, lambda loc: loc.company_id.id))

    scenario_index = 0
    for company_id in company_ids:
        # Reverse the order to use pop()
        loc_ids_by_company[company_id] = loc_ids_by_company[company_id][::-1]
        warehouses = warehouse_by_company[company_id]

        nb_loc_by_warehouse = math.ceil(
            len(loc_ids_by_company[company_id]) / len(warehouses))

        for warehouse in warehouses:
            # Manage the ceil: the last warehouse can have fewer locations than the others.
            nb_loc_to_take = min(nb_loc_by_warehouse, len(loc_ids_by_company[company_id]))
            if scenario_index % 3 == 0:
                # Scenario 1: the remaining companies keep a "normal" tree, at most 4 levels deep
                depth = 3  # Force the number of levels to 3 (root doesn't count)
            elif scenario_index % 3 == 1:
                # Scenario 2: one company with a very shallow location tree (all children of root)
                depth = 1
            else:
                # Scenario 3: one company with a deep location tree
                depth = 20
            # number of locations to put on each level
            nb_by_level = int(math.log(nb_loc_to_take, depth)) + 1 if depth > 1 else nb_loc_to_take

            _logger.info(
                "Create locations (%d) tree for one warehouse - depth : %d, width : %d"
                % (nb_loc_to_take, depth, nb_by_level))

            # Root is the lot_stock_id of the warehouse
            root = warehouse.lot_stock_id

            def link_next_locations(parent, level):
                if level < depth:
                    children = []
                    nonlocal nb_loc_to_take
                    nb_loc = min(nb_by_level, nb_loc_to_take)
                    nb_loc_to_take -= nb_loc
                    for i in range(nb_loc):
                        children.append(loc_ids_by_company[company_id].pop())
                    child_locations = self.env['stock.location'].concat(*children)
                    child_locations.location_id = parent
                    for child in child_locations:
                        link_next_locations(child, level + 1)

            link_next_locations(root, 0)
            scenario_index += 1

    # Change the usage of 20 % of the non-leaf locations to 'view' (instead of 'internal')
    to_views = locations_sample.filtered_domain([('child_ids', '!=', [])]).ids
    random = populate.Random('stock_location_views')
    self.browse(random.sample(to_views, int(len(to_views) * 0.2))).usage = 'view'
    return locations
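
# A quick sanity check of the width formula above (standalone arithmetic, no
# Odoo env needed): for 100 locations spread over a depth-3 tree,
# int(log(100, 3)) + 1 evaluates to 5 locations per parent per level.
import math
assert int(math.log(100, 3)) + 1 == 5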
def _run_buy(self, procurements):
    procurements_by_po_domain = defaultdict(list)
    errors = []
    for procurement, rule in procurements:
        # Get the schedule date in order to find a valid seller
        procurement_date_planned = fields.Datetime.from_string(
            procurement.values['date_planned'])
        schedule_date = (procurement_date_planned -
                         relativedelta(days=procurement.company_id.po_lead))

        supplier = False
        if procurement.values.get('supplierinfo_id'):
            supplier = procurement.values['supplierinfo_id']
        else:
            supplier = procurement.product_id.with_company(
                procurement.company_id.id
            )._select_seller(
                partner_id=procurement.values.get("supplierinfo_name"),
                quantity=procurement.product_qty,
                date=schedule_date.date(),
                uom_id=procurement.product_uom)

        # Fall back on a supplier for which no price may be defined. Not ideal,
        # but better than blocking the user.
        supplier = supplier or procurement.product_id._prepare_sellers(False).filtered(
            lambda s: not s.company_id or s.company_id == procurement.company_id
        )[:1]

        if not supplier:
            msg = _(
                'There is no matching vendor price to generate the purchase order for product %s (no vendor defined, minimum quantity not reached, dates not valid, ...). Go on the product form and complete the list of vendors.'
            ) % (procurement.product_id.display_name)
            errors.append((procurement, msg))

        partner = supplier.name
        # we put `supplier_info` in values for extensibility purposes
        procurement.values['supplier'] = supplier
        procurement.values['propagate_cancel'] = rule.propagate_cancel

        domain = rule._make_po_get_domain(
            procurement.company_id, procurement.values, partner)
        procurements_by_po_domain[domain].append((procurement, rule))

    if errors:
        raise ProcurementException(errors)

    for domain, procurements_rules in procurements_by_po_domain.items():
        # Get the procurements for the current domain.
        # Get the rules for the current domain. Their only use is to create
        # the PO if it does not exist.
        procurements, rules = zip(*procurements_rules)

        # Get the set of procurement origins for the current domain.
        origins = set([p.origin for p in procurements])
        # Check if a PO exists for the current domain.
        po = self.env['purchase.order'].sudo().search(
            [dom for dom in domain], limit=1)
        company_id = procurements[0].company_id
        if not po:
            # We need a rule to generate the PO. However the rules all
            # generate the same domain for the PO, and the
            # _prepare_purchase_order method should only use the rules'
            # common fields.
            vals = rules[0]._prepare_purchase_order(
                company_id, origins, [p.values for p in procurements])
            # The company_id is the same for all procurements since
            # _make_po_get_domain adds the company to the domain.
            # We use SUPERUSER_ID since we don't want the current user to be
            # a follower of the PO. Indeed, the current user may be a user
            # without access to Purchase, or even be a portal user.
            po = self.env['purchase.order'].with_company(
                company_id).with_user(SUPERUSER_ID).create(vals)
        else:
            # If a purchase order is found, adapt its `origin` field.
            if po.origin:
                missing_origins = origins - set(po.origin.split(', '))
                if missing_origins:
                    po.write({
                        'origin': po.origin + ', ' + ', '.join(missing_origins)
                    })
            else:
                po.write({'origin': ', '.join(origins)})

        procurements_to_merge = self._get_procurements_to_merge(procurements)
        procurements = self._merge_procurements(procurements_to_merge)

        po_lines_by_product = {}
        grouped_po_lines = groupby(
            po.order_line.filtered(
                lambda l: not l.display_type
                and l.product_uom == l.product_id.uom_po_id),
            key=lambda l: l.product_id.id)
        for product, po_lines in grouped_po_lines:
            po_lines_by_product[product] = self.env['purchase.order.line'].concat(*po_lines)
        po_line_values = []
        for procurement in procurements:
            po_lines = po_lines_by_product.get(
                procurement.product_id.id, self.env['purchase.order.line'])
            po_line = po_lines._find_candidate(*procurement)

            if po_line:
                # The procurement can be merged into an existing line:
                # directly write the new values on it.
                vals = self._update_purchase_order_line(
                    procurement.product_id, procurement.product_qty,
                    procurement.product_uom, company_id,
                    procurement.values, po_line)
                po_line.write(vals)
            else:
                # No PO line exists for the current procurement: generate the
                # create values for it and add them to a list in order to
                # create the lines in batch.
                partner = procurement.values['supplier'].name
                po_line_values.append(
                    self.env['purchase.order.line']._prepare_purchase_order_line_from_procurement(
                        procurement.product_id, procurement.product_qty,
                        procurement.product_uom, procurement.company_id,
                        procurement.values, po))
        self.env['purchase.order.line'].sudo().create(po_line_values)
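
# The grouping above, restated on plain data: build a product -> lines lookup
# once, then probe it per procurement instead of filtering po.order_line each
# time. A hypothetical sketch with (product_id, qty) tuples standing in for
# purchase order lines; reuses the aggregate_groupby sketch from the top of
# this file.
order_lines = [(42, 5.0), (7, 1.0), (42, 2.0)]
lines_by_product = {product: lines for product, lines in
                    aggregate_groupby(order_lines, key=lambda l: l[0])}
# lines_by_product[42] -> [(42, 5.0), (42, 2.0)]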
def _populate_factories(self):
    company_ids = self.env.registry.populated_models['res.company'][:COMPANY_NB_WITH_STOCK]
    picking_types_ids = self.env['stock.picking.type'].browse(
        self.env.registry.populated_models['stock.picking.type']).ids
    now = datetime.now()

    cross_company_locations = self.env['stock.location'].search([
        ('company_id', '=', False)])
    locations_companies = self.env['stock.location'].search([
        ('company_id', 'in', company_ids)])

    all_partners = self.env['res.partner'].browse(
        self.env.registry.populated_models['res.partner'])
    partners_by_company = dict(
        groupby(all_partners, key=lambda par: par.company_id.id))
    partners_inter_company = self.env['res.partner'].concat(
        *partners_by_company.get(False, []))
    partners_by_company = {
        com: self.env['res.partner'].concat(*partners) | partners_inter_company
        for com, partners in partners_by_company.items() if com
    }

    def get_until_date(values, counter, random):
        # 95.45 % of pickings are scheduled between (-10, 30) days from now,
        # following a Gauss distribution (only ~15 % of pickings are late)
        delta = random.gauss(10, 10)
        return now + timedelta(days=delta)

    def get_partner_id(values, counter, random):
        picking_type = self.env['stock.picking.type'].browse(
            values['picking_type_id'])
        company = picking_type.company_id
        return partners_by_company.get(company.id) and random.choice(
            partners_by_company[company.id]).id or False

    def _compute_locations(iterator, field_name, model_name):
        locations_out = cross_company_locations.filtered_domain([
            ('usage', '=', 'customer')])
        locations_in = cross_company_locations.filtered_domain([
            ('usage', '=', 'supplier')])
        locations_internal = locations_companies.filtered_domain([
            ('usage', '=', 'internal')])
        locations_by_company = dict(
            groupby(locations_companies, key=lambda loc: loc.company_id.id))
        locations_by_company = {
            com: self.env['stock.location'].concat(*locs)
            for com, locs in locations_by_company.items()
        }

        random = populate.Random('_compute_locations')
        for values in iterator:
            picking_type = self.env['stock.picking.type'].browse(
                values['picking_type_id'])
            source_loc = picking_type.default_location_src_id
            dest_loc = picking_type.default_location_dest_id
            locations_company = locations_by_company[picking_type.company_id.id]
            if not source_loc or random.random() > 0.8:
                if picking_type.code == 'incoming':
                    source_loc = random.choice(locations_in)
                elif picking_type.code == 'outgoing':
                    source_loc = random.choice(locations_internal & locations_company)
                elif picking_type.code == 'internal':
                    source_loc = random.choice(locations_internal & locations_company)
            if not dest_loc or random.random() > 0.8:
                if picking_type.code == 'incoming':
                    dest_loc = random.choice(locations_internal & locations_company)
                elif picking_type.code == 'outgoing':
                    dest_loc = random.choice(locations_out)
                elif picking_type.code == 'internal':
                    # Needs at least 2 internal locations per company so the
                    # destination can differ from the source
                    dest_loc = random.choice(
                        (locations_internal & locations_company) - source_loc)
            values['location_id'] = source_loc.id
            values['location_dest_id'] = dest_loc.id
            yield values

    return [
        ('priority', populate.randomize(['1', '0'], [0.05, 0.95])),
        ('scheduled_date', populate.compute(get_until_date)),
        ('picking_type_id', populate.iterate(picking_types_ids)),
        ('partner_id', populate.compute(get_partner_id)),
        ('_compute_locations', _compute_locations),
    ]