def _eval_quant_domain(self, quants, domain):
    """Return the subset of ``quants`` that also satisfies ``domain``.

    Builds an id-membership clause from the given recordset, ANDs it with the
    caller-supplied domain and re-searches ``stock.quant`` so that the extra
    domain is evaluated by the ORM/SQL layer.
    """
    membership_clause = [("id", "in", quants.ids)]
    combined_domain = expression.AND([membership_clause, domain])
    return self.env["stock.quant"].search(combined_domain)
def retrieve_dashboard(self):
    """Collect the helpdesk dashboard KPIs for the current user.

    Returns a dict with:
      - per-bucket stats for the user's *open* tickets: ``my_all``,
        ``my_high`` (priority '2'), ``my_urgent`` (priority '3'), each with
        count / average open hours / SLA-failed count;
      - success-rate and rating stats for tickets *closed* today and over the
        last 7 days;
      - the user's personal targets and UI flags (``rating_enable``,
        ``success_rate_enable``, ``show_demo``).
    """
    domain = [('user_id', '=', self.env.uid)]
    group_fields = ['priority', 'create_date', 'stage_id', 'close_hours']
    list_fields = ['priority', 'create_date', 'stage_id', 'close_hours']
    #TODO: remove SLA calculations if user_uses_sla is false.
    # SLA stats are only shown when the user belongs to the SLA group AND at
    # least one team they are member of (or an open-membership team) uses SLA.
    user_uses_sla = self.user_has_groups('helpdesk.group_use_sla') and\
        bool(self.env['helpdesk.team'].search([('use_sla', '=', True), '|', ('member_ids', 'in', self._uid), ('member_ids', '=', False)]))
    if user_uses_sla:
        group_fields.insert(1, 'sla_deadline:year')
        group_fields.insert(2, 'sla_deadline:hour')
        group_fields.insert(3, 'sla_reached_late')
        list_fields.insert(1, 'sla_deadline')
        list_fields.insert(2, 'sla_reached_late')

    HelpdeskTicket = self.env['helpdesk.ticket']
    # open tickets of the current user, with just the fields the closures below need
    tickets = HelpdeskTicket.search_read(expression.AND([domain, [('stage_id.is_close', '=', False)]]), ['sla_deadline', 'open_hours', 'sla_reached_late', 'priority'])

    result = {
        'helpdesk_target_closed': self.env.user.helpdesk_target_closed,
        'helpdesk_target_rating': self.env.user.helpdesk_target_rating,
        'helpdesk_target_success': self.env.user.helpdesk_target_success,
        'today': {'count': 0, 'rating': 0, 'success': 0},
        '7days': {'count': 0, 'rating': 0, 'success': 0},
        'my_all': {'count': 0, 'hours': 0, 'failed': 0},
        'my_high': {'count': 0, 'hours': 0, 'failed': 0},
        'my_urgent': {'count': 0, 'hours': 0, 'failed': 0},
        'show_demo': not bool(HelpdeskTicket.search([], limit=1)),
        'rating_enable': False,
        'success_rate_enable': user_uses_sla
    }

    def _is_sla_failed(data):
        # A ticket fails its SLA when the deadline has passed or it was
        # already flagged as reached late.
        deadline = data.get('sla_deadline')
        sla_deadline = fields.Datetime.now() > deadline if deadline else False
        return sla_deadline or data.get('sla_reached_late')

    def add_to(ticket, key="my_all"):
        # Accumulate one open ticket into the bucket ``key`` of ``result``.
        result[key]['count'] += 1
        result[key]['hours'] += ticket['open_hours']
        if _is_sla_failed(ticket):
            result[key]['failed'] += 1

    for ticket in tickets:
        add_to(ticket, 'my_all')
        if ticket['priority'] == '2':
            add_to(ticket, 'my_high')
        if ticket['priority'] == '3':
            add_to(ticket, 'my_urgent')

    # tickets closed today
    dt = fields.Date.today()
    tickets = HelpdeskTicket.read_group(domain + [('stage_id.is_close', '=', True), ('close_date', '>=', dt)], list_fields, group_fields, lazy=False)
    for ticket in tickets:
        result['today']['count'] += ticket['__count']
        if not _is_sla_failed(ticket):
            result['today']['success'] += ticket['__count']

    # tickets closed over the last 7 days (6 days + today)
    dt = fields.Datetime.to_string((datetime.date.today() - relativedelta.relativedelta(days=6)))
    tickets = HelpdeskTicket.read_group(domain + [('stage_id.is_close', '=', True), ('close_date', '>=', dt)], list_fields, group_fields, lazy=False)
    for ticket in tickets:
        result['7days']['count'] += ticket['__count']
        if not _is_sla_failed(ticket):
            result['7days']['success'] += ticket['__count']

    # turn raw counters into percentages / averages (``or 1`` guards division by zero)
    result['today']['success'] = (result['today']['success'] * 100) / (result['today']['count'] or 1)
    result['7days']['success'] = (result['7days']['success'] * 100) / (result['7days']['count'] or 1)
    result['my_all']['hours'] = round(result['my_all']['hours'] / (result['my_all']['count'] or 1), 2)
    result['my_high']['hours'] = round(result['my_high']['hours'] / (result['my_high']['count'] or 1), 2)
    result['my_urgent']['hours'] = round(result['my_urgent']['hours'] / (result['my_urgent']['count'] or 1), 2)

    # ratings are only computed when at least one relevant team uses them
    if self.env['helpdesk.team'].search([('use_rating', '=', True), '|', ('member_ids', 'in', self._uid), ('member_ids', '=', False)]):
        result['rating_enable'] = True
        # rating of today
        domain = [('user_id', '=', self.env.uid)]
        dt = fields.Date.today()
        tickets = self.env['helpdesk.ticket'].search(domain + [('stage_id.is_close', '=', True), ('close_date', '>=', dt)])
        activity = tickets.rating_get_grades()
        total_rating = self._compute_activity_avg(activity)
        total_activity_values = sum(activity.values())
        # NOTE(review): rounding happens before the *5 scaling, so the final
        # value may carry more than 2 decimals — presumably intentional.
        team_satisfaction = round((total_rating / total_activity_values if total_activity_values else 0), 2) * 5
        if team_satisfaction:
            result['today']['rating'] = team_satisfaction

        # rating of last 7 days (6 days + today)
        dt = fields.Datetime.to_string((datetime.date.today() - relativedelta.relativedelta(days=6)))
        tickets = self.env['helpdesk.ticket'].search(domain + [('stage_id.is_close', '=', True), ('close_date', '>=', dt)])
        activity = tickets.rating_get_grades()
        total_rating = self._compute_activity_avg(activity)
        total_activity_values = sum(activity.values())
        team_satisfaction_7days = round((total_rating / total_activity_values if total_activity_values else 0), 2) * 5
        if team_satisfaction_7days:
            result['7days']['rating'] = team_satisfaction_7days
    return result
def _run_conflict_inventory_tasks(self, company_id=False):
    """ Updates/creates/deletes conflict inventories. Conflict inventories include:
    - negative quantities values, created per warehouse
    - duplicated serial numbers, created per company
    Note that an in progress inventory will prevent an update/unlink when its
    corresponding values are out of date.

    :param company_id: optional company id; when given (manual trigger) the
        checks are restricted to that company, otherwise all companies with a
        company_id set are processed.
    """
    company_domain = [('company_id', '!=', False)]
    if company_id:
        # if manually triggered => only apply to user's companies
        company_domain = [('company_id', '=', company_id)]
    # negative quantity check
    inventory_vals_to_create = []
    updated_invs = self.env['stock.inventory']  # drafts refreshed in this run (kept at cleanup)
    existing_conflict_invs = self.search(
        expression.AND([[('state', 'in', ['draft', 'confirm']),
                         ('is_conflict_inventory', '=', True)], company_domain]))
    # quants that went negative in internal/transit locations
    neg_quants = self.env['stock.quant'].search(
        expression.AND([[('quantity', '<', 0.0),
                         ('location_id.usage', 'in', ['internal', 'transit'])],
                        company_domain]))
    company_ids = neg_quants.mapped('company_id')
    warehouse_ids = self.env['stock.warehouse'].search([
        ('company_id', 'in', company_ids.ids)
    ])
    warehouse_locations = self.env['stock.location'].search([
        ('id', 'child_of', warehouse_ids.mapped('view_location_id').ids)
    ])
    # map each company to its warehouses, and each warehouse to the locations
    # under its view location (matched via the materialized parent_path)
    company_to_warehouses = defaultdict(
        lambda: self.env['stock.warehouse'])
    warehouse_to_locations = defaultdict(
        lambda: self.env['stock.location'])
    for warehouse in warehouse_ids:
        company_to_warehouses[warehouse.company_id] |= warehouse
        warehouse_to_locations[warehouse] = warehouse_locations.filtered(
            lambda l: any(
                int(location_id) == warehouse.view_location_id.id
                for location_id in l.parent_path.split('/')[:-2]))
    for company_id in company_ids:
        # separate auto-generated inventories by warehouse
        for warehouse in company_to_warehouses[company_id]:
            # avoid conflicting inventories! Wait until the next time this is
            # run after the previous neg qty inventory is completed
            if existing_conflict_invs.filtered(
                    lambda i: i.state == 'confirm' and i.company_id ==
                    company_id and i.location_ids & warehouse_to_locations[
                        warehouse]):
                continue
            warehouse_quants = neg_quants.filtered(
                lambda q: q.location_id in warehouse_to_locations[warehouse])
            if warehouse_quants:
                draft_inv = existing_conflict_invs.filtered(
                    lambda i: i.state == 'draft' and i.company_id ==
                    company_id and i.location_ids & warehouse_to_locations[
                        warehouse])
                if draft_inv:
                    # only write in in first draft in case there are
                    # duplicates due to function being called while its already
                    draft_inv[0].write({
                        'product_ids': warehouse_quants.mapped('product_id')
                    })
                    updated_invs |= draft_inv[0]
                else:
                    inventory_vals_to_create.append({
                        'name': "Negative Quantity Inventory: " + warehouse.name,
                        'company_id': company_id.id,
                        'product_ids': warehouse_quants.mapped('product_id'),
                        'is_conflict_inventory': True,
                        'location_ids': warehouse.view_location_id.child_ids
                    })
    # conflicting SN check: a serial-tracked lot present (qty != 0) in more
    # than one quant is a duplicate serial number
    domain = expression.AND([[('location_id.usage', 'in',
                               ['internal', 'transit']),
                              ('lot_id', '!=', False),
                              ('product_id.tracking', '=', 'serial'),
                              ('quantity', '!=', 0.0)], company_domain])
    quants = self.env['stock.quant'].read_group(
        domain, ['lot_id', 'company_id', 'product_id'],
        ['lot_id', 'company_id', 'product_id'], lazy=False)
    # company id -> ([duplicate lot ids], [their product ids])
    company_to_sn_conflicts = defaultdict(lambda: ([], []))
    for quant in quants:
        if quant['__count'] > 1:
            company_to_sn_conflicts[quant['company_id'][0]][0].append(
                quant['lot_id'][0])
            company_to_sn_conflicts[quant['company_id'][0]][1].append(
                quant['product_id'][0])
    for company_id, (lot_ids, product_ids) in company_to_sn_conflicts.items():
        # avoid conflicting inventories! Wait until the next time this is run
        # after the previous conflicting SN inventory is completed
        if existing_conflict_invs.filtered(
                lambda i: i.state == 'confirm' and i.lot_ids and
                i.company_id.id == company_id):
            continue
        draft_inv = existing_conflict_invs.filtered(
            lambda i: i.state == 'draft' and i.lot_ids and i.company_id.id ==
            company_id)
        if draft_inv:
            draft_inv.write({
                'lot_ids': lot_ids,
                'product_ids': product_ids
            })
            updated_invs |= draft_inv
        else:
            inventory_vals_to_create.append({
                'name': "Duplicate SN Inventory",
                'company_id': company_id,
                'lot_ids': lot_ids,
                'product_ids': product_ids,
                'is_conflict_inventory': True
            })
    self.create(inventory_vals_to_create)
    # remove all obsolete conflict inventories (drafts not refreshed this run)
    (existing_conflict_invs.filtered(lambda i: i.state == 'draft') -
     updated_invs).unlink()
# NOTE(review): fragment of a portal sale-order controller — the enclosing
# `def` header is not visible in this chunk; presumably the portal order page
# handler (renders sale.sale_order_portal_template). Verify against the file.
values = {
    'sale_order': order_sudo,
    'message': message,
    'token': access_token,
    'return_url': '/shop/payment/validate',
    'bootstrap_formatting': True,
    'partner_id': order_sudo.partner_id.id,
    'report_type': 'html',
    'action': order_sudo._get_portal_return_action(),
}
if order_sudo.company_id:
    values['res_company'] = order_sudo.company_id
if order_sudo.has_to_be_paid():
    # enabled/test acquirers of the order's company, restricted to the
    # customer's country or country-agnostic acquirers
    domain = expression.AND([
        ['&', ('state', 'in', ['enabled', 'test']), ('company_id', '=', order_sudo.company_id.id)],
        ['|', ('country_ids', '=', False), ('country_ids', 'in', [order_sudo.partner_id.country_id.id])]
    ])
    acquirers = request.env['payment.acquirer'].sudo().search(domain)
    # keep only acquirers that can actually render a payment form
    values['acquirers'] = acquirers.filtered(lambda acq: (acq.payment_flow == 'form' and acq.view_template_id) or (acq.payment_flow == 's2s' and acq.registration_view_template_id))
    values['pms'] = request.env['payment.token'].search([('partner_id', '=', order_sudo.partner_id.id)])
    values['acq_extra_fees'] = acquirers.get_acquirer_extra_fees(order_sudo.amount_total, order_sudo.currency_id, order_sudo.partner_id.country_id.id)
# breadcrumb/pager history: quotations vs confirmed orders
if order_sudo.state in ('draft', 'sent', 'cancel'):
    history = request.session.get('my_quotations_history', [])
else:
    history = request.session.get('my_orders_history', [])
values.update(get_records_pager(history, order_sudo))
return request.render('sale.sale_order_portal_template', values)
def name_search(self, name='', args=None, operator='ilike', limit=100):
    """Search products by name, extended to also match ``cat_no``.

    Lookup order for positive operators: exact ``cat_no``/``default_code``
    match, then ``barcode``, then fuzzy code match merged in Python with a
    separate ``name`` search, then a ``[code]`` pattern extracted from the
    search string, and finally supplier references when a ``partner_id`` is
    in context. Negative operators use a single combined OR-domain instead.

    :return: list of ``(id, display_name)`` pairs (``name_get`` format).
    """
    if not args:
        args = []
    if name:
        positive_operators = ['=', 'ilike', '=ilike', 'like', '=like']
        products = self.env['product.product']
        if operator in positive_operators:
            # exact internal-reference match first (cat_no or default_code)
            products = self.search(
                ['|', ('cat_no', '=', name), ('default_code', '=', name)] + args,
                limit=limit)
            if not products:
                products = self.search([('barcode', '=', name)] + args,
                                       limit=limit)
        if not products and operator not in expression.NEGATIVE_TERM_OPERATORS:
            # Do not merge the 2 next lines into one single search, SQL search performance would be abysmal
            # on a database with thousands of matching products, due to the huge merge+unique needed for the
            # OR operator (and given the fact that the 'name' lookup results come from the ir.translation table
            # Performing a quick memory merge of ids in Python will give much better performance
            products = self.search(args + [
                '|', ('cat_no', operator, name), ('default_code', operator, name)
            ], limit=limit)
            if not limit or len(products) < limit:
                # we may underrun the limit because of dupes in the results, that's fine
                limit2 = (limit - len(products)) if limit else False
                products += self.search(
                    args + [('name', operator, name), ('id', 'not in', products.ids)],
                    limit=limit2)
        elif not products and operator in expression.NEGATIVE_TERM_OPERATORS:
            # negative operators: a record matches only if BOTH its code and
            # its name match (or the code is unset)
            domain = expression.OR([
                ['&', ('default_code', operator, name), ('name', operator, name)],
                ['&', ('default_code', '=', False), ('name', operator, name)],
                ['&', ('cat_no', operator, name), ('name', operator, name)],
                ['&', ('cat_no', '=', False), ('name', operator, name)],
            ])
            domain = expression.AND([args, domain])
            products = self.search(domain, limit=limit)
        if not products and operator in positive_operators:
            # raw string: '\[' is an invalid escape in a normal string and
            # raises SyntaxWarning on recent Pythons
            ptrn = re.compile(r'(\[(.*?)\])')
            res = ptrn.search(name)
            if res:
                products = self.search([
                    '|', ('cat_no', '=', res.group(2)),
                    ('default_code', '=', res.group(2))
                ] + args, limit=limit)
        # still no results, partner in context: search on supplier info as
        # last hope to find something
        if not products and self._context.get('partner_id'):
            suppliers = self.env['product.supplierinfo'].search([
                ('name', '=', self._context.get('partner_id')),
                '|', ('product_code', operator, name),
                ('product_name', operator, name)
            ])
            if suppliers:
                products = self.search(
                    [('product_tmpl_id.seller_ids', 'in', suppliers.ids)],
                    limit=limit)
    else:
        products = self.search(args, limit=limit)
    return products.name_get()
def _get_moves_to_assign_domain(self):
    """Domain for stock moves still awaiting (full) reservation.

    Matches moves that are confirmed or partially available and that have a
    non-zero demand quantity.
    """
    state_clause = [('state', 'in', ['confirmed', 'partially_available'])]
    qty_clause = [('product_uom_qty', '!=', 0.0)]
    return expression.AND([state_clause, qty_clause])
def _name_search(self, name, args=None, operator='ilike', limit=100,
                 name_get_uid=None):
    """Search records by ``code`` first, then by ``name``.

    Positive operators try an exact code match, then a fuzzy code match
    merged in Python with a separate name search (two queries on purpose —
    see the comment below). Negative operators use one combined OR-domain.
    A trailing ``[code]`` pattern in the search string is also honoured.

    :return: list of ``(id, display_name)`` pairs (``name_get`` format).
    """
    if not args:
        args = []
    if name:
        positive_operators = ['=', 'ilike', '=ilike', 'like', '=like']
        category_ids = []
        if operator in positive_operators:
            category_ids = self._search([('code', '=', name)] + args,
                                        limit=limit,
                                        access_rights_uid=name_get_uid)
        if not category_ids and operator not in expression.NEGATIVE_TERM_OPERATORS:
            # Do not merge the 2 next lines into one single search, SQL
            # search performance would be abysmal on a database with
            # thousands of matching products, due to the huge merge+unique
            # needed for the OR operator (and given the fact that the
            # 'name' lookup results come from the ir.translation table
            # Performing a quick memory merge of ids in Python will give
            # much better performance
            category_ids = self._search(args + [('code', operator, name)],
                                        limit=limit)
            if not limit or len(category_ids) < limit:
                # we may underrun the limit because of dupes in the
                # results, that's fine
                limit2 = (limit - len(category_ids)) if limit else False
                product2_ids = self._search(
                    args + [('name', operator, name),
                            ('id', 'not in', category_ids)],
                    limit=limit2,
                    access_rights_uid=name_get_uid)
                category_ids.extend(product2_ids)
        elif not category_ids and operator in expression.NEGATIVE_TERM_OPERATORS:
            domain = expression.OR([
                ['&', ('code', operator, name), ('name', operator, name)],
                ['&', ('code', '=', False), ('name', operator, name)],
            ])
            domain = expression.AND([args, domain])
            category_ids = self._search(domain, limit=limit,
                                        access_rights_uid=name_get_uid)
        if not category_ids and operator in positive_operators:
            # raw string: '\[' is an invalid escape in a normal string and
            # raises SyntaxWarning on recent Pythons
            ptrn = re.compile(r'(\[(.*?)\])')
            res = ptrn.search(name)
            if res:
                category_ids = self._search(
                    [('code', '=', res.group(2))] + args, limit=limit,
                    access_rights_uid=name_get_uid)
    else:
        category_ids = self._search(args, limit=limit,
                                    access_rights_uid=name_get_uid)
    return self.browse(category_ids).name_get()
def _event_exhibitors_get_values(self, event, **searches):
    """Build the rendering values for an event's exhibitors page.

    ``searches`` may contain 'search' (free text), 'countries' and
    'sponsorships' filters; matching sponsors are fetched with sudo and
    grouped by sponsor type for display.
    """
    # init and process search terms
    searches.setdefault('search', '')
    searches.setdefault('countries', '')
    searches.setdefault('sponsorships', '')
    search_domain_base = self._get_event_sponsors_base_domain(event)
    search_domain = search_domain_base
    # search on content
    if searches.get('search'):
        search_domain = expression.AND([
            search_domain,
            ['|', ('name', 'ilike', searches['search']),
             ('website_description', 'ilike', searches['search'])]
        ])
    # search on countries
    search_countries = self._get_search_countries(searches['countries'])
    if search_countries:
        search_domain = expression.AND([
            search_domain,
            [('partner_id.country_id', 'in', search_countries.ids)]
        ])
    # search on sponsor types
    search_sponsorships = self._get_search_sponsorships(
        searches['sponsorships'])
    if search_sponsorships:
        search_domain = expression.AND([
            search_domain,
            [('sponsor_type_id', 'in', search_sponsorships.ids)]
        ])

    # fetch data to display; use sudo to allow reading partner info, be sure domain is correct
    event = event.with_context(tz=event.date_tz or 'UTC')
    sponsors = request.env['event.sponsor'].sudo().search(search_domain)
    # unfiltered set is used to populate the filter dropdowns
    sponsors_all = request.env['event.sponsor'].sudo().search(
        search_domain_base)
    sponsor_types = sponsors_all.mapped('sponsor_type_id')
    sponsor_countries = sponsors_all.mapped(
        'partner_id.country_id').sorted('name')
    # organize sponsors into categories to help display
    sponsor_categories = dict()
    for sponsor in sponsors:
        if not sponsor_categories.get(sponsor.sponsor_type_id):
            sponsor_categories[sponsor.sponsor_type_id] = request.env[
                'event.sponsor'].sudo()
        sponsor_categories[sponsor.sponsor_type_id] |= sponsor
    # turn the mapping into a template-friendly list; sample() shuffles the
    # sponsors inside each category (random display order)
    sponsor_categories = [
        dict({
            'sponsorship': sponsor_category,
            'sponsors': sample(sponsors, len(sponsors)),
        }) for sponsor_category, sponsors in sponsor_categories.items()
    ]

    # return rendering values
    return {
        # event information
        'event': event,
        'main_object': event,
        'sponsor_categories': sponsor_categories,
        'hide_sponsors': True,
        # search information
        'searches': searches,
        'search_key': searches['search'],
        'search_countries': search_countries,
        'search_sponsorships': search_sponsorships,
        'sponsor_types': sponsor_types,
        'sponsor_countries': sponsor_countries,
        # environment
        'hostname': request.httprequest.host.split(':')[0],
        'user_event_manager': request.env.user.has_group('event.group_event_manager'),
    }
def _rating_domain(self):
    """ Only take the published rating into account to compute avg and count """
    parent_domain = super(Channel, self)._rating_domain()
    published_clause = [('website_published', '=', True)]
    return expression.AND([parent_domain, published_clause])
def _get_invoice_basis_domain(self, domain_params):
    """Build the invoice-basis domain.

    Matches records of the given company whose tax date OR accounting date
    falls within the ranges described by ``domain_params``.
    """
    company_clause = [('company_id', '=', domain_params['company_id'])]
    date_clause = expression.OR([
        self._get_tax_date_domain(domain_params),
        self._get_accounting_date_domain(domain_params),
    ])
    return expression.AND([company_clause, date_clause])
def get_or_create_object_sosanh(
        self,
        class_name,
        search_dict,
        write_dict=None,
        is_must_update=False,
        noti_dict=None,
        inactive_include_search=False,
        model_dict=None,
        key_tram=None,
        exist_val=False,
        setting=None,
        check_file=False,
        is_search=True,
        is_create=True,
        is_write=True,
        searched_object=False):
    """Search for, optionally create and optionally update a record.

    :param class_name: target model name (e.g. 'res.partner')
    :param search_dict: field -> value pairs used to build the search domain
        (and as creation values when the record is missing)
    :param write_dict: extra field -> value pairs written/created
    :param is_must_update: when falsy, a field is only written if its value
        actually differs from the record's current value
    :param noti_dict: per-model statistics accumulator, mutated in place
        (search / search_yes / search_no / create / update / skipupdate)
    :param inactive_include_search: include archived records in the search
    :param model_dict: per-model metadata ('fields' attrs, 'search_func',
        'only_get', 'print_write_dict_new')
    :param key_tram: kept for backward compatibility; unused here
    :param exist_val: pre-fetched record to update instead of the search hit
    :param setting: global flags ('allow_write',
        'allow_write_from_False_to_not_false')
    :param check_file: when True, a None search value aborts quietly instead
        of raising
    :param is_search/is_create/is_write: enable each phase
    :param searched_object: pre-seeded search result
    :return: the found/created record, or None when searching is disabled or
        aborted

    FIX: the dict parameters previously defaulted to a shared mutable ``{}``;
    since ``noti_dict`` is mutated (``setdefault``), statistics leaked across
    unrelated calls. They now default to None and are initialised per call —
    callers that pass their own dicts are unaffected.
    """
    write_dict = {} if write_dict is None else write_dict
    noti_dict = {} if noti_dict is None else noti_dict
    model_dict = {} if model_dict is None else model_dict
    setting = {} if setting is None else setting

    search_dict_new = {}
    write_dict_new = {}
    this_model_noti_dict = noti_dict.setdefault(class_name, {})
    if is_search:
        this_model_noti_dict['search'] = this_model_noti_dict.get('search', 0) + 1
        search_func = model_dict.get('search_func')
        if search_func:
            # custom search hook supplied by the model metadata
            searched_object = search_func(model_dict, self, exist_val, setting)
            if not searched_object and is_create:
                # prepare creation values, applying field-name translation
                for f_name in search_dict:
                    try:
                        field_attr = model_dict['fields'][f_name]
                    except Exception:
                        field_attr = {}
                    val = search_dict[f_name]
                    f_name = get_key(field_attr, 'transfer_name') or f_name
                    search_dict_new[f_name] = val
        else:
            if inactive_include_search:
                # match both active and archived records
                domain_not_active = [
                    '|', ('active', '=', True), ('active', '=', False)
                ]
            else:
                domain_not_active = []
            domain = []
            break_condition = False
            for f_name in search_dict:
                try:
                    field_attr = model_dict['fields'][f_name]
                except Exception:
                    field_attr = {}
                val = search_dict[f_name]
                if val is None:
                    if check_file:
                        # silently abort the whole lookup in file-check mode
                        searched_object, get_or_create = None, False
                        break_condition = True
                        break
                    else:
                        raise UserError(u'val không thể bằng None')
                f_name = get_key(field_attr, 'transfer_name') or f_name
                operator_search = field_attr.get('operator_search', '=')
                tuple_in = (f_name, operator_search, val)
                domain.append(tuple_in)
                if is_create:
                    search_dict_new[f_name] = val
            if not break_condition:
                domain = expression.AND([domain_not_active, domain])
                searched_object = self.env[class_name].search(domain)
        return_obj = searched_object
        get_or_create = bool(searched_object)
        if get_or_create:
            this_model_noti_dict['search_yes'] = this_model_noti_dict.get(
                'search_yes', 0) + 1
        else:
            this_model_noti_dict['search_no'] = this_model_noti_dict.get(
                'search_no', 0) + 1
    else:
        return_obj = None
        get_or_create = None
    if is_create:
        if not searched_object:
            # create
            only_get = get_key(model_dict, 'only_get')
            if only_get:
                raise UserError(
                    u'Model %s này chỉ được get chứ không được tạo' %
                    class_name)
            for f_name, val in write_dict.items():
                try:
                    field_attr = model_dict['fields'][f_name]
                except Exception:
                    field_attr = {}
                f_name = get_key(field_attr, 'transfer_name') or f_name
                search_dict_new[f_name] = val
            created_object = self.env[class_name].create(search_dict_new)
            this_model_noti_dict['create'] = this_model_noti_dict.get(
                'create', 0) + 1
            return_obj = created_object
            return return_obj
    allow_write_all_field = setting.get('allow_write', True)
    if is_write:
        if exist_val:
            searched_object = exist_val
        if searched_object:
            # write
            if len(searched_object) > 1:
                raise ValueError(
                    u' exist_val: %s len(searched_object) > 1, searched_object: %s, %s'
                    % (exist_val, searched_object,
                       searched_object.mapped('id')))
            for f_name, val in write_dict.items():
                try:
                    field_attr = model_dict['fields'][f_name]
                except Exception:
                    field_attr = {}
                f_name = get_key(field_attr, 'transfer_name') or f_name
                # per-field write permission, falling back to the global flag
                if 'write_field' in field_attr and field_attr[
                        'write_field'] is not None:
                    write_field = field_attr['write_field']
                else:
                    write_field = allow_write_all_field
                allow_write_from_False_to_not_false = field_attr.get(
                    'allow_write_from_False_to_not_false'
                ) if 'allow_write_from_False_to_not_false' in field_attr else setting.get(
                    'allow_write_from_False_to_not_false', True)
                if allow_write_all_field and write_field == False and val == False:
                    if allow_write_from_False_to_not_false:
                        write_field = True
                    print('class_name', write_field)
                write_func = field_attr.get('write_func')
                if write_func:
                    # field-level hook may consume the value entirely
                    code = write_func(searched_object=searched_object,
                                      f_name=f_name,
                                      val=val)
                    if code == 'continue':
                        continue
                raise_if_diff = field_attr.get('raise_if_diff')
                if raise_if_diff:
                    if val == False and allow_write_from_False_to_not_false:
                        raise_if_diff = False
                else:
                    raise_if_diff_only_write = field_attr.get(
                        'raise_if_diff_only_write', True)
                    if raise_if_diff_only_write:
                        raise_if_diff = field_attr.get(
                            'raise_if_diff') and write_field
                if not (write_field or raise_if_diff):
                    print('field_name', f_name, 'continue')
                    continue
                if not is_must_update or raise_if_diff:
                    orm_field_val = getattr(searched_object, f_name, None)
                    if orm_field_val is None:
                        continue
                    diff = check_diff_write_val_with_exist_obj(
                        orm_field_val, val)
                    if diff:
                        if raise_if_diff:
                            raise UserError(
                                u'raise_if_diff model:%s-f_name:%s - orm_field_val: %s - val:%s '
                                % (class_name, f_name, orm_field_val, val))
                        if write_field:
                            write_dict_new[f_name] = val
                else:
                    write_dict_new[f_name] = val
            if write_dict_new:
                if model_dict.get('print_write_dict_new', True):
                    print('***write_dict_new***', write_dict_new)
                searched_object.write(write_dict_new)
                this_model_noti_dict['update'] = this_model_noti_dict.get(
                    'update', 0) + 1
            else:
                # 'not update'
                this_model_noti_dict['skipupdate'] = this_model_noti_dict.get(
                    'skipupdate', 0) + 1
    return return_obj  # bool(searched_object)
def events(self, page=1, **searches):
    """Render the /event index page.

    Supports filtering by free-text search, date bucket, tags, event type
    and country; computes per-date-bucket counts and a country breakdown for
    the filter sidebar, then renders a paged, ordered event list.

    FIX: removed the dead assignment ``domain = dom_without('type')`` — its
    value was immediately overwritten by ``dom_without('country')`` and never
    used (leftover from a removed type breakdown).
    """
    Event = request.env['event.event']
    EventType = request.env['event.type']
    searches.setdefault('search', '')
    searches.setdefault('date', 'all')
    searches.setdefault('tags', '')
    searches.setdefault('type', 'all')
    searches.setdefault('country', 'all')
    website = request.website
    today = datetime.today()

    def sdn(date):
        # string datetime at the very end of the given day
        return fields.Datetime.to_string(
            date.replace(hour=23, minute=59, second=59))

    def sd(date):
        # string datetime
        return fields.Datetime.to_string(date)

    def get_month_filter_domain(filter_name, months_delta):
        # [key, label, domain, count] entry for "this month + months_delta"
        first_day_of_the_month = today.replace(day=1)
        filter_string = _('This month') if months_delta == 0 \
            else format_date(request.env, value=today + relativedelta(months=months_delta),
                             date_format='LLLL', lang_code=get_lang(request.env).code).capitalize()
        return [
            filter_name, filter_string,
            [("date_end", ">=", sd(first_day_of_the_month + relativedelta(months=months_delta))),
             ("date_begin", "<", sd(first_day_of_the_month + relativedelta(months=months_delta + 1)))],
            0
        ]

    # [key, label, domain, count] rows; count (index 3) is filled below
    dates = [
        ['all', _('Upcoming Events'), [("date_end", ">", sd(today))], 0],
        [
            'today', _('Today'),
            [("date_end", ">", sd(today)), ("date_begin", "<", sdn(today))],
            0
        ],
        get_month_filter_domain('month', 0),
        get_month_filter_domain('nextmonth1', 1),
        get_month_filter_domain('nextmonth2', 2),
        ['old', _('Past Events'), [("date_end", "<", sd(today))], 0],
    ]

    # search domains
    domain_search = {'website_specific': website.website_domain()}
    if searches['search']:
        domain_search['search'] = [('name', 'ilike', searches['search'])]
    search_tags = self._extract_searched_event_tags(searches)
    if search_tags:
        # Example: You filter on age: 10-12 and activity: football.
        # Doing it this way allows to only get events who are tagged "age: 10-12" AND "activity: football".
        # Add another tag "age: 12-15" to the search and it would fetch the ones who are tagged:
        # ("age: 10-12" OR "age: 12-15") AND "activity: football
        grouped_tags = defaultdict(list)
        for tag in search_tags:
            grouped_tags[tag.category_id].append(tag)
        domain_search['tags'] = []
        for group in grouped_tags:
            domain_search['tags'] = expression.AND([
                domain_search['tags'],
                [('tag_ids', 'in', [tag.id for tag in grouped_tags[group]])]
            ])

    current_date = None
    current_type = None
    current_country = None
    for date in dates:
        if searches["date"] == date[0]:
            domain_search["date"] = date[2]
            if date[0] != 'all':
                current_date = date[1]
    if searches["type"] != 'all':
        current_type = EventType.browse(int(searches['type']))
        domain_search["type"] = [("event_type_id", "=", int(searches["type"]))]
    if searches["country"] != 'all':
        current_country = request.env['res.country'].browse(
            int(searches['country']))
        domain_search["country"] = [
            '|', ("country_id", "=", int(searches["country"])),
            ("country_id", "=", False)
        ]

    def dom_without(without):
        # concatenation of every sub-domain except the one named ``without``
        domain = []
        for key, search in domain_search.items():
            if key != without:
                domain += search
        return domain

    # count by domains without self search
    for date in dates:
        if date[0] != 'old':
            date[3] = Event.search_count(dom_without('date') + date[2])

    domain = dom_without('country')
    countries = Event.read_group(domain, ["id", "country_id"],
                                 groupby="country_id",
                                 orderby="country_id")
    # prepend the aggregated "All Countries" pseudo-group
    countries.insert(
        0, {
            'country_id_count':
            sum([int(country['country_id_count']) for country in countries]),
            'country_id': ("all", _("All Countries"))
        })

    step = 12  # Number of events per page
    event_count = Event.search_count(dom_without("none"))
    pager = website.pager(url="/event",
                          url_args=searches,
                          total=event_count,
                          page=page,
                          step=step,
                          scope=5)

    order = 'date_begin'
    if searches.get('date', 'all') == 'old':
        order = 'date_begin desc'
    order = 'is_published desc, ' + order
    events = Event.search(dom_without("none"),
                          limit=step,
                          offset=pager['offset'],
                          order=order)
    # keep only meaningful query parameters in generated URLs
    keep = QueryURL(
        '/event', **{
            key: value
            for key, value in searches.items()
            if (key == 'search' or value != 'all')
        })
    values = {
        'current_date': current_date,
        'current_country': current_country,
        'current_type': current_type,
        'event_ids': events,  # event_ids used in website_event_track so we keep name as it is
        'dates': dates,
        'categories': request.env['event.tag.category'].search([]),
        'countries': countries,
        'pager': pager,
        'searches': searches,
        'search_tags': search_tags,
        'keep': keep,
    }
    if searches['date'] == 'old':
        # the only way to display this content is to set date=old so it must be canonical
        values['canonical_params'] = OrderedMultiDict([('date', 'old')])
    return request.render("website_event.index", values)
def _get_invoiced(self):
    """ Compute the invoice status of a SO. Possible statuses:
    - no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
      invoice. This is also the default value if the conditions of no other status is met.
    - to invoice: if any SO line is 'to invoice', the whole SO is 'to invoice'
    - invoiced: if all SO lines are invoiced, the SO is invoiced.
    - upselling: if all SO lines are invoiced or upselling, the status is upselling.

    The invoice_ids are obtained thanks to the invoice lines of the SO lines, and we also search
    for possible refunds created directly from existing invoices. This is necessary since such a
    refund is not directly linked to the SO.
    """
    # Ignore the status of the deposit product
    deposit_product_id = self.env[
        'sale.advance.payment.inv']._default_product_id()
    # (order_id, invoice_status) pairs for every non-deposit line of self
    line_invoice_status_all = [
        (d['order_id'][0], d['invoice_status'])
        for d in self.env['sale.order.line'].read_group(
            [('order_id', 'in', self.ids),
             ('product_id', '!=', deposit_product_id.id)],
            ['order_id', 'invoice_status'], ['order_id', 'invoice_status'],
            lazy=False)
    ]
    for order in self:
        invoice_ids = order.order_line.mapped('invoice_lines').mapped(
            'invoice_id').filtered(
                lambda r: r.type in ['out_invoice', 'out_refund'])
        # Search for invoices which have been 'cancelled' (filter_refund = 'modify' in
        # 'account.invoice.refund')
        # use like as origin may contains multiple references (e.g. 'SO01, SO02')
        refunds = invoice_ids.search([('origin', 'like', order.name),
                                      ('company_id', '=',
                                       order.company_id.id),
                                      ('type', 'in',
                                       ('out_invoice', 'out_refund'))])
        # keep only refunds whose origin list really names this order
        invoice_ids |= refunds.filtered(
            lambda r: order.name in
            [origin.strip() for origin in r.origin.split(',')])
        # Search for refunds as well
        domain_inv = expression.OR([[
            '&', ('origin', '=', inv.number),
            ('journal_id', '=', inv.journal_id.id)
        ] for inv in invoice_ids if inv.number])
        if domain_inv:
            refund_ids = self.env['account.invoice'].search(
                expression.AND([[
                    '&', ('type', '=', 'out_refund'), ('origin', '!=', False)
                ], domain_inv]))
        else:
            refund_ids = self.env['account.invoice'].browse()
        line_invoice_status = [
            d[1] for d in line_invoice_status_all if d[0] == order.id
        ]
        if order.ssi_job_id:
            # NOTE(review): job-linked orders use a stricter rule — they are
            # 'to invoice' only when *all* lines are to-invoice/invoiced,
            # not when *any* line is to-invoice (cf. the branch below).
            # Presumably intentional for this customization — confirm.
            if order.state not in ('sale', 'done'):
                invoice_status = 'no'
            elif all(invoice_status == 'invoiced'
                     for invoice_status in line_invoice_status):
                invoice_status = 'invoiced'
            elif all(invoice_status in ['to invoice', 'invoiced']
                     for invoice_status in line_invoice_status):
                invoice_status = 'to invoice'
            elif all(invoice_status in ['invoiced', 'upselling']
                     for invoice_status in line_invoice_status):
                invoice_status = 'upselling'
            else:
                invoice_status = 'no'
        else:
            # standard rule: any 'to invoice' line makes the SO 'to invoice'
            if order.state not in ('sale', 'done'):
                invoice_status = 'no'
            elif all(invoice_status == 'invoiced'
                     for invoice_status in line_invoice_status):
                invoice_status = 'invoiced'
            elif any(invoice_status == 'to invoice'
                     for invoice_status in line_invoice_status):
                invoice_status = 'to invoice'
            elif all(invoice_status in ['invoiced', 'upselling']
                     for invoice_status in line_invoice_status):
                invoice_status = 'upselling'
            else:
                invoice_status = 'no'
        order.update({
            'invoice_count': len(set(invoice_ids.ids + refund_ids.ids)),
            'invoice_ids': invoice_ids.ids + refund_ids.ids,
            'invoice_status': invoice_status
        })
def _name_search(self, name, args=None, operator='ilike', limit=100,
                 name_get_uid=None):
    """Search todos by the name of their underlying action.

    When a search term is given, match it against ``action_id``; otherwise
    defer to the standard name search.
    """
    base_domain = args or []
    if not name:
        return super(IrActionsTodo, self)._name_search(
            name, args=base_domain, operator=operator, limit=limit,
            name_get_uid=name_get_uid)
    full_domain = expression.AND([[('action_id', operator, name)],
                                  base_domain])
    matching_ids = self._search(full_domain, limit=limit,
                                access_rights_uid=name_get_uid)
    return self.browse(matching_ids).name_get()
def search_panel_select_multi_range(self, field_name, **kwargs):
    """ Return possible values of the field field_name (case select="multi"),
    possibly with counters and groups.

    :param field_name: the name of a filter field; possible types are
        many2one, many2many, selection.
    :param search_domain: base domain of search
    :param category_domain: domain generated by categories
    :param filter_domain: domain generated by filters
    :param comodel_domain: domain of field values (if relational)
    :param group_by: extra field to read on comodel, to group comodel records
    :param disable_counters: whether to count records by value
    :return: a list of possible values, each being a dict with keys
        'id' (value),
        'name' (value label),
        'count' (how many records with that value),
        'group_id' (value of group),
        'group_name' (label of group).
    """
    field = self._fields[field_name]
    supported_types = ['many2one', 'many2many', 'selection']
    if field.type not in supported_types:
        raise UserError(
            _('Only types %(supported_types)s are supported for filter (found type %(field_type)s)'
              ) % ({
                  'supported_types': supported_types,
                  'field_type': field.type
              }))

    Comodel = self.env.get(field.comodel_name)

    # Domain counting records per value: base search + category + filter,
    # restricted to records where the field is actually set.
    model_domain = expression.AND([
        kwargs.get('search_domain', []),
        kwargs.get('category_domain', []),
        kwargs.get('filter_domain', []),
        [(field_name, '!=', False)],
    ])
    comodel_domain = kwargs.get('comodel_domain', [])
    disable_counters = kwargs.get('disable_counters', False)
    group_by = kwargs.get('group_by', False)

    if group_by:
        # determine the labeling of values returned by the group_by field:
        # normalize every group value to an (id, label) pair.
        group_by_field = Comodel._fields[group_by]
        if group_by_field.type == 'many2one':
            def group_id_name(value):
                # many2one read() already yields (id, display_name)
                return value or (False, _("Not Set"))
        elif group_by_field.type == 'selection':
            desc = Comodel.fields_get([group_by])[group_by]
            group_by_selection = dict(desc['selection'])
            group_by_selection[False] = _("Not Set")

            def group_id_name(value):
                return value, group_by_selection[value]
        else:
            def group_id_name(value):
                return (value, value) if value else (False, _("Not Set"))

    # get filter_values
    filter_values = []

    if field.type == 'many2one':
        counters = {}
        if not disable_counters:
            # One grouped query gives the per-value record counts.
            groups = self.read_group(model_domain, [field_name], [field_name])
            counters = {
                group[field_name][0]: group[field_name + '_count']
                for group in groups
            }
        # retrieve all possible values, and return them with their label and counter
        field_names = ['display_name', group_by] if group_by else ['display_name']
        records = Comodel.search_read(comodel_domain, field_names)
        for record in records:
            record_id = record['id']
            values = {
                'id': record_id,
                'name': record['display_name'],
                'count': counters.get(record_id, 0),
            }
            if group_by:
                values['group_id'], values['group_name'] = group_id_name(
                    record[group_by])
            filter_values.append(values)

    elif field.type == 'many2many':
        # retrieve all possible values, and return them with their label and counter
        field_names = ['display_name', group_by] if group_by else ['display_name']
        records = Comodel.search_read(comodel_domain, field_names)
        for record in records:
            record_id = record['id']
            values = {
                'id': record_id,
                'name': record['display_name'],
                'count': 0,
            }
            if not disable_counters:
                # many2many cannot be grouped, so count with one query per value.
                count_domain = expression.AND(
                    [model_domain, [(field_name, 'in', record_id)]])
                values['count'] = self.search_count(count_domain)
            if group_by:
                values['group_id'], values['group_name'] = group_id_name(
                    record[group_by])
            filter_values.append(values)

    elif field.type == 'selection':
        counters = {}
        if not disable_counters:
            groups = self.read_group(model_domain, [field_name], [field_name])
            counters = {
                group[field_name]: group[field_name + '_count']
                for group in groups
            }
        # retrieve all possible values, and return them with their label and counter
        # NOTE(review): fields_get() returns a description dict; this looks like
        # it should read ['selection'] before iterating — verify upstream.
        selection = self.fields_get([field_name])[field_name]
        for value, label in selection:
            filter_values.append({
                'id': value,
                'name': label,
                'count': counters.get(value, 0),
            })

    return filter_values
def get_translation_frontend_modules(self):
    """Return the names of installed modules providing frontend translations."""
    module_model = request.env['ir.module.module'].sudo()
    frontend_domain = self._get_translation_frontend_modules_domain()
    installed_domain = expression.AND(
        [frontend_domain, [('state', '=', 'installed')]])
    return module_model.search(installed_domain).mapped('name')
def _rating_domain(self):
    """ Only take the published rating into account to compute avg and count """
    base_domain = super(ProductTemplate, self)._rating_domain()
    public_only = [('is_internal', '=', False)]
    return expression.AND([base_domain, public_only])
def _get_slide_detail(self, slide):
    """Build the rendering values for a slide detail page.

    Gathers the slide itself, aside content (most viewed / related slides for
    documentation channels, categorized content otherwise), previous/next
    navigation within the channel, and comment-posting data when the channel
    allows comments.

    :param slide: a ``slide.slide`` record being displayed
    :return: dict of values for the slide detail template
    """
    base_domain = self._get_channel_slides_base_domain(slide.channel_id)
    if slide.channel_id.channel_type == 'documentation':
        # Documentation channels show aside lists instead of categories.
        related_domain = expression.AND(
            [base_domain, [('category_id', '=', slide.category_id.id)]])

        most_viewed_slides = request.env['slide.slide'].search(
            base_domain, limit=self._slides_per_aside,
            order='total_views desc')
        related_slides = request.env['slide.slide'].search(
            related_domain, limit=self._slides_per_aside)
        category_data = []
        uncategorized_slides = request.env['slide.slide']
    else:
        most_viewed_slides, related_slides = request.env[
            'slide.slide'], request.env['slide.slide']
        category_data = slide.channel_id._get_categorized_slides(
            base_domain,
            order=request.env['slide.slide'].
            _order_by_strategy['sequence'],
            force_void=True)
        # temporarily kept for fullscreen, to remove asap
        uncategorized_domain = expression.AND([
            base_domain,
            [('channel_id', '=', slide.channel_id.id),
             ('category_id', '=', False)]
        ])
        uncategorized_slides = request.env['slide.slide'].search(
            uncategorized_domain)

    # Previous/next navigation based on the slide's position in the channel.
    channel_slides_ids = slide.channel_id.slide_ids.ids
    slide_index = channel_slides_ids.index(slide.id)
    previous_slide = slide.channel_id.slide_ids[
        slide_index - 1] if slide_index > 0 else None
    next_slide = slide.channel_id.slide_ids[
        slide_index + 1] if slide_index < len(channel_slides_ids) - 1 else None

    values = {
        # slide
        'slide': slide,
        'most_viewed_slides': most_viewed_slides,
        'related_slides': related_slides,
        'previous_slide': previous_slide,
        'next_slide': next_slide,
        'uncategorized_slides': uncategorized_slides,
        'category_data': category_data,
        # user
        'user': request.env.user,
        'is_public_user': request.website.is_public_user(),
        # rating and comments
        'comments': slide.website_message_ids or [],
    }

    # allow rating and comments
    if slide.channel_id.allow_comment:
        values.update({
            'message_post_hash':
                slide._generate_signed_token(request.env.user.partner_id.id),
            'message_post_pid': request.env.user.partner_id.id,
        })
    return values
def portal_order_page(self, order_id, report_type=None, access_token=None, message=False, download=False, **kw):
    """Render the portal page of a sale order (or its printable report).

    Access is checked via the portal access token; report requests short-circuit
    to the report renderer (using a dedicated template for ``requirements``
    orders). Viewing by a portal customer is logged at most once per day.

    :param order_id: id of the ``sale.order`` to display
    :param report_type: 'html' / 'pdf' / 'text' to return the report instead
    :param access_token: portal token authorizing public access
    :param message: optional message flag forwarded to the template
    :param download: whether the report should be downloaded
    """
    # res = super(CustomerPortal, self).portal_order_page(self, order_id, access_token=None, message=False, download=False, **kw)
    try:
        order_sudo = self._document_check_access('sale.order', order_id,
                                                 access_token=access_token)
    except (AccessError, MissingError):
        return request.redirect('/my')

    if report_type in ('html', 'pdf', 'text'):
        # 'requirements' orders use a custom report template.
        if order_sudo.sale_type == 'requirements':
            return self._show_report(
                model=order_sudo,
                report_type=report_type,
                report_ref=
                'wi_requirement.action_report_user_requirement_template',
                download=download)
        else:
            return self._show_report(
                model=order_sudo,
                report_type=report_type,
                report_ref='sale.action_report_saleorder',
                download=download)

    # use sudo to allow accessing/viewing orders for public user
    # only if he knows the private token
    # Log only once a day
    if order_sudo:
        now = fields.Date.today().isoformat()
        session_obj_date = request.session.get('view_quote_%s' %
                                               order_sudo.id)
        if isinstance(session_obj_date, date):
            session_obj_date = session_obj_date.isoformat()
        # Only log portal (share) users arriving with a token, once per day.
        if session_obj_date != now and request.env.user.share and access_token:
            request.session['view_quote_%s' % order_sudo.id] = now
            body = _('Quotation viewed by customer %s'
                     ) % order_sudo.partner_id.name
            _message_post_helper(
                "sale.order",
                order_sudo.id,
                body,
                token=order_sudo.access_token,
                message_type="notification",
                subtype="mail.mt_note",
                partner_ids=order_sudo.user_id.sudo().partner_id.ids,
            )

    values = {
        'sale_order': order_sudo,
        'message': message,
        'token': access_token,
        'return_url': '/shop/payment/validate',
        'bootstrap_formatting': True,
        'partner_id': order_sudo.partner_id.id,
        'report_type': 'html',
        'action': order_sudo._get_portal_return_action(),
    }
    if order_sudo.company_id:
        values['res_company'] = order_sudo.company_id

    if order_sudo.has_to_be_paid():
        # Acquirers enabled for the order's company, filtered by the
        # customer's country (or acquirers with no country restriction).
        domain = expression.AND([[
            '&',
            ('state', 'in', ['enabled', 'test']),
            ('company_id', '=', order_sudo.company_id.id)
        ], [
            '|',
            ('country_ids', '=', False),
            ('country_ids', 'in', [order_sudo.partner_id.country_id.id])
        ]])
        acquirers = request.env['payment.acquirer'].sudo().search(domain)

        values['acquirers'] = acquirers.filtered(lambda acq: (
            acq.payment_flow == 'form' and acq.view_template_id) or (
            acq.payment_flow == 's2s' and acq.
            registration_view_template_id))
        values['pms'] = request.env['payment.token'].search([
            ('partner_id', '=', order_sudo.partner_id.id)
        ])
        values['acq_extra_fees'] = acquirers.get_acquirer_extra_fees(
            order_sudo.amount_total, order_sudo.currency_id,
            order_sudo.partner_id.country_id.id)

    # Breadcrumb history depends on whether the order is still a quotation.
    if order_sudo.state in ('draft', 'sent', 'cancel'):
        history = request.session.get('my_quotations_history', [])
    else:
        history = request.session.get('my_orders_history', [])
    values.update(get_records_pager(history, order_sudo))

    return request.render('sale.sale_order_portal_template', values)
def _get_moves_to_assign_domain(self):
    """Extend the assignable-moves domain to skip manufacturing-order moves."""
    base_domain = super(ProcurementGroup, self)._get_moves_to_assign_domain()
    return expression.AND([base_domain, [('production_id', '=', False)]])
def save_snippet(self, name, arch, template_key, snippet_key, thumbnail_url):
    """ Saves a new snippet arch so that it appears with the given name when
    using the given snippets template.

    :param name: the name of the snippet to save
    :param arch: the html structure of the snippet to save
    :param template_key: the key of the view regrouping all snippets in
        which the snippet to save is meant to appear
    :param snippet_key: the key (without module part) to identify the
        snippet from which the snippet to save originates
    :param thumbnail_url: the url of the thumbnail to use when displaying
        the snippet to save
    """
    app_name = template_key.split('.')[0]
    # Suffix with a uuid so multiple saves of the same snippet never collide.
    snippet_key = '%s_%s' % (snippet_key, uuid.uuid4().hex)
    full_snippet_key = '%s.%s' % (app_name, snippet_key)

    # find available name
    current_website = self.env['website'].browse(
        self._context.get('website_id'))
    website_domain = current_website.website_domain()
    used_names = self.search(
        expression.AND([[('name', '=like', '%s%%' % name)],
                        website_domain])).mapped('name')
    name = self._find_available_name(name, used_names)

    # html to xml to add '/' at the end of self closing tags like br, ...
    xml_arch = etree.tostring(html.fromstring(arch))
    new_snippet_view_values = {
        'name': name,
        'key': full_snippet_key,
        'type': 'qweb',
        'arch': xml_arch,
    }
    new_snippet_view_values.update(self._snippet_save_view_values_hook())
    self.create(new_snippet_view_values)

    # Register the saved snippet in the "custom" section of the snippets
    # template via an inheriting view.
    custom_section = self.search([('key', '=', template_key)])
    snippet_addition_view_values = {
        'name': name + ' Block',
        'key': self._get_snippet_addition_view_key(template_key,
                                                   snippet_key),
        'inherit_id': custom_section.id,
        'type': 'qweb',
        'arch': """
            <data inherit_id="%s">
                <xpath expr="//div[@id='snippet_custom']" position="attributes">
                    <attribute name="class" remove="d-none" separator=" "/>
                </xpath>
                <xpath expr="//div[@id='snippet_custom_body']" position="inside">
                    <t t-snippet="%s" t-thumbnail="%s"/>
                </xpath>
            </data>
        """ % (template_key, full_snippet_key, thumbnail_url),
    }
    snippet_addition_view_values.update(
        self._snippet_save_view_values_hook())
    self.create(snippet_addition_view_values)
def view_all_users_page(self, page=1, **kwargs):
    """Render the paginated users-ranking page of the website profile.

    Lists published users with karma > 1, optionally filtered by a search
    term, sorted by karma position. If the logged-in user does not appear in
    the current page, their own ranking entry is computed separately.

    :param page: pager page number (1-based)
    :param kwargs: may contain 'search' (free text) and 'group_by'
    """
    User = request.env['res.users']
    dom = [('karma', '>', 1), ('website_published', '=', True)]

    # Searches
    search_term = kwargs.get('search')
    group_by = kwargs.get('group_by', False)
    render_values = {
        'search': search_term,
        'group_by': group_by or 'all',
    }
    if search_term:
        # Match either the user's name or their company name.
        dom = expression.AND([[
            '|', ('name', 'ilike', search_term),
            ('partner_id.commercial_company_name', 'ilike', search_term)
        ], dom])

    user_count = User.sudo().search_count(dom)
    my_user = request.env.user
    current_user_values = False
    if user_count:
        page_count = math.ceil(user_count / self._users_per_page)
        pager = request.website.pager(
            url="/profile/users",
            total=user_count,
            page=page,
            step=self._users_per_page,
            scope=page_count
            if page_count < self._pager_max_pages else self._pager_max_pages)

        users = User.sudo().search(dom,
                                   limit=self._users_per_page,
                                   offset=pager['offset'],
                                   order='karma DESC')
        user_values = self._prepare_all_users_values(users)

        # Get karma position for users (only website_published)
        position_domain = [('karma', '>', 1),
                           ('website_published', '=', True)]
        position_map = self._get_position_map(position_domain, users,
                                              group_by)

        # Users absent from the position map fall after the last ranked one.
        max_position = max([
            user_data['karma_position']
            for user_data in position_map.values()
        ],
                           default=1)
        for user in user_values:
            user_data = position_map.get(user['id'], dict())
            user['position'] = user_data.get('karma_position',
                                             max_position + 1)
            user['karma_gain'] = user_data.get('karma_gain_total', 0)
        user_values.sort(key=itemgetter('position'))

        if my_user.website_published and my_user.karma and my_user.id not in users.ids:
            # Need to keep the dom to search only for users that appear in the ranking page
            current_user = User.sudo().search(
                expression.AND([[('id', '=', my_user.id)], dom]))
            if current_user:
                current_user_values = self._prepare_all_users_values(
                    current_user)[0]

                user_data = self._get_position_map(position_domain,
                                                   current_user,
                                                   group_by).get(
                                                       current_user.id, {})
                current_user_values['position'] = user_data.get(
                    'karma_position', 0)
                current_user_values['karma_gain'] = user_data.get(
                    'karma_gain_total', 0)
    else:
        user_values = []
        pager = {'page_count': 0}

    render_values.update({
        'top3_users':
        user_values[:3] if not search_term and page == 1 else [],
        'users': user_values,
        'my_user': current_user_values,
        'pager': pager,
    })
    return request.render("website_profile.users_page_main", render_values)
def _search_fetch(self, search_detail, search, limit, order):
    """Fetch website search results, extending the base search to translated
    view contents and filtering out false positives from markup matches.

    :param search_detail: dict describing the searchable model (mapping,
        base_domain, optional requires_sudo/order)
    :param search: user search string
    :param limit: maximum number of results
    :param order: requested ordering
    :return: (results recordset, count)
    """
    with_description = 'description' in search_detail['mapping']
    results, count = super()._search_fetch(search_detail, search, limit,
                                           order)
    if with_description and search:
        # Perform search in translations
        # TODO Remove when domains will support xml_translate fields
        query = sql.SQL("""
            SELECT {table}.{id}
            FROM {table}
            LEFT JOIN ir_ui_view v ON {table}.{view_id} = v.{id}
            LEFT JOIN ir_translation t ON v.{id} = t.{res_id}
            WHERE t.lang = {lang}
            AND t.name = ANY({names})
            AND t.type = 'model_terms'
            AND t.value ilike {search}
            LIMIT {limit}
        """).format(
            table=sql.Identifier(self._table),
            id=sql.Identifier('id'),
            view_id=sql.Identifier('view_id'),
            res_id=sql.Identifier('res_id'),
            lang=sql.Placeholder('lang'),
            names=sql.Placeholder('names'),
            search=sql.Placeholder('search'),
            limit=sql.Placeholder('limit'),
        )
        self.env.cr.execute(
            query, {
                'lang': self.env.lang,
                'names': ['ir.ui.view,arch_db', 'ir.ui.view,name'],
                'search': '%%%s%%' % escape_psql(search),
                'limit': limit,
            })
        # Merge translation hits with the base results and re-run the search
        # so ordering/sudo rules are applied uniformly.
        ids = {row[0] for row in self.env.cr.fetchall()}
        ids.update(results.ids)
        domains = search_detail['base_domain'].copy()
        domains.append([('id', 'in', list(ids))])
        domain = expression.AND(domains)
        model = self.sudo() if search_detail.get('requires_sudo') else self
        results = model.search(domain,
                               limit=limit,
                               order=search_detail.get('order', order))
        count = max(count, len(results))

    def filter_page(search, page, all_pages):
        # Search might have matched words in the xml tags and parameters therefore we make
        # sure the terms actually appear inside the text.
        text = '%s %s %s' % (page.name, page.url,
                             text_from_html(page.arch))
        pattern = '|'.join(
            [re.escape(search_term) for search_term in search.split()])
        return re.findall('(%s)' % pattern, text,
                          flags=re.I) if pattern else False

    if 'url' not in order:
        results = results._get_most_specific_pages()
    if search and with_description:
        results = results.filtered(
            lambda result: filter_page(search, result, results))
    return results, count
def _domain_move_lines_for_reconciliation(self, st_line, aml_accounts, partner_id, excluded_ids=None, search_str=False, mode='rp'):
    """ Return the domain for account.move.line records which can be used for
    bank statement reconciliation.

    :param st_line: bank statement line being reconciled (provides the company
        and the bank reconciliation start date)
    :param aml_accounts: account ids of move lines matchable with the line
    :param partner_id: optional partner id to restrict candidates to
    :param excluded_ids: optional iterable of move line ids to exclude;
        the caller's collection is never mutated
    :param search_str: optional free-text filter (also matched on partner name)
    :param mode: 'rp' for receivable/payable (and liquidity) accounts,
        anything else for the remaining account types
    """
    AccountMoveLine = self.env['account.move.line']

    # BUGFIX: the previous signature used a mutable default (excluded_ids=[])
    # and extended it in place, so exclusions leaked across calls sharing the
    # default object and the caller's list was mutated. Work on a fresh copy.
    excluded_ids = list(excluded_ids) if excluded_ids else []

    # Always exclude the journal items that have been marked as 'to be checked'
    # in a former bank statement reconciliation.
    to_check_excluded = AccountMoveLine.search(
        AccountMoveLine._get_suspense_moves_domain()).ids
    excluded_ids.extend(to_check_excluded)

    # Payments not yet linked to any statement line.
    domain_reconciliation = [
        '&', '&', '&',
        ('statement_line_id', '=', False),
        ('account_id', 'in', aml_accounts),
        ('payment_id', '<>', False),
        ('balance', '!=', 0.0),
    ]

    # default domain matching: unreconciled lines on reconcilable accounts
    domain_matching = [
        '&', '&',
        ('reconciled', '=', False),
        ('account_id.reconcile', '=', True),
        ('balance', '!=', 0.0),
    ]

    domain = expression.OR([domain_reconciliation, domain_matching])
    if partner_id:
        domain = expression.AND([domain, [('partner_id', '=', partner_id)]])

    # Restrict by account internal type depending on the reconciliation mode.
    if mode == 'rp':
        domain = expression.AND([
            domain,
            [('account_id.internal_type', 'in',
              ['receivable', 'payable', 'liquidity'])]
        ])
    else:
        domain = expression.AND([
            domain,
            [('account_id.internal_type', 'not in',
              ['receivable', 'payable', 'liquidity'])]
        ])

    # Domain factorized for all reconciliation use cases
    if search_str:
        str_domain = self._domain_move_lines(search_str=search_str)
        str_domain = expression.OR([
            str_domain,
            [('partner_id.name', 'ilike', search_str)]
        ])
        domain = expression.AND([domain, str_domain])

    if excluded_ids:
        domain = expression.AND([
            [('id', 'not in', excluded_ids)],
            domain
        ])

    # filter on account.move.line having the same company as the statement line
    domain = expression.AND([domain, [('company_id', '=', st_line.company_id.id)]])

    # take only moves in valid state. Draft is accepted only when "Post At" is set
    # to "Bank Reconciliation" in the associated journal
    domain_post_at = [
        '|', '&',
        ('move_id.state', '=', 'draft'),
        ('journal_id.post_at', '=', 'bank_rec'),
        ('move_id.state', 'not in', ['draft', 'cancel']),
    ]
    domain = expression.AND([domain, domain_post_at])

    if st_line.company_id.account_bank_reconciliation_start:
        domain = expression.AND([
            domain,
            [('date', '>=', st_line.company_id.account_bank_reconciliation_start)]
        ])

    return domain
def _get_customize_data(self, keys, is_view_data):
    """Return customization records (views or assets) matching ``keys`` for
    the current website, including archived ones, with duplicates filtered."""
    if is_view_data:
        model_name = 'ir.ui.view'
    else:
        model_name = 'ir.asset'
    # active_test=False: archived customizations must be retrievable too.
    records_model = request.env[model_name].with_context(active_test=False)
    key_domain = [("key", "in", keys)]
    search_domain = expression.AND([key_domain, request.website.website_domain()])
    return records_model.search(search_domain).filter_duplicate()
def _allocate_leads(self, work_days=1):
    """ Allocate leads to teams given by self. This method sets ``team_id``
    field on lead records that are unassigned (no team and no responsible).
    No salesperson is assigned in this process. Its purpose is simply to
    allocate leads within teams.

    This process allocates all available leads on teams weighted by their
    maximum assignment by month that indicates their relative workload.

    Heuristic of this method is the following:
      * find unassigned leads for each team, aka leads being
        * without team, without user -> not assigned;
        * not in a won stage, and not having False/0 (lost) or 100 (won)
          probability) -> live leads;
        * if set, a delay after creation can be applied (see
          BUNDLE_HOURS_DELAY) parameter explanations here below;
        * matching the team's assignment domain (empty means everything);

      * assign a weight to each team based on their assignment_max that
        indicates their relative workload;

      * pick a random team using a weighted random choice and find a lead
        to assign:
        * remove already assigned leads from the available leads. If
          there is not any lead spare to assign, remove team from active
          teams;
        * pick the first lead and set the current team;
        * when setting a team on leads, leads are also merged with their
          duplicates. Purpose is to clean database and avoid assigning
          duplicates to same or different teams;
        * add lead and its duplicates to already assigned leads;

      * pick another random team until their is no more leads to assign
        to any team;

    This process ensure that teams having overlapping domains will all
    receive leads as lead allocation is done one lead at a time. This
    allocation will be proportional to their size (assignment of their
    members).

    :config int crm.assignment.bundle: deprecated
    :config int crm.assignment.commit.bundle: optional config parameter
      allowing to set size of lead batch to be committed together. By
      default 100 which is a good trade-off between transaction time and
      speed
    :config int crm.assignment.delay: optional config parameter giving a
      delay before taking a lead into assignment process (BUNDLE_HOURS_DELAY)
      given in hours. Purpose if to allow other crons or automated actions
      to make their job. This option is mainly historic as its purpose was
      to let automated actions prepare leads and score before PLS was added
      into CRM. This is now not required anymore but still supported;

    :param float work_days: see ``CrmTeam.action_assign_leads()``;

    :return teams_data: dict() with each team assignment result:
      team: {
        'assigned': set of lead IDs directly assigned to the team (no
          duplicate or merged found);
        'merged': set of lead IDs merged and assigned to the team (main
          leads being results of merge process);
        'duplicates': set of lead IDs found as duplicates and merged into
          other leads. Those leads are unlinked during assign process and
          are already removed at return of this method;
      }, ...
    """
    if work_days < 0.2 or work_days > 30:
        raise ValueError(
            _(
                'Leads team allocation should be done for at least 0.2 or maximum 30 work days, not %.2f.',
                work_days))

    BUNDLE_HOURS_DELAY = int(
        self.env['ir.config_parameter'].sudo().get_param(
            'crm.assignment.delay', default=0))
    BUNDLE_COMMIT_SIZE = int(
        self.env['ir.config_parameter'].sudo().get_param(
            'crm.assignment.commit.bundle', 100))
    # Never commit while running inside the test suite.
    auto_commit = not getattr(threading.currentThread(), 'testing', False)

    # leads
    max_create_dt = fields.Datetime.now() - datetime.timedelta(
        hours=BUNDLE_HOURS_DELAY)
    duplicates_lead_cache = dict()

    # teams data
    teams_data, population, weights = dict(), list(), list()
    for team in self:
        if not team.assignment_max:
            continue

        lead_domain = expression.AND([
            literal_eval(team.assignment_domain or '[]'),
            [('create_date', '<', max_create_dt)],
            ['&', ('team_id', '=', False), ('user_id', '=', False)],
            [
                '|', ('stage_id', '=', False),
                ('stage_id.is_won', '=', False)
            ]
        ])

        leads = self.env["crm.lead"].search(lead_domain)
        # Fill duplicate cache: search for duplicate lead before the assignation
        # avoid to flush during the search at every assignation
        for lead in leads:
            if lead not in duplicates_lead_cache:
                duplicates_lead_cache[lead] = lead._get_lead_duplicates(
                    email=lead.email_from)

        teams_data[team] = {
            "team": team,
            "leads": leads,
            "assigned": set(),
            "merged": set(),
            "duplicates": set(),
        }
        population.append(team)
        weights.append(team.assignment_max)

    # Start a new transaction, since data fetching take times
    # and the first commit occur at the end of the bundle,
    # the first transaction can be long which we want to avoid
    if auto_commit:
        self._cr.commit()

    # assignment process data
    global_data = dict(assigned=set(), merged=set(), duplicates=set())
    leads_done_ids, lead_unlink_ids, counter = set(), set(), 0
    while population:
        counter += 1
        # Weighted random pick keeps allocation proportional to workload.
        team = random.choices(population, weights=weights, k=1)[0]

        # filter remaining leads, remove team if no more leads for it
        teams_data[team]["leads"] = teams_data[team]["leads"].filtered(
            lambda l: l.id not in leads_done_ids).exists()
        if not teams_data[team]["leads"]:
            population_index = population.index(team)
            population.pop(population_index)
            weights.pop(population_index)
            continue

        # assign + deduplicate and concatenate results in teams_data to keep some history
        candidate_lead = teams_data[team]["leads"][0]
        assign_res = team._allocate_leads_deduplicate(
            candidate_lead, duplicates_cache=duplicates_lead_cache)
        for key in ('assigned', 'merged', 'duplicates'):
            teams_data[team][key].update(assign_res[key])
            leads_done_ids.update(assign_res[key])
            global_data[key].update(assign_res[key])
        lead_unlink_ids.update(assign_res['duplicates'])

        # auto-commit except in testing mode. As this process may be time consuming or we
        # may encounter errors, already commit what is allocated to avoid endless cron loops.
        if auto_commit and counter % BUNDLE_COMMIT_SIZE == 0:
            # unlink duplicates once
            self.env['crm.lead'].browse(lead_unlink_ids).unlink()
            lead_unlink_ids = set()
            self._cr.commit()

    # unlink duplicates once
    self.env['crm.lead'].browse(lead_unlink_ids).unlink()

    if auto_commit:
        self._cr.commit()

    # some final log
    _logger.info(
        '## Assigned %s leads',
        (len(global_data['assigned']) + len(global_data['merged'])))
    for team, team_data in teams_data.items():
        _logger.info('## Assigned %s leads to team %s',
                     len(team_data['assigned']) + len(team_data['merged']),
                     team.id)
        _logger.info(
            '\tLeads: direct assign %s / merge result %s / duplicates merged: %s',
            team_data['assigned'], team_data['merged'],
            team_data['duplicates'])

    return teams_data
def _build_search_childs_domain(self, parent_id, domain=None):
    """Build a search domain matching the direct children of ``parent_id``.

    :param parent_id: id of the parent record (matched on ``_parent_name``)
    :param domain: optional extra domain AND-ed with the parent condition
    :return: a search domain list
    """
    self._check_parent_field()
    parent_domain = [[self._parent_name, '=', parent_id]]
    # Fix: avoid the mutable default argument (was ``domain=[]``); ``None``
    # is falsy like the old default, so callers see identical behaviour.
    if domain:
        return expression.AND([parent_domain, domain])
    return parent_domain
def _plan_prepare_values(self):
    """Prepare the rendering values of the project overview/plan page.

    Aggregates, for the projects in ``self``:
      * dashboard time/rate figures per billable type (including timesheets
        linked to cancelled sale orders, reported as 'canceled'),
      * profitability figures from the project profitability SQL report,
        converted to the current company currency,
      * time repartition per employee per billable type,
      * the timesheet/forecast table rows.

    Amounts are expressed in hours, or converted to days when the company
    encoding UoM is days (``is_uom_day``).
    """
    currency = self.env.company.currency_id
    uom_hour = self.env.ref('uom.product_uom_hour')
    company_uom = self.env.company.timesheet_encode_uom_id
    is_uom_day = company_uom == self.env.ref('uom.product_uom_day')
    hour_rounding = uom_hour.rounding
    billable_types = [
        'non_billable', 'non_billable_project', 'billable_time',
        'non_billable_timesheet', 'billable_fixed'
    ]

    values = {
        'projects': self,
        'currency': currency,
        'timesheet_domain': [('project_id', 'in', self.ids)],
        'profitability_domain': [('project_id', 'in', self.ids)],
        'stat_buttons': self._plan_get_stat_button(),
        'is_uom_day': is_uom_day,
    }

    #
    # Hours, Rates and Profitability
    #
    dashboard_values = {
        'time': dict.fromkeys(billable_types + ['total'], 0.0),
        'rates': dict.fromkeys(billable_types + ['total'], 0.0),
        'profit': {
            'invoiced': 0.0,
            'to_invoice': 0.0,
            'cost': 0.0,
            'total': 0.0,
        }
    }

    # hours from non-invoiced timesheets that are linked to canceled so
    canceled_hours_domain = [('project_id', 'in', self.ids),
                             ('timesheet_invoice_type', '!=', False),
                             ('so_line.state', '=', 'cancel')]
    total_canceled_hours = sum(self.env['account.analytic.line'].search(
        canceled_hours_domain).mapped('unit_amount'))
    canceled_hours = float_round(total_canceled_hours,
                                 precision_rounding=hour_rounding)
    if is_uom_day:
        # convert time from hours to days
        canceled_hours = round(
            uom_hour._compute_quantity(canceled_hours,
                                       company_uom,
                                       raise_if_failure=False), 2)
    dashboard_values['time']['canceled'] = canceled_hours
    dashboard_values['time']['total'] += canceled_hours

    # hours (from timesheet) and rates (by billable type)
    dashboard_domain = [('project_id', 'in', self.ids),
                        ('timesheet_invoice_type', '!=', False), '|',
                        ('so_line', '=', False),
                        ('so_line.state', '!=', 'cancel')
                        ]  # force billable type
    dashboard_data = self.env['account.analytic.line'].read_group(
        dashboard_domain, ['unit_amount', 'timesheet_invoice_type'],
        ['timesheet_invoice_type'])
    dashboard_total_hours = sum(
        [data['unit_amount']
         for data in dashboard_data]) + total_canceled_hours
    for data in dashboard_data:
        billable_type = data['timesheet_invoice_type']
        amount = float_round(data.get('unit_amount'),
                             precision_rounding=hour_rounding)
        if is_uom_day:
            # convert time from hours to days
            amount = round(
                uom_hour._compute_quantity(amount,
                                           company_uom,
                                           raise_if_failure=False), 2)
        dashboard_values['time'][billable_type] = amount
        dashboard_values['time']['total'] += amount
        # rates (percentage of total hours per billable type)
        rate = round(
            data.get('unit_amount') / dashboard_total_hours * 100,
            2) if dashboard_total_hours else 0.0
        dashboard_values['rates'][billable_type] = rate
        dashboard_values['rates']['total'] += rate
    dashboard_values['time']['total'] = round(
        dashboard_values['time']['total'], 2)

    # rates from non-invoiced timesheets that are linked to canceled so
    dashboard_values['rates']['canceled'] = float_round(
        100 * total_canceled_hours / (dashboard_total_hours or 1),
        precision_rounding=hour_rounding)

    # profitability, using profitability SQL report
    field_map = {
        'amount_untaxed_invoiced': 'invoiced',
        'amount_untaxed_to_invoice': 'to_invoice',
        'timesheet_cost': 'cost',
        'expense_cost': 'expense_cost',
        'expense_amount_untaxed_invoiced':
        'expense_amount_untaxed_invoiced',
        'other_revenues': 'other_revenues'
    }
    profit = dict.fromkeys(
        list(field_map.values()) + ['other_revenues', 'total'], 0.0)
    profitability_raw_data = self.env[
        'project.profitability.report'].read_group(
            [('project_id', 'in', self.ids)],
            ['project_id'] + list(field_map), ['project_id'])
    for data in profitability_raw_data:
        company_id = self.env['project.project'].browse(
            data.get('project_id')[0]).company_id
        from_currency = company_id.currency_id
        for field in field_map:
            value = data.get(field, 0.0)
            # Convert to the display currency when the project's company
            # uses a different one.
            if from_currency != currency:
                value = from_currency._convert(value, currency, company_id,
                                               date.today())
            profit[field_map[field]] += value
    profit['total'] = sum([profit[item] for item in profit.keys()])
    dashboard_values['profit'] = profit

    values['dashboard'] = dashboard_values

    #
    # Time Repartition (per employee per billable types)
    #
    employee_ids = self._plan_get_employee_ids()
    employee_ids = list(set(employee_ids))
    # Retrieve the employees for which the current user can see theirs timesheets
    employee_domain = expression.AND([[
        ('company_id', 'in', self.env.companies.ids)
    ], self.env['account.analytic.line']._domain_employee_id()])
    employees = self.env['hr.employee'].sudo().browse(
        employee_ids).filtered_domain(employee_domain)
    repartition_domain = [('project_id', 'in', self.ids),
                          ('employee_id', '!=', False),
                          ('timesheet_invoice_type', '!=', False)
                          ]  # force billable type
    # repartition data, without timesheet on cancelled so
    repartition_data = self.env['account.analytic.line'].read_group(
        repartition_domain +
        ['|', ('so_line', '=', False), ('so_line.state', '!=', 'cancel')],
        ['employee_id', 'timesheet_invoice_type', 'unit_amount'],
        ['employee_id', 'timesheet_invoice_type'],
        lazy=False)
    # read timesheet on cancelled so
    cancelled_so_timesheet = self.env['account.analytic.line'].read_group(
        repartition_domain + [('so_line.state', '=', 'cancel')],
        ['employee_id', 'unit_amount'], ['employee_id'],
        lazy=False)
    repartition_data += [{
        **canceled, 'timesheet_invoice_type': 'canceled'
    } for canceled in cancelled_so_timesheet]

    # set repartition per type per employee
    repartition_employee = {}
    for employee in employees:
        repartition_employee[employee.id] = dict(
            employee_id=employee.id,
            employee_name=employee.name,
            non_billable_project=0.0,
            non_billable=0.0,
            billable_time=0.0,
            non_billable_timesheet=0.0,
            billable_fixed=0.0,
            canceled=0.0,
            total=0.0,
        )
    for data in repartition_data:
        employee_id = data['employee_id'][0]
        # setdefault covers employees present in timesheets but filtered
        # out of the visible employee set above.
        repartition_employee.setdefault(
            employee_id,
            dict(
                employee_id=data['employee_id'][0],
                employee_name=data['employee_id'][1],
                non_billable_project=0.0,
                non_billable=0.0,
                billable_time=0.0,
                non_billable_timesheet=0.0,
                billable_fixed=0.0,
                canceled=0.0,
                total=0.0,
            ))[data['timesheet_invoice_type']] = float_round(
                data.get('unit_amount', 0.0),
                precision_rounding=hour_rounding)
        repartition_employee[employee_id][
            '__domain_' + data['timesheet_invoice_type']] = data['__domain']

    # compute total
    for employee_id, vals in repartition_employee.items():
        repartition_employee[employee_id]['total'] = sum(
            [vals[inv_type] for inv_type in [*billable_types, 'canceled']])
        if is_uom_day:
            # convert all times from hours to days
            for time_type in [
                    'non_billable_project', 'non_billable',
                    'billable_time', 'non_billable_timesheet',
                    'billable_fixed', 'canceled', 'total'
            ]:
                if repartition_employee[employee_id][time_type]:
                    repartition_employee[employee_id][time_type] = round(
                        uom_hour._compute_quantity(
                            repartition_employee[employee_id][time_type],
                            company_uom,
                            raise_if_failure=False), 2)
    hours_per_employee = [
        repartition_employee[employee_id]['total']
        for employee_id in repartition_employee
    ]
    values['repartition_employee_max'] = (max(hours_per_employee)
                                          if hours_per_employee else 1) or 1
    values['repartition_employee'] = repartition_employee

    #
    # Table grouped by SO / SOL / Employees
    #
    timesheet_forecast_table_rows = self._table_get_line_values(employees)
    if timesheet_forecast_table_rows:
        values['timesheet_forecast_table'] = timesheet_forecast_table_rows
    return values
def pay(self, reference='', order_id=None, amount=False, currency_id=None,
        acquirer_id=None, partner_id=False, access_token=None, **kw):
    """ Generic payment page allowing public and logged in users to pay an arbitrary amount.

    In the case of a public user access, we need to ensure that the payment is made
    anonymously - e.g. it should not be possible to pay for a specific partner simply by
    setting the partner_id GET param to a random id.

    In the case where a partner_id is set, we do an access_token check based on the
    payment.link.wizard model (since links for specific partners should be created from
    there and there only).

    Also noteworthy is the filtering of s2s payment methods - we don't want to create
    payment tokens for public users.

    In the case of a logged in user, then we let access rights and security rules do
    their job.

    :param reference: free-form payment reference, normalized to ASCII below
    :param order_id: optional sale order id the payment relates to
    :param amount: amount to pay (parsed as float further down)
    :param currency_id: optional res.currency id
    :param acquirer_id: optional payment.acquirer id to preselect
    :param partner_id: partner paying; requires a valid ``access_token``
    :param access_token: token generated by payment.link.wizard for ``partner_id``
    :param kw: may carry ``invoice_id``, ``company_id``, ``error_msg``
    :raises werkzeug.exceptions.NotFound: when a partner is given without a
        (valid) access token
    """
    env = request.env
    user = env.user.sudo()
    # Strip any non-ASCII characters from the free-form reference before it is
    # used to build the transaction reference.
    reference = normalize('NFKD', reference).encode('ascii',
                                                   'ignore').decode('utf-8')
    # An explicit partner requires a matching token; otherwise anyone could
    # "pay as" an arbitrary partner by tweaking the GET parameter.
    if partner_id and not access_token:
        raise werkzeug.exceptions.NotFound
    if partner_id and access_token:
        token_ok = request.env['payment.link.wizard'].check_token(
            access_token, int(partner_id), float(amount), int(currency_id))
        if not token_ok:
            raise werkzeug.exceptions.NotFound

    invoice_id = kw.get('invoice_id')

    # Default values
    values = {
        'amount': 0.0,
        'currency': user.company_id.currency_id,
    }

    # Check sale order
    if order_id:
        try:
            order_id = int(order_id)
            if partner_id:
                # `sudo` needed if the user is not connected.
                # A public user wouldn't be able to read the sale order.
                # With `partner_id`, an access_token should be validated, preventing a data breach.
                order = env['sale.order'].sudo().browse(order_id)
            else:
                order = env['sale.order'].browse(order_id)
            values.update({
                'currency': order.currency_id,
                'amount': order.amount_total,
                'order_id': order_id
            })
        except:
            # Deliberately broad: swallows both bad ids (ValueError) and
            # unreadable orders (AccessError) and falls back to the defaults.
            order_id = None
    if invoice_id:
        try:
            values['invoice_id'] = int(invoice_id)
        except ValueError:
            invoice_id = None

    # Check currency
    if currency_id:
        try:
            currency_id = int(currency_id)
            values['currency'] = env['res.currency'].browse(currency_id)
        except:
            pass

    # Check amount
    if amount:
        try:
            amount = float(amount)
            values['amount'] = amount
        except:
            pass

    # Check reference
    # Link the transaction to the sale order (4 = "link" ORM command) when one
    # was successfully resolved above.
    reference_values = order_id and {
        'sale_order_ids': [(4, order_id)]
    } or {}
    values['reference'] = env['payment.transaction']._compute_reference(
        values=reference_values, prefix=reference)

    # Check acquirer
    # Resolve the company used to filter acquirers: order company first, then
    # an explicit company_id param, then the current user's company.
    acquirers = None
    if order_id and order:
        cid = order.company_id.id
    elif kw.get('company_id'):
        try:
            cid = int(kw.get('company_id'))
        except:
            cid = user.company_id.id
    else:
        cid = user.company_id.id

    # Check partner
    if not user._is_public():
        # NOTE: this means that if the partner was set in the GET param, it gets overwritten here
        # This is something we want, since security rules are based on the partner - assuming the
        # access_token checked out at the start, this should have no impact on the payment itself
        # existing besides making reconciliation possibly more difficult (if the payment partner is
        # not the same as the invoice partner, for example)
        partner_id = user.partner_id.id
    elif partner_id:
        partner_id = int(partner_id)

    values.update({
        'partner_id': partner_id,
        'bootstrap_formatting': True,
        'error_msg': kw.get('error_msg')
    })

    acquirer_domain = [
        '&', ('state', 'in', ['enabled', 'test']), ('company_id', '=', cid)
    ]
    if partner_id:
        # Restrict acquirers to those available in the partner's country (or
        # with no country restriction at all).
        partner = request.env['res.partner'].browse([partner_id])
        acquirer_domain = expression.AND([
            acquirer_domain,
            [
                '|', ('country_ids', '=', False),
                ('country_ids', 'in', [partner.sudo().country_id.id])
            ]
        ])
    if acquirer_id:
        acquirers = env['payment.acquirer'].browse(int(acquirer_id))
    # NOTE(review): when order_id is set, this search overrides any explicitly
    # requested acquirer_id from the branch just above — confirm intended.
    if order_id:
        acquirers = env['payment.acquirer'].search(acquirer_domain)
    if not acquirers:
        acquirers = env['payment.acquirer'].search(acquirer_domain)

    # s2s mode will always generate a token, which we don't want for public users
    valid_flows = ['form', 's2s'] if not user._is_public() else ['form']
    values['acquirers'] = [
        acq for acq in acquirers if acq.payment_flow in valid_flows
    ]
    if partner_id:
        # `partner` was bound above in the `if partner_id:` domain branch, so
        # it is guaranteed to exist here.
        values['pms'] = request.env['payment.token'].search([
            ('acquirer_id', 'in', acquirers.ids),
            ('partner_id', 'child_of', partner.commercial_partner_id.id)
        ])
    else:
        values['pms'] = []
    return request.render('payment.pay', values)
def _search_panel_domain(self, field, operator, directory_id,
                         comodel_domain=None):
    """Build a domain restricting ``field`` to files matching a directory.

    Searches the file records whose ``directory`` matches ``directory_id``
    under ``operator``, then AND-combines the caller-supplied domain with a
    membership test on those file ids.

    :param str field: relational field name on the comodel to constrain.
    :param str operator: domain operator compared against ``directory``
        (e.g. ``'='`` or ``'child_of'``).
    :param directory_id: value (id or ids) matched against ``directory``.
    :param list comodel_domain: optional extra domain AND-ed with the file
        restriction; ``None`` (the default) means no extra restriction.
    :return: combined search domain (list of tuples).
    """
    # Use a None sentinel instead of a mutable [] default: a shared list
    # default would be the classic mutable-default-argument pitfall.
    if comodel_domain is None:
        comodel_domain = []
    files_ids = self.search([('directory', operator, directory_id)]).ids
    return expression.AND([comodel_domain, [(field, 'in', files_ids)]])