def action_validate(self):
    """Fully approve the allocation requests in ``self``.

    Each request must currently be waiting for (first or second)
    approval; otherwise a :class:`UserError` is raised. The approving
    employee is recorded as second approver for double-validation
    requests, first approver otherwise.

    :return: True
    :raise UserError: if a request is not in an approvable state.
    """
    approver = self.env.user.employee_id
    for allocation in self:
        # Only requests awaiting approval may be validated.
        if allocation.state not in ('confirm', 'validate1'):
            raise UserError(
                _('Allocation request must be confirmed in order to approve it.'))
        allocation.write({'state': 'validate'})
        # Double-validation flows record the second approver here;
        # single-validation flows record the first (and only) one.
        approver_field = (
            'second_approver_id'
            if allocation.validation_type == 'both'
            else 'first_approver_id')
        allocation.write({approver_field: approver.id})
        allocation._action_validate_create_childs()
    self.activity_update()
    return True
def sips_form_generate_values(self, values):
    """Build the redirect-form values for a Worldline SIPS payment.

    Assembles the pipe-separated ``Data`` field expected by SIPS,
    appends the return context, and signs the whole payload
    (``Seal``) via :meth:`_sips_generate_shasign`.

    :param dict values: transaction values; must contain at least
        ``currency_id``, ``amount`` and ``reference``.
    :return: copy of ``values`` augmented with ``Data``,
        ``InterfaceVersion`` and ``Seal``.
    :raise ValidationError: when the currency has no SIPS code.
    """
    self.ensure_one()
    base_url = self.get_base_url()
    currency = self.env['res.currency'].sudo().browse(values['currency_id'])
    currency_code = CURRENCY_CODES.get(currency.name, False)
    if not currency_code:
        # NOTE: "Wordline" (sic) is the historical message text.
        raise ValidationError(_('Currency not supported by Wordline'))
    # SIPS expects the amount in the currency's minor unit (cents).
    amount = round(values['amount'] * 100)
    if self.state == 'enabled':
        # For production environment, key version 2 is required
        merchant_id = getattr(self, 'sips_merchant_id')
        key_version = self.env['ir.config_parameter'].sudo().get_param(
            'sips.key_version', '2')
    else:
        # Test key provided by Atos Wordline works only with version 1
        merchant_id = '002001000000001'
        key_version = '1'

    sips_tx_values = dict(values)
    # The concatenation order of the Data field is part of the SIPS
    # contract — do not reorder these segments.
    sips_tx_values.update({
        'Data': u'amount=%s|' % amount +
                u'currencyCode=%s|' % currency_code +
                u'merchantId=%s|' % merchant_id +
                u'normalReturnUrl=%s|' % urls.url_join(base_url, SipsController._return_url) +
                u'automaticResponseUrl=%s|' % urls.url_join(base_url, SipsController._notify_url) +
                u'transactionReference=%s|' % values['reference'] +
                u'statementReference=%s|' % values['reference'] +
                u'keyVersion=%s' % key_version,
        'InterfaceVersion': self.sips_version,
    })

    # Round-trip data (return URL + reference) is carried inside the
    # Data payload as a JSON "returnContext" segment.
    return_context = {}
    if sips_tx_values.get('return_url'):
        return_context[u'return_url'] = u'%s' % urls.url_quote(
            sips_tx_values.pop('return_url'))
    return_context[u'reference'] = u'%s' % sips_tx_values['reference']
    sips_tx_values['Data'] += u'|returnContext=%s' % (json.dumps(return_context))

    # Seal must be computed over the final Data value (after the
    # returnContext append above).
    shasign = self._sips_generate_shasign(sips_tx_values)
    sips_tx_values['Seal'] = shasign
    return sips_tx_values
def copy_menu_hierarchy(self, top_menu):
    """Duplicate ``top_menu`` and its whole subtree for every website in self."""
    def _clone_subtree(source, parent):
        # Copy ``source`` under ``parent`` and recurse into its children.
        clone = source.copy({
            'parent_id': parent.id,
            'website_id': self.id,
        })
        for child in source.child_id:
            _clone_subtree(child, clone)

    for website in self:
        root_copy = top_menu.copy({
            'name': _('Top Menu for Website %s') % website.id,
            'website_id': website.id,
        })
        for child in top_menu.child_id:
            _clone_subtree(child, root_copy)
def update_notification(self, cron_mode=True):
    """
    Send a message to Harpiya's publisher warranty server to check the
    validity of the contracts, get notifications, etc...

    @param cron_mode: If true, catch all exceptions (appropriate for usage in a cron).
    @type cron_mode: boolean
    """
    try:
        try:
            result = self._get_sys_logs()
        except Exception:
            if cron_mode:  # we don't want to see any stack trace in cron
                return False
            _logger.debug("Exception while sending a get logs messages", exc_info=1)
            raise UserError(_("Error during communication with the publisher warranty server."))

        # old behavior based on res.log; now on mail.message, that is not necessarily installed
        user = self.env['res.users'].sudo().browse(SUPERUSER_ID)
        poster = self.sudo().env.ref('mail.channel_all_employees')
        if not (poster and poster.exists()):
            # No company-wide channel available: fall back to posting on
            # the superuser record, or give up silently.
            if not user.exists():
                return True
            poster = user
        for message in result["messages"]:
            # Best-effort posting: a failing message must not abort the
            # whole notification update.
            try:
                poster.message_post(body=message, subtype='mt_comment',
                                    partner_ids=[user.partner_id.id])
            except Exception:
                pass
        if result.get('enterprise_info'):
            # Update expiration date
            set_param = self.env['ir.config_parameter'].sudo().set_param
            set_param('database.expiration_date', result['enterprise_info'].get('expiration_date'))
            set_param('database.expiration_reason', result['enterprise_info'].get('expiration_reason', 'trial'))
            set_param('database.enterprise_code', result['enterprise_info'].get('enterprise_code'))
            set_param('database.already_linked_subscription_url', result['enterprise_info'].get('database_already_linked_subscription_url'))
            set_param('database.already_linked_email', result['enterprise_info'].get('database_already_linked_email'))
            set_param('database.already_linked_send_mail_url', result['enterprise_info'].get('database_already_linked_send_mail_url'))

    except Exception:
        if cron_mode:
            return False  # we don't want to see any stack trace in cron
        else:
            raise
    return True
def action_approve(self):
    """First-level approval of the allocation requests in ``self``.

    For double-validation requests this is only the first approval
    (state becomes ``validate1``); all other requests are fully
    validated right away via :meth:`action_validate`.

    :raise UserError: if any request is not in the ``confirm`` state.
    """
    for allocation in self:
        if allocation.state != 'confirm':
            raise UserError(
                _('Allocation request must be confirmed ("To Approve") in order to approve it.'))
    approver = self.env.user.employee_id
    needs_second_approval = self.filtered(
        lambda allocation: allocation.validation_type == 'both')
    needs_second_approval.write({
        'state': 'validate1',
        'first_approver_id': approver.id,
    })
    # Everything else is single-validation: validate immediately.
    (self - needs_second_approval).action_validate()
    self.activity_update()
def __init__(self, base64_source, verify_resolution=True):
    """Initialize the `base64_source` image for processing.

    :param base64_source: the original image base64 encoded
        No processing will be done if the `base64_source` is falsy or if
        the image is SVG.
    :type base64_source: string or bytes
    :param verify_resolution: if True, make sure the original image size is not
        excessive before starting to process it. The max allowed resolution is
        defined by `IMAGE_MAX_RESOLUTION`.
    :type verify_resolution: bool
    :return: self
    :rtype: ImageProcess
    :raise: ValueError if `verify_resolution` is True and the image is too large
    :raise: UserError if the base64 is incorrect or the image can't be identified by PIL
    """
    self.base64_source = base64_source or False
    self.operationsCount = 0

    if not base64_source or base64_source[:1] in (b'P', 'P'):
        # don't process empty source or SVG (base64 of '<' starts with 'P')
        self.image = False
    else:
        self.image = base64_to_image(self.base64_source)

        # Original format has to be saved before fixing the orientation or
        # doing any other operations because the information will be lost on
        # the resulting image.
        self.original_format = (self.image.format or '').upper()

        self.image = image_fix_orientation(self.image)

        w, h = self.image.size
        if verify_resolution and w * h > IMAGE_MAX_RESOLUTION:
            # Bug fix: divide by 1e6 (one million), not 10e6 (ten million).
            # The old divisor under-reported the limit by a factor of 10 in
            # the "million pixels" error message.
            raise ValueError(_(
                "Image size excessive, uploaded images must be smaller than %s million pixels."
            ) % str(IMAGE_MAX_RESOLUTION / 1e6))
def insert_attachment(self, model, id_record, files):
    """Create ``ir.attachment`` records for uploaded ``files`` and link
    them to record ``id_record`` of ``model``.

    Files whose ``field_name`` matches a writable form field are linked
    through that field; the rest ("orphans") are attached via a
    ``mail.message`` (or directly on ``attachment_ids`` when the target
    model is ``mail.mail``).

    :param model: ``ir.model`` record describing the target model.
    :param int id_record: id of the target record.
    :param files: iterable of uploaded file objects exposing
        ``filename``, ``field_name`` and ``read()``.
    """
    orphan_attachment_ids = []
    model_name = model.sudo().model
    record = model.env[model_name].browse(id_record)
    authorized_fields = model.sudo()._get_form_writable_fields()
    for file in files:
        custom_field = file.field_name not in authorized_fields
        attachment_value = {
            'name': file.filename,
            # Bug fix: base64.encodestring was deprecated and removed in
            # Python 3.9; encodebytes is the supported, identical-output
            # replacement.
            'datas': base64.encodebytes(file.read()),
            'res_model': model_name,
            'res_id': record.id,
        }
        attachment_id = request.env['ir.attachment'].sudo().create(attachment_value)
        if attachment_id and not custom_field:
            # Link the attachment through the matching form field.
            record.sudo()[file.field_name] = [(4, attachment_id.id)]
        else:
            orphan_attachment_ids.append(attachment_id.id)
    if model_name != 'mail.mail':
        # If some attachments didn't match a field on the model,
        # we create a mail.message to link them to the record
        if orphan_attachment_ids:
            values = {
                'body': _('<p>Attached files : </p>'),
                'model': model_name,
                'message_type': 'comment',
                'no_auto_thread': False,
                'res_id': id_record,
                'attachment_ids': [(6, 0, orphan_attachment_ids)],
            }
            mail_id = request.env['mail.message'].with_user(SUPERUSER_ID).create(values)
    else:
        # If the model is mail.mail then we have no other choice but to
        # attach the custom binary field files on the attachment_ids field.
        for attachment_id_id in orphan_attachment_ids:
            record.attachment_ids = [(4, attachment_id_id)]
def _convert_import_data(self, fields, options): """ Extracts the input BaseModel and fields list (with ``False``-y placeholders for fields to *not* import) into a format Model.import_data can use: a fields list without holes and the precisely matching data matrix :param list(str|bool): fields :returns: (data, fields) :rtype: (list(list(str)), list(str)) :raises ValueError: in case the import data could not be converted """ # Get indices for non-empty fields indices = [index for index, field in enumerate(fields) if field] if not indices: raise ValueError( _("You must configure at least one field to import")) # If only one index, itemgetter will return an atom rather # than a 1-tuple if len(indices) == 1: mapper = lambda row: [row[indices[0]]] else: mapper = operator.itemgetter(*indices) # Get only list of actually imported fields import_fields = [f for f in fields if f] rows_to_import = self._read_file(options) if options.get('headers'): rows_to_import = itertools.islice(rows_to_import, 1, None) data = [ list(row) for row in map(mapper, rows_to_import) # don't try inserting completely empty rows (e.g. from # filtering out o2m fields) if any(row) ] # slicing needs to happen after filtering out empty rows as the # data offsets from load are post-filtering return data[options.get('skip'):], import_fields
def activity_update(self):
    """Synchronise the approval activities with each allocation's state.

    Draft/refused requests drop their pending approval activities;
    confirmed ones (re)schedule a first approval; first-approved ones
    close the first activity and schedule the second; validated ones
    mark their activities as done.
    """
    Allocation = self.env['hr.leave.allocation']
    to_unlink = Allocation
    to_feedback = Allocation
    first_activity = 'hr_holidays.mail_act_leave_allocation_approval'
    second_activity = 'hr_holidays.mail_act_leave_allocation_second_approval'
    for record in self:
        note = _('New Allocation Request created by %s: %s Days of %s') % (
            record.create_uid.name,
            record.number_of_days,
            record.holiday_status_id.name)
        state = record.state
        if state in ('draft', 'refuse'):
            to_unlink |= record
        elif state == 'confirm':
            record.activity_schedule(
                first_activity, note=note,
                user_id=record.sudo()._get_responsible_for_approval().id
                or self.env.user.id)
        elif state == 'validate1':
            # First approval done: close it and schedule the second one.
            record.activity_feedback([first_activity])
            record.activity_schedule(
                second_activity, note=note,
                user_id=record.sudo()._get_responsible_for_approval().id
                or self.env.user.id)
        elif state == 'validate':
            to_feedback |= record
    if to_unlink:
        to_unlink.activity_unlink([first_activity, second_activity])
    if to_feedback:
        to_feedback.activity_feedback([first_activity, second_activity])
def get_google_drive_url(self, res_id, template_id):
    """Return the Google Drive URL for the record ``res_id``.

    The document name is built from ``name_template`` formatted with the
    record's values; if a matching ``ir.attachment`` already exists its
    URL is reused, otherwise the document is copied from
    ``template_id`` via :meth:`copy_doc`.

    :param int res_id: id of the record the document relates to.
    :param str template_id: Google Drive id of the template document.
    :return: the document URL (possibly False if copy_doc yields none).
    :raise UserError: when ``name_template`` references a key missing
        from the record values.
    """
    self.ensure_one()
    self = self.sudo()
    model = self.model_id
    filter_name = self.filter_id.name if self.filter_id else False
    record = self.env[model.model].browse(res_id).read()[0]
    record.update({
        'model': model.name,
        'filter': filter_name
    })
    name_gdocs = self.name_template
    try:
        name_gdocs = name_gdocs % record
    except (KeyError, TypeError, ValueError):
        # Bug fix: the previous bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; only %-formatting failures
        # belong here.
        raise UserError(_("At least one key cannot be found in your Google Drive name pattern."))

    attachments = self.env["ir.attachment"].search([
        ('res_model', '=', model.model),
        ('name', '=', name_gdocs),
        ('res_id', '=', res_id)])
    if attachments:
        url = attachments[0].url
    else:
        url = self.copy_doc(res_id, template_id, name_gdocs, model.model).get('url')
    return url
def members(self, membership_id=None, country_name=None, country_id=0, page=1, **post):
    """Website members directory: list published paid members (and free
    ones) with a per-country breakdown, a pager and optional Google
    Maps localisation.

    :param membership_id: membership product id as str/int, or the
        literal ``'free'`` for free members only.
    :param country_name: unused here; presumably consumed by routing —
        TODO confirm against the route definition.
    :param int country_id: optional country filter.
    :param int page: pager page number (1-based).
    """
    Product = request.env['product.product']
    Country = request.env['res.country']
    MembershipLine = request.env['membership.membership_line']
    Partner = request.env['res.partner']
    post_name = post.get('search') or post.get('name', '')
    current_country = None
    today = fields.Date.today()

    # base domain for groupby / searches: published partners with a
    # paid membership line running today.
    base_line_domain = [
        ("partner.website_published", "=", True), ('state', '=', 'paid'),
        ('date_to', '>=', today), ('date_from', '<=', today)]
    if membership_id and membership_id != 'free':
        membership_id = int(membership_id)
        base_line_domain.append(('membership_id', '=', membership_id))
    if post_name:
        base_line_domain += [
            '|', ('partner.name', 'ilike', post_name),
            ('partner.website_description', 'ilike', post_name)]

    # group by country, based on all customers (base domain)
    if membership_id != 'free':
        membership_lines = MembershipLine.sudo().search(base_line_domain)
        country_domain = [('member_lines', 'in', membership_lines.ids)]
        if not membership_id:
            # no membership selected: include free members as well
            country_domain = [
                '|', country_domain[0], ('membership_state', '=', 'free')]
    else:
        country_domain = [('membership_state', '=', 'free')]
    if post_name:
        country_domain += [
            '|', ('name', 'ilike', post_name),
            ('website_description', 'ilike', post_name)]
    countries = Partner.sudo().read_group(
        country_domain + [("website_published", "=", True)],
        ["id", "country_id"], groupby="country_id", orderby="country_id")
    countries_total = sum(
        country_dict['country_id_count'] for country_dict in countries)

    line_domain = list(base_line_domain)
    if country_id:
        line_domain.append(('partner.country_id', '=', country_id))
        current_country = Country.browse(country_id).read(['id', 'name'])[0]
        # Make sure the selected country shows up in the sidebar even
        # with a zero count.
        if not any(x['country_id'][0] == country_id
                   for x in countries if x['country_id']):
            countries.append({
                'country_id_count': 0,
                'country_id': (country_id, current_country["name"])
            })
            countries = [d for d in countries if d['country_id']]
            countries.sort(key=lambda d: d['country_id'][1])

    countries.insert(0, {
        'country_id_count': countries_total,
        'country_id': (0, _("All Countries"))
    })

    # format domain for group_by and memberships
    memberships = Product.search(
        [('membership', '=', True)], order="website_sequence")

    # make sure we don't access to lines with unpublished membershipts
    line_domain.append(('membership_id', 'in', memberships.ids))

    limit = self._references_per_page
    offset = limit * (page - 1)

    count_members = 0
    membership_lines = MembershipLine.sudo()
    # displayed non-free membership lines
    if membership_id != 'free':
        count_members = MembershipLine.sudo().search_count(line_domain)
        if offset <= count_members:
            membership_lines = MembershipLine.sudo().search(
                line_domain, offset, limit)
    page_partner_ids = set(m.partner.id for m in membership_lines)

    # get google maps localization of partners
    google_map_partner_ids = []
    if request.website.viewref(
            'website_membership.opt_index_google_map').active:
        google_map_partner_ids = MembershipLine.search(
            line_domain).get_published_companies(limit=2000)

    # free members are plain partners, not membership lines
    search_domain = [('membership_state', '=', 'free'),
                     ('website_published', '=', True)]
    if post_name:
        search_domain += [
            '|', ('name', 'ilike', post_name),
            ('website_description', 'ilike', post_name)]
    if country_id:
        search_domain += [('country_id', '=', country_id)]
    free_partners = Partner.sudo().search(search_domain)
    # NOTE(review): free_partner_ids is never repopulated below, so the
    # two statements that consume it (map ids, count_members increment)
    # are effectively no-ops — looks like it should be free_partners.ids;
    # verify against upstream before changing.
    free_partner_ids = []
    memberships_data = []
    for membership_record in memberships:
        memberships_data.append({
            'id': membership_record.id,
            'name': membership_record.name
        })

    # partner ids grouped per membership product, for rendering
    memberships_partner_ids = {}
    for line in membership_lines:
        memberships_partner_ids.setdefault(
            line.membership_id.id, []).append(line.partner.id)

    if free_partners:
        memberships_data.append({'id': 'free', 'name': _('Free Members')})
        if not membership_id or membership_id == 'free':
            # Free members fill the remainder of the current page after
            # the paid membership lines.
            if count_members < offset + limit:
                free_start = max(offset - count_members, 0)
                free_end = max(offset + limit - count_members, 0)
                memberships_partner_ids['free'] = free_partners.ids[free_start:free_end]
                page_partner_ids |= set(memberships_partner_ids['free'])
                google_map_partner_ids += free_partner_ids[:2000 - len(google_map_partner_ids)]
                count_members += len(free_partner_ids)

    google_map_partner_ids = ",".join(
        str(it) for it in google_map_partner_ids)
    google_maps_api_key = request.website.google_maps_api_key

    partners = {
        p.id: p
        for p in Partner.sudo().browse(list(page_partner_ids))
    }

    base_url = '/members%s%s' % (
        '/association/%s' % membership_id if membership_id else '',
        '/country/%s' % country_id if country_id else '')

    # request pager for lines
    pager = request.website.pager(url=base_url, total=count_members,
                                  page=page, step=limit, scope=7,
                                  url_args=post)

    values = {
        'partners': partners,
        'memberships_data': memberships_data,
        'memberships_partner_ids': memberships_partner_ids,
        'membership_id': membership_id,
        'countries': countries,
        'current_country': current_country and
        [current_country['id'], current_country['name']] or None,
        'current_country_id': current_country and current_country['id'] or 0,
        'google_map_partner_ids': google_map_partner_ids,
        'pager': pager,
        'post': post,
        'search': "?%s" % werkzeug.url_encode(post),
        'search_count': count_members,
        'google_maps_api_key': google_maps_api_key,
    }
    return request.render("website_membership.index", values)
def get_fields(self, model, depth=FIELDS_RECURSION_LIMIT):
    """ Recursively get fields for the provided model (through
    fields_get) and filter them according to importability

    The output format is a list of ``Field``, with ``Field`` defined as:

    .. class:: Field

        .. attribute:: id (str)

            A non-unique identifier for the field, used to compute
            the span of the ``required`` attribute: if multiple
            ``required`` fields have the same id, only one of them
            is necessary.

        .. attribute:: name (str)

            The field's logical (Harpiya) name within the scope of
            its parent.

        .. attribute:: string (str)

            The field's human-readable name (``@string``)

        .. attribute:: required (bool)

            Whether the field is marked as required in the
            model. Clients must provide non-empty import values
            for all required fields or the import will error out.

        .. attribute:: fields (list(Field))

            The current field's subfields. The database and
            external identifiers for m2o and m2m fields; a
            filtered and transformed fields_get for o2m fields (to
            a variable depth defined by ``depth``).

            Fields with no sub-fields will have an empty list of
            sub-fields.

    :param str model: name of the model to get fields form
    :param int depth: depth of recursion into o2m fields
    """
    Model = self.env[model]
    # "id" (external identifier) is always importable.
    importable_fields = [{
        'id': 'id',
        'name': 'id',
        'string': _("External ID"),
        'required': False,
        'fields': [],
        'type': 'id',
    }]
    if not depth:
        # recursion budget exhausted: stop at the external id column
        return importable_fields

    model_fields = Model.fields_get()
    blacklist = models.MAGIC_COLUMNS + [Model.CONCURRENCY_CHECK_FIELD]
    for name, field in model_fields.items():
        if name in blacklist:
            continue
        # an empty string means the field is deprecated, @deprecated must
        # be absent or False to mean not-deprecated
        if field.get('deprecated', False) is not False:
            continue
        if field.get('readonly'):
            # A readonly field is importable only if some state makes it
            # writable again through its ``states`` overrides.
            states = field.get('states')
            if not states:
                continue
            # states = {state: [(attr, value), (attr2, value2)], state2:...}
            if not any(attr == 'readonly' and value is False
                       for attr, value in itertools.chain.from_iterable(
                           states.values())):
                continue
        field_value = {
            'id': name,
            'name': name,
            'string': field['string'],
            # Y U NO ALWAYS HAS REQUIRED
            'required': bool(field.get('required')),
            'fields': [],
            'type': field['type'],
        }

        if field['type'] in ('many2many', 'many2one'):
            # Relational columns can be matched by external or database id.
            field_value['fields'] = [
                dict(field_value, name='id', string=_("External ID"), type='id'),
                dict(field_value, name='.id', string=_("Database ID"), type='id'),
            ]
        elif field['type'] == 'one2many':
            # Recurse into the sub-model, consuming one level of depth.
            field_value['fields'] = self.get_fields(
                field['relation'], depth=depth - 1)
            if self.user_has_groups('base.group_no_one'):
                field_value['fields'].append({
                    'id': '.id',
                    'name': '.id',
                    'string': _("Database ID"),
                    'required': False,
                    'fields': [],
                    'type': 'id'
                })

        importable_fields.append(field_value)

    # TODO: cache on model?
    return importable_fields
def _fields_view_get_address(self, arch): arch = super(Partner, self)._fields_view_get_address(arch) # render the partner address accordingly to address_view_id doc = etree.fromstring(arch) if doc.xpath("//field[@name='city_id']"): return arch replacement_xml = """ <div> <field name="country_enforce_cities" invisible="1"/> <field name="parent_id" invisible="1"/> <field name='city' placeholder="%(placeholder)s" class="o_address_city" attrs="{ 'invisible': [('country_enforce_cities', '=', True), '|', ('city_id', '!=', False), ('city', 'in', ['', False ])], 'readonly': [('type', '=', 'contact')%(parent_condition)s] }" /> <field name='city_id' placeholder="%(placeholder)s" string="%(placeholder)s" class="o_address_city" context="{'default_country_id': country_id, 'default_name': city, 'default_zipcode': zip, 'default_state_id': state_id}" domain="[('country_id', '=', country_id)]" attrs="{ 'invisible': [('country_enforce_cities', '=', False)], 'readonly': [('type', '=', 'contact')%(parent_condition)s] }" /> </div> """ replacement_data = { 'placeholder': _('City'), } def _arch_location(node): in_subview = False view_type = False parent = node.getparent() while parent is not None and (not view_type or not in_subview): if parent.tag == 'field': in_subview = True elif parent.tag in ['list', 'tree', 'kanban', 'form']: view_type = parent.tag parent = parent.getparent() return { 'view_type': view_type, 'in_subview': in_subview, } for city_node in doc.xpath("//field[@name='city']"): location = _arch_location(city_node) replacement_data['parent_condition'] = '' if location['view_type'] == 'form' or not location['in_subview']: replacement_data[ 'parent_condition'] = ", ('parent_id', '!=', False)" replacement_formatted = replacement_xml % replacement_data for replace_node in etree.fromstring( replacement_formatted).getchildren(): city_node.addprevious(replace_node) parent = city_node.getparent() parent.remove(city_node) arch = etree.tostring(doc, encoding='unicode') return arch
def web_auth_reset_password(self, *args, **kw):
    """JSON-style password reset endpoint.

    On POST, triggers a password reset email for the given ``login``
    and returns a dict with ``is_success`` plus either ``message`` or
    ``error``. Returns None for non-POST requests or when the incoming
    context already carries an error.
    """
    qcontext = kw
    result = {}
    if 'error' not in qcontext and request.httprequest.method == 'POST':
        try:
            # Bug fix: the old ``if not values`` test compared a dict
            # literal that always contained the 'login' key, so it was
            # dead code; validate the login value itself instead.
            login = qcontext.get('login')
            if not login:
                result.update({
                    'is_success': False,
                    'error': 'The form was not properly filled in.'
                })
                return result
            if request.env["res.users"].sudo().search([("login", "=", login)]):
                _logger.info(
                    "Password reset attempt for <%s> by user <%s> from %s",
                    login, request.env.user.login,
                    request.httprequest.remote_addr)
                result.update({
                    'is_success': True,
                    'message':
                    'An email has been sent with credentials to reset your password'
                })
                request.env['res.users'].sudo().reset_password(login)
                return result
            # Bug fix: the second user search duplicated the (negated)
            # query above — after the early return, no user exists.
            result.update({
                'is_success': False,
                'error': 'Reset password: invalid username or email'
            })
            return result
        # Bug fix: specific exception types must precede the catch-all
        # ``except Exception`` — the UserError and SignupError handlers
        # below were unreachable in the original ordering.
        except UserError as e:
            result.update({
                'is_success': False,
                'error': str(e.value or e.name)
            })
            return result
        except SignupError:
            qcontext['error'] = _("Could not reset your password")
            result.update({
                'is_success': False,
                'error': "Could not reset your password"
            })
            _logger.exception('error when resetting password')
            return result
        except Error as e:
            result.update({
                'is_success': False,
                'error': 'error when resetting password'
            })
            return result
        except Exception as e:
            result.update({'is_success': False, 'error': str(e)})
            return result
def portal_my_opportunities(self, page=1, date_begin=None, date_end=None, sortby=None, filterby=None, **kw):
    """Portal page listing the current user's opportunities with
    searchbar filters, sorting, archive groups and a pager.

    :param int page: pager page number.
    :param date_begin: optional lower bound on ``create_date``.
    :param date_end: optional upper bound on ``create_date``
        (both must be provided to take effect).
    :param str sortby: key into ``searchbar_sortings`` (default 'date').
    :param str filterby: key into ``searchbar_filters`` (default 'all').
    """
    values = self._prepare_portal_layout_values()
    CrmLead = request.env['crm.lead']
    domain = self.get_domain_my_opp(request.env.user)
    today = fields.Date.today()
    this_week_end_date = fields.Date.to_string(
        fields.Date.from_string(today) + datetime.timedelta(days=7))

    searchbar_filters = {
        'all': {'label': _('Active'), 'domain': []},
        'today': {'label': _('Today Activities'),
                  'domain': [('activity_date_deadline', '=', today)]},
        'week': {'label': _('This Week Activities'),
                 'domain': [('activity_date_deadline', '>=', today),
                            ('activity_date_deadline', '<=', this_week_end_date)]},
        'overdue': {'label': _('Overdue Activities'),
                    'domain': [('activity_date_deadline', '<', today)]},
        'won': {'label': _('Won'),
                'domain': [('stage_id.is_won', '=', True)]},
        'lost': {'label': _('Lost'),
                 'domain': [('active', '=', False), ('probability', '=', 0)]},
    }
    searchbar_sortings = {
        'date': {'label': _('Newest'), 'order': 'create_date desc'},
        'name': {'label': _('Name'), 'order': 'name'},
        'contact_name': {'label': _('Contact Name'), 'order': 'contact_name'},
        'revenue': {'label': _('Expected Revenue'), 'order': 'planned_revenue desc'},
        'probability': {'label': _('Probability'), 'order': 'probability desc'},
        'stage': {'label': _('Stage'), 'order': 'stage_id'},
    }

    # default sort by value
    if not sortby:
        sortby = 'date'
    order = searchbar_sortings[sortby]['order']
    # default filter by value
    if not filterby:
        filterby = 'all'
    domain += searchbar_filters[filterby]['domain']
    if filterby == 'lost':
        # Lost leads are archived: disable active_test to see them.
        CrmLead = CrmLead.with_context(active_test=False)

    # archive groups - Default Group By 'create_date'
    archive_groups = self._get_archive_groups('crm.lead', domain)
    if date_begin and date_end:
        domain += [('create_date', '>', date_begin),
                   ('create_date', '<=', date_end)]

    # pager
    opp_count = CrmLead.search_count(domain)
    pager = request.website.pager(
        url="/my/opportunities",
        url_args={'date_begin': date_begin, 'date_end': date_end,
                  'sortby': sortby, 'filterby': filterby},
        total=opp_count,
        page=page,
        step=self._items_per_page
    )
    # content according to pager and archive selected
    opportunities = CrmLead.search(
        domain, order=order, limit=self._items_per_page,
        offset=pager['offset'])

    values.update({
        'date': date_begin,
        'opportunities': opportunities,
        'page_name': 'opportunity',
        'archive_groups': archive_groups,
        'default_url': '/my/opportunities',
        'pager': pager,
        'searchbar_sortings': searchbar_sortings,
        'sortby': sortby,
        'searchbar_filters': OrderedDict(sorted(searchbar_filters.items())),
        'filterby': filterby,
    })
    return request.render(
        "website_crm_partner_assign.portal_my_opportunities", values)
def partners(self, country=None, grade=None, page=0, **post):
    """Public resellers/partners directory, filterable by grade and
    country, with per-grade and per-country counters and a pager.

    :param country: ``res.country`` record or None; when None (and
        ``country_all`` is not set) the visitor's GeoIP country is used.
    :param grade: ``res.partner.grade`` record or None.
    :param int page: pager page number.
    """
    country_all = post.pop('country_all', False)
    partner_obj = request.env['res.partner']
    country_obj = request.env['res.country']
    search = post.get('search', '')

    base_partner_domain = [('is_company', '=', True),
                           ('grade_id', '!=', False),
                           ('website_published', '=', True)]
    if not request.env['res.users'].has_group('website.group_website_publisher'):
        # Visitors only see partners whose grade itself is published.
        base_partner_domain += [('grade_id.website_published', '=', True)]
    if search:
        base_partner_domain += ['|', ('name', 'ilike', search),
                                ('website_description', 'ilike', search)]

    # group by grade
    grade_domain = list(base_partner_domain)
    if not country and not country_all:
        # Default the country filter from the visitor's GeoIP data.
        country_code = request.session['geoip'].get('country_code')
        if country_code:
            country = country_obj.search([('code', '=', country_code)],
                                         limit=1)
    if country:
        grade_domain += [('country_id', '=', country.id)]
    grades = partner_obj.sudo().read_group(
        grade_domain, ["id", "grade_id"], groupby="grade_id")
    grades_partners = partner_obj.sudo().search_count(grade_domain)
    # flag active grade
    for grade_dict in grades:
        grade_dict['active'] = grade and grade_dict['grade_id'][0] == grade.id
    grades.insert(0, {
        'grade_id_count': grades_partners,
        'grade_id': (0, _("All Categories")),
        'active': bool(grade is None),
    })

    # group by country
    country_domain = list(base_partner_domain)
    if grade:
        country_domain += [('grade_id', '=', grade.id)]
    countries = partner_obj.sudo().read_group(
        country_domain, ["id", "country_id"], groupby="country_id",
        orderby="country_id")
    countries_partners = partner_obj.sudo().search_count(country_domain)
    # flag active country
    for country_dict in countries:
        country_dict['active'] = (
            country and country_dict['country_id'] and
            country_dict['country_id'][0] == country.id)
    countries.insert(0, {
        'country_id_count': countries_partners,
        'country_id': (0, _("All Countries")),
        'active': bool(country is None),
    })

    # current search
    if grade:
        base_partner_domain += [('grade_id', '=', grade.id)]
    if country:
        base_partner_domain += [('country_id', '=', country.id)]

    # format pager
    if grade and not country:
        url = '/partners/grade/' + slug(grade)
    elif country and not grade:
        url = '/partners/country/' + slug(country)
    elif country and grade:
        url = '/partners/grade/' + slug(grade) + '/country/' + slug(country)
    else:
        url = '/partners'
    url_args = {}
    if search:
        url_args['search'] = search
    if country_all:
        url_args['country_all'] = True

    partner_count = partner_obj.sudo().search_count(base_partner_domain)
    pager = request.website.pager(
        url=url, total=partner_count, page=page,
        step=self._references_per_page, scope=7, url_args=url_args)

    # search partners matching current search parameters
    partner_ids = partner_obj.sudo().search(
        base_partner_domain,
        order="grade_sequence DESC, implemented_count DESC, display_name ASC, id ASC",
        offset=pager['offset'], limit=self._references_per_page)
    partners = partner_ids.sudo()

    google_map_partner_ids = ','.join(str(p.id) for p in partners)
    google_maps_api_key = request.website.google_maps_api_key

    values = {
        'countries': countries,
        'country_all': country_all,
        'current_country': country,
        'grades': grades,
        'current_grade': grade,
        'partners': partners,
        'google_map_partner_ids': google_map_partner_ids,
        'pager': pager,
        'searches': post,
        'search_path': "%s" % werkzeug.url_encode(post),
        'google_maps_api_key': google_maps_api_key,
    }
    # 404 when the filters match nothing at all.
    return request.render("website_crm_partner_assign.index", values,
                          status=partners and 200 or 404)
def _message_get_suggested_recipients(self):
    """Suggest the speaker's email when it differs from the linked partner's."""
    suggestions = super(Track, self)._message_get_suggested_recipients()
    for record in self:
        email = record.partner_email
        # Nothing to add when there is no speaker email, or it already
        # matches the partner's email.
        if not email or email == record.partner_id.email:
            continue
        record._message_add_suggested_recipient(
            suggestions, email=email, reason=_('Speaker Email'))
    return suggestions
def wrapper(___dbname, *args, **kwargs):
    """ Wraps around OSV functions and normalises a few exceptions

    Retries the wrapped call on typical PostgreSQL transaction
    serialization failures (with randomized exponential backoff, up to
    MAX_TRIES_ON_CONCURRENCY_FAILURE attempts) and converts
    IntegrityError into a translated ValidationError.
    """
    dbname = ___dbname  # NOTE: this forbid to use "___dbname" as arguments in http routes

    def tr(src, ttype):
        # We try to do the same as the _(), but without the frame
        # inspection, since we aready are wrapping an osv function
        # trans_obj = self.get('ir.translation') cannot work yet :(
        # Try to recover the request context (and thus the lang) from
        # the wrapped call's arguments, falling back to the HTTP request.
        ctx = {}
        if not kwargs:
            if args and isinstance(args[-1], dict):
                ctx = args[-1]
        elif isinstance(kwargs, dict):
            if 'context' in kwargs:
                ctx = kwargs['context']
            elif 'kwargs' in kwargs and kwargs['kwargs'].get('context'):
                # http entry points such as call_kw()
                ctx = kwargs['kwargs'].get('context')
            else:
                try:
                    from harpiya.http import request
                    ctx = request.env.context
                except Exception:
                    pass

        lang = ctx and ctx.get('lang')
        if not (lang or hasattr(src, '__call__')):
            return src

        # We open a *new* cursor here, one reason is that failed SQL
        # queries (as in IntegrityError) will invalidate the current one.
        with closing(harpiya.sql_db.db_connect(dbname).cursor()) as cr:
            if ttype == 'sql_constraint':
                # ``key`` is a closure over the constraint name assigned
                # in the IntegrityError handler below; this branch is
                # only reached from there.
                res = translate_sql_constraint(cr, key=key, lang=lang)
            else:
                res = translate(cr, name=False, source_type=ttype,
                                lang=lang, source=src)
            return res or src

    def _(src):
        return tr(src, 'code')

    tries = 0
    while True:
        try:
            if harpiya.registry(dbname)._init and not harpiya.tools.config['test_enable']:
                raise harpiya.exceptions.Warning(
                    'Currently, this database is not fully loaded and can not be used.')
            return f(dbname, *args, **kwargs)
        except (OperationalError, QWebException) as e:
            if isinstance(e, QWebException):
                # Unwrap an OperationalError hidden inside a QWebException;
                # anything else is re-raised as-is.
                cause = e.qweb.get('cause')
                if isinstance(cause, OperationalError):
                    e = cause
                else:
                    raise
            # Automatically retry the typical transaction serialization errors
            if e.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                raise
            if tries >= MAX_TRIES_ON_CONCURRENCY_FAILURE:
                _logger.info("%s, maximum number of tries reached" %
                             errorcodes.lookup(e.pgcode))
                raise
            # Randomized exponential backoff before the next attempt.
            wait_time = random.uniform(0.0, 2 ** tries)
            tries += 1
            _logger.info("%s, retry %d/%d in %.04f sec..." %
                         (errorcodes.lookup(e.pgcode), tries,
                          MAX_TRIES_ON_CONCURRENCY_FAILURE, wait_time))
            time.sleep(wait_time)
        except IntegrityError as inst:
            registry = harpiya.registry(dbname)
            key = inst.diag.constraint_name
            if key in registry._sql_constraints:
                # Known model-level SQL constraint: show its (translated)
                # human message instead of the raw pgerror.
                raise ValidationError(tr(key, 'sql_constraint') or inst.pgerror)
            if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION,
                               errorcodes.FOREIGN_KEY_VIOLATION,
                               errorcodes.RESTRICT_VIOLATION):
                msg = _('The operation cannot be completed:')
                _logger.debug("IntegrityError", exc_info=True)
                # Best effort: enrich the message with model/field info;
                # never let this lookup itself raise.
                try:
                    # Get corresponding model and field
                    model = field = None
                    for name, rclass in registry.items():
                        if inst.diag.table_name == rclass._table:
                            model = rclass
                            field = model._fields.get(inst.diag.column_name)
                            break
                    if inst.pgcode == errorcodes.NOT_NULL_VIOLATION:
                        # This is raised when a field is set with `required=True`. 2 cases:
                        # - Create/update: a mandatory field is not set.
                        # - Delete: another model has a not nullable using the deleted record.
                        msg += '\n'
                        msg += _(
                            '- Create/update: a mandatory field is not set.\n'
                            '- Delete: another model requires the record being deleted. If possible, archive it instead.')
                        if model:
                            msg += '\n\n{} {} ({}), {} {} ({})'.format(
                                _('Model:'), model._description, model._name,
                                _('Field:'),
                                field.string if field else _('Unknown'),
                                field.name if field else _('Unknown'),
                            )
                    elif inst.pgcode == errorcodes.FOREIGN_KEY_VIOLATION:
                        # This is raised when a field is set with `ondelete='restrict'`, at
                        # unlink only.
                        msg += _(
                            ' another model requires the record being deleted. If possible, archive it instead.')
                        constraint = inst.diag.constraint_name
                        if model or constraint:
                            msg += '\n\n{} {} ({}), {} {}'.format(
                                _('Model:'),
                                model._description if model else _('Unknown'),
                                model._name if model else _('Unknown'),
                                _('Constraint:'),
                                constraint if constraint else _('Unknown'),
                            )
                except Exception:
                    pass
                raise ValidationError(msg)
            else:
                raise ValidationError(inst.args[0])
def page_search_dependencies(self, page_id=False):
    """ Search dependencies just for information. It will not catch 100%
        of dependencies and false positives are more than possible.
        Each module could add dependencies in this dict.

        :returns: a dictionary where the key is the 'category' of object
            related to the given view, and the value is the list of text
            and link to the resource using the given page
    """
    dependencies = {}
    if not page_id:
        return dependencies

    current_page = self.env['website.page'].browse(int(page_id))
    website = self.env['website'].browse(self._context.get('website_id'))
    url = current_page.url

    # Other website.page records whose backing view architecture links to this url
    pages_domain = [('view_id.arch_db', 'ilike', url)] + website.website_domain()
    linking_pages = self.env['website.page'].search(pages_domain)
    page_key = _('Pages') if len(linking_pages) > 1 else _('Page')
    page_view_ids = []
    for linking_page in linking_pages:
        dependencies.setdefault(page_key, []).append({
            'text': _('Page <b>%s</b> contains a link to this page') % linking_page.url,
            'item': linking_page.name,
            'link': linking_page.url,
        })
        page_view_ids.append(linking_page.view_id.id)

    # Plain ir.ui.view records (not backing a website.page) linking to this url
    views_domain = [('arch_db', 'ilike', url), ('id', 'not in', page_view_ids)] + website.website_domain()
    linking_views = self.env['ir.ui.view'].search(views_domain)
    view_key = _('Templates') if len(linking_views) > 1 else _('Template')
    for linking_view in linking_views:
        dependencies.setdefault(view_key, []).append({
            'text': _('Template <b>%s (id:%s)</b> contains a link to this page') % (linking_view.key or linking_view.name, linking_view.id),
            'link': '/web#id=%s&view_type=form&model=ir.ui.view' % linking_view.id,
            'item': _('%s (id:%s)') % (linking_view.key or linking_view.name, linking_view.id),
        })

    # Website menu entries pointing at this url
    menus_domain = [('url', 'ilike', '%s' % url)] + website.website_domain()
    linking_menus = self.env['website.menu'].search(menus_domain)
    menu_key = _('Menus') if len(linking_menus) > 1 else _('Menu')
    for linking_menu in linking_menus:
        dependencies.setdefault(menu_key, []).append({
            'text': _('This page is in the menu <b>%s</b>') % linking_menu.name,
            'link': '/web#id=%s&view_type=form&model=website.menu' % linking_menu.id,
            'item': linking_menu.name,
        })

    return dependencies
class WebsiteForm(http.Controller):
    """Generic website-form controller: validates, converts and inserts
    values posted by the website form builder as a record of any model
    whitelisted through ``website_form_access``."""

    # Check and insert values from the form on the model <model>
    @http.route('/website_form/<string:model_name>', type='http', auth="public", methods=['POST'], website=True)
    def website_form(self, model_name, **kwargs):
        """Handle a form POST for `model_name`.

        :return: a JSON string — ``false`` on unknown model or SQL error,
            ``{'error_fields': [...]}`` on validation failure,
            ``{'id': <id>}`` on success.
        """
        model_record = request.env['ir.model'].sudo().search([
            ('model', '=', model_name),
            ('website_form_access', '=', True)
        ])
        if not model_record:
            return json.dumps(False)

        try:
            data = self.extract_data(model_record, request.params)
        # If we encounter an issue while extracting data
        except ValidationError as e:
            # I couldn't find a cleaner way to pass data to an exception
            return json.dumps({'error_fields': e.args[0]})

        try:
            id_record = self.insert_record(request, model_record, data['record'], data['custom'], data.get('meta'))
            if id_record:
                self.insert_attachment(model_record, id_record, data['attachments'])
                # in case of an email, we want to send it immediately instead of waiting
                # for the email queue to process
                if model_name == 'mail.mail':
                    request.env[model_name].sudo().browse(id_record).send()

        # Some fields have additional SQL constraints that we can't check generically
        # Ex: crm.lead.probability which is a float between 0 and 1
        # TODO: How to get the name of the erroneous field ?
        except IntegrityError:
            return json.dumps(False)

        request.session['form_builder_model_model'] = model_record.model
        request.session['form_builder_model'] = model_record.name
        request.session['form_builder_id'] = id_record

        return json.dumps({'id': id_record})

    # Constants string to make metadata readable on a text field
    _meta_label = "%s\n________\n\n" % _("Metadata")  # Title for meta data

    # Dict of dynamically called filters following the type of field,
    # to be fault tolerant: a ValueError from any of them marks the field
    # as erroneous instead of crashing the request.

    def identity(self, field_label, field_input):
        return field_input

    def integer(self, field_label, field_input):
        return int(field_input)

    def floating(self, field_label, field_input):
        return float(field_input)

    def boolean(self, field_label, field_input):
        return bool(field_input)

    def date(self, field_label, field_input):
        # Parse using the user's language date format, store in server format
        lang = request.env['ir.qweb.field'].user_lang()
        return datetime.strptime(field_input, lang.date_format).strftime(DEFAULT_SERVER_DATE_FORMAT)

    def datetime(self, field_label, field_input):
        # Parse in the user's locale format and timezone, store as UTC
        lang = request.env['ir.qweb.field'].user_lang()
        strftime_format = (u"%s %s" % (lang.date_format, lang.time_format))
        user_tz = pytz.timezone(request.context.get('tz') or request.env.user.tz or 'UTC')
        dt = user_tz.localize(datetime.strptime(field_input, strftime_format)).astimezone(pytz.utc)
        return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    def binary(self, field_label, field_input):
        return base64.b64encode(field_input.read())

    def one2many(self, field_label, field_input):
        return [int(i) for i in field_input.split(',')]

    def many2many(self, field_label, field_input, *args):
        return [(args[0] if args else (6, 0)) + (self.one2many(field_label, field_input), )]

    _input_filters = {
        'char': identity,
        'text': identity,
        'html': identity,
        'date': date,
        'datetime': datetime,
        'many2one': integer,
        'one2many': one2many,
        'many2many': many2many,
        'selection': identity,
        'boolean': boolean,
        'integer': integer,
        'float': floating,
        'binary': binary,
        'monetary': floating,
    }

    # Extract all data sent by the form and sort it into several buckets
    def extract_data(self, model, values):
        """Split raw posted `values` into record values, attachments,
        custom (non-model) field text and optional request metadata.

        :raises ValidationError: with the list of erroneous field names
            (plus any missing required fields) when a conversion fails.
        """
        dest_model = request.env[model.sudo().model]

        data = {
            'record': {},        # Values to create record
            'attachments': [],   # Attached files
            'custom': '',        # Custom fields values
            'meta': '',          # Add metadata if enabled
        }

        authorized_fields = model.sudo()._get_form_writable_fields()
        error_fields = []
        custom_fields = []

        for field_name, field_value in values.items():
            # If the value of the field is a file
            if hasattr(field_value, 'filename'):
                # Undo file upload field name indexing
                field_name = field_name.split('[', 1)[0]

                # If it's an actual binary field, convert the input file
                # If it's not, we'll use attachments instead
                if field_name in authorized_fields and authorized_fields[field_name]['type'] == 'binary':
                    data['record'][field_name] = base64.b64encode(field_value.read())
                    field_value.stream.seek(0)  # do not consume value forever
                    if authorized_fields[field_name]['manual'] and field_name + "_filename" in dest_model:
                        data['record'][field_name + "_filename"] = field_value.filename
                else:
                    field_value.field_name = field_name
                    data['attachments'].append(field_value)

            # If it's a known field
            elif field_name in authorized_fields:
                try:
                    input_filter = self._input_filters[authorized_fields[field_name]['type']]
                    data['record'][field_name] = input_filter(self, field_name, field_value)
                except ValueError:
                    error_fields.append(field_name)

            # If it's a custom field
            elif field_name != 'context':
                custom_fields.append((field_name, field_value))

        data['custom'] = "\n".join([u"%s : %s" % v for v in custom_fields])

        # Add metadata if enabled
        environ = request.httprequest.headers.environ
        if (request.website.website_form_enable_metadata):
            data['meta'] += "%s : %s\n%s : %s\n%s : %s\n%s : %s\n" % (
                "IP", environ.get("REMOTE_ADDR"),
                "USER_AGENT", environ.get("HTTP_USER_AGENT"),
                "ACCEPT_LANGUAGE", environ.get("HTTP_ACCEPT_LANGUAGE"),
                "REFERER", environ.get("HTTP_REFERER"))

        # This function can be defined on any model to provide
        # a model-specific filtering of the record values
        # Example:
        # def website_form_input_filter(self, values):
        #     values['name'] = '%s\'s Application' % values['partner_name']
        #     return values
        if hasattr(dest_model, "website_form_input_filter"):
            data['record'] = dest_model.website_form_input_filter(request, data['record'])

        missing_required_fields = [label for label, field in authorized_fields.items() if field['required'] and label not in data['record']]
        # NOTE(review): missing required fields are only reported when a
        # conversion error also occurred — kept as-is to preserve behavior.
        if error_fields:
            raise ValidationError(error_fields + missing_required_fields)

        return data

    def insert_record(self, request, model, values, custom, meta=None):
        """Create the record as superuser; route leftover custom/meta text
        either into the model's configured default field or into a
        mail.message attached to the new record. Returns the new id."""
        model_name = model.sudo().model
        if model_name == 'mail.mail':
            values.update({'reply_to': values.get('email_from')})
        record = request.env[model_name].with_user(SUPERUSER_ID).with_context(mail_create_nosubscribe=True).create(values)

        if custom or meta:
            _custom_label = "%s\n___________\n\n" % _("Other Information:")  # Title for custom fields
            if model_name == 'mail.mail':
                _custom_label = "%s\n___________\n\n" % _("This message has been posted on your website!")
            default_field = model.website_form_default_field_id
            default_field_data = values.get(default_field.name, '')
            custom_content = (default_field_data + "\n\n" if default_field_data else '') \
                + (_custom_label + custom + "\n\n" if custom else '') \
                + (self._meta_label + meta if meta else '')

            # If there is a default field configured for this model, use it.
            # If there isn't, put the custom data in a message instead
            if default_field.name:
                if default_field.ttype == 'html' or model_name == 'mail.mail':
                    custom_content = nl2br(custom_content)
                record.update({default_field.name: custom_content})
            else:
                values = {
                    'body': nl2br(custom_content),
                    'model': model_name,
                    'message_type': 'comment',
                    'no_auto_thread': False,
                    'res_id': record.id,
                }
                mail_id = request.env['mail.message'].with_user(SUPERUSER_ID).create(values)

        return record.id

    # Link all files attached on the form
    def insert_attachment(self, model, id_record, files):
        """Create ir.attachment records for uploaded `files`; files whose
        name matches a writable field are linked through that field, the
        rest ("orphans") are attached via a mail.message."""
        orphan_attachment_ids = []
        model_name = model.sudo().model
        record = model.env[model_name].browse(id_record)
        authorized_fields = model.sudo()._get_form_writable_fields()
        for file in files:
            custom_field = file.field_name not in authorized_fields
            attachment_value = {
                'name': file.filename,
                # base64.encodestring was a deprecated alias removed in
                # Python 3.9; encodebytes produces identical output.
                'datas': base64.encodebytes(file.read()),
                'res_model': model_name,
                'res_id': record.id,
            }
            attachment_id = request.env['ir.attachment'].sudo().create(attachment_value)
            if attachment_id and not custom_field:
                record.sudo()[file.field_name] = [(4, attachment_id.id)]
            else:
                orphan_attachment_ids.append(attachment_id.id)

        if model_name != 'mail.mail':
            # If some attachments didn't match a field on the model,
            # we create a mail.message to link them to the record
            if orphan_attachment_ids:
                values = {
                    'body': _('<p>Attached files : </p>'),
                    'model': model_name,
                    'message_type': 'comment',
                    'no_auto_thread': False,
                    'res_id': id_record,
                    'attachment_ids': [(6, 0, orphan_attachment_ids)],
                }
                mail_id = request.env['mail.message'].with_user(SUPERUSER_ID).create(values)
        else:
            # If the model is mail.mail then we have no other choice but to
            # attach the custom binary field files on the attachment_ids field.
            for attachment_id_id in orphan_attachment_ids:
                record.attachment_ids = [(4, attachment_id_id)]
def customers(self, country=None, industry=None, page=0, **post):
    """Render the public customer-references index page.

    :param country: res.country record (or None) to filter partners by country
    :param industry: res.partner.industry record (or None) to filter by sector
    :param page: pager page number
    :param post: extra query parameters (``search``, ``tag_id``, ...)
    :return: rendered ``website_customer.index`` template
    """
    Tag = request.env['res.partner.tag']
    Partner = request.env['res.partner']
    search_value = post.get('search')

    # Base domain: only published partners with an assigned partner
    domain = [('website_published', '=', True), ('assigned_partner_id', '!=', False)]
    if search_value:
        domain += [
            '|', '|',
            ('name', 'ilike', search_value),
            ('website_description', 'ilike', search_value),
            ('industry_id.name', 'ilike', search_value),
        ]

    tag_id = post.get('tag_id')
    if tag_id:
        # slug -> database id; falls back to 0 when the slug is malformed
        tag_id = unslug(tag_id)[1] or 0
        domain += [('website_tag_ids', 'in', tag_id)]

    # group by industry, based on customers found with the search(domain)
    # NOTE: computed BEFORE the industry filter is appended to the domain,
    # so the sidebar counts cover all sectors matching the search.
    industries = Partner.sudo().read_group(domain, ["id", "industry_id"], groupby="industry_id", orderby="industry_id")
    partners_count = Partner.sudo().search_count(domain)

    if industry:
        domain.append(('industry_id', '=', industry.id))
        # ensure the selected industry shows up in the sidebar even when
        # no partner matching the current search belongs to it
        if industry.id not in (x['industry_id'][0] for x in industries if x['industry_id']):
            if industry.exists():
                industries.append({
                    'industry_id_count': 0,
                    'industry_id': (industry.id, industry.name)
                })
                industries.sort(key=lambda d: (d.get('industry_id') or (0, ''))[1])
    # synthetic "all" entry always first
    industries.insert(0, {
        'industry_id_count': partners_count,
        'industry_id': (0, _("All Sectors of Activity"))
    })

    # group by country, based on customers found with the search(domain)
    # (the domain now includes the industry filter, if any)
    countries = Partner.sudo().read_group(domain, ["id", "country_id"], groupby="country_id", orderby="country_id")
    country_count = Partner.sudo().search_count(domain)

    if country:
        domain += [('country_id', '=', country.id)]
        # same trick as for industries: keep the selected country visible
        if country.id not in (x['country_id'][0] for x in countries if x['country_id']):
            if country.exists():
                countries.append({
                    'country_id_count': 0,
                    'country_id': (country.id, country.name)
                })
                countries.sort(key=lambda d: (d['country_id'] or (0, ""))[1])
    countries.insert(0, {
        'country_id_count': country_count,
        'country_id': (0, _("All Countries"))
    })

    # search customers to display
    partner_count = Partner.sudo().search_count(domain)

    # pager: the url mirrors the active industry/country filters
    url = '/customers'
    if industry:
        url += '/industry/%s' % industry.id
    if country:
        url += '/country/%s' % country.id
    pager = request.website.pager(url=url, total=partner_count, page=page, step=self._references_per_page, scope=7, url_args=post)

    partners = Partner.sudo().search(domain, offset=pager['offset'], limit=self._references_per_page)
    google_map_partner_ids = ','.join(str(it) for it in partners.ids)
    google_maps_api_key = request.website.google_maps_api_key

    # only tags actually used by the displayed partners
    tags = Tag.search([('website_published', '=', True), ('partner_ids', 'in', partners.ids)], order='classname, name ASC')
    tag = tag_id and Tag.browse(tag_id) or False

    values = {
        'countries': countries,
        'current_country_id': country.id if country else 0,
        'current_country': country or False,
        'industries': industries,
        'current_industry_id': industry.id if industry else 0,
        'current_industry': industry or False,
        'partners': partners,
        'google_map_partner_ids': google_map_partner_ids,
        'pager': pager,
        'post': post,
        'search_path': "?%s" % werkzeug.url_encode(post),
        'tag': tag,
        'tags': tags,
        'google_maps_api_key': google_maps_api_key,
    }
    return request.render("website_customer.index", values)
def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_locate=lambda s: True):
    """ Apply an inheriting view (a descendant of the base view)

    Apply to a source architecture all the spec nodes (i.e. nodes
    describing where and what changes to apply to some parent
    architecture) given by an inheriting view.

    :param Element source: a parent architecture to modify
    :param specs_tree: a single spec Element or a list of them
    :param bool inherit_branding: when True, tag the first element of a
        'replace' spec with 'meta-oe-xpath-replacing' so branding can tell
        which node was replaced
    :param pre_locate: function that is executed before locating a node.
                        This function receives an arch as argument.
                        This is required by studio to properly handle
                        group_ids.
    :return: a modified source where the specs are applied
    :rtype: Element
    """
    # Queue of specification nodes (i.e. nodes describing where and
    # changes to apply to some parent architecture).
    specs = specs_tree if isinstance(specs_tree, list) else [specs_tree]

    def extract(spec):
        """ Utility function that locates a node given a specification, remove it
        from the source and returns it.

        Used for children carrying position="move": the node is cut out of
        `source` and re-inserted at the spec's location by the caller.
        """
        # a 'move' spec must be a leaf: it only points at the node to move
        if len(spec):
            raise ValueError(
                _("Invalid specification for moved nodes: '%s'") % etree.tostring(spec)
            )
        pre_locate(spec)
        to_extract = locate_node(source, spec)
        if to_extract is not None:
            remove_element(to_extract)
            return to_extract
        else:
            raise ValueError(
                _("Element '%s' cannot be located in parent view") % etree.tostring(spec)
            )

    while len(specs):
        spec = specs.pop(0)
        if isinstance(spec, SKIPPED_ELEMENT_TYPES):
            continue
        if spec.tag == 'data':
            # a <data> wrapper is transparent: enqueue its children instead
            specs += [c for c in spec]
            continue
        pre_locate(spec)
        node = locate_node(source, spec)
        if node is not None:
            pos = spec.get('position', 'inside')
            if pos == 'replace':
                # '$0' placeholders in the spec receive a copy of the replaced node
                for loc in spec.xpath(".//*[text()='$0']"):
                    loc.text = ''
                    loc.append(copy.deepcopy(node))
                if node.getparent() is None:
                    # replacing the ROOT of the architecture: the first
                    # non-comment child of the spec becomes the new root
                    spec_content = None
                    comment = None
                    for content in spec:
                        if content.tag is not etree.Comment:
                            spec_content = content
                            break
                        else:
                            comment = content
                    source = copy.deepcopy(spec_content)
                    # preserve a leading comment by re-inserting it under the new root
                    if comment is not None:
                        text = source.text
                        source.text = None
                        comment.tail = text
                        source.insert(0, comment)
                else:
                    replaced_node_tag = None
                    for child in spec:
                        if child.get('position') == 'move':
                            child = extract(child)
                        if inherit_branding and not replaced_node_tag and child.tag is not etree.Comment:
                            # To make a correct branding, we need to
                            # - know exactly which node has been replaced
                            # - store it before anything else has altered the Tree
                            # Do it exactly here :D
                            child.set('meta-oe-xpath-replacing', node.tag)
                            # We just store the replaced node tag on the first
                            # child of the xpath replacing it
                            replaced_node_tag = node.tag
                        node.addprevious(child)
                    node.getparent().remove(node)
            elif pos == 'attributes':
                # each <attribute name="..."> child sets, amends or removes one attribute
                # NOTE(review): getiterator() is deprecated in lxml in favour of
                # iter() — kept as-is here to stay byte-identical.
                for child in spec.getiterator('attribute'):
                    attribute = child.get('name')
                    value = child.text or ''
                    if child.get('add') or child.get('remove'):
                        # add/remove mode edits a separator-joined list in place
                        assert not child.text
                        separator = child.get('separator', ',')
                        if separator == ' ':
                            separator = None  # squash spaces
                        to_add = (
                            s for s in (s.strip() for s in child.get('add', '').split(separator))
                            if s
                        )
                        to_remove = {s.strip() for s in child.get('remove', '').split(separator)}
                        values = (s.strip() for s in node.get(attribute, '').split(separator))
                        value = (separator or ' ').join(
                            itertools.chain(
                                (v for v in values if v not in to_remove),
                                to_add
                            )
                        )
                    if value:
                        node.set(attribute, value)
                    elif attribute in node.attrib:
                        # empty value means: delete the attribute
                        del node.attrib[attribute]
            elif pos == 'inside':
                add_text_inside(node, spec.text)
                for child in spec:
                    if child.get('position') == 'move':
                        child = extract(child)
                    node.append(child)
            elif pos == 'after':
                # add a sentinel element right after node, insert content of
                # spec before the sentinel, then remove the sentinel element
                sentinel = E.sentinel()
                node.addnext(sentinel)
                add_text_before(sentinel, spec.text)
                for child in spec:
                    if child.get('position') == 'move':
                        child = extract(child)
                    sentinel.addprevious(child)
                remove_element(sentinel)
            elif pos == 'before':
                add_text_before(node, spec.text)
                for child in spec:
                    if child.get('position') == 'move':
                        child = extract(child)
                    node.addprevious(child)
            else:
                raise ValueError(_("Invalid position attribute: '%s'") % pos)
        else:
            # target not found: rebuild a readable "<tag attrs>" string for the error
            attrs = ''.join([
                ' %s="%s"' % (attr, spec.get(attr))
                for attr in spec.attrib
                if attr != 'position'
            ])
            tag = "<%s%s>" % (spec.tag, attrs)
            raise ValueError(
                _("Element '%s' cannot be located in parent view") % tag
            )
    return source
def portal_my_purchase_orders(self, page=1, date_begin=None, date_end=None, sortby=None, filterby=None, **kw):
    """Render the portal list of the current user's purchase orders.

    :param page: pager page number
    :param date_begin: optional lower bound (exclusive) on create_date
    :param date_end: optional upper bound (inclusive) on create_date
    :param sortby: key of `searchbar_sortings`; falls back to 'date'
    :param filterby: key of `searchbar_filters`; falls back to 'all'
    :return: rendered ``purchase.portal_my_purchase_orders`` template
    """
    values = self._prepare_portal_layout_values()
    PurchaseOrder = request.env['purchase.order']

    domain = []

    archive_groups = self._get_archive_groups('purchase.order', domain)
    if date_begin and date_end:
        domain += [('create_date', '>', date_begin), ('create_date', '<=', date_end)]

    searchbar_sortings = {
        'date': {'label': _('Newest'), 'order': 'create_date desc, id desc'},
        'name': {'label': _('Name'), 'order': 'name asc, id asc'},
        'amount_total': {'label': _('Total'), 'order': 'amount_total desc, id desc'},
    }
    # default sort by value; also guard against an unknown key coming from
    # the URL, which previously raised a KeyError (HTTP 500)
    if not sortby or sortby not in searchbar_sortings:
        sortby = 'date'
    order = searchbar_sortings[sortby]['order']

    searchbar_filters = {
        'all': {'label': _('All'), 'domain': [('state', 'in', ['purchase', 'done', 'cancel'])]},
        'purchase': {'label': _('Purchase Order'), 'domain': [('state', '=', 'purchase')]},
        'cancel': {'label': _('Cancelled'), 'domain': [('state', '=', 'cancel')]},
        'done': {'label': _('Locked'), 'domain': [('state', '=', 'done')]},
    }
    # default filter by value; unknown keys fall back to 'all' instead of crashing
    if not filterby or filterby not in searchbar_filters:
        filterby = 'all'
    domain += searchbar_filters[filterby]['domain']

    # count for pager
    purchase_count = PurchaseOrder.search_count(domain)
    # make pager
    pager = portal_pager(
        url="/my/purchase",
        url_args={'date_begin': date_begin, 'date_end': date_end},
        total=purchase_count,
        page=page,
        step=self._items_per_page
    )
    # search the purchase orders to display, according to the pager data
    orders = PurchaseOrder.search(
        domain,
        order=order,
        limit=self._items_per_page,
        offset=pager['offset']
    )
    # keep only the most recent ids in the session-backed browsing history
    request.session['my_purchases_history'] = orders.ids[:100]

    values.update({
        'date': date_begin,
        'orders': orders,
        'page_name': 'purchase',
        'pager': pager,
        'archive_groups': archive_groups,
        'searchbar_sortings': searchbar_sortings,
        'sortby': sortby,
        'searchbar_filters': OrderedDict(sorted(searchbar_filters.items())),
        'filterby': filterby,
        'default_url': '/my/purchase',
    })
    return request.render("purchase.portal_my_purchase_orders", values)