def _search_get_detail(self, website, order, options):
    with_description = options['displayDescription']
    with_date = options['displayDetail']
    blog = options.get('blog')
    tags = options.get('tag')
    date_begin = options.get('date_begin')
    date_end = options.get('date_end')
    state = options.get('state')
    domain = [website.website_domain()]
    if blog:
        domain.append([('blog_id', '=', unslug(blog)[1])])
    if tags:
        active_tag_ids = [unslug(tag)[1] for tag in tags.split(',')] or []
        if active_tag_ids:
            domain.append([('tag_ids', 'in', active_tag_ids)])
    if date_begin and date_end:
        domain.append([("post_date", ">=", date_begin), ("post_date", "<=", date_end)])
    if self.env.user.has_group('website.group_website_designer'):
        if state == "published":
            domain.append([("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())])
        elif state == "unpublished":
            domain.append(['|', ("website_published", "=", False), ("post_date", ">", fields.Datetime.now())])
    else:
        domain.append([("post_date", "<=", fields.Datetime.now())])
    search_fields = ['name', 'author_name']

    def search_in_tags(env, search_term):
        tags_like_search = env['blog.tag'].search([('name', 'ilike', search_term)])
        return [('tag_ids', 'in', tags_like_search.ids)]

    fetch_fields = ['name', 'website_url']
    mapping = {
        'name': {'name': 'name', 'type': 'text', 'match': True},
        'website_url': {'name': 'website_url', 'type': 'text', 'truncate': False},
    }
    if with_description:
        search_fields.append('content')
        fetch_fields.append('content')
        mapping['description'] = {'name': 'content', 'type': 'text', 'html': True, 'match': True}
    if with_date:
        fetch_fields.append('published_date')
        mapping['detail'] = {'name': 'published_date', 'type': 'date'}
    return {
        'model': 'blog.post',
        'base_domain': domain,
        'search_fields': search_fields,
        'search_extra': search_in_tags,
        'fetch_fields': fetch_fields,
        'mapping': mapping,
        'icon': 'fa-rss',
    }
def sale_quotation_builder_template_view(self, template_id, **post):
    template_id = unslug(template_id)[-1]
    template = request.env['sale.order.template'].browse(template_id).with_context(
        allowed_company_ids=request.env.user.company_ids.ids,
    )
    return request.render('sale_quotation_builder.so_template', {'template': template})
def _search_get_detail(self, website, order, options):
    with_image = options['displayImage']
    with_description = options['displayDescription']
    with_category = options['displayExtraLink']
    with_price = options['displayDetail']
    domains = [website.sale_product_domain()]
    category = options.get('category')
    min_price = options.get('min_price')
    max_price = options.get('max_price')
    attrib_values = options.get('attrib_values')
    if category:
        domains.append([('public_categ_ids', 'child_of', unslug(category)[1])])
    if min_price:
        domains.append([('list_price', '>=', min_price)])
    if max_price:
        domains.append([('list_price', '<=', max_price)])
    if attrib_values:
        attrib = None
        ids = []
        for value in attrib_values:
            if not attrib:
                attrib = value[0]
                ids.append(value[1])
            elif value[0] == attrib:
                ids.append(value[1])
            else:
                domains.append([('attribute_line_ids.value_ids', 'in', ids)])
                attrib = value[0]
                ids = [value[1]]
        if attrib:
            domains.append([('attribute_line_ids.value_ids', 'in', ids)])
    search_fields = ['name', 'product_variant_ids.default_code']
    fetch_fields = ['id', 'name', 'website_url']
    mapping = {
        'name': {'name': 'name', 'type': 'text', 'match': True},
        'product_variant_ids.default_code': {'name': 'product_variant_ids.default_code', 'type': 'text', 'match': True},
        'website_url': {'name': 'website_url', 'type': 'text', 'truncate': False},
    }
    if with_image:
        mapping['image_url'] = {'name': 'image_url', 'type': 'html'}
    if with_description:
        # Internal note is not part of the rendering.
        search_fields.append('description')
        fetch_fields.append('description')
        search_fields.append('description_sale')
        fetch_fields.append('description_sale')
        mapping['description'] = {'name': 'description_sale', 'type': 'text', 'match': True}
    if with_price:
        mapping['detail'] = {'name': 'price', 'type': 'html', 'display_currency': options['display_currency']}
        mapping['detail_strike'] = {'name': 'list_price', 'type': 'html', 'display_currency': options['display_currency']}
    if with_category:
        mapping['extra_link'] = {'name': 'category', 'type': 'html'}
    return {
        'model': 'product.template',
        'base_domain': domains,
        'search_fields': search_fields,
        'fetch_fields': fetch_fields,
        'mapping': mapping,
        'icon': 'fa-shopping-cart',
    }
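The dicts returned by these _search_get_detail overrides are consumed by the website search machinery; a later snippet in this section calls it through request.website._search_with_fuzzy(...). Below is a minimal, hypothetical controller-side sketch of that call for the product case above; the 'products_only' search type, the search term, and the limit/order values are illustrative assumptions rather than facts taken from the snippets.

# Hypothetical usage sketch; the option keys mirror what the override above reads.
options = {
    'displayImage': True,
    'displayDescription': True,
    'displayExtraLink': False,
    'displayDetail': True,
    'display_currency': request.website.currency_id,  # assumed currency record
    'category': None,
    'min_price': 0,
    'max_price': 0,
    'attrib_values': [],
}
# _search_with_fuzzy returns (total_count, details, fuzzy_term); the matching
# records sit in details[0]['results'] (the same pattern as the blog snippet
# further down that calls it with "blog_posts_only").
total, details, fuzzy_term = request.website._search_with_fuzzy(
    'products_only', 'desk', limit=20, order='name asc', options=options)
products = details[0].get('results', request.env['product.template'])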
def partners_detail(self, partner_id, **post):
    _, partner_id = unslug(partner_id)
    current_grade, current_country = None, None
    grade_id = post.get('grade_id')
    country_id = post.get('country_id')
    if grade_id:
        current_grade = request.env['res.partner.grade'].browse(int(grade_id)).exists()
    if country_id:
        current_country = request.env['res.country'].browse(int(country_id)).exists()
    if partner_id:
        partner = request.env['res.partner'].sudo().browse(partner_id)
        is_website_publisher = request.env['res.users'].has_group('website.group_website_publisher')
        if partner.exists() and (partner.website_published or is_website_publisher):
            values = {
                'main_object': partner,
                'partner': partner,
                'current_grade': current_grade,
                'current_country': current_country,
            }
            return request.render("website_crm_partner_assign.partner", values)
    return self.partners(**post)
def shape(self, module, filename, **kwargs):
    """ Returns a color-customized svg (background shape or illustration). """
    svg = None
    if module == 'illustration':
        attachment = request.env['ir.attachment'].sudo().browse(unslug(filename)[1])
        if (not attachment.exists()
                or attachment.type != 'binary'
                or not attachment.public
                or not attachment.url.startswith(request.httprequest.path)):
            raise werkzeug.exceptions.NotFound()
        svg = attachment.raw.decode('utf-8')
    else:
        svg = self._get_shape_svg(module, 'shapes', filename)

    svg, options = self._update_svg_colors(kwargs, svg)
    flip_value = options.get('flip', False)
    if flip_value == 'x':
        svg = svg.replace('<svg ', '<svg style="transform: scaleX(-1);" ')
    elif flip_value == 'y':
        svg = svg.replace('<svg ', '<svg style="transform: scaleY(-1)" ')
    elif flip_value == 'xy':
        svg = svg.replace('<svg ', '<svg style="transform: scale(-1)" ')

    return request.make_response(svg, [
        ('Content-type', 'image/svg+xml'),
        ('Cache-control', 'max-age=%s' % http.STATIC_CACHE_LONG),
    ])
def partners_detail(self, partner_id, **post):
    _, partner_id = unslug(partner_id)
    if partner_id:
        partner = request.env['res.partner'].sudo().browse(partner_id)
        if partner.exists() and partner.website_published:  # TODO should be done with access rules
            values = {}
            values['main_object'] = values['partner'] = partner
            return request.render("website_membership.partner", values)
    return self.members(**post)
def partners_detail(self, partner_id, **post):
    _, partner_id = unslug(partner_id)
    if partner_id:
        partner = request.env['res.partner'].sudo().browse(partner_id)
        if partner.exists() and partner.website_published:
            values = {}
            values['main_object'] = values['partner'] = partner
            return request.render("website_customer.details", values)
    return self.customers(**post)
def shape(self, module, filename, **kwargs):
    """ Returns a color-customized svg (background shape or illustration). """
    svg = None
    if module == 'illustration':
        attachment = request.env['ir.attachment'].sudo().browse(unslug(filename)[1])
        if (not attachment.exists()
                or attachment.type != 'binary'
                or not attachment.public
                or not attachment.url.startswith(request.httprequest.path)):
            raise werkzeug.exceptions.NotFound()
        svg = b64decode(attachment.datas).decode('utf-8')
    else:
        shape_path = get_resource_path(module, 'static', 'shapes', filename)
        if not shape_path:
            raise werkzeug.exceptions.NotFound()
        with tools.file_open(shape_path, 'r') as file:
            svg = file.read()

    user_colors = []
    for key, value in kwargs.items():
        colorMatch = re.match('^c([1-5])$', key)
        if colorMatch:
            # Check that color is hex or rgb(a) to prevent arbitrary injection
            if not re.match(r'(?i)^#[0-9A-F]{6,8}$|^rgba?\(\d{1,3},\d{1,3},\d{1,3}(?:,[0-9.]{1,4})?\)$', value.replace(' ', '')):
                raise werkzeug.exceptions.BadRequest()
            user_colors.append([tools.html_escape(value), colorMatch.group(1)])
        elif key == 'flip':
            if value == 'x':
                svg = svg.replace('<svg ', '<svg style="transform: scaleX(-1);" ')
            elif value == 'y':
                svg = svg.replace('<svg ', '<svg style="transform: scaleY(-1)" ')
            elif value == 'xy':
                svg = svg.replace('<svg ', '<svg style="transform: scale(-1)" ')

    default_palette = {
        '1': '#3AADAA',
        '2': '#7C6576',
        '3': '#F6F6F6',
        '4': '#FFFFFF',
        '5': '#383E45',
    }
    color_mapping = {default_palette[palette_number]: color for color, palette_number in user_colors}
    # create a case-insensitive regex to match all the colors to replace, eg: '(?i)(#3AADAA)|(#7C6576)'
    regex = '(?i)%s' % '|'.join('(%s)' % color for color in color_mapping.keys())

    def subber(match):
        key = match.group().upper()
        return color_mapping[key] if key in color_mapping else key

    svg = re.sub(regex, subber, svg)

    return request.make_response(svg, [
        ('Content-type', 'image/svg+xml'),
        ('Cache-control', 'max-age=%s' % http.STATIC_CACHE_LONG),
    ])
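For reference, here is a hypothetical request against whatever route serves this controller; the route prefix and the filename are illustrative assumptions, only the query-parameter handling (c1..c5 and flip) is grounded in the code above.

# Hypothetical example URL (route prefix and filename are illustrative only):
#   GET /<shape_route>/web_editor/connection_01.svg?c1=%23FF0000&c3=rgb(0,0,0)&flip=xy
#
# c1/c3 must be hex or rgb(a) values (anything else raises BadRequest); every
# occurrence of the matching default palette colors ('#3AADAA' for c1,
# '#F6F6F6' for c3) is then replaced case-insensitively in the SVG body,
# and flip=xy mirrors the whole drawing on both axes.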
def referer_to_page(self):
    """Translate HTTP REFERER to cms page if possible."""
    ref = request.httprequest.referrer
    if not ref:
        return None
    parsed = urlparse3.urlparse(ref)
    if parsed.path.startswith('/cms'):
        last_bit = parsed.path.split('/')[-1]
        page_id = unslug(last_bit)[-1]
        return request.env['cms.page'].browse(page_id)
    return None
def psc_team_project_detail(self, project_id, **kwargs):
    project_name, project_id = unslug(project_id)
    project = request.env['project.project'].sudo().browse(project_id)
    if project.sudo().exists():
        values = {'project': project}
        return request.render("website_oca_psc_team.psc_project_detail", values)
    return self.psc_teams(**kwargs)
def partners_detail(self, partner_id, **post):
    _, partner_id = unslug(partner_id)
    if partner_id:
        partner_sudo = request.env['res.partner'].sudo().browse(partner_id)
        is_website_publisher = request.env['res.users'].has_group('website.group_website_publisher')
        if partner_sudo.exists() and (partner_sudo.website_published or is_website_publisher):
            values = {
                'main_object': partner_sudo,
                'partner': partner_sudo,
                'edit_page': False,
            }
            return request.render("website_partner.partner_page", values)
    return request.not_found()
def partners_detail(self, seller_id, page=0, ppg=False, **post):
    _, seller_id = unslug(seller_id)
    if seller_id:
        if ppg:
            try:
                ppg = int(ppg)
            except ValueError:
                ppg = PPG
            post["ppg"] = ppg
        else:
            ppg = PPG
        partner = request.env['res.partner'].sudo().browse(seller_id)
        if partner.exists():
            url = "/shop"
            keep = QueryURL('/shop')
            Product = request.env['product.template'].with_context(bin_size=True)
            product_count = Product.search_count([
                ('seller_id', '=', partner.id),
                ('website_published', '=', True),
            ])
            pager = request.website.pager(url=url, total=product_count, page=page,
                                          step=ppg, scope=7, url_args=post)
            products = Product.search(
                [('seller_id', '=', partner.id), ('website_published', '=', True)],
                limit=ppg, offset=pager['offset'])
            total_page = (len(partner.website_message_ids) / 10) + 1
            values = {
                'main_object': partner,
                'partner': partner,
                'edit_page': False,
                'products': products,
                'pager': pager,
                'keep': keep,
                'bins': TableCompute().process(products, ppg),
                'rows': PPR,
                'total_page': math.floor(total_page),
            }
            return request.render("odoo_website_marketplace.seller_page", values)
    return request.not_found()
def _search_get_detail(self, website, order, options):
    with_description = options['displayDescription']
    with_date = options['displayDetail']
    my = options.get('my')
    search_tags = options.get('tag')
    slide_category = options.get('slide_category')
    domain = [website.website_domain()]
    if my:
        domain.append([('partner_ids', '=', self.env.user.partner_id.id)])
    if search_tags:
        ChannelTag = self.env['slide.channel.tag']
        try:
            tag_ids = list(filter(None, [unslug(tag)[1] for tag in search_tags.split(',')]))
            tags = ChannelTag.search([('id', 'in', tag_ids)]) if tag_ids else ChannelTag
        except Exception:
            tags = ChannelTag
        # Group by group_id
        grouped_tags = defaultdict(list)
        for tag in tags:
            grouped_tags[tag.group_id].append(tag)
        # OR inside a group, AND between groups.
        for group in grouped_tags:
            domain.append([('tag_ids', 'in', [tag.id for tag in grouped_tags[group]])])
    if slide_category and 'nbr_%s' % slide_category in self:
        domain.append([('nbr_%s' % slide_category, '>', 0)])
    search_fields = ['name']
    fetch_fields = ['name', 'website_url']
    mapping = {
        'name': {'name': 'name', 'type': 'text', 'match': True},
        'website_url': {'name': 'website_url', 'type': 'text', 'truncate': False},
    }
    if with_description:
        search_fields.append('description_short')
        fetch_fields.append('description_short')
        mapping['description'] = {'name': 'description_short', 'type': 'text', 'html': True, 'match': True}
    if with_date:
        fetch_fields.append('slide_last_update')
        mapping['detail'] = {'name': 'slide_last_update', 'type': 'date'}
    return {
        'model': 'slide.channel',
        'base_domain': domain,
        'search_fields': search_fields,
        'fetch_fields': fetch_fields,
        'mapping': mapping,
        'icon': 'fa-graduation-cap',
    }
def test_unslug(self):
    tests = {
        '': (None, None),
        'foo': (None, None),
        'foo-': (None, None),
        '-': (None, None),
        'foo-1': ('foo', 1),
        'foo-bar-1': ('foo-bar', 1),
        'foo--1': ('foo', -1),
        '1': (None, 1),
        '1-1': ('1', 1),
        '--1': (None, None),
        'foo---1': (None, None),
        'foo1': (None, None),
    }
    for slug, expected in pycompat.items(tests):
        self.assertEqual(unslug(slug), expected)
def test_unslug(self):
    tests = {
        '': (None, None),
        'foo': (None, None),
        'foo-': (None, None),
        '-': (None, None),
        'foo-1': ('foo', 1),
        'foo-bar-1': ('foo-bar', 1),
        'foo--1': ('foo', -1),
        '1': (None, 1),
        '1-1': ('1', 1),
        '--1': (None, None),
        'foo---1': (None, None),
        'foo1': (None, None),
    }
    for slug, expected in tests.items():
        self.assertEqual(unslug(slug), expected)
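Both test variants above pin down the same contract for unslug: it returns a (name, id) pair, where the id is the trailing integer (possibly negative) and the name is everything before the final dash, or (None, None) when no valid id can be extracted. A minimal sketch consistent with that table follows; the exact regex used by odoo.addons.http_routing may differ in detail, so treat this as an assumption-backed illustration rather than the shipped implementation.

import re

# Optional name part (1-2 word chars, or word chars with dashes/underscores in
# the middle), followed by a dash and a possibly negative integer id.
_UNSLUG_RE = re.compile(r'(?:(\w{1,2}|\w[A-Za-z0-9_-]+?\w)-)?(-?\d+)(?=$|/)')


def unslug(s):
    """Extract (name, id) from a slug like 'foo-bar-1'; (None, None) if invalid."""
    m = _UNSLUG_RE.match(s)
    if not m:
        return None, None
    return m.group(1), int(m.group(2))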
def integrators_detail(self, integrator_id, **post):
    """ Display integrator's detail. """
    _, integrator_id = unslug(integrator_id)
    current_country = None
    country_id = post.get('country_id')
    if country_id:
        current_country = request.env['res.country'].browse(int(country_id)).exists()
    if integrator_id:
        integrator = request.env['res.partner'].sudo().browse(integrator_id)
        is_website_publisher = request.env['res.users'].has_group('website.group_website_publisher')
        if integrator.sudo().exists() and (integrator.website_published or is_website_publisher):
            modules_list, developed_module_count = self.get_integrator_modules_list(integrator)
            references = self.get_integrator_references(integrator)
            sponsorship_lines = integrator.sponsorship_line_ids.sorted(
                key=lambda r: r.date_end, reverse=True)[:5]
            display_all_modules = True if developed_module_count > 5 else False
            values = {
                'main_object': integrator,
                'integrator': integrator,
                'current_country': current_country,
                'references': references,
                'modules_list': modules_list,
                'sponsorship_lines': sponsorship_lines,
                'display_all_modules': display_all_modules,
            }
            return request.render("website_oca_integrator.integrators", values)
    return self.integrators(**post)
def get_root(self, item=None, upper_level=0):
    """Walk through the page path to find the root ancestor.

    The URL is made of the items' slugs, so we can jump to any level
    by looking at the path parts. Use `upper_level` to stop walking
    at a precise hierarchy level.
    """
    item = item or self
    # 1st bit is `/cms`
    bits = item.website_url.split('/')[2:]
    try:
        _slug = bits[upper_level]
    except IndexError:
        # safely default to real root
        _slug = bits[0]
    _, page_id = unslug(_slug)
    return self.browse(page_id)
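A quick illustration of how get_root walks the slugged path; the URL below is hypothetical, only the '/cms' prefix and the slug-with-id convention come from the code above.

# For a page whose website_url is '/cms/root-1/chapter-2/leaf-3', the slug
# parts after '/cms' are ['root-1', 'chapter-2', 'leaf-3'], so:
#
#   page.get_root()               # unslug('root-1')    -> self.browse(1), the real root
#   page.get_root(upper_level=1)  # unslug('chapter-2') -> self.browse(2), one level down
#   page.get_root(upper_level=9)  # IndexError caught   -> falls back to the real root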
def partners_detail(self, partner_id, **post):
    _, partner_id = unslug(partner_id)
    current_grade, current_country = None, None
    grade_id = post.get('grade_id')
    country_id = post.get('country_id')
    if grade_id:
        current_grade = request.env['res.partner.grade'].browse(int(grade_id)).exists()
    if country_id:
        current_country = request.env['res.country'].browse(int(country_id)).exists()
    if partner_id:
        partner = request.env['res.partner'].sudo().browse(partner_id)
        is_website_publisher = request.env['res.users'].has_group('website.group_website_publisher')
        if partner.exists() and (partner.website_published or is_website_publisher):
            values = {
                'main_object': partner,
                'partner': partner,
                'current_grade': current_grade,
                'current_country': current_country,
            }
            return request.render("website_crm_partner_assign.partner", values)
    return self.partners(**post)
def blog(self, blog=None, tag=None, page=1, **opt):
    """ Display the blog on the website.

    :return dict values: values for the templates, containing
     - 'blogs': all blogs for navigation
    """
    if blog and not blog.can_access_from_current_website():
        raise werkzeug.exceptions.NotFound()

    date_begin, date_end, state = opt.get('date_begin'), opt.get('date_end'), opt.get('state')
    domain = request.website.website_domain()
    BlogPost = request.env['blog.post']
    active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] or []
    if active_tag_ids:
        domain += [('tag_ids', 'in', active_tag_ids)]
    if blog:
        domain += [('blog_id', '=', blog.id)]

    post = {}
    blog_posts = BlogPost.sudo().search(domain, order="post_date desc")
    for vlogs in blog_posts:
        # Strip the HTML and keep a short plain-text teaser per post.
        content_arr = BeautifulSoup(vlogs.content, 'html.parser').get_text()
        extra_content = ""
        if len(content_arr) > 220:
            extra_content = "..."
        post[vlogs.id] = content_arr[1:220] + extra_content

    values = {
        'blog_posts': blog_posts,
        'post': post,
    }
    response = request.render("website_blog.latest_blogs", values)
    return response
def blog(self, blog=None, tag=None, page=1, **opt): """ Prepare all values to display the blog. :return dict values: values for the templates, containing - 'blog': current blog - 'blogs': all blogs for navigation - 'pager': pager of posts - 'active_tag_ids' : list of active tag ids, - 'tags_list' : function to built the comma-separated tag list ids (for the url), - 'tags': all tags, for navigation - 'state_info': state of published/unpublished filter - 'nav_list': a dict [year][month] for archives navigation - 'date': date_begin optional parameter, used in archives navigation - 'blog_url': help object to create URLs """ date_begin, date_end, state = opt.get('date_begin'), opt.get('date_end'), opt.get('state') published_count, unpublished_count = 0, 0 BlogPost = request.env['blog.post'] Blog = request.env['blog.blog'] blogs = Blog.search([], order="create_date asc") # build the domain for blog post to display domain = [] # retrocompatibility to accept tag as slug active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] or [] if active_tag_ids: domain += [('tag_ids', 'in', active_tag_ids)] if blog: domain += [('blog_id', '=', blog.id)] if date_begin and date_end: domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)] if request.env.user.has_group('website.group_website_designer'): count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] published_count = BlogPost.search_count(count_domain) unpublished_count = BlogPost.search_count(domain) - published_count if state == "published": domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] elif state == "unpublished": domain += ['|', ("website_published", "=", False), ("post_date", ">", fields.Datetime.now())] else: domain += [("post_date", "<=", fields.Datetime.now())] blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end) blog_posts = BlogPost.search(domain, order="post_date desc") pager = request.website.pager( url=request.httprequest.path.partition('/page/')[0], total=len(blog_posts), page=page, step=self._blog_post_per_page, url_args=opt, ) pager_begin = (page - 1) * self._blog_post_per_page pager_end = page * self._blog_post_per_page blog_posts = blog_posts[pager_begin:pager_end] all_tags = blog.all_tags()[blog.id] # function to create the string list of tag ids, and toggle a given one. # used in the 'Tags Cloud' template. def tags_list(tag_ids, current_tag): tag_ids = list(tag_ids) # required to avoid using the same list if current_tag in tag_ids: tag_ids.remove(current_tag) else: tag_ids.append(current_tag) tag_ids = request.env['blog.tag'].browse(tag_ids).exists() return ','.join(slug(tag) for tag in tag_ids) tag_category = sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper()) other_tags = sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tag: tag.name.upper()) values = { 'blog': blog, 'blogs': blogs, 'main_object': blog, 'other_tags': other_tags, 'state_info': {"state": state, "published": published_count, "unpublished": unpublished_count}, 'active_tag_ids': active_tag_ids, 'tags_list' : tags_list, 'blog_posts': blog_posts, 'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts], 'pager': pager, 'nav_list': self.nav_list(blog), 'blog_url': blog_url, 'date': date_begin, 'tag_category': tag_category, } response = request.render("website_blog.blog_post_short", values) return response
def blog(self, blog=None, tag=None, page=1, **opt): date_begin, date_end, state = opt.get('date_begin'), opt.get( 'date_end'), opt.get('state') published_count, unpublished_count = 0, 0 BlogPost = request.env['blog.post'] Blog = request.env['blog.blog'] blogs = Blog.search([], order="create_date asc") # build the domain for blog post to display domain = [] # retrocompatibility to accept tag as slug active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] or [] if active_tag_ids: domain += [('tag_ids', 'in', active_tag_ids)] if blog: domain += [('blog_id', '=', blog.id)] if date_begin and date_end: domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)] if request.env.user.has_group('website.group_website_designer'): count_domain = domain + \ [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] published_count = BlogPost.search_count(count_domain) unpublished_count = BlogPost.search_count(domain) - published_count if state == "published": domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] elif state == "unpublished": domain += [ '|', ("website_published", "=", False), ("post_date", ">", fields.Datetime.now()) ] else: domain += [("post_date", "<=", fields.Datetime.now())] blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end) # Changes for the filters START all_authors = [] types = [] selected = {'author': '', 'type': '', 'tags': ''} if blog: all_authors = blog.all_authors()[blog.id] types = blog.all_types()[blog.id] tags = opt.get('tags') if tags: selected['tags'] = int(tags) domain += [('tag_ids', 'in', int(tags))] blog_posts = BlogPost.search(domain, order="post_date desc") author = opt.get('author') if author: selected['author'] = int(author) blog_posts = blog_posts.filtered( lambda r: r.author_id.id == int(author)) type_id = opt.get('type') if type_id: selected['type'] = int(type_id) blog_posts = blog_posts.filtered( lambda r: r.blog_type.id == int(type_id)) # Changes for the filters END pager = request.website.pager( url=request.httprequest.path.partition('/page/')[0], total=len(blog_posts), page=page, step=self._blog_post_per_page, url_args=opt, ) pager_begin = (page - 1) * self._blog_post_per_page pager_end = page * self._blog_post_per_page blog_posts = blog_posts[pager_begin:pager_end] all_tags = blog.all_tags()[blog.id] # function to create the string list of tag ids, and toggle a given one. # used in the 'Tags Cloud' template. 
def tags_list(tag_ids, current_tag): tag_ids = list(tag_ids) if current_tag in tag_ids: tag_ids.remove(current_tag) else: tag_ids.append(current_tag) tag_ids = request.env['blog.tag'].browse(tag_ids).exists() return ','.join(slug(tag) for tag in tag_ids) featured_blog_cover_properties = None featured_blog = request.env['blog.post'].get_featured_blog(blog.id) if featured_blog: featured_blog_cover_properties = json.loads( featured_blog.cover_properties) values = { 'blog': blog, 'blogs': blogs, 'main_object': blog, 'tags': all_tags, 'state_info': { "state": state, "published": published_count, "unpublished": unpublished_count }, 'active_tag_ids': active_tag_ids, 'tags_list': tags_list, 'blog_posts': blog_posts, 'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts], 'pager': pager, 'nav_list': self.nav_list(blog), 'blog_url': blog_url, 'date': date_begin, 'authors': all_authors, 'types': types, 'selected': selected, 'featured_blog': featured_blog, 'featured_blog_cover_properties': featured_blog_cover_properties, } response = request.render("website_blog.blog_post_short", values) return response
def customers(self, country=None, industry=None, page=0, **post): Tag = request.env['res.partner.tag'] Partner = request.env['res.partner'] search_value = post.get('search') domain = [('website_published', '=', True), ('assigned_partner_id', '!=', False)] if search_value: domain += [ '|', '|', ('name', 'ilike', search_value), ('website_description', 'ilike', search_value), ('industry_id.name', 'ilike', search_value), ] tag_id = post.get('tag_id') if tag_id: tag_id = unslug(tag_id)[1] or 0 domain += [('website_tag_ids', 'in', tag_id)] # group by industry, based on customers found with the search(domain) industries = Partner.sudo().read_group(domain, ["id", "industry_id"], groupby="industry_id", orderby="industry_id") partners_count = Partner.sudo().search_count(domain) if industry: domain.append(('industry_id', '=', industry.id)) if industry.id not in (x['industry_id'][0] for x in industries if x['industry_id']): if industry.exists(): industries.append({ 'industry_id_count': 0, 'industry_id': (industry.id, industry.name) }) industries.sort(key=lambda d: (d.get('industry_id') or (0, ''))[1]) industries.insert( 0, { 'industry_id_count': partners_count, 'industry_id': (0, _("All Sectors of Activity")) }) # group by country, based on customers found with the search(domain) countries = Partner.sudo().read_group(domain, ["id", "country_id"], groupby="country_id", orderby="country_id") country_count = Partner.sudo().search_count(domain) if country: domain += [('country_id', '=', country.id)] if country.id not in (x['country_id'][0] for x in countries if x['country_id']): if country.exists(): countries.append({ 'country_id_count': 0, 'country_id': (country.id, country.name) }) countries.sort( key=lambda d: (d['country_id'] or (0, ""))[1]) countries.insert( 0, { 'country_id_count': country_count, 'country_id': (0, _("All Countries")) }) # search customers to display partner_count = Partner.sudo().search_count(domain) # pager url = '/customers' if industry: url += '/industry/%s' % industry.id if country: url += '/country/%s' % country.id pager = request.website.pager(url=url, total=partner_count, page=page, step=self._references_per_page, scope=7, url_args=post) partners = Partner.sudo().search(domain, offset=pager['offset'], limit=self._references_per_page) google_map_partner_ids = ','.join(str(it) for it in partners.ids) google_maps_api_key = request.website.google_maps_api_key tags = Tag.search([('website_published', '=', True), ('partner_ids', 'in', partners.ids)], order='classname, name ASC') tag = tag_id and Tag.browse(tag_id) or False values = { 'countries': countries, 'current_country_id': country.id if country else 0, 'current_country': country or False, 'industries': industries, 'current_industry_id': industry.id if industry else 0, 'current_industry': industry or False, 'partners': partners, 'google_map_partner_ids': google_map_partner_ids, 'pager': pager, 'post': post, 'search_path': "?%s" % werkzeug.url_encode(post), 'tag': tag, 'tags': tags, 'google_maps_api_key': google_maps_api_key, } return request.render("website_customer.index", values)
def shop(self, page=0, category=None, search='', integrator='', ppg=False, **post): """ Filter products by integrator. """ response = super(WebsiteIntegratorSale, self).shop( page=page, category=category, search=search, ppg=ppg, **post) # execute below block if url contains integrator parameter if integrator: _, integrator_id = unslug(integrator) integrator = request.env['res.partner'].sudo().browse( integrator_id).exists() if integrator: ppg, post = self.get_product_per_page(ppg, **post) attrib_values = response.qcontext['attrib_values'] attrib_list = request.httprequest.args.getlist('attrib') product = request.env['product.template'] url = "/shop" if search: post["search"] = search if category: category = request.env['product.public.category'].browse( int(category)) url = "/shop/category/%s" % slug(category) if attrib_list: post['attrib'] = attrib_list post["integrator"] = integrator_id domain = self._get_search_domain( search, category, attrib_values) domain += [('id', 'in', integrator.developed_module_ids.ids)] product_count = product.search_count(domain) pager = request.website.pager( url=url, total=product_count, page=page, step=ppg, scope=7, url_args=post) products = product.search(domain, limit=ppg, offset=pager['offset'], order=self._get_search_order(post)) keep = QueryURL('/shop', category=category and int(category), search=search, integrator=slug(integrator), attrib=attrib_list, order=post.get('order')) values = { 'products': products, 'bins': TableCompute().process(products, ppg), 'pager': pager, 'search_count': product_count, 'search': search, 'integrator': integrator, 'keep': keep, } response.qcontext.update(values) return response return response
def _prepare_blog_values(self, blogs, blog=False, date_begin=False, date_end=False, tags=False, state=False, page=False): """ Prepare all values to display the blogs index page or one specific blog""" BlogPost = request.env['blog.post'] BlogTag = request.env['blog.tag'] # prepare domain domain = request.website.website_domain() if blog: domain += [('blog_id', '=', blog.id)] if date_begin and date_end: domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)] active_tag_ids = tags and [unslug(tag)[1] for tag in tags.split(',')] or [] active_tags = BlogTag if active_tag_ids: active_tags = BlogTag.browse(active_tag_ids).exists() fixed_tag_slug = ",".join(slug(t) for t in active_tags) if fixed_tag_slug != tags: new_url = request.httprequest.full_path.replace( "/tag/%s" % tags, "/tag/%s" % fixed_tag_slug, 1) if new_url != request.httprequest.full_path: # check that really replaced and avoid loop return request.redirect(new_url, 301) domain += [('tag_ids', 'in', active_tags.ids)] if request.env.user.has_group('website.group_website_designer'): count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now()) ] published_count = BlogPost.search_count(count_domain) unpublished_count = BlogPost.search_count(domain) - published_count if state == "published": domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] elif state == "unpublished": domain += [ '|', ("website_published", "=", False), ("post_date", ">", fields.Datetime.now()) ] else: domain += [("post_date", "<=", fields.Datetime.now())] use_cover = request.website.is_view_active( 'website_blog.opt_blog_cover_post') fullwidth_cover = request.website.is_view_active( 'website_blog.opt_blog_cover_post_fullwidth_design') # if blog, we show blog title, if use_cover and not fullwidth_cover we need pager + latest always offset = (page - 1) * self._blog_post_per_page first_post = BlogPost if not blog: first_post = BlogPost.search(domain + [('website_published', '=', True)], order="post_date desc, id asc", limit=1) if use_cover and not fullwidth_cover: offset += 1 posts = BlogPost.search( domain, offset=offset, limit=self._blog_post_per_page, order="is_published desc, post_date desc, id asc") total = BlogPost.search_count(domain) pager = request.website.pager( url=request.httprequest.path.partition('/page/')[0], total=total, page=page, step=self._blog_post_per_page, ) all_tags = blog and blogs.all_tags()[blog.id] or blogs.all_tags( join=True) tag_category = sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper()) other_tags = sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tag: tag.name.upper()) # for performance prefetch the first post with the others post_ids = (first_post | posts).ids return { 'date_begin': date_begin, 'date_end': date_end, 'first_post': first_post.with_prefetch(post_ids), 'other_tags': other_tags, 'tag_category': tag_category, 'nav_list': self.nav_list(), 'tags_list': self.tags_list, 'pager': pager, 'posts': posts.with_prefetch(post_ids), 'tag': tags, 'active_tag_ids': active_tags.ids, 'domain': domain, 'state_info': state and { "state": state, "published": published_count, "unpublished": unpublished_count }, 'blogs': blogs, 'blog': blog, }
def blog(self, blog=None, tag=None, page=1, **opt):
    tags = tag
    active_tag_ids = tags and [unslug(tag)[1] for tag in tags.split(',')] or []
    if active_tag_ids:
        fixed_tag_slug = ",".join(slug(t) for t in request.env['blog.tag'].browse(active_tag_ids))
        if fixed_tag_slug != tags:
            return request.redirect(
                request.httprequest.full_path.replace("/tag/%s/" % tags, "/tag/%s/" % fixed_tag_slug, 1), 301)

    ConfigParameter = request.env['ir.config_parameter'].sudo()
    is_blog_website_mana2many = ConfigParameter.get_param('anodoo_blog.is_blog_website_mana2many')

    # return super(AnodooBlog, self).blog(blog=blog, tag=tag, page=page, **opt)
    # override
    Blog = request.env['blog.blog']
    if blog and not blog.can_access_from_current_website():
        raise werkzeug.exceptions.NotFound()

    blogs_domain = request.website.website_domain() + [('is_public', '=', True)]
    blogs = Blog.search(blogs_domain)
    if not blog and len(blogs) == 1:
        return werkzeug.utils.redirect('/blog/%s' % slug(blogs[0]), code=302)

    series, date_begin, date_end, state = opt.get('series'), opt.get('date_begin'), opt.get('date_end'), opt.get('state')

    values = self._prepare_blog_values(blogs=blogs, blog=blog, date_begin=date_begin,
                                       date_end=date_end, tags=tag, series=series,
                                       state=state, page=page)
    if blog:
        values['main_object'] = blog
        values['edit_in_backend'] = True
        values['blog_url'] = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag,
                                      series=series, date_begin=date_begin, date_end=date_end)
    else:
        values['blog_url'] = QueryURL('/blog', ['tag'], series=series,
                                      date_begin=date_begin, date_end=date_end)

    # Whether to display the footer.
    values['no_footer'] = ConfigParameter.get_param('anodoo_blog.is_hide_footer')

    return request.render("website_blog.blog_post_short", values)
def customers(self, country=None, industry=None, page=0, **post): Tag = request.env['res.partner.tag'] Partner = request.env['res.partner'] search_value = post.get('search') domain = [('website_published', '=', True), ('assigned_partner_id', '!=', False)] if search_value: domain += [ '|', '|', ('name', 'ilike', search_value), ('website_description', 'ilike', search_value), ('industry_id.name', 'ilike', search_value), ] tag_id = post.get('tag_id') if tag_id: tag_id = unslug(tag_id)[1] or 0 domain += [('website_tag_ids', 'in', tag_id)] # group by industry, based on customers found with the search(domain) industries = Partner.sudo().read_group(domain, ["id", "industry_id"], groupby="industry_id", orderby="industry_id") partners_count = Partner.sudo().search_count(domain) if industry: domain.append(('industry_id', '=', industry.id)) if industry.id not in (x['industry_id'][0] for x in industries if x['industry_id']): if industry.exists(): industries.append({ 'industry_id_count': 0, 'industry_id': (industry.id, industry.name) }) industries.sort(key=lambda d: (d.get('industry_id') or (0, ''))[1]) industries.insert(0, { 'industry_id_count': partners_count, 'industry_id': (0, _("All Sectors of Activity")) }) # group by country, based on customers found with the search(domain) countries = Partner.sudo().read_group(domain, ["id", "country_id"], groupby="country_id", orderby="country_id") country_count = Partner.sudo().search_count(domain) if country: domain += [('country_id', '=', country.id)] if country.id not in (x['country_id'][0] for x in countries if x['country_id']): if country.exists(): countries.append({ 'country_id_count': 0, 'country_id': (country.id, country.name) }) countries.sort(key=lambda d: (d['country_id'] or (0, ""))[1]) countries.insert(0, { 'country_id_count': country_count, 'country_id': (0, _("All Countries")) }) # search customers to display partner_count = Partner.sudo().search_count(domain) # pager url = '/customers' if industry: url += '/industry/%s' % industry.id if country: url += '/country/%s' % country.id pager = request.website.pager( url=url, total=partner_count, page=page, step=self._references_per_page, scope=7, url_args=post ) partners = Partner.sudo().search(domain, offset=pager['offset'], limit=self._references_per_page) google_map_partner_ids = ','.join(str(it) for it in partners.ids) google_maps_api_key = request.env['ir.config_parameter'].sudo().get_param('google_maps_api_key') tags = Tag.search([('website_published', '=', True), ('partner_ids', 'in', partners.ids)], order='classname, name ASC') tag = tag_id and Tag.browse(tag_id) or False values = { 'countries': countries, 'current_country_id': country.id if country else 0, 'current_country': country or False, 'industries': industries, 'current_industry_id': industry.id if industry else 0, 'current_industry': industry or False, 'partners': partners, 'google_map_partner_ids': google_map_partner_ids, 'pager': pager, 'post': post, 'search_path': "?%s" % werkzeug.url_encode(post), 'tag': tag, 'tags': tags, 'google_maps_api_key': google_maps_api_key, } return request.render("website_customer.index", values)
def integrator_contributors(self, integrator_id=None, country_name=None, country_id=0, page=1, **post): integrator = integrator_id integrator_name, integrator_id = unslug(integrator_id) country = request.env['res.country'] partner = request.env['res.partner'] post_name = post.get('search') or post.get('name', '') current_country = None country_domain = [ '|', ('membership_state', '=', 'paid'), ('github_login', '!=', False), ("website_published", "=", True), ("parent_id", "=", integrator_id) ] if post_name: country_domain += [ '|', ('name', 'ilike', post_name), ('website_description', 'ilike', post_name) ] countries = partner.sudo().read_group(country_domain, ["id", "country_id"], groupby="country_id", orderby="country_id") countries_total = sum(country_dict['country_id_count'] for country_dict in countries) if country_id: country_domain += [("country_id", '=', country_id)] current_country = country.browse(country_id).read(['id', 'name'])[0] if not any(x['country_id'][0] == country_id for x in countries if x['country_id']): countries.append({ 'country_id_count': 0, 'country_id': (country_id, current_country["name"]) }) countries = [d for d in countries if d['country_id']] countries.sort(key=lambda d: d['country_id'][1]) countries.insert( 0, { 'country_id_count': countries_total, 'country_id': (0, _("All Countries")) }) base_url = '/integrators/%s/contributors%s' % ( integrator, '/country/%s' % country_id if country_id else '') contributors_count = partner.sudo().search_count(country_domain) pager = request.website.pager(url=base_url, total=contributors_count, page=page, step=self._references_per_page, scope=7, url_args=post) contributors = partner.sudo().search(country_domain, order="display_name ASC", offset=pager['offset'], limit=self._references_per_page) values = { 'contributors': contributors, 'integrator': integrator, 'countries': countries, 'current_country': current_country and [current_country['id'], current_country['name']] or None, 'current_country_id': current_country and current_country['id'] or 0, 'pager': pager, 'post': post, 'search': "?%s" % werkzeug.url_encode(post), } return request.render("website_oca_integrator.contributor_index", values)
def _post_processing_att(self, tagName, atts, options): atts = super(irQweb, self)._post_processing_att(tagName, atts, options) tmp_atts = atts try: name = self.URL_ATTRS.get(tagName) website = request and getattr(request, 'website', None) url = None if not website and options.get('website_id'): website = self.env['website'].browse(options['website_id']) if website and website.is_allow_url_rewrite and request and name and atts.get( name) and name in atts and ('/shop/product' in atts.get(name)): url = atts.get(name) surl = url.split('/') langs = [ lg[0] for lg in request.env['res.lang'].get_available() ] if surl[1] in langs: surl = url.split('/', 5) surl.pop(1) else: surl = url.split('/', 4) if surl.__len__() < 4: return tmp_atts query_url = surl[3].split('?') unslug_url = unslug(query_url[0]) seo = request.env['seo.url'].sudo().search([ ('product_id', '=', unslug_url[1]), ('website_id', '=', request.website.id), ('is_active', '=', True) ]) if seo: if surl.__len__() >= 5: re_url = seo[0].url + '/' + surl[surl.__len__() - 1] else: re_url = seo[0].url if query_url.__len__() > 1: re_url += '?' + query_url[1] if (len(langs) > 1) and is_multilang_url(url, langs): ps = url.split(u'/') if ps[1] in langs: re_url = "/" + ps[1] + re_url atts[name] = re_url if website and website.is_allow_url_rewrite and request and name and atts.get( name) and name in atts and ('/shop/category' in atts.get(name)): url = atts.get(name) surl = url.split('/') langs = [ lg[0] for lg in request.env['res.lang'].get_available() ] if surl[1] in langs: surl = url.split('/', 5) surl.pop(1) else: surl = url.split('/', 4) if surl.__len__() < 4: return tmp_atts query_url = surl[3].split('?') unslug_url = unslug(query_url[0]) seo = request.env['seo.url'].sudo().search([ ('categ_id', '=', unslug_url[1]), ('website_id', '=', request.website.id), ('is_active', '=', True) ]) if seo: if surl.__len__() >= 5: re_url = seo[0].url + '/' + surl[surl.__len__() - 1] else: re_url = seo[0].url if query_url.__len__() > 1: re_url += '?' + query_url[1] if (len(langs) > 1) and is_multilang_url(url, langs): ps = url.split(u'/') if ps[1] in langs: re_url = "/" + ps[1] + re_url atts[name] = re_url return atts except Exception as e: return tmp_atts
def _prepare_blog_values(self, blogs, blog=False, date_begin=False, date_end=False, tags=False, state=False, page=False, search=None): """ Prepare all values to display the blogs index page or one specific blog""" BlogPost = request.env['blog.post'] BlogTag = request.env['blog.tag'] # prepare domain domain = request.website.website_domain() if blog: domain += [('blog_id', '=', blog.id)] if date_begin and date_end: domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)] active_tag_ids = tags and [unslug(tag)[1] for tag in tags.split(',')] or [] active_tags = BlogTag if active_tag_ids: active_tags = BlogTag.browse(active_tag_ids).exists() fixed_tag_slug = ",".join(slug(t) for t in active_tags) if fixed_tag_slug != tags: new_url = request.httprequest.full_path.replace("/tag/%s" % tags, "/tag/%s" % fixed_tag_slug, 1) if new_url != request.httprequest.full_path: # check that really replaced and avoid loop return request.redirect(new_url, 301) domain += [('tag_ids', 'in', active_tags.ids)] if request.env.user.has_group('website.group_website_designer'): count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] published_count = BlogPost.search_count(count_domain) unpublished_count = BlogPost.search_count(domain) - published_count if state == "published": domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] elif state == "unpublished": domain += ['|', ("website_published", "=", False), ("post_date", ">", fields.Datetime.now())] else: domain += [("post_date", "<=", fields.Datetime.now())] use_cover = request.website.is_view_active('website_blog.opt_blog_cover_post') fullwidth_cover = request.website.is_view_active('website_blog.opt_blog_cover_post_fullwidth_design') # if blog, we show blog title, if use_cover and not fullwidth_cover we need pager + latest always offset = (page - 1) * self._blog_post_per_page if not blog: if use_cover and not fullwidth_cover: offset += 1 options = { 'displayDescription': True, 'displayDetail': False, 'displayExtraDetail': False, 'displayExtraLink': False, 'displayImage': False, 'allowFuzzy': not request.params.get('noFuzzy'), 'blog': str(blog.id) if blog else None, 'tag': ','.join([str(id) for id in active_tags.ids]), 'date_begin': date_begin, 'date_end': date_end, 'state': state, } total, details, fuzzy_search_term = request.website._search_with_fuzzy("blog_posts_only", search, limit=page * self._blog_post_per_page, order="is_published desc, post_date desc, id asc", options=options) posts = details[0].get('results', BlogPost) first_post = BlogPost if posts and not blog and posts[0].website_published: first_post = posts[0] posts = posts[offset:offset + self._blog_post_per_page] url_args = dict() if search: url_args["search"] = search if date_begin and date_end: url_args["date_begin"] = date_begin url_args["date_end"] = date_end pager = tools.lazy(lambda: request.website.pager( url=request.httprequest.path.partition('/page/')[0], total=total, page=page, step=self._blog_post_per_page, url_args=url_args, )) if not blogs: all_tags = request.env['blog.tag'] else: all_tags = tools.lazy(lambda: blogs.all_tags(join=True) if not blog else blogs.all_tags().get(blog.id, request.env['blog.tag'])) tag_category = tools.lazy(lambda: sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper())) other_tags = tools.lazy(lambda: sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tag: tag.name.upper())) # for performance prefetch the first post with 
the others post_ids = (first_post | posts).ids # and avoid accessing related blogs one by one posts.blog_id return { 'date_begin': date_begin, 'date_end': date_end, 'first_post': first_post.with_prefetch(post_ids), 'other_tags': other_tags, 'tag_category': tag_category, 'nav_list': self.nav_list, 'tags_list': self.tags_list, 'pager': pager, 'posts': posts.with_prefetch(post_ids), 'tag': tags, 'active_tag_ids': active_tags.ids, 'domain': domain, 'state_info': state and {"state": state, "published": published_count, "unpublished": unpublished_count}, 'blogs': blogs, 'blog': blog, 'search': fuzzy_search_term or search, 'search_count': total, 'original_search': fuzzy_search_term and search, }
def blog(self, blog=None, category=None, author=None, tag=None, page=1, search='', **opt): """ Prepare all values to display the blog. :return dict values: values for the templates, containing - 'blog': current blog - 'blogs': all blogs for navigation - 'pager': pager of posts - 'active_tag_ids' : list of active tag ids, - 'tags_list' : function to built the comma-separated tag list ids (for the url), - 'tags': all tags, for navigation - 'state_info': state of published/unpublished filter - 'nav_list': a dict [year][month] for archives navigation - 'date': date_begin optional parameter, used in archives navigation - 'blog_url': help object to create URLs """ date_begin, date_end, state = opt.get('date_begin'), opt.get( 'date_end'), opt.get('state') published_count, unpublished_count = 0, 0 BlogPost = request.env['blog.post'] Blog = request.env['blog.blog'] blogs = Blog.search([], order="create_date asc") # build the domain for blog post to display domain = [] target = '' color = False if blog: domain += [('blog_id', '=', blog.id)] header = 'blog' # retrocompatibility to accept tag as slug active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] or [] if category: active_category_id = unslug(category)[1] category = request.env['blog.category'].search_read( [('id', '=', active_category_id)], ['name', 'color'])[0] target = category['name'] color = category['color'] domain += [('website_category_id', 'child_of', active_category_id)] header = 'category' if author: author_id = unslug(author)[1] domain += [('author_id', '=', author_id)] header = 'author' target = request.env['res.partner'].sudo().search([('id', '=', author_id)])[0] if active_tag_ids: domain += [('tag_ids', 'in', active_tag_ids)] tag_obj = request.env['blog.tag'] tags_ids = tag and [unslug(t)[1] for t in tag.split(',')] or [] target = '' for tag_id in tags_ids: target += tag_obj.search([('id', '=', tag_id)])[0].name + ', ' target = target[:-2] header = 'tag' if date_begin and date_end: domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)] if search != '': domain += [ '|', '|', ('name', 'ilike', search), ('subtitle', 'ilike', search), ('content', 'ilike', search) ] header = 'search' if request.env.user.has_group('website.group_website_designer'): count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now()) ] published_count = BlogPost.search_count(count_domain) unpublished_count = BlogPost.search_count(domain) - published_count if state == "published": domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] elif state == "unpublished": domain += [ '|', ("website_published", "=", False), ("post_date", ">", fields.Datetime.now()) ] else: domain += [("post_date", "<=", fields.Datetime.now())] blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end) blog_posts = BlogPost.search(domain, order="post_date desc") pager = request.website.pager( url=request.httprequest.path.partition('/page/')[0], total=len(blog_posts), page=page, step=self._blog_post_per_page, url_args=opt, ) pager_begin = (page - 1) * self._blog_post_per_page pager_end = page * self._blog_post_per_page blog_posts = blog_posts[pager_begin:pager_end] all_tags = blog.all_tags()[blog.id] # function to create the string list of tag ids, and toggle a given one. # used in the 'Tags Cloud' template. 
def tags_list(tag_ids, current_tag): tag_ids = list(tag_ids) # required to avoid using the same list if current_tag in tag_ids: tag_ids.remove(current_tag) else: tag_ids.append(current_tag) tag_ids = request.env['blog.tag'].browse(tag_ids).exists() return ','.join(slug(tag) for tag in tag_ids) values = { 'blog': blog, 'blogs': blogs, 'main_object': blog, 'tags': all_tags, 'state_info': { "state": state, "published": published_count, "unpublished": unpublished_count }, 'active_tag_ids': active_tag_ids, 'tags_list': tags_list, 'blog_posts': blog_posts, 'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts], 'pager': pager, 'nav_list': self.nav_list(blog), 'blog_url': blog_url, 'date': date_begin, 'header': header, 'search': search, 'target': target, 'color': color } response = request.render("website_blog.blog_post_short", values) return response
def blog(self, blog=None, tag=None, page=1, **opt): """function related to blog display""" date_begin, date_end, state = opt.get('date_begin'), opt.get( 'date_end'), opt.get('state') published_count, unpublished_count = 0, 0 domain = request.website.website_domain() blog_post = request.env['blog.post'] blogs = request.env['blog.blog'].search(domain, order="create_date asc", limit=2) # retrocompatibility to accept tag as slug active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] if tag else [] if active_tag_ids: fixed_tag_slug = ",".join( slug(t) for t in request.env['blog.tag'].browse(active_tag_ids)) if fixed_tag_slug != tag: return request.redirect( request.httprequest.full_path.replace( "/tag/%s/" % tag, "/tag/%s/" % fixed_tag_slug, 1), 301) domain += [('tag_ids', 'in', active_tag_ids)] if blog: domain += [('blog_id', '=', blog.id)] if date_begin and date_end: domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)] if request.env.user.has_group('website.group_website_designer'): count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now()) ] published_count = blog_post.search_count(count_domain) unpublished_count = blog_post.search_count( domain) - published_count if state == "published": domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())] elif state == "unpublished": domain += [ '|', ("website_published", "=", False), ("post_date", ">", fields.Datetime.now()) ] else: domain += [("post_date", "<=", fields.Datetime.now())] blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end) search_string = opt.get('search', None) blog_posts = blog_post.search([('name', 'ilike', search_string)], offset=(page - 1) * self._blog_post_per_page, limit=self._blog_post_per_page) if search_string \ else blog_post.search(domain, order="post_date desc") pager = request.website.pager( url=request.httprequest.path.partition('/page/')[0], total=len(blog_posts), page=page, step=self._blog_post_per_page, url_args=opt, ) pager_begin = (page - 1) * self._blog_post_per_page pager_end = page * self._blog_post_per_page blog_posts = blog_posts[pager_begin:pager_end] all_tags = request.env['blog.tag'].search([]) use_cover = request.website.viewref( 'website_blog.opt_blog_cover_post').active fullwidth_cover = request.website.viewref( 'website_blog.opt_blog_cover_post_fullwidth_design').active offset = (page - 1) * self._blog_post_per_page first_post = blog_posts if not blog: first_post = blog_posts.search(domain + [('website_published', '=', True)], order="post_date desc, id asc", limit=1) if use_cover and not fullwidth_cover: offset += 1 # function to create the string list of tag ids, and toggle a given one. # used in the 'Tags Cloud' template. 
def tags_list(tag_ids, current_tag): tag_ids = list(tag_ids) # required to avoid using the same list if current_tag in tag_ids: tag_ids.remove(current_tag) else: tag_ids.append(current_tag) tag_ids = request.env['blog.tag'].browse(tag_ids).exists() return ','.join(slug(tags) for tags in tag_ids) tag_category = sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper()) other_tags = sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tags: tags.name.upper()) values = { 'blog': blog, 'blogs': blogs, 'first_post': first_post.with_prefetch(blog_posts.ids) if not search_string else None, 'other_tags': other_tags, 'state_info': { "state": state, "published": published_count, "unpublished": unpublished_count }, 'active_tag_ids': active_tag_ids, 'tags_list': tags_list, 'posts': blog_posts, 'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts], 'pager': pager, 'nav_list': self.nav_list(blog), 'blog_url': blog_url, 'date': date_begin, 'tag_category': tag_category, } response = request.render("website_blog.blog_post_short", values) return response
def _prepare_blog_values(self, blogs, blog=False, date_begin=False, date_end=False, tags=False, series=False, state=False, page=False):
    """ Prepare all values to display the blogs index page or one specific blog """
    BlogPost = request.env['blog.post']
    ConfigParameter = request.env['ir.config_parameter'].sudo()
    is_post_blog_mana2many = ConfigParameter.get_param('anodoo_blog.is_post_blog_mana2many')

    # prepare domain
    if not is_post_blog_mana2many:
        domain = request.website.website_domain()
    else:
        # a post may also be shared on other websites through the many2many field
        domain = ['|'] + request.website.website_domain() + [
            ('multi_website_ids', 'in', [request.website.id])
        ]

    # is_public: some posts are hidden from the index but stay reachable through a direct link
    domain += [('is_public', '=', True)]

    if blog:
        if is_post_blog_mana2many:
            domain += [
                '|',
                ('blog_id', '=', blog.id),
                ('multi_blog_ids', 'in', [blog.id])
            ]
        else:
            domain += [('blog_id', '=', blog.id)]

    if date_begin and date_end:
        domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)]

    active_tag_ids = tags and [unslug(tag)[1] for tag in tags.split(',')] or []
    if active_tag_ids:
        fixed_tag_slug = ",".join(slug(t) for t in request.env['blog.tag'].browse(active_tag_ids))
        if fixed_tag_slug != tags:
            return request.redirect(
                request.httprequest.full_path.replace("/tag/%s/" % tags, "/tag/%s/" % fixed_tag_slug, 1), 301)
        domain += [('tag_ids', 'in', active_tag_ids)]

    series_id = False
    if series:
        series_id = unslug(series)[1]
        domain += [('series_id', '=', series_id)]

    # default the counters so 'state_info' never references undefined names for non-designers
    published_count, unpublished_count = 0, 0
    if request.env.user.has_group('website.group_website_designer'):
        count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())]
        published_count = BlogPost.search_count(count_domain)
        unpublished_count = BlogPost.search_count(domain) - published_count

        if state == "published":
            domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())]
        elif state == "unpublished":
            domain += [
                '|',
                ("website_published", "=", False),
                ("post_date", ">", fields.Datetime.now())
            ]
    else:
        domain += [("post_date", "<=", fields.Datetime.now())]

    use_cover = request.website.viewref('website_blog.opt_blog_cover_post').active
    fullwidth_cover = request.website.viewref('website_blog.opt_blog_cover_post_fullwidth_design').active

    # if blog, we show the blog title; if use_cover and not fullwidth_cover we need pager + latest always
    offset = (page - 1) * self._blog_post_per_page
    first_post = BlogPost
    if not blog:
        first_post = BlogPost.search(domain + [('website_published', '=', True)], order="post_date desc, id asc", limit=1)
    if use_cover and not fullwidth_cover:
        offset += 1

    posts = BlogPost.search(domain, offset=offset, limit=self._blog_post_per_page, order="is_published desc, post_date desc, id asc")
    total = BlogPost.search_count(domain)

    pager = request.website.pager(
        url=request.httprequest.path.partition('/page/')[0],
        total=total,
        page=page,
        step=self._blog_post_per_page,
    )

    all_tags = blog and blogs.all_tags()[blog.id] or blogs.all_tags(join=True)
    tag_category = sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper())
    other_tags = sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tag: tag.name.upper())

    # for performance, prefetch the first post together with the others
    post_ids = (first_post | posts).ids

    PostSeries = request.env['anodoo.blog.post.series'].sudo()
    all_series = PostSeries.search(request.website.website_domain(), limit=10)
    series = PostSeries.browse(series_id)

    return {
        'date_begin': date_begin,
        'date_end': date_end,
        'first_post': first_post.with_prefetch(post_ids),
        'other_tags': other_tags,
        'tag_category': tag_category,
        'nav_list': self.nav_list(),
        'tags_list': self.tags_list,
        'pager': pager,
        'posts': posts.with_prefetch(post_ids),
        'tag': tags,
        'active_tag_ids': active_tag_ids,
        'domain': domain,
        'state_info': state and {
            "state": state,
            "published": published_count,
            "unpublished": unpublished_count,
        },
        'blogs': blogs,
        'blog': blog,
        'series': series,
        'all_series': all_series,
    }
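
# A minimal sketch of how a route could consume _prepare_blog_values(); it is a
# hypothetical illustration, not part of the override above.  The method name
# _example_blog_route, the explicit keyword mapping and the
# "website_blog.blog_post_short" template are assumptions modelled on the stock
# website_blog module.
def _example_blog_route(self, blog=None, tag=None, page=1, **opt):
    blogs = request.env['blog.blog'].search(request.website.website_domain(), order="create_date asc")
    values = self._prepare_blog_values(
        blogs=blogs,
        blog=blog,
        date_begin=opt.get('date_begin'),
        date_end=opt.get('date_end'),
        tags=tag,
        series=opt.get('series'),
        state=opt.get('state'),
        page=page,
    )
    # _prepare_blog_values() may return a redirect (fixed tag slug) instead of a values dict
    if isinstance(values, werkzeug.wrappers.Response):
        return values
    return request.render("website_blog.blog_post_short", values)
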
def blog(self, blog=None, tag=None, page=1, **opt):
    """ Prepare all values to display the blog.

    :return dict values: values for the templates, containing

     - 'blog': current blog
     - 'blogs': all blogs for navigation
     - 'pager': pager of posts
     - 'active_tag_ids': list of active tag ids
     - 'tags_list': function to build the comma-separated tag list ids (for the url)
     - 'tags': all tags, for navigation
     - 'state_info': state of published/unpublished filter
     - 'nav_list': a dict [year][month] for archives navigation
     - 'date': date_begin optional parameter, used in archives navigation
     - 'blog_url': helper object to create URLs
    """
    if not blog.can_access_from_current_website():
        raise werkzeug.exceptions.NotFound()

    date_begin, date_end, state = opt.get('date_begin'), opt.get('date_end'), opt.get('state')
    published_count, unpublished_count = 0, 0

    domain = request.website.website_domain()
    BlogPost = request.env['blog.post']
    Blog = request.env['blog.blog']
    blogs = Blog.search(domain, order="create_date asc")

    # retrocompatibility to accept tag as slug
    active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] or []
    if active_tag_ids:
        fixed_tag_slug = ",".join(slug(t) for t in request.env['blog.tag'].browse(active_tag_ids))
        if fixed_tag_slug != tag:
            return request.redirect(
                request.httprequest.full_path.replace("/tag/%s/" % tag, "/tag/%s/" % fixed_tag_slug, 1), 301)
        domain += [('tag_ids', 'in', active_tag_ids)]

    if blog:
        domain += [('blog_id', '=', blog.id)]
    if date_begin and date_end:
        domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)]

    if request.env.user.has_group('website.group_website_designer'):
        count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())]
        published_count = BlogPost.search_count(count_domain)
        unpublished_count = BlogPost.search_count(domain) - published_count
        if state == "published":
            domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())]
        elif state == "unpublished":
            domain += [
                '|',
                ("website_published", "=", False),
                ("post_date", ">", fields.Datetime.now())
            ]
    else:
        domain += [("post_date", "<=", fields.Datetime.now())]

    blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)

    blog_posts = BlogPost.search(domain, order="post_date desc")

    pager = request.website.pager(
        url=request.httprequest.path.partition('/page/')[0],
        total=len(blog_posts),
        page=page,
        step=self._blog_post_per_page,
        url_args=opt,
    )
    pager_begin = (page - 1) * self._blog_post_per_page
    pager_end = page * self._blog_post_per_page
    blog_posts = blog_posts[pager_begin:pager_end]

    all_tags = blog.all_tags()[blog.id]

    # function to create the string list of tag ids, and toggle a given one.
    # used in the 'Tags Cloud' template.
    def tags_list(tag_ids, current_tag):
        tag_ids = list(tag_ids)  # required to avoid using the same list
        if current_tag in tag_ids:
            tag_ids.remove(current_tag)
        else:
            tag_ids.append(current_tag)
        tag_ids = request.env['blog.tag'].browse(tag_ids).exists()
        return ','.join(slug(tag) for tag in tag_ids)

    tag_category = sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper())
    other_tags = sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tag: tag.name.upper())

    values = {
        'blog': blog,
        'blogs': blogs,
        'main_object': blog,
        'other_tags': other_tags,
        'state_info': {
            "state": state,
            "published": published_count,
            "unpublished": unpublished_count,
        },
        'active_tag_ids': active_tag_ids,
        'tags_list': tags_list,
        'blog_posts': blog_posts,
        'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts],
        'pager': pager,
        'nav_list': self.nav_list(blog),
        'blog_url': blog_url,
        'date': date_begin,
        'tag_category': tag_category,
    }
    response = request.render("website_blog.blog_post_short", values)
    return response
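
# An ORM-free sketch of the toggle behaviour implemented by tags_list() above, to
# make the add/remove semantics explicit.  The helper name and the slug mapping are
# hypothetical; the real closure resolves the ids through blog.tag records.
def _toggle_tag_example(active_ids, current_id, slug_by_id):
    ids = list(active_ids)          # copy so the caller's list stays untouched
    if current_id in ids:
        ids.remove(current_id)      # clicking an active tag removes it from the filter
    else:
        ids.append(current_id)      # clicking an inactive tag adds it to the filter
    return ','.join(slug_by_id[i] for i in ids)

# _toggle_tag_example([3, 5], 5, {3: 'python-3', 5: 'odoo-5'})  -> 'python-3'
# _toggle_tag_example([3], 5, {3: 'python-3', 5: 'odoo-5'})     -> 'python-3,odoo-5'
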
def blog(self, blog=None, tag=None, page=1, **opt):
    date_begin, date_end, state = opt.get('date_begin'), opt.get('date_end'), opt.get('state')
    published_count, unpublished_count = 0, 0

    BlogPost = request.env['blog.post']
    Blog = request.env['blog.blog']
    blogs = Blog.search([], order="create_date asc")

    # build the domain for blog post to display
    domain = []
    # retrocompatibility to accept tag as slug
    active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] or []
    if active_tag_ids:
        domain += [('tag_ids', 'in', active_tag_ids)]
    if blog:
        domain += [('blog_id', '=', blog.id)]
    if date_begin and date_end:
        domain += [("post_date", ">=", date_begin), ("post_date", "<=", date_end)]

    if request.env.user.has_group('website.group_website_designer'):
        count_domain = domain + [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())]
        published_count = BlogPost.search_count(count_domain)
        unpublished_count = BlogPost.search_count(domain) - published_count
        if state == "published":
            domain += [("website_published", "=", True), ("post_date", "<=", fields.Datetime.now())]
        elif state == "unpublished":
            domain += [
                '|',
                ("website_published", "=", False),
                ("post_date", ">", fields.Datetime.now())
            ]
    else:
        domain += [("post_date", "<=", fields.Datetime.now())]

    if opt.get('search'):
        search_list = opt['search'].split()
        for se in search_list:
            domain += [
                '|', '|',
                ("name", "ilike", se),
                ("subtitle", "ilike", se),
                ("content", "ilike", se)
            ]

    blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)

    blog_posts = BlogPost.search(domain, order="post_date desc")

    pager = request.website.pager(
        url=request.httprequest.path.partition('/page/')[0],
        total=len(blog_posts),
        page=page,
        step=self._blog_post_per_page,
        url_args=opt,
    )
    pager_begin = (page - 1) * self._blog_post_per_page
    pager_end = page * self._blog_post_per_page
    blog_posts = blog_posts[pager_begin:pager_end]

    all_tags = blog.all_tags()[blog.id]

    # function to create the string list of tag ids, and toggle a given one.
    # used in the 'Tags Cloud' template.
    def tags_list(tag_ids, current_tag):
        tag_ids = list(tag_ids)  # required to avoid using the same list
        if current_tag in tag_ids:
            tag_ids.remove(current_tag)
        else:
            tag_ids.append(current_tag)
        tag_ids = request.env['blog.tag'].browse(tag_ids).exists()
        return ','.join(slug(tag) for tag in tag_ids)

    values = {
        'blog': blog,
        'blogs': blogs,
        'main_object': blog,
        'tags': all_tags,
        'state_info': {
            "state": state,
            "published": published_count,
            "unpublished": unpublished_count,
        },
        'active_tag_ids': active_tag_ids,
        'tags_list': tags_list,
        'blog_posts': blog_posts,
        'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts],
        'pager': pager,
        'nav_list': self.nav_list(blog),
        'blog_url': blog_url,
        'date': date_begin,
    }
    response = request.render("website_blog.blog_post_short", values)
    return response
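
# A standalone illustration of the search domain built above: one OR-triple is
# appended per whitespace-separated word, and Odoo's prefix notation ANDs the
# triples together, so every word must match the name, subtitle or content.
# The helper name and the sample terms are hypothetical.
def _example_search_domain(search):
    domain = []
    for se in search.split():
        domain += ['|', '|', ("name", "ilike", se), ("subtitle", "ilike", se), ("content", "ilike", se)]
    return domain

# _example_search_domain("odoo blog") ->
# ['|', '|', ('name', 'ilike', 'odoo'), ('subtitle', 'ilike', 'odoo'), ('content', 'ilike', 'odoo'),
#  '|', '|', ('name', 'ilike', 'blog'), ('subtitle', 'ilike', 'blog'), ('content', 'ilike', 'blog')]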