def display_extract(page, query):
    """Return an HTML extract (up to three sentences) from *page*'s text
    fields that contain *query*, with occurrences wrapped in ``<b>`` tags.

    Returns an empty string when either argument is falsy or nothing matches.
    """
    if not (page and query):
        return ""

    # Fields that are navigation/metadata and should not appear in extracts.
    skipped = ('path', 'title', 'slug', 'url', 'url_path')

    # Gather searchable plain text from every Char/Text field on the page.
    # Joining a list avoids the quadratic repeated-concatenation of the
    # original loop.
    parts = []
    for field in page._meta.fields:
        if not isinstance(field, (models.CharField, models.TextField)):
            continue
        if field.name in skipped:
            continue
        value = getattr(page, field.name)
        if value:
            parts.append(defaultfilters.strip_tags(value))
    text_search = "".join(parts)

    # Highlight the query case-insensitively.  re.escape is required:
    # without it, regex metacharacters in user input (e.g. '(', '*')
    # break the pattern or change its meaning.
    pattern = re.compile(r'(?i)\b%s' % re.escape(query))
    text_search = pattern.sub("<b>%s</b>" % query, text_search)

    # Keep up to three sentences that contain the query verbatim.
    sentences = [s + '.' for s in text_search.split('.') if query in s][:3]

    extract = ""
    for sentence in sentences:
        # Prefix "..." unless the sentence opens the text.
        if not text_search.startswith(sentence):
            extract += "..." + sentence
        else:
            extract += sentence
    # Trailing ellipsis when the last kept sentence is not the end of text.
    if sentences and not text_search.endswith(sentences[-1]):
        extract += "..."
    return extract
def export_review_data(data):
    """Flatten per-article review records into CSV rows for export_csv."""
    header = [
        'Reviewer',
        'Journal',
        'Date Requested',
        'Date Accepted',
        'Date Due',
        'Date Complete',
        'Time to Acceptance',
        'Time to Completion',
    ]
    rows = [header]

    for point in data:
        for review in point.get('reviews'):
            rows.append([
                review.reviewer.full_name(),
                strip_tags(point.get('article').title),
                review.date_requested,
                review.date_accepted,
                review.date_due,
                review.date_complete,
                review.request_to_accept,
                review.accept_to_complete,
            ])

    return export_csv(rows)
def send_mail_template(subject, template_name, context, recipient_list,
                       from_email=settings.DEFAULT_FROM_EMAIL,
                       fail_silently=False):
    """Render *template_name* with *context* and send it as a multipart
    (plain text + HTML) email."""
    html_body = render_to_string(template_name, context)
    # Plain-text alternative: the rendered HTML with its tags stripped.
    text_body = strip_tags(html_body)
    # EmailMultiAlternatives is used so the HTML version can be attached
    # as an alternative part.
    message = EmailMultiAlternatives(
        subject=subject,
        body=text_body,
        from_email=from_email,
        to=recipient_list,
    )
    message.attach_alternative(html_body, "text/html")
    message.send(fail_silently=fail_silently)
def send_email_template(subject, template_name, context, recipient_list,
                        from_email=settings.DEFAULT_FROM_EMAIL,
                        fail_silently=False):
    """Send a templated email with both plain-text and HTML bodies."""
    html = render_to_string(template_name, context)
    plain = strip_tags(html)
    mail = EmailMultiAlternatives(subject=subject,
                                  body=plain,
                                  from_email=from_email,
                                  to=recipient_list)
    mail.attach_alternative(html, "text/html")
    mail.send(fail_silently=fail_silently)
def send_mail_template(subject, template_name, context, recipient_list,
                       from_email=settings.DEFAULT_FROM_EMAIL,
                       fail_silently=False):
    """Render *template_name* and deliver it as a text email carrying an
    HTML alternative part."""
    rendered = render_to_string(template_name, context)
    msg = EmailMultiAlternatives(
        subject=subject,
        body=strip_tags(rendered),  # text fallback derived from the HTML
        from_email=from_email,
        to=recipient_list,
    )
    msg.attach_alternative(rendered, "text/html")
    msg.send(fail_silently=fail_silently)
def export_article_csv(articles, data):
    """Build CSV rows summarising journal metrics plus one row per article.

    ``data`` is a one-element list whose first item maps metric names to
    querysets; ``articles`` is the iterable of articles to detail.
    Returns whatever ``export_csv`` produces for the assembled rows.
    """
    all_rows = []

    # Summary block: aggregate counts.
    info_header_row = [
        'Articles',
        'Submissions',
        'Published Articles',
        'Rejected Articles',
        'Views',
        'Downloads',
    ]
    all_rows.append(info_header_row)
    summary = data[0]
    all_rows.append([
        summary.get('articles').count(),
        summary.get('submissions').count(),
        summary.get('published_articles').count(),
        summary.get('rejected_articles').count(),
        summary.get('views').count(),
        summary.get('downloads').count(),
    ])

    # Per-article block.  BUG FIX: the original header list was missing the
    # comma after 'Days to Publication', so Python concatenated it with
    # 'Views' into one string and the header row had 7 columns while every
    # data row has 8.
    main_header_row = [
        'Title',
        'Section',
        'Date Submitted',
        'Date Accepted',
        'Date Published',
        'Days to Publication',
        'Views',
        'Downloads',
    ]
    all_rows.append(main_header_row)

    for article in articles:
        all_rows.append([
            strip_tags(article.title),
            article.section.name if article.section else 'No Section',
            article.date_submitted,
            article.date_accepted,
            article.date_published,
            article.editorial_delta.days,
            article.views.count(),
            article.downloads.count(),
        ])

    return export_csv(all_rows)
def send_mail_template(assunto, template_html, context, para_email,
                       de_email=settings.DEFAULT_FROM_EMAIL,
                       fail_silently=False):
    """Render ``template_html`` with ``context`` and email it as plain text
    with an HTML alternative.

    NOTE(review): the plain-text body is built from the template *name*
    (``strip_tags(template_html)``), not the rendered HTML — this looks like
    a bug (compare the sibling helpers that strip the rendered message);
    preserved here as-is, confirm before changing.
    """
    mensagem_html = render_to_string(template_html, context)
    mensagem_txt = strip_tags(template_html)
    # subject, body, from_email and to are the standard
    # EmailMultiAlternatives constructor arguments.
    email = EmailMultiAlternatives(subject=assunto,
                                   body=mensagem_txt,
                                   from_email=de_email,
                                   to=para_email)
    email.attach_alternative(mensagem_html, "text/html")
    email.send(fail_silently=fail_silently)
def send(self, connection=None, logger=None):
    """Send this message as a text + HTML email.

    On success, marks the instance sent, stamps ``postdate``, saves, and
    returns True.  On SMTP/OS failure, logs the error and returns False.
    """
    logger = logger or logging.getLogger(__name__)

    message = EmailMultiAlternatives(
        subject=self.subject,
        body=strip_tags(self.content),          # plain-text fallback
        from_email=settings.DEFAULT_FROM_EMAIL,
        to=self.recipients.split(','),          # comma-separated lists
        reply_to=self.reply_to.split(','),
        connection=connection,
    )
    message.attach_alternative(self.content, 'text/html')

    try:
        message.send()
        self.sent = True
        self.postdate = timezone.now()
        self.save()
        return True
    except (SMTPException, os.error) as e:
        logger.error('Error while sending email (pk: {}): {}'.format(
            self.pk, e))
        return False
def send_mail_template(subject, template_name, context, recipient_list,
                       from_email=settings.DEFAULT_FROM_EMAIL,
                       fail_silently=False):
    """
    Centralised helper for sending templated email.
    """
    # Render the HTML body from the template.
    body_html = render_to_string(template_name, context)
    # Plain-text fallback: the same content with its tags stripped.
    body_text = strip_tags(body_html)
    # Build the message as text first...
    mail = EmailMultiAlternatives(subject=subject,
                                  body=body_text,
                                  from_email=from_email,
                                  to=recipient_list)
    # ...then attach the HTML rendering as an alternative.
    mail.attach_alternative(body_html, 'text/html')
    # Deliver.
    mail.send(fail_silently=fail_silently)
def get_excerpt(self, obj):
    """Plain-text excerpt of the blog post, with all HTML tags removed."""
    raw = obj.description_from_content()
    return strip_tags(raw)
def search(request):
    """Search view: queries the Elasticsearch 'it' index for Stack Overflow
    questions matching the ``keywords`` GET parameter and renders a
    paginated result list with a tag facet."""
    keywords = request.GET.get("keywords", None)
    # Clamp the requested page into [1, PAGE_COUNTER].
    page = int(request.GET.get("page", 1))
    if page < 1:
        page = 1
    elif page > PAGE_COUNTER:
        page = PAGE_COUNTER
    tags = set()
    results = []
    total = 0
    if keywords and keywords.strip():
        # Sanitise the query: strip HTML tags, treat '-' as a word separator.
        keywords = strip_tags(keywords.strip()).replace("-", " ")
        with silk_profile(name='Search By Keywords #%s' % keywords):
            datas = es.search(index='it', doc_type='stackoverflow_questions', body={
                "query": {
                    "filtered": {
                        "query": {
                            "match": {
                                "title": {
                                    "query": keywords.lower(),
                                    # require at least 75% of the terms
                                    "minimum_should_match": "75%",
                                    "operator": "and"
                                }
                            }
                        }
                    }
                },
                # Standard ES pagination window.
                "from": (page - 1) * PAGE_SIZE,
                "size": PAGE_SIZE,
                'sort': [{
                    'creation_date': {
                        'order': 'desc'
                    }
                }],
                # Top-50 tag buckets for the tag facet in the template.
                "aggs": {
                    "tags_stats": {
                        "terms": {
                            "field": "tags",
                            "size": 50,
                            "min_doc_count": 1,
                            "order": {
                                "_count": "desc"
                            }
                        }
                    }
                }
            })
        hits, took, tags = datas["hits"], datas["took"], datas[
            "aggregations"]["tags_stats"]["buckets"]
        total = hits["total"]
        # Flatten each hit into the dict shape the template expects.
        for h in hits["hits"]:
            results.append({
                "id": h["_id"],
                "body": h["_source"]["body"],
                "title": h["_source"]["title"],
                "tags": h["_source"]["tags"],
                "created": time.strftime(
                    '%Y-%m-%d',
                    time.localtime(h["_source"]["creation_date"]))
            })
    # NOTE(review): this arithmetic assumes Python 2 integer division —
    # under Python 3, total / PAGE_SIZE is a float and range()/slicing below
    # would raise.  The render_to_response/RequestContext style also dates
    # this as Python 2 / old Django; confirm the runtime before porting.
    total_page = total / PAGE_SIZE + 2
    if total_page < PAGE_COUNTER:
        page_list = range(1, total_page)
    elif page + PAGE_COUNTER < total_page:
        page_list = range(1, page + PAGE_COUNTER)
    else:
        page_list = range(1, total_page)
    if len(page_list) > PAGE_COUNTER:
        page_list = page_list[:PAGE_COUNTER]
    # 0 signals "no next page" to the template.
    next_page = 0 if int(page) + 1 >= total_page else page + 1
    return render_to_response(
        'list.html', {
            "results": results,
            "total": total,
            "page": page,
            "keywords": keywords,
            "page_list": page_list,
            "next_page": next_page,
            "tags": tags
        }, RequestContext(request))
def render_text_plain(value, format):
    """Convert *value* to plain text, rendering Markdown first if *format*
    indicates Markdown content."""
    is_markdown = str(format) == 'TextFormat.Markdown'
    if is_markdown:
        # Render Markdown to HTML (safe mode); tags are stripped below.
        value = markdown2.markdown(value, safe_mode=True)
    return strip_tags(value)
def edit_coordinator(request, coordinator):
    """Edit view for an Oozie coordinator: renders the edit form on GET and,
    on a fully valid POST, saves the coordinator plus its dataset /
    data-input / data-output formsets before redirecting back here."""
    # This user's past submissions of this coordinator, newest first.
    history = History.objects.filter(submitter=request.user, job=coordinator).order_by('-submission_date')
    # Formsets over existing related rows (max_num=0: no blank extras).
    DatasetFormSet = inlineformset_factory(Coordinator, Dataset, form=DatasetForm, max_num=0, can_order=False, can_delete=True)
    DataInputFormSet = inlineformset_factory(Coordinator, DataInput, form=DataInputForm, max_num=0, can_order=False, can_delete=True)
    # Bind the coordinator into each form via curry so form choices can be
    # scoped to it.
    DataInputFormSet.form = staticmethod(curry(DataInputForm, coordinator=coordinator))
    DataOutputFormSet = inlineformset_factory(Coordinator, DataOutput, form=DataOutputForm, max_num=0, can_order=False, can_delete=True)
    DataOutputFormSet.form = staticmethod(curry(DataOutputForm, coordinator=coordinator))
    # Unsaved dataset backing the "create dataset" sub-form.
    dataset = Dataset(coordinator=coordinator)
    dataset_form = DatasetForm(instance=dataset, prefix='create')
    # Separate formsets (no delete) used only to add brand-new inputs and
    # outputs; they get their own POST prefixes below.
    NewDataInputFormSet = inlineformset_factory(Coordinator, DataInput, form=DataInputForm, extra=0, can_order=False, can_delete=False)
    NewDataInputFormSet.form = staticmethod(curry(DataInputForm, coordinator=coordinator))
    NewDataOutputFormSet = inlineformset_factory(Coordinator, DataOutput, form=DataOutputForm, extra=0, can_order=False, can_delete=False)
    NewDataOutputFormSet.form = staticmethod(curry(DataOutputForm, coordinator=coordinator))
    enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
    if request.method == 'POST':
        coordinator_form = CoordinatorForm(request.POST, instance=coordinator, user=request.user)
        dataset_formset = DatasetFormSet(request.POST, request.FILES, instance=coordinator)
        data_input_formset = DataInputFormSet(request.POST, request.FILES, instance=coordinator)
        data_output_formset = DataOutputFormSet(request.POST, request.FILES, instance=coordinator)
        new_data_input_formset = NewDataInputFormSet(request.POST, request.FILES, instance=coordinator, prefix='input')
        new_data_output_formset = NewDataOutputFormSet(request.POST, request.FILES, instance=coordinator, prefix='output')
        # Save only when the main form and every formset validate; otherwise
        # fall through to the render below to redisplay with errors.
        if coordinator_form.is_valid() and dataset_formset.is_valid() and \
            data_input_formset.is_valid() and data_output_formset.is_valid() \
            and new_data_input_formset.is_valid() and new_data_output_formset.is_valid():
            coordinator = coordinator_form.save()
            dataset_formset.save()
            data_input_formset.save()
            data_output_formset.save()
            new_data_input_formset.save()
            new_data_output_formset.save()
            # SLA configuration arrives as a JSON string in the POST body.
            coordinator.sla = json.loads(request.POST.get('sla'))
            if enable_cron_scheduling:
                # strip_tags sanitises the user-supplied frequency string.
                coordinator.cron_frequency = {'frequency': strip_tags(request.POST.get('cron_frequency')), 'isAdvancedCron': request.POST.get('isAdvancedCron') == 'on'}
            coordinator.save()
            request.info(_('Coordinator saved.'))
            return redirect(reverse('oozie:edit_coordinator', kwargs={'coordinator': coordinator.id}))
    else:
        coordinator_form = CoordinatorForm(instance=coordinator, user=request.user)
        dataset_formset = DatasetFormSet(instance=coordinator)
        data_input_formset = DataInputFormSet(instance=coordinator)
        data_output_formset = DataOutputFormSet(instance=coordinator)
        # Empty querysets: these formsets only ever create new rows.
        new_data_input_formset = NewDataInputFormSet(queryset=DataInput.objects.none(), instance=coordinator, prefix='input')
        new_data_output_formset = NewDataOutputFormSet(queryset=DataOutput.objects.none(), instance=coordinator, prefix='output')
    return render('editor/edit_coordinator.mako', request, {
        'coordinator': coordinator,
        'coordinator_form': coordinator_form,
        'dataset_formset': dataset_formset,
        'data_input_formset': data_input_formset,
        'data_output_formset': data_output_formset,
        'dataset': dataset,
        'dataset_form': dataset_form,
        'new_data_input_formset': new_data_input_formset,
        'new_data_output_formset': new_data_output_formset,
        'history': history,
        'coordinator_frequency': json.dumps(coordinator.cron_frequency),
        'enable_cron_scheduling': enable_cron_scheduling,
    })
def raw_details(self):
    """Return the details field with all HTML tags stripped."""
    details_html = self.details
    return strip_tags(details_html)
def blog_list(request):
    """Blog search/list view: queries the Elasticsearch 'it' index for
    questions matching the ``keywords`` GET parameter and renders a
    paginated result page with a tag facet."""
    keywords = request.GET.get("keywords", None)
    # Clamp the requested page into [1, PAGE_COUNTER].
    page = int(request.GET.get("page", 1))
    if page < 1:
        page = 1
    elif page>PAGE_COUNTER:
        page=PAGE_COUNTER
    tags=set()
    results = []
    total = 0
    if keywords and keywords.strip():
        # Sanitise the query: strip HTML tags, treat '-' as a word separator.
        keywords = strip_tags(keywords.strip()).replace("-"," ")
        with silk_profile(name='Search By Keywords #%s' % keywords):
            datas = es.search(index='it', doc_type='stackoverflow_questions', body={
                "query": {
                    "filtered": {
                        "query": {
                            "match": {
                                "title": {
                                    "query":keywords.lower(),
                                    # require at least 75% of the terms
                                    "minimum_should_match": "75%",
                                    "operator": "and"
                                }
                            }
                        }
                    }
                },
                # Standard ES pagination window.
                "from": (page-1) * PAGE_SIZE,
                "size": PAGE_SIZE,
                'sort': [
                    {'creation_date': {'order': 'desc'}}
                ],
                # Top-50 tag buckets for the tag facet in the template.
                "aggs": {
                    "tags_stats": {
                        "terms": {
                            "field": "tags",
                            "size": 50,
                            "min_doc_count": 1,
                            "order": {"_count": "desc"}
                        }
                    }
                }
            })
        hits, took, tags = datas["hits"], datas["took"], datas["aggregations"]["tags_stats"]["buckets"]
        total = hits["total"]
        # Flatten each hit into the dict shape the template expects.
        for h in hits["hits"]:
            results.append({
                "id": h["_id"],
                "body": h["_source"]["body"],
                "title": h["_source"]["title"],
                "tags": h["_source"]["tags"],
                "created": time.strftime('%Y-%m-%d', time.localtime(h["_source"]["creation_date"]))
            })
    # NOTE(review): this arithmetic assumes Python 2 integer division —
    # under Python 3, total / PAGE_SIZE is a float and range()/slicing below
    # would raise; confirm the runtime before porting.
    total_page = total / PAGE_SIZE + 2
    if total_page < PAGE_COUNTER:
        page_list = range(1, total_page)
    elif page + PAGE_COUNTER < total_page:
        page_list = range(1, page + PAGE_COUNTER)
    else:
        page_list = range(1, total_page)
    if len(page_list)>PAGE_COUNTER:
        page_list=page_list[:PAGE_COUNTER]
    # 0 signals "no next page" to the template.
    next_page = 0 if int(page) + 1 >= total_page else page + 1
    return render_to_response('blog/list.html',
                              {"results": results,
                               "total": total,
                               "page": page,
                               "keywords": keywords,
                               "page_list":page_list,
                               "next_page":next_page,
                               "tags":tags
                               }, RequestContext(request))
def docpage(request, version, filename):
    """Serve a PostgreSQL documentation page for *version*/*filename*,
    issuing permanent redirects for re-versioned release notes, aliased
    pages, and 10.0-style version URLs."""
    loaddate = None
    if version == 'current':
        ver = Version.objects.filter(current=True)[0].tree
    elif version == 'devel':
        # Development docs live under tree 0; remember when they were
        # last loaded so the template can show it.
        ver = Decimal(0)
        loaddate = Version.objects.get(tree=Decimal(0)).docsloaded
    else:
        ver = Decimal(version)
        if ver == Decimal(0):
            raise Http404("Version not found")
    # Pre-7.1 docs were generated with .htm extensions.
    if ver < Decimal("7.1") and ver > Decimal(0):
        extension = "htm"
    else:
        extension = "html"
    # The index page name also changed over the doc generations.
    if ver < Decimal("7.1") and ver > Decimal(0):
        indexname = "postgres.htm"
    elif ver == Decimal("7.1"):
        indexname = "postgres.html"
    else:
        indexname = "index.html"
    if ver >= 10 and version.find('.') > -1:
        # Version 10 and up, but specified as 10.0 / 11.0 etc, so redirect
        # back without the decimal.
        return HttpResponsePermanentRedirect("/docs/{0}/{1}.html".format(
            int(ver), filename))
    fullname = "%s.%s" % (filename, extension)
    # Before looking up the documentation, we need to make a check for
    # release notes. Based on a change, from PostgreSQL 9.4 and up, release
    # notes are only available for the current version (e.g. 11 only has
    # 11.0, 11.1, 11.2).
    # This checks to see if there is a mismatch (e.g. ver = 9.4,
    # fullname = release-9-3-2.html) or if these are the development docs
    # that are pointing to a released version, and performs a redirect to
    # the older version.
    if fullname.startswith('release-') and (
            ver >= Decimal("9.4") or version == "devel") and not fullname.startswith('release-prior'):
        # figure out which version to redirect to. Note that the oldest
        # version of the docs loaded is 7.2
        release_version = re.sub(r'release-((\d+)(-\d+)?)(-\d+)?.html', r'\1', fullname).replace('-', '.')
        # convert to Decimal for ease of manipulation
        try:
            release_version = Decimal(release_version)
        except Exception as e:
            # If it's not a proper decimal, just return 404. This can happen
            # from many broken links around the web.
            raise Http404("Invalid version format")
        # if the version is greater than 10, truncate the number
        if release_version >= Decimal('10'):
            release_version = release_version.quantize(Decimal('1'), rounding=ROUND_DOWN)
        # if these are developer docs (i.e. from the nightly build), we need
        # to determine if these are release notes for a branched version or
        # not, i.e. if we are:
        # a) viewing the docs for a version that does not exist yet (e.g.
        #    active development before an initial beta) OR
        # b) viewing the docs for a beta, RC, or fully released version
        is_branched = Version.objects.filter(
            tree=release_version).exists() if version == "devel" else True
        # If we are viewing a released version of the release notes and the
        # release versions do not match, then we redirect.
        if is_branched and release_version != ver:
            url = "/docs/"
            if release_version >= Decimal('10'):
                url += "{}/{}".format(int(release_version), fullname)
            elif release_version < Decimal('7.2'):
                url += "7.2/{}".format(fullname)
            else:
                url += "{}/{}".format(release_version, fullname)
            return HttpResponsePermanentRedirect(url)
    # try to get the page outright. If it's not found, check to see if it's
    # a doc alias with a redirect, and if so, redirect to that page
    try:
        page = DocPage.objects.select_related('version').get(version=ver, file=fullname)
    except DocPage.DoesNotExist:
        # if the page does not exist but there is a special page redirect,
        # check for the existence of that. If that does not exist, then
        # we're really done and can 404.
        page_redirect = get_object_or_404(DocPageRedirect, redirect_from=fullname)
        url = "/docs/{}/{}".format(version, page_redirect.redirect_to)
        return HttpResponsePermanentRedirect(url)
    # All versions this file (or any alias of it) exists under, used to
    # build the version-switcher lists.
    versions = DocPage.objects.select_related('version').extra(
        where=[
            "file=%s OR file IN (SELECT file2 FROM docsalias WHERE file1=%s) OR file IN (SELECT file1 FROM docsalias WHERE file2=%s)"
        ],
        params=[fullname, fullname, fullname],
    ).order_by('-version__supported', 'version').only('version', 'file')
    # If possible (e.g. if we match), remove the header part of the docs so
    # that we can generate a plain text preview. For older versions where
    # this doesn't match, we just leave it empty.
    m = re.match(r'^<div [^>]*class="navheader"[^>]*>.*?</div>(.*)$', page.content, re.S)
    if m:
        contentpreview = strip_tags(m.group(1))
    else:
        contentpreview = ''
    r = render(
        request, 'docs/docspage.html', {
            'page': page,
            'supported_versions': [v for v in versions if v.version.supported],
            'devel_versions': [
                v for v in versions
                if not v.version.supported and v.version.testing
            ],
            'unsupported_versions': [
                v for v in versions
                if not v.version.supported and not v.version.testing
            ],
            'title': page.title,
            'doc_index_filename': indexname,
            'loaddate': loaddate,
            'og': {
                'url': '/docs/{}/{}'.format(page.display_version(), page.file),
                'time': page.version.docsloaded,
                'title': page.title.strip(),
                'description': contentpreview,
                'sitename': 'PostgreSQL Documentation',
            }
        })
    # Cache key used by the frontend cache for targeted purges.
    r['xkey'] = 'pgdocs_{}'.format(page.display_version())
    return r
def edit_coordinator(request, coordinator):
    """Edit view for an Oozie coordinator: renders the edit form on GET and,
    on a fully valid POST, saves the coordinator plus its dataset /
    data-input / data-output formsets before redirecting back here."""
    # This user's past submissions of this coordinator, newest first.
    history = History.objects.filter(
        submitter=request.user, job=coordinator).order_by('-submission_date')
    # Formsets over existing related rows (max_num=0: no blank extras).
    DatasetFormSet = inlineformset_factory(Coordinator,
                                           Dataset,
                                           form=DatasetForm,
                                           max_num=0,
                                           can_order=False,
                                           can_delete=True)
    DataInputFormSet = inlineformset_factory(Coordinator,
                                             DataInput,
                                             form=DataInputForm,
                                             max_num=0,
                                             can_order=False,
                                             can_delete=True)
    # Bind the coordinator into each form via curry so form choices can be
    # scoped to it.
    DataInputFormSet.form = staticmethod(
        curry(DataInputForm, coordinator=coordinator))
    DataOutputFormSet = inlineformset_factory(Coordinator,
                                              DataOutput,
                                              form=DataOutputForm,
                                              max_num=0,
                                              can_order=False,
                                              can_delete=True)
    DataOutputFormSet.form = staticmethod(
        curry(DataOutputForm, coordinator=coordinator))
    # Unsaved dataset backing the "create dataset" sub-form.
    dataset = Dataset(coordinator=coordinator)
    dataset_form = DatasetForm(instance=dataset, prefix='create')
    # Separate formsets (no delete) used only to add brand-new inputs and
    # outputs; they get their own POST prefixes below.
    NewDataInputFormSet = inlineformset_factory(Coordinator,
                                                DataInput,
                                                form=DataInputForm,
                                                extra=0,
                                                can_order=False,
                                                can_delete=False)
    NewDataInputFormSet.form = staticmethod(
        curry(DataInputForm, coordinator=coordinator))
    NewDataOutputFormSet = inlineformset_factory(Coordinator,
                                                 DataOutput,
                                                 form=DataOutputForm,
                                                 extra=0,
                                                 can_order=False,
                                                 can_delete=False)
    NewDataOutputFormSet.form = staticmethod(
        curry(DataOutputForm, coordinator=coordinator))
    enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
    if request.method == 'POST':
        coordinator_form = CoordinatorForm(request.POST,
                                           instance=coordinator,
                                           user=request.user)
        dataset_formset = DatasetFormSet(request.POST,
                                         request.FILES,
                                         instance=coordinator)
        data_input_formset = DataInputFormSet(request.POST,
                                              request.FILES,
                                              instance=coordinator)
        data_output_formset = DataOutputFormSet(request.POST,
                                                request.FILES,
                                                instance=coordinator)
        new_data_input_formset = NewDataInputFormSet(request.POST,
                                                     request.FILES,
                                                     instance=coordinator,
                                                     prefix='input')
        new_data_output_formset = NewDataOutputFormSet(request.POST,
                                                       request.FILES,
                                                       instance=coordinator,
                                                       prefix='output')
        # Save only when the main form and every formset validate; otherwise
        # fall through to the render below to redisplay with errors.
        if coordinator_form.is_valid() and dataset_formset.is_valid() and \
            data_input_formset.is_valid() and data_output_formset.is_valid() \
            and new_data_input_formset.is_valid() and new_data_output_formset.is_valid():
            coordinator = coordinator_form.save()
            dataset_formset.save()
            data_input_formset.save()
            data_output_formset.save()
            new_data_input_formset.save()
            new_data_output_formset.save()
            # SLA configuration arrives as a JSON string in the POST body.
            coordinator.sla = json.loads(request.POST.get('sla'))
            if enable_cron_scheduling:
                # strip_tags sanitises the user-supplied frequency string.
                coordinator.cron_frequency = {
                    'frequency': strip_tags(request.POST.get('cron_frequency')),
                    'isAdvancedCron': request.POST.get('isAdvancedCron') == 'on'
                }
            coordinator.save()
            request.info(_('Coordinator saved.'))
            return redirect(
                reverse('oozie:edit_coordinator',
                        kwargs={'coordinator': coordinator.id}))
    else:
        coordinator_form = CoordinatorForm(instance=coordinator,
                                           user=request.user)
        dataset_formset = DatasetFormSet(instance=coordinator)
        data_input_formset = DataInputFormSet(instance=coordinator)
        data_output_formset = DataOutputFormSet(instance=coordinator)
        # Empty querysets: these formsets only ever create new rows.
        new_data_input_formset = NewDataInputFormSet(
            queryset=DataInput.objects.none(),
            instance=coordinator,
            prefix='input')
        new_data_output_formset = NewDataOutputFormSet(
            queryset=DataOutput.objects.none(),
            instance=coordinator,
            prefix='output')
    return render(
        'editor/edit_coordinator.mako', request, {
            'coordinator': coordinator,
            'coordinator_form': coordinator_form,
            'dataset_formset': dataset_formset,
            'data_input_formset': data_input_formset,
            'data_output_formset': data_output_formset,
            'dataset': dataset,
            'dataset_form': dataset_form,
            'new_data_input_formset': new_data_input_formset,
            'new_data_output_formset': new_data_output_formset,
            'history': history,
            'coordinator_frequency': json.dumps(coordinator.cron_frequency),
            'enable_cron_scheduling': enable_cron_scheduling,
        })
def get_excerpt(self, obj):
    """Return the blog post's excerpt as plain text (all HTML removed)."""
    return strip_tags(obj.description_from_content())
def get_meta_description(self):
    """Explicit meta description when set; otherwise the first 20 words of
    the rendered description with HTML tags stripped."""
    explicit = self.object.meta_description
    if explicit:
        return explicit
    rendered = safe_markdown(self.object.description)
    return Truncator(strip_tags(rendered)).words(20)