def import_mail(f):
    # Parse one mailing-list message from file object `f`, optionally archive
    # the raw message on disk, extract a plain-text body and store it as a
    # Mail record tied to the Comics referenced in the subject line.
    message = email.message_from_file(f)
    # Only handle mail addressed to the configured mailing list.
    if message and message['to'].find(settings.MAILLIST_NAME) != -1:
        subject, encoding = email.header.decode_header(message['subject'])[0]
        if encoding:
            subject = subject.decode(encoding)
        # The subject must match the list's header pattern to be imported.
        match = settings.MAILLIST_HEADER.search(subject)
        if match:
            # SHA-1 of the raw message: used as archive filename and log id.
            message_hash = hashlib.sha1(message.as_string())
            if settings.MAILLIST_DIR:
                # Archive the raw message once; skip if a copy already exists.
                tfn = "%s/%s.msg" % (settings.MAILLIST_DIR, message_hash.hexdigest())
                if not os.path.exists(tfn):
                    tf = open(tfn, 'w')
                    tf.write(message.as_string())
                    tf.close()
            # First capture group of the header pattern is the comics id.
            comics = match.group(1)
            name, address = email.utils.getaddresses([message['from']])[0]
            name, encoding = email.header.decode_header(name)[0]
            if not encoding is None:
                name = name.decode(encoding)
            # Convert the RFC-2822 Date header to a local datetime.
            date = datetime.datetime.fromtimestamp(
                email.utils.mktime_tz(
                    email.utils.parsedate_tz(message['date'])))
            logging.debug('Comics: %s(%s).' % (comics, message_hash.hexdigest()))
            if message.is_multipart():
                # Prefer a text/plain part; fall back to stripped text/html.
                for part in message.walk():
                    if part.get_content_type() == 'text/plain':
                        text = part.get_payload(decode=True).\
                            decode(part.get_content_charset('utf-8'))
                        break
                else:
                    logging.debug('No text/plain in multipart message.')
                    for part in message.walk():
                        if part.get_content_type() == 'text/html':
                            text = part.get_payload(decode=True).\
                                decode(part.get_content_charset('utf-8'))
                            text = strip_entities(strip_tags(text))
                            break
                    else:
                        # No usable body at all; `text` stays unbound here —
                        # NOTE(review): the footer loop below would then raise
                        # NameError; confirm such messages cannot occur.
                        logging.warning('No text/html in message %s.' %
                                        message_hash.hexdigest())
            else:
                text = message.get_payload(decode=True).\
                    decode(message.get_content_charset('utf-8'))
                if message.get_content_type() == 'text/html':
                    logging.debug('No text/plain in message.')
                    text = strip_entities(strip_tags(text))
            # Cut the body at the first configured list footer, if present.
            for footer in settings.MAILLIST_FOOTER:
                try:
                    text = text.split(footer)[0]
                except:
                    pass
            try:
                m = Mail(sender="%s <%s>" % (name, address),
                         message=text,
                         date=date,
                         comics=Comics.objects.get(cid=comics))
                m.save()
            except Comics.DoesNotExist:
                # Unknown comics id in the subject — drop the mail silently.
                pass
def compareTwoFile(request, revOne, revTwo, altName, cloudItem, tokenID):
    """ Compare two file by using two revisions """
    dajax = Dajax()
    try:
        token = parseAjaxParam(tokenID)
        item = checkCloudItem(cloudItem, request.user.id)
        access_token = checkAccessToken(token, item)
        # compute the diff
        comparator = Comparator(access_token)
        info = comparator.compareTwo(str(revOne), str(revTwo), altName)
        image_mimes = ['image/jpeg', 'image/png', 'image/gif', 'image/bmp']
        context = {
            'fileName': info["filename"],
            'revOne': strip_tags(strip_entities(revOne)),
            'revTwo': strip_tags(strip_entities(revTwo)),
            'info': info,
            'imgMimes': image_mimes,
        }
        table = render_to_string("dashboard/timeliner/diffViewer.html", context)
        dajax.assign("#comparator", "innerHTML", table)
        dajax.assign("#comparatorError", "innerHTML", "")
    except Exception as e:
        # Surface any failure in the dedicated error pane instead of a 500.
        dajax.assign("#comparatorError", "innerHTML", formatException(e))
    return dajax.json()
def strip_description(description):
    """Strip markup from *description*, keeping only letter, number and
    separator characters (Unicode categories L, N, Z)."""
    if isinstance(description, html.HtmlElement):
        raw = etree.tostring(description)
    else:
        raw = description
    cleaned = strip_entities(strip_tags(raw))
    return ''.join(ch for ch in cleaned
                   if unicodedata.category(ch)[0] in 'LNZ')
def cleanHTML(txt):
    """Normalise newlines and <br> markers in *txt*, then strip tags and
    entities, returning plain text."""
    normalised = (txt.replace('\n', ' ')
                     .replace('\r', "")
                     .replace('<br>', "\n")
                     .replace(" ", " "))
    return html.strip_entities(html.strip_tags(normalised))
def strip_description(description):
    """Return *description* as plain text restricted to letters, numbers and
    separators (Unicode categories L, N, Z)."""
    raw = (etree.tostring(description)
           if isinstance(description, html.HtmlElement)
           else description)
    text = strip_entities(strip_tags(raw))
    kept = [c for c in text if unicodedata.category(c)[0] in 'LNZ']
    return ''.join(kept)
def get_identifier(cls, instance):
    """Return the parent plugin identifier followed by a short plain-text
    preview (first three words) of this plugin's HTML content."""
    identifier = super(AcceptConditionFormPlugin, cls).get_identifier(instance)
    raw = instance.glossary.get('html_content', '')
    preview = strip_entities(strip_tags(cls.html_parser.unescape(raw)))
    preview = Truncator(preview).words(3, truncate=' ...')
    return format_html('{}{}', identifier, preview)
def get_description(self):
    """Plain-text description truncated to 40 words for SEM output."""
    # TODO: Optimize this SEM algorithm
    cleaned = strip_entities(strip_tags(self.object.description))
    return truncate_words(cleaned, 40, '')
def handle(self, *args, **options):
    """Rebuild the whoosh page index when invoked with 'create_index'."""
    if 'create_index' not in args:
        return
    if not os.path.exists(settings.PAGE_SEARCH_INDEX):
        os.makedirs(settings.PAGE_SEARCH_INDEX)
    ix = index.create_in(settings.PAGE_SEARCH_INDEX, PAGE_SCHEMA)
    writer = ix.writer()
    # Index every page as plain text, scoped by region.
    for page in Page.objects.all():
        writer.add_document(
            id=page.id,
            region=page.region.id,
            name=page.name,
            content=strip_entities(strip_tags(page.content)),
        )
    writer.commit()
def send_email(to, subject="", text="", body="", sender=None, reply=None,
               internal=False, silent=True):
    """Send an email through Mailgun.

    Missing text/body are derived from each other: body falls back to text,
    and text falls back to the body with markup stripped. Returns the
    Mailgun message id (empty string when absent).
    """
    client = MailgunClient(silent=silent)
    body = body or text
    text = text or strip_entities(strip_tags(body))
    headers = {}
    if internal:
        headers[settings.INTERNAL_EMAIL_HEADER] = "Made by MY team."
    if reply:
        headers["In-Reply-To"] = reply
    response = client.send_email(sender or settings.DEFAULT_EMAIL, to, text,
                                 body, subject=subject, headers=headers)
    return response.get("id", "")
def smart_truncate(text, length_threshold):
    """Strip markup from *text* and truncate it to *length_threshold* chars.

    A falsy threshold disables truncation. When the cut lands exactly after a
    sentence-ending period the text is cut cleanly; when it lands on a space
    the trailing space is replaced by an ellipsis; otherwise an ellipsis is
    appended after the cut.
    """
    # NOTE: the original docstring ("Removes all values of arg from the given
    # string") described a different function; behavior is unchanged here.
    if not length_threshold:
        return text
    text = strip_tags(strip_entities(text))
    if len(text) <= length_threshold:
        return text
    last_char = text[length_threshold - 1]
    if last_char == u'.':
        # Cut falls right after a sentence end — no ellipsis needed.
        return text[0:length_threshold]
    elif last_char == u' ':
        # Drop the trailing space before appending the ellipsis.
        return text[0:length_threshold - 1] + u'...'
    else:
        return text[0:length_threshold] + u'...'
def update_page_index(sender, instance, created, **kwargs):
    """post_save handler: keep the whoosh index in sync with Page rows."""
    if sender is not Page:
        return
    # Raw instance may contain non unicode strings
    page = Page.objects.get(pk=instance.id)
    document = {
        'id': page.id,
        'region': page.region.id,
        'name': page.name,
        'content': strip_entities(strip_tags(page.content)),
    }
    ix = index.open_dir(settings.PAGE_SEARCH_INDEX)
    writer = ix.writer()
    # New pages are added; existing ones are replaced in the index.
    write = writer.add_document if created else writer.update_document
    write(**document)
    writer.commit()
def get_identifier(cls, instance):
    """Short plain-text preview (first three words) of the HTML content."""
    raw = instance.glossary.get('html_content', '')
    text = strip_entities(strip_tags(cls.html_parser.unescape(raw)))
    preview = Truncator(text).words(3, truncate=' ...')
    return mark_safe(preview)
def prepare_intro(self, obj):
    """Index the intro as plain text (tags and entities removed)."""
    return strip_entities(strip_tags(obj.intro))
def compareTwoFile(request, revOne, revTwo, altName, cloudItem, tokenID):
    """ Compare two file by using two revisions """
    dajax = Dajax()
    try:
        parsed = parseAjaxParam(tokenID)
        cloud_item = checkCloudItem(cloudItem, request.user.id)
        tkn = checkAccessToken(parsed, cloud_item)
        # compute the diff
        diff_info = Comparator(tkn).compareTwo(str(revOne), str(revTwo), altName)
        displayable_images = ['image/jpeg', 'image/png', 'image/gif', 'image/bmp']
        rendered = render_to_string(
            "dashboard/timeliner/diffViewer.html",
            {'fileName': diff_info["filename"],
             'revOne': strip_tags(strip_entities(revOne)),
             'revTwo': strip_tags(strip_entities(revTwo)),
             'info': diff_info,
             'imgMimes': displayable_images})
        dajax.assign("#comparator", "innerHTML", rendered)
        dajax.assign("#comparatorError", "innerHTML", "")
    except Exception as e:
        # Report failures in the error pane rather than raising.
        dajax.assign("#comparatorError", "innerHTML", formatException(e))
    return dajax.json()
def prepare_content(self, obj):
    """Index the content as plain text (tags and entities removed)."""
    return strip_entities(strip_tags(obj.content))
def prepare_description(self, obj):
    """Index the description as plain text (tags and entities removed)."""
    return strip_entities(strip_tags(obj.description))
def prepare_answer(self, obj):
    """Index the answer as plain text (tags and entities removed)."""
    return strip_entities(strip_tags(obj.answer))
def search(request):
    # Full-text page search (whoosh) with category and date-range filters,
    # rendering a paginated, highlighted result list.
    form = SearchForm(request.GET)
    form.full_clean()
    # Init the query string
    q = form.cleaned_data['q']
    # Where search content or title?
    search_in = form.cleaned_data['search_in']
    # Init the category search param
    menu = form.cleaned_data['menu']
    # Init date_start and date_end search params
    date_start = form.cleaned_data['date_start']
    date_end = form.cleaned_data['date_end']
    # Search in index
    hits = None
    if q is not None:
        # Open index dir
        ix = whoosh_index.open_dir(settings.PAGE_SEARCH_INDEX)
        # Make parser
        parser = QueryParser(search_in, schema=ix.schema)
        # Configure filter
        filter = Term('region', get_region().id)
        # Make query string
        qstr = q.replace('+', ' AND ').replace(' -', ' NOT ').replace(' | ', ' OR ')
        # Parse query string
        query = parser.parse(qstr)
        # And... search in index!
        hits = ix.searcher().search(query, filter=filter, limit=None)
        hits.formatter = HtmlFormatter(tagname='span', classname='text-error')
    pages = Page.objects.for_region().select_related('menu')
    # Apply filter of category
    pages = pages.filter(
        menu__in=menu.get_descendants(include_self=True))
    # Apply filter of date range
    if date_start and date_end:
        pages = pages.filter(
            create_date__gte=date_start
        ).filter(
            create_date__lte=date_end)
    elif date_start and not date_end:
        pages = pages.filter(create_date__gte=date_start)
    elif not date_start and date_end:
        pages = pages.filter(create_date__lte=date_end)
    # If not the q param
    # NOTE(review): `hits is None and not hits` reduces to `hits is None`
    # (an empty hit list takes the else branch) — confirm this is intended.
    if hits is None and not hits:
        # Total count
        hits_count = pages.count()
        # Numbered
        for num, page in enumerate(pages):
            page.num = num + 1
        # Paginate it
        pages = paginate(request, pages, 20)
    else:
        # Merge hits and filtered publications
        pages = pages.filter(pk__in=[h.get('id') for h in hits])
        # Numbered
        for num, page in enumerate(pages):
            page.num = num + 1
        # Total count
        hits_count = pages.count()
        # Paginate it
        pages = paginate(request, pages)
        # Highlight results
        for hit in hits:
            for page in pages:
                if page.id == hit['id']:
                    if search_in == 'name':
                        page.name_hit = \
                            hit.highlights('name',
                                           text=strip_entities(strip_tags(page.name)))
                    if search_in == 'content':
                        page.content = hit.highlights('content',
                                                      text=strip_entities(strip_tags(page.content)),
                                                      top=5)
    # Close the index only when a query actually opened it.
    if 'ix' in locals():
        ix.close()
    return render(request, 'cms/search.html', {
        'LANG': get_language(),
        'url_GET_params': form.url_GET_params(),
        'q': q,
        'search_in': search_in,
        'menu': menu,
        'pages': pages,
        'hits_count': hits_count,
        'form': form,
    })
def list(request, slug, ids=None, internal_mode=False, report_header_visible=True):
    # Advanced-report view: handles bulk actions on POST, CSV export via
    # ?csv, and a paginated HTML listing. Python 2 syntax (`except X, e`).
    advreport = get_report_or_404(slug)
    advreport.set_request(request)
    advreport.internal_mode = internal_mode
    advreport.report_header_visible = report_header_visible

    # NOTE(review): the code visible here defines `inner` but never calls it;
    # presumably a trailing `return inner(request, slug, ids)` follows —
    # confirm against the full file.
    def inner(request, slug, ids):
        context = {}
        # Handle POST
        if request.method == 'POST':
            sorted_keys = [k for k in request.POST.keys()]
            sorted_keys.sort()
            # Checkbox keys look like 'checkbox_<n>_<object_id>'.
            selected_object_ids = [k.split('_')[2] for k in sorted_keys
                                   if 'checkbox_' in k and request.POST[k] == 'true']
            method = request.POST['method']
            if not method:
                messages.warning(request, _(u'You did not select any action.'))
                if not advreport.internal_mode:
                    return _get_redirect(advreport)
            if len(selected_object_ids) == 0:
                messages.warning(request, _(u'You did not select any %(object)s.') % {'object': advreport.verbose_name})
                if not advreport.internal_mode:
                    return _get_redirect(advreport)
            try:
                # Delegate the bulk action; a non-None response short-circuits.
                response, count = advreport.handle_multiple_actions(method, selected_object_ids, request)
                if response:
                    return response
                if count > 0:
                    messages.success(request, _(u'Successfully executed action on %(count)d %(objects)s') % {'count': count, 'objects': advreport.verbose_name_plural if count != 1 else advreport.verbose_name})
                else:
                    messages.error(request, _(u'No selected %(object)s is applicable for this action.') % {'object': advreport.verbose_name})
                if not advreport.internal_mode:
                    return _get_redirect(advreport, querystring=request.META['QUERY_STRING'])
            except ActionException, e:
                context.update({'error': e.msg})
        object_list, extra_context = advreport.get_object_list(request, ids=ids)
        context.update(extra_context)
        # CSV?
        if 'csv' in request.GET:
            from cStringIO import StringIO
            csv = StringIO()
            header = u'%s\n' % u';'.join(c['verbose_name'] for c in advreport.column_headers)
            # Build semicolon-separated rows, then scrub HTML remnants.
            lines = (u'%s\n' % u';'.join((c['html'] for c in o.advreport_column_values)) for o in object_list[:])
            lines = (line.replace(u' ', u' ') for line in lines)
            lines = (line.replace(u'€', u'€') for line in lines)
            lines = (line.replace(u'<br/>', u' ') for line in lines)
            lines = (strip_entities(line) for line in lines)
            lines = (strip_tags(line).encode('utf-8') for line in lines)
            csv.write(header)
            csv.writelines(lines)
            response = HttpResponse(csv.getvalue(), 'text/csv')
            response['Content-Disposition'] = 'attachment; filename="%s.csv"' % advreport.slug
            return response
        # Paginate
        paginated = paginate(request, object_list, num_per_page=advreport.items_per_page, use_get_parameters=True)
        # Extra context?
        context.update(advreport._extra_context(request))
        # Render
        context.update({'advreport': advreport,
                        'paginated': paginated,
                        'object_list': object_list})
        # internal_mode renders to a string for embedding; otherwise a full page.
        func = render_to_string if advreport.internal_mode else render_to_response
        return func(advreport.get_template(), context, context_instance=RequestContext(request))
def prepare_caption(self, obj):
    """Index the caption as plain text (tags and entities removed)."""
    return strip_entities(strip_tags(obj.caption))
def add(request, form_class=MakePaymentForm, template_name="make_payments/add.html"):
    # Display and process the "make a payment" form: saves the payment,
    # creates an invoice, notifies admins, optionally emails a receipt, and
    # redirects to online payment or a confirmation page.
    if request.method == "POST":
        form = form_class(request.user, request.POST)
        if form.is_valid():
            mp = form.save(commit=False)
            # we might need to create a user record if not exist
            if request.user.is_authenticated():
                user = request.user
            else:
                try:
                    user = User.objects.get(email=mp.email)
                except:
                    user = request.user
            if not user.is_anonymous():
                mp.user = user
                mp.creator = user
                mp.creator_username = user.username
            mp.save(user)
            # create invoice
            invoice = make_payment_inv_add(user, mp)
            EventLog.objects.log(instance=invoice)
            # updated the invoice_id for mp, so save again
            mp.save(user)
            EventLog.objects.log(instance=mp)
            # send notification to administrators
            # get admin notice recipients
            recipients = get_notice_recipients('module', 'payments', 'paymentrecipients')
            if recipients:
                if notification:
                    extra_context = {
                        'mp': mp,
                        'invoice': invoice,
                        'request': request,
                    }
                    notification.send_emails(recipients, 'make_payment_added', extra_context)
            # email to user
            email_receipt = form.cleaned_data['email_receipt']
            if email_receipt:
                make_payment_email_user(request, mp, invoice)
            # redirect to online payment or confirmation page
            if mp.payment_method == 'cc' or mp.payment_method == 'credit card':
                return HttpResponseRedirect(
                    reverse('payment.pay_online', args=[invoice.id, invoice.guid]))
            else:
                return HttpResponseRedirect(
                    reverse('make_payment.add_confirm', args=[mp.id]))
    else:
        form = form_class(request.user)
        # check for initial payment_amount and clean up
        payment_amount = request.GET.get('payment_amount', 0)
        try:
            payment_amount = float(payment_amount)
        except:
            payment_amount = 0
        if payment_amount > 0:
            form.fields['payment_amount'].initial = payment_amount
        # check for initial comment and clean up
        comments = request.GET.get('comments', '')
        if comments:
            # Sanitise user-supplied comment text before prefilling the form.
            comments = strip_tags(comments)
            comments = strip_entities(comments)
            form.fields['comments'].initial = comments
    currency_symbol = get_setting("site", "global",
                                  "currencysymbol")
    if not currency_symbol:
        currency_symbol = "$"
    return render_to_response(template_name, {
        'form': form,
        'currency_symbol': currency_symbol
    }, context_instance=RequestContext(request))
def cleanHTML(txt):
    """Normalise newlines and <br> markers in *txt*, then strip tags and
    entities, returning plain text."""
    prepared = txt.replace("\n", " ")
    prepared = prepared.replace("\r", "")
    prepared = prepared.replace("<br>", "\n")
    prepared = prepared.replace(" ", " ")
    return html.strip_entities(html.strip_tags(prepared))
def get_identifier(cls, instance):
    """Plain-text preview (first three words) of the plugin's HTML content."""
    unescaped = cls.html_parser.unescape(instance.glossary.get('html_content', ''))
    text = strip_entities(strip_tags(unescaped))
    return mark_safe(Truncator(text).words(3, truncate=' ...'))
def search(self, searchTerm, category, location): """ Parse and store Craigslist search results """ # Initialize list to store Craigslist postings postings = [] # Create URL for webpage of search results. Spaces in search terms are represented by '+' in the URL urlPart = 'http://{2}.craigslist.org/search/?query={0}&catAbb={1}'.format( searchTerm.replace(' ', '+'), self.categories[category], self.locations[location]) url = urlPart # Create variable to indicate that all postings of the day have been scrapped reached = False # Create variable to represent page number of search results page = 0 while not reached: # Parse html fileHTML = BeautifulSoup(urlopen(url)) # Extract postings postingsHTML = fileHTML('p') # Store date, title and link for each Craigslist posting made today for posting in postingsHTML: description = ''.join(posting.findAll(text=True)) # Strip \n and \t from title description = description.replace('\n', '') description = description.replace('\t', '') # Strip HTML entities from title description = strip_entities(description) # Split description into date and shorter title descriptionSplit = description.split('-', 1) title = descriptionSplit[1].strip() date = descriptionSplit[0].strip() # Get date information date = datetime.strptime(date, '%b %d') # Store title and link of postings made today if date.month == self.today.month and date.day == self.today.day: link = posting('a')[0]['href'] # Strip title of unwanted information and characters title = title[0:title.rfind(')') + 1] title = title.replace(' - ', ' ') title = re.sub('\s{2,}', ' ', title) postings.append((title, link)) else: reached = True break # Create URL of next page of search results page += 100 url = urlPart + '&s=' + str(page) return postings
def prepare_response(self, obj):
    """Index the response as plain text (tags and entities removed)."""
    return strip_entities(strip_tags(obj.response))
def clean_html(text):
    """Strip tags and entities from *text* and drop newline characters."""
    plain = strip_entities(strip_tags(text))
    return plain.replace('\n', '')
def get_identifier(cls, instance):
    """Parent identifier plus a three-word plain-text preview of the content."""
    identifier = super(AcceptConditionFormPlugin, cls).get_identifier(instance)
    unescaped = cls.html_parser.unescape(instance.glossary.get('html_content', ''))
    preview = Truncator(strip_entities(strip_tags(unescaped))).words(3, truncate=' ...')
    return format_html('{}{}', identifier, preview)
def search(request):
    # Region-aware full-text page search (whoosh) with category and
    # date-range filters; renders a paginated, highlighted result list.
    # Init the query string
    q = request.GET.get('q')
    # Where search content or title?
    search_in = request.GET.get('search_in', 'content')
    if search_in not in ('content', 'name',):
        search_in = 'content'
    # No need show a publication content if q is empty
    if not q:
        search_in = 'name'
    # Init the region search param
    region_id = request.GET.get('region_search', get_region().id)
    try:
        region = Region.objects.get(pk=region_id)
    except Region.DoesNotExist:
        region = Region.objects.get(pk=get_region().id)
    # Init the category search param
    menu_search = request.GET.get('menu_search', 0)
    try:
        menu = Menu.objects.get(pk=menu_search)
        if menu.region != region:
            # Change region and find similar category
            request.session['region_id'] = region.id
            menu = Menu.objects.root_nodes().filter(
                region=region).get(name=menu.name)
    except Menu.DoesNotExist:
        # Fall back to the region's first root category.
        menu = Menu.objects.root_nodes().filter(region=region)[0]
    from datetime import datetime
    # Init date_start and date_end search params (dd.mm.yyyy; '' on failure)
    date_start = request.GET.get('date_start', '')
    date_end = request.GET.get('date_end', '')
    try:
        if date_start:
            date_start = datetime.strptime(date_start, '%d.%m.%Y')
    except ValueError:
        date_start = ''
    try:
        if date_end:
            date_end = datetime.strptime(date_end, '%d.%m.%Y')
    except ValueError:
        date_end = ''
    # Search in index
    hits = None
    if q is not None:
        # Open index dir
        ix = whoosh_index.open_dir(settings.PAGE_SEARCH_INDEX)
        # Make parser
        parser = QueryParser(search_in, schema=ix.schema)
        # Configure filter
        filter = Term('region', region.id)
        # Make query string
        qstr = q.replace('+', ' AND ').replace(' -', ' NOT ').replace(' | ', ' OR ')
        # Parse query string
        query = parser.parse(qstr)
        # And... search in index!
        hits = ix.searcher().search(query, filter=filter, limit=None)
    pages = Page.objects.filter(region=region).filter(visible=True)
    # Apply filter of category
    pages = pages.filter(
        menu__in=menu.get_descendants(include_self=True))
    # Apply filter of date range
    if date_start and date_end:
        pages = pages.filter(
            create_date__gte=date_start
        ).filter(
            create_date__lte=date_end)
    elif date_start and not date_end:
        pages = pages.filter(create_date__gte=date_start)
    elif not date_start and date_end:
        pages = pages.filter(create_date__lte=date_end)
    from django.utils.html import strip_tags, strip_entities
    from cms.views.utils import paginate
    # If not the q param
    # NOTE(review): `hits is None and not hits` reduces to `hits is None`
    # (an empty hit list takes the else branch) — confirm this is intended.
    if hits is None and not hits:
        # Total count
        hits_count = pages.count()
        # Numbered
        for num, page in enumerate(pages):
            page.num = num + 1
        # Paginate it
        pages = paginate(request, pages, 20)
    else:
        # Merge hits and filtered publications
        pages = pages.filter(pk__in=[h.get('id') for h in hits])
        # Numbered
        for num, page in enumerate(pages):
            page.num = num + 1
        # Total count
        hits_count = pages.count()
        # Paginate it
        pages = paginate(request, pages, 20)
        # Highlight results
        for hit in hits:
            for page in pages:
                if page.id == hit['id']:
                    if search_in == 'name':
                        page.name = hit.highlights('name',
                                                   text=strip_entities(strip_tags(page.name)))
                    if search_in == 'content':
                        page.content = hit.highlights('content',
                                                      text=strip_entities(strip_tags(page.content)))
    # Close the index only when a query actually opened it.
    if 'ix' in locals():
        ix.close()
    # Re-serialise parsed dates for redisplay in the form.
    if date_start:
        date_start = '%s.%s.%s' % (date_start.day, date_start.month, date_start.year)
    if date_end:
        date_end = '%s.%s.%s' % (date_end.day, date_end.month, date_end.year)
    return render(request, 'search.html', {
        'q': q,
        'menu_search': menu.id,
        'region_search': region.id,
        'search_in': search_in,
        'date_start': date_start,
        'date_end': date_end,
        'pages': pages,
        'hits_count': hits_count
    })
def normalize(cls, tag):
    """Clean *tag* and return [cleaned, normalized], or just [cleaned] when
    normalization changes nothing."""
    cleaned = strip_entities(bleach.clean(tag))
    normalized = TAG_NORMALIZE_PATTERN.sub(u'', cleaned).lower()
    if cleaned == normalized:
        return [cleaned]
    return [cleaned, normalized]
def process(self):
    # Import FriendFeed entries from the fetched JSON feed into Entry rows,
    # skipping protected or already up-to-date entries.
    for ent in self.json['entries']:
        # FriendFeed ids are 'e/<32 hex chars>'; rebuild the dashed UUID.
        id = ent['id'][2:]
        uuid = '%s-%s-%s-%s-%s' % (id[0:8], id[8:12], id[12:16], id[16:20], id[20:])
        guid = 'tag:friendfeed.com,2007:%s' % uuid
        if self.verbose:
            print("ID: %s" % guid)
        t = datetime.datetime.strptime(ent['date'], '%Y-%m-%dT%H:%M:%SZ')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            # Skip unchanged entries unless forced, and never touch protected ones.
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = truncate.smart(strip_entities(strip_tags(ent['body'])), max_length=40)
        e.link = ent['url']
        image_url = 'http://friendfeed-api.com/v2/picture/%s' % ent[
            'from']['id']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['from']['name']
        content = ent['body']
        if 'thumbnails' in ent:
            content += '<p class="thumbnails">'
            # NOTE: `t` is reused here as the thumbnail dict, shadowing the
            # datetime above (safe only because all date fields are already set).
            for t in ent['thumbnails']:
                if self.service.public:
                    t['url'] = media.save_image(t['url'])
                if 'width' in t and 'height' in t:
                    iwh = ' width="%d" height="%d"' % (t['width'], t['height'])
                else:
                    iwh = ''
                # Rewrite FriendFeed-proxied YouTube thumbnails to direct links.
                if 'friendfeed.com/e/' in t['link'] and \
                   ('youtube.com' in t['url'] or 'ytimg.com' in t['url']):
                    m = re.search(r'/vi/([\-\w]+)/', t['url'])
                    yid = m.groups()[0] if m else None
                    if yid:
                        t['link'] = 'http://www.youtube.com/watch?v=%s' % yid
                content += '<a href="%s" rel="nofollow"><img src="%s"%s alt="thumbnail" /></a> ' % (
                    t['link'], t['url'], iwh)
            content += '</p>'
        if 'files' in ent:
            content += '<ul class="files">\n'
            for f in ent['files']:
                if 'friendfeed-media' in f['url']:
                    content += ' <li><a href="%s" rel="nofollow">%s</a>' % (
                        f['url'], f['name'])
                    if 'size' in f:
                        content += ' <span class="size">%s</span>' % bytes_to_human(
                            f['size'])
                    content += '</li>\n'
            content += '</ul>\n'
        e.content = content
        try:
            e.save()
            media.extract_and_register(e)
        except:
            # Best-effort import: a failed save just drops this entry.
            pass
# Avoid microsoft SYLK problem http://support.microsoft.com/kb/215591 _mark_safe = lambda s: s if unicode(s) != u'ID' else u'"%s"' % s object_count = len(object_list) #csv = StringIO() header = u'%s\n' % u';'.join( _mark_safe(c['verbose_name']) for c in advreport.column_headers) lines = (u'%s\n' % u';'.join((c['html'] for c in o.advreport_column_values)) \ for o in with_progress(object_list.iterator() \ if hasattr(object_list, 'iterator') \ else object_list[:], name='CSV export of %s' % advreport.slug, count=object_count)) lines = (line.replace(u' ', u' ') for line in lines) lines = (line.replace(u'€', u'€') for line in lines) lines = (line.replace(u'<br/>', u' ') for line in lines) lines = (strip_entities(line) for line in lines) lines = (strip_tags(line).encode('utf-8') for line in lines) #csv.write(header) #csv.writelines(lines) response_content = itertools.chain([header], lines) # We use streaming http response because sometimes generation of each line takes some time and for big exports # it leads to timeout on the response generation response = StreamingHttpResponse(response_content, content_type='text/csv') response[ 'Content-Disposition'] = 'attachment; filename="%s.csv"' % advreport.slug return response # Paginate
def process(self):
    # Import tweets from the fetched JSON timeline into Entry rows,
    # skipping protected or already up-to-date entries.
    for ent in self.json:
        guid = 'tag:twitter.com,2007:http://twitter.com/%s/statuses/%s' % \
            (ent['user']['screen_name'], ent['id'])
        if self.verbose:
            print("ID: %s" % guid)
        t = datetime.datetime.strptime(ent['created_at'],
                                       '%a %b %d %H:%M:%S +0000 %Y')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            # Skip unchanged entries unless forced; never touch protected ones.
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = 'Tweet: %s' % truncate.smart(
            strip_entities(strip_tags(ent['text'])), max_length=40)
        e.title = e.title.replace('#', '').replace('@', '')
        e.link = 'https://twitter.com/%s/status/%s' % \
            (ent['user']['screen_name'], ent['id'])
        image_url = ent['user']['profile_image_url_https']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['user']['name']
        # double expand
        e.content = 'Tweet: %s' % expand.all(expand.shorturls(ent['text']))
        if 'entities' in ent and 'media' in ent['entities']:
            content = ' <p class="thumbnails">'
            # NOTE: `t` is reused here as the media dict, shadowing the
            # datetime above (safe only because date fields are already set).
            for t in ent['entities']['media']:
                if t['type'] == 'photo':
                    tsize = 'thumb'
                    if 'media_url_https' in t:
                        image_url = '%s:%s' % (t['media_url_https'], tsize)
                        large_url = '%s:large' % t['media_url_https']
                    else:
                        image_url = '%s:%s' % (t['media_url'], tsize)
                        large_url = t['media_url']
                    link = t['expanded_url']
                    if self.service.public:
                        image_url = media.save_image(image_url)
                    if 'sizes' in t and tsize in t['sizes']:
                        sizes = t['sizes'][tsize]
                        iwh = ' width="%d" height="%d"' % (sizes['w'], sizes['h'])
                    else:
                        iwh = ''
                    content += '<a href="%s" rel="nofollow" data-imgurl="%s"><img src="%s"%s alt="thumbnail" /></a> ' % (
                        link, large_url, image_url, iwh)
            content += '</p>'
            e.content += content
        try:
            e.save()
            media.extract_and_register(e)
        except:
            # Best-effort import: a failed save just drops this tweet.
            pass
def stripentities(value):
    """Strips all HTML entities from *value*.

    Template-filter wrapper around django.utils.html.strip_entities.
    (The previous docstring wrongly said it strips tags.)
    """
    from django.utils.html import strip_entities
    return strip_entities(value)
def stripMarkup(string):
    """Return *string* with HTML entities removed first, then tags."""
    without_entities = html.strip_entities(string)
    return html.strip_tags(without_entities)
def prepare_body(self, obj):
    """Index the body as plain text (tags and entities removed)."""
    return strip_entities(strip_tags(obj.body))
def add(request, form_class=MakePaymentForm, template_name="make_payments/add.html"):
    # Display and process the "make a payment" form: saves the payment,
    # creates an invoice, notifies admins, optionally emails a receipt, and
    # redirects to online payment or a confirmation page.
    if request.method == "POST":
        form = form_class(request.user, request.POST)
        if form.is_valid():
            mp = form.save(commit=False)
            # we might need to create a user record if not exist
            if request.user.is_authenticated():
                user = request.user
            else:
                try:
                    user = User.objects.get(email=mp.email)
                except:
                    user = request.user
            if not user.is_anonymous():
                mp.user = user
                mp.creator = user
                mp.creator_username = user.username
            mp.save(user)
            # create invoice
            invoice = make_payment_inv_add(user, mp)
            EventLog.objects.log(instance=invoice)
            # updated the invoice_id for mp, so save again
            mp.save(user)
            EventLog.objects.log(instance=mp)
            # send notification to administrators
            # get admin notice recipients
            recipients = get_notice_recipients('module', 'payments', 'paymentrecipients')
            if recipients:
                if notification:
                    extra_context = {
                        'mp': mp,
                        'invoice': invoice,
                        'request': request,
                    }
                    notification.send_emails(recipients, 'make_payment_added', extra_context)
            # email to user
            email_receipt = form.cleaned_data['email_receipt']
            if email_receipt:
                make_payment_email_user(request, mp, invoice)
            # redirect to online payment or confirmation page
            if mp.payment_method == 'cc' or mp.payment_method == 'credit card':
                return HttpResponseRedirect(reverse('payment.pay_online', args=[invoice.id, invoice.guid]))
            else:
                return HttpResponseRedirect(reverse('make_payment.add_confirm', args=[mp.id]))
    else:
        form = form_class(request.user)
        # check for initial payment_amount and clean up
        payment_amount = request.GET.get('payment_amount', 0)
        try:
            payment_amount = float(payment_amount)
        except:
            payment_amount = 0
        if payment_amount > 0:
            form.fields['payment_amount'].initial = payment_amount
        # check for initial comment and clean up
        comments = request.GET.get('comments', '')
        if comments:
            # Sanitise user-supplied comment text before prefilling the form.
            comments = strip_tags(comments)
            comments = strip_entities(comments)
            form.fields['comments'].initial = comments
    currency_symbol = get_setting("site", "global",
                                  "currencysymbol")
    if not currency_symbol:
        currency_symbol = "$"
    return render_to_response(template_name,
                              {'form': form,
                               'currency_symbol': currency_symbol},
                              context_instance=RequestContext(request))
def content_plain(self):
    """Plain-text version of this object's content (tags/entities removed)."""
    without_tags = strip_tags(self.content)
    return strip_entities(without_tags)
def clean_html(text):
    """Strip tags and entities from *text* and remove newline characters."""
    plain_text = strip_entities(strip_tags(text))
    return plain_text.replace('\n', '')
def process(self):
    # Import FriendFeed entries from the fetched JSON feed into Entry rows,
    # skipping protected or already up-to-date entries.
    for ent in self.json['entries']:
        # FriendFeed ids are 'e/<32 hex chars>'; rebuild the dashed UUID.
        id = ent['id'][2:]
        uuid = '%s-%s-%s-%s-%s' % (id[0:8], id[8:12], id[12:16], id[16:20], id[20:])
        guid = 'tag:friendfeed.com,2007:%s' % uuid
        if self.verbose:
            print("ID: %s" % guid)
        t = datetime.datetime.strptime(ent['date'], '%Y-%m-%dT%H:%M:%SZ')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            # Skip unchanged entries unless forced; never touch protected ones.
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = truncate.smart(
            strip_entities(strip_tags(ent['body'])), max_length=40)
        e.link = ent['url']
        image_url = 'http://friendfeed-api.com/v2/picture/%s' % ent[
            'from']['id']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['from']['name']
        content = ent['body']
        if 'thumbnails' in ent:
            content += '<p class="thumbnails">'
            # NOTE: `t` is reused here as the thumbnail dict, shadowing the
            # datetime above (safe only because date fields are already set).
            for t in ent['thumbnails']:
                if self.service.public:
                    t['url'] = media.save_image(t['url'])
                if 'width' in t and 'height' in t:
                    iwh = ' width="%d" height="%d"' % (t['width'], t['height'])
                else:
                    iwh = ''
                # Rewrite FriendFeed-proxied YouTube thumbnails to direct links.
                if 'friendfeed.com/e/' in t['link'] and \
                   ('youtube.com' in t['url'] or 'ytimg.com' in t['url']):
                    m = re.search(r'/vi/([\-\w]+)/', t['url'])
                    yid = m.groups()[0] if m else None
                    if yid:
                        t['link'] = 'http://www.youtube.com/watch?v=%s' % yid
                content += '<a href="%s" rel="nofollow"><img src="%s"%s alt="thumbnail" /></a> ' % (
                    t['link'], t['url'], iwh)
            content += '</p>'
        if 'files' in ent:
            content += '<ul class="files">\n'
            for f in ent['files']:
                if 'friendfeed-media' in f['url']:
                    content += ' <li><a href="%s" rel="nofollow">%s</a>' % (
                        f['url'], f['name'])
                    if 'size' in f:
                        content += ' <span class="size">%s</span>' % bytes_to_human(
                            f['size'])
                    content += '</li>\n'
            content += '</ul>\n'
        e.content = content
        try:
            e.save()
            media.extract_and_register(e)
        except:
            # Best-effort import: a failed save just drops this entry.
            pass
def _add_discount_line(params, index, name, discount_amount):
    """Append one negative-priced PayPal line item representing a discount."""
    params['L_PAYMENTREQUEST_0_NAME%d' % index] = name
    params['L_PAYMENTREQUEST_0_DESC%d' % index] = truncatewords(name, 12)
    params['L_PAYMENTREQUEST_0_AMT%d' % index] = _format_currency(
        -discount_amount)
    params['L_PAYMENTREQUEST_0_QTY%d' % index] = 1


def _add_shipping_address_params(params, address):
    """Copy an address object into the SHIPTO* request parameters."""
    params['SHIPTONAME'] = address.name()
    params['SHIPTOSTREET'] = address.line1
    params['SHIPTOSTREET2'] = address.line2
    params['SHIPTOCITY'] = address.city
    # PayPal expects the 2-letter code for US states.
    params['SHIPTOSTATE'] = US_ABBREVIATIONS.get(address.state.title(),
                                                 address.state)
    params['SHIPTOZIP'] = address.postcode
    params['SHIPTOCOUNTRYCODE'] = address.country.iso_3166_1_a2


def set_txn(basket, shipping_methods, currency, return_url, cancel_url,
            update_url=None, action=SALE, user=None, user_address=None,
            shipping_method=None, shipping_address=None, no_shipping=False):
    """
    Register the transaction with PayPal to get a token which we use in the
    redirect URL.  This is the 'SetExpressCheckout' call from their
    documentation.

    Most request options are controlled by ``PAYPAL_*`` settings.  Returns
    the PayPal URL to redirect the customer to.

    Raises ``exceptions.PayPalError`` for USD orders over PayPal's upper
    limit, and ``ImproperlyConfigured`` for an invalid ``PAYPAL_LOCALE``.

    Bug fix vs. previous revision: ``max_charge`` is now initialised before
    the ``shipping_method`` branch — previously any call without a shipping
    method raised ``NameError`` when building ``PAYMENTREQUEST_0_MAXAMT``.
    """
    # PayPal have an upper limit on transactions.  It's in dollars, which is
    # fiddly to work with - lazy solution: only check when dollars are used
    # as the PayPal currency.
    amount = basket.total_incl_tax
    if currency == 'USD' and amount > 10000:
        raise exceptions.PayPalError(
            'PayPal can only be used for orders up to 10000 USD')

    # PAYMENTREQUEST_0_AMT should include tax, shipping and handling.
    params = {
        'PAYMENTREQUEST_0_AMT': amount,
        'PAYMENTREQUEST_0_CURRENCYCODE': currency,
        'RETURNURL': return_url,
        'CANCELURL': cancel_url,
        'PAYMENTREQUEST_0_PAYMENTACTION': action,
    }

    # One PayPal line item per basket line.  Discounts are NOT included
    # here - they are handled as separate negative lines below.
    index = 0
    for index, line in enumerate(basket.all_lines()):
        product = line.product
        params['L_PAYMENTREQUEST_0_NAME%d' % index] = product.get_title()
        params['L_PAYMENTREQUEST_0_NUMBER%d' % index] = (product.upc if
                                                         product.upc else '')
        desc = ''
        if product.description:
            desc = truncatewords(
                strip_entities(strip_tags(product.description)), 12)
        params['L_PAYMENTREQUEST_0_DESC%d' % index] = desc
        params['L_PAYMENTREQUEST_0_AMT%d' % index] = _format_currency(
            line.unit_price_incl_tax)
        params['L_PAYMENTREQUEST_0_QTY%d' % index] = line.quantity

    # PayPal's suggested way of passing discounts is as extra line items
    # with a negative price.  See "Integrating Order Details into the
    # Express Checkout Flow":
    # https://cms.paypal.com/us/cgi-bin/?cmd=_render-content&content_ID=developer/e_howto_api_ECCustomizing
    for discount in basket.offer_discounts:
        index += 1
        name = _("Special Offer: %s") % discount['name']
        _add_discount_line(params, index, name, discount['discount'])
    for discount in basket.voucher_discounts:
        index += 1
        name = "%s (%s)" % (discount['voucher'].name,
                            discount['voucher'].code)
        _add_discount_line(params, index, name, discount['discount'])
    for discount in basket.shipping_discounts:
        index += 1
        name = _("Shipping Offer: %s") % discount['name']
        _add_discount_line(params, index, name, discount['discount'])

    # We include tax in the prices rather than separately, as that's how
    # it's done on most British/Australian sites.  Note the constraint:
    #   PAYMENTREQUEST_0_AMT = ITEMAMT + TAXAMT + SHIPPINGAMT + HANDLINGAMT
    # so any separately-shown tax would have to be aggregated to order
    # level.
    params['PAYMENTREQUEST_0_TAXAMT'] = _format_currency(D('0.00'))

    # Customer services number
    customer_service_num = getattr(
        settings, 'PAYPAL_CUSTOMER_SERVICES_NUMBER', None)
    if customer_service_num:
        params['CUSTOMERSERVICENUMBER'] = customer_service_num

    # Display settings
    page_style = getattr(settings, 'PAYPAL_PAGESTYLE', None)
    header_image = getattr(settings, 'PAYPAL_HEADER_IMG', None)
    if page_style:
        params['PAGESTYLE'] = page_style
    elif header_image:
        params['LOGOIMG'] = header_image
    else:
        # These settings may be deprecated in the latest version of
        # PayPal's API.
        display_params = {
            'HDRBACKCOLOR': getattr(settings,
                                    'PAYPAL_HEADER_BACK_COLOR', None),
            'HDRBORDERCOLOR': getattr(settings,
                                      'PAYPAL_HEADER_BORDER_COLOR', None),
        }
        params.update(x for x in display_params.items() if bool(x[1]))

    # Locale
    locale = getattr(settings, 'PAYPAL_LOCALE', None)
    if locale:
        valid_choices = ('AU', 'DE', 'FR', 'GB', 'IT', 'ES', 'JP', 'US')
        if locale not in valid_choices:
            raise ImproperlyConfigured(
                "'%s' is not a valid locale code" % locale)
        params['LOCALECODE'] = locale

    # Confirmed shipping address
    confirm_shipping_addr = getattr(settings, 'PAYPAL_CONFIRM_SHIPPING',
                                    None)
    if confirm_shipping_addr:
        params['REQCONFIRMSHIPPING'] = 1

    # Instant update callback information
    if update_url:
        params['CALLBACK'] = update_url
        params['CALLBACKTIMEOUT'] = getattr(
            settings, 'PAYPAL_CALLBACK_TIMEOUT', 3)

    # Contact details and address details - providing these makes the
    # PayPal registration process smoother if the user doesn't already
    # have an account.
    if user:
        params['EMAIL'] = user.email
    if user_address:
        _add_shipping_address_params(params, user_address)

    # Shipping details (if already set) - we override the SHIPTO* fields
    # and set a flag to indicate that they can't be altered on the PayPal
    # side.
    if shipping_method and shipping_address:
        params['ADDROVERRIDE'] = 1
        # It's recommended not to set 'confirmed shipping' if supplying
        # the shipping address directly.
        params['REQCONFIRMSHIPPING'] = 0
        _add_shipping_address_params(params, shipping_address)
    elif no_shipping:
        params['NOSHIPPING'] = 1

    # Allow customer to specify a shipping note
    allow_note = getattr(settings, 'PAYPAL_ALLOW_NOTE', True)
    if allow_note:
        params['ALLOWNOTE'] = 1

    # Shipping charge - set explicitly if a method has been passed.
    # max_charge MUST be initialised here: it is read unconditionally when
    # building the MAXAMT fields below (previously a NameError when no
    # shipping method was supplied).
    max_charge = D('0.00')
    if shipping_method:
        index += 1
        max_charge = charge = shipping_method.basket_charge_incl_tax()
        params['L_PAYMENTREQUEST_0_NAME%d' % index] = "Shipping"
        params['L_PAYMENTREQUEST_0_NUMBER%d' % index] = ""
        params['L_PAYMENTREQUEST_0_DESC%d' % index] = ''
        params['L_PAYMENTREQUEST_0_AMT%d' % index] = _format_currency(
            charge)
        params['L_PAYMENTREQUEST_0_QTY%d' % index] = 1
        params['PAYMENTREQUEST_0_AMT'] += charge

    # Both the old version (MAXAMT) and the new version (PAYMENTREQUEST_0_
    # MAXAMT) are needed here - this appears to be a quirk of the API.
    params['PAYMENTREQUEST_0_MAXAMT'] = _format_currency(amount + max_charge)
    params['PAYMENTREQUEST_0_ITEMAMT'] = _format_currency(amount + max_charge)
    params['MAXAMT'] = _format_currency(amount + max_charge)

    # Handling set to zero for now.
    params['PAYMENTREQUEST_0_HANDLINGAMT'] = _format_currency(D('0.00'))

    # Ensure that the total is formatted correctly.
    params['PAYMENTREQUEST_0_AMT'] = _format_currency(
        params['PAYMENTREQUEST_0_AMT'])

    if getattr(settings, 'PAYPAL_NO_SHIPPING', False):
        params['NOSHIPPING'] = 1

    txn = _fetch_response(SET_EXPRESS_CHECKOUT, params)

    # Construct the redirect URL.
    if getattr(settings, 'PAYPAL_SANDBOX_MODE', True):
        url = 'https://www.sandbox.paypal.com/webscr'
    else:
        url = 'https://www.paypal.com/webscr'
    params = (('cmd', '_express-checkout'), ('token', txn.token),)
    return '%s?%s' % (url, urllib.urlencode(params))
def item_title(self, item):
    """Feed item title: "<name> - <position>", entity-decoded then
    tag-stripped."""
    label = "%s - %s" % (item.name, item.position)
    return strip_tags(strip_entities(label))
def process(self):
    """Import Facebook stream posts from ``self.stream`` into ``Entry`` rows.

    For each post: build a tag-URI GUID from the post id, skip posts that
    are already up to date (or protected), then assemble the HTML content
    from message, name, picture, description and caption fields and save.

    Fix vs. previous revision: the final bare ``except:`` is narrowed to
    ``Exception`` so the best-effort save no longer swallows
    SystemExit/KeyboardInterrupt.
    """
    for ent in self.stream['data']:
        guid = 'tag:facebook.com,2004:post/%s' % ent['id']
        if self.verbose:
            print("ID: %s" % guid)
        # Prefer the update time for staleness checks; fall back to the
        # creation time when the post was never updated.
        if 'updated_time' in ent:
            t = from_rfc3339(ent['updated_time'])
        else:
            t = from_rfc3339(ent['created_time'])
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.link = ent['actions'][0]['link']
        if 'from' in ent:
            frm = ent['from']
            image_url = 'http://graph.facebook.com/%s/picture' % frm['id']
            e.link_image = media.save_image(image_url, direct_image=False)
            e.author_name = frm['name']
        e.date_published = from_rfc3339(ent['created_time'])
        e.date_updated = t
        content = ''
        if 'message' in ent:
            content = expand.shorts(ent['message'])
            content = '<p>' + urlizetrunc(content, 45) + '</p>'
        name = ''
        if 'name' in ent:
            name = ent['name']
            content += ' <p>' + ent['name'] + '</p>'
        if 'picture' in ent and 'link' in ent:
            content += '<p class="thumbnails">'
            content += '<a href="%s" rel="nofollow">' \
                       '<img src="%s" alt="thumbnail" /></a> ' \
                       % (ent['link'],
                          media.save_image(ent['picture'], downscale=True))
            # The caption is skipped when it merely repeats the name.
            if 'description' in ent:
                content += '<div class="fb-description">%s</div>' % \
                    ent['description']
            elif 'caption' in ent and name != ent['caption']:
                content += '<div class="fb-caption">%s</div>' % \
                    ent['caption']
            content += '</p>'
        else:
            if 'description' in ent:
                content += '<div class="fb-description">%s</div>' % \
                    ent['description']
            elif 'caption' in ent and name != ent['caption']:
                content += '<div class="fb-caption">%s</div>' % \
                    ent['caption']
        e.content = content
        if 'message' in ent:
            e.title = truncate.smart(strip_tags(ent['message']),
                                     max_length=48)
        if e.title == '':
            # Fall back to the first 128 chars of the plain-text content.
            e.title = strip_entities(strip_tags(content))[0:128]
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:
            # Best-effort import: one bad post must not abort the run.
            pass