def external_share(request):
    """Share an externally-bookmarked URL as the given user.

    Expects POST params: ``user_id``, ``url``, ``comment``, ``title``.
    Reuses an existing Entry whose link matches the URL; otherwise
    creates a stub Entry (``feed=None``, comment stored as content).
    Marks the resulting UserEntry as shared and read.

    Returns "saved" on success; 404 for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponse(status=404)

    user = User.objects.get(id__exact=request.POST['user_id'])
    url = request.POST['url']
    comment = request.POST['comment']
    title = request.POST['title']

    entries = Entry.objects.filter(link=url)
    if entries:
        entry = entries[0]
        # Re-use the user's existing UserEntry for this entry if present.
        try:
            user_entry = UserEntry.objects.get(user=user, entry=entry)
        except UserEntry.DoesNotExist:
            user_entry = UserEntry()
            user_entry.user = user
    else:
        # No entry for this URL yet: create one detached from any feed.
        # The user's comment doubles as the entry content, and the raw
        # URL is used as the uuid.
        entry = Entry()
        entry.content = comment
        entry.uuid = url
        entry.link = url
        entry.title = title
        entry.feed = None
        entry.published = datetime.date.today()
        entry.save()
        user_entry = UserEntry()
        user_entry.user = user

    user_entry.entry = entry
    user_entry.shared = True
    user_entry.read = True
    user_entry.save()
    return HttpResponse("saved")
def external_share(request):
    """Share an externally-bookmarked URL (bookmarklet endpoint).

    Validates POST data with BookmarkletForm, canonicalizes the URL and
    derives a stable UUID5 (URL namespace) from it so repeated shares of
    the same URL map onto a single Entry.  Marks the user's UserEntry as
    shared and read.

    Returns "saved" on success, 400 for an invalid form, and 405 for
    non-POST requests (the original fell through and returned None,
    which makes Django raise).
    """
    if request.method != 'POST':
        return HttpResponse(status=405)

    form = BookmarkletForm(request.POST, request.FILES)
    if not form.is_valid():
        return HttpResponse(status=400)

    user = User.objects.get(id__exact=form.cleaned_data['user_id'])
    url = form.cleaned_data['url']
    title = form.cleaned_data['title']
    comment = form.cleaned_data['comment']

    # Standardize the url and use it for the uuid.
    url = validate_url(url)
    # uuid5 only wants bytestrs.
    uuid = uuid5(NAMESPACE_URL, smart_str(url))

    try:
        entry = Entry.objects.get(uuid=uuid)
    except Entry.DoesNotExist:
        entry = Entry()
        entry.feed = None
        entry.uuid = uuid
        entry.link = url
        entry.title = title
        # Temporarily use the comment for content.
        entry.content = comment
        entry.published = datetime.date.today()
        entry.save()

    user_entry = UserEntry()
    user_entry.entry = entry
    user_entry.user = user
    user_entry.shared = True
    user_entry.read = True
    user_entry.save()
    return HttpResponse("saved")
def refresh_feed(feed_id):
    """Fetch a feed and upsert its entries, keyed by uuid.

    Looks up the Feed by primary key, parses its URI with feedparser,
    refreshes the stored feed title, and creates or updates one Entry
    per parsed item (keyed on the item id, falling back to its link).
    """
    # Used to unescape html entities in titles.
    html_parser = HTMLParser.HTMLParser()

    feed = Feed.objects.get(pk=feed_id)
    parsed = feedparser.parse(feed.uri)
    parsed_feed = parsed.feed

    # Not every feed supplies a title; fall back to the URI instead of
    # crashing with AttributeError (matches the other refresh_feed variant).
    title = parsed_feed.title if hasattr(parsed_feed, 'title') else feed.uri
    feed.title = html_parser.unescape(title)
    feed.save()

    for parsed_entry in parsed.entries:
        # Prefer the feed-supplied id; fall back to the item link.
        uuid = getattr(parsed_entry, "id", parsed_entry.link)
        try:
            entry = Entry.objects.get(uuid=uuid)
        except Entry.DoesNotExist:
            entry = Entry()
        entry.feed = feed
        entry.uuid = uuid
        entry.link = parsed_entry.link
        entry.title = html_parser.unescape(parsed_entry.title)
        entry.author = getattr(parsed_entry, "author", None)
        # NOTE(review): assumes every item carries updated_parsed; items
        # without a date will raise here — confirm against the feeds used.
        timestamp = time.mktime(parsed_entry.updated_parsed)
        entry.published = datetime.datetime.fromtimestamp(timestamp)
        # Prefer full content; fall back to the summary when absent.
        if hasattr(parsed_entry, "content"):
            entry.content = parsed_entry.content[0].value
        elif hasattr(parsed_entry, "summary"):
            entry.content = parsed_entry.summary
        entry.save()
def refresh_feed(feed_id):
    """Fetch a feed and upsert its entries, keyed by uuid.

    Looks up the Feed by primary key and parses its URI with feedparser.
    Malformed feeds (feedparser's ``bozo`` flag) are logged and skipped
    entirely.  Otherwise refreshes the stored feed title and creates or
    updates one Entry per parsed item (keyed on the item id, falling
    back to its link).
    """
    # Used to unescape html entities in titles.
    html_parser = HTMLParser.HTMLParser()

    feed = Feed.objects.get(pk=feed_id)
    parsed = feedparser.parse(feed.uri)
    if parsed.bozo:
        logger.warning('feedparser got bozo error. skipping feed')
        return

    parsed_feed = parsed.feed
    # Not every feed supplies a title; fall back to the URI.
    title = parsed_feed.title if hasattr(parsed_feed, 'title') else feed.uri
    feed.title = html_parser.unescape(title)
    feed.save()

    for parsed_entry in parsed.entries:
        # Prefer the feed-supplied id; fall back to the item link.
        uuid = getattr(parsed_entry, 'id', parsed_entry.link)
        try:
            entry = Entry.objects.get(uuid=uuid)
        except Entry.DoesNotExist:
            entry = Entry()
        entry.feed = feed
        entry.uuid = uuid
        entry.link = parsed_entry.link
        entry.title = html_parser.unescape(parsed_entry.title)
        entry.author = getattr(parsed_entry, 'author', None)
        # NOTE(review): assumes every item carries updated_parsed; items
        # without a date will raise here — confirm against the feeds used.
        timestamp = time.mktime(parsed_entry.updated_parsed)
        entry.published = datetime.datetime.fromtimestamp(timestamp)
        # Prefer full content; fall back to the summary when absent.
        if hasattr(parsed_entry, 'content'):
            entry.content = parsed_entry.content[0].value
        elif hasattr(parsed_entry, 'summary'):
            entry.content = parsed_entry.summary
        entry.save()