def process(self):
    """Import tweets from self.json into Entry rows for this service.

    Skips entries that are already up to date (unless
    self.force_overwrite is set) or marked protected; otherwise creates
    or refreshes an Entry per tweet, appending photo thumbnails when the
    tweet carries media entities.
    """
    for ent in self.json:
        guid = 'tag:twitter.com,2007:http://twitter.com/%s/statuses/%s' % \
            (ent['user']['screen_name'], ent['id'])
        if self.verbose:
            print("ID: %s" % guid)
        # Twitter's created_at format; offset is always +0000 (UTC).
        t = datetime.datetime.strptime(ent['created_at'],
                                       '%a %b %d %H:%M:%S +0000 %Y')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            # Existing entry: leave it alone if it is at least as new as
            # the tweet (and no forced overwrite), or if it is protected.
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = 'Tweet: %s' % truncate.smart(
            strip_entities(strip_tags(ent['text'])), max_length=40)
        e.title = e.title.replace('#', '').replace('@', '')
        e.link = 'https://twitter.com/%s/status/%s' % \
            (ent['user']['screen_name'], ent['id'])
        image_url = ent['user']['profile_image_url_https']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['user']['name']
        # double expand
        e.content = 'Tweet: %s' % expand.all(expand.shorturls(ent['text']))
        if 'entities' in ent and 'media' in ent['entities']:
            content = ' <p class="thumbnails">'
            # FIX: loop variable renamed from `t` so it no longer shadows
            # the tweet timestamp computed above.
            for item in ent['entities']['media']:
                if item['type'] == 'photo':
                    tsize = 'thumb'
                    if 'media_url_https' in item:
                        image_url = '%s:%s' % (item['media_url_https'], tsize)
                        large_url = '%s:large' % item['media_url_https']
                    else:
                        image_url = '%s:%s' % (item['media_url'], tsize)
                        large_url = item['media_url']
                    link = item['expanded_url']
                    if self.service.public:
                        image_url = media.save_image(image_url)
                    if 'sizes' in item and tsize in item['sizes']:
                        sizes = item['sizes'][tsize]
                        iwh = ' width="%d" height="%d"' % (sizes['w'],
                                                           sizes['h'])
                    else:
                        iwh = ''
                    content += '<a href="%s" rel="nofollow" data-imgurl="%s"><img src="%s"%s alt="thumbnail" /></a> ' % (
                        link, large_url, image_url, iwh)
            content += '</p>'
            e.content += content
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; still best-effort.
            pass
def custom_process(self, e, ent):
    """Fill entry *e* from feed item *ent*: tweet-style title, expanded
    body, and an MRSS media blob scanned out of the content."""
    short_title = truncate.smart(ent.title)
    e.title = 'Tweet: %s' % short_title
    body = expand.all(ent.summary)
    e.content = body
    e.custom_mblob = media.mrss_scan(body)
def process(self):
    """Import FriendFeed items from self.json['entries'] into Entry
    rows, rendering thumbnails and file attachments into the content.
    """
    for ent in self.json['entries']:
        # FriendFeed ids look like 'e-<32 hex chars>'; rebuild a UUID
        # from the hex part for a stable tag: URI.
        id = ent['id'][2:]
        uuid = '%s-%s-%s-%s-%s' % (id[0:8], id[8:12], id[12:16],
                                   id[16:20], id[20:])
        guid = 'tag:friendfeed.com,2007:%s' % uuid
        if self.verbose:
            print("ID: %s" % guid)
        t = datetime.datetime.strptime(ent['date'], '%Y-%m-%dT%H:%M:%SZ')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = truncate.smart(strip_entities(strip_tags(ent['body'])),
                                 max_length=40)
        e.link = ent['url']
        image_url = 'http://friendfeed-api.com/v2/picture/%s' % ent[
            'from']['id']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['from']['name']
        content = ent['body']
        if 'thumbnails' in ent:
            content += '<p class="thumbnails">'
            # FIX: loop variable renamed from `t` so it no longer
            # shadows the entry timestamp computed above.
            for tn in ent['thumbnails']:
                if self.service.public:
                    tn['url'] = media.save_image(tn['url'])
                if 'width' in tn and 'height' in tn:
                    iwh = ' width="%d" height="%d"' % (tn['width'],
                                                       tn['height'])
                else:
                    iwh = ''
                # Rewrite FriendFeed proxy links for YouTube thumbnails
                # to point straight at the video page.
                if 'friendfeed.com/e/' in tn['link'] and \
                   ('youtube.com' in tn['url'] or 'ytimg.com' in tn['url']):
                    m = re.search(r'/vi/([\-\w]+)/', tn['url'])
                    yid = m.groups()[0] if m else None
                    if yid:
                        tn['link'] = 'http://www.youtube.com/watch?v=%s' % yid
                content += '<a href="%s" rel="nofollow"><img src="%s"%s alt="thumbnail" /></a> ' % (
                    tn['link'], tn['url'], iwh)
            content += '</p>'
        if 'files' in ent:
            content += '<ul class="files">\n'
            for f in ent['files']:
                # Only direct FriendFeed media uploads are listed.
                if 'friendfeed-media' in f['url']:
                    content += ' <li><a href="%s" rel="nofollow">%s</a>' % (
                        f['url'], f['name'])
                    if 'size' in f:
                        content += ' <span class="size">%s</span>' % \
                            bytes_to_human(f['size'])
                    content += '</li>\n'
            content += '</ul>\n'
        e.content = content
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:
            # FIX: narrowed from a bare `except:`; keep best-effort save.
            pass
def custom_process(self, e, ent):
    """Populate *e* from *ent*: expanded content, scanned media blob,
    and a truncated tweet-style title."""
    e.content = expand.all(ent.summary)
    e.custom_mblob = media.mrss_scan(e.content)
    e.title = "Tweet: %s" % truncate.smart(ent.title)
def process(self):
    """Import Facebook stream posts from self.stream['data'] into Entry
    rows for this service."""
    for ent in self.stream['data']:
        guid = 'tag:facebook.com,2004:post/%s' % ent['id']
        if self.verbose:
            print("ID: %s" % guid)
        # Prefer the update time; fall back to the creation time.
        if 'updated_time' in ent:
            t = from_rfc3339(ent['updated_time'])
        else:
            t = from_rfc3339(ent['created_time'])
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.link = ent['actions'][0]['link']
        if 'from' in ent:
            frm = ent['from']
            image_url = 'http://graph.facebook.com/%s/picture' % frm['id']
            e.link_image = media.save_image(image_url, direct_image=False)
            e.author_name = frm['name']
        e.date_published = from_rfc3339(ent['created_time'])
        e.date_updated = t

        def extra_html(name):
            # Description/caption markup; was duplicated verbatim in
            # both branches below.
            if 'description' in ent:
                return '<div class="fb-description">%s</div>' % \
                    ent['description']
            elif 'caption' in ent and name != ent['caption']:
                return '<div class="fb-caption">%s</div>' % ent['caption']
            return ''

        content = ''
        if 'message' in ent:
            content = expand.shorts(ent['message'])
            content = '<p>' + urlizetrunc(content, 45) + '</p>'
        name = ''
        if 'name' in ent:
            name = ent['name']
            content += ' <p>' + ent['name'] + '</p>'
        if 'picture' in ent and 'link' in ent:
            content += '<p class="thumbnails">'
            content += '<a href="%s" rel="nofollow">' \
                '<img src="%s" alt="thumbnail" /></a> ' \
                % (ent['link'],
                   media.save_image(ent['picture'], downscale=True))
            content += extra_html(name)
            content += '</p>'
        else:
            content += extra_html(name)
        e.content = content
        if 'message' in ent:
            e.title = truncate.smart(strip_tags(ent['message']),
                                     max_length=48)
        if e.title == '':
            e.title = strip_entities(strip_tags(content))[0:128]
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:
            # FIX: narrowed from a bare `except:` clause; still
            # best-effort per entry.
            pass
def process(self):
    """Import FriendFeed items from self.json['entries'] into Entry
    rows, rendering thumbnails and file attachments into the content.
    """
    for ent in self.json['entries']:
        # FriendFeed ids look like 'e-<32 hex chars>'; rebuild a UUID
        # from the hex part for a stable tag: URI.
        id = ent['id'][2:]
        uuid = '%s-%s-%s-%s-%s' % (id[0:8], id[8:12], id[12:16],
                                   id[16:20], id[20:])
        guid = 'tag:friendfeed.com,2007:%s' % uuid
        if self.verbose:
            print("ID: %s" % guid)
        t = datetime.datetime.strptime(ent['date'], '%Y-%m-%dT%H:%M:%SZ')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = truncate.smart(
            strip_entities(strip_tags(ent['body'])), max_length=40)
        e.link = ent['url']
        image_url = 'http://friendfeed-api.com/v2/picture/%s' % ent[
            'from']['id']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['from']['name']
        content = ent['body']
        if 'thumbnails' in ent:
            content += '<p class="thumbnails">'
            # FIX: loop variable renamed from `t` so it no longer
            # shadows the entry timestamp computed above.
            for tn in ent['thumbnails']:
                if self.service.public:
                    tn['url'] = media.save_image(tn['url'])
                if 'width' in tn and 'height' in tn:
                    iwh = ' width="%d" height="%d"' % (tn['width'],
                                                       tn['height'])
                else:
                    iwh = ''
                # Rewrite FriendFeed proxy links for YouTube thumbnails
                # to point straight at the video page.
                if 'friendfeed.com/e/' in tn['link'] and \
                   ('youtube.com' in tn['url'] or 'ytimg.com' in tn['url']):
                    m = re.search(r'/vi/([\-\w]+)/', tn['url'])
                    yid = m.groups()[0] if m else None
                    if yid:
                        tn['link'] = 'http://www.youtube.com/watch?v=%s' % yid
                content += '<a href="%s" rel="nofollow"><img src="%s"%s alt="thumbnail" /></a> ' % (
                    tn['link'], tn['url'], iwh)
            content += '</p>'
        if 'files' in ent:
            content += '<ul class="files">\n'
            for f in ent['files']:
                # Only direct FriendFeed media uploads are listed.
                if 'friendfeed-media' in f['url']:
                    content += ' <li><a href="%s" rel="nofollow">%s</a>' % (
                        f['url'], f['name'])
                    if 'size' in f:
                        content += ' <span class="size">%s</span>' % \
                            bytes_to_human(f['size'])
                    content += '</li>\n'
            content += '</ul>\n'
        e.content = content
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:
            # FIX: narrowed from a bare `except:`; keep best-effort save.
            pass
def share(self, args=None):
    """Create and save a self-posted Entry built from *args*.

    Recognized keys in *args*: content, sid, title, link, images, files
    (a MultiValueDict of uploads under 'docs'), source, user, draft,
    friends_only.

    Returns the saved Entry, or None if saving failed.
    """
    # FIX: mutable default argument `args={}` replaced by a None
    # sentinel (backward compatible for all callers).
    if args is None:
        args = {}
    content = args.get('content', '')
    sid = args.get('sid', None)
    title = args.get('title', None)
    link = args.get('link', None)
    images = args.get('images', None)
    files = args.get('files', MultiValueDict())
    source = args.get('source', '')
    user = args.get('user', None)
    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))
    # Pick the explicit selfposts service, or the first one configured.
    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]
    e = Entry(service=s, guid=guid)
    e.link = link if link else settings.BASE_URL + '/'
    e.date_published = un
    e.date_updated = un
    e.draft = int(args.get('draft', False))
    e.friends_only = int(args.get('friends_only', False))
    if user and user.first_name and user.last_name:
        e.author_name = user.first_name + ' ' + user.last_name
    content = smart_text(content)
    editor_syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
    # Bookmarklet submissions always arrive as raw HTML.
    if source == 'bookmarklet':
        editor_syntax = 'html'
    if editor_syntax == 'markdown' and markdown:
        e.content = expand.all(markdown.markdown(content))
    else:
        e.content = expand.all(content.replace('\n', '<br/>'))
    e.content = urlizetrunc(e.content, 45)
    e.content = strip_script(e.content)
    e.content = expand.imgloc(e.content)
    e.content = smart_text(e.content)
    if images:
        thumbs = '\n<p class="thumbnails">\n'
        for img in images:
            img = media.save_image(img, force=True, downscale=True)
            thumbs += """ <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n""" % (
                e.link, img)
        thumbs += '</p>\n'
        e.content += thumbs
    if title:
        e.title = smart_text(title)
    else:
        e.title = truncate.smart(strip_tags(e.content)).strip()
    if e.title == '':
        e.title = truncate.smart(strip_tags(content)).strip()
    mblob = media.mrss_scan(e.content)
    e.mblob = media.mrss_gen_json(mblob)
    try:
        e.save()
        pictures = []
        docs = []
        # Register each uploaded file as a Media row, splitting images
        # from other documents.
        for f in files.getlist('docs'):
            md = Media(entry=e)
            md.file.save(f.name, f)
            md.save()
            if f.content_type.startswith('image/'):
                pictures.append((md, f))
            else:
                docs.append((md, f))
        if len(pictures):
            thumbs = '\n<p class="thumbnails">\n'
            for o in pictures:
                thumb, orig = media.downsave_uploaded_image(o[0].file)
                thumbs += ' <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                    orig, thumb)
                mrss = {'url': orig, 'medium': 'image', 'fileSize': o[1].size}
                if orig.lower().endswith('.jpg'):
                    mrss['type'] = 'image/jpeg'
                mblob['content'].append([mrss])
            thumbs += '</p>\n'
            e.content += thumbs
        if len(docs):
            doc = '\n<ul class="files">\n'
            for o in docs:
                target = '[GLS-UPLOAD]/%s' % o[
                    0].file.name.replace('upload/', '')
                doc += ' <li><a href="%s">%s</a> ' % (target, o[1].name)
                doc += '<span class="size">%s</span></li>\n' % \
                    bytes_to_human(o[1].size)
                mrss = {'url': target, 'fileSize': o[1].size}
                target = target.lower()
                # Guess MRSS medium/type from the file extension.
                if target.endswith('.mp3'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/mpeg'
                elif target.endswith('.ogg'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/ogg'
                elif target.endswith('.avi'):
                    mrss['medium'] = 'video'
                    mrss['type'] = 'video/avi'
                elif target.endswith('.pdf'):
                    mrss['medium'] = 'document'
                    mrss['type'] = 'application/pdf'
                else:
                    mrss['medium'] = 'document'
                mblob['content'].append([mrss])
            doc += '</ul>\n'
            e.content += doc
        e.mblob = media.mrss_gen_json(mblob)
        # Re-save only when attachments changed the content.
        if len(pictures) or len(docs):
            e.save()
        media.extract_and_register(e)
        return e
    except Exception:
        # FIX: narrowed from a bare `except:`; falls through to an
        # implicit None return on failure, as before.
        pass
def share(self, args=None):
    """Create and save a self-posted Entry built from *args*.

    Recognized keys in *args*: content, sid, title, link, images, files
    (a MultiValueDict of uploads under 'docs'), source, user, draft,
    friends_only.

    Returns the saved Entry, or None if saving failed.
    """
    # FIX: mutable default argument `args={}` replaced by a None
    # sentinel (backward compatible for all callers).
    if args is None:
        args = {}
    content = args.get('content', '')
    sid = args.get('sid', None)
    title = args.get('title', None)
    link = args.get('link', None)
    images = args.get('images', None)
    files = args.get('files', MultiValueDict())
    source = args.get('source', '')
    user = args.get('user', None)
    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))
    # Pick the explicit selfposts service, or the first one configured.
    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]
    e = Entry(service=s, guid=guid)
    e.link = link if link else settings.BASE_URL + '/'
    e.date_published = un
    e.date_updated = un
    e.draft = int(args.get('draft', False))
    e.friends_only = int(args.get('friends_only', False))
    if user and user.first_name and user.last_name:
        e.author_name = user.first_name + ' ' + user.last_name
    content = smart_text(content)
    editor_syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
    # Bookmarklet submissions always arrive as raw HTML.
    if source == 'bookmarklet':
        editor_syntax = 'html'
    if editor_syntax == 'markdown' and markdown:
        e.content = expand.all(markdown.markdown(content))
    else:
        e.content = expand.all(content.replace('\n', '<br/>'))
    e.content = urlizetrunc(e.content, 45)
    e.content = strip_script(e.content)
    e.content = expand.imgloc(e.content)
    e.content = smart_text(e.content)
    if images:
        thumbs = '\n<p class="thumbnails">\n'
        for img in images:
            img = media.save_image(img, force=True, downscale=True)
            thumbs += """ <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n""" % (
                e.link, img)
        thumbs += '</p>\n'
        e.content += thumbs
    if title:
        e.title = smart_text(title)
    else:
        e.title = truncate.smart(strip_tags(e.content)).strip()
    if e.title == '':
        e.title = truncate.smart(strip_tags(content)).strip()
    mblob = media.mrss_scan(e.content)
    e.mblob = media.mrss_gen_json(mblob)
    try:
        e.save()
        pictures = []
        docs = []
        # Register each uploaded file as a Media row, splitting images
        # from other documents.
        for f in files.getlist('docs'):
            md = Media(entry=e)
            md.file.save(f.name, f)
            md.save()
            if f.content_type.startswith('image/'):
                pictures.append((md, f))
            else:
                docs.append((md, f))
        if len(pictures):
            thumbs = '\n<p class="thumbnails">\n'
            for o in pictures:
                thumb, orig = media.downsave_uploaded_image(o[0].file)
                thumbs += ' <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                    orig, thumb)
                mrss = {
                    'url': orig,
                    'medium': 'image',
                    'fileSize': o[1].size
                }
                if orig.lower().endswith('.jpg'):
                    mrss['type'] = 'image/jpeg'
                mblob['content'].append([mrss])
            thumbs += '</p>\n'
            e.content += thumbs
        if len(docs):
            doc = '\n<ul class="files">\n'
            for o in docs:
                target = '[GLS-UPLOAD]/%s' % o[0].file.name.replace(
                    'upload/', '')
                doc += ' <li><a href="%s">%s</a> ' % (target, o[1].name)
                doc += '<span class="size">%s</span></li>\n' % \
                    bytes_to_human(o[1].size)
                mrss = {'url': target, 'fileSize': o[1].size}
                target = target.lower()
                # Guess MRSS medium/type from the file extension.
                if target.endswith('.mp3'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/mpeg'
                elif target.endswith('.ogg'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/ogg'
                elif target.endswith('.avi'):
                    mrss['medium'] = 'video'
                    mrss['type'] = 'video/avi'
                elif target.endswith('.pdf'):
                    mrss['medium'] = 'document'
                    mrss['type'] = 'application/pdf'
                else:
                    mrss['medium'] = 'document'
                mblob['content'].append([mrss])
            doc += '</ul>\n'
            e.content += doc
        e.mblob = media.mrss_gen_json(mblob)
        # Re-save only when attachments changed the content.
        if len(pictures) or len(docs):
            e.save()
        media.extract_and_register(e)
        return e
    except Exception:
        # FIX: narrowed from a bare `except:`; falls through to an
        # implicit None return on failure, as before.
        pass