def reshare(self, entry, args=None):
    """Clone *entry* into a new self-post Entry and save it.

    Recognized keys in ``args``:
        sid: optional id of the 'selfposts' Service to post under;
            defaults to the first such service.
        as_me: truthy to attribute the re-share to *user* instead of
            the original author.
        user: the posting user (supplies the author name when as_me).

    Returns the saved Entry, or None if saving failed.
    """
    # Avoid the shared mutable-default-argument pitfall.
    args = args if args is not None else {}
    sid = args.get('sid', None)
    as_me = int(args.get('as_me', False))
    user = args.get('user', None)
    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))
    # Pick the target self-posts service.
    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]
    e = Entry(service=s, guid=guid)
    e.date_published = un
    e.date_updated = un
    # Work on a copy of the source content so the caller's entry object
    # is never mutated (the old code clobbered entry.content for
    # twitter re-shares).
    src_content = entry.content
    if as_me:
        if user and user.first_name and user.last_name:
            e.author_name = user.first_name + ' ' + user.last_name
        else:
            e.author_name = ''
        e.author_email = ''
        e.author_uri = ''
        if entry.service.api == 'greader':
            e.link = entry.link
        else:
            e.link = settings.BASE_URL + '/'
        if entry.service.api == 'twitter':
            # Strip the leading "username: " prefix when present;
            # previously a tweet without ': ' raised IndexError.
            parts = src_content.split(': ', 1)
            src_content = parts[1] if len(parts) > 1 else parts[0]
    else:
        e.author_name = entry.author_name
        e.author_email = entry.author_email
        e.author_uri = entry.author_uri
        e.link = entry.link
    e.geolat = entry.geolat
    e.geolng = entry.geolng
    e.mblob = entry.mblob
    e.title = entry.title
    # Render the content depending on the source service type.
    if entry.service.api == 'greader':
        e.content = '<a href="%s" rel="nofollow">%s</a>' % (e.link, e.title)
    elif entry.service.api in ('youtube', 'vimeo'):
        e.content = '<p>%s</p>%s' % (df_title(e.title), src_content)
    else:
        e.content = urlizetrunc(src_content, 45)
    try:
        media.transform_to_local(e)
        media.extract_and_register(e)
        e.save()
        return e
    except Exception:
        # Best-effort save: return None on failure, but no longer
        # swallow SystemExit/KeyboardInterrupt like the old bare except.
        pass
def reshare(self, entry, args=None):
    """Clone *entry* into a new self-post Entry and save it.

    Recognized keys in ``args``:
        sid: optional id of the 'selfposts' Service to post under;
            defaults to the first such service.
        as_me: truthy to attribute the re-share to *user* instead of
            the original author.
        user: the posting user (supplies the author name when as_me).

    Returns the saved Entry, or None if saving failed.
    """
    # Avoid the shared mutable-default-argument pitfall.
    args = args if args is not None else {}
    sid = args.get('sid', None)
    as_me = int(args.get('as_me', False))
    user = args.get('user', None)
    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))
    # Pick the target self-posts service.
    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]
    e = Entry(service=s, guid=guid)
    e.date_published = un
    e.date_updated = un
    # Work on a copy of the source content so the caller's entry object
    # is never mutated (the old code clobbered entry.content for
    # twitter re-shares).
    src_content = entry.content
    if as_me:
        if user and user.first_name and user.last_name:
            e.author_name = user.first_name + ' ' + user.last_name
        else:
            e.author_name = ''
        e.author_email = ''
        e.author_uri = ''
        if entry.service.api == 'greader':
            e.link = entry.link
        else:
            e.link = settings.BASE_URL + '/'
        if entry.service.api == 'twitter':
            # Strip the leading "username: " prefix when present;
            # previously a tweet without ': ' raised IndexError.
            parts = src_content.split(': ', 1)
            src_content = parts[1] if len(parts) > 1 else parts[0]
    else:
        e.author_name = entry.author_name
        e.author_email = entry.author_email
        e.author_uri = entry.author_uri
        e.link = entry.link
    e.geolat = entry.geolat
    e.geolng = entry.geolng
    e.mblob = entry.mblob
    e.title = entry.title
    # Render the content depending on the source service type.
    if entry.service.api == 'greader':
        e.content = '<a href="%s" rel="nofollow">%s</a>' % (e.link, e.title)
    elif entry.service.api in ('youtube', 'vimeo'):
        e.content = '<p>%s</p>%s' % (df_title(e.title), src_content)
    else:
        e.content = urlizetrunc(src_content, 45)
    try:
        media.transform_to_local(e)
        media.extract_and_register(e)
        e.save()
        return e
    except Exception:
        # Best-effort save: return None on failure, but no longer
        # swallow SystemExit/KeyboardInterrupt like the old bare except.
        pass
def process_userdid(self):
    """Process what user did.

    Walks self.json (vimeo activity items) and stores each 'like' as
    an Entry, skipping items that are already up to date or protected.
    """
    for ent in self.json:
        # Only 'like' activity items become entries.
        if 'type' in ent and ent['type'] == 'like':
            date = ent['date'][:10]
            guid = 'tag:vimeo,%s:clip%s' % (date, ent['video_id'])
            if self.verbose:
                print("ID: %s" % guid)
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                # Existing entry: skip unless forced or newer data arrived.
                if not self.force_overwrite and e.date_updated \
                        and mtime(ent['date']) <= e.date_updated:
                    continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)
            e.title = ent['video_title']
            e.link = ent['video_url']
            e.date_published = ent['date']
            e.date_updated = ent['date']
            e.author_name = ent['user_name']
            e.idata = 'liked'
            # Mirror the thumbnail locally for public services; use a
            # local variable instead of mutating the source json dict.
            thumbnail = ent['video_thumbnail_medium']
            if self.service.public:
                thumbnail = media.save_image(thumbnail)
            e.content = """<table class="vc"><tr><td><div id="vimeo-%s" class="play-video"><a href="%s" rel="nofollow"><img src="%s" width="200" height="150" alt="%s" /></a><div class="playbutton"></div></div></td></tr></table>""" % (
                ent['video_id'], e.link, thumbnail, ent['video_title'])
            mblob = media.mrss_init()
            mblob['content'].append([{
                'url': 'http://vimeo.com/moogaloop.swf?clip_id=%s' %
                ent['video_id'],
                'type': 'application/x-shockwave-flash',
                'medium': 'video'}])
            e.mblob = media.mrss_gen_json(mblob)
            try:
                e.save()
            except Exception:
                # Best-effort: a single failed save must not abort the
                # whole import run.
                pass
def process_userdid(self):
    """Process what user did.

    Walks self.json (vimeo activity items) and stores each 'like' as
    an Entry, skipping items that are already up to date or protected.
    """
    for ent in self.json:
        # Only 'like' activity items become entries.
        if 'type' in ent and ent['type'] == 'like':
            date = ent['date'][:10]
            guid = 'tag:vimeo,%s:clip%s' % (date, ent['video_id'])
            if self.verbose:
                print("ID: %s" % guid)
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                # Existing entry: skip unless forced or newer data arrived.
                if not self.force_overwrite and e.date_updated \
                        and mtime(ent['date']) <= e.date_updated:
                    continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)
            e.title = ent['video_title']
            e.link = ent['video_url']
            e.date_published = ent['date']
            e.date_updated = ent['date']
            e.author_name = ent['user_name']
            e.idata = 'liked'
            # Mirror the thumbnail locally for public services; use a
            # local variable instead of mutating the source json dict.
            thumbnail = ent['video_thumbnail_medium']
            if self.service.public:
                thumbnail = media.save_image(thumbnail)
            e.content = """<table class="vc"><tr><td><div id="vimeo-%s" class="play-video"><a href="%s" rel="nofollow"><img src="%s" width="200" height="150" alt="%s" /></a><div class="playbutton"></div></div></td></tr></table>""" % (
                ent['video_id'], e.link, thumbnail, ent['video_title'])
            mblob = media.mrss_init()
            mblob['content'].append([{
                'url': 'http://vimeo.com/moogaloop.swf?clip_id=%s' %
                ent['video_id'],
                'type': 'application/x-shockwave-flash',
                'medium': 'video'}])
            e.mblob = media.mrss_gen_json(mblob)
            try:
                e.save()
            except Exception:
                # Best-effort: a single failed save must not abort the
                # whole import run.
                pass
def process(self):
    """Store each parsed feed entry (self.fp.entries) as an Entry.

    Skips entries that are already up to date or protected; fills in
    author, dates, geo data, images and MRSS media metadata from the
    feedparser structures.
    """
    for ent in self.fp.entries:
        guid = ent.id if 'id' in ent else ent.link
        if self.verbose:
            print('ID: %s' % guid)
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            # Existing entry: skip unless forced or the feed has a
            # newer 'updated' timestamp.
            if not self.force_overwrite and 'updated_parsed' in ent:
                if e.date_updated and \
                        mtime(ent.updated_parsed) <= e.date_updated:
                    continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.title = ent.title
        # Prefer the original link when the feed went through FeedBurner.
        e.link = ent.get('feedburner_origlink', ent.get('link', ''))
        if 'author_detail' in ent:
            e.author_name = ent.author_detail.get('name', '')
            e.author_email = ent.author_detail.get('email', '')
            e.author_uri = ent.author_detail.get('href', '')
        else:
            e.author_name = ent.get('author', ent.get('creator', ''))
            # Fall back to the feed-level author information.
            if not e.author_name and 'author_detail' in self.fp.feed:
                e.author_name = self.fp.feed.author_detail.get('name', '')
                e.author_email = self.fp.feed.author_detail.get(
                    'email', '')
                e.author_uri = self.fp.feed.author_detail.get('href', '')
        try:
            e.content = ent.content[0].value
        except Exception:
            # No full content block; fall back to summary/description.
            e.content = ent.get('summary', ent.get('description', ''))
        if 'published_parsed' in ent:
            e.date_published = mtime(ent.published_parsed)
        elif 'updated_parsed' in ent:
            e.date_published = mtime(ent.updated_parsed)
        if 'updated_parsed' in ent:
            e.date_updated = mtime(ent.updated_parsed)
        # Geo data: W3C geo properties, or a GeoRSS "lat lng" point.
        if 'geo_lat' in ent and 'geo_long' in ent:
            e.geolat = ent.geo_lat
            e.geolng = ent.geo_long
        elif 'georss_point' in ent:
            geo = ent['georss_point'].split(' ')
            e.geolat = geo[0]
            e.geolng = geo[1]
        if 'image' in self.fp.feed:
            e.link_image = media.save_image(self.fp.feed.image.url)
        else:
            for link in ent.links:
                if link.rel == 'image' or link.rel == 'photo':
                    e.link_image = media.save_image(link.href)
        # Subclasses may hook in extra processing / a custom media blob.
        if hasattr(self, 'custom_process'):
            self.custom_process(e, ent)
        if hasattr(e, 'custom_mblob'):
            e.mblob = e.custom_mblob
        else:
            e.mblob = None
        mblob = media.mrss_init(e.mblob)
        if 'media_content' in ent:
            mblob['content'].append(ent.media_content)
        e.mblob = media.mrss_gen_json(mblob)
        e.content = strip_script(e.content)
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:
            # Best-effort: a single failed save must not abort the
            # whole import run.
            pass
def process(self):
    """Group photo feed entries posted in the same second into one Entry.

    Entries sharing the same 'updated' timestamp (to second precision)
    are rendered as a single thumbnail gallery attributed to the first
    entry of the group.
    """
    # groupby requires input sorted by the key; feed entries arrive in
    # chronological order, so same-timestamp items are adjacent.
    for key, group in groupby(self.fp.entries, lambda x: x.updated[0:19]):
        mblob = media.mrss_init()
        lgroup = 0
        content = '<p class="thumbnails">\n'
        first = True
        for ent in group:
            lgroup += 1
            if first:
                firstent = ent
                first = False
            if self.verbose:
                print("ID: %s" % ent.id)
            if 'media_thumbnail' in ent:
                tn = ent.media_thumbnail[0]
                if self.service.public:
                    tn['url'] = media.save_image(tn['url'])
                content += """ <a href="%s" rel="nofollow"><img src="%s" width="%s" height="%s" alt="thumbnail" /></a>\n""" % (
                    ent.link, tn['url'], tn['width'], tn['height'])
            if 'media_content' in ent:
                mblob['content'].append(ent.media_content)
        # The group's Entry is keyed on the first photo of the group.
        ent = firstent
        content += '</p>'
        # NOTE(review): guid is built here but the lookups below use
        # ent.id as the guid; kept as-is to preserve existing DB keys —
        # confirm whether the tag-URI form was intended.
        guid = 'tag:flickr.com,2004:/photo/%s' % ent.id
        try:
            e = Entry.objects.get(service=self.service, guid=ent.id)
            # Skip groups that are already up to date or protected.
            if not self.force_overwrite and 'updated_parsed' in ent:
                if e.date_updated and \
                        mtime(ent.updated_parsed) <= e.date_updated:
                    continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=ent.id)
        e.mblob = media.mrss_gen_json(mblob)
        if lgroup > 1:
            e.idata = 'grouped'
        e.link = self.service.link
        e.title = 'Posted Photos'
        e.content = content
        if 'published_parsed' in ent:
            e.date_published = mtime(ent.published_parsed)
        elif 'updated_parsed' in ent:
            e.date_published = mtime(ent.updated_parsed)
        if 'updated_parsed' in ent:
            e.date_updated = mtime(ent.updated_parsed)
        if 'image' in self.fp.feed:
            e.link_image = media.save_image(self.fp.feed.image.href)
        else:
            for link in ent.links:
                if link.rel == 'image':
                    e.link_image = media.save_image(link.href)
        try:
            e.save()
        except Exception:
            # Best-effort: a single failed save must not abort the
            # whole import run.
            pass
def share(self, args=None):
    """Create and save a new self-post Entry.

    Recognized keys in ``args``: content, sid, title, link, images,
    files (a MultiValueDict whose 'docs' list holds uploads), source,
    user, draft, friends_only.

    Returns the saved Entry, or None if saving failed.
    """
    # Avoid the shared mutable-default-argument pitfall.
    args = args if args is not None else {}
    content = args.get('content', '')
    sid = args.get('sid', None)
    title = args.get('title', None)
    link = args.get('link', None)
    images = args.get('images', None)
    files = args.get('files', MultiValueDict())
    source = args.get('source', '')
    user = args.get('user', None)
    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))
    # Pick the target self-posts service.
    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]
    e = Entry(service=s, guid=guid)
    e.link = link if link else settings.BASE_URL + '/'
    e.date_published = un
    e.date_updated = un
    e.draft = int(args.get('draft', False))
    e.friends_only = int(args.get('friends_only', False))
    if user and user.first_name and user.last_name:
        e.author_name = user.first_name + ' ' + user.last_name
    content = smart_text(content)
    # Bookmarklet posts arrive as raw HTML regardless of the configured
    # editor syntax.
    editor_syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
    if source == 'bookmarklet':
        editor_syntax = 'html'
    if editor_syntax == 'markdown' and markdown:
        e.content = expand.all(markdown.markdown(content))
    else:
        e.content = expand.all(content.replace('\n', '<br/>'))
    e.content = urlizetrunc(e.content, 45)
    e.content = strip_script(e.content)
    e.content = expand.imgloc(e.content)
    e.content = smart_text(e.content)
    if images:
        # Inline remote images as locally-saved, downscaled thumbnails.
        thumbs = '\n<p class="thumbnails">\n'
        for img in images:
            img = media.save_image(img, force=True, downscale=True)
            thumbs += """ <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n""" % (
                e.link, img)
        thumbs += '</p>\n'
        e.content += thumbs
    if title:
        e.title = smart_text(title)
    else:
        # Derive a title from the content when none was given.
        e.title = truncate.smart(strip_tags(e.content)).strip()
        if e.title == '':
            e.title = truncate.smart(strip_tags(content)).strip()
    mblob = media.mrss_scan(e.content)
    e.mblob = media.mrss_gen_json(mblob)
    try:
        e.save()
        pictures = []
        docs = []
        # Store uploaded files and split them into pictures vs. docs.
        for f in files.getlist('docs'):
            md = Media(entry=e)
            md.file.save(f.name, f)
            md.save()
            if f.content_type.startswith('image/'):
                pictures.append((md, f))
            else:
                docs.append((md, f))
        if pictures:
            thumbs = '\n<p class="thumbnails">\n'
            for o in pictures:
                thumb, orig = media.downsave_uploaded_image(o[0].file)
                thumbs += ' <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                    orig, thumb)
                mrss = {'url': orig, 'medium': 'image',
                        'fileSize': o[1].size}
                if orig.lower().endswith('.jpg'):
                    mrss['type'] = 'image/jpeg'
                mblob['content'].append([mrss])
            thumbs += '</p>\n'
            e.content += thumbs
        if docs:
            doc = '\n<ul class="files">\n'
            for o in docs:
                target = '[GLS-UPLOAD]/%s' % o[0].file.name.replace(
                    'upload/', '')
                doc += ' <li><a href="%s">%s</a> ' % (target, o[1].name)
                doc += '<span class="size">%s</span></li>\n' % \
                    bytes_to_human(o[1].size)
                mrss = {'url': target, 'fileSize': o[1].size}
                target = target.lower()
                # Guess the MRSS medium/type from the file extension.
                if target.endswith('.mp3'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/mpeg'
                elif target.endswith('.ogg'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/ogg'
                elif target.endswith('.avi'):
                    mrss['medium'] = 'video'
                    mrss['type'] = 'video/avi'
                elif target.endswith('.pdf'):
                    mrss['medium'] = 'document'
                    mrss['type'] = 'application/pdf'
                else:
                    mrss['medium'] = 'document'
                mblob['content'].append([mrss])
            doc += '</ul>\n'
            e.content += doc
        e.mblob = media.mrss_gen_json(mblob)
        # Re-save only when attachments changed the content/mblob.
        if pictures or docs:
            e.save()
            media.extract_and_register(e)
        return e
    except Exception:
        # Best-effort: return None on failure, but no longer swallow
        # SystemExit/KeyboardInterrupt like the old bare except.
        pass
def share(self, args=None):
    """Create and save a new self-post Entry.

    Recognized keys in ``args``: content, sid, title, link, images,
    files (a MultiValueDict whose 'docs' list holds uploads), source,
    user, draft, friends_only.

    Returns the saved Entry, or None if saving failed.
    """
    # Avoid the shared mutable-default-argument pitfall.
    args = args if args is not None else {}
    content = args.get('content', '')
    sid = args.get('sid', None)
    title = args.get('title', None)
    link = args.get('link', None)
    images = args.get('images', None)
    files = args.get('files', MultiValueDict())
    source = args.get('source', '')
    user = args.get('user', None)
    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))
    # Pick the target self-posts service.
    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]
    e = Entry(service=s, guid=guid)
    e.link = link if link else settings.BASE_URL + '/'
    e.date_published = un
    e.date_updated = un
    e.draft = int(args.get('draft', False))
    e.friends_only = int(args.get('friends_only', False))
    if user and user.first_name and user.last_name:
        e.author_name = user.first_name + ' ' + user.last_name
    content = smart_text(content)
    # Bookmarklet posts arrive as raw HTML regardless of the configured
    # editor syntax.
    editor_syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
    if source == 'bookmarklet':
        editor_syntax = 'html'
    if editor_syntax == 'markdown' and markdown:
        e.content = expand.all(markdown.markdown(content))
    else:
        e.content = expand.all(content.replace('\n', '<br/>'))
    e.content = urlizetrunc(e.content, 45)
    e.content = strip_script(e.content)
    e.content = expand.imgloc(e.content)
    e.content = smart_text(e.content)
    if images:
        # Inline remote images as locally-saved, downscaled thumbnails.
        thumbs = '\n<p class="thumbnails">\n'
        for img in images:
            img = media.save_image(img, force=True, downscale=True)
            thumbs += """ <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n""" % (
                e.link, img)
        thumbs += '</p>\n'
        e.content += thumbs
    if title:
        e.title = smart_text(title)
    else:
        # Derive a title from the content when none was given.
        e.title = truncate.smart(strip_tags(e.content)).strip()
        if e.title == '':
            e.title = truncate.smart(strip_tags(content)).strip()
    mblob = media.mrss_scan(e.content)
    e.mblob = media.mrss_gen_json(mblob)
    try:
        e.save()
        pictures = []
        docs = []
        # Store uploaded files and split them into pictures vs. docs.
        for f in files.getlist('docs'):
            md = Media(entry=e)
            md.file.save(f.name, f)
            md.save()
            if f.content_type.startswith('image/'):
                pictures.append((md, f))
            else:
                docs.append((md, f))
        if pictures:
            thumbs = '\n<p class="thumbnails">\n'
            for o in pictures:
                thumb, orig = media.downsave_uploaded_image(o[0].file)
                thumbs += ' <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                    orig, thumb)
                mrss = {'url': orig, 'medium': 'image',
                        'fileSize': o[1].size}
                if orig.lower().endswith('.jpg'):
                    mrss['type'] = 'image/jpeg'
                mblob['content'].append([mrss])
            thumbs += '</p>\n'
            e.content += thumbs
        if docs:
            doc = '\n<ul class="files">\n'
            for o in docs:
                target = '[GLS-UPLOAD]/%s' % o[0].file.name.replace(
                    'upload/', '')
                doc += ' <li><a href="%s">%s</a> ' % (target, o[1].name)
                doc += '<span class="size">%s</span></li>\n' % \
                    bytes_to_human(o[1].size)
                mrss = {'url': target, 'fileSize': o[1].size}
                target = target.lower()
                # Guess the MRSS medium/type from the file extension.
                if target.endswith('.mp3'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/mpeg'
                elif target.endswith('.ogg'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/ogg'
                elif target.endswith('.avi'):
                    mrss['medium'] = 'video'
                    mrss['type'] = 'video/avi'
                elif target.endswith('.pdf'):
                    mrss['medium'] = 'document'
                    mrss['type'] = 'application/pdf'
                else:
                    mrss['medium'] = 'document'
                mblob['content'].append([mrss])
            doc += '</ul>\n'
            e.content += doc
        e.mblob = media.mrss_gen_json(mblob)
        # Re-save only when attachments changed the content/mblob.
        if pictures or docs:
            e.save()
            media.extract_and_register(e)
        return e
    except Exception:
        # Best-effort: return None on failure, but no longer swallow
        # SystemExit/KeyboardInterrupt like the old bare except.
        pass