def reshare(self, entry, args=None):
    """Re-share an existing *entry* as a new self-post.

    Optional *args* keys:
      sid    -- id of the selfposts Service to post under (default: the
                first selfposts service by id).
      as_me  -- truthy: attribute the reshare to *user* instead of the
                original author.
      user   -- Django user; supplies the author name when as_me is set.

    Returns the saved Entry, or None if media handling/saving fails.
    """
    # A mutable default argument ({}) is shared across calls; use None
    # as the sentinel instead.  Callers passing a dict are unaffected.
    if args is None:
        args = {}
    sid = args.get('sid', None)
    as_me = int(args.get('as_me', False))
    user = args.get('user', None)

    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))

    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]

    e = Entry(service=s, guid=guid)
    e.date_published = un
    e.date_updated = un

    # Work on a local copy instead of mutating the caller's entry.
    content = entry.content

    if as_me:
        if user and user.first_name and user.last_name:
            e.author_name = user.first_name + ' ' + user.last_name
        else:
            e.author_name = ''
        e.author_email = ''
        e.author_uri = ''
        if entry.service.api == 'greader':
            e.link = entry.link
        else:
            e.link = settings.BASE_URL + '/'
        if entry.service.api == 'twitter':
            # Strip the leading "username: " prefix; [-1] (instead of
            # [1]) keeps this safe when no separator is present.
            content = content.split(': ', 1)[-1]
    else:
        e.author_name = entry.author_name
        e.author_email = entry.author_email
        e.author_uri = entry.author_uri
        e.link = entry.link

    e.geolat = entry.geolat
    e.geolng = entry.geolng
    e.mblob = entry.mblob
    e.title = entry.title

    if entry.service.api == 'greader':
        e.content = '<a href="%s" rel="nofollow">%s</a>' % (e.link, e.title)
    elif entry.service.api in ('youtube', 'vimeo'):
        e.content = '<p>%s</p>%s' % (df_title(e.title), content)
    else:
        e.content = urlizetrunc(content, 45)

    try:
        media.transform_to_local(e)
        media.extract_and_register(e)
        e.save()
        return e
    except Exception:  # was a bare except; keep best-effort semantics
        return None
def reshare(self, entry, args={}):
    """Create a self-post entry that re-shares *entry*.

    Recognised *args* keys: 'sid' (target selfposts service id),
    'as_me' (attribute the post to 'user' instead of the original
    author) and 'user'.  Returns the saved Entry, or None on failure.
    """
    sid = args.get('sid', None)
    as_me = int(args.get('as_me', False))
    user = args.get('user', None)

    now = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            now.strftime('%Y-%m-%dT%H:%M:%SZ'))

    if sid:
        svc = Service.objects.get(id=sid, api='selfposts')
    else:
        svc = Service.objects.filter(api='selfposts').order_by('id')[0]

    post = Entry(service=svc, guid=guid)
    post.date_published = now
    post.date_updated = now

    src_api = entry.service.api
    if as_me:
        if user and user.first_name and user.last_name:
            post.author_name = user.first_name + ' ' + user.last_name
        else:
            post.author_name = ''
        post.author_email = ''
        post.author_uri = ''
        post.link = entry.link if src_api == 'greader' \
            else settings.BASE_URL + '/'
        if src_api == 'twitter':
            entry.content = entry.content.split(': ', 1)[1]
    else:
        post.author_name = entry.author_name
        post.author_email = entry.author_email
        post.author_uri = entry.author_uri
        post.link = entry.link

    # Copy the location, media blob and title verbatim.
    for attr in ('geolat', 'geolng', 'mblob', 'title'):
        setattr(post, attr, getattr(entry, attr))

    if src_api == 'greader':
        post.content = '<a href="%s" rel="nofollow">%s</a>' % (post.link,
                                                               post.title)
    elif src_api in ('youtube', 'vimeo'):
        post.content = '<p>%s</p>%s' % (df_title(post.title), entry.content)
    else:
        post.content = urlizetrunc(entry.content, 45)

    try:
        media.transform_to_local(post)
        media.extract_and_register(post)
        post.save()
        return post
    except:
        pass
def process_videos(self):
    """Process videos uploaded by user.

    Creates or updates one Entry per video in self.json, skipping
    entries that are protected or already current (unless
    force_overwrite is set).  Save failures are swallowed so one bad
    video does not abort the whole import.
    """
    for ent in self.json:
        date = ent['upload_date'][:10]
        guid = 'tag:vimeo,%s:clip%s' % (date, ent['id'])
        if self.verbose:
            print("ID: %s" % guid)
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            # Skip when the stored copy is current or must not change.
            if not self.force_overwrite and e.date_updated \
               and mtime(ent['upload_date']) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.title = ent['title']
        e.link = ent['url']
        e.date_published = ent['upload_date']
        e.date_updated = ent['upload_date']
        e.author_name = ent['user_name']
        if self.service.public:
            # Mirror the thumbnail locally for public services.
            ent['thumbnail_medium'] = media.save_image(
                ent['thumbnail_medium'])
        e.content = """<table class="vc"><tr><td><div id="vimeo-%s" class="play-video"><a href="%s" rel="nofollow"><img src="%s" width="200" height="150" alt="%s" /></a><div class="playbutton"></div></div></td></tr></table>""" % (
            ent['id'], e.link, ent['thumbnail_medium'], ent['title'])
        mblob = media.mrss_init()
        mblob['content'].append([{
            'url': 'http://vimeo.com/moogaloop.swf?clip_id=%s' % ent['id'],
            'type': 'application/x-shockwave-flash',
            'medium': 'video'
        }])
        e.mblob = media.mrss_gen_json(mblob)
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:  # was a bare except; keep best-effort semantics
            pass
def process_videos(self):
    """Process videos uploaded by user."""
    for vid in self.json:
        day = vid['upload_date'][:10]
        guid = 'tag:vimeo,%s:clip%s' % (day, vid['id'])
        if self.verbose:
            print("ID: %s" % guid)
        try:
            entry = Entry.objects.get(service=self.service, guid=guid)
        except Entry.DoesNotExist:
            entry = Entry(service=self.service, guid=guid)
        else:
            # Existing entry: leave it alone when current or protected.
            if not self.force_overwrite and entry.date_updated \
               and mtime(vid['upload_date']) <= entry.date_updated:
                continue
            if entry.protected:
                continue
        entry.title = vid['title']
        entry.link = vid['url']
        entry.date_published = vid['upload_date']
        entry.date_updated = vid['upload_date']
        entry.author_name = vid['user_name']
        if self.service.public:
            vid['thumbnail_medium'] = media.save_image(
                vid['thumbnail_medium'])
        entry.content = (
            '<table class="vc"><tr><td><div id="vimeo-%s" class="play-video">'
            '<a href="%s" rel="nofollow"><img src="%s" width="200"'
            ' height="150" alt="%s" /></a><div class="playbutton"></div>'
            '</div></td></tr></table>'
            % (vid['id'], entry.link, vid['thumbnail_medium'], vid['title']))
        blob = media.mrss_init()
        blob['content'].append([{
            'url': 'http://vimeo.com/moogaloop.swf?clip_id=%s' % vid['id'],
            'type': 'application/x-shockwave-flash',
            'medium': 'video'
        }])
        entry.mblob = media.mrss_gen_json(blob)
        try:
            entry.save()
            media.extract_and_register(entry)
        except:
            pass
def process(self):
    """Import tweets from self.json into Entry objects.

    Skips entries that are protected or already current; save errors
    are swallowed so a single bad tweet does not stop the import.
    """
    for ent in self.json:
        guid = 'tag:twitter.com,2007:http://twitter.com/%s/statuses/%s' % \
            (ent['user']['screen_name'], ent['id'])
        if self.verbose:
            print("ID: %s" % guid)
        t = datetime.datetime.strptime(ent['created_at'],
                                       '%a %b %d %H:%M:%S +0000 %Y')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = 'Tweet: %s' % truncate.smart(
            strip_entities(strip_tags(ent['text'])), max_length=40)
        e.title = e.title.replace('#', '').replace('@', '')
        e.link = 'https://twitter.com/%s/status/%s' % \
            (ent['user']['screen_name'], ent['id'])
        image_url = ent['user']['profile_image_url_https']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['user']['name']
        # double expand
        e.content = 'Tweet: %s' % expand.all(expand.shorturls(ent['text']))
        if 'entities' in ent and 'media' in ent['entities']:
            content = ' <p class="thumbnails">'
            # Use a dedicated loop variable; the original reused `t`,
            # clobbering the tweet timestamp parsed above.
            for item in ent['entities']['media']:
                if item['type'] == 'photo':
                    tsize = 'thumb'
                    if 'media_url_https' in item:
                        image_url = '%s:%s' % (item['media_url_https'],
                                               tsize)
                        large_url = '%s:large' % item['media_url_https']
                    else:
                        image_url = '%s:%s' % (item['media_url'], tsize)
                        large_url = item['media_url']
                    link = item['expanded_url']
                    if self.service.public:
                        image_url = media.save_image(image_url)
                    if 'sizes' in item and tsize in item['sizes']:
                        sizes = item['sizes'][tsize]
                        iwh = ' width="%d" height="%d"' % (sizes['w'],
                                                           sizes['h'])
                    else:
                        iwh = ''
                    content += '<a href="%s" rel="nofollow" data-imgurl="%s"><img src="%s"%s alt="thumbnail" /></a> ' % (
                        link, large_url, image_url, iwh)
            content += '</p>'
            e.content += content
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:  # was a bare except; keep best-effort semantics
            pass
def process(self):
    """Import FriendFeed entries (self.json['entries']) into Entries."""
    for ent in self.json['entries']:
        # FriendFeed ids look like 'e/<32 hex digits>'; rebuild the
        # dashed UUID form for a stable tag: URI.  (Renamed from `id`,
        # which shadowed the builtin.)
        hexid = ent['id'][2:]
        uuid = '%s-%s-%s-%s-%s' % (hexid[0:8], hexid[8:12], hexid[12:16],
                                   hexid[16:20], hexid[20:])
        guid = 'tag:friendfeed.com,2007:%s' % uuid
        if self.verbose:
            print("ID: %s" % guid)
        t = datetime.datetime.strptime(ent['date'], '%Y-%m-%dT%H:%M:%SZ')
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.title = truncate.smart(strip_entities(strip_tags(ent['body'])),
                                 max_length=40)
        e.link = ent['url']
        image_url = 'http://friendfeed-api.com/v2/picture/%s' % \
            ent['from']['id']
        e.link_image = media.save_image(image_url, direct_image=False)
        e.date_published = t
        e.date_updated = t
        e.author_name = ent['from']['name']
        content = ent['body']
        if 'thumbnails' in ent:
            content += '<p class="thumbnails">'
            # Use a dedicated loop variable; the original reused `t`,
            # clobbering the entry timestamp parsed above.
            for tn in ent['thumbnails']:
                if self.service.public:
                    tn['url'] = media.save_image(tn['url'])
                if 'width' in tn and 'height' in tn:
                    iwh = ' width="%d" height="%d"' % (tn['width'],
                                                       tn['height'])
                else:
                    iwh = ''
                if 'friendfeed.com/e/' in tn['link'] and \
                   ('youtube.com' in tn['url'] or 'ytimg.com' in tn['url']):
                    # Rewrite FriendFeed proxy links to the real video.
                    m = re.search(r'/vi/([\-\w]+)/', tn['url'])
                    yid = m.groups()[0] if m else None
                    if yid:
                        tn['link'] = 'http://www.youtube.com/watch?v=%s' % yid
                content += '<a href="%s" rel="nofollow"><img src="%s"%s alt="thumbnail" /></a> ' % (
                    tn['link'], tn['url'], iwh)
            content += '</p>'
        if 'files' in ent:
            content += '<ul class="files">\n'
            for f in ent['files']:
                if 'friendfeed-media' in f['url']:
                    content += ' <li><a href="%s" rel="nofollow">%s</a>' % (
                        f['url'], f['name'])
                    if 'size' in f:
                        content += ' <span class="size">%s</span>' % \
                            bytes_to_human(f['size'])
                    content += '</li>\n'
            content += '</ul>\n'
        e.content = content
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:  # was a bare except; keep best-effort semantics
            pass
def api(request, **args):
    """AJAX dispatcher for entry commands.

    The command name comes from args['cmd']; the target entry id from
    POST 'entry'.  Staff users may run any command; anonymous and
    non-staff users are limited to 'getcontent'.  Lookup/save errors
    are swallowed and answered with an empty 200 response.
    """
    cmd = args.get('cmd', '')
    entry = request.POST.get('entry', None)
    authed = request.user.is_authenticated() and request.user.is_staff
    friend = request.user.is_authenticated() and not request.user.is_staff
    if not authed and cmd != 'getcontent':
        return HttpResponseForbidden()

    if cmd == 'hide' and entry:
        Entry.objects.filter(id=int(entry)).update(active=False)
    elif cmd == 'unhide' and entry:
        Entry.objects.filter(id=int(entry)).update(active=True)
    elif cmd == 'gsc':  # get selfposts classes
        _srvs = Service.objects.filter(api='selfposts') \
            .order_by('cls').values('id', 'cls')
        # Keep only the first service per class name.
        srvs = {}
        for item in _srvs:
            if item['cls'] not in srvs:
                srvs[item['cls']] = item
        d = [{'id': s['id'], 'cls': s['cls']} for s in srvs.values()]
        return HttpResponse(json.dumps(d), content_type='application/json')
    elif cmd == 'share':
        images = []
        for i in range(0, 5):
            img = request.POST.get('image' + str(i), None)
            if img:
                images.append(img)
        source = request.POST.get('from', '')
        entry = selfposts.API(False).share({
            'content': request.POST.get('content', ''),
            'sid': request.POST.get('sid', None),
            'draft': request.POST.get('draft', False),
            'friends_only': request.POST.get('friends_only', False),
            'link': request.POST.get('link', None),
            'images': images,
            'files': request.FILES,
            'source': source,
            'user': request.user
        })
        if entry:
            if not entry.draft:
                pshb.publish()
            if source == 'bookmarklet':
                d = {'close_msg': _(
                    "You've successfully shared this web page at your stream.")}
                return HttpResponse(json.dumps(d),
                                    content_type='application/json')
            else:
                entry.friends_only = False
                if request.is_ajax():
                    return render_to_response('stream-pure.html',
                                              {'entries': (entry,),
                                               'authed': authed})
                else:
                    return HttpResponseRedirect(settings.BASE_URL + '/')
    elif cmd == 'reshare' and entry:
        try:
            entry = Entry.objects.get(id=int(entry))
            if entry:
                entry = selfposts.API(False).reshare(
                    entry, {'as_me': request.POST.get('as_me', False),
                            'user': request.user})
                if entry:
                    pshb.publish()
                    return render_to_response('stream-pure.html',
                                              {'entries': (entry,),
                                               'authed': authed})
        except Exception:
            pass
    elif cmd == 'favorite':
        try:
            # Guard the missing-id case explicitly (int(None) used to
            # raise TypeError and be silently swallowed below).
            if entry:
                entry = Entry.objects.get(id=int(entry))
                try:
                    fav = Favorite.objects.get(user=request.user,
                                               entry=entry)
                except Favorite.DoesNotExist:
                    # First favorite for this entry: record it and
                    # localize its media.
                    fav = Favorite(user=request.user, entry=entry)
                    fav.save()
                    media.transform_to_local(entry)
                    media.extract_and_register(entry)
                    entry.save()
        except Exception:
            pass
    elif cmd == 'unfavorite':
        try:
            if entry:
                entry = Entry.objects.get(id=int(entry))
                if entry:
                    Favorite.objects.get(user=request.user,
                                         entry=entry).delete()
        except Exception:
            pass
    elif cmd == 'getcontent':
        try:
            if entry:
                if not authed:
                    # Anonymous/non-staff: public services only.
                    entry = Entry.objects.get(id=int(entry),
                                              service__public=True)
                else:
                    entry = Entry.objects.get(id=int(entry))
                if entry:
                    if request.POST.get('raw', False) and authed:
                        return HttpResponse(entry.content)
                    if authed or friend:
                        entry.friends_only = False
                    content = fix_ampersands(gls_content('', entry))
                    return HttpResponse(content)
        except Exception:
            pass
    elif cmd == 'putcontent':
        try:
            if entry and authed:
                content = request.POST.get('content', '')
                if content:
                    Entry.objects.filter(id=int(entry)).update(
                        content=content)
                    entry = Entry.objects.get(id=int(entry))
                    if entry:
                        content = fix_ampersands(gls_content('', entry))
                        return HttpResponse(content)
        except Exception:
            pass
    return HttpResponse()
def process(self):
    """Import Facebook stream posts (self.stream['data']) into Entries."""
    for ent in self.stream['data']:
        guid = 'tag:facebook.com,2004:post/%s' % ent['id']
        if self.verbose:
            print("ID: %s" % guid)
        # Prefer the update time for change detection; fall back to the
        # creation time.
        if 'updated_time' in ent:
            t = from_rfc3339(ent['updated_time'])
        else:
            t = from_rfc3339(ent['created_time'])
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and \
               e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.guid = guid
        e.link = ent['actions'][0]['link']
        if 'from' in ent:
            frm = ent['from']
            image_url = 'http://graph.facebook.com/%s/picture' % frm['id']
            e.link_image = media.save_image(image_url, direct_image=False)
            e.author_name = frm['name']
        e.date_published = from_rfc3339(ent['created_time'])
        e.date_updated = t
        content = ''
        if 'message' in ent:
            content = expand.shorts(ent['message'])
            content = '<p>' + urlizetrunc(content, 45) + '</p>'
        name = ''
        if 'name' in ent:
            name = ent['name']
            content += ' <p>' + ent['name'] + '</p>'
        if 'picture' in ent and 'link' in ent:
            content += '<p class="thumbnails">'
            content += '<a href="%s" rel="nofollow">' \
                       '<img src="%s" alt="thumbnail" /></a> ' \
                       % (ent['link'], media.save_image(ent['picture'],
                                                        downscale=True))
            if 'description' in ent:
                content += '<div class="fb-description">%s</div>' % \
                    ent['description']
            elif 'caption' in ent and name != ent['caption']:
                # Skip the caption when it just repeats the name.
                content += '<div class="fb-caption">%s</div>' % \
                    ent['caption']
            content += '</p>'
        else:
            if 'description' in ent:
                content += '<div class="fb-description">%s</div>' % \
                    ent['description']
            elif 'caption' in ent and name != ent['caption']:
                content += '<div class="fb-caption">%s</div>' % \
                    ent['caption']
        e.content = content
        if 'message' in ent:
            e.title = truncate.smart(strip_tags(ent['message']),
                                     max_length=48)
        if e.title == '':
            e.title = strip_entities(strip_tags(content))[0:128]
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:  # was a bare except; keep best-effort semantics
            pass
def process(self):
    """Import feedparser entries (self.fp.entries) into Entry objects."""
    for ent in self.fp.entries:
        guid = ent.id if 'id' in ent else ent.link
        if self.verbose:
            print('ID: %s' % guid)
        try:
            e = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and 'updated_parsed' in ent:
                if e.date_updated and \
                   mtime(ent.updated_parsed) <= e.date_updated:
                    continue
            if e.protected:
                continue
        except Entry.DoesNotExist:
            e = Entry(service=self.service, guid=guid)
        e.title = ent.title
        # Prefer the original link when the feed is FeedBurner-wrapped.
        e.link = ent.get('feedburner_origlink', ent.get('link', ''))
        if 'author_detail' in ent:
            e.author_name = ent.author_detail.get('name', '')
            e.author_email = ent.author_detail.get('email', '')
            e.author_uri = ent.author_detail.get('href', '')
        else:
            e.author_name = ent.get('author', ent.get('creator', ''))
            if not e.author_name and 'author_detail' in self.fp.feed:
                # Fall back to the feed-level author.
                e.author_name = self.fp.feed.author_detail.get('name', '')
                e.author_email = self.fp.feed.author_detail.get(
                    'email', '')
                e.author_uri = self.fp.feed.author_detail.get('href', '')
        try:
            e.content = ent.content[0].value
        except (AttributeError, IndexError, KeyError):
            # No full content element (was a bare except); fall back to
            # summary/description.
            e.content = ent.get('summary', ent.get('description', ''))
        if 'published_parsed' in ent:
            e.date_published = mtime(ent.published_parsed)
        elif 'updated_parsed' in ent:
            e.date_published = mtime(ent.updated_parsed)
        if 'updated_parsed' in ent:
            e.date_updated = mtime(ent.updated_parsed)
        if 'geo_lat' in ent and 'geo_long' in ent:
            e.geolat = ent.geo_lat
            e.geolng = ent.geo_long
        elif 'georss_point' in ent:
            geo = ent['georss_point'].split(' ')
            e.geolat = geo[0]
            e.geolng = geo[1]
        if 'image' in self.fp.feed:
            e.link_image = media.save_image(self.fp.feed.image.url)
        else:
            for link in ent.links:
                if link.rel == 'image' or link.rel == 'photo':
                    e.link_image = media.save_image(link.href)
        # Service subclasses may hook extra per-entry processing.
        if hasattr(self, 'custom_process'):
            self.custom_process(e, ent)
        if hasattr(e, 'custom_mblob'):
            e.mblob = e.custom_mblob
        else:
            e.mblob = None
        mblob = media.mrss_init(e.mblob)
        if 'media_content' in ent:
            mblob['content'].append(ent.media_content)
        e.mblob = media.mrss_gen_json(mblob)
        e.content = strip_script(e.content)
        try:
            e.save()
            media.extract_and_register(e)
        except Exception:  # was a bare except; keep best-effort semantics
            pass
def process(self):
    """Turn FriendFeed API entries into local stream entries."""
    for item in self.json['entries']:
        # Rebuild the dashed UUID from the 'e/<hex>' style id.
        raw = item['id'][2:]
        parts = (raw[0:8], raw[8:12], raw[12:16], raw[16:20], raw[20:])
        uuid = '%s-%s-%s-%s-%s' % parts
        guid = 'tag:friendfeed.com,2007:%s' % uuid
        if self.verbose:
            print("ID: %s" % guid)
        stamp = datetime.datetime.strptime(item['date'],
                                           '%Y-%m-%dT%H:%M:%SZ')
        try:
            entry = Entry.objects.get(service=self.service, guid=guid)
            if not self.force_overwrite and entry.date_updated \
               and mtime(stamp.timetuple()) <= entry.date_updated:
                continue
            if entry.protected:
                continue
        except Entry.DoesNotExist:
            entry = Entry(service=self.service, guid=guid)
        entry.guid = guid
        entry.title = truncate.smart(
            strip_entities(strip_tags(item['body'])), max_length=40)
        entry.link = item['url']
        avatar = 'http://friendfeed-api.com/v2/picture/%s' % \
            item['from']['id']
        entry.link_image = media.save_image(avatar, direct_image=False)
        entry.date_published = stamp
        entry.date_updated = stamp
        entry.author_name = item['from']['name']
        body = item['body']
        if 'thumbnails' in item:
            body += '<p class="thumbnails">'
            for t in item['thumbnails']:
                if self.service.public:
                    t['url'] = media.save_image(t['url'])
                iwh = ''
                if 'width' in t and 'height' in t:
                    iwh = ' width="%d" height="%d"' % (t['width'],
                                                       t['height'])
                is_proxy = 'friendfeed.com/e/' in t['link']
                is_youtube = ('youtube.com' in t['url'] or
                              'ytimg.com' in t['url'])
                if is_proxy and is_youtube:
                    m = re.search(r'/vi/([\-\w]+)/', t['url'])
                    yid = m.groups()[0] if m else None
                    if yid:
                        t['link'] = 'http://www.youtube.com/watch?v=%s' % yid
                body += '<a href="%s" rel="nofollow"><img src="%s"%s alt="thumbnail" /></a> ' % (
                    t['link'], t['url'], iwh)
            body += '</p>'
        if 'files' in item:
            body += '<ul class="files">\n'
            for f in item['files']:
                if 'friendfeed-media' not in f['url']:
                    continue
                body += ' <li><a href="%s" rel="nofollow">%s</a>' % (
                    f['url'], f['name'])
                if 'size' in f:
                    body += ' <span class="size">%s</span>' % \
                        bytes_to_human(f['size'])
                body += '</li>\n'
            body += '</ul>\n'
        entry.content = body
        try:
            entry.save()
            media.extract_and_register(entry)
        except:
            pass
def share(self, args=None):
    """Create and save a new self-post Entry.

    Optional *args* keys: content, sid, title, link, images, files
    (MultiValueDict with uploads under 'docs'), source, user, draft,
    friends_only.  Returns the saved Entry, or None on failure.
    """
    # A mutable default argument ({}) is shared across calls; use None
    # as the sentinel instead.  Callers passing a dict are unaffected.
    if args is None:
        args = {}
    content = args.get('content', '')
    sid = args.get('sid', None)
    title = args.get('title', None)
    link = args.get('link', None)
    images = args.get('images', None)
    files = args.get('files', MultiValueDict())
    source = args.get('source', '')
    user = args.get('user', None)

    un = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            un.strftime('%Y-%m-%dT%H:%M:%SZ'))
    if sid:
        s = Service.objects.get(id=sid, api='selfposts')
    else:
        s = Service.objects.filter(api='selfposts').order_by('id')[0]

    e = Entry(service=s, guid=guid)
    e.link = link if link else settings.BASE_URL + '/'
    e.date_published = un
    e.date_updated = un
    e.draft = int(args.get('draft', False))
    e.friends_only = int(args.get('friends_only', False))
    if user and user.first_name and user.last_name:
        e.author_name = user.first_name + ' ' + user.last_name

    content = smart_text(content)
    editor_syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
    if source == 'bookmarklet':
        # Bookmarklet submissions already contain HTML.
        editor_syntax = 'html'
    if editor_syntax == 'markdown' and markdown:
        e.content = expand.all(markdown.markdown(content))
    else:
        e.content = expand.all(content.replace('\n', '<br/>'))
    e.content = urlizetrunc(e.content, 45)
    e.content = strip_script(e.content)
    e.content = expand.imgloc(e.content)
    e.content = smart_text(e.content)

    if images:
        thumbs = '\n<p class="thumbnails">\n'
        for img in images:
            img = media.save_image(img, force=True, downscale=True)
            thumbs += ' <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n' % (
                e.link, img)
        thumbs += '</p>\n'
        e.content += thumbs

    if title:
        e.title = smart_text(title)
    else:
        # Derive a title from the rendered content, then from the raw
        # source text if that came out empty.
        e.title = truncate.smart(strip_tags(e.content)).strip()
        if e.title == '':
            e.title = truncate.smart(strip_tags(content)).strip()

    mblob = media.mrss_scan(e.content)
    e.mblob = media.mrss_gen_json(mblob)

    try:
        e.save()
        pictures = []
        docs = []
        # Register uploaded files; images and other documents are
        # rendered differently below.
        for f in files.getlist('docs'):
            md = Media(entry=e)
            md.file.save(f.name, f)
            md.save()
            if f.content_type.startswith('image/'):
                pictures.append((md, f))
            else:
                docs.append((md, f))
        if len(pictures):
            thumbs = '\n<p class="thumbnails">\n'
            for o in pictures:
                thumb, orig = media.downsave_uploaded_image(o[0].file)
                thumbs += ' <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                    orig, thumb)
                mrss = {'url': orig, 'medium': 'image',
                        'fileSize': o[1].size}
                if orig.lower().endswith('.jpg'):
                    mrss['type'] = 'image/jpeg'
                mblob['content'].append([mrss])
            thumbs += '</p>\n'
            e.content += thumbs
        if len(docs):
            doc = '\n<ul class="files">\n'
            for o in docs:
                target = '[GLS-UPLOAD]/%s' % o[0].file.name.replace(
                    'upload/', '')
                doc += ' <li><a href="%s">%s</a> ' % (target, o[1].name)
                doc += '<span class="size">%s</span></li>\n' % \
                    bytes_to_human(o[1].size)
                mrss = {'url': target, 'fileSize': o[1].size}
                target = target.lower()
                if target.endswith('.mp3'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/mpeg'
                elif target.endswith('.ogg'):
                    mrss['medium'] = 'audio'
                    mrss['type'] = 'audio/ogg'
                elif target.endswith('.avi'):
                    mrss['medium'] = 'video'
                    mrss['type'] = 'video/avi'
                elif target.endswith('.pdf'):
                    mrss['medium'] = 'document'
                    mrss['type'] = 'application/pdf'
                else:
                    mrss['medium'] = 'document'
                mblob['content'].append([mrss])
            doc += '</ul>\n'
            e.content += doc
        e.mblob = media.mrss_gen_json(mblob)
        if len(pictures) or len(docs):
            e.save()
        media.extract_and_register(e)
        return e
    except Exception:  # was a bare except; keep best-effort semantics
        return None
def share(self, args={}):
    """Publish a new self-post entry built from *args*.

    Returns the saved Entry, or None when saving fails.
    """
    text = args.get('content', '')
    sid = args.get('sid', None)
    title = args.get('title', None)
    link = args.get('link', None)
    images = args.get('images', None)
    files = args.get('files', MultiValueDict())
    source = args.get('source', '')
    user = args.get('user', None)

    now = utcnow()
    guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                            now.strftime('%Y-%m-%dT%H:%M:%SZ'))
    if sid:
        svc = Service.objects.get(id=sid, api='selfposts')
    else:
        svc = Service.objects.filter(api='selfposts').order_by('id')[0]

    post = Entry(service=svc, guid=guid)
    post.link = link if link else settings.BASE_URL + '/'
    post.date_published = now
    post.date_updated = now
    post.draft = int(args.get('draft', False))
    post.friends_only = int(args.get('friends_only', False))
    if user and user.first_name and user.last_name:
        post.author_name = user.first_name + ' ' + user.last_name

    text = smart_text(text)
    syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
    if source == 'bookmarklet':
        syntax = 'html'
    if syntax == 'markdown' and markdown:
        post.content = expand.all(markdown.markdown(text))
    else:
        post.content = expand.all(text.replace('\n', '<br/>'))
    post.content = urlizetrunc(post.content, 45)
    post.content = strip_script(post.content)
    post.content = expand.imgloc(post.content)
    post.content = smart_text(post.content)

    if images:
        gallery = '\n<p class="thumbnails">\n'
        for img in images:
            img = media.save_image(img, force=True, downscale=True)
            gallery += ' <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n' % (
                post.link, img)
        gallery += '</p>\n'
        post.content += gallery

    if title:
        post.title = smart_text(title)
    else:
        post.title = truncate.smart(strip_tags(post.content)).strip()
        if post.title == '':
            post.title = truncate.smart(strip_tags(text)).strip()

    mblob = media.mrss_scan(post.content)
    post.mblob = media.mrss_gen_json(mblob)

    try:
        post.save()
        pictures, documents = [], []
        for f in files.getlist('docs'):
            md = Media(entry=post)
            md.file.save(f.name, f)
            md.save()
            if f.content_type.startswith('image/'):
                pictures.append((md, f))
            else:
                documents.append((md, f))
        if pictures:
            gallery = '\n<p class="thumbnails">\n'
            for md, upload in pictures:
                thumb, orig = media.downsave_uploaded_image(md.file)
                gallery += ' <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                    orig, thumb)
                mrss = {'url': orig, 'medium': 'image',
                        'fileSize': upload.size}
                if orig.lower().endswith('.jpg'):
                    mrss['type'] = 'image/jpeg'
                mblob['content'].append([mrss])
            gallery += '</p>\n'
            post.content += gallery
        if documents:
            # Suffix -> (mrss medium, mime type) for the known formats.
            known = {'.mp3': ('audio', 'audio/mpeg'),
                     '.ogg': ('audio', 'audio/ogg'),
                     '.avi': ('video', 'video/avi'),
                     '.pdf': ('document', 'application/pdf')}
            listing = '\n<ul class="files">\n'
            for md, upload in documents:
                target = '[GLS-UPLOAD]/%s' % md.file.name.replace(
                    'upload/', '')
                listing += ' <li><a href="%s">%s</a> ' % (target,
                                                          upload.name)
                listing += '<span class="size">%s</span></li>\n' % \
                    bytes_to_human(upload.size)
                mrss = {'url': target, 'fileSize': upload.size}
                target = target.lower()
                for suffix, (medium, mime) in known.items():
                    if target.endswith(suffix):
                        mrss['medium'] = medium
                        mrss['type'] = mime
                        break
                else:
                    mrss['medium'] = 'document'
                mblob['content'].append([mrss])
            listing += '</ul>\n'
            post.content += listing
        post.mblob = media.mrss_gen_json(mblob)
        if pictures or documents:
            post.save()
        media.extract_and_register(post)
        return post
    except:
        pass
def api(request, **args):
    """Dispatch an entry-related API command named by args['cmd']."""
    cmd = args.get('cmd', '')
    entry = request.POST.get('entry', None)
    is_auth = request.user.is_authenticated()
    authed = is_auth and request.user.is_staff
    friend = is_auth and not request.user.is_staff
    if not authed and cmd != 'getcontent':
        return HttpResponseForbidden()

    if cmd in ('hide', 'unhide') and entry:
        Entry.objects.filter(id=int(entry)).update(
            active=(cmd == 'unhide'))
    elif cmd == 'gsc':  # get selfposts classes
        rows = Service.objects.filter(api='selfposts') \
            .order_by('cls').values('id', 'cls')
        seen = {}
        for row in rows:
            seen.setdefault(row['cls'], row)
        payload = [{'id': row['id'], 'cls': row['cls']}
                   for row in seen.values()]
        return HttpResponse(json.dumps(payload),
                            content_type='application/json')
    elif cmd == 'share':
        images = [img for img in
                  (request.POST.get('image' + str(i), None)
                   for i in range(0, 5)) if img]
        source = request.POST.get('from', '')
        entry = selfposts.API(False).share({
            'content': request.POST.get('content', ''),
            'sid': request.POST.get('sid', None),
            'draft': request.POST.get('draft', False),
            'friends_only': request.POST.get('friends_only', False),
            'link': request.POST.get('link', None),
            'images': images,
            'files': request.FILES,
            'source': source,
            'user': request.user,
        })
        if entry:
            if not entry.draft:
                pshb.publish()
            if source == 'bookmarklet':
                msg = _("You've successfully shared this web page at your stream.")
                return HttpResponse(json.dumps({'close_msg': msg}),
                                    content_type='application/json')
            entry.friends_only = False
            if request.is_ajax():
                return render_to_response('stream-pure.html',
                                          {'entries': (entry,),
                                           'authed': authed})
            return HttpResponseRedirect(settings.BASE_URL + '/')
    elif cmd == 'reshare' and entry:
        try:
            entry = Entry.objects.get(id=int(entry))
            if entry:
                entry = selfposts.API(False).reshare(
                    entry, {'as_me': request.POST.get('as_me', False),
                            'user': request.user})
                if entry:
                    pshb.publish()
                    return render_to_response('stream-pure.html',
                                              {'entries': (entry,),
                                               'authed': authed})
        except Exception:
            pass
    elif cmd == 'favorite':
        try:
            entry = Entry.objects.get(id=int(entry))
            if entry:
                try:
                    fav = Favorite.objects.get(user=request.user,
                                               entry=entry)
                except Favorite.DoesNotExist:
                    fav = Favorite(user=request.user, entry=entry)
                    fav.save()
                    media.transform_to_local(entry)
                    media.extract_and_register(entry)
                    entry.save()
        except Exception:
            pass
    elif cmd == 'unfavorite':
        try:
            if entry:
                entry = Entry.objects.get(id=int(entry))
                if entry:
                    Favorite.objects.get(user=request.user,
                                         entry=entry).delete()
        except Exception:
            pass
    elif cmd == 'getcontent':
        try:
            if entry:
                if authed:
                    entry = Entry.objects.get(id=int(entry))
                else:
                    entry = Entry.objects.get(id=int(entry),
                                              service__public=True)
                if entry:
                    if request.POST.get('raw', False) and authed:
                        return HttpResponse(entry.content)
                    if authed or friend:
                        entry.friends_only = False
                    return HttpResponse(
                        fix_ampersands(gls_content('', entry)))
        except Exception:
            pass
    elif cmd == 'putcontent':
        try:
            if entry and authed:
                new_content = request.POST.get('content', '')
                if new_content:
                    Entry.objects.filter(id=int(entry)).update(
                        content=new_content)
                    entry = Entry.objects.get(id=int(entry))
                    if entry:
                        return HttpResponse(
                            fix_ampersands(gls_content('', entry)))
        except Exception:
            pass
    return HttpResponse()