Code example #1
File: youtube.py  Project: wojciechpolak/glifestream
    def process(self, url):
        for ent in self.json.get('items', ()):
            snippet = ent.get('snippet', {})
            date = snippet['publishedAt'][:10]

            vid = ent['contentDetails']['videoId']
            if self.playlist_types[url] == 'favorite':
                guid = 'tag:youtube.com,2008:favorite:%s' % ent.get('id')
            else:
                guid = 'tag:youtube.com,2008:video:%s' % vid

            t = datetime.datetime.strptime(snippet['publishedAt'],
                                           '%Y-%m-%dT%H:%M:%S.000Z')

            if self.verbose:
                print("ID: %s" % guid)
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and e.date_updated \
                   and mtime(t.timetuple()) <= e.date_updated:
                    continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.title = snippet['title']
            e.link = 'https://www.youtube.com/watch?v=%s' % vid
            e.date_published = t
            e.date_updated = t
            e.author_name = snippet['channelTitle']

            if vid and 'thumbnails' in snippet:
                tn = None
                if 'high' in snippet['thumbnails']:
                    tn = snippet['thumbnails']['high']
                    tn['width'], tn['height'] = 200, 150
                elif 'medium' in snippet['thumbnails']:
                    tn = snippet['thumbnails']['medium']
                    tn['width'], tn['height'] = 200, 150
                if not tn:
                    tn = snippet['thumbnails']['default']

                if self.service.public:
                    tn['url'] = media.save_image(tn['url'], downscale=True,
                                                 size=(200, 150))

                e.content = """<table class="vc"><tr><td><div id="youtube-%s" class="play-video"><a href="%s" rel="nofollow"><img src="%s" width="%s" height="%s" alt="YouTube Video" /></a><div class="playbutton"></div></div></td></tr></table>""" % (
                    vid, e.link, tn['url'], tn['width'], tn['height'])
            else:
                e.content = '<a href="%s">%s</a>' % (e.link, e.title)

            try:
                e.save()
            except Exception as exc:
                print(exc)
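
All of the process() methods in these examples repeat the same guid-based upsert check: look the Entry up by (service, guid), skip it when the stored copy is protected or at least as new as the fetched item, and create a fresh Entry otherwise. The sketch below distills that pattern into a helper; the helper name and its (entry, skip) return convention are hypothetical and not part of glifestream, while Entry and the mtime()-style timestamp comparison are the names used in the code above.

# Minimal sketch (assumed helper, not in the original source).
def get_or_skip_entry(service, guid, updated, force_overwrite=False):
    """Return (entry, skip): the Entry to fill in, or skip=True when the
    stored copy is protected or already at least as new as `updated`."""
    try:
        e = Entry.objects.get(service=service, guid=guid)
        if not force_overwrite and e.date_updated and updated <= e.date_updated:
            return e, True          # stored copy is up to date
        if e.protected:
            return e, True          # entry is locked against overwrites
    except Entry.DoesNotExist:
        e = Entry(service=service, guid=guid)   # first time this guid is seen
    return e, False

With such a helper, each loop body would reduce to computing the guid and the update timestamp, then filling and saving the returned Entry whenever skip is False.
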
Code example #2
File: vimeo.py  Project: kleopatra999/glifestream
    def process_videos(self):
        """Process videos uploaded by user."""
        for ent in self.json:
            date = ent['upload_date'][:10]
            guid = 'tag:vimeo,%s:clip%s' % (date, ent['id'])
            if self.verbose:
                print("ID: %s" % guid)
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and e.date_updated \
                   and mtime(ent['upload_date']) <= e.date_updated:
                    continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.title = ent['title']
            e.link = ent['url']
            e.date_published = ent['upload_date']
            e.date_updated = ent['upload_date']
            e.author_name = ent['user_name']

            if self.service.public:
                ent['thumbnail_medium'] = media.save_image(
                    ent['thumbnail_medium'])

            e.content = """<table class="vc"><tr><td><div id="vimeo-%s" class="play-video"><a href="%s" rel="nofollow"><img src="%s" width="200" height="150" alt="%s" /></a><div class="playbutton"></div></div></td></tr></table>""" % (
                ent['id'], e.link, ent['thumbnail_medium'], ent['title'])

            mblob = media.mrss_init()
            mblob[
                'content'].append([{'url': 'http://vimeo.com/moogaloop.swf?clip_id=%s' % ent['id'],
                                    'type': 'application/x-shockwave-flash',
                                    'medium': 'video'}])
            e.mblob = media.mrss_gen_json(mblob)

            try:
                e.save()
                media.extract_and_register(e)
            except:
                pass
Code example #3
File: twitter.py  Project: kleopatra999/glifestream
    def process(self):
        for ent in self.json:
            guid = 'tag:twitter.com,2007:http://twitter.com/%s/statuses/%s' % \
                (ent['user']['screen_name'], ent['id'])
            if self.verbose:
                print("ID: %s" % guid)

            t = datetime.datetime.strptime(ent['created_at'],
                                           '%a %b %d %H:%M:%S +0000 %Y')
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and \
                   e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                    continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.guid = guid
            e.title = 'Tweet: %s' % truncate.smart(
                strip_entities(strip_tags(ent['text'])), max_length=40)
            e.title = e.title.replace('#', '').replace('@', '')

            e.link = 'https://twitter.com/%s/status/%s' % \
                (ent['user']['screen_name'], ent['id'])
            image_url = ent['user']['profile_image_url_https']
            e.link_image = media.save_image(image_url, direct_image=False)

            e.date_published = t
            e.date_updated = t
            e.author_name = ent['user']['name']

            # double expand
            e.content = 'Tweet: %s' % expand.all(expand.shorturls(ent['text']))

            if 'entities' in ent and 'media' in ent['entities']:
                content = ' <p class="thumbnails">'
                for t in ent['entities']['media']:
                    if t['type'] == 'photo':
                        tsize = 'thumb'
                        if 'media_url_https' in t:
                            image_url = '%s:%s' % (t['media_url_https'], tsize)
                            large_url = '%s:large' % t['media_url_https']
                        else:
                            image_url = '%s:%s' % (t['media_url'], tsize)
                            large_url = t['media_url']
                        link = t['expanded_url']
                        if self.service.public:
                            image_url = media.save_image(image_url)
                        if 'sizes' in t and tsize in t['sizes']:
                            sizes = t['sizes'][tsize]
                            iwh = ' width="%d" height="%d"' % (sizes['w'],
                                                               sizes['h'])
                        else:
                            iwh = ''
                        content += '<a href="%s" rel="nofollow" data-imgurl="%s"><img src="%s"%s alt="thumbnail" /></a> ' % (
                            link, large_url, image_url, iwh)
                content += '</p>'
                e.content += content

            try:
                e.save()
                media.extract_and_register(e)
            except:
                pass
Code example #4
    def process(self):
        for ent in self.json['entries']:
            id = ent['id'][2:]
            uuid = '%s-%s-%s-%s-%s' % (id[0:8], id[8:12], id[12:16], id[16:20],
                                       id[20:])
            guid = 'tag:friendfeed.com,2007:%s' % uuid
            if self.verbose:
                print("ID: %s" % guid)

            t = datetime.datetime.strptime(ent['date'], '%Y-%m-%dT%H:%M:%SZ')
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and \
                   e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                    continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.guid = guid
            e.title = truncate.smart(strip_entities(strip_tags(ent['body'])),
                                     max_length=40)
            e.link = ent['url']
            image_url = 'http://friendfeed-api.com/v2/picture/%s' % ent[
                'from']['id']
            e.link_image = media.save_image(image_url, direct_image=False)

            e.date_published = t
            e.date_updated = t
            e.author_name = ent['from']['name']

            content = ent['body']
            if 'thumbnails' in ent:
                content += '<p class="thumbnails">'
                for t in ent['thumbnails']:
                    if self.service.public:
                        t['url'] = media.save_image(t['url'])
                    if 'width' in t and 'height' in t:
                        iwh = ' width="%d" height="%d"' % (t['width'],
                                                           t['height'])
                    else:
                        iwh = ''

                    if 'friendfeed.com/e/' in t['link'] and \
                       ('youtube.com' in t['url'] or 'ytimg.com' in t['url']):
                        m = re.search(r'/vi/([\-\w]+)/', t['url'])
                        yid = m.groups()[0] if m else None
                        if yid:
                            t['link'] = 'http://www.youtube.com/watch?v=%s' % yid

                    content += '<a href="%s" rel="nofollow"><img src="%s"%s alt="thumbnail" /></a> ' % (
                        t['link'], t['url'], iwh)
                content += '</p>'

            if 'files' in ent:
                content += '<ul class="files">\n'
                for f in ent['files']:
                    if 'friendfeed-media' in f['url']:
                        content += '  <li><a href="%s" rel="nofollow">%s</a>' % (
                            f['url'], f['name'])
                        if 'size' in f:
                            content += ' <span class="size">%s</span>' % bytes_to_human(
                                f['size'])
                        content += '</li>\n'
                content += '</ul>\n'

            e.content = content

            try:
                e.save()
                media.extract_and_register(e)
            except:
                pass
Code example #5
File: fb.py  Project: kleopatra999/glifestream
    def process(self):
        for ent in self.stream['data']:
            guid = 'tag:facebook.com,2004:post/%s' % ent['id']
            if self.verbose:
                print("ID: %s" % guid)

            if 'updated_time' in ent:
                t = from_rfc3339(ent['updated_time'])
            else:
                t = from_rfc3339(ent['created_time'])

            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and \
                   e.date_updated and mtime(t.timetuple()) <= e.date_updated:
                    continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.guid = guid
            e.link = ent['actions'][0]['link']

            if 'from' in ent:
                frm = ent['from']
                image_url = 'http://graph.facebook.com/%s/picture' % frm['id']
                e.link_image = media.save_image(image_url, direct_image=False)
                e.author_name = frm['name']

            e.date_published = from_rfc3339(ent['created_time'])
            e.date_updated = t

            content = ''
            if 'message' in ent:
                content = expand.shorts(ent['message'])
                content = '<p>' + urlizetrunc(content, 45) + '</p>'

            name = ''
            if 'name' in ent:
                name = ent['name']
                content += ' <p>' + ent['name'] + '</p>'

            if 'picture' in ent and 'link' in ent:
                content += '<p class="thumbnails">'
                content += '<a href="%s" rel="nofollow">' \
                    '<img src="%s" alt="thumbnail" /></a> ' \
                    % (ent['link'], media.save_image(ent['picture'],
                                                     downscale=True))

                if 'description' in ent:
                    content += '<div class="fb-description">%s</div>' % \
                        ent['description']
                elif 'caption' in ent and name != ent['caption']:
                    content += '<div class="fb-caption">%s</div>' % \
                        ent['caption']

                content += '</p>'
            else:
                if 'description' in ent:
                    content += '<div class="fb-description">%s</div>' % \
                        ent['description']
                elif 'caption' in ent and name != ent['caption']:
                    content += '<div class="fb-caption">%s</div>' % \
                        ent['caption']

            e.content = content
            if 'message' in ent:
                e.title = truncate.smart(strip_tags(ent['message']),
                                         max_length=48)
            if e.title == '':
                e.title = strip_entities(strip_tags(content))[0:128]

            try:
                e.save()
                media.extract_and_register(e)
            except:
                pass
Code example #6
File: webfeed.py  Project: wojciechpolak/glifestream
    def process(self):
        for ent in self.fp.entries:
            guid = ent.id if 'id' in ent else ent.link
            if self.verbose:
                print('ID: %s' % guid)
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and 'updated_parsed' in ent:
                    if e.date_updated and \
                       mtime(ent.updated_parsed) <= e.date_updated:
                        continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.title = ent.title
            e.link = ent.get('feedburner_origlink', ent.get('link', ''))

            if 'author_detail' in ent:
                e.author_name = ent.author_detail.get('name', '')
                e.author_email = ent.author_detail.get('email', '')
                e.author_uri = ent.author_detail.get('href', '')
            else:
                e.author_name = ent.get('author', ent.get('creator', ''))
                if not e.author_name and 'author_detail' in self.fp.feed:
                    e.author_name = self.fp.feed.author_detail.get('name', '')
                    e.author_email = self.fp.feed.author_detail.get(
                        'email', '')
                    e.author_uri = self.fp.feed.author_detail.get('href', '')

            try:
                e.content = ent.content[0].value
            except:
                e.content = ent.get('summary', ent.get('description', ''))

            if 'published_parsed' in ent:
                e.date_published = mtime(ent.published_parsed)
            elif 'updated_parsed' in ent:
                e.date_published = mtime(ent.updated_parsed)

            if 'updated_parsed' in ent:
                e.date_updated = mtime(ent.updated_parsed)

            if 'geo_lat' in ent and 'geo_long' in ent:
                e.geolat = ent.geo_lat
                e.geolng = ent.geo_long
            elif 'georss_point' in ent:
                geo = ent['georss_point'].split(' ')
                e.geolat = geo[0]
                e.geolng = geo[1]

            if 'image' in self.fp.feed:
                e.link_image = media.save_image(self.fp.feed.image.url)
            else:
                for link in ent.links:
                    if link.rel == 'image' or link.rel == 'photo':
                        e.link_image = media.save_image(link.href)

            if hasattr(self, 'custom_process'):
                self.custom_process(e, ent)

            if hasattr(e, 'custom_mblob'):
                e.mblob = e.custom_mblob
            else:
                e.mblob = None

            mblob = media.mrss_init(e.mblob)
            if 'media_content' in ent:
                mblob['content'].append(ent.media_content)
            e.mblob = media.mrss_gen_json(mblob)

            e.content = strip_script(e.content)

            try:
                e.save()
                media.extract_and_register(e)
            except:
                pass
Code example #7
    def process(self):
        for key, group in groupby(self.fp.entries, lambda x: x.updated[0:19]):
            mblob = media.mrss_init()
            lgroup = 0
            content = '<p class="thumbnails">\n'
            first = True
            for ent in group:
                lgroup += 1
                if first:
                    firstent = ent
                    first = False
                if self.verbose:
                    print("ID: %s" % ent.id)

                if 'media_thumbnail' in ent:
                    tn = ent.media_thumbnail[0]
                    if self.service.public:
                        tn['url'] = media.save_image(tn['url'])
                    content += """  <a href="%s" rel="nofollow"><img src="%s" width="%s" height="%s" alt="thumbnail" /></a>\n""" % (
                        ent.link, tn['url'], tn['width'], tn['height'])

                if 'media_content' in ent:
                    mblob['content'].append(ent.media_content)

            ent = firstent
            content += '</p>'
            guid = 'tag:flickr.com,2004:/photo/%s' % ent.id

            try:
                e = Entry.objects.get(service=self.service, guid=ent.id)
                if not self.force_overwrite and 'updated_parsed' in ent:
                    if e.date_updated and \
                       mtime(ent.updated_parsed) <= e.date_updated:
                        continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=ent.id)

            e.mblob = media.mrss_gen_json(mblob)
            if lgroup > 1:
                e.idata = 'grouped'

            e.link = self.service.link
            e.title = 'Posted Photos'
            e.content = content

            if 'published_parsed' in ent:
                e.date_published = mtime(ent.published_parsed)
            elif 'updated_parsed' in ent:
                e.date_published = mtime(ent.updated_parsed)
            if 'updated_parsed' in ent:
                e.date_updated = mtime(ent.updated_parsed)

            if 'image' in self.fp.feed:
                e.link_image = media.save_image(self.fp.feed.image.href)
            else:
                for link in ent.links:
                    if link.rel == 'image':
                        e.link_image = media.save_image(link.href)
            try:
                e.save()
            except:
                pass
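
The example above (a Flickr photo feed, judging by its guid) relies on itertools.groupby keyed on updated[0:19], i.e. the timestamp truncated to whole seconds, so photos posted in one batch collapse into a single grouped entry. groupby only merges adjacent items, which works here because the feed lists a batch consecutively. A small self-contained illustration of that behaviour, with made-up timestamps:

from itertools import groupby

# Made-up feed timestamps; the first two belong to one upload batch.
updated = [
    '2013-05-04T10:21:33Z',
    '2013-05-04T10:21:33Z',
    '2013-05-04T09:00:12Z',
]
for key, group in groupby(updated, lambda u: u[0:19]):
    print(key, len(list(group)))
# -> 2013-05-04T10:21:33 2
# -> 2013-05-04T09:00:12 1
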
Code example #8
    def process_userdid(self):
        """Process what user did."""
        for ent in self.json:
            if 'type' in ent and ent['type'] == 'like':
                date = ent['date'][:10]
                guid = 'tag:vimeo,%s:clip%s' % (date, ent['video_id'])
                if self.verbose:
                    print("ID: %s" % guid)
                try:
                    e = Entry.objects.get(service=self.service, guid=guid)
                    if not self.force_overwrite and e.date_updated \
                       and mtime(ent['date']) <= e.date_updated:
                        continue
                    if e.protected:
                        continue
                except Entry.DoesNotExist:
                    e = Entry(service=self.service, guid=guid)

                e.title = ent['video_title']
                e.link = ent['video_url']
                e.date_published = ent['date']
                e.date_updated = ent['date']
                e.author_name = ent['user_name']

                e.idata = 'liked'

                if self.service.public:
                    ent['video_thumbnail_medium'] = media.save_image(
                        ent['video_thumbnail_medium'])

                e.content = """<table class="vc"><tr><td><div id="vimeo-%s" class="play-video"><a href="%s" rel="nofollow"><img src="%s" width="200" height="150" alt="%s" /></a><div class="playbutton"></div></div></td></tr></table>""" % (
                    ent['video_id'], e.link, ent['video_thumbnail_medium'],
                    ent['video_title'])

                mblob = media.mrss_init()
                mblob['content'].append([{
                    'url':
                    'http://vimeo.com/moogaloop.swf?clip_id=%s' %
                    ent['video_id'],
                    'type':
                    'application/x-shockwave-flash',
                    'medium':
                    'video'
                }])
                e.mblob = media.mrss_gen_json(mblob)

                try:
                    e.save()
                except:
                    pass
Code example #9
File: webfeed.py  Project: kleopatra999/glifestream
    def process(self):
        for ent in self.fp.entries:
            guid = ent.id if 'id' in ent else ent.link
            if self.verbose:
                print('ID: %s' % guid)
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and 'updated_parsed' in ent:
                    if e.date_updated and \
                       mtime(ent.updated_parsed) <= e.date_updated:
                        continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.title = ent.title
            e.link = ent.get('feedburner_origlink', ent.get('link', ''))

            if 'author_detail' in ent:
                e.author_name = ent.author_detail.get('name', '')
                e.author_email = ent.author_detail.get('email', '')
                e.author_uri = ent.author_detail.get('href', '')
            else:
                e.author_name = ent.get('author', ent.get('creator', ''))
                if not e.author_name and 'author_detail' in self.fp.feed:
                    e.author_name = self.fp.feed.author_detail.get('name', '')
                    e.author_email = self.fp.feed.author_detail.get(
                        'email', '')
                    e.author_uri = self.fp.feed.author_detail.get('href', '')

            try:
                e.content = ent.content[0].value
            except:
                e.content = ent.get('summary', ent.get('description', ''))

            if 'published_parsed' in ent:
                e.date_published = mtime(ent.published_parsed)
            elif 'updated_parsed' in ent:
                e.date_published = mtime(ent.updated_parsed)

            if 'updated_parsed' in ent:
                e.date_updated = mtime(ent.updated_parsed)

            if 'geo_lat' in ent and 'geo_long' in ent:
                e.geolat = ent.geo_lat
                e.geolng = ent.geo_long
            elif 'georss_point' in ent:
                geo = ent['georss_point'].split(' ')
                e.geolat = geo[0]
                e.geolng = geo[1]

            if 'image' in self.fp.feed:
                e.link_image = media.save_image(self.fp.feed.image.url)
            else:
                for link in ent.links:
                    if link.rel == 'image' or link.rel == 'photo':
                        e.link_image = media.save_image(link.href)

            if hasattr(self, 'custom_process'):
                self.custom_process(e, ent)

            if hasattr(e, 'custom_mblob'):
                e.mblob = e.custom_mblob
            else:
                e.mblob = None

            mblob = media.mrss_init(e.mblob)
            if 'media_content' in ent:
                mblob['content'].append(ent.media_content)
            e.mblob = media.mrss_gen_json(mblob)

            e.content = strip_script(e.content)

            try:
                e.save()
                media.extract_and_register(e)
            except:
                pass
Code example #10
File: selfposts.py  Project: kleopatra999/glifestream
    def share(self, args={}):
        content = args.get('content', '')
        sid = args.get('sid', None)
        title = args.get('title', None)
        link = args.get('link', None)
        images = args.get('images', None)
        files = args.get('files', MultiValueDict())
        source = args.get('source', '')
        user = args.get('user', None)

        un = utcnow()
        guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                                un.strftime('%Y-%m-%dT%H:%M:%SZ'))
        if sid:
            s = Service.objects.get(id=sid, api='selfposts')
        else:
            s = Service.objects.filter(api='selfposts').order_by('id')[0]
        e = Entry(service=s, guid=guid)

        e.link = link if link else settings.BASE_URL + '/'
        e.date_published = un
        e.date_updated = un
        e.draft = int(args.get('draft', False))
        e.friends_only = int(args.get('friends_only', False))

        if user and user.first_name and user.last_name:
            e.author_name = user.first_name + ' ' + user.last_name

        content = smart_text(content)

        editor_syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
        if source == 'bookmarklet':
            editor_syntax = 'html'

        if editor_syntax == 'markdown' and markdown:
            e.content = expand.all(markdown.markdown(content))
        else:
            e.content = expand.all(content.replace('\n', '<br/>'))
            e.content = urlizetrunc(e.content, 45)

        e.content = strip_script(e.content)
        e.content = expand.imgloc(e.content)
        e.content = smart_text(e.content)

        if images:
            thumbs = '\n<p class="thumbnails">\n'
            for img in images:
                img = media.save_image(img, force=True, downscale=True)
                thumbs += """  <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n""" % (
                    e.link, img)
            thumbs += '</p>\n'
            e.content += thumbs

        if title:
            e.title = smart_text(title)
        else:
            e.title = truncate.smart(strip_tags(e.content)).strip()
        if e.title == '':
            e.title = truncate.smart(strip_tags(content)).strip()

        mblob = media.mrss_scan(e.content)
        e.mblob = media.mrss_gen_json(mblob)

        try:
            e.save()

            pictures = []
            docs = []

            for f in files.getlist('docs'):
                md = Media(entry=e)
                md.file.save(f.name, f)
                md.save()
                if f.content_type.startswith('image/'):
                    pictures.append((md, f))
                else:
                    docs.append((md, f))

            if len(pictures):
                thumbs = '\n<p class="thumbnails">\n'
                for o in pictures:
                    thumb, orig = media.downsave_uploaded_image(o[0].file)
                    thumbs += '  <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                        orig, thumb)
                    mrss = {'url': orig, 'medium': 'image',
                            'fileSize': o[1].size}
                    if orig.lower().endswith('.jpg'):
                        mrss['type'] = 'image/jpeg'
                    mblob['content'].append([mrss])
                thumbs += '</p>\n'
                e.content += thumbs

            if len(docs):
                doc = '\n<ul class="files">\n'
                for o in docs:
                    target = '[GLS-UPLOAD]/%s' % o[
                        0].file.name.replace('upload/', '')
                    doc += '  <li><a href="%s">%s</a> ' % (target, o[1].name)
                    doc += '<span class="size">%s</span></li>\n' % \
                        bytes_to_human(o[1].size)

                    mrss = {'url': target, 'fileSize': o[1].size}
                    target = target.lower()
                    if target.endswith('.mp3'):
                        mrss['medium'] = 'audio'
                        mrss['type'] = 'audio/mpeg'
                    elif target.endswith('.ogg'):
                        mrss['medium'] = 'audio'
                        mrss['type'] = 'audio/ogg'
                    elif target.endswith('.avi'):
                        mrss['medium'] = 'video'
                        mrss['type'] = 'video/avi'
                    elif target.endswith('.pdf'):
                        mrss['medium'] = 'document'
                        mrss['type'] = 'application/pdf'
                    else:
                        mrss['medium'] = 'document'
                    mblob['content'].append([mrss])

                doc += '</ul>\n'
                e.content += doc

            e.mblob = media.mrss_gen_json(mblob)
            if len(pictures) or len(docs):
                e.save()

            media.extract_and_register(e)
            return e
        except:
            pass
Code example #11
File: selfposts.py  Project: kleopatra999/glifestream
    def reshare(self, entry, args={}):
        sid = args.get('sid', None)
        as_me = int(args.get('as_me', False))
        user = args.get('user', None)

        un = utcnow()
        guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                                un.strftime('%Y-%m-%dT%H:%M:%SZ'))
        if sid:
            s = Service.objects.get(id=sid, api='selfposts')
        else:
            s = Service.objects.filter(api='selfposts').order_by('id')[0]
        e = Entry(service=s, guid=guid)

        e.date_published = un
        e.date_updated = un

        if as_me:
            if user and user.first_name and user.last_name:
                e.author_name = user.first_name + ' ' + user.last_name
            else:
                e.author_name = ''
            e.author_email = ''
            e.author_uri = ''
            if entry.service.api == 'greader':
                e.link = entry.link
            else:
                e.link = settings.BASE_URL + '/'
            if entry.service.api == 'twitter':
                entry.content = entry.content.split(': ', 1)[1]
        else:
            e.author_name = entry.author_name
            e.author_email = entry.author_email
            e.author_uri = entry.author_uri
            e.link = entry.link

        e.geolat = entry.geolat
        e.geolng = entry.geolng
        e.mblob = entry.mblob

        e.title = entry.title
        if entry.service.api == 'greader':
            e.content = '<a href="%s" rel="nofollow">%s</a>' % (
                e.link, e.title)
        elif entry.service.api in ('youtube', 'vimeo'):
            e.content = '<p>%s</p>%s' % (df_title(e.title), entry.content)
        else:
            e.content = urlizetrunc(entry.content, 45)

        try:
            media.transform_to_local(e)
            media.extract_and_register(e)
            e.save()
            return e
        except:
            pass
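
For reference, a hypothetical call site for the two selfposts methods above. The service-object name and the way it is obtained are assumptions; the dictionary keys are exactly the ones share() and reshare() read via args.get().

svc = SelfpostsAPI()                      # hypothetical wrapper around selfposts.py

# Publish a self-post; title is optional and is derived from the content otherwise.
entry = svc.share({
    'content': 'Hello *world*, see https://example.com/',
    'title': 'Hello',
    'draft': False,
    'friends_only': False,
})

# Re-post an existing entry under the signed-in user's own name.
copy = svc.reshare(entry, {'as_me': 1})
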