Example no. 1
    def process(self):
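        # Walk every entry of the parsed feed (self.fp) and create or update the
        # corresponding Entry object.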
        for ent in self.fp.entries:
            guid = ent.id if 'id' in ent else ent.link
            if self.verbose:
                print('ID: %s' % guid)
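            # Look up an existing Entry for this guid; skip it when it is protected
            # or unchanged since the last update (unless force_overwrite is set).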
            try:
                e = Entry.objects.get(service=self.service, guid=guid)
                if not self.force_overwrite and 'updated_parsed' in ent:
                    if e.date_updated and \
                       mtime(ent.updated_parsed) <= e.date_updated:
                        continue
                if e.protected:
                    continue
            except Entry.DoesNotExist:
                e = Entry(service=self.service, guid=guid)

            e.title = ent.title
            e.link = ent.get('feedburner_origlink', ent.get('link', ''))

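            # Author metadata: prefer the entry's author_detail, then the plain
            # author/creator fields, then the feed-level author_detail.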
            if 'author_detail' in ent:
                e.author_name = ent.author_detail.get('name', '')
                e.author_email = ent.author_detail.get('email', '')
                e.author_uri = ent.author_detail.get('href', '')
            else:
                e.author_name = ent.get('author', ent.get('creator', ''))
                if not e.author_name and 'author_detail' in self.fp.feed:
                    e.author_name = self.fp.feed.author_detail.get('name', '')
                    e.author_email = self.fp.feed.author_detail.get(
                        'email', '')
                    e.author_uri = self.fp.feed.author_detail.get('href', '')

            # Prefer the entry's full content block; fall back to summary/description.
            try:
                e.content = ent.content[0].value
            except (AttributeError, IndexError, KeyError):
                e.content = ent.get('summary', ent.get('description', ''))

            if 'published_parsed' in ent:
                e.date_published = mtime(ent.published_parsed)
            elif 'updated_parsed' in ent:
                e.date_published = mtime(ent.updated_parsed)

            if 'updated_parsed' in ent:
                e.date_updated = mtime(ent.updated_parsed)

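            # Geolocation: either separate geo_lat/geo_long fields or a single
            # space-separated georss_point value.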
            if 'geo_lat' in ent and 'geo_long' in ent:
                e.geolat = ent.geo_lat
                e.geolng = ent.geo_long
            elif 'georss_point' in ent:
                geo = ent['georss_point'].split(' ')
                e.geolat = geo[0]
                e.geolng = geo[1]

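            # Entry image: use the feed-wide image when present, otherwise any
            # entry link with rel "image" or "photo".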
            if 'image' in self.fp.feed:
                e.link_image = media.save_image(self.fp.feed.image.url)
            else:
                for link in ent.links:
                    if link.rel == 'image' or link.rel == 'photo':
                        e.link_image = media.save_image(link.href)

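            # Optional custom_process / custom_mblob hooks let specialised importers
            # adjust the entry before the MRSS media blob is built and stored as JSON.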
            if hasattr(self, 'custom_process'):
                self.custom_process(e, ent)

            if hasattr(e, 'custom_mblob'):
                e.mblob = e.custom_mblob
            else:
                e.mblob = None

            mblob = media.mrss_init(e.mblob)
            if 'media_content' in ent:
                mblob['content'].append(ent.media_content)
            e.mblob = media.mrss_gen_json(mblob)

            e.content = strip_script(e.content)

            try:
                e.save()
                media.extract_and_register(e)
            except Exception:
                # Never let one broken entry abort the whole import run.
                pass
Example no. 2
    def share(self, args=None):
        # Create and save a self-posted Entry from the fields supplied in args.
        # args defaults to None to avoid the mutable default-argument pitfall.
        args = args or {}
        content = args.get('content', '')
        sid = args.get('sid', None)
        title = args.get('title', None)
        link = args.get('link', None)
        images = args.get('images', None)
        files = args.get('files', MultiValueDict())
        source = args.get('source', '')
        user = args.get('user', None)

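        # Build a tag-URI style guid from the current UTC time and attach the entry
        # to the requested "selfposts" service (or the first one defined).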
        un = utcnow()
        guid = '%s/entry/%s' % (settings.FEED_TAGURI,
                                un.strftime('%Y-%m-%dT%H:%M:%SZ'))
        if sid:
            s = Service.objects.get(id=sid, api='selfposts')
        else:
            s = Service.objects.filter(api='selfposts').order_by('id')[0]
        e = Entry(service=s, guid=guid)

        e.link = link if link else settings.BASE_URL + '/'
        e.date_published = un
        e.date_updated = un
        e.draft = int(args.get('draft', False))
        e.friends_only = int(args.get('friends_only', False))

        if user and user.first_name and user.last_name:
            e.author_name = user.first_name + ' ' + user.last_name

        content = smart_text(content)

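        # Render the body with Markdown when available and configured; otherwise
        # treat it as HTML, converting newlines and linkifying/truncating bare URLs.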
        editor_syntax = getattr(settings, 'EDITOR_SYNTAX', 'markdown')
        if source == 'bookmarklet':
            editor_syntax = 'html'

        if editor_syntax == 'markdown' and markdown:
            e.content = expand.all(markdown.markdown(content))
        else:
            e.content = expand.all(content.replace('\n', '<br/>'))
            e.content = urlizetrunc(e.content, 45)

        e.content = strip_script(e.content)
        e.content = expand.imgloc(e.content)
        e.content = smart_text(e.content)

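        # Turn any explicitly passed image URLs into locally saved, downscaled
        # thumbnails appended to the content.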
        if images:
            thumbs = '\n<p class="thumbnails">\n'
            for img in images:
                img = media.save_image(img, force=True, downscale=True)
                thumbs += """  <a href="%s" rel="nofollow"><img src="%s" alt="thumbnail" /></a>\n""" % (
                    e.link, img)
            thumbs += '</p>\n'
            e.content += thumbs

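        # Title: use the supplied one, otherwise derive it from the rendered
        # content, falling back to the raw text when that yields nothing.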
        if title:
            e.title = smart_text(title)
        else:
            e.title = truncate.smart(strip_tags(e.content)).strip()
        if e.title == '':
            e.title = truncate.smart(strip_tags(content)).strip()

        mblob = media.mrss_scan(e.content)
        e.mblob = media.mrss_gen_json(mblob)

        try:
            e.save()

            pictures = []
            docs = []

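            # Save each uploaded file as a Media object, separating images from
            # other documents.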
            for f in files.getlist('docs'):
                md = Media(entry=e)
                md.file.save(f.name, f)
                md.save()
                if f.content_type.startswith('image/'):
                    pictures.append((md, f))
                else:
                    docs.append((md, f))

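            # Uploaded images become a thumbnail gallery and MRSS image items.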
            if pictures:
                thumbs = '\n<p class="thumbnails">\n'
                for o in pictures:
                    thumb, orig = media.downsave_uploaded_image(o[0].file)
                    thumbs += '  <a href="%s"><img src="%s" alt="thumbnail" /></a>\n' % (
                        orig, thumb)
                    mrss = {'url': orig, 'medium': 'image',
                            'fileSize': o[1].size}
                    if orig.lower().endswith('.jpg'):
                        mrss['type'] = 'image/jpeg'
                    mblob['content'].append([mrss])
                thumbs += '</p>\n'
                e.content += thumbs

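            # Other uploads are listed with human-readable sizes; the file extension
            # is used to guess medium and MIME type for the MRSS blob.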
            if docs:
                doc = '\n<ul class="files">\n'
                for o in docs:
                    target = '[GLS-UPLOAD]/%s' % o[0].file.name.replace(
                        'upload/', '')
                    doc += '  <li><a href="%s">%s</a> ' % (target, o[1].name)
                    doc += '<span class="size">%s</span></li>\n' % \
                        bytes_to_human(o[1].size)

                    mrss = {'url': target, 'fileSize': o[1].size}
                    target = target.lower()
                    if target.endswith('.mp3'):
                        mrss['medium'] = 'audio'
                        mrss['type'] = 'audio/mpeg'
                    elif target.endswith('.ogg'):
                        mrss['medium'] = 'audio'
                        mrss['type'] = 'audio/ogg'
                    elif target.endswith('.avi'):
                        mrss['medium'] = 'video'
                        mrss['type'] = 'video/avi'
                    elif target.endswith('.pdf'):
                        mrss['medium'] = 'document'
                        mrss['type'] = 'application/pdf'
                    else:
                        mrss['medium'] = 'document'
                    mblob['content'].append([mrss])

                doc += '</ul>\n'
                e.content += doc

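            # Re-serialise the MRSS blob and save again if attachments changed it.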
            e.mblob = media.mrss_gen_json(mblob)
            if pictures or docs:
                e.save()

            media.extract_and_register(e)
            return e
        except Exception:
            # If saving the entry or its attachments fails, give up quietly.
            return None