Example 1
def show_eric_options(req):
    """This renders the eric admin panel. Allow switching the skin and show
    the available skins.
    """
    new_skin = req.args.get('select')
    if new_skin in SKINS:
        try:
            req.app.cfg.change_single('eric_the_fish/skin', new_skin)
        except ConfigurationTransactionError, e:
            flash(_('The skin could not be changed.'), 'error')
        return redirect(url_for('eric_the_fish/config'))
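All of the snippets on this page follow the same Zine plugin pattern: read a value from the request, persist it with req.app.cfg.change_single, flash a confirmation or error, and redirect back to the plugin's config page. A minimal sketch of the complete shape; my_plugin/option, ALLOWED_VALUES, and the endpoint names are hypothetical placeholders, while the cfg/flash/redirect calls mirror the examples themselves:

def show_my_plugin_options(req):
    # Sketch only: my_plugin/option, ALLOWED_VALUES and the endpoint
    # names are hypothetical; the cfg/flash/redirect calls mirror the
    # examples on this page.
    new_value = req.args.get('select')
    if new_value in ALLOWED_VALUES:
        try:
            req.app.cfg.change_single('my_plugin/option', new_value)
        except ConfigurationTransactionError:
            flash(_('The option could not be changed.'), 'error')
        return redirect(url_for('my_plugin/config'))
    return render_admin_response('admin/my_plugin.html', 'options.my_plugin')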
Example 2
def show_image_manager_options(req):
    image_dir = req.args.get('images_directory')
    base_url = req.args.get('base_url')
    thumb_max_width = req.args.get('thumb_max_width')
    thumb_max_height = req.args.get('thumb_max_height')

    if image_dir:
        try:
            req.app.cfg.change_single('img_upload/images_directory', image_dir)
        except ConfigurationTransactionError, e:
            flash(_('The images directory could not be changed.'), 'error')
Example 3
def view_ad_sense_config(req):
    client_code = req.args.get('client_code')
    banner_slot = req.args.get('banner_slot')
    width = req.args.get('width')
    height = req.args.get('height')
    if client_code and banner_slot and width and height:
        try:
            req.app.cfg.change_single('ad_sense/client_code', client_code)
            req.app.cfg.change_single('ad_sense/banner_slot', banner_slot)
            req.app.cfg.change_single('ad_sense/width', width)
            req.app.cfg.change_single('ad_sense/height', height)
            flash(_('Config updated!'), 'info')
        except ConfigurationTransactionError, e:
            flash(_('The code could not be changed.'), 'error')
        return redirect(url_for('ad_sense/config'))
Example 4
def show_markdown_config(req):
    """Show Markdown Parser configuration options."""
    form = ConfigurationForm(initial=dict(
                                    extensions=req.app.cfg[CFG_EXTENSIONS],
                                    makeintro=req.app.cfg[CFG_MAKEINTRO]))

    if req.method == 'POST' and form.validate(req.form):
        if form.has_changed:
            cfg = req.app.cfg.edit()
            cfg[CFG_EXTENSIONS] = form['extensions']
            cfg[CFG_MAKEINTRO] = form['makeintro']
            cfg.commit()
            flash(_('Markdown Parser settings saved.'), 'ok')
    return render_admin_response('admin/markdown_options.html',
                                 'options.markdown',
                                 form=form.as_widget())
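This example shows the second persistence style in the listing: instead of calling change_single once per key, the handler opens a transaction with req.app.cfg.edit(), assigns several keys, and commits them together. A minimal sketch of that pattern with placeholder keys; the typography example further down wraps commit() in a try/except IOError, which suggests the commit can fail and is worth guarding:

# Placeholder keys; the cfg.edit()/commit() calls match the examples
# on this page.
t = req.app.cfg.edit()
t['my_plugin/first_option'] = first_value
t['my_plugin/second_option'] = second_value
try:
    t.commit()  # all keys are saved in one transaction
except IOError:
    flash(_('Settings could not be saved.'), 'error')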
Example 5
    def configure(self, request):
        form = WordPressImportForm()

        if request.method == 'POST' and form.validate(request.form):
            dump = request.files.get('dump')
            if form.data['download_url']:
                try:
                    dump = open_url(form.data['download_url']).stream
                except Exception, e:
                    error = _(u'Error downloading from URL: %s') % e
                    flash(error, 'error')
            if not dump:
                return redirect_to('import/wordpress')

            try:
                blog = parse_feed(dump)
            except Exception, e:
                log.exception(_(u'Error parsing uploaded file'))
                flash(_(u'Error parsing uploaded file: %s') % e, 'error')
Example 6
    def configure(self, request):
        form = FeedImportForm()

        if request.method == 'POST' and form.validate(request.form):
            feed = request.files.get('feed')
            if form.data['download_url']:
                try:
                    feed = open_url(form.data['download_url']).stream
                except Exception, e:
                    log.exception(_('Error downloading feed'))
                    flash(_(u'Error downloading from URL: %s') % e, 'error')
            if not feed:
                return redirect_to('import/feed')

            try:
                blog = parse_feed(feed)
            except Exception, e:
                log.exception(_(u'Error parsing uploaded file'))
                flash(_(u'Error parsing feed: %s') % e, 'error')
Example 7
    def configure(self, request):
        form = WordPressImportForm()

        if request.method == 'POST' and form.validate(request.form):
            dump = request.files.get('dump')
            if form.data['download_url']:
                try:
                    dump = open_url(form.data['download_url']).stream
                except Exception, e:
                    log.exception(_('Error downloading feed'))
                    flash(_(u'Error downloading from URL: %s') % e, 'error')
            if not dump:
                return redirect_to('import/wordpress')

            try:
                blog = parse_feed(dump)
            except Exception, e:
                log.exception(_(u'Error parsing uploaded file'))
                flash(_(u'Error parsing uploaded file: %s') % e, 'error')
Example 8
def show_config(req):
    """The configuration form."""
    form = ConfigurationForm(initial=dict((k, req.app.cfg['typography/' + k])
                                          for k in ConfigurationForm.fields))

    if req.method == 'POST' and form.validate(req.form):
        if form.has_changed:
            t = req.app.cfg.edit()
            for key, value in form.data.iteritems():
                t['typography/' + key] = value
            try:
                t.commit()
            except IOError:
                flash(_('Typography settings could not be changed.'), 'error')
            else:
                flash(_('Typography settings changed.'), 'configure')
        return redirect_to('typography/config')

    return render_admin_response('admin/typography.html',
                                 'options.typography', form=form.as_widget())
Example 9
    def configure(self, request):
        form = FeedImportForm()

        if request.method == 'POST' and form.validate(request.form):
            feed = request.files.get('feed')
            if form.data['download_url']:
                if not form.data['download_url'].endswith('.tpxa'):
                    error = _(u"Don't pass a real feed URL, it should be a "
                              u"regular URL where you're serving the file "
                              u"generated with the textpress_exporter.py script")
                    flash(error, 'error')
                    return self.render_admin_page('import_textpress.html',
                                                  form=form.as_widget(),
                                                  bugs_link=BUGS_LINK)
                try:
                    feed = urllib.urlopen(form.data['download_url'])
                except Exception, e:
                    error = _(u'Error downloading from URL: %s') % e
                    flash(error, 'error')
                    return self.render_admin_page('import_textpress.html',
                                                  form=form.as_widget(),
                                                  bugs_link=BUGS_LINK)
            elif not feed:
                return redirect_to('import/feed')

            try:
                blog = parse_feed(feed)
            except Exception, e:
                log.exception(_(u'Error parsing uploaded file'))
                flash(_(u'Error parsing feed: %s') % e, 'error')
Example 10
def configure(request):
    """This callback is called from the admin panel if the theme configuration
    page is opened.  Because only the active theme can be configured it's
    perfectly okay to ship the template for the configuration page as part of
    the theme template folder.  No need to register a separate template folder
    just for the admin panel template.
    """
    cfg = request.app.cfg
    form = ConfigurationForm(initial=dict(
        variation=cfg['vessel_theme/variation']
    ))

    if request.method == 'POST':
        if 'cancel' in request.form:
            return form.redirect('admin/theme')
        elif form.validate(request.form):
            flash(_('Color variation changed successfully.'), 'configure')
            cfg.change_single('vessel_theme/variation', form['variation'])
            return form.redirect('admin/theme')

    return render_admin_response('admin/configure_vessel_theme.html',
                                 'options.theme', form=form.as_widget())
Example 11
def configure(request):
    """This callback is called from the admin panel if the theme configuration
    page is opened.  Because only the active theme can be configured it's
    perfectly okay to ship the template for the configuration page as part of
    the theme template folder.  No need to register a separate template folder
    just for the admin panel template.
    """
    cfg = request.app.cfg
    form = ConfigurationForm(initial=dict(
        variation=cfg['kubrick_theme/variation']
    ))

    if request.method == 'POST':
        if 'cancel' in request.form:
            return form.redirect('admin/theme')
        elif form.validate(request.form):
            flash(_('Variation changed successfully.'), 'configure')
            cfg.change_single('kubrick_theme/variation', form['variation'])
            return form.redirect('admin/theme')

    return render_admin_response('admin/configure_kubrick_theme.html',
                                 'options.theme', form=form.as_widget())
Example 12
                try:
                    feed = open_url(form.data['download_url']).stream
                except Exception, e:
                    log.exception(_('Error downloading feed'))
                    flash(_(u'Error downloading from URL: %s') % e, 'error')
            if not feed:
                return redirect_to('import/feed')

            try:
                blog = parse_feed(feed)
            except Exception, e:
                log.exception(_(u'Error parsing uploaded file'))
                flash(_(u'Error parsing feed: %s') % e, 'error')
            else:
                self.enqueue_dump(blog)
                flash(_(u'Added imported items to queue.'))
                return redirect_to('admin/import')

        return self.render_admin_page('admin/import_feed.html',
                                      form=form.as_widget())


class Extension(object):
    """Extensions are instanciated for each parsing process."""
    feed_types = frozenset()

    def __init__(self, app, parser, root):
        self.app = app
        self.parser = parser
        self.root = root
Example 13
    def import_quills(self, blogurl, username, password):
        """Import from Quills using Zope's XML-RPC interface."""
        yield _(u'<p>Beginning Quills import. Attempting to get data...</p>')
        urlparts = urlparse.urlsplit(blogurl)
        urlnetloc = urlparts.netloc
        urlpath = urlparts.path
        if not urlpath.endswith('/'):
            urlpath += '/' # Trailing slash required for XML-RPC
        if username:
            #: We're using simple HTTP auth, which isn't the smartest thing to
            #: do, but Plone's default cookie-auth system is just a base64
            #: encoding of username:password, which isn't any better. Quills
            #: runs on Plone 2.1 and 2.5, neither of which shipped with a more
            #: secure auth mechanism, so we'll just go with what works. HTTP
            #: auth fallback has been supported by every Zope 2.x release.
            urlnetloc = '%s:%s@%s' % (username, password, urlnetloc)
        useblogurl = urlparse.urlunsplit((urlparts.scheme, urlnetloc, urlpath,
                                          '', ''))
        conn = xmlrpclib.ServerProxy(useblogurl)
        title = conn.Title()
        data = conn.zine_export()
        yield _(u'<p>Got data. Parsing for weblog entries and replies.</p>')

        tags = {}
        posts = {}
        authors = {}

        yield _(u'<ol>')
        for entry in data:
            itemtags = []
            for tag in entry['tags']:
                if tag in tags:
                    itemtags.append(tags[tag])
                else:
                    newtag = Tag(gen_slug(tag), tag)
                    tags[tag] = newtag
                    itemtags.append(newtag)
            if entry['author'] in authors:
                author = authors[entry['author']]
            else:
                author = Author(entry['author'], '', '')
                authors[entry['author']] = author
            status = PLONE_STATUS.get(entry['status'], STATUS_PUBLISHED)
            body = reunicode(entry['body'])
            description = reunicode(entry['description'])
            subject = reunicode(entry['title'])
            parser = PLONE_PARSERS.get(entry['format'], 'zeml')
            pub_date = parse_plone_date(entry['date'])

            if description:
                #: Assume description is text/plain. Anything else is unlikely
                if parser in ['zeml', 'html']:
                    body = u'<intro><p>%s</p></intro>%s' % (description, body)
                else:
                    # We don't know how this parser works, so just insert
                    # description before body, with a blank line in between
                    body = u'%s\n\n%s' % (description, body)

            comments = {}

            for comment in entry['replies']:
                c_body = reunicode(comment['body'])
                c_author = comment['author']
                if c_author in authors:
                    c_author = authors[c_author]
                #: Fix for Jace's anon comments hack
                elif c_author.startswith('!'):
                    c_author = c_author[1:]
                c_subject = reunicode(comment['title'])
                if c_subject:
                    c_body = '%s\n\n%s' % (c_subject, c_body)

                comments[comment['id']] = Comment(
                    author=c_author,
                    body=c_body,
                    pub_date=parse_plone_date(comment['date']).astimezone(UTC),
                    author_email=None,
                    author_url=None,
                    remote_addr=None,
                    parent=comment['parent'],
                    parser='text',
                    status=COMMENT_MODERATED
                    )

            # Re-thread comments
            for comment in comments.values():
                comment.parent = comments.get(comment.parent, None)


            posts[entry['id']] = Post(
                slug=gen_timestamped_slug(entry['id'],
                                          'entry', pub_date),
                title=subject,
                link=entry['url'],
                pub_date=pub_date.astimezone(UTC),
                author=authors[entry['author']],
                intro=u'',
                body=body,
                tags=itemtags,
                categories=[],
                comments=comments.values(),
                comments_enabled=entry['allow_comments'],
                pings_enabled=True,
                uid=entry['id'],
                parser=parser,
                content_type='entry',
                status=status
                )
            yield _(u'<li><strong>%s</strong> (by %s; %d comments)</li>') % (
                subject, author.username, len(comments))

        yield _(u'</ol>')
        self.enqueue_dump(Blog(
            title,
            blogurl,
            '',
            'en',
            tags.values(),
            [],
            posts.values(),
            authors.values()))
        flash(_(u'Added imported items to queue.'))

        yield _(u'<p><strong>All done.</strong></p>')
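Both this importer and the LiveJournal importer below thread comments in two passes: every Comment is first built with parent still holding the source system's numeric id, and a second pass rewires those ids to object references, so forward references and deleted parents need no special casing. A stripped-down sketch of the second pass, with plain dicts standing in for Comment objects:

# Keys are source-system comment ids; parent id 99 was never exported.
comments = {1: {'parent': None}, 2: {'parent': 1}, 3: {'parent': 99}}
for c in comments.values():
    # A missing parent id maps to None, so the orphan becomes top-level.
    c['parent'] = comments.get(c['parent'])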
Example 14
class LiveJournalImporter(Importer):
    name = 'livejournal'
    title = 'LiveJournal'

    def import_livejournal(self,
                           username,
                           password,
                           import_what=IMPORT_JOURNAL,
                           community='',
                           security_custom=SECURITY_PROTECTED,
                           categories=[],
                           getcomments=True):
        """Import from LiveJournal using specified parameters."""
        yield _(u'<p>Beginning LiveJournal import. Attempting to login...</p>')
        if import_what != IMPORT_JOURNAL:
            usejournal = community
        else:
            usejournal = None
        lj = LiveJournalConnect(username, password, usejournal)
        result = lj.login(getmoods=0)
        authors = {
            username:
            Author(username=username,
                   email='',
                   real_name=unicode(result['fullname'], 'utf-8'))
        }
        yield _(u'<p>Your name: <strong>%s</strong></p>') % \
                                                    authors[username].real_name
        moodlist = dict([(int(m['id']), unicode(str(m['name']), 'utf-8'))
                         for m in result['moods']])

        result = lj.getusertags()
        tags = dict([
            (tag, Tag(gen_slug(tag), tag))
            for tag in [unicode(t['name'], 'utf-8') for t in result['tags']]
        ])
        yield _(u'<p><strong>Tags:</strong> %s</p>') % _(u', ').join(
            tags.keys())

        ##result = lj.getdaycounts()
        ##daycounts = [(date(*strptime(item['date'], '%Y-%m-%d')[0:3]),
        ##              item['count']) for item in result['daycounts']]
        ##totalposts = sum([x[1] for x in daycounts])
        ##yield _(u'<p>Found <strong>%d</strong> posts on <strong>%d days'\
        ##        u'</strong> between %s and %s.</p>') % (
        ##                                totalposts,
        ##                                len(daycounts),
        ##                                daycounts[0][0].strftime('%Y-%m-%d'),
        ##                                daycounts[-1][0].strftime('%Y-%m-%d'))

        posts = {}

        # Process implemented as per
        # http://www.livejournal.com/doc/server/ljp.csp.entry_downloading.html
        yield _(u'<ul>')
        yield _(u'<li>Getting metadata...</li>')
        result = lj.syncitems()
        sync_items = []
        sync_total = int(result['total'])
        yield _(u'<li>%d items...</li>') % sync_total
        sync_items.extend(result['syncitems'])
        while len(sync_items) < sync_total:
            lastsync = max([
                parse_lj_date(item['time']) for item in sync_items
            ]).strftime('%Y-%m-%d %H:%M:%S')
            yield _(u'<li>Got %d items up to %s...</li>') % (len(sync_items),
                                                             lastsync)
            result = lj.syncitems(lastsync=lastsync)
            sync_items.extend(result['syncitems'])
        yield _(u'<li>Got all %d items.</li>') % len(sync_items)
        yield _(u'</ul>')
        #: Discard non-journal items.
        sync_items = [i for i in sync_items if i['item'].startswith('L-')]
        yield _(u'<p>Downloading <strong>%d</strong> entries...</p>') % len(
            sync_items)
        # Track what items we need to get
        sync_data = {}
        for item in sync_items:
            sync_data[int(item['item'][2:])] = {
                'downloaded': False,
                'time': parse_lj_date(item['time'])
            }

        # Start downloading bodies
        sync_left = [
            sync_data[x] for x in sync_data
            if sync_data[x]['downloaded'] is False
        ]
        if sync_left:
            lastsync = (min([x['time'] for x in sync_left]) -
                        timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')
        while len(sync_left) > 0:
            yield _(u'<p>Getting a batch...</p>')
            try:
                result = lj.getevents(selecttype='syncitems',
                                      lastsync=lastsync)
            except xmlrpclib.Fault, fault:
                if fault.faultCode == 406:
                    # LJ doesn't like us. Go back one second and try again.
                    yield _(u'<p>LiveJournal says we are retrying the same '\
                            u'date and time too often. Trying again with the '\
                            u'time set behind by one second.</p>')
                    lastsync = (
                        parse_lj_date(lastsync) -
                        timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')
                    continue
                else:
                    yield _(u'<p>Process failed. LiveJournal says: '\
                            u'(%d) %s</p>') % (fault.faultCode,
                                               fault.faultString)
                    break

            yield _(u'<ol start="%d">') % (len(posts) + 1)
            for item in result['events']:
                if sync_data[item['itemid']]['downloaded'] is True:
                    # Dupe, thanks to our lastsync time manipulation. Skip.
                    continue
                sync_data[item['itemid']]['downloaded'] = True
                sync_data[item['itemid']]['item'] = item

                subject = item.get('subject', '')
                if isinstance(subject, xmlrpclib.Binary):
                    subject = subject.data
                subject = unicode(str(subject), 'utf-8')
                #: LiveJournal subjects may contain HTML tags. Strip them and
                #: convert HTML entities to Unicode equivalents.
                subject = unescape(
                    tag_re.sub('', ljuser_re.sub('\\2', subject)))
                poster = item.get('poster', username)
                if poster != username and import_what != IMPORT_COMMUNITY_ALL:
                    # Discard, since we don't want this.
                    yield _(
                        u'<li><strong>Discarded:</strong> %s <em>(by %s)</em></li>'
                    ) % (subject, poster)
                    continue
                if poster not in authors:
                    authors[poster] = Author(poster, '', '')
                # Map LiveJournal security codes to Zine status flags
                security = item.get('security', 'public')
                if security == 'usemask' and item['allowmask'] == 1:
                    security = 'friends'
                if security == 'usemask':
                    status = {
                        SECURITY_DISCARD: None,
                        SECURITY_PUBLIC: STATUS_PUBLISHED,
                        SECURITY_PROTECTED: STATUS_PROTECTED,
                        SECURITY_PRIVATE: STATUS_PRIVATE
                    }[security_custom]
                    if status is None:
                        yield _(u'<li><strong>Discarded (masked):</strong> '\
                                u'%s</li>') % subject
                        continue
                else:
                    status = {
                        'public': STATUS_PUBLISHED,
                        'friends': STATUS_PROTECTED,
                        'private': STATUS_PRIVATE,
                    }[security]

                #: Read time as local timezone and then convert to UTC. Zine
                #: doesn't seem to like non-UTC timestamps in imports.
                pub_date = get_timezone().localize(
                    parse_lj_date(item['eventtime'])).astimezone(UTC)
                itemtags = [
                    t.strip() for t in unicode(
                        item['props'].get('taglist', ''), 'utf-8').split(',')
                ]
                while '' in itemtags:
                    itemtags.remove('')
                itemtags = [tags[t] for t in itemtags]
                extras = {}
                if 'current_music' in item['props']:
                    if isinstance(item['props']['current_music'],
                                  xmlrpclib.Binary):
                        extras['current_music'] = unicode(
                            item['props']['current_music'].data, 'utf-8')
                    else:
                        extras['current_music'] = unicode(
                            str(item['props']['current_music']), 'utf-8')
                if 'current_mood' in item['props']:
                    if isinstance(item['props']['current_mood'],
                                  xmlrpclib.Binary):
                        extras['current_mood'] = unicode(
                            item['props']['current_mood'].data, 'utf-8')
                    else:
                        extras['current_mood'] = unicode(
                            str(item['props']['current_mood']), 'utf-8')
                elif 'current_moodid' in item['props']:
                    extras['current_mood'] = moodlist[int(
                        item['props']['current_moodid'])]
                if 'current_coords' in item['props']:
                    if isinstance(item['props']['current_coords'],
                                  xmlrpclib.Binary):
                        extras['current_coords'] = unicode(
                            item['props']['current_coords'].data, 'utf-8')
                    else:
                        extras['current_coords'] = unicode(
                            str(item['props']['current_coords']), 'utf-8')
                if 'current_location' in item['props']:
                    if isinstance(item['props']['current_location'],
                                  xmlrpclib.Binary):
                        extras['current_location'] = unicode(
                            item['props']['current_location'].data, 'utf-8')
                    else:
                        extras['current_location'] = unicode(
                            str(item['props']['current_location']), 'utf-8')
                if 'picture_keyword' in item['props']:
                    if isinstance(item['props']['picture_keyword'],
                                  xmlrpclib.Binary):
                        extras['picture_keyword'] = unicode(
                            item['props']['picture_keyword'].data, 'utf-8')
                    else:
                        extras['picture_keyword'] = unicode(
                            str(item['props']['picture_keyword']), 'utf-8')

                extras['lj_post_id'] = item['itemid']
                extras['original_url'] = item['url']
                posts[item['itemid']] = Post(
                    #: Generate slug. If there's no subject, use '-'+itemid.
                    #: Why the prefix? Because if the user wants %year%/%month%/
                    #: for the post url format and we end up creating a slug
                    #: like 2003/12/1059, it will conflict with the archive
                    #: access path format of %Y/%m/%d and the post will become
                    #: inaccessible, since archive paths take higher priority
                    #: to slugs in zine's urls.py.
                    slug=gen_timestamped_slug(
                        gen_slug(subject) or ('-' + str(item['itemid'])),
                        'entry', pub_date),
                    title=subject,
                    link=item['url'],
                    pub_date=pub_date,
                    author=authors[poster],
                    intro='',
                    body=isinstance(item['event'], xmlrpclib.Binary)
                    and unicode(item['event'].data, 'utf-8')
                    or url_unquote_plus(str(item['event'])),
                    tags=itemtags,
                    categories=[Category(x) for x in categories],
                    comments=[],  # Will be updated later.
                    comments_enabled=not item['props'].get(
                        'opt_nocomments', False),
                    pings_enabled=False,  # LiveJournal did not support pings
                    uid='livejournal;%s;%d' %
                    (usejournal or username, item['itemid']),
                    parser=item['props'].get('opt_preformatted', False)
                    and 'html' or 'livejournal',
                    status=status,
                    extra=extras)
                yield _(u'<li>%s <em>(by %s on %s)</em></li>') % (
                    subject, poster, pub_date.strftime('%Y-%m-%d %H:%M'))
            # Done processing batch.
            yield _(u'</ol>')
            sync_left = [
                sync_data[x] for x in sync_data
                if sync_data[x]['downloaded'] is False
            ]
            if sync_left:
                lastsync = (min([x['time'] for x in sync_left]) -
                            timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')

        # ------------------------------------------------------------------
        if getcomments:
            yield _(u"<p>Importing comments...</p>")

            #: Get session key to use for the HTTP request to retrieve comments.
            ljsession = lj.sessiongenerate(expiration='short',
                                           ipfixed=True)['ljsession']

            #: See http://www.livejournal.com/bots/ and
            #: http://www.livejournal.com/doc/server/ljp.csp.auth.cookies.html
            headers = {
                'X-LJ-Auth': 'cookie', # Needed only for flat interface, but anyway
                'Cookie': 'ljsession=%s' % ljsession,
                'User-Agent': 'LiveJournal-Zine/%s '\
                              '(http://bitbucket.org/jace/zine-plugins; '\
                              '<jace at pobox dot com>; en-IN)' % __version__
                }

            c_usermap = {}  # User id to LJ user name
            c_info = {}  # id: {'posterid', 'state'}

            c_startid = 0
            c_maxid = None

            while c_maxid is None or c_startid <= c_maxid:
                #: See http://www.livejournal.com/developer/exporting.bml and
                #: http://www.livejournal.com/doc/server/ljp.csp.export_comments.html
                conn = HTTPHandler(urlparse.urlsplit(
                    LIVEJOURNAL_COMMENTS + '?get=comment_meta&startid=%d%s' %
                    (c_startid, usejournal and '&authas=%s' % usejournal or '')
                ),
                                   timeout=TIMEOUT,
                                   method='GET')
                conn.headers.extend(headers)
                yield _(
                    u'<p>Retrieving comment metadata starting from %d...</p>'
                ) % c_startid
                c_metadata = etree.fromstring(conn.open().data)

                if not c_maxid:
                    if c_metadata.find('maxid') is not None:
                        c_maxid = int(c_metadata.find('maxid').text)

                for user in c_metadata.find('usermaps'):
                    c_usermap[int(user.attrib['id'])] = user.attrib['user']

                for comment in c_metadata.find('comments'):
                    c_id = int(comment.attrib['id'])
                    c_userid = int(comment.attrib.get('posterid', '0'))
                    c_username = c_usermap.get(c_userid,
                                               u'')  # Anonymous == blank
                    if c_userid != 0:
                        c_website = url_to_journal(c_username)
                    else:
                        c_website = u''
                    c_info[c_id] = dict(
                        userid=c_userid,
                        username=c_username,
                        author=authors.get(c_username, None),
                        website=c_website,
                        state={
                            'D': COMMENT_DELETED,
                            'S': COMMENT_BLOCKED_USER,
                            'F': COMMENT_MODERATED,  # No Frozen state in Zine
                            'A': COMMENT_MODERATED
                        }[comment.attrib.get('state', 'A')])

                if not c_maxid:
                    yield _(u'<p>Something wrong with comment retrieval. '\
                            u'LiveJournal will not tell us how many there are. '\
                            u'Aborting.</p>')
                    break
                c_startid = max(c_info.keys()) + 1

            yield _(
                u'<p>Got metadata for %d comments. Retrieving bodies...</p>'
            ) % len(c_info)

            c_startid = 0  # Start over again for comment bodies
            comments = {}  # Holds Comment objects.
            while c_startid <= c_maxid:
                conn = HTTPHandler(urlparse.urlsplit(
                    LIVEJOURNAL_COMMENTS + '?get=comment_body&startid=%d%s' %
                    (c_startid, usejournal and "&authas=%s" % usejournal or '')
                ),
                                   timeout=TIMEOUT,
                                   method='GET')
                conn.headers.extend(headers)
                yield _(u'<p>Retrieving comment bodies starting from %d...</p>'
                        ) % c_startid
                yield _(u'<ol>')
                c_bodies = etree.fromstring(conn.open().data)
                for comment in c_bodies.find('comments'):
                    c_id = int(comment.attrib['id'])
                    info = c_info[c_id]
                    bodytag = comment.find('body')
                    subjecttag = comment.find('subject')
                    body = bodytag is not None and bodytag.text or u''
                    if subjecttag is not None:
                        body = u'<span class="subject">%s</span>\n%s' % (
                            subjecttag.text, body)
                    datetag = comment.find('date')
                    if datetag is None:  # Deleted comments have no date
                        pub_date = None
                    else:
                        pub_date = UTC.localize(
                            datetime(*(strptime(
                                comment.find('date').text,
                                '%Y-%m-%dT%H:%M:%SZ')[:6])))
                    remote_addr = None
                    if comment.find('property'):
                        for property in comment.find('property'):
                            if property.attrib['name'] == 'poster_ip':
                                remote_addr = property.text
                    comments[c_id] = Comment(
                        author=info['author'] or info['username'],
                        body=body,
                        author_email=None,
                        author_url=not info['author'] and info['website']
                        or None,
                        parent='parentid' in comment.attrib
                        and int(comment.attrib['parentid']) or None,
                        pub_date=pub_date,
                        remote_addr=remote_addr,
                        parser=u'livejournal',
                        status=info['state'],
                    )
                    postid = int(comment.attrib['jitemid'])
                    c_info[c_id]['postid'] = postid
                    if postid in posts:
                        posts[postid].comments.append(comments[c_id])
                    else:
                        # Orphan comment, either because post was dropped or
                        # because it is not downloaded yet (only when testing)
                        yield _(
                            u'<li>Dropping orphan comment %d on missing post %d.</li>'
                        ) % (c_id, postid)
                c_startid = max(comments.keys()) + 1
                yield _(u'</ol>')
            # Calculate timestamps for deleted comments.
            yield _(u'<p>Guessing timestamps for deleted comments...</p>')
            sortedcomments = comments.keys()
            sortedcomments.sort()
            totalcomments = len(sortedcomments)
            for counter in range(totalcomments):
                comment = comments[sortedcomments[counter]]
                if comment.pub_date is None:
                    prev_time = comments[sortedcomments[max(0, counter -
                                                            1)]].pub_date
                    next_time = comments[sortedcomments[min(
                        totalcomments - 1, counter + 1)]].pub_date
                    if prev_time is None and next_time is None:
                        # No luck with finding time from neighbouring
                        # comments. Let's look for the post instead.
                        c_id = sortedcomments[counter]
                        postid = c_info[c_id]['postid']
                        if postid in posts:
                            new_time = posts[postid].pub_date
                        # else: orphaned comment, anyway. don't bother.
                    elif next_time is None:
                        new_time = prev_time
                    elif prev_time is None:
                        new_time = next_time
                    else:
                        # Midway between previous and next
                        new_time = prev_time + (next_time - prev_time) / 2
                    # Save new timestamp
                    comment.pub_date = new_time
            # Re-thread comments
            yield _(u'<p>Rethreading comments...</p>')
            for comment in comments.values():
                comment.parent = comments.get(comment.parent, None)
        else:
            yield _(u'<p>Skipping comment import.</p>')
        # --------------------------------------------------------------------

        self.enqueue_dump(
            Blog(usejournal or username, url_to_journal(username), '', 'en',
                 tags.values(), [], posts.values(), authors.values()))
        flash(_(u'Added imported items to queue.'))

        yield _(u'<p><strong>All done.</strong></p>')
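The deleted-comment pass above has no timestamp to work with, so it interpolates: the midpoint of the nearest dated neighbours, a single neighbour's date if only one is available, or the parent post's date as a last resort. A worked instance of the midpoint computation:

from datetime import datetime

prev_time = datetime(2009, 1, 1, 12, 0)
next_time = datetime(2009, 1, 1, 14, 0)
# Midway between previous and next, as in the importer above.
new_time = prev_time + (next_time - prev_time) / 2
assert new_time == datetime(2009, 1, 1, 13, 0)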
Example 15
    image_dir = req.args.get('images_directory')
    base_url = req.args.get('base_url')
    thumb_max_width = req.args.get('thumb_max_width')
    thumb_max_height = req.args.get('thumb_max_height')

    if image_dir:
        try:
            req.app.cfg.change_single('img_upload/images_directory', image_dir)
        except ConfigurationTransactionError, e:
            flash(_('The images directory could not be changed.'), 'error')

    if base_url:
        try:
            req.app.cfg.change_single('img_upload/base_url', base_url)
        except ConfigurationTransactionError, e:
            flash(_('The base url could not be changed.'), 'error')

    if thumb_max_width:
        try:
            req.app.cfg.change_single('img_upload/thumb_max_width', thumb_max_width)
        except ConfigurationTransactionError, e:
            flash(_('The thumb max width could not be changed.'), 'error')

    if thumb_max_height:
        try:
            req.app.cfg.change_single('img_upload/thumb_max_height', thumb_max_height)
        except ConfigurationTransactionError, e:
            flash(_('The thumb max height could not be changed.'), 'error')

    return render_admin_response('admin/img_uploader.html',
            images_directory=req.app.cfg['img_upload/images_directory'],