コード例 #1
0
ファイル: publish.py プロジェクト: sheyril/bridgy
    def _get_or_add_publish_entity(self, source_url):
        """Returns an existing completed Publish for source_url, or creates one.

        Looks up (or creates) the PublishedPage for source_url, fails fast if
        another publish of the same page by this source is already pending,
        then reuses a completed non-preview Publish if one exists, otherwise
        stores and returns a new one.

        Args:
          source_url: string

        Raises:
          CollisionError: if a concurrent publish for the same page is pending.
        """
        page = PublishedPage.get_or_insert(source_url)

        # Detect concurrent publish request for the same page
        # https://github.com/snarfed/bridgy/issues/996
        pending = Publish.query(Publish.status == 'new',
                                Publish.type != 'preview',
                                Publish.source == self.source.key,
                                ancestor=page.key).get()
        if pending:
            logging.warning(
                f'Collided with publish: {pending.key.urlsafe().decode()}')
            raise CollisionError()

        # Reuse an existing completed (non-preview) publish of this page by
        # this source, if any.
        entity = Publish.query(Publish.status == 'complete',
                               Publish.type != 'preview',
                               Publish.source == self.source.key,
                               ancestor=page.key).get()
        if entity is None:
            entity = Publish(parent=page.key, source=self.source.key)
            if self.PREVIEW:
                entity.type = 'preview'
            entity.put()

        logging.debug("Publish entity: '%s'", entity.key.urlsafe().decode())
        return entity
コード例 #2
0
    def test_already_published(self):
        """We shouldn't allow duplicating an existing, *completed* publish."""
        page = PublishedPage(id='http://foo.com/bar')

        # these are all fine: only a completed, non-preview publish should
        # block a new attempt
        Publish(parent=page.key, source=self.source.key, status='new').put()
        Publish(parent=page.key, source=self.source.key, status='failed').put()
        Publish(parent=page.key,
                source=self.source.key,
                status='complete',
                type='preview').put()

        # two fetches: one for the publish, one for the later preview
        for _ in range(2):
            self.expect_requests_get('http://foo.com/bar',
                                     self.post_html % 'foo')
        self.mox.ReplayAll()

        # first attempt should work
        self.assert_created('foo - http://foo.com/bar')
        self.assertEqual(4, Publish.query().count())
        self.assertEqual(2,
                         Publish.query(Publish.status == 'complete').count())

        # now that there's a complete Publish entity, more attempts should fail
        self.assert_error("Sorry, you've already published that page")
        # try again to test for a bug we had where a second try would succeed
        self.assert_error("Sorry, you've already published that page")
        # should still be able to preview though
        self.assert_success('preview of foo', preview=True)
コード例 #3
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_already_published(self):
    """We shouldn't allow duplicating an existing, *completed* publish."""
    page = PublishedPage(id='http://foo.com/bar')

    # these are all fine: only a completed, non-preview publish should block
    Publish(parent=page.key, source=self.source.key, status='new').put()
    Publish(parent=page.key, source=self.source.key, status='failed').put()
    Publish(parent=page.key, source=self.source.key, status='complete',
            type='preview').put()

    # two fetches: one for the publish, one for the later preview
    for _ in range(2):
      self.expect_requests_get('http://foo.com/bar', self.post_html % 'foo')
    self.mox.ReplayAll()

    # first attempt should work
    self.assert_created('foo - http://foo.com/bar')
    self.assertEqual(4, Publish.query().count())
    self.assertEqual(2, Publish.query(Publish.status == 'complete').count())

    # now that there's a complete Publish entity, more attempts should fail
    self.assert_error("Sorry, you've already published that page")
    # try again to test for a bug we had where a second try would succeed
    self.assert_error("Sorry, you've already published that page")
    # should still be able to preview though
    self.assert_success('preview of foo', preview=True)
コード例 #4
0
    def test_bad_source(self):
        """Publish attempts against missing or ineligible sources should fail."""
        # no source
        self.source.key.delete()
        self.assert_error(
            'Could not find <b>FakeSource</b> account for <b>foo.com</b>.')

        # source without publish feature
        self.source.features = ['listen']
        self.source.put()
        msg = 'Publish is not enabled'
        self.assert_error(msg)

        # status disabled
        self.source.features = ['publish']
        self.source.status = 'disabled'
        self.source.put()
        self.assert_error(msg)

        # two bad sources with same domain
        source_2 = self.source = testutil.FakeSource(id='z',
                                                     **self.source.to_dict())
        source_2.status = 'enabled'
        source_2.features = ['listen']
        source_2.put()
        self.assert_error(msg)

        # one bad source, one good source, same domain. should automatically use the
        # good source.
        source_2.features.append('publish')
        source_2.put()
        self.expect_requests_get('http://foo.com/bar', self.post_html % 'xyz')
        self.mox.ReplayAll()
        self.assert_created('xyz - http://foo.com/bar')
        self.assertEqual(source_2.key, Publish.query().get().source)
コード例 #5
0
  def test_no_content(self):
    """An h-entry with no content should fail and record a failed Publish."""
    self.expect_requests_get('http://foo.com/bar',
                             '<article class="h-entry"></article>')
    self.mox.ReplayAll()

    self.assert_error('Could not find content')
    self.assertEqual('failed', Publish.query().get().status)
コード例 #6
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_facebook_comment_and_like_disabled(self):
    """Publishing Facebook comments and likes should be rejected."""
    self.source = facebook.FacebookPage(id='789', features=['publish'],
                                        domains=['mr.x'])
    self.source.put()

    self.expect_requests_get('http://mr.x/like', """
    <article class="h-entry">
      <a class="u-like-of" href="http://facebook.com/789/posts/456">liked this</a>
      <a href="http://localhost/publish/facebook"></a>
    </article>""")
    self.expect_requests_get('http://mr.x/comment', """
    <article class="h-entry">
      <a class="u-in-reply-to" href="http://facebook.com/789/posts/456">reply</a>
      <a href="http://localhost/publish/facebook"></a>
    </article>""")
    self.mox.ReplayAll()

    self.assert_error('Facebook comments and likes are no longer supported',
                      source='http://mr.x/like',
                      target='https://brid.gy/publish/facebook')
    self.assertEqual('failed', Publish.query().get().status)

    # preview should be rejected too
    self.assert_error('Facebook comments and likes are no longer supported',
                      source='http://mr.x/comment',
                      target='https://brid.gy/publish/facebook',
                      preview=True)
コード例 #7
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_no_content(self):
    """An h-entry note with no content should fail and record a failed Publish."""
    self.expect_requests_get('http://foo.com/bar',
                             '<article class="h-entry h-as-note"></article>')
    self.mox.ReplayAll()

    self.assert_error('or no content was found')
    self.assertEqual('failed', Publish.query().get().status)
コード例 #8
0
    def test_facebook_comment_and_like_disabled(self):
        """Publishing Facebook comments and likes should be rejected."""
        self.source = facebook.FacebookPage(id='789',
                                            features=['publish'],
                                            domains=['mr.x'])
        self.source.domain_urls = ['http://mr.x/']
        self.source.put()

        self.expect_requests_get(
            'http://mr.x/like', """
    <article class="h-entry">
      <a class="u-like-of" href="http://facebook.com/789/posts/456">liked this</a>
      <a href="http://localhost/publish/facebook"></a>
    </article>""")
        self.expect_requests_get(
            'http://mr.x/comment', """
    <article class="h-entry">
      <a class="u-in-reply-to" href="http://facebook.com/789/posts/456">reply</a>
      <a href="http://localhost/publish/facebook"></a>
    </article>""")
        self.mox.ReplayAll()

        self.assert_error(
            'Facebook comments and likes are no longer supported',
            source='http://mr.x/like',
            target='https://brid.gy/publish/facebook')
        self.assertEqual('failed', Publish.query().get().status)

        # preview should be rejected too
        self.assert_error(
            'Facebook comments and likes are no longer supported',
            source='http://mr.x/comment',
            target='https://brid.gy/publish/facebook',
            preview=True)
コード例 #9
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_bad_source(self):
    """Publish attempts against missing or ineligible sources should fail."""
    # no source
    self.source.key.delete()
    self.assert_error('Could not find <b>FakeSource</b> account for <b>foo.com</b>.')

    # source without publish feature
    self.source.features = ['listen']
    self.source.put()
    msg = 'Publish is not enabled'
    self.assert_error(msg)

    # status disabled
    self.source.features = ['publish']
    self.source.status = 'disabled'
    self.source.put()
    self.assert_error(msg)

    # two bad sources with same domain
    source_2 = self.source = testutil.FakeSource(id='z', **self.source.to_dict())
    source_2.status = 'enabled'
    source_2.features = ['listen']
    source_2.put()
    self.assert_error(msg)

    # one bad source, one good source, same domain. should automatically use the
    # good source.
    source_2.features.append('publish')
    source_2.put()
    self.expect_requests_get('http://foo.com/bar', self.post_html % 'xyz')
    self.mox.ReplayAll()
    self.assert_created('xyz - http://foo.com/bar')
    self.assertEqual(source_2.key, Publish.query().get().source)
コード例 #10
0
    def test_no_content(self):
        """An h-entry with no content should fail and record a failed Publish."""
        self.expect_requests_get('http://foo.com/bar',
                                 '<article class="h-entry"></article>')
        self.mox.ReplayAll()

        self.assert_error('Could not find content')
        self.assertEqual('failed', Publish.query().get().status)
コード例 #11
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_type_not_implemented(self):
    """Unsupported post types should fail and record a failed Publish."""
    self.expect_requests_get('http://foo.com/bar',
                             '<article class="h-entry h-as-like"></article>')
    self.mox.ReplayAll()

    # FakeSource.create() raises NotImplementedError on likes
    self.assert_error('Cannot publish likes')
    self.assertEqual('failed', Publish.query().get().status)
コード例 #12
0
  def test_no_content_ignore_formatting(self):
    """No-content failure should also apply with bridgy_ignore_formatting."""
    self.expect_requests_get('http://foo.com/bar',
                             '<article class="h-entry"></article>')
    self.mox.ReplayAll()

    self.assert_error('Could not find content',
                      params={'bridgy_ignore_formatting': ''})
    self.assertEqual('failed', Publish.query().get().status)
コード例 #13
0
    def test_no_content_ignore_formatting(self):
        """No-content failure should also apply with bridgy_ignore_formatting."""
        self.expect_requests_get('http://foo.com/bar',
                                 '<article class="h-entry"></article>')
        self.mox.ReplayAll()

        self.assert_error('Could not find content',
                          params={'bridgy_ignore_formatting': ''})
        self.assertEqual('failed', Publish.query().get().status)
コード例 #14
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_source_missing_mf2(self):
    """A page with no microformats2 should fail but still store entities."""
    self.expect_requests_get('http://foo.com/bar', '')
    self.mox.ReplayAll()
    self.assert_error('No microformats2 data found in http://foo.com/')

    # the failed attempt is still recorded
    self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
    publish = Publish.query().get()
    self.assertEqual('failed', publish.status)
    self.assertEqual(self.source.key, publish.source)
コード例 #15
0
ファイル: publish_test.py プロジェクト: notenoughneon/bridgy
  def test_rsvp_without_in_reply_to(self):
    self.expect_requests_get('http://foo.com/bar', """
<article class="h-entry">
<p class="e-content">
<data class="p-rsvp" value="yes">I'm in!</data>
</p></article>""")
    self.mox.ReplayAll()
    self.assert_error("looks like an RSVP, but it's missing an in-reply-to link")
    self.assertEquals('failed', Publish.query().get().status)
コード例 #16
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
 def test_source_with_multiple_domains(self):
   """Publish domain is second in source's domains list."""
   self.source.domains = ['baj.com', 'foo.com']
   self.source.domain_urls = ['http://baj.com/', 'http://foo.com/']
   self.source.put()
   self.expect_requests_get('http://foo.com/bar', self.post_html % 'xyz')
   self.mox.ReplayAll()
   self.assert_created('xyz - http://foo.com/bar')
   self.assertEqual(self.source.key, Publish.query().get().source)
コード例 #17
0
 def test_source_with_multiple_domains(self):
     """Publish domain is second in source's domains list."""
     self.source.domains = ['baj.com', 'foo.com']
     self.source.domain_urls = ['http://baj.com/', 'http://foo.com/']
     self.source.put()
     self.expect_requests_get('http://foo.com/bar', self.post_html % 'xyz')
     self.mox.ReplayAll()
     self.assert_created('xyz - http://foo.com/bar')
     self.assertEqual(self.source.key, Publish.query().get().source)
コード例 #18
0
    def test_source_missing_mf2(self):
        """A page with no microformats2 should fail but still store entities."""
        self.expect_requests_get('http://foo.com/bar', '')
        self.mox.ReplayAll()
        self.assert_error('No microformats2 data found in http://foo.com/')

        # the failed attempt is still recorded
        self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
        publish = Publish.query().get()
        self.assertEqual('failed', publish.status)
        self.assertEqual(self.source.key, publish.source)
コード例 #19
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_interactive_oauth_decline(self):
    """Declining the OAuth prompt should redirect without creating a Publish."""
    self.auth_entity = None
    resp = self.get_response(interactive=True)
    self.assertEqual(302, resp.status_int)
    self.assertEqual(
      'http://localhost/fake/foo.com#!'
      'If you want to publish or preview, please approve the prompt.',
      urllib.unquote_plus(resp.headers['Location']))

    self.assertIsNone(Publish.query().get())
コード例 #20
0
    def test_interactive_oauth_decline(self):
        """Declining the OAuth prompt should redirect without creating a Publish."""
        self.auth_entity = None
        resp = self.get_response(interactive=True)
        self.assertEqual(302, resp.status_int)
        self.assertEqual(
            'http://localhost/fake/foo.com#!'
            'If you want to publish or preview, please approve the prompt.',
            urllib.unquote_plus(resp.headers['Location']))

        self.assertIsNone(Publish.query().get())
コード例 #21
0
    def test_type_not_implemented(self):
        self.expect_requests_get(
            'http://foo.com/bar', """
<article class="h-entry"><a class="u-like-of" href="xyz">W</a></article>""")
        self.expect_requests_get('http://foo.com/xyz', '')
        self.mox.ReplayAll()

        # FakeSource.create() raises NotImplementedError on likes
        self.assert_error('Cannot publish likes')
        self.assertEquals('failed', Publish.query().get().status)
コード例 #22
0
ファイル: publish_test.py プロジェクト: sanduhrs/bridgy
  def test_source_with_multiple_domains(self):
    """Publish domain is second in source's domains list."""
    self.source.domains = ['baj.com', 'foo.com']
    self.source.domain_urls = ['http://baj.com/', 'http://foo.com/']
    self.source.put()
    self.expect_requests_get('http://foo.com/bar', """
<article class="h-entry"><p class="e-content">xyz</p></article>""")
    self.mox.ReplayAll()
    self.assert_success('xyz - http://foo.com/bar')
    self.assertEquals(self.source.key, Publish.query().get().source)
コード例 #23
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_returned_type_overrides(self):
    # FakeSource returns type 'post' when it sees 'rsvp'
    self.expect_requests_get('http://foo.com/bar', """
<article class="h-entry h-as-rsvp">
<p class="e-content">
<data class="p-rsvp" value="yes"></data>
<a class="u-in-reply-to" href="http://fa.ke/event"></a>
</p></article>""")
    self.mox.ReplayAll()
    self.assert_created('')
    self.assertEquals('post', Publish.query().get().type)
コード例 #24
0
    def test_interactive_no_state(self):
        """https://github.com/snarfed/bridgy/issues/449"""
        self.oauth_state = None
        resp = self.get_response(interactive=True)
        self.assertEqual(302, resp.status_int)
        self.assertEqual(
            'http://localhost/#!'
            'If you want to publish or preview, please approve the prompt.',
            urllib.unquote_plus(resp.headers['Location']))

        # no Publish entity should have been created
        self.assertIsNone(Publish.query().get())
コード例 #25
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_interactive_no_state(self):
    """https://github.com/snarfed/bridgy/issues/449"""
    self.oauth_state = None
    resp = self.get_response(interactive=True)
    self.assertEqual(302, resp.status_int)
    self.assertEqual(
      'http://localhost/#!'
      'If you want to publish or preview, please approve the prompt.',
      urllib.unquote_plus(resp.headers['Location']))

    # no Publish entity should have been created
    self.assertIsNone(Publish.query().get())
コード例 #26
0
    def test_interactive_from_wrong_user_page(self):
        """Publishing from another user's page should redirect with an error."""
        other_source = testutil.FakeSource.new(None).put()
        self.oauth_state['source_key'] = other_source.urlsafe()

        resp = self.get_response(interactive=True)
        self.assertEqual(302, resp.status_int)
        self.assertEqual(
            'http://localhost/fake/%s#!'
            'Please log into FakeSource as fake to publish that page.' %
            other_source.id(), urllib.unquote_plus(resp.headers['Location']))

        # no Publish entity should have been created
        self.assertIsNone(Publish.query().get())
コード例 #27
0
    def test_returned_type_overrides(self):
        # FakeSource returns type 'post' when it sees 'rsvp'
        self.expect_requests_get(
            'http://foo.com/bar', """
<article class="h-entry">
<p class="e-content">
<data class="p-rsvp" value="yes"></data>
<a class="u-in-reply-to" href="http://fa.ke/event"></a>
</p></article>""")
        self.mox.ReplayAll()
        self.assert_created('')
        self.assertEquals('post', Publish.query().get().type)
コード例 #28
0
ファイル: publish_test.py プロジェクト: notenoughneon/bridgy
  def test_embedded_type_not_implemented(self):
    self.expect_requests_get('http://foo.com/bar', """
<article class="h-entry">
  <div class="p-like-of">
    foo <a class="u-url" href="http://url">bar</a>
  </div>
</article>""")
    self.mox.ReplayAll()

    # FakeSource.create() raises NotImplementedError on likes
    self.assert_error("FakeSource doesn't support type(s) like-of")
    self.assertEquals('failed', Publish.query().get().status)
コード例 #29
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_embedded_type_not_implemented(self):
    self.expect_requests_get('http://foo.com/bar', """
<article class="h-entry">
  <div class="p-like-of">
    foo <a class="u-url" href="http://url">bar</a>
  </div>
</article>""")
    self.mox.ReplayAll()

    # FakeSource.create() returns an error message for verb='like'
    self.assert_error("Cannot publish likes")
    self.assertEquals('failed', Publish.query().get().status)
コード例 #30
0
ファイル: test_publish.py プロジェクト: singpolyma/bridgy
 def _check_entity(self):
   """Asserts the stored PublishedPage and Publish entities look as expected."""
   self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
   publish = Publish.query().get()
   self.assertEqual(self.source.key, publish.source)
   self.assertEqual('complete', publish.status)
   self.assertEqual('post', publish.type)
   self.assertEqual('FakeSource post label', publish.type_label)
   # stored html should include the backlink appended during publish
   expected_html = (self.post_html % 'foo') + self.backlink
   self.assertEqual(expected_html, publish.html)
   self.assertEqual({'id': 'fake id', 'url': 'http://fake/url',
                     'content': 'foo - http://foo.com/bar'},
                    publish.published)
コード例 #31
0
ファイル: publish_test.py プロジェクト: sanduhrs/bridgy
  def test_preview(self):
    """Preview should render the content and store a 'preview' Publish."""
    html = '<article class="h-entry"><p class="e-content">foo</p></article>'
    self.expect_requests_get('http://foo.com/bar', html)
    # make sure create() isn't called
    self.mox.StubOutWithMock(self.source.as_source, 'create', use_mock_anything=True)
    self.mox.ReplayAll()
    self.assert_success('preview of foo - http://foo.com/bar', preview=True)

    publish = Publish.query().get()
    self.assertEqual(self.source.key, publish.source)
    self.assertEqual('complete', publish.status)
    self.assertEqual('preview', publish.type)
    self.assertEqual(html, publish.html)
コード例 #32
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_preview(self):
    """Preview should render the content and store a 'preview' Publish."""
    html = self.post_html % 'foo'
    self.expect_requests_get('http://foo.com/bar', html)
    # make sure create() isn't called
    self.mox.StubOutWithMock(self.source.gr_source, 'create', use_mock_anything=True)
    self.mox.ReplayAll()
    self.assert_success('preview of foo', preview=True)

    publish = Publish.query().get()
    self.assertEqual(self.source.key, publish.source)
    self.assertEqual('complete', publish.status)
    self.assertEqual('preview', publish.type)
    self.assertEqual(html + self.backlink, publish.html)
コード例 #33
0
ファイル: test_publish.py プロジェクト: Maymanaf/bridgy
  def test_interactive_from_wrong_user_page(self):
    """Publishing from another user's page should redirect with an error."""
    other_source = testutil.FakeSource.new(None).put()
    self.oauth_state['source_key'] = other_source.urlsafe()

    resp = self.get_response(interactive=True)
    self.assertEqual(302, resp.status_int)
    self.assertEqual(
      'http://localhost/fake/%s#!'
      'Please log into FakeSource as fake to publish that page.' %
      other_source.id(),
      urllib.unquote_plus(resp.headers['Location']))

    # no Publish entity should have been created
    self.assertIsNone(Publish.query().get())
コード例 #34
0
    def test_embedded_type_not_implemented(self):
        self.expect_requests_get(
            'http://foo.com/bar', """
<article class="h-entry">
  <div class="p-like-of">
    foo <a class="u-url" href="http://url">bar</a>
  </div>
</article>""")
        self.mox.ReplayAll()

        # FakeSource.create() returns an error message for verb='like'
        self.assert_error("Cannot publish likes")
        self.assertEquals('failed', Publish.query().get().status)
コード例 #35
0
 def test_multiple_users_on_domain(self):
   """With multiple sources on one domain, the first matching one is used."""
   source_2 = testutil.FakeSource(
     id='foo.com/b', features=['publish'], domains=['foo.com'],
     domain_urls=['http://foo.com/b'], auth_entity=self.auth_entity.key)
   source_2.put()
   source_3 = testutil.FakeSource(
     id='foo.com/c', features=['publish'], domains=['foo.com'],
     domain_urls=['http://foo.com/c'], auth_entity=self.auth_entity.key)
   source_3.put()
   self.expect_requests_get('http://foo.com/bar', self.post_html % 'foo')
   self.mox.ReplayAll()
   self.assert_created('foo - http://foo.com/bar', interactive=False)
   self.assertEqual(source_2.key, Publish.query().get().source)
コード例 #36
0
    def test_preview(self):
        """Preview should render the content and store a 'preview' Publish."""
        html = self.post_html % 'foo'
        self.expect_requests_get('http://foo.com/bar', html)
        # make sure create() isn't called
        self.mox.StubOutWithMock(self.source.gr_source,
                                 'create',
                                 use_mock_anything=True)
        self.mox.ReplayAll()
        self.assert_success('preview of foo', preview=True)

        publish = Publish.query().get()
        self.assertEqual(self.source.key, publish.source)
        self.assertEqual('complete', publish.status)
        self.assertEqual('preview', publish.type)
        self.assertEqual(html + self.backlink, publish.html)
コード例 #37
0
ファイル: publish.py プロジェクト: stedn/bridgy
    def _get_or_add_publish_entity(self, source_url):
        """Returns an existing completed Publish for source_url, or creates one.

        Args:
          source_url: string
        """
        page = PublishedPage.get_or_insert(source_url)
        # reuse an existing completed (non-preview) publish of this page by
        # this source, if any
        entity = Publish.query(Publish.status == 'complete',
                               Publish.type != 'preview',
                               Publish.source == self.source.key,
                               ancestor=page.key).get()

        if entity is None:
            entity = Publish(parent=page.key, source=self.source.key)
            if self.PREVIEW:
                entity.type = 'preview'
            entity.put()

        logging.debug("Publish entity: '%s'", entity.key.urlsafe().decode())
        return entity
コード例 #38
0
ファイル: publish_test.py プロジェクト: sanduhrs/bridgy
  def test_success(self):
    """A successful publish should store a completed Publish entity."""
    html = '<article class="h-entry"><p class="e-content">foo</p></article>'
    self.expect_requests_get('http://foo.com/bar', html)
    self.mox.ReplayAll()
    self.assert_success('foo - http://foo.com/bar')

    self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
    publish = Publish.query().get()
    self.assertEqual(self.source.key, publish.source)
    self.assertEqual('complete', publish.status)
    self.assertEqual('post', publish.type)
    self.assertEqual('FakeSource post label', publish.type_label)
    self.assertEqual(html, publish.html)
    self.assertEqual({'id': 'fake id', 'url': 'http://fake/url',
                      'content': 'foo - http://foo.com/bar'},
                     publish.published)
コード例 #39
0
 def test_multiple_users_on_domain(self):
     """With multiple sources on one domain, the first matching one is used."""
     source_2 = testutil.FakeSource(id='foo.com/b',
                                    features=['publish'],
                                    domains=['foo.com'],
                                    domain_urls=['http://foo.com/b'],
                                    auth_entity=self.auth_entity.key)
     source_2.put()
     source_3 = testutil.FakeSource(id='foo.com/c',
                                    features=['publish'],
                                    domains=['foo.com'],
                                    domain_urls=['http://foo.com/c'],
                                    auth_entity=self.auth_entity.key)
     source_3.put()
     self.expect_requests_get('http://foo.com/bar', self.post_html % 'foo')
     self.mox.ReplayAll()
     self.assert_created('foo - http://foo.com/bar', interactive=False)
     self.assertEqual(source_2.key, Publish.query().get().source)
コード例 #40
0
 def _check_entity(self, content='foo', html_content=None):
   """Asserts the stored PublishedPage and Publish entities look as expected.

   Args:
     content: string, expected published content
     html_content: string, content substituted into post_html; defaults to
       content
   """
   if html_content is None:
     html_content = content
   self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
   publish = Publish.query().get()
   self.assertEqual(self.source.key, publish.source)
   self.assertEqual('complete', publish.status)
   self.assertEqual('post', publish.type)
   self.assertEqual('FakeSource post label', publish.type_label)
   # stored html should include the backlink appended during publish
   expected_html = (self.post_html % html_content) + self.backlink
   self.assertEqual(expected_html, publish.html)
   self.assertEqual({
     'id': 'fake id',
     'url': 'http://fake/url',
     'content': '%s - http://foo.com/bar' % content,
     'granary_message': 'granary message',
   }, publish.published)
コード例 #41
0
 def _check_entity(self, content='foo', html_content=None):
     """Asserts the stored PublishedPage and Publish entities look as expected.

     Args:
       content: string, expected published content
       html_content: string, content substituted into post_html; defaults to
         content
     """
     if html_content is None:
         html_content = content
     self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
     publish = Publish.query().get()
     self.assertEqual(self.source.key, publish.source)
     self.assertEqual('complete', publish.status)
     self.assertEqual('post', publish.type)
     self.assertEqual('FakeSource post label', publish.type_label)
     # stored html should include the backlink appended during publish
     expected_html = (self.post_html % html_content) + self.backlink
     self.assertEqual(expected_html, publish.html)
     self.assertEqual(
         {
             'id': 'fake id',
             'url': 'http://fake/url',
             'content': '%s - http://foo.com/bar' % content,
             'granary_message': 'granary message',
         }, publish.published)
コード例 #42
0
    def get_or_add_publish_entity(self, source_url):
        """Creates and stores Publish and (if necessary) PublishedPage entities.

        Reuses an existing completed, non-preview Publish for this page and
        source if one exists; otherwise stores and returns a new Publish
        (typed 'preview' when self.PREVIEW is set).

        Args:
          source_url: string
        """
        page = PublishedPage.get_or_insert(source_url)
        entity = Publish.query(Publish.status == 'complete',
                               Publish.type != 'preview',
                               Publish.source == self.source.key,
                               ancestor=page.key).get()

        if entity is None:
            entity = Publish(parent=page.key, source=self.source.key)
            if self.PREVIEW:
                entity.type = 'preview'
            entity.put()

        logging.debug('Publish entity: %s', entity.key.urlsafe())
        return entity
コード例 #43
0
ファイル: publish.py プロジェクト: lcorbasson/bridgy
  def get_or_add_publish_entity(self, source_url):
    """Creates and stores Publish and (if necessary) PublishedPage entities.

    Reuses an existing completed, non-preview Publish for this page and source
    if one exists; otherwise stores and returns a new Publish (typed 'preview'
    when self.PREVIEW is set).

    Args:
      source_url: string
    """
    page = PublishedPage.get_or_insert(source_url)
    entity = Publish.query(
      Publish.status == 'complete', Publish.type != 'preview',
      Publish.source == self.source.key,
      ancestor=page.key).get()

    if entity is None:
      entity = Publish(parent=page.key, source=self.source.key)
      if self.PREVIEW:
        entity.type = 'preview'
      entity.put()

    logging.debug('Publish entity: %s', entity.key.urlsafe())
    return entity
コード例 #44
0
  def get_or_add_publish_entity(self, source_url):
    """Creates and stores :class:`models.Publish` entity.

    ...and if necessary, :class:`models.PublishedPage` entity.

    Reuses an existing completed, non-preview Publish for this page and source
    if one exists; otherwise stores and returns a new Publish (typed 'preview'
    when self.PREVIEW is set).

    Args:
      source_url: string
    """
    page = PublishedPage.get_or_insert(source_url)
    entity = Publish.query(
      Publish.status == 'complete', Publish.type != 'preview',
      Publish.source == self.source.key,
      ancestor=page.key).get()

    if entity is None:
      entity = Publish(parent=page.key, source=self.source.key)
      if self.PREVIEW:
        entity.type = 'preview'
      entity.put()

    logging.debug("Publish entity: '%s'", entity.key.urlsafe().decode())
    return entity
コード例 #45
0
ファイル: publish.py プロジェクト: snarfed/bridgy
  def get_or_add_publish_entity(self, source_url):
    """Creates and stores :class:`models.Publish` entity.

    ...and if necessary, :class:`models.PublishedPage` entity.

    Reuses an existing completed, non-preview Publish for this page and source
    if one exists; otherwise stores and returns a new Publish (typed 'preview'
    when self.PREVIEW is set).

    Args:
      source_url: string
    """
    # NOTE(review): native_str(...encode('utf-8')) presumably normalizes the
    # id to the platform's native string type for Python 2/3 compat — confirm
    # against the `future` library's native_str semantics.
    page = PublishedPage.get_or_insert(native_str(source_url.encode('utf-8')))
    entity = Publish.query(
      Publish.status == 'complete', Publish.type != 'preview',
      Publish.source == self.source.key,
      ancestor=page.key).get()

    if entity is None:
      entity = Publish(parent=page.key, source=self.source.key)
      if self.PREVIEW:
        entity.type = 'preview'
      entity.put()

    logging.debug("Publish entity: '%s'", entity.key.urlsafe())
    return entity
コード例 #46
0
  def template_vars(self):
    """Returns the template variable dict for rendering this user's page.

    Builds on the base handler's vars and, depending on which features the
    source has enabled, adds: an IndieAuth identifier (Instagram only), blog
    webmention promos, recent responses (with before/after paging), recent
    publishes, blog posts, and blog webmentions.
    """
    vars = super(UserHandler, self).template_vars()
    vars.update({
        'source': self.source,
        'EPOCH': util.EPOCH,
        'REFETCH_HFEED_TRIGGER': models.REFETCH_HFEED_TRIGGER,
        'RECENT_PRIVATE_POSTS_THRESHOLD': RECENT_PRIVATE_POSTS_THRESHOLD,
        })
    if not self.source:
      return vars

    # Instagram needs IndieAuth; surface the identifier to authenticate as.
    if isinstance(self.source, instagram.Instagram):
      auth = self.source.auth_entity
      vars['indieauth_me'] = (
        auth.id if isinstance(auth, indieauth.IndieAuth)
        else self.source.domain_urls[0] if self.source.domain_urls
        else None)

    # Blog webmention promos
    if 'webmention' not in self.source.features:
      if self.source.SHORT_NAME in ('blogger', 'medium', 'tumblr', 'wordpress'):
        vars[self.source.SHORT_NAME + '_promo'] = True
      else:
        # only promo a blog platform the user hasn't already connected
        for domain in self.source.domains:
          if ('.blogspot.' in domain and  # Blogger uses country TLDs
              not Blogger.query(Blogger.domains == domain).get()):
            vars['blogger_promo'] = True
          elif (domain.endswith('tumblr.com') and
                not Tumblr.query(Tumblr.domains == domain).get()):
            vars['tumblr_promo'] = True
          elif (domain.endswith('wordpress.com') and
                not WordPress.query(WordPress.domains == domain).get()):
            vars['wordpress_promo'] = True

    # Responses
    if 'listen' in self.source.features:
      vars['responses'] = []
      query = Response.query().filter(Response.source == self.source.key)

      # if there's a paging param (responses_before or responses_after), update
      # query with it
      def get_paging_param(param):
        # Returns the query param parsed as a datetime, or None if absent.
        # Aborts the request with a 400 on a malformed value.
        val = self.request.get(param)
        try:
          return util.parse_iso8601(val) if val else None
        except:
          # NOTE(review): bare except also swallows SystemExit and
          # KeyboardInterrupt; a narrower clause would be safer.
          msg = "Couldn't parse %s %r as ISO8601" % (param, val)
          logging.exception(msg)
          self.abort(400, msg)

      before = get_paging_param('responses_before')
      after = get_paging_param('responses_after')
      if before and after:
        self.abort(400, "can't handle both responses_before and responses_after")
      elif after:
        query = query.filter(Response.updated > after).order(Response.updated)
      elif before:
        query = query.filter(Response.updated < before).order(-Response.updated)
      else:
        query = query.order(-Response.updated)

      query_iter = query.iter()
      for i, r in enumerate(query_iter):
        r.response = json.loads(r.response_json)
        r.activities = [json.loads(a) for a in r.activities_json]

        # skip responses on non-public activities entirely
        if (not self.source.is_activity_public(r.response) or
            not all(self.source.is_activity_public(a) for a in r.activities)):
          continue
        elif r.type == 'post':
          r.activities = []

        r.actor = r.response.get('author') or r.response.get('actor', {})

        # fall back to the wrapped object's content when the top level has none
        for a in r.activities + [r.response]:
          if not a.get('content'):
            a['content'] = a.get('object', {}).get('content')

        # synthesize display content for content-less responses, e.g. likes
        if not r.response.get('content'):
          phrases = {
            'like': 'liked this',
            'repost': 'reposted this',
            'rsvp-yes': 'is attending',
            'rsvp-no': 'is not attending',
            'rsvp-maybe': 'might attend',
            'rsvp-interested': 'is interested',
            'invite': 'is invited',
          }
          r.response['content'] = '%s %s.' % (
            r.actor.get('displayName') or '',
            phrases.get(r.type) or phrases.get(r.response.get('verb')))

        # convert image URL to https if we're serving over SSL
        image_url = r.actor.setdefault('image', {}).get('url')
        if image_url:
          r.actor['image']['url'] = util.update_scheme(image_url, self)

        # generate original post links
        r.links = self.process_webmention_links(r)
        r.original_links = [util.pretty_link(url, new_tab=True)
                            for url in r.original_posts]

        vars['responses'].append(r)
        # show at most 10 public responses, and give up after scanning ~200
        if len(vars['responses']) >= 10 or i > 200:
          break

      vars['responses'].sort(key=lambda r: r.updated, reverse=True)

      # calculate new paging param(s)
      new_after = (
        before if before else
        vars['responses'][0].updated if
          vars['responses'] and query_iter.probably_has_next() and (before or after)
        else None)
      if new_after:
        vars['responses_after_link'] = ('?responses_after=%s#responses' %
                                         new_after.isoformat())

      new_before = (
        after if after else
        vars['responses'][-1].updated if
          vars['responses'] and query_iter.probably_has_next()
        else None)
      if new_before:
        vars['responses_before_link'] = ('?responses_before=%s#responses' %
                                         new_before.isoformat())

      vars['next_poll'] = max(
        self.source.last_poll_attempt + self.source.poll_period(),
        # lower bound is 90s from now
        util.now_fn() + datetime.timedelta(seconds=90))

    # Publishes
    if 'publish' in self.source.features:
      publishes = Publish.query().filter(Publish.source == self.source.key)\
                                 .order(-Publish.updated)\
                                 .fetch(10)
      for p in publishes:
        # the parent PublishedPage's key id is the published page's URL
        p.pretty_page = util.pretty_link(
          p.key.parent().id().decode('utf-8'),
          attrs={'class': 'original-post u-url u-name'},
          new_tab=True)

      vars['publishes'] = publishes

    if 'webmention' in self.source.features:
      # Blog posts
      blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                  .order(-BlogPost.created)\
                                  .fetch(10)
      for b in blogposts:
        b.links = self.process_webmention_links(b)
        try:
          text = b.feed_item.get('title')
        except ValueError:
          text = None
        b.pretty_url = util.pretty_link(
          b.key.id(), text=text, attrs={'class': 'original-post u-url u-name'},
          max_length=40, new_tab=True)

      # Blog webmentions
      webmentions = BlogWebmention.query()\
          .filter(BlogWebmention.source == self.source.key)\
          .order(-BlogWebmention.updated)\
          .fetch(10)
      for w in webmentions:
        w.pretty_source = util.pretty_link(
          w.source_url(), attrs={'class': 'original-post'}, new_tab=True)
        try:
          # keep the host visible when the target is on the user's own domain
          target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                              self.source.domains)
        except BaseException:
          target_is_source = False
        w.pretty_target = util.pretty_link(
          w.target_url(), attrs={'class': 'original-post'}, new_tab=True,
          keep_host=target_is_source)

      vars.update({'blogposts': blogposts, 'webmentions': webmentions})

    return vars
コード例 #47
0
ファイル: app.py プロジェクト: LennonFlores/bridgy
  def template_vars(self):
    """Returns the template variable dict for rendering this user's page.

    Depending on which features the source has enabled, adds: an IndieAuth
    identifier (Instagram only), blog webmention promos, recent responses
    (with before/after paging), recent publishes, blog posts, and blog
    webmentions.
    """
    vars = super(UserHandler, self).template_vars()
    vars.update({
        'source': self.source,
        'EPOCH': util.EPOCH,
        'REFETCH_HFEED_TRIGGER': models.REFETCH_HFEED_TRIGGER,
        'RECENT_PRIVATE_POSTS_THRESHOLD': RECENT_PRIVATE_POSTS_THRESHOLD,
        })
    if not self.source:
      return vars

    # Instagram needs IndieAuth; surface the identifier to authenticate as.
    if isinstance(self.source, instagram.Instagram):
      auth = self.source.auth_entity
      vars['indieauth_me'] = (
        auth.id if isinstance(auth, indieauth.IndieAuth)
        else self.source.domain_urls[0] if self.source.domain_urls
        else None)

    # Blog webmention promos
    if 'webmention' not in self.source.features:
      if self.source.SHORT_NAME in ('blogger', 'tumblr', 'wordpress'):
        vars[self.source.SHORT_NAME + '_promo'] = True
      else:
        # only promo a blog platform the user hasn't already connected
        for domain in self.source.domains:
          if ('.blogspot.' in domain and  # Blogger uses country TLDs
              not Blogger.query(Blogger.domains == domain).get()):
            vars['blogger_promo'] = True
          elif (domain.endswith('tumblr.com') and
                not Tumblr.query(Tumblr.domains == domain).get()):
            vars['tumblr_promo'] = True
          elif (domain.endswith('wordpress.com') and
                not WordPress.query(WordPress.domains == domain).get()):
            vars['wordpress_promo'] = True

    # Responses
    if 'listen' in self.source.features:
      vars['responses'] = []
      query = Response.query().filter(Response.source == self.source.key)

      # if there's a paging param (responses_before or responses_after), update
      # query with it
      def get_paging_param(param):
        # Returns the query param parsed as a datetime, or None if absent.
        # Aborts the request with a 400 on a malformed value.
        val = self.request.get(param)
        try:
          return util.parse_iso8601(val) if val else None
        except:
          # NOTE(review): bare except also swallows SystemExit and
          # KeyboardInterrupt; a narrower clause would be safer.
          msg = "Couldn't parse %s %r as ISO8601" % (param, val)
          logging.exception(msg)
          self.abort(400, msg)

      before = get_paging_param('responses_before')
      after = get_paging_param('responses_after')
      if before and after:
        self.abort(400, "can't handle both responses_before and responses_after")
      elif after:
        query = query.filter(Response.updated > after).order(Response.updated)
      elif before:
        query = query.filter(Response.updated < before).order(-Response.updated)
      else:
        query = query.order(-Response.updated)

      query_iter = query.iter()
      for i, r in enumerate(query_iter):
        r.response = json.loads(r.response_json)
        r.activities = [json.loads(a) for a in r.activities_json]

        # skip responses on non-public activities entirely
        if (not self.source.is_activity_public(r.response) or
            not all(self.source.is_activity_public(a) for a in r.activities)):
          continue
        elif r.type == 'post':
          r.activities = []

        r.actor = r.response.get('author') or r.response.get('actor', {})

        # fall back to the wrapped object's content when the top level has none
        for a in r.activities + [r.response]:
          if not a.get('content'):
            a['content'] = a.get('object', {}).get('content')

        # synthesize display content for content-less responses, e.g. likes
        if not r.response.get('content'):
          phrases = {
            'like': 'liked this',
            'repost': 'reposted this',
            'rsvp-yes': 'is attending',
            'rsvp-no': 'is not attending',
            'rsvp-maybe': 'might attend',
            'rsvp-interested': 'is interested',
            'invite': 'is invited',
          }
          r.response['content'] = '%s %s.' % (
            r.actor.get('displayName') or '',
            phrases.get(r.type) or phrases.get(r.response.get('verb')))

        # convert image URL to https if we're serving over SSL
        image_url = r.actor.setdefault('image', {}).get('url')
        if image_url:
          r.actor['image']['url'] = util.update_scheme(image_url, self)

        # generate original post links
        r.links = self.process_webmention_links(r)
        r.original_links = [util.pretty_link(url, new_tab=True)
                            for url in r.original_posts]

        vars['responses'].append(r)
        # show at most 10 public responses, and give up after scanning ~200
        if len(vars['responses']) >= 10 or i > 200:
          break

      vars['responses'].sort(key=lambda r: r.updated, reverse=True)

      # calculate new paging param(s)
      new_after = (
        before if before else
        vars['responses'][0].updated if
          vars['responses'] and query_iter.probably_has_next() and (before or after)
        else None)
      if new_after:
        vars['responses_after_link'] = ('?responses_after=%s#responses' %
                                         new_after.isoformat())

      new_before = (
        after if after else
        vars['responses'][-1].updated if
          vars['responses'] and query_iter.probably_has_next()
        else None)
      if new_before:
        vars['responses_before_link'] = ('?responses_before=%s#responses' %
                                         new_before.isoformat())

      vars['next_poll'] = max(
        self.source.last_poll_attempt + self.source.poll_period(),
        # lower bound is 90s from now
        util.now_fn() + datetime.timedelta(seconds=90))

    # Publishes
    if 'publish' in self.source.features:
      publishes = Publish.query().filter(Publish.source == self.source.key)\
                                 .order(-Publish.updated)\
                                 .fetch(10)
      for p in publishes:
        # the parent PublishedPage's key id is the published page's URL
        p.pretty_page = util.pretty_link(
          p.key.parent().id(), attrs={'class': 'original-post u-url u-name'},
          new_tab=True)

      vars['publishes'] = publishes

    if 'webmention' in self.source.features:
      # Blog posts
      blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                  .order(-BlogPost.created)\
                                  .fetch(10)
      for b in blogposts:
        b.links = self.process_webmention_links(b)
        try:
          text = b.feed_item.get('title')
        except ValueError:
          text = None
        b.pretty_url = util.pretty_link(
          b.key.id(), text=text, attrs={'class': 'original-post u-url u-name'},
          max_length=40, new_tab=True)

      # Blog webmentions
      webmentions = BlogWebmention.query()\
          .filter(BlogWebmention.source == self.source.key)\
          .order(-BlogWebmention.updated)\
          .fetch(10)
      for w in webmentions:
        w.pretty_source = util.pretty_link(
          w.source_url(), attrs={'class': 'original-post'}, new_tab=True)
        try:
          # keep the host visible when the target is on the user's own domain
          target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                              self.source.domains)
        except BaseException:
          target_is_source = False
        w.pretty_target = util.pretty_link(
          w.target_url(), attrs={'class': 'original-post'}, new_tab=True,
          keep_host=target_is_source)

      vars.update({'blogposts': blogposts, 'webmentions': webmentions})

    return vars
コード例 #48
0
ファイル: pages.py プロジェクト: snarfed/bridgy
def user(site, id):
    """View for a user page.

    Looks up the source by site short name and id (falling back to other
    identifying properties with a permanent redirect to the canonical path),
    then renders the per-user dashboard: blog webmention promos, recent
    responses (with before/after paging), publishes, blog posts, and blog
    webmentions, depending on which features the source has enabled.
    """
    cls = models.sources.get(site)
    if not cls:
        return render_template('user_not_found.html'), 404

    source = cls.lookup(id)

    if not source:
        # fall back to matching id against other identifying properties, then
        # redirect permanently to the canonical user path
        key = cls.query(
            ndb.OR(*[
                ndb.GenericProperty(prop) == id
                for prop in ('domains', 'inferred_username', 'name',
                             'username')
            ])).get(keys_only=True)
        if key:
            return redirect(cls(key=key).bridgy_path(), code=301)

    if not source or not source.features:
        return render_template('user_not_found.html'), 404

    source.verify()
    source = util.preprocess_source(source)

    vars = {
        'source': source,
        'logs': logs,
        'REFETCH_HFEED_TRIGGER': models.REFETCH_HFEED_TRIGGER,
        'RECENT_PRIVATE_POSTS_THRESHOLD': RECENT_PRIVATE_POSTS_THRESHOLD,
    }

    # Blog webmention promos
    if 'webmention' not in source.features:
        if source.SHORT_NAME in ('blogger', 'medium', 'tumblr', 'wordpress'):
            vars[source.SHORT_NAME + '_promo'] = True
        else:
            # only promo a blog platform the user hasn't already connected
            for domain in source.domains:
                if ('.blogspot.' in domain and  # Blogger uses country TLDs
                        not Blogger.query(Blogger.domains == domain).get()):
                    vars['blogger_promo'] = True
                elif (util.domain_or_parent_in(domain, ['tumblr.com'])
                      and not Tumblr.query(Tumblr.domains == domain).get()):
                    vars['tumblr_promo'] = True
                elif (util.domain_or_parent_in(domain, 'wordpress.com') and
                      not WordPress.query(WordPress.domains == domain).get()):
                    # NOTE(review): 'wordpress.com' is passed as a bare string
                    # while ['tumblr.com'] above is a list -- confirm
                    # util.domain_or_parent_in accepts both forms.
                    vars['wordpress_promo'] = True

    # Responses
    if 'listen' in source.features or 'email' in source.features:
        vars['responses'] = []
        query = Response.query().filter(Response.source == source.key)

        # if there's a paging param (responses_before or responses_after), update
        # query with it
        def get_paging_param(param):
            # Returns the query param parsed as a datetime, or None if absent;
            # aborts via error() on a malformed value. The space-to-plus
            # replacement undoes form decoding of a '+' timezone offset.
            val = request.values.get(param)
            try:
                return util.parse_iso8601(val.replace(' ',
                                                      '+')) if val else None
            except BaseException:
                error(f"Couldn't parse {param}, {val!r} as ISO8601")

        before = get_paging_param('responses_before')
        after = get_paging_param('responses_after')
        if before and after:
            error("can't handle both responses_before and responses_after")
        elif after:
            query = query.filter(Response.updated > after).order(
                Response.updated)
        elif before:
            query = query.filter(
                Response.updated < before).order(-Response.updated)
        else:
            query = query.order(-Response.updated)

        query_iter = query.iter()
        for i, r in enumerate(query_iter):
            r.response = json_loads(r.response_json)
            r.activities = [json_loads(a) for a in r.activities_json]

            # skip responses on non-public activities entirely
            if (not source.is_activity_public(r.response) or not all(
                    source.is_activity_public(a) for a in r.activities)):
                continue
            elif r.type == 'post':
                r.activities = []

            verb = r.response.get('verb')
            # for invites, the relevant "actor" to display is the invitee
            # (the response's object), not the inviter
            r.actor = (r.response.get('object')
                       if verb == 'invite' else r.response.get('author')
                       or r.response.get('actor')) or {}

            activity_content = ''
            # fall back to the wrapped object's content/displayName when the
            # top level has none
            for a in r.activities + [r.response]:
                if not a.get('content'):
                    obj = a.get('object', {})
                    a['content'] = activity_content = (
                        obj.get('content') or obj.get('displayName') or
                        # historical, from a Reddit bug fixed in granary@4f9df7c
                        obj.get('name') or '')

            response_content = r.response.get('content')
            phrases = {
                'like': 'liked this',
                'repost': 'reposted this',
                'rsvp-yes': 'is attending',
                'rsvp-no': 'is not attending',
                'rsvp-maybe': 'might attend',
                'rsvp-interested': 'is interested',
                'invite': 'is invited',
            }
            phrase = phrases.get(r.type) or phrases.get(verb)
            # NOTE(review): if r.response has no 'content' key,
            # response_content is None and startswith() below raises TypeError
            # for reposts -- confirm reposts always carry content upstream.
            if phrase and (r.type != 'repost'
                           or activity_content.startswith(response_content)):
                r.response[
                    'content'] = f'{r.actor.get("displayName") or ""} {phrase}.'

            # convert image URL to https if we're serving over SSL
            image_url = r.actor.setdefault('image', {}).get('url')
            if image_url:
                r.actor['image']['url'] = util.update_scheme(
                    image_url, request)

            # generate original post links
            r.links = process_webmention_links(r)
            r.original_links = [
                util.pretty_link(url, new_tab=True) for url in r.original_posts
            ]

            vars['responses'].append(r)
            # show at most 10 public responses, and give up after scanning ~200
            if len(vars['responses']) >= 10 or i > 200:
                break

        vars['responses'].sort(key=lambda r: r.updated, reverse=True)

        # calculate new paging param(s)
        new_after = (before if before else vars['responses'][0].updated if
                     vars['responses'] and query_iter.probably_has_next() and
                     (before or after) else None)
        if new_after:
            vars[
                'responses_after_link'] = f'?responses_after={new_after.isoformat()}#responses'

        new_before = (after if after else
                      vars['responses'][-1].updated if vars['responses']
                      and query_iter.probably_has_next() else None)
        if new_before:
            vars[
                'responses_before_link'] = f'?responses_before={new_before.isoformat()}#responses'

        vars['next_poll'] = max(
            source.last_poll_attempt + source.poll_period(),
            # lower bound is 90s from now
            util.now_fn() + datetime.timedelta(seconds=90))

    # Publishes
    if 'publish' in source.features:
        publishes = Publish.query().filter(Publish.source == source.key)\
                                   .order(-Publish.updated)\
                                   .fetch(10)
        for p in publishes:
            # the parent PublishedPage's key id is the published page's URL
            p.pretty_page = util.pretty_link(
                p.key.parent().id(),
                attrs={'class': 'original-post u-url u-name'},
                new_tab=True)

        vars['publishes'] = publishes

    if 'webmention' in source.features:
        # Blog posts
        blogposts = BlogPost.query().filter(BlogPost.source == source.key)\
                                    .order(-BlogPost.created)\
                                    .fetch(10)
        for b in blogposts:
            b.links = process_webmention_links(b)
            try:
                text = b.feed_item.get('title')
            except ValueError:
                text = None
            b.pretty_url = util.pretty_link(
                b.key.id(),
                text=text,
                attrs={'class': 'original-post u-url u-name'},
                max_length=40,
                new_tab=True)

        # Blog webmentions
        webmentions = BlogWebmention.query()\
            .filter(BlogWebmention.source == source.key)\
            .order(-BlogWebmention.updated)\
            .fetch(10)
        for w in webmentions:
            w.pretty_source = util.pretty_link(
                w.source_url(), attrs={'class': 'original-post'}, new_tab=True)
            try:
                # keep the host visible when the target is on the user's own
                # domain
                target_is_source = (urllib.parse.urlparse(
                    w.target_url()).netloc in source.domains)
            except BaseException:
                target_is_source = False
            w.pretty_target = util.pretty_link(
                w.target_url(),
                attrs={'class': 'original-post'},
                new_tab=True,
                keep_host=target_is_source)

        vars.update({'blogposts': blogposts, 'webmentions': webmentions})

    return render_template(f'{source.SHORT_NAME}_user.html', **vars)
コード例 #49
0
ファイル: app.py プロジェクト: uniteddiversity/bridgy
  def template_vars(self):
    """Returns the template variable dict for rendering this user's page.

    Depending on which features the source has enabled, adds: blog webmention
    promos, recent responses, recent publishes, blog posts, and blog
    webmentions.
    """
    if not self.source:
      return {}

    vars = super(UserHandler, self).template_vars()
    vars.update({
        'source': self.source,
        'epoch': util.EPOCH,
        })

    # Blog webmention promos
    if 'webmention' not in self.source.features:
      if self.source.SHORT_NAME in ('blogger', 'tumblr', 'wordpress'):
        vars[self.source.SHORT_NAME + '_promo'] = True
      else:
        # only promo a blog platform the user hasn't already connected
        for domain in self.source.domains:
          if ('.blogspot.' in domain and  # Blogger uses country TLDs
              not Blogger.query(Blogger.domains == domain).get()):
            vars['blogger_promo'] = True
          elif (domain.endswith('tumblr.com') and
                not Tumblr.query(Tumblr.domains == domain).get()):
            vars['tumblr_promo'] = True
          elif (domain.endswith('wordpress.com') and
                not WordPress.query(WordPress.domains == domain).get()):
            vars['wordpress_promo'] = True

    # Responses
    if 'listen' in self.source.features:
      vars['responses'] = []
      for i, r in enumerate(Response.query()
                              .filter(Response.source == self.source.key)\
                              .order(-Response.updated)):
        r.response = json.loads(r.response_json)
        if r.activity_json:  # handle old entities
          r.activities_json.append(r.activity_json)
        r.activities = [json.loads(a) for a in r.activities_json]

        # skip responses on non-public activities entirely
        if (not gr_source.Source.is_public(r.response) or
            not all(gr_source.Source.is_public(a) for a in r.activities)):
          continue

        r.actor = r.response.get('author') or r.response.get('actor', {})
        # synthesize display content for content-less responses, e.g. likes
        if not r.response.get('content'):
          phrases = {
            'like': 'liked this',
            'repost': 'reposted this',
            'rsvp-yes': 'is attending',
            'rsvp-no': 'is not attending',
            'rsvp-maybe': 'might attend',
            'invite': 'is invited',
          }
          r.response['content'] = '%s %s.' % (
            r.actor.get('displayName') or '',
            phrases.get(r.type) or phrases.get(r.response.get('verb')))

        # convert image URL to https if we're serving over SSL
        image_url = r.actor.setdefault('image', {}).get('url')
        if image_url:
          r.actor['image']['url'] = util.update_scheme(image_url, self)

        # generate original post links
        r.links = self.process_webmention_links(r)

        vars['responses'].append(r)
        # show at most 10 public responses, and give up after scanning ~200
        if len(vars['responses']) >= 10 or i > 200:
          break

    # Publishes
    if 'publish' in self.source.features:
      publishes = Publish.query().filter(Publish.source == self.source.key)\
                                 .order(-Publish.updated)\
                                 .fetch(10)
      for p in publishes:
        # the parent PublishedPage's key id is the published page's URL
        p.pretty_page = util.pretty_link(
          p.key.parent().id(), a_class='original-post', new_tab=True)

      vars['publishes'] = publishes

    if 'webmention' in self.source.features:
      # Blog posts
      blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                  .order(-BlogPost.created)\
                                  .fetch(10)
      for b in blogposts:
        b.links = self.process_webmention_links(b)
        try:
          text = b.feed_item.get('title')
        except ValueError:
          text = None
        b.pretty_url = util.pretty_link(b.key.id(), text=text,
                                        a_class='original-post', max_length=40,
                                        new_tab=True)

      # Blog webmentions
      webmentions = BlogWebmention.query()\
          .filter(BlogWebmention.source == self.source.key)\
          .order(-BlogWebmention.updated)\
          .fetch(10)
      for w in webmentions:
        w.pretty_source = util.pretty_link(w.source_url(), a_class='original-post',
                                           new_tab=True)
        try:
          # keep the host visible when the target is on the user's own domain
          target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                              self.source.domains)
        except BaseException:
          target_is_source = False
        w.pretty_target = util.pretty_link(w.target_url(), a_class='original-post',
                                           new_tab=True, keep_host=target_is_source)

      vars.update({'blogposts': blogposts, 'webmentions': webmentions})

    return vars
コード例 #50
0
 def test_h_feed_no_items(self):
   """An h-feed with no h-entry children should fail the publish.

   Expects a 'Could not find content' error and the Publish entity to be
   marked 'failed'.
   """
   self.expect_requests_get('http://foo.com/bar', '<div class="h-feed"></div>')
   self.mox.ReplayAll()
   self.assert_error('Could not find content')
   # assertEqual, not the deprecated assertEquals alias (removed in 3.12)
   self.assertEqual('failed', Publish.query().get().status)