def test_already_published(self):
  """We shouldn't allow duplicating an existing, *completed* publish."""
  page = PublishedPage(id='http://foo.com/bar')

  # these are all fine
  Publish(parent=page.key, source=self.source.key, status='new').put()
  Publish(parent=page.key, source=self.source.key, status='failed').put()
  Publish(parent=page.key, source=self.source.key, status='complete',
          type='preview').put()

  for i in range(2):
    self.expect_requests_get('http://foo.com/bar', self.post_html % 'foo')
  self.mox.ReplayAll()

  # first attempt should work
  self.assert_created('foo - http://foo.com/bar')
  self.assertEquals(4, Publish.query().count())
  self.assertEquals(2, Publish.query(Publish.status == 'complete').count())

  # now that there's a complete Publish entity, more attempts should fail
  self.assert_error("Sorry, you've already published that page")

  # try again to test for a bug we had where a second try would succeed
  self.assert_error("Sorry, you've already published that page")

  # should still be able to preview though
  self.assert_success('preview of foo', preview=True)

def test_no_content(self):
  self.expect_requests_get('http://foo.com/bar',
                           '<article class="h-entry"></article>')
  self.mox.ReplayAll()

  self.assert_error('Could not find content')
  self.assertEquals('failed', Publish.query().get().status)

def test_facebook_comment_and_like_disabled(self):
  self.source = facebook.FacebookPage(id='789', features=['publish'],
                                      domains=['mr.x'])
  self.source.put()

  self.expect_requests_get('http://mr.x/like', """
  <article class="h-entry">
    <a class="u-like-of" href="http://facebook.com/789/posts/456">liked this</a>
    <a href="http://localhost/publish/facebook"></a>
  </article>""")
  self.expect_requests_get('http://mr.x/comment', """
  <article class="h-entry">
    <a class="u-in-reply-to" href="http://facebook.com/789/posts/456">reply</a>
    <a href="http://localhost/publish/facebook"></a>
  </article>""")
  self.mox.ReplayAll()

  self.assert_error('Facebook comments and likes are no longer supported',
                    source='http://mr.x/like',
                    target='https://brid.gy/publish/facebook')
  self.assertEquals('failed', Publish.query().get().status)

  self.assert_error('Facebook comments and likes are no longer supported',
                    source='http://mr.x/comment',
                    target='https://brid.gy/publish/facebook',
                    preview=True)

def test_bad_source(self):
  # no source
  self.source.key.delete()
  self.assert_error('Could not find <b>FakeSource</b> account for <b>foo.com</b>.')

  # source without publish feature
  self.source.features = ['listen']
  self.source.put()
  msg = 'Publish is not enabled'
  self.assert_error(msg)

  # status disabled
  self.source.features = ['publish']
  self.source.status = 'disabled'
  self.source.put()
  self.assert_error(msg)

  # two bad sources with same domain
  source_2 = self.source = testutil.FakeSource(id='z', **self.source.to_dict())
  source_2.status = 'enabled'
  source_2.features = ['listen']
  source_2.put()
  self.assert_error(msg)

  # one bad source, one good source, same domain. should automatically use the
  # good source.
  source_2.features.append('publish')
  source_2.put()
  self.expect_requests_get('http://foo.com/bar', self.post_html % 'xyz')
  self.mox.ReplayAll()
  self.assert_created('xyz - http://foo.com/bar')
  self.assertEquals(source_2.key, Publish.query().get().source)

def test_no_content(self):
  self.expect_requests_get('http://foo.com/bar',
                           '<article class="h-entry h-as-note"></article>')
  self.mox.ReplayAll()

  self.assert_error('or no content was found')
  self.assertEquals('failed', Publish.query().get().status)

def test_facebook_comment_and_like_disabled(self):
  self.source = facebook.FacebookPage(id='789', features=['publish'],
                                      domains=['mr.x'])
  self.source.domain_urls = ['http://mr.x/']
  self.source.put()

  self.expect_requests_get('http://mr.x/like', """
  <article class="h-entry">
    <a class="u-like-of" href="http://facebook.com/789/posts/456">liked this</a>
    <a href="http://localhost/publish/facebook"></a>
  </article>""")
  self.expect_requests_get('http://mr.x/comment', """
  <article class="h-entry">
    <a class="u-in-reply-to" href="http://facebook.com/789/posts/456">reply</a>
    <a href="http://localhost/publish/facebook"></a>
  </article>""")
  self.mox.ReplayAll()

  self.assert_error('Facebook comments and likes are no longer supported',
                    source='http://mr.x/like',
                    target='https://brid.gy/publish/facebook')
  self.assertEquals('failed', Publish.query().get().status)

  self.assert_error('Facebook comments and likes are no longer supported',
                    source='http://mr.x/comment',
                    target='https://brid.gy/publish/facebook',
                    preview=True)

def test_type_not_implemented(self):
  self.expect_requests_get('http://foo.com/bar',
                           '<article class="h-entry h-as-like"></article>')
  self.mox.ReplayAll()

  # FakeSource.create() raises NotImplementedError on likes
  self.assert_error('Cannot publish likes')
  self.assertEquals('failed', Publish.query().get().status)

def test_no_content_ignore_formatting(self):
  self.expect_requests_get('http://foo.com/bar',
                           '<article class="h-entry"></article>')
  self.mox.ReplayAll()

  self.assert_error('Could not find content',
                    params={'bridgy_ignore_formatting': ''})
  self.assertEquals('failed', Publish.query().get().status)

def _get_or_add_publish_entity(self, source_url):
  page = PublishedPage.get_or_insert(source_url)

  # Detect concurrent publish request for the same page
  # https://github.com/snarfed/bridgy/issues/996
  pending = Publish.query(Publish.status == 'new',
                          Publish.type != 'preview',
                          Publish.source == self.source.key,
                          ancestor=page.key).get()
  if pending:
    logging.warning(f'Collided with publish: {pending.key.urlsafe().decode()}')
    raise CollisionError()

  entity = Publish.query(Publish.status == 'complete',
                         Publish.type != 'preview',
                         Publish.source == self.source.key,
                         ancestor=page.key).get()
  if entity is None:
    entity = Publish(parent=page.key, source=self.source.key)
    if self.PREVIEW:
      entity.type = 'preview'
    entity.put()

  logging.debug("Publish entity: '%s'", entity.key.urlsafe().decode())
  return entity

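# `CollisionError` is raised above but isn't defined in this snippet. A minimal
# sketch of a compatible definition, as an assumption: the real class lives
# elsewhere in the module and may carry more detail.
class CollisionError(RuntimeError):
  """Raised when a publish collides with one already in flight for the page."""
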
def test_source_with_multiple_domains(self):
  """Publish domain is second in source's domains list."""
  self.source.domains = ['baj.com', 'foo.com']
  self.source.domain_urls = ['http://baj.com/', 'http://foo.com/']
  self.source.put()

  self.expect_requests_get('http://foo.com/bar', self.post_html % 'xyz')
  self.mox.ReplayAll()
  self.assert_created('xyz - http://foo.com/bar')
  self.assertEquals(self.source.key, Publish.query().get().source)

def test_source_missing_mf2(self):
  self.expect_requests_get('http://foo.com/bar', '')
  self.mox.ReplayAll()
  self.assert_error('No microformats2 data found in http://foo.com/')

  self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
  publish = Publish.query().get()
  self.assertEquals('failed', publish.status)
  self.assertEquals(self.source.key, publish.source)

def test_rsvp_without_in_reply_to(self):
  self.expect_requests_get('http://foo.com/bar', """
  <article class="h-entry">
    <p class="e-content">
      <data class="p-rsvp" value="yes">I'm in!</data>
    </p></article>""")
  self.mox.ReplayAll()
  self.assert_error("looks like an RSVP, but it's missing an in-reply-to link")
  self.assertEquals('failed', Publish.query().get().status)

def test_interactive_oauth_decline(self):
  self.auth_entity = None
  resp = self.get_response(interactive=True)
  self.assertEquals(302, resp.status_int)
  self.assertEquals(
    'http://localhost/fake/foo.com#!'
    'If you want to publish or preview, please approve the prompt.',
    urllib.unquote_plus(resp.headers['Location']))

  self.assertIsNone(Publish.query().get())

def test_type_not_implemented(self):
  self.expect_requests_get('http://foo.com/bar', """
  <article class="h-entry"><a class="u-like-of" href="xyz">W</a></article>""")
  self.expect_requests_get('http://foo.com/xyz', '')
  self.mox.ReplayAll()

  # FakeSource.create() raises NotImplementedError on likes
  self.assert_error('Cannot publish likes')
  self.assertEquals('failed', Publish.query().get().status)

def test_source_with_multiple_domains(self):
  """Publish domain is second in source's domains list."""
  self.source.domains = ['baj.com', 'foo.com']
  self.source.domain_urls = ['http://baj.com/', 'http://foo.com/']
  self.source.put()

  self.expect_requests_get('http://foo.com/bar', """
  <article class="h-entry"><p class="e-content">xyz</p></article>""")
  self.mox.ReplayAll()
  self.assert_success('xyz - http://foo.com/bar')
  self.assertEquals(self.source.key, Publish.query().get().source)

def test_returned_type_overrides(self):
  # FakeSource returns type 'post' when it sees 'rsvp'
  self.expect_requests_get('http://foo.com/bar', """
  <article class="h-entry h-as-rsvp">
    <p class="e-content">
      <data class="p-rsvp" value="yes"></data>
      <a class="u-in-reply-to" href="http://fa.ke/event"></a>
    </p></article>""")
  self.mox.ReplayAll()
  self.assert_created('')
  self.assertEquals('post', Publish.query().get().type)

def test_interactive_no_state(self):
  """https://github.com/snarfed/bridgy/issues/449"""
  self.oauth_state = None
  resp = self.get_response(interactive=True)
  self.assertEquals(302, resp.status_int)
  self.assertEquals(
    'http://localhost/#!'
    'If you want to publish or preview, please approve the prompt.',
    urllib.unquote_plus(resp.headers['Location']))

  self.assertIsNone(Publish.query().get())

def get_or_add_publish_entity(self, source_url):
  """Creates and stores Publish and (if necessary) PublishedPage entities.

  Args:
    source_url: string
  """
  page = PublishedPage.get_or_insert(source_url)
  entity = Publish.query(
    Publish.status == 'complete', Publish.type != 'preview',
    Publish.source == self.source.key, ancestor=page.key).get()
  if entity is None:
    entity = Publish(parent=page.key, source=self.source.key)
    if self.PREVIEW:
      entity.type = 'preview'
    entity.put()

  logging.debug('Publish entity: %s', entity.key.urlsafe())
  return entity

def test_interactive_from_wrong_user_page(self):
  other_source = testutil.FakeSource.new(None).put()
  self.oauth_state['source_key'] = other_source.urlsafe()

  resp = self.get_response(interactive=True)
  self.assertEquals(302, resp.status_int)
  self.assertEquals(
    'http://localhost/fake/%s#!'
    'Please log into FakeSource as fake to publish that page.' %
      other_source.id(),
    urllib.unquote_plus(resp.headers['Location']))

  self.assertIsNone(Publish.query().get())

def test_embedded_type_not_implemented(self):
  self.expect_requests_get('http://foo.com/bar', """
  <article class="h-entry">
    <div class="p-like-of">
      foo <a class="u-url" href="http://url">bar</a>
    </div>
  </article>""")
  self.mox.ReplayAll()

  # FakeSource.create() raises NotImplementedError on likes
  self.assert_error("FakeSource doesn't support type(s) like-of")
  self.assertEquals('failed', Publish.query().get().status)

def _check_entity(self):
  self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
  publish = Publish.query().get()
  self.assertEquals(self.source.key, publish.source)
  self.assertEquals('complete', publish.status)
  self.assertEquals('post', publish.type)
  self.assertEquals('FakeSource post label', publish.type_label)
  expected_html = (self.post_html % 'foo') + self.backlink
  self.assertEquals(expected_html, publish.html)
  self.assertEquals({'id': 'fake id', 'url': 'http://fake/url',
                     'content': 'foo - http://foo.com/bar'},
                    publish.published)

def test_embedded_type_not_implemented(self):
  self.expect_requests_get('http://foo.com/bar', """
  <article class="h-entry">
    <div class="p-like-of">
      foo <a class="u-url" href="http://url">bar</a>
    </div>
  </article>""")
  self.mox.ReplayAll()

  # FakeSource.create() returns an error message for verb='like'
  self.assert_error("Cannot publish likes")
  self.assertEquals('failed', Publish.query().get().status)

def test_multiple_users_on_domain(self):
  source_2 = testutil.FakeSource(
    id='foo.com/b', features=['publish'], domains=['foo.com'],
    domain_urls=['http://foo.com/b'], auth_entity=self.auth_entity.key)
  source_2.put()
  source_3 = testutil.FakeSource(
    id='foo.com/c', features=['publish'], domains=['foo.com'],
    domain_urls=['http://foo.com/c'], auth_entity=self.auth_entity.key)
  source_3.put()

  self.expect_requests_get('http://foo.com/bar', self.post_html % 'foo')
  self.mox.ReplayAll()
  self.assert_created('foo - http://foo.com/bar', interactive=False)
  self.assertEquals(source_2.key, Publish.query().get().source)

def test_preview(self):
  html = '<article class="h-entry"><p class="e-content">foo</p></article>'
  self.expect_requests_get('http://foo.com/bar', html)
  # make sure create() isn't called
  self.mox.StubOutWithMock(self.source.as_source, 'create',
                           use_mock_anything=True)
  self.mox.ReplayAll()
  self.assert_success('preview of foo - http://foo.com/bar', preview=True)

  publish = Publish.query().get()
  self.assertEquals(self.source.key, publish.source)
  self.assertEquals('complete', publish.status)
  self.assertEquals('preview', publish.type)
  self.assertEquals(html, publish.html)

def test_preview(self):
  html = self.post_html % 'foo'
  self.expect_requests_get('http://foo.com/bar', html)
  # make sure create() isn't called
  self.mox.StubOutWithMock(self.source.gr_source, 'create',
                           use_mock_anything=True)
  self.mox.ReplayAll()
  self.assert_success('preview of foo', preview=True)

  publish = Publish.query().get()
  self.assertEquals(self.source.key, publish.source)
  self.assertEquals('complete', publish.status)
  self.assertEquals('preview', publish.type)
  self.assertEquals(html + self.backlink, publish.html)

def get_or_add_publish_entity(self, source_url):
  """Creates and stores :class:`models.Publish` entity.

  ...and if necessary, :class:`models.PublishedPage` entity.

  Args:
    source_url: string
  """
  page = PublishedPage.get_or_insert(native_str(source_url.encode('utf-8')))
  entity = Publish.query(
    Publish.status == 'complete', Publish.type != 'preview',
    Publish.source == self.source.key, ancestor=page.key).get()
  if entity is None:
    entity = Publish(parent=page.key, source=self.source.key)
    if self.PREVIEW:
      entity.type = 'preview'
    entity.put()

  logging.debug("Publish entity: '%s'", entity.key.urlsafe())
  return entity

def test_user_page_publish_url_with_unicode_char(self):
  """Check the custom mf2 we render on social user pages."""
  self.sources[0].features = ['publish']
  self.sources[0].put()

  url = u'https://ptt.com/ransomw…ocks-user-access/'
  Publish(parent=PublishedPage(id=url.encode('utf-8')).key,
          source=self.sources[0].key).put()

  user_url = self.sources[0].bridgy_path()
  resp = app.application.get_response(user_url)
  self.assertEquals(200, resp.status_int)

  parsed = util.mf2py_parse(resp.body, user_url)
  publish = parsed['items'][0]['children'][0]

def test_user_page_publish_url_with_unicode_char(self):
  """Check the custom mf2 we render on social user pages."""
  self.sources[0].features = ['publish']
  self.sources[0].put()

  url = 'https://ptt.com/ransomw…ocks-user-access/'
  Publish(parent=PublishedPage(id=url).key,
          source=self.sources[0].key).put()

  user_url = self.sources[0].bridgy_path()
  resp = self.client.get(user_url)
  self.assertEqual(200, resp.status_code)

  parsed = util.parse_mf2(resp.get_data(as_text=True), user_url)
  publish = parsed['items'][0]['children'][0]

def test_success(self):
  html = '<article class="h-entry"><p class="e-content">foo</p></article>'
  self.expect_requests_get('http://foo.com/bar', html)
  self.mox.ReplayAll()
  self.assert_success('foo - http://foo.com/bar')

  self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
  publish = Publish.query().get()
  self.assertEquals(self.source.key, publish.source)
  self.assertEquals('complete', publish.status)
  self.assertEquals('post', publish.type)
  self.assertEquals('FakeSource post label', publish.type_label)
  self.assertEquals(html, publish.html)
  self.assertEquals({'id': 'fake id', 'url': 'http://fake/url',
                     'content': 'foo - http://foo.com/bar'},
                    publish.published)

def _check_entity(self, content='foo', html_content=None):
  if html_content is None:
    html_content = content
  self.assertTrue(PublishedPage.get_by_id('http://foo.com/bar'))
  publish = Publish.query().get()
  self.assertEquals(self.source.key, publish.source)
  self.assertEquals('complete', publish.status)
  self.assertEquals('post', publish.type)
  self.assertEquals('FakeSource post label', publish.type_label)
  expected_html = (self.post_html % html_content) + self.backlink
  self.assertEquals(expected_html, publish.html)
  self.assertEquals({
    'id': 'fake id',
    'url': 'http://fake/url',
    'content': '%s - http://foo.com/bar' % content,
    'granary_message': 'granary message',
  }, publish.published)

def _get_or_add_publish_entity(self, source_url):
  page = PublishedPage.get_or_insert(source_url)
  entity = Publish.query(Publish.status == 'complete',
                         Publish.type != 'preview',
                         Publish.source == self.source.key,
                         ancestor=page.key).get()
  if entity is None:
    entity = Publish(parent=page.key, source=self.source.key)
    if self.PREVIEW:
      entity.type = 'preview'
    entity.put()

  logging.debug("Publish entity: '%s'", entity.key.urlsafe().decode())
  return entity

def get_or_add_publish_entity(self, source_url):
  """Creates and stores :class:`models.Publish` entity.

  ...and if necessary, :class:`models.PublishedPage` entity.

  Args:
    source_url: string
  """
  page = PublishedPage.get_or_insert(source_url)
  entity = Publish.query(
    Publish.status == 'complete', Publish.type != 'preview',
    Publish.source == self.source.key, ancestor=page.key).get()
  if entity is None:
    entity = Publish(parent=page.key, source=self.source.key)
    if self.PREVIEW:
      entity.type = 'preview'
    entity.put()

  logging.debug("Publish entity: '%s'", entity.key.urlsafe().decode())
  return entity

def setUp(self):
  super(ModelsTest, self).setUp()

  # sources
  auth_entities = [
    FakeAuthEntity(key=ndb.Key('FakeAuthEntity', '01122334455'),
                   user_json=json_dumps({
                     'id': '0123456789',
                     'name': 'Fake User',
                     'url': 'http://fakeuser.com/',
                   })),
    FakeAuthEntity(key=ndb.Key('FakeAuthEntity', '0022446688'),
                   user_json=json_dumps({
                     'id': '0022446688',
                     'name': 'Another Fake',
                     'url': 'http://anotherfake.com/',
                   }))
  ]
  for entity in auth_entities:
    entity.put()

  self.sources = [
    FakeSource.new(None, auth_entity=auth_entities[0]),
    FakeSource.new(None, auth_entity=auth_entities[1])
  ]
  for entity in self.sources:
    entity.features = ['listen']
    entity.put()

  # activities
  self.activities = [{
    'id': 'tag:source.com,2013:%s' % id,
    'url': 'http://fa.ke/post/url',
    'object': {
      'objectType': 'note',
      'id': 'tag:source.com,2013:%s' % id,
      'url': 'http://fa.ke/post/url',
      'content': 'foo http://target1/post/url bar',
      'to': [{'objectType': 'group', 'alias': '@public'}],
      'replies': {
        'items': [{
          'objectType': 'comment',
          'id': 'tag:source.com,2013:1_2_%s' % id,
          'url': 'http://fa.ke/comment/url',
          'content': 'foo bar',
        }],
        'totalItems': 1,
      },
      'tags': [{
        'objectType': 'activity',
        'verb': 'like',
        'id': 'tag:source.com,2013:%s_liked_by_alice' % id,
        'object': {'url': 'http://example.com/abc'},
        'author': {'url': 'http://example.com/alice'},
      }, {
        'id': 'tag:source.com,2013:%s_reposted_by_bob' % id,
        'objectType': 'activity',
        'verb': 'share',
        'object': {'url': 'http://example.com/def'},
        'author': {'url': 'http://example.com/bob'},
      }, {
        'id': 'tag:source.com,2013:%s_scissors_by_bob' % id,
        'objectType': 'activity',
        'verb': 'react',
        'content': '✁',
        'object': {'url': 'http://example.com/def'},
        'author': {'url': 'http://example.com/bob'},
      }],
    },
  } for id in ('a', 'b', 'c')]
  FakeGrSource.activities = self.activities

  # responses
  self.responses = []
  created = datetime.datetime.utcnow() - datetime.timedelta(days=10)

  for activity in self.activities:
    obj = activity['object']
    pruned_activity = {
      'id': activity['id'],
      'url': 'http://fa.ke/post/url',
      'object': {
        'content': 'foo http://target1/post/url bar',
      }
    }

    comment = obj['replies']['items'][0]
    self.responses.append(
      Response(id=comment['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(comment),
               type='comment',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += datetime.timedelta(hours=1)

    like = obj['tags'][0]
    self.responses.append(
      Response(id=like['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(like),
               type='like',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += datetime.timedelta(hours=1)

    share = obj['tags'][1]
    self.responses.append(
      Response(id=share['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(share),
               type='repost',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += datetime.timedelta(hours=1)

    reaction = obj['tags'][2]
    self.responses.append(
      Response(id=reaction['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(reaction),
               type='react',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += datetime.timedelta(hours=1)

  # publishes
  self.publishes = [
    Publish(
      parent=PublishedPage(id='https://post').key,
      source=self.sources[0].key,
      status='complete',
      published={'url': 'http://fa.ke/syndpost'},
    )
  ]

  # blogposts
  self.blogposts = [
    BlogPost(
      id='https://post',
      source=self.sources[0].key,
      status='complete',
      feed_item={'title': 'a post'},
      sent=['http://a/link'],
    )
  ]

def template_vars(self):
  vars = super(UserHandler, self).template_vars()
  vars.update({
    'source': self.source,
    'EPOCH': util.EPOCH,
    'REFETCH_HFEED_TRIGGER': models.REFETCH_HFEED_TRIGGER,
    'RECENT_PRIVATE_POSTS_THRESHOLD': RECENT_PRIVATE_POSTS_THRESHOLD,
  })
  if not self.source:
    return vars

  if isinstance(self.source, instagram.Instagram):
    auth = self.source.auth_entity
    vars['indieauth_me'] = (
      auth.id if isinstance(auth, indieauth.IndieAuth)
      else self.source.domain_urls[0] if self.source.domain_urls
      else None)

  # Blog webmention promos
  if 'webmention' not in self.source.features:
    if self.source.SHORT_NAME in ('blogger', 'medium', 'tumblr', 'wordpress'):
      vars[self.source.SHORT_NAME + '_promo'] = True
    else:
      for domain in self.source.domains:
        if ('.blogspot.' in domain and  # Blogger uses country TLDs
            not Blogger.query(Blogger.domains == domain).get()):
          vars['blogger_promo'] = True
        elif (domain.endswith('tumblr.com') and
              not Tumblr.query(Tumblr.domains == domain).get()):
          vars['tumblr_promo'] = True
        elif (domain.endswith('wordpress.com') and
              not WordPress.query(WordPress.domains == domain).get()):
          vars['wordpress_promo'] = True

  # Responses
  if 'listen' in self.source.features:
    vars['responses'] = []
    query = Response.query().filter(Response.source == self.source.key)

    # if there's a paging param (responses_before or responses_after), update
    # query with it
    def get_paging_param(param):
      val = self.request.get(param)
      try:
        return util.parse_iso8601(val) if val else None
      except:
        msg = "Couldn't parse %s %r as ISO8601" % (param, val)
        logging.exception(msg)
        self.abort(400, msg)

    before = get_paging_param('responses_before')
    after = get_paging_param('responses_after')
    if before and after:
      self.abort(400, "can't handle both responses_before and responses_after")
    elif after:
      query = query.filter(Response.updated > after).order(Response.updated)
    elif before:
      query = query.filter(Response.updated < before).order(-Response.updated)
    else:
      query = query.order(-Response.updated)

    query_iter = query.iter()
    for i, r in enumerate(query_iter):
      r.response = json.loads(r.response_json)
      r.activities = [json.loads(a) for a in r.activities_json]

      if (not self.source.is_activity_public(r.response) or
          not all(self.source.is_activity_public(a) for a in r.activities)):
        continue
      elif r.type == 'post':
        r.activities = []

      r.actor = r.response.get('author') or r.response.get('actor', {})

      for a in r.activities + [r.response]:
        if not a.get('content'):
          a['content'] = a.get('object', {}).get('content')

      if not r.response.get('content'):
        phrases = {
          'like': 'liked this',
          'repost': 'reposted this',
          'rsvp-yes': 'is attending',
          'rsvp-no': 'is not attending',
          'rsvp-maybe': 'might attend',
          'rsvp-interested': 'is interested',
          'invite': 'is invited',
        }
        r.response['content'] = '%s %s.' % (
          r.actor.get('displayName') or '',
          phrases.get(r.type) or phrases.get(r.response.get('verb')))

      # convert image URL to https if we're serving over SSL
      image_url = r.actor.setdefault('image', {}).get('url')
      if image_url:
        r.actor['image']['url'] = util.update_scheme(image_url, self)

      # generate original post links
      r.links = self.process_webmention_links(r)
      r.original_links = [util.pretty_link(url, new_tab=True)
                          for url in r.original_posts]

      vars['responses'].append(r)
      if len(vars['responses']) >= 10 or i > 200:
        break

    vars['responses'].sort(key=lambda r: r.updated, reverse=True)

    # calculate new paging param(s)
    new_after = (
      before if before
      else vars['responses'][0].updated
      if vars['responses'] and query_iter.probably_has_next() and (before or after)
      else None)
    if new_after:
      vars['responses_after_link'] = ('?responses_after=%s#responses' %
                                      new_after.isoformat())

    new_before = (
      after if after
      else vars['responses'][-1].updated
      if vars['responses'] and query_iter.probably_has_next()
      else None)
    if new_before:
      vars['responses_before_link'] = ('?responses_before=%s#responses' %
                                       new_before.isoformat())

    vars['next_poll'] = max(
      self.source.last_poll_attempt + self.source.poll_period(),
      # lower bound is 1 minute from now
      util.now_fn() + datetime.timedelta(seconds=90))

  # Publishes
  if 'publish' in self.source.features:
    publishes = Publish.query().filter(Publish.source == self.source.key)\
                               .order(-Publish.updated)\
                               .fetch(10)
    for p in publishes:
      p.pretty_page = util.pretty_link(
        p.key.parent().id().decode('utf-8'),
        attrs={'class': 'original-post u-url u-name'},
        new_tab=True)

    vars['publishes'] = publishes

  if 'webmention' in self.source.features:
    # Blog posts
    blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                .order(-BlogPost.created)\
                                .fetch(10)
    for b in blogposts:
      b.links = self.process_webmention_links(b)
      try:
        text = b.feed_item.get('title')
      except ValueError:
        text = None
      b.pretty_url = util.pretty_link(
        b.key.id(), text=text, attrs={'class': 'original-post u-url u-name'},
        max_length=40, new_tab=True)

    # Blog webmentions
    webmentions = BlogWebmention.query()\
        .filter(BlogWebmention.source == self.source.key)\
        .order(-BlogWebmention.updated)\
        .fetch(10)
    for w in webmentions:
      w.pretty_source = util.pretty_link(
        w.source_url(), attrs={'class': 'original-post'}, new_tab=True)
      try:
        target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                            self.source.domains)
      except BaseException:
        target_is_source = False
      w.pretty_target = util.pretty_link(
        w.target_url(), attrs={'class': 'original-post'}, new_tab=True,
        keep_host=target_is_source)

    vars.update({'blogposts': blogposts, 'webmentions': webmentions})

  return vars

def template_vars(self):
  vars = super(UserHandler, self).template_vars()
  vars.update({
    'source': self.source,
    'EPOCH': util.EPOCH,
    'REFETCH_HFEED_TRIGGER': models.REFETCH_HFEED_TRIGGER,
    'RECENT_PRIVATE_POSTS_THRESHOLD': RECENT_PRIVATE_POSTS_THRESHOLD,
  })
  if not self.source:
    return vars

  if isinstance(self.source, instagram.Instagram):
    auth = self.source.auth_entity
    vars['indieauth_me'] = (
      auth.id if isinstance(auth, indieauth.IndieAuth)
      else self.source.domain_urls[0] if self.source.domain_urls
      else None)

  # Blog webmention promos
  if 'webmention' not in self.source.features:
    if self.source.SHORT_NAME in ('blogger', 'tumblr', 'wordpress'):
      vars[self.source.SHORT_NAME + '_promo'] = True
    else:
      for domain in self.source.domains:
        if ('.blogspot.' in domain and  # Blogger uses country TLDs
            not Blogger.query(Blogger.domains == domain).get()):
          vars['blogger_promo'] = True
        elif (domain.endswith('tumblr.com') and
              not Tumblr.query(Tumblr.domains == domain).get()):
          vars['tumblr_promo'] = True
        elif (domain.endswith('wordpress.com') and
              not WordPress.query(WordPress.domains == domain).get()):
          vars['wordpress_promo'] = True

  # Responses
  if 'listen' in self.source.features:
    vars['responses'] = []
    query = Response.query().filter(Response.source == self.source.key)

    # if there's a paging param (responses_before or responses_after), update
    # query with it
    def get_paging_param(param):
      val = self.request.get(param)
      try:
        return util.parse_iso8601(val) if val else None
      except:
        msg = "Couldn't parse %s %r as ISO8601" % (param, val)
        logging.exception(msg)
        self.abort(400, msg)

    before = get_paging_param('responses_before')
    after = get_paging_param('responses_after')
    if before and after:
      self.abort(400, "can't handle both responses_before and responses_after")
    elif after:
      query = query.filter(Response.updated > after).order(Response.updated)
    elif before:
      query = query.filter(Response.updated < before).order(-Response.updated)
    else:
      query = query.order(-Response.updated)

    query_iter = query.iter()
    for i, r in enumerate(query_iter):
      r.response = json.loads(r.response_json)
      r.activities = [json.loads(a) for a in r.activities_json]

      if (not self.source.is_activity_public(r.response) or
          not all(self.source.is_activity_public(a) for a in r.activities)):
        continue
      elif r.type == 'post':
        r.activities = []

      r.actor = r.response.get('author') or r.response.get('actor', {})

      for a in r.activities + [r.response]:
        if not a.get('content'):
          a['content'] = a.get('object', {}).get('content')

      if not r.response.get('content'):
        phrases = {
          'like': 'liked this',
          'repost': 'reposted this',
          'rsvp-yes': 'is attending',
          'rsvp-no': 'is not attending',
          'rsvp-maybe': 'might attend',
          'rsvp-interested': 'is interested',
          'invite': 'is invited',
        }
        r.response['content'] = '%s %s.' % (
          r.actor.get('displayName') or '',
          phrases.get(r.type) or phrases.get(r.response.get('verb')))

      # convert image URL to https if we're serving over SSL
      image_url = r.actor.setdefault('image', {}).get('url')
      if image_url:
        r.actor['image']['url'] = util.update_scheme(image_url, self)

      # generate original post links
      r.links = self.process_webmention_links(r)
      r.original_links = [util.pretty_link(url, new_tab=True)
                          for url in r.original_posts]

      vars['responses'].append(r)
      if len(vars['responses']) >= 10 or i > 200:
        break

    vars['responses'].sort(key=lambda r: r.updated, reverse=True)

    # calculate new paging param(s)
    new_after = (
      before if before
      else vars['responses'][0].updated
      if vars['responses'] and query_iter.probably_has_next() and (before or after)
      else None)
    if new_after:
      vars['responses_after_link'] = ('?responses_after=%s#responses' %
                                      new_after.isoformat())

    new_before = (
      after if after
      else vars['responses'][-1].updated
      if vars['responses'] and query_iter.probably_has_next()
      else None)
    if new_before:
      vars['responses_before_link'] = ('?responses_before=%s#responses' %
                                       new_before.isoformat())

    vars['next_poll'] = max(
      self.source.last_poll_attempt + self.source.poll_period(),
      # lower bound is 1 minute from now
      util.now_fn() + datetime.timedelta(seconds=90))

  # Publishes
  if 'publish' in self.source.features:
    publishes = Publish.query().filter(Publish.source == self.source.key)\
                               .order(-Publish.updated)\
                               .fetch(10)
    for p in publishes:
      p.pretty_page = util.pretty_link(
        p.key.parent().id(),
        attrs={'class': 'original-post u-url u-name'},
        new_tab=True)

    vars['publishes'] = publishes

  if 'webmention' in self.source.features:
    # Blog posts
    blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                .order(-BlogPost.created)\
                                .fetch(10)
    for b in blogposts:
      b.links = self.process_webmention_links(b)
      try:
        text = b.feed_item.get('title')
      except ValueError:
        text = None
      b.pretty_url = util.pretty_link(
        b.key.id(), text=text, attrs={'class': 'original-post u-url u-name'},
        max_length=40, new_tab=True)

    # Blog webmentions
    webmentions = BlogWebmention.query()\
        .filter(BlogWebmention.source == self.source.key)\
        .order(-BlogWebmention.updated)\
        .fetch(10)
    for w in webmentions:
      w.pretty_source = util.pretty_link(
        w.source_url(), attrs={'class': 'original-post'}, new_tab=True)
      try:
        target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                            self.source.domains)
      except BaseException:
        target_is_source = False
      w.pretty_target = util.pretty_link(
        w.target_url(), attrs={'class': 'original-post'}, new_tab=True,
        keep_host=target_is_source)

    vars.update({'blogposts': blogposts, 'webmentions': webmentions})

  return vars

def user(site, id):
  """View for a user page."""
  cls = models.sources.get(site)
  if not cls:
    return render_template('user_not_found.html'), 404

  source = cls.lookup(id)

  if not source:
    key = cls.query(ndb.OR(*[
      ndb.GenericProperty(prop) == id
      for prop in ('domains', 'inferred_username', 'name', 'username')
    ])).get(keys_only=True)
    if key:
      return redirect(cls(key=key).bridgy_path(), code=301)

  if not source or not source.features:
    return render_template('user_not_found.html'), 404

  source.verify()
  source = util.preprocess_source(source)

  vars = {
    'source': source,
    'logs': logs,
    'REFETCH_HFEED_TRIGGER': models.REFETCH_HFEED_TRIGGER,
    'RECENT_PRIVATE_POSTS_THRESHOLD': RECENT_PRIVATE_POSTS_THRESHOLD,
  }

  # Blog webmention promos
  if 'webmention' not in source.features:
    if source.SHORT_NAME in ('blogger', 'medium', 'tumblr', 'wordpress'):
      vars[source.SHORT_NAME + '_promo'] = True
    else:
      for domain in source.domains:
        if ('.blogspot.' in domain and  # Blogger uses country TLDs
            not Blogger.query(Blogger.domains == domain).get()):
          vars['blogger_promo'] = True
        elif (util.domain_or_parent_in(domain, ['tumblr.com']) and
              not Tumblr.query(Tumblr.domains == domain).get()):
          vars['tumblr_promo'] = True
        elif (util.domain_or_parent_in(domain, 'wordpress.com') and
              not WordPress.query(WordPress.domains == domain).get()):
          vars['wordpress_promo'] = True

  # Responses
  if 'listen' in source.features or 'email' in source.features:
    vars['responses'] = []
    query = Response.query().filter(Response.source == source.key)

    # if there's a paging param (responses_before or responses_after), update
    # query with it
    def get_paging_param(param):
      val = request.values.get(param)
      try:
        return util.parse_iso8601(val.replace(' ', '+')) if val else None
      except BaseException:
        error(f"Couldn't parse {param}, {val!r} as ISO8601")

    before = get_paging_param('responses_before')
    after = get_paging_param('responses_after')
    if before and after:
      error("can't handle both responses_before and responses_after")
    elif after:
      query = query.filter(Response.updated > after).order(Response.updated)
    elif before:
      query = query.filter(Response.updated < before).order(-Response.updated)
    else:
      query = query.order(-Response.updated)

    query_iter = query.iter()
    for i, r in enumerate(query_iter):
      r.response = json_loads(r.response_json)
      r.activities = [json_loads(a) for a in r.activities_json]

      if (not source.is_activity_public(r.response) or
          not all(source.is_activity_public(a) for a in r.activities)):
        continue
      elif r.type == 'post':
        r.activities = []

      verb = r.response.get('verb')
      r.actor = (r.response.get('object') if verb == 'invite'
                 else r.response.get('author') or r.response.get('actor')) or {}

      activity_content = ''
      for a in r.activities + [r.response]:
        if not a.get('content'):
          obj = a.get('object', {})
          a['content'] = activity_content = (
            obj.get('content') or obj.get('displayName') or
            # historical, from a Reddit bug fixed in granary@4f9df7c
            obj.get('name') or '')

      response_content = r.response.get('content')
      phrases = {
        'like': 'liked this',
        'repost': 'reposted this',
        'rsvp-yes': 'is attending',
        'rsvp-no': 'is not attending',
        'rsvp-maybe': 'might attend',
        'rsvp-interested': 'is interested',
        'invite': 'is invited',
      }
      phrase = phrases.get(r.type) or phrases.get(verb)
      if phrase and (r.type != 'repost' or
                     activity_content.startswith(response_content)):
        r.response['content'] = f'{r.actor.get("displayName") or ""} {phrase}.'

      # convert image URL to https if we're serving over SSL
      image_url = r.actor.setdefault('image', {}).get('url')
      if image_url:
        r.actor['image']['url'] = util.update_scheme(image_url, request)

      # generate original post links
      r.links = process_webmention_links(r)
      r.original_links = [util.pretty_link(url, new_tab=True)
                          for url in r.original_posts]

      vars['responses'].append(r)
      if len(vars['responses']) >= 10 or i > 200:
        break

    vars['responses'].sort(key=lambda r: r.updated, reverse=True)

    # calculate new paging param(s)
    new_after = (
      before if before
      else vars['responses'][0].updated
      if vars['responses'] and query_iter.probably_has_next() and (before or after)
      else None)
    if new_after:
      vars['responses_after_link'] = f'?responses_after={new_after.isoformat()}#responses'

    new_before = (
      after if after
      else vars['responses'][-1].updated
      if vars['responses'] and query_iter.probably_has_next()
      else None)
    if new_before:
      vars['responses_before_link'] = f'?responses_before={new_before.isoformat()}#responses'

    vars['next_poll'] = max(
      source.last_poll_attempt + source.poll_period(),
      # lower bound is 1 minute from now
      util.now_fn() + datetime.timedelta(seconds=90))

  # Publishes
  if 'publish' in source.features:
    publishes = Publish.query().filter(Publish.source == source.key)\
                               .order(-Publish.updated)\
                               .fetch(10)
    for p in publishes:
      p.pretty_page = util.pretty_link(
        p.key.parent().id(),
        attrs={'class': 'original-post u-url u-name'},
        new_tab=True)

    vars['publishes'] = publishes

  if 'webmention' in source.features:
    # Blog posts
    blogposts = BlogPost.query().filter(BlogPost.source == source.key)\
                                .order(-BlogPost.created)\
                                .fetch(10)
    for b in blogposts:
      b.links = process_webmention_links(b)
      try:
        text = b.feed_item.get('title')
      except ValueError:
        text = None
      b.pretty_url = util.pretty_link(
        b.key.id(), text=text, attrs={'class': 'original-post u-url u-name'},
        max_length=40, new_tab=True)

    # Blog webmentions
    webmentions = BlogWebmention.query()\
        .filter(BlogWebmention.source == source.key)\
        .order(-BlogWebmention.updated)\
        .fetch(10)
    for w in webmentions:
      w.pretty_source = util.pretty_link(
        w.source_url(), attrs={'class': 'original-post'}, new_tab=True)
      try:
        target_is_source = (urllib.parse.urlparse(w.target_url()).netloc in
                            source.domains)
      except BaseException:
        target_is_source = False
      w.pretty_target = util.pretty_link(
        w.target_url(), attrs={'class': 'original-post'}, new_tab=True,
        keep_host=target_is_source)

    vars.update({'blogposts': blogposts, 'webmentions': webmentions})

  return render_template(f'{source.SHORT_NAME}_user.html', **vars)

def template_vars(self):
  if not self.source:
    return {}

  vars = super(UserHandler, self).template_vars()
  vars.update({
    'source': self.source,
    'epoch': util.EPOCH,
  })

  # Blog webmention promos
  if 'webmention' not in self.source.features:
    if self.source.SHORT_NAME in ('blogger', 'tumblr', 'wordpress'):
      vars[self.source.SHORT_NAME + '_promo'] = True
    else:
      for domain in self.source.domains:
        if ('.blogspot.' in domain and  # Blogger uses country TLDs
            not Blogger.query(Blogger.domains == domain).get()):
          vars['blogger_promo'] = True
        elif (domain.endswith('tumblr.com') and
              not Tumblr.query(Tumblr.domains == domain).get()):
          vars['tumblr_promo'] = True
        elif (domain.endswith('wordpress.com') and
              not WordPress.query(WordPress.domains == domain).get()):
          vars['wordpress_promo'] = True

  # Responses
  if 'listen' in self.source.features:
    vars['responses'] = []
    for i, r in enumerate(Response.query()
                          .filter(Response.source == self.source.key)
                          .order(-Response.updated)):
      r.response = json.loads(r.response_json)
      if r.activity_json:  # handle old entities
        r.activities_json.append(r.activity_json)
      r.activities = [json.loads(a) for a in r.activities_json]

      if (not gr_source.Source.is_public(r.response) or
          not all(gr_source.Source.is_public(a) for a in r.activities)):
        continue

      r.actor = r.response.get('author') or r.response.get('actor', {})
      if not r.response.get('content'):
        phrases = {
          'like': 'liked this',
          'repost': 'reposted this',
          'rsvp-yes': 'is attending',
          'rsvp-no': 'is not attending',
          'rsvp-maybe': 'might attend',
          'invite': 'is invited',
        }
        r.response['content'] = '%s %s.' % (
          r.actor.get('displayName') or '',
          phrases.get(r.type) or phrases.get(r.response.get('verb')))

      # convert image URL to https if we're serving over SSL
      image_url = r.actor.setdefault('image', {}).get('url')
      if image_url:
        r.actor['image']['url'] = util.update_scheme(image_url, self)

      # generate original post links
      r.links = self.process_webmention_links(r)

      vars['responses'].append(r)
      if len(vars['responses']) >= 10 or i > 200:
        break

  # Publishes
  if 'publish' in self.source.features:
    publishes = Publish.query().filter(Publish.source == self.source.key)\
                               .order(-Publish.updated)\
                               .fetch(10)
    for p in publishes:
      p.pretty_page = util.pretty_link(
        p.key.parent().id(), a_class='original-post', new_tab=True)

    vars['publishes'] = publishes

  if 'webmention' in self.source.features:
    # Blog posts
    blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                .order(-BlogPost.created)\
                                .fetch(10)
    for b in blogposts:
      b.links = self.process_webmention_links(b)
      try:
        text = b.feed_item.get('title')
      except ValueError:
        text = None
      b.pretty_url = util.pretty_link(b.key.id(), text=text,
                                      a_class='original-post',
                                      max_length=40, new_tab=True)

    # Blog webmentions
    webmentions = BlogWebmention.query()\
        .filter(BlogWebmention.source == self.source.key)\
        .order(-BlogWebmention.updated)\
        .fetch(10)
    for w in webmentions:
      w.pretty_source = util.pretty_link(w.source_url(),
                                         a_class='original-post', new_tab=True)
      try:
        target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                            self.source.domains)
      except BaseException:
        target_is_source = False
      w.pretty_target = util.pretty_link(w.target_url(),
                                         a_class='original-post', new_tab=True,
                                         keep_host=target_is_source)

    vars.update({'blogposts': blogposts, 'webmentions': webmentions})

  return vars

def test_h_feed_no_items(self):
  self.expect_requests_get('http://foo.com/bar', '<div class="h-feed"></div>')
  self.mox.ReplayAll()
  self.assert_error('Could not find content')
  self.assertEquals('failed', Publish.query().get().status)

def setUp(self):
  super().setUp()
  FakeGrSource.clear()
  util.now_fn = lambda: NOW

  # add FakeSource everywhere necessary
  util.BLOCKLIST.add('fa.ke')
  util.webmention_endpoint_cache.clear()

  self.stubbed_create_task = False
  tasks_client.create_task = lambda *args, **kwargs: Task(name='foo')

  self.client = self.app.test_client()
  self.client.__enter__()

  self.clear_datastore()
  self.ndb_context = ndb_client.context()
  self.ndb_context.__enter__()

  # sources
  self.auth_entities = [
    FakeAuthEntity(key=ndb.Key('FakeAuthEntity', '01122334455'),
                   user_json=json_dumps({
                     'id': '0123456789',
                     'name': 'Fake User',
                     'url': 'http://fakeuser.com/',
                   })),
    FakeAuthEntity(key=ndb.Key('FakeAuthEntity', '0022446688'),
                   user_json=json_dumps({
                     'id': '0022446688',
                     'name': 'Another Fake',
                     'url': 'http://anotherfake.com/',
                   }))
  ]
  self.sources = [
    FakeSource.new(auth_entity=self.auth_entities[0]),
    FakeSource.new(auth_entity=self.auth_entities[1])
  ]
  for entity in self.sources:
    entity.features = ['listen']

  with self.app.test_request_context():
    self.source_bridgy_url = self.sources[0].bridgy_url()

  self.actor = FakeGrSource.actor = {
    'objectType': 'person',
    'id': 'tag:fa.ke,2013:212038',
    'username': '******',
    'displayName': 'Ryan B',
    'url': 'https://snarfed.org/',
    'image': {'url': 'http://pic.ture/url'},
  }

  # activities
  self.activities = FakeGrSource.activities = [{
    'id': f'tag:source.com,2013:{id}',
    'url': 'http://fa.ke/post/url',
    'object': {
      'objectType': 'note',
      'id': f'tag:source.com,2013:{id}',
      'url': 'http://fa.ke/post/url',
      'content': 'foo http://target1/post/url bar',
      'to': [{'objectType': 'group', 'alias': '@public'}],
      'replies': {
        'items': [{
          'objectType': 'comment',
          'id': f'tag:source.com,2013:1_2_{id}',
          'url': 'http://fa.ke/comment/url',
          'content': 'foo bar',
        }],
        'totalItems': 1,
      },
      'tags': [{
        'objectType': 'activity',
        'verb': 'like',
        'id': f'tag:source.com,2013:{id}_liked_by_alice',
        'object': {'url': 'http://example.com/abc'},
        'author': {
          'id': 'tag:source.com,2013:alice',
          'url': 'http://example.com/alice',
        },
      }, {
        'id': f'tag:source.com,2013:{id}_reposted_by_bob',
        'objectType': 'activity',
        'verb': 'share',
        'object': {'url': 'http://example.com/def'},
        'author': {'url': 'http://example.com/bob'},
      }, {
        'id': f'tag:source.com,2013:{id}_scissors_by_bob',
        'objectType': 'activity',
        'verb': 'react',
        'content': '✁',
        'object': {'url': 'http://example.com/def'},
        'author': {'url': 'http://example.com/bob'},
      }],
    },
  } for id in ('a', 'b', 'c')]

  # responses
  self.responses = []
  created = datetime.now(timezone.utc) - timedelta(days=10)

  for activity in self.activities:
    obj = activity['object']
    pruned_activity = {
      'id': activity['id'],
      'url': 'http://fa.ke/post/url',
      'object': {
        'content': 'foo http://target1/post/url bar',
      }
    }

    comment = obj['replies']['items'][0]
    self.responses.append(
      Response(id=comment['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(comment),
               type='comment',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += timedelta(hours=1)

    like = obj['tags'][0]
    self.responses.append(
      Response(id=like['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(like),
               type='like',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += timedelta(hours=1)

    share = obj['tags'][1]
    self.responses.append(
      Response(id=share['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(share),
               type='repost',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += timedelta(hours=1)

    reaction = obj['tags'][2]
    self.responses.append(
      Response(id=reaction['id'],
               activities_json=[json_dumps(pruned_activity)],
               response_json=json_dumps(reaction),
               type='react',
               source=self.sources[0].key,
               unsent=['http://target1/post/url'],
               created=created))
    created += timedelta(hours=1)

  # publishes
  self.publishes = [
    Publish(
      parent=PublishedPage(id='https://post').key,
      source=self.sources[0].key,
      status='complete',
      published={'url': 'http://fa.ke/syndpost'},
    )
  ]

  # blogposts
  self.blogposts = [
    BlogPost(
      id='https://post',
      source=self.sources[0].key,
      status='complete',
      feed_item={'title': 'a post'},
      sent=['http://a/link'],
    )
  ]
