def post(self):
        """Handle an ADSync questionnaire submission.

        Persists the responder, a ResponseSet, and one Response per
        question, then emails an HTML summary to the notification
        recipients and redirects to the success page.
        """
        newUser = Responder(name = self.request.get('name'), email = self.request.get('email'), company = self.request.get('company'))
        newUser.put()

        # Renamed from 'set', which shadowed the builtin.
        responseSet = ResponseSet(product = 'ADSync', responder = newUser)
        responseSet.put()

        adQuestions = Question.gql('WHERE product = :1', 'ADSync')

        # NOTE(review): name/email come straight from the request and are
        # interpolated into HTML unescaped -- consider escaping them.
        htmlBody = '<h2>Response to ADSync Questionnaire</h2><p><i>Submitted by ' + newUser.name +', ' + newUser.email + '</i></p>'

        for adQuestion in adQuestions:
            responseText = self.request.get('response' + str(adQuestion.key().id()))
            response = Response(text = responseText, question = adQuestion, responseSet = responseSet)
            response.put()
            htmlBody += '<h3>' + adQuestion.text + '</h3>' + '<p>' + response.text + '</p>'

        #send email notification
        sender = '*****@*****.**'
        recipients = ['*****@*****.**', '*****@*****.**', '*****@*****.**']
        sub = newUser.name + ' from ' + newUser.company + ' responded to the ADSync Questionnaire'
        plainBody = 'Get response here: http://yammerie.appspot.com/responsesets?id=' + str(responseSet.key().id())

        mail.send_mail(sender, recipients, sub, plainBody, html = htmlBody)

        self.redirect('/adsuccess')
def process_requests():
    """Apply a batch of received bank requests to the shared account.

    Deserializes the posted UBR list, replays any not-yet-processed
    operations in sorted order, and returns a 200 response.
    """
    global account
    global last_processed_request_id

    # Parse the posted request payload.
    incoming = UBRList()
    incoming.deserialize(request.get_json())

    app.logger.debug('Processing received bank requests.')

    # Replay operations in order, skipping any we have already applied.
    for bank_request in incoming.get_sorted():
        if bank_request.id <= last_processed_request_id:
            continue
        if bank_request.operation == 'CREDIT':
            account.increase_balance(bank_request.amount)
        else:
            account.decrease_balance(bank_request.amount)
        last_processed_request_id = bank_request.id

    # Build the success response.
    result = Response(code=status.HTTP_200_OK,
                      description='Bankovní operace úspěšně zpracovány.')
    return make_response(result.serialize(), status.HTTP_200_OK)
Example #3
0
def take_survey(request, survey_id):
    """Display and process the answer formset for one survey.

    GET renders an empty formset; POST validates it, stores a Response
    with its Answers, and redirects to the survey list.
    """
    survey = get_object_or_404(Survey, pk=survey_id)
    questions = survey.question_set.all()
    AnswerFormset = get_answer_formset(questions)

    formset = AnswerFormset(request.POST or None, queryset=Answer.objects.none())
    if formset.is_valid():
        answers = formset.save(commit=False)
        response = Response(survey=survey)
        response.save()
        # Attach every answer to the freshly created response.
        for answer in answers:
            answer.response = response
            answer.save()
        return HttpResponseRedirect(reverse('simple_survey.views.list_surveys'))

    # Label each form with its question (zip replaces the index loop).
    for question, form in zip(questions, formset.forms):
        form.fields['answer'].label = question.question
        form.fields['question'].initial = question

    responseParameters = {
        "survey" : survey,
        "formset" : formset,
    }
    return render_to_response('simple_survey/take_survey.html', responseParameters, context_instance=RequestContext(request))
Example #4
0
 def _on_rmq_message(self, *args, **kwargs):
   """Build and persist a Response for an incoming RMQ message.

   Silently ignores calls without an 'rmq_message' kwarg or whose
   processed fields are incomplete. Responses on channels where
   is_secure is exactly False are auto-approved.
   """
   # Guard clauses replace the original five-level None-check pyramid.
   rmq_message = kwargs.get('rmq_message')
   if rmq_message is None:
     return
   channel, user, message, plugin_name, plugin_response = \
       self._process_rmq_message(rmq_message)
   parts = (channel, user, message, plugin_name, plugin_response)
   if any(part is None for part in parts):
     return
   # Identity check against False preserved from the original (a
   # truthy-but-not-True is_secure still means not auto-approved).
   is_approved = channel.is_secure is False
   response = Response(
       text=plugin_response,
       from_plugin=plugin_name,
       in_response_to=message,
       to_channel=channel,
       to_user=user,
       is_approved=is_approved,
       is_sent=False,
   )
   response.save()
Example #5
0
def post_bot_message(response_bot_params):
    """Post a bot message to the GroupMe API.

    Args:
        response_bot_params: object with bot_id, text and optional
            picture_url attributes.

    Returns:
        Response wrapping the API result on 201/202, False on a logged
        client error (400/401/409).

    Raises:
        requests.exceptions.RequestException: for any other status code.
    """
    # Plain attribute access replaces the roundabout vars(...)[...] lookups.
    bot_params = {'bot_id': response_bot_params.bot_id,
                  'text': response_bot_params.text}
    if response_bot_params.picture_url is not None:
        bot_params['picture_url'] = response_bot_params.picture_url
    result = requests.post('https://api.groupme.com/v3/bots/post', params=bot_params)
    if result.status_code == 201:
        print("EVENT: POST 201 SUCCESSFUL RESPONSE")
        created_response = result.json()['response']['message']
        return Response(201, created_response['id'], None)
    if result.status_code == 202:
        print("EVENT: POST 202 SUCCESSFUL RESPONSE")
        return Response(202, None, bot_params['text'])
    if result.status_code == 400:
        print("ERROR: POST 400 BAD REQUEST")
        write_error_to_logfile("ERROR: POST 400 BAD REQUEST")
        return False
    if result.status_code == 401:
        print("ERROR: POST 401 UNAUTHORIZED")
        write_error_to_logfile("ERROR: POST 401 UNAUTHORIZED")
        return False
    if result.status_code == 409:
        print("ERROR: POST 409 CONFLICT")
        write_error_to_logfile("ERROR: POST 409 CONFLICT")
        return False
    # Any other status (e.g. 5xx) is unexpected, as in the original.
    print("ERROR: REQUEST EXCEPTION")
    raise requests.exceptions.RequestException
Example #6
0
    async def add_player(self, name, ws):
        """Register a websocket player, reply, then broadcast the game."""
        player = Player(name=name, ws=ws)
        action = "add_player"
        roster = self._players

        if len(roster) > 4:
            # Game already has its maximum number of players.
            reply = Response(action=action,
                             message='"Game is already full"')
        elif player in roster:
            # Duplicate join attempt.
            reply = Response(action=action,
                             message=f"{player.name} already exists")
        else:
            roster.append(player)
            seat = roster.index(player)
            player.turn_id = seat
            setattr(self.game, f"player_{seat}", player)
            reply = Response(
                action=action,
                message=f'Player added! Player ID: {seat}',
                body={"player_id": seat, **player.to_dict()})

        await self.send_response(response=reply, player=player)
        await self.distribute_game(to_waiting=True)
Example #7
0
def answer(request, poll_id, user_uuid):
    """Show the poll detail form and record a participant's choices."""
    poll = get_object_or_404(Poll, pk=poll_id)
    participant = get_object_or_404(Participant, unique_id=user_uuid)

    # Participants who already finished go straight to the thanks page.
    if participant.completed:
        return redirect(reverse('polls:thanks', args=(poll.id, user_uuid,)))

    questions = poll.question_set.all()
    if request.method != 'POST':
        form = DetailForm(questions=questions)
    else:
        form = DetailForm(request.POST, questions=questions)
        if form.is_valid():
            # Replace any previously stored responses with the new set.
            Response.objects.filter(participant=participant).delete()
            for choice_id in form.answers():
                Response(participant=participant, choice_id=choice_id).save()
            return HttpResponseRedirect(reverse('polls:sign', args=(poll.id, user_uuid,)))

    context = {
        'poll': poll,
        'form': form,
        'user_uuid': user_uuid,
    }
    return render(request, 'polls/detail.html', context)
Example #8
0
def volumedown():
    """Decreases the Sonos volume by 10"""
    sonos.volume -= 10

    # Echo the new volume back to the caller as JSON.
    payload = Response("Sonos volume now set to " + str(sonos.volume))
    payload.volume = sonos.volume
    return jsonpickle.encode(payload, unpicklable=False)
Example #9
0
def volume():
    """Returns (GET) or sets (POST) the current Sonos volume (0 - 100)"""

    if request.method == 'POST':
        # request.form values are strings; the speaker volume expects an
        # int, so convert before assigning (previously a str was set).
        sonos.volume = int(request.form.get('volume'))

    resp = Response("Sonos volume now set to " + str(sonos.volume))
    resp.volume = sonos.volume
    return jsonpickle.encode(resp, unpicklable=False)
Example #10
0
 def mutate(self, info, **kwargs):
     req_kw = kwargs['request']
     request_instance = RequestModel.objects.get(**req_kw)
     kwargs['request'] = request_instance
     del kwargs['request']
     new_response_instance = ResponseModel(**kwargs)
     print 'new response instance: '
     print new_response_instance
     new_response_instance.save()
     return CreateResponse(response=new_response_instance)
Example #11
0
 def test_get_type(self):
     """Response.get_type maps AS1 activity shapes to response types."""
     cases = [
         ('repost', {'objectType': 'activity', 'verb': 'share'}),
         ('rsvp', {'verb': 'rsvp-no'}),
         ('rsvp', {'verb': 'invite'}),
         ('comment', {'objectType': 'comment'}),
         ('post', {'verb': 'post'}),
         ('post', {'objectType': 'event'}),
         ('post', {'objectType': 'image'}),
         # a note with an inReplyTo context counts as a comment
         ('comment', {
             'objectType': 'note',
             'context': {'inReplyTo': {'foo': 'bar'}},
         }),
         # objectType takes precedence over the 'post' verb
         ('comment', {'objectType': 'comment', 'verb': 'post'}),
     ]
     for expected, activity in cases:
         self.assertEqual(expected, Response.get_type(activity))
Example #12
0
  def test_hooks(self):
    """put() refuses Responses that still use the legacy activity_json.

    Temporarily removing _pre_put_hook lets the legacy entity be stored
    so the round-trip migration into activities_json can be checked.
    """
    resp = Response(id='x', activity_json='{"foo": "bar"}')
    # The pre-put hook asserts, rejecting the legacy field.
    self.assertRaises(AssertionError, resp.put)

    # Bypass the hook to store the legacy entity anyway, then restore it.
    pre_put = Response._pre_put_hook
    del Response._pre_put_hook
    resp.put()
    Response._pre_put_hook = pre_put
    got = resp.key.get()
    # After the put/get round trip, activity_json has been folded into
    # the activities_json list and cleared.
    self.assertEqual(['{"foo": "bar"}'], got.activities_json)
    self.assertIsNone(got.activity_json)
Example #13
0
def send_webmentions(handler, activity, **response_props):

    """Sends webmentions for an incoming Salmon slap or ActivityPub inbox delivery.
    Args:
      handler: RequestHandler
      activity: dict, AS1 activity
      response_props: passed through to the newly created Responses
    """
    verb = activity.get('verb')
    if verb and verb not in SUPPORTED_VERBS:
        error(handler, '%s activities are not supported yet.' % verb)

    # extract source and targets
    source = activity.get('url') or activity.get('id')
    obj = activity.get('object')
    obj_url = util.get_url(obj)

    targets = util.get_list(activity, 'inReplyTo')
    if isinstance(obj, dict):
        # fall back to the inner object's url/id for the source, and pick
        # up any inReplyTo targets it carries
        if not source:
            source = obj_url or obj.get('id')
        targets.extend(util.get_list(obj, 'inReplyTo'))
    if verb in ('like', 'share'):
         targets.append(obj_url)

    targets = util.dedupe_urls(util.get_url(t) for t in targets)
    if not source:
        error(handler, "Couldn't find original post URL")
    if not targets:
        error(handler, "Couldn't find target URLs (inReplyTo or object)")

    # send webmentions and store Responses
    errors = []
    for target in targets:
        if not target:
            continue

        # store the Response before sending so there's a record either way
        response = Response(source=source, target=target, direction='in',
                            **response_props)
        response.put()
        # likes/shares send from our proxy URL for the activity instead of
        # the original source URL
        wm_source = response.proxy_url() if verb in ('like', 'share') else source
        logging.info('Sending webmention from %s to %s', wm_source, target)

        wm = send.WebmentionSend(wm_source, target)
        if wm.send(headers=HEADERS):
            logging.info('Success: %s', wm.response)
            response.status = 'complete'
        else:
            logging.warning('Failed: %s', wm.error)
            errors.append(wm.error)
            response.status = 'error'
        response.put()

    # report accumulated failures with the first error's HTTP status
    if errors:
        msg = 'Errors:\n' + '\n'.join(json.dumps(e, indent=2) for e in errors)
        error(handler, msg, status=errors[0].get('http_status'))
Example #14
0
    def test_good(self):
        """A received notification email produces a FacebookEmail entity,
        a comment Response with the discovered activity, and a propagate
        task."""
        # Stub the h-feed fetch used for original-post discovery.
        self.expect_requests_get(
            'http://foo.com/', """
    <html class="h-feed">
      <div class="h-entry">
        <a class="u-url" href="http://foo.com/post"></a>
        <a class="u-syndication" href="https://www.facebook.com/snarfed.org/posts/123"></a>
      </div>
    </html>""")
        self.mox.ReplayAll()

        self.handler.receive(self.mail)
        self.assert_equals(200, self.response.status_code)

        # One FacebookEmail entity stored, keyed by the SMTP message id.
        emails = list(FacebookEmail.query())
        self.assertEquals(1, len(emails))
        self.assert_equals('SMTP-123-xyz', emails[0].key.id())
        self.assert_equals(self.fea.key, emails[0].source)
        self.assert_equals([COMMENT_EMAIL_USERNAME], emails[0].htmls)
        resp_id = EMAIL_COMMENT_OBJ_USERNAME['id']
        self.assert_equals(ndb.Key('Response', resp_id), emails[0].response)

        # The stored comment Response carries the discovered activity.
        expected = Response(
            id=resp_id,
            source=self.fea.key,
            type='comment',
            response_json=json.dumps(EMAIL_COMMENT_OBJ_USERNAME),
            activities_json=[
                json.dumps({
                    'id': '123',
                    'numeric_id': '123',
                    'url': 'https://www.facebook.com/212038/posts/123',
                    'author': {
                        'id': 'snarfed.org'
                    },
                })
            ],
            unsent=['http://foo.com/post'])
        self.assert_entities_equal([expected],
                                   list(Response.query()),
                                   ignore=('created', 'updated'))

        # A propagate task is enqueued for the new Response.
        tasks = self.taskqueue_stub.GetTasks('propagate')
        self.assertEquals(1, len(tasks))
        self.assert_equals(expected.key.urlsafe(),
                           testutil.get_task_params(tasks[0])['response_key'])

        self.assert_equals(EMAIL_COMMENT_OBJ_USERNAME,
                           self.fea.get_comment('123_789'))
Example #15
0
 async def distribute_game(self, to_waiting=False):
     """Broadcast the serialized game state to waiting sockets or players."""
     update = Response(action=Actions.UPDATE_GAME.value,
                       body=self.game.to_dict())
     payload = update.serialize()
     # Waiting connections are raw websockets; players wrap theirs in .ws.
     if to_waiting:
         sockets = self._waiting
     else:
         sockets = [player.ws for player in self._players]
     tasks = [asyncio.ensure_future(ws.send(payload)) for ws in sockets]
     await asyncio.gather(*tasks)
Example #16
0
 def mutate(self, info, **kwargs):
     """Create a ResponseModel and link it to its parent RequestModel."""
     print "getting request info: "
     req_kw = kwargs['request']
     print req_kw
     # Resolve the parent request from the lookup kwargs.
     request_instance = RequestModel.objects.get(**req_kw)
     print "request instance: "
     print request_instance.id
     #kwargs['request'] = request_instance
     # Drop the lookup dict so ResponseModel only receives its own fields.
     del kwargs['request']
     new_response_instance = ResponseModel(**kwargs)
     # Attach the resolved request to the new response.
     new_response_instance.request = request_instance
     print 'new response instance: '
     print new_response_instance
     new_response_instance.save()
     return CreateResponse(response=new_response_instance)
Example #17
0
def respond(request):
	"""
	Request handler when someone posts a response
	1. Add response content to the database
	2. Send push notification to client device
	3. Update the credit of the responder
	"""
	if request.method == 'POST':
		json_data = json.loads(request.body)

		try:
			thread_id = json_data['thread_id']
			response_content = json_data['content']
			device_id = json_data['device_id']
		except KeyError:
			# NOTE(review): no HttpResponse is returned on this path (or when
			# the thread/device lookups fail below) -- Django treats a None
			# return as an error; confirm this is intended.
			print "Error: A posted response did not have a JSON object with the required properties"
		else:
			# check that the thread id and the device ids are valid
			thread = Thread.objects.filter(id=thread_id)
			device = Device.objects.filter(device_id=device_id)

			print "Passed parameter validation"
			print thread.count()
			print device.count()

			if thread.exists() and device.exists():
				# add response to database
				response = Response(thread=thread[0], responder_device=device[0], response_content=response_content)
				response.save()

				# add update to the other device
				asker_device = thread[0].asker_device
				answerer_device = thread[0].answerer_device

				print "Thread and device actually exist"
				print device_id
				print asker_device.device_id
				print answerer_device.device_id

				# queue the update for whichever side did NOT post the response
				if asker_device.device_id == device_id:
					ResponseUpdates.add_update(answerer_device, response)
					print "Adding an update to the answerers queue"

				elif answerer_device.device_id == device_id:
					ResponseUpdates.add_update(asker_device, response)
					print "Adding an update to the askers queue"

				return HttpResponse(json.dumps({}), content_type="application/json")
Example #18
0
def add_response(**kwargs):
    """Persist a new Response row, rolling back the session on failure.

    Args:
        **kwargs: column values forwarded to the Response constructor.
    """
    response = Response(**kwargs)
    try:
        db.session.add(response)
        db.session.commit()
    except SQLAlchemyError as e:
        # Roll back so the session is usable again after a failed commit;
        # without this, subsequent operations on the session also fail.
        db.session.rollback()
        print 'ERROR adding response', e
Example #19
0
    def test_salmon_like(self, mock_get, mock_post):
        """An outbound like webmention is delivered via OStatus/Salmon."""
        mock_get.side_effect = [self.like, self.orig_html_atom, self.orig_atom]

        got = self.client.post('/webmention',
                               data={
                                   'source': 'http://a/like',
                                   'target': 'http://orig/post',
                               })
        self.assertEqual(200, got.status_code)

        # source fetch, then conneg on the target, then its Atom feed
        mock_get.assert_has_calls((
            self.req('http://a/like'),
            self.req('http://orig/post', headers=CONNEG_HEADERS_AS2_HTML),
            self.req('http://orig/atom'),
        ))

        # the posted Salmon slap contains the like entry
        data = self.verify_salmon(mock_post)
        parsed = feedparser.parse(data)
        entry = parsed.entries[0]

        self.assertEqual('http://a/like', entry['id'])
        self.assertIn(
            {
                'rel': 'alternate',
                'href': 'http://a/like',
                'type': 'text/html',
            }, entry['links'])
        self.assertEqual('http://orig/post', entry['activity_object'])

        # a completed outbound ostatus Response is stored
        resp = Response.get_by_id('http://a/like http://orig/post')
        self.assertEqual('out', resp.direction)
        self.assertEqual('ostatus', resp.protocol)
        self.assertEqual('complete', resp.status)
        self.assertEqual(self.like_mf2, json_loads(resp.source_mf2))
Example #20
0
    def test_activitypub_create_repost(self, mock_get, mock_post):
        """An outbound repost is delivered to the author's AP inbox."""
        mock_get.side_effect = [self.repost, self.orig_as2, self.actor]
        mock_post.return_value = requests_response('abc xyz')

        got = self.client.post('/webmention',
                               data={
                                   'source': 'http://a/repost',
                                   'target': 'https://fed.brid.gy/',
                               })
        self.assertEqual(200, got.status_code)

        # source fetch, then conneg fetches of the original post and author
        mock_get.assert_has_calls((
            self.req('http://a/repost'),
            self.req('http://orig/post', headers=CONNEG_HEADERS_AS2_HTML),
            self.req('http://orig/author', headers=CONNEG_HEADERS_AS2_HTML),
        ))

        # delivered to the author's inbox as AS2
        args, kwargs = mock_post.call_args
        self.assertEqual(('https://foo.com/inbox', ), args)
        self.assertEqual(self.repost_as2, json_loads(kwargs['data']))

        headers = kwargs['headers']
        self.assertEqual(CONTENT_TYPE_AS2, headers['Content-Type'])

        # signed with our RSA key
        rsa_key = kwargs['auth'].header_signer._rsa._key
        self.assertEqual(self.key.private_pem(), rsa_key.exportKey())

        # completed outbound activitypub Response is stored
        resp = Response.get_by_id('http://a/repost http://orig/as2')
        self.assertEqual('out', resp.direction)
        self.assertEqual('activitypub', resp.protocol)
        self.assertEqual('complete', resp.status)
        self.assertEqual(self.repost_mf2, json_loads(resp.source_mf2))
Example #21
0
    def get(self):
        """Render a stored Response as HTML, with a redirect to its source."""
        source = util.get_required_param(self, 'source')
        target = util.get_required_param(self, 'target')

        resp_id = '%s %s' % (source, target)
        stored = Response.get_by_id(resp_id)
        if not stored:
            self.abort(404, 'No stored response for %s' % resp_id)

        # Convert whichever serialization we stored back into AS1.
        if stored.source_mf2:
            as1 = microformats2.json_to_object(json.loads(stored.source_mf2))
        elif stored.source_as2:
            as1 = as2.to_as1(json.loads(stored.source_as2))
        elif stored.source_atom:
            as1 = atom.atom_to_activity(stored.source_atom)
        else:
            self.abort(404, 'Stored response for %s has no data' % resp_id)

        # add HTML meta redirect to source page. should trigger for end users in
        # browsers but not for webmention receivers (hopefully).
        html = microformats2.activities_to_html([as1])
        utf8 = '<meta charset="utf-8">'
        refresh = '<meta http-equiv="refresh" content="0;url=%s">' % source
        self.response.write(html.replace(utf8, utf8 + '\n' + refresh))
Example #22
0
    def try_salmon(self):
        """
        Returns Flask response (string body or tuple) if we attempted OStatus
        delivery (whether successful or not), None if we didn't attempt, raises
        an exception otherwise.
        """
        # Prefer an explicit target response URL, else the first derived target.
        target = None
        if self.target_resp:
            target = self.target_resp.url
        else:
            targets = self._targets()
            if targets:
                target = targets[0]
        if not target:
            logging.warning("No targets or followers. Ignoring.")
            return

        # Record the delivery outcome in a Response entity no matter how
        # _try_salmon exits; status stays None (nothing stored) only when
        # the return value isn't a string and no exception was raised.
        status = None
        try:
            ret = self._try_salmon(target)
            if isinstance(ret, str):
                status = 'complete'
            return ret
        except:
            status = 'error'
            raise
        finally:
            if status:
                Response(source=self.source_url,
                         target=target,
                         status=status,
                         direction='out',
                         protocol='ostatus',
                         source_mf2=json_dumps(self.source_mf2)).put()
Example #23
0
    def _test_inbox_mention(self, as2, mock_head, mock_get, mock_post):
        """Shared check: an inbox mention triggers webmention discovery + send."""
        mock_head.return_value = requests_response(url='http://target')
        # target page advertises its webmention endpoint
        mock_get.return_value = requests_response(
            '<html><head><link rel="webmention" href="/webmention"></html>')
        mock_post.return_value = requests_response()

        with self.client:
            got = self.client.post('/foo.com/inbox', json=as2)
            self.assertEqual(200, got.status_code, got.get_data(as_text=True))
            mock_get.assert_called_once_with(
                'http://target/', headers=common.HEADERS, timeout=15, stream=True)

            # the webmention is sent with our render proxy URL as the source
            expected_headers = copy.deepcopy(common.HEADERS)
            expected_headers['Accept'] = '*/*'
            mock_post.assert_called_once_with(
                'http://target/webmention',
                data={
                    'source': 'http://localhost/render?source=http%3A%2F%2Fthis%2Fmention&target=http%3A%2F%2Ftarget%2F',
                    'target': 'http://target/',
                },
                allow_redirects=False, timeout=15, stream=True,
                headers=expected_headers)

            # completed inbound activitypub Response is stored
            resp = Response.get_by_id('http://this/mention http://target/')
            self.assertEqual('in', resp.direction)
            self.assertEqual('activitypub', resp.protocol)
            self.assertEqual('complete', resp.status)
            self.assertEqual(common.redirect_unwrap(as2), json_loads(resp.source_as2))
Example #24
0
    def test_proxy_url(self):
        """proxy_url() is None until the Response has AS2 source data."""
        resp = Response.get_or_create('abc', 'xyz')
        self.assertIsNone(resp.proxy_url())

        # once source_as2 is set, the render proxy URL becomes available
        resp.source_as2 = 'as2'
        self.assertEquals('http://localhost/render?source=abc&target=xyz',
                          resp.proxy_url())
Example #25
0
    def test_activitypub_follow(self, mock_get, mock_post):
        """An outbound follow is delivered to the followee's AP inbox."""
        mock_get.side_effect = [self.follow, self.actor]
        mock_post.return_value = requests_response('abc xyz')

        got = app.get_response('/webmention',
                               method='POST',
                               body=urllib.urlencode({
                                   'source':
                                   'http://a/follow',
                                   'target':
                                   'https://fed.brid.gy/',
                               }))
        self.assertEquals(200, got.status_int)

        # source fetch, then conneg fetch of the followee
        mock_get.assert_has_calls((
            self.req('http://a/follow'),
            self.req('http://followee', headers=CONNEG_HEADERS_AS2_HTML),
        ))

        # delivered as AS2 JSON
        args, kwargs = mock_post.call_args
        self.assertEqual(('https://foo.com/inbox', ), args)
        self.assertEqual(self.follow_as2, kwargs['json'])

        headers = kwargs['headers']
        self.assertEqual(CONTENT_TYPE_AS2, headers['Content-Type'])

        # signed with our RSA key
        rsa_key = kwargs['auth'].header_signer._rsa._key
        self.assertEqual(self.key.private_pem(), rsa_key.exportKey())

        # completed outbound activitypub Response is stored
        resp = Response.get_by_id('http://a/follow http://followee')
        self.assertEqual('out', resp.direction)
        self.assertEqual('activitypub', resp.protocol)
        self.assertEqual('complete', resp.status)
        self.assertEqual(self.follow_mf2, json.loads(resp.source_mf2))
Example #26
0
    def test_activitypub_create_reply(self, mock_get, mock_post):
        """An outbound reply is wrapped in Create and delivered over AP."""
        mock_get.side_effect = self.activitypub_gets
        mock_post.return_value = requests_response('abc xyz', status=203)

        got = application.get_response('/webmention',
                                       method='POST',
                                       body=urlencode({
                                           'source':
                                           'http://a/reply',
                                           'target':
                                           'https://fed.brid.gy/',
                                       }).encode())
        # the inbox delivery's status code is propagated to the caller
        self.assertEqual(203, got.status_int)

        # source fetch, then conneg fetches of the original post and author
        mock_get.assert_has_calls((
            self.req('http://a/reply'),
            self.req('http://orig/post', headers=CONNEG_HEADERS_AS2_HTML),
            self.req('http://orig/author', headers=CONNEG_HEADERS_AS2_HTML),
        ))

        # delivered as an AS2 Create activity
        args, kwargs = mock_post.call_args
        self.assertEqual(('https://foo.com/inbox', ), args)
        self.assertEqual(self.as2_create, kwargs['json'])

        headers = kwargs['headers']
        self.assertEqual(CONTENT_TYPE_AS2, headers['Content-Type'])

        # signed with our RSA key
        rsa_key = kwargs['auth'].header_signer._rsa._key
        self.assertEqual(self.key.private_pem(), rsa_key.exportKey())

        # completed outbound activitypub Response is stored
        resp = Response.get_by_id('http://a/reply http://orig/as2')
        self.assertEqual('out', resp.direction)
        self.assertEqual('activitypub', resp.protocol)
        self.assertEqual('complete', resp.status)
        self.assertEqual(self.reply_mf2, json_loads(resp.source_mf2))
Example #27
0
 def test_render_atom(self):
     """/render converts a stored Atom response to the expected HTML."""
     Response(id='abc xyz', source_atom=self.atom).put()
     resp = application.get_response('/render?source=abc&target=xyz')
     self.assertEqual(200, resp.status_int)
     self.assert_multiline_equals(self.html,
                                  resp.body.decode(),
                                  ignore_blanks=True)
Example #28
0
    def try_salmon(self):
        """Returns True if we attempted OStatus delivery. Raises otherwise."""
        # Prefer an explicit target response URL, else the first derived target.
        target = None
        if self.target_resp:
            target = self.target_resp.url
        else:
            targets = self._targets()
            if targets:
                target = targets[0]
        if not target:
            logging.warning("No targets or followers. Ignoring.")
            return False

        resp = Response.get_or_create(source=self.source_url,
                                      target=target,
                                      direction='out',
                                      source_mf2=json_dumps(self.source_mf2))
        resp.protocol = 'ostatus'

        # Persist the final status whether delivery succeeds or raises.
        try:
            ret = self._try_salmon(resp)
            resp.status = 'complete'
            return ret
        except:
            resp.status = 'error'
            raise
        finally:
            resp.put()
Example #29
0
def api_view_request(request_rid):
    """Admin view of a stored request/response pair, keyed by rid."""
    check_admin()
    req = Request.find_by('where rid = ?', request_rid)
    resp = Response.find_by('where rid = ?', request_rid)
    # Both halves must exist, otherwise 404.
    if req is None or resp is None:
        raise notfound()
    return dict(request=content_escape(req), response=html_encode(resp))
Example #30
0
 def test_render_mf2(self):
     """/render converts a stored mf2 response to the expected HTML."""
     Response(id='abc xyz', source_mf2=json.dumps(self.mf2)).put()
     resp = app.get_response('/render?source=abc&target=xyz')
     self.assertEquals(200, resp.status_int)
     self.assert_multiline_equals(self.html,
                                  resp.body.decode('utf-8'),
                                  ignore_blanks=True)
Example #31
0
    def test_reply(self, mock_urlopen, mock_head, mock_get, mock_post):
        """An inbound Salmon Atom reply becomes a webmention to the original."""
        atom_reply = """\
<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'>
  <id>https://my/reply</id>
  <uri>https://my/reply</uri>
  <author>
    <name>Alice</name>
    <uri>[email protected]</uri>
  </author>
  <thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0">
    http://orig/post
  </thr:in-reply-to>
  <content>I hereby reply.</content>
  <title>My Reply</title>
  <updated>%s</updated>
</entry>""" % datetime.datetime.now().isoformat('T')
        self.send_slap(mock_urlopen, mock_head, mock_get, mock_post, atom_reply)

        # check webmention post
        mock_post.assert_called_once_with(
            'http://orig/webmention',
            data={'source': 'https://my/reply', 'target': 'http://orig/post'},
            allow_redirects=False, timeout=15, stream=True,
            headers=self.expected_headers)

        # check stored response
        resp = Response.get_by_id('https://my/reply http://orig/post')
        self.assertEqual('in', resp.direction)
        self.assertEqual('ostatus', resp.protocol)
        self.assertEqual('complete', resp.status)
        # the raw Atom is preserved on the Response
        self.assertEqual(atom_reply, resp.source_atom)
Example #32
0
def render():
    """Fetches a stored Response and renders it as HTML."""
    source = flask_util.get_required_param('source')
    target = flask_util.get_required_param('target')

    resp_id = f'{source} {target}'
    stored = Response.get_by_id(resp_id)
    if not stored:
        error(f'No stored response for {resp_id}', status=404)

    # Convert whichever serialization was stored back into AS1.
    if stored.source_mf2:
        as1 = microformats2.json_to_object(json_loads(stored.source_mf2))
    elif stored.source_as2:
        as1 = as2.to_as1(json_loads(stored.source_as2))
    elif stored.source_atom:
        as1 = atom.atom_to_activity(stored.source_atom)
    else:
        error(f'Stored response for {resp_id} has no data', status=404)

    # add HTML meta redirect to source page. should trigger for end users in
    # browsers but not for webmention receivers (hopefully).
    html = microformats2.activities_to_html([as1])
    charset_tag = '<meta charset="utf-8">'
    refresh_tag = f'<meta http-equiv="refresh" content="0;url={source}">'
    return html.replace(charset_tag, charset_tag + '\n' + refresh_tag)
Example #33
0
    def test_inbox_like(self, mock_head, mock_get, mock_post):
        """An inbound AP like is forwarded to the target as a webmention."""
        mock_head.return_value = requests_response(url='http://orig/post')
        mock_get.side_effect = [
            # source actor
            requests_response(LIKE_WITH_ACTOR['actor'], headers={'Content-Type': common.CONTENT_TYPE_AS2}),
            # target post webmention discovery
            requests_response(
                '<html><head><link rel="webmention" href="/webmention"></html>'),
        ]
        mock_post.return_value = requests_response()

        got = self.client.post('/foo.com/inbox', json=LIKE)
        self.assertEqual(200, got.status_code)

        # actor fetched with AS2 conneg headers, target with plain headers
        as2_headers = copy.deepcopy(common.HEADERS)
        as2_headers.update(common.CONNEG_HEADERS_AS2_HTML)
        mock_get.assert_has_calls((
            call('http://orig/actor', headers=as2_headers, stream=True, timeout=15),
            call('http://orig/post', headers=common.HEADERS, stream=True, timeout=15),
        ))

        # webmention source is our render proxy URL for the like
        args, kwargs = mock_post.call_args
        self.assertEqual(('http://orig/webmention',), args)
        self.assertEqual({
            # TODO
            'source': 'http://localhost/render?source=http%3A%2F%2Fthis%2Flike__ok&target=http%3A%2F%2Forig%2Fpost',
            'target': 'http://orig/post',
        }, kwargs['data'])

        # completed inbound activitypub Response is stored
        resp = Response.get_by_id('http://this/like__ok http://orig/post')
        self.assertEqual('in', resp.direction)
        self.assertEqual('activitypub', resp.protocol)
        self.assertEqual('complete', resp.status)
        self.assertEqual(LIKE_WITH_ACTOR, json_loads(resp.source_as2))
Example #34
0
def join_quiz(quiz_id, user_id=None):
    """Register a user as a participant in a quiz.

    Creates the user's Timer and one blank Response per question (at most
    ten), but only if they don't already exist, so joining is idempotent.

    Args:
      quiz_id: id of the quiz to join
      user_id: fallback user id when called outside a request context

    Returns:
      JSON response with 'status' and 'quiz_id'.
    """
    ## Get user from request when available, otherwise from the argument.
    # NOTE(review): assumes request.form contains 'user_id' — confirm callers.
    if request:
        user = User.query.get(request.form['user_id'])
    else:
        user = User.query.get(user_id)
    quiz = Quiz.query.get(quiz_id)

    # Create the per-user timer only once.
    if Timer.query.filter(Timer.quiz_id == quiz_id).filter(
            Timer.user_id == user.id).first() is None:
        timer = Timer(quiz_id=quiz_id,
                      user_id=user.id,
                      last_answered=datetime.now())
        db.session.add(timer)

    if Response.query.filter(Response.quiz_id == quiz_id).filter(
            Response.user_id == user.id).first() is None:
        ## Create responses for user. Parse the question list JSON once
        ## instead of re-decoding it on every loop iteration.
        questions = json.loads(quiz.questions)
        for question_id in questions[:10]:
            db.session.add(Response(question_id=question_id,
                                    user_id=user.id,
                                    quiz_id=quiz_id))

    ## Store timer/responses for user
    db.session.commit()
    status_dict = {'status': 'JOINED', 'quiz_id': quiz_id}
    return jsonify(status_dict)
Example #35
0
def api_view_request(request_rid):
    """Admin-only view of one stored request/response pair, keyed by rid."""
    check_admin()
    req = Request.find_by('where rid = ?', request_rid)
    resp = Response.find_by('where rid = ?', request_rid)
    if req is None or resp is None:
        raise notfound()
    return dict(request=content_escape(req), response=html_encode(resp))
Example #36
0
    def _test_inbox_mention(self, as2, mock_head, mock_get, mock_post):
        """Shared check: an AS2 mention POSTed to the inbox becomes a webmention.

        Posts the given activity, then verifies endpoint discovery, the
        outbound webmention POST, and the stored Response entity.
        """
        mock_head.return_value = requests_response(url='http://target')
        mock_get.return_value = requests_response(
            '<html><head><link rel="webmention" href="/webmention"></html>')
        mock_post.return_value = requests_response()

        got = app.get_response('/foo.com/inbox',
                               method='POST',
                               body=json.dumps(as2))
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(200, got.status_int, got.body)
        mock_get.assert_called_once_with('http://target/',
                                         headers=common.HEADERS,
                                         verify=False)

        expected_headers = copy.deepcopy(common.HEADERS)
        expected_headers['Accept'] = '*/*'
        mock_post.assert_called_once_with(
            'http://target/webmention',
            data={
                'source':
                'http://localhost/render?source=http%3A%2F%2Fthis%2Fmention&target=http%3A%2F%2Ftarget%2F',
                'target': 'http://target/',
            },
            allow_redirects=False,
            headers=expected_headers,
            verify=False)

        # stored Response is keyed on '<source> <target>'
        resp = Response.get_by_id('http://this/mention http://target/')
        self.assertEqual('in', resp.direction)
        self.assertEqual('activitypub', resp.protocol)
        self.assertEqual('complete', resp.status)
        self.assertEqual(common.redirect_unwrap(as2),
                         json.loads(resp.source_as2))
Example #37
0
        def test_like(self, mock_urlopen, mock_head, mock_get, mock_post):
            """An OStatus Salmon 'like' slap becomes a webmention and a Response.

            NOTE(review): the scrape left this method's body at the same
            indent as its ``def``; re-indented so the block parses. The
            literal XML lines stay at column 0 — they are string content.
            """
            atom_like = """\
<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
       xmlns:activity='http://activitystrea.ms/spec/1.0/'>
  <uri>https://my/like</uri>
  <author>
    <name>Alice</name>
    <uri>[email protected]</uri>
  </author>
  <activity:verb>http://activitystrea.ms/schema/1.0/like</activity:verb>
  <activity:object>http://orig/post</activity:object>
  <updated>%s</updated>
</entry>""" % datetime.datetime.now().isoformat('T')
            self.send_slap(mock_urlopen, mock_head, mock_get, mock_post, atom_like)

            # check webmention post
            mock_post.assert_called_once_with(
                'http://orig/webmention',
                data={
                    'source':
                    'http://localhost/render?source=https%3A%2F%2Fmy%2Flike&target=http%3A%2F%2Forig%2Fpost',
                    'target': 'http://orig/post',
                },
                allow_redirects=False,
                headers=self.expected_headers,
                verify=False)

            # check stored response
            resp = Response.get_by_id('https://my/like http://orig/post')
            self.assertEqual('in', resp.direction)
            self.assertEqual('ostatus', resp.protocol)
            self.assertEqual('complete', resp.status)
            self.assertEqual(atom_like, resp.source_atom)
    def get(self):
        """Render every stored response for the question given by ?id=."""
        question = Question.get_by_id(int(self.request.get('id')))
        query = Response.all()
        query.filter("question =", question)

        ctx = {'responses': query, 'question': question}
        self.response.out.write(template.render('templates/responses.html', ctx))
    def get(self):
        """Render one ResponseSet (looked up by ?id=) with its responses."""
        target = ResponseSet.get_by_id(int(self.request.get('id')))
        query = Response.all()
        query.filter('responseSet =', target)

        ctx = {'responseSet': target, 'responses': query}
        self.response.out.write(template.render('templates/responseSet.html', ctx))
Example #40
0
  def refetch_hfeed(self, source):
    """refetch and reprocess the author's url, looking for
    new or updated syndication urls that we may have missed the first
    time we looked for them.

    Args:
      source: source entity whose author page h-feed is re-fetched

    Any Response whose activity url matches a newly discovered
    rel=syndication relationship is re-opened and re-propagated.
    """
    logging.debug('refetching h-feed for source %s', source.label())
    relationships = original_post_discovery.refetch(source)
    if not relationships:
      return

    logging.debug('refetch h-feed found %d new rel=syndication relationships',
                  len(relationships))

    # grab the Responses and see if any of them have a syndication
    # url matching one of the newly discovered relationships. We'll
    # check each response until we've seen all of them or until
    # the 60s timer runs out.
    # TODO maybe add a (canonicalized) url field to Response so we can
    # query by it instead of iterating over all of them
    for response in (Response.query(Response.source == source.key)
                     .order(-Response.created)):
      if response.activity_json:  # handle old entities
        response.activities_json.append(response.activity_json)
        response.activity_json = None

      new_orig_urls = set()
      for activity_json in response.activities_json:
        activity = json.loads(activity_json)
        activity_url = activity.get('url') or activity.get('object', {}).get('url')
        if not activity_url:
          logging.warning('activity has no url %s', activity_json)
          continue

        activity_url = source.canonicalize_syndication_url(activity_url)
        # look for activity url in the newly discovered list of relationships
        for relationship in relationships.get(activity_url, []):
          # won't re-propagate if the discovered link is already among
          # these well-known upstream duplicates
          if relationship.original in response.sent:
            logging.info(
              '%s found a new rel=syndication link %s -> %s, but the '
              'relationship had already been discovered by another method',
              response.label(), relationship.original,
              relationship.syndication)
          else:
            logging.info(
              '%s found a new rel=syndication link %s -> %s, and '
              'will be repropagated with a new target!',
              response.label(), relationship.original,
              relationship.syndication)
            new_orig_urls.add(relationship.original)

      if new_orig_urls:
        # re-open a previously 'complete' propagate task
        response.status = 'new'
        response.unsent.extend(list(new_orig_urls))
        response.put()
        response.add_task()
Example #41
0
 def test_get_type(self):
   """get_type() maps AS objects to repost/rsvp/comment/post types."""
   cases = [
       ('repost', {'objectType': 'activity', 'verb': 'share'}),
       ('rsvp', {'verb': 'rsvp-no'}),
       ('rsvp', {'verb': 'invite'}),
       ('comment', {'objectType': 'comment'}),
       ('post', {'verb': 'post'}),
       ('post', {'objectType': 'event'}),
       ('post', {'objectType': 'image'}),
       # a note with inReplyTo context counts as a comment
       ('comment', {'objectType': 'note',
                    'context': {'inReplyTo': {'foo': 'bar'}}}),
       # objectType wins over verb
       ('comment', {'objectType': 'comment', 'verb': 'post'}),
   ]
   for expected, obj in cases:
     self.assertEqual(expected, Response.get_type(obj))
Example #42
0
File: app.py Project: t20/skore
def add_response():
    """Persist a board response submitted from the form and redirect to it.

    Reads 'username' (defaulting to 'anonymous'), 'board_id', and the
    list of 'item' ids from the POSTed form, flashes success/failure,
    and redirects to the saved response's page.
    """
    username = request.form.get('username') or 'anonymous'
    # NOTE(review): int(board_id) raises if 'board_id' is missing — presumably
    # the form always supplies it; confirm against the template.
    board_id = request.form.get('board_id')
    # drop blank entries and coerce the remaining item ids to ints
    items = [int(x) for x in request.form.getlist('item') if x]

    response = Response(username=username, board_id=int(board_id),
            items=items)
    saved = response.save()

    if saved:
        flash('Response saved. Thanks {}.'.format(username))
    else:
        flash('Could not save response')

    return redirect(url_for('response', response_id=response.id))
Example #43
0
    def repropagate_old_responses(self, source, relationships):
        """Find old Responses that match a new SyndicatedPost and repropagate them.

        We look through as many responses as we can until the datastore
        query expires.

        Args:
          source: :class:`models.Source`
          relationships: refetch result; maps canonicalized activity url to
            discovered rel=syndication relationships
        """
        for response in Response.query(Response.source == source.key).order(-Response.updated):
            new_orig_urls = set()
            for activity_json in response.activities_json:
                activity = json.loads(activity_json)
                activity_url = activity.get("url") or activity.get("object", {}).get("url")
                if not activity_url:
                    logging.warning("activity has no url %s", activity_json)
                    continue

                # canonicalize may return None for urls it can't handle
                activity_url = source.canonicalize_url(activity_url, activity=activity)
                if not activity_url:
                    continue

                # look for activity url in the newly discovered list of relationships
                for relationship in relationships.get(activity_url, []):
                    # won't re-propagate if the discovered link is already among
                    # these well-known upstream duplicates
                    if relationship.original in response.sent or relationship.original in response.original_posts:
                        logging.info(
                            "%s found a new rel=syndication link %s -> %s, but the "
                            "relationship had already been discovered by another method",
                            response.label(),
                            relationship.original,
                            relationship.syndication,
                        )
                    else:
                        logging.info(
                            "%s found a new rel=syndication link %s -> %s, and "
                            "will be repropagated with a new target!",
                            response.label(),
                            relationship.original,
                            relationship.syndication,
                        )
                        new_orig_urls.add(relationship.original)

            if new_orig_urls:
                # re-open a previously 'complete' propagate task
                response.status = "new"
                response.unsent.extend(list(new_orig_urls))
                response.put()
                response.add_task()
    def get(self):
        """Delete the ResponseSet given by ?id= (and its responses)."""
        target = ResponseSet.get_by_id(int(self.request.get('id')))

        # Responses reference the set, so remove them first.
        query = Response.all()
        query.filter('responseSet =', target)
        for resp in query:
            resp.delete()

        target.delete()

        self.redirect('/qadmin')
Example #45
0
 def post(self, request):
     """Save a response to a question, then bounce back to the referrer."""
     message = request.POST.get('response', False)
     if message:
         reply = Response()
         reply.message = message
         reply.question = Question.objects.get(id=request.POST['question'])
         reply.user = request.user
         reply.save()
     return HttpResponseRedirect(request.META['HTTP_REFERER'])
Example #46
0
  def repropagate_old_responses(self, source, relationships):
    """Find old Responses that match a new SyndicatedPost and repropagate them.

    We look through as many responses as we can until the datastore query expires.

    Args:
      source: source entity whose responses are scanned
      relationships: refetch result; maps canonicalized activity url to
        discovered rel=syndication relationships
    """
    for response in (Response.query(Response.source == source.key)
                     .order(-Response.updated)):
      if response.activity_json:  # handle old entities
        response.activities_json.append(response.activity_json)
        response.activity_json = None

      new_orig_urls = set()
      for activity_json in response.activities_json:
        activity = json.loads(activity_json)
        activity_url = activity.get('url') or activity.get('object', {}).get('url')
        if not activity_url:
          logging.warning('activity has no url %s', activity_json)
          continue

        activity_url = source.canonicalize_syndication_url(activity_url,
                                                           activity=activity)
        # look for activity url in the newly discovered list of relationships
        for relationship in relationships.get(activity_url, []):
          # won't re-propagate if the discovered link is already among
          # these well-known upstream duplicates
          if (relationship.original in response.sent or
              relationship.original in response.original_posts):
            logging.info(
              '%s found a new rel=syndication link %s -> %s, but the '
              'relationship had already been discovered by another method',
              response.label(), relationship.original,
              relationship.syndication)
          else:
            logging.info(
              '%s found a new rel=syndication link %s -> %s, and '
              'will be repropagated with a new target!',
              response.label(), relationship.original,
              relationship.syndication)
            new_orig_urls.add(relationship.original)

      if new_orig_urls:
        # re-open a previously 'complete' propagate task
        response.status = 'new'
        response.unsent.extend(list(new_orig_urls))
        response.put()
        response.add_task()
Example #47
0
  def test_get_or_save(self):
    """First save enqueues a propagate task; a repeat save does not."""
    resp = self.responses[0]
    self.assertEqual(0, Response.query().count())
    self.assert_no_propagate_task()

    # First call stores the entity and schedules propagation.
    stored = resp.get_or_save(self.sources[0])
    self.assertEqual(resp.key, stored.key)
    self.assertEqual(resp.source, stored.source)
    self.assertEqual('comment', stored.type)
    self.assertEqual([], stored.old_response_jsons)
    self.assert_propagate_task()

    # Saving again is a no-op: same entity, no extra task.
    again = stored.get_or_save(self.sources[0])
    self.assert_entities_equal(stored, again)
    self.assert_no_propagate_task()
Example #48
0
  def template_vars(self):
    """Collect recently-failed responses for the admin errors page.

    Returns:
      dict with 'responses': up to NUM_RESPONSES Response entities updated
      within the last hour that have error URLs and aren't complete.
    """
    responses = []

    # Find the most recently propagated responses with error URLs
    for r in Response.query().order(-Response.updated):
      if (len(responses) >= self.NUM_RESPONSES or
          r.updated < datetime.datetime.now() - datetime.timedelta(hours=1)):
        break
      elif not r.error or r.status == 'complete':
        continue

      # r.source = r.source.get()
      r.links = [util.pretty_link(u, new_tab=True) for u in r.error + r.failed]
      r.response = json.loads(r.response_json)
      r.activities = [json.loads(a) for a in r.activities_json]

      responses.append(r)

    # deterministic display order
    responses.sort(key=lambda r: (r.source, r.activities, r.response))
    return {'responses': responses}
Example #49
0
  def test_get_or_save(self):
    """First get_or_save() enqueues a propagate task; repeats don't."""
    self.sources[0].put()

    response = self.responses[0]
    self.assertEqual(0, Response.query().count())
    self.assertEqual(0, len(self.taskqueue_stub.GetTasks('propagate')))

    # new. should add a propagate task.
    saved = response.get_or_save()
    self.assertEqual(response.key, saved.key)
    self.assertEqual(response.source, saved.source)
    self.assertEqual('comment', saved.type)

    # the task carries the saved entity's key
    tasks = self.taskqueue_stub.GetTasks('propagate')
    self.assertEqual(1, len(tasks))
    self.assertEqual(response.key.urlsafe(),
                     testutil.get_task_params(tasks[0])['response_key'])
    self.assertEqual('/_ah/queue/propagate', tasks[0]['url'])

    # existing. no new task.
    same = saved.get_or_save()
    self.assertEqual(saved.source, same.source)
    self.assertEqual(1, len(tasks))
Example #50
0
  def template_vars(self):
    """Build the template variables for a user's page.

    Covers blog webmention promos, the ten most recent public responses
    (for 'listen' sources), recent publishes, blog posts, and blog
    webmentions. Returns {} when there is no source.
    """
    if not self.source:
      return {}

    vars = super(UserHandler, self).template_vars()
    vars.update({
        'source': self.source,
        'epoch': util.EPOCH,
        })

    # Blog webmention promos
    if 'webmention' not in self.source.features:
      if self.source.SHORT_NAME in ('blogger', 'tumblr', 'wordpress'):
        vars[self.source.SHORT_NAME + '_promo'] = True
      else:
        for domain in self.source.domains:
          if ('.blogspot.' in domain and  # Blogger uses country TLDs
              not Blogger.query(Blogger.domains == domain).get()):
            vars['blogger_promo'] = True
          elif (domain.endswith('tumblr.com') and
                not Tumblr.query(Tumblr.domains == domain).get()):
            vars['tumblr_promo'] = True
          elif (domain.endswith('wordpress.com') and
                not WordPress.query(WordPress.domains == domain).get()):
            vars['wordpress_promo'] = True

    # Responses
    if 'listen' in self.source.features:
      vars['responses'] = []
      for i, r in enumerate(Response.query()
                              .filter(Response.source == self.source.key)\
                              .order(-Response.updated)):
        r.response = json.loads(r.response_json)
        if r.activity_json:  # handle old entities
          r.activities_json.append(r.activity_json)
        r.activities = [json.loads(a) for a in r.activities_json]

        # skip responses with any non-public activity
        if (not gr_source.Source.is_public(r.response) or
            not all(gr_source.Source.is_public(a) for a in r.activities)):
          continue

        r.actor = r.response.get('author') or r.response.get('actor', {})
        if not r.response.get('content'):
          # synthesize display content for contentless responses
          phrases = {
            'like': 'liked this',
            'repost': 'reposted this',
            'rsvp-yes': 'is attending',
            'rsvp-no': 'is not attending',
            'rsvp-maybe': 'might attend',
            'invite': 'is invited',
          }
          r.response['content'] = '%s %s.' % (
            r.actor.get('displayName') or '',
            phrases.get(r.type) or phrases.get(r.response.get('verb')))

        # convert image URL to https if we're serving over SSL
        image_url = r.actor.setdefault('image', {}).get('url')
        if image_url:
          r.actor['image']['url'] = util.update_scheme(image_url, self)

        # generate original post links
        r.links = self.process_webmention_links(r)

        vars['responses'].append(r)
        # stop at 10 shown, or after scanning ~200 entities
        if len(vars['responses']) >= 10 or i > 200:
          break

    # Publishes
    if 'publish' in self.source.features:
      publishes = Publish.query().filter(Publish.source == self.source.key)\
                                 .order(-Publish.updated)\
                                 .fetch(10)
      for p in publishes:
        p.pretty_page = util.pretty_link(
          p.key.parent().id(), a_class='original-post', new_tab=True)

      vars['publishes'] = publishes

    if 'webmention' in self.source.features:
      # Blog posts
      blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                  .order(-BlogPost.created)\
                                  .fetch(10)
      for b in blogposts:
        b.links = self.process_webmention_links(b)
        try:
          text = b.feed_item.get('title')
        except ValueError:
          text = None
        b.pretty_url = util.pretty_link(b.key.id(), text=text,
                                        a_class='original-post', max_length=40,
                                        new_tab=True)

      # Blog webmentions
      webmentions = BlogWebmention.query()\
          .filter(BlogWebmention.source == self.source.key)\
          .order(-BlogWebmention.updated)\
          .fetch(10)
      for w in webmentions:
        w.pretty_source = util.pretty_link(w.source_url(), a_class='original-post',
                                           new_tab=True)
        try:
          target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                              self.source.domains)
        except BaseException:
          target_is_source = False
        w.pretty_target = util.pretty_link(w.target_url(), a_class='original-post',
                                           new_tab=True, keep_host=target_is_source)

      vars.update({'blogposts': blogposts, 'webmentions': webmentions})

    return vars
Example #51
0
import instagram
import tumblr
import twitter
import wordpress_rest

# One-off reporting script (Python 2: note the print statements).
# Counts users per source domain and dumps two CSVs.
domains = collections.defaultdict(int)  # maps domain to # of users
for cls in models.sources.values():
  for src in cls.query(cls.domains > ''):
    for domain in src.domains:
      print domain
      domains[domain] += 1

# write "domain,num_users" rows, most users first
with open('domains.txt', 'w') as f:
  f.write('domain,num_users\n')
  f.write('\n'.join(str(item) for item in reversed(sorted(
    '%s,%s' % (item[1], item[0]) for item in domains.items()))))

# walk distinct domains present in Response.sent by repeatedly querying
# for the next sent URL greater than the last one seen
with open('domains_sent.txt', 'w') as f:
  url = ''
  while True:
    resp = Response.query(Response.sent > url).get(projection=['sent'])
    if not resp:
      break
    domain = None
    for sent in resp.sent:
      parsed = urlparse.urlparse(sent)
      if sent > url and (domain is None or parsed.netloc < domain):
        domain = parsed.netloc
    # NOTE(review): 'parsed' here is whatever the loop last produced, not
    # necessarily the parse of the chosen 'domain' — looks like a latent
    # bug; confirm intent. chr(ord('/') + 1) bumps the cursor past every
    # URL on the current host.
    url = urlparse.urlunparse(parsed[:2] + ('', '', '', '')) + chr(ord('/') + 1)
    print domain
Example #52
0
  def template_vars(self):
    """Build the template variables for a user's page (paged variant).

    Covers webmention promos, paged recent responses ('listen' sources,
    with responses_before/responses_after ISO8601 query params), recent
    publishes, blog posts, and blog webmentions.
    """
    vars = super(UserHandler, self).template_vars()
    vars.update({
        'source': self.source,
        'EPOCH': util.EPOCH,
        'REFETCH_HFEED_TRIGGER': models.REFETCH_HFEED_TRIGGER,
        'RECENT_PRIVATE_POSTS_THRESHOLD': RECENT_PRIVATE_POSTS_THRESHOLD,
        })
    if not self.source:
      return vars

    if isinstance(self.source, instagram.Instagram):
      auth = self.source.auth_entity
      vars['indieauth_me'] = (
        auth.id if isinstance(auth, indieauth.IndieAuth)
        else self.source.domain_urls[0] if self.source.domain_urls
        else None)

    # Blog webmention promos
    if 'webmention' not in self.source.features:
      if self.source.SHORT_NAME in ('blogger', 'tumblr', 'wordpress'):
        vars[self.source.SHORT_NAME + '_promo'] = True
      else:
        for domain in self.source.domains:
          if ('.blogspot.' in domain and  # Blogger uses country TLDs
              not Blogger.query(Blogger.domains == domain).get()):
            vars['blogger_promo'] = True
          elif (domain.endswith('tumblr.com') and
                not Tumblr.query(Tumblr.domains == domain).get()):
            vars['tumblr_promo'] = True
          elif (domain.endswith('wordpress.com') and
                not WordPress.query(WordPress.domains == domain).get()):
            vars['wordpress_promo'] = True

    # Responses
    if 'listen' in self.source.features:
      vars['responses'] = []
      query = Response.query().filter(Response.source == self.source.key)

      # if there's a paging param (responses_before or responses_after), update
      # query with it
      def get_paging_param(param):
        # NOTE(review): bare except — it always aborts with a 400 and logs
        # the exception, so nothing is silently swallowed
        val = self.request.get(param)
        try:
          return util.parse_iso8601(val) if val else None
        except:
          msg = "Couldn't parse %s %r as ISO8601" % (param, val)
          logging.exception(msg)
          self.abort(400, msg)

      before = get_paging_param('responses_before')
      after = get_paging_param('responses_after')
      if before and after:
        self.abort(400, "can't handle both responses_before and responses_after")
      elif after:
        query = query.filter(Response.updated > after).order(Response.updated)
      elif before:
        query = query.filter(Response.updated < before).order(-Response.updated)
      else:
        query = query.order(-Response.updated)

      query_iter = query.iter()
      for i, r in enumerate(query_iter):
        r.response = json.loads(r.response_json)
        r.activities = [json.loads(a) for a in r.activities_json]

        # skip responses with any non-public activity; 'post' type responses
        # show no activity list
        if (not self.source.is_activity_public(r.response) or
            not all(self.source.is_activity_public(a) for a in r.activities)):
          continue
        elif r.type == 'post':
          r.activities = []

        r.actor = r.response.get('author') or r.response.get('actor', {})

        for a in r.activities + [r.response]:
          if not a.get('content'):
            a['content'] = a.get('object', {}).get('content')

        if not r.response.get('content'):
          # synthesize display content for contentless responses
          phrases = {
            'like': 'liked this',
            'repost': 'reposted this',
            'rsvp-yes': 'is attending',
            'rsvp-no': 'is not attending',
            'rsvp-maybe': 'might attend',
            'rsvp-interested': 'is interested',
            'invite': 'is invited',
          }
          r.response['content'] = '%s %s.' % (
            r.actor.get('displayName') or '',
            phrases.get(r.type) or phrases.get(r.response.get('verb')))

        # convert image URL to https if we're serving over SSL
        image_url = r.actor.setdefault('image', {}).get('url')
        if image_url:
          r.actor['image']['url'] = util.update_scheme(image_url, self)

        # generate original post links
        r.links = self.process_webmention_links(r)
        r.original_links = [util.pretty_link(url, new_tab=True)
                            for url in r.original_posts]

        vars['responses'].append(r)
        # stop at 10 shown, or after scanning ~200 entities
        if len(vars['responses']) >= 10 or i > 200:
          break

      vars['responses'].sort(key=lambda r: r.updated, reverse=True)

      # calculate new paging param(s)
      new_after = (
        before if before else
        vars['responses'][0].updated if
          vars['responses'] and query_iter.probably_has_next() and (before or after)
        else None)
      if new_after:
        vars['responses_after_link'] = ('?responses_after=%s#responses' %
                                         new_after.isoformat())

      new_before = (
        after if after else
        vars['responses'][-1].updated if
          vars['responses'] and query_iter.probably_has_next()
        else None)
      if new_before:
        vars['responses_before_link'] = ('?responses_before=%s#responses' %
                                         new_before.isoformat())

      vars['next_poll'] = max(
        self.source.last_poll_attempt + self.source.poll_period(),
        # lower bound is 1 minute from now
        util.now_fn() + datetime.timedelta(seconds=90))

    # Publishes
    if 'publish' in self.source.features:
      publishes = Publish.query().filter(Publish.source == self.source.key)\
                                 .order(-Publish.updated)\
                                 .fetch(10)
      for p in publishes:
        p.pretty_page = util.pretty_link(
          p.key.parent().id(), attrs={'class': 'original-post u-url u-name'},
          new_tab=True)

      vars['publishes'] = publishes

    if 'webmention' in self.source.features:
      # Blog posts
      blogposts = BlogPost.query().filter(BlogPost.source == self.source.key)\
                                  .order(-BlogPost.created)\
                                  .fetch(10)
      for b in blogposts:
        b.links = self.process_webmention_links(b)
        try:
          text = b.feed_item.get('title')
        except ValueError:
          text = None
        b.pretty_url = util.pretty_link(
          b.key.id(), text=text, attrs={'class': 'original-post u-url u-name'},
          max_length=40, new_tab=True)

      # Blog webmentions
      webmentions = BlogWebmention.query()\
          .filter(BlogWebmention.source == self.source.key)\
          .order(-BlogWebmention.updated)\
          .fetch(10)
      for w in webmentions:
        w.pretty_source = util.pretty_link(
          w.source_url(), attrs={'class': 'original-post'}, new_tab=True)
        try:
          target_is_source = (urlparse.urlparse(w.target_url()).netloc in
                              self.source.domains)
        except BaseException:
          target_is_source = False
        w.pretty_target = util.pretty_link(
          w.target_url(), attrs={'class': 'original-post'}, new_tab=True,
          keep_host=target_is_source)

      vars.update({'blogposts': blogposts, 'webmentions': webmentions})

    return vars
Example #53
0
 def test_get_type(self):
   """get_type() classifies share/rsvp/invite/other AS objects."""
   for expected, obj in (
       ('repost', {'objectType': 'activity', 'verb': 'share'}),
       ('rsvp', {'verb': 'rsvp-no'}),
       ('rsvp', {'verb': 'invite'}),
       ('comment', {'objectType': 'other'}),
   ):
     self.assertEqual(expected, Response.get_type(obj))
Example #54
0
    def poll(self, source):
        """Actually runs the poll.

    Stores property names and values to update in source.updates.
    """
        if source.last_activities_etag or source.last_activity_id:
            logging.debug("Using ETag %s, last activity id %s", source.last_activities_etag, source.last_activity_id)

        #
        # Step 1: fetch activities:
        # * posts by the user
        # * search all posts for the user's domain URLs to find links
        #
        cache = util.CacheDict()
        if source.last_activities_cache_json:
            cache.update(json.loads(source.last_activities_cache_json))

        # search for links first so that the user's activities and responses
        # override them if they overlap
        links = source.search_for_links()

        # this user's own activities (and user mentions)
        resp = source.get_activities_response(
            fetch_replies=True,
            fetch_likes=True,
            fetch_shares=True,
            fetch_mentions=True,
            count=50,
            etag=source.last_activities_etag,
            min_id=source.last_activity_id,
            cache=cache,
        )
        etag = resp.get("etag")  # used later
        user_activities = resp.get("items", [])

        # these map ids to AS objects
        responses = {a["id"]: a for a in links}
        activities = {a["id"]: a for a in links + user_activities}

        # extract silo activity ids, update last_activity_id
        silo_activity_ids = set()
        last_activity_id = source.last_activity_id
        for id, activity in activities.items():
            # maybe replace stored last activity id
            parsed = util.parse_tag_uri(id)
            if parsed:
                id = parsed[1]
            silo_activity_ids.add(id)
            try:
                # try numeric comparison first
                greater = int(id) > int(last_activity_id)
            except (TypeError, ValueError):
                greater = id > last_activity_id
            if greater:
                last_activity_id = id

        if last_activity_id and last_activity_id != source.last_activity_id:
            source.updates["last_activity_id"] = last_activity_id

        # trim cache to just the returned activity ids, so that it doesn't grow
        # without bound. (WARNING: depends on get_activities_response()'s cache key
        # format, e.g. 'PREFIX ACTIVITY_ID'!)
        source.updates["last_activities_cache_json"] = json.dumps(
            {k: v for k, v in cache.items() if k.split()[-1] in silo_activity_ids}
        )

        # Cache to make sure we only fetch the author's h-feed(s) the
        # first time we see it
        fetched_hfeeds = set()

        # narrow down to just public activities
        public = {}
        private = {}
        for id, activity in activities.items():
            (public if source.is_activity_public(activity) else private)[id] = activity
        logging.info("Found %d public activities: %s", len(public), public.keys())
        logging.info("Found %d private activities: %s", len(private), private.keys())

        last_public_post = (source.last_public_post or util.EPOCH).isoformat()
        public_published = util.trim_nulls([a.get("published") for a in public.values()])
        if public_published:
            max_published = max(public_published)
            if max_published > last_public_post:
                last_public_post = max_published
                source.updates["last_public_post"] = util.as_utc(util.parse_iso8601(max_published))

        source.updates["recent_private_posts"] = len(
            [a for a in private.values() if a.get("published", util.EPOCH_ISO) > last_public_post]
        )

        #
        # Step 2: extract responses, store their activities in response['activities']
        #
        # WARNING: this creates circular references in link posts found by search
        # queries in step 1, since they are their own activity. We use
        # prune_activity() and prune_response() in step 4 to remove these before
        # serializing to JSON.
        #
        for id, activity in public.items():
            obj = activity.get("object") or activity

            # handle user mentions
            user_id = source.user_tag_id()
            if obj.get("author", {}).get("id") != user_id:
                for tag in obj.get("tags", []):
                    urls = tag.get("urls")
                    if tag.get("objectType") == "person" and tag.get("id") == user_id and urls:
                        activity["originals"], activity["mentions"] = original_post_discovery.discover(
                            source,
                            activity,
                            fetch_hfeed=True,
                            include_redirect_sources=False,
                            already_fetched_hfeeds=fetched_hfeeds,
                        )
                        activity["mentions"].update(u.get("value") for u in urls)
                        responses[id] = activity
                        break

            # handle quote mentions
            for att in obj.get("attachments", []):
                if (
                    att.get("objectType") in ("note", "article")
                    and att.get("author", {}).get("id") == source.user_tag_id()
                ):
                    # now that we've confirmed that one exists, OPD will dig
                    # into the actual attachments
                    if "originals" not in activity or "mentions" not in activity:
                        activity["originals"], activity["mentions"] = original_post_discovery.discover(
                            source,
                            activity,
                            fetch_hfeed=True,
                            include_redirect_sources=False,
                            already_fetched_hfeeds=fetched_hfeeds,
                        )
                    responses[id] = activity
                    break

            # extract replies, likes, reactions, reposts, and rsvps
            replies = obj.get("replies", {}).get("items", [])
            tags = obj.get("tags", [])
            likes = [t for t in tags if Response.get_type(t) == "like"]
            reactions = [t for t in tags if Response.get_type(t) == "react"]
            reposts = [t for t in tags if Response.get_type(t) == "repost"]
            rsvps = Source.get_rsvps_from_event(obj)

            # coalesce responses. drop any without ids
            for resp in replies + likes + reactions + reposts + rsvps:
                id = resp.get("id")
                if not id:
                    logging.error("Skipping response without id: %s", json.dumps(resp, indent=2))
                    continue

                resp.setdefault("activities", []).append(activity)

                # when we find two responses with the same id, the earlier one may have
                # come from a link post or user mention, and this one is probably better
                # since it probably came from the user's activity, so prefer this one.
                # background: https://github.com/snarfed/bridgy/issues/533
                existing = responses.get(id)
                if existing:
                    if source.gr_source.activity_changed(resp, existing, log=True):
                        logging.warning("Got two different versions of same response!\n%s\n%s", existing, resp)
                    resp["activities"].extend(existing.get("activities", []))

                responses[id] = resp

        #
        # Step 3: filter out responses we've already seen
        #
        # seen responses (JSON objects) for each source are stored in its entity.
        unchanged_responses = []
        if source.seen_responses_cache_json:
            for seen in json.loads(source.seen_responses_cache_json):
                id = seen["id"]
                resp = responses.get(id)
                if resp and not source.gr_source.activity_changed(seen, resp, log=True):
                    unchanged_responses.append(seen)
                    del responses[id]

        #
        # Step 4: store new responses and enqueue propagate tasks
        #
        pruned_responses = []
        for id, resp in responses.items():
            resp_type = Response.get_type(resp)
            activities = resp.pop("activities", [])
            if not activities and resp_type == "post":
                activities = [resp]
            too_long = set()
            urls_to_activity = {}
            for i, activity in enumerate(activities):
                # we'll usually have multiple responses for the same activity, and the
                # objects in resp['activities'] are shared, so cache each activity's
                # discovered webmention targets inside its object.
                if "originals" not in activity or "mentions" not in activity:
                    activity["originals"], activity["mentions"] = original_post_discovery.discover(
                        source,
                        activity,
                        fetch_hfeed=True,
                        include_redirect_sources=False,
                        already_fetched_hfeeds=fetched_hfeeds,
                    )

                targets = original_post_discovery.targets_for_response(
                    resp, originals=activity["originals"], mentions=activity["mentions"]
                )
                if targets:
                    logging.info(
                        "%s has %d webmention target(s): %s", activity.get("url"), len(targets), " ".join(targets)
                    )
                for t in targets:
                    if len(t) <= _MAX_STRING_LENGTH:
                        urls_to_activity[t] = i
                    else:
                        logging.warning("Giving up on target URL over %s chars! %s", _MAX_STRING_LENGTH, t)
                        too_long.add(t[: _MAX_STRING_LENGTH - 4] + "...")

            # store/update response entity. the prune_*() calls are important to
            # remove circular references in link responses, which are their own
            # activities. details in the step 2 comment above.
            pruned_response = util.prune_response(resp)
            pruned_responses.append(pruned_response)
            resp_entity = Response(
                id=id,
                source=source.key,
                activities_json=[json.dumps(util.prune_activity(a, source)) for a in activities],
                response_json=json.dumps(pruned_response),
                type=resp_type,
                unsent=list(urls_to_activity.keys()),
                failed=list(too_long),
                original_posts=resp.get("originals", []),
            )
            if urls_to_activity and len(activities) > 1:
                resp_entity.urls_to_activity = json.dumps(urls_to_activity)
            resp_entity.get_or_save(source)

        # update cache
        if pruned_responses:
            source.updates["seen_responses_cache_json"] = json.dumps(pruned_responses + unchanged_responses)

        source.updates.update({"last_polled": source.last_poll_attempt, "poll_status": "ok"})
        if etag and etag != source.last_activities_etag:
            source.updates["last_activities_etag"] = etag

        #
        # Step 5. possibly refetch updated syndication urls
        #
        # if the author has added syndication urls since the first time
        # original_post_discovery ran, we'll miss them. this cleanup task will
        # periodically check for updated urls. only kicks in if the author has
        # *ever* published a rel=syndication url
        if source.should_refetch():
            logging.info("refetching h-feed for source %s", source.label())
            relationships = original_post_discovery.refetch(source)

            now = util.now_fn()
            source.updates["last_hfeed_refetch"] = now

            if relationships:
                logging.info("refetch h-feed found new rel=syndication relationships: %s", relationships)
                try:
                    self.repropagate_old_responses(source, relationships)
                except BaseException, e:
                    if isinstance(
                        e, (datastore_errors.BadRequestError, datastore_errors.Timeout)
                    ) or util.is_connection_failure(e):
                        logging.info("Timeout while repropagating responses.", exc_info=True)
                    else:
                        raise
# Example #55
  def backfeed(self, source, responses=None, activities=None):
    """Processes responses and activities and generates propagate tasks.

    Accumulates property names and values to write back to the source entity
    in ``source.updates`` (last_public_post, recent_private_posts,
    seen_responses_cache_json); the caller is expected to persist them.
    Also stores a Response entity for each new response via get_or_save(),
    which presumably enqueues a propagate task per response — confirm against
    Response.get_or_save().

    Args:
      source: Source
      responses: dict mapping AS response id to AS object, or None
      activities: dict mapping AS activity id to AS object, or None
    """
    # Guard against the mutable-default-argument pitfall: fresh dicts per call.
    # Note that `responses` is mutated in place below.
    if responses is None:
      responses = {}
    if activities is None:
      activities = {}

    # Cache to make sure we only fetch the author's h-feed(s) the
    # first time we see it
    fetched_hfeeds = set()

    # narrow down to just public activities; private ones are only counted
    # (recent_private_posts below), never propagated
    public = {}
    private = {}
    for id, activity in activities.items():
      (public if source.is_activity_public(activity) else private)[id] = activity
    logging.info('Found %d public activities: %s', len(public), public.keys())
    logging.info('Found %d private activities: %s', len(private), private.keys())

    # Track the most recent public post's published timestamp. Comparisons
    # are done on the ISO 8601 strings directly — assumes lexicographic order
    # matches chronological order (true for consistently formatted UTC
    # timestamps; TODO confirm the silo always emits comparable formats).
    last_public_post = (source.last_public_post or util.EPOCH).isoformat()
    public_published = util.trim_nulls([a.get('published') for a in public.values()])
    if public_published:
      max_published = max(public_published)
      if max_published > last_public_post:
        last_public_post = max_published
        source.updates['last_public_post'] = \
          util.as_utc(util.parse_iso8601(max_published))

    # count private posts published after the most recent public post
    source.updates['recent_private_posts'] = \
      len([a for a in private.values()
           if a.get('published', util.EPOCH_ISO) > last_public_post])

    #
    # Step 2: extract responses, store their activities in response['activities']
    #
    # WARNING: this creates circular references in link posts found by search
    # queries in step 1, since they are their own activity. We use
    # prune_activity() and prune_response() in step 4 to remove these before
    # serializing to JSON.
    #
    for id, activity in public.items():
      # activities may wrap their payload in an 'object' field; fall back to
      # the activity itself when they don't
      obj = activity.get('object') or activity

      # handle user mentions: someone else's post that tags this user becomes
      # a response in its own right
      user_id = source.user_tag_id()
      if obj.get('author', {}).get('id') != user_id:
        for tag in obj.get('tags', []):
          urls = tag.get('urls')
          if tag.get('objectType') == 'person' and tag.get('id') == user_id and urls:
            activity['originals'], activity['mentions'] = \
              original_post_discovery.discover(
                source, activity, fetch_hfeed=True,
                include_redirect_sources=False,
                already_fetched_hfeeds=fetched_hfeeds)
            # also treat the mentioning tag's own URLs as mention targets
            activity['mentions'].update(u.get('value') for u in urls)
            responses[id] = activity
            break

      # handle quote mentions: posts that attach/quote one of this user's posts
      for att in obj.get('attachments', []):
        if (att.get('objectType') in ('note', 'article')
                and att.get('author', {}).get('id') == source.user_tag_id()):
          # now that we've confirmed that one exists, OPD will dig
          # into the actual attachments
          if 'originals' not in activity or 'mentions' not in activity:
            activity['originals'], activity['mentions'] = \
              original_post_discovery.discover(
                source, activity, fetch_hfeed=True,
                include_redirect_sources=False,
                already_fetched_hfeeds=fetched_hfeeds)
          responses[id] = activity
          break

      # extract replies, likes, reactions, reposts, and rsvps
      replies = obj.get('replies', {}).get('items', [])
      tags = obj.get('tags', [])
      likes = [t for t in tags if Response.get_type(t) == 'like']
      reactions = [t for t in tags if Response.get_type(t) == 'react']
      reposts = [t for t in tags if Response.get_type(t) == 'repost']
      rsvps = Source.get_rsvps_from_event(obj)

      # coalesce responses. drop any without ids
      for resp in replies + likes + reactions + reposts + rsvps:
        id = resp.get('id')
        if not id:
          logging.error('Skipping response without id: %s', json.dumps(resp, indent=2))
          continue

        # drop responses from users this source has blocked
        if source.is_blocked(resp):
          logging.info('Skipping response by blocked user: %s',
                       json.dumps(resp.get('author') or resp.get('actor'), indent=2))
          continue

        # each response remembers the activity (or activities) it responds to;
        # step 4 reads this list back via resp.pop('activities', ...)
        resp.setdefault('activities', []).append(activity)

        # when we find two responses with the same id, the earlier one may have
        # come from a link post or user mention, and this one is probably better
        # since it probably came from the user's activity, so prefer this one.
        # background: https://github.com/snarfed/bridgy/issues/533
        existing = responses.get(id)
        if existing:
          if source.gr_source.activity_changed(resp, existing, log=True):
            logging.warning('Got two different versions of same response!\n%s\n%s',
                            existing, resp)
          # merge the earlier version's activity list so no target is lost
          resp['activities'].extend(existing.get('activities', []))

        responses[id] = resp

    #
    # Step 3: filter out responses we've already seen
    #
    # seen responses (JSON objects) for each source are stored in its entity.
    # unchanged ones are removed from `responses` but kept so they can be
    # re-cached at the end.
    unchanged_responses = []
    if source.seen_responses_cache_json:
      for seen in json.loads(source.seen_responses_cache_json):
        id = seen['id']
        resp = responses.get(id)
        if resp and not source.gr_source.activity_changed(seen, resp, log=True):
          unchanged_responses.append(seen)
          del responses[id]

    #
    # Step 4: store new responses and enqueue propagate tasks
    #
    pruned_responses = []
    for id, resp in responses.items():
      resp_type = Response.get_type(resp)
      activities = resp.pop('activities', [])
      # a standalone post (e.g. a link post found by search) is its own activity
      if not activities and resp_type == 'post':
        activities = [resp]
      too_long = set()
      urls_to_activity = {}
      for i, activity in enumerate(activities):
        # we'll usually have multiple responses for the same activity, and the
        # objects in resp['activities'] are shared, so cache each activity's
        # discovered webmention targets inside its object.
        if 'originals' not in activity or 'mentions' not in activity:
          activity['originals'], activity['mentions'] = \
            original_post_discovery.discover(
              source, activity, fetch_hfeed=True,
              include_redirect_sources=False,
              already_fetched_hfeeds=fetched_hfeeds)

        targets = original_post_discovery.targets_for_response(
          resp, originals=activity['originals'], mentions=activity['mentions'])
        if targets:
          logging.info('%s has %d webmention target(s): %s', activity.get('url'),
                       len(targets), ' '.join(targets))
        for t in targets:
          if len(t) <= _MAX_STRING_LENGTH:
            # remember which activity (by index) each target URL came from
            urls_to_activity[t] = i
          else:
            # URL won't fit in a datastore string property; record a truncated
            # marker in `failed` instead of attempting to send
            logging.info('Giving up on target URL over %s chars! %s',
                         _MAX_STRING_LENGTH, t)
            too_long.add(t[:_MAX_STRING_LENGTH - 4] + '...')

      # store/update response entity. the prune_*() calls are important to
      # remove circular references in link responses, which are their own
      # activities. details in the step 2 comment above.
      pruned_response = util.prune_response(resp)
      pruned_responses.append(pruned_response)
      resp_entity = Response(
        id=id,
        source=source.key,
        activities_json=[json.dumps(util.prune_activity(a, source))
                         for a in activities],
        response_json=json.dumps(pruned_response),
        type=resp_type,
        unsent=list(urls_to_activity.keys()),
        failed=list(too_long),
        original_posts=resp.get('originals', []))
      # the URL->activity map is only needed to disambiguate when a response
      # spans more than one activity
      if urls_to_activity and len(activities) > 1:
        resp_entity.urls_to_activity=json.dumps(urls_to_activity)
      resp_entity.get_or_save(source, restart=self.RESTART_EXISTING_TASKS)

    # update cache with both the new and the unchanged-but-seen responses so
    # step 3 can skip them on the next poll
    if pruned_responses:
      source.updates['seen_responses_cache_json'] = json.dumps(
        pruned_responses + unchanged_responses)