Example #1
  def get(self):
    expected_inputs = ('activitystreams', 'html', 'json-mf2')
    input = util.get_required_param(self, 'input')
    if input not in expected_inputs:
      raise exc.HTTPBadRequest('Invalid input: %s, expected one of %r' %
                               (input, expected_inputs))
    url = util.get_required_param(self, 'url')

    # check if request is cached
    cache = self.request.get('cache', '').lower() != 'false'
    cache_key = 'U %s' % url
    cached = memcache.get(cache_key) if cache else None

    if cached:
      logging.info('Serving cached response %r', cache_key)
      url = cached['url']
      body = cached['body']
    else:
      # fetch url
      try:
        resp = util.urlopen(url)
      except (ValueError, httplib.InvalidURL) as e:
        self.abort(400, str(e))
        # other exceptions are handled by webutil.handlers.handle_exception(),
        # which uses interpret_http_exception(), etc.

      if url != resp.geturl():
        url = resp.geturl()
        logging.info('Redirected to %s', url)
      body = resp.read()

      if cache:
        logging.info('Caching response in %r', cache_key)
        memcache.set(cache_key, {'url': url, 'body': body}, URL_CACHE_TIME)

    # decode data
    mf2 = None
    if input == 'html':
      mf2 = mf2py.parse(doc=body, url=url)
    elif input == 'json-mf2':
      mf2 = json.loads(body)
      mf2.setdefault('rels', {})  # mf2util expects rels

    actor = None
    title = None
    if mf2:
      actor = microformats2.find_author(
        mf2, fetch_mf2_func=lambda url: mf2py.parse(url=url))
      title = mf2util.interpret_feed(mf2, url).get('name')

    if input == 'activitystreams':
      activities = json.loads(body)
    elif input == 'html':
      activities = microformats2.html_to_activities(body, url, actor)
    elif input == 'json-mf2':
      activities = [microformats2.json_to_object(item, actor=actor)
                    for item in mf2.get('items', [])]

    self.write_response(source.Source.make_activities_base_response(activities),
                        url=url, actor=actor, title=title)
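
For context, here's a minimal client-side sketch of exercising this handler. The host and path are placeholders; the input, url, and cache query parameters come straight from the code above:

import requests

# Hypothetical deployment URL; the real host and path depend on where the
# app is served.
resp = requests.get('https://converter.example.com/url', params={
    'input': 'html',                    # one of: activitystreams, html, json-mf2
    'url': 'https://example.com/post',  # page to fetch and convert
    'cache': 'false',                   # skip the memcache layer shown above
})
resp.raise_for_status()
activities = resp.json()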
Example #2
  def upload_images(self, urls):
    """Uploads one or more images from web URLs.

    https://dev.twitter.com/rest/reference/post/media/upload

    Args:
      urls: sequence of string URLs of images

    Returns:
      list of string media ids
    """
    ids = []
    for url in urls:
      image_resp = util.urlopen(url)
      bad_type = self._check_mime_type(url, image_resp, IMAGE_MIME_TYPES,
                                       'JPG, PNG, GIF, and WEBP images')
      if bad_type:
        return bad_type

      headers = twitter_auth.auth_header(
        API_UPLOAD_MEDIA, self.access_token_key, self.access_token_secret, 'POST')
      resp = util.requests_post(API_UPLOAD_MEDIA,
                                files={'media': image_resp},
                                headers=headers)
      resp.raise_for_status()
      logging.info('Got: %s', resp.text)
      try:
        ids.append(json.loads(resp.text)['media_id_string'])
      except (ValueError, KeyError):
        logging.exception("Couldn't parse response: %s", resp.text)
        raise

    return ids
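
A hedged usage sketch: the constructor call is an assumption (it isn't shown in this snippet), and attaching the returned ids to a tweet via the media_ids parameter follows Twitter's documented statuses/update API:

tw = Twitter(access_token_key, access_token_secret)  # hypothetical instantiation
media_ids = tw.upload_images(['https://example.com/a.jpg',
                              'https://example.com/b.png'])
# media_ids can then be passed to POST statuses/update as a comma-separated
# string, e.g. {'media_ids': ','.join(media_ids)}.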
Example #3
    def _urlopen(self, url):
        # check if request is cached
        cache = self.request.get('cache', '').lower() != 'false'
        cache_key = 'U %s' % url
        cached = memcache.get(cache_key) if cache else None

        if cached:
            logging.info('Serving cached response %r', cache_key)
            url = cached['url']
            body = cached['body']
        else:
            # fetch url
            try:
                resp = util.urlopen(url)
            except (ValueError, httplib.InvalidURL) as e:
                self.abort(400, str(e))
                # other exceptions are handled by webutil.handlers.handle_exception(),
                # which uses interpret_http_exception(), etc.

            if url != resp.geturl():
                url = resp.geturl()
                logging.info('Redirected to %s', url)
            body = resp.read()

            if cache:
                logging.info('Caching response in %r', cache_key)
                memcache.set(cache_key, {
                    'url': url,
                    'body': body
                }, URL_CACHE_TIME)

        return url, body
Example #4
  def upload_images(self, urls):
    """Uploads one or more images from web URLs.

    https://dev.twitter.com/rest/reference/post/media/upload

    Args:
      urls: sequence of string URLs of images

    Returns:
      list of string media ids
    """
    ids = []
    for url in urls:
      image_resp = util.urlopen(url)
      bad_type = self._check_mime_type(url, image_resp, IMAGE_MIME_TYPES,
                                       'JPG, PNG, GIF, and WEBP images')
      if bad_type:
        return bad_type

      headers = twitter_auth.auth_header(
        API_UPLOAD_MEDIA, self.access_token_key, self.access_token_secret, 'POST')
      resp = util.requests_post(API_UPLOAD_MEDIA,
                                files={'media': image_resp},
                                headers=headers)
      resp.raise_for_status()
      logging.info('Got: %s', resp.text)
      ids.append(source.load_json(resp.text, API_UPLOAD_MEDIA)['media_id_string'])

    return ids
Example #5
 def urlopen(self, url, **kwargs):
   """Wraps urllib2.urlopen() and passes through the access token."""
   log_url = url
   if self.access_token:
     # TODO add access_token to the data parameter for POST requests
     url = util.add_query_params(url, [('access_token', self.access_token)])
   resp = util.urlopen(urllib2.Request(url, **kwargs))
   return resp if kwargs.get('data') else json.loads(resp.read()).get('data')
Example #6
 def urlopen(self, url, **kwargs):
   """Wraps :func:`urllib2.urlopen()` and passes through the access token."""
   log_url = url
   if self.access_token:
     # TODO add access_token to the data parameter for POST requests
     url = util.add_query_params(url, [('access_token', self.access_token)])
   resp = util.urlopen(urllib2.Request(url, **kwargs))
   return (resp if kwargs.get('data')
           else source.load_json(resp.read(), url).get('data'))
Example #7
 def urlopen(self, url, **kwargs):
     """Wraps :func:`urllib2.urlopen()` and passes through the access token."""
     if self.access_token:
         # TODO add access_token to the data parameter for POST requests
         url = util.add_query_params(url,
                                     [('access_token', self.access_token)])
     resp = util.urlopen(urllib.request.Request(url, **kwargs))
     return (resp if kwargs.get('data') else source.load_json(
         resp.read(), url).get('data'))
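
All three variants share the same contract: a plain GET parses the JSON body and returns its 'data' member, while passing data= (which makes urllib issue a POST) returns the raw response object. A minimal sketch, with a hypothetical endpoint and an assumed src instance:

# GET: returns the parsed 'data' member of the JSON response.
profile = src.urlopen('https://graph.example.com/me')

# POST: passing data= switches to a POST and returns the raw response.
resp = src.urlopen('https://graph.example.com/me/feed',
                   data=urllib.urlencode({'message': 'hello'}))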
Example #8
    def user_to_actor(self, resp):
        """Convert a Flickr user dict into an ActivityStreams actor.
    """
        person = resp.get('person', {})
        username = person.get('username', {}).get('_content')
        obj = util.trim_nulls({
            'objectType': 'person',
            'displayName': person.get('realname', {}).get('_content') or username,
            'image': {
                'url': self.get_user_image(person.get('iconfarm'),
                                           person.get('iconserver'),
                                           person.get('nsid')),
            },
            'id': self.tag_uri(username),
            # numeric_id is our own custom field that always has the source's
            # numeric user id, if available.
            'numeric_id': person.get('nsid'),
            'location': {
                'displayName': person.get('location', {}).get('_content'),
            },
            'username': username,
            'description': person.get('description', {}).get('_content'),
        })

        # fetch profile page to get url(s)
        profile_url = person.get('profileurl', {}).get('_content')
        if profile_url:
            try:
                resp = util.urlopen(profile_url)
                profile_json = mf2py.parse(doc=resp, url=profile_url)
                # personal site is likely the first non-flickr url
                urls = profile_json.get('rels', {}).get('me', [])
                obj['urls'] = [{'value': u} for u in urls]
                obj['url'] = next(
                    (u for u in urls
                     if not u.startswith('https://www.flickr.com/')), None)
            except urllib_error.URLError:
                logging.warning('could not fetch user homepage %s',
                                profile_url)

        return self.postprocess_object(obj)
Example #9
  def user_to_actor(self, resp):
    """Convert a Flickr user dict into an ActivityStreams actor.
    """
    person = resp.get('person', {})
    username = person.get('username', {}).get('_content')
    obj = util.trim_nulls({
      'objectType': 'person',
      'displayName': person.get('realname', {}).get('_content') or username,
      'image': {
        'url': self.get_user_image(person.get('iconfarm'),
                                   person.get('iconserver'),
                                   person.get('nsid')),
      },
      'id': self.tag_uri(username),
      # numeric_id is our own custom field that always has the source's numeric
      # user id, if available.
      'numeric_id': person.get('nsid'),
      'location': {
        'displayName': person.get('location', {}).get('_content'),
      },
      'username': username,
      'description': person.get('description', {}).get('_content'),
    })

    # fetch profile page to get url(s)
    profile_url = person.get('profileurl', {}).get('_content')
    if profile_url:
      try:
        resp = util.urlopen(profile_url)
        profile_json = mf2py.parse(doc=resp, url=profile_url, img_with_alt=True)
        urls = profile_json.get('rels', {}).get('me', [])
        if urls:
          obj['url'] = urls[0]
        if len(urls) > 1:
          obj['urls'] = [{'value': u} for u in urls]
      except urllib_error.URLError:
        logging.warning('could not fetch user homepage %s', profile_url)

    return self.postprocess_object(obj)
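
The rel="me" discovery above can be exercised on its own. A minimal sketch using mf2py directly (the profile URL is a placeholder):

import mf2py

parsed = mf2py.parse(url='https://www.flickr.com/people/example/')
rel_me_urls = parsed.get('rels', {}).get('me', [])
# The first rel="me" link becomes the actor's url; any extras go in urls.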
Example #10
  def user_to_actor(self, resp):
    """Convert a Flickr user dict into an ActivityStreams actor.
    """
    person = resp.get('person', {})
    username = person.get('username', {}).get('_content')
    obj = util.trim_nulls({
      'objectType': 'person',
      'displayName': person.get('realname', {}).get('_content') or username,
      'image': {
        'url': self.get_user_image(person.get('iconfarm'),
                                   person.get('iconserver'),
                                   person.get('nsid')),
      },
      'id': self.tag_uri(username),
      # numeric_id is our own custom field that always has the source's numeric
      # user id, if available.
      'numeric_id': person.get('nsid'),
      'location': {
        'displayName': person.get('location', {}).get('_content'),
      },
      'username': username,
      'description': person.get('description', {}).get('_content'),
    })

    # fetch profile page to get url(s)
    profile_url = person.get('profileurl', {}).get('_content')
    if profile_url:
      try:
        resp = util.urlopen(profile_url)
        profile_json = mf2py.parse(doc=resp, url=profile_url)
        # personal site is likely the first non-flickr url
        urls = profile_json.get('rels', {}).get('me', [])
        obj['urls'] = [{'value': u} for u in urls]
        obj['url'] = next(
          (u for u in urls if not u.startswith('https://www.flickr.com/')),
          None)
      except urllib2.URLError:
        logging.warning('could not fetch user homepage %s', profile_url)

    return self.postprocess_object(obj)
Example #11
  def get(self):
    expected_inputs = ('activitystreams', 'html', 'json-mf2')
    input = util.get_required_param(self, 'input')
    if input not in expected_inputs:
      raise exc.HTTPBadRequest('Invalid input: %s, expected one of %r' %
                               (input, expected_inputs))

    # fetch url
    url = util.get_required_param(self, 'url')
    resp = util.urlopen(url)
    if url != resp.geturl():
      url = resp.geturl()
      logging.info('Redirected to %s', url)
    body = resp.read()

    # decode data
    mf2 = None
    if input == 'activitystreams':
      activities = json.loads(body)
    elif input == 'html':
      activities = microformats2.html_to_activities(body, url)
      mf2 = mf2py.parse(doc=body, url=url)
    elif input == 'json-mf2':
      mf2 = json.loads(body)
      mf2.setdefault('rels', {})  # mf2util expects rels
      activities = [microformats2.json_to_object(item)
                    for item in mf2.get('items', [])]

    author = None
    title = None
    if mf2:
      author = microformats2.find_author(mf2)
      title = mf2util.interpret_feed(mf2, url).get('name')

    self.write_response(source.Source.make_activities_base_response(activities),
                        url=url, actor=author, title=title)
Example #12
  def upload_images(self, urls):
    """Uploads one or more images from web URLs.

    https://dev.twitter.com/rest/reference/post/media/upload

    Args:
      urls: sequence of string URLs of images

    Returns:
      list of string media ids
    """
    ids = []
    for url in urls:
      headers = twitter_auth.auth_header(
        API_UPLOAD_MEDIA, self.access_token_key, self.access_token_secret, 'POST')
      resp = util.requests_post(API_UPLOAD_MEDIA,
                                files={'media': util.urlopen(url)},
                                headers=headers)
      resp.raise_for_status()
      logging.info('Got: %s', resp.text)
      try:
        ids.append(json.loads(resp.text)['media_id_string'])
      except (ValueError, KeyError):
        logging.exception("Couldn't parse response: %s", resp.text)
        raise

    return ids
Example #13
    if fetch_mentions:
      # fetch mentions *after* replies so that we don't get replies to mentions
      # https://github.com/snarfed/bridgy/issues/631
      mentions = self.fetch_mentions(_user().get('screen_name'), tweets,
                                     min_id=min_id)
      tweet_activities += [self.tweet_to_activity(m) for m in mentions]

    if fetch_likes:
      for tweet, activity in zip(tweets, tweet_activities):
        id = tweet['id_str']
        count = tweet.get('favorite_count')
        if self.is_public(activity) and count and count != cached.get('ATF ' + id):
          url = HTML_FAVORITES % id
          try:
            html = json.loads(util.urlopen(url).read()).get('htmlUsers', '')
          except urllib2.URLError as e:
            util.interpret_http_exception(e)  # just log it
            continue
          likes = self.favorites_html_to_likes(tweet, html)
          activity['object'].setdefault('tags', []).extend(likes)
          cache_updates['ATF ' + id] = count

    activities += tweet_activities
    response = self.make_activities_base_response(activities)
    response.update({'total_count': total_count, 'etag': etag})
    if cache_updates and cache is not None:
      cache.set_multi(cache_updates)
    return response

Example #14
  def _create(self, obj, preview, include_link=False, ignore_formatting=False):
    """Creates or previews creating for the previous two methods.

    https://www.flickr.com/services/api/upload.api.html
    https://www.flickr.com/services/api/flickr.photos.comments.addComment.html
    https://www.flickr.com/services/api/flickr.favorites.add.html
    https://www.flickr.com/services/api/flickr.photos.people.add.html

    Args:
      obj: ActivityStreams object
      preview: boolean
      include_link: boolean
      ignore_formatting: boolean

    Returns:
      a CreationResult
    """
    # photo, comment, or like
    type = source.object_type(obj)
    logging.debug('publishing object type %s to Flickr', type)
    link_text = '(Originally published at: %s)' % obj.get('url')

    image_url = util.get_first(obj, 'image', {}).get('url')
    video_url = util.get_first(obj, 'stream', {}).get('url')
    content = self._content_for_create(obj, ignore_formatting=ignore_formatting,
                                       strip_first_video_tag=bool(video_url))

    if (video_url or image_url) and type in ('note', 'article'):
      name = obj.get('displayName')
      people = self._get_person_tags(obj)
      hashtags = [t.get('displayName') for t in obj.get('tags', [])
                  if t.get('objectType') == 'hashtag' and t.get('displayName')]
      lat = obj.get('location', {}).get('latitude')
      lng = obj.get('location', {}).get('longitude')

      # if the name isn't an explicit title (i.e. it's just the leading text
      # of the content), use the full content as the title and drop the content
      if name and content and not mf2util.is_name_a_title(name, content):
        name = content
        content = None

      # add original post link
      if include_link:
        content = ((content + '\n\n') if content else '') + link_text

      if preview:
        preview_content = ''
        if name:
          preview_content += '<h4>%s</h4>' % name
        if content:
          preview_content += '<div>%s</div>' % content
        if hashtags:
          preview_content += '<div> %s</div>' % ' '.join('#' + t for t in hashtags)
        if people:
          preview_content += '<div> with %s</div>' % ', '.join(
            ('<a href="%s">%s</a>' % (
              p.get('url'), p.get('displayName') or 'User %s' % p.get('id'))
             for p in people))
        if lat and lng:
          preview_content += '<div> at <a href="https://maps.google.com/maps?q=%s,%s">%s, %s</a></div>' % (lat, lng, lat, lng)

        if video_url:
          preview_content += ('<video controls src="%s"><a href="%s">this video'
                              '</a></video>' % (video_url, video_url))
        else:
          preview_content += '<img src="%s" />' % image_url

        return source.creation_result(content=preview_content, description='post')

      params = []
      if name:
        params.append(('title', name))
      if content:
        params.append(('description', content))
      if hashtags:
        params.append(
          ('tags', ','.join('"%s"' % t if ' ' in t else t for t in hashtags)))

      file = util.urlopen(video_url or image_url)
      resp = self.upload(params, file)
      photo_id = resp.get('id')
      resp.update({
        'type': 'post',
        'url': self.photo_url(self.path_alias() or self.user_id(), photo_id),
      })
      if video_url:
        resp['granary_message'] = \
          "Note that videos take time to process before they're visible."

      # add person tags
      for person_id in sorted(p.get('id') for p in people):
        self.call_api_method('flickr.photos.people.add', {
          'photo_id': photo_id,
          'user_id': person_id,
        })

      # add location
      if lat and lng:
        self.call_api_method('flickr.photos.geo.setLocation', {
            'photo_id': photo_id,
            'lat': lat,
            'lon': lng,
        })

      return source.creation_result(resp)

    base_obj = self.base_object(obj)
    base_id = base_obj.get('id')
    base_url = base_obj.get('url')

    # maybe a comment on a flickr photo?
    if type == 'comment' or obj.get('inReplyTo'):
      if not base_id:
        return source.creation_result(
          abort=True,
          error_plain='Could not find a photo to comment on.',
          error_html='Could not find a photo to <a href="http://indiewebcamp.com/reply">comment on</a>. '
          'Check that your post has an <a href="http://indiewebcamp.com/comment">in-reply-to</a> '
          'link to a Flickr photo or to an original post that publishes a '
          '<a href="http://indiewebcamp.com/rel-syndication">rel-syndication</a> link to Flickr.')

      if include_link:
        content += '\n\n' + link_text
      if preview:
        return source.creation_result(
          content=content,
          description='comment on <a href="%s">this photo</a>.' % base_url)

      resp = self.call_api_method('flickr.photos.comments.addComment', {
        'photo_id': base_id,
        'comment_text': content,
      })
      resp = resp.get('comment', {})
      resp.update({
        'type': 'comment',
        'url': resp.get('permalink'),
      })
      return source.creation_result(resp)

    if type == 'like':
      if not base_id:
        return source.creation_result(
          abort=True,
          error_plain='Could not find a photo to favorite.',
          error_html='Could not find a photo to <a href="http://indiewebcamp.com/like">favorite</a>. '
          'Check that your post has an <a href="http://indiewebcamp.com/like">like-of</a> '
          'link to a Flickr photo or to an original post that publishes a '
          '<a href="http://indiewebcamp.com/rel-syndication">rel-syndication</a> link to Flickr.')
      if preview:
        return source.creation_result(
          description='favorite <a href="%s">this photo</a>.' % base_url)

      # this method doesn't return any data
      self.call_api_method('flickr.favorites.add', {
        'photo_id': base_id,
      })
      # TODO should we canonicalize the base_url (e.g. removing trailing path
      # info like "/in/contacts/")
      return source.creation_result({
        'type': 'like',
        'url': '%s#favorited-by-%s' % (base_url, self.user_id()),
      })

    return source.creation_result(
      abort=False,
      error_plain='Cannot publish type=%s to Flickr.' % type,
      error_html='Cannot publish type=%s to Flickr.' % type)
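
For reference, a minimal sketch of an ActivityStreams object that exercises the photo branch above. The field names are the ones _create reads; the values are placeholders, and calling _create directly is an assumption (it's normally reached through the public create()/preview_create() wrappers):

obj = {
    'objectType': 'note',
    'displayName': 'Sunset over the bay',       # becomes the Flickr title
    'content': 'Taken from the pier at dusk.',  # becomes the description
    'image': {'url': 'https://example.com/sunset.jpg'},
    'tags': [{'objectType': 'hashtag', 'displayName': 'sunset'}],
    'location': {'latitude': 37.8, 'longitude': -122.4},
}
result = flickr._create(obj, preview=True)  # CreationResult with preview HTML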
Example #15
  def upload_video(self, url):
    """Uploads a video from web URLs using the chunked upload process.

    Chunked upload consists of multiple API calls:
    * command=INIT, which allocates the media id
    * command=APPEND for each 5MB block, up to 15MB total
    * command=FINALIZE

    https://dev.twitter.com/rest/reference/post/media/upload-chunked
    https://dev.twitter.com/rest/public/uploading-media#chunkedupload

    Args:
      url: string URL of the video

    Returns:
      string media id or CreationResult on error
    """
    video_resp = util.urlopen(url)

    # check format and size
    type = video_resp.headers.get('Content-Type')
    if not type:
      type, _ = mimetypes.guess_type(url)
    if type and type not in VIDEO_MIME_TYPES:
      msg = 'Twitter only supports MP4 videos; yours looks like a %s.' % type
      return source.creation_result(abort=True, error_plain=msg, error_html=msg)

    length = video_resp.headers.get('Content-Length')
    if not util.is_int(length):
      msg = "Couldn't determine your video's size."
      return source.creation_result(abort=True, error_plain=msg, error_html=msg)

    length = int(length)
    if length > MAX_VIDEO_SIZE:
      msg = "Your %sMB video is larger than Twitter's %dMB limit." % (
        length // MB, MAX_VIDEO_SIZE // MB)
      return source.creation_result(abort=True, error_plain=msg, error_html=msg)

    # INIT
    media_id = self.urlopen(API_UPLOAD_MEDIA, data=urllib.urlencode({
      'command': 'INIT',
      'media_type': 'video/mp4',
      'total_bytes': length,
    }))['media_id_string']

    # APPEND
    headers = twitter_auth.auth_header(
      API_UPLOAD_MEDIA, self.access_token_key, self.access_token_secret, 'POST')

    i = 0
    while True:
      chunk = util.FileLimiter(video_resp, UPLOAD_CHUNK_SIZE)
      data = {
        'command': 'APPEND',
        'media_id': media_id,
        'segment_index': i,
      }
      resp = util.requests_post(API_UPLOAD_MEDIA, data=data,
                                files={'media': chunk}, headers=headers)
      resp.raise_for_status()

      if chunk.ateof:
        break
      i += 1

    # FINALIZE
    self.urlopen(API_UPLOAD_MEDIA, data=urllib.urlencode({
      'command': 'FINALIZE',
      'media_id': media_id,
    }))

    return media_id
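
util.FileLimiter isn't shown here; the APPEND loop only relies on it exposing a file-like read() capped at UPLOAD_CHUNK_SIZE bytes, plus an ateof flag that turns true once the wrapped stream is exhausted. A rough sketch of that assumed interface (the real implementation lives in webutil and presumably also handles short network reads):

class FileLimiter(object):
  """File-like wrapper that reads at most `limit` bytes from `f`."""

  def __init__(self, f, limit):
    self.f = f
    self.remaining = limit
    self.ateof = False

  def read(self, size=-1):
    if self.remaining <= 0:
      return b''  # chunk limit reached; the stream itself may have more
    if size < 0 or size > self.remaining:
      size = self.remaining
    data = self.f.read(size)
    self.remaining -= len(data)
    if len(data) < size:
      self.ateof = True  # wrapped stream hit EOF
    return data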
Example #17
  def upload_video(self, url):
    """Uploads a video from web URLs using the chunked upload process.

    Chunked upload consists of multiple API calls:
    * command=INIT, which allocates the media id
    * command=APPEND for each 5MB block, up to 15MB total
    * command=FINALIZE

    https://dev.twitter.com/rest/reference/post/media/upload-chunked
    https://dev.twitter.com/rest/public/uploading-media#chunkedupload

    Args:
      url: string URL of the video

    Returns:
      string media id or CreationResult on error
    """
    video_resp = util.urlopen(url)
    bad_type = self._check_mime_type(url, video_resp, VIDEO_MIME_TYPES, 'MP4 videos')
    if bad_type:
      return bad_type

    length = video_resp.headers.get('Content-Length')
    if not util.is_int(length):
      msg = "Couldn't determine your video's size."
      return source.creation_result(abort=True, error_plain=msg, error_html=msg)

    length = int(length)
    if length > MAX_VIDEO_SIZE:
      msg = "Your %sMB video is larger than Twitter's %dMB limit." % (
        length // MB, MAX_VIDEO_SIZE // MB)
      return source.creation_result(abort=True, error_plain=msg, error_html=msg)

    # INIT
    media_id = self.urlopen(API_UPLOAD_MEDIA, data=urllib.urlencode({
      'command': 'INIT',
      'media_type': 'video/mp4',
      'total_bytes': length,
    }))['media_id_string']

    # APPEND
    headers = twitter_auth.auth_header(
      API_UPLOAD_MEDIA, self.access_token_key, self.access_token_secret, 'POST')

    i = 0
    while True:
      chunk = util.FileLimiter(video_resp, UPLOAD_CHUNK_SIZE)
      data = {
        'command': 'APPEND',
        'media_id': media_id,
        'segment_index': i,
      }
      resp = util.requests_post(API_UPLOAD_MEDIA, data=data,
                                files={'media': chunk}, headers=headers)
      resp.raise_for_status()

      if chunk.ateof:
        break
      i += 1

    # FINALIZE
    self.urlopen(API_UPLOAD_MEDIA, data=urllib.urlencode({
      'command': 'FINALIZE',
      'media_id': media_id,
    }))

    return media_id
Example #18
  def get(self):
    expected_inputs = ('activitystreams', 'html', 'json-mf2')
    input = util.get_required_param(self, 'input')
    if input not in expected_inputs:
      raise exc.HTTPBadRequest('Invalid input: %s, expected one of %r' %
                               (input, expected_inputs))
    url = util.get_required_param(self, 'url')

    # check if request is cached
    cache = self.request.get('cache', '').lower() != 'false'
    cache_key = 'U %s' % url
    cached = memcache.get(cache_key) if cache else None

    if cached:
      logging.info('Serving cached response %r', cache_key)
      url = cached['url']
      body = cached['body']
    else:
      # fetch url
      try:
        resp = util.urlopen(url)
      except (ValueError, httplib.InvalidURL) as e:
        self.abort(400, str(e))
      except Exception as e:
        if util.is_connection_failure(e):
          # HTTP 504 Gateway Timeout
          self.abort(504, str(e))
        raise

      if url != resp.geturl():
        url = resp.geturl()
        logging.info('Redirected to %s', url)
      body = resp.read()

      if cache:
        logging.info('Caching response in %r', cache_key)
        memcache.set(cache_key, {'url': url, 'body': body}, URL_CACHE_TIME)

    # decode data
    mf2 = None
    if input == 'html':
      mf2 = mf2py.parse(doc=body, url=url)
    elif input == 'json-mf2':
      mf2 = json.loads(body)
      mf2.setdefault('rels', {})  # mf2util expects rels

    actor = None
    title = None
    if mf2:
      actor = microformats2.find_author(
        mf2, fetch_mf2_func=lambda url: mf2py.parse(url=url))
      title = mf2util.interpret_feed(mf2, url).get('name')

    if input == 'activitystreams':
      activities = json.loads(body)
    elif input == 'html':
      activities = microformats2.html_to_activities(body, url, actor)
    elif input == 'json-mf2':
      activities = [microformats2.json_to_object(item, actor=actor)
                    for item in mf2.get('items', [])]

    self.write_response(source.Source.make_activities_base_response(activities),
                        url=url, actor=actor, title=title)
Example #19
    if fetch_mentions:
      # fetch mentions *after* replies so that we don't get replies to mentions
      # https://github.com/snarfed/bridgy/issues/631
      mentions = self.fetch_mentions(_user().get('screen_name'), tweets,
                                     min_id=min_id)
      tweet_activities += [self.tweet_to_activity(m) for m in mentions]

    if fetch_likes:
      for tweet, activity in zip(tweets, tweet_activities):
        id = tweet['id_str']
        count = tweet.get('favorite_count')
        if self.is_public(activity) and count and count != cached.get('ATF ' + id):
          url = HTML_FAVORITES % id
          try:
            resp = util.urlopen(url).read()
            html = source.load_json(resp, url).get('htmlUsers', '')
          except urllib2.URLError as e:
            util.interpret_http_exception(e)  # just log it
            continue
          likes = self.favorites_html_to_likes(tweet, html)
          activity['object'].setdefault('tags', []).extend(likes)
          cache_updates['ATF ' + id] = count

    activities += tweet_activities
    response = self.make_activities_base_response(activities)
    response.update({'total_count': total_count, 'etag': etag})
    if cache_updates and cache is not None:
      cache.set_multi(cache_updates)
    return response
Example #20
    def _create(self, obj, preview, include_link=source.OMIT_LINK,
                ignore_formatting=False):
        """Creates or previews creating for the previous two methods.

    https://www.flickr.com/services/api/upload.api.html
    https://www.flickr.com/services/api/flickr.photos.comments.addComment.html
    https://www.flickr.com/services/api/flickr.favorites.add.html
    https://www.flickr.com/services/api/flickr.photos.people.add.html

    Args:
      obj: ActivityStreams object
      preview: boolean
      include_link: string
      ignore_formatting: boolean

    Returns:
      a CreationResult
    """
        # photo, comment, or like
        type = source.object_type(obj)
        logging.debug('publishing object type %s to Flickr', type)
        link_text = '(Originally published at: %s)' % obj.get('url')

        image_url = util.get_first(obj, 'image', {}).get('url')
        video_url = util.get_first(obj, 'stream', {}).get('url')
        content = self._content_for_create(
            obj,
            ignore_formatting=ignore_formatting,
            strip_first_video_tag=bool(video_url))

        if (video_url or image_url) and type in ('note', 'article'):
            name = obj.get('displayName')
            people = self._get_person_tags(obj)
            hashtags = [
                t.get('displayName') for t in obj.get('tags', [])
                if t.get('objectType') == 'hashtag' and t.get('displayName')
            ]
            lat = obj.get('location', {}).get('latitude')
            lng = obj.get('location', {}).get('longitude')

            # if the name isn't an explicit title (i.e. it's just the leading
            # text of the content), use the full content as the title and drop
            # the content
            if name and content and not mf2util.is_name_a_title(name, content):
                name = content
                content = None

            # add original post link
            if include_link == source.INCLUDE_LINK:
                content = ((content + '\n\n') if content else '') + link_text

            if preview:
                preview_content = ''
                if name:
                    preview_content += '<h4>%s</h4>' % name
                if content:
                    preview_content += '<div>%s</div>' % content
                if hashtags:
                    preview_content += '<div> %s</div>' % ' '.join(
                        '#' + t for t in hashtags)
                if people:
                    preview_content += '<div> with %s</div>' % ', '.join(
                        ('<a href="%s">%s</a>' %
                         (p.get('url'), p.get('displayName')
                          or 'User %s' % p.get('id')) for p in people))
                if lat and lng:
                    preview_content += '<div> at <a href="https://maps.google.com/maps?q=%s,%s">%s, %s</a></div>' % (
                        lat, lng, lat, lng)

                if video_url:
                    preview_content += (
                        '<video controls src="%s"><a href="%s">this video'
                        '</a></video>' % (video_url, video_url))
                else:
                    preview_content += '<img src="%s" />' % image_url

                return source.creation_result(content=preview_content,
                                              description='post')

            params = []
            if name:
                params.append(('title', name))
            if content:
                params.append(('description', content.encode('utf-8')))
            if hashtags:
                params.append(('tags', ','.join(
                    ('"%s"' % t if ' ' in t else t).encode('utf-8')
                    for t in hashtags)))

            file = util.urlopen(video_url or image_url)
            try:
                resp = self.upload(params, file)
            except requests.exceptions.ConnectionError as e:
                if e.args[0].message.startswith(
                        'Request exceeds 10 MiB limit'):
                    msg = 'Sorry, photos and videos must be under 10MB.'
                    return source.creation_result(error_plain=msg,
                                                  error_html=msg)
                else:
                    raise

            photo_id = resp.get('id')
            resp.update({
                'type': 'post',
                'url': self.photo_url(self.path_alias() or self.user_id(),
                                      photo_id),
            })
            if video_url:
                resp['granary_message'] = \
                  "Note that videos take time to process before they're visible."

            # add person tags
            for person_id in sorted(p.get('id') for p in people):
                self.call_api_method('flickr.photos.people.add', {
                    'photo_id': photo_id,
                    'user_id': person_id,
                })

            # add location
            if lat and lng:
                self.call_api_method('flickr.photos.geo.setLocation', {
                    'photo_id': photo_id,
                    'lat': lat,
                    'lon': lng,
                })

            return source.creation_result(resp)

        base_obj = self.base_object(obj)
        base_id = base_obj.get('id')
        base_url = base_obj.get('url')

        # maybe a comment on a flickr photo?
        if type == 'comment' or obj.get('inReplyTo'):
            if not base_id:
                return source.creation_result(
                    abort=True,
                    error_plain='Could not find a photo to comment on.',
                    error_html=
                    'Could not find a photo to <a href="http://indiewebcamp.com/reply">comment on</a>. '
                    'Check that your post has an <a href="http://indiewebcamp.com/comment">in-reply-to</a> '
                    'link to a Flickr photo or to an original post that publishes a '
                    '<a href="http://indiewebcamp.com/rel-syndication">rel-syndication</a> link to Flickr.'
                )

            if include_link == source.INCLUDE_LINK:
                content += '\n\n' + link_text
            if preview:
                return source.creation_result(
                    content=content,
                    description='comment on <a href="%s">this photo</a>.' %
                    base_url)

            resp = self.call_api_method(
                'flickr.photos.comments.addComment', {
                    'photo_id': base_id,
                    'comment_text': content.encode('utf-8'),
                })
            resp = resp.get('comment', {})
            resp.update({
                'type': 'comment',
                'url': resp.get('permalink'),
            })
            return source.creation_result(resp)

        if type == 'like':
            if not base_id:
                return source.creation_result(
                    abort=True,
                    error_plain='Could not find a photo to favorite.',
                    error_html=
                    'Could not find a photo to <a href="http://indiewebcamp.com/like">favorite</a>. '
                    'Check that your post has an <a href="http://indiewebcamp.com/like">like-of</a> '
                    'link to a Flickr photo or to an original post that publishes a '
                    '<a href="http://indiewebcamp.com/rel-syndication">rel-syndication</a> link to Flickr.'
                )
            if preview:
                return source.creation_result(
                    description='favorite <a href="%s">this photo</a>.' %
                    base_url)

            # this method doesn't return any data
            self.call_api_method('flickr.favorites.add', {
                'photo_id': base_id,
            })
            # TODO should we canonicalize the base_url (e.g. removing trailing path
            # info like "/in/contacts/")
            return source.creation_result({
                'type': 'like',
                'url': '%s#favorited-by-%s' % (base_url, self.user_id()),
            })

        return source.creation_result(
            abort=False,
            error_plain='Cannot publish type=%s to Flickr.' % type,
            error_html='Cannot publish type=%s to Flickr.' % type)