Example 1
 def _release(cls, snap, revision):
     """Release a snap revision to specified channels."""
     release_url = urlappend(
         config.snappy.store_url, "dev/api/snap-release/")
     data = {
         "name": snap.store_name,
         "revision": revision,
         # The security proxy is useless and breaks JSON serialisation.
         "channels": removeSecurityProxy(snap.store_channels),
         "series": snap.store_series.name,
         }
     # XXX cjwatson 2016-06-28: This should add timeline information, but
     # that's currently difficult in jobs.
     try:
         assert snap.store_secrets is not None
         urlfetch(
             release_url, method="POST", json=data,
             auth=MacaroonAuth(
                 snap.store_secrets["root"],
                 snap.store_secrets.get("discharge")))
     except requests.HTTPError as e:
         if e.response.status_code == 401:
             if (e.response.headers.get("WWW-Authenticate") ==
                     "Macaroon needs_refresh=1"):
                 raise NeedsRefreshResponse()
         raise cls._makeSnapStoreError(ReleaseFailedResponse, e)
Example 2
 def test_urlfetch_no_proxy_by_default(self):
     """urlfetch does not use a proxy by default."""
     self.pushConfig('launchpad', http_proxy='http://proxy.example:3128/')
     fake_send = FakeMethod(result=Response())
     self.useFixture(
         MonkeyPatch('requests.adapters.HTTPAdapter.send', fake_send))
     urlfetch('http://example.com/')
     self.assertEqual({}, fake_send.calls[0][1]['proxies'])
Example 3
 def test_urlfetch_uses_proxies_if_requested(self):
     """urlfetch uses proxies if explicitly requested."""
     proxy = 'http://proxy.example:3128/'
     self.pushConfig('launchpad', http_proxy=proxy)
     fake_send = FakeMethod(result=Response())
     self.useFixture(
         MonkeyPatch('requests.adapters.HTTPAdapter.send', fake_send))
     urlfetch('http://example.com/', use_proxy=True)
     self.assertEqual({scheme: proxy
                       for scheme in ('http', 'https')},
                      fake_send.calls[0][1]['proxies'])
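
Taken together, the two tests above pin down urlfetch's proxy contract: the
configured proxy is ignored unless the caller opts in, and once requested it
covers both schemes. A minimal sketch of that behaviour, assuming a config
object shaped like the one the tests push (urlfetch's real internals may
differ):

from lp.services.config import config

def _build_proxies(use_proxy):
    """Sketch of the proxies dict urlfetch would hand to requests."""
    if not use_proxy or not config.launchpad.http_proxy:
        # Default: no proxies, as test_urlfetch_no_proxy_by_default asserts.
        return {}
    # Opt-in: the one configured proxy serves both schemes, as
    # test_urlfetch_uses_proxies_if_requested asserts.
    return {scheme: config.launchpad.http_proxy
            for scheme in ('http', 'https')}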
Example 4
    def test_urlfetch_supports_ftp_urls_if_allow_ftp(self):
        """urlfetch() supports ftp urls via a proxy if explicitly asked."""
        sock, http_server_url = self.make_test_socket()
        sock.listen(1)

        def success_result():
            (client_sock, client_addr) = sock.accept()
            # We only provide a test HTTP proxy, not anything beyond that.
            client_sock.sendall(
                dedent("""\
                HTTP/1.0 200 Ok
                Content-Type: text/plain
                Content-Length: 8

                Success."""))
            client_sock.close()

        self.pushConfig('launchpad', http_proxy=http_server_url)
        t = threading.Thread(target=success_result)
        t.start()
        response = urlfetch('ftp://example.com/',
                            use_proxy=True,
                            allow_ftp=True)
        self.assertThat(
            response,
            MatchesStructure(status_code=Equals(200),
                             headers=ContainsDict(
                                 {'content-length': Equals('8')}),
                             content=Equals('Success.')))
        t.join()
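
The socket-based tests in this collection depend on a make_test_socket helper
that is not shown. A plausible implementation (the actual Launchpad helper may
differ) binds an ephemeral localhost port and derives the server URL from it:

import socket

def make_test_socket(self):
    """Return a socket bound to an ephemeral port, plus the URL reaching it."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Port 0 asks the kernel to pick any free port; the caller listens.
    sock.bind(('127.0.0.1', 0))
    host, port = sock.getsockname()
    return sock, 'http://%s:%d/' % (host, port)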
Example 5
    def search(self, terms, start=0):
        """See `ISearchService`.

        The config.google.client_id is used as Google client-id in the
        search request. Search returns 20 or fewer results for each query.
        For terms that match more than 20 results, the start param can be
        used over multiple queries to get successive sets of results.

        :return: `ISearchResults` (PageMatches).
        :raise: `GoogleWrongGSPVersion` if the xml cannot be parsed.
        """
        search_url = self.create_search_url(terms, start=start)
        from lp.services.timeout import urlfetch
        request = get_current_browser_request()
        timeline = get_request_timeline(request)
        action = timeline.start("google-search-api", search_url)
        try:
            gsp_xml = urlfetch(search_url)
        except (TimeoutError, urllib2.HTTPError, urllib2.URLError) as error:
            # Google search service errors are not code errors. Let the
            # call site choose to handle the unavailable service.
            raise GoogleResponseError("The response errored: %s" % str(error))
        finally:
            action.finish()
        page_matches = self._parse_google_search_protocol(gsp_xml)
        return page_matches
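
The docstring's paging contract (at most 20 results per query, advanced via
the start parameter) implies a caller-side loop along these lines; the
page_size default and the use of len() on the returned PageMatches are
assumptions for illustration:

def iter_result_pages(search_service, terms, page_size=20):
    """Yield successive batches of results until a short page appears."""
    start = 0
    while True:
        matches = search_service.search(terms, start=start)
        yield matches
        if len(matches) < page_size:
            # A short page means there are no further results.
            break
        start += page_size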
Example 6
 def _request(self, method, path, **kwargs):
     """Make a request to the Git hosting API."""
     # Fetch the current timeout before starting the timeline action,
     # since making a database query inside this action will result in an
     # OverlappingActionError.
     get_default_timeout_function()()
     timeline = get_request_timeline(get_current_browser_request())
     action = timeline.start("git-hosting-%s" % method,
                             "%s %s" % (path, json.dumps(kwargs)))
     try:
         response = urlfetch(urljoin(self.endpoint, path),
                             method=method,
                             **kwargs)
     except TimeoutError:
         # Re-raise this directly so that it can be handled specially by
         # callers.
         raise
     except requests.RequestException:
         raise
     except Exception:
         _, val, tb = sys.exc_info()
         reraise(RequestExceptionWrapper,
                 RequestExceptionWrapper(*val.args), tb)
     finally:
         action.finish()
     if response.content:
         return response.json()
     else:
         return None
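
The seemingly redundant "except requests.RequestException: raise" exists so
that only unexpected exception types get wrapped in RequestExceptionWrapper.
A caller-side sketch of the resulting contract (hosting_client and the path
are placeholders, and it assumes RequestExceptionWrapper subclasses
requests.RequestException so that one handler covers both error paths):

import requests

try:
    props = hosting_client._request("get", "repositories/123")
except TimeoutError:
    raise  # Propagate timeouts so they can be handled specially upstream.
except requests.RequestException:
    # Covers both re-raised requests errors and wrapped unexpected ones.
    props = None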
Example 7
    def search(self, terms, start=0):
        """See `ISearchService`.

        The `subscription_key` and `custom_config_id` are used in the
        search request. Search returns 20 or fewer results for each query.
        For terms that match more than 20 results, the start param can be
        used over multiple queries to get successive sets of results.

        :return: `ISearchResults` (PageMatches).
        :raise: `SiteSearchResponseError` if the json response is incomplete or
            cannot be parsed.
        """
        search_url = self.create_search_url(terms, start=start)
        search_headers = self.create_search_headers()
        request = get_current_browser_request()
        timeline = get_request_timeline(request)
        action = timeline.start("bing-search-api", search_url)
        try:
            response = urlfetch(
                search_url, headers=search_headers, use_proxy=True)
        except (TimeoutError, requests.RequestException) as error:
            raise SiteSearchResponseError(
                "The response errored: %s" % str(error))
        finally:
            action.finish()
        page_matches = self._parse_search_response(response.content, start)
        return page_matches
Example 8
 def _uploadFile(cls, lfa, lfc):
     """Upload a single file."""
     assert config.snappy.store_upload_url is not None
     unscanned_upload_url = urlappend(
         config.snappy.store_upload_url, "unscanned-upload/")
     lfa.open()
     try:
         lfa_wrapper = LibraryFileAliasWrapper(lfa)
         encoder = MultipartEncoder(
             fields={
                 "binary": (
                     lfa.filename, lfa_wrapper, "application/octet-stream"),
                 })
         # XXX cjwatson 2016-05-09: This should add timeline information,
         # but that's currently difficult in jobs.
         try:
             response = urlfetch(
                 unscanned_upload_url, method="POST", data=encoder,
                 headers={
                     "Content-Type": encoder.content_type,
                     "Accept": "application/json",
                     })
             response_data = response.json()
             if not response_data.get("successful", False):
                 raise UploadFailedResponse(response.text)
             return {"upload_id": response_data["upload_id"]}
         except requests.HTTPError as e:
             raise cls._makeSnapStoreError(UploadFailedResponse, e)
     finally:
         lfa.close()
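
MultipartEncoder here comes from requests_toolbelt; it streams the multipart
body instead of buffering it in memory, which matters for large snap files. A
standalone sketch of the same pattern with a plain file object (the URL and
field name are placeholders):

import requests
from requests_toolbelt import MultipartEncoder

def upload(path, url="https://upload.example/unscanned-upload/"):
    with open(path, "rb") as f:
        encoder = MultipartEncoder(
            fields={"binary": (path, f, "application/octet-stream")})
        # Passing the encoder as the body streams it; the MIME boundary
        # travels in encoder.content_type.
        return requests.post(
            url, data=encoder,
            headers={"Content-Type": encoder.content_type,
                     "Accept": "application/json"})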
Example 9
 def _uploadApp(cls, snap, upload_data):
     """Create a new store upload based on the uploaded file."""
     assert config.snappy.store_url is not None
     assert snap.store_name is not None
     upload_url = urlappend(config.snappy.store_url, "dev/api/snap-push/")
     data = {
         "name": snap.store_name,
         "updown_id": upload_data["upload_id"],
         "series": snap.store_series.name,
         }
     # XXX cjwatson 2016-05-09: This should add timeline information, but
     # that's currently difficult in jobs.
     try:
         assert snap.store_secrets is not None
         response = urlfetch(
             upload_url, method="POST", json=data,
             auth=MacaroonAuth(
                 snap.store_secrets["root"],
                 snap.store_secrets.get("discharge")))
         response_data = response.json()
         return response_data["status_details_url"]
     except requests.HTTPError as e:
         if e.response.status_code == 401:
             if (e.response.headers.get("WWW-Authenticate") ==
                     "Macaroon needs_refresh=1"):
                 raise NeedsRefreshResponse()
             else:
                 raise cls._makeSnapStoreError(
                     UnauthorizedUploadResponse, e)
         raise cls._makeSnapStoreError(UploadFailedResponse, e)
Example 10
 def getExternalBugTrackerToUse(self):
     """See `IExternalBugTracker`."""
     base_auth_url = urlappend(self.baseurl, 'launchpad-auth')
     # Any token will do.
     auth_url = urlappend(base_auth_url, 'check')
     try:
         with override_timeout(config.checkwatches.default_socket_timeout):
             response = urlfetch(auth_url, use_proxy=True)
     except requests.HTTPError as e:
         # If the error is HTTP 401 Unauthorized then we're
         # probably talking to the LP plugin.
         if e.response.status_code == 401:
             return TracLPPlugin(self.baseurl)
         else:
             return self
     except requests.RequestException:
         return self
     else:
         # If the response contains a trac_auth cookie then we're
         # talking to the LP plugin. However, it's unlikely that
         # the remote system will authorize the bogus auth token we
         # sent, so this check is really intended to detect broken
         # Trac instances that return HTTP 200 for a missing page.
         for cookie in response.cookies:
             if cookie.name == 'trac_auth':
                 return TracLPPlugin(self.baseurl)
         return self
Example 11
    def _setHead(self, target_url, target_ref):
        """Set HEAD on a remote repository.

        This relies on the turnip-set-symbolic-ref extension.
        """
        service = "turnip-set-symbolic-ref"
        url = urljoin(target_url, service)
        headers = {
            "Content-Type": "application/x-%s-request" % service,
        }
        body = pkt_line("HEAD %s" % target_ref) + pkt_line(None)
        try:
            response = urlfetch(url, method="POST", headers=headers, data=body)
            response.raise_for_status()
        except Exception as e:
            raise GitProtocolError(str(e))
        content_type = response.headers.get("Content-Type")
        if content_type != ("application/x-%s-result" % service):
            raise GitProtocolError("Invalid Content-Type from server: %s" %
                                   content_type)
        content = io.BytesIO(response.content)
        proto = Protocol(content.read, None)
        pkt = proto.read_pkt_line()
        if pkt is None:
            raise GitProtocolError("Unexpected flush-pkt from server")
        elif pkt.rstrip(b"\n") == b"ACK HEAD":
            pass
        elif pkt.startswith(b"ERR "):
            raise GitProtocolError(
                pkt[len(b"ERR "):].rstrip(b"\n").decode("UTF-8"))
        else:
            raise GitProtocolError("Unexpected packet %r from server" % pkt)
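
pkt_line follows Git's pkt-line framing; dulwich supplies the implementation
used above. A self-contained sketch of the format, in which None encodes the
special flush-pkt:

def pkt_line(data):
    """Frame data as a Git pkt-line: four hex length digits, then payload.

    The length field counts itself, so a payload of n bytes is framed
    with length n + 4.  None produces the '0000' flush-pkt.
    """
    if data is None:
        return b"0000"
    if not isinstance(data, bytes):
        data = data.encode("utf-8")
    return ("%04x" % (len(data) + 4)).encode("ascii") + data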
Example 12
    def makeRequest(self, method, url, **kwargs):
        """Make a request.

        :param method: The HTTP request method.
        :param url: The URL to request.
        :return: A `requests.Response` object.
        :raises requests.RequestException: if the request fails.
        """
        with override_timeout(self.timeout):
            return urlfetch(url, method=method, use_proxy=True, **kwargs)
Example 13
 def test_urlfetch_supports_file_urls_if_allow_file(self):
     """urlfetch() supports file urls if explicitly asked to do so."""
     test_path = self.useFixture(TempDir()).join('file')
     write_file(test_path, 'Success.')
     url = 'file://' + test_path
     self.assertThat(
         urlfetch(url, allow_file=True),
         MatchesStructure(status_code=Equals(200),
                          headers=ContainsDict(
                              {'Content-Length': Equals(8)}),
                          content=Equals('Success.')))
Example 14
    def request(self, method, path):
        """Make an HTTP request.

        Returns the Response object.
        """
        self.log.debug("Requesting %s with method %s", path, method)
        return urlfetch(urljoin(self.base, path),
                        method=method,
                        allow_redirects=False,
                        use_proxy=True,
                        allow_ftp=True)
Example 15
    def handleRelease(self, product_name, series_name, url, file_names):
        """If the given URL looks like a release tarball, download it
        and create a corresponding ProductRelease."""
        filename = urlparse.urlsplit(url)[2]
        slash = filename.rfind("/")
        if slash != -1:
            filename = filename[slash + 1:]
        self.log.debug("Filename portion is %s", filename)

        version = extract_version(filename)
        if version is None:
            self.log.info("Unable to parse version from %s", url)
            return
        self.log.debug("Version is %s", version)
        if not sane_version(version):
            self.log.error("Version number '%s' for '%s' is not sane",
                           version, url)
            return

        if filename in file_names:
            self.log.debug("Already have a tarball for release %s", version)
            return

        mimetype, encoding = mimetypes.guess_type(url)
        self.log.debug("Mime type is %s", mimetype)
        if mimetype is None:
            mimetype = 'application/octet-stream'

        self.log.info("Downloading %s", url)
        with tempfile.TemporaryFile(prefix="product-release-finder") as fp:
            try:
                response = urlfetch(url, use_proxy=True, output_file=fp)
                # XXX cjwatson 2018-06-26: This will all change with
                # requests 3.x.  See:
                #   https://blog.petrzemek.net/2018/04/22/
                expected_length = response.headers.get("Content-Length")
                if expected_length is not None:
                    actual_length = response.raw.tell()
                    expected_length = int(expected_length)
                    if actual_length < expected_length:
                        raise IOError(
                            "Incomplete read: got %d, expected %d" %
                            (actual_length, expected_length))
            except (IOError, requests.RequestException):
                self.log.exception("Download of %s failed", url)
                raise
            stat = os.fstat(fp.fileno())
            fp.seek(0)

            self.addReleaseTarball(product_name, series_name, version,
                                   filename, stat.st_size, fp, mimetype)
            file_names.add(filename)
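
The length comparison above guards against silently truncated downloads,
which requests 2.x does not detect on its own (the XXX comment points at the
planned 3.x change). The same pattern in isolation, streaming to an open file
(a sketch, not Launchpad's urlfetch):

import requests

def download_checked(url, fp):
    """Stream url into fp, raising IOError on a short read."""
    response = requests.get(url, stream=True)
    response.raise_for_status()
    for chunk in response.iter_content(chunk_size=8192):
        fp.write(chunk)
    expected = response.headers.get("Content-Length")
    # response.raw.tell() reports how many raw bytes were actually read.
    if expected is not None and response.raw.tell() < int(expected):
        raise IOError("Incomplete read: got %d, expected %d"
                      % (response.raw.tell(), int(expected)))
    return response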
Example 16
 def _update(self, host, timeout, auth_header=None):
     headers = {
         "User-Agent": LP_USER_AGENT,
         "Host": host,
         "Accept": "application/vnd.github.v3+json",
         }
     if auth_header is not None:
         headers["Authorization"] = auth_header
     url = "https://%s/rate_limit" % host
     try:
         with override_timeout(timeout):
             response = urlfetch(url, headers=headers, use_proxy=True)
             return response.json()["resources"]["core"]
     except requests.RequestException as e:
         raise BugTrackerConnectError(url, e)
Example 17
    def test_urlfetch_writes_to_output_file(self):
        """If given an output_file, urlfetch writes to it."""
        sock, http_server_url = self.make_test_socket()
        sock.listen(1)

        def success_result():
            (client_sock, client_addr) = sock.accept()
            client_sock.sendall(
                dedent("""\
                HTTP/1.0 200 Ok
                Content-Type: text/plain
                Content-Length: 8

                Success."""))
            client_sock.close()

        t = threading.Thread(target=success_result)
        t.start()
        output_path = self.useFixture(TempDir()).join('out')
        with open(output_path, 'wb+') as f:
            urlfetch(http_server_url, output_file=f)
            f.seek(0)
            self.assertEqual(b'Success.', f.read())
        t.join()
Example 18
    def test_urlfetch_returns_the_content(self):
        """When the request succeeds, the result content is returned."""
        sock, http_server_url = self.make_test_socket()
        sock.listen(1)

        def success_result():
            (client_sock, client_addr) = sock.accept()
            client_sock.sendall(dedent("""\
                HTTP/1.0 200 Ok
                Content-Type: text/plain
                Content-Length: 8

                Success."""))
            client_sock.close()

        t = threading.Thread(target=success_result)
        t.start()
        self.assertEqual('Success.', urlfetch(http_server_url))
        t.join()
Example 19
def _fetch_blob_from_launchpad(repository_url, ref_path, filename):
    repo_path = urlsplit(repository_url).path.strip("/")
    try:
        response = urlfetch(
            "https://git.launchpad.net/%s/plain/%s" % (
                repo_path, quote(filename)),
            params={"h": ref_path})
    except requests.RequestException as e:
        if (e.response is not None and
                e.response.status_code == requests.codes.NOT_FOUND):
            raise GitRepositoryBlobNotFound(
                repository_url, filename, rev=ref_path)
        else:
            raise GitRepositoryScanFault(
                "Failed to get file from Git repository at %s: %s" %
                (repository_url, str(e)))
    return response.content
Example 20
 def refreshDischargeMacaroon(cls, snap):
     """See `ISnapStoreClient`."""
     assert config.launchpad.openid_provider_root is not None
     assert snap.store_secrets is not None
     refresh_url = urlappend(
         config.launchpad.openid_provider_root, "api/v2/tokens/refresh")
     data = {"discharge_macaroon": snap.store_secrets["discharge"]}
     try:
         response = urlfetch(refresh_url, method="POST", json=data)
         response_data = response.json()
         if "discharge_macaroon" not in response_data:
             raise BadRefreshResponse(response.text)
         # Set a new dict here to avoid problems with security proxies.
         new_secrets = dict(snap.store_secrets)
         new_secrets["discharge"] = response_data["discharge_macaroon"]
         snap.store_secrets = new_secrets
     except requests.HTTPError as e:
         raise cls._makeSnapStoreError(BadRefreshResponse, e)
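
The snap store examples sketch a refresh protocol: a 401 carrying
"WWW-Authenticate: Macaroon needs_refresh=1" surfaces as
NeedsRefreshResponse, after which the discharge macaroon can be refreshed and
the operation retried. A hypothetical wrapper tying the pieces together (not
part of the client itself):

def release_with_refresh(client, snap, revision):
    """Retry a store release once after refreshing a stale macaroon."""
    try:
        client._release(snap, revision)
    except NeedsRefreshResponse:
        # The store signalled a stale discharge macaroon: refresh it,
        # then retry the release a single time.
        client.refreshDischargeMacaroon(snap)
        client._release(snap, revision)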
Example 21
 def _request(self,
              method,
              branch_id,
              quoted_tail,
              as_json=False,
              **kwargs):
     """Make a request to the Loggerhead API."""
     # Fetch the current timeout before starting the timeline action,
     # since making a database query inside this action will result in an
     # OverlappingActionError.
     get_default_timeout_function()()
     timeline = get_request_timeline(get_current_browser_request())
     components = [BRANCH_ID_ALIAS_PREFIX, str(branch_id)]
     if as_json:
         components.append("+json")
     components.append(quoted_tail)
     path = "/" + "/".join(components)
     action = timeline.start("branch-hosting-%s" % method,
                             "%s %s" % (path, json.dumps(kwargs)))
     try:
         response = urlfetch(urljoin(self.endpoint, path),
                             method=method,
                             **kwargs)
     except TimeoutError:
         # Re-raise this directly so that it can be handled specially by
         # callers.
         raise
     except requests.RequestException:
         raise
     except Exception:
         _, val, tb = sys.exc_info()
         reraise(RequestExceptionWrapper,
                 RequestExceptionWrapper(*val.args), tb)
     finally:
         action.finish()
     if as_json:
         if response.content:
             return response.json()
         else:
             return None
     else:
         return response.content
Example 22
    def fetchCVEURL(self, url):
        """Fetch CVE data from a URL, decompressing if necessary."""
        self.logger.info("Downloading CVE database from %s..." % url)
        try:
            with override_timeout(config.cveupdater.timeout):
                # Command-line options are trusted, so allow file://
                # URLs to ease testing.
                response = urlfetch(url, use_proxy=True, allow_file=True)
        except requests.RequestException:
            raise LaunchpadScriptFailure(
                'Unable to connect for CVE database %s' % url)

        cve_db = response.content
        self.logger.info("%d bytes downloaded." % len(cve_db))
        # requests will normally decompress this automatically, but that
        # might not be the case if we're given a file:// URL to a gzipped
        # file.
        if cve_db[:2] == b'\037\213':  # gzip magic
            cve_db = gzip.GzipFile(fileobj=io.BytesIO(cve_db)).read()
        return cve_db
Example 23
def _get_cdimage_file_list():
    url = config.distributionmirrorprober.cdimage_file_list_url
    # In test environments, this may be a file: URL.  Adjust it to be in a
    # form that requests can cope with (i.e. using an absolute path).
    parsed_url = urlparse.urlparse(url)
    if parsed_url.scheme == 'file' and not os.path.isabs(parsed_url.path):
        assert parsed_url.path == parsed_url[2]
        parsed_url = list(parsed_url)
        parsed_url[2] = os.path.join(config.root, parsed_url[2])
    url = urlparse.urlunparse(parsed_url)
    try:
        return urlfetch(url,
                        headers={
                            'Pragma': 'no-cache',
                            'Cache-control': 'no-cache'
                        },
                        use_proxy=True,
                        allow_file=True)
    except requests.RequestException as e:
        raise UnableToFetchCDImageFileList('Unable to fetch %s: %s' % (url, e))
Example 24
    def getRecentBlogPosts(self):
        """Return the parsed feed of the most recent blog posts.

        It returns a list of dicts with keys title, description, link and date.

        The date is formatted and the description which may contain HTML is
        sanitized.

        The number of blog posts to display is controlled through
        launchpad.homepage_recent_posts_count. The posts are fetched
        from the feed specified in launchpad.homepage_recent_posts_feed.

        FeedParser takes care of sanitizing the HTML contained in the feed.
        """
        key = '%s:homepage-blog-posts' % config.instance_name
        cached_data = getUtility(IMemcacheClient).get(key)
        if cached_data:
            return cached_data
        try:
            # Use urlfetch which supports timeout
            data = urlfetch(config.launchpad.homepage_recent_posts_feed)
        except IOError:
            return []
        feed = feedparser.parse(data)
        posts = []
        max_count = config.launchpad.homepage_recent_posts_count
        # FeedParser takes care of HTML sanitisation.
        for entry in feed.entries[:max_count]:
            posts.append({
                'title': entry.title,
                'description': entry.description,
                'link': entry.link,
                'date': time.strftime('%d %b %Y', entry.updated_parsed),
                })
        # The cache of posts expires after an hour.
        getUtility(IMemcacheClient).set(key, posts, time=3600)
        return posts
Example 25
 def listChannels(cls):
     """See `ISnapStoreClient`."""
     if config.snappy.store_search_url is None:
         return _default_store_channels
     channels = None
     memcache_client = getUtility(IMemcacheClient)
     search_host = urlsplit(config.snappy.store_search_url).hostname
     memcache_key = ("%s:channels" % search_host).encode("UTF-8")
     cached_channels = memcache_client.get(memcache_key)
     if cached_channels is not None:
         try:
             channels = json.loads(cached_channels)
         except JSONDecodeError:
             log.exception(
                 "Cannot load cached channels for %s; deleting" %
                 search_host)
             memcache_client.delete(memcache_key)
     if (channels is None and
             not getFeatureFlag(u"snap.disable_channel_search")):
         path = "api/v1/channels"
         timeline = cls._getTimeline()
         if timeline is not None:
             action = timeline.start("store-search-get", "/" + path)
         channels_url = urlappend(config.snappy.store_search_url, path)
         try:
             response = urlfetch(
                 channels_url, headers={"Accept": "application/hal+json"})
         except requests.HTTPError as e:
             raise cls._makeSnapStoreError(BadSearchResponse, e)
         finally:
             if timeline is not None:
                 action.finish()
         channels = response.json().get("_embedded", {}).get(
             "clickindex:channel", [])
         expire_time = time.time() + 60 * 60 * 24
         memcache_client.set(
             memcache_key, json.dumps(channels), expire_time)
     if channels is None:
         channels = _default_store_channels
     return channels
Example 26
    def request(self, host, handler, request_body, verbose=0):
        """Make an XMLRPC request.

        Uses the configured proxy server to make the connection.
        """
        url = urlunparse((self.scheme, host, handler, '', '', ''))
        # httplib can raise a UnicodeDecodeError when using a Unicode
        # URL, a non-ASCII body and a proxy. http://bugs.python.org/issue12398
        url = six.ensure_binary(url)
        try:
            with override_timeout(self.timeout):
                response = urlfetch(
                    url, method='POST', headers={'Content-Type': 'text/xml'},
                    data=request_body, cookies=self.cookie_jar,
                    hooks={'response': repost_on_redirect_hook},
                    use_proxy=True)
        except requests.HTTPError as e:
            raise ProtocolError(
                url.decode('utf-8'), e.response.status_code, e.response.reason,
                e.response.headers)
        else:
            traceback_info(response.text)
            return self.parse_response(BytesIO(response.content))
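
A transport with this request() signature plugs straight into the standard
library's XML-RPC client. A usage sketch (Python 2 module names to match the
code above; the transport class name and remote method are placeholders):

import xmlrpclib

transport = UrlfetchTransport()  # stands in for the class defined above
server = xmlrpclib.ServerProxy(
    'http://bugtracker.example/xmlrpc.cgi', transport=transport)
# Calls are now routed through urlfetch, with the proxy, cookie jar and
# repost-on-redirect hook applied.
server.some_remote_method()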
Example 27
 def requestPackageUploadPermission(cls, snappy_series, snap_name):
     assert config.snappy.store_url is not None
     request_url = urlappend(config.snappy.store_url, "dev/api/acl/")
     request = get_current_browser_request()
     timeline_action = get_request_timeline(request).start(
         "request-snap-upload-macaroon",
         "%s/%s" % (snappy_series.name, snap_name), allow_nested=True)
     try:
         response = urlfetch(
             request_url, method="POST",
             json={
                 "packages": [
                     {"name": snap_name, "series": snappy_series.name}],
                 "permissions": ["package_upload"],
                 })
         response_data = response.json()
         if "macaroon" not in response_data:
             raise BadRequestPackageUploadResponse(response.text)
         return response_data["macaroon"]
     except requests.HTTPError as e:
         raise cls._makeSnapStoreError(BadRequestPackageUploadResponse, e)
     finally:
         timeline_action.finish()
Example 28
def _fetch_blob_from_github(repository_url, ref_path, filename):
    repo_path = urlsplit(repository_url).path.strip("/")
    if repo_path.endswith(".git"):
        repo_path = repo_path[:-len(".git")]
    try:
        response = urlfetch(
            "https://raw.githubusercontent.com/%s/%s/%s" % (
                repo_path,
                # GitHub supports either branch or tag names here, but both
                # must be shortened.  (If both a branch and a tag exist with
                # the same name, it appears to pick the branch.)
                quote(re.sub(r"^refs/(?:heads|tags)/", "", ref_path)),
                quote(filename)),
            use_proxy=True)
    except requests.RequestException as e:
        if (e.response is not None and
                e.response.status_code == requests.codes.NOT_FOUND):
            raise GitRepositoryBlobNotFound(
                repository_url, filename, rev=ref_path)
        else:
            raise GitRepositoryScanFault(
                "Failed to get file from Git repository at %s: %s" %
                (repository_url, str(e)))
    return response.content
Example 29
 def checkStatus(cls, status_url):
     """See `ISnapStoreClient`."""
     try:
         response = urlfetch(status_url)
         response_data = response.json()
         if not response_data["processed"]:
             raise UploadNotScannedYetResponse()
         elif "errors" in response_data:
             error_message = "\n".join(
                 error["message"] for error in response_data["errors"])
             error_messages = []
             for error in response_data["errors"]:
                 error_detail = {"message": error["message"]}
                 if "link" in error:
                     error_detail["link"] = error["link"]
                 error_messages.append(error_detail)
             raise ScanFailedResponse(
                 error_message, messages=error_messages)
         elif not response_data["can_release"]:
             return response_data["url"], None
         else:
             return response_data["url"], response_data["revision"]
     except requests.HTTPError as e:
         raise cls._makeSnapStoreError(BadScanStatusResponse, e)
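
Because checkStatus raises UploadNotScannedYetResponse while the scan is
still pending, callers presumably poll until the store has processed the
upload. A hypothetical polling loop (interval and attempt count invented for
illustration):

import time

def wait_for_scan(client, status_url, interval=15, attempts=20):
    """Poll checkStatus until it returns a (url, revision) tuple."""
    for _ in range(attempts):
        try:
            return client.checkStatus(status_url)
        except UploadNotScannedYetResponse:
            time.sleep(interval)
    # Still pending after all attempts; let the caller decide what next.
    raise UploadNotScannedYetResponse()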
Example 30
 def _grabPage(self, action, fingerprint):
     """Wrapper to collect KeyServer Pages."""
     url = self.getURLForKeyInServer(fingerprint, action)
     return urlfetch(url)

 def getPage(self):
     """Download and return content from the Bugzilla page"""
     with override_timeout(config.updatebugzillaremotecomponents.timeout):
         return urlfetch(self.url, use_proxy=True).content