Example #1
    def _runwget(self, ud, d, command, quiet, workdir=None):

        progresshandler = WgetProgressHandler(d)

        logger.debug2("Fetching %s using command '%s'" % (ud.url, command))
        bb.fetch2.check_network_access(d, command, ud.url)
        runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
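The WgetProgressHandler wired in above filters wget's `--progress=dot -v` output into progress events. As a rough standalone sketch of that parsing (a hypothetical class, not BitBake's actual handler):

import re

class DotProgressParser:
    """Hypothetical sketch: pull completion percentages out of wget's
    --progress=dot output, where lines end in something like '... 42%'."""
    PERCENT_RE = re.compile(r'(\d{1,3})%')

    def parse_line(self, line):
        match = self.PERCENT_RE.search(line)
        return int(match.group(1)) if match else None

# Usage:
#   DotProgressParser().parse_line("  3250K .......... 42% 1.1M 2s")  # -> 42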
Example #2
    def download(self, ud, d, retries=3):
        """Fetch urls"""

        # If we're reaching the account transaction limit we might be refused a
        # connection; retrying lets us avoid false negatives, since the limit
        # changes over time.
        fetchcmd = self.basecmd + ' --retry-connrefused --waitretry=5'

        # We need to provide a localpath to avoid wget using the SAS
        # ud.localfile either has the downloadfilename or ud.path
        localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
        bb.utils.mkdirhier(os.path.dirname(localpath))
        fetchcmd += " -O %s" % shlex.quote(localpath)

        if ud.user and ud.pswd:
            fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (
                ud.user, ud.pswd)

        # Check if a Shared Access Signature was given and use it
        az_sas = d.getVar('AZ_SAS')

        if az_sas:
            azuri = '%s%s%s%s' % ('https://', ud.host, ud.path, az_sas)
        else:
            azuri = '%s%s%s' % ('https://', ud.host, ud.path)

        if os.path.exists(ud.localpath):
            # The file exists but we didn't complete it, so try again.
            fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % azuri)
        else:
            fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % azuri)

        try:
            self._runwget(ud, d, fetchcmd, False)
        except FetchError as e:
            # Azure sometimes fails the SSL handshake when wget is used under
            # stress, producing a FetchError from the fetcher; if the artifact
            # exists, retrying should succeed. Bound the recursion by retries
            # and re-raise anything we can't recover from.
            if 'Unable to establish SSL connection' in str(e) and retries > 0:
                logger.debug2(
                    'Unable to establish SSL connection: Retries remaining: %s, Retrying...'
                    % retries)
                return self.download(ud, d, retries - 1)
            raise

        # Sanity check, since wget can pretend it succeeded when it didn't.
        # This also used to happen when SourceForge sent us to a mirror page.
        if not os.path.exists(ud.localpath):
            raise FetchError(
                "The fetch command returned success for url %s but %s doesn't exist?!"
                % (azuri, ud.localpath), azuri)

        if os.path.getsize(ud.localpath) == 0:
            os.remove(ud.localpath)
            raise FetchError(
                "The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right."
                % (azuri), azuri)

        return True
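The SAS handling above simply appends the token to the blob URL. A minimal standalone sketch of that construction (a hypothetical helper, assuming AZ_SAS carries the leading '?' as in the code above):

def build_az_uri(host, path, sas=None):
    """Append an optional Shared Access Signature token to an Azure blob URL."""
    uri = "https://%s%s" % (host, path)
    return uri + sas if sas else uri

# Usage (hypothetical values):
#   build_az_uri("example.blob.core.windows.net", "/pool/foo.tar.gz", "?sv=...")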
Example #3
    def download(self, ud, d):
        """Fetch url"""

        logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")

        lf = bb.utils.lockfile(ud.svnlock)

        try:
            if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
                svncmd = self._buildsvncommand(ud, d, "update")
                logger.info("Update " + ud.url)
                # We need to attempt to run svn upgrade first in case it's an older working copy format
                try:
                    runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
                except FetchError:
                    pass
                logger.debug("Running %s", svncmd)
                bb.fetch2.check_network_access(d, svncmd, ud.url)
                runfetchcmd(svncmd, d, workdir=ud.moddir)
            else:
                svncmd = self._buildsvncommand(ud, d, "fetch")
                logger.info("Fetch " + ud.url)
                # check out sources there
                bb.utils.mkdirhier(ud.pkgdir)
                logger.debug("Running %s", svncmd)
                bb.fetch2.check_network_access(d, svncmd, ud.url)
                runfetchcmd(svncmd, d, workdir=ud.pkgdir)

            if not ("externals" in ud.parm and ud.parm["externals"] == "nowarn"):
                # Warn the user if this had externals (won't catch them all)
                output = runfetchcmd("svn propget svn:externals || true", d, workdir=ud.moddir)
                if output:
                    if "--ignore-externals" in svncmd.split():
                        bb.warn("%s contains svn:externals." % ud.url)
                        bb.warn("These should be added to the recipe SRC_URI as necessary.")
                        bb.warn("svn fetch has ignored externals:\n%s" % output)
                        bb.warn("To disable this warning add ';externals=nowarn' to the url.")
                    else:
                        bb.debug(1, "svn repository has externals:\n%s" % output)

            scmdata = ud.parm.get("scmdata", "")
            if scmdata == "keep":
                tar_flags = ""
            else:
                tar_flags = "--exclude='.svn'"

            # tar them up to a defined filename
            runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
                        cleanup=[ud.localpath], workdir=ud.pkgdir)
        finally:
            bb.utils.unlockfile(lf)
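The final step tars up the checkout, excluding .svn metadata unless scmdata=keep is set. A pure-Python equivalent of that exclusion (a sketch using the standard tarfile module, not the tar command BitBake actually runs):

import tarfile

def make_tarball(srcdir, outpath, keep_scmdata=False):
    """Pack srcdir into a gzipped tarball, dropping .svn dirs unless scmdata is kept."""
    def exclude_svn(tarinfo):
        if not keep_scmdata and '.svn' in tarinfo.name.split('/'):
            return None  # returning None omits the member
        return tarinfo
    with tarfile.open(outpath, 'w:gz') as tar:
        tar.add(srcdir, filter=exclude_svn)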
Example #4
 def localpaths(self, urldata, d):
     """
     Return the local filename of a given url assuming a successful fetch.
     """
     searched = []
     path = urldata.decodedurl
     newpath = path
     if path[0] == "/":
         return [path]
     filespath = d.getVar('FILESPATH')
     if filespath:
         logger.debug2("Searching for %s in paths:\n    %s" % (path, "\n    ".join(filespath.split(":"))))
         newpath, hist = bb.utils.which(filespath, path, history=True)
         searched.extend(hist)
     return searched
Example #5
    def download(self, ud, d):
        """Fetch url"""

        logger.debug2("Fetch: checking for module directory '" + ud.moddir +
                      "'")

        # If the checkout doesn't exist and the mirror tarball does, extract it
        if not os.path.exists(ud.pkgdir) and os.path.exists(ud.fullmirror):
            bb.utils.mkdirhier(ud.pkgdir)
            runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.pkgdir)

        if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
            # Found the source; check whether we need to pull
            updatecmd = self._buildhgcommand(ud, d, "update")
            logger.debug("Running %s", updatecmd)
            try:
                runfetchcmd(updatecmd, d, workdir=ud.moddir)
            except bb.fetch2.FetchError:
                # Running pull in the repo
                pullcmd = self._buildhgcommand(ud, d, "pull")
                logger.info("Pulling " + ud.url)
                # update sources there
                logger.debug("Running %s", pullcmd)
                bb.fetch2.check_network_access(d, pullcmd, ud.url)
                runfetchcmd(pullcmd, d, workdir=ud.moddir)
                try:
                    os.unlink(ud.fullmirror)
                except OSError as exc:
                    if exc.errno != errno.ENOENT:
                        raise

        # No source found, clone it.
        if not os.path.exists(ud.moddir):
            fetchcmd = self._buildhgcommand(ud, d, "fetch")
            logger.info("Fetch " + ud.url)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            logger.debug("Running %s", fetchcmd)
            bb.fetch2.check_network_access(d, fetchcmd, ud.url)
            runfetchcmd(fetchcmd, d, workdir=ud.pkgdir)

        # Even when we clone (fetch), we still need to update, as hg's clone
        # won't check out the specified revision if it's on a branch
        updatecmd = self._buildhgcommand(ud, d, "update")
        logger.debug("Running %s", updatecmd)
        runfetchcmd(updatecmd, d, workdir=ud.moddir)
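The mirror-tarball invalidation above uses the classic ENOENT-tolerant unlink. Extracted as a helper (a sketch; on Python 3, contextlib.suppress(FileNotFoundError) around os.unlink is the shorter modern spelling):

import errno
import os

def unlink_if_exists(path):
    """Remove path, ignoring the case where it is already gone."""
    try:
        os.unlink(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise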
Example #6
 def localpaths(self, urldata, d):
     """
     Return the local filename of a given url assuming a successful fetch.
     """
     searched = []
     path = urldata.decodedurl
     newpath = path
     if path[0] == "/":
         return [path]
     filespath = d.getVar('FILESPATH')
     if filespath:
         logger.debug2("Searching for %s in paths:\n    %s" %
                       (path, "\n    ".join(filespath.split(":"))))
         newpath, hist = bb.utils.which(filespath, path, history=True)
         searched.extend(hist)
     if not os.path.exists(newpath):
         dldirfile = os.path.join(d.getVar("DL_DIR"), path)
         logger.debug2("Defaulting to %s for %s" % (dldirfile, path))
         bb.utils.mkdirhier(os.path.dirname(dldirfile))
         searched.append(dldirfile)
         return searched
     return searched
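bb.utils.which with history=True walks the colon-separated FILESPATH and records every candidate it tried. An approximation of that behaviour as a standalone function (a sketch, not BitBake's implementation):

import os

def which_with_history(paths, filename):
    """Search a colon-separated path list for filename.
    Returns (first hit or '', list of candidate paths attempted)."""
    history = []
    for directory in paths.split(':'):
        candidate = os.path.join(directory, filename)
        history.append(candidate)
        if os.path.exists(candidate):
            return candidate, history
    return '', history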
Example #7
    def unpack(self, ud, destdir, d):
        """
        Make a local clone or export for the url
        """

        revflag = "-r %s" % ud.revision
        subdir = ud.parm.get("destsuffix", ud.module)
        codir = "%s/%s" % (destdir, subdir)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata != "nokeep":
            proto = ud.parm.get('protocol', 'http')
            if not os.access(os.path.join(codir, '.hg'), os.R_OK):
                logger.debug2("Unpack: creating new hg repository in '" +
                              codir + "'")
                runfetchcmd("%s init %s" % (ud.basecmd, codir), d)
            logger.debug2("Unpack: updating source in '" + codir + "'")
            if ud.user and ud.pswd:
                runfetchcmd(
                    "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull %s"
                    % (ud.basecmd, ud.user, ud.pswd, proto, ud.moddir),
                    d,
                    workdir=codir)
            else:
                runfetchcmd("%s pull %s" % (ud.basecmd, ud.moddir),
                            d,
                            workdir=codir)
            if ud.user and ud.pswd:
                runfetchcmd(
                    "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" up -C %s"
                    % (ud.basecmd, ud.user, ud.pswd, proto, revflag),
                    d,
                    workdir=codir)
            else:
                runfetchcmd("%s up -C %s" % (ud.basecmd, revflag),
                            d,
                            workdir=codir)
        else:
            logger.debug2("Unpack: extracting source to '" + codir + "'")
            runfetchcmd("%s archive -t files %s %s" %
                        (ud.basecmd, revflag, codir),
                        d,
                        workdir=ud.moddir)
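The two authenticated branches above repeat the same --config auth block. A hypothetical helper that assembles those flags once (sketch only; it adds shlex.quote for safety, whereas the original interpolates the values bare):

import shlex

def hg_auth_flags(user, password, scheme):
    """Build the hg --config options for inline auth, mirroring the pattern above."""
    return ("--config auth.default.prefix=* "
            "--config auth.default.username=%s "
            "--config auth.default.password=%s "
            "--config auth.default.schemes=%s"
            % (shlex.quote(user), shlex.quote(password), shlex.quote(scheme)))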
Example #8
    def checkstatus(self, fetch, ud, d, try_again=True):
        class HTTPConnectionCache(http.client.HTTPConnection):
            if fetch.connection_cache:

                def connect(self):
                    """Connect to the host and port specified in __init__."""

                    sock = fetch.connection_cache.get_connection(
                        self.host, self.port)
                    if sock:
                        self.sock = sock
                    else:
                        self.sock = socket.create_connection(
                            (self.host, self.port), self.timeout,
                            self.source_address)
                        fetch.connection_cache.add_connection(
                            self.host, self.port, self.sock)

                    if self._tunnel_host:
                        self._tunnel()

        class CacheHTTPHandler(urllib.request.HTTPHandler):
            def http_open(self, req):
                return self.do_open(HTTPConnectionCache, req)

            def do_open(self, http_class, req):
                """Return an addinfourl object for the request, using http_class.

                http_class must implement the HTTPConnection API from httplib.
                The addinfourl return value is a file-like object.  It also
                has methods and attributes including:
                    - info(): return a mimetools.Message object for the headers
                    - geturl(): return the original request URL
                    - code: HTTP status code
                """
                host = req.host
                if not host:
                    raise urllib.error.URLError('no host given')

                h = http_class(host,
                               timeout=req.timeout)  # will parse host:port
                h.set_debuglevel(self._debuglevel)

                headers = dict(req.unredirected_hdrs)
                headers.update(
                    dict((k, v) for k, v in list(req.headers.items())
                         if k not in headers))

                # We want to make an HTTP/1.1 request, but the addinfourl
                # class isn't prepared to deal with a persistent connection.
                # It will try to read all remaining data from the socket,
                # which will block while the server waits for the next request.
                # So make sure the connection gets closed after the (only)
                # request.

                # Don't close the connection when connection_cache is enabled.
                if fetch.connection_cache is None:
                    headers["Connection"] = "close"
                else:
                    headers["Connection"] = "Keep-Alive"  # Works for HTTP/1.0

                headers = dict(
                    (name.title(), val) for name, val in list(headers.items()))

                if req._tunnel_host:
                    tunnel_headers = {}
                    proxy_auth_hdr = "Proxy-Authorization"
                    if proxy_auth_hdr in headers:
                        tunnel_headers[proxy_auth_hdr] = headers[
                            proxy_auth_hdr]
                        # Proxy-Authorization should not be sent to origin
                        # server.
                        del headers[proxy_auth_hdr]
                    h.set_tunnel(req._tunnel_host, headers=tunnel_headers)

                try:
                    h.request(req.get_method(), req.selector, req.data,
                              headers)
                except socket.error as err:  # XXX what error?
                    # Don't close connection when cache is enabled.
                    # Instead, try to detect connections that are no longer
                    # usable (for example, closed unexpectedly) and remove
                    # them from the cache.
                    if fetch.connection_cache is None:
                        h.close()
                    elif isinstance(err, OSError) and err.errno == errno.EBADF:
                        # This happens when the server closes the connection despite the Keep-Alive.
                        # Apparently urllib then uses the file descriptor, expecting it to be
                        # connected, when in reality the connection is already gone.
                        # We let the request fail and expect it to be
                        # tried once more ("try_again" in check_status()),
                        # with the dead connection removed from the cache.
                        # If it still fails, we give up; this can happen with
                        # bad HTTP proxy settings.
                        fetch.connection_cache.remove_connection(
                            h.host, h.port)
                    raise urllib.error.URLError(err)
                else:
                    r = h.getresponse()

                # Pick apart the HTTPResponse object to get the addinfourl
                # object initialized properly.

                # Wrap the HTTPResponse object in socket's file object adapter
                # for Windows.  That adapter calls recv(), so delegate recv()
                # to read().  This weird wrapping allows the returned object to
                # have readline() and readlines() methods.

                # XXX It might be better to extract the read buffering code
                # out of socket._fileobject() and into a base class.
                r.recv = r.read

                # no data, just have to read
                r.read()

                class fp_dummy(object):
                    def read(self):
                        return ""

                    def readline(self):
                        return ""

                    def close(self):
                        pass

                    closed = False

                resp = urllib.response.addinfourl(fp_dummy(), r.msg,
                                                  req.get_full_url())
                resp.code = r.status
                resp.msg = r.reason

                # Close the connection when the server requests it.
                if fetch.connection_cache is not None:
                    if 'Connection' in r.msg and r.msg['Connection'] == 'close':
                        fetch.connection_cache.remove_connection(
                            h.host, h.port)

                return resp

        class HTTPMethodFallback(urllib.request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                if req.get_method() != 'GET':
                    newheaders = dict(
                        (k, v) for k, v in list(req.headers.items())
                        if k.lower() not in ("content-length", "content-type"))
                    return self.parent.open(
                        urllib.request.Request(
                            req.get_full_url(),
                            headers=newheaders,
                            origin_req_host=req.origin_req_host,
                            unverifiable=True))

                raise urllib.request.HTTPError(req, code, msg, headers, None)

            # Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403
            # Forbidden when they actually mean 405 Method Not Allowed.
            http_error_403 = http_error_405

        class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
            """
            urllib2.HTTPRedirectHandler resets the method to GET on redirect,
            when we want to follow redirects using the original method.
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                newreq = urllib.request.HTTPRedirectHandler.redirect_request(
                    self, req, fp, code, msg, headers, newurl)
                newreq.get_method = req.get_method
                return newreq

        # We need to update the environment here as both the proxy and HTTPS
        # handlers need variables set. The proxy needs http_proxy and friends to
        # be set, and HTTPSHandler ends up calling into openssl to load the
        # certificates. In buildtools configurations this will be looking at the
        # wrong place for certificates by default: we set SSL_CERT_FILE to the
        # right location in the buildtools environment script but as BitBake
        # prunes the environment, this is lost. When binaries are executed
        # runfetchcmd ensures these values are in the environment, but this is
        # pure Python so we need to update the environment.
        #
        # Avoid trampling the environment too much by using bb.utils.environment
        # to scope the changes to the build_opener request, which is when the
        # environment lookups happen.
        newenv = {}
        for name in bb.fetch2.FETCH_EXPORT_VARS:
            value = d.getVar(name)
            if not value:
                origenv = d.getVar("BB_ORIGENV")
                if origenv:
                    value = origenv.getVar(name)
            if value:
                newenv[name] = value

        with bb.utils.environment(**newenv):
            import ssl

            if self.check_certs(d):
                context = ssl.create_default_context()
            else:
                context = ssl._create_unverified_context()

            handlers = [
                FixedHTTPRedirectHandler, HTTPMethodFallback,
                urllib.request.ProxyHandler(),
                CacheHTTPHandler(),
                urllib.request.HTTPSHandler(context=context)
            ]
            opener = urllib.request.build_opener(*handlers)

            try:
                uri = ud.url.split(";")[0]
                r = urllib.request.Request(uri)
                r.get_method = lambda: "HEAD"
                # Some servers (FusionForge, as used on Alioth) require that the
                # optional Accept header is set.
                r.add_header("Accept", "*/*")
                r.add_header("User-Agent", self.user_agent)

                def add_basic_auth(login_str, request):
                    '''Adds Basic auth to http request, pass in login:password as string'''
                    import base64
                    encodeuser = base64.b64encode(
                        login_str.encode('utf-8')).decode("utf-8")
                    authheader = "Basic %s" % encodeuser
                    r.add_header("Authorization", authheader)

                if ud.user and ud.pswd:
                    add_basic_auth(ud.user + ':' + ud.pswd, r)

                try:
                    import netrc
                    n = netrc.netrc()
                    login, unused, password = n.authenticators(
                        urllib.parse.urlparse(uri).hostname)
                    add_basic_auth("%s:%s" % (login, password), r)
                except (TypeError, ImportError, IOError,
                        netrc.NetrcParseError):
                    pass

                with opener.open(r) as response:
                    pass
            except urllib.error.URLError as e:
                if try_again:
                    logger.debug2("checkstatus: trying again")
                    return self.checkstatus(fetch, ud, d, False)
                else:
                    # debug for now to avoid spamming the logs in e.g. remote sstate searches
                    logger.debug2("checkstatus() urlopen failed: %s" % e)
                    return False
            except ConnectionResetError as e:
                if try_again:
                    logger.debug2("checkstatus: trying again")
                    return self.checkstatus(fetch, ud, d, False)
                else:
                    # debug for now to avoid spamming the logs in e.g. remote sstate searches
                    logger.debug2("checkstatus() urlopen failed: %s" % e)
                    return False

        return True
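add_basic_auth above is the standard Basic scheme: base64 over "login:password". Isolated as a tiny helper with a worked value (a sketch):

import base64

def basic_auth_header(login, password):
    """Return the value of an HTTP Authorization header using Basic auth."""
    token = base64.b64encode(
        ("%s:%s" % (login, password)).encode("utf-8")).decode("utf-8")
    return "Basic " + token

# basic_auth_header("user", "secret") -> "Basic dXNlcjpzZWNyZXQ="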
Example #9
    def download(self, ud, d):

        method = ud.parm.get('method', 'pserver')
        localdir = ud.parm.get('localdir', ud.module)
        cvs_port = ud.parm.get('port', '')

        cvs_rsh = None
        if method == "ext":
            if "rsh" in ud.parm:
                cvs_rsh = ud.parm["rsh"]

        if method == "dir":
            cvsroot = ud.path
        else:
            cvsroot = ":" + method
            cvsproxyhost = d.getVar('CVS_PROXY_HOST')
            if cvsproxyhost:
                cvsroot += ";proxy=" + cvsproxyhost
            cvsproxyport = d.getVar('CVS_PROXY_PORT')
            if cvsproxyport:
                cvsroot += ";proxyport=" + cvsproxyport
            cvsroot += ":" + ud.user
            if ud.pswd:
                cvsroot += ":" + ud.pswd
            cvsroot += "@" + ud.host + ":" + cvs_port + ud.path

        options = []
        if 'norecurse' in ud.parm:
            options.append("-l")
        if ud.date:
            # treat YYYYMMDDHHMM specially for CVS
            if len(ud.date) == 12:
                options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
            else:
                options.append("-D \"%s UTC\"" % ud.date)
        if ud.tag:
            options.append("-r %s" % ud.tag)

        cvsbasecmd = d.getVar("FETCHCMD_cvs") or "/usr/bin/env cvs"
        cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
        cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options)

        if cvs_rsh:
            cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
            cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

        # create module directory
        logger.debug2("Fetch: checking for module directory")
        moddir = os.path.join(ud.pkgdir, localdir)
        workdir = None
        if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
            logger.info("Update " + ud.url)
            bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url)
            # update sources there
            workdir = moddir
            cmd = cvsupdatecmd
        else:
            logger.info("Fetch " + ud.url)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            workdir = ud.pkgdir
            logger.debug("Running %s", cvscmd)
            bb.fetch2.check_network_access(d, cvscmd, ud.url)
            cmd = cvscmd

        runfetchcmd(cmd, d, cleanup=[moddir], workdir=workdir)

        if not os.access(moddir, os.R_OK):
            raise FetchError("Directory %s was not readable despite sucessful fetch?!" % moddir, ud.url)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude='CVS'"

        # tar them up to a defined filename
        workdir = None
        if 'fullpath' in ud.parm:
            workdir = ud.pkgdir
            cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir)
        else:
            workdir = os.path.dirname(os.path.realpath(moddir))
            cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir))

        runfetchcmd(cmd, d, cleanup=[ud.localpath], workdir=workdir)
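The date handling above special-cases a twelve-character YYYYMMDDHHMM stamp. The same logic isolated as a helper (a sketch with a hypothetical name):

def cvs_date_option(date):
    """Format a CVS -D option; YYYYMMDDHHMM is split into a date and HH:MM time."""
    if len(date) == 12:
        return '-D "%s %s:%s UTC"' % (date[0:8], date[8:10], date[10:12])
    return '-D "%s UTC"' % date

# cvs_date_option("202401151230") -> '-D "20240115 12:30 UTC"'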