Example #1
0
File: rsync.py  Project: Zlogene/portage-1
    def update(self):
        '''Internal update function which performs the transfer.

        Full rsync sync sequence: refuse to overwrite a VCS checkout,
        assemble rsync options, configure optional GLEP 74
        (MetaManifest) verification, resolve the mirror hostname, and
        retry the transfer across the resolved addresses until one
        succeeds or the retry budget is exhausted.  On success the
        staged update is committed to the repository storage; otherwise
        it is aborted (unless Manifest verification failed, in which
        case the download is kept for inspection).

        Returns a 2-tuple ``(exitcode, updatecache_flg)``: ``exitcode``
        is 0 on success, non-zero on failure; ``updatecache_flg`` tells
        the caller whether the metadata cache should be updated.
        '''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        quiet = '--quiet' in opts
        out = portage.output.EOutput(quiet=quiet)
        syncuri = self.repo.sync_uri
        # Rsyncing over a repository that is really a VCS checkout would
        # clobber it, so abort unless the user explicitly disabled this
        # safety check in repos.conf.
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        # Per-invocation rsync timeout, in seconds.
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        # Per-repo extra options from repos.conf, split shell-style.
        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        exitcode = 0
        verify_failure = False

        # Process GLEP74 verification options.
        # Default verification to 'no'; it's enabled for ::gentoo
        # via default repos.conf though.
        self.verify_metamanifest = (self.repo.module_specific_options.get(
            'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
        # Support overriding job count.
        self.verify_jobs = self.repo.module_specific_options.get(
            'sync-rsync-verify-jobs', None)
        if self.verify_jobs is not None:
            try:
                self.verify_jobs = int(self.verify_jobs)
                if self.verify_jobs < 0:
                    raise ValueError(self.verify_jobs)
            except ValueError:
                # Invalid value: warn and fall back to gemato's default.
                writemsg_level(
                    "!!! sync-rsync-verify-jobs not a positive integer: %s\n" %
                    (self.verify_jobs, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.verify_jobs = None
            else:
                if self.verify_jobs == 0:
                    # Use the apparent number of processors if gemato
                    # supports it.
                    self.verify_jobs = None
        # Support overriding max age.
        self.max_age = self.repo.module_specific_options.get(
            'sync-rsync-verify-max-age', '')
        if self.max_age:
            try:
                self.max_age = int(self.max_age)
                if self.max_age < 0:
                    raise ValueError(self.max_age)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-max-age must be a non-negative integer: %s\n"
                    % (self.max_age, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.max_age = 0
        else:
            # Empty/missing option: no timestamp-age check.
            self.max_age = 0

        openpgp_env = None
        if self.verify_metamanifest and gemato is not None:
            # Use isolated environment if key is specified,
            # system environment otherwise
            if self.repo.sync_openpgp_key_path is not None:
                openpgp_env = gemato.openpgp.OpenPGPEnvironment()
            else:
                openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

        try:
            # Load and update the keyring early. If it fails, then verification
            # will not be performed and the user will have to fix it and try again,
            # so we may as well bail out before actual rsync happens.
            if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
                try:
                    out.einfo('Using keys from %s' %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
                        openpgp_env.import_key(f)
                    self._refresh_keys(openpgp_env)
                except (GematoException, asyncio.TimeoutError) as e:
                    writemsg_level(
                        "!!! Manifest verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1)
                    return (1, False)

            # Real local timestamp file.
            self.servertimestampfile = os.path.join(self.repo.location,
                                                    "metadata",
                                                    "timestamp.chk")

            # Parse the previous sync timestamp; an unreadable or
            # malformed file simply means "no known timestamp" (0).
            content = portage.util.grabfile(self.servertimestampfile)
            timestamp = 0
            if content:
                try:
                    timestamp = time.mktime(
                        time.strptime(content[0], TIMESTAMP_FORMAT))
                except (OverflowError, ValueError):
                    pass
            del content

            try:
                self.rsync_initial_timeout = \
                 int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
            except ValueError:
                self.rsync_initial_timeout = 15

            try:
                maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
            except SystemExit as e:
                raise  # Needed else can't exit
            except:
                # NOTE(review): deliberate bare except — any missing or
                # malformed setting falls back to unlimited retries.
                maxretries = -1  #default number of retries

            if syncuri.startswith("file://"):
                # Local filesystem source: single attempt, no DNS or
                # retry loop.  [7:] strips the "file://" prefix.
                self.proto = "file"
                dosyncuri = syncuri[7:]
                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                self._process_exitcode(exitcode, dosyncuri, out, 1)
                if exitcode == 0:
                    if unchanged:
                        self.repo_storage.abort_update()
                    else:
                        self.repo_storage.commit_update()
                        self.repo_storage.garbage_collection()
                return (exitcode, updatecache_flg)

            retries = 0
            # Split the sync URI into protocol, optional user, host
            # (possibly a bracketed IPv6 literal) and optional port.
            try:
                self.proto, user_name, hostname, port = re.split(
                    r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                    syncuri,
                    maxsplit=4)[1:5]
            except ValueError:
                writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                               noiselevel=-1,
                               level=logging.ERROR)
                return (1, False)

            self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

            if port is None:
                port = ""
            if user_name is None:
                user_name = ""
            if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
                getaddrinfo_host = hostname
            else:
                # getaddrinfo needs the brackets stripped
                getaddrinfo_host = hostname[1:-1]
            updatecache_flg = False
            all_rsync_opts = set(self.rsync_opts)
            all_rsync_opts.update(self.extra_rsync_opts)

            # Honour -4/--ipv4 and -6/--ipv6 rsync flags when resolving.
            family = socket.AF_UNSPEC
            if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
                family = socket.AF_INET
            elif socket.has_ipv6 and \
             ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
                family = socket.AF_INET6

            addrinfos = None
            uris = []

            try:
                addrinfos = getaddrinfo_validate(
                    socket.getaddrinfo(getaddrinfo_host, None, family,
                                       socket.SOCK_STREAM))
            except socket.error as e:
                # Non-fatal: fall through and let rsync resolve the
                # plain hostname itself (see bug #340817 below).
                writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                               (_unicode_decode(hostname), _unicode(e)),
                               noiselevel=-1,
                               level=logging.ERROR)

            if addrinfos:

                AF_INET = socket.AF_INET
                AF_INET6 = None
                if socket.has_ipv6:
                    AF_INET6 = socket.AF_INET6

                ips_v4 = []
                ips_v6 = []

                for addrinfo in addrinfos:
                    if addrinfo[0] == AF_INET:
                        ips_v4.append("%s" % addrinfo[4][0])
                    elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips_v6.append("[%s]" % addrinfo[4][0])

                # Shuffle for simple client-side load balancing across
                # mirror addresses.
                random.shuffle(ips_v4)
                random.shuffle(ips_v6)

                # Give priority to the address family that
                # getaddrinfo() returned first.
                if AF_INET6 is not None and addrinfos and \
                 addrinfos[0][0] == AF_INET6:
                    ips = ips_v6 + ips_v4
                else:
                    ips = ips_v4 + ips_v6

                for ip in ips:
                    uris.append(
                        syncuri.replace(
                            "//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1))

            if not uris:
                # With some configurations we need to use the plain hostname
                # rather than try to resolve the ip addresses (bug #340817).
                uris.append(syncuri)

            # reverse, for use with pop()
            uris.reverse()
            uris_orig = uris[:]

            effective_maxretries = maxretries
            if effective_maxretries < 0:
                effective_maxretries = len(uris) - 1

            local_state_unchanged = True
            while (1):
                if uris:
                    dosyncuri = uris.pop()
                elif maxretries < 0 or retries > maxretries:
                    writemsg("!!! Exhausted addresses for %s\n" %
                             _unicode_decode(hostname),
                             noiselevel=-1)
                    return (1, False)
                else:
                    # Retry budget remains: cycle through the address
                    # list again.
                    uris.extend(uris_orig)
                    dosyncuri = uris.pop()

                if (retries == 0):
                    if "--ask" in opts:
                        uq = UserQuery(opts)
                        if uq.query("Do you want to sync your ebuild repository " + \
                         "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                         enter_invalid) == "No":
                            print()
                            print("Quitting.")
                            print()
                            sys.exit(128 + signal.SIGINT)
                    self.logger(self.xterm_titles,
                                ">>> Starting rsync with " + dosyncuri)
                    if "--quiet" not in opts:
                        print(">>> Starting rsync with " + dosyncuri + "...")
                else:
                    self.logger(self.xterm_titles,
                     ">>> Starting retry %d of %d with %s" % \
                      (retries, effective_maxretries, dosyncuri))
                    writemsg_stdout(
                     "\n\n>>> Starting retry %d of %d with %s\n" % \
                     (retries, effective_maxretries, dosyncuri), noiselevel=-1)

                if dosyncuri.startswith('ssh://'):
                    # rsync-over-ssh wants "host:/path", not a URI.
                    dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                if not unchanged:
                    local_state_unchanged = False
                if is_synced:
                    break

                retries = retries + 1

                if maxretries < 0 or retries <= maxretries:
                    print(">>> Retrying...")
                else:
                    # over retries
                    # exit loop
                    exitcode = EXCEEDED_MAX_RETRIES
                    break

            self._process_exitcode(exitcode, dosyncuri, out, maxretries)

            if local_state_unchanged:
                # The quarantine download_dir is not intended to exist
                # in this case, so refer gemato to the normal repository
                # location.
                download_dir = self.repo.location
            else:
                download_dir = self.download_dir

            # if synced successfully, verify now
            if exitcode == 0 and self.verify_metamanifest:
                if gemato is None:
                    writemsg_level(
                        "!!! Unable to verify: gemato-11.0+ is required\n",
                        level=logging.ERROR,
                        noiselevel=-1)
                    exitcode = 127
                else:
                    try:
                        # we always verify the Manifest signature, in case
                        # we had to deal with key revocation case
                        m = gemato.recursiveloader.ManifestRecursiveLoader(
                            os.path.join(download_dir, 'Manifest'),
                            verify_openpgp=True,
                            openpgp_env=openpgp_env,
                            max_jobs=self.verify_jobs)
                        if not m.openpgp_signed:
                            raise RuntimeError(
                                'OpenPGP signature not found on Manifest')

                        ts = m.find_timestamp()
                        if ts is None:
                            raise RuntimeError(
                                'Timestamp not found in Manifest')
                        # Warn loudly (even in quiet mode) if the mirror
                        # looks stale beyond the configured max age.
                        if (self.max_age != 0
                                and (datetime.datetime.utcnow() - ts.ts).days >
                                self.max_age):
                            out.quiet = False
                            out.ewarn(
                                'Manifest is over %d days old, this is suspicious!'
                                % (self.max_age, ))
                            out.ewarn(
                                'You may want to try using another mirror and/or reporting this one:'
                            )
                            out.ewarn('  %s' % (dosyncuri, ))
                            out.ewarn('')
                            out.quiet = quiet

                        out.einfo('Manifest timestamp: %s UTC' % (ts.ts, ))
                        out.einfo('Valid OpenPGP signature found:')
                        out.einfo(
                            '- primary key: %s' %
                            (m.openpgp_signature.primary_key_fingerprint))
                        out.einfo('- subkey: %s' %
                                  (m.openpgp_signature.fingerprint))
                        out.einfo('- timestamp: %s UTC' %
                                  (m.openpgp_signature.timestamp))

                        # if nothing has changed, skip the actual Manifest
                        # verification
                        if not local_state_unchanged:
                            out.ebegin('Verifying %s' % (download_dir, ))
                            m.assert_directory_verifies()
                            out.eend(0)
                    except GematoException as e:
                        writemsg_level(
                            "!!! Manifest verification failed:\n%s\n" % (e, ),
                            level=logging.ERROR,
                            noiselevel=-1)
                        exitcode = 1
                        verify_failure = True

            if exitcode == 0 and not local_state_unchanged:
                self.repo_storage.commit_update()
                self.repo_storage.garbage_collection()

            return (exitcode, updatecache_flg)
        finally:
            # Don't delete the update if verification failed, in case
            # the cause needs to be investigated.
            if not verify_failure:
                self.repo_storage.abort_update()
            if openpgp_env is not None:
                openpgp_env.close()
Example #2
0
    def update(self):
        '''Internal update function which performs the transfer.

        Older rsync sync sequence (no GLEP 74 verification): refuse to
        overwrite a VCS checkout, assemble rsync options, resolve the
        mirror hostname, and retry the transfer across the resolved
        addresses until one succeeds or the retry budget is exhausted.

        Returns a 2-tuple ``(exitcode, updatecache_flg)``: ``exitcode``
        is 0 on success, non-zero on failure; ``updatecache_flg`` tells
        the caller whether the metadata cache should be updated.
        '''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        out = portage.output.EOutput()
        syncuri = self.repo.sync_uri
        # Rsyncing over a repository that is really a VCS checkout would
        # clobber it, so abort unless the user explicitly disabled this
        # safety check in repos.conf.
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        # Per-invocation rsync timeout, in seconds.
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        # Per-repo extra options from repos.conf, split shell-style.
        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        # Real local timestamp file.
        self.servertimestampfile = os.path.join(self.repo.location, "metadata",
                                                "timestamp.chk")

        # Parse the previous sync timestamp; an unreadable or malformed
        # file simply means "no known timestamp" (0).
        content = portage.util.grabfile(self.servertimestampfile)
        timestamp = 0
        if content:
            try:
                timestamp = time.mktime(
                    time.strptime(content[0], TIMESTAMP_FORMAT))
            except (OverflowError, ValueError):
                pass
        del content

        try:
            self.rsync_initial_timeout = \
             int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
        except ValueError:
            self.rsync_initial_timeout = 15

        try:
            maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit as e:
            raise  # Needed else can't exit
        except:
            # NOTE(review): deliberate bare except — any missing or
            # malformed setting falls back to unlimited retries.
            maxretries = -1  #default number of retries

        if syncuri.startswith("file://"):
            # Local filesystem source: single attempt, no DNS or retry
            # loop.  [7:] strips the "file://" prefix.
            self.proto = "file"
            dosyncuri = syncuri[7:]
            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            self._process_exitcode(exitcode, dosyncuri, out, 1)
            return (exitcode, updatecache_flg)

        retries = 0
        # Split the sync URI into protocol, optional user, host
        # (possibly a bracketed IPv6 literal) and optional port.
        try:
            self.proto, user_name, hostname, port = re.split(
                r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                syncuri,
                maxsplit=4)[1:5]
        except ValueError:
            writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                           noiselevel=-1,
                           level=logging.ERROR)
            return (1, False)

        self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

        if port is None:
            port = ""
        if user_name is None:
            user_name = ""
        if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
            getaddrinfo_host = hostname
        else:
            # getaddrinfo needs the brackets stripped
            getaddrinfo_host = hostname[1:-1]
        updatecache_flg = False
        all_rsync_opts = set(self.rsync_opts)
        all_rsync_opts.update(self.extra_rsync_opts)

        # Honour -4/--ipv4 and -6/--ipv6 rsync flags when resolving.
        family = socket.AF_UNSPEC
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6

        addrinfos = None
        uris = []

        try:
            addrinfos = getaddrinfo_validate(
                socket.getaddrinfo(getaddrinfo_host, None, family,
                                   socket.SOCK_STREAM))
        except socket.error as e:
            # Non-fatal: fall through and let rsync resolve the plain
            # hostname itself (see bug #340817 below).
            writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                           (_unicode_decode(hostname), _unicode(e)),
                           noiselevel=-1,
                           level=logging.ERROR)

        if addrinfos:

            AF_INET = socket.AF_INET
            AF_INET6 = None
            if socket.has_ipv6:
                AF_INET6 = socket.AF_INET6

            ips_v4 = []
            ips_v6 = []

            for addrinfo in addrinfos:
                if addrinfo[0] == AF_INET:
                    ips_v4.append("%s" % addrinfo[4][0])
                elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                    # IPv6 addresses need to be enclosed in square brackets
                    ips_v6.append("[%s]" % addrinfo[4][0])

            # Shuffle for simple client-side load balancing across
            # mirror addresses.
            random.shuffle(ips_v4)
            random.shuffle(ips_v6)

            # Give priority to the address family that
            # getaddrinfo() returned first.
            if AF_INET6 is not None and addrinfos and \
             addrinfos[0][0] == AF_INET6:
                ips = ips_v6 + ips_v4
            else:
                ips = ips_v4 + ips_v6

            for ip in ips:
                uris.append(
                    syncuri.replace("//" + user_name + hostname + port + "/",
                                    "//" + user_name + ip + port + "/", 1))

        if not uris:
            # With some configurations we need to use the plain hostname
            # rather than try to resolve the ip addresses (bug #340817).
            uris.append(syncuri)

        # reverse, for use with pop()
        uris.reverse()
        uris_orig = uris[:]

        effective_maxretries = maxretries
        if effective_maxretries < 0:
            effective_maxretries = len(uris) - 1

        while (1):
            if uris:
                dosyncuri = uris.pop()
            elif maxretries < 0 or retries > maxretries:
                writemsg("!!! Exhausted addresses for %s\n" %
                         _unicode_decode(hostname),
                         noiselevel=-1)
                return (1, False)
            else:
                # Retry budget remains: cycle through the address list
                # again.
                uris.extend(uris_orig)
                dosyncuri = uris.pop()

            if (retries == 0):
                if "--ask" in opts:
                    uq = UserQuery(opts)
                    if uq.query("Do you want to sync your Portage tree " + \
                     "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                     enter_invalid) == "No":
                        print()
                        print("Quitting.")
                        print()
                        sys.exit(128 + signal.SIGINT)
                self.logger(self.xterm_titles,
                            ">>> Starting rsync with " + dosyncuri)
                if "--quiet" not in opts:
                    print(">>> Starting rsync with " + dosyncuri + "...")
            else:
                self.logger(self.xterm_titles,
                 ">>> Starting retry %d of %d with %s" % \
                  (retries, effective_maxretries, dosyncuri))
                writemsg_stdout(
                 "\n\n>>> Starting retry %d of %d with %s\n" % \
                 (retries, effective_maxretries, dosyncuri), noiselevel=-1)

            if dosyncuri.startswith('ssh://'):
                # rsync-over-ssh wants "host:/path", not a URI.
                dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            if is_synced:
                break

            retries = retries + 1

            if maxretries < 0 or retries <= maxretries:
                print(">>> Retrying...")
            else:
                # over retries
                # exit loop
                exitcode = EXCEEDED_MAX_RETRIES
                break
        self._process_exitcode(exitcode, dosyncuri, out, maxretries)
        return (exitcode, updatecache_flg)
Example #3
0
File: rsync.py  Project: jonasstein/portage
	def _sync(self):
		'''Internal sync function which performs only the sync.

		Rsync sync sequence: refuse to overwrite a VCS checkout,
		assemble rsync options, resolve the mirror hostname, and retry
		the transfer across the resolved addresses until one succeeds
		or the address list is exhausted.

		Returns a 2-tuple ``(exitcode, updatecache_flg)``: ``exitcode``
		is 0 on success, non-zero on failure; ``updatecache_flg`` tells
		the caller whether the metadata cache should be updated.
		'''
		opts = self.options.get('emerge_config').opts
		self.usersync_uid = self.options.get('usersync_uid', None)
		enter_invalid = '--ask-enter-invalid' in opts
		out = portage.output.EOutput()
		syncuri = self.repo.sync_uri
		# Rsyncing over a repository that is really a VCS checkout
		# would clobber it, so refuse.
		vcs_dirs = frozenset(VCS_DIRS)
		vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
			return (1, False)
		# Per-invocation rsync timeout, in seconds.
		self.timeout=180

		rsync_opts = []
		if self.settings["PORTAGE_RSYNC_OPTS"] == "":
			rsync_opts = self._set_rsync_defaults()
		else:
			rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
		self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

		self.extra_rsync_opts = portage.util.shlex_split(
			self.settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))

		# Real local timestamp file.
		self.servertimestampfile = os.path.join(
			self.repo.location, "metadata", "timestamp.chk")

		# Parse the previous sync timestamp; an unreadable or malformed
		# file simply means "no known timestamp" (0).
		content = portage.util.grabfile(self.servertimestampfile)
		timestamp = 0
		if content:
			try:
				timestamp = time.mktime(time.strptime(content[0],
					TIMESTAMP_FORMAT))
			except (OverflowError, ValueError):
				pass
		del content

		try:
			self.rsync_initial_timeout = \
				int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
		except ValueError:
			self.rsync_initial_timeout = 15

		try:
			maxretries=int(self.settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit as e:
			raise # Needed else can't exit
		except:
			# NOTE(review): deliberate bare except — any missing or
			# malformed setting falls back to unlimited retries.
			maxretries = -1 #default number of retries

		if syncuri.startswith("file://"):
			# Local filesystem source: single attempt, no DNS or retry
			# loop.  Strip the full 7-character "file://" prefix; the
			# previous [6:] slice left a spurious extra leading "/" on
			# the path (the later "update" implementations use [7:]).
			self.proto = "file"
			dosyncuri = syncuri[7:]
			is_synced, exitcode = self._do_rsync(
				dosyncuri, timestamp, opts)
			self._process_exitcode(exitcode, dosyncuri, out, 1)
			return (exitcode, exitcode == os.EX_OK)

		retries=0
		# Split the sync URI into protocol, optional user, host
		# (possibly a bracketed IPv6 literal) and optional port.
		try:
			self.proto, user_name, hostname, port = re.split(
				r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
				syncuri, maxsplit=4)[1:5]
		except ValueError:
			writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
				noiselevel=-1, level=logging.ERROR)
			return (1, False)

		self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

		if port is None:
			port=""
		if user_name is None:
			user_name=""
		if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
			getaddrinfo_host = hostname
		else:
			# getaddrinfo needs the brackets stripped
			getaddrinfo_host = hostname[1:-1]
		updatecache_flg=True
		all_rsync_opts = set(self.rsync_opts)
		all_rsync_opts.update(self.extra_rsync_opts)

		# Honour -4/--ipv4 and -6/--ipv6 rsync flags when resolving.
		family = socket.AF_UNSPEC
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6

		addrinfos = None
		uris = []

		try:
			addrinfos = getaddrinfo_validate(
				socket.getaddrinfo(getaddrinfo_host, None,
				family, socket.SOCK_STREAM))
		except socket.error as e:
			# Non-fatal: fall through and let rsync resolve the plain
			# hostname itself (see bug #340817 below).
			writemsg_level(
				"!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
				noiselevel=-1, level=logging.ERROR)

		if addrinfos:

			AF_INET = socket.AF_INET
			AF_INET6 = None
			if socket.has_ipv6:
				AF_INET6 = socket.AF_INET6

			ips_v4 = []
			ips_v6 = []

			for addrinfo in addrinfos:
				if addrinfo[0] == AF_INET:
					ips_v4.append("%s" % addrinfo[4][0])
				elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
					# IPv6 addresses need to be enclosed in square brackets
					ips_v6.append("[%s]" % addrinfo[4][0])

			# Shuffle for simple client-side load balancing across
			# mirror addresses.
			random.shuffle(ips_v4)
			random.shuffle(ips_v6)

			# Give priority to the address family that
			# getaddrinfo() returned first.
			if AF_INET6 is not None and addrinfos and \
				addrinfos[0][0] == AF_INET6:
				ips = ips_v6 + ips_v4
			else:
				ips = ips_v4 + ips_v6

			for ip in ips:
				uris.append(syncuri.replace(
					"//" + user_name + hostname + port + "/",
					"//" + user_name + ip + port + "/", 1))

		if not uris:
			# With some configurations we need to use the plain hostname
			# rather than try to resolve the ip addresses (bug #340817).
			uris.append(syncuri)

		# reverse, for use with pop()
		uris.reverse()

		effective_maxretries = maxretries
		if effective_maxretries < 0:
			effective_maxretries = len(uris) - 1

		while (1):
			if uris:
				dosyncuri = uris.pop()
			else:
				writemsg("!!! Exhausted addresses for %s\n" % \
					hostname, noiselevel=-1)
				return (1, False)

			if (retries==0):
				if "--ask" in opts:
					uq = UserQuery(opts)
					if uq.query("Do you want to sync your Portage tree " + \
						"with the mirror at\n" + blue(dosyncuri) + bold("?"),
						enter_invalid) == "No":
						print()
						print("Quitting.")
						print()
						sys.exit(128 + signal.SIGINT)
				self.logger(self.xterm_titles,
					">>> Starting rsync with " + dosyncuri)
				if "--quiet" not in opts:
					print(">>> Starting rsync with "+dosyncuri+"...")
			else:
				self.logger(self.xterm_titles,
					">>> Starting retry %d of %d with %s" % \
						(retries, effective_maxretries, dosyncuri))
				writemsg_stdout(
					"\n\n>>> Starting retry %d of %d with %s\n" % \
					(retries, effective_maxretries, dosyncuri), noiselevel=-1)

			if dosyncuri.startswith('ssh://'):
				# rsync-over-ssh wants "host:/path", not a URI.
				dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

			is_synced, exitcode = self._do_rsync(dosyncuri, timestamp, opts)
			if is_synced:
				break

			retries=retries+1

			if maxretries < 0 or retries <= maxretries:
				print(">>> Retrying...")
			else:
				# over retries
				# exit loop
				updatecache_flg=False
				exitcode = EXCEEDED_MAX_RETRIES
				break
		self._process_exitcode(exitcode, dosyncuri, out, maxretries)
		return (exitcode, updatecache_flg)
Example #4
0
File: rsync.py  Project: gentoo/portage
	def update(self):
		'''Internal update function which performs the transfer.

		Syncs the repository at self.repo.location from self.repo.sync_uri
		via rsync (or a plain local copy for file:// URIs), retrying across
		resolved mirror addresses, and optionally verifies the result
		against a GLEP 74 MetaManifest using gemato.

		@rtype: tuple
		@return: (exitcode, updatecache_flg) — exitcode is 0 on success
			(EXCEEDED_MAX_RETRIES or another nonzero code otherwise) and
			updatecache_flg indicates whether the metadata cache should be
			regenerated afterwards.
		'''
		opts = self.options.get('emerge_config').opts
		self.usersync_uid = self.options.get('usersync_uid', None)
		enter_invalid = '--ask-enter-invalid' in opts
		quiet = '--quiet' in opts
		out = portage.output.EOutput(quiet=quiet)
		syncuri = self.repo.sync_uri
		# Refuse to rsync over a checkout that is under VCS control, since
		# rsync --delete would clobber it; sync-rsync-vcs-ignore overrides.
		if self.repo.module_specific_options.get(
			'sync-rsync-vcs-ignore', 'false').lower() == 'true':
			vcs_dirs = ()
		else:
			vcs_dirs = frozenset(VCS_DIRS)
			vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync "
				"(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
				(self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
			return (1, False)
		self.timeout=180

		# Build the rsync option list: defaults when PORTAGE_RSYNC_OPTS is
		# unset, otherwise validate the user-supplied options.
		rsync_opts = []
		if self.settings["PORTAGE_RSYNC_OPTS"] == "":
			rsync_opts = self._set_rsync_defaults()
		else:
			rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
		self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

		# Per-repo extra options from repos.conf, shell-split.
		self.extra_rsync_opts = list()
		if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
			self.extra_rsync_opts.extend(portage.util.shlex_split(
				self.repo.module_specific_options['sync-rsync-extra-opts']))

		exitcode = 0
		# Tracks whether gemato verification failed; checked in the finally
		# block to decide whether the staged update may be discarded.
		verify_failure = False

		# Process GLEP74 verification options.
		# Default verification to 'no'; it's enabled for ::gentoo
		# via default repos.conf though.
		self.verify_metamanifest = (
				self.repo.module_specific_options.get(
					'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
		# Support overriding job count.
		self.verify_jobs = self.repo.module_specific_options.get(
				'sync-rsync-verify-jobs', None)
		if self.verify_jobs is not None:
			try:
				self.verify_jobs = int(self.verify_jobs)
				if self.verify_jobs < 0:
					raise ValueError(self.verify_jobs)
			except ValueError:
				# NOTE(review): the check above rejects negatives (and
				# non-integers), while this message says "not a positive
				# integer" — 0 is accepted and means "auto". Message wording
				# could be improved upstream.
				writemsg_level("!!! sync-rsync-verify-jobs not a positive integer: %s\n" % (self.verify_jobs,),
					level=logging.WARNING, noiselevel=-1)
				self.verify_jobs = None
			else:
				if self.verify_jobs == 0:
					# Use the apparent number of processors if gemato
					# supports it.
					self.verify_jobs = None
		# Support overriding max age.
		self.max_age = self.repo.module_specific_options.get(
				'sync-rsync-verify-max-age', '')
		if self.max_age:
			try:
				self.max_age = int(self.max_age)
				if self.max_age < 0:
					raise ValueError(self.max_age)
			except ValueError:
				# NOTE(review): message says "sync-rsync-max-age" but the
				# option read above is "sync-rsync-verify-max-age".
				writemsg_level("!!! sync-rsync-max-age must be a non-negative integer: %s\n" % (self.max_age,),
					level=logging.WARNING, noiselevel=-1)
				self.max_age = 0
		else:
			# 0 disables the age check (see the verification branch below).
			self.max_age = 0

		openpgp_env = None
		if self.verify_metamanifest and gemato is not None:
			# Use isolated environment if key is specified,
			# system environment otherwise
			if self.repo.sync_openpgp_key_path is not None:
				openpgp_env = gemato.openpgp.OpenPGPEnvironment()
			else:
				openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

		try:
			# Load and update the keyring early. If it fails, then verification
			# will not be performed and the user will have to fix it and try again,
			# so we may as well bail out before actual rsync happens.
			if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
				try:
					out.einfo('Using keys from %s' % (self.repo.sync_openpgp_key_path,))
					with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
						openpgp_env.import_key(f)
					self._refresh_keys(openpgp_env)
				except (GematoException, asyncio.TimeoutError) as e:
					writemsg_level("!!! Manifest verification impossible due to keyring problem:\n%s\n"
							% (e,),
							level=logging.ERROR, noiselevel=-1)
					return (1, False)

			# Real local timestamp file.
			self.servertimestampfile = os.path.join(
				self.repo.location, "metadata", "timestamp.chk")

			# Parse the local timestamp so rsync can skip an unchanged tree;
			# an unparsable file is treated as timestamp 0 (always sync).
			content = portage.util.grabfile(self.servertimestampfile)
			timestamp = 0
			if content:
				try:
					timestamp = time.mktime(time.strptime(content[0],
						TIMESTAMP_FORMAT))
				except (OverflowError, ValueError):
					pass
			del content

			try:
				self.rsync_initial_timeout = \
					int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
			except ValueError:
				self.rsync_initial_timeout = 15

			try:
				maxretries=int(self.settings["PORTAGE_RSYNC_RETRIES"])
			except SystemExit as e:
				raise # Needed else can't exit
			except:
				maxretries = -1 #default number of retries

			# file:// URIs short-circuit the whole mirror/retry machinery:
			# one local rsync pass, then commit or abort the staged update.
			if syncuri.startswith("file://"):
				self.proto = "file"
				dosyncuri = syncuri[7:]
				unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
					dosyncuri, timestamp, opts)
				self._process_exitcode(exitcode, dosyncuri, out, 1)
				if exitcode == 0:
					if unchanged:
						self.repo_storage.abort_update()
					else:
						self.repo_storage.commit_update()
						self.repo_storage.garbage_collection()
				return (exitcode, updatecache_flg)

			retries=0
			# Split the URI into protocol, optional "user@", hostname
			# (possibly a bracketed IPv6 literal) and optional ":port".
			try:
				self.proto, user_name, hostname, port = re.split(
					r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
					syncuri, maxsplit=4)[1:5]
			except ValueError:
				writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
					noiselevel=-1, level=logging.ERROR)
				return (1, False)

			self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

			# Normalize optional regex groups to empty strings so the string
			# substitutions below work uniformly.
			if port is None:
				port=""
			if user_name is None:
				user_name=""
			if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
				getaddrinfo_host = hostname
			else:
				# getaddrinfo needs the brackets stripped
				getaddrinfo_host = hostname[1:-1]
			updatecache_flg = False
			all_rsync_opts = set(self.rsync_opts)
			all_rsync_opts.update(self.extra_rsync_opts)

			# Respect an explicit address-family restriction passed to rsync.
			family = socket.AF_UNSPEC
			if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
				family = socket.AF_INET
			elif socket.has_ipv6 and \
				("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
				family = socket.AF_INET6

			addrinfos = None
			uris = []

			# Resolve the hostname ourselves so each retry can target a
			# different mirror address.
			try:
				addrinfos = getaddrinfo_validate(
					socket.getaddrinfo(getaddrinfo_host, None,
					family, socket.SOCK_STREAM))
			except socket.error as e:
				writemsg_level(
					"!!! getaddrinfo failed for '%s': %s\n"
					% (_unicode_decode(hostname), _unicode(e)),
					noiselevel=-1, level=logging.ERROR)

			if addrinfos:

				AF_INET = socket.AF_INET
				AF_INET6 = None
				if socket.has_ipv6:
					AF_INET6 = socket.AF_INET6

				ips_v4 = []
				ips_v6 = []

				for addrinfo in addrinfos:
					if addrinfo[0] == AF_INET:
						ips_v4.append("%s" % addrinfo[4][0])
					elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
						# IPv6 addresses need to be enclosed in square brackets
						ips_v6.append("[%s]" % addrinfo[4][0])

				# Randomize within each family for crude mirror load balancing.
				random.shuffle(ips_v4)
				random.shuffle(ips_v6)

				# Give priority to the address family that
				# getaddrinfo() returned first.
				if AF_INET6 is not None and addrinfos and \
					addrinfos[0][0] == AF_INET6:
					ips = ips_v6 + ips_v4
				else:
					ips = ips_v4 + ips_v6

				# Build one candidate URI per resolved address by replacing
				# the hostname component of the original sync URI.
				for ip in ips:
					uris.append(syncuri.replace(
						"//" + user_name + hostname + port + "/",
						"//" + user_name + ip + port + "/", 1))

			if not uris:
				# With some configurations we need to use the plain hostname
				# rather than try to resolve the ip addresses (bug #340817).
				uris.append(syncuri)

			# reverse, for use with pop()
			uris.reverse()
			uris_orig = uris[:]

			# With unlimited retries (-1), try each resolved address once.
			effective_maxretries = maxretries
			if effective_maxretries < 0:
				effective_maxretries = len(uris) - 1

			# Becomes False as soon as any rsync pass changed local state;
			# controls whether verification and commit run at all.
			local_state_unchanged = True
			while (1):
				if uris:
					dosyncuri = uris.pop()
				elif maxretries < 0 or retries > maxretries:
					writemsg("!!! Exhausted addresses for %s\n"
						% _unicode_decode(hostname), noiselevel=-1)
					return (1, False)
				else:
					# Retry budget remains: cycle through the address list again.
					uris.extend(uris_orig)
					dosyncuri = uris.pop()

				if (retries==0):
					# First attempt: optionally confirm with the user, and
					# announce the chosen mirror.
					if "--ask" in opts:
						uq = UserQuery(opts)
						if uq.query("Do you want to sync your ebuild repository " + \
							"with the mirror at\n" + blue(dosyncuri) + bold("?"),
							enter_invalid) == "No":
							print()
							print("Quitting.")
							print()
							# Exit as if interrupted by SIGINT.
							sys.exit(128 + signal.SIGINT)
					self.logger(self.xterm_titles,
						">>> Starting rsync with " + dosyncuri)
					if "--quiet" not in opts:
						print(">>> Starting rsync with "+dosyncuri+"...")
				else:
					self.logger(self.xterm_titles,
						">>> Starting retry %d of %d with %s" % \
							(retries, effective_maxretries, dosyncuri))
					writemsg_stdout(
						"\n\n>>> Starting retry %d of %d with %s\n" % \
						(retries, effective_maxretries, dosyncuri), noiselevel=-1)

				# Convert ssh:// URI into rsync-over-ssh "host:/path" form.
				if dosyncuri.startswith('ssh://'):
					dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

				unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
					dosyncuri, timestamp, opts)
				if not unchanged:
					local_state_unchanged = False
				if is_synced:
					break

				retries=retries+1

				if maxretries < 0 or retries <= maxretries:
					print(">>> Retrying...")
				else:
					# over retries
					# exit loop
					exitcode = EXCEEDED_MAX_RETRIES
					break

			self._process_exitcode(exitcode, dosyncuri, out, maxretries)

			if local_state_unchanged:
				# The quarantine download_dir is not intended to exist
				# in this case, so refer gemato to the normal repository
				# location.
				download_dir = self.repo.location
			else:
				download_dir = self.download_dir

			# if synced successfully, verify now
			if exitcode == 0 and self.verify_metamanifest:
				if gemato is None:
					writemsg_level("!!! Unable to verify: gemato-11.0+ is required\n",
						level=logging.ERROR, noiselevel=-1)
					exitcode = 127
				else:
					try:
						# we always verify the Manifest signature, in case
						# we had to deal with key revocation case
						m = gemato.recursiveloader.ManifestRecursiveLoader(
								os.path.join(download_dir, 'Manifest'),
								verify_openpgp=True,
								openpgp_env=openpgp_env,
								max_jobs=self.verify_jobs)
						if not m.openpgp_signed:
							raise RuntimeError('OpenPGP signature not found on Manifest')

						ts = m.find_timestamp()
						if ts is None:
							raise RuntimeError('Timestamp not found in Manifest')
						# A stale Manifest may indicate a compromised or broken
						# mirror; warn loudly (even in --quiet mode) but do not fail.
						if (self.max_age != 0 and
								(datetime.datetime.utcnow() - ts.ts).days > self.max_age):
							out.quiet = False
							out.ewarn('Manifest is over %d days old, this is suspicious!' % (self.max_age,))
							out.ewarn('You may want to try using another mirror and/or reporting this one:')
							out.ewarn('  %s' % (dosyncuri,))
							out.ewarn('')
							out.quiet = quiet

						out.einfo('Manifest timestamp: %s UTC' % (ts.ts,))
						out.einfo('Valid OpenPGP signature found:')
						out.einfo('- primary key: %s' % (
							m.openpgp_signature.primary_key_fingerprint))
						out.einfo('- subkey: %s' % (
							m.openpgp_signature.fingerprint))
						out.einfo('- timestamp: %s UTC' % (
							m.openpgp_signature.timestamp))

						# if nothing has changed, skip the actual Manifest
						# verification
						if not local_state_unchanged:
							out.ebegin('Verifying %s' % (download_dir,))
							m.assert_directory_verifies()
							out.eend(0)
					except GematoException as e:
						writemsg_level("!!! Manifest verification failed:\n%s\n"
								% (e,),
								level=logging.ERROR, noiselevel=-1)
						exitcode = 1
						verify_failure = True

			# Commit the staged update only on full success with real changes;
			# otherwise the finally block below aborts it.
			if exitcode == 0 and not local_state_unchanged:
				self.repo_storage.commit_update()
				self.repo_storage.garbage_collection()

			return (exitcode, updatecache_flg)
		finally:
			# Don't delete the update if verification failed, in case
			# the cause needs to be investigated.
			if not verify_failure:
				# No-op if commit_update() already ran above — presumably
				# abort_update() tolerates an already-committed state; verify
				# against the repo_storage implementation.
				self.repo_storage.abort_update()
			if openpgp_env is not None:
				openpgp_env.close()