Code example #1
File: rsync.py  Project: Zlogene/portage-1
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        quiet = '--quiet' in opts
        out = portage.output.EOutput(quiet=quiet)
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        exitcode = 0
        verify_failure = False

        # Process GLEP74 verification options.
        # Default verification to 'no'; it's enabled for ::gentoo
        # via default repos.conf though.
        self.verify_metamanifest = (self.repo.module_specific_options.get(
            'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
        # Support overriding job count.
        self.verify_jobs = self.repo.module_specific_options.get(
            'sync-rsync-verify-jobs', None)
        if self.verify_jobs is not None:
            try:
                self.verify_jobs = int(self.verify_jobs)
                if self.verify_jobs < 0:
                    raise ValueError(self.verify_jobs)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-verify-jobs not a positive integer: %s\n" %
                    (self.verify_jobs, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.verify_jobs = None
            else:
                if self.verify_jobs == 0:
                    # Use the apparent number of processors if gemato
                    # supports it.
                    self.verify_jobs = None
        # Support overriding max age.
        self.max_age = self.repo.module_specific_options.get(
            'sync-rsync-verify-max-age', '')
        if self.max_age:
            try:
                self.max_age = int(self.max_age)
                if self.max_age < 0:
                    raise ValueError(self.max_age)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-max-age must be a non-negative integer: %s\n"
                    % (self.max_age, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.max_age = 0
        else:
            self.max_age = 0

        openpgp_env = None
        if self.verify_metamanifest and gemato is not None:
            # Use isolated environment if key is specified,
            # system environment otherwise
            if self.repo.sync_openpgp_key_path is not None:
                openpgp_env = gemato.openpgp.OpenPGPEnvironment()
            else:
                openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

        try:
            # Load and update the keyring early. If it fails, then verification
            # will not be performed and the user will have to fix it and try again,
            # so we may as well bail out before actual rsync happens.
            if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
                try:
                    out.einfo('Using keys from %s' %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
                        openpgp_env.import_key(f)
                    self._refresh_keys(openpgp_env)
                except (GematoException, asyncio.TimeoutError) as e:
                    writemsg_level(
                        "!!! Manifest verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1)
                    return (1, False)

            # Real local timestamp file.
            self.servertimestampfile = os.path.join(self.repo.location,
                                                    "metadata",
                                                    "timestamp.chk")

            content = portage.util.grabfile(self.servertimestampfile)
            timestamp = 0
            if content:
                try:
                    timestamp = time.mktime(
                        time.strptime(content[0], TIMESTAMP_FORMAT))
                except (OverflowError, ValueError):
                    pass
            del content

            try:
                self.rsync_initial_timeout = \
                 int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
            except ValueError:
                self.rsync_initial_timeout = 15

            try:
                maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
            except SystemExit as e:
                raise  # Needed else can't exit
            except:
                maxretries = -1  #default number of retries

            if syncuri.startswith("file://"):
                self.proto = "file"
                dosyncuri = syncuri[7:]
                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                self._process_exitcode(exitcode, dosyncuri, out, 1)
                if exitcode == 0:
                    if unchanged:
                        self.repo_storage.abort_update()
                    else:
                        self.repo_storage.commit_update()
                        self.repo_storage.garbage_collection()
                return (exitcode, updatecache_flg)

            retries = 0
            try:
                self.proto, user_name, hostname, port = re.split(
                    r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                    syncuri,
                    maxsplit=4)[1:5]
            except ValueError:
                writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                               noiselevel=-1,
                               level=logging.ERROR)
                return (1, False)

            self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

            if port is None:
                port = ""
            if user_name is None:
                user_name = ""
            if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
                getaddrinfo_host = hostname
            else:
                # getaddrinfo needs the brackets stripped
                getaddrinfo_host = hostname[1:-1]
            updatecache_flg = False
            all_rsync_opts = set(self.rsync_opts)
            all_rsync_opts.update(self.extra_rsync_opts)

            family = socket.AF_UNSPEC
            if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
                family = socket.AF_INET
            elif socket.has_ipv6 and \
             ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
                family = socket.AF_INET6

            addrinfos = None
            uris = []

            try:
                addrinfos = getaddrinfo_validate(
                    socket.getaddrinfo(getaddrinfo_host, None, family,
                                       socket.SOCK_STREAM))
            except socket.error as e:
                writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                               (_unicode_decode(hostname), _unicode(e)),
                               noiselevel=-1,
                               level=logging.ERROR)

            if addrinfos:

                AF_INET = socket.AF_INET
                AF_INET6 = None
                if socket.has_ipv6:
                    AF_INET6 = socket.AF_INET6

                ips_v4 = []
                ips_v6 = []

                for addrinfo in addrinfos:
                    if addrinfo[0] == AF_INET:
                        ips_v4.append("%s" % addrinfo[4][0])
                    elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips_v6.append("[%s]" % addrinfo[4][0])

                random.shuffle(ips_v4)
                random.shuffle(ips_v6)

                # Give priority to the address family that
                # getaddrinfo() returned first.
                if AF_INET6 is not None and addrinfos and \
                 addrinfos[0][0] == AF_INET6:
                    ips = ips_v6 + ips_v4
                else:
                    ips = ips_v4 + ips_v6

                for ip in ips:
                    uris.append(
                        syncuri.replace(
                            "//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1))

            if not uris:
                # With some configurations we need to use the plain hostname
                # rather than try to resolve the ip addresses (bug #340817).
                uris.append(syncuri)

            # reverse, for use with pop()
            uris.reverse()
            uris_orig = uris[:]

            effective_maxretries = maxretries
            if effective_maxretries < 0:
                effective_maxretries = len(uris) - 1

            local_state_unchanged = True
            while (1):
                if uris:
                    dosyncuri = uris.pop()
                elif maxretries < 0 or retries > maxretries:
                    writemsg("!!! Exhausted addresses for %s\n" %
                             _unicode_decode(hostname),
                             noiselevel=-1)
                    return (1, False)
                else:
                    uris.extend(uris_orig)
                    dosyncuri = uris.pop()

                if (retries == 0):
                    if "--ask" in opts:
                        uq = UserQuery(opts)
                        if uq.query("Do you want to sync your ebuild repository " + \
                         "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                         enter_invalid) == "No":
                            print()
                            print("Quitting.")
                            print()
                            sys.exit(128 + signal.SIGINT)
                    self.logger(self.xterm_titles,
                                ">>> Starting rsync with " + dosyncuri)
                    if "--quiet" not in opts:
                        print(">>> Starting rsync with " + dosyncuri + "...")
                else:
                    self.logger(self.xterm_titles,
                     ">>> Starting retry %d of %d with %s" % \
                      (retries, effective_maxretries, dosyncuri))
                    writemsg_stdout(
                     "\n\n>>> Starting retry %d of %d with %s\n" % \
                     (retries, effective_maxretries, dosyncuri), noiselevel=-1)

                if dosyncuri.startswith('ssh://'):
                    dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                if not unchanged:
                    local_state_unchanged = False
                if is_synced:
                    break

                retries = retries + 1

                if maxretries < 0 or retries <= maxretries:
                    print(">>> Retrying...")
                else:
                    # over retries
                    # exit loop
                    exitcode = EXCEEDED_MAX_RETRIES
                    break

            self._process_exitcode(exitcode, dosyncuri, out, maxretries)

            if local_state_unchanged:
                # The quarantine download_dir is not intended to exist
                # in this case, so refer gemato to the normal repository
                # location.
                download_dir = self.repo.location
            else:
                download_dir = self.download_dir

            # if synced successfully, verify now
            if exitcode == 0 and self.verify_metamanifest:
                if gemato is None:
                    writemsg_level(
                        "!!! Unable to verify: gemato-11.0+ is required\n",
                        level=logging.ERROR,
                        noiselevel=-1)
                    exitcode = 127
                else:
                    try:
                        # we always verify the Manifest signature, in case
                        # we had to deal with key revocation case
                        m = gemato.recursiveloader.ManifestRecursiveLoader(
                            os.path.join(download_dir, 'Manifest'),
                            verify_openpgp=True,
                            openpgp_env=openpgp_env,
                            max_jobs=self.verify_jobs)
                        if not m.openpgp_signed:
                            raise RuntimeError(
                                'OpenPGP signature not found on Manifest')

                        ts = m.find_timestamp()
                        if ts is None:
                            raise RuntimeError(
                                'Timestamp not found in Manifest')
                        if (self.max_age != 0
                                and (datetime.datetime.utcnow() - ts.ts).days >
                                self.max_age):
                            out.quiet = False
                            out.ewarn(
                                'Manifest is over %d days old, this is suspicious!'
                                % (self.max_age, ))
                            out.ewarn(
                                'You may want to try using another mirror and/or reporting this one:'
                            )
                            out.ewarn('  %s' % (dosyncuri, ))
                            out.ewarn('')
                            out.quiet = quiet

                        out.einfo('Manifest timestamp: %s UTC' % (ts.ts, ))
                        out.einfo('Valid OpenPGP signature found:')
                        out.einfo(
                            '- primary key: %s' %
                            (m.openpgp_signature.primary_key_fingerprint))
                        out.einfo('- subkey: %s' %
                                  (m.openpgp_signature.fingerprint))
                        out.einfo('- timestamp: %s UTC' %
                                  (m.openpgp_signature.timestamp))

                        # if nothing has changed, skip the actual Manifest
                        # verification
                        if not local_state_unchanged:
                            out.ebegin('Verifying %s' % (download_dir, ))
                            m.assert_directory_verifies()
                            out.eend(0)
                    except GematoException as e:
                        writemsg_level(
                            "!!! Manifest verification failed:\n%s\n" % (e, ),
                            level=logging.ERROR,
                            noiselevel=-1)
                        exitcode = 1
                        verify_failure = True

            if exitcode == 0 and not local_state_unchanged:
                self.repo_storage.commit_update()
                self.repo_storage.garbage_collection()

            return (exitcode, updatecache_flg)
        finally:
            # Don't delete the update if verification failed, in case
            # the cause needs to be investigated.
            if not verify_failure:
                self.repo_storage.abort_update()
            if openpgp_env is not None:
                openpgp_env.close()
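
The sync-uri parsing in the example above relies on re.split() with capture groups: the four groups become proto, user_name, hostname and port, and they keep their delimiters ("user@", ":873"), which is why the code later rebuilds the URI as "//" + user_name + hostname + port + "/". Below is a minimal standalone sketch of just that step; the helper name and the sample URIs are mine, for illustration.

import re

# Same pattern as in the example above.
SYNC_URI_RE = r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?"

def split_sync_uri(syncuri):
    """Return (proto, user_name, hostname, port); raise ValueError on no match."""
    proto, user_name, hostname, port = re.split(
        SYNC_URI_RE, syncuri, maxsplit=4)[1:5]
    return proto, user_name or "", hostname, port or ""

print(split_sync_uri("rsync://rsync.gentoo.org/gentoo-portage"))
# ('rsync', '', 'rsync.gentoo.org', '')
print(split_sync_uri("ssh://user@[2001:db8::1]:22/srv/portage"))
# ('ssh', 'user@', '[2001:db8::1]', ':22')
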
Code example #2
    def perform(self, qa_output):
        myunadded, mydeleted = self._vcs_unadded()

        myautoadd = self._vcs_autoadd(myunadded)

        self._vcs_deleted(mydeleted)

        changes = self.get_vcs_changed(mydeleted)

        mynew, mychanged, myremoved, no_expansion, expansion = changes

        # Manifests need to be regenerated after all other commits, so don't commit
        # them now even if they have changed.
        mymanifests = set()
        myupdates = set()
        for f in mychanged + mynew:
            if "Manifest" == os.path.basename(f):
                mymanifests.add(f)
            else:
                myupdates.add(f)
        myupdates.difference_update(myremoved)
        myupdates = list(myupdates)
        mymanifests = list(mymanifests)
        myheaders = []

        commitmessage = self.options.commitmsg
        if self.options.commitmsgfile:
            try:
                f = io.open(_unicode_encode(self.options.commitmsgfile,
                                            encoding=_encodings['fs'],
                                            errors='strict'),
                            mode='r',
                            encoding=_encodings['content'],
                            errors='replace')
                commitmessage = f.read()
                f.close()
                del f
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    portage.writemsg("!!! File Not Found:"
                                     " --commitmsgfile='%s'\n" %
                                     self.options.commitmsgfile)
                else:
                    raise
        if not commitmessage or not commitmessage.strip():
            commitmessage = self.get_new_commit_message(qa_output)

        commitmessage = commitmessage.rstrip()

        myupdates, broken_changelog_manifests = self.changelogs(
            myupdates, mymanifests, myremoved, mychanged, myautoadd, mynew,
            commitmessage)

        commit_footer = self.get_commit_footer()
        commitmessage += commit_footer

        print("* %s files being committed..." % green(str(len(myupdates))),
              end=' ')

        if self.vcs_settings.vcs not in ('cvs', 'svn'):
            # With git, bzr and hg, there's never any keyword expansion, so
            # there's no need to regenerate manifests and all files will be
            # committed in one big commit at the end.
            print()
        elif not self.repo_settings.repo_config.thin_manifest:
            self.thick_manifest(myupdates, myheaders, no_expansion, expansion)

        logging.info("myupdates: %s", myupdates)
        logging.info("myheaders: %s", myheaders)

        uq = UserQuery(self.options)
        if self.options.ask and uq.query('Commit changes?', True) != 'Yes':
            print("* aborting commit.")
            sys.exit(128 + signal.SIGINT)

        # Handle the case where committed files have keywords which
        # will change and need a priming commit before the Manifest
        # can be committed.
        if (myupdates or myremoved) and myheaders:
            self.priming_commit(myupdates, myremoved, commitmessage)

        # When files are removed and re-added, the cvs server will put /Attic/
        # inside the $Header path. This code detects the problem and corrects it
        # so that the Manifest will generate correctly. See bug #169500.
        # Use binary mode in order to avoid potential character encoding issues.
        self.clear_attic(myheaders)

        if self.scanner.repolevel == 1:
            utilities.repoman_sez("\"You're rather crazy... "
                                  "doing the entire repository.\"\n")

        if self.vcs_settings.vcs in ('cvs', 'svn') and (myupdates
                                                        or myremoved):
            for x in sorted(
                    vcs_files_to_cps(chain(myupdates, myremoved, mymanifests),
                                     self.scanner.repolevel,
                                     self.scanner.reposplit,
                                     self.scanner.categories)):
                self.repoman_settings["O"] = os.path.join(
                    self.repo_settings.repodir, x)
                digestgen(mysettings=self.repoman_settings,
                          myportdb=self.repo_settings.portdb)

        elif broken_changelog_manifests:
            for x in broken_changelog_manifests:
                self.repoman_settings["O"] = os.path.join(
                    self.repo_settings.repodir, x)
                digestgen(mysettings=self.repoman_settings,
                          myportdb=self.repo_settings.portdb)

        if self.repo_settings.sign_manifests:
            self.sign_manifest(myupdates, myremoved, mymanifests)

        if self.vcs_settings.vcs == 'git':
            # It's not safe to use the git commit -a option since there might
            # be some modified files elsewhere in the working tree that the
            # user doesn't want to commit. Therefore, call git update-index
            # in order to ensure that the index is updated with the latest
            # versions of all new and modified files in the relevant portion
            # of the working tree.
            myfiles = mymanifests + myupdates
            myfiles.sort()
            update_index_cmd = ["git", "update-index"]
            update_index_cmd.extend(f.lstrip("./") for f in myfiles)
            if self.options.pretend:
                print("(%s)" % (" ".join(update_index_cmd), ))
            else:
                retval = spawn(update_index_cmd, env=os.environ)
                if retval != os.EX_OK:
                    writemsg_level("!!! Exiting on %s (shell) "
                                   "error code: %s\n" %
                                   (self.vcs_settings.vcs, retval),
                                   level=logging.ERROR,
                                   noiselevel=-1)
                    sys.exit(retval)

        self.add_manifest(mymanifests, myheaders, myupdates, myremoved,
                          commitmessage)

        if self.options.quiet:
            return
        print()
        if self.vcs_settings.vcs:
            print("Commit complete.")
        else:
            print("repoman was too scared"
                  " by not seeing any familiar version control file"
                  " that he forgot to commit anything")
        utilities.repoman_sez(
            "\"If everyone were like you, I'd be out of business!\"\n")
        return
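
The git update-index step near the end of the example above avoids "git commit -a" so that unrelated modified files elsewhere in the working tree are left alone. Here is a hedged standalone sketch of that idea; the helper name and sample paths are mine, and Portage's spawn() is swapped for subprocess.call() so the snippet runs on its own.

import os
import subprocess

def refresh_git_index(paths, pretend=False):
    # Feed the changed paths to git update-index explicitly instead of
    # relying on "git commit -a".
    cmd = ["git", "update-index"]
    cmd.extend(sorted(p.lstrip("./") for p in paths))
    if pretend:
        print("(%s)" % " ".join(cmd))
        return 0
    return subprocess.call(cmd, env=os.environ)

# Pretend mode only, so nothing is executed:
refresh_git_index(["./app-misc/foo/Manifest", "./app-misc/foo/foo-1.0.ebuild"],
                  pretend=True)
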
Code example #3
File: rsync.py  Project: simonvanderveldt/portage
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        out = portage.output.EOutput()
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        # Real local timestamp file.
        self.servertimestampfile = os.path.join(self.repo.location, "metadata",
                                                "timestamp.chk")

        content = portage.util.grabfile(self.servertimestampfile)
        timestamp = 0
        if content:
            try:
                timestamp = time.mktime(
                    time.strptime(content[0], TIMESTAMP_FORMAT))
            except (OverflowError, ValueError):
                pass
        del content

        try:
            self.rsync_initial_timeout = \
             int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
        except ValueError:
            self.rsync_initial_timeout = 15

        try:
            maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit as e:
            raise  # Needed else can't exit
        except:
            maxretries = -1  #default number of retries

        if syncuri.startswith("file://"):
            self.proto = "file"
            dosyncuri = syncuri[7:]
            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            self._process_exitcode(exitcode, dosyncuri, out, 1)
            return (exitcode, updatecache_flg)

        retries = 0
        try:
            self.proto, user_name, hostname, port = re.split(
                r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                syncuri,
                maxsplit=4)[1:5]
        except ValueError:
            writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                           noiselevel=-1,
                           level=logging.ERROR)
            return (1, False)

        self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

        if port is None:
            port = ""
        if user_name is None:
            user_name = ""
        if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
            getaddrinfo_host = hostname
        else:
            # getaddrinfo needs the brackets stripped
            getaddrinfo_host = hostname[1:-1]
        updatecache_flg = False
        all_rsync_opts = set(self.rsync_opts)
        all_rsync_opts.update(self.extra_rsync_opts)

        family = socket.AF_UNSPEC
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6

        addrinfos = None
        uris = []

        try:
            addrinfos = getaddrinfo_validate(
                socket.getaddrinfo(getaddrinfo_host, None, family,
                                   socket.SOCK_STREAM))
        except socket.error as e:
            writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                           (_unicode_decode(hostname), _unicode(e)),
                           noiselevel=-1,
                           level=logging.ERROR)

        if addrinfos:

            AF_INET = socket.AF_INET
            AF_INET6 = None
            if socket.has_ipv6:
                AF_INET6 = socket.AF_INET6

            ips_v4 = []
            ips_v6 = []

            for addrinfo in addrinfos:
                if addrinfo[0] == AF_INET:
                    ips_v4.append("%s" % addrinfo[4][0])
                elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                    # IPv6 addresses need to be enclosed in square brackets
                    ips_v6.append("[%s]" % addrinfo[4][0])

            random.shuffle(ips_v4)
            random.shuffle(ips_v6)

            # Give priority to the address family that
            # getaddrinfo() returned first.
            if AF_INET6 is not None and addrinfos and \
             addrinfos[0][0] == AF_INET6:
                ips = ips_v6 + ips_v4
            else:
                ips = ips_v4 + ips_v6

            for ip in ips:
                uris.append(
                    syncuri.replace("//" + user_name + hostname + port + "/",
                                    "//" + user_name + ip + port + "/", 1))

        if not uris:
            # With some configurations we need to use the plain hostname
            # rather than try to resolve the ip addresses (bug #340817).
            uris.append(syncuri)

        # reverse, for use with pop()
        uris.reverse()
        uris_orig = uris[:]

        effective_maxretries = maxretries
        if effective_maxretries < 0:
            effective_maxretries = len(uris) - 1

        while (1):
            if uris:
                dosyncuri = uris.pop()
            elif maxretries < 0 or retries > maxretries:
                writemsg("!!! Exhausted addresses for %s\n" %
                         _unicode_decode(hostname),
                         noiselevel=-1)
                return (1, False)
            else:
                uris.extend(uris_orig)
                dosyncuri = uris.pop()

            if (retries == 0):
                if "--ask" in opts:
                    uq = UserQuery(opts)
                    if uq.query("Do you want to sync your Portage tree " + \
                     "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                     enter_invalid) == "No":
                        print()
                        print("Quitting.")
                        print()
                        sys.exit(128 + signal.SIGINT)
                self.logger(self.xterm_titles,
                            ">>> Starting rsync with " + dosyncuri)
                if "--quiet" not in opts:
                    print(">>> Starting rsync with " + dosyncuri + "...")
            else:
                self.logger(self.xterm_titles,
                 ">>> Starting retry %d of %d with %s" % \
                  (retries, effective_maxretries, dosyncuri))
                writemsg_stdout(
                 "\n\n>>> Starting retry %d of %d with %s\n" % \
                 (retries, effective_maxretries, dosyncuri), noiselevel=-1)

            if dosyncuri.startswith('ssh://'):
                dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            if is_synced:
                break

            retries = retries + 1

            if maxretries < 0 or retries <= maxretries:
                print(">>> Retrying...")
            else:
                # over retries
                # exit loop
                exitcode = EXCEEDED_MAX_RETRIES
                break
        self._process_exitcode(exitcode, dosyncuri, out, maxretries)
        return (exitcode, updatecache_flg)
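
The address-resolution block in the example above resolves the host once, keeps IPv4 and bracketed IPv6 literals apart, shuffles within each family, prefers whichever family getaddrinfo() listed first, and substitutes each address back into the original URI. Below is a simplified standalone sketch of that flow; the helper name is mine, error handling is reduced to the plain-hostname fallback (compare bug #340817), and IPv6 bracket stripping for the lookup is omitted, so pass a plain hostname.

import random
import socket

def expand_sync_uris(syncuri, user_name, hostname, port,
                     family=socket.AF_UNSPEC):
    try:
        addrinfos = socket.getaddrinfo(hostname, None, family,
                                       socket.SOCK_STREAM)
    except socket.error:
        return [syncuri]  # fall back to the plain hostname

    ips_v4 = [ai[4][0] for ai in addrinfos if ai[0] == socket.AF_INET]
    ips_v6 = ["[%s]" % ai[4][0] for ai in addrinfos
              if socket.has_ipv6 and ai[0] == socket.AF_INET6]
    random.shuffle(ips_v4)
    random.shuffle(ips_v6)

    # Give priority to the address family getaddrinfo() returned first.
    if socket.has_ipv6 and addrinfos and addrinfos[0][0] == socket.AF_INET6:
        ips = ips_v6 + ips_v4
    else:
        ips = ips_v4 + ips_v6

    return [syncuri.replace("//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1)
            for ip in ips] or [syncuri]
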
Code example #4
File: rsync.py  Project: jonasstein/portage
	def _sync(self):
		'''Internal sync function which performs only the sync'''
		opts = self.options.get('emerge_config').opts
		self.usersync_uid = self.options.get('usersync_uid', None)
		enter_invalid = '--ask-enter-invalid' in opts
		out = portage.output.EOutput()
		syncuri = self.repo.sync_uri
		vcs_dirs = frozenset(VCS_DIRS)
		vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
			return (1, False)
		self.timeout=180

		rsync_opts = []
		if self.settings["PORTAGE_RSYNC_OPTS"] == "":
			rsync_opts = self._set_rsync_defaults()
		else:
			rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
		self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

		self.extra_rsync_opts = portage.util.shlex_split(
			self.settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))

		# Real local timestamp file.
		self.servertimestampfile = os.path.join(
			self.repo.location, "metadata", "timestamp.chk")

		content = portage.util.grabfile(self.servertimestampfile)
		timestamp = 0
		if content:
			try:
				timestamp = time.mktime(time.strptime(content[0],
					TIMESTAMP_FORMAT))
			except (OverflowError, ValueError):
				pass
		del content

		try:
			self.rsync_initial_timeout = \
				int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
		except ValueError:
			self.rsync_initial_timeout = 15

		try:
			maxretries=int(self.settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit as e:
			raise # Needed else can't exit
		except:
			maxretries = -1 #default number of retries

		if syncuri.startswith("file://"):
			self.proto = "file"
			dosyncuri = syncuri[6:]
			is_synced, exitcode = self._do_rsync(
				dosyncuri, timestamp, opts)
			self._process_exitcode(exitcode, dosyncuri, out, 1)
			return (exitcode, exitcode == os.EX_OK)

		retries=0
		try:
			self.proto, user_name, hostname, port = re.split(
				r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
				syncuri, maxsplit=4)[1:5]
		except ValueError:
			writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
				noiselevel=-1, level=logging.ERROR)
			return (1, False)

		self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

		if port is None:
			port=""
		if user_name is None:
			user_name=""
		if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
			getaddrinfo_host = hostname
		else:
			# getaddrinfo needs the brackets stripped
			getaddrinfo_host = hostname[1:-1]
		updatecache_flg=True
		all_rsync_opts = set(self.rsync_opts)
		all_rsync_opts.update(self.extra_rsync_opts)

		family = socket.AF_UNSPEC
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6

		addrinfos = None
		uris = []

		try:
			addrinfos = getaddrinfo_validate(
				socket.getaddrinfo(getaddrinfo_host, None,
				family, socket.SOCK_STREAM))
		except socket.error as e:
			writemsg_level(
				"!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
				noiselevel=-1, level=logging.ERROR)

		if addrinfos:

			AF_INET = socket.AF_INET
			AF_INET6 = None
			if socket.has_ipv6:
				AF_INET6 = socket.AF_INET6

			ips_v4 = []
			ips_v6 = []

			for addrinfo in addrinfos:
				if addrinfo[0] == AF_INET:
					ips_v4.append("%s" % addrinfo[4][0])
				elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
					# IPv6 addresses need to be enclosed in square brackets
					ips_v6.append("[%s]" % addrinfo[4][0])

			random.shuffle(ips_v4)
			random.shuffle(ips_v6)

			# Give priority to the address family that
			# getaddrinfo() returned first.
			if AF_INET6 is not None and addrinfos and \
				addrinfos[0][0] == AF_INET6:
				ips = ips_v6 + ips_v4
			else:
				ips = ips_v4 + ips_v6

			for ip in ips:
				uris.append(syncuri.replace(
					"//" + user_name + hostname + port + "/",
					"//" + user_name + ip + port + "/", 1))

		if not uris:
			# With some configurations we need to use the plain hostname
			# rather than try to resolve the ip addresses (bug #340817).
			uris.append(syncuri)

		# reverse, for use with pop()
		uris.reverse()

		effective_maxretries = maxretries
		if effective_maxretries < 0:
			effective_maxretries = len(uris) - 1

		while (1):
			if uris:
				dosyncuri = uris.pop()
			else:
				writemsg("!!! Exhausted addresses for %s\n" % \
					hostname, noiselevel=-1)
				return (1, False)

			if (retries==0):
				if "--ask" in opts:
					uq = UserQuery(opts)
					if uq.query("Do you want to sync your Portage tree " + \
						"with the mirror at\n" + blue(dosyncuri) + bold("?"),
						enter_invalid) == "No":
						print()
						print("Quitting.")
						print()
						sys.exit(128 + signal.SIGINT)
				self.logger(self.xterm_titles,
					">>> Starting rsync with " + dosyncuri)
				if "--quiet" not in opts:
					print(">>> Starting rsync with "+dosyncuri+"...")
			else:
				self.logger(self.xterm_titles,
					">>> Starting retry %d of %d with %s" % \
						(retries, effective_maxretries, dosyncuri))
				writemsg_stdout(
					"\n\n>>> Starting retry %d of %d with %s\n" % \
					(retries, effective_maxretries, dosyncuri), noiselevel=-1)

			if dosyncuri.startswith('ssh://'):
				dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

			is_synced, exitcode = self._do_rsync(dosyncuri, timestamp, opts)
			if is_synced:
				break

			retries=retries+1

			if maxretries < 0 or retries <= maxretries:
				print(">>> Retrying...")
			else:
				# over retries
				# exit loop
				updatecache_flg=False
				exitcode = EXCEEDED_MAX_RETRIES
				break
		self._process_exitcode(exitcode, dosyncuri, out, maxretries)
		return (exitcode, updatecache_flg)
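
The rsync examples read metadata/timestamp.chk, fall back to 0 when the file is missing or unparsable, and hand the value to _do_rsync(). A small standalone sketch of that read follows, with my own helper name and an illustrative path; the format string is assumed to match what Portage writes to timestamp.chk (e.g. "Sat, 01 Jan 2022 00:30:01 +0000").

import time

TIMESTAMP_FORMAT = "%a, %d %b %Y %H:%M:%S +0000"  # assumed timestamp.chk format

def read_server_timestamp(path):
    """Return the stored server timestamp in epoch seconds, or 0."""
    try:
        with open(path, encoding="utf-8") as f:
            first_line = f.readline().strip()
    except OSError:
        return 0
    if not first_line:
        return 0
    try:
        return time.mktime(time.strptime(first_line, TIMESTAMP_FORMAT))
    except (OverflowError, ValueError):
        return 0

print(read_server_timestamp("/var/db/repos/gentoo/metadata/timestamp.chk"))
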
Code example #5
File: unmerge.py  Project: steeznson/portage
def unmerge(root_config,
            myopts,
            unmerge_action,
            unmerge_files,
            ldpath_mtimes,
            autoclean=0,
            clean_world=1,
            clean_delay=1,
            ordered=0,
            raise_on_error=0,
            scheduler=None,
            writemsg_level=portage.util.writemsg_level):
    """
	Returns os.EX_OK if no errors occur, 1 if an error occurs, and
	130 if interrupted due to a 'no' answer for --ask.
	"""

    if clean_world:
        clean_world = myopts.get('--deselect') != 'n'

    rval, pkgmap = _unmerge_display(root_config,
                                    myopts,
                                    unmerge_action,
                                    unmerge_files,
                                    clean_delay=clean_delay,
                                    ordered=ordered,
                                    writemsg_level=writemsg_level)

    if rval != os.EX_OK:
        return rval

    enter_invalid = '--ask-enter-invalid' in myopts
    vartree = root_config.trees["vartree"]
    sets = root_config.sets
    settings = root_config.settings
    mysettings = portage.config(clone=settings)
    xterm_titles = "notitles" not in settings.features

    if "--pretend" in myopts:
        #we're done... return
        return os.EX_OK
    if "--ask" in myopts:
        uq = UserQuery(myopts)
        if uq.query("Would you like to unmerge these packages?",
                    enter_invalid) == "No":
            # enter pretend mode for correct formatting of results
            myopts["--pretend"] = True
            print()
            print("Quitting.")
            print()
            return 128 + signal.SIGINT

    if not vartree.dbapi.writable:
        writemsg_level("!!! %s\n" % _("Read-only file system: %s") %
                       vartree.dbapi._dbroot,
                       level=logging.ERROR,
                       noiselevel=-1)
        return 1

    #the real unmerging begins, after a short delay unless we're raging....
    if not unmerge_action == "rage-clean" and clean_delay and not autoclean:
        countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

    all_selected = set()
    all_selected.update(*[x["selected"] for x in pkgmap])

    # Set counter variables
    curval = 1
    maxval = len(all_selected)

    for x in range(len(pkgmap)):
        for y in pkgmap[x]["selected"]:
            emergelog(xterm_titles, "=== Unmerging... (" + y + ")")
            message = ">>> Unmerging ({0} of {1}) {2}...\n".format(
                colorize("MERGE_LIST_PROGRESS", str(curval)),
                colorize("MERGE_LIST_PROGRESS", str(maxval)), y)
            writemsg_level(message, noiselevel=-1)
            curval += 1

            mysplit = y.split("/")
            #unmerge...
            retval = portage.unmerge(mysplit[0],
                                     mysplit[1],
                                     settings=mysettings,
                                     vartree=vartree,
                                     ldpath_mtimes=ldpath_mtimes,
                                     scheduler=scheduler)

            if retval != os.EX_OK:
                emergelog(xterm_titles, " !!! unmerge FAILURE: " + y)
                if raise_on_error:
                    raise UninstallFailure(retval)
                sys.exit(retval)
            else:
                if clean_world and hasattr(sets["selected"], "cleanPackage")\
                  and hasattr(sets["selected"], "lock"):
                    sets["selected"].lock()
                    if hasattr(sets["selected"], "load"):
                        sets["selected"].load()
                    sets["selected"].cleanPackage(vartree.dbapi, y)
                    sets["selected"].unlock()
                emergelog(xterm_titles, " >>> unmerge success: " + y)

    if clean_world and hasattr(sets["selected"], "remove")\
      and hasattr(sets["selected"], "lock"):
        sets["selected"].lock()
        # load is called inside remove()
        for s in root_config.setconfig.active:
            sets["selected"].remove(SETPREFIX + s)
        sets["selected"].unlock()

    return os.EX_OK
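
As the docstring above notes, a declined --ask prompt is reported with 128 + SIGINT (130 on typical systems), the same status a shell reports for a process stopped by Ctrl-C. A standalone sketch of that convention, using a plain input() prompt instead of Portage's UserQuery; the helper name is mine.

import signal

def confirm_or_abort(prompt):
    answer = input("%s [Yes/No] " % prompt).strip().lower()
    if answer not in ("y", "yes"):
        print()
        print("Quitting.")
        print()
        return 128 + signal.SIGINT  # usually 130
    return 0
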
Code example #6
File: actions.py  Project: dol-sen/portage
	def perform(self, qa_output):
		myautoadd = self._vcs_autoadd()

		self._vcs_deleted()

		changes = self.get_vcs_changed()

		mynew, mychanged, myremoved, no_expansion, expansion = changes

		# Manifests need to be regenerated after all other commits, so don't commit
		# them now even if they have changed.
		mymanifests = set()
		myupdates = set()
		for f in mychanged + mynew:
			if "Manifest" == os.path.basename(f):
				mymanifests.add(f)
			else:
				myupdates.add(f)
		myupdates.difference_update(myremoved)
		myupdates = list(myupdates)
		mymanifests = list(mymanifests)
		myheaders = []

		commitmessage = self.options.commitmsg
		if self.options.commitmsgfile:
			try:
				f = io.open(
					_unicode_encode(
						self.options.commitmsgfile,
						encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['content'], errors='replace')
				commitmessage = f.read()
				f.close()
				del f
			except (IOError, OSError) as e:
				if e.errno == errno.ENOENT:
					portage.writemsg(
						"!!! File Not Found:"
						" --commitmsgfile='%s'\n" % self.options.commitmsgfile)
				else:
					raise
			if commitmessage[:9].lower() in ("cat/pkg: ",):
				commitmessage = self.msg_prefix() + commitmessage[9:]

		if not commitmessage or not commitmessage.strip():
			commitmessage = self.get_new_commit_message(qa_output)

		commitmessage = commitmessage.rstrip()

		# Update copyright for new and changed files
		year = time.strftime('%Y', time.gmtime())
		for fn in chain(mynew, mychanged):
			if fn.endswith('.diff') or fn.endswith('.patch'):
				continue
			update_copyright(fn, year, pretend=self.options.pretend)

		myupdates, broken_changelog_manifests = self.changelogs(
					myupdates, mymanifests, myremoved, mychanged, myautoadd,
					mynew, commitmessage)

		lines = commitmessage.splitlines()
		lastline = lines[-1]
		if len(lines) == 1 or re.match(r'^\S+:\s', lastline) is None:
			commitmessage += '\n'

		commit_footer = self.get_commit_footer()
		commitmessage += commit_footer

		print("* %s files being committed..." % green(str(len(myupdates))), end=' ')

		if not self.vcs_settings.needs_keyword_expansion:
			# With some VCS types there's never any keyword expansion, so
			# there's no need to regenerate manifests and all files will be
			# committed in one big commit at the end.
			logging.debug("VCS type doesn't need keyword expansion")
			print()
		elif not self.repo_settings.repo_config.thin_manifest:
			logging.debug("perform: Calling thick_manifest()")
			self.vcs_settings.changes.thick_manifest(myupdates, myheaders,
				no_expansion, expansion)

		logging.info("myupdates: %s", myupdates)
		logging.info("myheaders: %s", myheaders)

		uq = UserQuery(self.options)
		if self.options.ask and uq.query('Commit changes?', True) != 'Yes':
			print("* aborting commit.")
			sys.exit(128 + signal.SIGINT)

		# Handle the case where committed files have keywords which
		# will change and need a priming commit before the Manifest
		# can be committed.
		if (myupdates or myremoved) and myheaders:
			self.priming_commit(myupdates, myremoved, commitmessage)

		# When files are removed and re-added, the cvs server will put /Attic/
		# inside the $Header path. This code detects the problem and corrects it
		# so that the Manifest will generate correctly. See bug #169500.
		# Use binary mode in order to avoid potential character encoding issues.
		self.vcs_settings.changes.clear_attic(myheaders)

		if self.scanner.repolevel == 1:
			utilities.repoman_sez(
				"\"You're rather crazy... "
				"doing the entire repository.\"\n")

		self.vcs_settings.changes.digest_regen(myupdates, myremoved, mymanifests,
			self.scanner, broken_changelog_manifests)

		if self.repo_settings.sign_manifests:
			self.sign_manifest(myupdates, myremoved, mymanifests)

		self.vcs_settings.changes.update_index(mymanifests, myupdates)

		self.add_manifest(mymanifests, myheaders, myupdates, myremoved, commitmessage)

		if self.options.quiet:
			return
		print()
		if self.vcs_settings.vcs:
			print("Commit complete.")
		else:
			print(
				"repoman was too scared"
				" by not seeing any familiar version control file"
				" that he forgot to commit anything")
		utilities.repoman_sez(
			"\"If everyone were like you, I'd be out of business!\"\n")
		return
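
Before appending the generated commit footer, the example above inspects the last line of the message: if the message is a single line, or its last line does not already look like a "Tag: value" trailer, an extra newline is added first so the footer does not run straight on from the body text. A small standalone sketch of that rule; the helper name, sample message and footer string are mine, for illustration.

import re

def append_footer(commitmessage, commit_footer):
    # commitmessage is assumed non-empty, as it is in the example above.
    lines = commitmessage.splitlines()
    if len(lines) == 1 or re.match(r'^\S+:\s', lines[-1]) is None:
        commitmessage += '\n'
    return commitmessage + commit_footer

print(append_footer("app-misc/foo: bump to 1.1",
                    "Signed-off-by: Jane Doe <jane@example.org>\n"))
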
Code example #7
    def perform(self, qa_output):
        myautoadd = self._vcs_autoadd()

        self._vcs_deleted()

        changes = self.get_vcs_changed()

        mynew, mychanged, myremoved, no_expansion, expansion = changes

        # Manifests need to be regenerated after all other commits, so don't commit
        # them now even if they have changed.
        mymanifests = set()
        myupdates = set()
        for f in mychanged + mynew:
            if "Manifest" == os.path.basename(f):
                mymanifests.add(f)
            else:
                myupdates.add(f)
        myupdates.difference_update(myremoved)
        myupdates = list(myupdates)
        mymanifests = list(mymanifests)
        myheaders = []

        commitmessage = self.options.commitmsg
        if self.options.commitmsgfile:
            try:
                f = io.open(_unicode_encode(self.options.commitmsgfile,
                                            encoding=_encodings['fs'],
                                            errors='strict'),
                            mode='r',
                            encoding=_encodings['content'],
                            errors='replace')
                commitmessage = f.read()
                f.close()
                del f
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    portage.writemsg("!!! File Not Found:"
                                     " --commitmsgfile='%s'\n" %
                                     self.options.commitmsgfile)
                else:
                    raise
            if commitmessage[:9].lower() in ("cat/pkg: ", ):
                commitmessage = self.msg_prefix() + commitmessage[9:]

        if not commitmessage or not commitmessage.strip():
            commitmessage = self.get_new_commit_message(qa_output)

        commitmessage = commitmessage.rstrip()

        # Update copyright for new and changed files
        year = time.strftime('%Y', time.gmtime())
        for fn in chain(mynew, mychanged):
            if fn.endswith('.diff') or fn.endswith('.patch'):
                continue
            update_copyright(fn, year, pretend=self.options.pretend)

        myupdates, broken_changelog_manifests = self.changelogs(
            myupdates, mymanifests, myremoved, mychanged, myautoadd, mynew,
            commitmessage)

        lines = commitmessage.splitlines()
        lastline = lines[-1]
        if len(lines) == 1 or re.match(r'^\S+:\s', lastline) is None:
            commitmessage += '\n'

        commit_footer = self.get_commit_footer()
        commitmessage += commit_footer

        print("* %s files being committed..." % green(str(len(myupdates))),
              end=' ')

        if not self.vcs_settings.needs_keyword_expansion:
            # With some VCS types there's never any keyword expansion, so
            # there's no need to regenerate manifests and all files will be
            # committed in one big commit at the end.
            logging.debug("VCS type doesn't need keyword expansion")
            print()
        elif not self.repo_settings.repo_config.thin_manifest:
            logging.debug("perform: Calling thick_manifest()")
            self.vcs_settings.changes.thick_manifest(myupdates, myheaders,
                                                     no_expansion, expansion)

        logging.info("myupdates: %s", myupdates)
        logging.info("myheaders: %s", myheaders)

        uq = UserQuery(self.options)
        if self.options.ask and uq.query('Commit changes?', True) != 'Yes':
            print("* aborting commit.")
            sys.exit(128 + signal.SIGINT)

        # Handle the case where committed files have keywords which
        # will change and need a priming commit before the Manifest
        # can be committed.
        if (myupdates or myremoved) and myheaders:
            self.priming_commit(myupdates, myremoved, commitmessage)

        # When files are removed and re-added, the cvs server will put /Attic/
        # inside the $Header path. This code detects the problem and corrects it
        # so that the Manifest will generate correctly. See bug #169500.
        # Use binary mode in order to avoid potential character encoding issues.
        self.vcs_settings.changes.clear_attic(myheaders)

        if self.scanner.repolevel == 1:
            utilities.repoman_sez("\"You're rather crazy... "
                                  "doing the entire repository.\"\n")

        self.vcs_settings.changes.digest_regen(myupdates, myremoved,
                                               mymanifests, self.scanner,
                                               broken_changelog_manifests)

        if self.repo_settings.sign_manifests:
            self.sign_manifest(myupdates, myremoved, mymanifests)

        self.vcs_settings.changes.update_index(mymanifests, myupdates)

        self.add_manifest(mymanifests, myheaders, myupdates, myremoved,
                          commitmessage)

        if self.options.quiet:
            return
        print()
        if self.vcs_settings.vcs:
            print("Commit complete.")
        else:
            print("repoman was too scared"
                  " by not seeing any familiar version control file"
                  " that he forgot to commit anything")
        utilities.repoman_sez(
            "\"If everyone were like you, I'd be out of business!\"\n")
        return
Code example #8
File: actions.py  Project: armills/portage
	def perform(self, qa_output):
		myunadded, mydeleted = self._vcs_unadded()

		myautoadd = self._vcs_autoadd(myunadded)

		self._vcs_deleted(mydeleted)

		changes = self.get_vcs_changed(mydeleted)

		mynew, mychanged, myremoved, no_expansion, expansion = changes

		# Manifests need to be regenerated after all other commits, so don't commit
		# them now even if they have changed.
		mymanifests = set()
		myupdates = set()
		for f in mychanged + mynew:
			if "Manifest" == os.path.basename(f):
				mymanifests.add(f)
			else:
				myupdates.add(f)
		myupdates.difference_update(myremoved)
		myupdates = list(myupdates)
		mymanifests = list(mymanifests)
		myheaders = []

		commitmessage = self.options.commitmsg
		if self.options.commitmsgfile:
			try:
				f = io.open(
					_unicode_encode(
						self.options.commitmsgfile,
						encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['content'], errors='replace')
				commitmessage = f.read()
				f.close()
				del f
			except (IOError, OSError) as e:
				if e.errno == errno.ENOENT:
					portage.writemsg(
						"!!! File Not Found:"
						" --commitmsgfile='%s'\n" % self.options.commitmsgfile)
				else:
					raise
		if not commitmessage or not commitmessage.strip():
			commitmessage = self.get_new_commit_message(qa_output)

		commitmessage = commitmessage.rstrip()

		myupdates, broken_changelog_manifests = self.changelogs(
					myupdates, mymanifests, myremoved, mychanged, myautoadd,
					mynew, commitmessage)

		commit_footer = self.get_commit_footer()
		commitmessage += commit_footer

		print("* %s files being committed..." % green(str(len(myupdates))), end=' ')

		if self.vcs_settings.vcs not in ('cvs', 'svn'):
			# With git, bzr and hg, there's never any keyword expansion, so
			# there's no need to regenerate manifests and all files will be
			# committed in one big commit at the end.
			print()
		elif not self.repo_settings.repo_config.thin_manifest:
			self.thick_manifest(myupdates, myheaders, no_expansion, expansion)

		logging.info("myupdates: %s", myupdates)
		logging.info("myheaders: %s", myheaders)

		uq = UserQuery(self.options)
		if self.options.ask and uq.query('Commit changes?', True) != 'Yes':
			print("* aborting commit.")
			sys.exit(128 + signal.SIGINT)

		# Handle the case where committed files have keywords which
		# will change and need a priming commit before the Manifest
		# can be committed.
		if (myupdates or myremoved) and myheaders:
			self.priming_commit(myupdates, myremoved, commitmessage)

		# When files are removed and re-added, the cvs server will put /Attic/
		# inside the $Header path. This code detects the problem and corrects it
		# so that the Manifest will generate correctly. See bug #169500.
		# Use binary mode in order to avoid potential character encoding issues.
		self.clear_attic(myheaders)

		if self.scanner.repolevel == 1:
			utilities.repoman_sez(
				"\"You're rather crazy... "
				"doing the entire repository.\"\n")

		if self.vcs_settings.vcs in ('cvs', 'svn') and (myupdates or myremoved):
			for x in sorted(vcs_files_to_cps(
				chain(myupdates, myremoved, mymanifests),
				self.scanner.repolevel, self.scanner.reposplit, self.scanner.categories)):
				self.repoman_settings["O"] = os.path.join(self.repo_settings.repodir, x)
				digestgen(mysettings=self.repoman_settings, myportdb=self.repo_settings.portdb)

		elif broken_changelog_manifests:
			for x in broken_changelog_manifests:
				self.repoman_settings["O"] = os.path.join(self.repo_settings.repodir, x)
				digestgen(mysettings=self.repoman_settings, myportdb=self.repo_settings.portdb)

		if self.repo_settings.sign_manifests:
			self.sign_manifest(myupdates, myremoved, mymanifests)

		if self.vcs_settings.vcs == 'git':
			# It's not safe to use the git commit -a option since there might
			# be some modified files elsewhere in the working tree that the
			# user doesn't want to commit. Therefore, call git update-index
			# in order to ensure that the index is updated with the latest
			# versions of all new and modified files in the relevant portion
			# of the working tree.
			myfiles = mymanifests + myupdates
			myfiles.sort()
			update_index_cmd = ["git", "update-index"]
			update_index_cmd.extend(f.lstrip("./") for f in myfiles)
			if self.options.pretend:
				print("(%s)" % (" ".join(update_index_cmd),))
			else:
				retval = spawn(update_index_cmd, env=os.environ)
				if retval != os.EX_OK:
					writemsg_level(
						"!!! Exiting on %s (shell) "
						"error code: %s\n" % (self.vcs_settings.vcs, retval),
						level=logging.ERROR, noiselevel=-1)
					sys.exit(retval)

		self.add_manifest(mymanifests, myheaders, myupdates, myremoved, commitmessage)

		if self.options.quiet:
			return
		print()
		if self.vcs_settings.vcs:
			print("Commit complete.")
		else:
			print(
				"repoman was too scared"
				" by not seeing any familiar version control file"
				" that he forgot to commit anything")
		utilities.repoman_sez(
			"\"If everyone were like you, I'd be out of business!\"\n")
		return
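
The cvs/svn branch in the example above regenerates digests per "category/package" directory, derived from the changed file paths via vcs_files_to_cps(). The sketch below illustrates only that mapping idea; it is my own simplified helper (it assumes paths relative to the repository root and ignores the repolevel/reposplit handling), not vcs_files_to_cps() itself.

def changed_paths_to_cps(paths):
    """Map changed file paths to the category/package directories they live in."""
    cps = set()
    for p in paths:
        parts = p.lstrip("./").split("/")
        if len(parts) >= 2:
            cps.add("/".join(parts[:2]))
    return sorted(cps)

print(changed_paths_to_cps(["app-misc/foo/foo-1.0.ebuild",
                            "app-misc/foo/Manifest",
                            "dev-util/bar/files/bar.patch"]))
# ['app-misc/foo', 'dev-util/bar']
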
Code example #9
File: actions.py  Project: mattst88/portage
    def perform(self, qa_output):
        myautoadd = self._vcs_autoadd()

        self._vcs_deleted()

        changes = self.get_vcs_changed()

        mynew, mychanged, myremoved, no_expansion, expansion = changes

        # Manifests need to be regenerated after all other commits, so don't commit
        # them now even if they have changed.
        mymanifests = set()
        myupdates = set()
        for f in mychanged + mynew:
            if "Manifest" == os.path.basename(f):
                mymanifests.add(f)
            else:
                myupdates.add(f)
        myupdates.difference_update(myremoved)
        myupdates = list(myupdates)
        mymanifests = list(mymanifests)
        myheaders = []

        commitmessage = self.options.commitmsg
        if self.options.commitmsgfile:
            try:
                f = io.open(
                    _unicode_encode(
                        self.options.commitmsgfile,
                        encoding=_encodings["fs"],
                        errors="strict",
                    ),
                    mode="r",
                    encoding=_encodings["content"],
                    errors="replace",
                )
                commitmessage = f.read()
                f.close()
                del f
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    portage.writemsg("!!! File Not Found:"
                                     " --commitmsgfile='%s'\n" %
                                     self.options.commitmsgfile)
                else:
                    raise
            if commitmessage[:9].lower() in ("cat/pkg: ", ):
                commitmessage = self.msg_prefix() + commitmessage[9:]

        if commitmessage is not None and commitmessage.strip():
            res, expl = self.verify_commit_message(commitmessage)
            if not res:
                print(bad("RepoMan does not like your commit message:"))
                print(expl)
                if self.options.force:
                    print("(but proceeding due to --force)")
                else:
                    sys.exit(1)
        else:
            commitmessage = None
            msg_qa_output = qa_output
            initial_message = None
            while True:
                commitmessage = self.get_new_commit_message(
                    msg_qa_output, commitmessage)
                res, expl = self.verify_commit_message(commitmessage)
                if res:
                    break
                else:
                    full_expl = (
                        """Issues with the commit message were found. Please fix it or remove
the whole commit message to abort.

""" + expl)
                    msg_qa_output = [
                        " %s\n" % x for x in full_expl.splitlines()
                    ] + qa_output

        commitmessage = commitmessage.rstrip()

        # Update copyright for new and changed files
        year = time.strftime("%Y", time.gmtime())
        updated_copyright = []
        for fn in chain(mynew, mychanged):
            if fn.endswith(".diff") or fn.endswith(".patch"):
                continue
            if update_copyright(fn, year, pretend=self.options.pretend):
                updated_copyright.append(fn)

        if updated_copyright and not (
                self.options.pretend
                or self.repo_settings.repo_config.thin_manifest):
            for cp in sorted(self._vcs_files_to_cps(iter(updated_copyright))):
                self._manifest_gen(cp)

        myupdates, broken_changelog_manifests = self.changelogs(
            myupdates,
            mymanifests,
            myremoved,
            mychanged,
            myautoadd,
            mynew,
            commitmessage,
        )

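        # Ensure the footer starts on its own line: append a newline unless the
        # message is multi-line and already ends with a "Tag: value" style trailer.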
        lines = commitmessage.splitlines()
        lastline = lines[-1]
        if len(lines) == 1 or re.match(r"^\S+:\s", lastline) is None:
            commitmessage += "\n"

        commit_footer = self.get_commit_footer()
        commitmessage += commit_footer

        print("* %s files being committed..." % green(str(len(myupdates))),
              end=" ")

        if not self.vcs_settings.needs_keyword_expansion:
            # With some VCS types there's never any keyword expansion, so
            # there's no need to regenerate manifests and all files will be
            # committed in one big commit at the end.
            logging.debug("VCS type doesn't need keyword expansion")
            print()
        elif not self.repo_settings.repo_config.thin_manifest:
            logging.debug("perform: Calling thick_manifest()")
            self.vcs_settings.changes.thick_manifest(myupdates, myheaders,
                                                     no_expansion, expansion)

        logging.info("myupdates: %s", myupdates)
        logging.info("myheaders: %s", myheaders)

        uq = UserQuery(self.options)
        if self.options.ask and uq.query("Commit changes?", True) != "Yes":
            print("* aborting commit.")
            sys.exit(128 + signal.SIGINT)

        # Handle the case where committed files have keywords which
        # will change and need a priming commit before the Manifest
        # can be committed.
        if (myupdates or myremoved) and myheaders:
            self.priming_commit(myupdates, myremoved, commitmessage)

        # When files are removed and re-added, the cvs server will put /Attic/
        # inside the $Header path. This code detects the problem and corrects it
        # so that the Manifest will generate correctly. See bug #169500.
        # Use binary mode in order to avoid potential character encoding issues.
        self.vcs_settings.changes.clear_attic(myheaders)

        if self.scanner.repolevel == 1:
            utilities.repoman_sez("\"You're rather crazy... "
                                  'doing the entire repository."\n')

        self.vcs_settings.changes.digest_regen(myupdates, myremoved,
                                               mymanifests, self.scanner,
                                               broken_changelog_manifests)

        if self.repo_settings.sign_manifests:
            self.sign_manifest(myupdates, myremoved, mymanifests)

        self.vcs_settings.changes.update_index(mymanifests, myupdates)

        self.add_manifest(mymanifests, myheaders, myupdates, myremoved,
                          commitmessage)

        if self.options.quiet:
            return
        print()
        if self.vcs_settings.vcs:
            print("Commit complete.")
        else:
            print("repoman was too scared"
                  " by not seeing any familiar version control file"
                  " that he forgot to commit anything")
        utilities.repoman_sez(
            '"If everyone were like you, I\'d be out of business!"\n')
        return
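perform() keeps re-prompting until verify_commit_message() accepts the message. A stripped-down sketch of that validate-or-reprompt loop follows; verify() here only checks the summary line and is a toy stand-in for repoman's real policy checks, and prompt_until_valid()/get_message are hypothetical names.

def verify(message):
    """Toy stand-in for verify_commit_message(): check only the summary line."""
    lines = message.splitlines()
    summary = lines[0].strip() if lines else ""
    if not summary:
        return False, "Empty commit message."
    if len(summary) > 69:
        return False, "Summary line is longer than 69 characters."
    return True, None

def prompt_until_valid(get_message):
    """Keep asking for a message until it passes verification."""
    message = None
    while True:
        message = get_message(message)
        ok, explanation = verify(message)
        if ok:
            return message.rstrip()
        print("Issues with the commit message were found. Please fix it or"
              " remove the whole commit message to abort.\n\n%s" % explanation)

# Hypothetical usage with a fixed message instead of an editor round-trip:
# print(prompt_until_valid(lambda prev: "app-misc/foo: version bump"))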
Code example #10
File: unmerge.py  Project: aeroniero33/portage
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Returns os.EX_OK if no errors occur, 1 if an error occurs, and
	130 if interrupted due to a 'no' answer for --ask.
	"""

	if clean_world:
		clean_world = myopts.get('--deselect') != 'n'

	rval, pkgmap = _unmerge_display(root_config, myopts,
		unmerge_action, unmerge_files,
		clean_delay=clean_delay, ordered=ordered,
		writemsg_level=writemsg_level)

	if rval != os.EX_OK:
		return rval

	enter_invalid = '--ask-enter-invalid' in myopts
	vartree = root_config.trees["vartree"]
	sets = root_config.sets
	settings = root_config.settings
	mysettings = portage.config(clone=settings)
	xterm_titles = "notitles" not in settings.features

	if "--pretend" in myopts:
		#we're done... return
		return os.EX_OK
	if "--ask" in myopts:
		uq = UserQuery(myopts)
		if uq.query("Would you like to unmerge these packages?",
			enter_invalid) == "No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True
			print()
			print("Quitting.")
			print()
			return 128 + signal.SIGINT

	if not vartree.dbapi.writable:
		writemsg_level("!!! %s\n" %
			_("Read-only file system: %s") % vartree.dbapi._dbroot,
			level=logging.ERROR, noiselevel=-1)
		return 1

	#the real unmerging begins, after a short delay unless we're raging....
	if not unmerge_action == "rage-clean" and clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	all_selected = set()
	all_selected.update(*[x["selected"] for x in pkgmap])

	# Set counter variables
	curval = 1
	maxval = len(all_selected)

	for x in range(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			message = ">>> Unmerging ({0} of {1}) {2}...\n".format(
				colorize("MERGE_LIST_PROGRESS", str(curval)),
				colorize("MERGE_LIST_PROGRESS", str(maxval)),
				y)
			writemsg_level(message, noiselevel=-1)
			curval += 1

			mysplit = y.split("/")
			#unmerge...
			retval = portage.unmerge(mysplit[0], mysplit[1],
				settings=mysettings,
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				if raise_on_error:
					raise UninstallFailure(retval)
				sys.exit(retval)
			else:
				if clean_world and hasattr(sets["selected"], "cleanPackage")\
						and hasattr(sets["selected"], "lock"):
					sets["selected"].lock()
					if hasattr(sets["selected"], "load"):
						sets["selected"].load()
					sets["selected"].cleanPackage(vartree.dbapi, y)
					sets["selected"].unlock()
				emergelog(xterm_titles, " >>> unmerge success: "+y)

	if clean_world and hasattr(sets["selected"], "remove")\
			and hasattr(sets["selected"], "lock"):
		sets["selected"].lock()
		# load is called inside remove()
		for s in root_config.setconfig.active:
			sets["selected"].remove(SETPREFIX + s)
		sets["selected"].unlock()

	return os.EX_OK
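The main loop above prints a "(N of M)" progress line for each package before unmerging it. A tiny sketch of that counting pattern follows, with a print in place of portage.unmerge(); the pkgmap entries and package names are placeholders.

pkgmap = [
    {"selected": {"app-misc/foo-1.0"}},
    {"selected": {"dev-libs/bar-2.3", "dev-libs/baz-0.1"}},
]

# Flatten all selected packages to get the total count.
all_selected = set()
all_selected.update(*[entry["selected"] for entry in pkgmap])

curval = 1
maxval = len(all_selected)
for entry in pkgmap:
    for cpv in sorted(entry["selected"]):
        print(">>> Unmerging (%d of %d) %s..." % (curval, maxval, cpv))
        curval += 1
        # The real code calls portage.unmerge(category, pkg, ...) here.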
Code example #11
File: rsync.py  Project: gentoo/portage
	def update(self):
		'''Internal update function which performs the transfer'''
		opts = self.options.get('emerge_config').opts
		self.usersync_uid = self.options.get('usersync_uid', None)
		enter_invalid = '--ask-enter-invalid' in opts
		quiet = '--quiet' in opts
		out = portage.output.EOutput(quiet=quiet)
		syncuri = self.repo.sync_uri
		if self.repo.module_specific_options.get(
			'sync-rsync-vcs-ignore', 'false').lower() == 'true':
			vcs_dirs = ()
		else:
			vcs_dirs = frozenset(VCS_DIRS)
			vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync "
				"(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
				(self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
			return (1, False)
		self.timeout=180

		rsync_opts = []
		if self.settings["PORTAGE_RSYNC_OPTS"] == "":
			rsync_opts = self._set_rsync_defaults()
		else:
			rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
		self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

		self.extra_rsync_opts = list()
		if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
			self.extra_rsync_opts.extend(portage.util.shlex_split(
				self.repo.module_specific_options['sync-rsync-extra-opts']))

		exitcode = 0
		verify_failure = False

		# Process GLEP74 verification options.
		# Default verification to 'no'; it's enabled for ::gentoo
		# via default repos.conf though.
		self.verify_metamanifest = (
				self.repo.module_specific_options.get(
					'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
		# Support overriding job count.
		self.verify_jobs = self.repo.module_specific_options.get(
				'sync-rsync-verify-jobs', None)
		if self.verify_jobs is not None:
			try:
				self.verify_jobs = int(self.verify_jobs)
				if self.verify_jobs < 0:
					raise ValueError(self.verify_jobs)
			except ValueError:
				writemsg_level("!!! sync-rsync-verify-jobs not a positive integer: %s\n" % (self.verify_jobs,),
					level=logging.WARNING, noiselevel=-1)
				self.verify_jobs = None
			else:
				if self.verify_jobs == 0:
					# Use the apparent number of processors if gemato
					# supports it.
					self.verify_jobs = None
		# Support overriding max age.
		self.max_age = self.repo.module_specific_options.get(
				'sync-rsync-verify-max-age', '')
		if self.max_age:
			try:
				self.max_age = int(self.max_age)
				if self.max_age < 0:
					raise ValueError(self.max_age)
			except ValueError:
				writemsg_level("!!! sync-rsync-max-age must be a non-negative integer: %s\n" % (self.max_age,),
					level=logging.WARNING, noiselevel=-1)
				self.max_age = 0
		else:
			self.max_age = 0

		openpgp_env = None
		if self.verify_metamanifest and gemato is not None:
			# Use isolated environment if key is specified,
			# system environment otherwise
			if self.repo.sync_openpgp_key_path is not None:
				openpgp_env = gemato.openpgp.OpenPGPEnvironment()
			else:
				openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

		try:
			# Load and update the keyring early. If it fails, then verification
			# will not be performed and the user will have to fix it and try again,
			# so we may as well bail out before actual rsync happens.
			if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
				try:
					out.einfo('Using keys from %s' % (self.repo.sync_openpgp_key_path,))
					with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
						openpgp_env.import_key(f)
					self._refresh_keys(openpgp_env)
				except (GematoException, asyncio.TimeoutError) as e:
					writemsg_level("!!! Manifest verification impossible due to keyring problem:\n%s\n"
							% (e,),
							level=logging.ERROR, noiselevel=-1)
					return (1, False)

			# Real local timestamp file.
			self.servertimestampfile = os.path.join(
				self.repo.location, "metadata", "timestamp.chk")

			content = portage.util.grabfile(self.servertimestampfile)
			timestamp = 0
			if content:
				try:
					timestamp = time.mktime(time.strptime(content[0],
						TIMESTAMP_FORMAT))
				except (OverflowError, ValueError):
					pass
			del content

			try:
				self.rsync_initial_timeout = \
					int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
			except ValueError:
				self.rsync_initial_timeout = 15

			try:
				maxretries=int(self.settings["PORTAGE_RSYNC_RETRIES"])
			except SystemExit as e:
				raise # Needed else can't exit
			except:
				maxretries = -1 #default number of retries

			if syncuri.startswith("file://"):
				self.proto = "file"
				dosyncuri = syncuri[7:]
				unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
					dosyncuri, timestamp, opts)
				self._process_exitcode(exitcode, dosyncuri, out, 1)
				if exitcode == 0:
					if unchanged:
						self.repo_storage.abort_update()
					else:
						self.repo_storage.commit_update()
						self.repo_storage.garbage_collection()
				return (exitcode, updatecache_flg)

			retries=0
			try:
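				# Split the sync URI into protocol, optional user name, hostname
				# (possibly a bracketed IPv6 literal) and optional port.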
				self.proto, user_name, hostname, port = re.split(
					r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
					syncuri, maxsplit=4)[1:5]
			except ValueError:
				writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
					noiselevel=-1, level=logging.ERROR)
				return (1, False)

			self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

			if port is None:
				port=""
			if user_name is None:
				user_name=""
			if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
				getaddrinfo_host = hostname
			else:
				# getaddrinfo needs the brackets stripped
				getaddrinfo_host = hostname[1:-1]
			updatecache_flg = False
			all_rsync_opts = set(self.rsync_opts)
			all_rsync_opts.update(self.extra_rsync_opts)

			family = socket.AF_UNSPEC
			if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
				family = socket.AF_INET
			elif socket.has_ipv6 and \
				("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
				family = socket.AF_INET6

			addrinfos = None
			uris = []

			try:
				addrinfos = getaddrinfo_validate(
					socket.getaddrinfo(getaddrinfo_host, None,
					family, socket.SOCK_STREAM))
			except socket.error as e:
				writemsg_level(
					"!!! getaddrinfo failed for '%s': %s\n"
					% (_unicode_decode(hostname), _unicode(e)),
					noiselevel=-1, level=logging.ERROR)

			if addrinfos:

				AF_INET = socket.AF_INET
				AF_INET6 = None
				if socket.has_ipv6:
					AF_INET6 = socket.AF_INET6

				ips_v4 = []
				ips_v6 = []

				for addrinfo in addrinfos:
					if addrinfo[0] == AF_INET:
						ips_v4.append("%s" % addrinfo[4][0])
					elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
						# IPv6 addresses need to be enclosed in square brackets
						ips_v6.append("[%s]" % addrinfo[4][0])

				random.shuffle(ips_v4)
				random.shuffle(ips_v6)

				# Give priority to the address family that
				# getaddrinfo() returned first.
				if AF_INET6 is not None and addrinfos and \
					addrinfos[0][0] == AF_INET6:
					ips = ips_v6 + ips_v4
				else:
					ips = ips_v4 + ips_v6

				for ip in ips:
					uris.append(syncuri.replace(
						"//" + user_name + hostname + port + "/",
						"//" + user_name + ip + port + "/", 1))

			if not uris:
				# With some configurations we need to use the plain hostname
				# rather than try to resolve the ip addresses (bug #340817).
				uris.append(syncuri)

			# reverse, for use with pop()
			uris.reverse()
			uris_orig = uris[:]

			effective_maxretries = maxretries
			if effective_maxretries < 0:
				effective_maxretries = len(uris) - 1

			local_state_unchanged = True
			while (1):
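				# Walk the candidate URIs; when the list runs dry, refill it from
				# uris_orig and keep counting retries until a sync succeeds or
				# maxretries is exceeded.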
				if uris:
					dosyncuri = uris.pop()
				elif maxretries < 0 or retries > maxretries:
					writemsg("!!! Exhausted addresses for %s\n"
						% _unicode_decode(hostname), noiselevel=-1)
					return (1, False)
				else:
					uris.extend(uris_orig)
					dosyncuri = uris.pop()

				if (retries==0):
					if "--ask" in opts:
						uq = UserQuery(opts)
						if uq.query("Do you want to sync your ebuild repository " + \
							"with the mirror at\n" + blue(dosyncuri) + bold("?"),
							enter_invalid) == "No":
							print()
							print("Quitting.")
							print()
							sys.exit(128 + signal.SIGINT)
					self.logger(self.xterm_titles,
						">>> Starting rsync with " + dosyncuri)
					if "--quiet" not in opts:
						print(">>> Starting rsync with "+dosyncuri+"...")
				else:
					self.logger(self.xterm_titles,
						">>> Starting retry %d of %d with %s" % \
							(retries, effective_maxretries, dosyncuri))
					writemsg_stdout(
						"\n\n>>> Starting retry %d of %d with %s\n" % \
						(retries, effective_maxretries, dosyncuri), noiselevel=-1)

				if dosyncuri.startswith('ssh://'):
					dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

				unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
					dosyncuri, timestamp, opts)
				if not unchanged:
					local_state_unchanged = False
				if is_synced:
					break

				retries=retries+1

				if maxretries < 0 or retries <= maxretries:
					print(">>> Retrying...")
				else:
					# over retries
					# exit loop
					exitcode = EXCEEDED_MAX_RETRIES
					break

			self._process_exitcode(exitcode, dosyncuri, out, maxretries)

			if local_state_unchanged:
				# The quarantine download_dir is not intended to exist
				# in this case, so refer gemato to the normal repository
				# location.
				download_dir = self.repo.location
			else:
				download_dir = self.download_dir

			# if synced successfully, verify now
			if exitcode == 0 and self.verify_metamanifest:
				if gemato is None:
					writemsg_level("!!! Unable to verify: gemato-11.0+ is required\n",
						level=logging.ERROR, noiselevel=-1)
					exitcode = 127
				else:
					try:
						# we always verify the Manifest signature, in case
						# we had to deal with key revocation case
						m = gemato.recursiveloader.ManifestRecursiveLoader(
								os.path.join(download_dir, 'Manifest'),
								verify_openpgp=True,
								openpgp_env=openpgp_env,
								max_jobs=self.verify_jobs)
						if not m.openpgp_signed:
							raise RuntimeError('OpenPGP signature not found on Manifest')

						ts = m.find_timestamp()
						if ts is None:
							raise RuntimeError('Timestamp not found in Manifest')
						if (self.max_age != 0 and
								(datetime.datetime.utcnow() - ts.ts).days > self.max_age):
							out.quiet = False
							out.ewarn('Manifest is over %d days old, this is suspicious!' % (self.max_age,))
							out.ewarn('You may want to try using another mirror and/or reporting this one:')
							out.ewarn('  %s' % (dosyncuri,))
							out.ewarn('')
							out.quiet = quiet

						out.einfo('Manifest timestamp: %s UTC' % (ts.ts,))
						out.einfo('Valid OpenPGP signature found:')
						out.einfo('- primary key: %s' % (
							m.openpgp_signature.primary_key_fingerprint))
						out.einfo('- subkey: %s' % (
							m.openpgp_signature.fingerprint))
						out.einfo('- timestamp: %s UTC' % (
							m.openpgp_signature.timestamp))

						# if nothing has changed, skip the actual Manifest
						# verification
						if not local_state_unchanged:
							out.ebegin('Verifying %s' % (download_dir,))
							m.assert_directory_verifies()
							out.eend(0)
					except GematoException as e:
						writemsg_level("!!! Manifest verification failed:\n%s\n"
								% (e,),
								level=logging.ERROR, noiselevel=-1)
						exitcode = 1
						verify_failure = True

			if exitcode == 0 and not local_state_unchanged:
				self.repo_storage.commit_update()
				self.repo_storage.garbage_collection()

			return (exitcode, updatecache_flg)
		finally:
			# Don't delete the update if verification failed, in case
			# the cause needs to be investigated.
			if not verify_failure:
				self.repo_storage.abort_update()
			if openpgp_env is not None:
				openpgp_env.close()
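The core of update() is the retry loop that walks a shuffled list of resolved mirror addresses, refilling the list until a sync succeeds or the retry budget is spent. A simplified sketch of that pattern follows; sync_one() is a hypothetical stand-in for _do_rsync(), IPv6 bracket handling and the user/port parts of the URI are omitted, and the host name below is a placeholder.

import random
import socket

def candidate_uris(syncuri, hostname):
    """Build one candidate URI per resolved address, shuffled; fall back to the hostname."""
    try:
        addrinfos = socket.getaddrinfo(hostname, None, socket.AF_UNSPEC,
                                       socket.SOCK_STREAM)
    except socket.error:
        return [syncuri]
    ips = [addr[4][0] for addr in addrinfos]
    random.shuffle(ips)
    return [syncuri.replace(hostname, ip, 1) for ip in ips] or [syncuri]

def sync_with_retries(syncuri, hostname, sync_one, maxretries=3):
    uris = candidate_uris(syncuri, hostname)
    uris.reverse()      # reversed for use with pop(), as in the code above
    uris_orig = uris[:]
    retries = 0
    while True:
        if uris:
            dosyncuri = uris.pop()
        elif retries > maxretries:
            return 1    # exhausted all addresses and retries
        else:
            uris.extend(uris_orig)
            dosyncuri = uris.pop()
        if sync_one(dosyncuri):
            return 0
        retries += 1

# Hypothetical usage (sync_one always failing, to exercise the retry path):
# sync_with_retries("rsync://rsync.example.org/gentoo-portage",
#                   "rsync.example.org", lambda uri: False)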