Example #1
File: Admin.py Project: feuri/bcfg2
 def create_key(self):
     """Creates a bcfg2.key at the directory specifed by keypath."""
     cmd = Executor(timeout=120)
     subject = "/C=%s/ST=%s/L=%s/CN=%s'" % (
         self.data['country'], self.data['state'], self.data['location'],
         self.data['shostname'])
     key = cmd.run(["openssl", "req", "-batch", "-x509", "-nodes",
                    "-subj", subject, "-days", "1000",
                    "-newkey", "rsa:2048",
                    "-keyout", self.data['keypath'], "-noout"])
     if not key.success:
         print("Error generating key: %s" % key.error)
         return
     os.chmod(self.data['keypath'], stat.S_IRUSR | stat.S_IWUSR)  # 0600
     csr = cmd.run(["openssl", "req", "-batch", "-new", "-subj", subject,
                    "-key", self.data['keypath']])
     if not csr.success:
         print("Error generating certificate signing request: %s" %
               csr.error)
         return
     cert = cmd.run(["openssl", "x509", "-req", "-days", "1000",
                     "-signkey", self.data['keypath'],
                     "-out", self.data['certpath']],
                    inputdata=csr.stdout)
     if not cert.success:
         print("Error signing certificate: %s" % cert.error)
         return
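
Every example on this page drives external commands through the same Executor interface: construct Executor (optionally with a timeout), call run() with an argument list plus optional inputdata, shell, or cwd, and then inspect success, stdout, stderr, error, and retval on the returned object. The sketch below is not the Bcfg2.Utils implementation; it is a minimal stand-in whose attribute and parameter names are inferred from the calls in these examples, meant only to make the assumed interface explicit.

import subprocess


class ExecResult(object):
    """Hypothetical result container mirroring what the examples read."""

    def __init__(self, retval, stdout, stderr):
        self.retval = retval           # numeric exit status
        self.stdout = stdout           # captured standard output
        self.stderr = stderr           # captured standard error
        self.success = (retval == 0)   # every example branches on this
        self.error = stderr or "command returned %s" % retval


def run(command, inputdata=None, shell=False, cwd=None, timeout=None):
    """Run a command and wrap the outcome, roughly as Executor.run() is used above."""
    proc = subprocess.Popen(command, shell=shell, cwd=cwd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate(input=inputdata, timeout=timeout)
    return ExecResult(proc.returncode, stdout, stderr)
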
Example #2
File: Admin.py Project: tomaszov/bcfg2
 def create_key(self):
     """Creates a bcfg2.key at the directory specifed by keypath."""
     cmd = Executor(timeout=120)
     subject = "/C=%s/ST=%s/L=%s/CN=%s" % (
         self.data['country'], self.data['state'], self.data['location'],
         self.data['shostname'])
     key = cmd.run(["openssl", "req", "-batch", "-x509", "-nodes",
                    "-subj", subject, "-days", "1000",
                    "-newkey", "rsa:2048",
                    "-keyout", self.data['keypath'], "-noout"])
     if not key.success:
         print("Error generating key: %s" % key.error)
         return
     os.chmod(self.data['keypath'], stat.S_IRUSR | stat.S_IWUSR)  # 0600
     csr = cmd.run(["openssl", "req", "-batch", "-new", "-subj", subject,
                    "-key", self.data['keypath']])
     if not csr.success:
         print("Error generating certificate signing request: %s" %
               csr.error)
         return
     cert = cmd.run(["openssl", "x509", "-req", "-days", "1000",
                     "-signkey", self.data['keypath'],
                     "-out", self.data['certpath']],
                    inputdata=csr.stdout)
     if not cert.success:
         print("Error signing certificate: %s" % cert.error)
         return
Example #3
class Fossil(Bcfg2.Server.Plugin.Version):
    """ The Fossil plugin provides a revision interface for Bcfg2
    repos using fossil. """
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "_FOSSIL_"

    def __init__(self, core):
        Bcfg2.Server.Plugin.Version.__init__(self, core)
        self.cmd = Executor()
        self.logger.debug("Initialized Fossil plugin with fossil directory %s"
                          % self.vcs_path)

    def get_revision(self):
        """Read fossil revision information for the Bcfg2 repository."""
        result = self.cmd.run(["env LC_ALL=C", "fossil", "info"],
                              shell=True, cwd=Bcfg2.Options.setup.vcs_root)
        try:
            revision = None
            for line in result.stdout.splitlines():
                ldata = line.split(': ')
                if ldata[0].strip() == 'checkout':
                    revision = ldata[1].strip().split(' ')[0]
            return revision
        except (IndexError, AttributeError):
            msg = "Failed to read revision from Fossil: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
Example #4
class Fossil(Bcfg2.Server.Plugin.Version):
    """ The Fossil plugin provides a revision interface for Bcfg2
    repos using fossil. """
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "_FOSSIL_"

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
        self.cmd = Executor()
        self.logger.debug(
            "Initialized Fossil plugin with fossil directory %s" %
            self.vcs_path)

    def get_revision(self):
        """Read fossil revision information for the Bcfg2 repository."""
        result = self.cmd.run(["env LC_ALL=C", "fossil", "info"],
                              shell=True,
                              cwd=self.vcs_root)
        try:
            revision = None
            for line in result.stdout.splitlines():
                ldata = line.split(': ')
                if ldata[0].strip() == 'checkout':
                    revision = ldata[1].strip().split(' ')[0]
            return revision
        except (IndexError, AttributeError):
            msg = "Failed to read revision from Fossil: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
Example #5
class CfgExternalCommandVerifier(CfgVerifier):
    """ Invoke an external script to verify
    :ref:`server-plugins-generators-cfg` file contents """

    #: Handle :file:`:test` files
    __basenames__ = [':test']

    def __init__(self, name, specific, encoding):
        CfgVerifier.__init__(self, name, specific, encoding)
        self.cmd = []
        self.exc = Executor(timeout=30)
    __init__.__doc__ = CfgVerifier.__init__.__doc__

    def verify_entry(self, entry, metadata, data):
        try:
            result = self.exc.run(self.cmd, inputdata=data)
            if not result.success:
                raise CfgVerificationError(result.error)
        except OSError:
            raise CfgVerificationError(sys.exc_info()[1])
    verify_entry.__doc__ = CfgVerifier.verify_entry.__doc__

    def handle_event(self, event):
        CfgVerifier.handle_event(self, event)
        if not self.data:
            return
        self.cmd = []
        if not os.access(self.name, os.X_OK):
            bangpath = self.data.splitlines()[0].strip()
            if bangpath.startswith("#!"):
                self.cmd.extend(shlex.split(bangpath[2:].strip()))
            else:
                raise PluginExecutionError("Cannot execute %s" % self.name)
        self.cmd.append(self.name)
    handle_event.__doc__ = CfgVerifier.handle_event.__doc__
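
The verifier above pipes the generated file contents to an external ":test" command on stdin and treats a nonzero exit status as a verification failure; if the :test file itself is not executable, handle_event() reads its shebang line to find an interpreter to prepend. A hypothetical :test script could therefore be as simple as the sketch below (the placeholder check is purely illustrative, not part of Bcfg2).

#!/usr/bin/env python
# Hypothetical ":test" verifier: read the candidate file content from stdin
# and exit nonzero, with a message on stderr, if the content is unacceptable.
import sys

content = sys.stdin.read()
if "PLACEHOLDER" in content:
    sys.stderr.write("placeholder text left in generated file\n")
    sys.exit(1)
sys.exit(0)
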
Example #6
File: Admin.py Project: feuri/bcfg2
    def run(self, setup):
        if setup.outfile:
            fmt = setup.outfile.split('.')[-1]
        else:
            fmt = 'png'

        exc = Executor()
        cmd = ["dot", "-T", fmt]
        if setup.outfile:
            cmd.extend(["-o", setup.outfile])
        inputlist = ["digraph groups {",
                     '\trankdir="LR";',
                     self.metadata.viz(setup.includehosts,
                                       setup.includebundles,
                                       setup.includekey,
                                       setup.only_client,
                                       self.colors)]
        if setup.includekey:
            inputlist.extend(
                ["\tsubgraph cluster_key {",
                 '\tstyle="filled";',
                 '\tcolor="lightblue";',
                 '\tBundle [ shape="septagon" ];',
                 '\tGroup [shape="ellipse"];',
                 '\tGroup Category [shape="trapezium"];\n',
                 '\tProfile [style="bold", shape="ellipse"];',
                 '\tHblock [label="Host1|Host2|Host3",shape="record"];',
                 '\tlabel="Key";',
                 "\t}"])
        inputlist.append("}")
        idata = "\n".join(inputlist)
        try:
            result = exc.run(cmd, inputdata=idata)
        except OSError:
            # on some systems (RHEL 6), you cannot run dot with
            # shell=True.  on others (Gentoo with Python 2.7), you
            # must.  In yet others (RHEL 5), either way works.  I have
            # no idea what the difference is, but it's kind of a PITA.
            result = exc.run(cmd, shell=True, inputdata=idata)
        if not result.success:
            self.errExit("Error running %s: %s" % (cmd, result.error))
        if not setup.outfile:
            print(result.stdout)
Example #7
File: Admin.py Project: tomaszov/bcfg2
    def run(self, setup):
        if setup.outfile:
            fmt = setup.outfile.split('.')[-1]
        else:
            fmt = 'png'

        exc = Executor()
        cmd = ["dot", "-T", fmt]
        if setup.outfile:
            cmd.extend(["-o", setup.outfile])
        inputlist = ["digraph groups {",
                     '\trankdir="LR";',
                     self.metadata.viz(setup.includehosts,
                                       setup.includebundles,
                                       setup.includekey,
                                       setup.only_client,
                                       self.colors)]
        if setup.includekey:
            inputlist.extend(
                ["\tsubgraph cluster_key {",
                 '\tstyle="filled";',
                 '\tcolor="lightblue";',
                 '\tBundle [ shape="septagon" ];',
                 '\tGroup [shape="ellipse"];',
                 '\tGroup Category [shape="trapezium"];\n',
                 '\tProfile [style="bold", shape="ellipse"];',
                 '\tHblock [label="Host1|Host2|Host3",shape="record"];',
                 '\tlabel="Key";',
                 "\t}"])
        inputlist.append("}")
        idata = "\n".join(inputlist)
        try:
            result = exc.run(cmd, inputdata=idata)
        except OSError:
            # on some systems (RHEL 6), you cannot run dot with
            # shell=True.  on others (Gentoo with Python 2.7), you
            # must.  In yet others (RHEL 5), either way works.  I have
            # no idea what the difference is, but it's kind of a PITA.
            result = exc.run(cmd, shell=True, inputdata=idata)
        if not result.success:
            self.errExit("Error running %s: %s" % (cmd, result.error))
        if not setup.outfile:
            print(result.stdout)
Example #8
File: Viz.py Project: rcuza/bcfg2
    def Visualize(self,
                  hosts=False,
                  bundles=False,
                  key=False,
                  only_client=None,
                  output=None):
        """Build visualization of groups file."""
        if output:
            fmt = output.split('.')[-1]
        else:
            fmt = 'png'

        exc = Executor()
        cmd = ["dot", "-T", fmt]
        if output:
            cmd.extend(["-o", output])
        idata = [
            "digraph groups {", '\trankdir="LR";',
            self.metadata.viz(hosts, bundles, key, only_client, self.colors)
        ]
        if key:
            idata.extend([
                "\tsubgraph cluster_key {", '\tstyle="filled";',
                '\tcolor="lightblue";', '\tBundle [ shape="septagon" ];',
                '\tGroup [shape="ellipse"];',
                '\tProfile [style="bold", shape="ellipse"];',
                '\tHblock [label="Host1|Host2|Host3",shape="record"];',
                '\tlabel="Key";', "\t}"
            ])
        idata.append("}")
        try:
            result = exc.run(cmd, inputdata=idata)
        except OSError:
            # on some systems (RHEL 6), you cannot run dot with
            # shell=True.  on others (Gentoo with Python 2.7), you
            # must.  In yet others (RHEL 5), either way works.  I have
            # no idea what the difference is, but it's kind of a PITA.
            result = exc.run(cmd, shell=True, inputdata=idata)
        if not result.success:
            print("Error running %s: %s" % (cmd, result.error))
            raise SystemExit(result.retval)
Example #9
File: Viz.py Project: danfoster/bcfg2
    def Visualize(self, hosts=False, bundles=False, key=False,
                  only_client=None, output=None):
        """Build visualization of groups file."""
        if output:
            fmt = output.split('.')[-1]
        else:
            fmt = 'png'

        exc = Executor()
        cmd = ["dot", "-T", fmt]
        if output:
            cmd.extend(["-o", output])
        idata = ["digraph groups {",
                 '\trankdir="LR";',
                 self.metadata.viz(hosts, bundles,
                                   key, only_client, self.colors)]
        if key:
            idata.extend(
                ["\tsubgraph cluster_key {",
                 '\tstyle="filled";',
                 '\tcolor="lightblue";',
                 '\tBundle [ shape="septagon" ];',
                 '\tGroup [shape="ellipse"];',
                 '\tProfile [style="bold", shape="ellipse"];',
                 '\tHblock [label="Host1|Host2|Host3",shape="record"];',
                 '\tlabel="Key";',
                 "\t}"])
        idata.append("}")
        try:
            result = exc.run(cmd, inputdata=idata)
        except OSError:
            # on some systems (RHEL 6), you cannot run dot with
            # shell=True.  on others (Gentoo with Python 2.7), you
            # must.  In yet others (RHEL 5), either way works.  I have
            # no idea what the difference is, but it's kind of a PITA.
            result = exc.run(cmd, shell=True, inputdata=idata)
        if not result.success:
            print("Error running %s: %s" % (cmd, result.error))
            raise SystemExit(result.retval)
Example #10
File: Darcs.py Project: xschlef/bcfg2
class Darcs(Bcfg2.Server.Plugin.Version):
    """ Darcs is a version plugin for dealing with Bcfg2 repos stored
    in the Darcs VCS. """
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "_darcs"

    def __init__(self, core):
        Bcfg2.Server.Plugin.Version.__init__(self, core)
        self.cmd = Executor()
        self.logger.debug("Initialized Darcs plugin with darcs directory %s" %
                          self.vcs_path)

    def get_revision(self):
        """Read Darcs changeset information for the Bcfg2 repository."""
        result = self.cmd.run(["env LC_ALL=C", "darcs", "changes"],
                              shell=True, cwd=Bcfg2.Options.setup.vcs_root)
        if result.success:
            return result.stdout.splitlines()[0].strip()
        else:
            msg = "Failed to read revision from darcs: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
Example #11
File: Cvs.py Project: rcuza/bcfg2
class Cvs(Bcfg2.Server.Plugin.Version):
    """ The Cvs plugin provides a revision interface for Bcfg2 repos
    using cvs."""
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "CVSROOT"

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
        self.cmd = Executor()
        self.logger.debug("Initialized cvs plugin with CVS directory %s" %
                          self.vcs_path)

    def get_revision(self):
        """Read cvs revision information for the Bcfg2 repository."""
        result = self.cmd.run(["env LC_ALL=C", "cvs", "log"],
                              shell=True, cwd=self.vcs_root)
        try:
            return result.stdout.splitlines()[0].strip()
        except (IndexError, AttributeError):
            msg = "Failed to read revision from CVS: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
Example #12
class Trigger(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.ClientRunHooks,
              Bcfg2.Server.Plugin.DirectoryBacked):
    """Trigger is a plugin that calls external scripts (on the server)."""
    __author__ = '*****@*****.**'

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
        Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
        self.cmd = Executor()

    def async_run(self, args):
        """ Run the trigger script asynchronously in a forked process
        """
        pid = os.fork()
        if pid:
            os.waitpid(pid, 0)
        else:
            dpid = os.fork()
            if not dpid:
                self.debug_log("Running %s" %
                               " ".join(pipes.quote(a) for a in args))
                result = self.cmd.run(args)
                if not result.success:
                    self.logger.error("Trigger: Error running %s: %s" %
                                      (args[0], result.error))
                elif result.stderr:
                    self.debug_log("Trigger: Error: %s" % result.stderr)
            os._exit(0)  # pylint: disable=W0212

    def end_client_run(self, metadata):
        args = [
            metadata.hostname, '-p', metadata.profile, '-g',
            ':'.join([g for g in metadata.groups])
        ]
        for notifier in self.entries.keys():
            npath = os.path.join(self.data, notifier)
            self.async_run([npath] + args)
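
async_run() above uses a double fork so the trigger script runs detached from the request-handling process: the server forks, immediately reaps that child, and the grandchild runs the command and calls os._exit() so it never falls back into server code (the grandchild is reparented to init, which reaps it). A stripped-down sketch of the same pattern, with an illustrative function name and assuming a POSIX platform:

import os
import subprocess


def run_detached(args):
    """Sketch of the double-fork pattern used by Trigger.async_run."""
    pid = os.fork()
    if pid:
        # parent: reap the intermediate child right away
        os.waitpid(pid, 0)
        return
    # intermediate child: fork again so the worker is orphaned (no zombie)
    if os.fork() == 0:
        subprocess.call(args)  # grandchild: run the actual command
    os._exit(0)  # both intermediate child and grandchild exit here
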
Example #13
class CfgExternalCommandVerifier(CfgVerifier):
    """ Invoke an external script to verify
    :ref:`server-plugins-generators-cfg` file contents """

    #: Handle :file:`:test` files
    __basenames__ = [':test']

    def __init__(self, name, specific, encoding):
        CfgVerifier.__init__(self, name, specific, encoding)
        self.cmd = []
        self.exc = Executor(timeout=30)

    __init__.__doc__ = CfgVerifier.__init__.__doc__

    def verify_entry(self, entry, metadata, data):
        try:
            result = self.exc.run(self.cmd, inputdata=data)
            if not result.success:
                raise CfgVerificationError(result.error)
        except OSError:
            raise CfgVerificationError(sys.exc_info()[1])

    verify_entry.__doc__ = CfgVerifier.verify_entry.__doc__

    def handle_event(self, event):
        CfgVerifier.handle_event(self, event)
        if not self.data:
            return
        self.cmd = []
        if not os.access(self.name, os.X_OK):
            bangpath = self.data.splitlines()[0].strip()
            if bangpath.startswith("#!"):
                self.cmd.extend(shlex.split(bangpath[2:].strip()))
            else:
                raise PluginExecutionError("Cannot execute %s" % self.name)
        self.cmd.append(self.name)

    handle_event.__doc__ = CfgVerifier.handle_event.__doc__
Example #14
class Trigger(Bcfg2.Server.Plugin.Plugin,
              Bcfg2.Server.Plugin.ClientRunHooks,
              Bcfg2.Server.Plugin.DirectoryBacked):
    """Trigger is a plugin that calls external scripts (on the server)."""
    __author__ = '*****@*****.**'

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
        Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
        self.cmd = Executor()

    def async_run(self, args):
        """ Run the trigger script asynchronously in a forked process
        """
        pid = os.fork()
        if pid:
            os.waitpid(pid, 0)
        else:
            dpid = os.fork()
            if not dpid:
                self.debug_log("Running %s" % " ".join(pipes.quote(a)
                                                       for a in args))
                result = self.cmd.run(args)
                if not result.success:
                    self.logger.error("Trigger: Error running %s: %s" %
                                      (args[0], result.error))
                elif result.stderr:
                    self.debug_log("Trigger: Error: %s" % result.stderr)
            os._exit(0)  # pylint: disable=W0212

    def end_client_run(self, metadata):
        args = [metadata.hostname, '-p', metadata.profile, '-g',
                ':'.join([g for g in metadata.groups])]
        for notifier in self.entries.keys():
            npath = os.path.join(self.data, notifier)
            self.async_run([npath] + args)
Example #15
File: Svn.py Project: rcuza/bcfg2
class Svn(Bcfg2.Server.Plugin.Version):
    """Svn is a version plugin for dealing with Bcfg2 repos."""
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = ".svn"
    if HAS_SVN:
        __rmi__ = Bcfg2.Server.Plugin.Version.__rmi__ + ['Update', 'Commit']
    else:
        __vcs_metadata_path__ = ".svn"

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)

        self.revision = None
        self.svn_root = None
        self.client = None
        self.cmd = None
        if not HAS_SVN:
            self.logger.debug("Svn: PySvn not found, using CLI interface to "
                              "SVN")
            self.cmd = Executor()
        else:
            self.client = pysvn.Client()
            # pylint: disable=E1101
            choice = pysvn.wc_conflict_choice.postpone
            try:
                resolution = self.core.setup.cfp.get(
                    "svn",
                    "conflict_resolution").replace('-', '_')
                if resolution in ["edit", "launch", "working"]:
                    self.logger.warning("Svn: Conflict resolver %s requires "
                                        "manual intervention, using %s" %
                                        choice)
                else:
                    choice = getattr(pysvn.wc_conflict_choice, resolution)
            except AttributeError:
                self.logger.warning("Svn: Conflict resolver %s does not "
                                    "exist, using %s" % choice)
            except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
                self.logger.info("Svn: No conflict resolution method "
                                 "selected, using %s" % choice)
            # pylint: enable=E1101
            self.debug_log("Svn: Conflicts will be resolved with %s" %
                           choice)
            self.client.callback_conflict_resolver = \
                self.get_conflict_resolver(choice)

            try:
                if self.core.setup.cfp.get(
                        "svn",
                        "always_trust").lower() == "true":
                    self.client.callback_ssl_server_trust_prompt = \
                        self.ssl_server_trust_prompt
            except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
                self.logger.debug("Svn: Using subversion cache for SSL "
                                  "certificate trust")

            try:
                if (self.core.setup.cfp.get("svn", "user") and
                    self.core.setup.cfp.get("svn", "password")):
                    self.client.callback_get_login = \
                        self.get_login
            except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
                self.logger.info("Svn: Using subversion cache for "
                                 "password-based authetication")

        self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
                          self.vcs_path)

    # pylint: disable=W0613
    def get_login(self, realm, username, may_save):
        """ PySvn callback to get credentials for HTTP basic authentication """
        self.logger.debug("Svn: Logging in with username: %s" %
                          self.core.setup.cfp.get("svn", "user"))
        return True, \
            self.core.setup.cfp.get("svn", "user"), \
            self.core.setup.cfp.get("svn", "password"), \
            False
    # pylint: enable=W0613

    def ssl_server_trust_prompt(self, trust_dict):
        """ PySvn callback to always trust SSL certificates from SVN server """
        self.logger.debug("Svn: Trusting SSL certificate from %s, "
                          "issued by %s for realm %s" %
                          (trust_dict['hostname'],
                           trust_dict['issuer_dname'],
                           trust_dict['realm']))
        return True, trust_dict['failures'], False

    def get_conflict_resolver(self, choice):
        """ Get a PySvn conflict resolution callback """
        def callback(conflict_description):
            """ PySvn callback function to resolve conflicts """
            self.logger.info("Svn: Resolving conflict for %s with %s" %
                             (conflict_description['path'], choice))
            return choice, None, False

        return callback

    def get_revision(self):
        """Read svn revision information for the Bcfg2 repository."""
        msg = None
        if HAS_SVN:
            try:
                info = self.client.info(self.vcs_root)
                self.revision = info.revision
                self.svn_root = info.url
                return str(self.revision.number)
            except pysvn.ClientError:  # pylint: disable=E1101
                msg = "Svn: Failed to get revision: %s" % sys.exc_info()[1]
        else:
            result = self.cmd.run(["env LC_ALL=C", "svn", "info",
                                   self.vcs_root],
                                  shell=True)
            if result.success:
                self.revision = [line.split(': ')[1]
                                 for line in result.stdout.splitlines()
                                 if line.startswith('Revision:')][-1]
                return self.revision
            else:
                msg = "Failed to read svn info: %s" % result.error
        self.revision = None
        raise Bcfg2.Server.Plugin.PluginExecutionError(msg)

    def Update(self):
        '''Svn.Update() => True|False\nUpdate svn working copy\n'''
        try:
            old_revision = self.revision.number
            self.revision = self.client.update(self.vcs_root, recurse=True)[0]
        except pysvn.ClientError:  # pylint: disable=E1101
            err = sys.exc_info()[1]
            # try to be smart about the error we got back
            details = None
            if "callback_ssl_server_trust_prompt" in str(err):
                details = "SVN server certificate is not trusted"
            elif "callback_get_login" in str(err):
                details = "SVN credentials not cached"

            if details is None:
                self.logger.error("Svn: Failed to update server repository",
                                  exc_info=1)
            else:
                self.logger.error("Svn: Failed to update server repository: "
                                  "%s" % details)
            return False

        if old_revision == self.revision.number:
            self.logger.debug("repository is current")
        else:
            self.logger.info("Updated %s from revision %s to %s" %
                             (self.vcs_root, old_revision,
                              self.revision.number))
        return True

    def Commit(self):
        """Svn.Commit() => True|False\nCommit svn repository\n"""
        # First try to update
        if not self.Update():
            self.logger.error("Failed to update svn repository, refusing to "
                              "commit changes")
            return False

        try:
            self.revision = self.client.checkin([self.vcs_root],
                                                'Svn: autocommit',
                                                recurse=True)
            self.revision = self.client.update(self.vcs_root, recurse=True)[0]
            self.logger.info("Svn: Commited changes. At %s" %
                             self.revision.number)
            return True
        except pysvn.ClientError:  # pylint: disable=E1101
            err = sys.exc_info()[1]
            # try to be smart about the error we got back
            details = None
            if "callback_ssl_server_trust_prompt" in str(err):
                details = "SVN server certificate is not trusted"
            elif "callback_get_login" in str(err):
                details = "SVN credentials not cached"

            if details is None:
                self.logger.error("Svn: Failed to commit changes",
                                  exc_info=1)
            else:
                self.logger.error("Svn: Failed to commit changes: %s" %
                                  details)
            return False
Example #16
File: SSLCA.py Project: dhutty/bcfg2
class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
    """ Entry set to handle SSLCA entries and XML files """
    def __init__(self, _, path, entry_type, parent=None):
        Bcfg2.Server.Plugin.EntrySet.__init__(self, os.path.basename(path),
                                              path, entry_type)
        self.parent = parent
        self.key = None
        self.cert = None
        self.cmd = Executor(timeout=120)

    def handle_event(self, event):
        action = event.code2str()
        fpath = os.path.join(self.path, event.filename)

        if event.filename == 'key.xml':
            if action in ['exists', 'created', 'changed']:
                self.key = SSLCAKeySpec(fpath)
            self.key.HandleEvent(event)
        elif event.filename == 'cert.xml':
            if action in ['exists', 'created', 'changed']:
                self.cert = SSLCACertSpec(fpath)
            self.cert.HandleEvent(event)
        else:
            Bcfg2.Server.Plugin.EntrySet.handle_event(self, event)

    def build_key(self, entry, metadata):
        """
        either grabs a preexisting key hostfile, or triggers the generation
        of a new key if one doesn't exist.
        """
        # TODO: verify key fits the specs
        filename = "%s.H_%s" % (os.path.basename(entry.get('name')),
                                metadata.hostname)
        self.logger.info("SSLCA: Generating new key %s" % filename)
        key_spec = self.key.get_spec(metadata)
        ktype = key_spec['type']
        bits = key_spec['bits']
        if ktype == 'rsa':
            cmd = ["openssl", "genrsa", bits]
        elif ktype == 'dsa':
            cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits]
        self.debug_log("SSLCA: Generating new key: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSLCA: Failed to generate key %s for "
                                       "%s: %s" % (entry.get("name"),
                                                   metadata.hostname,
                                                   result.error))
        open(os.path.join(self.path, filename), 'w').write(result.stdout)
        return result.stdout

    def build_cert(self, entry, metadata, keyfile):
        """ generate a new cert """
        filename = "%s.H_%s" % (os.path.basename(entry.get('name')),
                                metadata.hostname)
        self.logger.info("SSLCA: Generating new cert %s" % filename)
        cert_spec = self.cert.get_spec(metadata)
        ca = self.parent.get_ca(cert_spec['ca'])
        req_config = None
        req = None
        try:
            req_config = self.build_req_config(metadata)
            req = self.build_request(keyfile, req_config, metadata)
            days = cert_spec['days']
            cmd = ["openssl", "ca", "-config", ca['config'], "-in", req,
                   "-days", days, "-batch"]
            passphrase = ca.get('passphrase')
            if passphrase:
                cmd.extend(["-passin", "pass:%s" % passphrase])

                def _scrub_pass(arg):
                    """ helper to scrub the passphrase from the
                    argument list """
                    if arg.startswith("pass:"******"pass:******"
                    else:
                        return arg
            else:
                _scrub_pass = lambda a: a

            self.debug_log("SSLCA: Generating new certificate: %s" %
                           " ".join(_scrub_pass(a) for a in cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise PluginExecutionError("SSLCA: Failed to generate cert: %s"
                                           % result.error)
        finally:
            try:
                if req_config and os.path.exists(req_config):
                    os.unlink(req_config)
                if req and os.path.exists(req):
                    os.unlink(req)
            except OSError:
                self.logger.error("SSLCA: Failed to unlink temporary files: %s"
                                  % sys.exc_info()[1])
        cert = result.stdout
        if cert_spec['append_chain'] and 'chaincert' in ca:
            cert += open(ca['chaincert']).read()

        open(os.path.join(self.path, filename), 'w').write(cert)
        return cert

    def build_req_config(self, metadata):
        """
        generates a temporary openssl configuration file that is
        used to generate the required certificate request
        """
        # create temp request config file
        fd, fname = tempfile.mkstemp()
        cfp = ConfigParser.ConfigParser({})
        cfp.optionxform = str
        defaults = {
            'req': {
                'default_md': 'sha1',
                'distinguished_name': 'req_distinguished_name',
                'req_extensions': 'v3_req',
                'x509_extensions': 'v3_req',
                'prompt': 'no'
            },
            'req_distinguished_name': {},
            'v3_req': {
                'subjectAltName': '@alt_names'
            },
            'alt_names': {}
        }
        for section in list(defaults.keys()):
            cfp.add_section(section)
            for key in defaults[section]:
                cfp.set(section, key, defaults[section][key])
        cert_spec = self.cert.get_spec(metadata)
        altnamenum = 1
        altnames = cert_spec['subjectaltname']
        altnames.extend(list(metadata.aliases))
        altnames.append(metadata.hostname)
        for altname in altnames:
            cfp.set('alt_names', 'DNS.' + str(altnamenum), altname)
            altnamenum += 1
        for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
            if cert_spec[item]:
                cfp.set('req_distinguished_name', item, cert_spec[item])
        cfp.set('req_distinguished_name', 'CN', metadata.hostname)
        self.debug_log("SSLCA: Writing temporary request config to %s" % fname)
        try:
            cfp.write(os.fdopen(fd, 'w'))
        except IOError:
            raise PluginExecutionError("SSLCA: Failed to write temporary CSR "
                                       "config file: %s" % sys.exc_info()[1])
        return fname

    def build_request(self, keyfile, req_config, metadata):
        """
        creates the certificate request
        """
        fd, req = tempfile.mkstemp()
        os.close(fd)
        days = self.cert.get_spec(metadata)['days']
        cmd = ["openssl", "req", "-new", "-config", req_config,
               "-days", days, "-key", keyfile, "-text", "-out", req]
        self.debug_log("SSLCA: Generating new CSR: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSLCA: Failed to generate CSR: %s" %
                                       result.error)
        return req

    def verify_cert(self, filename, keyfile, entry, metadata):
        """ Perform certification verification against the CA and
        against the key """
        ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
        do_verify = ca.get('chaincert')
        if do_verify:
            return (self.verify_cert_against_ca(filename, entry, metadata) and
                    self.verify_cert_against_key(filename, keyfile))
        return True

    def verify_cert_against_ca(self, filename, entry, metadata):
        """
        check that a certificate validates against the ca cert,
        and that it has not expired.
        """
        ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
        chaincert = ca.get('chaincert')
        cert = os.path.join(self.path, filename)
        cmd = ["openssl", "verify"]
        is_root = ca.get('root_ca', "false").lower() == 'true'
        if is_root:
            cmd.append("-CAfile")
        else:
            # verifying based on an intermediate cert
            cmd.extend(["-purpose", "sslserver", "-untrusted"])
        cmd.extend([chaincert, cert])
        self.debug_log("SSLCA: Verifying %s against CA: %s" %
                       (entry.get("name"), " ".join(cmd)))
        result = self.cmd.run(cmd)
        if result.stdout == cert + ": OK\n":
            self.debug_log("SSLCA: %s verified successfully against CA" %
                           entry.get("name"))
            return True
        self.logger.warning("SSLCA: %s failed verification against CA: %s" %
                            (entry.get("name"), result.error))
        return False

    def _get_modulus(self, fname, ftype="x509"):
        """ get the modulus from the given file """
        cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
        self.debug_log("SSLCA: Getting modulus of %s for verification: %s" %
                       (fname, " ".join(cmd)))
        result = self.cmd.run(cmd)
        if not result.success:
            self.logger.warning("SSLCA: Failed to get modulus of %s: %s" %
                                (fname, result.error))
        return result.stdout.strip()

    def verify_cert_against_key(self, filename, keyfile):
        """
        check that a certificate validates against its private key.
        """

        certfile = os.path.join(self.path, filename)
        cert = self._get_modulus(certfile)
        key = self._get_modulus(keyfile, ftype="rsa")
        if cert == key:
            self.debug_log("SSLCA: %s verified successfully against key %s" %
                           (filename, keyfile))
            return True
        self.logger.warning("SSLCA: %s failed verification against key %s" %
                            (filename, keyfile))
        return False

    def bind_entry(self, entry, metadata):
        if self.key:
            self.bind_info_to_entry(entry, metadata)
            try:
                return self.best_matching(metadata).bind_entry(entry, metadata)
            except PluginExecutionError:
                entry.text = self.build_key(entry, metadata)
                entry.set("type", "file")
                return entry
        elif self.cert:
            key = self.cert.get_spec(metadata)['key']
            cleanup_keyfile = False
            try:
                keyfile = self.parent.entries[key].best_matching(metadata).name
            except PluginExecutionError:
                cleanup_keyfile = True
                # create a temp file with the key in it
                fd, keyfile = tempfile.mkstemp()
                os.chmod(keyfile, 384)  # 0600
                el = lxml.etree.Element('Path', name=key)
                self.parent.core.Bind(el, metadata)
                os.fdopen(fd, 'w').write(el.text)

            try:
                self.bind_info_to_entry(entry, metadata)
                try:
                    best = self.best_matching(metadata)
                    if self.verify_cert(best.name, keyfile, entry, metadata):
                        return best.bind_entry(entry, metadata)
                except PluginExecutionError:
                    pass
                # if we get here, it's because either a) there was no best
                # matching entry; or b) the existing cert did not verify
                entry.text = self.build_cert(entry, metadata, keyfile)
                entry.set("type", "file")
                return entry
            finally:
                if cleanup_keyfile:
                    try:
                        os.unlink(keyfile)
                    except OSError:
                        err = sys.exc_info()[1]
                        self.logger.error("SSLCA: Failed to unlink temporary "
                                          "key %s: %s" % (keyfile, err))
Example #17
File: Svn.py Project: fabaff/bcfg2
class Svn(Bcfg2.Server.Plugin.Version):
    """Svn is a version plugin for dealing with Bcfg2 repos."""
    options = Bcfg2.Server.Plugin.Version.options + [
        Bcfg2.Options.Option(
            cf=("svn", "conflict_resolution"), dest="svn_conflict_resolution",
            type=lambda v: v.replace("-", "_"),
            choices=dir(pysvn.wc_conflict_choice),
            default=pysvn.wc_conflict_choice.postpone,
            help="SVN conflict resolution method"),
        Bcfg2.Options.Option(
            cf=("svn", "user"), dest="svn_user", help="SVN username"),
        Bcfg2.Options.Option(
            cf=("svn", "password"), dest="svn_password", help="SVN password"),
        Bcfg2.Options.BooleanOption(
            cf=("svn", "always_trust"), dest="svn_trust_ssl",
            help="Always trust SSL certs from SVN server")]

    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = ".svn"
    if HAS_SVN:
        __rmi__ = Bcfg2.Server.Plugin.Version.__rmi__ + ['Update', 'Commit']
    else:
        __vcs_metadata_path__ = ".svn"

    def __init__(self, core):
        Bcfg2.Server.Plugin.Version.__init__(self, core)

        self.revision = None
        self.svn_root = None
        self.client = None
        self.cmd = None
        if not HAS_SVN:
            self.logger.debug("Svn: PySvn not found, using CLI interface to "
                              "SVN")
            self.cmd = Executor()
        else:
            self.client = pysvn.Client()
            self.debug_log("Svn: Conflicts will be resolved with %s" %
                           Bcfg2.Options.setup.svn_conflict_resolution)
            self.client.callback_conflict_resolver = self.conflict_resolver

            if Bcfg2.Options.setup.svn_trust_ssl:
                self.client.callback_ssl_server_trust_prompt = \
                    self.ssl_server_trust_prompt

            if (Bcfg2.Options.setup.svn_user and
                    Bcfg2.Options.setup.svn_password):
                self.client.callback_get_login = self.get_login

        self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
                          self.vcs_path)

    def get_login(self, realm, username, may_save):  # pylint: disable=W0613
        """ PySvn callback to get credentials for HTTP basic authentication """
        self.logger.debug("Svn: Logging in with username: %s" %
                          Bcfg2.Options.setup.svn_user)
        return (True,
                Bcfg2.Options.setup.svn_user,
                Bcfg2.Options.setup.svn_password,
                False)

    def ssl_server_trust_prompt(self, trust_dict):
        """ PySvn callback to always trust SSL certificates from SVN server """
        self.logger.debug("Svn: Trusting SSL certificate from %s, "
                          "issued by %s for realm %s" %
                          (trust_dict['hostname'],
                           trust_dict['issuer_dname'],
                           trust_dict['realm']))
        return True, trust_dict['failures'], False

    def conflict_resolver(self, conflict_description):
        """ PySvn callback function to resolve conflicts """
        self.logger.info("Svn: Resolving conflict for %s with %s" %
                         (conflict_description['path'],
                          Bcfg2.Options.setup.svn_conflict_resolution))
        return Bcfg2.Options.setup.svn_conflict_resolution, None, False

    def get_revision(self):
        """Read svn revision information for the Bcfg2 repository."""
        msg = None
        if HAS_SVN:
            try:
                info = self.client.info(Bcfg2.Options.setup.vcs_root)
                self.revision = info.revision
                self.svn_root = info.url
                return str(self.revision.number)
            except pysvn.ClientError:  # pylint: disable=E1101
                msg = "Svn: Failed to get revision: %s" % sys.exc_info()[1]
        else:
            result = self.cmd.run(["env LC_ALL=C", "svn", "info",
                                   Bcfg2.Options.setup.vcs_root],
                                  shell=True)
            if result.success:
                self.revision = [line.split(': ')[1]
                                 for line in result.stdout.splitlines()
                                 if line.startswith('Revision:')][-1]
                return self.revision
            else:
                msg = "Failed to read svn info: %s" % result.error
        self.revision = None
        raise Bcfg2.Server.Plugin.PluginExecutionError(msg)

    def Update(self):
        '''Svn.Update() => True|False\nUpdate svn working copy\n'''
        try:
            old_revision = self.revision.number
            self.revision = self.client.update(Bcfg2.Options.setup.vcs_root,
                                               recurse=True)[0]
        except pysvn.ClientError:  # pylint: disable=E1101
            err = sys.exc_info()[1]
            # try to be smart about the error we got back
            details = None
            if "callback_ssl_server_trust_prompt" in str(err):
                details = "SVN server certificate is not trusted"
            elif "callback_get_login" in str(err):
                details = "SVN credentials not cached"

            if details is None:
                self.logger.error("Svn: Failed to update server repository",
                                  exc_info=1)
            else:
                self.logger.error("Svn: Failed to update server repository: "
                                  "%s" % details)
            return False

        if old_revision == self.revision.number:
            self.logger.debug("repository is current")
        else:
            self.logger.info("Updated %s from revision %s to %s" %
                             (Bcfg2.Options.setup.vcs_root, old_revision,
                              self.revision.number))
        return True

    def Commit(self):
        """Svn.Commit() => True|False\nCommit svn repository\n"""
        # First try to update
        if not self.Update():
            self.logger.error("Failed to update svn repository, refusing to "
                              "commit changes")
            return False

        try:
            self.revision = self.client.checkin([Bcfg2.Options.setup.vcs_root],
                                                'Svn: autocommit',
                                                recurse=True)
            self.revision = self.client.update(Bcfg2.Options.setup.vcs_root,
                                               recurse=True)[0]
            self.logger.info("Svn: Commited changes. At %s" %
                             self.revision.number)
            return True
        except pysvn.ClientError:  # pylint: disable=E1101
            err = sys.exc_info()[1]
            # try to be smart about the error we got back
            details = None
            if "callback_ssl_server_trust_prompt" in str(err):
                details = "SVN server certificate is not trusted"
            elif "callback_get_login" in str(err):
                details = "SVN credentials not cached"

            if details is None:
                self.logger.error("Svn: Failed to commit changes",
                                  exc_info=1)
            else:
                self.logger.error("Svn: Failed to commit changes: %s" %
                                  details)
            return False
Example #18
class Client(object):
    """ The main Bcfg2 client class """

    options = Proxy.ComponentProxy.options + [
        Bcfg2.Options.Common.syslog,
        Bcfg2.Options.Common.interactive,
        Bcfg2.Options.BooleanOption(
            "-q", "--quick", help="Disable some checksum verification"),
        Bcfg2.Options.Option(
            cf=('client', 'probe_timeout'),
            type=Bcfg2.Options.Types.timeout,
            help="Timeout when running client probes"),
        Bcfg2.Options.Option(
            "-b", "--only-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Only configure the given bundle(s)'),
        Bcfg2.Options.Option(
            "-B", "--except-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Configure everything except the given bundle(s)'),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.BooleanOption(
                "-Q", "--bundle-quick",
                help='Only verify the given bundle(s)'),
            Bcfg2.Options.Option(
                '-r', '--remove',
                choices=['all', 'services', 'packages', 'users'],
                help='Force removal of additional configuration items')),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.PathOption(
                '-f', '--file', type=argparse.FileType('rb'),
                help='Configure from a file rather than querying the server'),
            Bcfg2.Options.PathOption(
                '-c', '--cache', type=argparse.FileType('wb'),
                help='Store the configuration in a file')),
        Bcfg2.Options.BooleanOption(
            '--exit-on-probe-failure', default=True,
            cf=('client', 'exit_on_probe_failure'),
            help="The client should exit if a probe fails"),
        Bcfg2.Options.Option(
            '-p', '--profile', cf=('client', 'profile'),
            help='Assert the given profile for the host'),
        Bcfg2.Options.Option(
            '-l', '--decision', cf=('client', 'decision'),
            choices=['whitelist', 'blacklist', 'none'],
            help='Run client in server decision list mode'),
        Bcfg2.Options.BooleanOption(
            "-O", "--no-lock", help='Omit lock check'),
        Bcfg2.Options.PathOption(
            cf=('components', 'lockfile'), default='/var/lock/bcfg2.run',
            help='Client lock file'),
        Bcfg2.Options.BooleanOption(
            "-n", "--dry-run", help='Do not actually change the system'),
        Bcfg2.Options.Option(
            "-D", "--drivers", cf=('client', 'drivers'),
            type=Bcfg2.Options.Types.comma_list,
            default=[m[1] for m in walk_packages(path=Tools.__path__)],
            action=ClientDriverAction, help='Client drivers'),
        Bcfg2.Options.BooleanOption(
            "-e", "--show-extra", help='Enable extra entry output'),
        Bcfg2.Options.BooleanOption(
            "-k", "--kevlar", help='Run in bulletproof mode'),
        Bcfg2.Options.BooleanOption(
            "-i", "--only-important",
            help='Only configure the important entries')]

    def __init__(self):
        self.config = None
        self._proxy = None
        self.logger = logging.getLogger('bcfg2')
        self.cmd = Executor(Bcfg2.Options.setup.probe_timeout)
        self.tools = []
        self.times = dict()
        self.times['initialization'] = time.time()

        if Bcfg2.Options.setup.bundle_quick:
            if (not Bcfg2.Options.setup.only_bundles and
                    not Bcfg2.Options.setup.except_bundles):
                self.logger.error("-Q option requires -b or -B")
                raise SystemExit(1)
        if Bcfg2.Options.setup.remove == 'services':
            self.logger.error("Service removal is nonsensical; "
                              "removed services will only be disabled")
        if not Bcfg2.Options.setup.server.startswith('https://'):
            Bcfg2.Options.setup.server = \
                'https://' + Bcfg2.Options.setup.server

        #: A dict of the state of each entry.  Keys are the entries.
        #: Values are boolean: True means that the entry is good,
        #: False means that the entry is bad.
        self.states = {}
        self.whitelist = []
        self.blacklist = []
        self.removal = []
        self.unhandled = []
        self.logger = logging.getLogger(__name__)

    def _probe_failure(self, probename, msg):
        """ handle failure of a probe in the way the user wants us to
        (exit or continue) """
        message = "Failed to execute probe %s: %s" % (probename, msg)
        if Bcfg2.Options.setup.exit_on_probe_failure:
            self.fatal_error(message)
        else:
            self.logger.error(message)

    def run_probe(self, probe):
        """Execute probe."""
        name = probe.get('name')
        self.logger.info("Running probe %s" % name)
        ret = XML.Element("probe-data", name=name, source=probe.get('source'))
        try:
            scripthandle, scriptname = tempfile.mkstemp()
            if sys.hexversion >= 0x03000000:
                script = os.fdopen(scripthandle, 'w',
                                   encoding=Bcfg2.Options.setup.encoding)
            else:
                script = os.fdopen(scripthandle, 'w')
            try:
                script.write("#!%s\n" %
                             (probe.attrib.get('interpreter', '/bin/sh')))
                if sys.hexversion >= 0x03000000:
                    script.write(probe.text)
                else:
                    script.write(probe.text.encode('utf-8'))
                script.close()
                os.chmod(scriptname,
                         stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                         stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
                         stat.S_IWUSR)  # 0755
                rv = self.cmd.run(scriptname)
                if rv.stderr:
                    self.logger.warning("Probe %s has error output: %s" %
                                        (name, rv.stderr))
                if not rv.success:
                    self._probe_failure(name, "Return value %s" % rv.retval)
                self.logger.info("Probe %s has result:" % name)
                self.logger.info(rv.stdout)
                if sys.hexversion >= 0x03000000:
                    ret.text = rv.stdout
                else:
                    ret.text = rv.stdout.decode('utf-8')
            finally:
                os.unlink(scriptname)
        except SystemExit:
            raise
        except:
            self._probe_failure(name, sys.exc_info()[1])
        return ret

    def fatal_error(self, message):
        """Signal a fatal error."""
        self.logger.error("Fatal error: %s" % (message))
        raise SystemExit(1)

    @property
    def proxy(self):
        """ get an XML-RPC proxy to the server """
        if self._proxy is None:
            self._proxy = Proxy.ComponentProxy()
        return self._proxy

    def run_probes(self):
        """ run probes and upload probe data """
        try:
            probes = XML.XML(str(self.proxy.GetProbes()))
        except (Proxy.ProxyError,
                Proxy.CertificateError,
                socket.gaierror,
                socket.error):
            err = sys.exc_info()[1]
            self.fatal_error("Failed to download probes from bcfg2: %s" % err)
        except XML.ParseError:
            err = sys.exc_info()[1]
            self.fatal_error("Server returned invalid probe requests: %s" %
                             err)

        self.times['probe_download'] = time.time()

        # execute probes
        probedata = XML.Element("ProbeData")
        for probe in probes.findall(".//probe"):
            probedata.append(self.run_probe(probe))

        if len(probes.findall(".//probe")) > 0:
            try:
                # upload probe responses
                self.proxy.RecvProbeData(
                    XML.tostring(probedata,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to upload probe data: %s" % err)

        self.times['probe_upload'] = time.time()

    def get_config(self):
        """ load the configuration, either from the cached
        configuration file (-f), or from the server """
        if Bcfg2.Options.setup.file:
            # read config from file
            try:
                self.logger.debug("Reading cached configuration from %s" %
                                  Bcfg2.Options.setup.file.name)
                return Bcfg2.Options.setup.file.read()
            except IOError:
                self.fatal_error("Failed to read cached configuration from: %s"
                                 % Bcfg2.Options.setup.file.name)
        else:
            # retrieve config from server
            if Bcfg2.Options.setup.profile:
                try:
                    self.proxy.AssertProfile(Bcfg2.Options.setup.profile)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to set client profile: %s" % err)

            try:
                self.proxy.DeclareVersion(__version__)
            except (xmlrpclib.Fault,
                    Proxy.ProxyError,
                    Proxy.CertificateError,
                    socket.gaierror,
                    socket.error):
                err = sys.exc_info()[1]
                self.fatal_error("Failed to declare version: %s" % err)

            self.run_probes()

            if Bcfg2.Options.setup.decision in ['whitelist', 'blacklist']:
                try:
                    # TODO: read decision list from --decision-list
                    Bcfg2.Options.setup.decision_list = \
                        self.proxy.GetDecisionList(
                            Bcfg2.Options.setup.decision)
                    self.logger.info("Got decision list from server:")
                    self.logger.info(Bcfg2.Options.setup.decision_list)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to get decision list: %s" % err)

            try:
                rawconfig = self.proxy.GetConfig().encode('utf-8')
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to download configuration from "
                                 "Bcfg2: %s" % err)

            self.times['config_download'] = time.time()

        if Bcfg2.Options.setup.cache:
            try:
                Bcfg2.Options.setup.cache.write(rawconfig)
                os.chmod(Bcfg2.Options.setup.cache.name, 384)  # 0600
            except IOError:
                self.logger.warning("Failed to write config cache file %s" %
                                    (Bcfg2.Options.setup.cache.name))
            self.times['caching'] = time.time()

        return rawconfig

    def parse_config(self, rawconfig):
        """ Parse the XML configuration received from the Bcfg2 server """
        try:
            self.config = XML.XML(rawconfig)
        except XML.ParseError:
            syntax_error = sys.exc_info()[1]
            self.fatal_error("The configuration could not be parsed: %s" %
                             syntax_error)

        self.load_tools()

        # find entries not handled by any tools
        self.unhandled = [entry for struct in self.config
                          for entry in struct
                          if entry not in self.handled]

        if self.unhandled:
            self.logger.error("The following entries are not handled by any "
                              "tool:")
            for entry in self.unhandled:
                self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'),
                                                entry.get('name')))

        # find duplicates
        self.find_dups(self.config)

        pkgs = [(entry.get('name'), entry.get('origin'))
                for struct in self.config
                for entry in struct
                if entry.tag == 'Package']
        if pkgs:
            self.logger.debug("The following packages are specified in bcfg2:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None])
            self.logger.debug("The following packages are prereqs added by "
                              "Packages:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])

        self.times['config_parse'] = time.time()

    def run(self):
        """Perform client execution phase."""
        # begin configuration
        self.times['start'] = time.time()

        self.logger.info("Starting Bcfg2 client run at %s" %
                         self.times['start'])

        self.parse_config(self.get_config().decode('utf-8'))

        if self.config.tag == 'error':
            self.fatal_error("Server error: %s" % (self.config.text))

        if Bcfg2.Options.setup.bundle_quick:
            newconfig = XML.XML('<Configuration/>')
            for bundle in self.config.getchildren():
                name = bundle.get("name")
                if (name and (name in Bcfg2.Options.setup.only_bundles or
                              name not in Bcfg2.Options.setup.except_bundles)):
                    newconfig.append(bundle)
            self.config = newconfig

        if not Bcfg2.Options.setup.no_lock:
            # check lock here
            try:
                lockfile = open(Bcfg2.Options.setup.lockfile, 'w')
                if locked(lockfile.fileno()):
                    self.fatal_error("Another instance of Bcfg2 is running. "
                                     "If you want to bypass the check, run "
                                     "with the -O/--no-lock option")
            except SystemExit:
                raise
            except:
                lockfile = None
                self.logger.error("Failed to open lockfile %s: %s" %
                                  (Bcfg2.Options.setup.lockfile,
                                   sys.exc_info()[1]))

        # execute the configuration
        self.Execute()

        if not Bcfg2.Options.setup.no_lock:
            # unlock here
            if lockfile:
                try:
                    fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
                    os.remove(Bcfg2.Options.setup.lockfile)
                except OSError:
                    self.logger.error("Failed to unlock lockfile %s" %
                                      lockfile.name)

        if (not Bcfg2.Options.setup.file and
                not Bcfg2.Options.setup.bundle_quick):
            # upload statistics
            feedback = self.GenerateStats()

            try:
                self.proxy.RecvStats(
                    XML.tostring(feedback,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.logger.error("Failed to upload configuration statistics: "
                                  "%s" % err)
                raise SystemExit(2)

        self.logger.info("Finished Bcfg2 client run at %s" % time.time())

    def load_tools(self):
        """ Load all applicable client tools """
        for tool in Bcfg2.Options.setup.drivers:
            try:
                self.tools.append(tool(self.config))
            except Tools.ToolInstantiationError:
                continue
            except:
                self.logger.error("Failed to instantiate tool %s" % tool,
                                  exc_info=1)

        for tool in self.tools[:]:
            for conflict in getattr(tool, 'conflicts', []):
                for item in self.tools:
                    if item.name == conflict:
                        self.tools.remove(item)

        self.logger.info("Loaded tool drivers:")
        self.logger.info([tool.name for tool in self.tools])

        deprecated = [tool.name for tool in self.tools if tool.deprecated]
        if deprecated:
            self.logger.warning("Loaded deprecated tool drivers:")
            self.logger.warning(deprecated)
        experimental = [tool.name for tool in self.tools if tool.experimental]
        if experimental:
            self.logger.warning("Loaded experimental tool drivers:")
            self.logger.warning(experimental)

    def find_dups(self, config):
        """ Find duplicate entries and warn about them """
        entries = dict()
        for struct in config:
            for entry in struct:
                for tool in self.tools:
                    if tool.handlesEntry(entry):
                        pkey = tool.primarykey(entry)
                        if pkey in entries:
                            entries[pkey] += 1
                        else:
                            entries[pkey] = 1
        multi = [e for e, c in entries.items() if c > 1]
        if multi:
            self.logger.debug("The following entries are included multiple "
                              "times:")
            for entry in multi:
                self.logger.debug(entry)

    def promptFilter(self, msg, entries):
        """Filter a supplied list based on user input."""
        ret = []
        entries.sort(key=lambda e: e.tag + ":" + e.get('name'))
        for entry in entries[:]:
            if entry in self.unhandled:
                # don't prompt for entries that can't be installed
                continue
            if 'qtext' in entry.attrib:
                iprompt = entry.get('qtext')
            else:
                iprompt = msg % (entry.tag, entry.get('name'))
            if prompt(iprompt):
                ret.append(entry)
        return ret

    def __getattr__(self, name):
        if name in ['extra', 'handled', 'modified', '__important__']:
            ret = []
            for tool in self.tools:
                ret += getattr(tool, name)
            return ret
        elif name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(name)

    def InstallImportant(self):
        """Install important entries

        We also process the decision mode here because we want to prevent
        'important' entries that are excluded by the whitelist/blacklist
        from being installed before the decision mode has been applied on
        the client.
        """
        # Need to process decision stuff early so that dryrun mode
        # works with it
        self.whitelist = [entry for entry in self.states
                          if not self.states[entry]]
        if not Bcfg2.Options.setup.file:
            if Bcfg2.Options.setup.decision == 'whitelist':
                dwl = Bcfg2.Options.setup.decision_list
                w_to_rem = [e for e in self.whitelist
                            if not matches_white_list(e, dwl)]
                if w_to_rem:
                    self.logger.info("In whitelist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in w_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in w_to_rem]
            elif Bcfg2.Options.setup.decision == 'blacklist':
                b_to_rem = \
                    [e for e in self.whitelist
                     if not
                     passes_black_list(e, Bcfg2.Options.setup.decision_list)]
                if b_to_rem:
                    self.logger.info("In blacklist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in b_to_rem]

        # take care of important entries first
        if (not Bcfg2.Options.setup.dry_run or
                Bcfg2.Options.setup.only_important):
            important_installs = set()
            for parent in self.config.findall(".//Path/.."):
                name = parent.get("name")
                if not name or (name in Bcfg2.Options.setup.except_bundles and
                                name not in Bcfg2.Options.setup.only_bundles):
                    continue
                for cfile in parent.findall("./Path"):
                    if (cfile.get('name') not in self.__important__ or
                            cfile.get('type') != 'file' or
                            cfile not in self.whitelist):
                        continue
                    tools = [t for t in self.tools
                             if t.handlesEntry(cfile) and t.canVerify(cfile)]
                    if not tools:
                        continue
                    if Bcfg2.Options.setup.dry_run:
                        important_installs.add(cfile)
                        continue
                    if (Bcfg2.Options.setup.interactive and not
                            self.promptFilter("Install %s: %s? (y/N):",
                                              [cfile])):
                        self.whitelist.remove(cfile)
                        continue
                    try:
                        self.states[cfile] = tools[0].InstallPath(cfile)
                        if self.states[cfile]:
                            tools[0].modified.append(cfile)
                    except:  # pylint: disable=W0702
                        self.logger.error("Unexpected tool failure",
                                          exc_info=1)
                    cfile.set('qtext', '')
                    if tools[0].VerifyPath(cfile, []):
                        self.whitelist.remove(cfile)
            if Bcfg2.Options.setup.dry_run and len(important_installs) > 0:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                  for e in important_installs])

    def Inventory(self):
        """
           Verify all entries,
           find extra entries,
           and build up workqueues

        """
        # initialize all states
        for struct in self.config.getchildren():
            for entry in struct.getchildren():
                self.states[entry] = False
        for tool in self.tools:
            try:
                self.states.update(tool.Inventory())
            except:  # pylint: disable=W0702
                self.logger.error("%s.Inventory() call failed:" % tool.name,
                                  exc_info=1)

    def Decide(self):  # pylint: disable=R0912
        """Set self.whitelist based on user interaction."""
        iprompt = "Install %s: %s? (y/N): "
        rprompt = "Remove %s: %s? (y/N): "
        if Bcfg2.Options.setup.remove:
            if Bcfg2.Options.setup.remove == 'all':
                self.removal = self.extra
            elif Bcfg2.Options.setup.remove == 'services':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Service']
            elif Bcfg2.Options.setup.remove == 'packages':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Package']
            elif Bcfg2.Options.setup.remove == 'users':
                self.removal = [entry for entry in self.extra
                                if entry.tag in ['POSIXUser', 'POSIXGroup']]

        candidates = [entry for entry in self.states
                      if not self.states[entry]]

        if Bcfg2.Options.setup.dry_run:
            if self.whitelist:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.whitelist])
                self.whitelist = []
            if self.removal:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry removal for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.removal])
            self.removal = []

        # Here is where most of the work goes
        # first perform bundle filtering
        all_bundle_names = [b.get('name')
                            for b in self.config.findall('./Bundle')]
        bundles = self.config.getchildren()
        if Bcfg2.Options.setup.only_bundles:
            # warn if non-existent bundle given
            for bundle in Bcfg2.Options.setup.only_bundles:
                if bundle not in all_bundle_names:
                    self.logger.info("Warning: Bundle %s not found" % bundle)
            bundles = [b for b in bundles
                       if b.get('name') in Bcfg2.Options.setup.only_bundles]
        if Bcfg2.Options.setup.except_bundles:
            # warn if non-existent bundle given
            if not Bcfg2.Options.setup.bundle_quick:
                for bundle in Bcfg2.Options.setup.except_bundles:
                    if bundle not in all_bundle_names:
                        self.logger.info("Warning: Bundle %s not found" %
                                         bundle)
            bundles = [
                b for b in bundles
                if b.get('name') not in Bcfg2.Options.setup.except_bundles]
        self.whitelist = [e for e in self.whitelist
                          if any(e in b for b in bundles)]

        # first process prereq actions
        for bundle in bundles[:]:
            if bundle.tag == 'Bundle':
                bmodified = any((item in self.whitelist or
                                 item in self.modified) for item in bundle)
            else:
                bmodified = False
            actions = [a for a in bundle.findall('./Action')
                       if (a.get('timing') in ['pre', 'both'] and
                           (bmodified or a.get('when') == 'always'))]
            # now process all "pre" and "both" actions that either run
            # "always" or whose bundle has been modified
            if Bcfg2.Options.setup.interactive:
                self.promptFilter(iprompt, actions)
            self.DispatchInstallCalls(actions)

            if bundle.tag != 'Bundle':
                continue

            # if any prerequisite action failed, drop this bundle and its
            # entries from the whitelist
            if not all(self.states[a] for a in actions):
                # log the bundles forced off along with their entries
                self.logger.info("%s %s failed prerequisite action" %
                                 (bundle.tag, bundle.get('name')))
                bundles.remove(bundle)
                b_to_remv = [ent for ent in self.whitelist if ent in bundle]
                if b_to_remv:
                    self.logger.info("Not installing entries from %s %s" %
                                     (bundle.tag, bundle.get('name')))
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_remv])
                    for ent in b_to_remv:
                        self.whitelist.remove(ent)

        self.logger.debug("Installing entries in the following bundle(s):")
        self.logger.debug("  %s" % ", ".join(b.get("name") for b in bundles
                                             if b.get("name")))

        if Bcfg2.Options.setup.interactive:
            self.whitelist = self.promptFilter(iprompt, self.whitelist)
            self.removal = self.promptFilter(rprompt, self.removal)

        for entry in candidates:
            if entry not in self.whitelist:
                self.blacklist.append(entry)

    def DispatchInstallCalls(self, entries):
        """Dispatch install calls to underlying tools."""
        for tool in self.tools:
            handled = [entry for entry in entries if tool.canInstall(entry)]
            if not handled:
                continue
            try:
                self.states.update(tool.Install(handled))
            except:  # pylint: disable=W0702
                self.logger.error("%s.Install() call failed:" % tool.name,
                                  exc_info=1)

    def Install(self):
        """Install all entries."""
        self.DispatchInstallCalls(self.whitelist)
        mods = self.modified
        mbundles = [struct for struct in self.config.findall('Bundle')
                    if any(True for mod in mods if mod in struct)]

        if self.modified:
            # Handle Bundle interdeps
            if mbundles:
                self.logger.info("The Following Bundles have been modified:")
                self.logger.info([mbun.get('name') for mbun in mbundles])
            tbm = [(t, b) for t in self.tools for b in mbundles]
            for tool, bundle in tbm:
                try:
                    self.states.update(tool.Inventory(structures=[bundle]))
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Inventory() call failed:" %
                                      tool.name,
                                      exc_info=1)
            clobbered = [entry for bundle in mbundles for entry in bundle
                         if (not self.states[entry] and
                             entry not in self.blacklist)]
            if clobbered:
                self.logger.debug("Found clobbered entries:")
                self.logger.debug(["%s:%s" % (entry.tag, entry.get('name'))
                                   for entry in clobbered])
                if not Bcfg2.Options.setup.interactive:
                    self.DispatchInstallCalls(clobbered)

        for bundle in self.config.findall('.//Bundle'):
            if (Bcfg2.Options.setup.only_bundles and
                    bundle.get('name') not in
                    Bcfg2.Options.setup.only_bundles):
                # prune out unspecified bundles when running with -b
                continue
            if bundle in mbundles:
                self.logger.debug("Bundle %s was modified" %
                                  bundle.get('name'))
                func = "BundleUpdated"
            else:
                self.logger.debug("Bundle %s was not modified" %
                                  bundle.get('name'))
                func = "BundleNotUpdated"
            for tool in self.tools:
                try:
                    self.states.update(getattr(tool, func)(bundle))
                except:  # pylint: disable=W0702
                    self.logger.error("%s.%s(%s:%s) call failed:" %
                                      (tool.name, func, bundle.tag,
                                       bundle.get("name")), exc_info=1)

        for indep in self.config.findall('.//Independent'):
            for tool in self.tools:
                try:
                    self.states.update(tool.BundleNotUpdated(indep))
                except:  # pylint: disable=W0702
                    self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:"
                                      % (tool.name, indep.tag,
                                         indep.get("name")), exc_info=1)

    def Remove(self):
        """Remove extra entries."""
        for tool in self.tools:
            extras = [entry for entry in self.removal
                      if tool.handlesEntry(entry)]
            if extras:
                try:
                    tool.Remove(extras)
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Remove() failed" % tool.name,
                                      exc_info=1)

    def CondDisplayState(self, phase):
        """Conditionally print tracing information."""
        self.logger.info('Phase: %s' % phase)
        self.logger.info('Correct entries:        %d' %
                         list(self.states.values()).count(True))
        self.logger.info('Incorrect entries:      %d' %
                         list(self.states.values()).count(False))
        if phase == 'final' and list(self.states.values()).count(False):
            for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" +
                                e.get('name')):
                if not self.states[entry]:
                    etype = entry.get('type')
                    if etype:
                        self.logger.info("%s:%s:%s" % (entry.tag, etype,
                                                       entry.get('name')))
                    else:
                        self.logger.info("%s:%s" % (entry.tag,
                                                    entry.get('name')))
        self.logger.info('Total managed entries:  %d' %
                         len(list(self.states.values())))
        self.logger.info('Unmanaged entries:      %d' % len(self.extra))
        if phase == 'final' and Bcfg2.Options.setup.show_extra:
            for entry in sorted(self.extra,
                                key=lambda e: e.tag + ":" + e.get('name')):
                etype = entry.get('type')
                if etype:
                    self.logger.info("%s:%s:%s" % (entry.tag, etype,
                                                   entry.get('name')))
                else:
                    self.logger.info("%s:%s" % (entry.tag,
                                                entry.get('name')))

        if ((list(self.states.values()).count(False) == 0) and not self.extra):
            self.logger.info('All entries correct.')

    def ReInventory(self):
        """Recheck everything."""
        if not Bcfg2.Options.setup.dry_run and Bcfg2.Options.setup.kevlar:
            self.logger.info("Rechecking system inventory")
            self.Inventory()

    def Execute(self):
        """Run all methods."""
        self.Inventory()
        self.times['inventory'] = time.time()
        self.CondDisplayState('initial')
        self.InstallImportant()
        if not Bcfg2.Options.setup.only_important:
            self.Decide()
            self.Install()
            self.times['install'] = time.time()
            self.Remove()
            self.times['remove'] = time.time()

        if self.modified:
            self.ReInventory()
            self.times['reinventory'] = time.time()
        self.times['finished'] = time.time()
        self.CondDisplayState('final')

    def GenerateStats(self):
        """Generate XML summary of execution statistics."""
        feedback = XML.Element("upload-statistics")
        stats = XML.SubElement(feedback,
                               'Statistics', total=str(len(self.states)),
                               version='2.0',
                               revision=self.config.get('revision', '-1'))
        good_entries = [key for key, val in list(self.states.items()) if val]
        good = len(good_entries)
        stats.set('good', str(good))
        if any(not val for val in list(self.states.values())):
            stats.set('state', 'dirty')
        else:
            stats.set('state', 'clean')

        # List bad elements of the configuration
        for (data, ename) in [(self.modified, 'Modified'),
                              (self.extra, "Extra"),
                              (good_entries, "Good"),
                              ([entry for entry in self.states
                                if not self.states[entry]], "Bad")]:
            container = XML.SubElement(stats, ename)
            for item in data:
                item.set('qtext', '')
                container.append(item)
                item.text = None

        timeinfo = XML.Element("OpStamps")
        feedback.append(stats)
        for (event, timestamp) in list(self.times.items()):
            timeinfo.set(event, str(timestamp))
        stats.append(timeinfo)
        return feedback
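
The run() method above serializes concurrent client runs with a locked() helper on a lockfile descriptor and later releases the lock with fcntl.lockf(..., LOCK_UN). A minimal sketch of such a helper, assuming it simply attempts a non-blocking exclusive lock (the real Bcfg2 helper may differ in detail):

import fcntl

def locked(fd):
    """Return True if another process already holds a lock on fd."""
    try:
        # try to take an exclusive, non-blocking lock on the descriptor
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return True   # the lock is held by another process
    return False      # we now hold the lock; release it with LOCK_UN
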
Example #19
File: Client.py  Project: rcuza/bcfg2
class Client(object):
    """ The main Bcfg2 client class """

    def __init__(self):
        self.toolset = None
        self.tools = None
        self.config = None
        self._proxy = None
        self.setup = Bcfg2.Options.get_option_parser()

        if self.setup['debug']:
            level = logging.DEBUG
        elif self.setup['verbose']:
            level = logging.INFO
        else:
            level = logging.WARNING
        Bcfg2.Logger.setup_logging('bcfg2',
                                   to_syslog=self.setup['syslog'],
                                   level=level,
                                   to_file=self.setup['logging'])
        self.logger = logging.getLogger('bcfg2')
        self.logger.debug(self.setup)

        self.cmd = Executor(self.setup['command_timeout'])

        if self.setup['bundle_quick']:
            if not self.setup['bundle'] and not self.setup['skipbundle']:
                self.logger.error("-Q option requires -b or -B")
                raise SystemExit(1)
            elif self.setup['remove']:
                self.logger.error("-Q option incompatible with -r")
                raise SystemExit(1)
        if 'drivers' in self.setup and self.setup['drivers'] == 'help':
            self.logger.info("The following drivers are available:")
            self.logger.info(Bcfg2.Client.Tools.__all__)
            raise SystemExit(0)
        if self.setup['remove'] and 'services' in self.setup['remove'].lower():
            self.logger.error("Service removal is nonsensical; "
                              "removed services will only be disabled")
        if (self.setup['remove'] and
            self.setup['remove'].lower() not in ['all', 'services', 'packages',
                                                 'users']):
            self.logger.error("Got unknown argument %s for -r" %
                              self.setup['remove'])
        if self.setup["file"] and self.setup["cache"]:
            print("cannot use -f and -c together")
            raise SystemExit(1)
        if not self.setup['server'].startswith('https://'):
            self.setup['server'] = 'https://' + self.setup['server']

    def _probe_failure(self, probename, msg):
        """ handle failure of a probe in the way the user wants us to
        (exit or continue) """
        message = "Failed to execute probe %s: %s" % (probename, msg)
        if self.setup['probe_exit']:
            self.fatal_error(message)
        else:
            self.logger.error(message)

    def run_probe(self, probe):
        """Execute probe."""
        name = probe.get('name')
        self.logger.info("Running probe %s" % name)
        ret = Bcfg2.Client.XML.Element("probe-data",
                                       name=name,
                                       source=probe.get('source'))
        try:
            scripthandle, scriptname = tempfile.mkstemp()
            script = os.fdopen(scripthandle, 'w')
            try:
                script.write("#!%s\n" %
                             (probe.attrib.get('interpreter', '/bin/sh')))
                if sys.hexversion >= 0x03000000:
                    script.write(probe.text)
                else:
                    script.write(probe.text.encode('utf-8'))
                script.close()
                os.chmod(scriptname,
                         stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                         stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
                         stat.S_IWUSR)  # 0755
                rv = self.cmd.run(scriptname, timeout=self.setup['timeout'])
                if rv.stderr:
                    self.logger.warning("Probe %s has error output: %s" %
                                        (name, rv.stderr))
                if not rv.success:
                    self._probe_failure(name, "Return value %s" % rv)
                self.logger.info("Probe %s has result:" % name)
                self.logger.info(rv.stdout)
                if sys.hexversion >= 0x03000000:
                    ret.text = rv.stdout
                else:
                    ret.text = rv.stdout.decode('utf-8')
            finally:
                os.unlink(scriptname)
        except SystemExit:
            raise
        except:
            self._probe_failure(name, sys.exc_info()[1])
        return ret

    def fatal_error(self, message):
        """Signal a fatal error."""
        self.logger.error("Fatal error: %s" % (message))
        raise SystemExit(1)

    @property
    def proxy(self):
        """ get an XML-RPC proxy to the server """
        if self._proxy is None:
            self._proxy = Bcfg2.Client.Proxy.ComponentProxy(
                self.setup['server'],
                self.setup['user'],
                self.setup['password'],
                key=self.setup['key'],
                cert=self.setup['certificate'],
                ca=self.setup['ca'],
                allowedServerCNs=self.setup['serverCN'],
                timeout=self.setup['timeout'],
                retries=int(self.setup['retries']),
                delay=int(self.setup['retry_delay']))
        return self._proxy

    def run_probes(self, times=None):
        """ run probes and upload probe data """
        if times is None:
            times = dict()

        try:
            probes = Bcfg2.Client.XML.XML(str(self.proxy.GetProbes()))
        except (Bcfg2.Client.Proxy.ProxyError,
                Bcfg2.Client.Proxy.CertificateError,
                socket.gaierror,
                socket.error):
            err = sys.exc_info()[1]
            self.fatal_error("Failed to download probes from bcfg2: %s" % err)
        except Bcfg2.Client.XML.ParseError:
            err = sys.exc_info()[1]
            self.fatal_error("Server returned invalid probe requests: %s" %
                             err)

        times['probe_download'] = time.time()

        # execute probes
        probedata = Bcfg2.Client.XML.Element("ProbeData")
        for probe in probes.findall(".//probe"):
            probedata.append(self.run_probe(probe))

        if len(probes.findall(".//probe")) > 0:
            try:
                # upload probe responses
                self.proxy.RecvProbeData(
                    Bcfg2.Client.XML.tostring(
                        probedata,
                        xml_declaration=False).decode('utf-8'))
            except Bcfg2.Client.Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to upload probe data: %s" % err)

        times['probe_upload'] = time.time()

    def get_config(self, times=None):
        """ load the configuration, either from the cached
        configuration file (-f), or from the server """
        if times is None:
            times = dict()

        if self.setup['file']:
            # read config from file
            try:
                self.logger.debug("Reading cached configuration from %s" %
                                  self.setup['file'])
                return open(self.setup['file'], 'r').read()
            except IOError:
                self.fatal_error("Failed to read cached configuration from: %s"
                                 % (self.setup['file']))
        else:
            # retrieve config from server
            if self.setup['profile']:
                try:
                    self.proxy.AssertProfile(self.setup['profile'])
                except Bcfg2.Client.Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to set client profile: %s" % err)

            try:
                self.proxy.DeclareVersion(__version__)
            except xmlrpclib.Fault:
                err = sys.exc_info()[1]
                if (err.faultCode == xmlrpclib.METHOD_NOT_FOUND or
                    (err.faultCode == 7 and
                     err.faultString.startswith("Unknown method"))):
                    self.logger.debug("Server does not support declaring "
                                      "client version")
                else:
                    self.logger.error("Failed to declare version: %s" % err)
            except (Bcfg2.Client.Proxy.ProxyError,
                    Bcfg2.Client.Proxy.CertificateError,
                    socket.gaierror,
                    socket.error):
                err = sys.exc_info()[1]
                self.logger.error("Failed to declare version: %s" % err)

            self.run_probes(times=times)

            if self.setup['decision'] in ['whitelist', 'blacklist']:
                try:
                    self.setup['decision_list'] = \
                        self.proxy.GetDecisionList(self.setup['decision'])
                    self.logger.info("Got decision list from server:")
                    self.logger.info(self.setup['decision_list'])
                except Bcfg2.Client.Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to get decision list: %s" % err)

            try:
                rawconfig = self.proxy.GetConfig().encode('utf-8')
            except Bcfg2.Client.Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to download configuration from "
                                 "Bcfg2: %s" % err)

            times['config_download'] = time.time()
        return rawconfig

    def run(self):
        """Perform client execution phase."""
        times = {}

        # begin configuration
        times['start'] = time.time()

        self.logger.info("Starting Bcfg2 client run at %s" % times['start'])

        rawconfig = self.get_config(times=times).decode('utf-8')

        if self.setup['cache']:
            try:
                open(self.setup['cache'], 'w').write(rawconfig)
                os.chmod(self.setup['cache'], 33152)  # 0600
            except IOError:
                self.logger.warning("Failed to write config cache file %s" %
                                    (self.setup['cache']))
            times['caching'] = time.time()

        try:
            self.config = Bcfg2.Client.XML.XML(rawconfig)
        except Bcfg2.Client.XML.ParseError:
            syntax_error = sys.exc_info()[1]
            self.fatal_error("The configuration could not be parsed: %s" %
                             syntax_error)

        times['config_parse'] = time.time()

        if self.config.tag == 'error':
            self.fatal_error("Server error: %s" % (self.config.text))
            return(1)

        if self.setup['bundle_quick']:
            newconfig = Bcfg2.Client.XML.XML('<Configuration/>')
            for bundle in self.config.getchildren():
                if (bundle.tag == 'Bundle' and
                    ((self.setup['bundle'] and
                      bundle.get('name') in self.setup['bundle']) or
                     (self.setup['skipbundle'] and
                      bundle.get('name') not in self.setup['skipbundle']))):
                    newconfig.append(bundle)
            self.config = newconfig

        self.tools = Bcfg2.Client.Frame.Frame(self.config, times)

        if not self.setup['omit_lock_check']:
            # check lock here
            try:
                lockfile = open(self.setup['lockfile'], 'w')
                if locked(lockfile.fileno()):
                    self.fatal_error("Another instance of Bcfg2 is running. "
                                     "If you want to bypass the check, run "
                                     "with the %s option" %
                                     Bcfg2.Options.OMIT_LOCK_CHECK.cmd)
            except SystemExit:
                raise
            except:
                lockfile = None
                self.logger.error("Failed to open lockfile %s: %s" %
                                  (self.setup['lockfile'], sys.exc_info()[1]))

        # execute the configuration
        self.tools.Execute()

        if not self.setup['omit_lock_check']:
            # unlock here
            if lockfile:
                try:
                    fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
                    os.remove(self.setup['lockfile'])
                except OSError:
                    self.logger.error("Failed to unlock lockfile %s" %
                                      lockfile.name)

        if not self.setup['file'] and not self.setup['bundle_quick']:
            # upload statistics
            feedback = self.tools.GenerateStats()

            try:
                self.proxy.RecvStats(
                    Bcfg2.Client.XML.tostring(
                        feedback,
                        xml_declaration=False).decode('utf-8'))
            except Bcfg2.Client.Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.logger.error("Failed to upload configuration statistics: "
                                  "%s" % err)
                raise SystemExit(2)

        self.logger.info("Finished Bcfg2 client run at %s" % time.time())
Example #20
class CfgPrivateKeyCreator(XMLCfgCreator):
    """The CfgPrivateKeyCreator creates SSH keys on the fly. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within privkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['privkey.xml']

    cfg_section = "sshkeys"
    options = [
        Bcfg2.Options.Option(
            cf=("sshkeys", "category"),
            dest="sshkeys_category",
            help="Metadata category that generated SSH keys are specific to"),
        Bcfg2.Options.Option(
            cf=("sshkeys", "passphrase"),
            dest="sshkeys_passphrase",
            help="Passphrase used to encrypt generated SSH private keys")
    ]

    def __init__(self, fname):
        XMLCfgCreator.__init__(self, fname)
        pubkey_path = os.path.dirname(self.name) + ".pub"
        pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
        self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
        self.cmd = Executor()

    def _gen_keypair(self, metadata, spec=None):
        """ Generate a keypair according to the given client medata
        and key specification.

        :param metadata: The client metadata to generate keys for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: tuple - (private key data, public key data)
        """
        if spec is None:
            spec = self.XMLMatch(metadata)

        # set key parameters
        ktype = "rsa"
        bits = None
        params = spec.find("Params")
        if params is not None:
            bits = params.get("bits")
            ktype = params.get("type", ktype)
        try:
            passphrase = spec.find("Passphrase").text
        except AttributeError:
            passphrase = ''
        tempdir = tempfile.mkdtemp()
        try:
            filename = os.path.join(tempdir, "privkey")

            # generate key pair
            cmd = ["ssh-keygen", "-f", filename, "-t", ktype]
            if bits:
                cmd.extend(["-b", bits])
            cmd.append("-N")
            log_cmd = cmd[:]
            cmd.append(passphrase)
            if passphrase:
                log_cmd.append("******")
            else:
                log_cmd.append("''")
            self.debug_log("Cfg: Generating new SSH key pair: %s" %
                           " ".join(log_cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError(
                    "Cfg: Failed to generate SSH key pair "
                    "at %s for %s: %s" %
                    (filename, metadata.hostname, result.error))
            elif result.stderr:
                self.logger.warning(
                    "Cfg: Generated SSH key pair at %s for %s "
                    "with errors: %s" %
                    (filename, metadata.hostname, result.stderr))
            return (open(filename).read(), open(filename + ".pub").read())
        finally:
            shutil.rmtree(tempdir)

    # pylint: disable=W0221
    def create_data(self, entry, metadata):
        """ Create data for the given entry on the given client

        :param entry: The abstract entry to create data for.  This
                      will not be modified
        :type entry: lxml.etree._Element
        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :returns: string - The private key data
        """
        spec = self.XMLMatch(metadata)
        specificity = self.get_specificity(metadata)
        privkey, pubkey = self._gen_keypair(metadata, spec)

        # write the public key, stripping the comment and
        # replacing it with a comment that specifies the filename.
        kdata = pubkey.split()[:2]
        kdata.append(self.pubkey_creator.get_filename(**specificity))
        pubkey = " ".join(kdata) + "\n"
        self.pubkey_creator.write_data(pubkey, **specificity)

        # encrypt the private key, write to the proper place, and
        # return it
        self.write_data(privkey, **specificity)
        return privkey
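
As a standalone illustration of the key-generation flow used by _gen_keypair above, the same steps can be reproduced with subprocess instead of Bcfg2's Executor (assumes ssh-keygen is on the PATH; a hypothetical helper, not part of the plugin):

import os
import shutil
import subprocess
import tempfile

def gen_keypair(ktype="rsa", bits=None, passphrase=""):
    """Generate a throwaway keypair and return (private, public) data."""
    tempdir = tempfile.mkdtemp()
    try:
        filename = os.path.join(tempdir, "privkey")
        cmd = ["ssh-keygen", "-f", filename, "-t", ktype]
        if bits:
            cmd.extend(["-b", str(bits)])
        # an empty -N argument creates an unencrypted private key
        cmd.extend(["-N", passphrase])
        subprocess.check_call(cmd)
        return (open(filename).read(), open(filename + ".pub").read())
    finally:
        shutil.rmtree(tempdir)
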
Example #21
File: SSHbase.py  Project: rcuza/bcfg2
class SSHbase(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Generator,
              Bcfg2.Server.Plugin.PullTarget):
    """
       The sshbase generator manages ssh host keys (both v1 and v2)
       for hosts.  It also manages the ssh_known_hosts file. It can
       integrate host keys from other management domains and similarly
       export its keys. The repository contains files in the following
       formats:

       ssh_host_key.H_(hostname) -> the v1 host private key for
         (hostname)
       ssh_host_key.pub.H_(hostname) -> the v1 host public key
         for (hostname)
       ssh_host_(ec)(dr)sa_key.H_(hostname) -> the v2 ssh host
         private key for (hostname)
       ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
         public key for (hostname)
       ssh_known_hosts -> the current known hosts file. this
         is regenerated each time a new key is generated.

    """
    __author__ = '*****@*****.**'
    keypatterns = [
        "ssh_host_dsa_key", "ssh_host_ecdsa_key", "ssh_host_rsa_key",
        "ssh_host_key", "ssh_host_dsa_key.pub", "ssh_host_ecdsa_key.pub",
        "ssh_host_rsa_key.pub", "ssh_host_key.pub"
    ]

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Generator.__init__(self)
        Bcfg2.Server.Plugin.PullTarget.__init__(self)
        self.ipcache = {}
        self.namecache = {}
        self.__skn = False

        # keep track of which bogus keys we've warned about, and only
        # do so once
        self.badnames = dict()

        self.fam = Bcfg2.Server.FileMonitor.get_fam()
        self.fam.AddMonitor(self.data, self)

        self.static = dict()
        self.entries = dict()
        self.Entries['Path'] = dict()

        self.entries['/etc/ssh/ssh_known_hosts'] = \
            KnownHostsEntrySet(self.data)
        self.Entries['Path']['/etc/ssh/ssh_known_hosts'] = self.build_skn
        for keypattern in self.keypatterns:
            self.entries["/etc/ssh/" + keypattern] = \
                HostKeyEntrySet(keypattern, self.data)
            self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk

        self.cmd = Executor()

    def get_skn(self):
        """Build memory cache of the ssh known hosts file."""
        if not self.__skn:
            # if no metadata is registered yet, defer
            if len(self.core.metadata.query.all()) == 0:
                self.__skn = False
                return self.__skn

            skn = [s.data.rstrip() for s in list(self.static.values())]

            mquery = self.core.metadata.query

            # build hostname cache
            names = dict()
            for cmeta in mquery.all():
                names[cmeta.hostname] = set([cmeta.hostname])
                names[cmeta.hostname].update(cmeta.aliases)
                newnames = set()
                newips = set()
                for name in names[cmeta.hostname]:
                    newnames.add(name.split('.')[0])
                    try:
                        newips.update(self.get_ipcache_entry(name)[0])
                    except:  # pylint: disable=W0702
                        continue
                names[cmeta.hostname].update(newnames)
                names[cmeta.hostname].update(cmeta.addresses)
                names[cmeta.hostname].update(newips)
                # TODO: Only perform reverse lookups on IPs if an
                # option is set.
                if True:
                    for ip in newips:
                        try:
                            names[cmeta.hostname].update(
                                self.get_namecache_entry(ip))
                        except:  # pylint: disable=W0702
                            continue
                names[cmeta.hostname] = sorted(names[cmeta.hostname])

            pubkeys = [
                pubk for pubk in list(self.entries.keys())
                if pubk.endswith('.pub')
            ]
            pubkeys.sort()
            for pubkey in pubkeys:
                for entry in sorted(self.entries[pubkey].entries.values(),
                                    key=lambda e:
                                    (e.specific.hostname or e.specific.group)):
                    specific = entry.specific
                    hostnames = []
                    if specific.hostname and specific.hostname in names:
                        hostnames = names[specific.hostname]
                    elif specific.group:
                        hostnames = list(
                            chain(*[
                                names[cmeta.hostname]
                                for cmeta in mquery.by_groups([specific.group])
                            ]))
                    elif specific.all:
                        # a generic key for all hosts?  really?
                        hostnames = list(chain(*list(names.values())))
                    if not hostnames:
                        if specific.hostname:
                            key = specific.hostname
                            ktype = "host"
                        elif specific.group:
                            key = specific.group
                            ktype = "group"
                        else:
                            # the user has added a global SSH key, but
                            # there are no clients yet; don't warn
                            # about this.
                            continue

                        if key not in self.badnames:
                            self.badnames[key] = True
                            self.logger.info("Ignoring key for unknown %s %s" %
                                             (ktype, key))
                        continue

                    skn.append("%s %s" %
                               (','.join(hostnames), entry.data.rstrip()))

            self.__skn = "\n".join(skn) + "\n"
        return self.__skn

    def set_skn(self, value):
        """Set backing data for skn."""
        self.__skn = value

    skn = property(get_skn, set_skn)

    def HandleEvent(self, event=None):
        """Local event handler that does skn regen on pubkey change."""
        # skip events we don't care about
        action = event.code2str()
        if action == "endExist" or event.filename == self.data:
            return

        for entry in list(self.entries.values()):
            if entry.specific.match(event.filename):
                entry.handle_event(event)
                if any(
                        event.filename.startswith(kp)
                        for kp in self.keypatterns if kp.endswith(".pub")):
                    self.debug_log("New public key %s; invalidating "
                                   "ssh_known_hosts cache" % event.filename)
                    self.skn = False
                return

        if event.filename == 'info.xml':
            for entry in list(self.entries.values()):
                entry.handle_event(event)
            return

        if event.filename.endswith('.static'):
            self.logger.info("Static key %s %s; invalidating ssh_known_hosts "
                             "cache" % (event.filename, action))
            if action == "deleted" and event.filename in self.static:
                del self.static[event.filename]
                self.skn = False
            else:
                self.static[event.filename] = Bcfg2.Server.Plugin.FileBacked(
                    os.path.join(self.data, event.filename))
                self.static[event.filename].HandleEvent(event)
                self.skn = False
            return

        self.logger.warn("SSHbase: Got unknown event %s %s" %
                         (event.filename, action))

    def get_ipcache_entry(self, client):
        """ Build a cache of dns results. """
        if client in self.ipcache:
            if self.ipcache[client]:
                return self.ipcache[client]
            else:
                raise PluginExecutionError("No cached IP address for %s" %
                                           client)
        else:
            # need to add entry
            try:
                ipaddr = set(
                    [info[4][0] for info in socket.getaddrinfo(client, None)])
                self.ipcache[client] = (ipaddr, client)
                return (ipaddr, client)
            except socket.gaierror:
                result = self.cmd.run(["getent", "hosts", client])
                if result.success:
                    ipaddr = result.stdout.strip().split()
                    if ipaddr:
                        self.ipcache[client] = (ipaddr, client)
                        return (ipaddr, client)
                self.ipcache[client] = False
                msg = "Failed to find IP address for %s: %s" % (client,
                                                                result.error)
                self.logger.error(msg)
                raise PluginExecutionError(msg)

    def get_namecache_entry(self, cip):
        """Build a cache of name lookups from client IP addresses."""
        if cip in self.namecache:
            # lookup cached name from IP
            if self.namecache[cip]:
                return self.namecache[cip]
            else:
                raise socket.gaierror
        else:
            # add an entry that has not been cached
            try:
                rvlookup = socket.gethostbyaddr(cip)
                if rvlookup[0]:
                    self.namecache[cip] = [rvlookup[0]]
                else:
                    self.namecache[cip] = []
                self.namecache[cip].extend(rvlookup[1])
                return self.namecache[cip]
            except socket.gaierror:
                self.namecache[cip] = False
                self.logger.error("Failed to find any names associated with "
                                  "IP address %s" % cip)
                raise

    def build_skn(self, entry, metadata):
        """This function builds builds a host specific known_hosts file."""
        try:
            self.entries[entry.get('name')].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            entry.text = self.skn
            hostkeys = []
            for key in self.keypatterns:
                if key.endswith(".pub"):
                    try:
                        hostkeys.append(
                            self.entries["/etc/ssh/" +
                                         key].best_matching(metadata))
                    except Bcfg2.Server.Plugin.PluginExecutionError:
                        pass
            hostkeys.sort()
            for hostkey in hostkeys:
                entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" \
                    % hostkey.data
            self.entries[entry.get('name')].bind_info_to_entry(entry, metadata)

    def build_hk(self, entry, metadata):
        """This binds host key data into entries."""
        try:
            self.entries[entry.get('name')].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            filename = entry.get('name').split('/')[-1]
            self.GenerateHostKeyPair(metadata.hostname, filename)
            # Service the FAM events queued up by the key generation
            # so the data structure entries will be available for
            # binding.
            #
            # NOTE: We wait for up to ten seconds. There is some
            # potential for race condition, because if the file
            # monitor doesn't get notified about the new key files in
            # time, those entries won't be available for binding. In
            # practice, this seems "good enough".
            tries = 0
            is_bound = False
            while not is_bound:
                if tries >= 10:
                    msg = "%s still not registered" % filename
                    self.logger.error(msg)
                    raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
                self.fam.handle_events_in_interval(1)
                tries += 1
                try:
                    self.entries[entry.get('name')].bind_entry(entry, metadata)
                    is_bound = True
                except Bcfg2.Server.Plugin.PluginExecutionError:
                    pass

    def GenerateHostKeyPair(self, client, filename):
        """Generate new host key pair for client."""
        match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa)_)?key)', filename)
        if match:
            hostkey = "%s.H_%s" % (match.group(1), client)
            if match.group(2):
                keytype = match.group(2)
            else:
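                # no type suffix means the legacy SSH protocol 1 host key
                # (ssh_host_key), which ssh-keygen generates with -t rsa1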
                keytype = 'rsa1'
        else:
            raise PluginExecutionError("Unknown key filename: %s" % filename)

        fileloc = os.path.join(self.data, hostkey)
        publoc = os.path.join(
            self.data,
            ".".join([hostkey.split('.')[0], 'pub',
                      "H_%s" % client]))
        tempdir = tempfile.mkdtemp()
        temploc = os.path.join(tempdir, hostkey)
        cmd = [
            "ssh-keygen", "-q", "-f", temploc, "-N", "", "-t", keytype, "-C",
            "root@%s" % client
        ]
        self.debug_log("SSHbase: Running: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError(
                "SSHbase: Error running ssh-keygen: %s" % result.error)

        try:
            shutil.copy(temploc, fileloc)
            shutil.copy("%s.pub" % temploc, publoc)
        except IOError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Temporary SSH keys not found: %s" %
                                       err)

        try:
            os.unlink(temploc)
            os.unlink("%s.pub" % temploc)
            os.rmdir(tempdir)
        except OSError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Failed to unlink temporary ssh keys: "
                                       "%s" % err)

    def AcceptChoices(self, _, metadata):
        return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)]

    def AcceptPullData(self, specific, entry, log):
        """Per-plugin bcfg2-admin pull support."""
        # specific will always be host specific
        filename = os.path.join(
            self.data,
            "%s.H_%s" % (entry['name'].split('/')[-1], specific.hostname))
        try:
            open(filename, 'w').write(entry['text'])
            if log:
                print("Wrote file %s" % filename)
        except KeyError:
            self.logger.error("Failed to pull %s. This file does not "
                              "currently exist on the client" %
                              entry.get('name'))
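
For context, here is a minimal standalone sketch of the key-generation step that GenerateHostKeyPair wraps above; plain subprocess stands in for the plugin's Executor, and the client name, key type, and destination paths are made-up examples.

import os
import shutil
import subprocess
import tempfile

client = "example.host"                       # hypothetical client name
keytype = "rsa"                               # as derived from the key filename
tempdir = tempfile.mkdtemp()
temploc = os.path.join(tempdir, "ssh_host_%s_key" % keytype)

# generate an unencrypted host keypair, mirroring the plugin's command
subprocess.check_call(["ssh-keygen", "-q", "-f", temploc, "-N", "",
                       "-t", keytype, "-C", "root@%s" % client])

# copy both halves into the repository naming scheme, then clean up
shutil.copy(temploc, "ssh_host_%s_key.H_%s" % (keytype, client))
shutil.copy(temploc + ".pub", "ssh_host_%s_key.pub.H_%s" % (keytype, client))
os.unlink(temploc)
os.unlink(temploc + ".pub")
os.rmdir(tempdir)
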
Example #22
0
class CfgPublicKeyCreator(CfgCreator, StructFile):
    """ .. currentmodule:: Bcfg2.Server.Plugins.Cfg

    The CfgPublicKeyCreator creates SSH public keys on the fly. It is
    invoked by :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to
    handle the creation of the public key, and can also call
    :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to trigger the
    creation of a keypair when a public key is created. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within pubkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['pubkey.xml']

    #: No text content on any tags, so encryption support disabled
    encryption = False

    def __init__(self, fname):
        CfgCreator.__init__(self, fname)
        StructFile.__init__(self, fname)
        self.cfg = get_cfg()
        self.core = self.cfg.core
        self.cmd = Executor()

    def create_data(self, entry, metadata):
        if entry.get("name").endswith(".pub"):
            privkey = entry.get("name")[:-4]
        else:
            raise CfgCreationError("Cfg: Could not determine private key for "
                                   "%s: Filename does not end in .pub" %
                                   entry.get("name"))

        privkey_entry = lxml.etree.Element("Path", name=privkey)
        try:
            self.core.Bind(privkey_entry, metadata)
        except PluginExecutionError:
            raise CfgCreationError("Cfg: Could not bind %s (private key for "
                                   "%s): %s" %
                                   (privkey, self.name, sys.exc_info()[1]))

        try:
            eset = self.cfg.entries[privkey]
            creator = eset.best_matching(
                metadata, eset.get_handlers(metadata, CfgCreator))
        except KeyError:
            raise CfgCreationError("Cfg: No private key defined for %s (%s)" %
                                   (self.name, privkey))
        except PluginExecutionError:
            raise CfgCreationError("Cfg: No privkey.xml defined for %s "
                                   "(private key for %s)" %
                                   (privkey, self.name))

        specificity = creator.get_specificity(metadata)
        fname = self.get_filename(**specificity)

        # if the private key didn't exist, then creating it may have
        # created the public key, too.  check for it first.
        if os.path.exists(fname):
            return open(fname).read()
        else:
            # generate public key from private key
            fd, privfile = tempfile.mkstemp()
            try:
                os.fdopen(fd, 'w').write(privkey_entry.text)
                cmd = ["ssh-keygen", "-y", "-f", privfile]
                self.debug_log("Cfg: Extracting SSH public key from %s: %s" %
                               (privkey, " ".join(cmd)))
                result = self.cmd.run(cmd)
                if not result.success:
                    raise CfgCreationError("Cfg: Failed to extract public key "
                                           "from %s: %s" %
                                           (privkey, result.error))
                self.write_data(result.stdout, **specificity)
                return result.stdout
            finally:
                os.unlink(privfile)

    def handle_event(self, event):
        CfgCreator.handle_event(self, event)
        StructFile.HandleEvent(self, event)

    handle_event.__doc__ = CfgCreator.handle_event.__doc__
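
The heart of create_data above is deriving a public key from existing private key material with ssh-keygen -y. A minimal sketch of just that step, assuming the private key text is already in hand (the source filename is illustrative) and using subprocess in place of the plugin's Executor:

import os
import subprocess
import tempfile

privkey_text = open("id_rsa_example").read()  # illustrative source of key text

fd, privfile = tempfile.mkstemp()
try:
    with os.fdopen(fd, "w") as tmp:
        tmp.write(privkey_text)
    # ssh-keygen -y prints the public key matching the private key given to -f
    pubkey = subprocess.check_output(["ssh-keygen", "-y", "-f", privfile])
finally:
    os.unlink(privfile)

print(pubkey.decode())
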
Example #23
0
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
    """ Ensure that all XML files in the Bcfg2 repository validate
    according to their respective schemas. """

    def __init__(self, *args, **kwargs):
        Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)

        #: A dict of <file glob>: <schema file> that maps files in the
        #: Bcfg2 specification to their schemas.  The globs are
        #: extended :mod:`fnmatch` globs that also support ``**``,
        #: which matches any number of any characters, including
        #: forward slashes.  The schema files are relative to the
        #: schema directory, which can be controlled by the
        #: ``bcfg2-lint --schema`` option.
        self.filesets = \
            {"Metadata/groups.xml": "metadata.xsd",
             "Metadata/clients.xml": "clients.xsd",
             "Cfg/**/info.xml": "info.xsd",
             "Cfg/**/privkey.xml": "privkey.xsd",
             "Cfg/**/pubkey.xml": "pubkey.xsd",
             "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd",
             "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd",
             "SSHbase/**/info.xml": "info.xsd",
             "SSLCA/**/info.xml": "info.xsd",
             "TGenshi/**/info.xml": "info.xsd",
             "TCheetah/**/info.xml": "info.xsd",
             "Bundler/*.xml": "bundle.xsd",
             "Bundler/*.genshi": "bundle.xsd",
             "Pkgmgr/*.xml": "pkglist.xsd",
             "Rules/*.xml": "rules.xsd",
             "Defaults/*.xml": "defaults.xsd",
             "etc/report-configuration.xml": "report-configuration.xsd",
             "Deps/*.xml": "deps.xsd",
             "Decisions/*.xml": "decisions.xsd",
             "Packages/sources.xml": "packages.xsd",
             "GroupPatterns/config.xml": "grouppatterns.xsd",
             "NagiosGen/config.xml": "nagiosgen.xsd",
             "FileProbes/config.xml": "fileprobes.xsd",
             "SSLCA/**/cert.xml": "sslca-cert.xsd",
             "SSLCA/**/key.xml": "sslca-key.xsd",
             "GroupLogic/groups.xml": "grouplogic.xsd"
             }

        self.filelists = {}
        self.get_filelists()
        self.cmd = Executor()

    def Run(self):
        schemadir = self.config['schema']

        for path, schemaname in self.filesets.items():
            try:
                filelist = self.filelists[path]
            except KeyError:
                filelist = []

            if filelist:
                # avoid loading schemas for empty file lists
                schemafile = os.path.join(schemadir, schemaname)
                schema = self._load_schema(schemafile)
                if schema:
                    for filename in filelist:
                        self.validate(filename, schemafile, schema=schema)

        self.check_properties()

    @classmethod
    def Errors(cls):
        return {"schema-failed-to-parse": "warning",
                "properties-schema-not-found": "warning",
                "xml-failed-to-parse": "error",
                "xml-failed-to-read": "error",
                "xml-failed-to-verify": "error",
                "input-output-error": "error"}

    def check_properties(self):
        """ Check Properties files against their schemas. """
        for filename in self.filelists['props']:
            schemafile = "%s.xsd" % os.path.splitext(filename)[0]
            if os.path.exists(schemafile):
                self.validate(filename, schemafile)
            else:
                self.LintError("properties-schema-not-found",
                               "No schema found for %s" % filename)
                # ensure that it at least parses
                self.parse(filename)

    def parse(self, filename):
        """ Parse an XML file, raising the appropriate LintErrors if
        it can't be parsed or read.  Return the
        lxml.etree._ElementTree parsed from the file.

        :param filename: The full path to the file to parse
        :type filename: string
        :returns: lxml.etree._ElementTree - the parsed data"""
        try:
            return lxml.etree.parse(filename)
        except SyntaxError:
            result = self.cmd.run(["xmllint", filename])
            self.LintError("xml-failed-to-parse",
                           "%s fails to parse:\n%s" %
                           (filename, result.stdout + result.stderr))
            return False
        except IOError:
            self.LintError("xml-failed-to-read",
                           "Failed to open file %s" % filename)
            return False

    def validate(self, filename, schemafile, schema=None):
        """ Validate a file against the given schema.

        :param filename: The full path to the file to validate
        :type filename: string
        :param schemafile: The full path to the schema file to
                           validate against
        :type schemafile: string
        :param schema: The loaded schema to validate against.  This
                       can be used to avoid parsing a single schema
                       file for every file that needs to be validated
                       against it.
        :type schema: lxml.etree.Schema
        :returns: bool - True if the file validates, False otherwise
        """
        if schema is None:
            # if no schema object was provided, instantiate one
            schema = self._load_schema(schemafile)
            if not schema:
                return False
        datafile = self.parse(filename)
        if not schema.validate(datafile):
            cmd = ["xmllint"]
            if self.files is None:
                cmd.append("--xinclude")
            cmd.extend(["--noout", "--schema", schemafile, filename])
            result = self.cmd.run(cmd)
            if not result.success:
                self.LintError("xml-failed-to-verify",
                               "%s fails to verify:\n%s" %
                               (filename, result.stdout + result.stderr))
                return False
        return True

    def get_filelists(self):
        """ Get lists of different kinds of files to validate.  This
        doesn't return anything, but it sets
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filelists` to a
        dict whose keys are path globs given in
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filesets` and whose
        values are lists of the full paths to all files in the Bcfg2
        repository (or given with ``bcfg2-lint --stdin``) that match
        the glob."""
        if self.files is not None:
            listfiles = lambda p: fnmatch.filter(self.files,
                                                 os.path.join('*', p))
        else:
            listfiles = lambda p: glob.glob(os.path.join(self.config['repo'],
                                                         p))

        for path in self.filesets.keys():
            if '/**/' in path:
                if self.files is not None:
                    self.filelists[path] = listfiles(path)
                else:  # self.files is None
                    fpath, fname = path.split('/**/')
                    self.filelists[path] = []
                    for root, _, files in \
                            os.walk(os.path.join(self.config['repo'],
                                                 fpath)):
                        self.filelists[path].extend([os.path.join(root, f)
                                                     for f in files
                                                     if f == fname])
            else:
                self.filelists[path] = listfiles(path)

        self.filelists['props'] = listfiles("Properties/*.xml")

    def _load_schema(self, filename):
        """ Load an XML schema document, returning the Schema object
        and raising appropriate lint errors on failure.

        :param filename: The full path to the schema file to load.
        :type filename: string
        :returns: lxml.etree.Schema - The loaded schema data
        """
        try:
            return lxml.etree.XMLSchema(lxml.etree.parse(filename))
        except IOError:
            err = sys.exc_info()[1]
            self.LintError("input-output-error", str(err))
        except lxml.etree.XMLSchemaParseError:
            err = sys.exc_info()[1]
            self.LintError("schema-failed-to-parse",
                           "Failed to process schema %s: %s" %
                           (filename, err))
        return None
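
The validation path above amounts to loading an XSD with lxml and, when validation fails, shelling out to xmllint for a readable report. A minimal sketch of that pattern, with illustrative file names:

import subprocess
import lxml.etree

schemafile = "schemas/bundle.xsd"    # illustrative paths
filename = "Bundler/base.xml"

schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
doc = lxml.etree.parse(filename)
if not schema.validate(doc):
    # re-run with xmllint to get a human-readable error report, as
    # Validate.validate() does when lxml reports a failure
    result = subprocess.run(
        ["xmllint", "--noout", "--schema", schemafile, filename],
        capture_output=True, text=True)
    print(result.stdout + result.stderr)
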
Example #24
0
File: __init__.py  Project: Ank2015/bcfg2
class Client(object):
    """ The main Bcfg2 client class """

    options = Proxy.ComponentProxy.options + [
        Bcfg2.Options.Common.syslog,
        Bcfg2.Options.Common.interactive,
        Bcfg2.Options.BooleanOption(
            "-q", "--quick", help="Disable some checksum verification"),
        Bcfg2.Options.Option(
            cf=('client', 'probe_timeout'),
            type=Bcfg2.Options.Types.timeout,
            help="Timeout when running client probes"),
        Bcfg2.Options.Option(
            "-b", "--only-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Only configure the given bundle(s)'),
        Bcfg2.Options.Option(
            "-B", "--except-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Configure everything except the given bundle(s)'),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.BooleanOption(
                "-Q", "--bundle-quick",
                help='Only verify the given bundle(s)'),
            Bcfg2.Options.Option(
                '-r', '--remove',
                choices=['all', 'services', 'packages', 'users'],
                help='Force removal of additional configuration items')),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.PathOption(
                '-f', '--file', type=argparse.FileType('rb'),
                help='Configure from a file rather than querying the server'),
            Bcfg2.Options.PathOption(
                '-c', '--cache', type=argparse.FileType('wb'),
                help='Store the configuration in a file')),
        Bcfg2.Options.BooleanOption(
            '--exit-on-probe-failure', default=True,
            cf=('client', 'exit_on_probe_failure'),
            help="The client should exit if a probe fails"),
        Bcfg2.Options.Option(
            '-p', '--profile', cf=('client', 'profile'),
            help='Assert the given profile for the host'),
        Bcfg2.Options.Option(
            '-l', '--decision', cf=('client', 'decision'),
            choices=['whitelist', 'blacklist', 'none'],
            help='Run client in server decision list mode'),
        Bcfg2.Options.BooleanOption(
            "-O", "--no-lock", help='Omit lock check'),
        Bcfg2.Options.PathOption(
            cf=('components', 'lockfile'), default='/var/lock/bcfg2.run',
            help='Client lock file'),
        Bcfg2.Options.BooleanOption(
            "-n", "--dry-run", help='Do not actually change the system'),
        Bcfg2.Options.Option(
            "-D", "--drivers", cf=('client', 'drivers'),
            type=Bcfg2.Options.Types.comma_list,
            default=[m[1] for m in walk_packages(path=Tools.__path__)],
            action=ClientDriverAction, help='Client drivers'),
        Bcfg2.Options.BooleanOption(
            "-e", "--show-extra", help='Enable extra entry output'),
        Bcfg2.Options.BooleanOption(
            "-k", "--kevlar", help='Run in bulletproof mode'),
        Bcfg2.Options.BooleanOption(
            "-i", "--only-important",
            help='Only configure the important entries')]

    def __init__(self):
        self.config = None
        self._proxy = None
        self.logger = logging.getLogger('bcfg2')
        self.cmd = Executor(Bcfg2.Options.setup.probe_timeout)
        self.tools = []
        self.times = dict()
        self.times['initialization'] = time.time()

        if Bcfg2.Options.setup.bundle_quick:
            if (not Bcfg2.Options.setup.only_bundles and
                    not Bcfg2.Options.setup.except_bundles):
                self.logger.error("-Q option requires -b or -B")
                raise SystemExit(1)
        if Bcfg2.Options.setup.remove == 'services':
            self.logger.error("Service removal is nonsensical; "
                              "removed services will only be disabled")
        if not Bcfg2.Options.setup.server.startswith('https://'):
            Bcfg2.Options.setup.server = \
                'https://' + Bcfg2.Options.setup.server

        #: A dict of the state of each entry.  Keys are the entries.
        #: Values are boolean: True means that the entry is good,
        #: False means that the entry is bad.
        self.states = {}
        self.whitelist = []
        self.blacklist = []
        self.removal = []
        self.unhandled = []
        self.logger = logging.getLogger(__name__)

    def _probe_failure(self, probename, msg):
        """ handle failure of a probe in the way the user wants us to
        (exit or continue) """
        message = "Failed to execute probe %s: %s" % (probename, msg)
        if Bcfg2.Options.setup.exit_on_probe_failure:
            self.fatal_error(message)
        else:
            self.logger.error(message)

    def run_probe(self, probe):
        """Execute probe."""
        name = probe.get('name')
        self.logger.info("Running probe %s" % name)
        ret = XML.Element("probe-data", name=name, source=probe.get('source'))
        try:
            scripthandle, scriptname = tempfile.mkstemp()
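            # 0x03000000 corresponds to Python 3.0: on Python 3, fdopen()
            # accepts an encoding argument, while Python 2 needs the manual
            # utf-8 encoding used below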
            if sys.hexversion >= 0x03000000:
                script = os.fdopen(scripthandle, 'w',
                                   encoding=Bcfg2.Options.setup.encoding)
            else:
                script = os.fdopen(scripthandle, 'w')
            try:
                script.write("#!%s\n" %
                             (probe.attrib.get('interpreter', '/bin/sh')))
                if sys.hexversion >= 0x03000000:
                    script.write(probe.text)
                else:
                    script.write(probe.text.encode('utf-8'))
                script.close()
                os.chmod(scriptname,
                         stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                         stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
                         stat.S_IWUSR)  # 0755
                rv = self.cmd.run(scriptname)
                if rv.stderr:
                    self.logger.warning("Probe %s has error output: %s" %
                                        (name, rv.stderr))
                if not rv.success:
                    self._probe_failure(name, "Return value %s" % rv.retval)
                self.logger.info("Probe %s has result:" % name)
                self.logger.info(rv.stdout)
                if sys.hexversion >= 0x03000000:
                    ret.text = rv.stdout
                else:
                    ret.text = rv.stdout.decode('utf-8')
            finally:
                os.unlink(scriptname)
        except SystemExit:
            raise
        except:
            self._probe_failure(name, sys.exc_info()[1])
        return ret

    def fatal_error(self, message):
        """Signal a fatal error."""
        self.logger.error("Fatal error: %s" % (message))
        raise SystemExit(1)

    @property
    def proxy(self):
        """ get an XML-RPC proxy to the server """
        if self._proxy is None:
            self._proxy = Proxy.ComponentProxy()
        return self._proxy

    def run_probes(self):
        """ run probes and upload probe data """
        try:
            probes = XML.XML(str(self.proxy.GetProbes()))
        except (Proxy.ProxyError,
                Proxy.CertificateError,
                socket.gaierror,
                socket.error):
            err = sys.exc_info()[1]
            self.fatal_error("Failed to download probes from bcfg2: %s" % err)
        except XML.ParseError:
            err = sys.exc_info()[1]
            self.fatal_error("Server returned invalid probe requests: %s" %
                             err)

        self.times['probe_download'] = time.time()

        # execute probes
        probedata = XML.Element("ProbeData")
        for probe in probes.findall(".//probe"):
            probedata.append(self.run_probe(probe))

        if len(probes.findall(".//probe")) > 0:
            try:
                # upload probe responses
                self.proxy.RecvProbeData(
                    XML.tostring(probedata,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to upload probe data: %s" % err)

        self.times['probe_upload'] = time.time()

    def get_config(self):
        """ load the configuration, either from the cached
        configuration file (-f), or from the server """
        if Bcfg2.Options.setup.file:
            # read config from file
            try:
                self.logger.debug("Reading cached configuration from %s" %
                                  Bcfg2.Options.setup.file.name)
                return Bcfg2.Options.setup.file.read()
            except IOError:
                self.fatal_error("Failed to read cached configuration from: %s"
                                 % Bcfg2.Options.setup.file.name)
        else:
            # retrieve config from server
            if Bcfg2.Options.setup.profile:
                try:
                    self.proxy.AssertProfile(Bcfg2.Options.setup.profile)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to set client profile: %s" % err)

            try:
                self.proxy.DeclareVersion(__version__)
            except (xmlrpclib.Fault,
                    Proxy.ProxyError,
                    Proxy.CertificateError,
                    socket.gaierror,
                    socket.error):
                err = sys.exc_info()[1]
                self.fatal_error("Failed to declare version: %s" % err)

            self.run_probes()

            if Bcfg2.Options.setup.decision in ['whitelist', 'blacklist']:
                try:
                    # TODO: read decision list from --decision-list
                    Bcfg2.Options.setup.decision_list = \
                        self.proxy.GetDecisionList(
                            Bcfg2.Options.setup.decision)
                    self.logger.info("Got decision list from server:")
                    self.logger.info(Bcfg2.Options.setup.decision_list)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to get decision list: %s" % err)

            try:
                rawconfig = self.proxy.GetConfig().encode('utf-8')
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to download configuration from "
                                 "Bcfg2: %s" % err)

            self.times['config_download'] = time.time()

        if Bcfg2.Options.setup.cache:
            try:
                Bcfg2.Options.setup.cache.write(rawconfig)
                os.chmod(Bcfg2.Options.setup.cache.name, 384)  # 0600
            except IOError:
                self.logger.warning("Failed to write config cache file %s" %
                                    (Bcfg2.Options.setup.cache))
            self.times['caching'] = time.time()

        return rawconfig

    def parse_config(self, rawconfig):
        """ Parse the XML configuration received from the Bcfg2 server """
        try:
            self.config = XML.XML(rawconfig)
        except XML.ParseError:
            syntax_error = sys.exc_info()[1]
            self.fatal_error("The configuration could not be parsed: %s" %
                             syntax_error)

        self.load_tools()

        # find entries not handled by any tools
        self.unhandled = [entry for struct in self.config
                          for entry in struct
                          if entry not in self.handled]

        if self.unhandled:
            self.logger.error("The following entries are not handled by any "
                              "tool:")
            for entry in self.unhandled:
                self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'),
                                                entry.get('name')))

        # find duplicates
        self.find_dups(self.config)

        pkgs = [(entry.get('name'), entry.get('origin'))
                for struct in self.config
                for entry in struct
                if entry.tag == 'Package']
        if pkgs:
            self.logger.debug("The following packages are specified in bcfg2:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None])
            self.logger.debug("The following packages are prereqs added by "
                              "Packages:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])

        self.times['config_parse'] = time.time()

    def run(self):
        """Perform client execution phase."""
        # begin configuration
        self.times['start'] = time.time()

        self.logger.info("Starting Bcfg2 client run at %s" %
                         self.times['start'])

        self.parse_config(self.get_config().decode('utf-8'))

        if self.config.tag == 'error':
            self.fatal_error("Server error: %s" % (self.config.text))

        if Bcfg2.Options.setup.bundle_quick:
            newconfig = XML.XML('<Configuration/>')
            for bundle in self.config.getchildren():
                name = bundle.get("name")
                if (name and (name in Bcfg2.Options.setup.only_bundles or
                              name not in Bcfg2.Options.setup.except_bundles)):
                    newconfig.append(bundle)
            self.config = newconfig

        if not Bcfg2.Options.setup.no_lock:
            # check lock here
            try:
                lockfile = open(Bcfg2.Options.setup.lockfile, 'w')
                if locked(lockfile.fileno()):
                    self.fatal_error("Another instance of Bcfg2 is running. "
                                     "If you want to bypass the check, run "
                                     "with the -O/--no-lock option")
            except SystemExit:
                raise
            except:
                lockfile = None
                self.logger.error("Failed to open lockfile %s: %s" %
                                  (Bcfg2.Options.setup.lockfile,
                                   sys.exc_info()[1]))

        # execute the configuration
        self.Execute()

        if not Bcfg2.Options.setup.no_lock:
            # unlock here
            if lockfile:
                try:
                    fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
                    os.remove(Bcfg2.Options.setup.lockfile)
                except OSError:
                    self.logger.error("Failed to unlock lockfile %s" %
                                      lockfile.name)

        if (not Bcfg2.Options.setup.file and
                not Bcfg2.Options.setup.bundle_quick):
            # upload statistics
            feedback = self.GenerateStats()

            try:
                self.proxy.RecvStats(
                    XML.tostring(feedback,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.logger.error("Failed to upload configuration statistics: "
                                  "%s" % err)
                raise SystemExit(2)

        self.logger.info("Finished Bcfg2 client run at %s" % time.time())

    def load_tools(self):
        """ Load all applicable client tools """
        for tool in Bcfg2.Options.setup.drivers:
            try:
                self.tools.append(tool(self.config))
            except Tools.ToolInstantiationError:
                continue
            except:
                self.logger.error("Failed to instantiate tool %s" % tool,
                                  exc_info=1)

        for tool in self.tools[:]:
            for conflict in getattr(tool, 'conflicts', []):
                for item in self.tools:
                    if item.name == conflict:
                        self.tools.remove(item)

        self.logger.info("Loaded tool drivers:")
        self.logger.info([tool.name for tool in self.tools])

        deprecated = [tool.name for tool in self.tools if tool.deprecated]
        if deprecated:
            self.logger.warning("Loaded deprecated tool drivers:")
            self.logger.warning(deprecated)
        experimental = [tool.name for tool in self.tools if tool.experimental]
        if experimental:
            self.logger.warning("Loaded experimental tool drivers:")
            self.logger.warning(experimental)

    def find_dups(self, config):
        """ Find duplicate entries and warn about them """
        entries = dict()
        for struct in config:
            for entry in struct:
                for tool in self.tools:
                    if tool.handlesEntry(entry):
                        pkey = tool.primarykey(entry)
                        if pkey in entries:
                            entries[pkey] += 1
                        else:
                            entries[pkey] = 1
        multi = [e for e, c in entries.items() if c > 1]
        if multi:
            self.logger.debug("The following entries are included multiple "
                              "times:")
            for entry in multi:
                self.logger.debug(entry)

    def promptFilter(self, msg, entries):
        """Filter a supplied list based on user input."""
        ret = []
        entries.sort(key=lambda e: e.tag + ":" + e.get('name'))
        for entry in entries[:]:
            if entry in self.unhandled:
                # don't prompt for entries that can't be installed
                continue
            if 'qtext' in entry.attrib:
                iprompt = entry.get('qtext')
            else:
                iprompt = msg % (entry.tag, entry.get('name'))
            if prompt(iprompt):
                ret.append(entry)
        return ret

    def __getattr__(self, name):
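        # aggregate the 'extra', 'handled', 'modified' and '__important__'
        # lists across all loaded client tools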
        if name in ['extra', 'handled', 'modified', '__important__']:
            ret = []
            for tool in self.tools:
                ret += getattr(tool, name)
            return ret
        elif name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(name)

    def InstallImportant(self):
        """Install important entries

        We also process decision mode (whitelist/blacklist) filtering here,
        because we want to prevent non-whitelisted or blacklisted 'important'
        entries from being installed before the decision mode has been
        applied on the client.
        """
        # Need to process decision stuff early so that dryrun mode
        # works with it
        self.whitelist = [entry for entry in self.states
                          if not self.states[entry]]
        if not Bcfg2.Options.setup.file:
            if Bcfg2.Options.setup.decision == 'whitelist':
                dwl = Bcfg2.Options.setup.decision_list
                w_to_rem = [e for e in self.whitelist
                            if not matches_white_list(e, dwl)]
                if w_to_rem:
                    self.logger.info("In whitelist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in w_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in w_to_rem]
            elif Bcfg2.Options.setup.decision == 'blacklist':
                b_to_rem = \
                    [e for e in self.whitelist
                     if not
                     passes_black_list(e, Bcfg2.Options.setup.decision_list)]
                if b_to_rem:
                    self.logger.info("In blacklist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in b_to_rem]

        # take care of important entries first
        if (not Bcfg2.Options.setup.dry_run or
                Bcfg2.Options.setup.only_important):
            important_installs = set()
            for parent in self.config.findall(".//Path/.."):
                name = parent.get("name")
                if not name or (name in Bcfg2.Options.setup.except_bundles and
                                name not in Bcfg2.Options.setup.only_bundles):
                    continue
                for cfile in parent.findall("./Path"):
                    if (cfile.get('name') not in self.__important__ or
                            cfile.get('type') != 'file' or
                            cfile not in self.whitelist):
                        continue
                    tools = [t for t in self.tools
                             if t.handlesEntry(cfile) and t.canVerify(cfile)]
                    if not tools:
                        continue
                    if Bcfg2.Options.setup.dry_run:
                        important_installs.add(cfile)
                        continue
                    if (Bcfg2.Options.setup.interactive and not
                            self.promptFilter("Install %s: %s? (y/N):",
                                              [cfile])):
                        self.whitelist.remove(cfile)
                        continue
                    try:
                        self.states[cfile] = tools[0].InstallPath(cfile)
                        if self.states[cfile]:
                            tools[0].modified.append(cfile)
                    except:  # pylint: disable=W0702
                        self.logger.error("Unexpected tool failure",
                                          exc_info=1)
                    cfile.set('qtext', '')
                    if tools[0].VerifyPath(cfile, []):
                        self.whitelist.remove(cfile)
            if Bcfg2.Options.setup.dry_run and len(important_installs) > 0:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                  for e in important_installs])

    def Inventory(self):
        """
           Verify all entries,
           find extra entries,
           and build up workqueues

        """
        # initialize all states
        for struct in self.config.getchildren():
            for entry in struct.getchildren():
                self.states[entry] = False
        for tool in self.tools:
            try:
                self.states.update(tool.Inventory())
            except KeyboardInterrupt:
                raise
            except:  # pylint: disable=W0702
                self.logger.error("%s.Inventory() call failed:" % tool.name,
                                  exc_info=1)

    def Decide(self):  # pylint: disable=R0912
        """Set self.whitelist based on user interaction."""
        iprompt = "Install %s: %s? (y/N): "
        rprompt = "Remove %s: %s? (y/N): "
        if Bcfg2.Options.setup.remove:
            if Bcfg2.Options.setup.remove == 'all':
                self.removal = self.extra
            elif Bcfg2.Options.setup.remove == 'services':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Service']
            elif Bcfg2.Options.setup.remove == 'packages':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Package']
            elif Bcfg2.Options.setup.remove == 'users':
                self.removal = [entry for entry in self.extra
                                if entry.tag in ['POSIXUser', 'POSIXGroup']]

        candidates = [entry for entry in self.states
                      if not self.states[entry]]

        if Bcfg2.Options.setup.dry_run:
            if self.whitelist:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.whitelist])
                self.whitelist = []
            if self.removal:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry removal for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.removal])
            self.removal = []

        # Here is where most of the work goes
        # first perform bundle filtering
        all_bundle_names = [b.get('name')
                            for b in self.config.findall('./Bundle')]
        bundles = self.config.getchildren()
        if Bcfg2.Options.setup.only_bundles:
            # warn if non-existent bundle given
            for bundle in Bcfg2.Options.setup.only_bundles:
                if bundle not in all_bundle_names:
                    self.logger.info("Warning: Bundle %s not found" % bundle)
            bundles = [b for b in bundles
                       if b.get('name') in Bcfg2.Options.setup.only_bundles]
        if Bcfg2.Options.setup.except_bundles:
            # warn if non-existent bundle given
            if not Bcfg2.Options.setup.bundle_quick:
                for bundle in Bcfg2.Options.setup.except_bundles:
                    if bundle not in all_bundle_names:
                        self.logger.info("Warning: Bundle %s not found" %
                                         bundle)
            bundles = [
                b for b in bundles
                if b.get('name') not in Bcfg2.Options.setup.except_bundles]
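        # keep only whitelist entries that belong to a bundle that survived
        # the -b/-B filtering above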
        self.whitelist = [e for e in self.whitelist
                          if any(e in b for b in bundles)]

        # first process prereq actions
        for bundle in bundles[:]:
            if bundle.tag == 'Bundle':
                bmodified = any((item in self.whitelist or
                                 item in self.modified) for item in bundle)
            else:
                bmodified = False
            actions = [a for a in bundle.findall('./Action')
                       if (a.get('timing') in ['pre', 'both'] and
                           (bmodified or a.get('when') == 'always'))]
            # now we process all "pre" and "both" actions that are either
            # always or the bundle has been modified
            if Bcfg2.Options.setup.interactive:
                self.promptFilter(iprompt, actions)
            self.DispatchInstallCalls(actions)

            if bundle.tag != 'Bundle':
                continue

            # if any prerequisite action failed, drop the bundle and
            # remove its entries from the whitelist
            if not all(self.states[a] for a in actions):
                # report the bundle that is being forced off and its entries
                self.logger.info("%s %s failed prerequisite action" %
                                 (bundle.tag, bundle.get('name')))
                bundles.remove(bundle)
                b_to_remv = [ent for ent in self.whitelist if ent in bundle]
                if b_to_remv:
                    self.logger.info("Not installing entries from %s %s" %
                                     (bundle.tag, bundle.get('name')))
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_remv])
                    for ent in b_to_remv:
                        self.whitelist.remove(ent)

        self.logger.debug("Installing entries in the following bundle(s):")
        self.logger.debug("  %s" % ", ".join(b.get("name") for b in bundles
                                             if b.get("name")))

        if Bcfg2.Options.setup.interactive:
            self.whitelist = self.promptFilter(iprompt, self.whitelist)
            self.removal = self.promptFilter(rprompt, self.removal)

        for entry in candidates:
            if entry not in self.whitelist:
                self.blacklist.append(entry)

    def DispatchInstallCalls(self, entries):
        """Dispatch install calls to underlying tools."""
        for tool in self.tools:
            handled = [entry for entry in entries if tool.canInstall(entry)]
            if not handled:
                continue
            try:
                self.states.update(tool.Install(handled))
            except KeyboardInterrupt:
                raise
            except:  # pylint: disable=W0702
                self.logger.error("%s.Install() call failed:" % tool.name,
                                  exc_info=1)

    def Install(self):
        """Install all entries."""
        self.DispatchInstallCalls(self.whitelist)
        mods = self.modified
        mbundles = [struct for struct in self.config.findall('Bundle')
                    if any(True for mod in mods if mod in struct)]

        if self.modified:
            # Handle Bundle interdeps
            if mbundles:
                self.logger.info("The Following Bundles have been modified:")
                self.logger.info([mbun.get('name') for mbun in mbundles])
            tbm = [(t, b) for t in self.tools for b in mbundles]
            for tool, bundle in tbm:
                try:
                    self.states.update(tool.Inventory(structures=[bundle]))
                except KeyboardInterrupt:
                    raise
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Inventory() call failed:" %
                                      tool.name,
                                      exc_info=1)
            clobbered = [entry for bundle in mbundles for entry in bundle
                         if (not self.states[entry] and
                             entry not in self.blacklist)]
            if clobbered:
                self.logger.debug("Found clobbered entries:")
                self.logger.debug(["%s:%s" % (entry.tag, entry.get('name'))
                                   for entry in clobbered])
                if not Bcfg2.Options.setup.interactive:
                    self.DispatchInstallCalls(clobbered)

        all_bundles = self.config.findall('./Bundle')
        mbundles.extend(self._get_all_modified_bundles(mbundles, all_bundles))

        for bundle in all_bundles:
            if (Bcfg2.Options.setup.only_bundles and
                    bundle.get('name') not in
                    Bcfg2.Options.setup.only_bundles):
                # prune out unspecified bundles when running with -b
                continue
            if bundle in mbundles:
                continue

            self.logger.debug("Bundle %s was not modified" %
                              bundle.get('name'))
            for tool in self.tools:
                try:
                    self.states.update(tool.BundleNotUpdated(bundle))
                except KeyboardInterrupt:
                    raise
                except:  # pylint: disable=W0702
                    self.logger.error('%s.BundleNotUpdated(%s:%s) call failed:'
                                      % (tool.name, bundle.tag,
                                         bundle.get('name')), exc_info=1)

        for indep in self.config.findall('.//Independent'):
            for tool in self.tools:
                try:
                    self.states.update(tool.BundleNotUpdated(indep))
                except KeyboardInterrupt:
                    raise
                except:  # pylint: disable=W0702
                    self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:"
                                      % (tool.name, indep.tag,
                                         indep.get("name")), exc_info=1)

    def _get_all_modified_bundles(self, mbundles, all_bundles):
        """This gets all modified bundles by calling BundleUpdated until no
        new bundles get added to the modification list."""
        new_mbundles = mbundles
        add_mbundles = []

        while new_mbundles:
            for bundle in self.config.findall('./Bundle'):
                if (Bcfg2.Options.setup.only_bundles and
                        bundle.get('name') not in
                        Bcfg2.Options.setup.only_bundles):
                    # prune out unspecified bundles when running with -b
                    continue
                if bundle not in new_mbundles:
                    continue

                self.logger.debug('Bundle %s was modified' %
                                  bundle.get('name'))
                for tool in self.tools:
                    try:
                        self.states.update(tool.BundleUpdated(bundle))
                    except:  # pylint: disable=W0702
                        self.logger.error('%s.BundleUpdated(%s:%s) call '
                                          'failed:' % (tool.name, bundle.tag,
                                                       bundle.get("name")),
                                          exc_info=1)

            mods = self.modified
            new_mbundles = [struct for struct in all_bundles
                            if any(True for mod in mods if mod in struct) and
                            struct not in mbundles + add_mbundles]
            add_mbundles.extend(new_mbundles)

        return add_mbundles

    def Remove(self):
        """Remove extra entries."""
        for tool in self.tools:
            extras = [entry for entry in self.removal
                      if tool.handlesEntry(entry)]
            if extras:
                try:
                    tool.Remove(extras)
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Remove() failed" % tool.name,
                                      exc_info=1)

    def CondDisplayState(self, phase):
        """Conditionally print tracing information."""
        self.logger.info('Phase: %s' % phase)
        self.logger.info('Correct entries:        %d' %
                         list(self.states.values()).count(True))
        self.logger.info('Incorrect entries:      %d' %
                         list(self.states.values()).count(False))
        if phase == 'final' and list(self.states.values()).count(False):
            for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" +
                                e.get('name')):
                if not self.states[entry]:
                    etype = entry.get('type')
                    if etype:
                        self.logger.info("%s:%s:%s" % (entry.tag, etype,
                                                       entry.get('name')))
                    else:
                        self.logger.info("%s:%s" % (entry.tag,
                                                    entry.get('name')))
        self.logger.info('Total managed entries: %d' %
                         len(list(self.states.values())))
        self.logger.info('Unmanaged entries:      %d' % len(self.extra))
        if phase == 'final' and Bcfg2.Options.setup.show_extra:
            for entry in sorted(self.extra,
                                key=lambda e: e.tag + ":" + e.get('name')):
                etype = entry.get('type')
                if etype:
                    self.logger.info("%s:%s:%s" % (entry.tag, etype,
                                                   entry.get('name')))
                else:
                    self.logger.info("%s:%s" % (entry.tag,
                                                entry.get('name')))

        if ((list(self.states.values()).count(False) == 0) and not self.extra):
            self.logger.info('All entries correct.')

    def ReInventory(self):
        """Recheck everything."""
        if not Bcfg2.Options.setup.dry_run and Bcfg2.Options.setup.kevlar:
            self.logger.info("Rechecking system inventory")
            self.Inventory()

    def Execute(self):
        """Run all methods."""
        self.Inventory()
        self.times['inventory'] = time.time()
        self.CondDisplayState('initial')
        self.InstallImportant()
        if not Bcfg2.Options.setup.only_important:
            self.Decide()
            self.Install()
            self.times['install'] = time.time()
            self.Remove()
            self.times['remove'] = time.time()

        if self.modified:
            self.ReInventory()
            self.times['reinventory'] = time.time()
        self.times['finished'] = time.time()
        self.CondDisplayState('final')

    def GenerateStats(self):
        """Generate XML summary of execution statistics."""
        states = {}
        for (item, val) in list(self.states.items()):
            if not Bcfg2.Options.setup.only_important or \
               item.get('important', 'false').lower() == 'true':
                states[item] = val

        feedback = XML.Element("upload-statistics")
        stats = XML.SubElement(feedback,
                               'Statistics', total=str(len(states)),
                               version='2.0',
                               revision=self.config.get('revision', '-1'))
        flags = XML.SubElement(stats, "Flags")
        XML.SubElement(flags, "Flag", name="dry_run",
                       value=str(Bcfg2.Options.setup.dry_run))
        XML.SubElement(flags, "Flag", name="only_important",
                       value=str(Bcfg2.Options.setup.only_important))
        good_entries = [key for key, val in list(states.items()) if val]
        good = len(good_entries)
        stats.set('good', str(good))
        if any(not val for val in list(states.values())):
            stats.set('state', 'dirty')
        else:
            stats.set('state', 'clean')

        # List the modified, extra, good, and bad elements of the configuration
        for (data, ename) in [(self.modified, 'Modified'),
                              (self.extra, "Extra"),
                              (good_entries, "Good"),
                              ([entry for entry in states
                                if not states[entry]], "Bad")]:
            container = XML.SubElement(stats, ename)
            for item in data:
                item.set('qtext', '')
                container.append(item)
                item.text = None

        timeinfo = XML.Element("OpStamps")
        feedback.append(stats)
        for (event, timestamp) in list(self.times.items()):
            timeinfo.set(event, str(timestamp))
        stats.append(timeinfo)
        return feedback
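
run_probe above follows a simple pattern: write the probe body to a temporary script, make it executable, run it, and treat stdout as the probe result. A minimal standalone sketch of that pattern, with a made-up probe body and plain subprocess standing in for the client's Executor:

import os
import stat
import subprocess
import tempfile

probe_text = "hostname -f\n"          # made-up probe body
interpreter = "/bin/sh"

handle, scriptname = tempfile.mkstemp()
try:
    with os.fdopen(handle, "w") as script:
        script.write("#!%s\n" % interpreter)
        script.write(probe_text)
    # 0755, the same mode the client code above sets on probe scripts
    os.chmod(scriptname,
             stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
             stat.S_IROTH | stat.S_IXOTH)
    result = subprocess.run([scriptname], capture_output=True, text=True)
    probe_data = result.stdout        # this becomes the probe result text
finally:
    os.unlink(scriptname)
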
Example #25
0
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
    """ Ensure that the repo validates """

    def __init__(self, *args, **kwargs):
        Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
        self.filesets = \
            {"Metadata/groups.xml": "metadata.xsd",
             "Metadata/clients.xml": "clients.xsd",
             "Cfg/**/info.xml": "info.xsd",
             "Cfg/**/privkey.xml": "privkey.xsd",
             "Cfg/**/pubkey.xml": "pubkey.xsd",
             "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd",
             "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd",
             "SSHbase/**/info.xml": "info.xsd",
             "SSLCA/**/info.xml": "info.xsd",
             "TGenshi/**/info.xml": "info.xsd",
             "TCheetah/**/info.xml": "info.xsd",
             "Bundler/*.xml": "bundle.xsd",
             "Bundler/*.genshi": "bundle.xsd",
             "Pkgmgr/*.xml": "pkglist.xsd",
             "Rules/*.xml": "rules.xsd",
             "Defaults/*.xml": "defaults.xsd",
             "etc/report-configuration.xml": "report-configuration.xsd",
             "Deps/*.xml": "deps.xsd",
             "Decisions/*.xml": "decisions.xsd",
             "Packages/sources.xml": "packages.xsd",
             "GroupPatterns/config.xml": "grouppatterns.xsd",
             "NagiosGen/config.xml": "nagiosgen.xsd",
             "FileProbes/config.xml": "fileprobes.xsd",
             "SSLCA/**/cert.xml": "sslca-cert.xsd",
             "SSLCA/**/key.xml": "sslca-key.xsd",
             "GroupLogic/groups.xml": "grouplogic.xsd"
             }

        self.filelists = {}
        self.get_filelists()
        self.cmd = Executor()

    def Run(self):
        schemadir = self.config['schema']

        for path, schemaname in self.filesets.items():
            try:
                filelist = self.filelists[path]
            except KeyError:
                filelist = []

            if filelist:
                # avoid loading schemas for empty file lists
                schemafile = os.path.join(schemadir, schemaname)
                schema = self._load_schema(schemafile)
                if schema:
                    for filename in filelist:
                        self.validate(filename, schemafile, schema=schema)

        self.check_properties()

    @classmethod
    def Errors(cls):
        return {"schema-failed-to-parse": "warning",
                "properties-schema-not-found": "warning",
                "xml-failed-to-parse": "error",
                "xml-failed-to-read": "error",
                "xml-failed-to-verify": "error",
                "input-output-error": "error"}

    def check_properties(self):
        """ check Properties files against their schemas """
        for filename in self.filelists['props']:
            schemafile = "%s.xsd" % os.path.splitext(filename)[0]
            if os.path.exists(schemafile):
                self.validate(filename, schemafile)
            else:
                self.LintError("properties-schema-not-found",
                               "No schema found for %s" % filename)
                # ensure that it at least parses
                self.parse(filename)

    def parse(self, filename):
        """ Parse an XML file, raising the appropriate LintErrors if
        it can't be parsed or read.  Return the
        lxml.etree._ElementTree parsed from the file. """
        try:
            return lxml.etree.parse(filename)
        except SyntaxError:
            result = self.cmd.run(["xmllint", filename])
            self.LintError("xml-failed-to-parse",
                           "%s fails to parse:\n%s" %
                           (filename, result.stdout + result.stderr))
            return False
        except IOError:
            self.LintError("xml-failed-to-read",
                           "Failed to open file %s" % filename)
            return False

    def validate(self, filename, schemafile, schema=None):
        """validate a file against the given lxml.etree.Schema.
        return True on success, False on failure """
        if schema is None:
            # if no schema object was provided, instantiate one
            schema = self._load_schema(schemafile)
            if not schema:
                return False
        datafile = self.parse(filename)
        if not schema.validate(datafile):
            cmd = ["xmllint"]
            if self.files is None:
                cmd.append("--xinclude")
            cmd.extend(["--noout", "--schema", schemafile, filename])
            result = self.cmd.run(cmd)
            if not result.success:
                self.LintError("xml-failed-to-verify",
                               "%s fails to verify:\n%s" %
                               (filename, result.stdout + result.stderr))
                return False
        return True

    def get_filelists(self):
        """ get lists of different kinds of files to validate """
        if self.files is not None:
            listfiles = lambda p: fnmatch.filter(self.files,
                                                 os.path.join('*', p))
        else:
            listfiles = lambda p: glob.glob(os.path.join(self.config['repo'],
                                                         p))

        for path in self.filesets.keys():
            if '/**/' in path:
                if self.files is not None:
                    self.filelists[path] = listfiles(path)
                else:  # self.files is None
                    fpath, fname = path.split('/**/')
                    self.filelists[path] = []
                    for root, _, files in \
                            os.walk(os.path.join(self.config['repo'],
                                                 fpath)):
                        self.filelists[path].extend([os.path.join(root, f)
                                                     for f in files
                                                     if f == fname])
            else:
                self.filelists[path] = listfiles(path)

        self.filelists['props'] = listfiles("Properties/*.xml")

    def _load_schema(self, filename):
        """ load an XML schema document, returning the Schema object """
        try:
            return lxml.etree.XMLSchema(lxml.etree.parse(filename))
        except IOError:
            err = sys.exc_info()[1]
            self.LintError("input-output-error", str(err))
        except lxml.etree.XMLSchemaParseError:
            err = sys.exc_info()[1]
            self.LintError("schema-failed-to-parse",
                           "Failed to process schema %s: %s" %
                           (filename, err))
        return None
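
The validator above leans on lxml for schema checks and only shells out to xmllint to produce a more readable error report. A minimal standalone sketch of the same lxml pattern, outside the bcfg2-lint plumbing (the file and schema paths are illustrative):

import lxml.etree

def validate_xml(filename, schemafile):
    """Validate one XML file against an XSD schema; return True/False."""
    schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
    doc = lxml.etree.parse(filename)
    if schema.validate(doc):
        return True
    # schema.error_log records why validation failed
    for error in schema.error_log:
        print("%s:%s: %s" % (filename, error.line, error.message))
    return False

validate_xml("Bundler/base.xml", "/usr/share/bcfg2/schemas/bundle.xsd")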
Example #26
0
class CfgPrivateKeyCreator(XMLCfgCreator):
    """The CfgPrivateKeyCreator creates SSH keys on the fly. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within privkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['privkey.xml']

    cfg_section = "sshkeys"
    options = [
        Bcfg2.Options.Option(
            cf=("sshkeys", "category"), dest="sshkeys_category",
            help="Metadata category that generated SSH keys are specific to"),
        Bcfg2.Options.Option(
            cf=("sshkeys", "passphrase"), dest="sshkeys_passphrase",
            help="Passphrase used to encrypt generated SSH private keys")]

    def __init__(self, fname):
        XMLCfgCreator.__init__(self, fname)
        pubkey_path = os.path.dirname(self.name) + ".pub"
        pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
        self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
        self.cmd = Executor()

    def _gen_keypair(self, metadata, spec=None):
        """ Generate a keypair according to the given client medata
        and key specification.

        :param metadata: The client metadata to generate keys for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: tuple - (private key data, public key data)
        """
        if spec is None:
            spec = self.XMLMatch(metadata)

        # set key parameters
        ktype = "rsa"
        bits = None
        params = spec.find("Params")
        if params is not None:
            bits = params.get("bits")
            ktype = params.get("type", ktype)
        try:
            passphrase = spec.find("Passphrase").text
        except AttributeError:
            passphrase = ''
        tempdir = tempfile.mkdtemp()
        try:
            filename = os.path.join(tempdir, "privkey")

            # generate key pair
            cmd = ["ssh-keygen", "-f", filename, "-t", ktype]
            if bits:
                cmd.extend(["-b", bits])
            cmd.append("-N")
            log_cmd = cmd[:]
            cmd.append(passphrase)
            if passphrase:
                log_cmd.append("******")
            else:
                log_cmd.append("''")
            self.debug_log("Cfg: Generating new SSH key pair: %s" %
                           " ".join(log_cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError("Cfg: Failed to generate SSH key pair "
                                       "at %s for %s: %s" %
                                       (filename, metadata.hostname,
                                        result.error))
            elif result.stderr:
                self.logger.warning("Cfg: Generated SSH key pair at %s for %s "
                                    "with errors: %s" % (filename,
                                                         metadata.hostname,
                                                         result.stderr))
            return (open(filename).read(), open(filename + ".pub").read())
        finally:
            shutil.rmtree(tempdir)

    # pylint: disable=W0221
    def create_data(self, entry, metadata):
        """ Create data for the given entry on the given client

        :param entry: The abstract entry to create data for.  This
                      will not be modified
        :type entry: lxml.etree._Element
        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :returns: string - The private key data
        """
        spec = self.XMLMatch(metadata)
        specificity = self.get_specificity(metadata)
        privkey, pubkey = self._gen_keypair(metadata, spec)

        # write the public key, stripping the comment and
        # replacing it with a comment that specifies the filename.
        kdata = pubkey.split()[:2]
        kdata.append(self.pubkey_creator.get_filename(**specificity))
        pubkey = " ".join(kdata) + "\n"
        self.pubkey_creator.write_data(pubkey, **specificity)

        # encrypt the private key, write to the proper place, and
        # return it
        self.write_data(privkey, **specificity)
        return privkey
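
Stripped of the Executor wrapper and logging, _gen_keypair reduces to a single ssh-keygen run in a throwaway directory. A hedged sketch using plain subprocess (the key type, size, and empty passphrase are illustrative defaults):

import os
import shutil
import subprocess
import tempfile

def gen_keypair(ktype="rsa", bits="2048", passphrase=""):
    """Generate a keypair and return (private key data, public key data)."""
    tempdir = tempfile.mkdtemp()
    try:
        filename = os.path.join(tempdir, "privkey")
        subprocess.check_call(["ssh-keygen", "-q", "-f", filename,
                               "-t", ktype, "-b", bits, "-N", passphrase])
        return (open(filename).read(), open(filename + ".pub").read())
    finally:
        shutil.rmtree(tempdir)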
Example #27
0
class CfgPrivateKeyCreator(CfgCreator, StructFile):
    """The CfgPrivateKeyCreator creates SSH keys on the fly. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within privkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['privkey.xml']

    def __init__(self, fname):
        CfgCreator.__init__(self, fname)
        StructFile.__init__(self, fname)

        pubkey_path = os.path.dirname(self.name) + ".pub"
        pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
        self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
        self.setup = get_option_parser()
        self.cmd = Executor()
    __init__.__doc__ = CfgCreator.__init__.__doc__

    @property
    def category(self):
        """ The name of the metadata category that generated keys are
        specific to """
        if (self.setup.cfp.has_section("sshkeys") and
            self.setup.cfp.has_option("sshkeys", "category")):
            return self.setup.cfp.get("sshkeys", "category")
        return None

    @property
    def passphrase(self):
        """ The passphrase used to encrypt private keys """
        if (HAS_CRYPTO and
            self.setup.cfp.has_section("sshkeys") and
            self.setup.cfp.has_option("sshkeys", "passphrase")):
            return Bcfg2.Server.Encryption.get_passphrases()[
                self.setup.cfp.get("sshkeys", "passphrase")]
        return None

    def handle_event(self, event):
        CfgCreator.handle_event(self, event)
        StructFile.HandleEvent(self, event)
    handle_event.__doc__ = CfgCreator.handle_event.__doc__

    def _gen_keypair(self, metadata, spec=None):
        """ Generate a keypair according to the given client medata
        and key specification.

        :param metadata: The client metadata to generate keys for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: string - The filename of the private key
        """
        if spec is None:
            spec = self.XMLMatch(metadata)

        # set key parameters
        ktype = "rsa"
        bits = None
        params = spec.find("Params")
        if params is not None:
            bits = params.get("bits")
            ktype = params.get("type", ktype)
        try:
            passphrase = spec.find("Passphrase").text
        except AttributeError:
            passphrase = ''
        tempdir = tempfile.mkdtemp()
        try:
            filename = os.path.join(tempdir, "privkey")

            # generate key pair
            cmd = ["ssh-keygen", "-f", filename, "-t", ktype]
            if bits:
                cmd.extend(["-b", bits])
            cmd.append("-N")
            log_cmd = cmd[:]
            cmd.append(passphrase)
            if passphrase:
                log_cmd.append("******")
            else:
                log_cmd.append("''")
            self.debug_log("Cfg: Generating new SSH key pair: %s" %
                           " ".join(log_cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError("Cfg: Failed to generate SSH key pair "
                                       "at %s for %s: %s" %
                                       (filename, metadata.hostname,
                                        result.error))
            elif result.stderr:
                self.logger.warning("Cfg: Generated SSH key pair at %s for %s "
                                    "with errors: %s" % (filename,
                                                         metadata.hostname,
                                                         result.stderr))
            return filename
        except:
            shutil.rmtree(tempdir)
            raise

    def get_specificity(self, metadata, spec=None):
        """ Get config settings for key generation specificity
        (per-host or per-group).

        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: dict - A dict of specificity arguments suitable for
                  passing to
                  :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.write_data`
                  or
                  :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.get_filename`
        """
        if spec is None:
            spec = self.XMLMatch(metadata)
        category = spec.get("category", self.category)
        if category is None:
            per_host_default = "true"
        else:
            per_host_default = "false"
        per_host = spec.get("perhost", per_host_default).lower() == "true"

        specificity = dict(host=metadata.hostname)
        if category and not per_host:
            group = metadata.group_in_category(category)
            if group:
                specificity = dict(group=group,
                                   prio=int(spec.get("priority", 50)))
            else:
                self.logger.info("Cfg: %s has no group in category %s, "
                                 "creating host-specific key" %
                                 (metadata.hostname, category))
        return specificity

    # pylint: disable=W0221
    def create_data(self, entry, metadata, return_pair=False):
        """ Create data for the given entry on the given client

        :param entry: The abstract entry to create data for.  This
                      will not be modified
        :type entry: lxml.etree._Element
        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param return_pair: Return a tuple of ``(public key, private
                            key)`` instead of just the private key.
                            This is used by
                            :class:`Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CfgPublicKeyCreator`
                            to create public keys as requested.
        :type return_pair: bool
        :returns: string - The private key data
        :returns: tuple - Tuple of ``(public key, private key)``, if
                  ``return_pair`` is set to True
        """
        spec = self.XMLMatch(metadata)
        specificity = self.get_specificity(metadata, spec)
        filename = self._gen_keypair(metadata, spec)

        try:
            # write the public key, stripping the comment and
            # replacing it with a comment that specifies the filename.
            kdata = open(filename + ".pub").read().split()[:2]
            kdata.append(self.pubkey_creator.get_filename(**specificity))
            pubkey = " ".join(kdata) + "\n"
            self.pubkey_creator.write_data(pubkey, **specificity)

            # encrypt the private key, write to the proper place, and
            # return it
            privkey = open(filename).read()
            if HAS_CRYPTO and self.passphrase:
                self.debug_log("Cfg: Encrypting key data at %s" % filename)
                privkey = Bcfg2.Server.Encryption.ssl_encrypt(privkey,
                                                              self.passphrase)
                specificity['ext'] = '.crypt'

            self.write_data(privkey, **specificity)

            if return_pair:
                return (pubkey, privkey)
            else:
                return privkey
        finally:
            shutil.rmtree(os.path.dirname(filename))
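
The interesting decision in this variant is get_specificity(): generated keys are host-specific unless a metadata category is configured and the spec does not force per-host keys. A condensed sketch of that decision, with plain values standing in for client metadata (hostnames, category, and group names are made up):

def key_specificity(hostname, group_in_category=None, category=None,
                    perhost=None, priority=50):
    """Return the write_data()/get_filename() arguments a key would use."""
    if perhost is None:
        # default: per-host keys unless a category is configured
        perhost = category is None
    if category and not perhost and group_in_category:
        return dict(group=group_in_category, prio=priority)
    return dict(host=hostname)

print(key_specificity("foo.example.com"))
# {'host': 'foo.example.com'}
print(key_specificity("foo.example.com", group_in_category="rhel",
                      category="os"))
# {'group': 'rhel', 'prio': 50}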
Example #28
0
class CfgPublicKeyCreator(CfgCreator, StructFile):
    """ .. currentmodule:: Bcfg2.Server.Plugins.Cfg

    The CfgPublicKeyCreator creates SSH public keys on the fly. It is
    invoked by :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to
    handle the creation of the public key, and can also call
    :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to trigger the
    creation of a keypair when a public key is created. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within pubkey.xml
    __specific__ = False

    #: Handle XML specifications of public keys
    __basenames__ = ['pubkey.xml']

    #: No text content on any tags, so encryption support disabled
    encryption = False

    def __init__(self, fname):
        CfgCreator.__init__(self, fname)
        StructFile.__init__(self, fname)
        self.cfg = get_cfg()
        self.core = self.cfg.core
        self.cmd = Executor()

    def create_data(self, entry, metadata):
        if entry.get("name").endswith(".pub"):
            privkey = entry.get("name")[:-4]
        else:
            raise CfgCreationError("Cfg: Could not determine private key for "
                                   "%s: Filename does not end in .pub" %
                                   entry.get("name"))

        privkey_entry = lxml.etree.Element("Path", name=privkey)
        try:
            self.core.Bind(privkey_entry, metadata)
        except PluginExecutionError:
            raise CfgCreationError("Cfg: Could not bind %s (private key for "
                                   "%s): %s" % (privkey, self.name,
                                                sys.exc_info()[1]))

        try:
            eset = self.cfg.entries[privkey]
            creator = eset.best_matching(metadata,
                                         eset.get_handlers(metadata,
                                                           CfgCreator))
        except KeyError:
            raise CfgCreationError("Cfg: No private key defined for %s (%s)" %
                                   (self.name, privkey))
        except PluginExecutionError:
            raise CfgCreationError("Cfg: No privkey.xml defined for %s "
                                   "(private key for %s)" % (privkey,
                                                             self.name))

        specificity = creator.get_specificity(metadata)
        fname = self.get_filename(**specificity)

        # if the private key didn't exist, then creating it may have
        # created the public key, too.  check for it first.
        if os.path.exists(fname):
            return open(fname).read()
        else:
            # generate public key from private key
            fd, privfile = tempfile.mkstemp()
            try:
                os.fdopen(fd, 'w').write(privkey_entry.text)
                cmd = ["ssh-keygen", "-y", "-f", privfile]
                self.debug_log("Cfg: Extracting SSH public key from %s: %s" %
                               (privkey, " ".join(cmd)))
                result = self.cmd.run(cmd)
                if not result.success:
                    raise CfgCreationError("Cfg: Failed to extract public key "
                                           "from %s: %s" % (privkey,
                                                            result.error))
                self.write_data(result.stdout, **specificity)
                return result.stdout
            finally:
                os.unlink(privfile)

    def handle_event(self, event):
        CfgCreator.handle_event(self, event)
        StructFile.HandleEvent(self, event)
    handle_event.__doc__ = CfgCreator.handle_event.__doc__
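
When no cached public key exists, create_data() above derives one from the bound private key with "ssh-keygen -y". A minimal standalone sketch of that extraction step (the private key text would normally come from the bound Path entry):

import os
import subprocess
import tempfile

def pubkey_from_privkey(privkey_text):
    """Write private key material to a temp file and derive its public half."""
    fd, privfile = tempfile.mkstemp()
    try:
        os.fdopen(fd, "w").write(privkey_text)
        return subprocess.check_output(["ssh-keygen", "-y", "-f", privfile])
    finally:
        os.unlink(privfile)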
Example #29
0
File: SSHbase.py Project: shellox/bcfg2
class SSHbase(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Generator, Bcfg2.Server.Plugin.PullTarget):
    """
       The sshbase generator manages ssh host keys (both v1 and v2)
       for hosts.  It also manages the ssh_known_hosts file. It can
       integrate host keys from other management domains and similarly
       export its keys. The repository contains files in the following
       formats:

       ssh_host_key.H_(hostname) -> the v1 host private key for
         (hostname)
       ssh_host_key.pub.H_(hostname) -> the v1 host public key
         for (hostname)
       ssh_host_(ec)(dr)sa_key.H_(hostname) -> the v2 ssh host
         private key for (hostname)
       ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
         public key for (hostname)
       ssh_known_hosts -> the current known hosts file. this
         is regenerated each time a new key is generated.

    """

    __author__ = "*****@*****.**"
    keypatterns = [
        "ssh_host_dsa_key",
        "ssh_host_ecdsa_key",
        "ssh_host_rsa_key",
        "ssh_host_key",
        "ssh_host_dsa_key.pub",
        "ssh_host_ecdsa_key.pub",
        "ssh_host_rsa_key.pub",
        "ssh_host_key.pub",
    ]

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Generator.__init__(self)
        Bcfg2.Server.Plugin.PullTarget.__init__(self)
        self.ipcache = {}
        self.namecache = {}
        self.__skn = False

        # keep track of which bogus keys we've warned about, and only
        # do so once
        self.badnames = dict()

        self.fam = Bcfg2.Server.FileMonitor.get_fam()
        self.fam.AddMonitor(self.data, self)

        self.static = dict()
        self.entries = dict()
        self.Entries["Path"] = dict()

        self.entries["/etc/ssh/ssh_known_hosts"] = KnownHostsEntrySet(self.data)
        self.Entries["Path"]["/etc/ssh/ssh_known_hosts"] = self.build_skn
        for keypattern in self.keypatterns:
            self.entries["/etc/ssh/" + keypattern] = HostKeyEntrySet(keypattern, self.data)
            self.Entries["Path"]["/etc/ssh/" + keypattern] = self.build_hk

        self.cmd = Executor()

    def get_skn(self):
        """Build memory cache of the ssh known hosts file."""
        if not self.__skn:
            # if no metadata is registered yet, defer
            if len(self.core.metadata.query.all()) == 0:
                self.__skn = False
                return self.__skn

            skn = [s.data.rstrip() for s in list(self.static.values())]

            mquery = self.core.metadata.query

            # build hostname cache
            names = dict()
            for cmeta in mquery.all():
                names[cmeta.hostname] = set([cmeta.hostname])
                names[cmeta.hostname].update(cmeta.aliases)
                newnames = set()
                newips = set()
                for name in names[cmeta.hostname]:
                    newnames.add(name.split(".")[0])
                    try:
                        newips.add(self.get_ipcache_entry(name)[0])
                    except PluginExecutionError:
                        continue
                names[cmeta.hostname].update(newnames)
                names[cmeta.hostname].update(cmeta.addresses)
                names[cmeta.hostname].update(newips)
                # TODO: Only perform reverse lookups on IPs if an
                # option is set.
                if True:
                    for ip in newips:
                        try:
                            names[cmeta.hostname].update(self.get_namecache_entry(ip))
                        except:  # pylint: disable=W0702
                            continue
                names[cmeta.hostname] = sorted(names[cmeta.hostname])

            pubkeys = [pubk for pubk in list(self.entries.keys()) if pubk.endswith(".pub")]
            pubkeys.sort()
            for pubkey in pubkeys:
                for entry in sorted(
                    self.entries[pubkey].entries.values(), key=lambda e: (e.specific.hostname or e.specific.group)
                ):
                    specific = entry.specific
                    hostnames = []
                    if specific.hostname and specific.hostname in names:
                        hostnames = names[specific.hostname]
                    elif specific.group:
                        hostnames = list(
                            chain(*[names[cmeta.hostname] for cmeta in mquery.by_groups([specific.group])])
                        )
                    elif specific.all:
                        # a generic key for all hosts?  really?
                        hostnames = list(chain(*list(names.values())))
                    if not hostnames:
                        if specific.hostname:
                            key = specific.hostname
                            ktype = "host"
                        elif specific.group:
                            key = specific.group
                            ktype = "group"
                        else:
                            # user has added a global SSH key, but
                            # we have no clients yet.  don't warn about
                            # this.
                            continue

                        if key not in self.badnames:
                            self.badnames[key] = True
                            self.logger.info("Ignoring key for unknown %s %s" % (ktype, key))
                        continue

                    skn.append("%s %s" % (",".join(hostnames), entry.data.rstrip()))

            self.__skn = "\n".join(skn) + "\n"
        return self.__skn

    def set_skn(self, value):
        """Set backing data for skn."""
        self.__skn = value

    skn = property(get_skn, set_skn)

    def HandleEvent(self, event=None):
        """Local event handler that does skn regen on pubkey change."""
        # skip events we don't care about
        action = event.code2str()
        if action == "endExist" or event.filename == self.data:
            return

        for entry in list(self.entries.values()):
            if entry.specific.match(event.filename):
                entry.handle_event(event)
                if any(event.filename.startswith(kp) for kp in self.keypatterns if kp.endswith(".pub")):
                    self.debug_log("New public key %s; invalidating " "ssh_known_hosts cache" % event.filename)
                    self.skn = False
                return

        if event.filename == "info.xml":
            for entry in list(self.entries.values()):
                entry.handle_event(event)
            return

        if event.filename.endswith(".static"):
            self.logger.info("Static key %s %s; invalidating ssh_known_hosts " "cache" % (event.filename, action))
            if action == "deleted" and event.filename in self.static:
                del self.static[event.filename]
                self.skn = False
            else:
                self.static[event.filename] = Bcfg2.Server.Plugin.FileBacked(os.path.join(self.data, event.filename))
                self.static[event.filename].HandleEvent(event)
                self.skn = False
            return

        self.logger.warn("SSHbase: Got unknown event %s %s" % (event.filename, action))

    def get_ipcache_entry(self, client):
        """ Build a cache of dns results. """
        if client in self.ipcache:
            if self.ipcache[client]:
                return self.ipcache[client]
            else:
                raise PluginExecutionError("No cached IP address for %s" % client)
        else:
            # need to add entry
            try:
                ipaddr = socket.gethostbyname(client)
                self.ipcache[client] = (ipaddr, client)
                return (ipaddr, client)
            except socket.gaierror:
                result = self.cmd.run(["getent", "hosts", client])
                if result.success:
                    ipaddr = result.stdout.strip().split()
                    if ipaddr:
                        self.ipcache[client] = (ipaddr, client)
                        return (ipaddr, client)
                self.ipcache[client] = False
                msg = "Failed to find IP address for %s: %s" % (client, result.error)
                self.logger.error(msg)
                raise PluginExecutionError(msg)

    def get_namecache_entry(self, cip):
        """Build a cache of name lookups from client IP addresses."""
        if cip in self.namecache:
            # lookup cached name from IP
            if self.namecache[cip]:
                return self.namecache[cip]
            else:
                raise socket.gaierror
        else:
            # add an entry that has not been cached
            try:
                rvlookup = socket.gethostbyaddr(cip)
                if rvlookup[0]:
                    self.namecache[cip] = [rvlookup[0]]
                else:
                    self.namecache[cip] = []
                self.namecache[cip].extend(rvlookup[1])
                return self.namecache[cip]
            except socket.gaierror:
                self.namecache[cip] = False
                self.logger.error("Failed to find any names associated with " "IP address %s" % cip)
                raise

    def build_skn(self, entry, metadata):
        """This function builds builds a host specific known_hosts file."""
        try:
            self.entries[entry.get("name")].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            entry.text = self.skn
            hostkeys = []
            for key in self.keypatterns:
                if key.endswith(".pub"):
                    try:
                        hostkeys.append(self.entries["/etc/ssh/" + key].best_matching(metadata))
                    except Bcfg2.Server.Plugin.PluginExecutionError:
                        pass
            hostkeys.sort()
            for hostkey in hostkeys:
                entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" % hostkey.data
            self.entries[entry.get("name")].bind_info_to_entry(entry, metadata)

    def build_hk(self, entry, metadata):
        """This binds host key data into entries."""
        try:
            self.entries[entry.get("name")].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            filename = entry.get("name").split("/")[-1]
            self.GenerateHostKeyPair(metadata.hostname, filename)
            # Service the FAM events queued up by the key generation
            # so the data structure entries will be available for
            # binding.
            #
            # NOTE: We wait for up to ten seconds. There is some
            # potential for race condition, because if the file
            # monitor doesn't get notified about the new key files in
            # time, those entries won't be available for binding. In
            # practice, this seems "good enough".
            tries = 0
            is_bound = False
            while not is_bound:
                if tries >= 10:
                    msg = "%s still not registered" % filename
                    self.logger.error(msg)
                    raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
                self.fam.handle_events_in_interval(1)
                tries += 1
                try:
                    self.entries[entry.get("name")].bind_entry(entry, metadata)
                    is_bound = True
                except Bcfg2.Server.Plugin.PluginExecutionError:
                    pass

    def GenerateHostKeyPair(self, client, filename):
        """Generate new host key pair for client."""
        match = re.search(r"(ssh_host_(?:((?:ecd|d|r)sa)_)?key)", filename)
        if match:
            hostkey = "%s.H_%s" % (match.group(1), client)
            if match.group(2):
                keytype = match.group(2)
            else:
                keytype = "rsa1"
        else:
            raise PluginExecutionError("Unknown key filename: %s" % filename)

        fileloc = os.path.join(self.data, hostkey)
        publoc = os.path.join(self.data, ".".join([hostkey.split(".")[0], "pub", "H_%s" % client]))
        tempdir = tempfile.mkdtemp()
        temploc = os.path.join(tempdir, hostkey)
        cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "", "-t", keytype, "-C", "root@%s" % client]
        self.debug_log("SSHbase: Running: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSHbase: Error running ssh-keygen: %s" % result.error)

        try:
            shutil.copy(temploc, fileloc)
            shutil.copy("%s.pub" % temploc, publoc)
        except IOError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Temporary SSH keys not found: %s" % err)

        try:
            os.unlink(temploc)
            os.unlink("%s.pub" % temploc)
            os.rmdir(tempdir)
        except OSError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Failed to unlink temporary ssh keys: " "%s" % err)

    def AcceptChoices(self, _, metadata):
        return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)]

    def AcceptPullData(self, specific, entry, log):
        """Per-plugin bcfg2-admin pull support."""
        # specific will always be host specific
        filename = os.path.join(self.data, "%s.H_%s" % (entry["name"].split("/")[-1], specific.hostname))
        try:
            open(filename, "w").write(entry["text"])
            if log:
                print("Wrote file %s" % filename)
        except KeyError:
            self.logger.error(
                "Failed to pull %s. This file does not " "currently exist on the client" % entry.get("name")
            )
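
GenerateHostKeyPair() maps the requested entry filename to an ssh-keygen key type via the regular expression shown above; everything else is bookkeeping around where the generated files land. A quick illustration of that mapping, using the standard filenames from keypatterns:

import re

for fname in ["ssh_host_rsa_key", "ssh_host_ecdsa_key",
              "ssh_host_dsa_key", "ssh_host_key"]:
    match = re.search(r"(ssh_host_(?:((?:ecd|d|r)sa)_)?key)", fname)
    print("%s -> %s" % (fname, match.group(2) or "rsa1"))
# ssh_host_rsa_key -> rsa
# ssh_host_ecdsa_key -> ecdsa
# ssh_host_dsa_key -> dsa
# ssh_host_key -> rsa1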
Example #30
0
class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
    """ Entry set to handle SSLCA entries and XML files """
    def __init__(self, _, path, entry_type, encoding, parent=None):
        Bcfg2.Server.Plugin.EntrySet.__init__(self, os.path.basename(path),
                                              path, entry_type, encoding)
        self.parent = parent
        self.key = None
        self.cert = None
        self.cmd = Executor(timeout=120)

    def handle_event(self, event):
        action = event.code2str()
        fpath = os.path.join(self.path, event.filename)

        if event.filename == 'key.xml':
            if action in ['exists', 'created', 'changed']:
                self.key = SSLCAKeySpec(fpath)
            self.key.HandleEvent(event)
        elif event.filename == 'cert.xml':
            if action in ['exists', 'created', 'changed']:
                self.cert = SSLCACertSpec(fpath)
            self.cert.HandleEvent(event)
        else:
            Bcfg2.Server.Plugin.EntrySet.handle_event(self, event)

    def build_key(self, entry, metadata):
        """
        Either grabs a preexisting host key file, or triggers the generation
        of a new key if one doesn't exist.
        """
        # TODO: verify key fits the specs
        filename = "%s.H_%s" % (os.path.basename(
            entry.get('name')), metadata.hostname)
        self.logger.info("SSLCA: Generating new key %s" % filename)
        key_spec = self.key.get_spec(metadata)
        ktype = key_spec['type']
        bits = key_spec['bits']
        if ktype == 'rsa':
            cmd = ["openssl", "genrsa", bits]
        elif ktype == 'dsa':
            cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits]
        self.debug_log("SSLCA: Generating new key: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError(
                "SSLCA: Failed to generate key %s for "
                "%s: %s" %
                (entry.get("name"), metadata.hostname, result.error))
        open(os.path.join(self.path, filename), 'w').write(result.stdout)
        return result.stdout

    def build_cert(self, entry, metadata, keyfile):
        """ generate a new cert """
        filename = "%s.H_%s" % (os.path.basename(
            entry.get('name')), metadata.hostname)
        self.logger.info("SSLCA: Generating new cert %s" % filename)
        cert_spec = self.cert.get_spec(metadata)
        ca = self.parent.get_ca(cert_spec['ca'])
        req_config = None
        req = None
        try:
            req_config = self.build_req_config(metadata)
            req = self.build_request(keyfile, req_config, metadata)
            days = cert_spec['days']
            cmd = [
                "openssl", "ca", "-config", ca['config'], "-in", req, "-days",
                days, "-batch"
            ]
            passphrase = ca.get('passphrase')
            if passphrase:
                cmd.extend(["-passin", "pass:%s" % passphrase])

                def _scrub_pass(arg):
                    """ helper to scrub the passphrase from the
                    argument list """
                    if arg.startswith("pass:"******"pass:******"
                    else:
                        return arg
            else:
                _scrub_pass = lambda a: a

            self.debug_log("SSLCA: Generating new certificate: %s" %
                           " ".join(_scrub_pass(a) for a in cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise PluginExecutionError(
                    "SSLCA: Failed to generate cert: %s" % result.error)
        finally:
            try:
                if req_config and os.path.exists(req_config):
                    os.unlink(req_config)
                if req and os.path.exists(req):
                    os.unlink(req)
            except OSError:
                self.logger.error(
                    "SSLCA: Failed to unlink temporary files: %s" %
                    sys.exc_info()[1])
        cert = result.stdout
        if cert_spec['append_chain'] and 'chaincert' in ca:
            cert += open(ca['chaincert']).read()

        open(os.path.join(self.path, filename), 'w').write(cert)
        return cert

    def build_req_config(self, metadata):
        """
        generates a temporary openssl configuration file that is
        used to generate the required certificate request
        """
        # create temp request config file
        fd, fname = tempfile.mkstemp()
        cfp = ConfigParser.ConfigParser({})
        cfp.optionxform = str
        defaults = {
            'req': {
                'default_md': 'sha1',
                'distinguished_name': 'req_distinguished_name',
                'req_extensions': 'v3_req',
                'x509_extensions': 'v3_req',
                'prompt': 'no'
            },
            'req_distinguished_name': {},
            'v3_req': {
                'subjectAltName': '@alt_names'
            },
            'alt_names': {}
        }
        for section in list(defaults.keys()):
            cfp.add_section(section)
            for key in defaults[section]:
                cfp.set(section, key, defaults[section][key])
        cert_spec = self.cert.get_spec(metadata)
        altnamenum = 1
        altnames = cert_spec['subjectaltname']
        altnames.extend(list(metadata.aliases))
        altnames.append(metadata.hostname)
        for altname in altnames:
            cfp.set('alt_names', 'DNS.' + str(altnamenum), altname)
            altnamenum += 1
        for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
            if cert_spec[item]:
                cfp.set('req_distinguished_name', item, cert_spec[item])
        cfp.set('req_distinguished_name', 'CN', metadata.hostname)
        self.debug_log("SSLCA: Writing temporary request config to %s" % fname)
        try:
            cfp.write(os.fdopen(fd, 'w'))
        except IOError:
            raise PluginExecutionError("SSLCA: Failed to write temporary CSR "
                                       "config file: %s" % sys.exc_info()[1])
        return fname

    def build_request(self, keyfile, req_config, metadata):
        """
        creates the certificate request
        """
        fd, req = tempfile.mkstemp()
        os.close(fd)
        days = self.cert.get_spec(metadata)['days']
        cmd = [
            "openssl", "req", "-new", "-config", req_config, "-days", days,
            "-key", keyfile, "-text", "-out", req
        ]
        self.debug_log("SSLCA: Generating new CSR: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSLCA: Failed to generate CSR: %s" %
                                       result.error)
        return req

    def verify_cert(self, filename, keyfile, entry, metadata):
        """ Perform certification verification against the CA and
        against the key """
        ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
        do_verify = ca.get('chaincert')
        if do_verify:
            return (self.verify_cert_against_ca(filename, entry, metadata)
                    and self.verify_cert_against_key(filename, keyfile))
        return True

    def verify_cert_against_ca(self, filename, entry, metadata):
        """
        check that a certificate validates against the ca cert,
        and that it has not expired.
        """
        ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
        chaincert = ca.get('chaincert')
        cert = os.path.join(self.path, filename)
        cmd = ["openssl", "verify"]
        is_root = ca.get('root_ca', "false").lower() == 'true'
        if is_root:
            cmd.append("-CAfile")
        else:
            # verifying based on an intermediate cert
            cmd.extend(["-purpose", "sslserver", "-untrusted"])
        cmd.extend([chaincert, cert])
        self.debug_log("SSLCA: Verifying %s against CA: %s" %
                       (entry.get("name"), " ".join(cmd)))
        result = self.cmd.run(cmd)
        if result.stdout == cert + ": OK\n":
            self.debug_log("SSLCA: %s verified successfully against CA" %
                           entry.get("name"))
            return True
        self.logger.warning("SSLCA: %s failed verification against CA: %s" %
                            (entry.get("name"), result.error))
        return False

    def _get_modulus(self, fname, ftype="x509"):
        """ get the modulus from the given file """
        cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
        self.debug_log("SSLCA: Getting modulus of %s for verification: %s" %
                       (fname, " ".join(cmd)))
        result = self.cmd.run(cmd)
        if not result.success:
            self.logger.warning("SSLCA: Failed to get modulus of %s: %s" %
                                (fname, result.error))
        return result.stdout.strip()

    def verify_cert_against_key(self, filename, keyfile):
        """
        check that a certificate validates against its private key.
        """

        certfile = os.path.join(self.path, filename)
        cert = self._get_modulus(certfile)
        key = self._get_modulus(keyfile, ftype="rsa")
        if cert == key:
            self.debug_log("SSLCA: %s verified successfully against key %s" %
                           (filename, keyfile))
            return True
        self.logger.warning("SSLCA: %s failed verification against key %s" %
                            (filename, keyfile))
        return False

    def bind_entry(self, entry, metadata):
        if self.key:
            self.bind_info_to_entry(entry, metadata)
            try:
                return self.best_matching(metadata).bind_entry(entry, metadata)
            except PluginExecutionError:
                entry.text = self.build_key(entry, metadata)
                entry.set("type", "file")
                return entry
        elif self.cert:
            key = self.cert.get_spec(metadata)['key']
            cleanup_keyfile = False
            try:
                keyfile = self.parent.entries[key].best_matching(metadata).name
            except PluginExecutionError:
                cleanup_keyfile = True
                # create a temp file with the key in it
                fd, keyfile = tempfile.mkstemp()
                os.chmod(keyfile, 384)  # 0600
                el = lxml.etree.Element('Path', name=key)
                self.parent.core.Bind(el, metadata)
                os.fdopen(fd, 'w').write(el.text)

            try:
                self.bind_info_to_entry(entry, metadata)
                try:
                    best = self.best_matching(metadata)
                    if self.verify_cert(best.name, keyfile, entry, metadata):
                        return best.bind_entry(entry, metadata)
                except PluginExecutionError:
                    pass
                # if we get here, it's because either a) there was no best
                # matching entry; or b) the existing cert did not verify
                entry.text = self.build_cert(entry, metadata, keyfile)
                entry.set("type", "file")
                return entry
            finally:
                if cleanup_keyfile:
                    try:
                        os.unlink(keyfile)
                    except OSError:
                        err = sys.exc_info()[1]
                        self.logger.error("SSLCA: Failed to unlink temporary "
                                          "key %s: %s" % (keyfile, err))
Example #31
0
File: Yum.py Project: shellox/bcfg2
class YumCollection(Collection):
    """ Handle collections of Yum sources.  If we're using the yum
    Python libraries, then this becomes a very full-featured
    :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`
    object; if not, then it defers to the :class:`YumSource`
    object.

    .. private-include: _add_gpg_instances, _get_pulp_consumer
    """

    #: Options that are included in the [packages:yum] section of the
    #: config but that should not be included in the temporary
    #: yum.conf we write out
    option_blacklist = ["use_yum_libraries", "helper"]

    #: :class:`PulpCertificateSet` object used to handle Pulp certs
    pulp_cert_set = None

    def __init__(self, metadata, sources, cachepath, basepath, debug=False):
        Collection.__init__(self, metadata, sources, cachepath, basepath, debug=debug)
        self.keypath = os.path.join(self.cachepath, "keys")

        self._helper = None
        if self.use_yum:
            #: Define a unique cache file for this collection to use
            #: for cached yum metadata
            self.cachefile = os.path.join(self.cachepath, "cache-%s" % self.cachekey)
            if not os.path.exists(self.cachefile):
                os.mkdir(self.cachefile)

            #: The path to the server-side config file used when
            #: resolving packages with the Python yum libraries
            self.cfgfile = os.path.join(self.cachefile, "yum.conf")
            self.write_config()
            self.cmd = Executor()
        else:
            self.cachefile = None
            self.cmd = None

        if HAS_PULP and self.has_pulp_sources:
            _setup_pulp()
            if self.pulp_cert_set is None:
                certdir = os.path.join(self.basepath, "pulp", os.path.basename(PulpCertificateSet.certpath))
                try:
                    os.makedirs(certdir)
                except OSError:
                    err = sys.exc_info()[1]
                    if err.errno == errno.EEXIST:
                        pass
                    else:
                        self.logger.error("Could not create Pulp consumer " "cert directory at %s: %s" % (certdir, err))
                self.pulp_cert_set = PulpCertificateSet(certdir)

    @property
    def __package_groups__(self):
        return True

    @property
    def helper(self):
        """ The full path to :file:`bcfg2-yum-helper`.  First, we
        check in the config file to see if it has been explicitly
        specified; next we see if it's in $PATH (which we do by making
        a call to it; I wish there was a way to do this without
        forking, but apparently not); finally we check in /usr/sbin,
        the default location. """
        if not self._helper:
            try:
                self._helper = self.setup.cfp.get("packages:yum", "helper")
            except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
                # first see if bcfg2-yum-helper is in PATH
                try:
                    self.debug_log("Checking for bcfg2-yum-helper in $PATH")
                    self.cmd.run(["bcfg2-yum-helper"])
                    self._helper = "bcfg2-yum-helper"
                except OSError:
                    self._helper = "/usr/sbin/bcfg2-yum-helper"
        return self._helper

    @property
    def use_yum(self):
        """ True if we should use the yum Python libraries, False
        otherwise """
        return HAS_YUM and self.setup.cfp.getboolean("packages:yum", "use_yum_libraries", default=False)

    @property
    def has_pulp_sources(self):
        """ True if there are any Pulp sources to handle, False
        otherwise """
        return any(s.pulp_id for s in self)

    @property
    def cachefiles(self):
        """ A list of the full path to all cachefiles used by this
        collection."""
        cachefiles = set(Collection.cachefiles.fget(self))
        if self.cachefile:
            cachefiles.add(self.cachefile)
        return list(cachefiles)

    @track_statistics()
    def write_config(self):
        """ Write the server-side config file to :attr:`cfgfile` based
        on the data from :func:`get_config`"""
        if not os.path.exists(self.cfgfile):
            yumconf = self.get_config(raw=True)
            yumconf.add_section("main")

            # we set installroot to the cache directory so
            # bcfg2-yum-helper works with an empty rpmdb.  otherwise
            # the rpmdb is so hopelessly intertwined with yum that we
            # have to totally reinvent the dependency resolver.
            mainopts = dict(
                cachedir="/",
                installroot=self.cachefile,
                keepcache="0",
                debuglevel="0",
                sslverify="0",
                reposdir="/dev/null",
            )
            if self.setup["debug"]:
                mainopts["debuglevel"] = "5"
            elif self.setup["verbose"]:
                mainopts["debuglevel"] = "2"

            try:
                for opt in self.setup.cfp.options("packages:yum"):
                    if opt not in self.option_blacklist:
                        mainopts[opt] = self.setup.cfp.get("packages:yum", opt)
            except ConfigParser.NoSectionError:
                pass

            for opt, val in list(mainopts.items()):
                yumconf.set("main", opt, val)

            yumconf.write(open(self.cfgfile, "w"))

    def get_config(self, raw=False):  # pylint: disable=W0221
        """ Get the yum configuration for this collection.

        :param raw: Return a :class:`ConfigParser.SafeConfigParser`
                    object representing the configuration instead of a
                    string.  This is useful if you need to modify the
                    config before writing it (as :func:`write_config`
                    does in order to produce a server-specific
                    configuration).
        :type raw: bool
        :returns: string or ConfigParser.SafeConfigParser """

        config = ConfigParser.SafeConfigParser()
        for source in self:
            for url_map in source.url_map:
                if url_map["arch"] not in self.metadata.groups:
                    continue
                basereponame = source.get_repo_name(url_map)
                reponame = basereponame

                added = False
                while not added:
                    try:
                        config.add_section(reponame)
                        added = True
                    except ConfigParser.DuplicateSectionError:
                        match = re.search(r"-(\d+)", reponame)
                        if match:
                            rid = int(match.group(1)) + 1
                        else:
                            rid = 1
                        reponame = "%s-%d" % (basereponame, rid)

                config.set(reponame, "name", reponame)
                config.set(reponame, "baseurl", url_map["url"])
                config.set(reponame, "enabled", "1")
                if len(source.gpgkeys):
                    config.set(reponame, "gpgcheck", "1")
                    config.set(reponame, "gpgkey", " ".join(source.gpgkeys))
                else:
                    config.set(reponame, "gpgcheck", "0")

                if len(source.blacklist):
                    config.set(reponame, "exclude", " ".join(source.blacklist))
                if len(source.whitelist):
                    config.set(reponame, "includepkgs", " ".join(source.whitelist))

                if raw:
                    opts = source.server_options
                else:
                    opts = source.client_options
                for opt, val in opts.items():
                    config.set(reponame, opt, val)

        if raw:
            return config
        else:
            # configparser only writes to file, so we have to use a
            # StringIO object to get the data out as a string
            buf = StringIO()
            config.write(buf)
            return "# This config was generated automatically by the Bcfg2 " "Packages plugin\n\n" + buf.getvalue()

    @track_statistics()
    def build_extra_structures(self, independent):
        """ Add additional entries to the ``<Independent/>`` section
        of the final configuration.  This adds several kinds of
        entries:

        * For GPG keys, adds a ``Package`` entry that describes the
          version and release of all expected ``gpg-pubkey`` packages;
          and ``Path`` entries to copy all of the GPG keys to the
          appropriate place on the client filesystem.  Calls
          :func:`_add_gpg_instances`.

        * For Pulp Sources, adds a ``Path`` entry for the consumer
          certificate; and ``Action`` entries to update the
          consumer-side Pulp config if the consumer is newly
          registered.  Creates a new Pulp consumer from the Bcfg2
          server as necessary.

        :param independent: The XML tag to add extra entries to.  This
                            is modified in place.
        :type independent: lxml.etree._Element
        """
        needkeys = set()
        for source in self:
            for key in source.gpgkeys:
                needkeys.add(key)

        if len(needkeys):
            if HAS_YUM:
                # this must be HAS_YUM, not use_yum, because
                # regardless of whether the user wants to use the yum
                # resolver we want to include gpg key data
                keypkg = lxml.etree.Element("BoundPackage", name="gpg-pubkey", type=self.ptype, origin="Packages")
            else:
                self.logger.warning(
                    "GPGKeys were specified for yum sources " "in sources.xml, but no yum libraries " "were found"
                )
                self.logger.warning("GPG key version/release data cannot be " "determined automatically")
                self.logger.warning("Install yum libraries, or manage GPG " "keys manually")
                keypkg = None

            for key in needkeys:
                # figure out the path of the key on the client
                keydir = self.setup.cfp.get("global", "gpg_keypath", default="/etc/pki/rpm-gpg")
                remotekey = os.path.join(keydir, os.path.basename(key))
                localkey = os.path.join(self.keypath, os.path.basename(key))
                kdata = open(localkey).read()

                # copy the key to the client
                keypath = lxml.etree.Element(
                    "BoundPath",
                    name=remotekey,
                    encoding="ascii",
                    owner="root",
                    group="root",
                    type="file",
                    mode="0644",
                    important="true",
                )
                keypath.text = kdata

                # hook to add version/release info if possible
                self._add_gpg_instances(keypkg, localkey, remotekey, keydata=kdata)
                independent.append(keypath)
            if keypkg is not None:
                independent.append(keypkg)

        if self.has_pulp_sources:
            consumerapi = ConsumerAPI()
            consumer = self._get_pulp_consumer(consumerapi=consumerapi)
            if consumer is None:
                try:
                    consumer = consumerapi.create(
                        self.metadata.hostname, self.metadata.hostname, capabilities=dict(bind=False)
                    )
                    lxml.etree.SubElement(
                        independent,
                        "BoundAction",
                        name="pulp-update",
                        timing="pre",
                        when="always",
                        status="check",
                        command="pulp-consumer consumer update",
                    )
                    self.pulp_cert_set.write_data(consumer["certificate"], self.metadata)
                except server.ServerRequestError:
                    err = sys.exc_info()[1]
                    self.logger.error(
                        "Packages: Could not create Pulp consumer %s: %s" %
                        (self.metadata.hostname, err))

            for source in self:
                # each pulp source can only have one arch, so we don't
                # have to check the arch in url_map
                if source.pulp_id and source.pulp_id not in consumer["repoids"]:
                    try:
                        consumerapi.bind(self.metadata.hostname, source.pulp_id)
                    except server.ServerRequestError:
                        err = sys.exc_info()[1]
                        self.logger.error(
                            "Packages: Could not bind %s to "
                            "Pulp repo %s: %s" % (self.metadata.hostname, source.pulp_id, err)
                        )

            crt = lxml.etree.SubElement(independent, "BoundPath", name=self.pulp_cert_set.certpath)
            self.pulp_cert_set.bind_entry(crt, self.metadata)

    @track_statistics()
    def _get_pulp_consumer(self, consumerapi=None):
        """ Get a Pulp consumer object for the client.

        :param consumerapi: A Pulp ConsumerAPI object.  If none is
                            passed, one will be instantiated.
        :type consumerapi: pulp.client.api.consumer.ConsumerAPI
        :returns: dict - the consumer.  Returns None on failure
                  (including if there is no existing Pulp consumer for
                  this client).
        """
        if consumerapi is None:
            consumerapi = ConsumerAPI()
        consumer = None
        try:
            consumer = consumerapi.consumer(self.metadata.hostname)
        except server.ServerRequestError:
            # consumer does not exist
            pass
        except socket.error:
            err = sys.exc_info()[1]
            self.logger.error("Packages: Could not contact Pulp server: %s" % err)
        except:
            err = sys.exc_info()[1]
            self.logger.error("Packages: Unknown error querying Pulp server: " "%s" % err)
        return consumer

    @track_statistics()
    def _add_gpg_instances(self, keyentry, localkey, remotekey, keydata=None):
        """ Add GPG keys instances to a ``Package`` entry.  This is
        called from :func:`build_extra_structures` to add GPG keys to
        the specification.

        :param keyentry: The ``Package`` entry to add key instances
                         to.  This will be modified in place.
        :type keyentry: lxml.etree._Element
        :param localkey: The full path to the key file on the Bcfg2 server
        :type localkey: string
        :param remotekey: The full path to the key file on the client.
                          (If the key is not yet on the client, this
                          will be the full path to where the key file
                          will go eventually.)
        :type remotekey: string
        :param keydata: The contents of the key file.  If this is not
                        provided, read the data from ``localkey``.
        :type keydata: string
        """
        # this must be HAS_YUM, not use_yum, because regardless of
        # whether the user wants to use the yum resolver we want to
        # include gpg key data
        if not HAS_YUM:
            return

        if keydata is None:
            keydata = open(localkey).read()

        try:
            kinfo = yum.misc.getgpgkeyinfo(keydata)
            version = yum.misc.keyIdToRPMVer(kinfo["keyid"])
            release = yum.misc.keyIdToRPMVer(kinfo["timestamp"])

            lxml.etree.SubElement(keyentry, "Instance", version=version, release=release, simplefile=remotekey)
        except ValueError:
            err = sys.exc_info()[1]
            self.logger.error("Packages: Could not read GPG key %s: %s" % (localkey, err))

    @track_statistics()
    def get_groups(self, grouplist):
        """ If using the yum libraries, given a list of package group
        names, return a dict of ``<group name>: <list of packages>``.
        This is much faster than calling
        :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.get_group`
        once per group, since each call goes through the bcfg2 Yum
        helper, which makes another call to yum and so sets up yum
        metadata (hopefully from the cache) every time.  Resolving ten
        groups in one call is therefore much faster than resolving one
        group ten times.

        If you are using the builtin yum parser, this raises a warning
        and returns an empty dict.

        :param grouplist: The list of groups to query
        :type grouplist: list of strings - group names
        :returns: dict of ``<group name>: <list of packages>``

        In this implementation the packages may be strings or tuples.
        See :ref:`yum-pkg-objects` for more information. """
        if not grouplist:
            return dict()

        gdicts = []
        for group, ptype in grouplist:
            if group.startswith("@"):
                group = group[1:]
            if not ptype:
                ptype = "default"
            gdicts.append(dict(group=group, type=ptype))

        if self.use_yum:
            return self.call_helper("get_groups", inputdata=gdicts)
        else:
            pkgs = dict()
            for gdict in gdicts:
                pkgs[gdict["group"]] = Collection.get_group(self, gdict["group"], gdict["type"])
            return pkgs

    def _element_to_pkg(self, el, name):
        """ Convert a Package or Instance element to a package tuple """
        rv = (name, el.get("arch"), el.get("epoch"), el.get("version"), el.get("release"))
        if rv[3] in ["any", "auto"]:
            rv = (rv[0], rv[1], rv[2], None, None)
        # if a package requires no specific version, we just use
        # the name, not the tuple.  this limits the amount of JSON
        # encoding/decoding that has to be done to pass the
        # package list to bcfg2-yum-helper.
        if rv[1:] == (None, None, None, None):
            return name
        else:
            return rv
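    # Illustrative note (hypothetical data, not from the source above): an
    # <Instance arch="x86_64" version="1.0" release="2"/> child of
    # <Package name="foo"/> becomes the tuple
    # ("foo", "x86_64", None, "1.0", "2"), while an entry with no version
    # constraints collapses to just the string "foo", keeping the JSON
    # payload sent to bcfg2-yum-helper small.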

    def packages_from_entry(self, entry):
        """ When using the Python yum libraries, convert a Package
        entry to a list of package tuples.  See :ref:`yum-pkg-objects`
        and :ref:`pkg-objects` for more information on this process.

        :param entry: The Package entry to convert
        :type entry: lxml.etree._Element
        :returns: list of tuples
        """
        if not self.use_yum:
            return Collection.packages_from_entry(self, entry)

        rv = set()
        name = entry.get("name")

        for inst in entry.getchildren():
            if inst.tag != "Instance":
                continue
            rv.add(self._element_to_pkg(inst, name))
        if not rv:
            rv.add(self._element_to_pkg(entry, name))
        return list(rv)

    def _get_entry_attrs(self, pkgtup):
        """ Given a package tuple, return a dict of attributes
        suitable for applying to either a Package or an Instance
        tag """
        attrs = dict(version=self.setup.cfp.get("packages", "version", default="auto"))
        if attrs["version"] == "any" or not isinstance(pkgtup, tuple):
            return attrs

        try:
            if pkgtup[1]:
                attrs["arch"] = pkgtup[1]
            if pkgtup[2]:
                attrs["epoch"] = pkgtup[2]
            if pkgtup[3]:
                attrs["version"] = pkgtup[3]
            if pkgtup[4]:
                attrs["release"] = pkgtup[4]
        except IndexError:
            self.logger.warning("Malformed package tuple: %s" % pkgtup)
        return attrs

    def packages_to_entry(self, pkglist, entry):
        """ When using the Python yum libraries, convert a list of
        package tuples to a Package entry.  See :ref:`yum-pkg-objects`
        and :ref:`pkg-objects` for more information on this process.

        Each package name in ``pkglist`` is converted to a
        ``BoundPackage`` entry added as a subelement of ``entry``.  If
        a name appears in ``pkglist`` with more than one version, child
        ``Instance`` entries are added to its ``BoundPackage`` entry,
        one per version.

        :param pkglist: A list of package tuples to convert to an XML
                         Package entry
        :type pkglist: list of tuples
        :param entry: The base XML entry to add Package entries to.
                      This is modified in place.
        :type entry: lxml.etree._Element
        :returns: None
        """
        if not self.use_yum:
            return Collection.packages_to_entry(self, pkglist, entry)

        packages = dict()
        for pkg in pkglist:
            try:
                packages[pkg[0]].append(pkg)
            except KeyError:
                packages[pkg[0]] = [pkg]
        for name, instances in packages.items():
            pkgattrs = dict(type=self.ptype, origin="Packages", name=name)
            if len(instances) > 1:
                pkg_el = lxml.etree.SubElement(entry, "BoundPackage", **pkgattrs)
                for inst in instances:
                    lxml.etree.SubElement(pkg_el, "Instance", self._get_entry_attrs(inst))
            else:
                attrs = self._get_entry_attrs(instances[0])
                attrs.update(pkgattrs)
                lxml.etree.SubElement(entry, "BoundPackage", **attrs)

    def get_new_packages(self, initial, complete):
        """ Compute the difference between the complete package list
        (as returned by :func:`complete`) and the initial package list
        computed from the specification, allowing for package tuples.
        See :ref:`yum-pkg-objects` and :ref:`pkg-objects` for more
        information on this process.

        :param initial: The initial package list
        :type initial: set of strings, but see :ref:`pkg-objects`
        :param complete: The final package list
        :type complete: set of strings, but see :ref:`pkg-objects`
        :returns: list of strings and/or tuples
        """
        initial_names = []
        for pkg in initial:
            if isinstance(pkg, tuple):
                initial_names.append(pkg[0])
            else:
                initial_names.append(pkg)
        new = []
        for pkg in complete:
            if isinstance(pkg, tuple):
                name = pkg[0]
            else:
                name = pkg
            if name not in initial_names:
                new.append(pkg)
        return new

    @track_statistics()
    def complete(self, packagelist):
        """ Build a complete list of all packages and their dependencies.

        When using the Python yum libraries, this defers to the
        :ref:`bcfg2-yum-helper`; when using the builtin yum parser,
        this defers to
        :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.complete`.

        :param packagelist: Set of initial packages computed from the
                            specification.
        :type packagelist: set of strings, but see :ref:`pkg-objects`
        :returns: tuple of sets - The first element contains a set of
                  strings (but see :ref:`pkg-objects`) describing the
                  complete package list, and the second element is a
                  set of symbols whose dependencies could not be
                  resolved.
        """
        if not self.use_yum:
            return Collection.complete(self, packagelist)

        if packagelist:
            result = self.call_helper(
                "complete", dict(packages=list(packagelist), groups=list(self.get_relevant_groups()))
            )
            if not result:
                # some sort of error, reported by call_helper()
                return set(), packagelist
            # json doesn't understand sets or tuples, so we get back a
            # list of lists (packages) and a list of unicode strings
            # (unknown).  turn those into a set of tuples and a set of
            # strings, respectively.
            unknown = set([str(u) for u in result["unknown"]])
            packages = set([tuple(p) for p in result["packages"]])
            self.filter_unknown(unknown)
            return packages, unknown
        else:
            return set(), set()

    @track_statistics()
    def call_helper(self, command, inputdata=None):
        """ Make a call to :ref:`bcfg2-yum-helper`.  The yum libs have
        horrific memory leaks, so apparently the right way to get
        around that in long-running processes is to have a short-lived
        helper.  No, seriously -- check out the yum-updatesd code.
        It's pure madness.

        :param command: The :ref:`bcfg2-yum-helper` command to call.
        :type command: string
        :param inputdata: The input to pass to ``bcfg2-yum-helper`` on
                          stdin.  If this is None, no input will be
                          given at all.
        :type inputdata: Any JSON-encodable data structure.
        :returns: Varies depending on the return value of the
                  ``bcfg2-yum-helper`` command.
        """
        cmd = [self.helper, "-c", self.cfgfile]
        verbose = self.debug_flag or self.setup["verbose"]
        if verbose:
            cmd.append("-v")
        cmd.append(command)
        self.debug_log("Packages: running %s" % " ".join(cmd), flag=verbose)
        if inputdata:
            result = self.cmd.run(cmd, inputdata=json.dumps(inputdata))
        else:
            result = self.cmd.run(cmd)
        if not result.success:
            self.logger.error("Packages: error running bcfg2-yum-helper: %s" % result.error)
        elif result.stderr:
            self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" % result.stderr, flag=verbose)
        try:
            return json.loads(result.stdout)
        except ValueError:
            err = sys.exc_info()[1]
            self.logger.error("Packages: error reading bcfg2-yum-helper " "output: %s" % err)
            return None

    def setup_data(self, force_update=False):
        """ Do any collection-level data setup tasks. This is called
        when sources are loaded or reloaded by
        :class:`Bcfg2.Server.Plugins.Packages.Packages`.

        If the builtin yum parsers are in use, this defers to
        :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.setup_data`.
        If using the yum Python libraries, this cleans up cached yum
        metadata, regenerates the server-side yum config (in order to
        catch any new sources that have been added to this server),
        and then cleans up cached yum metadata again, in case the new
        config has any preexisting cache.

        :param force_update: Ignore all local cache and set up data
                             from its original upstream sources (i.e.,
                             the package repositories)
        :type force_update: bool
        """
        if not self.use_yum:
            return Collection.setup_data(self, force_update)

        if force_update:
            # we call this twice: once to clean up data from the old
            # config, and once to clean up data from the new config
            self.call_helper("clean")

        os.unlink(self.cfgfile)
        self.write_config()

        if force_update:
            self.call_helper("clean")
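
# A minimal, self-contained sketch (not part of the collection class above)
# of the pattern call_helper() relies on: run a short-lived helper process,
# hand it JSON on stdin, and read JSON back from stdout.  The helper and
# config paths are hypothetical placeholders.
import json
import subprocess


def call_yum_helper(command, inputdata=None,
                    helper="/usr/sbin/bcfg2-yum-helper",
                    cfgfile="/tmp/bcfg2-yum.conf"):
    """Run a single bcfg2-yum-helper style command and decode its output."""
    cmd = [helper, "-c", cfgfile, command]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdin = json.dumps(inputdata) if inputdata is not None else None
    stdout, stderr = proc.communicate(stdin)
    if proc.returncode != 0:
        raise RuntimeError("helper failed: %s" % stderr)
    return json.loads(stdout)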
Example #32
0
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
    """ Ensure that all XML files in the Bcfg2 repository validate
    according to their respective schemas. """
    def __init__(self, *args, **kwargs):
        Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)

        #: A dict of <file glob>: <schema file> that maps files in the
        #: Bcfg2 specification to their schemas.  The globs are
        #: extended :mod:`fnmatch` globs that also support ``**``,
        #: which matches any number of any characters, including
        #: forward slashes.  The schema files are relative to the
        #: schema directory, which can be controlled by the
        #: ``bcfg2-lint --schema`` option.
        self.filesets = \
            {"Metadata/groups.xml": "metadata.xsd",
             "Metadata/clients.xml": "clients.xsd",
             "Cfg/**/info.xml": "info.xsd",
             "Cfg/**/privkey.xml": "privkey.xsd",
             "Cfg/**/pubkey.xml": "pubkey.xsd",
             "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd",
             "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd",
             "SSHbase/**/info.xml": "info.xsd",
             "SSLCA/**/info.xml": "info.xsd",
             "TGenshi/**/info.xml": "info.xsd",
             "TCheetah/**/info.xml": "info.xsd",
             "Bundler/*.xml": "bundle.xsd",
             "Bundler/*.genshi": "bundle.xsd",
             "Pkgmgr/*.xml": "pkglist.xsd",
             "Rules/*.xml": "rules.xsd",
             "Defaults/*.xml": "defaults.xsd",
             "etc/report-configuration.xml": "report-configuration.xsd",
             "Deps/*.xml": "deps.xsd",
             "Decisions/*.xml": "decisions.xsd",
             "Packages/sources.xml": "packages.xsd",
             "GroupPatterns/config.xml": "grouppatterns.xsd",
             "NagiosGen/config.xml": "nagiosgen.xsd",
             "FileProbes/config.xml": "fileprobes.xsd",
             "SSLCA/**/cert.xml": "sslca-cert.xsd",
             "SSLCA/**/key.xml": "sslca-key.xsd",
             "GroupLogic/groups.xml": "grouplogic.xsd"
             }

        self.filelists = {}
        self.get_filelists()
        self.cmd = Executor()

    def Run(self):
        schemadir = self.config['schema']

        for path, schemaname in self.filesets.items():
            try:
                filelist = self.filelists[path]
            except KeyError:
                filelist = []

            if filelist:
                # avoid loading schemas for empty file lists
                schemafile = os.path.join(schemadir, schemaname)
                schema = self._load_schema(schemafile)
                if schema:
                    for filename in filelist:
                        self.validate(filename, schemafile, schema=schema)

        self.check_properties()

    @classmethod
    def Errors(cls):
        return {
            "schema-failed-to-parse": "warning",
            "properties-schema-not-found": "warning",
            "xml-failed-to-parse": "error",
            "xml-failed-to-read": "error",
            "xml-failed-to-verify": "error",
            "input-output-error": "error"
        }

    def check_properties(self):
        """ Check Properties files against their schemas. """
        for filename in self.filelists['props']:
            schemafile = "%s.xsd" % os.path.splitext(filename)[0]
            if os.path.exists(schemafile):
                self.validate(filename, schemafile)
            else:
                self.LintError("properties-schema-not-found",
                               "No schema found for %s" % filename)
                # ensure that it at least parses
                self.parse(filename)

    def parse(self, filename):
        """ Parse an XML file, raising the appropriate LintErrors if
        it can't be parsed or read.  Return the
        lxml.etree._ElementTree parsed from the file.

        :param filename: The full path to the file to parse
        :type filename: string
        :returns: lxml.etree._ElementTree - the parsed data"""
        try:
            return lxml.etree.parse(filename)
        except SyntaxError:
            result = self.cmd.run(["xmllint", filename])
            self.LintError(
                "xml-failed-to-parse", "%s fails to parse:\n%s" %
                (filename, result.stdout + result.stderr))
            return False
        except IOError:
            self.LintError("xml-failed-to-read",
                           "Failed to open file %s" % filename)
            return False

    def validate(self, filename, schemafile, schema=None):
        """ Validate a file against the given schema.

        :param filename: The full path to the file to validate
        :type filename: string
        :param schemafile: The full path to the schema file to
                           validate against
        :type schemafile: string
        :param schema: The loaded schema to validate against.  This
                       can be used to avoid parsing a single schema
                       file for every file that needs to be validated
                       against it.
        :type schema: lxml.etree.Schema
        :returns: bool - True if the file validates, false otherwise
        """
        if schema is None:
            # if no schema object was provided, instantiate one
            schema = self._load_schema(schemafile)
            if not schema:
                return False
        datafile = self.parse(filename)
        if not datafile:
            return False
        if not schema.validate(datafile):
            cmd = ["xmllint"]
            if self.files is None:
                cmd.append("--xinclude")
            cmd.extend(["--noout", "--schema", schemafile, filename])
            result = self.cmd.run(cmd)
            if not result.success:
                self.LintError(
                    "xml-failed-to-verify", "%s fails to verify:\n%s" %
                    (filename, result.stdout + result.stderr))
                return False
        return True

    def get_filelists(self):
        """ Get lists of different kinds of files to validate.  This
        doesn't return anything, but it sets
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filelists` to a
        dict whose keys are path globs given in
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filesets` and whose
        values are lists of the full paths to all files in the Bcfg2
        repository (or given with ``bcfg2-lint --stdin``) that match
        the glob."""
        if self.files is not None:
            listfiles = lambda p: fnmatch.filter(self.files,
                                                 os.path.join('*', p))
        else:
            listfiles = lambda p: glob.glob(
                os.path.join(self.config['repo'], p))

        for path in self.filesets.keys():
            if '/**/' in path:
                if self.files is not None:
                    self.filelists[path] = listfiles(path)
                else:  # self.files is None
                    fpath, fname = path.split('/**/')
                    self.filelists[path] = []
                    for root, _, files in \
                            os.walk(os.path.join(self.config['repo'],
                                                 fpath)):
                        self.filelists[path].extend([
                            os.path.join(root, f) for f in files if f == fname
                        ])
            else:
                self.filelists[path] = listfiles(path)

        self.filelists['props'] = listfiles("Properties/*.xml")

    def _load_schema(self, filename):
        """ Load an XML schema document, returning the Schema object
        and raising appropriate lint errors on failure.

        :param filename: The full path to the schema file to load.
        :type filename: string
        :returns: lxml.etree.Schema - The loaded schema data
        """
        try:
            return lxml.etree.XMLSchema(lxml.etree.parse(filename))
        except IOError:
            err = sys.exc_info()[1]
            self.LintError("input-output-error", str(err))
        except lxml.etree.XMLSchemaParseError:
            err = sys.exc_info()[1]
            self.LintError("schema-failed-to-parse",
                           "Failed to process schema %s: %s" % (filename, err))
        return None
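
# A minimal sketch of the validation core used above (assumed standalone, not
# plugin code): validate an XML file against an XSD with lxml and fall back to
# xmllint for a human-readable error report.  Paths are hypothetical.
import subprocess

import lxml.etree


def validate_xml(filename, schemafile):
    """Return True if filename validates against schemafile."""
    schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
    data = lxml.etree.parse(filename)
    if schema.validate(data):
        return True
    # re-run through xmllint so the failure output is readable
    proc = subprocess.Popen(
        ["xmllint", "--noout", "--schema", schemafile, filename],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    out, err = proc.communicate()
    print(out + err)
    return False

# e.g. validate_xml("Metadata/groups.xml",
#                   "/usr/share/bcfg2/schemas/metadata.xsd")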
Example #33
0
class Git(Version):
    """ The Git plugin provides a revision interface for Bcfg2 repos
    using git. """
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = ".git"
    if HAS_GITPYTHON:
        __rmi__ = Version.__rmi__ + ['Update']

    def __init__(self, core):
        Version.__init__(self, core)
        if HAS_GITPYTHON:
            self.repo = git.Repo(Bcfg2.Options.setup.vcs_root)
            self.cmd = None
        else:
            self.logger.debug("Git: GitPython not found, using CLI interface "
                              "to Git")
            self.repo = None
            self.cmd = Executor()
        self.logger.debug("Initialized git plugin with git directory %s" %
                          self.vcs_path)

    def _log_git_cmd(self, output):
        """ Send output from a GitPython command to the debug log """
        for line in output.strip().splitlines():
            self.debug_log("Git: %s" % line)

    def get_revision(self):
        """Read git revision information for the Bcfg2 repository."""
        if HAS_GITPYTHON:
            return self.repo.head.commit.hexsha
        else:
            cmd = ["git", "--git-dir", self.vcs_path,
                   "--work-tree", Bcfg2.Options.setup.vcs_root,
                   "rev-parse", "HEAD"]
            self.debug_log("Git: Running %s" % cmd)
            result = self.cmd.run(cmd)
            if not result.success:
                raise PluginExecutionError(result.stderr)
            return result.stdout

    def Update(self, ref=None):
        """ Git.Update() => True|False
        Update the working copy against the upstream repository
        """
        self.logger.info("Git: Git.Update(ref='%s')" % ref)
        self.debug_log("Git: Performing garbage collection on repo at %s" %
                       Bcfg2.Options.setup.vcs_root)
        try:
            self._log_git_cmd(self.repo.git.gc('--auto'))
        except git.GitCommandError:
            self.logger.warning("Git: Failed to perform garbage collection: %s"
                                % sys.exc_info()[1])

        self.debug_log("Git: Fetching all refs for repo at %s" %
                       Bcfg2.Options.setup.vcs_root)
        try:
            self._log_git_cmd(self.repo.git.fetch('--all'))
        except git.GitCommandError:
            self.logger.warning("Git: Failed to fetch refs: %s" %
                                sys.exc_info()[1])

        if ref:
            self.debug_log("Git: Checking out %s" % ref)
            try:
                self._log_git_cmd(self.repo.git.checkout('-f', ref))
            except git.GitCommandError:
                raise PluginExecutionError("Git: Failed to checkout %s: %s" %
                                           (ref, sys.exc_info()[1]))

        # determine if we should try to pull to get the latest commit
        # on this head
        tracking = None
        if not self.repo.head.is_detached:
            self.debug_log("Git: Determining if %s is a tracking branch" %
                           self.repo.head.ref.name)
            tracking = self.repo.head.ref.tracking_branch()

        if tracking is not None:
            self.debug_log("Git: %s is a tracking branch, pulling from %s" %
                           (self.repo.head.ref.name, tracking))
            try:
                self._log_git_cmd(self.repo.git.pull("--rebase"))
            except git.GitCommandError:
                raise PluginExecutionError("Git: Failed to pull from "
                                           "upstream: %s" % sys.exc_info()[1])

        self.logger.info("Git: Repo at %s updated to %s" %
                         (Bcfg2.Options.setup.vcs_root, self.get_revision()))
        return True
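
# A small sketch (separate from the plugin above) of reading the current HEAD
# revision: prefer GitPython when it is importable, otherwise shell out to the
# git CLI, mirroring the two code paths above.  The repository path is a
# hypothetical placeholder.
import subprocess


def get_git_revision(repo_path="/var/lib/bcfg2"):
    try:
        import git
        return git.Repo(repo_path).head.commit.hexsha
    except ImportError:
        return subprocess.check_output(
            ["git", "-C", repo_path, "rev-parse", "HEAD"],
            universal_newlines=True).strip()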
Example #34
0
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
    """ Ensure that all XML files in the Bcfg2 repository validate
    according to their respective schemas. """

    options = Bcfg2.Server.Lint.ServerlessPlugin.options + [
        Bcfg2.Options.PathOption(
            "--schema", cf=("Validate", "schema"),
            default="/usr/share/bcfg2/schema",
            help="The full path to the XML schema files")]

    def __init__(self, *args, **kwargs):
        Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)

        #: A dict of <file glob>: <schema file> that maps files in the
        #: Bcfg2 specification to their schemas.  The globs are
        #: extended :mod:`fnmatch` globs that also support ``**``,
        #: which matches any number of any characters, including
        #: forward slashes.  The schema files are relative to the
        #: schema directory, which can be controlled by the
        #: ``bcfg2-lint --schema`` option.
        self.filesets = \
            {"Metadata/groups.xml": "metadata.xsd",
             "Metadata/clients.xml": "clients.xsd",
             "Cfg/**/info.xml": "info.xsd",
             "Cfg/**/privkey.xml": "privkey.xsd",
             "Cfg/**/pubkey.xml": "pubkey.xsd",
             "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd",
             "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd",
             "Cfg/**/sslcert.xml": "sslca-cert.xsd",
             "Cfg/**/sslkey.xml": "sslca-key.xsd",
             "SSHbase/**/info.xml": "info.xsd",
             "TGenshi/**/info.xml": "info.xsd",
             "TCheetah/**/info.xml": "info.xsd",
             "Bundler/*.xml": "bundle.xsd",
             "Bundler/*.genshi": "bundle.xsd",
             "Pkgmgr/*.xml": "pkglist.xsd",
             "Rules/*.xml": "rules.xsd",
             "Defaults/*.xml": "defaults.xsd",
             "etc/report-configuration.xml": "report-configuration.xsd",
             "Deps/*.xml": "deps.xsd",
             "Decisions/*.xml": "decisions.xsd",
             "Packages/sources.xml": "packages.xsd",
             "GroupPatterns/config.xml": "grouppatterns.xsd",
             "AWSTags/config.xml": "awstags.xsd",
             "NagiosGen/config.xml": "nagiosgen.xsd",
             "FileProbes/config.xml": "fileprobes.xsd",
             "GroupLogic/groups.xml": "grouplogic.xsd"
             }

        self.filelists = {}
        self.get_filelists()
        self.cmd = Executor()

    def Run(self):

        for path, schemaname in self.filesets.items():
            try:
                filelist = self.filelists[path]
            except KeyError:
                filelist = []

            if filelist:
                # avoid loading schemas for empty file lists
                schemafile = os.path.join(Bcfg2.Options.setup.schema,
                                          schemaname)
                schema = self._load_schema(schemafile)
                if schema:
                    for filename in filelist:
                        self.validate(filename, schemafile, schema=schema)

        self.check_properties()

    @classmethod
    def Errors(cls):
        return {"schema-failed-to-parse": "warning",
                "properties-schema-not-found": "warning",
                "xml-failed-to-parse": "error",
                "xml-failed-to-read": "error",
                "xml-failed-to-verify": "error",
                "xinclude-does-not-exist": "error",
                "input-output-error": "error"}

    def check_properties(self):
        """ Check Properties files against their schemas. """
        for filename in self.filelists['props']:
            schemafile = "%s.xsd" % os.path.splitext(filename)[0]
            if os.path.exists(schemafile):
                self.validate(filename, schemafile)
            else:
                self.LintError("properties-schema-not-found",
                               "No schema found for %s" % filename)
                # ensure that it at least parses
                self.parse(filename)

    def parse(self, filename):
        """ Parse an XML file, raising the appropriate LintErrors if
        it can't be parsed or read.  Return the
        lxml.etree._ElementTree parsed from the file.

        :param filename: The full path to the file to parse
        :type filename: string
        :returns: lxml.etree._ElementTree - the parsed data"""
        try:
            xdata = lxml.etree.parse(filename)
            if self.files is None:
                self._expand_wildcard_xincludes(xdata)
                xdata.xinclude()
            return xdata
        except (lxml.etree.XIncludeError, SyntaxError):
            cmd = ["xmllint", "--noout"]
            if self.files is None:
                cmd.append("--xinclude")
            cmd.append(filename)
            result = self.cmd.run(cmd)
            self.LintError("xml-failed-to-parse",
                           "%s fails to parse:\n%s" %
                           (filename, result.stdout + result.stderr))
            return False
        except IOError:
            self.LintError("xml-failed-to-read",
                           "Failed to open file %s" % filename)
            return False

    def _expand_wildcard_xincludes(self, xdata):
        """ a lightweight version of
        :func:`Bcfg2.Server.Plugin.helpers.XMLFileBacked._follow_xincludes` """
        xinclude = '%sinclude' % Bcfg2.Server.XI_NAMESPACE
        for el in xdata.findall('//' + xinclude):
            name = el.get("href")
            if name.startswith("/"):
                fpath = name
            else:
                fpath = os.path.join(os.path.dirname(xdata.docinfo.URL), name)

            # expand globs in xinclude, a bcfg2-specific extension
            extras = glob.glob(fpath)
            if not extras:
                msg = "%s: %s does not exist, skipping: %s" % \
                      (xdata.docinfo.URL, name, self.RenderXML(el))
                if el.findall('./%sfallback' % Bcfg2.Server.XI_NAMESPACE):
                    self.logger.debug(msg)
                else:
                    self.LintError("xinclude-does-not-exist", msg)

            parent = el.getparent()
            parent.remove(el)
            for extra in extras:
                if extra != xdata.docinfo.URL:
                    lxml.etree.SubElement(parent, xinclude, href=extra)

    def validate(self, filename, schemafile, schema=None):
        """ Validate a file against the given schema.

        :param filename: The full path to the file to validate
        :type filename: string
        :param schemafile: The full path to the schema file to
                           validate against
        :type schemafile: string
        :param schema: The loaded schema to validate against.  This
                       can be used to avoid parsing a single schema
                       file for every file that needs to be validated
                       against it.
        :type schema: lxml.etree.Schema
        :returns: bool - True if the file validates, false otherwise
        """
        if schema is None:
            # if no schema object was provided, instantiate one
            schema = self._load_schema(schemafile)
            if not schema:
                return False
        datafile = self.parse(filename)
        if not datafile:
            return False
        if not schema.validate(datafile):
            cmd = ["xmllint"]
            if self.files is None:
                cmd.append("--xinclude")
            cmd.extend(["--noout", "--schema", schemafile, filename])
            result = self.cmd.run(cmd)
            if not result.success:
                self.LintError("xml-failed-to-verify",
                               "%s fails to verify:\n%s" %
                               (filename, result.stdout + result.stderr))
                return False
        return True

    def get_filelists(self):
        """ Get lists of different kinds of files to validate.  This
        doesn't return anything, but it sets
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filelists` to a
        dict whose keys are path globs given in
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filesets` and whose
        values are lists of the full paths to all files in the Bcfg2
        repository (or given with ``bcfg2-lint --stdin``) that match
        the glob."""
        if self.files is not None:
            listfiles = lambda p: fnmatch.filter(self.files,
                                                 os.path.join('*', p))
        else:
            listfiles = lambda p: \
                glob.glob(os.path.join(Bcfg2.Options.setup.repository, p))

        for path in self.filesets.keys():
            if '/**/' in path:
                if self.files is not None:
                    self.filelists[path] = listfiles(path)
                else:  # self.files is None
                    fpath, fname = path.split('/**/')
                    self.filelists[path] = []
                    for root, _, files in os.walk(
                            os.path.join(Bcfg2.Options.setup.repository,
                                         fpath)):
                        self.filelists[path].extend([os.path.join(root, f)
                                                     for f in files
                                                     if f == fname])
            else:
                self.filelists[path] = listfiles(path)

        self.filelists['props'] = listfiles("Properties/*.xml")

    def _load_schema(self, filename):
        """ Load an XML schema document, returning the Schema object
        and raising appropriate lint errors on failure.

        :param filename: The full path to the schema file to load.
        :type filename: string
        :returns: lxml.etree.Schema - The loaded schema data
        """
        try:
            return lxml.etree.XMLSchema(lxml.etree.parse(filename))
        except IOError:
            err = sys.exc_info()[1]
            self.LintError("input-output-error", str(err))
        except lxml.etree.XMLSchemaParseError:
            err = sys.exc_info()[1]
            self.LintError("schema-failed-to-parse",
                           "Failed to process schema %s: %s" %
                           (filename, err))
        return None
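
# A minimal sketch (assumed standalone code, not from the plugin) of the
# wildcard-XInclude expansion above: replace an <xi:include href="..."/> whose
# href contains a glob with one include element per matching file.
import glob
import os

import lxml.etree

XI_NS = "http://www.w3.org/2001/XInclude"


def expand_globbed_includes(xdata):
    """Expand glob patterns in xi:include/@href (a Bcfg2-specific extension)."""
    for el in xdata.findall('.//{%s}include' % XI_NS):
        href = el.get("href")
        base = os.path.dirname(xdata.docinfo.URL or ".")
        pattern = href if href.startswith("/") else os.path.join(base, href)
        parent = el.getparent()
        parent.remove(el)
        for match in sorted(glob.glob(pattern)):
            lxml.etree.SubElement(parent, '{%s}include' % XI_NS, href=match)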
Example #35
0
class CfgSSLCACertCreator(XMLCfgCreator, CfgVerifier):
    """ This class acts as both a Cfg creator that creates SSL certs,
    and as a Cfg verifier that verifies SSL certs. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within pubkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['sslcert.xml']

    cfg_section = "sslca"
    options = [
        Bcfg2.Options.Option(
            cf=("sslca", "category"), dest="sslca_category",
            help="Metadata category that generated SSL keys are specific to"),
        Bcfg2.Options.Option(
            cf=("sslca", "passphrase"), dest="sslca_passphrase",
            help="Passphrase used to encrypt generated SSL keys"),
        Bcfg2.Options.WildcardSectionGroup(
            Bcfg2.Options.PathOption(
                cf=("sslca_*", "config"),
                help="Path to the openssl config for the CA"),
            Bcfg2.Options.Option(
                cf=("sslca_*", "passphrase"),
                help="Passphrase for the CA private key"),
            Bcfg2.Options.PathOption(
                cf=("sslca_*", "chaincert"),
                help="Path to the SSL chaining certificate for verification"),
            Bcfg2.Options.BooleanOption(
                cf=("sslca_*", "root_ca"),
                help="Whether or not <chaincert> is a root CA (as opposed to "
                "an intermediate cert"),
            prefix="")]

    def __init__(self, fname):
        XMLCfgCreator.__init__(self, fname)
        CfgVerifier.__init__(self, fname, None)
        self.cmd = Executor()
        self.cfg = get_cfg()

    def build_req_config(self, metadata):
        """ Generates a temporary openssl configuration file that is
        used to generate the required certificate request. """
        fd, fname = tempfile.mkstemp()
        cfp = ConfigParser.ConfigParser({})
        cfp.optionxform = str
        defaults = dict(
            req=dict(
                default_md='sha1',
                distinguished_name='req_distinguished_name',
                req_extensions='v3_req',
                x509_extensions='v3_req',
                prompt='no'),
            req_distinguished_name=dict(),
            v3_req=dict(subjectAltName='@alt_names'),
            alt_names=dict())
        for section in list(defaults.keys()):
            cfp.add_section(section)
            for key in defaults[section]:
                cfp.set(section, key, defaults[section][key])
        spec = self.XMLMatch(metadata)
        cert = spec.find("Cert")
        altnamenum = 1
        altnames = spec.findall('subjectAltName')
        altnames.extend(list(metadata.aliases))
        altnames.append(metadata.hostname)
        for altname in altnames:
            cfp.set('alt_names', 'DNS.' + str(altnamenum), altname)
            altnamenum += 1
        for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
            if cert.get(item):
                cfp.set('req_distinguished_name', item, cert.get(item))
        cfp.set('req_distinguished_name', 'CN', metadata.hostname)
        self.debug_log("Cfg: Writing temporary CSR config to %s" % fname)
        try:
            cfp.write(os.fdopen(fd, 'w'))
        except IOError:
            raise CfgCreationError("Cfg: Failed to write temporary CSR config "
                                   "file: %s" % sys.exc_info()[1])
        return fname

    def build_request(self, keyfile, metadata):
        """ Create the certificate request """
        req_config = self.build_req_config(metadata)
        try:
            fd, req = tempfile.mkstemp()
            os.close(fd)
            cert = self.XMLMatch(metadata).find("Cert")
            days = cert.get("days", "365")
            cmd = ["openssl", "req", "-new", "-config", req_config,
                   "-days", days, "-key", keyfile, "-text", "-out", req]
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError("Failed to generate CSR: %s" %
                                       result.error)
            return req
        finally:
            try:
                os.unlink(req_config)
            except OSError:
                self.logger.error("Cfg: Failed to unlink temporary CSR "
                                  "config: %s" % sys.exc_info()[1])

    def get_ca(self, name):
        """ get a dict describing a CA from the config file """
        rv = dict()
        prefix = "sslca_%s_" % name
        for attr in dir(Bcfg2.Options.setup):
            if attr.startswith(prefix):
                rv[attr[len(prefix):]] = getattr(Bcfg2.Options.setup, attr)
        return rv
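    # Illustrative note (assumed bcfg2.conf layout, not from the source): a
    # section such as
    #
    #   [sslca_myca]
    #   config = /etc/pki/myca/openssl.cnf
    #   passphrase = secret
    #
    # is exposed by the WildcardSectionGroup above as the attributes
    # Bcfg2.Options.setup.sslca_myca_config and sslca_myca_passphrase, so
    # get_ca("myca") returns something like
    # {"config": "/etc/pki/myca/openssl.cnf", "passphrase": "secret"}.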

    def create_data(self, entry, metadata):
        """ generate a new cert """
        self.logger.info("Cfg: Generating new SSL cert for %s" % self.name)
        cert = self.XMLMatch(metadata).find("Cert")
        ca = self.get_ca(cert.get('ca', 'default'))
        req = self.build_request(self._get_keyfile(cert, metadata), metadata)
        try:
            days = cert.get('days', '365')
            cmd = ["openssl", "ca", "-config", ca['config'], "-in", req,
                   "-days", days, "-batch"]
            passphrase = ca.get('passphrase')
            if passphrase:
                cmd.extend(["-passin", "pass:%s" % passphrase])
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError("Failed to generate cert: %s" %
                                       result.error)
        except KeyError:
            raise CfgCreationError("Cfg: [sslca_%s] section has no 'config' "
                                   "option" % cert.get('ca', 'default'))
        finally:
            try:
                os.unlink(req)
            except OSError:
                self.logger.error("Cfg: Failed to unlink temporary CSR: %s " %
                                  sys.exc_info()[1])
        data = result.stdout
        if cert.get('append_chain') and 'chaincert' in ca:
            data += open(ca['chaincert']).read()

        self.write_data(data, **self.get_specificity(metadata))
        return data

    def verify_entry(self, entry, metadata, data):
        fd, fname = tempfile.mkstemp()
        self.debug_log("Cfg: Writing SSL cert %s to temporary file %s for "
                       "verification" % (entry.get("name"), fname))
        os.fdopen(fd, 'w').write(data)
        cert = self.XMLMatch(metadata).find("Cert")
        ca = self.get_ca(cert.get('ca', 'default'))
        try:
            if ca.get('chaincert'):
                self.verify_cert_against_ca(fname, entry, metadata)
            self.verify_cert_against_key(fname,
                                         self._get_keyfile(cert, metadata))
        finally:
            os.unlink(fname)

    def _get_keyfile(self, cert, metadata):
        """ Given a <Cert/> element and client metadata, return the
        full path to the file on the filesystem that the key lives in."""
        keypath = cert.get("key")
        eset = self.cfg.entries[keypath]
        try:
            return eset.best_matching(metadata).name
        except PluginExecutionError:
            # SSL key needs to be created
            try:
                creator = eset.best_matching(metadata,
                                             eset.get_handlers(metadata,
                                                               CfgCreator))
            except PluginExecutionError:
                raise CfgCreationError("Cfg: No SSL key or key creator "
                                       "defined for %s" % keypath)

            keyentry = lxml.etree.Element("Path", name=keypath)
            creator.create_data(keyentry, metadata)

            tries = 0
            while True:
                if tries >= 10:
                    raise CfgCreationError("Cfg: Timed out waiting for event "
                                           "on SSL key at %s" % keypath)
                get_fam().handle_events_in_interval(1)
                try:
                    return eset.best_matching(metadata).name
                except PluginExecutionError:
                    tries += 1
                    continue

    def verify_cert_against_ca(self, filename, entry, metadata):
        """
        check that a certificate validates against the ca cert,
        and that it has not expired.
        """
        cert = self.XMLMatch(metadata).find("Cert")
        ca = self.get_ca(cert.get("ca", "default"))
        chaincert = ca.get('chaincert')
        cmd = ["openssl", "verify"]
        is_root = ca.get('root_ca', "false").lower() == 'true'
        if is_root:
            cmd.append("-CAfile")
        else:
            # verifying based on an intermediate cert
            cmd.extend(["-purpose", "sslserver", "-untrusted"])
        cmd.extend([chaincert, filename])
        self.debug_log("Cfg: Verifying %s against CA" % entry.get("name"))
        result = self.cmd.run(cmd)
        if result.stdout == filename + ": OK\n":
            self.debug_log("Cfg: %s verified successfully against CA" %
                           entry.get("name"))
        else:
            raise CfgVerificationError("%s failed verification against CA: %s"
                                       % (entry.get("name"), result.error))

    def _get_modulus(self, fname, ftype="x509"):
        """ get the modulus from the given file """
        cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
        self.debug_log("Cfg: Getting modulus of %s for verification: %s" %
                       (fname, " ".join(cmd)))
        result = self.cmd.run(cmd)
        if not result.success:
            raise CfgVerificationError("Failed to get modulus of %s: %s" %
                                       (fname, result.error))
        return result.stdout.strip()

    def verify_cert_against_key(self, filename, keyfile):
        """ check that a certificate validates against its private
        key. """
        cert = self._get_modulus(filename)
        key = self._get_modulus(keyfile, ftype="rsa")
        if cert == key:
            self.debug_log("Cfg: %s verified successfully against key %s" %
                           (filename, keyfile))
        else:
            raise CfgVerificationError("%s failed verification against key %s"
                                       % (filename, keyfile))
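
# A short sketch (not part of the creator class) of the cert/key consistency
# check used above: openssl reports the RSA modulus for both files and they
# must match.  The example paths are hypothetical.
import subprocess


def _modulus(path, ftype):
    return subprocess.check_output(
        ["openssl", ftype, "-noout", "-modulus", "-in", path],
        universal_newlines=True).strip()


def cert_matches_key(certfile, keyfile):
    return _modulus(certfile, "x509") == _modulus(keyfile, "rsa")

# e.g. cert_matches_key("/etc/pki/tls/certs/host.crt",
#                       "/etc/pki/tls/private/host.key")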
Example #36
0
class CfgPrivateKeyCreator(CfgCreator, StructFile):
    """The CfgPrivateKeyCreator creates SSH keys on the fly. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within privkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['privkey.xml']

    def __init__(self, fname):
        CfgCreator.__init__(self, fname)
        StructFile.__init__(self, fname)

        pubkey_path = os.path.dirname(self.name) + ".pub"
        pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
        self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
        self.setup = get_option_parser()
        self.cmd = Executor()

    __init__.__doc__ = CfgCreator.__init__.__doc__

    @property
    def category(self):
        """ The name of the metadata category that generated keys are
        specific to """
        if (self.setup.cfp.has_section("sshkeys")
                and self.setup.cfp.has_option("sshkeys", "category")):
            return self.setup.cfp.get("sshkeys", "category")
        return None

    @property
    def passphrase(self):
        """ The passphrase used to encrypt private keys """
        if (HAS_CRYPTO and self.setup.cfp.has_section("sshkeys")
                and self.setup.cfp.has_option("sshkeys", "passphrase")):
            return Bcfg2.Server.Encryption.get_passphrases()[
                self.setup.cfp.get("sshkeys", "passphrase")]
        return None

    def handle_event(self, event):
        CfgCreator.handle_event(self, event)
        StructFile.HandleEvent(self, event)

    handle_event.__doc__ = CfgCreator.handle_event.__doc__

    def _gen_keypair(self, metadata, spec=None):
        """ Generate a keypair according to the given client medata
        and key specification.

        :param metadata: The client metadata to generate keys for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: string - The filename of the private key
        """
        if spec is None:
            spec = self.XMLMatch(metadata)

        # set key parameters
        ktype = "rsa"
        bits = None
        params = spec.find("Params")
        if params is not None:
            bits = params.get("bits")
            ktype = params.get("type", ktype)
        try:
            passphrase = spec.find("Passphrase").text
        except AttributeError:
            passphrase = ''
        tempdir = tempfile.mkdtemp()
        try:
            filename = os.path.join(tempdir, "privkey")

            # generate key pair
            cmd = ["ssh-keygen", "-f", filename, "-t", ktype]
            if bits:
                cmd.extend(["-b", bits])
            cmd.append("-N")
            log_cmd = cmd[:]
            cmd.append(passphrase)
            if passphrase:
                log_cmd.append("******")
            else:
                log_cmd.append("''")
            self.debug_log("Cfg: Generating new SSH key pair: %s" %
                           " ".join(log_cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError(
                    "Cfg: Failed to generate SSH key pair "
                    "at %s for %s: %s" %
                    (filename, metadata.hostname, result.error))
            elif result.stderr:
                self.logger.warning(
                    "Cfg: Generated SSH key pair at %s for %s "
                    "with errors: %s" %
                    (filename, metadata.hostname, result.stderr))
            return filename
        except:
            shutil.rmtree(tempdir)
            raise

    def get_specificity(self, metadata, spec=None):
        """ Get config settings for key generation specificity
        (per-host or per-group).

        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: dict - A dict of specificity arguments suitable for
                  passing to
                  :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.write_data`
                  or
                  :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.get_filename`
        """
        if spec is None:
            spec = self.XMLMatch(metadata)
        category = spec.get("category", self.category)
        if category is None:
            per_host_default = "true"
        else:
            per_host_default = "false"
        per_host = spec.get("perhost", per_host_default).lower() == "true"

        specificity = dict(host=metadata.hostname)
        if category and not per_host:
            group = metadata.group_in_category(category)
            if group:
                specificity = dict(group=group,
                                   prio=int(spec.get("priority", 50)))
            else:
                self.logger.info("Cfg: %s has no group in category %s, "
                                 "creating host-specific key" %
                                 (metadata.hostname, category))
        return specificity

    # pylint: disable=W0221
    def create_data(self, entry, metadata, return_pair=False):
        """ Create data for the given entry on the given client

        :param entry: The abstract entry to create data for.  This
                      will not be modified
        :type entry: lxml.etree._Element
        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param return_pair: Return a tuple of ``(public key, private
                            key)`` instead of just the private key.
                            This is used by
                            :class:`Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CfgPublicKeyCreator`
                            to create public keys as requested.
        :type return_pair: bool
        :returns: string - The private key data
        :returns: tuple - Tuple of ``(public key, private key)``, if
                  ``return_pair`` is set to True
        """
        spec = self.XMLMatch(metadata)
        specificity = self.get_specificity(metadata, spec)
        filename = self._gen_keypair(metadata, spec)

        try:
            # write the public key, stripping the comment and
            # replacing it with a comment that specifies the filename.
            kdata = open(filename + ".pub").read().split()[:2]
            kdata.append(self.pubkey_creator.get_filename(**specificity))
            pubkey = " ".join(kdata) + "\n"
            self.pubkey_creator.write_data(pubkey, **specificity)

            # encrypt the private key, write to the proper place, and
            # return it
            privkey = open(filename).read()
            if HAS_CRYPTO and self.passphrase:
                self.debug_log("Cfg: Encrypting key data at %s" % filename)
                privkey = Bcfg2.Server.Encryption.ssl_encrypt(
                    privkey, self.passphrase)
                specificity['ext'] = '.crypt'

            self.write_data(privkey, **specificity)

            if return_pair:
                return (pubkey, privkey)
            else:
                return privkey
        finally:
            shutil.rmtree(os.path.dirname(filename))
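
# A compact sketch (assumed standalone, not plugin code) of the keypair
# generation flow used by _gen_keypair()/create_data() above: create the pair
# in a throwaway temporary directory, read both halves back, and clean up.
import os
import shutil
import subprocess
import tempfile


def gen_ssh_keypair(ktype="rsa", bits="2048", passphrase=""):
    """Return (public key, private key) as strings."""
    tempdir = tempfile.mkdtemp()
    try:
        keyfile = os.path.join(tempdir, "privkey")
        subprocess.check_call(["ssh-keygen", "-q", "-f", keyfile,
                               "-t", ktype, "-b", bits, "-N", passphrase])
        pubkey = open(keyfile + ".pub").read()
        privkey = open(keyfile).read()
        return pubkey, privkey
    finally:
        shutil.rmtree(tempdir)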
Example #37
0
class PuppetENC(Bcfg2.Server.Plugin.Plugin,
                Bcfg2.Server.Plugin.Connector,
                Bcfg2.Server.Plugin.ClientRunHooks,
                Bcfg2.Server.Plugin.DirectoryBacked):
    """ A plugin to run Puppet external node classifiers
    (http://docs.puppetlabs.com/guides/external_nodes.html) """
    __child__ = PuppetENCFile

    def __init__(self, core):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core)
        Bcfg2.Server.Plugin.Connector.__init__(self)
        Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
        Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
        self.cache = dict()
        self.cmd = Executor()

    def _run_encs(self, metadata):
        """ Run all Puppet ENCs """
        cache = dict(groups=[], params=dict())
        for enc in self.entries.keys():
            epath = os.path.join(self.data, enc)
            self.debug_log("PuppetENC: Running ENC %s for %s" %
                           (enc, metadata.hostname))
            result = self.cmd.run([epath, metadata.hostname])
            if not result.success:
                msg = "PuppetENC: Error running ENC %s for %s: %s" % \
                    (enc, metadata.hostname, result.error)
                self.logger.error(msg)
                raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
            if result.stderr:
                self.debug_log("ENC Error: %s" % result.stderr)

            try:
                yaml = yaml_load(result.stdout)
                self.debug_log("Loaded data from %s for %s: %s" %
                               (enc, metadata.hostname, yaml))
            except yaml_error:
                err = sys.exc_info()[1]
                msg = "Error decoding YAML from %s for %s: %s" % \
                    (enc, metadata.hostname, err)
                self.logger.error(msg)
                raise Bcfg2.Server.Plugin.PluginExecutionError(msg)

            groups = yaml.get("classes", yaml.get("groups", dict()))
            if groups:
                if isinstance(groups, list):
                    self.debug_log("ENC %s adding groups to %s: %s" %
                                   (enc, metadata.hostname, groups))
                    cache['groups'].extend(groups)
                else:
                    self.debug_log("ENC %s adding groups to %s: %s" %
                                   (enc, metadata.hostname, groups.keys()))
                    for group, params in groups.items():
                        cache['groups'].append(group)
                        if params:
                            cache['params'].update(params)
            if "parameters" in yaml and yaml['parameters']:
                cache['params'].update(yaml['parameters'])
            if "environment" in yaml:
                self.logger.info("Ignoring unsupported environment section of "
                                 "ENC %s for %s" % (enc, metadata.hostname))

        self.cache[metadata.hostname] = cache

    def get_additional_groups(self, metadata):
        if metadata.hostname not in self.cache:
            self._run_encs(metadata)
        return self.cache[metadata.hostname]['groups']

    def get_additional_data(self, metadata):
        if metadata.hostname not in self.cache:
            self._run_encs(metadata)
        return self.cache[metadata.hostname]['params']

    def end_client_run(self, metadata):
        """ clear the entire cache at the end of each client run. this
        guarantees that each client will run all ENCs at or near the
        start of each run; we have to clear the entire cache instead
        of just the cache for this client because a client that builds
        templates that use metadata for other clients will populate
        the cache for those clients, which we don't want. This makes
        the caching less than stellar, but it does prevent multiple
        runs of ENCs for a single host a) for groups and data
        separately; and b) when a single client's metadata is
        generated multiple times by separate templates """
        self.cache = dict()
        if self.core.metadata_cache_mode == 'aggressive':
            # clear the metadata client cache if we're in aggressive
            # mode, and produce a warning.  PuppetENC really isn't
            # compatible with aggressive mode, since we don't know
            # when the output from a given ENC has changed, and thus
            # can't invalidate the cache sanely.
            self.logger.warning("PuppetENC is incompatible with aggressive "
                                "client metadata caching, try 'cautious' or "
                                "'initial' instead")
            self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)

    def end_statistics(self, metadata):
        self.end_client_run(metadata)
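To illustrate the merge logic in _run_encs above, here is a self-contained sketch (the ENC output below is hypothetical and shown already parsed from YAML, so no YAML library is needed) of how one classifier's output is folded into the per-host cache of groups and connector parameters.

# A classifier that prints, e.g.:
#
#     classes:
#       apache: {port: 8080}
#       ntp:
#     parameters:
#       datacenter: east
#
# would be merged like this:
enc_output = {"classes": {"apache": {"port": 8080}, "ntp": None},
              "parameters": {"datacenter": "east"}}

cache = dict(groups=[], params=dict())
groups = enc_output.get("classes", enc_output.get("groups", dict()))
if isinstance(groups, list):
    cache["groups"].extend(groups)
else:
    for group, params in groups.items():
        cache["groups"].append(group)
        if params:
            cache["params"].update(params)
if enc_output.get("parameters"):
    cache["params"].update(enc_output["parameters"])

print(cache)  # {'groups': ['apache', 'ntp'], 'params': {'port': 8080, 'datacenter': 'east'}}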
Example #38
0
File: Svn.py Project: xschlef/bcfg2
class Svn(Bcfg2.Server.Plugin.Version):
    """Svn is a version plugin for dealing with Bcfg2 repos."""
    options = Bcfg2.Server.Plugin.Version.options + [
        Bcfg2.Options.Option(
            cf=("svn", "user"), dest="svn_user", help="SVN username"),
        Bcfg2.Options.Option(
            cf=("svn", "password"), dest="svn_password", help="SVN password"),
        Bcfg2.Options.BooleanOption(
            cf=("svn", "always_trust"),
            dest="svn_trust_ssl",
            help="Always trust SSL certs from SVN server")
    ]

    if HAS_SVN:
        options.append(
            Bcfg2.Options.Option(
                cf=("svn", "conflict_resolution"),
                dest="svn_conflict_resolution",
                type=lambda v: v.replace("-", "_"),
                # pylint: disable=E1101
                choices=dir(pysvn.wc_conflict_choice),
                default=pysvn.wc_conflict_choice.postpone,
                # pylint: enable=E1101
                help="SVN conflict resolution method"))

    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = ".svn"
    if HAS_SVN:
        __rmi__ = Bcfg2.Server.Plugin.Version.__rmi__ + ['Update', 'Commit']

    def __init__(self, core):
        Bcfg2.Server.Plugin.Version.__init__(self, core)

        self.revision = None
        self.svn_root = None
        self.client = None
        self.cmd = None
        if not HAS_SVN:
            self.logger.debug("Svn: PySvn not found, using CLI interface to "
                              "SVN")
            self.cmd = Executor()
        else:
            self.client = pysvn.Client()
            self.debug_log("Svn: Conflicts will be resolved with %s" %
                           Bcfg2.Options.setup.svn_conflict_resolution)
            self.client.callback_conflict_resolver = self.conflict_resolver

            if Bcfg2.Options.setup.svn_trust_ssl:
                self.client.callback_ssl_server_trust_prompt = \
                    self.ssl_server_trust_prompt

            if (Bcfg2.Options.setup.svn_user
                    and Bcfg2.Options.setup.svn_password):
                self.client.callback_get_login = self.get_login

        self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
                          self.vcs_path)

    def get_login(self, realm, username, may_save):  # pylint: disable=W0613
        """ PySvn callback to get credentials for HTTP basic authentication """
        self.logger.debug("Svn: Logging in with username: %s" %
                          Bcfg2.Options.setup.svn_user)
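        # pysvn expects a (retcode, username, password, save) tuple; the
        # trailing False asks pysvn not to cache these credentials on disk.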
        return (True, Bcfg2.Options.setup.svn_user,
                Bcfg2.Options.setup.svn_password, False)

    def ssl_server_trust_prompt(self, trust_dict):
        """ PySvn callback to always trust SSL certificates from SVN server """
        self.logger.debug("Svn: Trusting SSL certificate from %s, "
                          "issued by %s for realm %s" %
                          (trust_dict['hostname'], trust_dict['issuer_dname'],
                           trust_dict['realm']))
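        # pysvn expects (retcode, accepted_failures, save): accept every
        # failure reported in trust_dict and do not save the acceptance.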
        return True, trust_dict['failures'], False

    def conflict_resolver(self, conflict_description):
        """ PySvn callback function to resolve conflicts """
        self.logger.info("Svn: Resolving conflict for %s with %s" %
                         (conflict_description['path'],
                          Bcfg2.Options.setup.svn_conflict_resolution))
        return Bcfg2.Options.setup.svn_conflict_resolution, None, False

    def get_revision(self):
        """Read svn revision information for the Bcfg2 repository."""
        msg = None
        if HAS_SVN:
            try:
                info = self.client.info(Bcfg2.Options.setup.vcs_root)
                self.revision = info.revision
                self.svn_root = info.url
                return str(self.revision.number)
            except pysvn.ClientError:  # pylint: disable=E1101
                msg = "Svn: Failed to get revision: %s" % sys.exc_info()[1]
        else:
            result = self.cmd.run(
                ["env LC_ALL=C", "svn", "info", Bcfg2.Options.setup.vcs_root],
                shell=True)
            if result.success:
                self.revision = [
                    line.split(': ')[1] for line in result.stdout.splitlines()
                    if line.startswith('Revision:')
                ][-1]
                return self.revision
            else:
                msg = "Failed to read svn info: %s" % result.error
        self.revision = None
        raise Bcfg2.Server.Plugin.PluginExecutionError(msg)

    def Update(self):
        '''Svn.Update() => True|False\nUpdate svn working copy\n'''
        try:
            old_revision = self.revision.number
            self.revision = self.client.update(Bcfg2.Options.setup.vcs_root,
                                               recurse=True)[0]
        except pysvn.ClientError:  # pylint: disable=E1101
            err = sys.exc_info()[1]
            # try to be smart about the error we got back
            details = None
            if "callback_ssl_server_trust_prompt" in str(err):
                details = "SVN server certificate is not trusted"
            elif "callback_get_login" in str(err):
                details = "SVN credentials not cached"

            if details is None:
                self.logger.error("Svn: Failed to update server repository",
                                  exc_info=1)
            else:
                self.logger.error("Svn: Failed to update server repository: "
                                  "%s" % details)
            return False

        if old_revision == self.revision.number:
            self.logger.debug("repository is current")
        else:
            self.logger.info("Updated %s from revision %s to %s" %
                             (Bcfg2.Options.setup.vcs_root, old_revision,
                              self.revision.number))
        return True

    def Commit(self):
        """Svn.Commit() => True|False\nCommit svn repository\n"""
        # First try to update
        if not self.Update():
            self.logger.error("Failed to update svn repository, refusing to "
                              "commit changes")
            return False

        try:
            self.revision = self.client.checkin([Bcfg2.Options.setup.vcs_root],
                                                'Svn: autocommit',
                                                recurse=True)
            self.revision = self.client.update(Bcfg2.Options.setup.vcs_root,
                                               recurse=True)[0]
            self.logger.info("Svn: Commited changes. At %s" %
                             self.revision.number)
            return True
        except pysvn.ClientError:  # pylint: disable=E1101
            err = sys.exc_info()[1]
            # try to be smart about the error we got back
            details = None
            if "callback_ssl_server_trust_prompt" in str(err):
                details = "SVN server certificate is not trusted"
            elif "callback_get_login" in str(err):
                details = "SVN credentials not cached"

            if details is None:
                self.logger.error("Svn: Failed to commit changes", exc_info=1)
            else:
                self.logger.error("Svn: Failed to commit changes: %s" %
                                  details)
            return False
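As a footnote to the CLI fallback in get_revision above, a minimal sketch (the svn info text is made up) of pulling the revision number out of ``svn info`` output:

sample = """Path: .
URL: https://svn.example.com/bcfg2/trunk
Revision: 4123
Node Kind: directory
"""

# Keep only lines that start with "Revision:" and take the value after ": "
revisions = [line.split(': ')[1] for line in sample.splitlines()
             if line.startswith('Revision:')]
print(revisions[-1])  # -> 4123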