def __init__(self, _, path, entry_type, encoding, parent=None):
    """Entry set rooted at *path*.

    :param _: unused (the basename of *path* is used as the set name)
    :param path: directory holding the files for this entry set
    :param entry_type: class used to represent individual entries
    :param encoding: file encoding passed through to the base EntrySet
    :param parent: owning plugin, kept for callbacks (may be None)
    """
    Bcfg2.Server.Plugin.EntrySet.__init__(self, os.path.basename(path),
                                          path, entry_type, encoding)
    self.parent = parent
    # key/cert are populated lazily once generated; None until then
    self.key = None
    self.cert = None
    # external command runner; generation can be slow, so allow 2 minutes
    self.cmd = Executor(timeout=120)
def __init__(self, core):
    """Initialize the plugin and all of its Bcfg2 interfaces.

    :param core: the Bcfg2 server core
    """
    Bcfg2.Server.Plugin.Plugin.__init__(self, core)
    Bcfg2.Server.Plugin.Connector.__init__(self)
    Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
    # DirectoryBacked monitors self.data (set by Plugin.__init__), so the
    # Plugin base must be initialized first
    Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
    # per-client cache of results
    self.cache = dict()
    # external command runner with the default timeout
    self.cmd = Executor()
def create_key(self):
    """Create a self-signed key/certificate pair.

    Writes the private key to ``self.data['keypath']`` (mode 0600) and
    the signed certificate to ``self.data['certpath']``.  Errors are
    reported to stdout and abort the remaining steps.
    """
    cmd = Executor(timeout=120)
    # BUGFIX: the subject previously ended with a stray "'" after the CN
    # value, producing a malformed X.509 subject.
    subject = "/C=%s/ST=%s/L=%s/CN=%s" % (
        self.data['country'], self.data['state'],
        self.data['location'], self.data['shostname'])
    key = cmd.run(["openssl", "req", "-batch", "-x509", "-nodes",
                   "-subj", subject, "-days", "1000",
                   "-newkey", "rsa:2048",
                   "-keyout", self.data['keypath'], "-noout"])
    if not key.success:
        print("Error generating key: %s" % key.error)
        return
    # private key must not be world-readable
    os.chmod(self.data['keypath'], stat.S_IRUSR | stat.S_IWUSR)  # 0600
    csr = cmd.run(["openssl", "req", "-batch", "-new", "-subj", subject,
                   "-key", self.data['keypath']])
    if not csr.success:
        print("Error generating certificate signing request: %s" %
              csr.error)
        return
    # self-sign the CSR with the key we just generated
    cert = cmd.run(["openssl", "x509", "-req", "-days", "1000",
                    "-signkey", self.data['keypath'],
                    "-out", self.data['certpath']],
                   inputdata=csr.stdout)
    if not cert.success:
        print("Error signing certificate: %s" % cert.error)
        return
def __init__(self, core):
    """Initialize the Svn version plugin.

    Prefers the PySvn bindings; falls back to the ``svn`` CLI (via
    :class:`Executor`) when PySvn is unavailable.

    :param core: the Bcfg2 server core
    """
    Bcfg2.Server.Plugin.Version.__init__(self, core)
    self.revision = None
    self.svn_root = None
    # exactly one of client (pysvn) or cmd (CLI) will be set
    self.client = None
    self.cmd = None
    if not HAS_SVN:
        self.logger.debug("Svn: PySvn not found, using CLI interface to "
                          "SVN")
        self.cmd = Executor()
    else:
        self.client = pysvn.Client()
        self.debug_log("Svn: Conflicts will be resolved with %s" %
                       Bcfg2.Options.setup.svn_conflict_resolution)
        self.client.callback_conflict_resolver = self.conflict_resolver
        if Bcfg2.Options.setup.svn_trust_ssl:
            self.client.callback_ssl_server_trust_prompt = \
                self.ssl_server_trust_prompt
        # only register a login callback if both credentials are configured
        if (Bcfg2.Options.setup.svn_user and
                Bcfg2.Options.setup.svn_password):
            self.client.callback_get_login = self.get_login
    self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
                      self.vcs_path)
def __init__(self):
    """Initialize client state and validate option combinations."""
    self.config = None
    self._proxy = None
    # NOTE(review): self.logger is reassigned at the end of this method;
    # messages emitted in between use the 'bcfg2' logger, later ones use
    # the module logger — confirm this is intentional.
    self.logger = logging.getLogger('bcfg2')
    self.cmd = Executor(Bcfg2.Options.setup.probe_timeout)
    self.tools = []
    # timing data for the run, keyed by phase name
    self.times = dict()
    self.times['initialization'] = time.time()
    if Bcfg2.Options.setup.bundle_quick:
        if (not Bcfg2.Options.setup.only_bundles and
                not Bcfg2.Options.setup.except_bundles):
            self.logger.error("-Q option requires -b or -B")
            raise SystemExit(1)
    if Bcfg2.Options.setup.remove == 'services':
        self.logger.error("Service removal is nonsensical; "
                          "removed services will only be disabled")
    # normalize the server URL so later code can assume an https scheme
    if not Bcfg2.Options.setup.server.startswith('https://'):
        Bcfg2.Options.setup.server = \
            'https://' + Bcfg2.Options.setup.server

    #: A dict of the state of each entry. Keys are the entries.
    #: Values are boolean: True means that the entry is good,
    #: False means that the entry is bad.
    self.states = {}
    self.whitelist = []
    self.blacklist = []
    self.removal = []
    self.unhandled = []
    self.logger = logging.getLogger(__name__)
def __init__(self, core, datastore):
    """Initialize the SSH host key generator.

    Registers an entry set for the known_hosts file and one per host
    key pattern, and starts monitoring the plugin data directory.

    :param core: the Bcfg2 server core
    :param datastore: path to the Bcfg2 repository
    """
    Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
    Bcfg2.Server.Plugin.Generator.__init__(self)
    Bcfg2.Server.Plugin.PullTarget.__init__(self)
    # caches for name -> IP and IP -> name lookups
    self.ipcache = {}
    self.namecache = {}
    # cached ssh_known_hosts contents; False means "not built yet"
    self.__skn = False
    # keep track of which bogus keys we've warned about, and only
    # do so once
    self.badnames = dict()
    self.fam = Bcfg2.Server.FileMonitor.get_fam()
    self.fam.AddMonitor(self.data, self)
    self.static = dict()
    self.entries = dict()
    self.Entries['Path'] = dict()
    self.entries['/etc/ssh/ssh_known_hosts'] = \
        KnownHostsEntrySet(self.data)
    self.Entries['Path']['/etc/ssh/ssh_known_hosts'] = self.build_skn
    # one entry set per supported host key filename
    for keypattern in self.keypatterns:
        self.entries["/etc/ssh/" + keypattern] = \
            HostKeyEntrySet(keypattern, self.data)
        self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk
    self.cmd = Executor()
def __init__(self, core, datastore):
    """Initialize the Svn version plugin.

    Prefers the PySvn bindings; falls back to the ``svn`` CLI when
    PySvn is unavailable.  Conflict resolution, SSL trust, and login
    behavior are read from the ``[svn]`` config section.

    :param core: the Bcfg2 server core
    :param datastore: path to the Bcfg2 repository
    """
    Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
    self.revision = None
    self.svn_root = None
    # exactly one of client (pysvn) or cmd (CLI) will be set
    self.client = None
    self.cmd = None
    if not HAS_SVN:
        self.logger.debug("Svn: PySvn not found, using CLI interface to "
                          "SVN")
        self.cmd = Executor()
    else:
        self.client = pysvn.Client()
        # pylint: disable=E1101
        choice = pysvn.wc_conflict_choice.postpone
        try:
            resolution = self.core.setup.cfp.get(
                "svn", "conflict_resolution").replace('-', '_')
            if resolution in ["edit", "launch", "working"]:
                # BUGFIX: both warnings below formerly passed a single
                # argument to a two-placeholder format string, raising
                # TypeError instead of logging.
                self.logger.warning("Svn: Conflict resolver %s requires "
                                    "manual intervention, using %s" %
                                    (resolution, choice))
            else:
                choice = getattr(pysvn.wc_conflict_choice, resolution)
        except AttributeError:
            self.logger.warning("Svn: Conflict resolver %s does not "
                                "exist, using %s" % (resolution, choice))
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            self.logger.info("Svn: No conflict resolution method "
                             "selected, using %s" % choice)
        # pylint: enable=E1101
        self.debug_log("Svn: Conflicts will be resolved with %s" %
                       choice)
        self.client.callback_conflict_resolver = \
            self.get_conflict_resolver(choice)
        try:
            if self.core.setup.cfp.get(
                    "svn", "always_trust").lower() == "true":
                self.client.callback_ssl_server_trust_prompt = \
                    self.ssl_server_trust_prompt
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            self.logger.debug("Svn: Using subversion cache for SSL "
                              "certificate trust")
        try:
            # only register a login callback if both credentials are set
            if (self.core.setup.cfp.get("svn", "user") and
                    self.core.setup.cfp.get("svn", "password")):
                self.client.callback_get_login = \
                    self.get_login
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            self.logger.info("Svn: Using subversion cache for "
                             "password-based authetication")
    self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
                      self.vcs_path)
def __init__(self, fname):
    """Initialize the private key creator.

    Also builds a :class:`CfgPublicKeyCreator` for the matching
    ``.pub`` path so the public half can be written alongside.

    :param fname: path to the privkey.xml specification file
    """
    CfgCreator.__init__(self, fname)
    StructFile.__init__(self, fname)
    # the public key spec lives in a sibling "<dir>.pub" directory,
    # named after that directory's basename
    pubkey_path = os.path.dirname(self.name) + ".pub"
    pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
    self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
    self.setup = get_option_parser()
    self.cmd = Executor()
def __init__(self, core):
    """Initialize the Git version plugin.

    Prefers GitPython; falls back to the ``git`` CLI (via
    :class:`Executor`) when GitPython is unavailable.

    :param core: the Bcfg2 server core
    """
    Version.__init__(self, core)
    # exactly one of repo (GitPython) or cmd (CLI) will be set
    if HAS_GITPYTHON:
        self.repo = git.Repo(Bcfg2.Options.setup.vcs_root)
        self.cmd = None
    else:
        self.logger.debug("Git: GitPython not found, using CLI interface "
                          "to Git")
        self.repo = None
        self.cmd = Executor()
    self.logger.debug("Initialized git plugin with git directory %s" %
                      self.vcs_path)
def __init__(self, core, datastore):
    """Set up the SSH key generator: base plugin interfaces, lookup
    caches, file monitoring, and one entry set per managed path.

    :param core: the Bcfg2 server core
    :param datastore: path to the Bcfg2 repository
    """
    Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
    Bcfg2.Server.Plugin.Generator.__init__(self)
    Bcfg2.Server.Plugin.PullTarget.__init__(self)
    # lookup caches: hostname -> IPs and IP -> hostnames
    self.ipcache = {}
    self.namecache = {}
    # known-hosts data; False until first built
    self.__skn = False
    # bogus key names already warned about (warn only once each)
    self.badnames = dict()
    self.fam = Bcfg2.Server.FileMonitor.get_fam()
    self.fam.AddMonitor(self.data, self)
    self.static = dict()
    self.entries = dict()
    self.Entries["Path"] = dict()
    # the aggregate known_hosts file gets its own entry set ...
    self.entries["/etc/ssh/ssh_known_hosts"] = KnownHostsEntrySet(self.data)
    self.Entries["Path"]["/etc/ssh/ssh_known_hosts"] = self.build_skn
    # ... and each host key filename gets one as well
    for pattern in self.keypatterns:
        target = "/etc/ssh/" + pattern
        self.entries[target] = HostKeyEntrySet(pattern, self.data)
        self.Entries["Path"][target] = self.build_hk
    self.cmd = Executor()
class Fossil(Bcfg2.Server.Plugin.Version):
    """ The Fossil plugin provides a revision interface for Bcfg2
    repos using fossil. """
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "_FOSSIL_"

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
        self.cmd = Executor()
        self.logger.debug(
            "Initialized Fossil plugin with fossil directory %s" %
            self.vcs_path)

    def get_revision(self):
        """Read fossil revision information for the Bcfg2 repository.

        :returns: the current checkout id, or None if no ``checkout``
                  line is found in ``fossil info`` output
        :raises: Bcfg2.Server.Plugin.PluginExecutionError on failure
        """
        result = self.cmd.run(["env LC_ALL=C", "fossil", "info"],
                              shell=True, cwd=self.vcs_root)
        try:
            revision = None
            for line in result.stdout.splitlines():
                ldata = line.split(': ')
                if ldata[0].strip() == 'checkout':
                    # BUGFIX: formerly indexed line[1] (the 2nd character
                    # of the line) instead of ldata[1] (the value after
                    # the "checkout:" label).
                    revision = ldata[1].strip().split(' ')[0]
            return revision
        except (IndexError, AttributeError):
            msg = "Failed to read revision from Fossil: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
class CfgExternalCommandVerifier(CfgVerifier): """ Invoke an external script to verify :ref:`server-plugins-generators-cfg` file contents """ #: Handle :file:`:test` files __basenames__ = [':test'] def __init__(self, name, specific, encoding): CfgVerifier.__init__(self, name, specific, encoding) self.cmd = [] self.exc = Executor(timeout=30) __init__.__doc__ = CfgVerifier.__init__.__doc__ def verify_entry(self, entry, metadata, data): try: result = self.exc.run(self.cmd, inputdata=data) if not result.success: raise CfgVerificationError(result.error) except OSError: raise CfgVerificationError(sys.exc_info()[1]) verify_entry.__doc__ = CfgVerifier.verify_entry.__doc__ def handle_event(self, event): CfgVerifier.handle_event(self, event) if not self.data: return self.cmd = [] if not os.access(self.name, os.X_OK): bangpath = self.data.splitlines()[0].strip() if bangpath.startswith("#!"): self.cmd.extend(shlex.split(bangpath[2:].strip())) else: raise PluginExecutionError("Cannot execute %s" % self.name) self.cmd.append(self.name) handle_event.__doc__ = CfgVerifier.handle_event.__doc__
def __init__(self, *args, **kwargs):
    """Initialize the lint plugin and build the file/schema map."""
    Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
    # map of <file glob> -> <schema file>; globs support '**' to match
    # across directory separators, schemas are relative to the schema dir
    self.filesets = \
        {"Metadata/groups.xml": "metadata.xsd",
         "Metadata/clients.xml": "clients.xsd",
         "Cfg/**/info.xml": "info.xsd",
         "Cfg/**/privkey.xml": "privkey.xsd",
         "Cfg/**/pubkey.xml": "pubkey.xsd",
         "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd",
         "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd",
         "SSHbase/**/info.xml": "info.xsd",
         "SSLCA/**/info.xml": "info.xsd",
         "TGenshi/**/info.xml": "info.xsd",
         "TCheetah/**/info.xml": "info.xsd",
         "Bundler/*.xml": "bundle.xsd",
         "Bundler/*.genshi": "bundle.xsd",
         "Pkgmgr/*.xml": "pkglist.xsd",
         "Rules/*.xml": "rules.xsd",
         "Defaults/*.xml": "defaults.xsd",
         "etc/report-configuration.xml": "report-configuration.xsd",
         "Deps/*.xml": "deps.xsd",
         "Decisions/*.xml": "decisions.xsd",
         "Packages/sources.xml": "packages.xsd",
         "GroupPatterns/config.xml": "grouppatterns.xsd",
         "NagiosGen/config.xml": "nagiosgen.xsd",
         "FileProbes/config.xml": "fileprobes.xsd",
         "SSLCA/**/cert.xml": "sslca-cert.xsd",
         "SSLCA/**/key.xml": "sslca-key.xsd",
         "GroupLogic/groups.xml": "grouplogic.xsd"
         }
    self.filelists = {}
    self.get_filelists()
    self.cmd = Executor()
class Fossil(Bcfg2.Server.Plugin.Version):
    """ The Fossil plugin provides a revision interface for Bcfg2
    repos using fossil. """
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "_FOSSIL_"

    def __init__(self, core):
        Bcfg2.Server.Plugin.Version.__init__(self, core)
        self.cmd = Executor()
        self.logger.debug("Initialized Fossil plugin with fossil directory "
                          "%s" % self.vcs_path)

    def get_revision(self):
        """Read fossil revision information for the Bcfg2 repository.

        :returns: the current checkout id, or None if no ``checkout``
                  line is found in ``fossil info`` output
        :raises: Bcfg2.Server.Plugin.PluginExecutionError on failure
        """
        result = self.cmd.run(["env LC_ALL=C", "fossil", "info"],
                              shell=True,
                              cwd=Bcfg2.Options.setup.vcs_root)
        try:
            revision = None
            for line in result.stdout.splitlines():
                ldata = line.split(': ')
                if ldata[0].strip() == 'checkout':
                    # BUGFIX: formerly indexed line[1] (the 2nd character
                    # of the line) instead of ldata[1] (the value after
                    # the "checkout:" label).
                    revision = ldata[1].strip().split(' ')[0]
            return revision
        except (IndexError, AttributeError):
            msg = "Failed to read revision from Fossil: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
def __init__(self, _, path, entry_type, parent=None):
    """Entry set rooted at *path*; the set is named after the
    directory's basename.

    :param _: unused (the basename of *path* is used as the set name)
    :param path: directory holding the files for this entry set
    :param entry_type: class used to represent individual entries
    :param parent: owning plugin, kept for callbacks (may be None)
    """
    set_name = os.path.basename(path)
    Bcfg2.Server.Plugin.EntrySet.__init__(self, set_name, path, entry_type)
    # generated material is cached on first use; empty until then
    self.key = None
    self.cert = None
    self.parent = parent
    # allow generation commands up to two minutes to complete
    self.cmd = Executor(timeout=120)
def __init__(self, metadata, sources, cachepath, basepath, debug=False):
    """Initialize a yum package collection.

    :param metadata: client metadata for this collection
    :param sources: list of package sources
    :param cachepath: directory for cached metadata
    :param basepath: base plugin data directory
    :param debug: enable debug logging
    """
    Collection.__init__(self, metadata, sources, cachepath, basepath,
                        debug=debug)
    self.keypath = os.path.join(self.cachepath, "keys")
    self._helper = None
    if self.use_yum:
        #: Define a unique cache file for this collection to use
        #: for cached yum metadata
        self.cachefile = os.path.join(self.cachepath,
                                      "cache-%s" % self.cachekey)
        if not os.path.exists(self.cachefile):
            os.mkdir(self.cachefile)

        #: The path to the server-side config file used when
        #: resolving packages with the Python yum libraries
        self.cfgfile = os.path.join(self.cachefile, "yum.conf")
        self.write_config()
        self.cmd = Executor()
    else:
        self.cachefile = None
        self.cmd = None
    if HAS_PULP and self.has_pulp_sources:
        _setup_pulp()
        if self.pulp_cert_set is None:
            certdir = os.path.join(
                self.basepath, "pulp",
                os.path.basename(PulpCertificateSet.certpath))
            try:
                os.makedirs(certdir)
            except OSError:
                err = sys.exc_info()[1]
                # already existing is fine; anything else is logged
                if err.errno == errno.EEXIST:
                    pass
                else:
                    self.logger.error("Could not create Pulp consumer "
                                      "cert directory at %s: %s" %
                                      (certdir, err))
            self.pulp_cert_set = PulpCertificateSet(certdir)
def __init__(self, config): """ :param config: The XML configuration for this client :type config: lxml.etree._Element :raises: :exc:`Bcfg2.Client.Tools.ToolInstantiationError` """ #: A :class:`logging.Logger` object that will be used by this #: tool for logging self.logger = logging.getLogger(self.name) #: The XML configuration for this client self.config = config #: An :class:`Bcfg2.Utils.Executor` object for #: running external commands. self.cmd = Executor(timeout=Bcfg2.Options.setup.command_timeout) #: A list of entries that have been modified by this tool self.modified = [] #: A list of extra entries that are not listed in the #: configuration self.extra = [] #: A list of all entries handled by this tool self.handled = [] self._analyze_config() self._check_execs()
def __init__(self):
    """Initialize client state, logging, and validate options."""
    self.toolset = None
    self.tools = None
    self.config = None
    self._proxy = None
    self.setup = Bcfg2.Options.get_option_parser()
    # map verbosity options to a logging level
    if self.setup['debug']:
        level = logging.DEBUG
    elif self.setup['verbose']:
        level = logging.INFO
    else:
        level = logging.WARNING
    Bcfg2.Logger.setup_logging('bcfg2',
                               to_syslog=self.setup['syslog'],
                               level=level,
                               to_file=self.setup['logging'])
    self.logger = logging.getLogger('bcfg2')
    self.logger.debug(self.setup)
    self.cmd = Executor(self.setup['command_timeout'])
    # -Q (bundle_quick) requires a bundle selection and conflicts with -r
    if self.setup['bundle_quick']:
        if not self.setup['bundle'] and not self.setup['skipbundle']:
            self.logger.error("-Q option requires -b or -B")
            raise SystemExit(1)
        elif self.setup['remove']:
            self.logger.error("-Q option incompatible with -r")
            raise SystemExit(1)
    if 'drivers' in self.setup and self.setup['drivers'] == 'help':
        self.logger.info("The following drivers are available:")
        self.logger.info(Bcfg2.Client.Tools.__all__)
        raise SystemExit(0)
    if self.setup['remove'] and 'services' in self.setup['remove'].lower():
        self.logger.error("Service removal is nonsensical; "
                          "removed services will only be disabled")
    if (self.setup['remove'] and
            self.setup['remove'].lower() not in ['all', 'services',
                                                 'packages', 'users']):
        self.logger.error("Got unknown argument %s for -r" %
                          self.setup['remove'])
    if self.setup["file"] and self.setup["cache"]:
        print("cannot use -f and -c together")
        raise SystemExit(1)
    # normalize the server URL so later code can assume an https scheme
    if not self.setup['server'].startswith('https://'):
        self.setup['server'] = 'https://' + self.setup['server']
def __init__(self, fname):
    """Initialize the private key creator and its companion public key
    creator (which writes the matching ``.pub`` spec).

    :param fname: path to the privkey.xml specification file
    """
    CfgCreator.__init__(self, fname)
    StructFile.__init__(self, fname)
    # public key spec lives in a sibling "<dir>.pub" directory and is
    # named after that directory's basename
    pubdir = os.path.dirname(self.name) + ".pub"
    self.pubkey_creator = CfgPublicKeyCreator(
        os.path.join(pubdir, os.path.basename(pubdir)))
    self.cmd = Executor()
def run(self, setup):
    """Render the metadata graph with graphviz ``dot``.

    Output format is taken from the outfile extension (default png);
    without an outfile the rendered data is printed to stdout.
    """
    fmt = setup.outfile.split('.')[-1] if setup.outfile else 'png'
    exc = Executor()
    cmd = ["dot", "-T", fmt]
    if setup.outfile:
        cmd.extend(["-o", setup.outfile])
    # assemble the dot source: graph header, metadata body, optional key
    inputlist = ["digraph groups {",
                 '\trankdir="LR";',
                 self.metadata.viz(setup.includehosts,
                                   setup.includebundles,
                                   setup.includekey,
                                   setup.only_client,
                                   self.colors)]
    if setup.includekey:
        inputlist.extend(
            ["\tsubgraph cluster_key {",
             '\tstyle="filled";',
             '\tcolor="lightblue";',
             '\tBundle [ shape="septagon" ];',
             '\tGroup [shape="ellipse"];',
             '\tGroup Category [shape="trapezium"];\n',
             '\tProfile [style="bold", shape="ellipse"];',
             '\tHblock [label="Host1|Host2|Host3",shape="record"];',
             '\tlabel="Key";',
             "\t}"])
    inputlist.append("}")
    idata = "\n".join(inputlist)
    try:
        result = exc.run(cmd, inputdata=idata)
    except OSError:
        # on some systems (RHEL 6), you cannot run dot with
        # shell=True. on others (Gentoo with Python 2.7), you
        # must. In yet others (RHEL 5), either way works. I have
        # no idea what the difference is, but it's kind of a PITA.
        result = exc.run(cmd, shell=True, inputdata=idata)
    if not result.success:
        self.errExit("Error running %s: %s" % (cmd, result.error))
    if not setup.outfile:
        print(result.stdout)
def __init__(self, *args, **kwargs): Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) #: A dict of <file glob>: <schema file> that maps files in the #: Bcfg2 specification to their schemas. The globs are #: extended :mod:`fnmatch` globs that also support ``**``, #: which matches any number of any characters, including #: forward slashes. The schema files are relative to the #: schema directory, which can be controlled by the #: ``bcfg2-lint --schema`` option. self.filesets = \ {"Metadata/groups.xml": "metadata.xsd", "Metadata/clients.xml": "clients.xsd", "Cfg/**/info.xml": "info.xsd", "Cfg/**/privkey.xml": "privkey.xsd", "Cfg/**/pubkey.xml": "pubkey.xsd", "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd", "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd", "Cfg/**/sslcert.xml": "sslca-cert.xsd", "Cfg/**/sslkey.xml": "sslca-key.xsd", "SSHbase/**/info.xml": "info.xsd", "TGenshi/**/info.xml": "info.xsd", "TCheetah/**/info.xml": "info.xsd", "Bundler/*.xml": "bundle.xsd", "Bundler/*.genshi": "bundle.xsd", "Pkgmgr/*.xml": "pkglist.xsd", "Rules/*.xml": "rules.xsd", "Defaults/*.xml": "defaults.xsd", "etc/report-configuration.xml": "report-configuration.xsd", "Deps/*.xml": "deps.xsd", "Decisions/*.xml": "decisions.xsd", "Packages/sources.xml": "packages.xsd", "GroupPatterns/config.xml": "grouppatterns.xsd", "AWSTags/config.xml": "awstags.xsd", "NagiosGen/config.xml": "nagiosgen.xsd", "FileProbes/config.xml": "fileprobes.xsd", "GroupLogic/groups.xml": "grouplogic.xsd" } self.filelists = {} self.get_filelists() self.cmd = Executor()
def Visualize(self, hosts=False, bundles=False, key=False,
              only_client=None, output=None):
    """Build visualization of groups file."""
    # output format is taken from the output filename extension
    if output:
        fmt = output.split('.')[-1]
    else:
        fmt = 'png'

    exc = Executor()
    cmd = ["dot", "-T", fmt]
    if output:
        cmd.extend(["-o", output])
    # NOTE(review): idata is passed to exc.run() as a *list*; other
    # variants of this code join it into a string first — confirm
    # Executor.run accepts a list for inputdata.
    idata = [
        "digraph groups {",
        '\trankdir="LR";',
        self.metadata.viz(hosts, bundles, key, only_client, self.colors)
    ]
    if key:
        idata.extend([
            "\tsubgraph cluster_key {",
            '\tstyle="filled";',
            '\tcolor="lightblue";',
            '\tBundle [ shape="septagon" ];',
            '\tGroup [shape="ellipse"];',
            '\tProfile [style="bold", shape="ellipse"];',
            '\tHblock [label="Host1|Host2|Host3",shape="record"];',
            '\tlabel="Key";',
            "\t}"
        ])
    idata.append("}")
    try:
        result = exc.run(cmd, inputdata=idata)
    except OSError:
        # on some systems (RHEL 6), you cannot run dot with
        # shell=True. on others (Gentoo with Python 2.7), you
        # must. In yet others (RHEL 5), either way works. I have
        # no idea what the difference is, but it's kind of a PITA.
        result = exc.run(cmd, shell=True, inputdata=idata)
    if not result.success:
        print("Error running %s: %s" % (cmd, result.error))
        raise SystemExit(result.retval)
def Visualize(self, hosts=False, bundles=False, key=False,
              only_client=None, output=None):
    """Build visualization of groups file."""
    # derive the output format from the filename extension (default png)
    fmt = output.split('.')[-1] if output else 'png'
    exc = Executor()
    cmd = ["dot", "-T", fmt]
    if output:
        cmd.extend(["-o", output])
    # assemble the dot source: header, metadata body, optional legend
    idata = ["digraph groups {",
             '\trankdir="LR";',
             self.metadata.viz(hosts, bundles, key, only_client,
                               self.colors)]
    if key:
        idata.append("\tsubgraph cluster_key {")
        idata.append('\tstyle="filled";')
        idata.append('\tcolor="lightblue";')
        idata.append('\tBundle [ shape="septagon" ];')
        idata.append('\tGroup [shape="ellipse"];')
        idata.append('\tProfile [style="bold", shape="ellipse"];')
        idata.append('\tHblock [label="Host1|Host2|Host3",shape="record"];')
        idata.append('\tlabel="Key";')
        idata.append("\t}")
    idata.append("}")
    try:
        result = exc.run(cmd, inputdata=idata)
    except OSError:
        # on some systems (RHEL 6), you cannot run dot with
        # shell=True. on others (Gentoo with Python 2.7), you
        # must. In yet others (RHEL 5), either way works. I have
        # no idea what the difference is, but it's kind of a PITA.
        result = exc.run(cmd, shell=True, inputdata=idata)
    if not result.success:
        print("Error running %s: %s" % (cmd, result.error))
        raise SystemExit(result.retval)
def create_data(self, entry, metadata):
    """Generate a new SSL private key for the given client.

    Key type and size come from the ``<Key>`` element of the matching
    spec (defaults: rsa, 2048 bits).

    :param entry: the abstract entry being bound
    :param metadata: client metadata
    :returns: the generated key data
    :raises: CfgCreationError if generation fails or the key type is
             unknown
    """
    self.logger.info("Cfg: Generating new SSL key for %s" % self.name)
    spec = self.XMLMatch(metadata)
    key = spec.find("Key")
    # BUGFIX: formerly "if not key:", which also discarded an *empty*
    # <Key .../> element (empty lxml elements are falsy), losing its
    # type/bits attributes.  Only substitute a default when the element
    # is truly absent.
    if key is None:
        key = dict()
    ktype = key.get('type', 'rsa')
    bits = key.get('bits', '2048')
    if ktype == 'rsa':
        cmd = ["openssl", "genrsa", bits]
    elif ktype == 'dsa':
        cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits]
    else:
        # BUGFIX: an unknown type formerly fell through to an unbound
        # 'cmd' and raised NameError; fail with a meaningful error.
        raise CfgCreationError("Unknown key type %s for %s" %
                               (ktype, self.name))
    result = Executor().run(cmd)
    if not result.success:
        raise CfgCreationError("Failed to generate key %s for %s: %s" %
                               (self.name, metadata.hostname,
                                result.error))
    self.write_data(result.stdout, **self.get_specificity(metadata))
    return result.stdout
def __init__(self, *args, **kwargs): Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs) #: A dict of <file glob>: <schema file> that maps files in the #: Bcfg2 specification to their schemas. The globs are #: extended :mod:`fnmatch` globs that also support ``**``, #: which matches any number of any characters, including #: forward slashes. The schema files are relative to the #: schema directory, which can be controlled by the #: ``bcfg2-lint --schema`` option. self.filesets = \ {"Metadata/groups.xml": "metadata.xsd", "Metadata/clients.xml": "clients.xsd", "Cfg/**/info.xml": "info.xsd", "Cfg/**/privkey.xml": "privkey.xsd", "Cfg/**/pubkey.xml": "pubkey.xsd", "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd", "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd", "SSHbase/**/info.xml": "info.xsd", "SSLCA/**/info.xml": "info.xsd", "TGenshi/**/info.xml": "info.xsd", "TCheetah/**/info.xml": "info.xsd", "Bundler/*.xml": "bundle.xsd", "Bundler/*.genshi": "bundle.xsd", "Pkgmgr/*.xml": "pkglist.xsd", "Rules/*.xml": "rules.xsd", "Defaults/*.xml": "defaults.xsd", "etc/report-configuration.xml": "report-configuration.xsd", "Deps/*.xml": "deps.xsd", "Decisions/*.xml": "decisions.xsd", "Packages/sources.xml": "packages.xsd", "GroupPatterns/config.xml": "grouppatterns.xsd", "NagiosGen/config.xml": "nagiosgen.xsd", "FileProbes/config.xml": "fileprobes.xsd", "SSLCA/**/cert.xml": "sslca-cert.xsd", "SSLCA/**/key.xml": "sslca-key.xsd", "GroupLogic/groups.xml": "grouplogic.xsd" } self.filelists = {} self.get_filelists() self.cmd = Executor()
class Cvs(Bcfg2.Server.Plugin.Version):
    """ The Cvs plugin provides a revision interface for Bcfg2 repos
    using cvs."""
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "CVSROOT"

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
        self.cmd = Executor()
        self.logger.debug("Initialized cvs plugin with CVS directory %s" %
                          self.vcs_path)

    def get_revision(self):
        """Read cvs revision information for the Bcfg2 repository."""
        # LC_ALL=C keeps cvs output parseable regardless of locale
        result = self.cmd.run(["env LC_ALL=C", "cvs", "log"],
                              shell=True, cwd=self.vcs_root)
        try:
            # the revision is on the first line of `cvs log` output
            lines = result.stdout.splitlines()
            return lines[0].strip()
        except (IndexError, AttributeError):
            msg = "Failed to read revision from CVS: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
class Darcs(Bcfg2.Server.Plugin.Version):
    """ Darcs is a version plugin for dealing with Bcfg2 repos stored
    in the Darcs VCS. """
    __author__ = '*****@*****.**'
    __vcs_metadata_path__ = "_darcs"

    def __init__(self, core):
        Bcfg2.Server.Plugin.Version.__init__(self, core)
        self.cmd = Executor()
        self.logger.debug("Initialized Darcs plugin with darcs directory "
                          "%s" % self.vcs_path)

    def get_revision(self):
        """Read Darcs changeset information for the Bcfg2 repository."""
        # LC_ALL=C keeps darcs output parseable regardless of locale
        result = self.cmd.run(["env LC_ALL=C", "darcs", "changes"],
                              shell=True,
                              cwd=Bcfg2.Options.setup.vcs_root)
        if not result.success:
            msg = "Failed to read revision from darcs: %s" % result.error
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
        # the most recent changeset is on the first line of output
        return result.stdout.splitlines()[0].strip()
class Trigger(Bcfg2.Server.Plugin.Plugin,
              Bcfg2.Server.Plugin.ClientRunHooks,
              Bcfg2.Server.Plugin.DirectoryBacked):
    """Trigger is a plugin that calls external scripts (on the server)."""
    __author__ = '*****@*****.**'

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
        Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
        self.cmd = Executor()

    def async_run(self, args):
        """ Run the trigger script asynchronously in a forked process

        Uses a double fork so the grandchild running the script is
        reparented to init and never becomes a zombie; the parent only
        waits for the short-lived intermediate child.
        """
        pid = os.fork()
        if pid:
            os.waitpid(pid, 0)
        else:
            dpid = os.fork()
            if not dpid:
                self.debug_log("Running %s" %
                               " ".join(pipes.quote(a) for a in args))
                result = self.cmd.run(args)
                if not result.success:
                    self.logger.error("Trigger: Error running %s: %s" %
                                      (args[0], result.error))
                elif result.stderr:
                    self.debug_log("Trigger: Error: %s" % result.stderr)
            # both the intermediate child and the grandchild must exit
            # without running normal interpreter cleanup
            os._exit(0)  # pylint: disable=W0212

    def end_client_run(self, metadata):
        # each script gets: hostname, -p <profile>, -g <colon-joined groups>
        args = [
            metadata.hostname, '-p', metadata.profile, '-g',
            ':'.join([g for g in metadata.groups])
        ]
        for notifier in self.entries.keys():
            npath = os.path.join(self.data, notifier)
            self.async_run([npath] + args)
class Trigger(Bcfg2.Server.Plugin.Plugin,
              Bcfg2.Server.Plugin.ClientRunHooks,
              Bcfg2.Server.Plugin.DirectoryBacked):
    """Trigger is a plugin that calls external scripts (on the server)."""
    __author__ = '*****@*****.**'

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
        Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
        self.cmd = Executor()

    def async_run(self, args):
        """ Run the trigger script asynchronously in a forked process """
        # double fork: the grandchild runs the script and is reparented
        # to init, so it never lingers as a zombie
        child = os.fork()
        if child:
            os.waitpid(child, 0)
        else:
            grandchild = os.fork()
            if not grandchild:
                self.debug_log("Running %s" %
                               " ".join(pipes.quote(a) for a in args))
                result = self.cmd.run(args)
                if not result.success:
                    self.logger.error("Trigger: Error running %s: %s" %
                                      (args[0], result.error))
                elif result.stderr:
                    self.debug_log("Trigger: Error: %s" % result.stderr)
            # skip interpreter cleanup in both forked processes
            os._exit(0)  # pylint: disable=W0212

    def end_client_run(self, metadata):
        # arguments handed to every trigger script
        args = [metadata.hostname,
                '-p', metadata.profile,
                '-g', ':'.join([g for g in metadata.groups])]
        for notifier in self.entries.keys():
            script_path = os.path.join(self.data, notifier)
            self.async_run([script_path] + args)
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
    """ Ensure that all XML files in the Bcfg2 repository validate
    according to their respective schemas. """

    def __init__(self, *args, **kwargs):
        Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)

        #: A dict of <file glob>: <schema file> that maps files in the
        #: Bcfg2 specification to their schemas.  The globs are
        #: extended :mod:`fnmatch` globs that also support ``**``,
        #: which matches any number of any characters, including
        #: forward slashes.  The schema files are relative to the
        #: schema directory, which can be controlled by the
        #: ``bcfg2-lint --schema`` option.
        self.filesets = \
            {"Metadata/groups.xml": "metadata.xsd",
             "Metadata/clients.xml": "clients.xsd",
             "Cfg/**/info.xml": "info.xsd",
             "Cfg/**/privkey.xml": "privkey.xsd",
             "Cfg/**/pubkey.xml": "pubkey.xsd",
             "Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd",
             "Cfg/**/authorized_keys.xml": "authorizedkeys.xsd",
             "SSHbase/**/info.xml": "info.xsd",
             "SSLCA/**/info.xml": "info.xsd",
             "TGenshi/**/info.xml": "info.xsd",
             "TCheetah/**/info.xml": "info.xsd",
             "Bundler/*.xml": "bundle.xsd",
             "Bundler/*.genshi": "bundle.xsd",
             "Pkgmgr/*.xml": "pkglist.xsd",
             "Rules/*.xml": "rules.xsd",
             "Defaults/*.xml": "defaults.xsd",
             "etc/report-configuration.xml": "report-configuration.xsd",
             "Deps/*.xml": "deps.xsd",
             "Decisions/*.xml": "decisions.xsd",
             "Packages/sources.xml": "packages.xsd",
             "GroupPatterns/config.xml": "grouppatterns.xsd",
             "NagiosGen/config.xml": "nagiosgen.xsd",
             "FileProbes/config.xml": "fileprobes.xsd",
             "SSLCA/**/cert.xml": "sslca-cert.xsd",
             "SSLCA/**/key.xml": "sslca-key.xsd",
             "GroupLogic/groups.xml": "grouplogic.xsd"
             }
        self.filelists = {}
        self.get_filelists()
        self.cmd = Executor()

    def Run(self):
        """Validate every mapped file, then check Properties files."""
        schemadir = self.config['schema']
        for path, schemaname in self.filesets.items():
            try:
                filelist = self.filelists[path]
            except KeyError:
                filelist = []
            if filelist:
                # avoid loading schemas for empty file lists
                schemafile = os.path.join(schemadir, schemaname)
                schema = self._load_schema(schemafile)
                if schema:
                    for filename in filelist:
                        self.validate(filename, schemafile, schema=schema)
        self.check_properties()

    @classmethod
    def Errors(cls):
        # lint error names produced by this plugin, with severities
        return {"schema-failed-to-parse": "warning",
                "properties-schema-not-found": "warning",
                "xml-failed-to-parse": "error",
                "xml-failed-to-read": "error",
                "xml-failed-to-verify": "error",
                "input-output-error": "error"}

    def check_properties(self):
        """ Check Properties files against their schemas. """
        for filename in self.filelists['props']:
            # each Properties file may ship its own <name>.xsd schema
            schemafile = "%s.xsd" % os.path.splitext(filename)[0]
            if os.path.exists(schemafile):
                self.validate(filename, schemafile)
            else:
                self.LintError("properties-schema-not-found",
                               "No schema found for %s" % filename)
                # ensure that it at least parses
                self.parse(filename)

    def parse(self, filename):
        """ Parse an XML file, raising the appropriate LintErrors if
        it can't be parsed or read.  Return the
        lxml.etree._ElementTree parsed from the file.

        :param filename: The full path to the file to parse
        :type filename: string
        :returns: lxml.etree._ElementTree - the parsed data"""
        try:
            return lxml.etree.parse(filename)
        except SyntaxError:
            # re-run through xmllint to get a human-readable error report
            result = self.cmd.run(["xmllint", filename])
            self.LintError("xml-failed-to-parse",
                           "%s fails to parse:\n%s" %
                           (filename, result.stdout + result.stderr))
            return False
        except IOError:
            self.LintError("xml-failed-to-read",
                           "Failed to open file %s" % filename)
            return False

    def validate(self, filename, schemafile, schema=None):
        """ Validate a file against the given schema.

        :param filename: The full path to the file to validate
        :type filename: string
        :param schemafile: The full path to the schema file to
                           validate against
        :type schemafile: string
        :param schema: The loaded schema to validate against.  This
                       can be used to avoid parsing a single schema
                       file for every file that needs to be validate
                       against it.
        :type schema: lxml.etree.Schema
        :returns: bool - True if the file validates, false otherwise
        """
        if schema is None:
            # if no schema object was provided, instantiate one
            schema = self._load_schema(schemafile)
            if not schema:
                return False
        datafile = self.parse(filename)
        if not schema.validate(datafile):
            # re-run through xmllint for a detailed failure report
            cmd = ["xmllint"]
            if self.files is None:
                cmd.append("--xinclude")
            cmd.extend(["--noout", "--schema", schemafile, filename])
            result = self.cmd.run(cmd)
            if not result.success:
                self.LintError("xml-failed-to-verify",
                               "%s fails to verify:\n%s" %
                               (filename, result.stdout + result.stderr))
                return False
        return True

    def get_filelists(self):
        """ Get lists of different kinds of files to validate.  This
        doesn't return anything, but it sets
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filelists` to a
        dict whose keys are path globs given in
        :attr:`Bcfg2.Server.Lint.Validate.Validate.filesets` and
        whose values are lists of the full paths to all files in the
        Bcfg2 repository (or given with ``bcfg2-lint --stdin``) that
        match the glob."""
        if self.files is not None:
            listfiles = lambda p: fnmatch.filter(self.files,
                                                 os.path.join('*', p))
        else:
            listfiles = lambda p: glob.glob(os.path.join(self.config['repo'],
                                                         p))

        for path in self.filesets.keys():
            if '/**/' in path:
                if self.files is not None:
                    self.filelists[path] = listfiles(path)
                else:  # self.files is None
                    # glob can't expand '**', so walk the tree manually
                    fpath, fname = path.split('/**/')
                    self.filelists[path] = []
                    for root, _, files in \
                            os.walk(os.path.join(self.config['repo'],
                                                 fpath)):
                        self.filelists[path].extend([os.path.join(root, f)
                                                     for f in files
                                                     if f == fname])
            else:
                self.filelists[path] = listfiles(path)

        self.filelists['props'] = listfiles("Properties/*.xml")

    def _load_schema(self, filename):
        """ Load an XML schema document, returning the Schema object
        and raising appropriate lint errors on failure.

        :param filename: The full path to the schema file to load.
        :type filename: string
        :returns: lxml.etree.Schema - The loaded schema data
        """
        try:
            return lxml.etree.XMLSchema(lxml.etree.parse(filename))
        except IOError:
            err = sys.exc_info()[1]
            self.LintError("input-output-error", str(err))
        except lxml.etree.XMLSchemaParseError:
            err = sys.exc_info()[1]
            self.LintError("schema-failed-to-parse",
                           "Failed to process schema %s: %s" %
                           (filename, err))
        return None
class SSHbase(Bcfg2.Server.Plugin.Plugin,
              Bcfg2.Server.Plugin.Generator,
              Bcfg2.Server.Plugin.PullTarget):
    """
    The sshbase generator manages ssh host keys (both v1 and v2)
    for hosts.  It also manages the ssh_known_hosts file. It can
    integrate host keys from other management domains and similarly
    export its keys. The repository contains files in the following
    formats:

    ssh_host_key.H_(hostname) -> the v1 host private key for
      (hostname)
    ssh_host_key.pub.H_(hostname) -> the v1 host public key
      for (hostname)
    ssh_host_(ec)(dr)sa_key.H_(hostname) -> the v2 ssh host
      private key for (hostname)
    ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
      public key for (hostname)
    ssh_known_hosts -> the current known hosts file. this
      is regenerated each time a new key is generated.
    """
    __author__ = '*****@*****.**'

    #: Repository basenames of all key files this plugin manages
    keypatterns = ["ssh_host_dsa_key",
                   "ssh_host_ecdsa_key",
                   "ssh_host_rsa_key",
                   "ssh_host_key",
                   "ssh_host_dsa_key.pub",
                   "ssh_host_ecdsa_key.pub",
                   "ssh_host_rsa_key.pub",
                   "ssh_host_key.pub"]

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Generator.__init__(self)
        Bcfg2.Server.Plugin.PullTarget.__init__(self)
        #: cache of hostname -> (ip addresses, hostname) lookups
        self.ipcache = {}
        #: cache of ip -> hostname reverse lookups
        self.namecache = {}
        #: cached ssh_known_hosts content; False means "not built yet"
        self.__skn = False
        # keep track of which bogus keys we've warned about, and only
        # do so once
        self.badnames = dict()
        self.fam = Bcfg2.Server.FileMonitor.get_fam()
        self.fam.AddMonitor(self.data, self)
        self.static = dict()
        self.entries = dict()
        self.Entries['Path'] = dict()
        self.entries['/etc/ssh/ssh_known_hosts'] = \
            KnownHostsEntrySet(self.data)
        self.Entries['Path']['/etc/ssh/ssh_known_hosts'] = self.build_skn
        for keypattern in self.keypatterns:
            self.entries["/etc/ssh/" + keypattern] = \
                HostKeyEntrySet(keypattern, self.data)
            self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk
        self.cmd = Executor()

    def get_skn(self):
        """Build memory cache of the ssh known hosts file."""
        if not self.__skn:
            # if no metadata is registered yet, defer
            if len(self.core.metadata.query.all()) == 0:
                self.__skn = False
                return self.__skn
            skn = [s.data.rstrip() for s in list(self.static.values())]

            mquery = self.core.metadata.query

            # build hostname cache
            names = dict()
            for cmeta in mquery.all():
                names[cmeta.hostname] = set([cmeta.hostname])
                names[cmeta.hostname].update(cmeta.aliases)
                newnames = set()
                newips = set()
                for name in names[cmeta.hostname]:
                    newnames.add(name.split('.')[0])
                    try:
                        newips.update(self.get_ipcache_entry(name)[0])
                    except:  # pylint: disable=W0702
                        continue
                names[cmeta.hostname].update(newnames)
                names[cmeta.hostname].update(cmeta.addresses)
                names[cmeta.hostname].update(newips)
                # TODO: Only perform reverse lookups on IPs if an
                # option is set.
                if True:
                    for ip in newips:
                        try:
                            names[cmeta.hostname].update(
                                self.get_namecache_entry(ip))
                        except:  # pylint: disable=W0702
                            continue
                names[cmeta.hostname] = sorted(names[cmeta.hostname])

            pubkeys = [pubk for pubk in list(self.entries.keys())
                       if pubk.endswith('.pub')]
            pubkeys.sort()
            for pubkey in pubkeys:
                for entry in sorted(self.entries[pubkey].entries.values(),
                                    key=lambda e: (e.specific.hostname or
                                                   e.specific.group)):
                    specific = entry.specific
                    hostnames = []
                    if specific.hostname and specific.hostname in names:
                        hostnames = names[specific.hostname]
                    elif specific.group:
                        hostnames = list(chain(
                            *[names[cmeta.hostname]
                              for cmeta in
                              mquery.by_groups([specific.group])]))
                    elif specific.all:
                        # a generic key for all hosts? really?
                        hostnames = list(chain(*list(names.values())))
                    if not hostnames:
                        if specific.hostname:
                            key = specific.hostname
                            ktype = "host"
                        elif specific.group:
                            key = specific.group
                            ktype = "group"
                        else:
                            # user has added a global SSH key, but
                            # have no clients yet.  don't warn about
                            # this.
                            continue
                        if key not in self.badnames:
                            self.badnames[key] = True
                            self.logger.info("Ignoring key for unknown %s %s" %
                                             (ktype, key))
                        continue
                    skn.append("%s %s" % (','.join(hostnames),
                                          entry.data.rstrip()))
            self.__skn = "\n".join(skn) + "\n"
        return self.__skn

    def set_skn(self, value):
        """Set backing data for skn."""
        self.__skn = value

    skn = property(get_skn, set_skn)

    def HandleEvent(self, event=None):
        """Local event handler that does skn regen on pubkey change."""
        # skip events we don't care about
        action = event.code2str()
        if action == "endExist" or event.filename == self.data:
            return

        for entry in list(self.entries.values()):
            if entry.specific.match(event.filename):
                entry.handle_event(event)
                if any(event.filename.startswith(kp)
                       for kp in self.keypatterns
                       if kp.endswith(".pub")):
                    self.debug_log("New public key %s; invalidating "
                                   "ssh_known_hosts cache" % event.filename)
                    self.skn = False
                return

        if event.filename == 'info.xml':
            for entry in list(self.entries.values()):
                entry.handle_event(event)
            return

        if event.filename.endswith('.static'):
            self.logger.info("Static key %s %s; invalidating ssh_known_hosts "
                             "cache" % (event.filename, action))
            if action == "deleted" and event.filename in self.static:
                del self.static[event.filename]
                self.skn = False
            else:
                self.static[event.filename] = Bcfg2.Server.Plugin.FileBacked(
                    os.path.join(self.data, event.filename))
                self.static[event.filename].HandleEvent(event)
                self.skn = False
            return

        self.logger.warn("SSHbase: Got unknown event %s %s" %
                         (event.filename, action))

    def get_ipcache_entry(self, client):
        """ Build a cache of dns results.

        :raises: PluginExecutionError if no address can be found
        """
        if client in self.ipcache:
            if self.ipcache[client]:
                return self.ipcache[client]
            else:
                # a previous lookup failed; False is cached so we
                # don't retry on every known_hosts rebuild
                raise PluginExecutionError("No cached IP address for %s" %
                                           client)
        else:
            # need to add entry
            try:
                ipaddr = set([info[4][0]
                              for info in socket.getaddrinfo(client, None)])
                self.ipcache[client] = (ipaddr, client)
                return (ipaddr, client)
            except socket.gaierror:
                # fall back to getent in case the name is resolvable
                # through NSS sources getaddrinfo missed
                result = self.cmd.run(["getent", "hosts", client])
                if result.success:
                    ipaddr = result.stdout.strip().split()
                    if ipaddr:
                        self.ipcache[client] = (ipaddr, client)
                        return (ipaddr, client)
                self.ipcache[client] = False
                msg = "Failed to find IP address for %s: %s" % (client,
                                                                result.error)
                # bug fix: self.logger is a Logger object, not a
                # callable; log through .error()
                self.logger.error(msg)
                raise PluginExecutionError(msg)

    def get_namecache_entry(self, cip):
        """Build a cache of name lookups from client IP addresses."""
        if cip in self.namecache:
            # lookup cached name from IP
            if self.namecache[cip]:
                return self.namecache[cip]
            else:
                # failed lookups are cached as False
                raise socket.gaierror
        else:
            # add an entry that has not been cached
            try:
                rvlookup = socket.gethostbyaddr(cip)
                if rvlookup[0]:
                    self.namecache[cip] = [rvlookup[0]]
                else:
                    self.namecache[cip] = []
                self.namecache[cip].extend(rvlookup[1])
                return self.namecache[cip]
            except socket.gaierror:
                self.namecache[cip] = False
                self.logger.error("Failed to find any names associated with "
                                  "IP address %s" % cip)
                raise

    def build_skn(self, entry, metadata):
        """This function builds a host specific known_hosts file."""
        try:
            # a host- or group-specific known_hosts file in the repo
            # takes precedence over the generated one
            self.entries[entry.get('name')].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            entry.text = self.skn
            hostkeys = []
            for key in self.keypatterns:
                if key.endswith(".pub"):
                    try:
                        hostkeys.append(
                            self.entries["/etc/ssh/" +
                                         key].best_matching(metadata))
                    except Bcfg2.Server.Plugin.PluginExecutionError:
                        pass
            hostkeys.sort()
            for hostkey in hostkeys:
                # make the client's own keys resolvable as localhost
                entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" \
                    % hostkey.data
            self.entries[entry.get('name')].bind_info_to_entry(entry,
                                                               metadata)

    def build_hk(self, entry, metadata):
        """This binds host key data into entries."""
        try:
            self.entries[entry.get('name')].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            filename = entry.get('name').split('/')[-1]
            self.GenerateHostKeyPair(metadata.hostname, filename)
            # Service the FAM events queued up by the key generation
            # so the data structure entries will be available for
            # binding.
            #
            # NOTE: We wait for up to ten seconds. There is some
            # potential for race condition, because if the file
            # monitor doesn't get notified about the new key files in
            # time, those entries won't be available for binding. In
            # practice, this seems "good enough".
            tries = 0
            is_bound = False
            while not is_bound:
                if tries >= 10:
                    msg = "%s still not registered" % filename
                    self.logger.error(msg)
                    raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
                self.fam.handle_events_in_interval(1)
                tries += 1
                try:
                    self.entries[entry.get('name')].bind_entry(entry,
                                                               metadata)
                    is_bound = True
                except Bcfg2.Server.Plugin.PluginExecutionError:
                    pass

    def GenerateHostKeyPair(self, client, filename):
        """Generate new host key pair for client."""
        match = re.search(r'(ssh_host_(?:((?:ecd|d|r)sa)_)?key)', filename)
        if match:
            hostkey = "%s.H_%s" % (match.group(1), client)
            if match.group(2):
                keytype = match.group(2)
            else:
                # no algorithm in the filename means a v1 key
                keytype = 'rsa1'
        else:
            raise PluginExecutionError("Unknown key filename: %s" % filename)

        fileloc = os.path.join(self.data, hostkey)
        publoc = os.path.join(self.data,
                              ".".join([hostkey.split('.')[0], 'pub',
                                        "H_%s" % client]))
        # generate into a temp dir, then copy into the repo, so a
        # failed ssh-keygen run never leaves partial keys in the repo
        tempdir = tempfile.mkdtemp()
        temploc = os.path.join(tempdir, hostkey)
        cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "",
               "-t", keytype, "-C", "root@%s" % client]
        self.debug_log("SSHbase: Running: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSHbase: Error running ssh-keygen: %s"
                                       % result.error)

        try:
            shutil.copy(temploc, fileloc)
            shutil.copy("%s.pub" % temploc, publoc)
        except IOError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Temporary SSH keys not found: %s" %
                                       err)

        try:
            os.unlink(temploc)
            os.unlink("%s.pub" % temploc)
            os.rmdir(tempdir)
        except OSError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Failed to unlink temporary ssh keys: "
                                       "%s" % err)

    def AcceptChoices(self, _, metadata):
        """Return pull specificities (always host-specific for keys)."""
        return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)]

    def AcceptPullData(self, specific, entry, log):
        """Per-plugin bcfg2-admin pull support."""
        # specific will always be host specific
        filename = os.path.join(self.data,
                                "%s.H_%s" % (entry['name'].split('/')[-1],
                                             specific.hostname))
        try:
            open(filename, 'w').write(entry['text'])
            if log:
                print("Wrote file %s" % filename)
        except KeyError:
            self.logger.error("Failed to pull %s. This file does not "
                              "currently exist on the client" %
                              entry.get('name'))
    def __init__(self, fname):
        """Initialize both parent classes for the given spec file,
        set up an executor for running openssl commands, and grab a
        reference to the Cfg plugin."""
        XMLCfgCreator.__init__(self, fname)
        CfgVerifier.__init__(self, fname, None)
        self.cmd = Executor()  # runs external openssl commands
        self.cfg = get_cfg()   # handle on the Cfg plugin instance
class CfgSSLCACertCreator(XMLCfgCreator, CfgVerifier):
    """ This class acts as both a Cfg creator that creates SSL certs,
    and as a Cfg verifier that verifies SSL certs. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within pubkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['sslcert.xml']

    cfg_section = "sslca"
    options = [
        Bcfg2.Options.Option(
            cf=("sslca", "category"), dest="sslca_category",
            help="Metadata category that generated SSL keys are specific to"),
        Bcfg2.Options.Option(
            cf=("sslca", "passphrase"), dest="sslca_passphrase",
            help="Passphrase used to encrypt generated SSL keys"),
        Bcfg2.Options.WildcardSectionGroup(
            Bcfg2.Options.PathOption(
                cf=("sslca_*", "config"),
                help="Path to the openssl config for the CA"),
            Bcfg2.Options.Option(
                cf=("sslca_*", "passphrase"),
                help="Passphrase for the CA private key"),
            Bcfg2.Options.PathOption(
                cf=("sslca_*", "chaincert"),
                help="Path to the SSL chaining certificate for verification"),
            Bcfg2.Options.BooleanOption(
                cf=("sslca_*", "root_ca"),
                help="Whether or not <chaincert> is a root CA (as opposed to "
                     "an intermediate cert"),
            prefix="")]

    def __init__(self, fname):
        XMLCfgCreator.__init__(self, fname)
        CfgVerifier.__init__(self, fname, None)
        self.cmd = Executor()  # runs external openssl commands
        self.cfg = get_cfg()   # handle on the Cfg plugin instance

    def build_req_config(self, metadata):
        """ Generates a temporary openssl configuration file that is
        used to generate the required certificate request.

        :returns: string - path to the temporary config file; the
                  caller is responsible for removing it """
        fd, fname = tempfile.mkstemp()
        cfp = ConfigParser.ConfigParser({})
        # openssl config keys are case-sensitive; disable the default
        # lower-casing of option names
        cfp.optionxform = str
        defaults = dict(
            req=dict(default_md='sha1',
                     distinguished_name='req_distinguished_name',
                     req_extensions='v3_req',
                     x509_extensions='v3_req',
                     prompt='no'),
            req_distinguished_name=dict(),
            v3_req=dict(subjectAltName='@alt_names'),
            alt_names=dict())
        for section in list(defaults.keys()):
            cfp.add_section(section)
            for key in defaults[section]:
                cfp.set(section, key, defaults[section][key])
        spec = self.XMLMatch(metadata)
        cert = spec.find("Cert")
        # subjectAltName entries: explicit <subjectAltName/> tags plus
        # all metadata aliases plus the hostname itself
        altnamenum = 1
        altnames = spec.findall('subjectAltName')
        altnames.extend(list(metadata.aliases))
        altnames.append(metadata.hostname)
        for altname in altnames:
            cfp.set('alt_names', 'DNS.' + str(altnamenum), altname)
            altnamenum += 1
        for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
            if cert.get(item):
                cfp.set('req_distinguished_name', item, cert.get(item))
        cfp.set('req_distinguished_name', 'CN', metadata.hostname)
        self.debug_log("Cfg: Writing temporary CSR config to %s" % fname)
        try:
            cfp.write(os.fdopen(fd, 'w'))
        except IOError:
            raise CfgCreationError("Cfg: Failed to write temporary CSR config "
                                   "file: %s" % sys.exc_info()[1])
        return fname

    def build_request(self, keyfile, metadata):
        """ Create the certificate request.

        :returns: string - path to the temporary CSR file; the caller
                  is responsible for removing it """
        req_config = self.build_req_config(metadata)
        try:
            fd, req = tempfile.mkstemp()
            os.close(fd)
            cert = self.XMLMatch(metadata).find("Cert")
            days = cert.get("days", "365")
            cmd = ["openssl", "req", "-new", "-config", req_config,
                   "-days", days, "-key", keyfile, "-text", "-out", req]
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError("Failed to generate CSR: %s" %
                                       result.error)
            return req
        finally:
            try:
                os.unlink(req_config)
            except OSError:
                self.logger.error("Cfg: Failed to unlink temporary CSR "
                                  "config: %s" % sys.exc_info()[1])

    def get_ca(self, name):
        """ get a dict describing a CA from the config file """
        rv = dict()
        prefix = "sslca_%s_" % name
        # collect every sslca_<name>_* option into a dict keyed by the
        # bare option name (config, passphrase, chaincert, root_ca)
        for attr in dir(Bcfg2.Options.setup):
            if attr.startswith(prefix):
                rv[attr[len(prefix):]] = getattr(Bcfg2.Options.setup, attr)
        return rv

    def create_data(self, entry, metadata):
        """ generate a new cert """
        self.logger.info("Cfg: Generating new SSL cert for %s" % self.name)
        cert = self.XMLMatch(metadata).find("Cert")
        ca = self.get_ca(cert.get('ca', 'default'))
        req = self.build_request(self._get_keyfile(cert, metadata), metadata)
        try:
            days = cert.get('days', '365')
            cmd = ["openssl", "ca", "-config", ca['config'], "-in", req,
                   "-days", days, "-batch"]
            passphrase = ca.get('passphrase')
            if passphrase:
                cmd.extend(["-passin", "pass:%s" % passphrase])
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError("Failed to generate cert: %s" %
                                       result.error)
        except KeyError:
            # ca['config'] lookup above failed
            raise CfgCreationError("Cfg: [sslca_%s] section has no 'config' "
                                   "option" % cert.get('ca', 'default'))
        finally:
            try:
                os.unlink(req)
            except OSError:
                self.logger.error("Cfg: Failed to unlink temporary CSR: %s " %
                                  sys.exc_info()[1])
        data = result.stdout
        if cert.get('append_chain') and 'chaincert' in ca:
            data += open(ca['chaincert']).read()

        self.write_data(data, **self.get_specificity(metadata))
        return data

    def verify_entry(self, entry, metadata, data):
        """ Verify the given cert data against the CA and its key. """
        fd, fname = tempfile.mkstemp()
        self.debug_log("Cfg: Writing SSL cert %s to temporary file %s for "
                       "verification" % (entry.get("name"), fname))
        os.fdopen(fd, 'w').write(data)
        cert = self.XMLMatch(metadata).find("Cert")
        ca = self.get_ca(cert.get('ca', 'default'))
        try:
            if ca.get('chaincert'):
                self.verify_cert_against_ca(fname, entry, metadata)
            self.verify_cert_against_key(fname,
                                         self._get_keyfile(cert, metadata))
        finally:
            os.unlink(fname)

    def _get_keyfile(self, cert, metadata):
        """ Given a <Cert/> element and client metadata, return the
        full path to the file on the filesystem that the key lives in."""
        keypath = cert.get("key")
        eset = self.cfg.entries[keypath]
        try:
            return eset.best_matching(metadata).name
        except PluginExecutionError:
            # SSL key needs to be created
            try:
                creator = eset.best_matching(metadata,
                                             eset.get_handlers(metadata,
                                                               CfgCreator))
            except PluginExecutionError:
                raise CfgCreationError("Cfg: No SSL key or key creator "
                                       "defined for %s" % keypath)

            keyentry = lxml.etree.Element("Path", name=keypath)
            creator.create_data(keyentry, metadata)

            # wait (up to ten seconds) for the FAM event for the newly
            # written key so best_matching() can find it
            tries = 0
            while True:
                if tries >= 10:
                    raise CfgCreationError("Cfg: Timed out waiting for event "
                                           "on SSL key at %s" % keypath)
                get_fam().handle_events_in_interval(1)
                try:
                    return eset.best_matching(metadata).name
                except PluginExecutionError:
                    tries += 1
                    continue

    def verify_cert_against_ca(self, filename, entry, metadata):
        """ check that a certificate validates against the ca cert,
        and that it has not expired.

        :raises: CfgVerificationError on verification failure """
        cert = self.XMLMatch(metadata).find("Cert")
        ca = self.get_ca(cert.get("ca", "default"))
        chaincert = ca.get('chaincert')
        cmd = ["openssl", "verify"]
        is_root = ca.get('root_ca', "false").lower() == 'true'
        if is_root:
            cmd.append("-CAfile")
        else:
            # verifying based on an intermediate cert
            cmd.extend(["-purpose", "sslserver", "-untrusted"])
        cmd.extend([chaincert, filename])
        self.debug_log("Cfg: Verifying %s against CA" % entry.get("name"))
        result = self.cmd.run(cmd)
        # bug fix: compare against the cert file path, not the <Cert/>
        # XML element -- openssl prints "<filename>: OK" on success,
        # and Element + str raised TypeError here
        if result.stdout == filename + ": OK\n":
            self.debug_log("Cfg: %s verified successfully against CA" %
                           entry.get("name"))
        else:
            raise CfgVerificationError("%s failed verification against CA: %s"
                                       % (entry.get("name"), result.error))

    def _get_modulus(self, fname, ftype="x509"):
        """ get the modulus from the given file """
        cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
        self.debug_log("Cfg: Getting modulus of %s for verification: %s" %
                       (fname, " ".join(cmd)))
        result = self.cmd.run(cmd)
        if not result.success:
            raise CfgVerificationError("Failed to get modulus of %s: %s" %
                                       (fname, result.error))
        return result.stdout.strip()

    def verify_cert_against_key(self, filename, keyfile):
        """ check that a certificate validates against its private
        key.

        :raises: CfgVerificationError if the moduli do not match """
        cert = self._get_modulus(filename)
        key = self._get_modulus(keyfile, ftype="rsa")
        if cert == key:
            self.debug_log("Cfg: %s verified successfully against key %s" %
                           (filename, keyfile))
        else:
            raise CfgVerificationError("%s failed verification against key %s"
                                       % (filename, keyfile))
class Git(Version):
    """ The Git plugin provides a revision interface for Bcfg2 repos
    using git. """
    __author__ = '*****@*****.**'
    # path (relative to the VCS root) that marks a git checkout
    __vcs_metadata_path__ = ".git"

    # Update() is only exposed over XML-RPC when GitPython is
    # available, since it is implemented with the GitPython API
    if HAS_GITPYTHON:
        __rmi__ = Version.__rmi__ + ['Update']

    def __init__(self, core):
        """Initialize the plugin, preferring GitPython and falling
        back to the git CLI when it is not installed."""
        Version.__init__(self, core)
        if HAS_GITPYTHON:
            self.repo = git.Repo(Bcfg2.Options.setup.vcs_root)
            self.cmd = None  # GitPython in use, no CLI executor needed
        else:
            self.logger.debug("Git: GitPython not found, using CLI interface "
                              "to Git")
            self.repo = None
            self.cmd = Executor()
        self.logger.debug("Initialized git plugin with git directory %s" %
                          self.vcs_path)

    def _log_git_cmd(self, output):
        """ Send output from a GitPython command to the debug log """
        for line in output.strip().splitlines():
            self.debug_log("Git: %s" % line)

    def get_revision(self):
        """Read git revision information for the Bcfg2 repository."""
        if HAS_GITPYTHON:
            return self.repo.head.commit.hexsha
        else:
            # no GitPython: shell out to git rev-parse
            cmd = ["git", "--git-dir", self.vcs_path,
                   "--work-tree", Bcfg2.Options.setup.vcs_root,
                   "rev-parse", "HEAD"]
            self.debug_log("Git: Running %s" % cmd)
            result = self.cmd.run(cmd)
            if not result.success:
                raise PluginExecutionError(result.stderr)
            return result.stdout

    def Update(self, ref=None):
        """ Git.Update() => True|False
        Update the working copy against the upstream repository

        :param ref: optional ref (branch, tag, commit) to check out
        :returns: bool - True on success
        :raises: PluginExecutionError if checkout or pull fails
        """
        self.logger.info("Git: Git.Update(ref='%s')" % ref)
        self.debug_log("Git: Performing garbage collection on repo at %s" %
                       Bcfg2.Options.setup.vcs_root)
        # gc and fetch failures are non-fatal; log and continue
        try:
            self._log_git_cmd(self.repo.git.gc('--auto'))
        except git.GitCommandError:
            self.logger.warning("Git: Failed to perform garbage collection: %s" % sys.exc_info()[1])

        self.debug_log("Git: Fetching all refs for repo at %s" %
                       Bcfg2.Options.setup.vcs_root)
        try:
            self._log_git_cmd(self.repo.git.fetch('--all'))
        except git.GitCommandError:
            self.logger.warning("Git: Failed to fetch refs: %s" %
                                sys.exc_info()[1])

        if ref:
            self.debug_log("Git: Checking out %s" % ref)
            try:
                self._log_git_cmd(self.repo.git.checkout('-f', ref))
            except git.GitCommandError:
                raise PluginExecutionError("Git: Failed to checkout %s: %s" %
                                           (ref, sys.exc_info()[1]))

        # determine if we should try to pull to get the latest commit
        # on this head
        tracking = None
        if not self.repo.head.is_detached:
            self.debug_log("Git: Determining if %s is a tracking branch" %
                           self.repo.head.ref.name)
            tracking = self.repo.head.ref.tracking_branch()

        if tracking is not None:
            self.debug_log("Git: %s is a tracking branch, pulling from %s" %
                           (self.repo.head.ref.name, tracking))
            try:
                self._log_git_cmd(self.repo.git.pull("--rebase"))
            except git.GitCommandError:
                raise PluginExecutionError("Git: Failed to pull from "
                                           "upstream: %s" % sys.exc_info()[1])

        self.logger.info("Git: Repo at %s updated to %s" %
                         (Bcfg2.Options.setup.vcs_root, self.get_revision()))
        return True
class CfgPrivateKeyCreator(CfgCreator, StructFile):
    """The CfgPrivateKeyCreator creates SSH keys on the fly. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within privkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['privkey.xml']

    def __init__(self, fname):
        CfgCreator.__init__(self, fname)
        StructFile.__init__(self, fname)
        # the public key lives next to the private key spec:
        # <dir>.pub/<dir>.pub
        pubkey_path = os.path.dirname(self.name) + ".pub"
        pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
        self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
        self.setup = get_option_parser()
        self.cmd = Executor()  # runs ssh-keygen
    __init__.__doc__ = CfgCreator.__init__.__doc__

    @property
    def category(self):
        """ The name of the metadata category that generated keys
        are specific to """
        if (self.setup.cfp.has_section("sshkeys") and
                self.setup.cfp.has_option("sshkeys", "category")):
            return self.setup.cfp.get("sshkeys", "category")
        return None

    @property
    def passphrase(self):
        """ The passphrase used to encrypt private keys """
        if (HAS_CRYPTO and
                self.setup.cfp.has_section("sshkeys") and
                self.setup.cfp.has_option("sshkeys", "passphrase")):
            return Bcfg2.Server.Encryption.get_passphrases()[
                self.setup.cfp.get("sshkeys", "passphrase")]
        return None

    def handle_event(self, event):
        # dispatch to both parents: CfgCreator tracks the file,
        # StructFile re-parses the XML spec
        CfgCreator.handle_event(self, event)
        StructFile.HandleEvent(self, event)
    handle_event.__doc__ = CfgCreator.handle_event.__doc__

    def _gen_keypair(self, metadata, spec=None):
        """ Generate a keypair according to the given client medata
        and key specification.

        :param metadata: The client metadata to generate keys for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: string - The filename of the private key
        """
        if spec is None:
            spec = self.XMLMatch(metadata)

        # set key parameters
        ktype = "rsa"
        bits = None
        params = spec.find("Params")
        if params is not None:
            bits = params.get("bits")
            ktype = params.get("type", ktype)
        try:
            passphrase = spec.find("Passphrase").text
        except AttributeError:
            # no <Passphrase/> element in the spec
            passphrase = ''
        tempdir = tempfile.mkdtemp()
        try:
            filename = os.path.join(tempdir, "privkey")

            # generate key pair
            cmd = ["ssh-keygen", "-f", filename, "-t", ktype]
            if bits:
                cmd.extend(["-b", bits])
            cmd.append("-N")
            # keep a copy of the command with the passphrase masked
            # for logging
            log_cmd = cmd[:]
            cmd.append(passphrase)
            if passphrase:
                log_cmd.append("******")
            else:
                log_cmd.append("''")
            self.debug_log("Cfg: Generating new SSH key pair: %s" %
                           " ".join(log_cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise CfgCreationError("Cfg: Failed to generate SSH key pair "
                                       "at %s for %s: %s" %
                                       (filename, metadata.hostname,
                                        result.error))
            elif result.stderr:
                self.logger.warning("Cfg: Generated SSH key pair at %s for %s "
                                    "with errors: %s" % (filename,
                                                         metadata.hostname,
                                                         result.stderr))
            return filename
        except:
            # clean up the tempdir on any failure, then re-raise; on
            # success the caller (create_data) removes it
            shutil.rmtree(tempdir)
            raise

    def get_specificity(self, metadata, spec=None):
        """ Get config settings for key generation specificity
        (per-host or per-group).

        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param spec: The key specification to follow when creating the
                     keys. This should be an XML document that only
                     contains key specification data that applies to
                     the given client metadata, and may be obtained by
                     doing ``self.XMLMatch(metadata)``
        :type spec: lxml.etree._Element
        :returns: dict - A dict of specificity arguments suitable for
                  passing to
                  :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.write_data`
                  or
                  :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.get_filename`
        """
        if spec is None:
            spec = self.XMLMatch(metadata)
        category = spec.get("category", self.category)
        # with no category configured, keys can only be host-specific
        if category is None:
            per_host_default = "true"
        else:
            per_host_default = "false"
        per_host = spec.get("perhost", per_host_default).lower() == "true"

        specificity = dict(host=metadata.hostname)
        if category and not per_host:
            group = metadata.group_in_category(category)
            if group:
                specificity = dict(group=group,
                                   prio=int(spec.get("priority", 50)))
            else:
                self.logger.info("Cfg: %s has no group in category %s, "
                                 "creating host-specific key" %
                                 (metadata.hostname, category))
        return specificity

    # pylint: disable=W0221
    def create_data(self, entry, metadata, return_pair=False):
        """ Create data for the given entry on the given client

        :param entry: The abstract entry to create data for.  This
                      will not be modified
        :type entry: lxml.etree._Element
        :param metadata: The client metadata to create data for
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param return_pair: Return a tuple of ``(public key, private
                            key)`` instead of just the private key.
                            This is used by
                            :class:`Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CfgPublicKeyCreator`
                            to create public keys as requested.
        :type return_pair: bool
        :returns: string - The private key data
        :returns: tuple - Tuple of ``(public key, private key)``, if
                  ``return_pair`` is set to True
        """
        spec = self.XMLMatch(metadata)
        specificity = self.get_specificity(metadata, spec)
        filename = self._gen_keypair(metadata, spec)

        try:
            # write the public key, stripping the comment and
            # replacing it with a comment that specifies the filename.
            kdata = open(filename + ".pub").read().split()[:2]
            kdata.append(self.pubkey_creator.get_filename(**specificity))
            pubkey = " ".join(kdata) + "\n"
            self.pubkey_creator.write_data(pubkey, **specificity)

            # encrypt the private key, write to the proper place, and
            # return it
            privkey = open(filename).read()
            if HAS_CRYPTO and self.passphrase:
                self.debug_log("Cfg: Encrypting key data at %s" % filename)
                privkey = Bcfg2.Server.Encryption.ssl_encrypt(privkey,
                                                              self.passphrase)
                specificity['ext'] = '.crypt'

            self.write_data(privkey, **specificity)

            if return_pair:
                return (pubkey, privkey)
            else:
                return privkey
        finally:
            # remove the temporary key directory created by
            # _gen_keypair
            shutil.rmtree(os.path.dirname(filename))
    def __init__(self, core, datastore):
        """Initialize the Fossil version plugin with an executor for
        running the fossil CLI."""
        Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
        self.cmd = Executor()  # runs external fossil commands
        self.logger.debug(
            "Initialized Fossil plugin with fossil directory %s" %
            self.vcs_path)
class Client(object):
    """ The main Bcfg2 client class.

    Drives a full client run: downloads and runs probes, fetches
    (or reads a cached) configuration, loads tool drivers, verifies
    and installs entries, removes extras, and uploads statistics.
    """

    # command-line / config-file options, appended to the proxy's own
    options = Proxy.ComponentProxy.options + [
        Bcfg2.Options.Common.syslog,
        Bcfg2.Options.Common.interactive,
        Bcfg2.Options.BooleanOption(
            "-q", "--quick", help="Disable some checksum verification"),
        Bcfg2.Options.Option(
            cf=('client', 'probe_timeout'),
            type=Bcfg2.Options.Types.timeout,
            help="Timeout when running client probes"),
        Bcfg2.Options.Option(
            "-b", "--only-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Only configure the given bundle(s)'),
        Bcfg2.Options.Option(
            "-B", "--except-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Configure everything except the given bundle(s)'),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.BooleanOption(
                "-Q", "--bundle-quick",
                help='Only verify the given bundle(s)'),
            Bcfg2.Options.Option(
                '-r', '--remove',
                choices=['all', 'services', 'packages', 'users'],
                help='Force removal of additional configuration items')),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.PathOption(
                '-f', '--file', type=argparse.FileType('rb'),
                help='Configure from a file rather than querying the server'),
            Bcfg2.Options.PathOption(
                '-c', '--cache', type=argparse.FileType('wb'),
                help='Store the configuration in a file')),
        Bcfg2.Options.BooleanOption(
            '--exit-on-probe-failure', default=True,
            cf=('client', 'exit_on_probe_failure'),
            help="The client should exit if a probe fails"),
        Bcfg2.Options.Option(
            '-p', '--profile', cf=('client', 'profile'),
            help='Assert the given profile for the host'),
        Bcfg2.Options.Option(
            '-l', '--decision', cf=('client', 'decision'),
            choices=['whitelist', 'blacklist', 'none'],
            help='Run client in server decision list mode'),
        Bcfg2.Options.BooleanOption(
            "-O", "--no-lock", help='Omit lock check'),
        Bcfg2.Options.PathOption(
            cf=('components', 'lockfile'), default='/var/lock/bcfg2.run',
            help='Client lock file'),
        Bcfg2.Options.BooleanOption(
            "-n", "--dry-run", help='Do not actually change the system'),
        Bcfg2.Options.Option(
            "-D", "--drivers",
            cf=('client', 'drivers'),
            type=Bcfg2.Options.Types.comma_list,
            # default: every package found under the Tools package
            default=[m[1] for m in walk_packages(path=Tools.__path__)],
            action=ClientDriverAction, help='Client drivers'),
        Bcfg2.Options.BooleanOption(
            "-e", "--show-extra", help='Enable extra entry output'),
        Bcfg2.Options.BooleanOption(
            "-k", "--kevlar", help='Run in bulletproof mode'),
        Bcfg2.Options.BooleanOption(
            "-i", "--only-important",
            help='Only configure the important entries')]

    def __init__(self):
        #: parsed configuration document (set by parse_config)
        self.config = None
        #: lazily-created XML-RPC proxy (see the ``proxy`` property)
        self._proxy = None
        # NOTE(review): self.logger is re-assigned to
        # logging.getLogger(__name__) at the end of this method; this
        # first binding is only used for the option sanity checks below
        self.logger = logging.getLogger('bcfg2')
        self.cmd = Executor(Bcfg2.Options.setup.probe_timeout)
        self.tools = []
        # per-phase timestamps, uploaded with statistics
        self.times = dict()
        self.times['initialization'] = time.time()

        # -Q is meaningless without a bundle selection
        if Bcfg2.Options.setup.bundle_quick:
            if (not Bcfg2.Options.setup.only_bundles and
                    not Bcfg2.Options.setup.except_bundles):
                self.logger.error("-Q option requires -b or -B")
                raise SystemExit(1)
        if Bcfg2.Options.setup.remove == 'services':
            self.logger.error("Service removal is nonsensical; "
                              "removed services will only be disabled")

        # normalize the server URL so the proxy always gets https://
        if not Bcfg2.Options.setup.server.startswith('https://'):
            Bcfg2.Options.setup.server = \
                'https://' + Bcfg2.Options.setup.server

        #: A dict of the state of each entry.  Keys are the entries.
        #: Values are boolean: True means that the entry is good,
        #: False means that the entry is bad.
        self.states = {}
        self.whitelist = []
        self.blacklist = []
        self.removal = []
        # entries no loaded tool can handle (set by parse_config)
        self.unhandled = []
        self.logger = logging.getLogger(__name__)

    def _probe_failure(self, probename, msg):
        """ handle failure of a probe in the way the user wants us to
        (exit or continue) """
        message = "Failed to execute probe %s: %s" % (probename, msg)
        if Bcfg2.Options.setup.exit_on_probe_failure:
            self.fatal_error(message)
        else:
            self.logger.error(message)

    def run_probe(self, probe):
        """Execute probe.

        Writes the probe script to a temp file, makes it executable,
        runs it, and returns a <probe-data> element holding its stdout.
        """
        name = probe.get('name')
        self.logger.info("Running probe %s" % name)
        ret = XML.Element("probe-data", name=name, source=probe.get('source'))
        try:
            scripthandle, scriptname = tempfile.mkstemp()
            if sys.hexversion >= 0x03000000:
                # py3: open with the configured encoding
                script = os.fdopen(scripthandle, 'w',
                                   encoding=Bcfg2.Options.setup.encoding)
            else:
                script = os.fdopen(scripthandle, 'w')
            try:
                # shebang comes from the probe's interpreter attribute
                script.write("#!%s\n" %
                             (probe.attrib.get('interpreter', '/bin/sh')))
                if sys.hexversion >= 0x03000000:
                    script.write(probe.text)
                else:
                    script.write(probe.text.encode('utf-8'))
                script.close()
                os.chmod(scriptname,
                         stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                         stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
                         stat.S_IWUSR)  # 0755
                rv = self.cmd.run(scriptname)
                if rv.stderr:
                    self.logger.warning("Probe %s has error output: %s" %
                                        (name, rv.stderr))
                if not rv.success:
                    self._probe_failure(name, "Return value %s" % rv.retval)
                self.logger.info("Probe %s has result:" % name)
                self.logger.info(rv.stdout)
                if sys.hexversion >= 0x03000000:
                    ret.text = rv.stdout
                else:
                    ret.text = rv.stdout.decode('utf-8')
            finally:
                # always clean up the temporary script
                os.unlink(scriptname)
        except SystemExit:
            raise
        except:
            # any other failure is routed through the probe-failure policy
            self._probe_failure(name, sys.exc_info()[1])
        return ret

    def fatal_error(self, message):
        """Signal a fatal error."""
        self.logger.error("Fatal error: %s" % (message))
        raise SystemExit(1)

    @property
    def proxy(self):
        """ get an XML-RPC proxy to the server (created on first use) """
        if self._proxy is None:
            self._proxy = Proxy.ComponentProxy()
        return self._proxy

    def run_probes(self):
        """ run probes and upload probe data """
        try:
            probes = XML.XML(str(self.proxy.GetProbes()))
        except (Proxy.ProxyError,
                Proxy.CertificateError,
                socket.gaierror,
                socket.error):
            err = sys.exc_info()[1]
            self.fatal_error("Failed to download probes from bcfg2: %s" % err)
        except XML.ParseError:
            err = sys.exc_info()[1]
            self.fatal_error("Server returned invalid probe requests: %s" %
                             err)

        self.times['probe_download'] = time.time()

        # execute probes
        probedata = XML.Element("ProbeData")
        for probe in probes.findall(".//probe"):
            probedata.append(self.run_probe(probe))

        # only upload if the server actually requested probes
        if len(probes.findall(".//probe")) > 0:
            try:
                # upload probe responses
                self.proxy.RecvProbeData(
                    XML.tostring(probedata,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to upload probe data: %s" % err)

        self.times['probe_upload'] = time.time()

    def get_config(self):
        """ load the configuration, either from the cached
        configuration file (-f), or from the server """
        if Bcfg2.Options.setup.file:
            # read config from file
            try:
                self.logger.debug("Reading cached configuration from %s" %
                                  Bcfg2.Options.setup.file.name)
                return Bcfg2.Options.setup.file.read()
            except IOError:
                self.fatal_error("Failed to read cached configuration from: %s"
                                 % Bcfg2.Options.setup.file.name)
        else:
            # retrieve config from server
            if Bcfg2.Options.setup.profile:
                try:
                    self.proxy.AssertProfile(Bcfg2.Options.setup.profile)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to set client profile: %s" % err)

            try:
                self.proxy.DeclareVersion(__version__)
            except (xmlrpclib.Fault,
                    Proxy.ProxyError,
                    Proxy.CertificateError,
                    socket.gaierror,
                    socket.error):
                err = sys.exc_info()[1]
                self.fatal_error("Failed to declare version: %s" % err)

            self.run_probes()

            if Bcfg2.Options.setup.decision in ['whitelist', 'blacklist']:
                try:
                    # TODO: read decision list from --decision-list
                    Bcfg2.Options.setup.decision_list = \
                        self.proxy.GetDecisionList(
                            Bcfg2.Options.setup.decision)
                    self.logger.info("Got decision list from server:")
                    self.logger.info(Bcfg2.Options.setup.decision_list)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to get decision list: %s" % err)

            try:
                rawconfig = self.proxy.GetConfig().encode('utf-8')
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to download configuration from "
                                 "Bcfg2: %s" % err)

            self.times['config_download'] = time.time()

        # NOTE(review): rawconfig is only assigned on the server branch;
        # the -f branch returns earlier (or exits via fatal_error), so
        # this block is only reached with rawconfig bound -- verify -f/-c
        # remain mutually exclusive options
        if Bcfg2.Options.setup.cache:
            try:
                Bcfg2.Options.setup.cache.write(rawconfig)
                os.chmod(Bcfg2.Options.setup.cache.name, 384)  # 0600
            except IOError:
                self.logger.warning("Failed to write config cache file %s" %
                                    (Bcfg2.Options.setup.cache))
            self.times['caching'] = time.time()

        return rawconfig

    def parse_config(self, rawconfig):
        """ Parse the XML configuration received from the Bcfg2 server """
        try:
            self.config = XML.XML(rawconfig)
        except XML.ParseError:
            syntax_error = sys.exc_info()[1]
            self.fatal_error("The configuration could not be parsed: %s" %
                             syntax_error)

        self.load_tools()

        # find entries not handled by any tools
        self.unhandled = [entry for struct in self.config
                          for entry in struct
                          if entry not in self.handled]

        if self.unhandled:
            self.logger.error("The following entries are not handled by any "
                              "tool:")
            for entry in self.unhandled:
                self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'),
                                                entry.get('name')))

        # find duplicates
        self.find_dups(self.config)

        # report which packages were specified directly vs. added as
        # prerequisites by the Packages plugin (origin attribute)
        pkgs = [(entry.get('name'), entry.get('origin'))
                for struct in self.config
                for entry in struct
                if entry.tag == 'Package']
        if pkgs:
            self.logger.debug("The following packages are specified in bcfg2:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None])
            self.logger.debug("The following packages are prereqs added by "
                              "Packages:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])

        self.times['config_parse'] = time.time()

    def run(self):
        """Perform client execution phase."""
        # begin configuration
        self.times['start'] = time.time()

        self.logger.info("Starting Bcfg2 client run at %s" %
                         self.times['start'])

        self.parse_config(self.get_config().decode('utf-8'))

        if self.config.tag == 'error':
            self.fatal_error("Server error: %s" % (self.config.text))

        if Bcfg2.Options.setup.bundle_quick:
            # rebuild the config with only the selected bundles;
            # __init__ guarantees -b or -B was given with -Q
            newconfig = XML.XML('<Configuration/>')
            for bundle in self.config.getchildren():
                name = bundle.get("name")
                if (name and (name in Bcfg2.Options.setup.only_bundles or
                              name not in
                              Bcfg2.Options.setup.except_bundles)):
                    newconfig.append(bundle)
            self.config = newconfig

        if not Bcfg2.Options.setup.no_lock:
            # check lock here
            try:
                lockfile = open(Bcfg2.Options.setup.lockfile, 'w')
                if locked(lockfile.fileno()):
                    self.fatal_error("Another instance of Bcfg2 is running. "
                                     "If you want to bypass the check, run "
                                     "with the -O/--no-lock option")
            except SystemExit:
                raise
            except:
                # best-effort: run anyway if the lockfile can't be opened
                lockfile = None
                self.logger.error("Failed to open lockfile %s: %s" %
                                  (Bcfg2.Options.setup.lockfile,
                                   sys.exc_info()[1]))

        # execute the configuration
        self.Execute()

        if not Bcfg2.Options.setup.no_lock:
            # unlock here
            if lockfile:
                try:
                    fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
                    os.remove(Bcfg2.Options.setup.lockfile)
                except OSError:
                    self.logger.error("Failed to unlock lockfile %s" %
                                      lockfile.name)

        if (not Bcfg2.Options.setup.file and
                not Bcfg2.Options.setup.bundle_quick):
            # upload statistics
            feedback = self.GenerateStats()
            try:
                self.proxy.RecvStats(
                    XML.tostring(feedback,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.logger.error("Failed to upload configuration statistics: "
                                  "%s" % err)
                raise SystemExit(2)

        self.logger.info("Finished Bcfg2 client run at %s" % time.time())

    def load_tools(self):
        """ Load all applicable client tools """
        for tool in Bcfg2.Options.setup.drivers:
            try:
                self.tools.append(tool(self.config))
            except Tools.ToolInstantiationError:
                # tool declined to load (e.g. not applicable here)
                continue
            except:
                self.logger.error("Failed to instantiate tool %s" % tool,
                                  exc_info=1)

        # drop tools that a loaded tool declares a conflict with
        for tool in self.tools[:]:
            for conflict in getattr(tool, 'conflicts', []):
                for item in self.tools:
                    if item.name == conflict:
                        self.tools.remove(item)

        self.logger.info("Loaded tool drivers:")
        self.logger.info([tool.name for tool in self.tools])

        deprecated = [tool.name for tool in self.tools if tool.deprecated]
        if deprecated:
            self.logger.warning("Loaded deprecated tool drivers:")
            self.logger.warning(deprecated)
        experimental = [tool.name for tool in self.tools if tool.experimental]
        if experimental:
            self.logger.warning("Loaded experimental tool drivers:")
            self.logger.warning(experimental)

    def find_dups(self, config):
        """ Find duplicate entries and warn about them """
        # count entries by the primary key each handling tool assigns
        entries = dict()
        for struct in config:
            for entry in struct:
                for tool in self.tools:
                    if tool.handlesEntry(entry):
                        pkey = tool.primarykey(entry)
                        if pkey in entries:
                            entries[pkey] += 1
                        else:
                            entries[pkey] = 1
        multi = [e for e, c in entries.items() if c > 1]
        if multi:
            self.logger.debug("The following entries are included multiple "
                              "times:")
            for entry in multi:
                self.logger.debug(entry)

    def promptFilter(self, msg, entries):
        """Filter a supplied list based on user input.

        Returns the subset of ``entries`` the user confirmed.  ``msg``
        is used as the prompt template unless an entry carries its own
        ``qtext`` attribute.
        """
        ret = []
        entries.sort(key=lambda e: e.tag + ":" + e.get('name'))
        for entry in entries[:]:
            if entry in self.unhandled:
                # don't prompt for entries that can't be installed
                continue
            if 'qtext' in entry.attrib:
                iprompt = entry.get('qtext')
            else:
                iprompt = msg % (entry.tag, entry.get('name'))
            if prompt(iprompt):
                ret.append(entry)
        return ret

    def __getattr__(self, name):
        # 'extra', 'handled', 'modified' and '__important__' are
        # aggregated on demand from all loaded tools
        if name in ['extra', 'handled', 'modified', '__important__']:
            ret = []
            for tool in self.tools:
                ret += getattr(tool, name)
            return ret
        elif name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(name)

    def InstallImportant(self):
        """Install important entries

        We also process the decision mode stuff here because we want
        to prevent non-whitelisted/blacklisted 'important' entries
        from being installed prior to determining the decision mode on
        the client.
        """
        # Need to process decision stuff early so that dryrun mode
        # works with it
        self.whitelist = [entry for entry in self.states
                          if not self.states[entry]]
        if not Bcfg2.Options.setup.file:
            if Bcfg2.Options.setup.decision == 'whitelist':
                dwl = Bcfg2.Options.setup.decision_list
                w_to_rem = [e for e in self.whitelist
                            if not matches_white_list(e, dwl)]
                if w_to_rem:
                    self.logger.info("In whitelist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in w_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in w_to_rem]
            elif Bcfg2.Options.setup.decision == 'blacklist':
                b_to_rem = \
                    [e for e in self.whitelist
                     if not passes_black_list(
                         e, Bcfg2.Options.setup.decision_list)]
                if b_to_rem:
                    self.logger.info("In blacklist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in b_to_rem]

        # take care of important entries first
        if (not Bcfg2.Options.setup.dry_run or
                Bcfg2.Options.setup.only_important):
            important_installs = set()
            # ".//Path/.." selects every element that contains a Path child
            for parent in self.config.findall(".//Path/.."):
                name = parent.get("name")
                if not name or (name in Bcfg2.Options.setup.except_bundles and
                                name not in Bcfg2.Options.setup.only_bundles):
                    continue
                for cfile in parent.findall("./Path"):
                    if (cfile.get('name') not in self.__important__ or
                            cfile.get('type') != 'file' or
                            cfile not in self.whitelist):
                        continue
                    tools = [t for t in self.tools
                             if t.handlesEntry(cfile) and t.canVerify(cfile)]
                    if not tools:
                        continue
                    if Bcfg2.Options.setup.dry_run:
                        important_installs.add(cfile)
                        continue
                    if (Bcfg2.Options.setup.interactive and not
                            self.promptFilter("Install %s: %s? (y/N):",
                                              [cfile])):
                        self.whitelist.remove(cfile)
                        continue
                    try:
                        # first matching tool wins
                        self.states[cfile] = tools[0].InstallPath(cfile)
                        if self.states[cfile]:
                            tools[0].modified.append(cfile)
                    except:  # pylint: disable=W0702
                        self.logger.error("Unexpected tool failure",
                                          exc_info=1)
                    cfile.set('qtext', '')
                    # re-verify; drop from whitelist if now correct
                    if tools[0].VerifyPath(cfile, []):
                        self.whitelist.remove(cfile)

            if Bcfg2.Options.setup.dry_run and len(important_installs) > 0:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                  for e in important_installs])

    def Inventory(self):
        """ Verify all entries, find extra entries, and build up workqueues """
        # initialize all states
        for struct in self.config.getchildren():
            for entry in struct.getchildren():
                self.states[entry] = False
        for tool in self.tools:
            try:
                self.states.update(tool.Inventory())
            except KeyboardInterrupt:
                raise
            except:  # pylint: disable=W0702
                self.logger.error("%s.Inventory() call failed:" % tool.name,
                                  exc_info=1)

    def Decide(self):  # pylint: disable=R0912
        """Set self.whitelist based on user interaction."""
        iprompt = "Install %s: %s? (y/N): "
        rprompt = "Remove %s: %s? (y/N): "
        if Bcfg2.Options.setup.remove:
            if Bcfg2.Options.setup.remove == 'all':
                self.removal = self.extra
            elif Bcfg2.Options.setup.remove == 'services':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Service']
            elif Bcfg2.Options.setup.remove == 'packages':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Package']
            elif Bcfg2.Options.setup.remove == 'users':
                self.removal = [entry for entry in self.extra
                                if entry.tag in ['POSIXUser', 'POSIXGroup']]

        candidates = [entry for entry in self.states
                      if not self.states[entry]]

        if Bcfg2.Options.setup.dry_run:
            # dry run: report, then suppress both install and removal
            if self.whitelist:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.whitelist])
                self.whitelist = []
            if self.removal:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry removal for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.removal])
                self.removal = []

        # Here is where most of the work goes
        # first perform bundle filtering
        all_bundle_names = [b.get('name')
                            for b in self.config.findall('./Bundle')]
        bundles = self.config.getchildren()
        if Bcfg2.Options.setup.only_bundles:
            # warn if non-existent bundle given
            for bundle in Bcfg2.Options.setup.only_bundles:
                if bundle not in all_bundle_names:
                    self.logger.info("Warning: Bundle %s not found" % bundle)
            bundles = [b for b in bundles
                       if b.get('name') in Bcfg2.Options.setup.only_bundles]
        if Bcfg2.Options.setup.except_bundles:
            # warn if non-existent bundle given
            if not Bcfg2.Options.setup.bundle_quick:
                for bundle in Bcfg2.Options.setup.except_bundles:
                    if bundle not in all_bundle_names:
                        self.logger.info("Warning: Bundle %s not found" %
                                         bundle)
            bundles = [
                b for b in bundles
                if b.get('name') not in Bcfg2.Options.setup.except_bundles]
        # keep only whitelisted entries that live in a surviving bundle
        self.whitelist = [e for e in self.whitelist
                          if any(e in b for b in bundles)]

        # first process prereq actions
        for bundle in bundles[:]:
            if bundle.tag == 'Bundle':
                bmodified = any((item in self.whitelist or
                                 item in self.modified) for item in bundle)
            else:
                bmodified = False
            actions = [a for a in bundle.findall('./Action')
                       if (a.get('timing') in ['pre', 'both'] and
                           (bmodified or a.get('when') == 'always'))]
            # now we process all "pre" and "both" actions that are either
            # always or the bundle has been modified
            if Bcfg2.Options.setup.interactive:
                self.promptFilter(iprompt, actions)
            self.DispatchInstallCalls(actions)

            if bundle.tag != 'Bundle':
                continue

            # need to test to fail entries in whitelist
            if not all(self.states[a] for a in actions):
                # then display bundles forced off with entries
                self.logger.info("%s %s failed prerequisite action" %
                                 (bundle.tag, bundle.get('name')))
                bundles.remove(bundle)
                b_to_remv = [ent for ent in self.whitelist if ent in bundle]
                if b_to_remv:
                    self.logger.info("Not installing entries from %s %s" %
                                     (bundle.tag, bundle.get('name')))
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_remv])
                    for ent in b_to_remv:
                        self.whitelist.remove(ent)

        self.logger.debug("Installing entries in the following bundle(s):")
        self.logger.debug(" %s" % ", ".join(b.get("name") for b in bundles
                                            if b.get("name")))

        if Bcfg2.Options.setup.interactive:
            self.whitelist = self.promptFilter(iprompt, self.whitelist)
            self.removal = self.promptFilter(rprompt, self.removal)

        # everything bad that wasn't whitelisted is blacklisted
        for entry in candidates:
            if entry not in self.whitelist:
                self.blacklist.append(entry)

    def DispatchInstallCalls(self, entries):
        """Dispatch install calls to underlying tools."""
        for tool in self.tools:
            handled = [entry for entry in entries if tool.canInstall(entry)]
            if not handled:
                continue
            try:
                self.states.update(tool.Install(handled))
            except KeyboardInterrupt:
                raise
            except:  # pylint: disable=W0702
                self.logger.error("%s.Install() call failed:" % tool.name,
                                  exc_info=1)

    def Install(self):
        """Install all entries."""
        self.DispatchInstallCalls(self.whitelist)
        mods = self.modified
        mbundles = [struct for struct in self.config.findall('Bundle')
                    if any(True for mod in mods if mod in struct)]

        if self.modified:
            # Handle Bundle interdeps
            if mbundles:
                self.logger.info("The Following Bundles have been modified:")
                self.logger.info([mbun.get('name') for mbun in mbundles])
            # re-inventory every modified bundle with every tool
            tbm = [(t, b) for t in self.tools for b in mbundles]
            for tool, bundle in tbm:
                try:
                    self.states.update(tool.Inventory(structures=[bundle]))
                except KeyboardInterrupt:
                    raise
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Inventory() call failed:" %
                                      tool.name, exc_info=1)
            # entries knocked bad again by the installs above
            clobbered = [entry for bundle in mbundles for entry in bundle
                         if (not self.states[entry] and
                             entry not in self.blacklist)]
            if clobbered:
                self.logger.debug("Found clobbered entries:")
                self.logger.debug(["%s:%s" % (entry.tag, entry.get('name'))
                                   for entry in clobbered])
                if not Bcfg2.Options.setup.interactive:
                    self.DispatchInstallCalls(clobbered)

        all_bundles = self.config.findall('./Bundle')
        mbundles.extend(self._get_all_modified_bundles(mbundles, all_bundles))

        for bundle in all_bundles:
            if (Bcfg2.Options.setup.only_bundles and
                    bundle.get('name') not in
                    Bcfg2.Options.setup.only_bundles):
                # prune out unspecified bundles when running with -b
                continue
            if bundle in mbundles:
                continue

            self.logger.debug("Bundle %s was not modified" %
                              bundle.get('name'))
            for tool in self.tools:
                try:
                    self.states.update(tool.BundleNotUpdated(bundle))
                except KeyboardInterrupt:
                    raise
                except:  # pylint: disable=W0702
                    self.logger.error('%s.BundleNotUpdated(%s:%s) call failed:'
                                      % (tool.name, bundle.tag,
                                         bundle.get('name')), exc_info=1)

        for indep in self.config.findall('.//Independent'):
            for tool in self.tools:
                try:
                    self.states.update(tool.BundleNotUpdated(indep))
                except KeyboardInterrupt:
                    raise
                except:  # pylint: disable=W0702
                    self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:"
                                      % (tool.name, indep.tag,
                                         indep.get("name")), exc_info=1)

    def _get_all_modified_bundles(self, mbundles, all_bundles):
        """This gets all modified bundles by calling BundleUpdated until no
        new bundles get added to the modification list."""
        new_mbundles = mbundles
        add_mbundles = []

        # fixed-point iteration: BundleUpdated may modify further
        # entries, which can pull additional bundles into the set
        while new_mbundles:
            for bundle in self.config.findall('./Bundle'):
                if (Bcfg2.Options.setup.only_bundles and
                        bundle.get('name') not in
                        Bcfg2.Options.setup.only_bundles):
                    # prune out unspecified bundles when running with -b
                    continue
                if bundle not in new_mbundles:
                    continue

                self.logger.debug('Bundle %s was modified' %
                                  bundle.get('name'))
                for tool in self.tools:
                    try:
                        self.states.update(tool.BundleUpdated(bundle))
                    except:  # pylint: disable=W0702
                        self.logger.error('%s.BundleUpdated(%s:%s) call '
                                          'failed:' % (tool.name, bundle.tag,
                                                       bundle.get("name")),
                                          exc_info=1)

            mods = self.modified
            new_mbundles = [struct for struct in all_bundles
                            if any(True for mod in mods if mod in struct) and
                            struct not in mbundles + add_mbundles]
            add_mbundles.extend(new_mbundles)

        return add_mbundles

    def Remove(self):
        """Remove extra entries."""
        for tool in self.tools:
            extras = [entry for entry in self.removal
                      if tool.handlesEntry(entry)]
            if extras:
                try:
                    tool.Remove(extras)
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Remove() failed" % tool.name,
                                      exc_info=1)

    def CondDisplayState(self, phase):
        """Conditionally print tracing information."""
        self.logger.info('Phase: %s' % phase)
        self.logger.info('Correct entries: %d' %
                         list(self.states.values()).count(True))
        self.logger.info('Incorrect entries: %d' %
                         list(self.states.values()).count(False))
        if phase == 'final' and list(self.states.values()).count(False):
            # list every entry that is still bad at the end of the run
            for entry in sorted(self.states.keys(),
                                key=lambda e: e.tag + ":" + e.get('name')):
                if not self.states[entry]:
                    etype = entry.get('type')
                    if etype:
                        self.logger.info("%s:%s:%s" % (entry.tag, etype,
                                                       entry.get('name')))
                    else:
                        self.logger.info("%s:%s" % (entry.tag,
                                                    entry.get('name')))
        self.logger.info('Total managed entries: %d' %
                         len(list(self.states.values())))
        self.logger.info('Unmanaged entries: %d' % len(self.extra))
        if phase == 'final' and Bcfg2.Options.setup.show_extra:
            for entry in sorted(self.extra,
                                key=lambda e: e.tag + ":" + e.get('name')):
                etype = entry.get('type')
                if etype:
                    self.logger.info("%s:%s:%s" % (entry.tag, etype,
                                                   entry.get('name')))
                else:
                    self.logger.info("%s:%s" % (entry.tag,
                                                entry.get('name')))
        if ((list(self.states.values()).count(False) == 0) and
                not self.extra):
            self.logger.info('All entries correct.')

    def ReInventory(self):
        """Recheck everything."""
        if not Bcfg2.Options.setup.dry_run and Bcfg2.Options.setup.kevlar:
            self.logger.info("Rechecking system inventory")
            self.Inventory()

    def Execute(self):
        """Run all methods."""
        self.Inventory()
        self.times['inventory'] = time.time()
        self.CondDisplayState('initial')
        self.InstallImportant()
        if not Bcfg2.Options.setup.only_important:
            self.Decide()
            self.Install()
            self.times['install'] = time.time()
            self.Remove()
            self.times['remove'] = time.time()

        if self.modified:
            self.ReInventory()
            self.times['reinventory'] = time.time()
        self.times['finished'] = time.time()
        self.CondDisplayState('final')

    def GenerateStats(self):
        """Generate XML summary of execution statistics."""
        states = {}
        # with -i, only report on entries marked important
        for (item, val) in list(self.states.items()):
            if not Bcfg2.Options.setup.only_important or \
                    item.get('important', 'false').lower() == 'true':
                states[item] = val

        feedback = XML.Element("upload-statistics")
        stats = XML.SubElement(feedback,
                               'Statistics', total=str(len(states)),
                               version='2.0',
                               revision=self.config.get('revision', '-1'))
        flags = XML.SubElement(stats, "Flags")
        XML.SubElement(flags, "Flag", name="dry_run",
                       value=str(Bcfg2.Options.setup.dry_run))
        XML.SubElement(flags, "Flag", name="only_important",
                       value=str(Bcfg2.Options.setup.only_important))
        good_entries = [key for key, val in list(states.items()) if val]
        good = len(good_entries)
        stats.set('good', str(good))
        if any(not val for val in list(states.values())):
            stats.set('state', 'dirty')
        else:
            stats.set('state', 'clean')

        # List bad elements of the configuration
        for (data, ename) in [(self.modified, 'Modified'),
                              (self.extra, "Extra"),
                              (good_entries, "Good"),
                              ([entry for entry in states
                                if not states[entry]], "Bad")]:
            container = XML.SubElement(stats, ename)
            for item in data:
                # strip prompt text/content before reporting upstream;
                # NOTE(review): this mutates the config entries in place
                item.set('qtext', '')
                container.append(item)
                item.text = None

        timeinfo = XML.Element("OpStamps")
        feedback.append(stats)
        for (event, timestamp) in list(self.times.items()):
            timeinfo.set(event, str(timestamp))
        stats.append(timeinfo)
        return feedback
class Client(object):
    """ The main Bcfg2 client class """

    options = Proxy.ComponentProxy.options + [
        Bcfg2.Options.Common.syslog,
        Bcfg2.Options.Common.interactive,
        Bcfg2.Options.BooleanOption(
            "-q", "--quick", help="Disable some checksum verification"),
        Bcfg2.Options.Option(
            cf=('client', 'probe_timeout'),
            type=Bcfg2.Options.Types.timeout,
            help="Timeout when running client probes"),
        Bcfg2.Options.Option(
            "-b", "--only-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Only configure the given bundle(s)'),
        Bcfg2.Options.Option(
            "-B", "--except-bundles", default=[],
            type=Bcfg2.Options.Types.colon_list,
            help='Configure everything except the given bundle(s)'),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.BooleanOption(
                "-Q", "--bundle-quick",
                help='Only verify the given bundle(s)'),
            Bcfg2.Options.Option(
                '-r', '--remove',
                choices=['all', 'services', 'packages', 'users'],
                help='Force removal of additional configuration items')),
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.PathOption(
                '-f', '--file', type=argparse.FileType('rb'),
                help='Configure from a file rather than querying the server'),
            Bcfg2.Options.PathOption(
                '-c', '--cache', type=argparse.FileType('wb'),
                help='Store the configuration in a file')),
        Bcfg2.Options.BooleanOption(
            '--exit-on-probe-failure', default=True,
            cf=('client', 'exit_on_probe_failure'),
            help="The client should exit if a probe fails"),
        Bcfg2.Options.Option(
            '-p', '--profile', cf=('client', 'profile'),
            help='Assert the given profile for the host'),
        Bcfg2.Options.Option(
            '-l', '--decision', cf=('client', 'decision'),
            choices=['whitelist', 'blacklist', 'none'],
            help='Run client in server decision list mode'),
        Bcfg2.Options.BooleanOption(
            "-O", "--no-lock", help='Omit lock check'),
        Bcfg2.Options.PathOption(
            cf=('components', 'lockfile'), default='/var/lock/bcfg2.run',
            help='Client lock file'),
        Bcfg2.Options.BooleanOption(
            "-n", "--dry-run", help='Do not actually change the system'),
        Bcfg2.Options.Option(
            "-D", "--drivers", cf=('client', 'drivers'),
            type=Bcfg2.Options.Types.comma_list,
            default=[m[1] for m in walk_packages(path=Tools.__path__)],
            action=ClientDriverAction, help='Client drivers'),
        Bcfg2.Options.BooleanOption(
            "-e", "--show-extra", help='Enable extra entry output'),
        Bcfg2.Options.BooleanOption(
            "-k", "--kevlar", help='Run in bulletproof mode'),
        Bcfg2.Options.BooleanOption(
            "-i", "--only-important",
            help='Only configure the important entries')]

    def __init__(self):
        self.config = None
        self._proxy = None
        self.logger = logging.getLogger('bcfg2')
        self.cmd = Executor(Bcfg2.Options.setup.probe_timeout)
        self.tools = []
        self.times = dict()
        self.times['initialization'] = time.time()

        # -Q only makes sense together with a bundle selection
        if Bcfg2.Options.setup.bundle_quick:
            if (not Bcfg2.Options.setup.only_bundles and
                    not Bcfg2.Options.setup.except_bundles):
                self.logger.error("-Q option requires -b or -B")
                raise SystemExit(1)
        if Bcfg2.Options.setup.remove == 'services':
            self.logger.error("Service removal is nonsensical; "
                              "removed services will only be disabled")
        if not Bcfg2.Options.setup.server.startswith('https://'):
            Bcfg2.Options.setup.server = \
                'https://' + Bcfg2.Options.setup.server

        #: A dict of the state of each entry. Keys are the entries.
        #: Values are boolean: True means that the entry is good,
        #: False means that the entry is bad.
        self.states = {}
        self.whitelist = []
        self.blacklist = []
        self.removal = []
        self.unhandled = []
        # NOTE(review): this rebinds self.logger, discarding the
        # 'bcfg2' logger obtained above — presumably intentional, but
        # confirm both handlers are configured equivalently
        self.logger = logging.getLogger(__name__)

    def _probe_failure(self, probename, msg):
        """ handle failure of a probe in the way the user wants us to
        (exit or continue) """
        message = "Failed to execute probe %s: %s" % (probename, msg)
        if Bcfg2.Options.setup.exit_on_probe_failure:
            self.fatal_error(message)
        else:
            self.logger.error(message)

    def run_probe(self, probe):
        """Execute probe.

        The probe script is written to a temp file, made executable,
        run through the command executor, and the captured stdout is
        returned wrapped in a <probe-data> element."""
        name = probe.get('name')
        self.logger.info("Running probe %s" % name)
        ret = XML.Element("probe-data", name=name, source=probe.get('source'))
        try:
            scripthandle, scriptname = tempfile.mkstemp()
            if sys.hexversion >= 0x03000000:
                script = os.fdopen(scripthandle, 'w',
                                   encoding=Bcfg2.Options.setup.encoding)
            else:
                script = os.fdopen(scripthandle, 'w')
            try:
                script.write("#!%s\n" %
                             (probe.attrib.get('interpreter', '/bin/sh')))
                if sys.hexversion >= 0x03000000:
                    script.write(probe.text)
                else:
                    script.write(probe.text.encode('utf-8'))
                script.close()
                os.chmod(scriptname,
                         stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                         stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
                         stat.S_IWUSR)  # 0755
                rv = self.cmd.run(scriptname)
                if rv.stderr:
                    self.logger.warning("Probe %s has error output: %s" %
                                        (name, rv.stderr))
                if not rv.success:
                    self._probe_failure(name, "Return value %s" % rv.retval)
                self.logger.info("Probe %s has result:" % name)
                self.logger.info(rv.stdout)
                if sys.hexversion >= 0x03000000:
                    ret.text = rv.stdout
                else:
                    ret.text = rv.stdout.decode('utf-8')
            finally:
                # always remove the temporary probe script
                os.unlink(scriptname)
        except SystemExit:
            raise
        except:
            self._probe_failure(name, sys.exc_info()[1])
        return ret

    def fatal_error(self, message):
        """Signal a fatal error."""
        self.logger.error("Fatal error: %s" % (message))
        raise SystemExit(1)

    @property
    def proxy(self):
        """ get an XML-RPC proxy to the server (lazily constructed) """
        if self._proxy is None:
            self._proxy = Proxy.ComponentProxy()
        return self._proxy

    def run_probes(self):
        """ run probes and upload probe data """
        try:
            probes = XML.XML(str(self.proxy.GetProbes()))
        except (Proxy.ProxyError,
                Proxy.CertificateError,
                socket.gaierror,
                socket.error):
            err = sys.exc_info()[1]
            self.fatal_error("Failed to download probes from bcfg2: %s" % err)
        except XML.ParseError:
            err = sys.exc_info()[1]
            self.fatal_error("Server returned invalid probe requests: %s" %
                             err)
        self.times['probe_download'] = time.time()

        # execute probes
        probedata = XML.Element("ProbeData")
        for probe in probes.findall(".//probe"):
            probedata.append(self.run_probe(probe))

        if len(probes.findall(".//probe")) > 0:
            try:
                # upload probe responses
                self.proxy.RecvProbeData(
                    XML.tostring(probedata,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to upload probe data: %s" % err)
        self.times['probe_upload'] = time.time()

    def get_config(self):
        """ load the configuration, either from the cached
        configuration file (-f), or from the server """
        if Bcfg2.Options.setup.file:
            # read config from file
            try:
                self.logger.debug("Reading cached configuration from %s" %
                                  Bcfg2.Options.setup.file.name)
                return Bcfg2.Options.setup.file.read()
            except IOError:
                self.fatal_error("Failed to read cached configuration from: %s"
                                 % Bcfg2.Options.setup.file.name)
        else:
            # retrieve config from server
            if Bcfg2.Options.setup.profile:
                try:
                    self.proxy.AssertProfile(Bcfg2.Options.setup.profile)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to set client profile: %s" % err)

            try:
                self.proxy.DeclareVersion(__version__)
            except (xmlrpclib.Fault,
                    Proxy.ProxyError,
                    Proxy.CertificateError,
                    socket.gaierror,
                    socket.error):
                err = sys.exc_info()[1]
                self.fatal_error("Failed to declare version: %s" % err)

            self.run_probes()

            if Bcfg2.Options.setup.decision in ['whitelist', 'blacklist']:
                try:
                    # TODO: read decision list from --decision-list
                    Bcfg2.Options.setup.decision_list = \
                        self.proxy.GetDecisionList(
                            Bcfg2.Options.setup.decision)
                    self.logger.info("Got decision list from server:")
                    self.logger.info(Bcfg2.Options.setup.decision_list)
                except Proxy.ProxyError:
                    err = sys.exc_info()[1]
                    self.fatal_error("Failed to get decision list: %s" % err)

            try:
                rawconfig = self.proxy.GetConfig().encode('utf-8')
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.fatal_error("Failed to download configuration from "
                                 "Bcfg2: %s" % err)

            self.times['config_download'] = time.time()

            if Bcfg2.Options.setup.cache:
                try:
                    Bcfg2.Options.setup.cache.write(rawconfig)
                    os.chmod(Bcfg2.Options.setup.cache.name, 384)  # 0600
                except IOError:
                    self.logger.warning("Failed to write config cache file %s"
                                        % (Bcfg2.Options.setup.cache))
                self.times['caching'] = time.time()

            return rawconfig

    def parse_config(self, rawconfig):
        """ Parse the XML configuration received from the Bcfg2 server """
        try:
            self.config = XML.XML(rawconfig)
        except XML.ParseError:
            syntax_error = sys.exc_info()[1]
            self.fatal_error("The configuration could not be parsed: %s" %
                             syntax_error)

        self.load_tools()

        # find entries not handled by any tools
        self.unhandled = [entry for struct in self.config
                          for entry in struct
                          if entry not in self.handled]

        if self.unhandled:
            self.logger.error("The following entries are not handled by any "
                              "tool:")
            for entry in self.unhandled:
                self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'),
                                                entry.get('name')))

        # find duplicates
        self.find_dups(self.config)

        pkgs = [(entry.get('name'), entry.get('origin'))
                for struct in self.config
                for entry in struct
                if entry.tag == 'Package']
        if pkgs:
            self.logger.debug("The following packages are specified in bcfg2:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None])
            self.logger.debug("The following packages are prereqs added by "
                              "Packages:")
            self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])

        self.times['config_parse'] = time.time()

    def run(self):
        """Perform client execution phase."""
        # begin configuration
        self.times['start'] = time.time()

        self.logger.info("Starting Bcfg2 client run at %s" %
                         self.times['start'])

        self.parse_config(self.get_config().decode('utf-8'))

        if self.config.tag == 'error':
            self.fatal_error("Server error: %s" % (self.config.text))

        if Bcfg2.Options.setup.bundle_quick:
            newconfig = XML.XML('<Configuration/>')
            for bundle in self.config.getchildren():
                name = bundle.get("name")
                # NOTE(review): when only -b is given, except_bundles is
                # empty, so "name not in except_bundles" is vacuously
                # true and every named bundle passes this filter —
                # confirm intended
                if (name and (name in Bcfg2.Options.setup.only_bundles or
                              name not in
                              Bcfg2.Options.setup.except_bundles)):
                    newconfig.append(bundle)
            self.config = newconfig

        if not Bcfg2.Options.setup.no_lock:
            # check lock here
            try:
                lockfile = open(Bcfg2.Options.setup.lockfile, 'w')
                if locked(lockfile.fileno()):
                    self.fatal_error("Another instance of Bcfg2 is running. "
                                     "If you want to bypass the check, run "
                                     "with the -O/--no-lock option")
            except SystemExit:
                raise
            except:
                lockfile = None
                self.logger.error("Failed to open lockfile %s: %s" %
                                  (Bcfg2.Options.setup.lockfile,
                                   sys.exc_info()[1]))

        # execute the configuration
        self.Execute()

        if not Bcfg2.Options.setup.no_lock:
            # unlock here
            if lockfile:
                try:
                    fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
                    os.remove(Bcfg2.Options.setup.lockfile)
                except OSError:
                    self.logger.error("Failed to unlock lockfile %s" %
                                      lockfile.name)

        if (not Bcfg2.Options.setup.file and
                not Bcfg2.Options.setup.bundle_quick):
            # upload statistics
            feedback = self.GenerateStats()
            try:
                self.proxy.RecvStats(
                    XML.tostring(feedback,
                                 xml_declaration=False).decode('utf-8'))
            except Proxy.ProxyError:
                err = sys.exc_info()[1]
                self.logger.error("Failed to upload configuration statistics: "
                                  "%s" % err)
                raise SystemExit(2)

        self.logger.info("Finished Bcfg2 client run at %s" % time.time())

    def load_tools(self):
        """ Load all applicable client tools """
        for tool in Bcfg2.Options.setup.drivers:
            try:
                self.tools.append(tool(self.config))
            except Tools.ToolInstantiationError:
                continue
            except:
                self.logger.error("Failed to instantiate tool %s" % tool,
                                  exc_info=1)

        # drop tools that conflict with an already-loaded tool
        for tool in self.tools[:]:
            for conflict in getattr(tool, 'conflicts', []):
                for item in self.tools:
                    if item.name == conflict:
                        self.tools.remove(item)

        self.logger.info("Loaded tool drivers:")
        self.logger.info([tool.name for tool in self.tools])

        deprecated = [tool.name for tool in self.tools if tool.deprecated]
        if deprecated:
            self.logger.warning("Loaded deprecated tool drivers:")
            self.logger.warning(deprecated)
        experimental = [tool.name for tool in self.tools if tool.experimental]
        if experimental:
            self.logger.warning("Loaded experimental tool drivers:")
            self.logger.warning(experimental)

    def find_dups(self, config):
        """ Find duplicate entries and warn about them """
        entries = dict()
        for struct in config:
            for entry in struct:
                for tool in self.tools:
                    if tool.handlesEntry(entry):
                        pkey = tool.primarykey(entry)
                        if pkey in entries:
                            entries[pkey] += 1
                        else:
                            entries[pkey] = 1
        multi = [e for e, c in entries.items() if c > 1]
        if multi:
            self.logger.debug("The following entries are included multiple "
                              "times:")
            for entry in multi:
                self.logger.debug(entry)

    def promptFilter(self, msg, entries):
        """Filter a supplied list based on user input."""
        ret = []
        entries.sort(key=lambda e: e.tag + ":" + e.get('name'))
        for entry in entries[:]:
            if entry in self.unhandled:
                # don't prompt for entries that can't be installed
                continue
            if 'qtext' in entry.attrib:
                iprompt = entry.get('qtext')
            else:
                iprompt = msg % (entry.tag, entry.get('name'))
            if prompt(iprompt):
                ret.append(entry)
        return ret

    def __getattr__(self, name):
        """Aggregate the named per-tool list (extra, handled, modified,
        __important__) across all loaded tools."""
        if name in ['extra', 'handled', 'modified', '__important__']:
            ret = []
            for tool in self.tools:
                ret += getattr(tool, name)
            return ret
        elif name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(name)

    def InstallImportant(self):
        """Install important entries

        We also process the decision mode stuff here because we want
        to prevent non-whitelisted/blacklisted 'important' entries
        from being installed prior to determining the decision mode on
        the client.
        """
        # Need to process decision stuff early so that dryrun mode
        # works with it
        self.whitelist = [entry for entry in self.states
                          if not self.states[entry]]
        if not Bcfg2.Options.setup.file:
            if Bcfg2.Options.setup.decision == 'whitelist':
                dwl = Bcfg2.Options.setup.decision_list
                w_to_rem = [e for e in self.whitelist
                            if not matches_white_list(e, dwl)]
                if w_to_rem:
                    self.logger.info("In whitelist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in w_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in w_to_rem]
            elif Bcfg2.Options.setup.decision == 'blacklist':
                b_to_rem = \
                    [e for e in self.whitelist
                     if not
                     passes_black_list(e, Bcfg2.Options.setup.decision_list)]
                if b_to_rem:
                    self.logger.info("In blacklist mode: "
                                     "suppressing installation of:")
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_rem])
                    self.whitelist = [x for x in self.whitelist
                                      if x not in b_to_rem]

        # take care of important entries first
        if (not Bcfg2.Options.setup.dry_run or
                Bcfg2.Options.setup.only_important):
            important_installs = set()
            for parent in self.config.findall(".//Path/.."):
                name = parent.get("name")
                if not name or (name in Bcfg2.Options.setup.except_bundles and
                                name not in Bcfg2.Options.setup.only_bundles):
                    continue
                for cfile in parent.findall("./Path"):
                    if (cfile.get('name') not in self.__important__ or
                            cfile.get('type') != 'file' or
                            cfile not in self.whitelist):
                        continue
                    tools = [t for t in self.tools
                             if t.handlesEntry(cfile) and t.canVerify(cfile)]
                    if not tools:
                        continue
                    if Bcfg2.Options.setup.dry_run:
                        important_installs.add(cfile)
                        continue
                    if (Bcfg2.Options.setup.interactive and not
                            self.promptFilter("Install %s: %s? (y/N):",
                                              [cfile])):
                        self.whitelist.remove(cfile)
                        continue
                    try:
                        self.states[cfile] = tools[0].InstallPath(cfile)
                        if self.states[cfile]:
                            tools[0].modified.append(cfile)
                    except:  # pylint: disable=W0702
                        self.logger.error("Unexpected tool failure",
                                          exc_info=1)
                    cfile.set('qtext', '')
                    if tools[0].VerifyPath(cfile, []):
                        self.whitelist.remove(cfile)
            if Bcfg2.Options.setup.dry_run and len(important_installs) > 0:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                  for e in important_installs])

    def Inventory(self):
        """ Verify all entries, find extra entries, and build up workqueues """
        # initialize all states
        for struct in self.config.getchildren():
            for entry in struct.getchildren():
                self.states[entry] = False
        for tool in self.tools:
            try:
                self.states.update(tool.Inventory())
            except:  # pylint: disable=W0702
                self.logger.error("%s.Inventory() call failed:" % tool.name,
                                  exc_info=1)

    def Decide(self):  # pylint: disable=R0912
        """Set self.whitelist based on user interaction."""
        iprompt = "Install %s: %s? (y/N): "
        rprompt = "Remove %s: %s? (y/N): "
        if Bcfg2.Options.setup.remove:
            if Bcfg2.Options.setup.remove == 'all':
                self.removal = self.extra
            elif Bcfg2.Options.setup.remove == 'services':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Service']
            elif Bcfg2.Options.setup.remove == 'packages':
                self.removal = [entry for entry in self.extra
                                if entry.tag == 'Package']
            elif Bcfg2.Options.setup.remove == 'users':
                self.removal = [entry for entry in self.extra
                                if entry.tag in ['POSIXUser', 'POSIXGroup']]

        candidates = [entry for entry in self.states
                      if not self.states[entry]]

        if Bcfg2.Options.setup.dry_run:
            if self.whitelist:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry installation for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.whitelist])
                self.whitelist = []
            if self.removal:
                self.logger.info("In dryrun mode: "
                                 "suppressing entry removal for:")
                self.logger.info(["%s:%s" % (entry.tag, entry.get('name'))
                                  for entry in self.removal])
                self.removal = []

        # Here is where most of the work goes
        # first perform bundle filtering
        all_bundle_names = [b.get('name')
                            for b in self.config.findall('./Bundle')]
        bundles = self.config.getchildren()
        if Bcfg2.Options.setup.only_bundles:
            # warn if non-existent bundle given
            for bundle in Bcfg2.Options.setup.only_bundles:
                if bundle not in all_bundle_names:
                    self.logger.info("Warning: Bundle %s not found" % bundle)
            bundles = [b for b in bundles
                       if b.get('name') in Bcfg2.Options.setup.only_bundles]
        if Bcfg2.Options.setup.except_bundles:
            # warn if non-existent bundle given
            if not Bcfg2.Options.setup.bundle_quick:
                for bundle in Bcfg2.Options.setup.except_bundles:
                    if bundle not in all_bundle_names:
                        self.logger.info("Warning: Bundle %s not found" %
                                         bundle)
            bundles = [
                b for b in bundles
                if b.get('name') not in Bcfg2.Options.setup.except_bundles]
        self.whitelist = [e for e in self.whitelist
                          if any(e in b for b in bundles)]

        # first process prereq actions
        for bundle in bundles[:]:
            if bundle.tag == 'Bundle':
                bmodified = any((item in self.whitelist or
                                 item in self.modified) for item in bundle)
            else:
                bmodified = False
            actions = [a for a in bundle.findall('./Action')
                       if (a.get('timing') in ['pre', 'both'] and
                           (bmodified or a.get('when') == 'always'))]
            # now we process all "pre" and "both" actions that are either
            # always or the bundle has been modified
            if Bcfg2.Options.setup.interactive:
                self.promptFilter(iprompt, actions)
            self.DispatchInstallCalls(actions)

            if bundle.tag != 'Bundle':
                continue

            # need to test to fail entries in whitelist
            if not all(self.states[a] for a in actions):
                # then display bundles forced off with entries
                self.logger.info("%s %s failed prerequisite action" %
                                 (bundle.tag, bundle.get('name')))
                bundles.remove(bundle)
                b_to_remv = [ent for ent in self.whitelist if ent in bundle]
                if b_to_remv:
                    self.logger.info("Not installing entries from %s %s" %
                                     (bundle.tag, bundle.get('name')))
                    self.logger.info(["%s:%s" % (e.tag, e.get('name'))
                                      for e in b_to_remv])
                    for ent in b_to_remv:
                        self.whitelist.remove(ent)

        self.logger.debug("Installing entries in the following bundle(s):")
        self.logger.debug(" %s" % ", ".join(b.get("name") for b in bundles
                                            if b.get("name")))

        if Bcfg2.Options.setup.interactive:
            self.whitelist = self.promptFilter(iprompt, self.whitelist)
            self.removal = self.promptFilter(rprompt, self.removal)

        for entry in candidates:
            if entry not in self.whitelist:
                self.blacklist.append(entry)

    def DispatchInstallCalls(self, entries):
        """Dispatch install calls to underlying tools."""
        for tool in self.tools:
            handled = [entry for entry in entries if tool.canInstall(entry)]
            if not handled:
                continue
            try:
                self.states.update(tool.Install(handled))
            except:  # pylint: disable=W0702
                self.logger.error("%s.Install() call failed:" % tool.name,
                                  exc_info=1)

    def Install(self):
        """Install all entries."""
        self.DispatchInstallCalls(self.whitelist)
        mods = self.modified
        mbundles = [struct for struct in self.config.findall('Bundle')
                    if any(True for mod in mods if mod in struct)]

        if self.modified:
            # Handle Bundle interdeps
            if mbundles:
                self.logger.info("The Following Bundles have been modified:")
                self.logger.info([mbun.get('name') for mbun in mbundles])
            tbm = [(t, b) for t in self.tools for b in mbundles]
            for tool, bundle in tbm:
                try:
                    self.states.update(tool.Inventory(structures=[bundle]))
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Inventory() call failed:" %
                                      tool.name,
                                      exc_info=1)
            clobbered = [entry for bundle in mbundles for entry in bundle
                         if (not self.states[entry] and
                             entry not in self.blacklist)]
            if clobbered:
                self.logger.debug("Found clobbered entries:")
                self.logger.debug(["%s:%s" % (entry.tag, entry.get('name'))
                                   for entry in clobbered])
                if not Bcfg2.Options.setup.interactive:
                    self.DispatchInstallCalls(clobbered)

        for bundle in self.config.findall('.//Bundle'):
            if (Bcfg2.Options.setup.only_bundles and
                    bundle.get('name') not in
                    Bcfg2.Options.setup.only_bundles):
                # prune out unspecified bundles when running with -b
                continue
            if bundle in mbundles:
                self.logger.debug("Bundle %s was modified" %
                                  bundle.get('name'))
                func = "BundleUpdated"
            else:
                self.logger.debug("Bundle %s was not modified" %
                                  bundle.get('name'))
                func = "BundleNotUpdated"
            for tool in self.tools:
                try:
                    self.states.update(getattr(tool, func)(bundle))
                except:  # pylint: disable=W0702
                    self.logger.error("%s.%s(%s:%s) call failed:" %
                                      (tool.name, func, bundle.tag,
                                       bundle.get("name")), exc_info=1)

        for indep in self.config.findall('.//Independent'):
            for tool in self.tools:
                try:
                    self.states.update(tool.BundleNotUpdated(indep))
                except:  # pylint: disable=W0702
                    self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:"
                                      % (tool.name, indep.tag,
                                         indep.get("name")), exc_info=1)

    def Remove(self):
        """Remove extra entries."""
        for tool in self.tools:
            extras = [entry for entry in self.removal
                      if tool.handlesEntry(entry)]
            if extras:
                try:
                    tool.Remove(extras)
                except:  # pylint: disable=W0702
                    self.logger.error("%s.Remove() failed" % tool.name,
                                      exc_info=1)

    def CondDisplayState(self, phase):
        """Conditionally print tracing information."""
        self.logger.info('Phase: %s' % phase)
        self.logger.info('Correct entries: %d' %
                         list(self.states.values()).count(True))
        self.logger.info('Incorrect entries: %d' %
                         list(self.states.values()).count(False))
        if phase == 'final' and list(self.states.values()).count(False):
            for entry in sorted(self.states.keys(),
                                key=lambda e: e.tag + ":" + e.get('name')):
                if not self.states[entry]:
                    etype = entry.get('type')
                    if etype:
                        self.logger.info("%s:%s:%s" %
                                         (entry.tag, etype,
                                          entry.get('name')))
                    else:
                        self.logger.info("%s:%s" %
                                         (entry.tag, entry.get('name')))
        self.logger.info('Total managed entries: %d' %
                         len(list(self.states.values())))
        self.logger.info('Unmanaged entries: %d' % len(self.extra))
        if phase == 'final' and Bcfg2.Options.setup.show_extra:
            for entry in sorted(self.extra,
                                key=lambda e: e.tag + ":" + e.get('name')):
                etype = entry.get('type')
                if etype:
                    self.logger.info("%s:%s:%s" %
                                     (entry.tag, etype, entry.get('name')))
                else:
                    self.logger.info("%s:%s" %
                                     (entry.tag, entry.get('name')))
        if ((list(self.states.values()).count(False) == 0) and not self.extra):
            self.logger.info('All entries correct.')

    def ReInventory(self):
        """Recheck everything."""
        if not Bcfg2.Options.setup.dry_run and Bcfg2.Options.setup.kevlar:
            self.logger.info("Rechecking system inventory")
            self.Inventory()

    def Execute(self):
        """Run all methods."""
        self.Inventory()
        self.times['inventory'] = time.time()
        self.CondDisplayState('initial')
        self.InstallImportant()
        if not Bcfg2.Options.setup.only_important:
            self.Decide()
            self.Install()
            self.times['install'] = time.time()
            self.Remove()
            self.times['remove'] = time.time()
        if self.modified:
            self.ReInventory()
        self.times['reinventory'] = time.time()
        self.times['finished'] = time.time()
        self.CondDisplayState('final')

    def GenerateStats(self):
        """Generate XML summary of execution statistics."""
        feedback = XML.Element("upload-statistics")
        stats = XML.SubElement(feedback,
                               'Statistics', total=str(len(self.states)),
                               version='2.0',
                               revision=self.config.get('revision', '-1'))
        good_entries = [key for key, val in list(self.states.items()) if val]
        good = len(good_entries)
        stats.set('good', str(good))
        if any(not val for val in list(self.states.values())):
            stats.set('state', 'dirty')
        else:
            stats.set('state', 'clean')

        # List bad elements of the configuration
        for (data, ename) in [(self.modified, 'Modified'),
                              (self.extra, "Extra"),
                              (good_entries, "Good"),
                              ([entry for entry in self.states
                                if not self.states[entry]], "Bad")]:
            container = XML.SubElement(stats, ename)
            for item in data:
                item.set('qtext', '')
                container.append(item)
                item.text = None
        timeinfo = XML.Element("OpStamps")
        feedback.append(stats)
        for (event, timestamp) in list(self.times.items()):
            timeinfo.set(event, str(timestamp))
        stats.append(timeinfo)
        return feedback
class PuppetENC(Bcfg2.Server.Plugin.Plugin,
                Bcfg2.Server.Plugin.Connector,
                Bcfg2.Server.Plugin.ClientRunHooks,
                Bcfg2.Server.Plugin.DirectoryBacked):
    """ A plugin to run Puppet external node classifiers
    (http://docs.puppetlabs.com/guides/external_nodes.html) """

    # Every file in the plugin directory is treated as an executable ENC
    __child__ = PuppetENCFile

    def __init__(self, core):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core)
        Bcfg2.Server.Plugin.Connector.__init__(self)
        Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
        Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
        # ENC output cache, keyed by hostname; each value is a dict
        # with 'groups' (list) and 'params' (dict)
        self.cache = dict()
        self.cmd = Executor()

    def _run_encs(self, metadata):
        """ Run all Puppet ENCs for the given client and cache the
        merged groups and parameters under ``metadata.hostname``.

        :param metadata: the client metadata to run ENCs for
        :raises: :class:`Bcfg2.Server.Plugin.PluginExecutionError` if
                 an ENC fails to run or emits unparseable YAML """
        cache = dict(groups=[], params=dict())
        for enc in self.entries.keys():
            epath = os.path.join(self.data, enc)
            self.debug_log("PuppetENC: Running ENC %s for %s" %
                           (enc, metadata.hostname))
            result = self.cmd.run([epath, metadata.hostname])
            if not result.success:
                msg = "PuppetENC: Error running ENC %s for %s: %s" % \
                    (enc, metadata.hostname, result.error)
                self.logger.error(msg)
                raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
            if result.stderr:
                self.debug_log("ENC Error: %s" % result.stderr)
            try:
                yaml = yaml_load(result.stdout)
                self.debug_log("Loaded data from %s for %s: %s" %
                               (enc, metadata.hostname, yaml))
            except yaml_error:
                err = sys.exc_info()[1]
                msg = "Error decoding YAML from %s for %s: %s" % \
                    (enc, metadata.hostname, err)
                self.logger.error(msg)
                raise Bcfg2.Server.Plugin.PluginExecutionError(msg)

            # "classes" is the canonical ENC key; accept "groups" as a
            # fallback.  It may be a plain list of names or a mapping
            # of name -> parameters.
            groups = yaml.get("classes", yaml.get("groups", dict()))
            if groups:
                if isinstance(groups, list):
                    self.debug_log("ENC %s adding groups to %s: %s" %
                                   (enc, metadata.hostname, groups))
                    cache['groups'].extend(groups)
                else:
                    self.debug_log("ENC %s adding groups to %s: %s" %
                                   (enc, metadata.hostname, groups.keys()))
                    for group, params in groups.items():
                        cache['groups'].append(group)
                        if params:
                            cache['params'].update(params)
            if "parameters" in yaml and yaml['parameters']:
                cache['params'].update(yaml['parameters'])
            if "environment" in yaml:
                self.logger.info("Ignoring unsupported environment section of "
                                 "ENC %s for %s" % (enc, metadata.hostname))
        self.cache[metadata.hostname] = cache

    def get_additional_groups(self, metadata):
        """ Return the group list the ENCs produced for this client,
        running the ENCs first if the cache is cold. """
        if metadata.hostname not in self.cache:
            self._run_encs(metadata)
        return self.cache[metadata.hostname]['groups']

    def get_additional_data(self, metadata):
        """ Return the parameter dict the ENCs produced for this
        client, running the ENCs first if the cache is cold. """
        if metadata.hostname not in self.cache:
            self._run_encs(metadata)
        return self.cache[metadata.hostname]['params']

    def end_client_run(self, metadata):
        """ clear the entire cache at the end of each client run. this
        guarantees that each client will run all ENCs at or near the
        start of each run; we have to clear the entire cache instead of
        just the cache for this client because a client that builds
        templates that use metadata for other clients will populate the
        cache for those clients, which we don't want.

        This makes the caching less than stellar, but it does prevent
        multiple runs of ENCs for a single host a) for groups and data
        separately; and b) when a single client's metadata is
        generated multiple times by separate templates """
        self.cache = dict()
        if self.core.metadata_cache_mode == 'aggressive':
            # clear the metadata client cache if we're in aggressive
            # mode, and produce a warning.  PuppetENC really isn't
            # compatible with aggressive mode, since we don't know
            # when the output from a given ENC has changed, and thus
            # can't invalidate the cache sanely.
            self.logger.warning("PuppetENC is incompatible with aggressive "
                                "client metadata caching, try 'cautious' or "
                                "'initial' instead")
            self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)

    def end_statistics(self, metadata):
        """ Clear the cache when statistics processing finishes, same
        as at the end of a client run. """
        # BUGFIX: the original called self.end_client_run(self, metadata),
        # passing self twice and raising TypeError whenever this hook fired
        self.end_client_run(metadata)
class CfgPublicKeyCreator(CfgCreator, StructFile):
    """ .. currentmodule:: Bcfg2.Server.Plugins.Cfg

    The CfgPublicKeyCreator creates SSH public keys on the fly. It is
    invoked by :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to
    handle the creation of the public key, and can also call
    :class:`CfgPrivateKeyCreator.CfgPrivateKeyCreator` to trigger the
    creation of a keypair when a public key is created. """

    #: Different configurations for different clients/groups can be
    #: handled with Client and Group tags within pubkey.xml
    __specific__ = False

    #: Handle XML specifications of private keys
    __basenames__ = ['pubkey.xml']

    #: No text content on any tags, so encryption support disabled
    encryption = False

    def __init__(self, fname):
        CfgCreator.__init__(self, fname)
        StructFile.__init__(self, fname)
        self.cfg = get_cfg()
        self.core = self.cfg.core
        self.cmd = Executor()

    def create_data(self, entry, metadata):
        """ Create public key data for the given entry.

        Binds the matching private key (which may create the whole
        keypair as a side effect) and, if the public key file still
        does not exist afterwards, extracts the public key from the
        private key with ``ssh-keygen -y``.

        :param entry: the abstract entry being bound; its name must
                      end in ``.pub``
        :param metadata: the client metadata
        :returns: the public key data
        :raises: CfgCreationError """
        if entry.get("name").endswith(".pub"):
            privkey = entry.get("name")[:-4]
        else:
            raise CfgCreationError("Cfg: Could not determine private key for "
                                   "%s: Filename does not end in .pub" %
                                   entry.get("name"))

        privkey_entry = lxml.etree.Element("Path", name=privkey)
        try:
            self.core.Bind(privkey_entry, metadata)
        except PluginExecutionError:
            raise CfgCreationError("Cfg: Could not bind %s (private key for "
                                   "%s): %s" % (privkey, self.name,
                                                sys.exc_info()[1]))

        try:
            eset = self.cfg.entries[privkey]
            creator = eset.best_matching(metadata,
                                         eset.get_handlers(metadata,
                                                           CfgCreator))
        except KeyError:
            raise CfgCreationError("Cfg: No private key defined for %s (%s)" %
                                   (self.name, privkey))
        except PluginExecutionError:
            raise CfgCreationError("Cfg: No privkey.xml defined for %s "
                                   "(private key for %s)" % (privkey,
                                                             self.name))

        specificity = creator.get_specificity(metadata)
        fname = self.get_filename(**specificity)

        # if the private key didn't exist, then creating it may have
        # created the public key, too.  check for it first.
        if os.path.exists(fname):
            # BUGFIX: close the file handle instead of leaking it
            pubkey = open(fname)
            try:
                return pubkey.read()
            finally:
                pubkey.close()
        else:
            # generate public key from private key
            fd, privfile = tempfile.mkstemp()
            try:
                # BUGFIX: explicitly close (and thereby flush) the
                # temporary private key before ssh-keygen reads it;
                # the original relied on garbage collection to flush,
                # which is not guaranteed and could hand ssh-keygen a
                # truncated file
                tmpfile = os.fdopen(fd, 'w')
                try:
                    tmpfile.write(privkey_entry.text)
                finally:
                    tmpfile.close()
                cmd = ["ssh-keygen", "-y", "-f", privfile]
                self.debug_log("Cfg: Extracting SSH public key from %s: %s" %
                               (privkey, " ".join(cmd)))
                result = self.cmd.run(cmd)
                if not result.success:
                    raise CfgCreationError("Cfg: Failed to extract public key "
                                           "from %s: %s" % (privkey,
                                                            result.error))
                self.write_data(result.stdout, **specificity)
                return result.stdout
            finally:
                os.unlink(privfile)

    def handle_event(self, event):
        CfgCreator.handle_event(self, event)
        StructFile.HandleEvent(self, event)
    handle_event.__doc__ = CfgCreator.handle_event.__doc__
class SSHbase(Bcfg2.Server.Plugin.Plugin,
              Bcfg2.Server.Plugin.Generator,
              Bcfg2.Server.Plugin.PullTarget):
    """
    The sshbase generator manages ssh host keys (both v1 and v2)
    for hosts.  It also manages the ssh_known_hosts file.  It can
    integrate host keys from other management domains and similarly
    export its keys.  The repository contains files in the following
    formats:

    ssh_host_key.H_(hostname) -> the v1 host private key for
      (hostname)
    ssh_host_key.pub.H_(hostname) -> the v1 host public key
      for (hostname)
    ssh_host_(ec)(dr)sa_key.H_(hostname) -> the v2 ssh host
      private key for (hostname)
    ssh_host_(ec)(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
      public key for (hostname)
    ssh_known_hosts -> the current known hosts file. this
      is regenerated each time a new key is generated.
    """
    __author__ = "*****@*****.**"

    #: Repository basenames (private and public) that this plugin manages
    keypatterns = [
        "ssh_host_dsa_key",
        "ssh_host_ecdsa_key",
        "ssh_host_rsa_key",
        "ssh_host_key",
        "ssh_host_dsa_key.pub",
        "ssh_host_ecdsa_key.pub",
        "ssh_host_rsa_key.pub",
        "ssh_host_key.pub",
    ]

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Generator.__init__(self)
        Bcfg2.Server.Plugin.PullTarget.__init__(self)
        self.ipcache = {}     # hostname -> (ip, hostname) or False
        self.namecache = {}   # ip -> list of names or False
        self.__skn = False    # cached ssh_known_hosts text
        # keep track of which bogus keys we've warned about, and only
        # do so once
        self.badnames = dict()
        self.fam = Bcfg2.Server.FileMonitor.get_fam()
        self.fam.AddMonitor(self.data, self)
        self.static = dict()
        self.entries = dict()
        self.Entries["Path"] = dict()
        self.entries["/etc/ssh/ssh_known_hosts"] = \
            KnownHostsEntrySet(self.data)
        self.Entries["Path"]["/etc/ssh/ssh_known_hosts"] = self.build_skn
        for keypattern in self.keypatterns:
            self.entries["/etc/ssh/" + keypattern] = \
                HostKeyEntrySet(keypattern, self.data)
            self.Entries["Path"]["/etc/ssh/" + keypattern] = self.build_hk
        self.cmd = Executor()

    def get_skn(self):
        """Build memory cache of the ssh known hosts file."""
        if not self.__skn:
            # if no metadata is registered yet, defer
            if len(self.core.metadata.query.all()) == 0:
                self.__skn = False
                return self.__skn

            skn = [s.data.rstrip() for s in list(self.static.values())]

            mquery = self.core.metadata.query

            # build hostname cache
            names = dict()
            for cmeta in mquery.all():
                names[cmeta.hostname] = set([cmeta.hostname])
                names[cmeta.hostname].update(cmeta.aliases)
                newnames = set()
                newips = set()
                for name in names[cmeta.hostname]:
                    newnames.add(name.split(".")[0])
                    try:
                        newips.add(self.get_ipcache_entry(name)[0])
                    except PluginExecutionError:
                        continue
                names[cmeta.hostname].update(newnames)
                names[cmeta.hostname].update(cmeta.addresses)
                names[cmeta.hostname].update(newips)
                # TODO: Only perform reverse lookups on IPs if an
                # option is set.
                if True:
                    for ip in newips:
                        try:
                            names[cmeta.hostname].update(
                                self.get_namecache_entry(ip))
                        except:  # pylint: disable=W0702
                            # deliberate best-effort: reverse lookups
                            # may fail in arbitrary ways
                            continue
                names[cmeta.hostname] = sorted(names[cmeta.hostname])

            pubkeys = [pubk for pubk in list(self.entries.keys())
                       if pubk.endswith(".pub")]
            pubkeys.sort()
            for pubkey in pubkeys:
                for entry in sorted(
                        self.entries[pubkey].entries.values(),
                        key=lambda e: (e.specific.hostname or
                                       e.specific.group)):
                    specific = entry.specific
                    hostnames = []
                    if specific.hostname and specific.hostname in names:
                        hostnames = names[specific.hostname]
                    elif specific.group:
                        hostnames = list(
                            chain(*[names[cmeta.hostname]
                                    for cmeta in
                                    mquery.by_groups([specific.group])]))
                    elif specific.all:
                        # a generic key for all hosts? really?
                        hostnames = list(chain(*list(names.values())))
                    if not hostnames:
                        if specific.hostname:
                            key = specific.hostname
                            ktype = "host"
                        elif specific.group:
                            key = specific.group
                            ktype = "group"
                        else:
                            # user has added a global SSH key, but
                            # have no clients yet.  don't warn about
                            # this.
                            continue

                        if key not in self.badnames:
                            self.badnames[key] = True
                            self.logger.info("Ignoring key for unknown %s %s"
                                             % (ktype, key))
                        continue

                    skn.append("%s %s" % (",".join(hostnames),
                                          entry.data.rstrip()))

            self.__skn = "\n".join(skn) + "\n"
        return self.__skn

    def set_skn(self, value):
        """Set backing data for skn."""
        self.__skn = value
    skn = property(get_skn, set_skn)

    def HandleEvent(self, event=None):
        """Local event handler that does skn regen on pubkey change."""
        # skip events we don't care about
        action = event.code2str()
        if action == "endExist" or event.filename == self.data:
            return

        for entry in list(self.entries.values()):
            if entry.specific.match(event.filename):
                entry.handle_event(event)
                if any(event.filename.startswith(kp)
                       for kp in self.keypatterns
                       if kp.endswith(".pub")):
                    self.debug_log("New public key %s; invalidating "
                                   "ssh_known_hosts cache" % event.filename)
                    self.skn = False
                return

        if event.filename == "info.xml":
            for entry in list(self.entries.values()):
                entry.handle_event(event)
            return

        if event.filename.endswith(".static"):
            self.logger.info("Static key %s %s; invalidating ssh_known_hosts "
                             "cache" % (event.filename, action))
            if action == "deleted" and event.filename in self.static:
                del self.static[event.filename]
                self.skn = False
            else:
                self.static[event.filename] = \
                    Bcfg2.Server.Plugin.FileBacked(
                        os.path.join(self.data, event.filename))
                self.static[event.filename].HandleEvent(event)
                self.skn = False
            return

        self.logger.warn("SSHbase: Got unknown event %s %s" %
                         (event.filename, action))

    def get_ipcache_entry(self, client):
        """ Build a cache of dns results.

        Returns an ``(ipaddr, hostname)`` tuple for the client, using
        :func:`socket.gethostbyname` with a ``getent hosts`` fallback.
        Negative results are cached as ``False``.

        :raises: PluginExecutionError if no address can be found """
        if client in self.ipcache:
            if self.ipcache[client]:
                return self.ipcache[client]
            else:
                raise PluginExecutionError("No cached IP address for %s" %
                                           client)
        else:
            # need to add entry
            try:
                ipaddr = socket.gethostbyname(client)
                self.ipcache[client] = (ipaddr, client)
                return (ipaddr, client)
            except socket.gaierror:
                result = self.cmd.run(["getent", "hosts", client])
                if result.success:
                    fields = result.stdout.strip().split()
                    if fields:
                        # BUGFIX: cache only the address (first getent
                        # field); the old code stored the whole list,
                        # which is unhashable and broke callers that
                        # add the result to a set
                        self.ipcache[client] = (fields[0], client)
                        return (fields[0], client)
                self.ipcache[client] = False
                msg = "Failed to find IP address for %s: %s" % (client,
                                                                result.error)
                # BUGFIX: Logger objects are not callable; the old
                # self.logger(msg) raised TypeError instead of logging
                self.logger.error(msg)
                raise PluginExecutionError(msg)

    def get_namecache_entry(self, cip):
        """Build a cache of name lookups from client IP addresses."""
        if cip in self.namecache:
            # lookup cached name from IP
            if self.namecache[cip]:
                return self.namecache[cip]
            else:
                raise socket.gaierror
        else:
            # add an entry that has not been cached
            try:
                rvlookup = socket.gethostbyaddr(cip)
                if rvlookup[0]:
                    self.namecache[cip] = [rvlookup[0]]
                else:
                    self.namecache[cip] = []
                self.namecache[cip].extend(rvlookup[1])
                return self.namecache[cip]
            except socket.gaierror:
                self.namecache[cip] = False
                self.logger.error("Failed to find any names associated with "
                                  "IP address %s" % cip)
                raise

    def build_skn(self, entry, metadata):
        """This function builds a host-specific known_hosts file."""
        try:
            self.entries[entry.get("name")].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            # no static known_hosts file for this host; build one from
            # the global cache plus this host's own public keys
            entry.text = self.skn
            hostkeys = []
            for key in self.keypatterns:
                if key.endswith(".pub"):
                    try:
                        hostkeys.append(
                            self.entries["/etc/ssh/" +
                                         key].best_matching(metadata))
                    except Bcfg2.Server.Plugin.PluginExecutionError:
                        pass
            hostkeys.sort()
            for hostkey in hostkeys:
                entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" \
                    % hostkey.data
            self.entries[entry.get("name")].bind_info_to_entry(entry,
                                                               metadata)

    def build_hk(self, entry, metadata):
        """This binds host key data into entries."""
        try:
            self.entries[entry.get("name")].bind_entry(entry, metadata)
        except Bcfg2.Server.Plugin.PluginExecutionError:
            filename = entry.get("name").split("/")[-1]
            self.GenerateHostKeyPair(metadata.hostname, filename)
            # Service the FAM events queued up by the key generation
            # so the data structure entries will be available for
            # binding.
            #
            # NOTE: We wait for up to ten seconds. There is some
            # potential for race condition, because if the file
            # monitor doesn't get notified about the new key files in
            # time, those entries won't be available for binding. In
            # practice, this seems "good enough".
            tries = 0
            is_bound = False
            while not is_bound:
                if tries >= 10:
                    msg = "%s still not registered" % filename
                    self.logger.error(msg)
                    raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
                self.fam.handle_events_in_interval(1)
                tries += 1
                try:
                    self.entries[entry.get("name")].bind_entry(entry,
                                                               metadata)
                    is_bound = True
                except Bcfg2.Server.Plugin.PluginExecutionError:
                    pass

    def GenerateHostKeyPair(self, client, filename):
        """Generate new host key pair for client."""
        match = re.search(r"(ssh_host_(?:((?:ecd|d|r)sa)_)?key)", filename)
        if match:
            hostkey = "%s.H_%s" % (match.group(1), client)
            if match.group(2):
                keytype = match.group(2)
            else:
                # no explicit algorithm in the filename means a v1 key
                keytype = "rsa1"
        else:
            raise PluginExecutionError("Unknown key filename: %s" % filename)

        fileloc = os.path.join(self.data, hostkey)
        publoc = os.path.join(self.data,
                              ".".join([hostkey.split(".")[0], "pub",
                                        "H_%s" % client]))
        tempdir = tempfile.mkdtemp()
        temploc = os.path.join(tempdir, hostkey)
        cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "",
               "-t", keytype, "-C", "root@%s" % client]
        self.debug_log("SSHbase: Running: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSHbase: Error running ssh-keygen: %s"
                                       % result.error)

        try:
            shutil.copy(temploc, fileloc)
            shutil.copy("%s.pub" % temploc, publoc)
        except IOError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Temporary SSH keys not found: %s" %
                                       err)

        try:
            os.unlink(temploc)
            os.unlink("%s.pub" % temploc)
            os.rmdir(tempdir)
        except OSError:
            err = sys.exc_info()[1]
            raise PluginExecutionError("Failed to unlink temporary ssh keys: "
                                       "%s" % err)

    def AcceptChoices(self, _, metadata):
        """Return the specificities this plugin accepts pull data for."""
        return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)]

    def AcceptPullData(self, specific, entry, log):
        """Per-plugin bcfg2-admin pull support."""
        # specific will always be host specific
        filename = os.path.join(self.data,
                                "%s.H_%s" % (entry["name"].split("/")[-1],
                                             specific.hostname))
        try:
            # context manager closes the handle even if write() fails
            with open(filename, "w") as keyfile:
                keyfile.write(entry["text"])
            if log:
                print("Wrote file %s" % filename)
        except KeyError:
            self.logger.error(
                "Failed to pull %s. This file does not "
                "currently exist on the client" % entry.get("name"))
def __init__(self, fname):
    """Set up the creator: initialize both base classes, grab handles
    to the Cfg plugin and the server core, and create a command
    executor for running external tools."""
    CfgCreator.__init__(self, fname)
    StructFile.__init__(self, fname)
    cfg = get_cfg()
    self.cfg = cfg
    self.core = cfg.core
    self.cmd = Executor()
def __init__(self, name, specific, encoding):
    """Initialize the verifier entry.

    Creates an executor (30s timeout) for running the external
    verification command and an empty argv list to be filled in when
    the verification spec is parsed."""
    CfgVerifier.__init__(self, name, specific, encoding)
    self.exc = Executor(timeout=30)
    self.cmd = []
class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
    """ Entry set to handle SSLCA entries and XML files """

    def __init__(self, _, path, entry_type, encoding, parent=None):
        Bcfg2.Server.Plugin.EntrySet.__init__(self, os.path.basename(path),
                                              path, entry_type, encoding)
        self.parent = parent
        self.key = None    # SSLCAKeySpec, set when key.xml is seen
        self.cert = None   # SSLCACertSpec, set when cert.xml is seen
        self.cmd = Executor(timeout=120)

    def handle_event(self, event):
        """Dispatch FAM events for key.xml/cert.xml to the matching
        spec object; everything else goes to the base EntrySet."""
        action = event.code2str()
        fpath = os.path.join(self.path, event.filename)

        if event.filename == 'key.xml':
            if action in ['exists', 'created', 'changed']:
                self.key = SSLCAKeySpec(fpath)
            self.key.HandleEvent(event)
        elif event.filename == 'cert.xml':
            if action in ['exists', 'created', 'changed']:
                self.cert = SSLCACertSpec(fpath)
            self.cert.HandleEvent(event)
        else:
            Bcfg2.Server.Plugin.EntrySet.handle_event(self, event)

    def build_key(self, entry, metadata):
        """ either grabs a prexisting key hostfile, or triggers the
        generation of a new key if one doesn't exist. """
        # TODO: verify key fits the specs
        filename = "%s.H_%s" % (os.path.basename(entry.get('name')),
                                metadata.hostname)
        self.logger.info("SSLCA: Generating new key %s" % filename)
        key_spec = self.key.get_spec(metadata)
        ktype = key_spec['type']
        bits = key_spec['bits']
        if ktype == 'rsa':
            cmd = ["openssl", "genrsa", bits]
        elif ktype == 'dsa':
            cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits]
        else:
            # BUGFIX: any other type previously left 'cmd' unbound and
            # crashed with NameError; fail with a meaningful error
            raise PluginExecutionError(
                "SSLCA: Unsupported key type %s for %s" %
                (ktype, entry.get("name")))
        self.debug_log("SSLCA: Generating new key: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSLCA: Failed to generate key %s for "
                                       "%s: %s" % (entry.get("name"),
                                                   metadata.hostname,
                                                   result.error))
        with open(os.path.join(self.path, filename), 'w') as keyfile:
            keyfile.write(result.stdout)
        return result.stdout

    def build_cert(self, entry, metadata, keyfile):
        """ generate a new cert """
        filename = "%s.H_%s" % (os.path.basename(entry.get('name')),
                                metadata.hostname)
        self.logger.info("SSLCA: Generating new cert %s" % filename)
        cert_spec = self.cert.get_spec(metadata)
        ca = self.parent.get_ca(cert_spec['ca'])
        req_config = None
        req = None
        try:
            req_config = self.build_req_config(metadata)
            req = self.build_request(keyfile, req_config, metadata)
            days = cert_spec['days']
            cmd = ["openssl", "ca", "-config", ca['config'], "-in", req,
                   "-days", days, "-batch"]
            passphrase = ca.get('passphrase')
            if passphrase:
                cmd.extend(["-passin", "pass:%s" % passphrase])

                def _scrub_pass(arg):
                    """ helper to scrub the passphrase from the
                    argument list before it is logged """
                    if arg.startswith("pass:"):
                        return "pass:******"
                    return arg
            else:
                _scrub_pass = lambda a: a

            self.debug_log("SSLCA: Generating new certificate: %s" %
                           " ".join(_scrub_pass(a) for a in cmd))
            result = self.cmd.run(cmd)
            if not result.success:
                raise PluginExecutionError("SSLCA: Failed to generate cert: "
                                           "%s" % result.error)
        finally:
            # best-effort cleanup of the temporary CSR and its config
            try:
                if req_config and os.path.exists(req_config):
                    os.unlink(req_config)
                if req and os.path.exists(req):
                    os.unlink(req)
            except OSError:
                self.logger.error("SSLCA: Failed to unlink temporary files: "
                                  "%s" % sys.exc_info()[1])
        cert = result.stdout
        if cert_spec['append_chain'] and 'chaincert' in ca:
            with open(ca['chaincert']) as chainfile:
                cert += chainfile.read()

        with open(os.path.join(self.path, filename), 'w') as certfile:
            certfile.write(cert)
        return cert

    def build_req_config(self, metadata):
        """ generates a temporary openssl configuration file that is
        used to generate the required certificate request.

        :returns: path of the temporary config file (caller unlinks) """
        # create temp request config file
        fd, fname = tempfile.mkstemp()
        cfp = ConfigParser.ConfigParser({})
        cfp.optionxform = str
        defaults = {
            'req': {
                'default_md': 'sha1',
                'distinguished_name': 'req_distinguished_name',
                'req_extensions': 'v3_req',
                'x509_extensions': 'v3_req',
                'prompt': 'no'
            },
            'req_distinguished_name': {},
            'v3_req': {
                'subjectAltName': '@alt_names'
            },
            'alt_names': {}
        }
        for section in list(defaults.keys()):
            cfp.add_section(section)
            for key in defaults[section]:
                cfp.set(section, key, defaults[section][key])
        cert_spec = self.cert.get_spec(metadata)
        altnamenum = 1
        altnames = cert_spec['subjectaltname']
        altnames.extend(list(metadata.aliases))
        altnames.append(metadata.hostname)
        for altname in altnames:
            cfp.set('alt_names', 'DNS.' + str(altnamenum), altname)
            altnamenum += 1
        for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
            if cert_spec[item]:
                cfp.set('req_distinguished_name', item, cert_spec[item])
        cfp.set('req_distinguished_name', 'CN', metadata.hostname)
        self.debug_log("SSLCA: Writing temporary request config to %s" %
                       fname)
        try:
            cfp.write(os.fdopen(fd, 'w'))
        except IOError:
            raise PluginExecutionError("SSLCA: Failed to write temporary CSR "
                                       "config file: %s" % sys.exc_info()[1])
        return fname

    def build_request(self, keyfile, req_config, metadata):
        """ creates the certificate request.

        :returns: path of the temporary CSR file (caller unlinks) """
        fd, req = tempfile.mkstemp()
        os.close(fd)
        days = self.cert.get_spec(metadata)['days']
        cmd = ["openssl", "req", "-new", "-config", req_config,
               "-days", days, "-key", keyfile, "-text", "-out", req]
        self.debug_log("SSLCA: Generating new CSR: %s" % " ".join(cmd))
        result = self.cmd.run(cmd)
        if not result.success:
            raise PluginExecutionError("SSLCA: Failed to generate CSR: %s" %
                                       result.error)
        return req

    def verify_cert(self, filename, keyfile, entry, metadata):
        """ Perform certification verification against the CA and
        against the key """
        ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
        do_verify = ca.get('chaincert')
        if do_verify:
            return (self.verify_cert_against_ca(filename, entry, metadata)
                    and self.verify_cert_against_key(filename, keyfile))
        # without a chain cert we have nothing to verify against
        return True

    def verify_cert_against_ca(self, filename, entry, metadata):
        """ check that a certificate validates against the ca cert,
        and that it has not expired. """
        ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
        chaincert = ca.get('chaincert')
        cert = os.path.join(self.path, filename)
        cmd = ["openssl", "verify"]
        is_root = ca.get('root_ca', "false").lower() == 'true'
        if is_root:
            cmd.append("-CAfile")
        else:
            # verifying based on an intermediate cert
            cmd.extend(["-purpose", "sslserver", "-untrusted"])
        cmd.extend([chaincert, cert])
        self.debug_log("SSLCA: Verifying %s against CA: %s" %
                       (entry.get("name"), " ".join(cmd)))
        result = self.cmd.run(cmd)
        if result.stdout == cert + ": OK\n":
            self.debug_log("SSLCA: %s verified successfully against CA" %
                           entry.get("name"))
            return True
        self.logger.warning("SSLCA: %s failed verification against CA: %s" %
                            (entry.get("name"), result.error))
        return False

    def _get_modulus(self, fname, ftype="x509"):
        """ get the modulus from the given file, or None on failure """
        cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
        self.debug_log("SSLCA: Getting modulus of %s for verification: %s" %
                       (fname, " ".join(cmd)))
        result = self.cmd.run(cmd)
        if not result.success:
            self.logger.warning("SSLCA: Failed to get modulus of %s: %s" %
                                (fname, result.error))
            # BUGFIX: previously fell through and returned the (likely
            # empty) stdout, so two failed lookups compared equal and
            # a broken cert/key pair could "verify" successfully
            return None
        return result.stdout.strip()

    def verify_cert_against_key(self, filename, keyfile):
        """ check that a certificate validates against its private
        key. """
        certfile = os.path.join(self.path, filename)
        cert = self._get_modulus(certfile)
        key = self._get_modulus(keyfile, ftype="rsa")
        if cert is not None and cert == key:
            self.debug_log("SSLCA: %s verified successfully against key %s"
                           % (filename, keyfile))
            return True
        self.logger.warning("SSLCA: %s failed verification against key %s" %
                            (filename, keyfile))
        return False

    def bind_entry(self, entry, metadata):
        """ Bind key or cert data into the entry, generating new
        material when no existing entry matches or the existing cert
        fails verification. """
        if self.key:
            self.bind_info_to_entry(entry, metadata)
            try:
                return self.best_matching(metadata).bind_entry(entry,
                                                               metadata)
            except PluginExecutionError:
                entry.text = self.build_key(entry, metadata)
                entry.set("type", "file")
                return entry
        elif self.cert:
            key = self.cert.get_spec(metadata)['key']
            cleanup_keyfile = False
            try:
                keyfile = self.parent.entries[key].best_matching(
                    metadata).name
            except PluginExecutionError:
                cleanup_keyfile = True
                # create a temp file with the key in it
                fd, keyfile = tempfile.mkstemp()
                os.chmod(keyfile, 384)  # 0600
                el = lxml.etree.Element('Path', name=key)
                self.parent.core.Bind(el, metadata)
                with os.fdopen(fd, 'w') as tmpkey:
                    tmpkey.write(el.text)

            try:
                self.bind_info_to_entry(entry, metadata)
                try:
                    best = self.best_matching(metadata)
                    if self.verify_cert(best.name, keyfile, entry, metadata):
                        return best.bind_entry(entry, metadata)
                except PluginExecutionError:
                    pass
                # if we get here, it's because either a) there was no best
                # matching entry; or b) the existing cert did not verify
                entry.text = self.build_cert(entry, metadata, keyfile)
                entry.set("type", "file")
                return entry
            finally:
                if cleanup_keyfile:
                    try:
                        os.unlink(keyfile)
                    except OSError:
                        err = sys.exc_info()[1]
                        self.logger.error("SSLCA: Failed to unlink temporary "
                                          "key %s: %s" % (keyfile, err))
def __init__(self, core):
    """Initialize the Fossil version plugin.

    Sets up an executor for running the ``fossil`` command-line tool
    and logs which VCS directory is being tracked."""
    Bcfg2.Server.Plugin.Version.__init__(self, core)
    self.cmd = Executor()
    self.logger.debug(
        "Initialized Fossil plugin with fossil directory %s"
        % self.vcs_path)