Example #1
def del_cluster(session, logger, dbcluster, config):
    cluster = str(dbcluster.name)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError("%s is still in use by clusters: %s." %
                            (format(dbcluster),
                             ", ".join([c.name for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in  dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    resources = PlenaryCollection(logger=logger)
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            resources.append(Plenary.get_plenary(res))
    domain = dbcluster.branch.name
    session.delete(dbcluster)

    session.flush()

    key = cluster_plenary.get_remove_key()
    with CompileKey.merge([key, resources.get_remove_key()]):
        cluster_plenary.cleanup(domain, locked=True)
        # And we also want to remove the profile itself
        profiles = config.get("broker", "profilesdir")
        # Only one of these should exist, but it doesn't hurt
        # to try to clean up both.
        xmlfile = os.path.join(profiles, "clusters", cluster + ".xml")
        remove_file(xmlfile, logger=logger)
        xmlgzfile = xmlfile + ".gz"
        remove_file(xmlgzfile, logger=logger)
        # And the cached template created by ant
        remove_file(os.path.join(config.get("broker", "quattordir"),
                                 "objects", "clusters",
                                 cluster + TEMPLATE_EXTENSION),
                    logger=logger)
        resources.remove(locked=True)

    build_index(config, session, profiles, logger=logger)

    return
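Both the plain .xml and the .xml.gz profile are handed to remove_file() even though at most one of them normally exists, so the helper has to treat a missing file as success. The real Aquilon remove_file() is not shown here; a minimal sketch of such an idempotent delete (an assumption, not the broker's actual implementation) could look like this:

import errno
import os

def remove_file_sketch(path, logger=None):
    """Hypothetical stand-in for remove_file(): delete path, ignore 'already gone'."""
    try:
        os.remove(path)
        if logger:
            logger.info("removed %s" % path)
    except OSError as e:
        # ENOENT means the file was already absent; anything else is a real error.
        if e.errno != errno.ENOENT:
            raise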
Example #2
def del_cluster(session, logger, dbcluster, config):
    cluster = str(dbcluster.name)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError(
            "%s is still in use by clusters: %s." %
            (format(dbcluster), ", ".join([c.name
                                           for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    resources = PlenaryCollection(logger=logger)
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            resources.append(Plenary.get_plenary(res))
    domain = dbcluster.branch.name
    session.delete(dbcluster)

    session.flush()

    key = cluster_plenary.get_remove_key()
    with CompileKey.merge([key, resources.get_remove_key()]):
        cluster_plenary.cleanup(domain, locked=True)
        # And we also want to remove the profile itself
        profiles = config.get("broker", "profilesdir")
        # Only one of these should exist, but it doesn't hurt
        # to try to clean up both.
        xmlfile = os.path.join(profiles, "clusters", cluster + ".xml")
        remove_file(xmlfile, logger=logger)
        xmlgzfile = xmlfile + ".gz"
        remove_file(xmlgzfile, logger=logger)
        # And the cached template created by ant
        remove_file(os.path.join(config.get("broker", "quattordir"), "objects",
                                 "clusters", cluster + TEMPLATE_EXTENSION),
                    logger=logger)
        resources.remove(locked=True)

    build_index(config, session, profiles, logger=logger)

    return
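CompileKey.merge() above combines the cluster's own remove key with the remove key covering its resources, so a single lock acquisition protects every file touched inside the with block. As a rough analogy only (not the Aquilon implementation, whose keys describe compile scopes rather than plain locks), several context-managed locks can be folded into one with-block using contextlib:

from contextlib import ExitStack, contextmanager
import threading

lock_a = threading.Lock()
lock_b = threading.Lock()

@contextmanager
def merged(*locks):
    # Hold all of the given locks for the duration of one with-block; a
    # stand-in for the role CompileKey.merge() plays above, not its mechanics.
    with ExitStack() as stack:
        for lock in locks:
            stack.enter_context(lock)
        yield

with merged(lock_a, lock_b):
    pass  # files covered by either lock may be touched here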
Example #3
    def render(self, session, logger, hostname, **arguments):
        # Removing the plenary host requires a compile lock; since we
        # are juggling two locks here, we have to be careful to avoid
        # deadlock.  We grab the plenary host early on (in order to get
        # the filenames filled in from the db info before we delete it
        # from the db).  We then hold onto those references until we
        # have completed the db cleanup, and only if all of that is
        # successful do we delete the plenary file (which doesn't
        # require re-evaluating any stale db information) after we've
        # released the delhost lock.
        delplenary = False

        # Any service bindings that we need to clean up afterwards
        bindings = PlenaryCollection(logger=logger)
        resources = PlenaryCollection(logger=logger)
        with DeleteKey("system", logger=logger) as key:
            # Check dependencies, translate into user-friendly message
            dbhost = hostname_to_host(session, hostname)
            host_plenary = Plenary.get_plenary(dbhost, logger=logger)
            domain = dbhost.branch.name
            deps = get_host_dependencies(session, dbhost)
            if (len(deps) != 0):
                deptext = "\n".join(["  %s" % d for d in deps])
                raise ArgumentError("Cannot delete host %s due to the "
                                    "following dependencies:\n%s." %
                                    (hostname, deptext))

            archetype = dbhost.archetype.name
            dbmachine = dbhost.machine
            oldinfo = DSDBRunner.snapshot_hw(dbmachine)

            ip = dbmachine.primary_ip
            fqdn = dbmachine.fqdn

            for si in dbhost.services_used:
                plenary = PlenaryServiceInstanceServer(si)
                bindings.append(plenary)
                logger.info(
                    "Before deleting host '%s', removing binding '%s'" %
                    (fqdn, si.cfg_path))

            del dbhost.services_used[:]

            if dbhost.resholder:
                for res in dbhost.resholder.resources:
                    resources.append(Plenary.get_plenary(res))

            # In case of Zebra, the IP may be configured on multiple interfaces
            for iface in dbmachine.interfaces:
                if ip in iface.addresses:
                    iface.addresses.remove(ip)

            if dbhost.cluster:
                dbcluster = dbhost.cluster
                dbcluster.hosts.remove(dbhost)
                set_committed_value(dbhost, '_cluster', None)
                dbcluster.validate()

            dbdns_rec = dbmachine.primary_name
            dbmachine.primary_name = None
            dbmachine.host = None
            session.delete(dbhost)
            delete_dns_record(dbdns_rec)
            session.flush()
            delplenary = True

            if dbmachine.vm_container:
                bindings.append(Plenary.get_plenary(dbmachine.vm_container))

            if archetype != 'aurora' and ip is not None:
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                               "DSDB" % hostname)
            if archetype == 'aurora':
                logger.client_info("WARNING: removing host %s from AQDB and "
                                   "*not* changing DSDB." % hostname)

            # Past the point of no return... commit the transaction so
            # that we can free the delete lock.
            session.commit()

        # Only if we got here with no exceptions do we clean the template
        # Trying to clean up after any errors here is really difficult
        # since the changes to dsdb have already been made.
        if (delplenary):
            key = host_plenary.get_remove_key()
            with CompileKey.merge(
                [key,
                 bindings.get_write_key(),
                 resources.get_remove_key()]) as key:
                host_plenary.cleanup(domain, locked=True)
                # And we also want to remove the profile itself
                profiles = self.config.get("broker", "profilesdir")
                # Only one of these should exist, but it doesn't hurt
                # to try to clean up both.
                xmlfile = os.path.join(profiles, fqdn + ".xml")
                remove_file(xmlfile, logger=logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=logger)
                # And the cached template created by ant
                remove_file(os.path.join(
                    self.config.get("broker", "quattordir"), "objects",
                    fqdn + TEMPLATE_EXTENSION),
                            logger=logger)
                bindings.write(locked=True)
                resources.remove(locked=True)

            build_index(self.config, session, profiles, logger=logger)

        return
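The lock ordering described in the opening comment boils down to a two-phase pattern: resolve everything that depends on database rows while the delete lock is held, commit, release that lock, and only then take the compile lock for the filesystem work, so the two locks are never held at the same time. A schematic sketch of that pattern (the names below are illustrative, not Aquilon APIs):

def two_phase_delete(delete_lock, compile_lock, resolve_paths, delete_rows, remove_files):
    # Phase 1: everything that needs live DB rows happens under the delete lock.
    with delete_lock:
        paths = resolve_paths()   # e.g. plenary filenames derived from the rows
        delete_rows()             # DB cleanup, committed before the lock is freed
    # Phase 2: filesystem cleanup happens under the compile lock only, so the
    # two locks never overlap and cannot deadlock against each other.
    with compile_lock:
        remove_files(paths)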
Example #4
    def render(self, session, logger, hostname, **arguments):
        # Removing the plenary host requires a compile lock; since we
        # are juggling two locks here, we have to be careful to avoid
        # deadlock.  We grab the plenary host early on (in order to get
        # the filenames filled in from the db info before we delete it
        # from the db).  We then hold onto those references until we
        # have completed the db cleanup, and only if all of that is
        # successful do we delete the plenary file (which doesn't
        # require re-evaluating any stale db information) after we've
        # released the delhost lock.
        delplenary = False

        # Any service bindings that we need to clean up afterwards
        bindings = PlenaryCollection(logger=logger)
        resources = PlenaryCollection(logger=logger)
        with DeleteKey("system", logger=logger) as key:
            # Check dependencies, translate into user-friendly message
            dbhost = hostname_to_host(session, hostname)
            host_plenary = Plenary.get_plenary(dbhost, logger=logger)
            domain = dbhost.branch.name
            deps = get_host_dependencies(session, dbhost)
            if (len(deps) != 0):
                deptext = "\n".join(["  %s" % d for d in deps])
                raise ArgumentError("Cannot delete host %s due to the "
                                    "following dependencies:\n%s." %
                                    (hostname, deptext))

            archetype = dbhost.archetype.name
            dbmachine = dbhost.machine
            oldinfo = DSDBRunner.snapshot_hw(dbmachine)

            ip = dbmachine.primary_ip
            fqdn = dbmachine.fqdn

            for si in dbhost.services_used:
                plenary = PlenaryServiceInstanceServer(si)
                bindings.append(plenary)
                logger.info("Before deleting host '%s', removing binding '%s'"
                            % (fqdn, si.cfg_path))

            del dbhost.services_used[:]

            if dbhost.resholder:
                for res in dbhost.resholder.resources:
                    resources.append(Plenary.get_plenary(res))

            # In case of Zebra, the IP may be configured on multiple interfaces
            for iface in dbmachine.interfaces:
                if ip in iface.addresses:
                    iface.addresses.remove(ip)

            if dbhost.cluster:
                dbcluster = dbhost.cluster
                dbcluster.hosts.remove(dbhost)
                set_committed_value(dbhost, '_cluster', None)
                dbcluster.validate()

            dbdns_rec = dbmachine.primary_name
            dbmachine.primary_name = None
            dbmachine.host = None
            session.delete(dbhost)
            delete_dns_record(dbdns_rec)
            session.flush()
            delplenary = True

            if dbmachine.vm_container:
                bindings.append(Plenary.get_plenary(dbmachine.vm_container))

            if archetype != 'aurora' and ip is not None:
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                               "DSDB" % hostname)
            if archetype == 'aurora':
                logger.client_info("WARNING: removing host %s from AQDB and "
                                   "*not* changing DSDB." % hostname)

            # Past the point of no return... commit the transaction so
            # that we can free the delete lock.
            session.commit()

        # Only if we got here with no exceptions do we clean the template
        # Trying to clean up after any errors here is really difficult
        # since the changes to dsdb have already been made.
        if (delplenary):
            key = host_plenary.get_remove_key()
            with CompileKey.merge([key, bindings.get_write_key(),
                                   resources.get_remove_key()]) as key:
                host_plenary.cleanup(domain, locked=True)
                # And we also want to remove the profile itself
                profiles = self.config.get("broker", "profilesdir")
                # Only one of these should exist, but it doesn't hurt
                # to try to clean up both.
                xmlfile = os.path.join(profiles, fqdn + ".xml")
                remove_file(xmlfile, logger=logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=logger)
                # And the cached template created by ant
                remove_file(os.path.join(self.config.get("broker",
                                                         "quattordir"),
                                         "objects", fqdn + TEMPLATE_EXTENSION),
                            logger=logger)
                bindings.write(locked=True)
                resources.remove(locked=True)

            build_index(self.config, session, profiles, logger=logger)

        return
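The set_committed_value() call above is SQLAlchemy's sqlalchemy.orm.attributes.set_committed_value(): it overwrites the value an attribute is considered to have loaded from the database without recording any attribute history, so the instance is not marked dirty and no UPDATE is emitted at flush time. A small self-contained illustration on a toy model (not the Aquilon schema):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.attributes import set_committed_value

Base = declarative_base()

class Widget(Base):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Widget(name='old'))
session.commit()

w = session.query(Widget).one()
w.name = 'changed'                          # normal assignment records history
assert w in session.dirty                   # an UPDATE would be emitted on flush

session.rollback()
w = session.query(Widget).one()
set_committed_value(w, 'name', 'patched')   # rewrite the loaded value silently
assert w not in session.dirty               # nothing will be flushed for this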
Example #5
class TemplateDomain(object):

    def __init__(self, domain, author=None, logger=LOGGER):
        self.domain = domain
        self.author = author
        self.logger = logger

    def directories(self):
        """Return a list of directories required for compiling this domain"""
        config = Config()
        dirs = []

        if self.domain.branch_type == 'domain':
            dirs.append(os.path.join(config.get("broker", "domainsdir"),
                                     self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "cfg",
                                 "domains",
                                 self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "build",
                                 "xml",
                                 self.domain.name))

        return dirs

    def outputdirs(self):
        """Returns a list of directories that should exist before compiling"""
        config = Config()
        dirs = []
        dirs.append(config.get("broker", "profilesdir"))
        # The regression tests occasionally have issues with panc
        # auto-creating this directory - not sure why.
        if self.domain.clusters:
            dirs.append(os.path.join(config.get("broker", "quattordir"),
                                     "build", "xml", self.domain.name,
                                     "clusters"))
        return dirs

    def compile(self, session, only=None, locked=False,
                panc_debug_include=None, panc_debug_exclude=None,
                cleandeps=False):
        """The build directories are checked and constructed
        if necessary, so no prior setup is required.  The compile may
        take some time (current rate is 10 hosts per second, with a
        couple of seconds of constant overhead), and the possibility
        of blocking on the compile lock.

        If the 'only' parameter is provided, then it should be a
        list or set containing the profiles that need to be compiled.

        May raise ArgumentError exception, else returns the standard
        output (as a string) of the compile
        """

        config = Config()

        if self.domain.branch_type == 'sandbox':
            if not self.author:
                raise InternalError("Missing required author to compile "
                                    "sandbox %s" % self.domain.name)
            sandboxdir = os.path.join(config.get("broker", "templatesdir"),
                                      self.author.name, self.domain.name)
            if not os.path.exists(sandboxdir):
                raise ArgumentError("Sandbox directory '%s' does not exist." %
                                    sandboxdir)
            if not self.sandbox_has_latest(config, sandboxdir):
                self.logger.warn("Sandbox %s/%s does not contain the "
                                 "latest changes from the prod domain.  If "
                                 "there are failures try "
                                 "`git fetch && git merge origin/prod`" %
                                 (self.author.name, self.domain.name))

        self.logger.info("preparing domain %s for compile" % self.domain.name)

        # Ensure that the compile directory is in a good state.
        outputdir = config.get("broker", "profilesdir")

        for d in self.directories() + self.outputdirs():
            if not os.path.exists(d):
                try:
                    self.logger.info("creating %s" % d)
                    os.makedirs(d)
                except OSError as e:
                    raise ArgumentError("Failed to mkdir %s: %s" % (d, e))

        nothing_to_do = True
        if only:
            nothing_to_do = False
        else:
            hostnames = session.query(Fqdn)
            hostnames = hostnames.join(DnsRecord, HardwareEntity, Machine, Host)
            hostnames = hostnames.filter_by(branch=self.domain,
                                            sandbox_author=self.author)

            clusternames = session.query(Cluster.name)
            clusternames = clusternames.filter_by(branch=self.domain,
                                                  sandbox_author=self.author)
            if self.author:
                # Need to restrict to the subset of the sandbox managed
                # by this author.
                only = [str(fqdn) for fqdn in hostnames]
                only.extend(["cluster/%s" % c.name for c in clusternames])
                nothing_to_do = not bool(only)
            else:
                nothing_to_do = not hostnames.count() and not clusternames.count()

        if nothing_to_do:
            return 'No hosts: nothing to do.'

        # The ant wrapper is silly and it may pick up the wrong set of .jars if
        # ANT_HOME is not set
        panc_env = {"PATH": "%s/bin:%s" % (config.get("broker", "java_home"),
                                           os_environ.get("PATH", "")),
                    "ANT_HOME": config.get("broker", "ant_home"),
                    "JAVA_HOME": config.get("broker", "java_home")}
        if config.has_option("broker", "ant_options"):
            panc_env["ANT_OPTS"] = config.get("broker", "ant_options")

        args = [config.get("broker", "ant")]
        args.append("--noconfig")
        args.append("-f")
        args.append("%s/build.xml" %
                    config.get("broker", "compiletooldir"))
        args.append("-Dbasedir=%s" % config.get("broker", "quattordir"))
        args.append("-Dpanc.jar=%s" % self.domain.compiler)
        args.append("-Dpanc.formatter=%s" %
                    config.get("panc", "formatter"))
        args.append("-Dpanc.template_extension=%s" %
                    config.get("panc", "template_extension"))
        args.append("-Ddomain=%s" % self.domain.name)
        args.append("-Ddistributed.profiles=%s" % outputdir)
        args.append("-Dpanc.batch.size=%s" %
                    config.get("panc", "batch_size"))
        args.append("-Dant-contrib.jar=%s" %
                    config.get("broker", "ant_contrib_jar"))
        args.append("-Dgzip.output=%s" %
                    config.get("panc", "gzip_output"))
        if self.domain.branch_type == 'sandbox':
            args.append("-Ddomain.templates=%s" % sandboxdir)
        if only:
            # Use -Dforce.build=true?
            # TODO: pass the list in a temp file
            args.append("-Dobject.profile=%s" % " ".join(only))
            args.append("compile.object.profile")
        else:
            # Technically this is the default, but being explicit
            # doesn't hurt.
            args.append("compile.domain.profiles")
        if panc_debug_include is not None:
            args.append("-Dpanc.debug.include=%s" % panc_debug_include)
        if panc_debug_exclude is not None:
            args.append("-Dpanc.debug.exclude=%s" % panc_debug_exclude)
        if cleandeps:
            # Cannot send a false value - the test in build.xml is for
            # whether or not the property is defined at all.
            args.append("-Dclean.dep.files=%s" % cleandeps)

        out = ''
        try:
            if not locked:
                if only and len(only) == 1:
                    key = CompileKey(domain=self.domain.name,
                                     profile=list(only)[0],
                                     logger=self.logger)
                else:
                    key = CompileKey(domain=self.domain.name,
                                     logger=self.logger)
                lock_queue.acquire(key)
            self.logger.info("starting compile")
            try:
                out = run_command(args, env=panc_env, logger=self.logger,
                                  path=config.get("broker", "quattordir"),
                                  loglevel=CLIENT_INFO)
            except ProcessException as e:
                raise ArgumentError("\n%s%s" % (e.out, e.err))
        finally:
            if not locked:
                lock_queue.release(key)

        # No need for a lock here - there is only a single file written
        # and it is swapped into place atomically.
        build_index(config, session, outputdir, logger=self.logger)
        return out
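For orientation, a hedged usage sketch rather than an actual broker call site: recompiling the profile of a single host in its domain might look like the following, where session, dbhost, and logger are assumed to be a live SQLAlchemy session, a Host object, and a logger as in the examples above. Host profiles are identified by their FQDN and cluster profiles as "cluster/<name>", matching the way the 'only' list is built inside compile().

# Hypothetical call site -- the broker's real commands may differ.
# dbhost.branch and dbhost.sandbox_author are assumed, per the query
# filters used inside compile().
td = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)
out = td.compile(session, only=[str(dbhost.machine.fqdn)])
logger.info(out)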