Example #1
    def render(self, session, logger, hostname, buildstatus, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        changed = dbhost.status.transition(dbhost, dbstatus)

        if not changed or not dbhost.archetype.is_compileable:
            return

        session.add(dbhost)
        session.flush()

        plenary = PlenaryHost(dbhost, logger=logger)
        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbhost.branch.name,
                         profile=dbhost.fqdn,
                         logger=logger)
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)
            td = TemplateDomain(dbhost.branch,
                                dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=[dbhost.fqdn], locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
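
The pattern above recurs throughout these examples: scope a CompileKey to the profile being rewritten, acquire it from lock_queue, pass locked=True so that plenary.write() and TemplateDomain.compile() do not try to take the lock again, restore the on-disk stash on failure, and release the key in a finally block. A minimal sketch of just that skeleton (names taken from the example above; the setup around them is assumed):

key = CompileKey(domain=dbhost.branch.name, profile=dbhost.fqdn,
                 logger=logger)
try:
    lock_queue.acquire(key)
    plenary.write(locked=True)       # locked=True: we already hold the key
except:
    plenary.restore_stash()          # undo the on-disk change
    raise                            # let the caller roll back the DB
finally:
    lock_queue.release(key)          # release on success and failure alike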
Example #2
File: uncluster.py  Project: ned21/aquilon
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError(
                "{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #3
def sync_domain(dbdomain, logger=LOGGER, locked=False):
    """Update templates on disk to match contents of branch in template-king.

    If this domain is tracking another, first update the branch in
    template-king with the latest from the tracking branch.  Also save
    the current (previous) commit as a potential rollback point.

    """
    config = Config()
    session = object_session(dbdomain)
    kingdir = config.get("broker", "kingdir")
    domaindir = os.path.join(config.get("broker", "domainsdir"), dbdomain.name)
    git_env = {
        "PATH":
        "%s:%s" %
        (config.get("broker", "git_path"), os.environ.get("PATH", ""))
    }
    if dbdomain.tracked_branch:
        # Might need to revisit if using this helper from rollback...
        run_command([
            "git", "push", ".",
            "%s:%s" % (dbdomain.tracked_branch.name, dbdomain.name)
        ],
                    path=kingdir,
                    env=git_env,
                    logger=logger)
    run_command(["git", "fetch", "--prune"],
                path=domaindir,
                env=git_env,
                logger=logger)
    if dbdomain.tracked_branch:
        out = run_command(["git", "rev-list", "-n", "1", "HEAD"],
                          path=domaindir,
                          env=git_env,
                          logger=logger)
        rollback_commit = out.strip()
    try:
        if not locked:
            key = CompileKey(domain=dbdomain.name, logger=logger)
            lock_queue.acquire(key)
        run_command(["git", "reset", "--hard",
                     "origin/%s" % dbdomain.name],
                    path=domaindir,
                    env=git_env,
                    logger=logger)
    finally:
        if not locked:
            lock_queue.release(key)
    if dbdomain.tracked_branch:
        dbdomain.rollback_commit = rollback_commit
        session.add(dbdomain)
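
sync_domain() only takes the compile lock itself when locked=False; a caller that already holds a key passes locked=True instead. A hedged usage sketch (the domain lookup is assumed; the signature is the one shown above):

key = CompileKey(domain=dbdomain.name, logger=logger)
try:
    lock_queue.acquire(key)
    # locked=True: sync_domain() must not re-acquire the key we hold.
    sync_domain(dbdomain, logger=logger, locked=True)
finally:
    lock_queue.release(key)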
Example #4
File: rollback.py  Project: ned21/aquilon
    def render(self, session, logger, domain, ref, lastsync, **arguments):
        dbdomain = Domain.get_unique(session, domain, compel=True)
        if not dbdomain.tracked_branch:
            # Could check dbdomain.trackers and rollback all of them...
            raise ArgumentError("rollback requires a tracking domain")

        if lastsync:
            if not dbdomain.rollback_commit:
                raise ArgumentError("domain %s does not have a rollback "
                                    "commit saved, please specify one "
                                    "explicitly." % dbdomain.name)
            ref = dbdomain.rollback_commit

        if not ref:
            raise ArgumentError("Commit reference to rollback to required.")

        kingdir = self.config.get("broker", "kingdir")
        domaindir = os.path.join(self.config.get("broker", "domainsdir"),
                                 dbdomain.name)
        out = run_git(["branch", "--contains", ref],
                      logger=logger,
                      path=kingdir)
        if not re.search(r'\b%s\b' % dbdomain.tracked_branch.name, out):
            # There's no real technical reason why this needs to be
            # true.  It just seems like a good sanity check.
            raise ArgumentError("Cannot roll back to commit: "
                                "branch %s does not contain %s" %
                                (dbdomain.tracked_branch.name, ref))

        dbdomain.tracked_branch.is_sync_valid = False
        session.add(dbdomain.tracked_branch)
        dbdomain.rollback_commit = None
        session.add(dbdomain)

        key = CompileKey(domain=dbdomain.name, logger=logger)
        try:
            lock_queue.acquire(key)
            run_git(["push", ".", "+%s:%s" % (ref, dbdomain.name)],
                    path=kingdir,
                    logger=logger)
            # Duplicated this logic from aquilon.worker.processes.sync_domain()
            run_git(["fetch"], path=domaindir, logger=logger)
            run_git(["reset", "--hard",
                     "origin/%s" % dbdomain.name],
                    path=domaindir,
                    logger=logger)
        except ProcessException, e:
            raise ArgumentError("Problem encountered updating templates "
                                "for domain %s: %s" % (dbdomain.name, e))
        finally:
            lock_queue.release(key)
Example #5
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError("{0} is not a compilable archetype "
                                "({1!s}).".format(
                                    dbcluster,
                                    dbcluster.personality.archetype))

        chooser = Chooser(dbcluster,
                          logger=logger,
                          required_only=not (keepbindings))
        chooser.set_required()
        chooser.flush_changes()
        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey.merge([
            chooser.get_write_key(),
            CompileKey(domain=dbcluster.branch.name, logger=logger)
        ])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            profile_list = add_cluster_data(dbcluster)
            profile_list.extend(chooser.changed_server_fqdns())

            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, only=profile_list, locked=True)

        except:
            chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that the DB can be rolled back appropriately.

            raise

        finally:
            lock_queue.release(key)

        return
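
CompileKey.merge() (used above and in example #9) folds several keys into one compound key, so a single acquire covers every profile the operation may touch. A small sketch, assuming merge() accepts a list of keys as in these examples; the domain and profile names are illustrative:

per_profile = CompileKey(domain="prod", profile="host1.example.com",
                         logger=logger)
whole_domain = CompileKey(domain="prod", logger=logger)
key = CompileKey.merge([per_profile, whole_domain])
try:
    lock_queue.acquire(key)          # one acquire for the combined scope
    # ... write templates and compile while holding the merged key ...
finally:
    lock_queue.release(key)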
Example #6
def main():
    logging.basicConfig(level=logging.DEBUG)

    query = session.query(Resource)

    old_paths = []

    with CompileKey():
        for res in query.all():
            PlenaryResource(res).write(locked=True)

            holder = res.holder.holder_object
            if isinstance(holder, ResourceGroup):
                holder = holder.holder.holder_object
            else:
                old_paths.append("resource/%s/%s/%s/%s" %
                                 (res.resource_type, res.holder.holder_type,
                                  res.holder.holder_name, res.name))

            try:
                # Show that something is happening...
                print "Flushing {0:l}".format(holder)

                if isinstance(holder, Host):
                    PlenaryHost(holder).write(locked=True)
                elif isinstance(holder, Cluster):
                    PlenaryCluster(holder).write(locked=True)
                else:
                    raise AquilonError("Unknown holder object: %r" % holder)
            except IncompleteError:
                pass

    plenarydir = config.get("broker", "plenarydir")
    for path in old_paths:
        try:
            os.remove(os.path.join(plenarydir, path, "config.tpl"))
        except OSError:
            pass
        try:
            os.removedirs(os.path.join(plenarydir, path))
        except OSError:
            pass
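
Note that this example uses CompileKey as a context manager rather than the explicit acquire/release pair; the with-statement form (also in examples #8, #13 and #14) releases the lock even if the body raises. A minimal sketch, assuming __enter__/__exit__ wrap the same acquire and release calls:

# A CompileKey with neither domain nor profile locks everything.
with CompileKey(logger=logger):
    for res in session.query(Resource).all():
        PlenaryResource(res).write(locked=True)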
Example #7
    def compile(self, session, logger, dbhost):
        """ compile plenary templates """
        plenary = PlenaryHost(dbhost, logger=logger)
        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbhost.branch.name,
                         profile=dbhost.fqdn,
                         logger=logger)
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)
            td = TemplateDomain(dbhost.branch,
                                dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=[dbhost.fqdn], locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #8
File: branch.py  Project: ned21/aquilon
def remove_branch(config, logger, dbbranch):
    session = object_session(dbbranch)
    deps = get_branch_dependencies(dbbranch)
    if deps:
        raise ArgumentError("\n".join(deps))

    session.delete(dbbranch)

    domain = TemplateDomain(dbbranch, logger=logger)
    # Can this fail?  Is recovery needed?
    with CompileKey(domain=dbbranch.name, logger=logger):
        for dir in domain.directories():
            remove_dir(dir, logger=logger)

    kingdir = config.get("broker", "kingdir")
    try:
        run_git(["branch", "-D", dbbranch.name], path=kingdir, logger=logger)
    except ProcessException, e:
        logger.warning(
            "Error removing branch %s from template-king, "
            "proceeding anyway: %s", dbbranch.name, e)
Example #9
    def compile(self, session, dbhost, logger, keepbindings):
        chooser = Chooser(dbhost,
                          logger=logger,
                          required_only=not (keepbindings))
        chooser.set_required()
        chooser.flush_changes()

        hosts = chooser.changed_server_fqdns()
        hosts.add(dbhost.fqdn)

        # Force a host lock as pan might overwrite the profile...
        key = chooser.get_write_key()
        for fqdn in hosts:
            key = CompileKey.merge([
                key,
                CompileKey(domain=dbhost.branch.name,
                           profile=fqdn,
                           logger=logger)
            ])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            td = TemplateDomain(dbhost.branch,
                                dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=hosts, locked=True)

        except:
            if chooser:
                chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that the DB can be rolled back appropriately.
            raise

        finally:
            lock_queue.release(key)

        return
Example #10
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #11
    def get_key(self):
        return CompileKey(domain=self.dbobj.branch.name,
                          profile=self.plenary_template,
                          logger=self.logger)
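
Plenary classes override get_key() so generic code can ask each template how wide a lock it needs: per profile here, the full lock in example #19, a whole domain in example #20. A hedged sketch of a driver consuming such a key (the acquire/release choreography is the one shown in the other examples):

key = plenary.get_key()              # scope chosen by the plenary itself
try:
    lock_queue.acquire(key)
    plenary.write(locked=True)
finally:
    lock_queue.release(key)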
Example #12
    def render(self, session, logger, hostname, domain, sandbox, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing hosts to {0:l} is not allowed."
                                .format(dbbranch))

        dbhost = hostname_to_host(session, hostname)
        dbsource = dbhost.branch
        dbsource_author = dbhost.sandbox_author
        old_branch = dbhost.branch.name

        if dbhost.cluster:
            raise ArgumentError("Cluster nodes must be managed at the "
                                "cluster level; this host is a member of "
                                "{0}.".format(dbhost.cluster))

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor
        session.add(dbhost)
        session.flush()
        plenary_host = PlenaryHost(dbhost, logger=logger)

        # We're crossing domains, need to lock everything.
        # XXX: There's a directory per domain.  Do we need subdirectories
        # for different authors for a sandbox?
        key = CompileKey(logger=logger)

        try:
            lock_queue.acquire(key)

            plenary_host.stash()
            plenary_host.cleanup(old_branch, locked=True)

            # Now we recreate the plenary to ensure that the domain is ready
            # to compile; however (esp. if there was no existing template),
            # we have to be aware that there might not yet be enough
            # information to create one.
            try:
                plenary_host.write(locked=True)
            except IncompleteError:
                # This template cannot be written; we leave it alone.
                # It would be nice to flag the state in the host?
                pass
        except:
            # This will not restore the cleaned up files.  That's OK.
            # They will be recreated as needed.
            plenary_host.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #13
File: flush.py  Project: ned21/aquilon
    def render(self, session, logger, services, personalities, machines,
               clusters, hosts, locations, resources, switches, all,
               **arguments):
        success = []
        failed = []
        written = 0

        # Caches for keeping preloaded data pinned in memory, since the SQLA
        # session holds a weak reference only
        resource_by_id = {}
        resholder_by_id = {}
        service_instances = None
        racks = None

        # Object caches that are accessed directly
        disks_by_machine = defaultdict(list)
        interfaces_by_machine = defaultdict(list)
        interfaces_by_id = {}

        if all:
            services = True
            personalities = True
            machines = True
            clusters = True
            hosts = True
            locations = True
            resources = True

        with CompileKey(logger=logger):
            logger.client_info("Loading data.")

            # When flushing clusters/hosts, loading the resource holder is
            # done as part of the query that loads those objects. But when
            # flushing resources only, we need the holder and the object it
            # belongs to.
            if resources and not clusters:
                q = session.query(ClusterResource)
                # Using joinedload('cluster') would generate an outer join
                q = q.join(Cluster)
                q = q.options(contains_eager('cluster'))
                for resholder in q:
                    resholder_by_id[resholder.id] = resholder
            if resources and not hosts:
                q = session.query(HostResource)
                # Using joinedload('host') would generate an outer join
                q = q.join(Host)
                q = q.options(contains_eager('host'))
                for resholder in q:
                    resholder_by_id[resholder.id] = resholder

            if hosts or clusters or resources:
                # Load the most common resource types. Using
                # with_polymorphic('*') on Resource would generate a huge query,
                # so do something more targeted. More resource subclasses may be
                # added later if they become common.
                preload_classes = {
                    Filesystem: [],
                    RebootSchedule: [],
                    VirtualMachine: [
                        joinedload('machine'),
                        joinedload('machine.primary_name'),
                        joinedload('machine.primary_name.fqdn')
                    ],
                    Share: [],
                }

                share_info = cache_storage_data()

                for cls, options in preload_classes.items():
                    q = session.query(cls)

                    # If only hosts or only clusters are needed, don't load
                    # resources of the other kind
                    if hosts and not clusters and not resources:
                        q = q.join(ResourceHolder)
                        q = q.options(contains_eager('holder'))
                        q = q.filter_by(holder_type='host')
                    if clusters and not hosts and not resources:
                        q = q.join(ResourceHolder)
                        q = q.filter_by(holder_type='cluster')
                        q = q.options(contains_eager('holder'))

                    if options:
                        q = q.options(*options)

                    for res in q:
                        resource_by_id[res.id] = res
                        try:
                            res.populate_share_info(share_info)
                        except AttributeError:
                            pass

            if hosts or machines:
                # Polymorphic loading cannot be applied to eager-loaded
                # attributes, so load interfaces manually.
                q = session.query(Interface)
                q = q.with_polymorphic('*')
                q = q.options(lazyload("hardware_entity"))
                for iface in q:
                    interfaces_by_machine[iface.hardware_entity_id].append(
                        iface)
                    interfaces_by_id[iface.id] = iface

                if hosts:
                    # subqueryload() and with_polymorphic() do not play nice
                    # together, so do it by hand
                    q = session.query(AddressAssignment)
                    q = q.options(joinedload("network"),
                                  joinedload("dns_records"))
                    q = q.order_by(AddressAssignment._label)
                    addrs_by_iface = defaultdict(list)
                    for addr in q:
                        addrs_by_iface[addr.interface_id].append(addr)
                    for interface_id, addrs in addrs_by_iface.items():
                        set_committed_value(interfaces_by_id[interface_id],
                                            "assignments", addrs)

                    q = session.query(Interface.id)
                    q = q.filter(~Interface.assignments.any())
                    for id in q.all():
                        set_committed_value(interfaces_by_id[id[0]],
                                            "assignments", None)

            if hosts or services:
                q = session.query(ServiceInstance)
                q = q.options(subqueryload("service"))
                service_instances = q.all()

            if machines or clusters:
                # Most machines are in racks...
                q = session.query(Rack)
                q = q.options(subqueryload("dns_maps"),
                              subqueryload("parents"))
                racks = q.all()

            if locations:
                logger.client_info("Flushing locations.")
                for dbloc in session.query(City).all():
                    try:
                        plenary = Plenary.get_plenary(dbloc, logger=logger)
                        written += plenary.write(locked=True)
                    except Exception, e:
                        failed.append("City %s failed: %s" % (dbloc, e))
                        continue

            if services:
                logger.client_info("Flushing services.")
                q = session.query(Service)
                q = q.options(subqueryload("instances"))
                for dbservice in q:
                    try:
                        plenary_info = Plenary.get_plenary(dbservice,
                                                           logger=logger)
                        written += plenary_info.write(locked=True)
                    except Exception, e:
                        failed.append("Service %s failed: %s" %
                                      (dbservice.name, e))
                        continue

                    for dbinst in dbservice.instances:
                        try:
                            plenary_info = Plenary.get_plenary(dbinst,
                                                               logger=logger)
                            written += plenary_info.write(locked=True)
                        except Exception, e:
                            failed.append("Service %s instance %s failed: %s" %
                                          (dbservice.name, dbinst.name, e))
                            continue
Example #14
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. As I can't
            # rule out that such a case may have some uses in the future,
            # the restriction is here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       compel=True)

            if dbmodel.machine_type == "nic":
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session,
                                       name=feature,
                                       feature_type=feature_type,
                                       compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException(
                    "Changing feature bindings for more "
                    "than just a personality requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

        idx = 0
        written = 0
        successful = []
        failed = []

        with CompileKey(logger=logger):
            personalities = q.all()

            for personality in personalities:
                idx += 1
                if idx % 1000 == 0:  # pragma: no cover
                    logger.client_info("Processing personality %d of %d..." %
                                       (idx, cnt))

                if not personality.archetype.is_compileable:  # pragma: no cover
                    continue

                try:
                    plenary_personality = PlenaryPersonality(personality)
                    written += plenary_personality.write(locked=True)
                    successful.append(plenary_personality)
                except IncompleteError:
                    pass
                except Exception, err:  # pragma: no cover
                    failed.append("{0} failed: {1}".format(personality, err))

            if failed:  # pragma: no cover
                for plenary in successful:
                    plenary.restore_stash()
                raise PartialError([], failed)
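
The loop above keeps a list of successfully written plenaries so that a single failure can roll back all of them before raising PartialError. A compact sketch of that all-or-nothing pattern under one lock (plenaries_to_write is a hypothetical iterable; PartialError and restore_stash() are used as in the example):

successful = []
failed = []
with CompileKey(logger=logger):
    for plenary in plenaries_to_write:
        try:
            plenary.write(locked=True)
            successful.append(plenary)
        except Exception, err:
            failed.append(str(err))
    if failed:
        # Undo every write that did succeed, then report the failures.
        for plenary in successful:
            plenary.restore_stash()
        raise PartialError([], failed)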
Example #15
File: domain.py  Project: ned21/aquilon
class TemplateDomain(object):

    def __init__(self, domain, author=None, logger=LOGGER):
        self.domain = domain
        self.author = author
        self.logger = logger

    def directories(self):
        """Return a list of directories required for compiling this domain"""
        config = Config()
        dirs = []

        if self.domain.branch_type == 'domain':
            dirs.append(os.path.join(config.get("broker", "domainsdir"),
                                     self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "cfg",
                                 "domains",
                                 self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "build",
                                 "xml",
                                 self.domain.name))

        return dirs

    def outputdirs(self):
        """Returns a list of directories that should exist before compiling"""
        config = Config()
        dirs = []
        dirs.append(config.get("broker", "profilesdir"))
        # The regression tests occasionally have issues with panc
        # auto-creating this directory - not sure why.
        if self.domain.clusters:
            dirs.append(os.path.join(config.get("broker", "quattordir"),
                                     "build", "xml", self.domain.name,
                                     "clusters"))
        return dirs

    def compile(self, session, only=None, locked=False,
                panc_debug_include=None, panc_debug_exclude=None,
                cleandeps=False):
        """The build directories are checked and constructed
        if necessary, so no prior setup is required.  The compile may
        take some time (current rate is 10 hosts per second, with a
        couple of seconds of constant overhead), and the possibility
        of blocking on the compile lock.

        If the 'only' parameter is provided, then it should be a
        list or set containing the profiles that need to be compiled.

        May raise ArgumentError exception, else returns the standard
        output (as a string) of the compile
        """

        config = Config()

        if self.domain.branch_type == 'sandbox':
            if not self.author:
                raise InternalError("Missing required author to compile "
                                    "sandbox %s" % self.domain.name)
            sandboxdir = os.path.join(config.get("broker", "templatesdir"),
                                      self.author.name, self.domain.name)
            if not os.path.exists(sandboxdir):
                raise ArgumentError("Sandbox directory '%s' does not exist." %
                                    sandboxdir)
            if not self.sandbox_has_latest(config, sandboxdir):
                self.logger.warn("Sandbox %s/%s does not contain the "
                                 "latest changes from the prod domain.  If "
                                 "there are failures try "
                                 "`git fetch && git merge origin/prod`" %
                                 (self.author.name, self.domain.name))

        self.logger.info("preparing domain %s for compile" % self.domain.name)

        # Ensure that the compile directory is in a good state.
        outputdir = config.get("broker", "profilesdir")

        for d in self.directories() + self.outputdirs():
            if not os.path.exists(d):
                try:
                    self.logger.info("creating %s" % d)
                    os.makedirs(d)
                except OSError, e:
                    raise ArgumentError("Failed to mkdir %s: %s" % (d, e))

        nothing_to_do = True
        if only:
            nothing_to_do = False
        else:
            hostnames = session.query(Fqdn)
            hostnames = hostnames.join(DnsRecord, HardwareEntity, Machine, Host)
            hostnames = hostnames.filter_by(branch=self.domain,
                                            sandbox_author=self.author)

            clusternames = session.query(Cluster.name)
            clusternames = clusternames.filter_by(branch=self.domain,
                                                  sandbox_author=self.author)
            if self.author:
                # Need to restrict to the subset of the sandbox managed
                # by this author.
                only = [str(fqdn) for fqdn in hostnames]
                only.extend(["cluster/%s" % c.name for c in clusternames])
                nothing_to_do = not bool(only)
            else:
                nothing_to_do = not hostnames.count() and not clusternames.count()

        if nothing_to_do:
            return 'No hosts: nothing to do.'

        # The ant wrapper is silly and it may pick up the wrong set of .jars if
        # ANT_HOME is not set
        panc_env = {"PATH": "%s/bin:%s" % (config.get("broker", "java_home"),
                                           os_environ.get("PATH", "")),
                    "ANT_HOME": config.get("broker", "ant_home"),
                    "JAVA_HOME": config.get("broker", "java_home")}
        if config.has_option("broker", "ant_options"):
            panc_env["ANT_OPTS"] = config.get("broker", "ant_options")

        args = [config.get("broker", "ant")]
        args.append("--noconfig")
        args.append("-f")
        args.append("%s/build.xml" %
                    config.get("broker", "compiletooldir"))
        args.append("-Dbasedir=%s" % config.get("broker", "quattordir"))
        args.append("-Dpanc.jar=%s" % self.domain.compiler)
        args.append("-Dpanc.formatter=%s" %
                    config.get("panc", "formatter"))
        args.append("-Dpanc.template_extension=%s" %
                    config.get("panc", "template_extension"))
        args.append("-Ddomain=%s" % self.domain.name)
        args.append("-Ddistributed.profiles=%s" % outputdir)
        args.append("-Dpanc.batch.size=%s" %
                    config.get("panc", "batch_size"))
        args.append("-Dant-contrib.jar=%s" %
                    config.get("broker", "ant_contrib_jar"))
        args.append("-Dgzip.output=%s" %
                    config.get("panc", "gzip_output"))
        if self.domain.branch_type == 'sandbox':
            args.append("-Ddomain.templates=%s" % sandboxdir)
        if only:
            # Use -Dforce.build=true?
            # TODO: pass the list in a temp file
            args.append("-Dobject.profile=%s" % " ".join(only))
            args.append("compile.object.profile")
        else:
            # Technically this is the default, but being explicit
            # doesn't hurt.
            args.append("compile.domain.profiles")
        if panc_debug_include is not None:
            args.append("-Dpanc.debug.include=%s" % panc_debug_include)
        if panc_debug_exclude is not None:
            args.append("-Dpanc.debug.exclude=%s" % panc_debug_exclude)
        if cleandeps:
            # Cannot send a false value - the test in build.xml is for
            # whether or not the property is defined at all.
            args.append("-Dclean.dep.files=%s" % cleandeps)

        out = ''
        try:
            if not locked:
                if only and len(only) == 1:
                    key = CompileKey(domain=self.domain.name,
                                     profile=list(only)[0],
                                     logger=self.logger)
                else:
                    key = CompileKey(domain=self.domain.name,
                                     logger=self.logger)
                lock_queue.acquire(key)
            self.logger.info("starting compile")
            try:
                out = run_command(args, env=panc_env, logger=self.logger,
                                  path=config.get("broker", "quattordir"),
                                  loglevel=CLIENT_INFO)
            except ProcessException, e:
                raise ArgumentError("\n%s%s" % (e.out, e.err))
        finally:
            if not locked:
                lock_queue.release(key)

        # No need for a lock here - there is only a single file written
        # and it is swapped into place atomically.
        build_index(config, session, outputdir, logger=self.logger)
        return out
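
A hedged usage sketch of TemplateDomain.compile() (the host lookup is assumed): with the default locked=False it builds and acquires its own CompileKey, per-profile when 'only' names a single profile, otherwise domain-wide:

td = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)
# locked=False (the default): compile() takes a per-profile key itself
# because 'only' contains exactly one profile.
out = td.compile(session, only=[dbhost.fqdn])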
Example #16
    def render(self, session, logger, list, domain, sandbox, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Managing hosts to {0:l} is not allowed.".format(dbbranch))
        check_hostlist_size(self.command, self.config, list)
        dbhosts = hostlist_to_hosts(session, list)

        failed = []
        branches = defaultdict(ListType)
        authors = defaultdict(ListType)
        for dbhost in dbhosts:
            branches[dbhost.branch].append(dbhost)
            authors[dbhost.sandbox_author].append(dbhost)

            # check if any host in the list is a cluster node
            if dbhost.cluster:
                failed.append(
                    "Cluster nodes must be managed at the "
                    "cluster level; {0} is a member of {1:l}.".format(
                        dbhost.fqdn, dbhost.cluster))

        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))

        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = [
                "{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                for branch in keys
            ]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))

        # check if all hosts are from the same sandbox author
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = [
                "{0:d} hosts with sandbox author {1:l}".format(
                    len(authors[author]), author.name) for author in keys
            ]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))

        # since we have already checked if all hosts in list are within the
        # same branch, we only need one dbsource to validate the branch
        dbhost = dbhosts[0]
        dbsource = dbhost.branch
        dbsource_author = dbhost.sandbox_author
        if not force:
            validate_branch_commits(dbsource, dbsource_author, dbbranch,
                                    dbauthor, logger, self.config)

        old_branch = branches.keys()[0].name

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            dbhost.branch = dbbranch
            dbhost.sandbox_author = dbauthor
            plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # We're crossing domains, need to lock everything.
        key = CompileKey(logger=logger)
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.cleanup(old_branch, locked=True)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #17
class CommandReconfigureList(BrokerCommand):

    required_parameters = ["list"]

    def render(self, session, logger, list, archetype, personality,
               buildstatus, osname, osversion, **arguments):
        check_hostlist_size(self.command, self.config, list)
        dbhosts = hostlist_to_hosts(session, list)

        self.reconfigure_list(session, logger, dbhosts, archetype, personality,
                              buildstatus, osname, osversion, **arguments)

    def reconfigure_list(self, session, logger, dbhosts, archetype,
                         personality, buildstatus, osname, osversion,
                         **arguments):
        failed = []
        # Check all the parameters up front.
        # Some of these could be more intelligent about defaults
        # (either by checking for unique entries or relying on the list)
        # - starting simple.
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
            # TODO: Once OS is a first class object this block needs
            # to check that either OS is also being reset or that the
            # OS is valid for the new archetype.
        else:
            dbarchetype = None
        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbarchetype,
                                                   compel=True)
        if osname and not osversion:
            raise ArgumentError("Please specify --osversion for OS %s." %
                                osname)
        if osversion:
            if not osname:
                raise ArgumentError("Please specify --osname to use with "
                                    "OS version %s." % osversion)
            # Linux model names are the same under aurora and aquilon, so
            # allowing --archetype to be omitted would not be useful
            if not archetype:
                raise ArgumentError("Please specify --archetype for OS "
                                    "%s, version %s." % (osname, osversion))
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
        else:
            dbos = None

        if buildstatus:
            dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                                compel=True)

        # Take a shortcut if there's nothing to do, but only after all the other
        # parameters have been checked
        if not dbhosts:
            return

        personalities = {}
        branches = {}
        authors = {}
        # Do any final cross-list or dependency checks before entering
        # the Chooser loop.
        for dbhost in dbhosts:
            if dbhost.branch in branches:
                branches[dbhost.branch].append(dbhost)
            else:
                branches[dbhost.branch] = [dbhost]
            if dbhost.sandbox_author in authors:
                authors[dbhost.sandbox_author].append(dbhost)
            else:
                authors[dbhost.sandbox_author] = [dbhost]

            if dbos and not dbarchetype and dbhost.archetype != dbos.archetype:
                failed.append("{0}: Cannot change operating system because it "
                              "needs {1:l} instead of "
                              "{2:l}.".format(dbhost.fqdn, dbhost.archetype,
                                              dbos.archetype))
            if dbarchetype and not dbos and \
               dbhost.operating_system.archetype != dbarchetype:
                failed.append("{0}: Cannot change archetype because {1:l} needs "
                              "{2:l}.".format(dbhost.fqdn, dbhost.operating_system,
                                              dbhost.operating_system.archetype))
            if (personality and dbhost.cluster and
                len(dbhost.cluster.allowed_personalities) > 0 and
                dbpersonality not in dbhost.cluster.allowed_personalities):
                allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                           dbhost.cluster.allowed_personalities]
                failed.append("{0}: The {1:l} is not allowed by {2}.  "
                              "Specify one of {3}.".format(
                                  dbhost.fqdn, dbpersonality,
                                  dbhost.cluster, allowed))
            if personality:
                personalities[dbhost.fqdn] = dbpersonality
            elif archetype:
                personalities[dbhost.fqdn] = Personality.get_unique(session,
                        name=dbhost.personality.name, archetype=dbarchetype)
                if not personalities[dbhost.fqdn]:
                    failed.append("%s: No personality %s found for archetype "
                                  "%s." %
                                  (dbhost.fqdn, dbhost.personality.name,
                                   dbarchetype.name))

        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                     for branch in keys]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        dbbranch = branches.keys()[0]
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = ["%s hosts with sandbox author %s" %
                     (len(authors[author]), author.name) for author in keys]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))
        dbauthor = authors.keys()[0]

        failed = []
        choosers = []
        for dbhost in dbhosts:
            if dbhost.fqdn in personalities:
                dbhost.personality = personalities[dbhost.fqdn]
                session.add(dbhost)
            if osversion:
                dbhost.operating_system = dbos
                session.add(dbhost)
            if buildstatus:
                dbhost.status.transition(dbhost, dbstatus)
                session.add(dbhost)
        session.flush()

        logger.client_info("Verifying service bindings.")
        for dbhost in dbhosts:
            if dbhost.archetype.is_compileable:
                if arguments.get("keepbindings", None):
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=False)
                else:
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=True)
                choosers.append(chooser)
                try:
                    chooser.set_required()
                except ArgumentError, e:
                    failed.append(str(e))
        if failed:
            raise ArgumentError("The following hosts failed service "
                                "binding:\n%s" % "\n".join(failed))

        session.flush()
        logger.info("reconfigure_hostlist processing: %s" %
                    ",".join([str(dbhost.fqdn) for dbhost in dbhosts]))

        if not choosers:
            return

        # Optimize so that duplicate service plenaries are not re-written
        templates = set()
        for chooser in choosers:
            # chooser.plenaries is a PlenaryCollection - this flattens
            # that top level.
            templates.update(chooser.plenaries.plenaries)

        # Don't bother locking until every possible check before the
        # actual writing and compile is done.  This will allow for fast
        # turnaround on errors (no need to wait for a lock if there's
        # a missing service map entry or something).
        # The lock must be over at least the domain, but could be over
        # all if (for example) service plenaries need to change.
        key = CompileKey.merge([p.get_write_key() for p in templates] +
                               [CompileKey(domain=dbbranch.name,
                                           logger=logger)])
        try:
            lock_queue.acquire(key)
            logger.client_info("Writing %s plenary templates.", len(templates))
            # FIXME: if one of the templates raises IncompleteError (e.g.
            # a host should be in a cluster, but it is not), then we return an
            # InternalError to the client, which is not nice
            for template in templates:
                logger.debug("Writing %s", template)
                template.write(locked=True)
            td = TemplateDomain(dbbranch, dbauthor, logger=logger)
            td.compile(session, locked=True)
        except:
            logger.client_info("Restoring plenary templates.")
            for template in templates:
                logger.debug("Restoring %s", template)
                template.restore_stash()
            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that the DB can be rolled back appropriately.
            raise
        finally:
            lock_queue.release(key)

        return
Example #18
    def render(self, session, logger, domain, sandbox, cluster, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Managing clusters to {0:l} is not allowed.".format(dbbranch))

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbsource = dbcluster.branch
        dbsource_author = dbcluster.sandbox_author
        old_branch = dbcluster.branch.name

        if not force:
            validate_branch_commits(dbsource, dbsource_author, dbbranch,
                                    dbauthor, logger, self.config)

        if dbcluster.metacluster:
            raise ArgumentError(
                "{0.name} is member of metacluster {1.name}, "
                "it must be managed at metacluster level.".format(
                    dbcluster, dbcluster.metacluster))

        plenaries = PlenaryCollection(logger=logger)

        # manage at metacluster level
        if dbcluster.cluster_type == 'meta':
            clusters = dbcluster.members

            dbcluster.branch = dbbranch
            dbcluster.sandbox_author = dbauthor
            session.add(dbcluster)
            plenaries.append(Plenary.get_plenary(dbcluster))
        else:
            clusters = [dbcluster]

        for cluster in clusters:
            # manage at cluster level
            # Need to set the new branch *before* creating the plenary objects.
            cluster.branch = dbbranch
            cluster.sandbox_author = dbauthor
            session.add(cluster)
            plenaries.append(Plenary.get_plenary(cluster))
            for dbhost in cluster.hosts:
                dbhost.branch = dbbranch
                dbhost.sandbox_author = dbauthor
                session.add(dbhost)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # We're crossing domains, need to lock everything.
        key = CompileKey(logger=logger)
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.cleanup(old_branch, locked=True)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #19
File: base.py  Project: ned21/aquilon
    def get_key(self):
        """Base implementation assumes a full compile lock."""
        return CompileKey(logger=self.logger)
Example #20
File: cluster.py  Project: ned21/aquilon
    def get_key(self):
        # This takes a domain lock because it could affect all clients...
        return CompileKey(domain=self.dbobj.branch.name, logger=self.logger)