Example #1
def del_resource(session, logger, dbresource, dsdb_callback=None, **arguments):
    holder = dbresource.holder
    holder_plenary = Plenary.get_plenary(holder.holder_object, logger=logger)
    remove_plenary = Plenary.get_plenary(dbresource, logger=logger)

    domain = holder.holder_object.branch.name

    holder.resources.remove(dbresource)
    session.flush()

    key = CompileKey.merge(
        [remove_plenary.get_remove_key(),
         holder_plenary.get_write_key()])
    try:
        lock_queue.acquire(key)
        remove_plenary.stash()
        try:
            holder_plenary.write(locked=True)
        except IncompleteError:
            holder_plenary.cleanup(domain, locked=True)

        remove_plenary.remove(locked=True)

        if dsdb_callback:
            dsdb_callback(session, logger, holder, dbresource, **arguments)
    except:
        holder_plenary.restore_stash()
        remove_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
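
Every example on this page follows the same locking discipline: build a CompileKey (merging the keys of all plenary templates that will be touched), acquire it from lock_queue, stash the current on-disk state, write or remove the templates, restore the stash on any failure so the files match the rolled-back database, and always release the key. A minimal sketch of that pattern, using hypothetical plenary objects rather than any particular command:

def write_plenaries_locked(plenary_a, plenary_b):
    # plenary_a and plenary_b are hypothetical Plenary objects; the
    # CompileKey / lock_queue API is the one shown in these examples.
    key = CompileKey.merge([plenary_a.get_write_key(),
                            plenary_b.get_write_key()])
    try:
        lock_queue.acquire(key)
        plenary_a.stash()
        plenary_b.stash()
        plenary_a.write(locked=True)
        plenary_b.write(locked=True)
    except:
        # Put the on-disk templates back so they match the DB after rollback.
        plenary_a.restore_stash()
        plenary_b.restore_stash()
        raise
    finally:
        lock_queue.release(key)
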
Example #2
File: host.py Project: ned21/aquilon
 def get_key(self):
     # Going with self.name instead of self.plenary_template_name seems like
     # the right decision here - easier to predict behavior when meshing
     # with other CompileKey generators like PlenaryMachine.
     return CompileKey(domain=self.branch.name,
                       profile=self.name,
                       logger=self.logger)
Example #3
    def render(self, session, logger, hostname, buildstatus, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        changed = dbhost.status.transition(dbhost, dbstatus)

        if not changed or not dbhost.archetype.is_compileable:
            return

        session.add(dbhost)
        session.flush()

        plenary = PlenaryHost(dbhost, logger=logger)
        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbhost.branch.name,
                         profile=dbhost.fqdn,
                         logger=logger)
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)
            td = TemplateDomain(dbhost.branch,
                                dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=[dbhost.fqdn], locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
        except:
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #4
    def render(self, session, logger, machine, disk, controller, size, all,
               dbuser, **arguments):

        # Handle deprecated arguments
        if arguments.get("type", None):
            self.deprecated_option("type",
                                   "Please use --controller instead.",
                                   logger=logger,
                                   **arguments)
            controller = arguments["type"]
        if arguments.get("capacity", None):
            self.deprecated_option("capacity",
                                   "Please use --size instead.",
                                   logger=logger,
                                   **arguments)
            size = arguments["capacity"]

        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." %
                                    (controller, ", ".join(controller_types)))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
        key = plenary_machine.get_write_key()
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            if dbcontainer:
                plenary_container.write(locked=True)
            plenary_machine.write(locked=True)
        except:
            plenary_machine.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #5
File: make.py Project: jrha/aquilon
    def compile(self, session, dbhost, logger, keepbindings):
        chooser = Chooser(dbhost, logger=logger,
                          required_only=not(keepbindings))
        chooser.set_required()
        chooser.flush_changes()

        hosts = chooser.changed_server_fqdns()
        hosts.add(dbhost.fqdn)

        # Force a host lock as pan might overwrite the profile...
        key = chooser.get_write_key()
        for fqdn in hosts:
            key = CompileKey.merge([key, CompileKey(domain=dbhost.branch.name,
                                                    profile=fqdn,
                                                    logger=logger)])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=hosts, locked=True)

        except:
            if chooser:
                chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.
            raise

        finally:
            lock_queue.release(key)

        return
Example #6
File: cluster.py Project: piojo/aquilon
    def get_key(self, exclusive=True):
        keylist = [super(PlenaryClusterObject, self).get_key(exclusive=exclusive)]

        if not inspect(self.dbobj).deleted:
            keylist.append(PlenaryKey(exclusive=False,
                                      personality=self.dbobj.personality,
                                      logger=self.logger))
            for si in self.dbobj.service_bindings:
                keylist.append(PlenaryKey(exclusive=False, service_instance=si,
                                          logger=self.logger))
            for srv in self.dbobj.services_provided:
                keylist.append(PlenaryKey(exclusive=False,
                                          service_instance=srv.service_instance,
                                          logger=self.logger))

            if self.dbobj.metacluster:
                keylist.append(PlenaryKey(exclusive=False,
                                          cluster_member=self.dbobj.metacluster,
                                          logger=self.logger))
            if isinstance(self.dbobj, EsxCluster) and self.dbobj.network_device:
                # TODO: this should become a CompileKey if we start generating
                # profiles for switches
                keylist.append(PlenaryKey(exclusive=False,
                                          network_device=self.dbobj.network_device,
                                          logger=self.logger))
        return CompileKey.merge(keylist)
Example #7
File: resources.py Project: jrha/aquilon
def add_resource(session, logger, holder, dbresource, dsdb_callback=None,
                 **arguments):
    if dbresource not in holder.resources:
        holder.resources.append(dbresource)

    holder_plenary = Plenary.get_plenary(holder.holder_object, logger=logger)
    res_plenary = Plenary.get_plenary(dbresource, logger=logger)

    domain = holder.holder_object.branch.name

    session.flush()

    key = CompileKey.merge([res_plenary.get_write_key(),
                            holder_plenary.get_write_key()])
    try:
        lock_queue.acquire(key)
        res_plenary.write(locked=True)
        try:
            holder_plenary.write(locked=True)
        except IncompleteError:
            holder_plenary.cleanup(domain, locked=True)

        if dsdb_callback:
            dsdb_callback(session, logger, dbresource, **arguments)

    except:
        res_plenary.restore_stash()
        holder_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
Example #8
    def render(self, session, logger, switch, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        # Check and complain if the switch has any other addresses than its
        # primary address
        addrs = []
        for addr in dbswitch.all_addresses():
            if addr.ip == dbswitch.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format
                                (dbswitch, ", ".join(addrs)))

        dbdns_rec = dbswitch.primary_name
        ip = dbswitch.primary_ip
        old_fqdn = str(dbswitch.primary_name.fqdn)
        old_comments = dbswitch.comments
        session.delete(dbswitch)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any switch ports hanging off this switch should be deleted with
        # the cascade delete of the switch.

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        # clusters connected to this switch
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbswitch.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        key = CompileKey.merge([switch_plenary.get_remove_key(),
                                plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            switch_plenary.stash()
            plenaries.write(locked=True)
            switch_plenary.remove(locked=True)

            if ip:
                dsdb_runner = DSDBRunner(logger=logger)
                # FIXME: restore interface name/MAC on rollback
                dsdb_runner.delete_host_details(old_fqdn, ip, comments=old_comments)
                dsdb_runner.commit_or_rollback("Could not remove switch from DSDB")
            return

        except:
            plenaries.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #9
File: del_disk.py Project: jrha/aquilon
    def render(self, session, logger, machine, disk, controller, size, all,
               dbuser, **arguments):

        # Handle deprecated arguments
        if arguments.get("type", None):
            self.deprecated_option("type", "Please use --controller instead.",
                                   logger=logger, **arguments)
            controller = arguments["type"]
        if arguments.get("capacity", None):
            self.deprecated_option("capacity", "Please use --size instead.",
                                   logger=logger, **arguments)
            size = arguments["capacity"]

        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." % (controller,
                                                     ", ".join(controller_types)
                                                     ))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
        key = plenary_machine.get_write_key()
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            if dbcontainer:
                plenary_container.write(locked=True)
            plenary_machine.write(locked=True)
        except:
            plenary_machine.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #10
File: compile.py Project: piojo/aquilon
    def render(self, session, logger, domain, sandbox,
               pancinclude, pancexclude, pancdebug, cleandeps,
               **arguments):
        (dbdomain, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        # Grab a shared lock on personalities and services used by the domain.
        # Object templates (hosts, clusters) are protected by the domain lock.
        plenaries = PlenaryCollection(logger=logger)

        q1 = session.query(Personality)
        q1 = q1.join(Host)
        q1 = q1.filter(and_(Host.branch == dbdomain,
                            Host.sandbox_author == dbauthor))
        q1 = q1.reset_joinpoint()
        q1 = q1.options(joinedload('paramholder'),
                        subqueryload('paramholder.parameters'))

        q2 = session.query(Personality)
        q2 = q2.join(Cluster)
        q2 = q2.filter(and_(Cluster.branch == dbdomain,
                            Cluster.sandbox_author == dbauthor))
        q2 = q2.reset_joinpoint()
        q2 = q2.options(joinedload('paramholder'),
                        subqueryload('paramholder.parameters'))

        for dbpers in q1.union(q2):
            plenaries.append(Plenary.get_plenary(dbpers))

        q1 = session.query(ServiceInstance)
        q1 = q1.join(ServiceInstance.clients)
        q1 = q1.filter(and_(Host.branch == dbdomain,
                            Host.sandbox_author == dbauthor))

        q2 = session.query(ServiceInstance)
        q2 = q2.join(ServiceInstance.cluster_clients)
        q2 = q2.filter(and_(Cluster.branch == dbdomain,
                            Cluster.sandbox_author == dbauthor))

        for si in q1.union(q2):
            plenaries.append(Plenary.get_plenary(si))

        if pancdebug:
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions'
        dom = TemplateDomain(dbdomain, dbauthor, logger=logger)
        with CompileKey.merge([CompileKey(domain=dbdomain.name, logger=logger),
                               plenaries.get_key(exclusive=False)]):
            dom.compile(session,
                        panc_debug_include=pancinclude,
                        panc_debug_exclude=pancexclude,
                        cleandeps=cleandeps,
                        locked=True)
        return
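
Note that this example acquires the merged key with a with statement rather than pairing lock_queue.acquire() with release() in a finally block; several of the later examples do the same. A minimal sketch of that context-manager form, again with hypothetical plenary objects:

# Hypothetical plenaries; the context manager releases the lock on exit,
# but restoring the stash on error is still the caller's job.
with CompileKey.merge([plenary_a.get_key(),
                       plenary_b.get_key(exclusive=False)]):
    plenary_a.stash()
    plenary_b.stash()
    try:
        plenary_a.write(locked=True)
        plenary_b.write(locked=True)
    except:
        plenary_a.restore_stash()
        plenary_b.restore_stash()
        raise
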
Example #11
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError("{0} is not a compilable archetype "
                                "({1!s}).".format(
                                    dbcluster,
                                    dbcluster.personality.archetype))

        chooser = Chooser(dbcluster,
                          logger=logger,
                          required_only=not (keepbindings))
        chooser.set_required()
        chooser.flush_changes()
        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey.merge([
            chooser.get_write_key(),
            CompileKey(domain=dbcluster.branch.name, logger=logger)
        ])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            profile_list = add_cluster_data(dbcluster)
            profile_list.extend(chooser.changed_server_fqdns())

            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, only=profile_list, locked=True)

        except:
            chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.

            raise

        finally:
            lock_queue.release(key)

        return
Example #12
    def get_key(self, exclusive=True):
        keylist = [super(PlenaryMetaClusterObject, self).get_key(exclusive=exclusive)]

        if not inspect(self.dbobj).deleted:
            keylist.append(PlenaryKey(exclusive=False,
                                      personality=self.dbobj.personality,
                                      logger=self.logger))
            for si in self.dbobj.service_bindings:
                keylist.append(PlenaryKey(exclusive=False, service_instance=si,
                                          logger=self.logger))
        return CompileKey.merge(keylist)
Example #13
    def render(self, session, logger, network_device, **arguments):
        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)

        # Check and complain if the network device has any other addresses than its
        # primary address
        addrs = []
        for addr in dbnetdev.all_addresses():
            if addr.ip == dbnetdev.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format
                                (dbnetdev, ", ".join(addrs)))

        dbdns_rec = dbnetdev.primary_name
        ip = dbnetdev.primary_ip
        old_fqdn = str(dbnetdev.primary_name.fqdn)
        old_comments = dbnetdev.comments
        session.delete(dbnetdev)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any network device ports hanging off this network device should be deleted with
        # the cascade delete of the network device.

        netdev_plenary = Plenary.get_plenary(dbnetdev, logger=logger)

        # clusters connected to this network device
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbnetdev.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        with CompileKey.merge([netdev_plenary.get_key(), plenaries.get_key()]):
            netdev_plenary.stash()
            try:
                plenaries.write(locked=True)
                netdev_plenary.remove(locked=True)

                if ip:
                    dsdb_runner = DSDBRunner(logger=logger)
                    # FIXME: restore interface name/MAC on rollback
                    dsdb_runner.delete_host_details(old_fqdn, ip,
                                                    comments=old_comments)
                    dsdb_runner.commit_or_rollback("Could not remove network device "
                                                   "from DSDB")
            except:
                plenaries.restore_stash()
                netdev_plenary.restore_stash()
                raise
        return
Example #14
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError(
                "{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #15
    def compile(self, session, dbhost, logger, keepbindings):
        chooser = Chooser(dbhost,
                          logger=logger,
                          required_only=not (keepbindings))
        chooser.set_required()
        chooser.flush_changes()

        hosts = chooser.changed_server_fqdns()
        hosts.add(dbhost.fqdn)

        # Force a host lock as pan might overwrite the profile...
        key = chooser.get_write_key()
        for fqdn in hosts:
            key = CompileKey.merge([
                key,
                CompileKey(domain=dbhost.branch.name,
                           profile=fqdn,
                           logger=logger)
            ])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            td = TemplateDomain(dbhost.branch,
                                dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=hosts, locked=True)

        except:
            if chooser:
                chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.
            raise

        finally:
            lock_queue.release(key)

        return
Example #16
    def render(self, session, logger, list, domain, sandbox, force,
               **arguments):
        dbbranch, dbauthor = get_branch_and_author(session, logger,
                                                   domain=domain,
                                                   sandbox=sandbox, compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing hosts to {0:l} is not allowed."
                                .format(dbbranch))
        check_hostlist_size(self.command, self.config, list)

        dbhosts = hostlist_to_hosts(session, list)

        failed = []

        dbsource, dbsource_author = validate_branch_author(dbhosts)
        for dbhost in dbhosts:
            # check if any host in the list is a cluster node
            if dbhost.cluster:
                failed.append("Cluster nodes must be managed at the "
                              "cluster level; {0} is a member of {1:l}."
                              .format(dbhost.fqdn, dbhost.cluster))

        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            plenaries.append(Plenary.get_plenary(dbhost))

            dbhost.branch = dbbranch
            dbhost.sandbox_author = dbauthor

        session.flush()

        # We're crossing domains, need to lock everything.
        with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                               CompileKey(domain=dbbranch.name, logger=logger)]):
            plenaries.stash()
            try:
                plenaries.write(locked=True)
            except:
                plenaries.restore_stash()
                raise

        return
Example #17
def sync_domain(dbdomain, logger=LOGGER, locked=False):
    """Update templates on disk to match contents of branch in template-king.

    If this domain is tracking another, first update the branch in
    template-king with the latest from the tracking branch.  Also save
    the current (previous) commit as a potential rollback point.

    """
    config = Config()
    session = object_session(dbdomain)
    kingdir = config.get("broker", "kingdir")
    domaindir = os.path.join(config.get("broker", "domainsdir"), dbdomain.name)
    git_env = {
        "PATH":
        "%s:%s" %
        (config.get("broker", "git_path"), os.environ.get("PATH", ""))
    }
    if dbdomain.tracked_branch:
        # Might need to revisit if using this helper from rollback...
        run_command([
            "git", "push", ".",
            "%s:%s" % (dbdomain.tracked_branch.name, dbdomain.name)
        ],
                    path=kingdir,
                    env=git_env,
                    logger=logger)
    run_command(["git", "fetch", "--prune"],
                path=domaindir,
                env=git_env,
                logger=logger)
    if dbdomain.tracked_branch:
        out = run_command(["git", "rev-list", "-n", "1", "HEAD"],
                          path=domaindir,
                          env=git_env,
                          logger=logger)
        rollback_commit = out.strip()
    try:
        if not locked:
            key = CompileKey(domain=dbdomain.name, logger=logger)
            lock_queue.acquire(key)
        run_command(["git", "reset", "--hard",
                     "origin/%s" % dbdomain.name],
                    path=domaindir,
                    env=git_env,
                    logger=logger)
    finally:
        if not locked:
            lock_queue.release(key)
    if dbdomain.tracked_branch:
        dbdomain.rollback_commit = rollback_commit
        session.add(dbdomain)
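
sync_domain() only builds and acquires the CompileKey itself when called with locked=False; a caller that already holds the domain lock passes locked=True instead. A hedged sketch of such a caller, assuming the same lock_queue / CompileKey API as the other examples:

# Hypothetical caller that already holds the domain's compile lock.
key = CompileKey(domain=dbdomain.name, logger=logger)
try:
    lock_queue.acquire(key)
    sync_domain(dbdomain, logger=logger, locked=True)
finally:
    lock_queue.release(key)
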
Example #18
File: rollback.py Project: ned21/aquilon
    def render(self, session, logger, domain, ref, lastsync, **arguments):
        dbdomain = Domain.get_unique(session, domain, compel=True)
        if not dbdomain.tracked_branch:
            # Could check dbdomain.trackers and rollback all of them...
            raise ArgumentError("rollback requires a tracking domain")

        if lastsync:
            if not dbdomain.rollback_commit:
                raise ArgumentError("domain %s does not have a rollback "
                                    "commit saved, please specify one "
                                    "explicitly." % dbdomain.name)
            ref = dbdomain.rollback_commit

        if not ref:
            raise ArgumentError("Commit reference to rollback to required.")

        kingdir = self.config.get("broker", "kingdir")
        domaindir = os.path.join(self.config.get("broker", "domainsdir"),
                                 dbdomain.name)
        out = run_git(["branch", "--contains", ref],
                      logger=logger,
                      path=kingdir)
        if not re.search(r'\b%s\b' % dbdomain.tracked_branch.name, out):
            # There's no real technical reason why this needs to be
            # true.  It just seems like a good sanity check.
            raise ArgumentError("Cannot roll back to commit: "
                                "branch %s does not contain %s" %
                                (dbdomain.tracked_branch.name, ref))

        dbdomain.tracked_branch.is_sync_valid = False
        session.add(dbdomain.tracked_branch)
        dbdomain.rollback_commit = None
        session.add(dbdomain)

        key = CompileKey(domain=dbdomain.name, logger=logger)
        try:
            lock_queue.acquire(key)
            run_git(["push", ".", "+%s:%s" % (ref, dbdomain.name)],
                    path=kingdir,
                    logger=logger)
            # Duplicated this logic from aquilon.worker.processes.sync_domain()
            run_git(["fetch"], path=domaindir, logger=logger)
            run_git(["reset", "--hard",
                     "origin/%s" % dbdomain.name],
                    path=domaindir,
                    logger=logger)
        except ProcessException, e:
            raise ArgumentError("Problem encountered updating templates for "
                                "domain %s: %s" % (dbdomain.name, e))
Example #19
    def render(self, session, logger, hostname, domain, sandbox, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing hosts to {0:l} is not allowed."
                                .format(dbbranch))

        dbhost = hostname_to_host(session, hostname)
        dbsource = dbhost.branch
        dbsource_author = dbhost.sandbox_author

        if dbhost.cluster:
            raise ArgumentError("Cluster nodes must be managed at the "
                                "cluster level; this host is a member of "
                                "{0}.".format(dbhost.cluster))

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        plenary_host = Plenary.get_plenary(dbhost, logger=logger)

        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor

        session.flush()

        # We're crossing domains, need to lock everything.
        # XXX: There's a directory per domain.  Do we need subdirectories
        # for different authors for a sandbox?
        with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                               CompileKey(domain=dbbranch.name, logger=logger)]):
            plenary_host.stash()
            try:
                plenary_host.write(locked=True)
            except IncompleteError:
                # This template cannot be written, so we leave it alone.
                # It would be nice to flag the state in the host?
                plenary_host.remove(locked=True)
            except:
                # This will not restore the cleaned up build files.  That's OK.
                # They will be recreated as needed.
                plenary_host.restore_stash()
                raise

        return
Example #20
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        with CompileKey.merge([host_plenary.get_key(),
                              cluster_plenary.get_key()]):
            try:
                cluster_plenary.write(locked=True)
                try:
                    host_plenary.write(locked=True)
                except IncompleteError:
                    host_plenary.remove(locked=True)
            except:
                cluster_plenary.restore_stash()
                host_plenary.restore_stash()
                raise
Example #21
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(
                dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(
                                    dbmachine, addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #22
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(dbmachine,
                                                                 addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #23
def del_cluster(session, logger, dbcluster, config):
    cluster = str(dbcluster.name)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError("%s is still in use by clusters: %s." %
                            (format(dbcluster),
                             ", ".join([c.name for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    resources = PlenaryCollection(logger=logger)
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            resources.append(Plenary.get_plenary(res))
    domain = dbcluster.branch.name
    session.delete(dbcluster)

    session.flush()

    key = cluster_plenary.get_remove_key()
    with CompileKey.merge([key, resources.get_remove_key()]):
        cluster_plenary.cleanup(domain, locked=True)
        # And we also want to remove the profile itself
        profiles = config.get("broker", "profilesdir")
        # Only one of these should exist, but it doesn't hurt
        # to try to clean up both.
        xmlfile = os.path.join(profiles, "clusters", cluster + ".xml")
        remove_file(xmlfile, logger=logger)
        xmlgzfile = xmlfile + ".gz"
        remove_file(xmlgzfile, logger=logger)
        # And the cached template created by ant
        remove_file(os.path.join(config.get("broker",
                                                 "quattordir"),
                                 "objects", "clusters",
                                 cluster + TEMPLATE_EXTENSION),
                    logger=logger)
        resources.remove(locked=True)

    build_index(config, session, profiles, logger=logger)

    return
Example #24
def del_cluster(session, logger, dbcluster, config):
    cluster = str(dbcluster.name)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError(
            "%s is still in use by clusters: %s." %
            (format(dbcluster), ", ".join([c.name
                                           for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    resources = PlenaryCollection(logger=logger)
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            resources.append(Plenary.get_plenary(res))
    domain = dbcluster.branch.name
    session.delete(dbcluster)

    session.flush()

    key = cluster_plenary.get_remove_key()
    with CompileKey.merge([key, resources.get_remove_key()]):
        cluster_plenary.cleanup(domain, locked=True)
        # And we also want to remove the profile itself
        profiles = config.get("broker", "profilesdir")
        # Only one of these should exist, but it doesn't hurt
        # to try to clean up both.
        xmlfile = os.path.join(profiles, "clusters", cluster + ".xml")
        remove_file(xmlfile, logger=logger)
        xmlgzfile = xmlfile + ".gz"
        remove_file(xmlgzfile, logger=logger)
        # And the cached template created by ant
        remove_file(os.path.join(config.get("broker", "quattordir"), "objects",
                                 "clusters", cluster + TEMPLATE_EXTENSION),
                    logger=logger)
        resources.remove(locked=True)

    build_index(config, session, profiles, logger=logger)

    return
Example #25
def main():
    logging.basicConfig(level=logging.DEBUG)

    query = session.query(Resource)

    old_paths = []

    with CompileKey():
        for res in query.all():
            PlenaryResource(res).write(locked=True)

            holder = res.holder.holder_object
            if isinstance(holder, ResourceGroup):
                holder = holder.holder.holder_object
            else:
                old_paths.append("resource/%s/%s/%s/%s" %
                                 (res.resource_type, res.holder.holder_type,
                                  res.holder.holder_name, res.name))

            try:
                # Show that something is happening...
                print "Flushing {0:l}".format(holder)

                if isinstance(holder, Host):
                    PlenaryHost(holder).write(locked=True)
                elif isinstance(holder, Cluster):
                    PlenaryCluster(holder).write(locked=True)
                else:
                    raise AquilonError("Unknown holder object: %r" % holder)
            except IncompleteError:
                pass

    plenarydir = config.get("broker", "plenarydir")
    for path in old_paths:
        try:
            os.remove(os.path.join(plenarydir, path, "config.tpl"))
        except OSError:
            pass
        try:
            os.removedirs(os.path.join(plenarydir, path))
        except OSError:
            pass
Example #26
File: host.py Project: piojo/aquilon
    def get_key(self, exclusive=True):
        keylist = [super(PlenaryToplevelHost, self).get_key(exclusive=exclusive)]

        if not inspect(self.dbobj).deleted:
            keylist.append(PlenaryKey(exclusive=False,
                                      personality=self.dbobj.personality,
                                      logger=self.logger))
            for si in self.dbobj.services_used:
                keylist.append(PlenaryKey(exclusive=False, service_instance=si,
                                          logger=self.logger))
            for srv in self.dbobj.services_provided:
                keylist.append(PlenaryKey(exclusive=False,
                                          service_instance=srv.service_instance,
                                          logger=self.logger))

            if self.dbobj.cluster:
                keylist.append(PlenaryKey(exclusive=False,
                                          cluster_member=self.dbobj.cluster,
                                          logger=self.logger))
        return CompileKey.merge(keylist)
Example #27
File: machine.py Project: piojo/aquilon
    def get_key(self, exclusive=True):
        if not exclusive:
            # CompileKey() does not support shared mode
            raise InternalError("Shared locks are not implemented for machine "
                                "plenaries.")

        # Need a compile key if:
        # - There is a host attached.
        # - This is a virtual machine in a container.
        keylist = [NoLockKey(logger=self.logger)]
        if not inspect(self.dbobj).deleted:
            if self.dbobj.host:
                plenary = Plenary.get_plenary(self.dbobj.host,
                                              logger=self.logger)
                keylist.append(plenary.get_key())
            if self.dbobj.vm_container:
                plenary = Plenary.get_plenary(self.dbobj.vm_container,
                                              logger=self.logger)
                keylist.append(plenary.get_key())
        return CompileKey.merge(keylist)
Example #28
File: branch.py Project: ned21/aquilon
def remove_branch(config, logger, dbbranch):
    session = object_session(dbbranch)
    deps = get_branch_dependencies(dbbranch)
    if deps:
        raise ArgumentError("\n".join(deps))

    session.delete(dbbranch)

    domain = TemplateDomain(dbbranch, logger=logger)
    # Can this fail?  Is recovery needed?
    with CompileKey(domain=dbbranch.name, logger=logger):
        for dir in domain.directories():
            remove_dir(dir, logger=logger)

    kingdir = config.get("broker", "kingdir")
    try:
        run_git(["branch", "-D", dbbranch.name], path=kingdir, logger=logger)
    except ProcessException, e:
        logger.warning(
            "Error removing branch %s from template-king, "
            "proceeding anyway: %s", dbbranch.name, e)
Example #29
 def compile(self, session, logger, dbhost):
     """ compile plenary templates """
     plenary = PlenaryHost(dbhost, logger=logger)
     # Force a host lock as pan might overwrite the profile...
     key = CompileKey(domain=dbhost.branch.name,
                      profile=dbhost.fqdn,
                      logger=logger)
     try:
         lock_queue.acquire(key)
         plenary.write(locked=True)
         td = TemplateDomain(dbhost.branch,
                             dbhost.sandbox_author,
                             logger=logger)
         td.compile(session, only=[dbhost.fqdn], locked=True)
     except IncompleteError:
         raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
     except:
         plenary.restore_stash()
         raise
     finally:
         lock_queue.release(key)
Example #30
File: machine.py Project: ned21/aquilon
 def get_key(self):
     host = self.dbobj.host
     container = self.dbobj.vm_container
     # Need a compile key if:
     # - There is a host attached.
     # - This is a virtual machine in a container.
     if not host and not container:
         return None
     # We have at least host or container, maybe both...
     if host:
         # PlenaryHost is actually a PlenaryCollection... can't call
         # get_key() directly, so using get_remove_key().
         ph = Plenary.get_plenary(host, logger=self.logger)
         host_key = ph.get_remove_key()
     if container:
         pc = Plenary.get_plenary(container, self.logger)
         container_key = pc.get_key()
     if not container:
         return host_key
     if not host:
         return container_key
     return CompileKey.merge([host_key, container_key])
Example #31
File: machine.py Project: jrha/aquilon
 def get_key(self):
     host = self.dbobj.host
     container = self.dbobj.vm_container
     # Need a compile key if:
     # - There is a host attached.
     # - This is a virtual machine in a container.
     if not host and not container:
         return None
     # We have at least host or container, maybe both...
     if host:
         # PlenaryHost is actually a PlenaryCollection... can't call
         # get_key() directly, so using get_remove_key().
         ph = Plenary.get_plenary(host, logger=self.logger)
         host_key = ph.get_remove_key()
     if container:
         pc = Plenary.get_plenary(container, self.logger)
         container_key = pc.get_key()
     if not container:
         return host_key
     if not host:
         return container_key
     return CompileKey.merge([host_key, container_key])
Example #32
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError("{0} is not a compilable archetype "
                                "({1!s}).".format(dbcluster,
                                                  dbcluster.personality.archetype))

        chooser = Chooser(dbcluster, logger=logger,
                          required_only=not(keepbindings))
        chooser.set_required()
        chooser.flush_changes()
        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey.merge([chooser.get_write_key(),
                                CompileKey(domain=dbcluster.branch.name,
                                           logger=logger)])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            profile_list = add_cluster_data(dbcluster)
            profile_list.extend(chooser.changed_server_fqdns())

            td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, only=profile_list, locked=True)

        except:
            chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.

            raise

        finally:
            lock_queue.release(key)

        return
Example #33
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #34
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       machine_type='switch', compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError("The switch has aliases and it cannot be "
                                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr, now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #35
    def resetadvertisedstatus_list(self, session, logger, dbhosts):
        branches = {}
        authors = {}
        failed = []
        compileable = []
        # Do any cross-list or dependency checks
        for dbhost in dbhosts:
            # Only validate branches and domains if the archetype
            # is compileable.
            if dbhost.archetype.is_compileable:
                compileable.append(dbhost.fqdn)
                if dbhost.branch in branches:
                    branches[dbhost.branch].append(dbhost)
                else:
                    branches[dbhost.branch] = [dbhost]
                if dbhost.sandbox_author in authors:
                    authors[dbhost.sandbox_author].append(dbhost)
                else:
                    authors[dbhost.sandbox_author] = [dbhost]

            if dbhost.status.name == 'ready':
                failed.append("{0:l} is in ready status, "
                              "advertised status can be reset only "
                              "when host is in non ready state".format(dbhost))
        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = [
                "{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                for branch in keys
            ]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = [
                "%s hosts with sandbox author %s" %
                (len(authors[author]), author.name) for author in keys
            ]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            dbhost.advertise_status = False
            session.add(dbhost)
            plenaries.append(PlenaryHost(dbhost, logger=logger))

        session.flush()

        dbbranch = branches.keys()[0]
        dbauthor = authors.keys()[0]
        key = CompileKey.merge([plenaries.get_write_key()])
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.write(locked=True)
            td = TemplateDomain(dbbranch, dbauthor, logger=logger)
            td.compile(session, only=compileable, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
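
The list command above enforces that every compileable host in the list shares one branch and one sandbox author before anything is written, since all of them will be compiled under a single key. A hedged sketch of that precondition as a standalone helper follows; the helper name and the exceptions import path are assumptions.

# Hypothetical helper mirroring the branch/author checks above.
# The exceptions module path is an assumption.
from collections import defaultdict

from aquilon.exceptions_ import ArgumentError


def validate_single_branch_and_author(dbhosts):
    """Return (branch, author) shared by all compileable hosts, or raise."""
    branches = defaultdict(list)
    authors = defaultdict(list)
    for dbhost in dbhosts:
        if dbhost.archetype.is_compileable:
            branches[dbhost.branch].append(dbhost)
            authors[dbhost.sandbox_author].append(dbhost)

    if len(branches) > 1:
        stats = ["%d hosts in %s" % (len(hosts), branch)
                 for branch, hosts in sorted(branches.items(),
                                             key=lambda item: len(item[1]))]
        raise ArgumentError("All hosts must be in the same domain or "
                            "sandbox:\n%s" % "\n".join(stats))
    if len(authors) > 1:
        stats = ["%d hosts with sandbox author %s" % (len(hosts), author)
                 for author, hosts in sorted(authors.items(),
                                             key=lambda item: len(item[1]))]
        raise ArgumentError("All hosts must be managed by the same "
                            "sandbox author:\n%s" % "\n".join(stats))

    branch = list(branches)[0] if branches else None
    author = list(authors)[0] if authors else None
    return branch, author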
Example #36
0
File: cluster.py Project: piojo/aquilon
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0 and
            dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError("The personality %s for %s is not allowed "
                                "by the cluster. Specify --personality "
                                "and provide one of %s" %
                                (dbhost.personality, dbhost.fqdn,
                                 ", ".join([x.name for x in
                                            dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(dbhost,
                                                                   dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res, logger)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError("{0:l} already in {1:l}, use "
                                    "aq reconfigure to change personality."
                                    .format(dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Demote or promote the host to keep its build status consistent
        # with the status of the cluster it is joining.
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostAlmostready.get_instance(session)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostReady.get_instance(session)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        with CompileKey.merge([chooser.get_key(), plenaries.get_key()]):
            plenaries.stash()
            try:
                chooser.write_plenary_templates(locked=True)
                plenaries.write(locked=True)
            except:
                chooser.restore_stash()
                plenaries.restore_stash()
                raise

        return
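
This variant of the cluster-bind command takes the merged key as a context manager, so the explicit lock_queue.acquire/release pair of the other examples disappears and the lock is released even if the write raises. A minimal sketch of that form, with an assumed import path, is below.

# Sketch of the context-manager form of the locking pattern.
# The import path is an assumption based on the broker's layout.
from aquilon.worker.locks import CompileKey


def write_under_merged_key(chooser, plenaries):
    """Write chooser-managed plenaries plus extras under one merged key."""
    with CompileKey.merge([chooser.get_key(), plenaries.get_key()]):
        plenaries.stash()
        try:
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise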
Example #37
0
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenaries = PlenaryCollection(logger=logger)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if clearchassis:
            del dbmachine.chassis_slot[:]

        remove_plenaries = PlenaryCollection(logger=logger)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dbchassis.location):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}.".format(dblocation,
                                                        dbslot.chassis, dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dblocation):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if dbmodel.machine_type not in ['blade', 'rackmount',
                                            'workstation', 'aurora_node',
                                            'virtual_machine']:
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.machine_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.machine_type != dbmachine.model.machine_type and \
               'virtual_machine' in [dbmodel.machine_type,
                                     dbmachine.model.machine_type]:
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.machine_type,
                                     dbmodel.machine_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, dbmachine, ip)

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            # TODO: do we want to allow moving machines between the cluster and
            # metacluster level?
            if new_holder.__class__ != old_holder.__class__:
                raise ArgumentError("Cannot move a VM between a cluster and a "
                                    "stand-alone host.")

            if cluster:
                if new_holder.metacluster != old_holder.metacluster \
                   and not allow_metacluster_change:
                    raise ArgumentError("Current {0:l} does not match "
                                        "new {1:l}."
                                        .format(old_holder.metacluster,
                                                new_holder.metacluster))

            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if not isinstance(dbdisk, VirtualDisk):
                    continue
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.name
                else:
                    resourcegroup = None
                new_share = find_share(new_holder, resourcegroup, old_share.name,
                                       error=ArgumentError)

                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                dbmachine.location = new_holder.location

            session.flush()
            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.append(Plenary.get_plenary(dbmachine))
        if remove_plenaries.plenaries and dbmachine.host:
            plenaries.append(Plenary.get_plenary(dbmachine.host))

        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)

            if dbmachine.host:
                # XXX: May need to reconfigure.
                pass

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            plenaries.restore_stash()
            remove_plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
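
One detail of the machine-update command above is worth isolating: when the machine's location (or VM container) changes, the path of its plenary may change too, so the plenary at the old path is queued for removal while the one at the new path is queued for writing, and both are later processed under a single merged key. A hedged sketch of that bookkeeping follows; the helper name is hypothetical, and the caller is expected to compute will_move with machine_plenary_will_move, as the command does.

# Hypothetical helper isolating the "plenary path may move" bookkeeping.
# Import paths are assumptions.
from aquilon.worker.templates.base import Plenary, PlenaryCollection


def queue_machine_move(dbmachine, new_location, will_move, logger=None):
    """Queue plenary removal/write around a location change.

    will_move should be the result of
    machine_plenary_will_move(old=dbmachine.location, new=new_location).
    """
    plenaries = PlenaryCollection(logger=logger)
    remove_plenaries = PlenaryCollection(logger=logger)

    if will_move:
        # The plenary path depends on the location, so snapshot a removal
        # plenary before the location is updated.
        remove_plenaries.append(Plenary.get_plenary(dbmachine))

    dbmachine.location = new_location

    # The new plenary reflects the updated location.
    plenaries.append(Plenary.get_plenary(dbmachine))
    return plenaries, remove_plenaries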
Example #38
0
File: cluster.py Project: ned21/aquilon
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0
                and dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError(
                "The personality %s for %s is not allowed "
                "by the cluster. Specify --personality "
                "and provide one of %s" %
                (dbhost.personality, dbhost.fqdn, ", ".join(
                    [x.name for x in dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(
                dbhost, dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError(
                    "{0:l} already in {1:l}, use "
                    "aq reconfigure to change personality.".format(
                        dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Demote or promote the host to keep its build status consistent
        # with the status of the cluster it is joining.
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostLifecycle.get_unique(session,
                                                    'almostready',
                                                    compel=True)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostLifecycle.get_unique(session,
                                                   'ready',
                                                   compel=True)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        key = CompileKey.merge(
            [chooser.get_write_key(),
             plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #39
0
    def render(
            self,
            session,
            logger,
            cluster,
            personality,
            max_members,
            fix_location,
            down_hosts_threshold,
            maint_threshold,
            comments,
            # ESX specific options
            switch,
            memory_capacity,
            clear_overrides,
            vm_to_host_ratio,
            **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster." %
                                format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count,
                                   host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError(
                    "%s has %d hosts bound, which exceeds "
                    "the requested limit %d." %
                    (format(dbcluster), current_members, max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge(
            [plenaries.get_write_key(),
             remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #40
0
 def get_key(self):
     return CompileKey(domain=self.dbobj.branch.name,
                       profile=self.plenary_template,
                       logger=self.logger)
Example #41
0
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. Since such a
            # case may still turn out to be useful in the future, the
            # restriction lives here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       compel=True)

            if dbmodel.machine_type == "nic":
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session,
                                       name=feature,
                                       feature_type=feature_type,
                                       compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException(
                    "Changing feature bindings for more "
                    "than just a personality requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

        idx = 0
        written = 0
        successful = []
        failed = []

        with CompileKey(logger=logger):
            personalities = q.all()

            for personality in personalities:
                idx += 1
                if idx % 1000 == 0:  # pragma: no cover
                    logger.client_info("Processing personality %d of %d..." %
                                       (idx, cnt))

                if not personality.archetype.is_compileable:  # pragma: no cover
                    continue

                try:
                    plenary_personality = PlenaryPersonality(personality)
                    written += plenary_personality.write(locked=True)
                    successful.append(plenary_personality)
                except IncompleteError:
                    pass
                except Exception, err:  # pragma: no cover
                    failed.append("{0} failed: {1}".format(personality, err))

            if failed:  # pragma: no cover
                for plenary in successful:
                    plenary.restore_stash()
                raise PartialError([], failed)
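
The feature-bind command above writes a potentially large number of personality plenaries under one full compile lock, skips the incomplete ones, and on any other failure restores only the plenaries that were already written before raising PartialError. A compressed, hedged sketch of that best-effort loop is below; the import paths and the helper name are assumptions, and the IncompleteError handling of the original is folded into a comment.

# Sketch of the "write many, restore the successful ones on failure"
# pattern from the example above. Import paths are assumptions.
from aquilon.worker.locks import CompileKey
from aquilon.exceptions_ import PartialError


def write_all_or_report(plenaries, logger=None):
    successful = []
    failed = []
    with CompileKey(logger=logger):
        for plenary in plenaries:
            try:
                # The original also catches IncompleteError separately and
                # simply skips those plenaries.
                plenary.write(locked=True)
                successful.append(plenary)
            except Exception, err:
                failed.append("%s failed: %s" % (plenary, err))

        if failed:
            # Undo the writes that did succeed so the template tree stays
            # consistent, then report the failures to the client.
            for plenary in successful:
                plenary.restore_stash()
            raise PartialError([], failed)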
Example #42
0
class CommandReconfigureList(BrokerCommand):

    required_parameters = ["list"]

    def render(self, session, logger, list, archetype, personality,
               buildstatus, osname, osversion, **arguments):
        check_hostlist_size(self.command, self.config, list)
        dbhosts = hostlist_to_hosts(session, list)

        self.reconfigure_list(session, logger, dbhosts, archetype, personality,
                              buildstatus, osname, osversion, **arguments)

    def reconfigure_list(self, session, logger, dbhosts, archetype,
                         personality, buildstatus, osname, osversion,
                         **arguments):
        failed = []
        # Check all the parameters up front.
        # Some of these could be more intelligent about defaults
        # (either by checking for unique entries or relying on the list)
        # - starting simple.
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
            # TODO: Once OS is a first class object this block needs
            # to check that either OS is also being reset or that the
            # OS is valid for the new archetype.
        else:
            dbarchetype = None
        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbarchetype,
                                                   compel=True)
        if osname and not osversion:
            raise ArgumentError("Please specify --osversion for OS %s." %
                                osname)
        if osversion:
            if not osname:
                raise ArgumentError("Please specify --osname to use with "
                                    "OS version %s." % osversion)
            # Linux model names are the same under aurora and aquilon, so
            # allowing to omit --archetype would not be useful
            if not archetype:
                raise ArgumentError("Please specify --archetype for OS "
                                    "%s, version %s." % (osname, osversion))
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
        else:
            dbos = None

        if buildstatus:
            dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                                compel=True)

        # Take a shortcut if there's nothing to do, but only after all the other
        # parameters have been checked
        if not dbhosts:
            return

        personalities = {}
        branches = {}
        authors = {}
        # Do any final cross-list or dependency checks before entering
        # the Chooser loop.
        for dbhost in dbhosts:
            if dbhost.branch in branches:
                branches[dbhost.branch].append(dbhost)
            else:
                branches[dbhost.branch] = [dbhost]
            if dbhost.sandbox_author in authors:
                authors[dbhost.sandbox_author].append(dbhost)
            else:
                authors[dbhost.sandbox_author] = [dbhost]

            if dbos and not dbarchetype and dbhost.archetype != dbos.archetype:
                failed.append("{0}: Cannot change operating system because it "
                              "needs {1:l} instead of "
                              "{2:l}.".format(dbhost.fqdn, dbhost.archetype,
                                              dbos.archetype))
            if dbarchetype and not dbos and \
               dbhost.operating_system.archetype != dbarchetype:
                failed.append("{0}: Cannot change archetype because {1:l} needs "
                              "{2:l}.".format(dbhost.fqdn, dbhost.operating_system,
                                              dbhost.operating_system.archetype))
            if (personality and dbhost.cluster and
                len(dbhost.cluster.allowed_personalities) > 0 and
                dbpersonality not in dbhost.cluster.allowed_personalities):
                allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                           dbhost.cluster.allowed_personalities]
                failed.append("{0}: The {1:l} is not allowed by {2}.  "
                              "Specify one of {3}.".format(
                                  dbhost.fqdn, dbpersonality,
                                  dbhost.cluster, allowed))
            if personality:
                personalities[dbhost.fqdn] = dbpersonality
            elif archetype:
                personalities[dbhost.fqdn] = Personality.get_unique(session,
                        name=dbhost.personality.name, archetype=dbarchetype)
                if not personalities[dbhost.fqdn]:
                    failed.append("%s: No personality %s found for archetype "
                                  "%s." %
                                  (dbhost.fqdn, dbhost.personality.name,
                                   dbarchetype.name))

        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                     for branch in keys]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        dbbranch = branches.keys()[0]
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = ["%s hosts with sandbox author %s" %
                     (len(authors[author]), author.name) for author in keys]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))
        dbauthor = authors.keys()[0]

        failed = []
        choosers = []
        for dbhost in dbhosts:
            if dbhost.fqdn in personalities:
                dbhost.personality = personalities[dbhost.fqdn]
                session.add(dbhost)
            if osversion:
                dbhost.operating_system = dbos
                session.add(dbhost)
            if buildstatus:
                dbhost.status.transition(dbhost, dbstatus)
                session.add(dbhost)
        session.flush()

        logger.client_info("Verifying service bindings.")
        for dbhost in dbhosts:
            if dbhost.archetype.is_compileable:
                if arguments.get("keepbindings", None):
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=False)
                else:
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=True)
                choosers.append(chooser)
                try:
                    chooser.set_required()
                except ArgumentError, e:
                    failed.append(str(e))
        if failed:
            raise ArgumentError("The following hosts failed service "
                                "binding:\n%s" % "\n".join(failed))

        session.flush()
        logger.info("reconfigure_hostlist processing: %s" %
                    ",".join([str(dbhost.fqdn) for dbhost in dbhosts]))

        if not choosers:
            return

        # Optimize so that duplicate service plenaries are not re-written
        templates = set()
        for chooser in choosers:
            # chooser.plenaries is a PlenaryCollection - this flattens
            # that top level.
            templates.update(chooser.plenaries.plenaries)

        # Don't bother locking until every possible check before the
        # actual writing and compile is done.  This will allow for fast
        # turnaround on errors (no need to wait for a lock if there's
        # a missing service map entry or something).
        # The lock must be over at least the domain, but could be over
        # all if (for example) service plenaries need to change.
        key = CompileKey.merge([p.get_write_key() for p in templates] +
                               [CompileKey(domain=dbbranch.name,
                                           logger=logger)])
        try:
            lock_queue.acquire(key)
            logger.client_info("Writing %s plenary templates.", len(templates))
            # FIXME: if one of the templates raises IncompleteError (e.g.
            # a host should be in a cluster, but it is not), then we return an
            # InternalError to the client, which is not nice
            for template in templates:
                logger.debug("Writing %s", template)
                template.write(locked=True)
            td = TemplateDomain(dbbranch, dbauthor, logger=logger)
            td.compile(session, locked=True)
        except:
            logger.client_info("Restoring plenary templates.")
            for template in templates:
                logger.debug("Restoring %s", template)
                template.restore_stash()
            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.
            raise
        finally:
            lock_queue.release(key)

        return
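
Before locking, the reconfigure command above flattens the PlenaryCollection of every Chooser into one set, so plenaries shared between hosts (service instances, for example) are written only once, and it merges their write keys with a domain-wide key so the compile is serialized too. A hedged sketch of that de-duplication step, with an assumed import path:

# Sketch of de-duplicating plenaries across Choosers and building one
# merged key for them. The import path is an assumption.
from aquilon.worker.locks import CompileKey


def merged_key_for_choosers(choosers, dbbranch, logger=None):
    templates = set()
    for chooser in choosers:
        # chooser.plenaries is a PlenaryCollection; its .plenaries
        # attribute is the flat list of individual plenary objects.
        templates.update(chooser.plenaries.plenaries)

    # One write key per distinct template, plus a domain-level key so the
    # compile of the whole branch is covered as well.
    key = CompileKey.merge([p.get_write_key() for p in templates] +
                           [CompileKey(domain=dbbranch.name, logger=logger)])
    return templates, key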
Example #43
0
File: del_host.py Project: jrha/aquilon
    def render(self, session, logger, hostname, **arguments):
        # Removing the host plenary requires a compile lock; since we're
        # dealing with two locks here, we have to be careful to avoid
        # deadlock. We grab the plenary host early on (so the filenames
        # are filled in from the db info before we delete it from the db),
        # hold onto those references until the db cleanup is complete,
        # and only if all of that succeeds do we delete the plenary file
        # (which doesn't require re-evaluating any stale db information)
        # after the delete lock has been released.
        delplenary = False

        # Any service bindings that we need to clean up afterwards
        bindings = PlenaryCollection(logger=logger)
        resources = PlenaryCollection(logger=logger)
        with DeleteKey("system", logger=logger) as key:
            # Check dependencies, translate into user-friendly message
            dbhost = hostname_to_host(session, hostname)
            host_plenary = Plenary.get_plenary(dbhost, logger=logger)
            domain = dbhost.branch.name
            deps = get_host_dependencies(session, dbhost)
            if (len(deps) != 0):
                deptext = "\n".join(["  %s" % d for d in deps])
                raise ArgumentError("Cannot delete host %s due to the "
                                    "following dependencies:\n%s." %
                                    (hostname, deptext))

            archetype = dbhost.archetype.name
            dbmachine = dbhost.machine
            oldinfo = DSDBRunner.snapshot_hw(dbmachine)

            ip = dbmachine.primary_ip
            fqdn = dbmachine.fqdn

            for si in dbhost.services_used:
                plenary = PlenaryServiceInstanceServer(si)
                bindings.append(plenary)
                logger.info("Before deleting host '%s', removing binding '%s'"
                            % (fqdn, si.cfg_path))

            del dbhost.services_used[:]

            if dbhost.resholder:
                for res in dbhost.resholder.resources:
                    resources.append(Plenary.get_plenary(res))

            # In case of Zebra, the IP may be configured on multiple interfaces
            for iface in dbmachine.interfaces:
                if ip in iface.addresses:
                    iface.addresses.remove(ip)

            if dbhost.cluster:
                dbcluster = dbhost.cluster
                dbcluster.hosts.remove(dbhost)
                set_committed_value(dbhost, '_cluster', None)
                dbcluster.validate()

            dbdns_rec = dbmachine.primary_name
            dbmachine.primary_name = None
            dbmachine.host = None
            session.delete(dbhost)
            delete_dns_record(dbdns_rec)
            session.flush()
            delplenary = True

            if dbmachine.vm_container:
                bindings.append(Plenary.get_plenary(dbmachine.vm_container))

            if archetype != 'aurora' and ip is not None:
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                               "DSDB" % hostname)
            if archetype == 'aurora':
                logger.client_info("WARNING: removing host %s from AQDB and "
                                   "*not* changing DSDB." % hostname)

            # Past the point of no return... commit the transaction so
            # that we can free the delete lock.
            session.commit()

        # Only if we got here with no exceptions do we clean the template
        # Trying to clean up after any errors here is really difficult
        # since the changes to dsdb have already been made.
        if (delplenary):
            key = host_plenary.get_remove_key()
            with CompileKey.merge([key, bindings.get_write_key(),
                                   resources.get_remove_key()]) as key:
                host_plenary.cleanup(domain, locked=True)
                # And we also want to remove the profile itself
                profiles = self.config.get("broker", "profilesdir")
                # Only one of these should exist, but it doesn't hurt
                # to try to clean up both.
                xmlfile = os.path.join(profiles, fqdn + ".xml")
                remove_file(xmlfile, logger=logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=logger)
                # And the cached template created by ant
                remove_file(os.path.join(self.config.get("broker",
                                                         "quattordir"),
                                         "objects", fqdn + TEMPLATE_EXTENSION),
                            logger=logger)
                bindings.write(locked=True)
                resources.remove(locked=True)

            build_index(self.config, session, profiles, logger=logger)

        return
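
The host deletion above is deliberately split into two phases so that neither lock is held longer than necessary: all database (and DSDB) work happens under a DeleteKey and is committed, and only then are the plenary and compiled profile files removed under a merged CompileKey. A compressed, hedged sketch of that two-phase shape, with the cleanup details elided and assumed import paths:

# Sketch of the two-phase delete used above: DB work under a DeleteKey,
# filesystem cleanup under a merged CompileKey. Imports are assumptions.
from aquilon.worker.locks import DeleteKey, CompileKey


def delete_then_cleanup(session, host_plenary, bindings, resources,
                        domain, logger=None):
    with DeleteKey("system", logger=logger):
        # ... all AQDB changes and the DSDB update go here ...
        # Commit before the delete lock is released, so the cleanup phase
        # below works against committed state.
        session.commit()

    with CompileKey.merge([host_plenary.get_remove_key(),
                           bindings.get_write_key(),
                           resources.get_remove_key()]):
        host_plenary.cleanup(domain, locked=True)
        bindings.write(locked=True)
        resources.remove(locked=True)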
Example #44
0
File: flush.py Project: ned21/aquilon
    def render(self, session, logger, services, personalities, machines,
               clusters, hosts, locations, resources, switches, all,
               **arguments):
        success = []
        failed = []
        written = 0

        # Caches for keeping preloaded data pinned in memory, since the SQLA
        # session holds a weak reference only
        resource_by_id = {}
        resholder_by_id = {}
        service_instances = None
        racks = None

        # Object caches that are accessed directly
        disks_by_machine = defaultdict(list)
        interfaces_by_machine = defaultdict(list)
        interfaces_by_id = {}

        if all:
            services = True
            personalities = True
            machines = True
            clusters = True
            hosts = True
            locations = True
            resources = True

        with CompileKey(logger=logger):
            logger.client_info("Loading data.")

            # When flushing clusters/hosts, the resource holder is loaded as
            # part of the query that loads those objects. But when flushing
            # resources only, we need both the holder and the object it
            # belongs to.
            if resources and not clusters:
                q = session.query(ClusterResource)
                # Using joinedload('cluster') would generate an outer join
                q = q.join(Cluster)
                q = q.options(contains_eager('cluster'))
                for resholder in q:
                    resholder_by_id[resholder.id] = resholder
            if resources and not hosts:
                q = session.query(HostResource)
                # Using joinedload('host') would generate an outer join
                q = q.join(Host)
                q = q.options(contains_eager('host'))
                for resholder in q:
                    resholder_by_id[resholder.id] = resholder

            if hosts or clusters or resources:
                # Load the most common resource types. Using
                # with_polymorphic('*') on Resource would generate a huge query,
                # so do something more targeted. More resource subclasses may be
                # added later if they become common.
                preload_classes = {
                    Filesystem: [],
                    RebootSchedule: [],
                    VirtualMachine: [
                        joinedload('machine'),
                        joinedload('machine.primary_name'),
                        joinedload('machine.primary_name.fqdn')
                    ],
                    Share: [],
                }

                share_info = cache_storage_data()

                for cls, options in preload_classes.items():
                    q = session.query(cls)

                    # If only hosts or only clusters are needed, don't load
                    # resources of the other kind
                    if hosts and not clusters and not resources:
                        q = q.join(ResourceHolder)
                        q = q.options(contains_eager('holder'))
                        q = q.filter_by(holder_type='host')
                    if clusters and not hosts and not resources:
                        q = q.join(ResourceHolder)
                        q = q.filter_by(holder_type='cluster')
                        q = q.options(contains_eager('holder'))

                    if options:
                        q = q.options(*options)

                    for res in q:
                        resource_by_id[res.id] = res
                        try:
                            res.populate_share_info(share_info)
                        except AttributeError:
                            pass

            if hosts or machines:
                # Polymorphic loading cannot be applied to eager-loaded
                # attributes, so load interfaces manually.
                q = session.query(Interface)
                q = q.with_polymorphic('*')
                q = q.options(lazyload("hardware_entity"))
                for iface in q:
                    interfaces_by_machine[iface.hardware_entity_id].append(
                        iface)
                    interfaces_by_id[iface.id] = iface

                if hosts:
                    # subqueryload() and with_polymorphic() do not play nice
                    # together, so do it by hand
                    q = session.query(AddressAssignment)
                    q = q.options(joinedload("network"),
                                  joinedload("dns_records"))
                    q = q.order_by(AddressAssignment._label)
                    addrs_by_iface = defaultdict(list)
                    for addr in q:
                        addrs_by_iface[addr.interface_id].append(addr)
                    for interface_id, addrs in addrs_by_iface.items():
                        set_committed_value(interfaces_by_id[interface_id],
                                            "assignments", addrs)

                    q = session.query(Interface.id)
                    q = q.filter(~Interface.assignments.any())
                    for id in q.all():
                        set_committed_value(interfaces_by_id[id[0]],
                                            "assignments", None)

            if hosts or services:
                q = session.query(ServiceInstance)
                q = q.options(subqueryload("service"))
                service_instances = q.all()

            if machines or clusters:
                # Most machines are in racks...
                q = session.query(Rack)
                q = q.options(subqueryload("dns_maps"),
                              subqueryload("parents"))
                racks = q.all()

            if locations:
                logger.client_info("Flushing locations.")
                for dbloc in session.query(City).all():
                    try:
                        plenary = Plenary.get_plenary(dbloc, logger=logger)
                        written += plenary.write(locked=True)
                    except Exception, e:
                        failed.append("City %s failed: %s" % dbloc, e)
                        continue

            if services:
                logger.client_info("Flushing services.")
                q = session.query(Service)
                q = q.options(subqueryload("instances"))
                for dbservice in q:
                    try:
                        plenary_info = Plenary.get_plenary(dbservice,
                                                           logger=logger)
                        written += plenary_info.write(locked=True)
                    except Exception, e:
                        failed.append("Service %s failed: %s" %
                                      (dbservice.name, e))
                        continue

                    for dbinst in dbservice.instances:
                        try:
                            plenary_info = Plenary.get_plenary(dbinst,
                                                               logger=logger)
                            written += plenary_info.write(locked=True)
                        except Exception, e:
                            failed.append("Service %s instance %s failed: %s" %
                                          (dbservice.name, dbinst.name, e))
                            continue
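
Most of the flush command above is pre-loading: because the whole run happens under a single CompileKey, per-object lazy loads would make it extremely slow, so relationships are populated up front and pinned with set_committed_value. That trick is sketched below in isolation; the model classes are passed in as stand-ins rather than imported from the broker.

# Isolated sketch of pre-populating a relationship so that later access
# does not trigger per-row lazy loads. Model classes are stand-ins
# supplied by the caller.
from collections import defaultdict

from sqlalchemy.orm.attributes import set_committed_value


def preload_assignments(session, Interface, AddressAssignment):
    interfaces_by_id = {}
    for iface in session.query(Interface):
        interfaces_by_id[iface.id] = iface

    addrs_by_iface = defaultdict(list)
    for addr in session.query(AddressAssignment):
        addrs_by_iface[addr.interface_id].append(addr)

    # Tell SQLAlchemy the relationship is already loaded, so touching
    # iface.assignments later will not emit another query.
    for iface_id, addrs in addrs_by_iface.items():
        set_committed_value(interfaces_by_id[iface_id], "assignments", addrs)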
Example #45
0
File: base.py Project: jrha/aquilon
 def get_remove_key(self):
     keylist = []
     for plen in self.plenaries:
         keylist.append(plen.get_remove_key())
     return CompileKey.merge(keylist)
Example #46
0
File: base.py Project: ned21/aquilon
 def get_key(self):
     """Base implementation assumes a full compile lock."""
     return CompileKey(logger=self.logger)
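
Examples #40, #45 and #46 together show how lock scope is chosen: the base Plenary falls back to a full compile lock, a PlenaryCollection merges the keys of its members, and concrete plenaries override get_key to narrow the lock to one profile in one domain. A hedged sketch of such an override on a hypothetical subclass, with assumed import paths:

# Hypothetical plenary subclass narrowing its lock scope; import paths
# are assumptions.
from aquilon.worker.locks import CompileKey
from aquilon.worker.templates.base import Plenary


class PlenaryScopedExample(Plenary):
    def get_key(self):
        # Instead of the base class's full compile lock, lock only this
        # object's profile within its branch.
        return CompileKey(domain=self.dbobj.branch.name,
                          profile=self.plenary_template,
                          logger=self.logger)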
Example #47
0
    def render(self, session, logger, domain, sandbox, cluster, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Managing clusters to {0:l} is not allowed.".format(dbbranch))

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbsource = dbcluster.branch
        dbsource_author = dbcluster.sandbox_author
        old_branch = dbcluster.branch.name

        if not force:
            validate_branch_commits(dbsource, dbsource_author, dbbranch,
                                    dbauthor, logger, self.config)

        if dbcluster.metacluster:
            raise ArgumentError(
                "{0.name} is member of metacluster {1.name}, "
                "it must be managed at metacluster level.".format(
                    dbcluster, dbcluster.metacluster))

        old_branch = dbcluster.branch.name
        plenaries = PlenaryCollection(logger=logger)

        # manage at metacluster level
        if dbcluster.cluster_type == 'meta':
            clusters = dbcluster.members

            dbcluster.branch = dbbranch
            dbcluster.sandbox_author = dbauthor
            session.add(dbcluster)
            plenaries.append(Plenary.get_plenary(dbcluster))
        else:
            clusters = [dbcluster]

        for cluster in clusters:
            # manage at cluster level
            # Need to set the new branch *before* creating the plenary objects.
            cluster.branch = dbbranch
            cluster.sandbox_author = dbauthor
            session.add(cluster)
            plenaries.append(Plenary.get_plenary(cluster))
            for dbhost in cluster.hosts:
                dbhost.branch = dbbranch
                dbhost.sandbox_author = dbauthor
                session.add(dbhost)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # We're crossing domains, need to lock everything.
        key = CompileKey(logger=logger)
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.cleanup(old_branch, locked=True)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #48
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster."
                                % format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count, host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger,
                                                   dbcluster, fix_location,
                                                   plenaries, remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError("%s has %d hosts bound, which exceeds "
                                    "the requested limit %d." %
                                    (format(dbcluster), current_members,
                                     max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #49
File: del_host.py  Project: piojo/aquilon
    def render(self, session, logger, hostname, **arguments):
        # Check dependencies, translate into user-friendly message
        dbhost = hostname_to_host(session, hostname)

        dbhost.lock_row()

        check_no_provided_service(dbhost)

        # Any service bindings that we need to clean up afterwards
        plenaries = PlenaryCollection(logger=logger)
        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbhost))

        archetype = dbhost.archetype.name
        dbmachine = dbhost.hardware_entity
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        ip = dbmachine.primary_ip

        for si in dbhost.services_used:
            plenaries.append(PlenaryServiceInstanceServer.get_plenary(si))
            logger.info("Before deleting {0:l}, removing binding to {1:l}"
                        .format(dbhost, si))

        del dbhost.services_used[:]

        if dbhost.resholder:
            for res in dbhost.resholder.resources:
                remove_plenaries.append(Plenary.get_plenary(res))

        # In case of Zebra, the IP may be configured on multiple interfaces
        for iface in dbmachine.interfaces:
            if ip in iface.addresses:
                iface.addresses.remove(ip)

        if dbhost.cluster:
            dbcluster = dbhost.cluster
            dbcluster.hosts.remove(dbhost)
            set_committed_value(dbhost, '_cluster', None)
            dbcluster.validate()
            plenaries.append(Plenary.get_plenary(dbcluster))

        dbdns_rec = dbmachine.primary_name
        dbmachine.primary_name = None
        dbmachine.host = None
        session.delete(dbhost)
        delete_dns_record(dbdns_rec)
        session.flush()

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        with CompileKey.merge([plenaries.get_key(),
                               remove_plenaries.get_key()]):
            plenaries.stash()
            remove_plenaries.stash()

            try:
                plenaries.write(locked=True)
                remove_plenaries.remove(locked=True, remove_profile=True)

                if archetype != 'aurora' and ip is not None:
                    dsdb_runner = DSDBRunner(logger=logger)
                    dsdb_runner.update_host(dbmachine, oldinfo)
                    dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                                   "DSDB" % hostname)
                if archetype == 'aurora':
                    logger.client_info("WARNING: removing host %s from AQDB and "
                                       "*not* changing DSDB." % hostname)
            except:
                plenaries.restore_stash()
                remove_plenaries.restore_stash()
                raise

        trigger_notifications(self.config, logger, CLIENT_INFO)

        return
Example #50
    def render(self, session, logger, hostname, domain, sandbox, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing hosts to {0:l} is not allowed."
                                .format(dbbranch))

        dbhost = hostname_to_host(session, hostname)
        dbsource = dbhost.branch
        dbsource_author = dbhost.sandbox_author
        old_branch = dbhost.branch.name

        if dbhost.cluster:
            raise ArgumentError("Cluster nodes must be managed at the "
                                "cluster level; this host is a member of "
                                "{0}.".format(dbhost.cluster))

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor
        session.add(dbhost)
        session.flush()
        plenary_host = PlenaryHost(dbhost, logger=logger)

        # We're crossing domains, need to lock everything.
        # XXX: There's a directory per domain.  Do we need subdirectories
        # for different authors for a sandbox?
        key = CompileKey(logger=logger)

        try:
            lock_queue.acquire(key)

            plenary_host.stash()
            plenary_host.cleanup(old_branch, locked=True)

            # Now we recreate the plenary to ensure that the domain is ready
            # to compile.  Note that there might not yet be enough information
            # to create the template, especially if none existed before.
            try:
                plenary_host.write(locked=True)
            except IncompleteError:
                # This template cannot be written, so we leave it alone.
                # It would be nice to flag the state in the host?
                pass
        except:
            # This will not restore the cleaned up files.  That's OK.
            # They will be recreated as needed.
            plenary_host.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #51
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments,
               **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       machine_type='switch',
                                       compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError(
                    "The switch has aliases and it cannot be "
                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr,
                                              now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #52
File: base.py  Project: piojo/aquilon
 def get_key(self, exclusive=True):
     keylist = [NoLockKey(logger=self.logger)]
     for plen in self.plenaries:
         keylist.append(plen.get_key(exclusive=exclusive))
     return CompileKey.merge(keylist)
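The key returned here can be used as a context manager, mirroring the `with CompileKey.merge(...)` block in Example #49 above. A minimal sketch, assuming `plenaries` is an already-populated PlenaryCollection; using `exclusive=False` for read-only consumers is an assumption based only on the parameter name:

    # Take the default (exclusive) lock while rewriting the templates.
    with plenaries.get_key():
        plenaries.stash()
        try:
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise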
Example #53
    def render(self, session, logger, domain, sandbox, cluster, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbsource = dbcluster.branch
        dbsource_author = dbcluster.sandbox_author

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        if dbcluster.metacluster:
            raise ArgumentError("{0.name} is member of metacluster {1.name}, "
                                "it must be managed at metacluster level.".
                                format(dbcluster, dbcluster.metacluster))

        plenaries = PlenaryCollection(logger=logger)

        # manage at metacluster level
        if isinstance(dbcluster, MetaCluster):
            plenaries.append(Plenary.get_plenary(dbcluster))
            clusters = dbcluster.members

            dbcluster.branch = dbbranch
            dbcluster.sandbox_author = dbauthor
        else:
            clusters = [dbcluster]

        for cluster in clusters:
            plenaries.append(Plenary.get_plenary(cluster))

            cluster.branch = dbbranch
            cluster.sandbox_author = dbauthor

            for dbhost in cluster.hosts:
                plenaries.append(Plenary.get_plenary(dbhost))

                dbhost.branch = dbbranch
                dbhost.sandbox_author = dbauthor

        session.flush()

        # We're crossing domains, need to lock everything.
        with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                               CompileKey(domain=dbbranch.name, logger=logger)]):
            plenaries.stash()
            try:
                plenaries.write(locked=True)
            except:
                plenaries.restore_stash()
                raise

        return
Example #54
    def resetadvertisedstatus_list(self, session, logger, dbhosts):
        branches = {}
        authors = {}
        failed = []
        compileable = []
        # Do any cross-list or dependency checks
        for dbhost in dbhosts:
            # Branch and sandbox-author validation below only applies to
            # compileable archetypes.
            if (dbhost.archetype.is_compileable):
                compileable.append(dbhost.fqdn)
                if dbhost.branch in branches:
                    branches[dbhost.branch].append(dbhost)
                else:
                    branches[dbhost.branch] = [dbhost]
                if dbhost.sandbox_author in authors:
                    authors[dbhost.sandbox_author].append(dbhost)
                else:
                    authors[dbhost.sandbox_author] = [dbhost]

            if dbhost.status.name == 'ready':
                failed.append("{0:l} is in ready status, "
                              "advertised status can be reset only "
                              "when host is in non ready state".format(dbhost))
        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]),
                                                   branch)
                     for branch in keys]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = ["%s hosts with sandbox author %s" %
                     (len(authors[author]), author.name) for author in keys]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            dbhost.advertise_status = False
            session.add(dbhost)
            plenaries.append(PlenaryHost(dbhost, logger=logger))

        session.flush()

        dbbranch = branches.keys()[0]
        dbauthor = authors.keys()[0]
        key = CompileKey.merge([plenaries.get_write_key()])
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.write(locked=True)
            td = TemplateDomain(dbbranch, dbauthor, logger=logger)
            td.compile(session, only=compileable, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #55
File: base.py  Project: ned21/aquilon
 def get_remove_key(self):
     keylist = []
     for plen in self.plenaries:
         keylist.append(plen.get_remove_key())
     return CompileKey.merge(keylist)