Example #1
    def render(self, session, logger, metacluster, max_members,
               fix_location, high_availability, comments, **arguments):
        dbmetacluster = MetaCluster.get_unique(session, metacluster,
                                               compel=True)
        cluster_updated = False

        if max_members is not None:
            current_members = len(dbmetacluster.members)
            if max_members < current_members:
                raise ArgumentError("%s has %d clusters bound, which exceeds "
                                    "the requested limit %d." %
                                    (format(dbmetacluster), current_members,
                                     max_members))
            dbmetacluster.max_clusters = max_members
            cluster_updated = True

        if comments is not None:
            dbmetacluster.comments = comments
            cluster_updated = True

        if high_availability is not None:
            dbmetacluster.high_availability = high_availability
            cluster_updated = True

        # TODO update_cluster_location would update VMs. Metaclusters
        # will contain VMs in Vulcan2 model.
        plenaries = PlenaryCollection(logger=logger)
        remove_plenaries = PlenaryCollection(logger=logger)

        location_updated = update_cluster_location(session, logger,
                                                   dbmetacluster, fix_location,
                                                   plenaries, remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbmetacluster)
        session.flush()
        dbmetacluster.validate()

        plenary_info = Plenary.get_plenary(dbmetacluster, logger=logger)
        key = plenary_info.get_write_key()

        try:
            lock_queue.acquire(key)

            plenary_info.write(locked=True)
        except:
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #2
    def __init__(self, dbservice, logger=LOGGER):
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbservice
        self.plenaries.append(PlenaryServiceToplevel(dbservice, logger=logger))
        self.plenaries.append(
            PlenaryServiceClientDefault(dbservice, logger=logger))
        self.plenaries.append(
            PlenaryServiceServerDefault(dbservice, logger=logger))
Example #3
    def __init__(self, dbservice, logger=LOGGER):
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbservice
        self.plenaries.append(PlenaryServiceToplevel(dbservice, logger=logger))
        self.plenaries.append(PlenaryServiceClientDefault(dbservice,
                                                          logger=logger))
        self.plenaries.append(PlenaryServiceServerDefault(dbservice,
                                                          logger=logger))
Example #4
    def __init__(self, dbresource, logger=LOGGER):
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbresource
        self.real_plenary = PlenaryResource(dbresource)

        self.plenaries.append(self.real_plenary)
        if dbresource.resholder:
            for res in dbresource.resholder.resources:
                self.plenaries.append(PlenaryResource(res))
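Common to all of these examples is the composite shape of PlenaryCollection: it holds individual plenary objects and fans each operation (write, stash, restore_stash) out to its members. A minimal sketch of that shape, with stand-in classes rather than the actual Aquilon implementation:

class Plenary(object):
    def __init__(self, name):
        self.name = name

    def write(self):
        print("writing %s" % self.name)


class PlenaryCollection(object):
    def __init__(self):
        self.plenaries = []

    def append(self, plenary):
        self.plenaries.append(plenary)

    def write(self):
        # Fan the operation out to every member plenary.
        for plenary in self.plenaries:
            plenary.write()


plenaries = PlenaryCollection()
plenaries.append(Plenary("host"))
plenaries.append(Plenary("cluster"))
plenaries.write()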
Example #5
    def render(self, session, logger, service, instance, position, hostname,
               cluster, ip, resourcegroup, service_address, alias, **arguments):
        # Check for invalid combinations. We allow binding as a server:
        # - a host, in which case the primary IP address will be used
        # - an auxiliary IP address of a host
        # - a service address of a host
        # - a service address of a cluster
        if ip:
            if cluster or not hostname:
                raise ArgumentError("Using the --ip option requires --hostname"
                                    "to be specified.")
        if cluster and not service_address:
            raise ArgumentError("Binding a cluster requires --service_address "
                                "to be specified.")

        dbservice = Service.get_unique(session, service, compel=True)
        dbinstance = get_service_instance(session, dbservice, instance)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbinstance))

        params = lookup_target(session, plenaries, hostname, ip, cluster,
                               resourcegroup, service_address, alias)

        # TODO: someday we should verify that the target really points to the
        # host/cluster specified by the other options
        if "alias" in params and ("host" in params or "cluster" in params):
            logger.client_info("Warning: when using --alias, it is your "
                               "responsibility to ensure it really points to "
                               "the host/cluster you've specified - the broker "
                               "does not verify that.")

        with session.no_autoflush:
            dbsrv = find_server(dbinstance, params)
            if dbsrv:
                raise ArgumentError("The server binding already exists.")

            dbsrv = ServiceInstanceServer(**params)

            # The ordering_list will manage the position for us
            if position is not None:
                dbinstance.servers.insert(position, dbsrv)
            else:
                dbinstance.servers.append(dbsrv)

            if dbsrv.host:
                session.expire(dbsrv.host, ['services_provided'])
            if dbsrv.cluster:
                session.expire(dbsrv.cluster, ['services_provided'])

        session.flush()

        plenaries.write()

        return
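The comment above ("The ordering_list will manage the position for us") refers to SQLAlchemy's ordering_list collection, which renumbers a position column whenever items are inserted or removed. A self-contained sketch of that behavior, using a toy schema rather than the broker's real tables:

from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import relationship, sessionmaker

Base = declarative_base()

class Instance(Base):
    __tablename__ = 'instance'
    id = Column(Integer, primary_key=True)
    servers = relationship('Server', order_by='Server.position',
                           collection_class=ordering_list('position'))

class Server(Base):
    __tablename__ = 'server'
    id = Column(Integer, primary_key=True)
    instance_id = Column(Integer, ForeignKey('instance.id'))
    position = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

dbinstance = Instance()
dbinstance.servers.append(Server())     # gets position 0
dbinstance.servers.insert(0, Server())  # old head is renumbered to 1
session.add(dbinstance)
session.flush()
print([s.position for s in dbinstance.servers])  # [0, 1]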
Example #6
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(
            session, buildstatus, compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(
                dbcluster.branch, dbcluster.sandbox_author, logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #7
    def render(self, session, logger, service, instance, position, hostname,
               cluster, ip, resourcegroup, service_address, alias, **arguments):
        dbservice = Service.get_unique(session, service, compel=True)

        if instance:
            dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                              name=instance, compel=True)
            dbinstances = [dbsi]
        else:
            # --position for multiple service instances sounds dangerous, so
            # disallow it until a real usecase emerges
            if position:
                raise ArgumentError("The --position option can only be "
                                    "specified for one service instance.")

            q = session.query(ServiceInstance)
            q = q.filter_by(service=dbservice)
            dbinstances = q.all()

        plenaries = PlenaryCollection(logger=logger)

        if position is not None:
            params = None
        else:
            params = lookup_target(session, plenaries, hostname, ip, cluster,
                                   resourcegroup, service_address, alias)

        for dbinstance in dbinstances:
            if position is not None:
                if position < 0 or position >= len(dbinstance.servers):
                    raise ArgumentError("Invalid server position.")
                dbsrv = dbinstance.servers[position]
                if dbsrv.host:
                    plenaries.append(Plenary.get_plenary(dbsrv.host))
                if dbsrv.cluster:
                    plenaries.append(Plenary.get_plenary(dbsrv.cluster))
            else:
                dbsrv = find_server(dbinstance, params)
                if not dbsrv:
                    if instance:
                        raise NotFoundException("No such server binding.")
                    continue

            plenaries.append(Plenary.get_plenary(dbinstance))

            if dbsrv.host:
                session.expire(dbsrv.host, ['services_provided'])
            if dbsrv.cluster:
                session.expire(dbsrv.cluster, ['services_provided'])
            dbinstance.servers.remove(dbsrv)

            if dbinstance.client_count > 0 and not dbinstance.servers:
                logger.warning("Warning: {0} was left without servers, "
                               "but it still has clients.".format(dbinstance))

        session.flush()

        plenaries.write()

        return
Example #8
    def render(self, session, logger, service, need_client_list, comments, **arguments):
        Service.get_unique(session, service, preclude=True)
        dbservice = Service(name=service, comments=comments, need_client_list=need_client_list)
        session.add(dbservice)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbservice))

        session.flush()
        plenaries.write()
        return
Example #9
    def __init__(self, dbhost, logger=LOGGER):
        if not isinstance(dbhost, Host):
            raise InternalError("PlenaryHost called with %s instead of Host" % dbhost.__class__.__name__)
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbhost
        self.config = Config()
        if self.config.getboolean("broker", "namespaced_host_profiles"):
            self.plenaries.append(PlenaryNamespacedHost(dbhost))
        if self.config.getboolean("broker", "flat_host_profiles"):
            self.plenaries.append(PlenaryToplevelHost(dbhost))
        self.plenaries.append(PlenaryHostData(dbhost))
Example #10
    def render(self, session, logger, switch, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        # Check and complain if the switch has any addresses other than its
        # primary address
        addrs = []
        for addr in dbswitch.all_addresses():
            if addr.ip == dbswitch.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format
                                (dbswitch, ", ".join(addrs)))

        dbdns_rec = dbswitch.primary_name
        ip = dbswitch.primary_ip
        old_fqdn = str(dbswitch.primary_name.fqdn)
        old_comments = dbswitch.comments
        session.delete(dbswitch)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any switch ports hanging off this switch should be deleted with
        # the cascade delete of the switch.

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        # clusters connected to this switch
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbswitch.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        key = CompileKey.merge([switch_plenary.get_remove_key(),
                                plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            switch_plenary.stash()
            plenaries.write(locked=True)
            switch_plenary.remove(locked=True)

            if ip:
                dsdb_runner = DSDBRunner(logger=logger)
                # FIXME: restore interface name/MAC on rollback
                dsdb_runner.delete_host_details(old_fqdn, ip, comments=old_comments)
                dsdb_runner.commit_or_rollback("Could not remove switch from DSDB")
            return

        except:
            plenaries.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
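The acquire/try/finally dance above is what later examples replace with a `with CompileKey.merge(...)` block; the equivalence is the standard context-manager pattern. A minimal sketch using only the standard library, with a plain lock standing in for the compile key:

import threading
from contextlib import contextmanager

@contextmanager
def held(lock):
    # Equivalent to lock_queue.acquire(key) ... lock_queue.release(key):
    # the lock is released no matter how the body exits.
    lock.acquire()
    try:
        yield
    finally:
        lock.release()

lock = threading.Lock()
with held(lock):
    pass  # critical section: write plenaries, talk to DSDB, etc.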
Example #11
    def __init__(self, dbhost, logger=LOGGER):
        if not isinstance(dbhost, Host):
            raise InternalError("PlenaryHost called with %s instead of Host" %
                                dbhost.__class__.__name__)
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbhost
        self.config = Config()
        if self.config.getboolean("broker", "namespaced_host_profiles"):
            self.plenaries.append(PlenaryNamespacedHost(dbhost))
        if self.config.getboolean("broker", "flat_host_profiles"):
            self.plenaries.append(PlenaryToplevelHost(dbhost))
        self.plenaries.append(PlenaryHostData(dbhost))
Example #12
    def render(self, session, logger, service, instance, comments,
               **arguments):
        dbservice = session.query(Service).filter_by(name=service).first()
        if dbservice and instance is None:
            raise ArgumentError("Service %s already exists." % dbservice.name)
        if not dbservice:
            # "add_service --service foo --comments blah" should add the comments
            # to Service,
            # "add_service --service foo --instance bar --comments blah" should
            # add the comments to ServiceInstance
            if instance:
                srvcomments = None
            else:
                srvcomments = comments
            dbservice = Service(name=service, comments=srvcomments)
            session.add(dbservice)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbservice))

        if instance:
            ServiceInstance.get_unique(session, service=dbservice,
                                       name=instance, preclude=True)
            dbsi = ServiceInstance(service=dbservice, name=instance,
                                   comments=comments)
            session.add(dbsi)
            plenaries.append(Plenary.get_plenary(dbsi))

        session.flush()
        plenaries.write()
        return
Example #13
    def render(self, session, logger, hostname, service, instance,
               **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbservice = Service.get_unique(session, service, compel=True)
        msg = "Service %s" % service
        if instance:
            dbinstances = [get_service_instance(session, dbservice, instance)]
            msg = "Service %s, instance %s" % (service, instance)
        else:
            q = session.query(ServiceInstance)
            q = q.filter_by(service=dbservice)
            q = q.join('servers')
            q = q.filter_by(host=dbhost)
            dbinstances = q.all()
        for dbinstance in dbinstances:
            if dbhost in dbinstance.server_hosts:
                if (dbinstance.client_count > 0
                        and len(dbinstance.server_hosts) <= 1):
                    logger.warning("WARNING: Server %s, is the last server "
                                   "bound to %s which still has clients" %
                                   (hostname, msg))

                dbinstance.server_hosts.remove(dbhost)
                session.expire(dbhost, ['_services_provided'])
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbhost))
        for dbinstance in dbinstances:
            plenaries.append(Plenary.get_plenary(dbinstance))
        plenaries.write()

        return
Example #14
class CommandRefreshWindowsHosts(BrokerCommand):

    required_parameters = []

    def render(self, session, logger, dryrun, **arguments):
        containers = set()
        partial_error = None
        with SyncKey(data="windows", logger=logger):
            try:
                self.refresh_windows_hosts(session, logger, containers)
                if dryrun:
                    session.rollback()
                    return
                session.commit()
            except PartialError, e:
                if dryrun:
                    raise
                partial_error = e
                # All errors were caught before hitting the session, so
                # keep going with whatever was successful.
                session.commit()

        if containers:
            plenaries = PlenaryCollection(logger=logger)
            for container in containers:
                plenaries.append(Plenary.get_plenary(container))
            plenaries.write()
        if partial_error:
            raise partial_error
        return
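The dryrun handling above relies on transaction semantics: do all the work, then either roll back (preview) or commit. A minimal sketch of the same idiom with sqlite3 standing in for the broker's SQLAlchemy session:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE hosts (name TEXT)")
conn.commit()

def refresh_hosts(conn, dryrun):
    conn.execute("INSERT INTO hosts VALUES ('example-host')")
    if dryrun:
        conn.rollback()  # preview only: every change is discarded
        return
    conn.commit()

refresh_hosts(conn, dryrun=True)
print(conn.execute("SELECT count(*) FROM hosts").fetchone()[0])  # 0
refresh_hosts(conn, dryrun=False)
print(conn.execute("SELECT count(*) FROM hosts").fetchone()[0])  # 1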
Example #15
    def render(self, session, logger, service, instance, comments,
               **arguments):
        dbservice = Service.get_unique(session, service, compel=True)
        ServiceInstance.get_unique(session, service=dbservice, name=instance,
                                   preclude=True)

        dbsi = ServiceInstance(service=dbservice, name=instance,
                               comments=comments)
        session.add(dbsi)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbsi))

        session.flush()
        plenaries.write()
        return
Example #16
    def render(self, session, logger, list, domain, sandbox, force,
               **arguments):
        dbbranch, dbauthor = get_branch_and_author(session, logger,
                                                   domain=domain,
                                                   sandbox=sandbox, compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing hosts to {0:l} is not allowed."
                                .format(dbbranch))
        check_hostlist_size(self.command, self.config, list)

        dbhosts = hostlist_to_hosts(session, list)

        failed = []

        dbsource, dbsource_author = validate_branch_author(dbhosts)
        for dbhost in dbhosts:
            # check if any host in the list is a cluster node
            if dbhost.cluster:
                failed.append("Cluster nodes must be managed at the "
                              "cluster level; {0} is a member of {1:l}."
                              .format(dbhost.fqdn, dbhost.cluster))

        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            plenaries.append(Plenary.get_plenary(dbhost))

            dbhost.branch = dbbranch
            dbhost.sandbox_author = dbauthor

        session.flush()

        # We're crossing domains, need to lock everything.
        with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                               CompileKey(domain=dbbranch.name, logger=logger)]):
            plenaries.stash()
            try:
                plenaries.write(locked=True)
            except:
                plenaries.restore_stash()
                raise

        return
Example #17
    def render(self, session, logger, hostname, service, instance, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbservice = Service.get_unique(session, service, compel=True)
        msg = "Service %s" % service
        if instance:
            dbinstances = [get_service_instance(session, dbservice, instance)]
            msg = "Service %s, instance %s" % (service, instance)
        else:
            q = session.query(ServiceInstance)
            q = q.filter_by(service=dbservice)
            q = q.join('servers')
            q = q.filter_by(host=dbhost)
            dbinstances = q.all()
        for dbinstance in dbinstances:
            if dbhost in dbinstance.server_hosts:
                if (dbinstance.client_count > 0 and
                    len(dbinstance.server_hosts) <= 1):
                    logger.warning("WARNING: Server %s, is the last server "
                                   "bound to %s which still has clients" %
                                   (hostname, msg))

                dbinstance.server_hosts.remove(dbhost)
                session.expire(dbhost, ['_services_provided'])
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbhost))
        for dbinstance in dbinstances:
            plenaries.append(Plenary.get_plenary(dbinstance))
        plenaries.write()

        return
Example #18
    def render(self, session, logger, network_device, **arguments):
        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)

        # Check and complain if the network device has any addresses other
        # than its primary address
        addrs = []
        for addr in dbnetdev.all_addresses():
            if addr.ip == dbnetdev.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format
                                (dbnetdev, ", ".join(addrs)))

        dbdns_rec = dbnetdev.primary_name
        ip = dbnetdev.primary_ip
        old_fqdn = str(dbnetdev.primary_name.fqdn)
        old_comments = dbnetdev.comments
        session.delete(dbnetdev)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any ports hanging off this network device should be deleted with
        # the cascade delete of the network device.

        netdev_plenary = Plenary.get_plenary(dbnetdev, logger=logger)

        # clusters connected to this network device
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbnetdev.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        with CompileKey.merge([netdev_plenary.get_key(), plenaries.get_key()]):
            netdev_plenary.stash()
            try:
                plenaries.write(locked=True)
                netdev_plenary.remove(locked=True)

                if ip:
                    dsdb_runner = DSDBRunner(logger=logger)
                    # FIXME: restore interface name/MAC on rollback
                    dsdb_runner.delete_host_details(old_fqdn, ip,
                                                    comments=old_comments)
                    dsdb_runner.commit_or_rollback("Could not remove network device "
                                                   "from DSDB")
            except:
                plenaries.restore_stash()
                netdev_plenary.restore_stash()
                raise
        return
Example #19
    def render(self, session, logger, city, timezone, campus,
               default_dns_domain, comments, **arguments):
        dbcity = get_location(session, city=city)

        # Updating machine templates is expensive, so only do that if needed
        update_machines = False

        if timezone is not None:
            dbcity.timezone = timezone
        if comments is not None:
            dbcity.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                    compel=True)
                dbcity.default_dns_domain = dbdns_domain
            else:
                dbcity.default_dns_domain = None

        prev_campus = None
        dsdb_runner = DSDBRunner(logger=logger)
        if campus is not None:
            dbcampus = get_location(session, campus=campus)
            # This one would change the template's locations hence forbidden
            if dbcampus.hub != dbcity.hub:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change campus.  {0} is in {1:l}, "
                                    "while {2:l} is in {3:l}.".format(
                                        dbcampus, dbcampus.hub,
                                        dbcity, dbcity.hub))

            if dbcity.campus:
                prev_campus = dbcity.campus
            dbcity.update_parent(parent=dbcampus)
            update_machines = True

        session.flush()

        if campus is not None:
            if prev_campus:
                prev_name = prev_campus.name
            else:
                prev_name = None
            dsdb_runner.update_city(city, dbcampus.name, prev_name)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcity))

        if update_machines:
            q = session.query(Machine)
            q = q.filter(Machine.location_id.in_(dbcity.offspring_ids()))
            logger.client_info("Updating %d machines..." % q.count())
            for dbmachine in q:
                plenaries.append(Plenary.get_plenary(dbmachine))

        count = plenaries.write()
        dsdb_runner.commit_or_rollback()
        logger.client_info("Flushed %d templates." % count)
Example #20
    def render(self, session, logger, rack, row, column, room, building, bunker,
               fullname, default_dns_domain, comments, **arguments):
        dbrack = get_location(session, rack=rack)
        if row is not None:
            dbrack.rack_row = row
        if column is not None:
            dbrack.rack_column = column
        if fullname is not None:
            dbrack.fullname = fullname
        if comments is not None:
            dbrack.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                    compel=True)
                dbrack.default_dns_domain = dbdns_domain
            else:
                dbrack.default_dns_domain = None
        if bunker or room or building:
            dbparent = get_location(session, bunker=bunker, room=room,
                                    building=building)
            # This one would change the template's locations hence forbidden
            if dbparent.building != dbrack.building:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change buildings.  {0} is in {1} "
                                    "while {2} is in {3}.".format(
                                        dbparent, dbparent.building,
                                        dbrack, dbrack.building))
            dbrack.update_parent(parent=dbparent)

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        q = session.query(Machine)
        q = q.filter(Machine.location_id.in_(dbrack.offspring_ids()))
        for dbmachine in q:
            plenaries.append(Plenary.get_plenary(dbmachine))
        plenaries.write()
Example #21
    def render(self, session, logger, metacluster, personality, max_members,
               fix_location, high_availability, comments, **arguments):
        dbmetacluster = MetaCluster.get_unique(session, metacluster,
                                               compel=True)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmetacluster))

        if personality:
            archetype = dbmetacluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbmetacluster.personality = dbpersonality

        if max_members is not None:
            dbmetacluster.max_clusters = max_members

        if comments is not None:
            dbmetacluster.comments = comments

        if high_availability is not None:
            dbmetacluster.high_availability = high_availability

        # TODO update_cluster_location would update VMs. Metaclusters
        # will contain VMs in Vulcan2 model.
        update_cluster_location(session, logger, dbmetacluster, fix_location,
                                plenaries, **arguments)

        session.flush()
        dbmetacluster.validate()

        plenaries.write(locked=False)

        return
Example #22
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #23
    def render(self, session, logger, target, hostname, list, personality,
               archetype, **arguments):

        target_type = "personality" if personality else "host"

        if hostname:
            objs = [hostname_to_host(session, hostname)]
        elif list:
            check_hostlist_size(self.command, self.config, list)
            objs = hostlist_to_hosts(session, list)
        elif personality:
            objs = [Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)]

        plenaries = PlenaryCollection(logger=logger)
        for obj in objs:
            # INFO: Fails for archetypes other than 'aquilon', 'vmhost'
            valid_targets = self.config.get("archetype_" + obj.archetype.name,
                                            target_type + "_grn_targets")

            if target not in [s.strip() for s in valid_targets.split(",")]:
                raise ArgumentError("Invalid %s target %s for archetype %s, please "
                                    "choose from %s" % (target_type, target,
                                                        obj.archetype.name,
                                                        valid_targets))

            for grn_rec in obj._grns[:]:
                if target == grn_rec.target:
                    obj._grns.remove(grn_rec)
            plenaries.append(Plenary.get_plenary(obj))

        session.flush()

        plenaries.write()

        return
Example #24
    def render(self, session, logger, hostname, service, **arguments):
        dbhost = hostname_to_host(session, hostname)
        for srv in (dbhost.archetype.services + dbhost.personality.services):
            if srv.name == service:
                raise ArgumentError("Cannot unbind a required service. "
                                    "Perhaps you want to rebind?")

        dbservice = Service.get_unique(session, service, compel=True)
        si = get_host_bound_service(dbhost, dbservice)
        if si:
            logger.info("Removing client binding")
            dbhost.services_used.remove(si)
            session.flush()

            plenaries = PlenaryCollection(logger=logger)
            plenaries.append(PlenaryHost(dbhost, logger=logger))
            plenaries.append(PlenaryServiceInstanceServer(si, logger=logger))
            plenaries.write()

        return
Example #25
    def render(self, session, logger, service, max_clients, default,
               **arguments):
        dbservice = Service.get_unique(session, name=service, compel=True)
        if default:
            dbservice.max_clients = None
        elif max_clients is not None:
            dbservice.max_clients = max_clients
        else:
            raise ArgumentError("Missing --max_clients or --default argument "
                                "to update service %s." % dbservice.name)
        session.add(dbservice)
        session.flush()

        plenaries = PlenaryCollection()
        plenaries.append(Plenary.get_plenary(dbservice))
        for dbinstance in dbservice.instances:
            plenaries.append(Plenary.get_plenary(dbinstance))
        plenaries.write()

        return
Example #26
def del_cluster(session, logger, dbcluster, config):
    cluster = str(dbcluster.name)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError(
            "%s is still in use by clusters: %s." %
            (format(dbcluster), ", ".join([c.name
                                           for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    resources = PlenaryCollection(logger=logger)
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            resources.append(Plenary.get_plenary(res))
    domain = dbcluster.branch.name
    session.delete(dbcluster)

    session.flush()

    key = cluster_plenary.get_remove_key()
    with CompileKey.merge([key, resources.get_remove_key()]):
        cluster_plenary.cleanup(domain, locked=True)
        # And we also want to remove the profile itself
        profiles = config.get("broker", "profilesdir")
        # Only one of these should exist, but it doesn't hurt
        # to try to clean up both.
        xmlfile = os.path.join(profiles, "clusters", cluster + ".xml")
        remove_file(xmlfile, logger=logger)
        xmlgzfile = xmlfile + ".gz"
        remove_file(xmlgzfile, logger=logger)
        # And the cached template created by ant
        remove_file(os.path.join(config.get("broker", "quattordir"), "objects",
                                 "clusters", cluster + TEMPLATE_EXTENSION),
                    logger=logger)
        resources.remove(locked=True)

    build_index(config, session, profiles, logger=logger)

    return
Example #27
def del_cluster(session, logger, dbcluster, config):
    cluster = str(dbcluster.name)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError("%s is still in use by clusters: %s." %
                            (format(dbcluster),
                             ", ".join([c.name for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in  dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    resources = PlenaryCollection(logger=logger)
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            resources.append(Plenary.get_plenary(res))
    domain = dbcluster.branch.name
    session.delete(dbcluster)

    session.flush()

    key = cluster_plenary.get_remove_key()
    with CompileKey.merge([key, resources.get_remove_key()]):
        cluster_plenary.cleanup(domain, locked=True)
        # And we also want to remove the profile itself
        profiles = config.get("broker", "profilesdir")
        # Only one of these should exist, but it doesn't hurt
        # to try to clean up both.
        xmlfile = os.path.join(profiles, "clusters", cluster + ".xml")
        remove_file(xmlfile, logger=logger)
        xmlgzfile = xmlfile + ".gz"
        remove_file(xmlgzfile, logger=logger)
        # And the cached template created by ant
        remove_file(os.path.join(config.get("broker",
                                                 "quattordir"),
                                 "objects", "clusters",
                                 cluster + TEMPLATE_EXTENSION),
                    logger=logger)
        resources.remove(locked=True)

    build_index(config, session, profiles, logger=logger)

    return
Example #28
    def render(self, session, logger, machine, disk, controller, size, all,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = session.query(Disk).filter_by(machine=dbmachine)
        if disk:
            q = q.filter_by(device_name=disk)
        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." % (controller,
                                                     ", ".join(controller_types)
                                                     ))
            q = q.filter_by(controller_type=controller)
        if size is not None:
            q = q.filter_by(capacity=size)
        results = q.all()

        if len(results) == 0:
            raise NotFoundException("No disks found.")
        elif len(results) > 1 and not all:
            raise ArgumentError("More than one matching disks found.  "
                                "Use --all to delete them all.")
        for result in results:
            session.delete(result)

        session.flush()
        session.expire(dbmachine, ['disks'])

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        dbcontainer = dbmachine.vm_container
        if dbcontainer:
            plenaries.append(Plenary.get_plenary(dbcontainer, logger=logger))

        plenaries.write()

        return
Example #29
    def render(self, session, logger, rack, row, column, room, building,
               bunker, fullname, default_dns_domain, comments, **arguments):
        dbrack = get_location(session, rack=rack)
        if row is not None:
            dbrack.rack_row = row
        if column is not None:
            dbrack.rack_column = column
        if fullname is not None:
            dbrack.fullname = fullname
        if comments is not None:
            dbrack.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    default_dns_domain,
                                                    compel=True)
                dbrack.default_dns_domain = dbdns_domain
            else:
                dbrack.default_dns_domain = None
        if bunker or room or building:
            dbparent = get_location(session,
                                    bunker=bunker,
                                    room=room,
                                    building=building)
            # This one would change the template's locations hence forbidden
            if dbparent.building != dbrack.building:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change buildings.  {0} is in {1} "
                                    "while {2} is in {3}.".format(
                                        dbparent, dbparent.building, dbrack,
                                        dbrack.building))
            dbrack.update_parent(parent=dbparent)

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        q = session.query(Machine)
        q = q.filter(Machine.location_id.in_(dbrack.offspring_ids()))
        for dbmachine in q:
            plenaries.append(Plenary.get_plenary(dbmachine))
        plenaries.write()
Example #30
    def render(self, session, logger, service, max_clients, default,
               need_client_list, comments, **arguments):
        dbservice = Service.get_unique(session, name=service, compel=True)

        if default:
            dbservice.max_clients = None
        elif max_clients is not None:
            dbservice.max_clients = max_clients

        if need_client_list is not None:
            dbservice.need_client_list = need_client_list

        if comments is not None:
            dbservice.comments = comments

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbservice))
        for dbinstance in dbservice.instances:
            plenaries.append(Plenary.get_plenary(dbinstance))
        plenaries.write()

        return
Example #31
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0 and
            dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError("The personality %s for %s is not allowed "
                                "by the cluster. Specify --personality "
                                "and provide one of %s" %
                                (dbhost.personality, dbhost.fqdn,
                                 ", ".join([x.name for x in
                                            dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(dbhost,
                                                                   dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res, logger)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError("{0:l} already in {1:l}, use "
                                    "aq reconfigure to change personality."
                                    .format(dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Demote or promote the host so its status matches the new cluster's
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostAlmostready.get_instance(session)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostReady.get_instance(session)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        with CompileKey.merge([chooser.get_key(), plenaries.get_key()]):
            plenaries.stash()
            try:
                chooser.write_plenary_templates(locked=True)
                plenaries.write(locked=True)
            except:
                chooser.restore_stash()
                plenaries.restore_stash()
                raise

        return
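The node-index computation above relies on a pigeonhole argument: with N current members, the candidate set {0, ..., N} has N+1 values, so at least one survives after removing the indexes already in use, even when stale indexes from a formerly bigger cluster are present. A standalone sketch of the same trick (a hypothetical helper, not part of the broker):

def smallest_free_index(used_indexes, member_count):
    # member_count + 1 candidates guarantee at least one is free,
    # even if leftover indexes exceed the current cluster size.
    candidates = set(range(member_count + 1))
    candidates -= set(used_indexes)
    return min(candidates)

assert smallest_free_index([0, 1, 2], 3) == 3
assert smallest_free_index([0, 2, 7], 3) == 1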
Example #32
    def resetadvertisedstatus_list(self, session, logger, dbhosts):
        branches = {}
        authors = {}
        failed = []
        compileable = []
        # Do any cross-list or dependency checks
        for dbhost in dbhosts:
            # Validate branches and domains only if the archetype is
            # compileable
            if dbhost.archetype.is_compileable:
                compileable.append(dbhost.fqdn)
                if dbhost.branch in branches:
                    branches[dbhost.branch].append(dbhost)
                else:
                    branches[dbhost.branch] = [dbhost]
                if dbhost.sandbox_author in authors:
                    authors[dbhost.sandbox_author].append(dbhost)
                else:
                    authors[dbhost.sandbox_author] = [dbhost]

            if dbhost.status.name == 'ready':
                failed.append("{0:l} is in ready status, "
                              "advertised status can be reset only "
                              "when host is in non ready state".format(dbhost))
        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = [
                "{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                for branch in keys
            ]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = [
                "%s hosts with sandbox author %s" %
                (len(authors[author]), author.name) for author in keys
            ]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            dbhost.advertise_status = False
            session.add(dbhost)
            plenaries.append(PlenaryHost(dbhost, logger=logger))

        session.flush()

        dbbranch = branches.keys()[0]
        dbauthor = authors.keys()[0]
        key = CompileKey.merge([plenaries.get_write_key()])
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.write(locked=True)
            td = TemplateDomain(dbbranch, dbauthor, logger=logger)
            td.compile(session, only=compileable, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
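The branch/author bookkeeping above is plain dict grouping; collections.defaultdict expresses it without the explicit membership checks. A small illustrative sketch, with tuples standing in for host objects:

from collections import defaultdict

hosts = [("prod", "a.example.com"), ("prod", "b.example.com"),
         ("dev", "c.example.com")]  # (branch, fqdn) stand-ins
branches = defaultdict(list)
for branch, fqdn in hosts:
    branches[branch].append(fqdn)

if len(branches) > 1:
    # Mirror the error above: report the smallest groups first.
    stats = sorted(branches.items(), key=lambda kv: len(kv[1]))
    print("\n".join("%d hosts in %s" % (len(fqdns), branch)
                    for branch, fqdns in stats))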
Example #33
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenaries = PlenaryCollection(logger=logger)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if clearchassis:
            del dbmachine.chassis_slot[:]

        remove_plenaries = PlenaryCollection(logger=logger)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dbchassis.location):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}.".format(dblocation,
                                                        dbslot.chassis, dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dblocation):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if dbmodel.machine_type not in ['blade', 'rackmount',
                                            'workstation', 'aurora_node',
                                            'virtual_machine']:
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.machine_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.machine_type != dbmachine.model.machine_type and \
               'virtual_machine' in [dbmodel.machine_type,
                                     dbmachine.model.machine_type]:
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.machine_type,
                                     dbmodel.machine_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, dbmachine, ip)

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            # TODO: do we want to allow moving machines between the cluster and
            # metacluster level?
            if new_holder.__class__ != old_holder.__class__:
                raise ArgumentError("Cannot move a VM between a cluster and a "
                                    "stand-alone host.")

            if cluster:
                if new_holder.metacluster != old_holder.metacluster \
                   and not allow_metacluster_change:
                    raise ArgumentError("Current {0:l} does not match "
                                        "new {1:l}."
                                        .format(old_holder.metacluster,
                                                new_holder.metacluster))

            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if not isinstance(dbdisk, VirtualDisk):
                    continue
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.name
                else:
                    resourcegroup = None
                new_share = find_share(new_holder, resourcegroup, old_share.name,
                                       error=ArgumentError)

                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                dbmachine.location = new_holder.location

            session.flush()
            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requiremets
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.append(Plenary.get_plenary(dbmachine))
        if remove_plenaries.plenaries and dbmachine.host:
            plenaries.append(Plenary.get_plenary(dbmachine.host))

        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)

            if dbmachine.host:
                # XXX: May need to reconfigure.
                pass

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            plenaries.restore_stash()
            remove_plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Ejemplo n.º 36
0
    def render(
            self,
            session,
            logger,
            cluster,
            personality,
            max_members,
            fix_location,
            down_hosts_threshold,
            maint_threshold,
            comments,
            # ESX specific options
            switch,
            memory_capacity,
            clear_overrides,
            vm_to_host_ratio,
            **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster." %
                                format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count,
                                   host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError(
                    "%s has %d hosts bound, which exceeds "
                    "the requested limit %d." %
                    (format(dbcluster), current_members, max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge(
            [plenaries.get_write_key(),
             remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Ejemplo n.º 37
0
    def render(self, session, logger, hostname, **arguments):
        # removing the plenary host requires a compile lock, however
        # we want to avoid deadlock by the fact that we're messing
        # with two locks here, so we want to be careful. We grab the
        # plenaryhost early on (in order to get the filenames filled
        # in from the db info before we delete it from the db. We then
        # hold onto those references until we've completed the db
        # cleanup and if all of that is successful, then we delete the
        # plenary file (which doesn't require re-evaluating any stale
        # db information) after we've released the delhost lock.
        delplenary = False

        # Any service bindings that we need to clean up afterwards
        bindings = PlenaryCollection(logger=logger)
        resources = PlenaryCollection(logger=logger)
        with DeleteKey("system", logger=logger) as key:
            # Check dependencies, translate into user-friendly message
            dbhost = hostname_to_host(session, hostname)
            host_plenary = Plenary.get_plenary(dbhost, logger=logger)
            domain = dbhost.branch.name
            deps = get_host_dependencies(session, dbhost)
            if (len(deps) != 0):
                deptext = "\n".join(["  %s" % d for d in deps])
                raise ArgumentError("Cannot delete host %s due to the "
                                    "following dependencies:\n%s." %
                                    (hostname, deptext))

            archetype = dbhost.archetype.name
            dbmachine = dbhost.machine
            oldinfo = DSDBRunner.snapshot_hw(dbmachine)

            ip = dbmachine.primary_ip
            fqdn = dbmachine.fqdn

            for si in dbhost.services_used:
                plenary = PlenaryServiceInstanceServer(si)
                bindings.append(plenary)
                logger.info(
                    "Before deleting host '%s', removing binding '%s'" %
                    (fqdn, si.cfg_path))

            del dbhost.services_used[:]

            if dbhost.resholder:
                for res in dbhost.resholder.resources:
                    resources.append(Plenary.get_plenary(res))

            # In case of Zebra, the IP may be configured on multiple interfaces
            for iface in dbmachine.interfaces:
                if ip in iface.addresses:
                    iface.addresses.remove(ip)

            if dbhost.cluster:
                dbcluster = dbhost.cluster
                dbcluster.hosts.remove(dbhost)
                set_committed_value(dbhost, '_cluster', None)
                dbcluster.validate()

            dbdns_rec = dbmachine.primary_name
            dbmachine.primary_name = None
            dbmachine.host = None
            session.delete(dbhost)
            delete_dns_record(dbdns_rec)
            session.flush()
            delplenary = True

            if dbmachine.vm_container:
                bindings.append(Plenary.get_plenary(dbmachine.vm_container))

            if archetype != 'aurora' and ip is not None:
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                               "DSDB" % hostname)
            if archetype == 'aurora':
                logger.client_info("WARNING: removing host %s from AQDB and "
                                   "*not* changing DSDB." % hostname)

            # Past the point of no return... commit the transaction so
            # that we can free the delete lock.
            session.commit()

        # Only if we got here with no exceptions do we clean the template
        # Trying to clean up after any errors here is really difficult
        # since the changes to dsdb have already been made.
        if (delplenary):
            key = host_plenary.get_remove_key()
            with CompileKey.merge(
                [key,
                 bindings.get_write_key(),
                 resources.get_remove_key()]) as key:
                host_plenary.cleanup(domain, locked=True)
                # And we also want to remove the profile itself
                profiles = self.config.get("broker", "profilesdir")
                # Only one of these should exist, but it doesn't hurt
                # to try to clean up both.
                xmlfile = os.path.join(profiles, fqdn + ".xml")
                remove_file(xmlfile, logger=logger)
                xmlgzfile = xmlfile + ".gz"
                remove_file(xmlgzfile, logger=logger)
                # And the cached template created by ant
                remove_file(os.path.join(
                    self.config.get("broker", "quattordir"), "objects",
                    fqdn + TEMPLATE_EXTENSION),
                            logger=logger)
                bindings.write(locked=True)
                resources.remove(locked=True)

            build_index(self.config, session, profiles, logger=logger)

        return
Ejemplo n.º 38
0
    def render(self, session, logger, model, vendor, newmodel, newvendor,
               comments, leave_existing, **arguments):
        for (arg, value) in arguments.items():
            # Cleaning the strings isn't strictly necessary but allows
            # for simple equality checks below and removes the need to
            # call refresh().
            if arg in [
                    'newmodel', 'newvendor', 'machine_type', 'cpuname',
                    'cpuvendor', 'disktype', 'diskcontroller', 'nicmodel',
                    'nicvendor'
            ]:
                if value is not None:
                    arguments[arg] = value.lower().strip()

        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   compel=True)

        if leave_existing and (newmodel or newvendor):
            raise ArgumentError("Cannot update model name or vendor without "
                                "updating any existing machines.")

        fix_existing = not leave_existing
        dbmachines = set()

        # The sub-branching here is a little difficult to read...
        # Basically, there are three different checks to handle
        # setting a new vendor, a new name, or both.
        if newvendor:
            dbnewvendor = Vendor.get_unique(session, newvendor, compel=True)
            if newmodel:
                Model.get_unique(session,
                                 name=newmodel,
                                 vendor=dbnewvendor,
                                 preclude=True)
            else:
                Model.get_unique(session,
                                 name=dbmodel.name,
                                 vendor=dbnewvendor,
                                 preclude=True)
            dbmodel.vendor = dbnewvendor
        if newmodel:
            if not newvendor:
                Model.get_unique(session,
                                 name=newmodel,
                                 vendor=dbmodel.vendor,
                                 preclude=True)
            dbmodel.name = newmodel
        if newvendor or newmodel:
            q = session.query(Machine).filter_by(model=dbmodel)
            dbmachines.update(q.all())

        # For now, can't update machine_type.  There are too many spots
        # that special case things like aurora_node or virtual_machine to
        # know that the transistion is safe.  If there is enough need we
        # can always add those transitions later.
        if arguments['machine_type'] is not None:
            raise UnimplementedError("Cannot (yet) change a model's "
                                     "machine type.")

        if comments:
            dbmodel.comments = comments
            # The comments also do not affect the templates.

        cpu_args = ['cpuname', 'cpuvendor', 'cpuspeed']
        cpu_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in cpu_args])
        cpu_values = [v for v in cpu_info.values() if v is not None]
        nic_args = ['nicmodel', 'nicvendor']
        nic_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in nic_args])
        nic_values = [v for v in nic_info.values() if v is not None]
        spec_args = [
            'cpunum', 'memory', 'disktype', 'diskcontroller', 'disksize',
            'nics'
        ]
        specs = dict([(self.argument_lookup[arg], arguments[arg])
                      for arg in spec_args])
        spec_values = [v for v in specs.values() if v is not None]

        if not dbmodel.machine_specs:
            if cpu_values or nic_values or spec_values:
                if not cpu_values or len(spec_values) < len(spec_args):
                    raise ArgumentError("Missing required parameters to store "
                                        "machine specs for the model.  Please "
                                        "give all CPU, disk, RAM, and NIC "
                                        "count information.")
                dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
                if nic_values:
                    dbnic = Model.get_unique(session,
                                             compel=True,
                                             machine_type='nic',
                                             **nic_info)
                else:
                    dbnic = Model.default_nic_model(session)
                dbmachine_specs = MachineSpecs(model=dbmodel,
                                               cpu=dbcpu,
                                               nic_model=dbnic,
                                               **specs)
                session.add(dbmachine_specs)

        # Anything below that updates specs should have been verified above.

        if cpu_values:
            dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
            self.update_machine_specs(model=dbmodel,
                                      dbmachines=dbmachines,
                                      attr='cpu',
                                      value=dbcpu,
                                      fix_existing=fix_existing)

        for arg in ['memory', 'cpunum']:
            if arguments[arg] is not None:
                self.update_machine_specs(model=dbmodel,
                                          dbmachines=dbmachines,
                                          attr=self.argument_lookup[arg],
                                          value=arguments[arg],
                                          fix_existing=fix_existing)

        if arguments['disktype']:
            if fix_existing:
                raise ArgumentError("Please specify --leave_existing to "
                                    "change the model disktype.  This cannot "
                                    "be converted automatically.")
            dbmodel.machine_specs.disk_type = arguments['disktype']

        for arg in ['diskcontroller', 'disksize']:
            if arguments[arg] is not None:
                self.update_disk_specs(model=dbmodel,
                                       dbmachines=dbmachines,
                                       attr=self.argument_lookup[arg],
                                       value=arguments[arg],
                                       fix_existing=fix_existing)

        if nic_values:
            dbnic = Model.get_unique(session, compel=True, **nic_info)
            self.update_interface_specs(model=dbmodel,
                                        dbmachines=dbmachines,
                                        value=dbnic,
                                        fix_existing=fix_existing)

        if arguments['nics'] is not None:
            dbmodel.machine_specs.nic_count = arguments['nics']

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        for dbmachine in dbmachines:
            plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))
        plenaries.write()

        return
Ejemplo n.º 39
0
    def render(self, session, logger, city, timezone, campus,
               default_dns_domain, comments, **arguments):
        dbcity = get_location(session, city=city)

        # Updating machine templates is expensive, so only do that if needed
        update_machines = False

        if timezone is not None:
            dbcity.timezone = timezone
        if comments is not None:
            dbcity.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    default_dns_domain,
                                                    compel=True)
                dbcity.default_dns_domain = dbdns_domain
            else:
                dbcity.default_dns_domain = None

        prev_campus = None
        dsdb_runner = None
        dsdb_runner = DSDBRunner(logger=logger)
        if campus is not None:
            dbcampus = get_location(session, campus=campus)
            # This one would change the template's locations hence forbidden
            if dbcampus.hub != dbcity.hub:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change campus.  {0} is in {1:l}, "
                                    "while {2:l} is in {3:l}.".format(
                                        dbcampus, dbcampus.hub, dbcity,
                                        dbcity.hub))

            if dbcity.campus:
                prev_campus = dbcity.campus
            dbcity.update_parent(parent=dbcampus)
            update_machines = True

        session.flush()

        if campus is not None:
            if prev_campus:
                prev_name = prev_campus.name
            else:
                prev_name = None
            dsdb_runner.update_city(city, dbcampus.name, prev_name)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcity))

        if update_machines:
            q = session.query(Machine)
            q = q.filter(Machine.location_id.in_(dbcity.offspring_ids()))
            logger.client_info("Updating %d machines..." % q.count())
            for dbmachine in q:
                plenaries.append(Plenary.get_plenary(dbmachine))

        count = plenaries.write()
        dsdb_runner.commit_or_rollback()
        logger.client_info("Flushed %d templates." % count)
Ejemplo n.º 40
0
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        self.check_cluster_type(dbcluster, forbid=MetaCluster)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if vm_to_host_ratio:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            dbcluster.vm_count = vm_count
            dbcluster.host_count = host_count

        if switch is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbnetdev = NetworkDevice.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbnetdev))
            else:
                dbnetdev = None
            dbcluster.network_device = dbnetdev

        if memory_capacity is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = memory_capacity

        if clear_overrides is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = None

        update_cluster_location(session, logger, dbcluster, fix_location,
                                plenaries, **arguments)

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality

        if max_members is not None:
            # Allow removing the restriction
            if max_members < 0:
                max_members = None
            dbcluster.max_hosts = max_members

        if comments is not None:
            dbcluster.comments = comments

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)

        session.flush()
        dbcluster.validate()

        plenaries.write(locked=False)

        return
Ejemplo n.º 41
0
    def render(self, session, logger, building, city, address, fullname,
               default_dns_domain, comments, **arguments):
        dbbuilding = get_location(session, building=building)

        old_city = dbbuilding.city

        dsdb_runner = DSDBRunner(logger=logger)

        if address is not None:
            old_address = dbbuilding.address
            dbbuilding.address = address
            dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,
                                        old_address)
        if fullname is not None:
            dbbuilding.fullname = fullname
        if comments is not None:
            dbbuilding.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    default_dns_domain,
                                                    compel=True)
                dbbuilding.default_dns_domain = dbdns_domain
            else:
                dbbuilding.default_dns_domain = None

        plenaries = PlenaryCollection(logger=logger)
        if city:
            dbcity = get_location(session, city=city)

            # This one would change the template's locations hence forbidden
            if dbcity.hub != dbbuilding.hub:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change hubs. {0} is in {1} "
                                    "while {2} is in {3}.".format(
                                        dbcity, dbcity.hub, dbbuilding,
                                        dbbuilding.hub))

            # issue svcmap warnings
            maps = 0
            for map_type in [ServiceMap, PersonalityServiceMap]:
                maps = maps + session.query(map_type).\
                    filter_by(location=old_city).count()

            if maps > 0:
                logger.client_info("There are {0} service(s) mapped to the "
                                   "old location of the ({1:l}), please "
                                   "review and manually update mappings for "
                                   "the new location as needed.".format(
                                       maps, dbbuilding.city))

            dbbuilding.update_parent(parent=dbcity)

            if old_city.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.del_campus_building(old_city.campus, building)

            if dbcity.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.add_campus_building(dbcity.campus, building)

            query = session.query(Machine)
            query = query.filter(
                Machine.location_id.in_(dbcity.offspring_ids()))

            for dbmachine in query:
                plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))

        session.flush()

        if plenaries.plenaries:
            with plenaries.get_write_key() as key:
                plenaries.stash()
                try:
                    plenaries.write(locked=True)
                    dsdb_runner.commit_or_rollback()
                except:
                    plenaries.restore_stash()
        else:
            dsdb_runner.commit_or_rollback()

        return
Ejemplo n.º 42
0
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(dbmachine,
                                                                 addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Ejemplo n.º 43
0
    def render(self, session, logger, machine, dbuser, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)

        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            remove_plenaries.append(Plenary.get_plenary(
                dbmachine.vm_container))
            dbcontainer = dbmachine.vm_container.holder.holder_object
        else:
            dbcontainer = None

        if dbmachine.host:
            raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                                "deleted.".format(dbmachine, dbmachine.host))
        addrs = []
        for addr in dbmachine.all_addresses():
            addrs.append("%s: %s" % (addr.logical_name, addr.ip))
        if addrs:
            addrmsg = ", ".join(addrs)
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format(
                                    dbmachine, addrmsg))

        session.delete(dbmachine)
        session.flush()

        key = remove_plenaries.get_remove_key()
        if dbcontainer:
            plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
            key = CompileKey.merge([key, plenary_container.get_write_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            if dbcontainer:
                plenary_container.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            if dbcontainer:
                plenary_container.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Ejemplo n.º 44
0
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_basic("cluster", cluster)
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding clusters to {0:l} is not allowed.".format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section,
                                                 "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype,
                                                 "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {
            'name': cluster,
            'location_constraint': dbloc,
            'personality': dbpersonality,
            'max_hosts': max_members,
            'branch': dbbranch,
            'sandbox_author': dbauthor,
            'down_hosts_threshold': dht,
            'down_hosts_percent': down_hosts_pct,
            'status': dbstatus,
            'comments': comments
        }

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'switch'):
            kw['switch'] = Switch.get_unique(session, switch, compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.validate_membership(dbcluster)
            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()
        session.refresh(dbcluster)

        plenaries.append(Plenary.get_plenary(dbcluster))

        key = plenaries.get_write_key()

        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Ejemplo n.º 45
0
 def __init__(self, dbcluster, logger=LOGGER):
     PlenaryCollection.__init__(self, logger=logger)
     self.dbobj = dbcluster
     self.plenaries.append(
         PlenaryMetaClusterObject(dbcluster, logger=logger))
     self.plenaries.append(PlenaryMetaClusterData(dbcluster, logger=logger))
Ejemplo n.º 46
0
    def render(self, session, logger, domain, sandbox, cluster, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Managing clusters to {0:l} is not allowed.".format(dbbranch))

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbsource = dbcluster.branch
        dbsource_author = dbcluster.sandbox_author
        old_branch = dbcluster.branch.name

        if not force:
            validate_branch_commits(dbsource, dbsource_author, dbbranch,
                                    dbauthor, logger, self.config)

        if dbcluster.metacluster:
            raise ArgumentError(
                "{0.name} is member of metacluster {1.name}, "
                "it must be managed at metacluster level.".format(
                    dbcluster, dbcluster.metacluster))

        old_branch = dbcluster.branch.name
        plenaries = PlenaryCollection(logger=logger)

        # manage at metacluster level
        if dbcluster.cluster_type == 'meta':
            clusters = dbcluster.members

            dbcluster.branch = dbbranch
            dbcluster.sandbox_author = dbauthor
            session.add(dbcluster)
            plenaries.append(Plenary.get_plenary(dbcluster))
        else:
            clusters = [dbcluster]

        for cluster in clusters:
            # manage at cluster level
            # Need to set the new branch *before* creating the plenary objects.
            cluster.branch = dbbranch
            cluster.sandbox_author = dbauthor
            session.add(cluster)
            plenaries.append(Plenary.get_plenary(cluster))
            for dbhost in cluster.hosts:
                dbhost.branch = dbbranch
                dbhost.sandbox_author = dbauthor
                session.add(dbhost)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # We're crossing domains, need to lock everything.
        key = CompileKey(logger=logger)
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.cleanup(old_branch, locked=True)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Ejemplo n.º 47
0
    def render(self, session, logger, interface, machine, mac, automac, model,
               vendor, pg, autopg, type, comments, **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)
        audit_results = []

        q = session.query(Interface)
        q = q.filter_by(name=interface, hardware_entity=dbmachine)
        if q.first():
            raise ArgumentError(
                "Machine %s already has an interface named %s." %
                (machine, interface))

        if not type:
            type = 'public'
            management_types = ['bmc', 'ilo', 'ipmi']
            for mtype in management_types:
                if interface.startswith(mtype):
                    type = 'management'
                    break

            if interface.startswith("bond"):
                type = 'bonding'
            elif interface.startswith("br"):
                type = 'bridge'

            # Test it last, VLANs can be added on top of almost anything
            if '.' in interface:
                type = 'vlan'

        if type == "oa" or type == "loopback":
            raise ArgumentError("Interface type '%s' is not valid for "
                                "machines." % type)

        bootable = None
        if type == 'public':
            if interface == 'eth0':
                bootable = True
            else:
                bootable = False

        dbmanager = None
        pending_removals = PlenaryCollection()
        dsdb_runner = DSDBRunner(logger=logger)
        if mac:
            prev = session.query(Interface).filter_by(mac=mac).first()
            if prev and prev.hardware_entity == dbmachine:
                raise ArgumentError("{0} already has an interface with MAC "
                                    "address {1}.".format(dbmachine, mac))
            # Is the conflicting interface something that can be
            # removed?  It is if:
            # - we are currently attempting to add a management interface
            # - the old interface belongs to a machine
            # - the old interface is associated with a host
            # - that host was blindly created, and thus can be removed safely
            if prev and type == 'management' and \
               prev.hardware_entity.hardware_type == 'machine' and \
               prev.hardware_entity.host and \
               prev.hardware_entity.host.status.name == 'blind':
                # FIXME: Is this just always allowed?  Maybe restrict
                # to only aqd-admin and the host itself?
                dummy_machine = prev.hardware_entity
                dummy_ip = dummy_machine.primary_ip
                old_fqdn = str(dummy_machine.primary_name)
                old_iface = prev.name
                old_mac = prev.mac
                old_network = get_net_id_from_ip(session, dummy_ip)
                self.remove_prev(session, logger, prev, pending_removals)
                session.flush()
                dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface,
                                                old_mac)
                self.consolidate_names(session, logger, dbmachine,
                                       dummy_machine.label, pending_removals)
                # It seems like a shame to throw away the IP address that
                # had been allocated for the blind host.  Try to use it
                # as it should be used...
                dbmanager = self.add_manager(session, logger, dbmachine,
                                             dummy_ip, old_network)
            elif prev:
                msg = describe_interface(session, prev)
                raise ArgumentError("MAC address %s is already in use: %s." %
                                    (mac, msg))
        elif automac:
            mac = self.generate_mac(session, dbmachine)
            audit_results.append(('mac', mac))
        else:
            #Ignore now that Mac Address can be null
            pass

        if pg is not None:
            port_group = verify_port_group(dbmachine, pg)
        elif autopg:
            port_group = choose_port_group(session, logger, dbmachine)
            audit_results.append(('pg', port_group))
        else:
            port_group = None

        dbinterface = get_or_create_interface(session,
                                              dbmachine,
                                              name=interface,
                                              vendor=vendor,
                                              model=model,
                                              interface_type=type,
                                              mac=mac,
                                              bootable=bootable,
                                              port_group=port_group,
                                              comments=comments,
                                              preclude=True)

        # So far, we're *only* creating a manager if we happen to be
        # removing a blind entry and we can steal its IP address.
        if dbmanager:
            assign_address(dbinterface, dbmanager.ip, dbmanager.network)

        session.add(dbinterface)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if pending_removals and dbmachine.host:
            # Not an exact test, but the file won't be re-written
            # if the contents are the same so calling too often is
            # not a major expense.
            plenaries.append(Plenary.get_plenary(dbmachine.host))
        # Even though there may be removals going on the write key
        # should be sufficient here.
        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            pending_removals.stash()
            plenaries.write(locked=True)
            pending_removals.remove(locked=True)

            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update host in DSDB")
        except:
            plenaries.restore_stash()
            pending_removals.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        if dbmachine.host:
            # FIXME: reconfigure host
            pass

        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
Ejemplo n.º 48
0
    def render(self, session, logger, model, vendor, newmodel, newvendor,
               comments, leave_existing, **arguments):
        for (arg, value) in arguments.items():
            # Cleaning the strings isn't strictly necessary but allows
            # for simple equality checks below and removes the need to
            # call refresh().
            if arg in ['newmodel', 'newvendor', 'machine_type',
                       'cpuname', 'cpuvendor', 'disktype', 'diskcontroller',
                       'nicmodel', 'nicvendor']:
                if value is not None:
                    arguments[arg] = value.lower().strip()

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if leave_existing and (newmodel or newvendor):
            raise ArgumentError("Cannot update model name or vendor without "
                                "updating any existing machines.")

        fix_existing = not leave_existing
        dbmachines = set()

        # The sub-branching here is a little difficult to read...
        # Basically, there are three different checks to handle
        # setting a new vendor, a new name, or both.
        if newvendor:
            dbnewvendor = Vendor.get_unique(session, newvendor, compel=True)
            if newmodel:
                Model.get_unique(session, name=newmodel, vendor=dbnewvendor,
                                 preclude=True)
            else:
                Model.get_unique(session, name=dbmodel.name,
                                 vendor=dbnewvendor, preclude=True)
            dbmodel.vendor = dbnewvendor
        if newmodel:
            if not newvendor:
                Model.get_unique(session, name=newmodel, vendor=dbmodel.vendor,
                                 preclude=True)
            dbmodel.name = newmodel
        if newvendor or newmodel:
            q = session.query(Machine).filter_by(model=dbmodel)
            dbmachines.update(q.all())

        # For now, can't update machine_type.  There are too many spots
        # that special case things like aurora_node or virtual_machine to
        # know that the transistion is safe.  If there is enough need we
        # can always add those transitions later.
        if arguments['machine_type'] is not None:
            raise UnimplementedError("Cannot (yet) change a model's "
                                     "machine type.")

        if comments:
            dbmodel.comments = comments
            # The comments also do not affect the templates.

        cpu_args = ['cpuname', 'cpuvendor', 'cpuspeed']
        cpu_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in cpu_args])
        cpu_values = [v for v in cpu_info.values() if v is not None]
        nic_args = ['nicmodel', 'nicvendor']
        nic_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in nic_args])
        nic_values = [v for v in nic_info.values() if v is not None]
        spec_args = ['cpunum', 'memory', 'disktype', 'diskcontroller',
                     'disksize', 'nics']
        specs = dict([(self.argument_lookup[arg], arguments[arg])
                      for arg in spec_args])
        spec_values = [v for v in specs.values() if v is not None]

        if not dbmodel.machine_specs:
            if cpu_values or nic_values or spec_values:
                if not cpu_values or len(spec_values) < len(spec_args):
                    raise ArgumentError("Missing required parameters to store "
                                        "machine specs for the model.  Please "
                                        "give all CPU, disk, RAM, and NIC "
                                        "count information.")
                dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
                if nic_values:
                    dbnic = Model.get_unique(session, compel=True,
                                             machine_type='nic', **nic_info)
                else:
                    dbnic = Model.default_nic_model(session)
                dbmachine_specs = MachineSpecs(model=dbmodel, cpu=dbcpu,
                                               nic_model=dbnic, **specs)
                session.add(dbmachine_specs)

        # Anything below that updates specs should have been verified above.

        if cpu_values:
            dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
            self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                      attr='cpu', value=dbcpu,
                                      fix_existing=fix_existing)

        for arg in ['memory', 'cpunum']:
            if arguments[arg] is not None:
                self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                          attr=self.argument_lookup[arg],
                                          value=arguments[arg],
                                          fix_existing=fix_existing)

        if arguments['disktype']:
            if fix_existing:
                raise ArgumentError("Please specify --leave_existing to "
                                    "change the model disktype.  This cannot "
                                    "be converted automatically.")
            dbmodel.machine_specs.disk_type = arguments['disktype']

        for arg in ['diskcontroller', 'disksize']:
            if arguments[arg] is not None:
                self.update_disk_specs(model=dbmodel, dbmachines=dbmachines,
                                       attr=self.argument_lookup[arg],
                                       value=arguments[arg],
                                       fix_existing=fix_existing)

        if nic_values:
            dbnic = Model.get_unique(session, compel=True, **nic_info)
            self.update_interface_specs(model=dbmodel, dbmachines=dbmachines,
                                        value=dbnic, fix_existing=fix_existing)

        if arguments['nics'] is not None:
            dbmodel.machine_specs.nic_count = arguments['nics']

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        for dbmachine in dbmachines:
            plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))
        plenaries.write()

        return
Ejemplo n.º 49
0
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[
                                      subqueryload('parents'),
                                      joinedload('parents.dns_maps')
                                  ],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation,
                                                  dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   compel=True)

        if dbmodel.machine_type not in [
                'blade', 'rackmount', 'workstation', 'aurora_node',
                'virtual_machine'
        ]:
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %(type)s.  Try 'add %(type)s'." %
                                {"type": dbmodel.machine_type})

        if cluster:
            if dbmodel.machine_type != 'virtual_machine':
                raise ArgumentError("Only virtual machines can have a cluster "
                                    "attribute.")
            dbcluster = Cluster.get_unique(session,
                                           cluster,
                                           compel=ArgumentError)
            # This test could be either archetype or cluster_type
            if dbcluster.personality.archetype.name != 'esx_cluster':
                raise ArgumentError("Can only add virtual machines to "
                                    "clusters with archetype esx_cluster.")
            # TODO: implement the same check for vmhosts.
            if dbcluster.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")
            if dblocation and dbcluster.location_constraint != dblocation:
                raise ArgumentError("Cannot override cluster location {0} "
                                    "with location {1}.".format(
                                        dbcluster.location_constraint,
                                        dblocation))
            dblocation = dbcluster.location_constraint
        elif dbmodel.machine_type == 'virtual_machine':
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(
                chassis=dbchassis, slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if cluster:
            if not dbcluster.resholder:
                dbcluster.resholder = ClusterResource(cluster=dbcluster)
            dbvm = VirtualMachine(machine=dbmachine,
                                  name=dbmachine.label,
                                  holder=dbcluster.resholder)
            dbcluster.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if cluster:
            plenaries.append(Plenary.get_plenary(dbcluster))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check that keeps a plenary file from being written for dummy
        # aurora hardware lives inside write() itself, so every call site
        # stays consistent without having to remember the special case.
        plenaries.write()
        return
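The ChassisSlot handling above is a get-or-create: look the row up first and
only construct and add it on a miss, so re-adding a machine to an existing
slot reuses the row. A runnable sketch of the same idiom against plain
SQLAlchemy (the Slot model is an illustrative stand-in, and the import path
assumes a 1.x-era SQLAlchemy to match the age of this code):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Slot(Base):
    __tablename__ = 'slot'
    id = Column(Integer, primary_key=True)
    chassis = Column(String)
    slot_number = Column(Integer)

def get_or_create(session, model, **kwargs):
    obj = session.query(model).filter_by(**kwargs).first()
    if not obj:
        obj = model(**kwargs)
        session.add(obj)
    return obj

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
first = get_or_create(session, Slot, chassis='ut3c1', slot_number=7)
second = get_or_create(session, Slot, chassis='ut3c1', slot_number=7)
assert first is second  # autoflush lets the second lookup find the first row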
Example #50
    def render(self,
               session,
               logger,
               hostname,
               machine,
               archetype,
               domain,
               sandbox,
               osname,
               osversion,
               buildstatus,
               personality,
               comments,
               zebra_interfaces,
               grn,
               eon_id,
               skip_dsdb_check=False,
               **arguments):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        section = "archetype_" + dbarchetype.name

        # This is for the various add_*_host commands
        if not domain and not sandbox:
            domain = self.config.get(section, "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding hosts to {0:l} is not allowed.".format(dbbranch))

        if not buildstatus:
            buildstatus = 'build'
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not personality:
            if self.config.has_option(section, "default_personality"):
                personality = self.config.get(section, "default_personality")
            else:
                personality = 'generic'
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=dbarchetype,
                                               compel=True)

        if not osname:
            if self.config.has_option(section, "default_osname"):
                osname = self.config.get(section, "default_osname")
        if not osversion:
            if self.config.has_option(section, "default_osversion"):
                osversion = self.config.get(section, "default_osversion")

        if not osname or not osversion:
            raise ArgumentError("Can not determine a sensible default OS "
                                "for archetype %s. Please use the "
                                "--osname and --osversion parameters." %
                                (dbarchetype.name))

        dbos = OperatingSystem.get_unique(session,
                                          name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)

        if (dbmachine.model.machine_type == 'aurora_node'
                and dbpersonality.archetype.name != 'aurora'):
            raise ArgumentError("Machines of type aurora_node can only be "
                                "added with archetype aurora.")

        if dbmachine.host:
            raise ArgumentError("{0:c} {0.label} is already allocated to "
                                "{1:l}.".format(dbmachine, dbmachine.host))

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
                               config=self.config)
        else:
            dbgrn = dbpersonality.owner_grn

        dbhost = Host(machine=dbmachine,
                      branch=dbbranch,
                      owner_grn=dbgrn,
                      sandbox_author=dbauthor,
                      personality=dbpersonality,
                      status=dbstatus,
                      operating_system=dbos,
                      comments=comments)
        session.add(dbhost)

        if self.config.has_option("archetype_" + archetype,
                                  "default_grn_target"):
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_" + archetype,
                                                "default_grn_target")))

        if zebra_interfaces:
            # --autoip does not make sense for Zebra (at least not the way it's
            # implemented currently)
            dbinterface = None
        else:
            dbinterface = get_boot_interface(dbmachine)

        # generate_ip() below may return None. This can only happen
        # (currently) via add_aurora_host, add_windows_host, or possibly by
        # bypassing the aq client and posting a request directly.
        audit_results = []
        ip = generate_ip(session,
                         logger,
                         dbinterface,
                         audit_results=audit_results,
                         **arguments)

        dbdns_rec, newly_created = grab_address(session,
                                                hostname,
                                                ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        dbmachine.primary_name = dbdns_rec

        # Fix up auxiliary addresses to point to the primary name by default
        if ip:
            dns_env = dbdns_rec.fqdn.dns_environment

            for addr in dbmachine.all_addresses():
                if addr.interface.interface_type == "management":
                    continue
                if addr.service_address_id:  # pragma: no cover
                    continue
                for rec in addr.dns_records:
                    if rec.fqdn.dns_environment == dns_env:
                        rec.reverse_ptr = dbdns_rec.fqdn

        if zebra_interfaces:
            if not ip:
                raise ArgumentError(
                    "Zebra configuration requires an IP address.")
            dbsrv_addr = self.assign_zebra_address(session, dbmachine,
                                                   dbdns_rec, zebra_interfaces)
        else:
            if ip:
                if not dbinterface:
                    raise ArgumentError(
                        "You have specified an IP address for the "
                        "host, but {0:l} does not have a bootable "
                        "interface.".format(dbmachine))
                assign_address(dbinterface, ip, dbdns_rec.network)
            dbsrv_addr = None

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbsrv_addr:
            plenaries.append(Plenary.get_plenary(dbsrv_addr))

        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)

            # XXX: This (and some of the code above) is horrible.  There
            # should be a generic/configurable hook here that could kick
            # in based on archetype and/or domain.
            dsdb_runner = DSDBRunner(logger=logger)
            if dbhost.archetype.name == 'aurora':
                # For aurora, check that DSDB has a record of the host.
                if not skip_dsdb_check:
                    try:
                        dsdb_runner.show_host(hostname)
                    except ProcessException as e:
                        raise ArgumentError("Could not find host in DSDB: %s" %
                                            e)
            elif not dbmachine.primary_ip:
                logger.info("No IP for %s, not adding to DSDB." %
                            dbmachine.fqdn)
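The truncated tail above is the broker's usual write-under-lock idiom: take
the compile key, write the plenaries, roll the stashed templates back if
anything fails, and always release the key. A self-contained sketch of that
control flow; FakePlenaries and the plain threading.Lock are stand-ins, not
the real lock_queue/CompileKey API. Note the acquire happens before the try,
so a failed acquire can never trigger a spurious release:

import threading

lock_queue = threading.Lock()            # stand-in for the broker's lock_queue

class FakePlenaries(object):
    """Illustrative stand-in exposing the calls used above."""
    def write(self, locked=False):
        pass                             # pretend to render the templates
    def restore_stash(self):
        pass                             # pretend to roll the files back

def write_under_lock(plenaries):
    lock_queue.acquire()
    try:
        plenaries.write(locked=True)
    except Exception:
        plenaries.restore_stash()        # undo any partially written files
        raise
    finally:
        lock_queue.release()             # release even when write() raises

write_under_lock(FakePlenaries())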
Example #51
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing the personality within the same
        # archetype. The archetype decides things like which OS is used,
        # how the host builds (dhcp, etc.) and whether it is compilable;
        # switching all of that as a side effect seems wrong, and
        # supporting it would make both the user interface and the
        # implementation of this command considerably uglier.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters: an empty allowed_personalities
        # list means any personality is accepted (the default).
        if (len(dbcluster.allowed_personalities) > 0
                and dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError(
                "The personality %s for %s is not allowed "
                "by the cluster. Specify --personality "
                "and provide one of %s" %
                (dbhost.personality, dbhost.fqdn, ", ".join(
                    [x.name for x in dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(
                dbhost, dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError(
                    "{0:l} already in {1:l}, use "
                    "aq reconfigure to change personality.".format(
                        dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Demote or promote the host's build status to match the new cluster
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostLifecycle.get_unique(session,
                                                    'almostready',
                                                    compel=True)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostLifecycle.get_unique(session,
                                                   'ready',
                                                   compel=True)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        key = CompileKey.merge(
            [chooser.get_write_key(),
             plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
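The node-index computation above is worth isolating: build the candidate set
0..N (N being the current member count), drop the indexes already in use, and
take the smallest, so holes left by departed members are refilled and stale
indexes larger than N are harmlessly skipped. A stand-alone sketch
(next_node_index is a hypothetical helper name):

def next_node_index(used_indexes):
    candidates = set(range(len(used_indexes) + 1))
    candidates -= set(used_indexes)      # indexes beyond N just drop out
    return min(candidates)

assert next_node_index([]) == 0
assert next_node_index([0, 1, 2]) == 3
assert next_node_index([0, 2, 7]) == 1   # refills the hole at index 1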
Example #52
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_nlist_key("cluster", cluster)
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_instance(session, buildstatus)

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Adding clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section, "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype, "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {'name': cluster,
              'location_constraint': dbloc,
              'personality': dbpersonality,
              'max_hosts': max_members,
              'branch': dbbranch,
              'sandbox_author': dbauthor,
              'down_hosts_threshold': dht,
              'down_hosts_percent': down_hosts_pct,
              'status': dbstatus,
              'comments': comments}

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'network_device'):
            kw['network_device'] = NetworkDevice.get_unique(session,
                                                            switch,
                                                            compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        plenaries.write()

        return
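force_ratio() above has to turn a string such as "16:3" into a
(vm_count, host_count) pair. A hypothetical sketch of that validation; only
the "N:M" input format is taken from the snippet, the helper itself is not
the broker's code:

import re

def parse_ratio(label, value):
    m = re.match(r'^(\d+):(\d+)$', value)
    if not m:
        raise ValueError("%s must look like '16:3', got %r" % (label, value))
    vm_count, host_count = int(m.group(1)), int(m.group(2))
    if host_count == 0:
        raise ValueError("%s: the host side cannot be zero" % label)
    return vm_count, host_count

assert parse_ratio("vm_to_host_ratio", "1:1") == (1, 1)
assert parse_ratio("vm_to_host_ratio", "16:3") == (16, 3)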
Example #54
    def __init__(self, dbcluster, logger=LOGGER):
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbcluster
        self.plenaries.append(PlenaryMetaClusterObject(dbcluster, logger=logger))
        self.plenaries.append(PlenaryMetaClusterData(dbcluster, logger=logger))
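This constructor is the composite pattern in miniature: the metacluster's
"plenary" is really a collection that fans every operation out to an object
template and a data template. A minimal sketch of that shape (the Fake*
classes are stand-ins, not the real plenary classes):

class FakePlenary(object):
    def __init__(self, name):
        self.name = name
    def write(self):
        return "wrote %s" % self.name

class FakeCollection(object):
    def __init__(self):
        self.plenaries = []
    def write(self):
        # fan the call out to every member, like the collection above
        return [p.write() for p in self.plenaries]

coll = FakeCollection()
coll.plenaries.append(FakePlenary("object template"))
coll.plenaries.append(FakePlenary("data template"))
assert coll.write() == ["wrote object template", "wrote data template"]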
Example #55
    def render(self, session, logger, list, domain, sandbox, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Managing hosts into {0:l} is not allowed.".format(dbbranch))
        check_hostlist_size(self.command, self.config, list)
        dbhosts = hostlist_to_hosts(session, list)

        failed = []
        branches = defaultdict(ListType)
        authors = defaultdict(ListType)
        for dbhost in dbhosts:
            branches[dbhost.branch].append(dbhost)
            authors[dbhost.sandbox_author].append(dbhost)

            # check if any host in the list is a cluster node
            if dbhost.cluster:
                failed.append(
                    "Cluster nodes must be managed at the "
                    "cluster level; {0} is a member of {1:l}.".format(
                        dbhost.fqdn, dbhost.cluster))

        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))

        if len(branches) > 1:
            keys = sorted(branches, key=lambda x: len(branches[x]))
            stats = [
                "{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                for branch in keys
            ]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))

        # check if all hosts are from the same sandbox author
        if len(authors) > 1:
            keys = sorted(authors, key=lambda x: len(authors[x]))
            stats = [
                "{0:d} hosts with sandbox author {1}".format(
                    len(authors[author]), author.name) for author in keys
            ]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))

        # since we have already checked if all hosts in list are within the
        # same branch, we only need one dbsource to validate the branch
        dbhost = dbhosts[0]
        dbsource = dbhost.branch
        dbsource_author = dbhost.sandbox_author
        if not force:
            validate_branch_commits(dbsource, dbsource_author, dbbranch,
                                    dbauthor, logger, self.config)

        old_branch = branches.keys()[0].name

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            dbhost.branch = dbbranch
            dbhost.sandbox_author = dbauthor
            plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # We're crossing domains, need to lock everything.
        key = CompileKey(logger=logger)
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.cleanup(old_branch, locked=True)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
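The branch and author bookkeeping above applies one pattern twice: bucket the
hosts with a defaultdict, then refuse to proceed unless exactly one bucket
remains, reporting the bucket sizes otherwise. A condensed, self-contained
sketch (plain strings stand in for the ORM branch objects):

from collections import defaultdict

def require_single_branch(hosts):
    """hosts is a list of (fqdn, branch) pairs; returns the single branch."""
    branches = defaultdict(list)
    for fqdn, branch in hosts:
        branches[branch].append(fqdn)
    if len(branches) > 1:
        stats = sorted(branches.items(), key=lambda kv: len(kv[1]))
        raise ValueError("All hosts must be in the same domain or sandbox:\n%s"
                         % "\n".join("%d hosts in %s" % (len(v), k)
                                     for k, v in stats))
    return list(branches)[0]

assert require_single_branch([("a.ms.com", "prod"),
                              ("b.ms.com", "prod")]) == "prod"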