Example #1
    def render(self, session, logger, share, latency_threshold,
               comments, **arguments):

        validate_nlist_key("share", share)

        q = session.query(Share).filter_by(name=share)

        if q.count() == 0:
            raise ArgumentError("Share %s is not used on any resource and "
                                "cannot be modified" % share)
        plenaries = PlenaryCollection(logger=logger)

        for dbshare in q.all():
            if latency_threshold:
                dbshare.latency_threshold = latency_threshold

            if comments:
                dbshare.comments = comments

            plenaries.append(Plenary.get_plenary(dbshare))

        session.flush()
        plenaries.write()

        return
Example #2
    def render(self, session, logger, cluster, pancinclude, pancexclude,
               pancdebug, cleandeps, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if pancdebug:
            pancinclude = r".*"
            pancexclude = r"components/spma/functions"
        dom = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                             logger=logger)

        plenaries = PlenaryCollection(logger=logger)

        def add_cluster_plenaries(cluster):
            plenaries.append(Plenary.get_plenary(cluster))
            for host in cluster.hosts:
                plenaries.append(Plenary.get_plenary(host))

        add_cluster_plenaries(dbcluster)
        if isinstance(dbcluster, MetaCluster):
            for cluster in dbcluster.members:
                add_cluster_plenaries(cluster)

        with plenaries.get_key():
            dom.compile(
                session,
                only=plenaries.object_templates,
                panc_debug_include=pancinclude,
                panc_debug_exclude=pancexclude,
                cleandeps=cleandeps,
                locked=True,
            )
        return
Example #3
    def __init__(self, dbobj, logger, required_only=False):
        """Initialize the chooser.

        To clear out bindings that are not required, pass in
        required_only=True.

        Several staging areas and caches are set up within this object.
        The general flow is that potential service instance choices
        are kept in staging_services (dictionary of service to list of
        service instances) and finalized into chosen_services (dictionary
        of service to single service instance).

        The original state of the object is held in the cache
        original_service_instances (dictionary of service to single service
        instance).

        The instances_bound and instances_unbound lists are populated
        after chosen_services with the differences between chosen_services
        and original_service_instances.

        Subclasses should call this before starting their own
        initialization.

        """
        self.dbobj = dbobj
        self.session = object_session(dbobj)
        self.required_only = required_only
        self.logger = logger
        self.description = self.generate_description()
        self.logger.debug("Creating service Chooser for %s", self.description)
        # Cache of the service maps
        self.mapped_services = {}

        # Stores interim service instance lists
        self.staging_services = {}

        # Report as many errors as possible in one shot
        self.errors = []

        # Cache the servers backing service instances
        self.servers = {}

        # Set of service instances with a new client
        self.instances_bound = set()

        # Set of service instances losing a client
        self.instances_unbound = set()

        # Track the chosen services
        self.chosen_services = {}

        # Keep stashed plenaries for rollback purposes
        self.plenaries = PlenaryCollection(logger=self.logger)
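
The docstring above describes a funnel: candidate instances per service are
collected in staging_services, reduced to one per service, copied into
chosen_services, and diffed against original_service_instances.  A minimal
sketch of that data flow, using made-up service and instance names (plain
strings standing in for the real ORM objects):

    # Hypothetical illustration of the staging flow; names are invented.
    staging_services = {"dns": ["dns-ny-1", "dns-ny-2"],  # map candidates
                        "syslog": ["syslog-global"]}      # single choice
    original_service_instances = {"dns": "dns-ny-2"}      # prior binding

    # The reduce_* steps shrink every candidate list to one entry; here
    # choose_past_use() keeps "dns-ny-2" because it was already in use.
    staging_services["dns"] = ["dns-ny-2"]

    # finalize_service_instances() copies the survivors over...
    chosen_services = {svc: insts[0]
                       for svc, insts in staging_services.items()}

    # ...and analyze_changes() derives the two difference sets.
    instances_bound = {inst for svc, inst in chosen_services.items()
                       if original_service_instances.get(svc) != inst}
    instances_unbound = {inst
                         for svc, inst in original_service_instances.items()
                         if chosen_services.get(svc) != inst}
    # instances_bound == {"syslog-global"}; instances_unbound == set()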
Example #4
def del_cluster(session, logger, dbcluster, config):
    check_no_provided_service(dbcluster)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError("%s is still in use by clusters: %s." %
                            (format(dbcluster),
                             ", ".join([c.name for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            plenaries.append(Plenary.get_plenary(res))

    session.delete(dbcluster)

    session.flush()

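    # Remove the plenary files (and, with remove_profile=True, the compiled
    # client profile) only after the database delete has flushed cleanly.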
    plenaries.remove(remove_profile=True)

    trigger_notifications(config, logger, CLIENT_INFO)

    return
Example #5
    def __init__(self, dbobj, logger, required_only=False):
        """Initialize the chooser.

        To clear out bindings that are not required, pass in
        required_only=True.

        Several staging areas and caches are set up within this object.
        The general flow is that potential service instance choices
        are kept in staging_services (dictionary of service to list of
        service instances) and finalized into chosen_services (dictionary
        of service to single service instance).

        The original state of the object is held in the cache
        original_service_instances (dictionary of service to single service
        instance).

        The instances_bound and instances_unbound lists are populated
        after chosen_services with the differences between chosen_services
        and original_service_instances.

        Subclasses should call this before starting their own
        initialization.

        """
        self.dbobj = dbobj
        self.personality = dbobj.personality
        self.archetype = dbobj.personality.archetype
        self.session = object_session(dbobj)
        self.required_only = required_only
        self.logger = logger
        self.logger.debug("Creating service chooser for {0:l}"
                          .format(self.dbobj))
        # Cache of the service maps
        self.mapped_services = {}

        # Stores interim service instance lists
        self.staging_services = {}

        # Report as many errors as possible in one shot
        self.errors = []

        # Cache the servers backing service instances, used to determine
        # affinity
        self.servers = set()

        # Set of service instances with a new client
        self.instances_bound = set()

        # Set of service instances losing a client
        self.instances_unbound = set()

        # Track the chosen services
        self.chosen_services = {}

        # Keep stashed plenaries for rollback purposes
        self.plenaries = PlenaryCollection(logger=self.logger)
Example #6
    def render(self, session, logger, target, grn, eon_id, hostname, list, personality,
               archetype, **arguments):
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)

        plenaries = PlenaryCollection(logger=logger)

        if hostname:
            objs = [hostname_to_host(session, hostname)]
            config_key = "host_grn_targets"
        elif list:
            check_hostlist_size(self.command, self.config, list)
            objs = hostlist_to_hosts(session, list)
            config_key = "host_grn_targets"
        elif personality:
            objs = [Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)]
            config_key = "personality_grn_targets"

        for obj in objs:
            section = "archetype_" + obj.archetype.name

            if self.config.has_option(section, config_key):
                valid_targets = [s.strip() for s in
                                 self.config.get(section, config_key).split(",")]
            else:
                raise ArgumentError("{0} has no valid GRN targets configured."
                                    .format(obj.archetype))

            if target not in valid_targets:
                raise ArgumentError("Invalid target %s for archetype %s, please "
                                    "choose from: %s." %
                                    (target, obj.archetype.name,
                                     ", ".join(valid_targets)))

            plenaries.append(Plenary.get_plenary(obj))
            self._update_dbobj(obj, target, dbgrn)

        session.flush()

        plenaries.write()

        return
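
The valid GRN targets consulted above come from the broker configuration,
keyed by archetype.  A hypothetical excerpt of such a config file (the
section prefix and option names follow the code above; the archetype name
and target values are purely illustrative):

    [archetype_aquilon]
    host_grn_targets = esp, hlmplus
    personality_grn_targets = esp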
Example #7
    def render(self, session, logger, domain, sandbox, archetype, personality,
               pancinclude, pancexclude, pancdebug, cleandeps,
               **arguments):
        dbdomain = None
        dbauthor = None
        if domain or sandbox:
            (dbdomain, dbauthor) = get_branch_and_author(session, logger,
                                                         domain=domain,
                                                         sandbox=sandbox,
                                                         compel=True)

        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if pancdebug:
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions'

        q = session.query(Host)
        q = q.filter_by(personality=dbpersonality)
        if dbdomain:
            q = q.filter_by(branch=dbdomain)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        host_list = q.all()

        if not host_list:
            return

        # If the domain was not specified, use the domain of the first host
        dbdomain, dbauthor = validate_branch_author(host_list)

        plenaries = PlenaryCollection(logger=logger)
        for host in host_list:
            plenaries.append(Plenary.get_plenary(host))

        dom = TemplateDomain(dbdomain, dbauthor, logger=logger)
        with plenaries.get_key():
            dom.compile(session, only=plenaries.object_templates,
                        panc_debug_include=pancinclude,
                        panc_debug_exclude=pancexclude,
                        cleandeps=cleandeps, locked=True)
        return
Example #8
    def render(self, session, logger, list, **arguments):
        check_hostlist_size(self.command, self.config, list)
        dbhosts = hostlist_to_hosts(session, list)

        dbbranch, dbauthor = validate_branch_author(dbhosts)

        failed = []
        compileable = []
        # Do any cross-list or dependency checks
        for dbhost in dbhosts:
            if dbhost.status.name == 'ready':
                failed.append("{0:l} is in ready status, "
                              "advertised status can be reset only "
                              "when host is in non ready state.".format(dbhost))
        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))

        plenaries = PlenaryCollection(logger=logger)
        for dbhost in dbhosts:
            dbhost.advertise_status = False
            plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        td = TemplateDomain(dbbranch, dbauthor, logger=logger)
        with plenaries.get_key():
            plenaries.stash()
            try:
                plenaries.write(locked=True)
                td.compile(session, only=compileable, locked=True)
            except:
                plenaries.restore_stash()
                raise

        return
Example #9
    def render(self, session, logger, domain, sandbox,
               pancinclude, pancexclude, pancdebug, cleandeps,
               **arguments):
        (dbdomain, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        # Grab a shared lock on personalities and services used by the domain.
        # Object templates (hosts, clusters) are protected by the domain lock.
        plenaries = PlenaryCollection(logger=logger)

        q1 = session.query(Personality)
        q1 = q1.join(Host)
        q1 = q1.filter(and_(Host.branch == dbdomain,
                            Host.sandbox_author == dbauthor))
        q1 = q1.reset_joinpoint()
        q1 = q1.options(joinedload('paramholder'),
                        subqueryload('paramholder.parameters'))

        q2 = session.query(Personality)
        q2 = q2.join(Cluster)
        q2 = q2.filter(and_(Cluster.branch == dbdomain,
                            Cluster.sandbox_author == dbauthor))
        q2 = q2.reset_joinpoint()
        q2 = q2.options(joinedload('paramholder'),
                        subqueryload('paramholder.parameters'))

        for dbpers in q1.union(q2):
            plenaries.append(Plenary.get_plenary(dbpers))

        q1 = session.query(ServiceInstance)
        q1 = q1.join(ServiceInstance.clients)
        q1 = q1.filter(and_(Host.branch == dbdomain,
                            Host.sandbox_author == dbauthor))

        q2 = session.query(ServiceInstance)
        q2 = q2.join(ServiceInstance.cluster_clients)
        q2 = q2.filter(and_(Cluster.branch == dbdomain,
                            Cluster.sandbox_author == dbauthor))

        for si in q1.union(q2):
            plenaries.append(Plenary.get_plenary(si))

        if pancdebug:
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions'
        dom = TemplateDomain(dbdomain, dbauthor, logger=logger)
        with CompileKey.merge([CompileKey(domain=dbdomain.name, logger=logger),
                               plenaries.get_key(exclusive=False)]):
            dom.compile(session,
                        panc_debug_include=pancinclude,
                        panc_debug_exclude=pancexclude,
                        cleandeps=cleandeps,
                        locked=True)
        return
Example #10
    def render(self, session, logger, hostname, service, **arguments):
        dbhost = hostname_to_host(session, hostname)
        for srv in dbhost.archetype.services + dbhost.personality.services:
            if srv.name == service:
                raise ArgumentError("Cannot unbind a required service. "
                                    "Perhaps you want to rebind?")

        dbservice = Service.get_unique(session, service, compel=True)
        si = get_host_bound_service(dbhost, dbservice)
        if si:
            logger.info("Removing client binding")
            dbhost.services_used.remove(si)
            session.flush()

            plenaries = PlenaryCollection(logger=logger)
            plenaries.append(Plenary.get_plenary(dbhost))
            plenaries.append(PlenaryServiceInstanceServer.get_plenary(si))
            plenaries.write()

        return
Example #11
class Chooser(object):
    """Helper for choosing services for an object."""
    def __new__(cls, dbobj, *args, **kwargs):
        if isinstance(dbobj, Host):
            chooser = super(Chooser, HostChooser).__new__(HostChooser)
        elif isinstance(dbobj, Cluster):
            chooser = super(Chooser, ClusterChooser).__new__(ClusterChooser)
        else:
            # Just assume the consumer invoked the right subclass...
            chooser = super(Chooser, cls).__new__(cls)

        # Lock the owner in the DB to avoid problems with parallel runs
        dbobj.lock_row()

        return chooser

    # Technically apply_changes is a method, but whatever...
    abstract_fields = [
        "description", "archetype", "personality", "location",
        "required_services", "original_service_instances", "apply_changes"
    ]

    def __init__(self, dbobj, logger, required_only=False):
        """Initialize the chooser.

        To clear out bindings that are not required, pass in
        required_only=True.

        Several staging areas and caches are set up within this object.
        The general flow is that potential service instance choices
        are kept in staging_services (dictionary of service to list of
        service instances) and finalized into chosen_services (dictionary
        of service to single service instance).

        The original state of the object is held in the cache
        original_service_instances (dictionary of service to single service
        instance).

        The instances_bound and instances_unbound lists are populated
        after chosen_services with the differences between chosen_services
        and original_service_instances.

        Subclasses should call this before starting their own
        initialization.

        """
        self.dbobj = dbobj
        self.session = object_session(dbobj)
        self.required_only = required_only
        self.logger = logger
        self.description = self.generate_description()
        self.logger.debug("Creating service Chooser for %s", self.description)
        # Cache of the service maps
        self.mapped_services = {}

        # Stores interim service instance lists
        self.staging_services = {}

        # Report as many errors as possible in one shot
        self.errors = []

        # Cache the servers backing service instances
        self.servers = {}

        # Set of service instances with a new client
        self.instances_bound = set()

        # Set of service instances losing a client
        self.instances_unbound = set()

        # Track the chosen services
        self.chosen_services = {}

        # Keep stashed plenaries for rollback purposes
        self.plenaries = PlenaryCollection(logger=self.logger)

    def generate_description(self):
        return str(self.dbobj)

    def verify_init(self):
        """This is more of a verify-and-finalize method..."""
        for field in self.abstract_fields:
            if not hasattr(self, field):
                raise InternalError("%s provides no %s field" %
                                    (type(self.dbobj), field))
        # This can be tweaked...
        if not self.required_only:
            for (service, instance) in self.original_service_instances.items():
                self.staging_services[service] = [instance]

    def error(self, msg, *args, **kwargs):
        """Errors are consolidated so that many can be reported at once."""
        formatted = msg % args
        self.errors.append(formatted)
        self.logger.info(msg, *args, **kwargs)

    def set_required(self):
        """Main entry point when setting the required services for a host."""
        self.verify_init()
        self.prestash_primary()
        self.logger.debug("Setting required services")
        self.cache_service_maps(self.required_services)
        for dbservice in self.required_services:
            self.find_service_instances(dbservice)
        self.check_errors()
        for dbservice in self.required_services:
            self.choose_cluster_aligned(dbservice)
            self.choose_available_capacity(dbservice)
            self.choose_past_use(dbservice)
        self.check_errors()
        # If this code needs to be made more efficient, this could
        # be refactored.  We don't always need count_servers()...
        # In theory don't always need the loop above, either.
        self.count_servers()
        for dbservice in self.required_services:
            self.reduce_service_instances(dbservice)
        self.finalize_service_instances()
        self.analyze_changes()
        self.stash_services()
        self.apply_changes()
        self.check_errors()

    def set_single(self, service, instance=None, force=False):
        """Use this to update a single service.

        If planning to use this method, construct the Chooser with
        required_only=False.  If required_only is True, all other
        bindings will be cleared.

        """
        self.verify_init()
        self.prestash_primary()
        if instance:
            self.logger.debug("Setting service %s instance %s", service.name,
                              instance.name)
            self.staging_services[service] = [instance]
        else:
            self.logger.debug("Setting service %s with auto-bind",
                              service.name)
            self.staging_services[service] = None
            self.cache_service_maps([service])
            self.find_service_instances(service)
        self.check_errors()
        self.choose_cluster_aligned(service)
        self.choose_available_capacity(service)
        self.check_errors()
        self.choose_past_use(service)
        # If this code needs to be made more efficient, this could
        # be refactored.  We don't always need count_servers()...
        self.count_servers()
        self.reduce_service_instances(service)
        self.finalize_service_instances()
        self.analyze_changes()
        if not force and self.instances_bound and self.instances_unbound:
            cfg_path = list(self.instances_unbound)[0].cfg_path
            self.error("%s is already bound to %s, use unbind "
                       "to clear first or rebind to force." %
                       (self.description, cfg_path))
            self.check_errors()
        self.stash_services()
        self.apply_changes()
        self.check_errors()

    def cache_service_maps(self, dbservices):
        self.service_maps = ServiceInstance.get_mapped_instance_cache(
            self.personality, self.location, dbservices, self.network)

    def find_service_instances(self, dbservice):
        """This finds the "closest" service instances, based on the known maps.

        It expects that cache_service_maps has been run.

        """
        instances = self.service_maps.get(dbservice, [])
        if len(instances) >= 1:
            for instance in instances:
                self.logger.debug(
                    "Found service %s instance %s "
                    "in the maps.", instance.service.name, instance.name)
            self.staging_services[dbservice] = instances
            return
        self.error(
            "Could not find a relevant service map for service %s "
            "on %s", dbservice.name, self.description)

    def check_errors(self):
        if self.errors:
            raise ArgumentError("\n".join(self.errors))

    def choose_cluster_aligned(self, dbservice):
        # Only implemented for hosts.
        pass

    def get_footprint(self, instance):
        return 1

    def instance_full(self, instance, max_clients, current_clients):
        """Check if the instance is effectively full.

        This check is complicated because clusters have a larger impact
        than a single host does.

        """
        if max_clients is None:
            return False
        if instance == self.original_service_instances.get(instance.service):
            if current_clients > max_clients:
                return True
            return False
        return current_clients + self.get_footprint(instance) > max_clients
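
    # Worked example (hypothetical numbers): with max_clients=10 and
    # current_clients=9, a footprint of 1 (a host) still fits, since
    # 9 + 1 <= 10, while a subclass reporting a footprint of 16 (e.g. a
    # cluster) would be rejected: 9 + 16 > 10.  An instance this object is
    # already bound to only counts as full once strictly over the limit.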

    def choose_available_capacity(self, dbservice):
        """Verify that the available instances have spare capacity.

        Error out if none should be used.

        """
        maxed_out_instances = set()
        for instance in self.staging_services[dbservice][:]:
            max_clients = instance.enforced_max_clients
            current_clients = instance.client_count
            if self.instance_full(instance, max_clients, current_clients):
                self.staging_services[dbservice].remove(instance)
                maxed_out_instances.add(instance)
                self.logger.debug(
                    "Rejected service %s instance %s with "
                    "max_client value of %s since client_count "
                    "is %s.", instance.service.name, instance.name,
                    max_clients, current_clients)
        if len(self.staging_services[dbservice]) < 1:
            self.error(
                "The available instances %s for service %s are "
                "at full capacity.",
                [str(instance.name) for instance in maxed_out_instances],
                dbservice.name)
        return

    def choose_past_use(self, dbservice):
        """If more than one service instance was found in the maps,
        this method checks to see if we can reduce the list to a single
        choice by checking to see if any of the instances was already in use.

        """
        if len(self.staging_services[dbservice]) > 1 and \
           self.original_service_instances.get(dbservice, None) and \
           self.original_service_instances[dbservice] in \
           self.staging_services[dbservice]:
            self.logger.debug(
                "Chose service %s instance %s because "
                "of past use.", dbservice.name,
                self.original_service_instances[dbservice])
            self.staging_services[dbservice] = [
                self.original_service_instances[dbservice]
            ]
        return

    def count_servers(self, dbservice=None):
        """Get a count of the number of times a server backs
        service instances in use by this host.

        This method is called both to initialize the count and to update
        it as service instances are locked in.

        """
        if dbservice:
            instance_lists = [self.staging_services[dbservice]]
        else:
            instance_lists = self.staging_services.values()

        for instances in instance_lists:
            if len(instances) > 1:
                # Ignore any services where an instance has not been chosen.
                continue
            for host in instances[0].server_hosts:
                if self.servers.get(host, None):
                    self.servers[host] += 1
                else:
                    self.servers[host] = 1

    def reduce_service_instances(self, dbservice):
        if len(self.staging_services[dbservice]) == 1:
            self.count_servers(dbservice)
            return
        self.choose_affinity(dbservice)
        if len(self.staging_services[dbservice]) == 1:
            self.count_servers(dbservice)
            return
        self.choose_least_loaded(dbservice)
        if len(self.staging_services[dbservice]) == 1:
            self.count_servers(dbservice)
            return
        self.choose_random(dbservice)
        self.count_servers(dbservice)
        return

    def choose_affinity(self, dbservice):
        """Attempt to choose a service based on server affinity,
        also known as stickiness.

        This could be extremely complicated when trying to deal with
        instances backed by multiple servers.  Starting simple.
        Count the number of servers backing this instance that
        back other instances used by client.  Any instance that does
        not have the largest count gets tossed.

        """
        max_servers = 0
        max_instances = None
        for instance in self.staging_services[dbservice]:
            common_servers = []
            self.logger.debug("Checking service %s instance %s servers %s",
                              instance.service.name, instance.name,
                              [host.fqdn for host in instance.server_hosts])
            for host in instance.server_hosts:
                if self.servers.get(host, None):
                    common_servers.append(host)
            if not common_servers:
                continue
            if len(common_servers) > max_servers:
                max_servers = len(common_servers)
                max_instances = [instance]
            elif len(common_servers) == max_servers:
                max_instances.append(instance)
        if max_instances and \
           len(max_instances) < len(self.staging_services[dbservice]):
            for instance in self.staging_services[dbservice]:
                if instance not in max_instances:
                    self.logger.debug(
                        "Discounted service %s instance %s "
                        "due to server affinity (stickiness).",
                        instance.service.name, instance.name)
            self.staging_services[dbservice] = max_instances

    def choose_least_loaded(self, dbservice):
        """Choose a service instance based on load."""
        least_clients = None
        least_loaded = []
        for instance in self.staging_services[dbservice]:
            client_count = instance.client_count
            if not least_loaded or client_count < least_clients:
                least_clients = client_count
                least_loaded = [instance]
            elif client_count == least_clients:
                least_loaded.append(instance)
        if len(least_loaded) < len(self.staging_services[dbservice]):
            for instance in self.staging_services[dbservice]:
                if instance not in least_loaded:
                    self.logger.debug(
                        "Discounted service %s instance %s "
                        "due to load.", instance.service.name, instance.name)
            self.staging_services[dbservice] = least_loaded

    def choose_random(self, dbservice):
        """Pick a service instance randomly."""
        self.staging_services[dbservice] = [
            choice(self.staging_services[dbservice])
        ]
        self.logger.debug(
            "Randomly chose service %s instance %s "
            "from remaining choices.", dbservice.name,
            self.staging_services[dbservice][0].name)

    def finalize_service_instances(self):
        """Fill out the list of chosen services."""
        for (service, instances) in self.staging_services.items():
            if len(instances) < 1:  # pragma: no cover
                self.error("Internal Error: Attempt to finalize on "
                           "service %s without any candidates." % service.name)
                continue
            if len(instances) > 1:  # pragma: no cover
                self.error("Internal Error: Attempt to finalize on "
                           "service %s with too many candidates %s." %
                           (service.name, [
                               "service %s instance %s" %
                               (instance.service.name, instance.name)
                               for instance in instances
                           ]))
            self.chosen_services[service] = instances[0]

    def analyze_changes(self):
        """Determine what changed."""
        for (service, instance) in self.chosen_services.items():
            if not self.original_service_instances.get(service, None) or \
               self.original_service_instances[service] != instance:
                self.instances_bound.add(instance)
        for (service, instance) in self.original_service_instances.items():
            if not self.chosen_services.get(service, None) or \
               self.chosen_services[service] != instance:
                self.instances_unbound.add(instance)

    def stash_services(self):
        for instance in self.instances_bound.union(self.instances_unbound):
            plenary = PlenaryServiceInstanceServer(instance,
                                                   logger=self.logger)
            plenary.stash()
            self.plenaries.append(plenary)

    def flush_changes(self):
        self.session.flush()

    def get_write_key(self):
        return self.plenaries.get_write_key()

    def write_plenary_templates(self, locked=False):
        self.plenaries.write(locked=locked)

    def prestash_primary(self):
        pass

    def restore_stash(self):
        self.plenaries.restore_stash()

    def changed_server_fqdns(self):
        hosts = set()
        for instance in chain(self.instances_bound, self.instances_unbound):
            for srv in instance.servers:
                # Skip servers that do not have a profile
                if not srv.host.personality.archetype.is_compileable:
                    continue
                if (srv.host.branch == self.dbobj.branch
                        and srv.host.sandbox_author_id
                        == self.dbobj.sandbox_author_id):
                    hosts.add(str(srv.host.fqdn))
        return hosts
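
Putting the pieces together, a broker command would drive the class above
roughly as follows.  This is a sketch assembled from the call patterns in
this example and in Example #16; dbhost, session and logger are assumed to
exist, and get_write_key() is assumed to return a lockable key the way
get_key() is used elsewhere on this page:

    # Hypothetical driver code, not part of the original class.
    chooser = Chooser(dbhost, logger=logger, required_only=True)
    chooser.set_required()       # raises ArgumentError on any failure
    chooser.flush_changes()      # push ORM changes to the database

    with chooser.get_write_key():
        try:
            chooser.write_plenary_templates(locked=True)
        except:
            chooser.restore_stash()   # roll the files back
            raise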
Example #12
0
    def render(self, session, logger, model, vendor, newmodel, newvendor,
               comments, leave_existing, **arguments):
        for (arg, value) in arguments.items():
            # Cleaning the strings isn't strictly necessary but allows
            # for simple equality checks below and removes the need to
            # call refresh().
            if arg in ['newmodel', 'newvendor',
                       'cpuname', 'cpuvendor', 'disktype', 'diskcontroller',
                       'nicmodel', 'nicvendor']:
                if value is not None:
                    arguments[arg] = value.lower().strip()

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if leave_existing and (newmodel or newvendor):
            raise ArgumentError("Cannot update model name or vendor without "
                                "updating any existing machines.")

        fix_existing = not leave_existing
        dbmachines = set()

        # The sub-branching here is a little difficult to read...
        # Basically, there are three different checks to handle
        # setting a new vendor, a new name, or both.
        if newvendor:
            dbnewvendor = Vendor.get_unique(session, newvendor, compel=True)
            if newmodel:
                Model.get_unique(session, name=newmodel, vendor=dbnewvendor,
                                 preclude=True)
            else:
                Model.get_unique(session, name=dbmodel.name,
                                 vendor=dbnewvendor, preclude=True)
            dbmodel.vendor = dbnewvendor
        if newmodel:
            if not newvendor:
                Model.get_unique(session, name=newmodel, vendor=dbmodel.vendor,
                                 preclude=True)
            dbmodel.name = newmodel
        if newvendor or newmodel:
            q = session.query(Machine).filter_by(model=dbmodel)
            dbmachines.update(q.all())

        # For now, can't update model_type.  There are too many spots
        # that special case things like aurora_node or virtual_machine to
        # know that the transition is safe.  If there is enough need we
        # can always add those transitions later.
        if arguments['machine_type'] is not None:
            raise UnimplementedError("Cannot (yet) change a model's "
                                     "machine type.")

        if comments:
            dbmodel.comments = comments
            # The comments also do not affect the templates.

        cpu_args = ['cpuname', 'cpuvendor', 'cpuspeed']
        cpu_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in cpu_args])
        cpu_values = [v for v in cpu_info.values() if v is not None]
        nic_args = ['nicmodel', 'nicvendor']
        nic_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in nic_args])
        nic_values = [v for v in nic_info.values() if v is not None]
        spec_args = ['cpunum', 'memory', 'disktype', 'diskcontroller',
                     'disksize', 'nics']
        specs = dict([(self.argument_lookup[arg], arguments[arg])
                      for arg in spec_args])
        spec_values = [v for v in specs.values() if v is not None]

        if not dbmodel.machine_specs:
            if cpu_values or nic_values or spec_values:
                # A non-machine model cannot be created with machine_specs,
                # so we only need to check here on update.
                if not dbmodel.model_type.isMachineType():
                    raise ArgumentError("Machine specifications are only "
                                        "valid for machine types.")
                if not cpu_values or len(spec_values) < len(spec_args):
                    raise ArgumentError("Missing required parameters to store "
                                        "machine specs for the model.  Please "
                                        "give all CPU, disk, RAM, and NIC "
                                        "count information.")
                dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
                if nic_values:
                    dbnic = Model.get_unique(session, compel=True,
                                             model_type=NicType.Nic, **nic_info)
                else:
                    dbnic = Model.default_nic_model(session)
                dbmachine_specs = MachineSpecs(model=dbmodel, cpu=dbcpu,
                                               nic_model=dbnic, **specs)
                session.add(dbmachine_specs)

        # Anything below that updates specs should have been verified above.

        if cpu_values:
            dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
            self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                      attr='cpu', value=dbcpu,
                                      fix_existing=fix_existing)

        for arg in ['memory', 'cpunum']:
            if arguments[arg] is not None:
                self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                          attr=self.argument_lookup[arg],
                                          value=arguments[arg],
                                          fix_existing=fix_existing)

        if arguments['disktype']:
            if fix_existing:
                raise ArgumentError("Please specify --leave_existing to "
                                    "change the model disktype.  This cannot "
                                    "be converted automatically.")
            dbmodel.machine_specs.disk_type = arguments['disktype']

        for arg in ['diskcontroller', 'disksize']:
            if arguments[arg] is not None:
                self.update_disk_specs(model=dbmodel, dbmachines=dbmachines,
                                       attr=self.argument_lookup[arg],
                                       value=arguments[arg],
                                       fix_existing=fix_existing)

        if nic_values:
            dbnic = Model.get_unique(session, compel=True, **nic_info)
            self.update_interface_specs(model=dbmodel, dbmachines=dbmachines,
                                        value=dbnic, fix_existing=fix_existing)

        if arguments['nics'] is not None:
            dbmodel.machine_specs.nic_count = arguments['nics']

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        for dbmachine in dbmachines:
            plenaries.append(Plenary.get_plenary(dbmachine))
        plenaries.write()

        return
Example #13
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now.  As we can't
            # rule out that it may have some uses in the future, the
            # restriction lives here rather than in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)

            if dbmodel.model_type.isNic():
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session, name=feature,
                                       feature_type=feature_type, compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException("Changing feature bindings for "
                                             "more than just a personality "
                                             "requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        for dbpersonality in q:
            plenaries.append(Plenary.get_plenary(dbpersonality))

        written = plenaries.write()
        logger.client_info("Flushed %d/%d templates." %
                           (written, len(plenaries.plenaries)))
        return
Example #14
            legacy_env = HostEnvironment.get_unique(session,
                                                    'legacy',
                                                    compel=True)
            if dbpersona.host_environment == legacy_env:
                HostEnvironment.polymorphic_subclass(
                    host_environment, "Unknown environment name")
                Personality.validate_env_in_name(personality, host_environment)
                dbpersona.host_environment = HostEnvironment.get_unique(
                    session, host_environment, compel=True)
            else:
                raise ArgumentError(
                    "The personality '{0}' already has env set to '{1}'"
                    " and cannot be updated".format(str(dbpersona),
                                                    host_environment))

        plenaries = PlenaryCollection(logger=logger)

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
                               config=self.config)
            old_grn = dbpersona.owner_grn
            dbpersona.owner_grn = dbgrn

            if not leave_existing:
                # If this is a public personality, then there may be hosts with
                # various GRNs inside the personality, so make sure we preserve
                # those GRNs by filtering on the original GRN of the personality
                q = session.query(Host)
Example #15
            if q.count() > 0:
                raise ArgumentError("The personality {0} is in use and cannot "
                                    "be modified".format(str(dbpersona)))
            dbpersona.cluster_required = cluster_required

        if host_environment is not None:
            if dbpersona.host_environment.name == 'legacy':
                dbhost_env = HostEnvironment.get_instance(session, host_environment)
                Personality.validate_env_in_name(personality, dbhost_env.name)
                dbpersona.host_environment = dbhost_env
            else:
                raise ArgumentError("The personality '{0!s}' already has env set to '{1!s}'"
                                    " and cannot be updated"
                                    .format(dbpersona, dbpersona.host_environment))

        plenaries = PlenaryCollection(logger=logger)

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                               config=self.config)
            old_grn = dbpersona.owner_grn
            dbpersona.owner_grn = dbgrn

            if not leave_existing:
                # If this is a public personality, then there may be hosts with
                # various GRNs inside the personality, so make sure we preserve
                # those GRNs by filtering on the original GRN of the personality
                q = session.query(Host)
                q = q.filter_by(personality=dbpersona, owner_grn=old_grn)
                for dbhost in q.all():
                    dbhost.owner_grn = dbgrn
Example #16
                try:
                    chooser.set_required()
                except ArgumentError as e:
                    failed.append(str(e))
        if failed:
            raise ArgumentError("The following hosts failed service "
                                "binding:\n%s" % "\n".join(failed))

        session.flush()
        logger.info("reconfigure_hostlist processing: %s" %
                    ",".join([str(dbhost.fqdn) for dbhost in dbhosts]))

        if not choosers:
            return

        plenaries = PlenaryCollection(logger=logger)

        # chooser.plenaries is a PlenaryCollection - this flattens
        # that top level.
        plenary_set = set()
        for chooser in choosers:
            plenary_set.update(chooser.plenaries.plenaries)
        plenaries.extend(plenary_set)

        td = TemplateDomain(dbbranch, dbauthor, logger=logger)

        # Don't bother locking until every possible check before the
        # actual writing and compile is done.  This will allow for fast
        # turnaround on errors (no need to wait for a lock if there's
        # a missing service map entry or something).
        with plenaries.get_key():
Example #17
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_instance(session, buildstatus)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                            logger=logger)
        # Force a host lock as pan might overwrite the profile...
        with plenaries.get_key():
            plenaries.stash()
            try:
                plenaries.write(locked=True)
                td.compile(session, only=plenaries.object_templates,
                           locked=True)
            except:
                plenaries.restore_stash()
                raise
        return
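
The lock/stash/write/compile/restore sequence above also appears in
Example #8; distilled, the idiom looks like this (sketch only, assuming
plenaries and a TemplateDomain td are already set up):

    with plenaries.get_key():       # take the compile/host locks once
        plenaries.stash()           # remember the on-disk state
        try:
            plenaries.write(locked=True)
            td.compile(session, only=plenaries.object_templates,
                       locked=True)
        except:
            plenaries.restore_stash()   # roll files back on any failure
            raise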
Example #18
class Chooser(object):
    """Helper for choosing services for an object."""

    def __new__(cls, dbobj, *args, **kwargs):
        if isinstance(dbobj, Host):
            chooser = super(Chooser, HostChooser).__new__(HostChooser)
        elif isinstance(dbobj, Cluster):
            chooser = super(Chooser, ClusterChooser).__new__(ClusterChooser)
        else:
            # Just assume the consumer invoked the right subclass...
            chooser = super(Chooser, cls).__new__(cls)

        # Lock the owner in the DB to avoid problems with parallel runs
        dbobj.lock_row()

        return chooser

    abstract_fields = ["location", "required_services", "network",
                       "original_service_instances"]

    def __init__(self, dbobj, logger, required_only=False):
        """Initialize the chooser.

        To clear out bindings that are not required, pass in
        required_only=True.

        Several staging areas and caches are set up within this object.
        The general flow is that potential service instance choices
        are kept in staging_services (dictionary of service to list of
        service instances) and finalized into chosen_services (dictionary
        of service to single service instance).

        The original state of the object is held in the cache
        original_service_instances (dictionary of service to single service
        instance).

        The instances_bound and instances_unbound lists are populated
        after chosen_services with the differences between chosen_services
        and original_service_instances.

        Subclasses should call this before starting their own
        initialization.

        """
        self.dbobj = dbobj
        self.personality = dbobj.personality
        self.archetype = dbobj.personality.archetype
        self.session = object_session(dbobj)
        self.required_only = required_only
        self.logger = logger
        self.logger.debug("Creating service chooser for {0:l}"
                          .format(self.dbobj))
        # Cache of the service maps
        self.mapped_services = {}

        # Stores interim service instance lists
        self.staging_services = {}

        # Report as many errors as possible in one shot
        self.errors = []

        # Cache the servers backing service instances, used to determine
        # affinity
        self.servers = set()

        # Set of service instances with a new client
        self.instances_bound = set()

        # Set of service instances losing a client
        self.instances_unbound = set()

        # Track the chosen services
        self.chosen_services = {}

        # Keep stashed plenaries for rollback purposes
        self.plenaries = PlenaryCollection(logger=self.logger)

    def apply_changes(self):
        raise InternalError("This method must be overridden")

    def verify_init(self):
        """This is more of a verify-and-finalize method..."""
        for field in self.abstract_fields:
            if not hasattr(self, field):
                raise InternalError("%s provides no %s field" %
                                    (type(self.dbobj), field))
        # This can be tweaked...
        if not self.required_only:
            for (service, instance) in self.original_service_instances.items():
                self.staging_services[service] = [instance]

    def error(self, msg, *args, **kwargs):
        """Errors are consolidated so that many can be reported at once."""
        formatted = msg % args
        self.errors.append(formatted)
        self.logger.info(msg, *args, **kwargs)

    def set_required(self):
        """Main entry point when setting the required services for a host."""
        self.verify_init()
        self.prestash_primary()
        self.logger.debug("Setting required services")
        self.cache_service_maps(self.required_services)
        for dbservice in self.required_services:
            self.find_service_instances(dbservice)
        self.check_errors()
        for dbservice in self.required_services:
            self.choose_cluster_aligned(dbservice)
            self.choose_available_capacity(dbservice)
            self.choose_past_use(dbservice)
        self.check_errors()
        # If this code needs to be made more efficient, this could
        # be refactored.  We don't always need count_servers()...
        # In theory don't always need the loop above, either.
        self.count_servers()
        for dbservice in self.required_services:
            self.reduce_service_instances(dbservice)
        self.finalize_service_instances()
        self.analyze_changes()
        self.stash_services()
        self.apply_changes()
        self.check_errors()

    def set_single(self, service, instance=None, force=False):
        """Use this to update a single service.

        If planning to use this method, construct the Chooser with
        required_only=False.  If required_only is True, all other
        bindings will be cleared.

        """
        self.verify_init()
        self.prestash_primary()
        if instance:
            self.logger.debug("Setting service %s instance %s",
                              service.name, instance.name)
            self.staging_services[service] = [instance]
        else:
            self.logger.debug("Setting service %s with auto-bind",
                              service.name)
            self.staging_services[service] = None
            self.cache_service_maps([service])
            self.find_service_instances(service)
        self.check_errors()
        self.choose_cluster_aligned(service)
        self.choose_available_capacity(service)
        self.check_errors()
        self.choose_past_use(service)
        # If this code needs to be made more efficient, this could
        # be refactored.  We don't always need count_servers()...
        self.count_servers()
        self.reduce_service_instances(service)
        self.finalize_service_instances()
        self.analyze_changes()
        if not force and self.instances_bound and self.instances_unbound:
            si = list(self.instances_unbound)[0]
            self.error("{0} is already bound to {1:l}, use unbind "
                       "to clear first or rebind to force."
                       .format(self.dbobj, si))
            self.check_errors()
        self.stash_services()
        self.apply_changes()
        self.check_errors()

    def cache_service_maps(self, dbservices):
        self.service_maps = ServiceInstance.get_mapped_instance_cache(
            self.personality, self.location, dbservices, self.network)

    def find_service_instances(self, dbservice):
        """This finds the "closest" service instances, based on the known maps.

        It expects that cache_service_maps has been run.

        """
        instances = self.service_maps.get(dbservice, [])
        if len(instances) >= 1:
            for instance in instances:
                self.logger.debug("Found {0:l} in the maps.".format(instance))
            self.staging_services[dbservice] = instances
            return
        self.error("Could not find a relevant service map for {0:l} "
                   "on {1:l}".format(dbservice, self.dbobj))

    def check_errors(self):
        if self.errors:
            raise ArgumentError("\n".join(self.errors))

    def choose_cluster_aligned(self, dbservice):
        # Only implemented for hosts.
        pass

    def get_footprint(self, instance):
        return 1

    def instance_full(self, instance, max_clients, current_clients):
        """Check if the instance is effectively full.

        This check is complicated because clusters have a larger impact
        than a single host does.

        """
        if max_clients is None:
            return False
        if instance == self.original_service_instances.get(instance.service):
            if current_clients > max_clients:
                return True
            return False
        return current_clients + self.get_footprint(instance) > max_clients

    def choose_available_capacity(self, dbservice):
        """Verify that the available instances have spare capacity.

        Error out if none should be used.

        """
        maxed_out_instances = set()
        for instance in self.staging_services[dbservice][:]:
            max_clients = instance.enforced_max_clients
            current_clients = instance.client_count
            if self.instance_full(instance, max_clients, current_clients):
                self.staging_services[dbservice].remove(instance)
                maxed_out_instances.add(instance)
                self.logger.debug("Rejected service %s instance %s with "
                                  "max_client value of %s since client_count "
                                  "is %s.",
                                  instance.service.name, instance.name,
                                  max_clients, current_clients)
        if len(self.staging_services[dbservice]) < 1:
            self.error("The available instances %s for service %s are "
                       "at full capacity.",
                       [str(instance.name)
                        for instance in maxed_out_instances],
                       dbservice.name)
        return

    def choose_past_use(self, dbservice):
        """If more than one service instance was found in the maps,
        this method checks to see if we can reduce the list to a single
        choice by checking to see if any of the instances was already in use.

        """
        if len(self.staging_services[dbservice]) > 1 and \
           dbservice in self.original_service_instances and \
           self.original_service_instances[dbservice] in \
           self.staging_services[dbservice]:
            instance = self.original_service_instances[dbservice]
            self.logger.debug("Chose {0:l} because of past use."
                              .format(instance))
            self.staging_services[dbservice] = [instance]
        return

    def append_server_object(self, container, srv):
        """Append server objects to a container.

        This is a helper used for implementing server affinity.

        """
        if srv.host:
            container.add(srv.host)
        if srv.cluster:
            container.add(srv.cluster)

        # If the service is not bound to a host or cluster, then we have
        # no choice but to use the alias to provide server affinity
        if srv.alias and not srv.host and not srv.cluster:
            container.add(srv.alias)

    def count_servers(self, dbservice=None):
        """Get a count of the number of times a server backs
        service instances in use by this host.

        This method is called both to initialize the count and to update
        it as service instances are locked in.

        """
        if dbservice:
            instance_lists = [self.staging_services[dbservice]]
        else:
            instance_lists = self.staging_services.values()

        for instances in instance_lists:
            if len(instances) > 1:
                # Ignore any services where an instance has not been chosen.
                continue
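            # Exactly one instance remains for this service, so record
            # the servers backing it.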
            for srv in instances[0].servers:
                self.append_server_object(self.servers, srv)

    def reduce_service_instances(self, dbservice):
        if len(self.staging_services[dbservice]) == 1:
            self.count_servers(dbservice)
            return
        self.choose_affinity(dbservice)
        if len(self.staging_services[dbservice]) == 1:
            self.count_servers(dbservice)
            return
        self.choose_least_loaded(dbservice)
        if len(self.staging_services[dbservice]) == 1:
            self.count_servers(dbservice)
            return
        self.choose_random(dbservice)
        self.count_servers(dbservice)
        return

    def choose_affinity(self, dbservice):
        """Attempt to choose a service based on server affinity,
        also known as stickiness.

        This could be extremely complicated when trying to deal with
        instances backed by multiple servers.  Starting simple.
        Count the number of servers backing this instance that
        back other instances used by client.  Any instance that does
        not have the largest count gets tossed.

        """
        max_servers = 0
        max_instances = []
        for instance in self.staging_services[dbservice]:
            self.logger.debug("Checking service %s instance %s servers %s",
                              instance.service.name, instance.name,
                              [srv.fqdn for srv in instance.servers])
            instance_servers = set()
            for srv in instance.servers:
                self.append_server_object(instance_servers, srv)

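            # Intersect with the servers already backing instances in
            # use by this client.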
            common_servers = self.servers & instance_servers
            if len(common_servers) > max_servers:
                max_servers = len(common_servers)
                max_instances = [instance]
            elif len(common_servers) == max_servers:
                max_instances.append(instance)
        if max_instances and \
           len(max_instances) < len(self.staging_services[dbservice]):
            for instance in self.staging_services[dbservice]:
                if instance not in max_instances:
                    self.logger.debug("Discounted {0:l} due to server affinity "
                                      "(stickiness).".format(instance))
            self.staging_services[dbservice] = max_instances

    def choose_least_loaded(self, dbservice):
        """Choose a service instance based on load."""
        least_clients = None
        least_loaded = []
        for instance in self.staging_services[dbservice]:
            client_count = instance.client_count
            if not least_loaded or client_count < least_clients:
                least_clients = client_count
                least_loaded = [instance]
            elif client_count == least_clients:
                least_loaded.append(instance)
        if len(least_loaded) < len(self.staging_services[dbservice]):
            for instance in self.staging_services[dbservice]:
                if instance not in least_loaded:
                    self.logger.debug("Discounted {0:l} due to load."
                                      .format(instance))
            self.staging_services[dbservice] = least_loaded

    def choose_random(self, dbservice):
        """Pick a service instance randomly."""
        self.staging_services[dbservice] = [
            choice(self.staging_services[dbservice])]
        self.logger.debug("Randomly chose service %s instance %s "
                          "from remaining choices.",
                          dbservice.name,
                          self.staging_services[dbservice][0].name)

    def finalize_service_instances(self):
        """Fill out the list of chosen services."""
        for (service, instances) in self.staging_services.items():
            if len(instances) < 1:  # pragma: no cover
                self.error("Internal Error: Attempt to finalize on "
                           "service %s without any candidates." %
                           service.name)
                continue
            if len(instances) > 1:  # pragma: no cover
                self.error("Internal Error: Attempt to finalize on "
                           "service %s with too many candidates %s." %
                           (service.name,
                            ["service %s instance %s" %
                             (instance.service.name, instance.name)
                             for instance in instances]))
            self.chosen_services[service] = instances[0]

    def analyze_changes(self):
        """Determine what changed."""
        for (service, instance) in self.chosen_services.items():
            if service not in self.original_service_instances or \
               self.original_service_instances[service] != instance:
                self.instances_bound.add(instance)
        for (service, instance) in self.original_service_instances.items():
            if service not in self.chosen_services or \
               self.chosen_services[service] != instance:
                self.instances_unbound.add(instance)

    def stash_services(self):
        changed_servers = set()
        for instance in self.instances_bound.union(self.instances_unbound):
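            # Server plenaries only need refreshing for services that
            # track their client list.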
            if not instance.service.need_client_list:
                continue

            for srv in instance.servers:
                if srv.host:
                    changed_servers.add(srv.host)
                if srv.cluster:
                    changed_servers.add(srv.cluster)

            plenary = PlenaryServiceInstanceServer.get_plenary(instance)
            self.plenaries.append(plenary)

        for dbobj in changed_servers:
            # Skip servers that do not have a profile
            if not dbobj.personality.archetype.is_compileable:
                continue

            # Skip servers that are in a different domain/sandbox
            if (dbobj.branch != self.dbobj.branch or
                dbobj.sandbox_author_id != self.dbobj.sandbox_author_id):
                continue

            self.plenaries.append(Plenary.get_plenary(dbobj))
            if isinstance(dbobj, Cluster):
                for dbhost in dbobj.hosts:
                    self.plenaries.append(Plenary.get_plenary(dbhost))

    def flush_changes(self):
        self.session.flush()

    def get_key(self):
        return self.plenaries.get_key()

    def write_plenary_templates(self, locked=False):
        self.plenaries.stash()
        self.plenaries.write(locked=locked)

    def prestash_primary(self):
        pass

    def restore_stash(self):
        self.plenaries.restore_stash()
Example #19
0
    def render(self, session, logger, building, city, address,
               fullname, default_dns_domain, comments, **arguments):
        dbbuilding = get_location(session, building=building)

        old_city = dbbuilding.city

        dsdb_runner = DSDBRunner(logger=logger)

        if address is not None:
            old_address = dbbuilding.address
            dbbuilding.address = address
            dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,
                                        old_address)
        if fullname is not None:
            dbbuilding.fullname = fullname
        if comments is not None:
            dbbuilding.comments = comments
        if default_dns_domain is not None:
            if default_dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                    compel=True)
                dbbuilding.default_dns_domain = dbdns_domain
            else:
                dbbuilding.default_dns_domain = None

        plenaries = PlenaryCollection(logger=logger)
        if city:
            dbcity = get_location(session, city=city)

            # Moving to a city in a different hub would change the
            # templates' locations, hence it is forbidden
            if dbcity.hub != dbbuilding.hub:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change hubs. {0} is in {1} "
                                    "while {2} is in {3}.".format(
                                        dbcity, dbcity.hub,
                                        dbbuilding, dbbuilding.hub))

            # issue svcmap warnings
            maps = 0
            for map_type in [ServiceMap, PersonalityServiceMap]:
                maps = maps + session.query(map_type).\
                    filter_by(location=old_city).count()

            if maps > 0:
                logger.client_info("There are {0} service(s) mapped to the "
                                   "old location of the ({1:l}), please "
                                   "review and manually update mappings for "
                                   "the new location as needed.".format(
                                       maps, dbbuilding.city))

            dbbuilding.update_parent(parent=dbcity)

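            # Keep DSDB's campus membership in sync: remove the building
            # from the old campus and add it to the new one when the
            # campus changes.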
            if old_city.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.del_campus_building(old_city.campus, building)

            if dbcity.campus and (old_city.campus != dbcity.campus):
                dsdb_runner.add_campus_building(dbcity.campus, building)

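            # Rewrite the plenaries of all machines under the new city so
            # they pick up the changed location.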
            query = session.query(Machine)
            query = query.filter(Machine.location_id.in_(dbcity.offspring_ids()))

            for dbmachine in query:
                plenaries.append(Plenary.get_plenary(dbmachine))

        session.flush()

        if plenaries.plenaries:
            with plenaries.get_key():
                plenaries.stash()
                try:
                    plenaries.write(locked=True)
                    dsdb_runner.commit_or_rollback()
                except:
                    plenaries.restore_stash()
                    raise
        else:
            dsdb_runner.commit_or_rollback()

        return
Example #20
0
    def render(self, session, logger, hostname, **arguments):
        # Check dependencies, translate into user-friendly message
        dbhost = hostname_to_host(session, hostname)

        dbhost.lock_row()

        check_no_provided_service(dbhost)

        # Any service bindings that we need to clean up afterwards
        plenaries = PlenaryCollection(logger=logger)
        remove_plenaries = PlenaryCollection(logger=logger)
        remove_plenaries.append(Plenary.get_plenary(dbhost))

        archetype = dbhost.archetype.name
        dbmachine = dbhost.hardware_entity
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        ip = dbmachine.primary_ip

        for si in dbhost.services_used:
            plenaries.append(PlenaryServiceInstanceServer.get_plenary(si))
            logger.info("Before deleting {0:l}, removing binding to {1:l}"
                        .format(dbhost, si))

        del dbhost.services_used[:]

        if dbhost.resholder:
            for res in dbhost.resholder.resources:
                remove_plenaries.append(Plenary.get_plenary(res))

        # In case of Zebra, the IP may be configured on multiple interfaces
        for iface in dbmachine.interfaces:
            if ip in iface.addresses:
                iface.addresses.remove(ip)

        if dbhost.cluster:
            dbcluster = dbhost.cluster
            dbcluster.hosts.remove(dbhost)
            set_committed_value(dbhost, '_cluster', None)
            dbcluster.validate()
            plenaries.append(Plenary.get_plenary(dbcluster))

        dbdns_rec = dbmachine.primary_name
        dbmachine.primary_name = None
        dbmachine.host = None
        session.delete(dbhost)
        delete_dns_record(dbdns_rec)
        session.flush()

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

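        # Hold one compile lock that covers both the plenaries being
        # rewritten and those being removed.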
        with CompileKey.merge([plenaries.get_key(),
                               remove_plenaries.get_key()]):
            plenaries.stash()
            remove_plenaries.stash()

            try:
                plenaries.write(locked=True)
                remove_plenaries.remove(locked=True, remove_profile=True)

                if archetype != 'aurora' and ip is not None:
                    dsdb_runner = DSDBRunner(logger=logger)
                    dsdb_runner.update_host(dbmachine, oldinfo)
                    dsdb_runner.commit_or_rollback("Could not remove host %s from "
                                                   "DSDB" % hostname)
                if archetype == 'aurora':
                    logger.client_info("WARNING: removing host %s from AQDB and "
                                       "*not* changing DSDB." % hostname)
            except:
                plenaries.restore_stash()
                remove_plenaries.restore_stash()
                raise

        trigger_notifications(self.config, logger, CLIENT_INFO)

        return
Example #21
0
    def render(self, session, logger, interface, machine, mac, model, vendor,
               boot, pg, autopg, comments, master, clear_master, default_route,
               rename_to, **arguments):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.

        If the machine has a host, dsdb may need to be updated.

        The boot flag can *only* be set to true.  This is mostly technical,
        as at this point in the interface it is difficult to tell if the
        flag was unset or set to false.  However, it also vastly simplifies
        the dsdb logic - we never have to worry about a user trying to
        remove the boot flag from a host in dsdb.

        """

        audit_results = []

        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if arguments.get('hostname', None):
            # Hack to set an initial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == 'aurora' and \
               dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip,
                               dbhw_ent.primary_name.network, logger=logger)

        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(
                dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(
                session, logger, dbinterface.hardware_entity)
            audit_results.append(('pg', dbinterface.port_group))

        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has "
                                    "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                            name=master, compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError("Enslaving {0:l} would create a circle, "
                                    "which is not allowed.".format(dbinterface))
            dbinterface.master = dbmaster

        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None

        if comments:
            dbinterface.comments = comments

        if boot:
            # Figure out if the current bootable interface also has the
            # default route set; the new bootable interface probably
            # wants to have the same settings.  Note that if
            # old_default_route is None there was no bootable interface.
            old_default_route = None
            for i in dbhw_ent.interfaces:
                if i.bootable:
                    old_default_route = i.default_route
                    break

            # Apply the bootable flag to the supplied interface, clearing
            # it on all other interfaces.
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                else:
                    i.bootable = False

            # If the user was not explicit about the default route flag
            # (default_route is None), there was an existing bootable
            # interface (old_default_route is not None), and the new default
            # route setting differs from the old, then produce a warning.
            if (default_route is None and
                old_default_route is not None and
                dbinterface.default_route != old_default_route):
                if old_default_route:
                    logger.client_info("Warning: New boot interface {0} is no "
                                       "longer provides the default route; it "
                                       "did before!".format(dbinterface))
                else:
                    logger.client_info("Warning: New boot interface {0} now "
                                       "provides the default route; it didn't "
                                       "before!".format(dbinterface))

            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...

        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope "
                                   "that's ok.".format(dbhw_ent))

        # Set the MAC address last so that the interface can be updated
        # to bootable *before* the MAC address is added.  This way the
        # validation that takes place in the interface class doesn't have
        # to worry about the order of updates to bootable=True and the
        # MAC address.
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by "
                                    "{1:l}.".format(mac, other))
            dbinterface.mac = mac

        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError("Model/vendor can not be set for a {0:lc}."
                                    .format(dbinterface))

            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       model_type=NicType.Nic, compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbhw_ent))
        # Interface renaming affects the host and service addresses
        if dbhw_ent.host:
            plenaries.append(Plenary.get_plenary(dbhw_ent.host))
        for addr in dbinterface.assignments:
            if addr.service_address:
                plenaries.append(Plenary.get_plenary(addr.service_address))

        with plenaries.get_key():
            try:
                plenaries.write(locked=True)

                if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                    dsdb_runner = DSDBRunner(logger=logger)
                    dsdb_runner.update_host(dbhw_ent, oldinfo)
                    dsdb_runner.commit_or_rollback()
            except AquilonError, err:
                plenaries.restore_stash()
                raise ArgumentError(err)
            except:
                plenaries.restore_stash()
                raise

        return
Example #22
0
    def render(self, session, logger, machine, disk, controller, share,
               filesystem, resourcegroup, address, comments, size, boot,
               snapshot, rename_to, **kw):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        dbdisk = Disk.get_unique(session, device_name=disk, machine=dbmachine,
                                 compel=True)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))

        if rename_to:
            Disk.get_unique(session, device_name=rename_to, machine=dbmachine,
                            preclude=True)
            dbdisk.device_name = rename_to

        if comments is not None:
            dbdisk.comments = comments

        if size is not None:
            dbdisk.capacity = size

        if controller:
            if controller not in controller_types:
                raise ArgumentError("%s is not a valid controller type, use "
                                    "one of: %s." %
                                    (controller, ", ".join(controller_types)))
            dbdisk.controller_type = controller

        if boot is not None:
            dbdisk.bootable = boot
            # There should be just one boot disk. We may need to re-think this
            # if we want to model software RAID in the database.
            for other_disk in dbmachine.disks:
                if other_disk == dbdisk:
                    continue
                if boot and other_disk.bootable:
                    other_disk.bootable = False

        if address:
            # TODO: do we really care? Bus address makes sense for physical
            # disks as well, even if we cannot use that information today.
            if not isinstance(dbdisk, VirtualDisk):
                raise ArgumentError("Bus address can only be set for virtual "
                                    "disks.")
            dbdisk.address = address

        if snapshot is not None:
            if not isinstance(dbdisk, VirtualDisk):
                raise ArgumentError("Snapshot capability can only be set for "
                                    "virtual disks.")
            dbdisk.snapshotable = snapshot

        if share or filesystem:
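            # Detach the disk from its current backend store before
            # attaching it to the new one.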
            if isinstance(dbdisk, VirtualNasDisk):
                old_share = dbdisk.share
                old_share.disks.remove(dbdisk)
            elif isinstance(dbdisk, VirtualLocalDisk):
                old_fs = dbdisk.filesystem
                old_fs.disks.remove(dbdisk)
            else:
                raise ArgumentError("Disk {0!s} of {1:l} is not a virtual "
                                    "disk, changing the backend store is not "
                                    "possible.".format(dbdisk, dbmachine))

            if share:
                if not isinstance(dbdisk, VirtualNasDisk):
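                    # Switching the backend store also changes the disk's
                    # polymorphic class, so replace the old row with a copy
                    # of the new type.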
                    new_dbdisk = copy_virt_disk(session, VirtualNasDisk, dbdisk)
                    session.delete(dbdisk)
                    session.flush()
                    session.add(new_dbdisk)
                    dbdisk = new_dbdisk

                new_share = find_resource(Share,
                                          dbmachine.vm_container.holder.holder_object,
                                          resourcegroup, share)
                new_share.disks.append(dbdisk)

            if filesystem:
                if not isinstance(dbdisk, VirtualLocalDisk):
                    new_dbdisk = copy_virt_disk(session, VirtualLocalDisk, dbdisk)
                    session.delete(dbdisk)
                    session.flush()
                    session.add(new_dbdisk)
                    dbdisk = new_dbdisk

                new_fs = find_resource(Filesystem,
                                       dbmachine.vm_container.holder.holder_object,
                                       resourcegroup, filesystem)
                new_fs.disks.append(dbdisk)

        session.flush()

        plenaries.write()

        return
Example #23
0
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip, uri,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbmachine.host:
            # Using PlenaryHostData directly, to avoid warnings if the host has
            # not been configured yet
            plenaries.append(PlenaryHostData.get_plenary(dbmachine.host))

        if clearchassis:
            del dbmachine.chassis_slot[:]

        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
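            # --slot was given without --chassis: infer the chassis, but
            # only if the machine currently sits in exactly one.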
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
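            # If the new location conflicts with the current chassis and
            # neither --chassis nor --slot was given, drop the chassis
            # association instead of failing.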
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}."
                                            .format(dblocation, dbslot.chassis,
                                                    dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            dbmachine.location = dblocation

            if dbmachine.host:
                for vm in dbmachine.host.virtual_machines:
                    plenaries.append(Plenary.get_plenary(vm))
                    vm.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if not dbmodel.model_type.isMachineType():
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.model_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.model_type != dbmachine.model.model_type and \
               (dbmodel.model_type.isVirtualMachineType() or
                dbmachine.model.model_type.isVirtualMachineType()):
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.model_type,
                                     dbmodel.model_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, logger, dbmachine, ip)

        if uri and not dbmachine.model.model_type.isVirtualAppliance():
            raise ArgumentError("URI can only be set for virtual appliances, "
                                "but the model's type is %s." %
                                dbmachine.model.model_type)

        if uri:
            dbmachine.uri = uri

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            if self.get_metacluster(new_holder) != self.get_metacluster(old_holder) \
               and not allow_metacluster_change:
                raise ArgumentError("Current {0:l} does not match "
                                    "new {1:l}."
                                    .format(self.get_metacluster(old_holder),
                                            self.get_metacluster(new_holder)))

            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if isinstance(dbdisk, VirtualNasDisk):
                    old_share = dbdisk.share
                    if isinstance(old_share.holder, BundleResource):
                        resourcegroup = old_share.holder.resourcegroup.name
                    else:
                        resourcegroup = None

                    new_share = find_resource(Share, new_holder, resourcegroup,
                                              old_share.name,
                                              error=ArgumentError)

                    # If the shares are registered at the metacluster level and both
                    # clusters are in the same metacluster, then there will be no
                    # real change here
                    if new_share != old_share:
                        old_share.disks.remove(dbdisk)
                        new_share.disks.append(dbdisk)

                if isinstance(dbdisk, VirtualLocalDisk):
                    old_filesystem = dbdisk.filesystem

                    new_filesystem = find_resource(Filesystem, new_holder, None,
                                                   old_filesystem.name,
                                                   error=ArgumentError)

                    if new_filesystem != old_filesystem:
                        old_filesystem.disks.remove(dbdisk)
                        new_filesystem.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                # vmhost
                dbmachine.location = new_holder.hardware_entity.location

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.

        with plenaries.get_key():
            plenaries.stash()
            try:
                plenaries.write(locked=True)

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
            except:
                plenaries.restore_stash()
                raise

        return